Don't fear the data

sweet holy graphing in sas (2016-02-03)

http://robslink.com/SAS/Home.htm

working w/ microdata, recoding race (2015-07-10)

*This is a big document to recode race and ethnicity;
*the recoded version has far fewer categories;
*it also includes a way to subset by state;

PROC IMPORT OUT= WORK.recode_race
DATAFILE= "\\tsclient\Y\recoding_race_in_pumas_2000\recode_race.csv"
DBMS=CSV REPLACE;
GETNAMES=YES;
DATAROW=2;
RUN;

*make pumas 5 digits - keep leading zeros;
DATA file_name2;
SET recode_race;
PUMA = PUT(US00A_PUMA, z5.);
RUN;

*make statefip 2 digits (keep a leading zero);
options obs=max;
DATA file_name2;
SET file_name2;
FIP = PUT(US00A_STATEFIP, z2.);
RUN;

*now I want to turn my NIUs (9999999) and (9999998) into nulls;
options obs=max;
data file_name2;
set file_name2;
if inctot = 9999999 then inctot =.;
if inctot = 9999998 then inctot =.;
run;

*rename puma to puma1 to stay clean;
options obs=max;
data file_name2;
set file_name2;
rename PUMA=PUMA1;
run;

*concatenate fip+puma1 to make puma;
options obs=max;
data file_name2;
set file_name2;
PUMA=catt(fip,puma1);
run;
*make sure puma is 7 characters, then drop puma1;
*note: PUMA is now a character variable, so the numeric z7. format does not apply to it;
*fip (z2.) plus puma1 (z5.) already gives a 7-character string, so no extra padding step is needed;
options obs=max;
DATA file_name2;
SET file_name2;
drop puma1;
RUN;
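A quick illustration of the character-versus-numeric issue noted above (a minimal sketch; puma_num and puma_char are made-up variables, not part of the extract): the z7. format pads numbers, while a character PUMA just needs its length checked.

*minimal sketch with hypothetical variables, not part of the IPUMS extract;
data _null_;
    puma_num  = 602401;                 *numeric: pad with PUT and the z7. format;
    puma_char = put(puma_num, z7.);     *yields the 7-character string '0602401';
    len       = lengthn(puma_char);     *character: just check the length;
    put puma_char= len=;
run;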
*I want to create a table of means at the PUMA level;
*first I want formats so that sex, edattan, and race print with readable labels;

proc format;
value rac
10='White'
20='Black'
21='Black African'
22='Black Caribbean'
23='Afro-Ecuadorian'
24='Other Black'
30='Indigenous'
31='American Indian'
40='Asian'
41='Chinese'
42='Japanese'
43='Korean'
44='Vietnamese'
45='Filipino'
46='Indian'
47='Pakistani'
48='Bangladeshi'
49='Other Asian'
50='Mixed race'
51='Brown (Brazil)'
52='Mestizo (Indigenous and White)'
53='Mulatto (Black and White) (Ecuador)'
54='Coloured (South Africa)'
55='Two or more races'
60='Other'
61='Montubio (Ecuador)'
99='Unknown';

value educ
0='NIU (not in universe)'
1='Less than primary completed'
2='Primary completed'
3='Secondary completed'
4='University completed'
9='Unknown';

value sx
9='Unknown'
2='Female'
1='Male';
run;
proc format;
value fips
01='Alabama'
02='Alaska'
04='Arizona'
05='Arkansas'
06='California'
08='Colorado'
09='Connecticut'
10='Delaware'
11='District of Columbia'
12='Florida'
13='Georgia'
15='Hawaii'
16='Idaho'
17='Illinois'
18='Indiana'
19='Iowa'
20='Kansas'
21='Kentucky'
22='Louisiana'
23='Maine'
24='Maryland'
25='Massachusetts'
26='Michigan'
27='Minnesota'
28='Mississippi'
29='Missouri'
30='Montana'
31='Nebraska'
32='Nevada'
33='New Hampshire'
34='New Jersey'
35='New Mexico'
36='New York'
37='North Carolina'
38='North Dakota'
39='Ohio'
40='Oklahoma'
41='Oregon'
42='Pennsylvania'
44='Rhode Island'
45='South Carolina'
46='South Dakota'
47='Tennessee'
48='Texas'
49='Utah'
50='Vermont'
51='Virginia'
53='Washington'
54='West Virginia'
55='Wisconsin'
56='Wyoming';
run;
proc format;
value HISP
000='Not hispanic'
99='Hispanic'
100='Mexican'
101='Mexican, n.e.c.'
102='Mexican American'
103='Mexicano/Mexicana'
104='Chicano/Chicana'
105='La Raza'
106='Mexican American Indian'
107='Mexico'
200='Puerto Rican'
300='Cuban'
400='Other Spanish, 1980'
410='Central/South American, 1970'
411='Costa Rican'
412='Guatemalan'
413='Honduran'
414='Nicaraguan'
415='Panamanian'
416='Salvadoran'
417='Central American'
418='Central American Indian'
419='Canal Zone'
420='Argentinean'
421='Bolivian'
422='Chilean'
423='Colombian'
424='Ecuadorian'
425='Paraguayan'
426='Peruvian'
427='Uruguayan'
428='Venezuelan'
429='South American Indian'
430='Criollo'
431='South American'
440='Other Spanish, 1970'
450='Spaniard'
451='Andalusian'
452='Asturian'
453='Castillian'
454='Catalonian'
455='Balearic Islander'
456='Gallego'
457='Valencian'
458='Canarian'
459='Spanish Basque'
460='Dominican'
465='Latin American'
470='Hispanic'
480='Spanish'
490='Californio'
491='Tejano'
492='Nuevo Mexicano'
493='Spanish American'
494='Spanish American Indian'
495='Meso American Indian'
496='Mestizo'
497='Other Spanish, Hispanic, Latino'
498='Not specified (FOSDIC)'
499='Not classified'
999='Not reported';
run;
*make a data set for Minnesota 2000 only;
data raceMN;
set work.file_name2;
if fip = '27';
run;

*write the 2000 data for MN to a csv;
proc export data=work.raceMN
outfile="\\tsclient\Y\recoding_race_in_pumas_2000\MNrace.csv"
dbms=csv
replace;
run;

*lose some unnecessary variables;
data Racemn;
set work.Racemn;
drop wtper sample cntry resident marstd edattand us00A_puma;
run;

*write the trimmed 2000 data for MN to a csv (overwrites the file above);
proc export data=work.raceMN
outfile="\\tsclient\Y\recoding_race_in_pumas_2000\MNrace.csv"
dbms=csv
replace;
run;
*make a data set for California 2000 only;
data raceCA;
set work.file_name2;
if fip = '06';
run;

*lose some unnecessary variables;
data Raceca;
set work.Raceca;
drop wtper sample cntry resident marstd edattand us00A_puma;
run;

*write the 2000 data for CA to a csv;
proc export data=work.raceCA
outfile="\\tsclient\Y\recoding_race_in_pumas_2000\CArace.csv"
dbms=csv
replace;
run;

*make a data set for the Twin Cities 2000 only;
*PUMA is a character variable here, so the codes are quoted;
data twincities2010;
set work.Racemn;
drop US00A_STATEFIP;
if puma in ('2701301' '2701302' '2701303' '2701401' '2701402' '2701403' '2701404'
            '2701405' '2701406' '2701501' '2701502' '2701601' '2701602');
run;
*recode fip to numeric (so the fips. format can be applied in proc tabulate);
options obs = max;
data Twincities2010;
set Twincities2010;
newfip = input(fip,2.);
run;

*make a new variable hisp2 that combines the detailed hispanic categories;
data twincities2010;
set twincities2010;
hisp2 = 0;
if (hispan >= 100) and (hispan <= 497) then hisp2 = 99;
run;

*make a new variable race2 that combines the detailed race categories;
data twincities2010;
set twincities2010;
race2 = 60;
if (race = 10) then race2 = 10;
if (race >= 20) and (race <= 24) then race2 = 20;
if (race = 30) or (race = 31) then race2 = 30;
if (race >= 40) and (race <= 49) then race2 = 40;
if (race >= 50) and (race <= 55) then race2 = 55;
run;
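One way to sanity-check the recodes before tabulating (a minimal sketch, assuming the race, race2, hispan, and hisp2 variables created above) is to cross-tabulate the old codes against the new ones:

*minimal check of the recodes above -- every original code should map to exactly one combined code;
proc freq data=twincities2010;
tables race*race2 hispan*hisp2 / list missing;
run;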
*tabulate twin cities, and export;
*this includes the creation of a nifty table that will be exported to excel;
*you can skip this if you just want to write a csv for export;

ods tagsets.excelxp
file="\\tsclient\Y\recoding_race_in_pumas_2000\TwinCitiesRace.xls"
style = minimal
options (orientation = 'landscape'
fittopage = 'yes'
pages_fitwidth = '1'
pages_fitheight = '100');

*now the proc tabulate code;
options obs = max;
proc tabulate data = Twincities2010;
class newfip puma edattan race2 sex hisp2;
var inctot;
table newfip = 'state' * puma = 'PUMA area' , edattan='Edattan' * (rowpctn) race2='race' * (rowpctn) sex ='sex' * (rowpctn) hisp2 = 'hispanic' * (rowpctn) inctot = 'income' * (mean);
format newfip fips. sex sx. hisp2 HISP. edattan educ. race2 rac.;
run;

ods tagsets.excelxp close;
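If you just want a csv of the PUMA-level means instead of the formatted Excel table, one option (a minimal sketch, assuming the same Twincities2010 data set and output folder; the puma_means data set and file name are made up) is PROC MEANS with an OUTPUT statement followed by PROC EXPORT:

*alternative: write PUMA-level mean income to a csv instead of the excelxp table;
*the output data set and file name below are just examples;
proc means data=Twincities2010 noprint;
class puma;
var inctot;
output out=puma_means mean=mean_inctot;
run;

proc export data=work.puma_means
outfile="\\tsclient\Y\recoding_race_in_pumas_2000\puma_means.csv"
dbms=csv
replace;
run;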
Working with MicroData, Creating Pumas (2015-07-08)

*upload your .csv file that you generated as an ipums extract;
*in ipums you can select for a state - this code assumes that you have done just that;
*this code builds a big table of all pumas for whatever csv you start with;
*check your leading zeros if you open the .csv in excel;
*remember to include statefip in your download;

PROC IMPORT OUT= WORK.file_name1
DATAFILE= "filepath.csv"
DBMS=CSV REPLACE;
GETNAMES=YES;
DATAROW=2;
RUN;
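Before any recoding, it can help to confirm how the import typed each column, since leading zeros disappear if a code came in as numeric; a quick check (assuming the import above succeeded):

*quick check that the import typed each variable the way you expect;
proc contents data=work.file_name1;
run;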
<br />
*make pumas 5 digits- keep leading zeros;<br />
DATA file_name2;<br />
SET file_name1;<br />
PUMA = PUT(US00A_PUMA, z5.);<br />
RUN;<br />
<br />
*make statefip 2 digits (keep a leading zero);<br />
options obs=max;<br />
DATA file_name2;<br />
SET file_name2;<br />
FIP = PUT(US00A_STATEFIP, z2.);<br />
RUN;<br />
<br />
*now i want to turn my NIU's (9999999) and (9999998) into nulls;<br />
*this is specific to my data cet, but shows how to make a value a null;<br />
options obs=max;<br />
data file_name2;<br />
set file_name2;<br />
if inctot = 9999999 then inctot =.;<br />
if inctot = 9999999 then inctot =.;<br />
run;<br />
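If several variables share the same NIU codes, an ARRAY loop saves repeating the IF statements. A minimal sketch; the variable list (inctot incwage) is only an example, substitute the numeric variables in your own extract:

*sketch: set common NIU codes to missing across several numeric variables at once;
*the variable list is an example -- substitute your own;
data file_name2;
set file_name2;
array incs {*} inctot incwage;
do i = 1 to dim(incs);
    if incs{i} in (9999998, 9999999) then incs{i} = .;
end;
drop i;
run;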
*rename puma to puma1 to stay clean;
*rename a variable;
options obs=max;
data file_name2;
set file_name2;
rename PUMA=PUMA1;
run;

*concatenate fip+puma1 to make puma;
options obs=max;
data file_name2;
set file_name2;
PUMA=catt(fip,puma1);
run;
*make sure puma is 7 characters, then drop puma1 and some other variables;
*note: PUMA is now a character variable, so the numeric z7. format does not apply to it;
*fip (z2.) plus puma1 (z5.) already gives a 7-character string, so no extra padding step is needed;
options obs=max;
DATA file_name2;
SET file_name2;
drop puma1 geolev1;
RUN;
*I want to create a table of means at the PUMA level;
*first I want formats so that sex, edattan, and race print with readable labels;

proc format;
value rac
10='White'
20='Black'
21='Black African'
22='Black Caribbean'
23='Afro-Ecuadorian'
24='Other Black'
30='Indigenous'
31='American Indian'
40='Asian'
41='Chinese'
42='Japanese'
43='Korean'
44='Vietnamese'
45='Filipino'
46='Indian'
47='Pakistani'
48='Bangladeshi'
49='Other Asian'
50='Mixed race'
51='Brown (Brazil)'
52='Mestizo (Indigenous and White)'
53='Mulatto (Black and White) (Ecuador)'
54='Coloured (South Africa)'
55='Two or more races'
60='Other'
61='Montubio (Ecuador)'
99='Unknown';

value educ
0='NIU (not in universe)'
1='Less than primary completed'
2='Primary completed'
3='Secondary completed'
4='University completed'
9='Unknown';

value sx
9='Unknown'
2='Female'
1='Male';
run;
proc format;
value fips
01='Alabama'
02='Alaska'
04='Arizona'
05='Arkansas'
06='California'
08='Colorado'
09='Connecticut'
10='Delaware'
11='District of Columbia'
12='Florida'
13='Georgia'
15='Hawaii'
16='Idaho'
17='Illinois'
18='Indiana'
19='Iowa'
20='Kansas'
21='Kentucky'
22='Louisiana'
23='Maine'
24='Maryland'
25='Massachusetts'
26='Michigan'
27='Minnesota'
28='Mississippi'
29='Missouri'
30='Montana'
31='Nebraska'
32='Nevada'
33='New Hampshire'
34='New Jersey'
35='New Mexico'
36='New York'
37='North Carolina'
38='North Dakota'
39='Ohio'
40='Oklahoma'
41='Oregon'
42='Pennsylvania'
44='Rhode Island'
45='South Carolina'
46='South Dakota'
47='Tennessee'
48='Texas'
49='Utah'
50='Vermont'
51='Virginia'
53='Washington'
54='West Virginia'
55='Wisconsin'
56='Wyoming';
run;
*recode fip to numeric (so the fips. format can be applied in proc tabulate);
data file_name2;
set file_name2;
newfip = input(fip,2.);
run;
*this section exports a nifty formatted table to excel;
*you can skip this if you just want to write a csv for export;

ods tagsets.excelxp
file="\\tsclient\Y\ipumsCodap\Ipums_and_Pumas\export_name.xls"
style = minimal
options (orientation = 'landscape'
fittopage = 'yes'
pages_fitwidth = '1'
pages_fitheight = '100');

*now the proc tabulate code;
*note: obs = 100 limits the tabulation to the first 100 observations (handy for testing);
*switch to options obs = max to tabulate the full file;
options obs = 100;
proc tabulate data = file_name2;
class newfip puma edattan raceus sex;
var inctot;
table newfip = 'state' * puma = 'PUMA area' , edattan='Edattan' * (n rowpctn) raceus='race' sex ='sex' inctot = 'income' * (mean);
format newfip fips. sex sx. edattan educ. raceus rac.;
run;

ods tagsets.excelxp close;
*lose some unnecessary variables;
data file_name2;
set work.file_name2;
drop wtper sample cntry resident marstd edattand us00A_puma;
run;

*make a data set for BERKELEY 2000 only;
*PUMA is a character variable here, so the code is quoted;
data BERKDAT00;
set work.file_name2;
drop US00A_STATEFIP newfip;
if puma = '0602401';
run;

*write the 2000 data for BERKELEY to a csv;
proc export data=work.BERKDAT00
outfile="\\tsclient\Y\ipumsCodap\Ipums_and_Pumas\berkeley_puma_ca_00.csv"
dbms=csv
replace;
run;
*make a data set for MARIN 2000 only;
data MARINDAT00;
set work.file_name2;
drop US00A_STATEFIP newfip;
if puma in ('0601201' '0601202');
run;

*make a data set for OAKLAND (OTOWN) 2000 only;
data OAKLANDDAT00;
set work.file_name2;
if puma in ('0602402' '0602403' '0602404');
drop US00A_STATEFIP newfip;
run;

*make a data set for part of Tulare County;
data tulare_part00;
set work.file_name2;
if puma = '0603503';
drop US00A_STATEFIP newfip;
run;

*make a data set for all of Tulare County;
data tulare_all00;
set work.file_name2;
if puma in ('0603501' '0603502' '0603503');
drop US00A_STATEFIP newfip;
run;
*write the 2000 data for tulare to a csv;
proc export data=work.tulare_part00
outfile="\\tsclient\Y\ipumsCodap\Ipums_and_Pumas\tulare_part_00.csv"
dbms=csv
replace;
run;

*write the 2000 data for tulare_all to a csv;
proc export data=work.tulare_all00
outfile="\\tsclient\Y\ipumsCodap\Ipums_and_Pumas\tulare_all_00.csv"
dbms=csv
replace;
run;

*write the 2000 data for OAKLAND to a csv;
proc export data=work.OAKLANDDAT00
outfile="\\tsclient\Y\ipumsCodap\Ipums_and_Pumas\otown_puma_ca_00.csv"
dbms=csv
replace;
run;

*write the 2000 data for MARIN to a csv;
proc export data=work.MARINDAT00
outfile="\\tsclient\Y\ipumsCodap\Ipums_and_Pumas\marin_puma_ca_00.csv"
dbms=csv
replace;
run;
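Since the PROC EXPORT blocks above only differ in the data set and file name, a small macro can cut the repetition. A minimal sketch; %export_csv is a made-up name, and the example call just reproduces the MARIN export:

*sketch: wrap the repeated proc export boilerplate in a small macro;
%macro export_csv(ds=, path=);
proc export data=&ds.
outfile="&path."
dbms=csv
replace;
run;
%mend export_csv;

*example call -- same output as the MARIN export above;
%export_csv(ds=work.MARINDAT00, path=\\tsclient\Y\ipumsCodap\Ipums_and_Pumas\marin_puma_ca_00.csv);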
SAS CODE - "DROP VARIABLE" (2015-06-24)

*this code snippet takes data demos1 and drops 3 unnecessary variables from it;
*obs = max is a safe thing to put in if you've been working with a smaller obs setting;
options obs = max;
data demos1;
set work.demos1;
drop wtper sample cntry;
run;

SAS CODE "Simple Random Sample and Export" (2015-06-24)

*This piece of SAS imports the really long and wide file;
<span style="font-family: Arial, Helvetica, sans-serif;">* terrapop_extract_1016.csv, and randomly draws 5000 households;</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;">* out of it. In this case, a household is determined by a shared serial;</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;">* number -- everyone in the household has the same serial number,;</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;">* so 5000 households exports as approximately 11000 people.;</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;">*seed is part of the randomization process, srs is simple random sample;</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;"><br /></span>
<span style="font-family: Arial, Helvetica, sans-serif;">PROC IMPORT OUT= WORK.demos </span><br />
<span style="font-family: Arial, Helvetica, sans-serif;"> DATAFILE= "filepath.csv" </span><br />
<span style="font-family: Arial, Helvetica, sans-serif;"> DBMS=CSV REPLACE;</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;"> GETNAMES=YES;</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;"> DATAROW=2; </span><br />
<span style="font-family: Arial, Helvetica, sans-serif;">RUN;</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;"><br /></span>
<span style="font-family: Arial, Helvetica, sans-serif;"> proc surveyselect data= demos out=demos1 method=srs sampsize=5000 seed=377183 noprint;</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;">samplingunit serial;</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;"> run;</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;">*this is my 5000 person random sample;</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;">options obs=max;</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;">proc export data=work.demos1</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;"> outfile='Y:\demos1.csv'</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;"> dbms=csv</span><br />
<span style="font-family: Arial, Helvetica, sans-serif;"> replace;run;</span><br />
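To confirm the draw worked as intended (5000 distinct households, roughly 11000 people), a quick count on the sampled data; a minimal sketch, assuming the serial variable from the extract:

*quick check: number of sampled households (distinct serial) and people;
proc sql;
select count(distinct serial) as n_households,
       count(*) as n_people
from work.demos1;
quit;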
<span style="font-family: Arial, Helvetica, sans-serif;"><br /></span>
<span style="font-family: Arial, Helvetica, sans-serif;"><br /></span>
<span style="font-family: Arial, Helvetica, sans-serif;">Here is a little snapshot of the data...</span><br />
<div class="separator" style="clear: both; text-align: center;">
<a href="http://1.bp.blogspot.com/-noZAbz30mSg/VYrHBHi9w1I/AAAAAAAABN0/xCZo3R4DxZA/s1600/output1.png" imageanchor="1" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="80" src="http://1.bp.blogspot.com/-noZAbz30mSg/VYrHBHi9w1I/AAAAAAAABN0/xCZo3R4DxZA/s640/output1.png" width="640" /></a></div>
<span style="font-family: Arial, Helvetica, sans-serif;"><br /></span>
<span style="font-family: Arial, Helvetica, sans-serif;"><br /></span>
<br />
<table border="0" cellpadding="0" cellspacing="0" style="border-collapse: collapse; width: 2496px;">
<colgroup><col span="39" style="width: 48pt;" width="64"></col>
</colgroup><tbody>
<tr height="20" style="height: 15.0pt;">
<td height="20" style="height: 15.0pt; width: 48pt;" width="64"></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td><td style="width: 48pt;" width="64"><br /></td></tr>
<tr height="20" style="height: 15.0pt;"><td align="right" height="20" style="height: 15.0pt;"><br /></td><td align="right"><br /></td><td align="right"><br /></td><td align="right"><br /></td><td align="right"><br /></td><td align="right"><br /></td><td align="right"><br /></td><td align="right"><br /></td><td align="right"><br /></td><td align="right"><br /></td><td align="right"><br /></td><td align="right">44135083</td>
<td align="right">201896</td>
<td align="right">6</td>
<td align="right">840001</td>
<td align="right">840001</td>
<td align="right">0</td>
<td align="right">2</td>
<td align="right">999</td>
<td align="right">12000</td>
<td align="right">20</td>
<td align="right">350</td>
<td align="right">11</td>
<td align="right">1</td>
<td align="right">2.65E+08</td>
<td align="right">4583402</td>
<td align="right">99</td>
<td align="right">110</td>
<td align="right">1</td>
<td align="right">2</td>
<td align="right">2</td>
<td align="right">100</td>
<td align="right">5</td>
<td align="right">8407</td>
<td align="right">2</td>
<td align="right">45</td>
<td align="right">0</td>
<td align="right">11900</td>
<td align="right">2010</td>
</tr>
</tbody></table>
Using Data in the classroom (2014-06-25)

Social Explorer has varied license levels:
http://www.socialexplorer.com/