id | text | dataset_id
---|---|---|
3379739
|
import django
from myapp.models import Author, Book, Course, Student
# 5.
# a. List all the Books in the db
Book.objects.all()
# b. List all the Authors in the db
Author.objects.all()
# c. List all the Courses in the db
Course.objects.all()
# 6. Write queries to do the following.
# a. List all Authors whose first name is ‘John’
Author.objects.filter(firstname='John')
# b. List all Books that have an author whose first name is ‘John’
Book.objects.filter(author__firstname='John')
# c. List all Books with the word ‘Networks’ in their title.
Book.objects.filter(title__contains='Networks')
# d. List all Books that have the word ‘Networks’ in their title and are used in a course
Book.objects.filter(title__contains='Networks', course__isnull=False)
# e. List all the Courses that use the book 'Python Programming'
Course.objects.filter(textbook__title__exact='Python Programming')
# f. List the Authors born after 1978
Author.objects.filter(birthdate__year__gt=1978)
# g. List the Authors born in January
Author.objects.filter(birthdate__month=1)
# h. List the Courses that use a book written by <NAME>
Course.objects.filter(textbook__author__firstname='Alan', textbook__author__lastname='Jones')
# i. List the Books currently in stock
Book.objects.filter(in_stock=True)
# j. List the Books written by Mary Hall
Book.objects.filter(author__firstname='Mary', author__lastname='Hall')
# k. Get the first name of the Author of the textbook used in course 567.
Course.objects.get(course_no=567).textbook.author.firstname
# l. List all students registered in course 567
Course.objects.get(course_no=567).students.all()
# m. List all the courses that Josh is registered in.
Student.objects.get(first_name='Josh', last_name='James').course_set.all()
# n. List the textbook used in the course that Luis is registered in
Student.objects.get(first_name='Luis').course_set.first().textbook
# o. List all students with last name ‘James’.
Student.objects.filter(last_name='James')
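# For reference, a minimal sketch of the models these queries assume.
# Field names are inferred from the lookups above; the exact definitions
# are not part of the original exercise, so treat this as an assumption.
#
# from django.db import models
#
# class Author(models.Model):
#     firstname = models.CharField(max_length=100)
#     lastname = models.CharField(max_length=100)
#     birthdate = models.DateField()
#
# class Book(models.Model):
#     title = models.CharField(max_length=200)
#     author = models.ForeignKey(Author, on_delete=models.CASCADE)
#     in_stock = models.BooleanField(default=True)
#
# class Student(models.Model):
#     first_name = models.CharField(max_length=100)
#     last_name = models.CharField(max_length=100)
#
# class Course(models.Model):
#     course_no = models.IntegerField(unique=True)
#     textbook = models.ForeignKey(Book, on_delete=models.CASCADE)
#     students = models.ManyToManyField(Student)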
|
StarcoderdataPython
|
3233127
|
import validators
import datetime
def allowsNone(f):
    # Decorator: a missing (None) element counts as valid, since most
    # feed fields are optional.
    def wrapper(element):
        if element is None:
            return True
        return f(element)
    return wrapper

def requires_keys(*keys):
    # Decorator factory: the element must be a dict containing every
    # listed key before the wrapped validator is applied.
    def decorator(f):
        def wrapper(element):
            if not isinstance(element, dict):
                return False
            for key in keys:
                if key not in element:
                    return False
            return f(element)
        return wrapper
    return decorator
@allowsNone
def validDefault(element) -> bool:
    return True

def validRequired(element) -> bool:
    return element is not None and element != ""
@allowsNone
def validUrl(element) -> bool:
    # validators.url returns a falsy ValidationFailure object on error,
    # so coerce the result to a real bool to match the annotation.
    return bool(validators.url(element))

@allowsNone
def validEmail(element) -> bool:
    return bool(validators.email(element))

@allowsNone
@requires_keys('url', 'length', 'type')
def validEnclosure(element) -> bool:
    url = element['url']
    length = element['length']
    return bool(validators.url(url)) and validInt(length)

@allowsNone
def validDate(element) -> bool:
    return isinstance(element, datetime.datetime)

@allowsNone
@requires_keys('url', 'text')
def validSource(element) -> bool:
    url = element['url']
    return bool(validators.url(url))
@allowsNone
def validLanguage(element) -> bool:
    return True
@allowsNone
def validCategory(element) -> bool:
    if isinstance(element, dict):
        return "text" in element
    # a bare (non-dict) category value is accepted as-is
    return True
@allowsNone
def validCategories(element) -> bool:
    return all(validCategory(cat) for cat in element)

@allowsNone
@requires_keys('domain', 'port', 'path', 'registerProcedure', 'protocol')
def validCloud(element) -> bool:
    port = element['port']
    return validInt(port)
@allowsNone
@requires_keys('url', 'title', 'link')
def validImage(element) -> bool:
    # RSS allows a maximum image width of 144 and height of 400;
    # both dimensions are optional.
    url = element['url']
    width, height = element.get('width'), element.get('height')
    wf, hf = True, True
    if width is not None:
        wf = validInt(width) and (int(width) <= 144)
    if height is not None:
        hf = validInt(height) and (int(height) <= 400)
    return bool(validators.url(url)) and wf and hf

@allowsNone
@requires_keys('title', 'description', 'name', 'link')
def validInput(element) -> bool:
    return True
@allowsNone
def validInt(element) -> bool:
    # Accept real ints or digit-only strings; anything else is invalid.
    return isinstance(element, int) or (isinstance(element, str) and element.isdigit())
@allowsNone
def validGuid(element) -> bool:
    if isinstance(element, dict):
        return "text" in element
    return True

@allowsNone
def validSkipHours(element) -> bool:
    return True
@allowsNone
def validSkipDays(element) -> bool:
    return True
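
# A quick usage sketch (illustrative only; the sample values below are
# made up and not part of the original module). Every validator treats
# None as valid, so optional feed fields pass through untouched.
if __name__ == "__main__":
    print(validUrl("https://example.com"))   # True
    print(validUrl(None))                    # True: optional field
    print(validEnclosure({"url": "https://example.com/a.mp3",
                          "length": "123", "type": "audio/mpeg"}))  # True
    print(validImage({"url": "https://example.com/i.png", "title": "t",
                      "link": "https://example.com", "width": 200}))  # False: width > 144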
|
StarcoderdataPython
|
3294484
|
# Legacy Django hook (deprecated in 3.2, removed in 4.1): names the
# AppConfig subclass Django should load for this app by default.
default_app_config = 'greenbudget.app.fringe.apps.FringeConfig'
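# For context, the referenced apps.py would look roughly like this
# (an assumption; the actual FringeConfig is not shown in this sample):
#
# from django.apps import AppConfig
#
# class FringeConfig(AppConfig):
#     name = 'greenbudget.app.fringe'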
|
StarcoderdataPython
|
1703017
|
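# word2int_it maps Italian words to integer ids; low ids correspond to
# frequent words, and the vocabulary carries special tokens such as
# <PAD> (0), <OUT> (1), <SOS> (2), <EMP> (4), <POS> (5) and <NEG> (6).
# A minimal encoding sketch, under the assumption that unknown words map
# to <OUT> and sequences are padded with <PAD>; the helper name
# `encode_it` is hypothetical and not part of the original file:
def encode_it(words, max_len=20):
    # Look each word up, falling back to the out-of-vocabulary id.
    ids = [word2int_it.get(w, word2int_it["<OUT>"]) for w in words]
    # Truncate, then right-pad with <PAD> up to max_len.
    return ids[:max_len] + [word2int_it["<PAD>"]] * max(0, max_len - len(ids))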
word2int_it = {
"Island":4228,
"indossare":2012,
"Rossa":3722,
"Ludovico":4354,
"radicale":2361,
"vetta":3213,
"iniziale":1473,
"incidente":1343,
"finire":515,
"ettaro":4600,
"sotterraneo":3688,
"gas":1643,
"pregare":3761,
"mln":2096,
"estremità":3273,
"maggior":550,
"Germania":776,
"avvolgere":4339,
"Cile":4246,
"parecchio":2210,
"intervista":1699,
"Santa":630,
"Emanuele":2968,
"rotazione":3339,
"frutta":4348,
"durata":1720,
"scuro":2638,
"salutare":4024,
"prima":177,
"M.":3862,
"giornata":1229,
"unico":273,
"provvisorio":4359,
"Ben":4591,
"birra":4460,
"Company":4277,
"Benedetto":2909,
"protocollo":3881,
"avanzare":1858,
"processore":4453,
"parlamentare":2253,
"sequenza":1895,
"esistente":1957,
"secondo":144,
"citazione":3413,
"spesso":294,
"uccidere":479,
"vivo":1274,
"spessore":4076,
"Cesare":2018,
"Gabriele":3542,
"credere":531,
"River":4991,
"soggiorno":3754,
"Virginia":4700,
"vampiro":4362,
"desiderare":2356,
"solito":676,
"normanno":4815,
"desiderio":1918,
"conferma":3724,
"guerriero":2896,
"vapore":3449,
"lento":1728,
"ragazzo":345,
"fortunato":2795,
"truppa":1024,
"via":233,
"bolla":4380,
"solidarietà":3579,
"mercato":635,
"Mario":1441,
"Storia":2850,
"villaggio":1341,
"Pierre":4058,
"ancora":173,
"piegare":5016,
"linea":332,
"precedenza":2501,
"mondo":214,
"radiazione":3764,
"rivolta":2035,
"fondo":807,
"poesia":1446,
"spesa":1689,
"sospetto":2879,
"automobilistico":3966,
"argomento":1258,
"governativo":3615,
"promuovere":1316,
"celebrare":1725,
"vantare":2756,
"United":3174,
"peccato":1592,
"comandante":1738,
"estivo":2171,
"monumento":1537,
"diavolo":4913,
"erede":2029,
"rilascio":4915,
"collegamento":196,
"razzo":3855,
"funzionalità":4532,
"sommare":4902,
"permanente":3135,
"danza":2640,
"capoluogo":1984,
"visitatore":1550,
"simbolo":1129,
"immobile":4430,
"appunto":1217,
"poi":154,
"Charles":2243,
"Basilica":4172,
"bersaglio":3319,
"cintura":5002,
"abile":4014,
"sacerdote":2129,
"cervello":2996,
"connessione":3313,
"dividere":1271,
"arrivare":247,
"senza":185,
"Adriano":3440,
"consumatore":3623,
"ritrovamento":3638,
"libro":405,
"alpino":2808,
"sapere":254,
"tranquillo":2763,
"inverno":2105,
"Savoia":2981,
"santuario":3298,
"seno":4035,
"alimento":4803,
"forza":353,
"inoltre":259,
"contratto":1017,
"Ginevra":4396,
"supremo":4654,
"Jones":3459,
"documentazione":4344,
"tradurre":1810,
"incoraggiare":4438,
"biblioteca":2579,
"limitare":959,
"XII":2301,
"insetto":3460,
"nazista":2498,
"funzionario":3549,
"sbagliare":3078,
"proteggere":1423,
"nn":2835,
"recitare":1736,
"interazione":3831,
"Medio":5071,
"striscia":2762,
"poema":4038,
"corrente":1199,
"perchè":712,
"coscienza":2226,
"sposare":759,
"questo":117,
"dette":3386,
"accesso":1378,
"condanna":2857,
"costruzione":548,
"provino":4454,
"borghese":4030,
"eredità":2738,
"permanenza":4149,
"Costanzo":4664,
"industriale":1372,
"tabella":3257,
"maggiormente":2397,
"Street":3745,
"elaborazione":4179,
"scelta":680,
"aggiudicare":3316,
"sereno":4210,
"cittadinanza":3969,
"ancor":2624,
"grande":155,
"tutt'":1992,
"IV":1240,
"Bianca":3870,
"riflesso":4420,
"aeroporto":2013,
"oscuro":2869,
"ambientazione":4871,
"approvazione":3767,
"lupo":4203,
"Nato":2023,
"partecipare":393,
"cancellare":2668,
"indagare":2992,
"Ungheria":3187,
"italiano":164,
"alcuno":150,
"presente":279,
"sostanziale":4225,
"La7":2596,
"dormire":3283,
"compleanno":4743,
"arrivo":1152,
"preventivo":4742,
"metodo":1001,
"entusiasmo":4716,
"Thomas":2327,
"pacifico":4676,
"edificio":607,
"personalmente":3105,
"trattenere":4822,
"VII":2402,
"quale":147,
"allora":354,
"sintomo":3475,
"cellulare":2485,
"satellite":2788,
"consolidare":4129,
"legato":1093,
"qua":3952,
"ricco":919,
"American":3024,
"Lucia":3752,
"attesa":1873,
"estraneo":4804,
"eius":4492,
"rituale":4479,
"rito":2454,
"Siria":4279,
"simpatico":4075,
"High":4373,
"parametro":3136,
"odio":3740,
"flusso":2344,
"potenza":885,
"sinistro":2037,
"genovese":3897,
"amore":672,
"contrastare":3089,
"reggimento":4000,
"marittimo":4798,
"Africa":1455,
"Martino":3133,
"schierare":2429,
"alimentare":1493,
"autorità":1333,
"Piero":2799,
"Castello":2176,
"<PAD>":0,
"stesse":1677,
"viaggiare":2292,
"viceversa":4239,
"Discografia":2483,
"aprile":547,
"giustificare":3008,
"formalmente":4352,
"finanziario":1587,
"sforzare":3403,
"sfidare":3488,
"camera":1596,
"persona":239,
"parete":1610,
"visione":1427,
"calore":3049,
"assemblea":3103,
"Rosso":3861,
"completo":501,
"preciso":1227,
"schermo":2116,
"combustibile":4855,
"antenna":4912,
"Socialista":5084,
"calcolo":2636,
"comico":2698,
"Valentino":4960,
"demolire":4053,
"visivo":3900,
"Presidente":1230,
"israeliano":2416,
"inizialmente":923,
"fisica":3751,
"chiudere":845,
"aperto":1116,
"consecutivo":2892,
"Comics":3290,
"raggio":1727,
"cantare":1198,
"rafforzare":3393,
"mappa":3066,
"Inter":3278,
"presentare":268,
"celeste":2614,
"apportare":4502,
"Ugo":4287,
"indigeno":4176,
"luminoso":3224,
"povero":1530,
"sufficiente":1753,
"civili":4752,
"pelle":2062,
"scegliere":652,
"Antonio":817,
"Tom":2733,
"voi":1414,
"Teresa":3830,
"impiego":2100,
"nipote":1951,
"pop":2655,
"occupare":553,
"esatto":1644,
"eterno":3669,
"introdurre":884,
"salone":3639,
"ragione":873,
"cinese":1374,
"totalmente":2227,
"messaggio":1175,
"volontario":2285,
"domestico":3592,
"Alto":3493,
"risalire":704,
"impegno":1658,
"dio":2364,
"soffitto":4937,
"quadrato":2407,
"repertorio":3913,
"colore":602,
"imponente":3371,
"marchio":1931,
"proposito":1875,
"grazia":4366,
"Impero":1216,
"intero":457,
"scendere":1334,
"fiducia":2538,
"circolo":3201,
"arcivescovo":2926,
"etnico":3304,
"rilevare":2187,
"leggere":600,
"of":295,
"arbitro":4958,
"lancio":1839,
"inserire":749,
"partito":434,
"ovunque":3989,
"meridionale":1296,
"governare":2219,
"politico":270,
"dentro":1521,
"Lombardia":2780,
"gemello":3504,
"Oceano":4709,
"Silvio":3381,
"estensione":2369,
"casuale":4686,
"beneficio":3673,
"omicida":4784,
"comitato":2737,
"panchina":4414,
"match":2214,
"capello":2316,
"solo":142,
"catturare":1539,
"sperimentazione":4102,
"massa":1052,
"Simon":4100,
"esplorare":4461,
"capacità":802,
"controllo":559,
"uso":411,
"usare":237,
"passato":813,
"mattino":3716,
"illustre":3938,
"tv":892,
"rinunciare":2138,
"distante":3497,
"Park":3059,
"tipologia":2322,
"lineare":3234,
"Mondo":2043,
"Leone":2757,
"settimo":2840,
"punta":2103,
"egiziano":4578,
"ed":129,
"citare":837,
"costantemente":4091,
"dintorno":4680,
"function.mysql-connect":2529,
"pratico":1202,
"Repubblica":716,
"frammento":3004,
"cum":4734,
"tecnologia":1167,
"abbastanza":1314,
"fortemente":1764,
"procurare":3703,
"Lady":4627,
"miniera":3832,
"elettrico":1309,
"mercoledì":3599,
"ingegneria":5017,
"possibile":342,
"sè":3829,
"motore":461,
"istante":4504,
"regolarmente":4022,
"Amici":3125,
"Serie":1049,
"riflettere":2703,
"celebrazione":3864,
"provenire":2047,
"pittore":1722,
"traccia":914,
"logica":2686,
"guidare":815,
"centro":292,
"particella":2842,
"Islam":5040,
"bloccare":1893,
"molecola":3239,
"milione":456,
"trasporto":1058,
"duro":1278,
"Luciano":3694,
"arricchire":2943,
"gruppo":174,
"adolescente":4471,
"malattia":1169,
"Marco":960,
"sguardo":2944,
"assorbire":3079,
"gioco":396,
"concessione":3412,
"riscontrare":3399,
"rivoluzionario":2250,
"nativo":3378,
"portare":190,
"causa":300,
"campagna":785,
"ambientale":2207,
"calcio":781,
"resistere":3108,
"selvaggio":4185,
"Formula":541,
"determinante":5078,
"capitare":2303,
"efficiente":3865,
"Uefa":3874,
"Life":3923,
"suddivisione":4174,
"suddetto":4637,
"organizzare":763,
"dic":2154,
"comodo":4065,
"vent'":4399,
"giornalistico":4330,
"insistere":4722,
"sera":975,
"Isola":1246,
"Apollo":4612,
"atterraggio":4859,
"romano":490,
"laterale":1864,
"riunire":1730,
"nozze":3763,
"rotta":3060,
"sfruttare":1452,
"<OUT>":1,
"settanta":1930,
"fumo":4013,
"adeguato":3676,
"particolare":272,
"luce":599,
"rallentare":5046,
"autostrada":3555,
"determinazione":4671,
"credito":2493,
"ammontare":4782,
"spezzare":4305,
"esaurire":4765,
"agire":1782,
"debito":2648,
"precedentemente":2617,
"straordinario":1769,
"periferico":5041,
"feudale":5095,
"drammatico":1954,
"quello":125,
"ve":3617,
"cerchio":2930,
"secondario":2064,
"Andrea":1354,
"avere":113,
"coinvolgere":1306,
"militare":425,
"maschio":1744,
"Rio":3630,
"azienda":727,
"minore":846,
"impiegare":1413,
"bensì":3225,
"Brown":4198,
"narrativa":4695,
"contraddistinguere":4660,
"attività":366,
"Stefano":1673,
"editore":1724,
"procedura":2328,
"affermare":726,
"My":3130,
"combinare":3270,
"sviluppare":522,
"annuo":4141,
"destinare":1010,
"consenso":2409,
"oriente":2117,
"farmaco":2712,
"ordinamento":3525,
"tale":315,
"esso":711,
"business":5061,
"attentato":3015,
"Romano":2183,
"esplicito":3007,
"concetto":1140,
"scappare":2422,
"teorema":3544,
"suggestivo":4748,
"Via":2351,
"passione":1845,
"fuggire":1557,
"ponte":1328,
"progettazione":3407,
"umido":4424,
"amministrativo":1554,
"stavolta":3936,
"processione":4216,
"URL":2802,
"strategico":2288,
"umano":499,
"tenere":365,
"Pisa":2542,
"bit":3450,
"privo":1627,
"eventuale":1522,
"raccontare":822,
"musical":4837,
"Borgo":4777,
"ambasciatore":4096,
"ceramica":4309,
"concentrazione":3206,
"spirituale":2630,
"dimensione":665,
"spaziale":2431,
"festa":1174,
"caro":2031,
"patrono":4862,
"tonnellata":3311,
"chimico":2467,
"militante":3797,
"combattente":4876,
"chiusura":2266,
"generico":3560,
"Rossi":2245,
"dedicare":380,
"bandiera":2057,
"votare":1029,
"fauna":4139,
"metropolitano":3760,
"fama":2259,
"Luca":1322,
"patria":2380,
"esplosivo":4486,
"telespettatore":2254,
"longobardo":4662,
"formato":1632,
"dunque":780,
"divenire":299,
"otto":1387,
"bocca":2240,
"tradizionale":1040,
"De":592,
"Young":4834,
"allevamento":3748,
"ut":4663,
"avventura":1697,
"profitto":4364,
"avvertire":3043,
"medievale":2155,
"adattare":2263,
"Los":2120,
"seppur":3973,
"classificare":2182,
"Iva":3950,
"guerra":207,
"rubare":2541,
"aggiungere":717,
"eccellente":3786,
"alternativo":3569,
"strategia":2230,
"matrice":2849,
"conseguente":2573,
"accordare":4721,
"ciclo":1494,
"classifica":841,
"due":135,
"caldo":1729,
"intenso":1907,
"cristianesimo":3211,
"aereo":546,
"audio":3501,
"dicembre":526,
"colle":3858,
"Corea":3798,
"esperimento":2166,
"eccesso":4531,
"Fin":4821,
"guadagno":4771,
"ignoto":4828,
"bianco":656,
"disputa":3178,
"Reno":4869,
"matematica":3589,
"portata":3076,
"New":696,
"laser":5020,
"mirare":5009,
"cadere":966,
"sé":1136,
"antichità":3963,
"quanto":195,
"rispettare":2104,
"facile":1400,
"frate":4544,
"dotare":1183,
"minimo":1190,
"elettrone":4205,
"attore":571,
"estrarre":2269,
"successivo":213,
"giudizio":1813,
"Puglia":3683,
"debuttare":2267,
"rappresentanza":4566,
"artista":638,
"ballo":3262,
"sostegno":2091,
"It":3857,
"tutti":3899,
"complesso":690,
"partenza":1619,
"galleria":1228,
"concorrenza":3157,
"ieri":1377,
"XI":4116,
"realizzazione":1260,
"zero":2475,
"arresto":2504,
"comune":274,
"includere":824,
"pubblicazione":1252,
"avvenimento":2800,
"degno":3149,
"strage":3608,
"odiare":4489,
"scritto":2189,
"cioè":579,
"pagare":946,
"ostacolo":3816,
"affresco":1773,
"certezza":3728,
"lettera":855,
"odierno":2321,
"croce":2387,
"sardo":3844,
"line":566,
"sorpresa":2365,
"Papa":1766,
"separazione":3483,
"One":3086,
"stessi":893,
"poetico":3687,
"clero":4896,
"Federconsorzi":4795,
"Joe":3253,
"conduzione":4202,
"dire":151,
"dinastia":2290,
"canzone":449,
"difficilmente":4658,
"Lega":2235,
"sorta":1161,
"posto":293,
"You":2055,
"bellezza":2264,
"cinque":598,
"associazione":980,
"ulteriormente":2557,
"casa":216,
"leggenda":1762,
"formale":3139,
"differenza":724,
"sposa":4597,
"Stato":987,
"Accademia":1890,
"presiedere":4275,
"recupero":3146,
"assetto":4347,
"fresco":3254,
"illustrare":3517,
"Israele":1671,
"trascinare":3889,
"geometria":4422,
"romanico":4477,
"Red":3276,
"Salvatore":3260,
"John":792,
"nè":4533,
"creativo":3039,
"panno":3746,
"pagamento":2375,
"P.":3100,
"affondare":3811,
"sale":4565,
"attivo":868,
"battuta":3067,
"insomma":2011,
"serbatoio":3981,
"marito":1307,
"telefono":2984,
"Royal":2724,
"testo":578,
"scuola":471,
"insegna":4255,
"Maria":440,
"efficacia":3841,
"maggio":460,
"fondare":494,
"simpatia":5000,
"Guerra":2256,
"colonia":2192,
"agricolo":1884,
"batteria":1990,
"finora":3441,
"stato":312,
"Radio":2623,
"Dio":851,
"iscrizione":2499,
"evoluzione":1166,
"commentare":2001,
"Chris":3552,
"concludere":876,
"l'":633,
"retrocessione":5060,
"creare":304,
"diario":3943,
"presidenza":3223,
"sorella":1233,
"<EMP>":4,
"eventualmente":4747,
"cui":134,
"riforma":1605,
"rilasciare":1425,
"nessun":1450,
"approfittare":3261,
"questione":860,
"buono":563,
"Duomo":3626,
"figlio":200,
"assistere":1569,
"St.":4929,
"Rocca":4440,
"Blue":4326,
"politica":688,
"interessato":3424,
"partecipante":2436,
"indietro":2355,
"Luna":2841,
"esplosione":3195,
"peggio":3315,
"vigore":3374,
"Di":2477,
"MTV":4719,
"ingresso":1159,
"dimettere":4033,
"legione":4368,
"navata":2239,
"occidente":5074,
"cimitero":3186,
"distinzione":3768,
"bisognare":1256,
"supplied":754,
"'s":1038,
"qui":356,
"trasferimento":2556,
"pannello":3759,
"vite":2946,
"sopportare":3893,
"scorrere":2789,
"arrestare":1553,
"gusto":2144,
"attrarre":3839,
"Piacenza":4883,
"attendere":1926,
"bambino":529,
"Vergine":2852,
"persecuzione":4965,
"periferia":4188,
"freccia":4605,
"invitare":1406,
"Roger":4421,
"interruzione":5035,
"ora":204,
"potente":1053,
"aderire":2208,
"Sacro":3856,
"abitudine":2993,
"ex":616,
"YouTube":2350,
"pacchetto":3331,
"Ventura":4939,
"supporre":3341,
"stile":560,
"acquisto":2142,
"piazza":641,
"Mark":3092,
"custodire":3614,
"ideologia":4400,
"popolare":735,
"visualizzare":4648,
"imbarcare":4493,
"bravo":2167,
"terrorista":4398,
"composizione":1411,
"ombra":2598,
"sacrificio":3474,
"Marcello":3665,
"piccolo":227,
"Aquila":4367,
"fenomeno":1036,
"deposito":3011,
"lato":682,
"it":4560,
"artiglieria":2948,
"Esercito":3561,
"elicottero":3163,
"documentare":3312,
"padrone":2395,
"magistrato":3586,
"sezione":1115,
"principato":5053,
"generazione":1417,
"alfabeto":4603,
"stagione":288,
"time":2550,
"vela":4823,
"tensione":1848,
"sito":477,
"cifra":2251,
"ordinare":1398,
"naturalmente":2080,
"caduta":1944,
"frequentemente":4505,
"direttivo":4987,
"costituzione":2205,
"abbazia":3563,
"Fondazione":3907,
"singolare":2929,
"cantiere":3173,
"O'":3414,
"ridotto":2523,
"sette":1099,
"precipitare":3904,
"Taylor":4346,
"cittadino":551,
"portoghese":4708,
"vino":1821,
"tattico":3470,
"Madonna":1319,
"atmosfera":2070,
"sollevare":3003,
"gestire":1163,
"parallelo":1968,
"virus":3531,
"subito":536,
"liberare":1363,
"origine":400,
"letteralmente":2822,
"ideare":3017,
"timore":4534,
"ottobre":519,
"armato":1013,
"anno":128,
"liberale":3001,
"comando":1003,
"Olimpiadi":3876,
"barone":4564,
"impegnare":1125,
"cammino":3792,
"contenere":524,
"Valerio":4589,
"ufficio":1394,
"esterno":179,
"magari":1746,
"grafica":4614,
"componente":1723,
"spegnere":3731,
"argument":753,
"pensare":358,
"inaspettato":4684,
"demografico":2546,
"ricostruzione":2325,
"radiofonico":3492,
"frequenza":1852,
"cemento":4357,
"Coppa":1160,
"sparire":3689,
"legare":556,
"comunista":1349,
"gradualmente":4853,
"fondatore":1638,
"annunciare":1236,
"atto":669,
"Conti":4298,
"manovra":3088,
"ampliamento":5005,
"prestigio":4383,
"fino":171,
"soddisfare":2897,
"andata":4152,
"sconfiggere":1081,
"carattere":862,
"emanare":4356,
"proporre":617,
"pericolo":1888,
"quantità":1211,
"quadro":1504,
"Emilio":4281,
"conquista":1776,
"ubicare":4955,
"to":1221,
"crescita":1595,
"Edward":4155,
"dirigente":2063,
"estrazione":4437,
"regola":1069,
"collana":4707,
"esteso":4011,
"combattere":1075,
"restante":4238,
"banca":1770,
"correggere":2977,
"Umberto":3526,
"suggerire":1952,
"Battaglia":4496,
"Bambino":4514,
"apposito":1853,
"discutere":2152,
"gravidanza":4783,
"schieramento":4997,
"sostanzialmente":3212,
"lontano":1067,
"saga":2866,
"navale":2478,
"iracheno":4405,
"Francia":528,
"riportare":673,
"fascismo":3882,
"presa":2508,
"mondiale":433,
"sin":1956,
"stabilità":3964,
"condannare":1551,
"palco":3356,
"sponda":4143,
"compagnia":809,
"sindacato":3766,
"esame":2095,
"tragico":3237,
"ligure":3142,
"cattivo":2038,
"Secchione":2794,
"segmento":3528,
"fusione":2564,
"Daniel":3410,
"nervoso":4290,
"cieco":4609,
"Bernardo":4315,
"scambiare":3164,
"cambiare":580,
"raccolta":1346,
"carico":1529,
"punto":197,
"seguito":252,
"ciascuna":3019,
"forzare":3610,
"ferroviario":1464,
"foglio":4249,
"dipendere":1508,
"così":168,
"veneto":3806,
"Università":1059,
"pietà":4922,
"George":1559,
"staccare":3208,
"sessuale":1965,
"pavimento":4409,
"infinito":2284,
"Carlo":603,
"circa":235,
"consumare":2732,
"linguistico":3132,
"tracciare":4943,
"difensivo":2983,
"stessa":376,
"tavolo":3275,
"universitario":2381,
"and":694,
"decidere":317,
"Concilio":4854,
"esporre":1275,
"sfuggire":2809,
"titolare":1937,
"piuttosto":768,
"Day":4484,
"tributo":4763,
"trasmissione":1014,
"oro":769,
"consigliere":2311,
"ottico":4731,
"opposto":2024,
"lanciare":882,
"euro":736,
"scacco":4781,
"coniare":3926,
"struttura":426,
"muovere":1070,
"serbo":3842,
"merito":1577,
"gene":4392,
"controllare":1155,
"miliardo":1702,
"millennio":4242,
"avvento":3515,
"Richard":2241,
"mestiere":4466,
"calibro":3495,
"quaranta":4685,
"nonno":3293,
"rilevante":2729,
"volume":1137,
"permettere":368,
"consegnare":1902,
"Davide":3183,
"mafioso":4150,
"introduzione":2131,
"regolare":1495,
"chiesa":262,
"Duca":3779,
"giornalismo":4576,
"finale":423,
"specifico":1016,
"eppure":3144,
"recare":1105,
"cantante":953,
"materiale":675,
"statistica":2528,
"National":2270,
"Sarah":4607,
"valid":755,
"automobile":2390,
"anticipare":3099,
"montare":1860,
"Associazione":2228,
"ergere":4926,
"inizio":337,
"uniforme":4355,
"diventare":248,
"segretario":1971,
"altrove":4967,
"dotazione":4755,
"vostro":1179,
"Filippo":1586,
"censimento":4126,
"versare":3706,
"Juan":3628,
"Rai":697,
"Militare":3583,
"esordio":1851,
"verdi":3083,
"territoriale":2767,
"Anna":1692,
"aggregare":4868,
"von":2003,
"svegliare":4720,
"suicidio":4168,
"distanza":937,
"Colonna":4235,
"violento":1912,
"musulmano":2793,
"status":3578,
"decorare":2440,
"ribellione":4524,
"esercito":629,
"islamico":3090,
"ascolto":1543,
"campionato":431,
"operativo":1386,
"organico":2746,
"Madrid":3002,
"Del":4064,
"strano":1710,
"super":3266,
"giuridico":2248,
"vendetta":2991,
"aprire":497,
"Mauro":3279,
"piatto":2140,
"richiamare":2065,
"buttare":3388,
"sole":1121,
"Auditel":3631,
"miglioramento":3116,
"educazione":2735,
"ormai":593,
"Jean":2753,
"definitivo":867,
"effetto":482,
"settimanale":3464,
"trattativa":4049,
"rivelazione":5069,
"addetto":4047,
"bisogno":1149,
"frutto":1581,
"caso":199,
"giornale":1197,
"società":379,
"Georgia":5090,
"impressione":3686,
"qualificare":2905,
"club":730,
"Onu":4304,
"Champions":3990,
"autorizzazione":4213,
"crescente":2889,
"An":4537,
"alluminio":4264,
"not":691,
"amministratore":3334,
"li":269,
"inquinamento":4259,
"Irlanda":2761,
"talvolta":1809,
"Napoli":810,
"imperiale":1772,
"argentino":3850,
"medicina":2451,
"imparare":1995,
"giurisdizione":4706,
"mio":319,
"canadese":3499,
"polo":3659,
"Hotel":4019,
"donna":346,
"basilica":3416,
"stretto":1041,
"News":4746,
"accendere":3385,
"giallo":1870,
"prevalere":3446,
"preferenza":4971,
"redigere":3824,
"Eric":3859,
"decimo":4081,
"verità":1313,
"massimo":511,
"strumento":766,
"terremoto":2665,
"forse":439,
"fallimento":2684,
"per":109,
"standard":1428,
"Clemente":4005,
"agente":1472,
"semplificare":4802,
"faro":4924,
"creatore":4604,
"soprattutto":320,
"condizionare":4389,
"calo":4393,
"successore":2473,
"colono":4764,
"Gregorio":3817,
"episodio":517,
"ribelle":3277,
"potenziale":2433,
"pro":4727,
"correlare":296,
"privilegio":3726,
"immaginare":2420,
"Australia":2106,
"socio":2913,
"orizzonte":4594,
"regista":1143,
"secolo":198,
"maschera":3539,
"Team":3353,
"testa":699,
"responsabilità":1618,
"ordine":452,
"diverso":181,
"regno":870,
"variazione":2370,
"spirito":1524,
"Art":4015,
"collezionare":4963,
"futuro":666,
"Hollywood":4181,
"né":850,
"facilitare":4211,
"chiunque":2656,
"civico":4773,
"documentario":2971,
"estinzione":4690,
"forte":420,
"se":139,
"puntare":1818,
"battaglione":4975,
"cristallo":4551,
"stanno":2280,
"monaco":2571,
"Serbia":4918,
"verso":221,
"Russia":1517,
"virtù":2906,
"contemporaneo":899,
"percepire":3320,
"addirittura":1172,
"estremo":871,
"raffigurare":1654,
"preparazione":2604,
"congregazione":4125,
"rifiuto":1886,
"Camera":2180,
"nessuno":891,
"MySQL":2462,
"Google":4836,
"rapido":1242,
"Francesco":715,
"mitologia":3602,
"William":2033,
"evidenziare":2229,
"classico":761,
"basare":650,
"accedere":2294,
"Vecchio":4267,
"utilizzo":1209,
"giocare":554,
"compito":1247,
"stanza":1711,
"implicare":3512,
"Ottocento":3097,
"torneo":1384,
"legittimo":3256,
"Enrico":1234,
"sintesi":3228,
"inaugurare":2124,
"Society":4506,
"Walter":3243,
"pianura":3063,
"minacciare":2741,
"ampliare":2811,
"inquadrare":4687,
"persino":1953,
"Reggio":3045,
"dell":852,
"Nascondi":4665,
"sistematico":3294,
"fratello":503,
"bizantino":2313,
"sorgente":2651,
"strumentale":4256,
"pesare":3849,
"quindi":193,
"morto":1540,
"dramma":3521,
"trasmettere":981,
"passare":256,
"protesta":2405,
"parte":138,
"Nick":4647,
"Cagliari":3668,
"ritirata":4775,
"nickname":3072,
"intuire":4819,
"enorme":1344,
"Scuola":2626,
"patto":2824,
"autobus":4135,
"anzi":1502,
"volare":1676,
"elevare":4112,
"punk":3780,
"democratico":1520,
"Ordine":1908,
"target":4027,
"novembre":581,
"Giochi":2966,
"segreto":1072,
"deciso":1382,
"nessuna":1617,
"quartiere":1205,
"provvedimento":2969,
"replicare":4770,
"Toscana":2560,
"biglietto":3370,
"succedere":1633,
"azione":525,
"Reale":4200,
"sfondo":2881,
"scrivere":249,
"integrazione":3604,
"elencare":3955,
"benzina":3442,
"bacino":2734,
"promozione":1637,
"bomba":2201,
"convenzionale":4187,
"sepolto":2690,
"Open":4283,
"Teatro":1867,
"servizio":348,
"World":1829,
"filosofico":2891,
"Media":3658,
"ironico":4619,
"roccioso":4968,
"capitale":894,
"intenzione":1887,
"sesto":2160,
"continuare":343,
"uguale":2008,
"gigantesco":4728,
"specialmente":1903,
"Museo":1765,
"vergogna":2858,
"sicurezza":936,
"certo":280,
"lettura":1861,
"biografico":4861,
"Sir":4957,
"saltare":2693,
"cambio":1068,
"Lord":2662,
"condividere":1705,
"chiuso":1970,
"campana":4827,
"spiaggia":2645,
"taglio":2242,
"nodo":2594,
"temporaneo":3000,
"Alex":3803,
"operaio":2015,
"cento":1927,
"bicicletta":5072,
"accurato":3994,
"aziendale":4789,
"Marino":3197,
"regia":1606,
"abolire":4365,
"spettatore":1941,
"seme":2931,
"apparecchio":3986,
"eccessivo":2471,
"Taranto":4498,
"moda":2400,
"alleare":2912,
"London":4310,
"decisivo":2864,
"II":418,
"leggendario":3934,
"BMW":3520,
"proseguire":1206,
"Renato":3809,
"prospettiva":2669,
"niente":1350,
"archeologico":2025,
"gol":1663,
"spostare":1219,
"statunitense":859,
"dopoguerra":2569,
"significare":940,
"scena":591,
"salvare":757,
"rango":3462,
"abituare":4358,
"approvare":1830,
"cattedra":4376,
"barocco":3194,
"moneta":1648,
"tutelare":4545,
"equilibrio":2324,
"dovuto":4762,
"terreno":974,
"mistero":2904,
"risultato":435,
"strada":387,
"d'":137,
"stupire":4215,
"Bruce":4572,
"lombardo":3166,
"biologico":2487,
"ostilità":4935,
"lavoratore":1614,
"crescere":921,
"richiamo":4271,
"era":2559,
"accogliere":1532,
"scheda":2848,
"collaboratore":2880,
"sostituire":710,
"istituzione":1373,
"Gianni":3041,
"preparare":1340,
"aggiunta":2277,
"comandare":2333,
"ennesimo":3026,
"dieci":1210,
"variabile":1718,
"Comuni":1803,
"eccezione":1751,
"ferita":4778,
"sonorità":4925,
"tassa":3162,
"sperare":1358,
"incorporare":4037,
"filo":2441,
"ispirato":2447,
"spostamento":3091,
"carbone":4736,
"conferire":2330,
"sottolineare":1789,
"definizione":1503,
"Signore":1788,
"ritornare":1045,
"divinità":2706,
"Night":4649,
"cogliere":2417,
"privare":4699,
"Vietnam":4069,
"metro":576,
"integrale":3458,
"donare":2209,
"beh":4900,
"statale":2086,
"trenta":2785,
"migliore":523,
"posteriore":1523,
"premiare":3264,
"analogo":2017,
"diminuire":2507,
"Albert":4757,
"Uniti":585,
"peraltro":2621,
"redazione":3209,
"impostare":4562,
"indagine":1703,
"ambientare":3996,
"totalizzare":4812,
"maturità":4940,
"originare":4713,
"mandare":934,
"riscontro":4441,
"tutela":2837,
"Championship":4923,
"nazionale":265,
"medioevale":4423,
"Fratello":1721,
"venti":2438,
"distare":3607,
"avvocato":2077,
"accumulare":3946,
"Cappella":4553,
"dove":161,
"penisola":3112,
"angolo":1573,
"pallone":4549,
"abbracciare":4457,
"rimozione":3749,
"reciproco":4122,
"commerciale":938,
"paesaggio":2801,
"tranne":2773,
"banco":4744,
"or":3979,
"salute":1672,
"Carlos":3896,
"star":4183,
"benché":2770,
"continuo":1088,
"criminale":2570,
"Mac":4099,
"abbandono":3081,
"reparto":2600,
"identificazione":5031,
"Afghanistan":4978,
"edilizio":3597,
"Verona":2455,
"ambiente":758,
"rifiutare":1201,
"rifornimento":4528,
"combinazione":3267,
"bilancio":2531,
"produrre":364,
"utile":1345,
"corrispondente":2218,
"insegnamento":2257,
"copertura":2663,
"australiano":3422,
"defunto":4217,
"Time":4245,
"Divisione":3296,
"salita":3765,
"anello":2146,
"teologia":4186,
"spalla":1626,
"pianoforte":3263,
"individuo":1628,
"prestazione":1480,
"là":1816,
"pace":1156,
"mentale":2921,
"fedeli":2923,
"band":698,
"Christian":4177,
"valutazione":2553,
"Totale":4785,
"tendere":1445,
"attrazione":4318,
"superiore":623,
"asse":2619,
"Giulio":2319,
"tenuta":4404,
"organizzato":4268,
"radiale":4788,
"Zelanda":4910,
"presso":351,
"classe":622,
"assai":1894,
"previsione":3772,
"Boston":4840,
"stazione":774,
"performance":3674,
"lottare":3363,
"selezionare":3576,
"turismo":2748,
"manifestare":1868,
"determinare":1133,
"destra":1119,
"Comune":1582,
"zampa":3903,
"collina":2197,
"pareggio":3382,
"essere":103,
"impresa":1195,
"fantascienza":3854,
"atleta":3205,
"imporre":1113,
"forum":4127,
"ascesa":3931,
"Super":2936,
"ingaggiare":3408,
"cugino":2595,
"risultare":604,
"esponente":4386,
"violazione":4048,
"zucchero":4375,
"mancato":3568,
"estero":1311,
"incidere":1815,
"chiamata":1938,
"divisione":1315,
"mysql_connect":2525,
"intimo":4286,
"commissione":1130,
"riprendere":695,
"indubbio":4931,
"coraggio":2958,
"Lost":4942,
"Bari":3141,
"stabile":2459,
"cessare":2990,
"tortura":4807,
"informare":2111,
"berlina":5048,
"inserimento":5064,
"armatura":4444,
"terribile":3023,
"successione":2418,
"ebraico":1920,
"nascere":284,
"statuto":3898,
"greca":1771,
"orientamento":3872,
"differente":1189,
"restauro":2383,
"monarchia":5093,
"Isabella":4670,
"documento":1018,
"festival":2272,
"normalmente":2318,
"dote":3318,
"rete":401,
"convertire":2510,
"ulteriore":1180,
"gamba":2708,
"scomparsa":3455,
"falso":1741,
"partita":658,
"orologio":4391,
"condotta":4973,
"fondazione":1843,
"ritrovare":872,
"maturo":4567,
"Santo":1878,
"Inghilterra":1154,
"studioso":1807,
"livello":375,
"dimostrare":608,
"sottostante":4704,
"olimpico":2427,
"seconda":4547,
"buon":1139,
"brutto":2666,
"robusto":4113,
"trasformazione":1914,
"rientro":4302,
"interrogare":4473,
"sofferenza":3284,
"liquido":2495,
"Euro":4282,
"riposo":4794,
"maglia":2074,
"Texas":3785,
"settembre":386,
"argento":1916,
"diritto":371,
"foto":1426,
"culturale":911,
"possedimento":4689,
"primitivo":3235,
"geografia":2040,
"Sudafrica":4468,
"unità":702,
"metallo":2647,
"fotografo":4904,
"contribuire":1266,
"Maggiore":2871,
"Girolamo":5066,
"intensità":3942,
"tubo":3389,
"giovane":329,
"reale":788,
"declinare":3660,
"giudice":1604,
"Turchia":3778,
"zona":340,
"tutta":3471,
"grave":1034,
"fuori":521,
"radar":2908,
"anche":120,
"attuare":2995,
"suolo":2007,
"mantenere":642,
"Ivan":4240,
"interfaccia":4832,
"pelo":3940,
"finché":2315,
"quotidiano":1173,
"traguardo":5073,
"Prussia":5088,
"collezione":2042,
"istanza":4992,
"Internet":2170,
"verticale":3140,
"riserva":2048,
"legame":1774,
"esistenza":1327,
"canale":651,
"avvenire":402,
"pronto":1269,
"divertente":3415,
"felicità":4462,
"risparmio":4735,
"adottare":1141,
"apr":1712,
"riprodurre":2997,
"Ministro":3784,
"cilindro":2306,
"Bologna":1410,
"Palermo":1842,
"antico":331,
"indice":3226,
"stiamo":3820,
"dottore":3347,
"spettro":5025,
"separare":1755,
"King":2768,
"restituire":2903,
"pellicola":2020,
"tasso":2612,
"porzione":3259,
"spazio":561,
"nord":718,
"vivente":3101,
"termine":314,
"carriera":455,
"margine":2813,
"DNA":4490,
"rosa":2583,
"sabato":1611,
"rivedere":3358,
"Nord":1182,
"veneziano":3481,
"installazione":3875,
"franco":3680,
"registrare":543,
"attuale":512,
"James":1634,
"sovrano":1650,
"rischiare":2109,
"commercio":1613,
"alleanza":2246,
"regnare":4611,
"descrizione":1366,
"proprietà":819,
"manoscritto":3510,
"Sant'":1207,
"Bruno":2772,
"tiro":2185,
"realmente":3295,
"Miglior":3384,
"africano":2657,
"divisa":1825,
"unito":1935,
"Canada":2128,
"recente":896,
"vietare":2986,
"Luigi":1047,
"delegato":4580,
"raro":1295,
"diretta":2151,
"studente":1265,
"sbagliato":4691,
"collettivo":2386,
"legno":1541,
"consigliare":2592,
"propri":3649,
"regolamento":2797,
"du":3380,
"saggio":2005,
"Le":3954,
"meno":310,
"Golfo":4413,
"pressi":1462,
"partner":4105,
"lista":1224,
"live":2838,
"proteina":3675,
"MySQL-Link":985,
"Johnny":4508,
"Steve":2988,
"aspettare":1241,
"fanciullo":4982,
"tirare":2101,
"insediamento":1929,
"Biblioteca":4288,
"Blogo.it":3147,
"esplorazione":5044,
"art.":2472,
"boreale":4598,
"Frank":3172,
"carica":933,
"acciaio":2868,
"cosiddetto":1094,
"umanità":2222,
"magnitudine":2309,
"chiarire":3487,
"destro":1947,
"fede":1679,
"battezzare":4156,
"Riccardo":2730,
"neanche":2123,
"braccia":3997,
"rischio":1218,
"parere":877,
"sospendere":2588,
"esecuzione":1885,
"prigione":2951,
"appoggiare":2258,
"assessore":4191,
"vedere":156,
"eliminazione":3536,
"cuore":1074,
"affare":1983,
"D'":3094,
"ampio":800,
"tastiera":4017,
"primavera":2295,
"aggiornare":3851,
"fucile":3974,
"seguace":4384,
"pianta":973,
"ke":3799,
"impiegato":3505,
"Elisabetta":3678,
"est":1006,
"soccorso":3547,
"Type":4959,
"impero":1889,
"console":2882,
"animazione":3199,
"Golden":4349,
"leggermente":2357,
"Medioevo":3550,
"abitato":3202,
"femminile":1150,
"pirata":4231,
"intelligente":3486,
"ottica":3939,
"anime":1898,
"elegante":3484,
"torrente":2989,
"aumento":1636,
"coloro":1560,
"metal":2491,
"principio":1526,
"collega":1909,
"Trieste":3330,
"esercitare":1899,
"Is":2834,
"intento":2307,
"ognuna":4642,
"economia":1015,
"presunto":2713,
"consacrare":3485,
"virtuale":4171,
"duca":1777,
"pittura":2238,
"numerico":4153,
"contattare":2544,
"lingua":451,
"depositare":4697,
"esemplare":1268,
"grado":414,
"occupazione":1945,
"suo":115,
"selezione":2820,
"psicologia":4729,
"professor":4705,
"ispirare":1946,
"pubblico":260,
"cibo":2051,
"abitante":640,
"molto":140,
"pertanto":1448,
"Lucas":3598,
"Egitto":2049,
"sottomarino":4436,
"Mantova":4463,
"habitat":3718,
"partigiano":3137,
"gotico":3632,
"protagonisti":1850,
"soldato":1196,
"competenza":2667,
"sospettare":4948,
"marmo":2719,
"subentrare":4749,
"Grazie":2252,
"conversione":3877,
"adesso":1600,
"Imperatore":3325,
"pugno":4146,
"blog":1695,
"svedese":2831,
"neve":3430,
"sigla":2261,
"studiare":826,
"assurdo":4092,
"El":2894,
"considerare":287,
"buio":3612,
"affacciare":3170,
"tecnico":701,
"Assemblea":3932,
"impianto":1096,
"fa":509,
"marcia":2354,
"bottega":4682,
"limitato":3554,
"assieme":889,
"proiezione":4325,
"accessibile":4754,
"invasione":1994,
"email":3400,
"Big":3636,
"pista":1750,
"allestire":4004,
"nulla":849,
"chiarimento":4949,
"n.":1194,
"Spagna":1025,
"reperto":3755,
"vincitore":1261,
"Honda":4098,
"scarpa":4450,
"monumentale":3971,
"adibire":4208,
"semifinale":3265,
"Edizioni":3496,
"contesto":1623,
"cilindrata":4206,
"Regina":3735,
"stadio":1454,
"Parigi":968,
"alcun":1525,
"mediterraneo":1962,
"facente":4382,
"palestinese":2847,
"medioevo":4848,
"principe":778,
"scientifico":1048,
"legislativo":3556,
"mille":2602,
"continente":2659,
"costituito":4103,
"Show":2391,
"Dan":4730,
"incontrare":742,
"riassumere":2689,
"VIII":3427,
"discografico":2093,
"soffrire":2353,
"Cenni":3324,
"motivo":655,
"solido":2555,
"merce":2652,
"produttore":1282,
"giocatore":722,
"parcheggio":5083,
"ugualmente":4786,
"lavoro":303,
"convento":2163,
"americano":481,
"cima":2511,
"Attività":4569,
"maggioranza":1467,
"scrittura":2046,
"contea":2088,
"oramai":4080,
"bastare":1090,
"Barbara":3434,
"fingere":4559,
"Uomo":4509,
"cd":4718,
"tavola":2255,
"italiana":1987,
"fatto":298,
"ritrarre":4241,
"aviazione":4313,
"gen":1733,
"voto":1806,
"dimenticare":1649,
"passeggero":2572,
"interessante":1336,
"valore":443,
"correre":1421,
"appartenere":756,
"gridare":4071,
"rendere":305,
"Jack":2480,
"Onorificenze":4341,
"ricorrere":2674,
"volta":159,
"da":107,
"pressoché":4044,
"anonimo":2494,
"abitazione":2368,
"illuminare":3930,
"speciale":874,
"raggruppare":4657,
"rinvenire":2540,
"traduzione":2172,
"costare":2234,
"cattolico":901,
"illegale":4535,
"famiglia":226,
"Lago":3056,
"rigoroso":4145,
"industria":1416,
"armare":3355,
"tanto":251,
"operatore":2069,
"sfruttamento":3985,
"paio":2334,
"Augusto":3336,
"rompere":2360,
"tentativo":971,
"spiccare":3332,
"instaurare":4767,
"Mondiale":2141,
"limite":1044,
"confinare":4465,
"città":169,
"compenso":5029,
"'t":3679,
"firmare":1330,
"installare":2575,
"invernale":2917,
"cassa":3443,
"improvviso":2203,
"Treviso":4495,
"automatico":1568,
"Montalbano":4577,
"eccellenza":4934,
"violenza":1609,
"riduzione":2332,
"evoluto":4608,
"Charlie":4556,
"spiegazione":3327,
"applicazione":1283,
"fluido":4332,
"abito":4435,
"spettacolo":1132,
"comprensione":4745,
"profilo":2298,
"Città":1612,
"mai":261,
"ideologico":4950,
"chitarrista":2606,
"Messina":2874,
"assenza":1874,
"ricerca":458,
"Smith":3227,
"finalmente":1370,
"Legge":3951,
"comma":4169,
"Music":2634,
"hotel":4101,
"prevedere":484,
"Congresso":3075,
"frequente":2153,
"testimonianza":1763,
"adeguare":3757,
"segnalazione":4901,
"sportivo":1035,
"mobile":2156,
"male":1060,
"tono":2633,
"pratica":1046,
"XIII":2186,
"eccezionale":3387,
"riconoscimento":1743,
"trasportare":1959,
"affermazione":2586,
"relazione":681,
"invenzione":3702,
"Edoardo":3878,
"vaso":4082,
"atomo":4054,
"esposizione":2345,
"turno":1630,
"letto":2188,
"fondamentale":1039,
"Oscar":2715,
"necessità":1390,
"vitale":4083,
"Monza":4738,
"brasiliano":3129,
"dolore":2099,
"S.":1652,
"Gerusalemme":2786,
"anteprima":4962,
"detta":4879,
"moderno":686,
"annuncio":4272,
"contestare":3838,
"fantasia":3662,
"vendere":838,
"linguaggio":1397,
"veloce":1631,
"Natale":2685,
"crimine":3182,
"occorrere":1967,
"elettorale":2002,
"recentemente":1975,
"lago":1456,
"consistere":1438,
"mossa":3869,
"Linux":4790,
"Armata":3695,
"orientare":3188,
"ricorrente":4908,
"arte":492,
"pulito":4844,
"esplodere":3605,
"opposizione":1955,
"Belgio":3158,
"Castle":4945,
"agricoltura":2308,
"schiavo":2899,
"giardino":1977,
"statua":1440,
"gola":5058,
"Elena":3348,
"suddividere":1680,
"iscrivere":2607,
"Pietro":863,
"Bassi":4020,
"dipingere":2396,
"Museum":4497,
"integrare":2421,
"telefilm":2861,
"palazzo":831,
"innamorare":2121,
"dose":4201,
"preghiera":3516,
"minerale":3444,
"sincero":4114,
"lusso":4737,
"restaurare":3106,
"siciliano":2769,
"letterario":1479,
"intervenire":1688,
"massiccio":2582,
"acuto":3821,
"transizione":4452,
"verbo":3720,
"replica":2945,
"me":367,
"sviluppo":500,
"California":2347,
"teatrale":1653,
"Calcio":2833,
"attento":2631,
"girone":2139,
"morte":321,
"chitarra":1928,
"pubblicitario":2443,
"sottile":2752,
"etico":5047,
"Croce":2412,
"governo":374,
"studi":4678,
"respingere":2846,
"Bolzano":4814,
"psicologico":2999,
"autorizzare":3189,
"matrimonio":1091,
"Tito":4916,
"valutare":2271,
"esaminare":3988,
"Modena":3600,
"confusione":4278,
"traffico":1734,
"grandezza":3280,
"sede":491,
"svolgimento":4847,
"bene":390,
"circostanza":2960,
"aumentare":888,
"vasto":1469,
"incaricare":3085,
"basso":485,
"Maestro":4378,
"aspetto":671,
"is":678,
"rapire":3558,
"Maurizio":2720,
"bellico":3104,
"azzurro":2547,
"perdita":1396,
"scoprire":475,
"provenienti":1645,
"pretendere":3629,
"malato":2506,
"entrambe":1620,
"Cattedrale":4618,
"attestare":2627,
"asiatico":3833,
"mente":1640,
"popolazione":459,
"dollaro":1368,
"qual":4914,
"specializzare":3405,
"messa":2148,
"scritta":3328,
"British":4643,
"riferire":979,
"Scienze":4338,
"moderazione":3351,
"fantastico":3250,
"apparentemente":3151,
"ritratto":2643,
"France":4296,
"crollare":4542,
"contempo":4055,
"ecco":1385,
"tardi":1253,
"videogioco":2278,
"pesante":1376,
"dipinto":1538,
"intervento":878,
"uomo":225,
"negozio":2206,
"tramite":725,
"Famosi":1966,
"frontale":4353,
"confermare":1057,
"frequentare":1583,
"Stazione":4791,
"annuale":3184,
"re":436,
"misurare":2265,
"sindaco":1784,
"accordo":833,
"amministrare":4850,
"cliente":2133,
"paura":1709,
"questi":2650,
"missile":1819,
"rappresentare":378,
"sino":1509,
"opporre":2058,
"Awards":3506,
"piangere":3609,
"buco":4363,
"fungere":4322,
"più":119,
"artificiale":2714,
"suscitare":2843,
"sessanta":2237,
"espressione":1342,
"litro":2503,
"mano":427,
"Love":2727,
"Bianco":4472,
"fermo":2577,
"individuare":1694,
"IX":2766,
"Governo":4617,
"Album":3467,
"media":997,
"sesso":2168,
"nonché":1298,
"annullare":3543,
"marchese":4214,
"ti":464,
"permesso":3813,
"complessità":4280,
"proporzione":4774,
"ripetere":1799,
"amministrazione":1369,
"edizione":584,
"sperimentale":3138,
"comunicato":3734,
"formula":1365,
"Carta":5091,
"University":2527,
"mattina":2223,
"concezione":2954,
"Portogallo":3082,
"tappa":2126,
"nell":2883,
"avviso":4725,
"Stone":5056,
"Wilson":4905,
"ristorante":3061,
"denaro":1980,
"avanti":1188,
"fase":596,
"facoltà":2962,
"grosso":1212,
"login":3625,
"orientale":1286,
"Palestina":4636,
"antenato":4328,
"fila":3417,
"esilio":3282,
"scaricare":3633,
"avvicinare":1558,
"gratuito":2449,
"compiere":984,
"Chiesa":1061,
"TV":830,
"manager":3591,
"volere":183,
"anniversario":4907,
"attacco":692,
"tuttora":2059,
"lotta":928,
"abbattere":2605,
"nota":1005,
"es.":3681,
"espandere":4043,
"democrazia":2548,
"qualcosa":947,
"punire":3947,
"danneggiare":2474,
"pseudonimo":4717,
"quattro":359,
"Gesù":1549,
"ideale":1859,
"Raffaele":4813,
"Popolare":4262,
"design":3193,
"elaborare":2615,
"corona":2310,
"marzo":535,
"perché":203,
"condurre":606,
"calendario":3425,
"discesa":3867,
"individuale":2545,
"scrittore":1128,
"visita":1566,
"invadere":2863,
"Mondiali":2791,
"indicazione":2764,
"evento":614,
"intraprendere":2052,
"coso":3738,
"temere":2392,
"esse":1490,
"sedere":2221,
"determinato":2413,
"serale":3774,
"Codice":3595,
"San":224,
"gettare":2276,
"ottanta":1950,
"piede":999,
"spettrale":4541,
"organismo":2113,
"giunta":4236,
"razionale":4683,
"uccisione":4655,
"sapore":4397,
"escludere":1752,
"furto":4688,
"stima":3069,
"Ferrovie":4999,
"sensazione":3196,
"incisione":3916,
"controversia":4369,
"candidato":1835,
"dente":3161,
"riconoscere":689,
"accessorio":4513,
"conclusione":2165,
"editoriale":3272,
"altezza":1375,
"negativo":1622,
"specchio":3420,
"vita":187,
"ferire":2460,
"diesel":4425,
"commedia":1781,
"Ray":4928,
"castello":932,
"bagno":3395,
"Henry":3229,
"fattoria":4520,
"Iraq":2481,
"braccio":2384,
"almeno":612,
"metà":549,
"ufficialmente":1739,
"agrario":4739,
"Finlandia":5012,
"Vicenza":4204,
"familiare":1877,
"Simone":3587,
"manifestazione":1110,
"duo":4631,
"TvBlog":3338,
"evidente":1515,
"navigazione":3873,
"avviare":1793,
"carcere":1942,
"novecento":2952,
"Giulia":2914,
"Giacomo":1933,
"accontentare":5079,
"tomba":2232,
"triste":3982,
"tuttavia":373,
"denunciare":2488,
"isolato":5043,
"grafico":2169,
"mancanza":1642,
"autunno":2728,
"Leonardo":3118,
"riguardo":1439,
"ottenne":4833,
"intesa":3918,
"costellazione":3285,
"purtroppo":1901,
"accorgersi":2359,
"allievo":2174,
"aggiornamento":3773,
"vincolo":4715,
"comparsa":4057,
"Firenze":1002,
"lunedì":2681,
"misura":1051,
"dovere":148,
"Milano":470,
"finanza":3663,
"Renault":4521,
"universo":2022,
"sopravvivere":1747,
"visibile":1500,
"modulo":3098,
"ingegnere":3057,
"declino":3732,
"testata":2107,
"lettore":1388,
"curva":2081,
"magico":2599,
"turistico":2289,
"mura":1872,
"risoluzione":2974,
"kg":2202,
"uscire":421,
"spettacolare":4446,
"ringraziare":4120,
"VI":1880,
"Alfonso":2876,
"informatica":4036,
"pioggia":2839,
"chiedere":377,
"abbigliamento":4621,
"cattedrale":2479,
"petrolio":3360,
"minuto":804,
"difetto":3438,
"provocare":1151,
"Como":3949,
"Magno":4851,
"Felice":4522,
"XVI":1779,
"luglio":506,
"coloniale":4582,
"novanta":2450,
"circondare":1691,
"dodici":2641,
"caccia":1225,
"spedire":4499,
"animale":746,
"scarico":4478,
"mezzo":392,
"Gran":942,
"finalità":4919,
"personale":729,
"uno":149,
"rivestire":3036,
"School":3269,
"approccio":2673,
"mostro":2723,
"circolare":2135,
"Lee":2940,
"deputato":1993,
"costituire":397,
"materno":5096,
"estinto":4333,
"stesura":4938,
"sconfitta":1478,
"decretare":4588,
"rivendicare":4266,
"Fox":3428,
"comportare":1437,
"elettronico":1707,
"Hitler":3537,
"consumo":1913,
"efficienza":4104,
"mitico":4289,
"carburante":4042,
"ruota":2696,
"finanziare":3087,
"globale":3148,
"proiettare":4480,
"ultimo":165,
"pronunciare":2410,
"canonico":3814,
"censurare":3727,
"museo":1457,
"mentre":163,
"Jacques":4895,
"originale":662,
"ottomano":4644,
"interprete":2975,
"pericoloso":1804,
"pietra":1076,
"consapevole":4111,
"Pro":4323,
"largo":1127,
"Conte":3429,
"sormontare":4977,
"affatto":3349,
"nov":1621,
"totale":770,
"Zero":5026,
"ritmo":2516,
"avversario":1395,
"datare":3012,
"seminario":4606,
"spot":4182,
"sforzo":3685,
"romani":2961,
"DVD":2942,
"Don":1922,
"esponenti":3018,
"puntata":808,
"Star":3281,
"trofeo":4574,
"seguire":286,
"Alpi":2819,
"religioso":745,
"lira":3114,
"bassista":4530,
"urbano":1731,
"discreto":2818,
"vi":210,
"mercante":4623,
"esperto":1783,
"giugno":502,
"lei":441,
"vario":275,
"oltre":212,
"invece":234,
"fotografico":2618,
"civiltà":2585,
"fretta":5077,
"Robert":1683,
"identità":1792,
"mostrare":488,
"proprietario":1567,
"candidatura":4816,
"nove":2196,
"puro":1758,
"decreto":1924,
"Francisco":3121,
"poeta":1706,
"guadagnare":1666,
"riguardare":588,
"settimana":646,
"le":349,
"Emma":4488,
"melodia":4897,
"robot":4584,
"pervenire":4951,
"rimpiazzare":3562,
"prestito":2760,
"alto":264,
"show":1499,
"Padova":2439,
"gamma":2775,
"teorico":2558,
"giovedì":2642,
"porto":1434,
"ballare":3303,
"video":430,
"dieta":4429,
"esigenza":1998,
"iscritto":2939,
"muro":1714,
"tragedia":3288,
"sconosciuto":3373,
"patrimonio":2145,
"ascoltare":1662,
"proposta":1408,
"cellula":1651,
"gara":615,
"parlare":283,
"folla":3802,
"oggi":229,
"Tale":2910,
"Vincenzo":2671,
"cognome":3585,
"entrambi":905,
"creazione":1402,
"risiedere":3014,
"ricevere":476,
"dispiacere":4751,
"mal":5013,
"dimora":4628,
"mettere":245,
"privato":708,
"connettere":3124,
"docente":3343,
"larghezza":4401,
"calare":4265,
"partecipazione":1351,
"Pacifico":4407,
"quarto":784,
"chi":267,
"valle":1231,
"personaggio":313,
"problema":344,
"canto":1501,
"assassinio":4525,
"innocente":5045,
"ghiaccio":2844,
"riga":3232,
"formazione":597,
"distruggere":898,
"fin":1122,
"noi":569,
"mila":2675,
"amante":2304,
"protagonista":791,
"controverso":4134,
"provare":929,
"bello":463,
"telecamera":4254,
"base":311,
"Mar":2823,
"rappresentazione":1814,
"accelerazione":4886,
"Lorenzo":1838,
"ovvio":1012,
"notizia":787,
"apparato":2682,
"racconto":1237,
"subire":904,
"vicinanza":2601,
"canna":3871,
"appuntamento":2445,
"prestigioso":2717,
"Santi":3603,
"specie":446,
"ribadire":3750,
"configurazione":3289,
"finanziamento":3337,
"gli":209,
"amico":407,
"provincia":339,
"Joseph":3210,
"misto":2902,
"inglobare":5036,
"sopravvivenza":4512,
"rifare":3693,
"carrozzeria":4291,
"America":857,
"Corpo":4300,
"consiglio":660,
"colpo":931,
"dubbio":1443,
"indirizzo":2661,
"Center":4595,
"conto":887,
"fiamma":3762,
"percezione":4936,
"isolare":3344,
"terrorismo":3983,
"rock":1542,
"imperatore":962,
"nastro":3823,
"generare":1461,
"barriera":4077,
"impedire":1171,
"mediante":2050,
"Messico":2312,
"triangolo":4878,
"movimento":513,
"confronto":732,
"duello":4667,
"vittima":1223,
"scioglimento":4443,
"moglie":700,
"frase":1828,
"precedere":2408,
"<POS>":5,
"Cup":3117,
"Scozia":3527,
"numeroso":370,
"piacere":957,
"Aragona":4016,
"esperienza":728,
"fossile":3690,
"Filippi":4261,
"emergere":1844,
"cornice":4062,
"serata":1284,
"approfondire":4087,
"religione":1347,
"Catania":2584,
"giro":723,
"calcistico":3047,
"ufficiale":544,
"bruciare":2854,
"intervallo":3565,
"elevato":1022,
"ripartire":3479,
"Louis":3524,
"nemico":900,
"naturale":775,
"chiave":1510,
"progresso":3534,
"congresso":3998,
"ecc":2061,
"articolo":709,
"suono":1597,
"divertimento":4875,
"colonna":1087,
"Costituzione":2856,
"valvola":4085,
"come":118,
"riferimento":773,
"suonare":1065,
"clima":1603,
"sacrificare":4693,
"aiuto":952,
"operare":1353,
"preferire":1339,
"spontaneo":3230,
"obbligatorio":4303,
"costruttore":3077,
"Trento":3661,
"superare":803,
"esprimere":854,
"il":101,
"irregolare":4980,
"scudetto":4072,
"borghesia":4874,
"scienziato":2366,
"rovina":3333,
"giapponese":1131,
"nudo":2432,
"sembrare":355,
"cancro":4476,
"nostro":306,
"interpretare":748,
"spagnolo":983,
"ramo":3152,
"archivio":3143,
"acquistare":1019,
"morale":1742,
"Ford":3432,
"PC":4539,
"ferro":1982,
"allenamento":5027,
"sensibile":3421,
"normale":1255,
"cattura":4483,
"dottor":3491,
"personalità":853,
"quando":153,
"concerto":1114,
"conferenza":2603,
"momento":322,
"stimolare":5010,
"semplice":643,
"consorzio":4632,
"promettere":2526,
"utente":1117,
"nascondere":1267,
"viaggio":843,
"risolvere":1352,
"versione":324,
"sangue":1392,
"articolare":4481,
"ente":1963,
"Miss":2672,
"cm":1545,
"apice":4592,
"effettivo":1485,
"Asia":2342,
"intermedio":3835,
"federale":2664,
"concepire":2812,
"panorama":3053,
"stampa":1245,
"faccia":1601,
"tracciato":3650,
"tesi":2147,
"attualmente":648,
"tradimento":4079,
"servire":668,
"pò":619,
"abitare":1095,
"Tour":2779,
"culto":1678,
"perciò":1659,
"the":186,
"cavo":3468,
"Juventus":3581,
"animo":3937,
"dietro":996,
"pozzo":4622,
"maggiore":363,
"perfino":3620,
"ritiro":2578,
"soggetto":840,
"notare":1031,
"indirizzare":3489,
"precisione":3032,
"Bush":3361,
"trasformare":793,
"ricercare":3787,
"confessare":3818,
"terrestre":1997,
"genitore":1360,
"dignità":3795,
"obiettivo":991,
"new":4817,
"cavaliere":1940,
"trattamento":1802,
"contributo":1786,
"Bibliografia":415,
"immediato":998,
"detenuto":4507,
"alternativa":1812,
"Guido":3191,
"tempio":1745,
"presidenziale":4890,
"perdere":413,
"sci":3921,
"impossibile":2094,
"repubblica":2749,
"GP":3860,
"architettura":1244,
"curare":1531,
"prodotto":545,
"Brescia":3065,
"acqua":394,
"CV":2859,
"confluire":4308,
"Palmarès":3418,
"incrociare":5003,
"incursione":4518,
"piu":4412,
"fondamento":4845,
"scandalo":3175,
"filmato":3880,
"Alleanza":4251,
"scudo":3215,
"scoppio":3708,
"Man":3035,
"vertice":2463,
"Italia":170,
"televisivo":632,
"occhio":751,
"prigioniero":1972,
"Bartolomeo":4554,
"Livorno":3640,
"indipendenza":1964,
"avvalersi":5021,
"jazz":3372,
"primato":4838,
"risorsa":1556,
"Sardegna":2108,
"Williams":3419,
"coprire":1364,
"genere":412,
"colpa":2190,
"nobiltà":4274,
"Parma":2796,
"intelligenza":3546,
"superficiale":4639,
"cos'":4319,
"Davis":4860,
"disagio":4893,
"possibilità":653,
"fabbrica":2158,
"vetro":3055,
"caratterizzare":740,
"epoca":417,
"rilievo":1801,
"vegetale":3548,
"sopprimere":3815,
"io":384,
"realtà":450,
"convegno":4873,
"deserto":2934,
"intorno":828,
"picco":5059,
"autonomo":1906,
"rientrare":1476,
"sottrarre":3221,
"ridurre":818,
"Comunità":3345,
"professione":3095,
"razza":2034,
"esclusivo":1162,
"edito":4668,
"rivelare":798,
"sistema":223,
"stellare":4230,
"napoletano":2922,
"spedizione":2030,
"XVIII":2045,
"Padre":3710,
"mafia":3843,
"complice":4710,
"infanzia":2490,
"legge":410,
"comportamento":1320,
"supplemento":4464,
"radio":1607,
"principale":278,
"paragonare":4558,
"fondere":2980,
"sancire":4148,
"scienza":1302,
"all":1429,
"aggressivo":4299,
"chilometro":1668,
"danese":4447,
"splendido":3037,
"mostra":1708,
"Cavaliere":4824,
"Dario":4863,
"parziale":2907,
"continentale":4109,
"Affari":4674,
"Girone":3154,
"gatto":3406,
"autonomia":2162,
"Claudio":2331,
"produzione":381,
"bar":3028,
"Bob":3723,
"iniziativa":1324,
"Uno":4724,
"vantaggio":1401,
"luogo":369,
"magazzino":4877,
"territorio":302,
"international":2670,
"soluzione":978,
"portale":2512,
"ciao":3717,
"greco":1321,
"calcolare":2469,
"stipulare":4351,
"foro":4811,
"essenzialmente":3231,
"giungere":657,
"scattare":3812,
"banda":1780,
"House":2574,
"rugby":4002,
"uccello":2415,
"opera":192,
"cinquanta":2513,
"ne":160,
"sala":1078,
"pena":1869,
"ipotizzare":2860,
"impostazione":4510,
"celebre":1170,
"insediare":4119,
"Adam":5068,
"comunicazione":1289,
"vacanza":3251,
"continuità":3924,
"portiere":4039,
"dichiarazione":1726,
"tempo":162,
"preferito":2979,
"algoritmo":3970,
"Space":4986,
"trasferire":644,
"oggetto":574,
"XIV":2399,
"undici":4269,
"comunicare":2348,
"Giovanna":4136,
"alzare":2448,
"garantire":1153,
"essenziale":2680,
"architetto":1796,
"radice":2092,
"stilistico":4602,
"Monti":4161,
"percorso":1084,
"proveniente":2534,
"chiamare":255,
"R.":4040,
"fiscale":3551,
"contento":4151,
"terminare":939,
"infortunio":3775,
"dittatura":4954,
"meglio":594,
"tradizione":789,
"Alan":4835,
"madre":611,
"David":1310,
"libero":631,
"concreto":2700,
"Award":3863,
"spendere":3159,
"davvero":1135,
"assunzione":4856,
"sport":1420,
"Berlino":2083,
"galassia":4130,
"dimostrazione":3168,
"dettaglio":1981,
"obbligo":2925,
"disastro":4012,
"Rocco":4195,
"fotografia":2338,
"coppia":861,
"oggettivo":4996,
"gigante":2434,
"Mosca":2679,
"maschile":1669,
"Tg":2465,
"giusto":1079,
"soprannominare":4474,
"collaborazione":1100,
"colpire":762,
"perfetto":1263,
"diffondere":1204,
"acronimo":4946,
"toccare":1463,
"decina":2028,
"immenso":3736,
"Grande":1460,
"arcidiocesi":3155,
"protezione":1661,
"set":564,
"venire":122,
"leader":1570,
"Classe":5007,
"tipo":291,
"agenzia":2244,
"corrispondere":1715,
"acquisire":1660,
"funzione":498,
"bancario":4911,
"pubblicità":1974,
"ambito":922,
"sceneggiatore":4673,
"Pier":4983,
"risentire":5049,
"licenziare":4800,
"competizione":1383,
"urlare":4118,
"anziché":3171,
"imprenditore":3321,
"grasso":4107,
"rispondere":958,
"promessa":3352,
"allenatore":1805,
"marina":1185,
"raccogliere":786,
"osservatore":3707,
"sfiorare":4756,
"trono":1686,
"qualora":3895,
"rinchiudere":4536,
"Garibaldi":3742,
"attirare":2803,
"latino":1027,
"poliziotto":3122,
"consentire":739,
"Rivoluzione":3647,
"testimoniare":2362,
"Roberto":1481,
"dita":5030,
"dapprima":2736,
"compagno":902,
"caricare":3511,
"km":734,
"raggiungere":309,
"mysql_select_db":2520,
"reato":2492,
"rinascimentale":4229,
"locale":486,
"km²":3480,
"tribunale":1599,
"sfida":1823,
"strappare":4121,
"sabbia":3935,
"network":4207,
"martedì":2704,
"importante":257,
"post":2323,
"Mussolini":3588,
"III":858,
"compatibile":4998,
"prete":3853,
"vittoria":567,
"mito":2496,
"vincere":307,
"equazione":2536,
"organo":1512,
"perseguire":4638,
"flotta":2181,
"qualsiasi":897,
"emergenza":3271,
"Primo":4864,
"ricordo":1989,
"angelo":3031,
"ecclesiastico":2639,
"ritirare":1325,
"incontro":747,
"ricorso":3167,
"fianco":1407,
"tribù":2110,
"caratteristico":1936,
"Hans":4887,
"posizione":403,
"sfortunato":4178,
"Matt":4610,
"invisibile":4190,
"evidentemente":3179,
"miglior":1409,
"assente":4337,
"Vaticano":3753,
"fallire":2317,
"horror":4395,
"pieno":731,
"superficie":954,
"tempesta":3375,
"immettere":4732,
"appartenenza":4212,
"raggiungibile":4808,
"attorno":1142,
"Cuba":4759,
"vettore":3423,
"decennio":1533,
"Re":1477,
"situare":647,
"I'":5062,
"allungare":2937,
"Società":2517,
"riuscire":222,
"Regno":944,
"strutturale":4003,
"evitare":875,
"fattore":1685,
"incentrare":3995,
"lo":141,
"salvezza":3789,
"gesto":2718,
"civile":760,
"Palazzo":1470,
"rappresentante":1749,
"Ponte":3566,
"classificazione":3107,
"natura":869,
"leva":4458,
"Olanda":3538,
"circuito":1822,
"estetico":3409,
"sentenza":2389,
"Paul":1719,
"corretto":2426,
"dorato":5051,
"aiutare":842,
"profondo":982,
"pala":4620,
"dall":3398,
"RaiUno":3306,
"utilizzare":240,
"intellettuale":2215,
"Force":4555,
"avvio":4167,
"ortodosso":4070,
"fornire":764,
"convocare":2985,
"Mentana":2821,
"appoggio":2371,
"Costa":3465,
"occidentale":927,
"circostante":3222,
"borsa":3653,
"Istituto":3606,
"Amministrazione":2518,
"migliaio":1785,
"Laura":3233,
"scoperta":1576,
"digitale":1827,
"Michael":1506,
"abside":4154,
"giù":3914,
"fiore":1824,
"Tempio":4829,
"orbita":3030,
"esibire":2136,
"peso":1085,
"modello":478,
"White":4166,
"arruolare":4586,
"manutenzione":3933,
"innovativo":2972,
"provinciale":2283,
"durare":1419,
"trascorrere":1836,
"vettura":1200,
"aula":3961,
"alimentazione":2853,
"was":4571,
"Lewis":5042,
"man":4867,
"no":595,
"Jim":3978,
"ospite":1574,
"migliorare":1474,
"Hill":3477,
"Forza":3307,
"clan":4018,
"peggiore":3541,
"metallico":3305,
"disco":674,
"uscita":1144,
"core":3758,
"bacio":4885,
"già":191,
"der":4652,
"Caterina":2644,
"cristiano":1112,
"Diego":3846,
"malgrado":2915,
"storia":175,
"equivalente":3022,
"turco":2466,
"bombardiere":4124,
"reddito":3383,
"registrazione":1911,
"vendita":1317,
"spinta":3274,
"<NEG>":6,
"codice":1273,
"immaginario":2302,
"prossimità":3113,
"specializzato":4494,
"valere":1203,
"you":4581,
"vegetazione":4501,
"testamento":4826,
"opinione":1696,
"segnare":915,
"fiorentino":3134,
"alieno":3624,
"s'":1761,
"ereditare":3553,
"rispetto":335,
"funzionamento":2865,
"armamento":3354,
"ducato":2705,
"Giuliano":3739,
"colorato":4142,
"sanitario":3238,
"inglese":429,
"Sebastiano":4917,
"disturbo":3453,
"Usa":1103,
"ottavo":3369,
"conosciuto":2378,
"mare":720,
"gioventù":4025,
"regime":1436,
"discendere":4947,
"completare":1299,
"influenza":1304,
"iniziare":242,
"rinnovare":2563,
"ospedale":1602,
"Adige":4852,
"salire":1453,
"disporre":1064,
"fontana":4163,
"noto":194,
"urbanistico":4059,
"signore":1148,
"software":1759,
"diocesi":1430,
"favorevole":2755,
"compositore":2179,
"Torino":881,
"regalo":4491,
"australe":3883,
"figura":707,
"differenziare":3297,
"giustizia":1973,
"coalizione":3335,
"rabbia":4411,
"specificare":4086,
"segno":1092,
"Giuseppe":910,
"Chuck":4515,
"studio":316,
"compromettere":4810,
"Iran":3822,
"socialista":2568,
"presto":903,
"Navy":5006,
"principalmente":1226,
"predecessore":3959,
"rivista":1020,
"des":2884,
"College":3807,
"nuovo":146,
"abbandonare":744,
"geografico":2484,
"difesa":916,
"mysql_query":2521,
"Sergio":2783,
"Brasile":2299,
"legale":1978,
"moltiplicare":4624,
"Federazione":4750,
"Records":2195,
"costruire":347,
"riproporre":3530,
"offensiva":4953,
"scopo":767,
"<SOS>":2,
"entrata":4726,
"apertura":1338,
"be":4640,
"rumore":3776,
"soddisfazione":4276,
"accennare":4920,
"demone":3435,
"critica":964,
"lavorare":424,
"entro":1488,
"stasera":4243,
"emettere":1976,
"conoscere":327,
"investimento":2933,
"immagine":572,
"cinematografico":1435,
"tecnica":816,
"padre":391,
"trovare":158,
"sociale":505,
"domani":3027,
"parola":453,
"aggirare":4250,
"terme":4073,
"Svezia":2784,
"drago":4599,
"Diocesi":4219,
"collegare":961,
"al":106,
"semplicemente":1257,
"battere":1066,
"settore":836,
"trionfo":4253,
"conoscenza":951,
"formulare":4434,
"internazionale":448,
"ognuno":2164,
"irlandese":3016,
"sentiero":3025,
"scavo":3200,
"trama":879,
"insegnante":2367,
"rigido":2928,
"Capo":2549,
"fini":2804,
"geometrico":4653,
"contenuto":823,
"scafo":4593,
"Nicola":2159,
"notturno":3062,
"ospitare":895,
"italia":4675,
"ovest":1670,
"indurre":2329,
"chiaro":693,
"Scala":4956,
"leggero":1326,
"località":1187,
"giudiziario":2965,
"rettangolare":4078,
"Est":3643,
"emittente":3533,
"neonato":3928,
"precedente":473,
"divieto":4372,
"in":102,
"generale":350,
"montaggio":4921,
"propaganda":4527,
"condizione":577,
"elettore":2827,
"piemontese":4226,
"macchina":844,
"crisi":1082,
"attivare":2816,
"India":2067,
"acido":2442,
"carbonio":4115,
"germanico":4160,
"indipendente":1442,
"Scott":3523,
"significato":1451,
"università":1270,
"destinazione":3445,
"plastica":4023,
"dare":157,
"energia":821,
"animato":2932,
"link":4894,
"area":398,
"nobile":1571,
"innumerevole":4470,
"Paolo":797,
"Giappone":1431,
"luna":3611,
"esecutivo":3782,
"ministro":886,
"cavallo":1222,
"accusa":1932,
"morire":361,
"disperato":3730,
"emisfero":1985,
"polvere":3744,
"rai":4327,
"davanti":909,
"Alberto":1625,
"turista":3705,
"s.l.m.":4831,
"figurare":4193,
"britannico":950,
"erogare":4650,
"dirigere":454,
"capolavoro":3245,
"foresta":2326,
"progressivo":1855,
"eretto":3945,
"treno":1635,
"polizia":1102,
"posizionare":2676,
"domanda":1176,
"ricordare":382,
"descrivere":796,
"abuso":3770,
"mutare":3509,
"età":534,
"toponimo":4766,
"parzialmente":3342,
"firma":2112,
"orario":2721,
"dominio":1563,
"danno":1157,
"femmina":2191,
"sconfitto":4084,
"frequentato":4944,
"Ferrari":2132,
"severo":4133,
"pittorico":4698,
"sceneggiatura":3109,
"tenente":4969,
"pontificio":4123,
"coordinare":4740,
"velocità":721,
"omosessuale":3461,
"Corte":2125,
"ciascun":3743,
"corto":2199,
"Vittorio":1943,
"riservare":1594,
"verbale":4776,
"letteratura":1432,
"presidente":582,
"andare":176,
"criticare":2716,
"colui":3502,
"North":4990,
"svariato":3350,
"Alessandria":3621,
"addestramento":3573,
"data":3648,
"cedere":1381,
"cella":3246,
"piazzare":3977,
"Polizia":4301,
"Porto":3258,
"cavalleria":3920,
"eseguire":827,
"costoso":3391,
"repubblicano":3115,
"indispensabile":4184,
"visibilità":3466,
"Nero":3783,
"trazione":3616,
"online":2916,
"dinamica":3177,
"direttore":848,
"com'":4439,
"singolo":447,
"volo":1120,
"incrociatore":4523,
"DC":3622,
"regione":468,
"Unione":1158,
"multiplo":4841,
"nono":4753,
"pollice":4941,
"analizzare":2635,
"Genova":1389,
"sfera":2829,
"nascita":1111,
"anziano":2143,
"misterioso":2320,
"elenco":2072,
"porta":829,
"tornare":338,
"Giorgio":1301,
"abate":3965,
"chissà":4431,
"insieme":276,
"incredibile":3050,
"festeggiare":3357,
"gloria":4417,
"Barcellona":3437,
"la":211,
"avanzato":4587,
"dominazione":4543,
"operazione":795,
"terzo":389,
"campo":357,
"L.":3925,
"sonoro":1519,
"idrogeno":3879,
"stava":1865,
"Corso":5032,
"adulto":1958,
"comunque":388,
"Fabrizio":3596,
"tendenza":2281,
"unitario":4285,
"sondaggio":4694,
"frazione":1251,
"Orchestra":4797,
"Fabio":3362,
"gay":2982,
"differire":4616,
"atteggiamento":2385,
"Ministero":2293,
"Svizzera":2184,
"esempio":250,
"Regione":2953,
"molteplice":4194,
"Abruzzo":4792,
"distinto":3894,
"Federico":1572,
"gente":989,
"dotato":1833,
"prototipo":2486,
"originario":866,
"quindici":4041,
"attraversare":1168,
"arco":1300,
"scalo":5099,
"interessare":1281,
"musicale":609,
"omicidio":2452,
"carabiniere":2911,
"Romeo":4089,
"feb":1593,
"Visconti":4503,
"pilastro":4066,
"popolo":738,
"povertà":4455,
"HD":3601,
"numero":258,
"approdare":3704,
"facciata":1546,
"discussione":1458,
"andamento":4222,
"SS":3503,
"Salerno":3788,
"riproduzione":3890,
"residente":2828,
"agosto":496,
"propria":1716,
"denominazione":2041,
"materia":1367,
"precario":4760,
"signor":3885,
"riflessione":2742,
"maturare":4930,
"ah":4314,
"dono":3379,
"unione":2177,
"Campania":3711,
"Premio":2273,
"lunghezza":1335,
"Val":1923,
"romantico":3447,
"cambiamento":1561,
"tematico":2887,
"diametro":2771,
"sei":703,
"scuderia":4733,
"liceo":3808,
"indiretto":3976,
"creatura":2532,
"nucleo":1798,
"Guglielmo":2424,
"biografia":683,
"vero":263,
"nomination":4712,
"quinto":1585,
"presentazione":2777,
"West":3096,
"cultura":677,
"regionale":1285,
"pensione":3810,
"pezzo":925,
"valido":1949,
"banale":5085,
"assumere":605,
"collocare":1656,
"talmente":3482,
"US":4891,
"tedesco":422,
"collegio":3080,
"costa":1290,
"pensiero":1192,
"ricchezza":2709,
"erba":3834,
"logico":4224,
"boom":4324,
"somma":2194,
"pistola":3431,
"ovvero":765,
"cratere":4557,
"vescovo":948,
"ci":143,
"fine":206,
"Corrado":4196,
"ossa":4529,
"stesso":178,
"licenza":2019,
"te":1184,
"pesce":1996,
"serio":1505,
"messicano":4809,
"incremento":3564,
"Alessandro":1424,
"Alessandra":5050,
"file":1356,
"porre":404,
"organizzazione":801,
"milanese":2411,
"cupola":3737,
"accadere":1083,
"boss":4825,
"top":4244,
"sosta":4548,
"costiero":3286,
"fascia":1589,
"team":1841,
"Ferrara":2987,
"propulsore":3975,
"evolvere":4270,
"infrastruttura":3618,
"protetto":4162,
"ossia":1646,
"dopo":136,
"sospensione":2978,
"fronte":620,
"variante":1754,
"delitto":3635,
"coperto":4550,
"gravità":4416,
"rimuovere":2814,
"notevolmente":2927,
"infine":621,
"omonimo":1291,
"positivo":1305,
"Napoleone":2677,
"benessere":4106,
"insolito":5080,
"toscano":3684,
"Web":4408,
"meccanico":1496,
"convenzione":3359,
"aggressione":4952,
"informazione":557,
"palla":2845,
"efficace":2204,
"Campo":4159,
"accompagnare":1107,
"carro":1778,
"disposizione":1138,
"con":112,
"Hall":3804,
"cominciare":504,
"missione":1008,
"spingere":1280,
"altro":127,
"contraddizione":5039,
"interrompere":1871,
"sacro":1767,
"notte":777,
"scala":1513,
"contatto":969,
"ricostruire":1876,
"centimetro":4388,
"Kevin":4635,
"mysql_fetch_array":2519,
"Berlusconi":1737,
"Baviera":3915,
"m)":3366,
"e/o":2924,
"etichetta":1960,
"Festival":1415,
"dialetto":2084,
"però":217,
"Valle":2009,
"Costantinopoli":3741,
"carne":2212,
"probabilmente":618,
"tour":1552,
"Grecia":2150,
"poiché":713,
"Piano":4199,
"assistente":3329,
"pagina":935,
"reggere":2260,
"Morgan":4340,
"collocazione":4419,
"botanico":4974,
"gravemente":4060,
"City":2097,
"Emilia":3852,
"dea":4394,
"pessimo":4633,
"Medici":3249,
"gran":684,
"Battista":2628,
"probabile":2533,
"marziale":4294,
"seggio":3984,
"Arte":2751,
"ecc.":2296,
"appello":2691,
"denominare":1433,
"Ettore":5028,
"Sky":2337,
"divertire":3672,
"ripetuto":4046,
"XIX":1674,
"svizzero":2552,
"possedere":912,
"infatti":238,
"musica":465,
"accanto":1787,
"sistemare":3700,
"senatore":2707,
"entrare":290,
"Aldo":3580,
"disegnare":2039,
"liberazione":3034,
"secco":3634,
"Raidue":4034,
"Potrai":4485,
"parco":976,
"comparire":1264,
"genetico":3302,
"conduttore":2404,
"assistenza":3240,
"pesca":3725,
"Yes":2279,
"finestra":1999,
"tradizionalmente":4137,
"ben":336,
"lì":1214,
"crollo":4144,
"competente":5033,
"criterio":2581,
"alquanto":4933,
"Torre":2611,
"siccome":4659,
"togliere":1664,
"densità":3314,
"visualizzazione":4427,
"s.t.":3044,
"tali":1181,
"speranza":1919,
"stare":385,
"vice":4703,
"ott":1675,
"Tony":2893,
"composto":3013,
"influenzare":1831,
"albero":1465,
"sentire":573,
"lungo":230,
"conversazione":4866,
"provvedere":2697,
"salvo":2335,
"filosofo":3070,
"divino":2967,
"tronco":4516,
"fantasma":4237,
"Martin":2398,
"maniera":956,
"rigore":3411,
"giudicare":2006,
"stella":601,
"ricercatore":2963,
"Sinistra":4995,
"ottimo":1073,
"metropolitana":4872,
"fortuna":1795,
"Claudia":4336,
"vincente":4350,
"forma":253,
"importare":3214,
"appassionato":4050,
"for":1126,
"incrocio":4889,
"Stella":4692,
"pilota":965,
"gestione":1250,
"sonda":4882,
"ferrovia":2388,
"termico":3594,
"ago":1399,
"<EOS>":3,
"comprare":1797,
"depressione":4370,
"animare":4379,
"inevitabile":4899,
"apostolico":3836,
"particolarmente":1062,
"Lucio":3819,
"preliminare":5089,
"Romania":3968,
"successo":297,
"vestito":3655,
"risposta":1055,
"bottiglia":4679,
"innalzare":4651,
"fermare":1104,
"sciopero":3828,
"penetrare":4345,
"maestro":1077,
"EP":4526,
"san":2000,
"record":1497,
"Ascolti":1917,
"difficoltà":1147,
"medio":825,
"Disney":3153,
"inviare":864,
"Les":5075,
"testimonio":5038,
"protestante":4131,
"arrendere":4467,
"giu":1921,
"Verde":4630,
"Siena":2815,
"contrario":1054,
"dialogo":2561,
"telefonico":3540,
"rimettere":3713,
"capitano":1882,
"pagano":5052,
"natale":2898,
"schema":2236,
"su":116,
"mortale":4056,
"passivo":4583,
"Intel":4403,
"dispositivo":2102,
"carta":1000,
"significativo":1498,
"tagliare":1892,
"unire":1043,
"conservare":806,
"polacco":2589,
"commettere":2476,
"ebreo":1579,
"esibizione":3244,
"tuo":679,
"voglia":2217,
"Ferdinando":2610,
"Occidente":4026,
"commento":437,
"accertare":4321,
"identificare":1393,
"orchestra":2629,
"innanzitutto":4331,
"egli":480,
"artistico":1086,
"vento":2078,
"fuoco":1033,
"emozione":3020,
"trarre":1026,
"estendere":1165,
"reliquia":4880,
"popolarità":3123,
"mag":1862,
"riparare":4387,
"ungherese":3902,
"vuoto":2044,
"favorire":1693,
"mangiare":1904,
"richiesta":719,
"storico":333,
"on":462,
"nomina":3150,
"voce":218,
"sciogliere":2401,
"combattimento":1590,
"costante":1687,
"poté":3021,
"apparente":4482,
"fonte":627,
"esercizio":2198,
"serie":201,
"stampare":4061,
"Veneto":3156,
"cielo":1215,
"invito":3472,
"penale":2957,
"auto":1106,
"XVII":2225,
"Striscia":4881,
"amicizia":2090,
"etica":4661,
"community":3887,
"oppure":835,
"qualche":372,
"rivoluzione":1925,
"normativa":3827,
"professionista":2249,
"miracolo":4032,
"incominciare":4888,
"gennaio":530,
"fazione":4029,
"funzionale":3670,
"proiettile":3905,
"coltivazione":3664,
"blocco":1881,
"terapia":2759,
"fatica":3500,
"confrontare":3029,
"montagna":1404,
"binario":2877,
"affetto":2955,
"don":3040,
"netto":1897,
"profondità":2620,
"parità":5014,
"Angeles":2461,
"silenzio":2872,
"coincidere":3299,
"compatto":4227,
"internet":2654,
"sindrome":5082,
"tardo":2437,
"ce":1639,
"incerto":4158,
"rappresentativo":5037,
"Perugia":3696,
"Pokémon":3426,
"Europa":516,
"reagire":3402,
"massacro":4410,
"spada":2453,
"Fiat":2740,
"Paola":3657,
"uovo":2576,
"organizzatore":5087,
"ciò":334,
"Suo":4487,
"mancare":988,
"contadino":2231,
"allontanare":1817,
"Pur":3960,
"bere":2947,
"infezione":4892,
"settentrionale":1665,
"Pavia":3891,
"eleggere":1098,
"coltivare":3219,
"essa":636,
"coinvolti":4402,
"obbligare":3340,
"Agostino":3590,
"prevalentemente":2054,
"universale":2781,
"tentare":799,
"Vienna":2435,
"grigio":3180,
"derivato":3644,
"ignorare":3145,
"discorso":1616,
"costringere":645,
"programma":383,
"adorare":5063,
"This":4258,
"importanza":1080,
"volante":3559,
"novità":2115,
"Parlamento":2875,
"vedova":4500,
"tu":1288,
"terra":467,
"dorsale":4641,
"nonna":4601,
"en":4563,
"roccia":2286,
"modo":205,
"conflitto":1146,
"Comitato":2702,
"magia":3394,
"sovietico":1518,
"dolce":2073,
"programmazione":2731,
"scozzese":4001,
"periodo":241,
"aria":1042,
"meritare":2562,
"using":2489,
"indicare":483,
"gioia":3909,
"Filmografia":3185,
"lasciare":271,
"fascista":2071,
"concordare":4173,
"conte":994,
"naso":4779,
"frontiera":3593,
"formare":532,
"concorrente":2119,
"Washington":2806,
"classificato":4666,
"famoso":537,
"podio":4613,
"decorazione":2349,
"Italiani":4806,
"cadavere":3682,
"fiume":628,
"feudo":3102,
"contenitore":4415,
"sinistra":908,
"Apple":3451,
"tema":360,
"ipotesi":1405,
"dipartimento":2122,
"misero":4768,
"Brian":4192,
"adozione":3656,
"quasi":318,
"sacco":4197,
"All":3111,
"nano":4045,
"fisico":811,
"scusa":4293,
"apprezzare":1900,
"praticare":2637,
"States":4761,
"dato":575,
"presenza":409,
"inseguire":3575,
"resistenza":1232,
"Milan":3048,
"miglio":3908,
"apprendere":3071,
"cast":2711,
"spiegare":847,
"svelare":4132,
"asso":4449,
"Po":4307,
"pianeta":1259,
"offrire":555,
"romanzo":670,
"necessario":508,
"anarchico":4093,
"richiedere":637,
"dominante":3777,
"produttivo":2774,
"confine":993,
"minaccia":2014,
"scenario":3518,
"disponibilità":3637,
"fame":3972,
"server":3054,
"Domenico":2470,
"cacciare":3046,
"fortezza":2622,
"attribuire":1177,
"allestimento":4570,
"alternare":3247,
"residuo":4252,
"difficile":856,
"stabilire":814,
"nemmeno":1575,
"aspettativa":4371,
"mensile":4711,
"una":111,
"disputare":1486,
"omaggio":3698,
"denuncia":3922,
"interpretazione":1548,
"seduta":4448,
"comporre":489,
"solare":1681,
"prova":772,
"possesso":1988,
"Marvel":3980,
"sensibilità":4117,
"Lecce":4320,
"ritardo":3181,
"facezia":1097,
"climatico":4672,
"giovanile":2079,
"teatro":880,
"doppio":920,
"Eugenio":4433,
"ruotare":2377,
"Johnson":5065,
"montuoso":4209,
"pari":930,
"parrocchia":2587,
"lamentare":3912,
"concentrare":1866,
"stradale":2509,
"stemma":2502,
"Saint":4561,
"Tim":4796,
"sognare":3791,
"cannone":1800,
"cucina":2379,
"Arthur":4442,
"rispettivo":1272,
"nome":167,
"Senato":2810,
"cappella":1528,
"teoria":794,
"giornalista":1292,
"vano":4317,
"incarico":1459,
"fedeltà":4681,
"primo":132,
"centrale":520,
"proprio":152,
"versante":3654,
"Boy":5076,
"tumore":4233,
"analisi":1186,
"consegna":3571,
"adesione":4459,
"Ovest":4758,
"grotta":3535,
"accento":4858,
"resource":750,
"generalmente":1361,
"guardia":2161,
"Dante":3052,
"Sofia":4451,
"tropicale":4297,
"ditta":3805,
"impatto":2336,
"monte":1511,
"Tommaso":3456,
"assalto":3473,
"Terra":1145,
"ricadere":3323,
"trattare":289,
"breve":624,
"fidanzato":2745,
"fra":236,
"droga":2658,
"circolazione":3619,
"canone":3911,
"veicolo":1735,
"Distretto":4849,
"favore":1124,
"consistente":3322,
"Karl":4170,
"Margherita":3522,
"remoto":3999,
"scontro":1208,
"spunto":3826,
"giorno":202,
"inventare":2430,
"partire":308,
"contrasto":2193,
"freno":4052,
"identico":3009,
"investire":2468,
"ristrutturazione":3993,
"soldo":1516,
"XX":1790,
"sebbene":1023,
"od":4273,
"filtro":4546,
"televisione":1277,
"relativo":661,
"istituzionale":3781,
"professore":1700,
"lavorazione":3084,
"situazione":565,
"Porta":2026,
"elemento":538,
"nonostante":428,
"fan":2646,
"sorprendere":3944,
"regalare":2855,
"comunale":1293,
"test":1910,
"rimanere":244,
"straniero":1484,
"comunità":883,
"rimandare":4074,
"scavare":3790,
"appena":714,
"santo":1089,
"scontrare":3478,
"sempre":166,
"libreria":4164,
"olandese":2554,
"sopra":779,
"libertà":913,
"Costantino":4234,
"sensore":4772,
"belga":4590,
"Institute":5023,
"rottura":3397,
"fanteria":3519,
"espansione":2173,
"assegnare":1164,
"varietà":1791,
"professionale":2082,
"embrione":3825,
"inedito":2305,
"sta":834,
"ossigeno":4068,
"errore":1362,
"assassino":3192,
"Lazio":2373,
"complessivo":1756,
"processo":552,
"sparare":2066,
"punizione":4961,
"foglia":2262,
"elezione":949,
"stringere":3454,
"ladro":5054,
"dottrina":2692,
"Football":4769,
"parrocchiale":3165,
"Harry":2497,
"apparire":540,
"clinico":4218,
"computer":1535,
"isola":570,
"Cina":1588,
"cosa":219,
"discendente":2683,
"contare":990,
"ministero":2941,
"equipaggio":2376,
"sostenere":586,
"Johann":4780,
"Tokyo":4573,
"cronaca":1775,
"opportuno":2787,
"intanto":1444,
"sentimento":2233,
"debole":2247,
"dimissione":3927,
"corsa":1294,
"scatenare":2867,
"idea":514,
"fissare":1768,
"spettare":3845,
"simbolico":3577,
"svolta":3203,
"lezione":2608,
"delimitare":4009,
"vista":667,
"essi":771,
"meccanismo":2004,
"evidenza":3268,
"restare":625,
"corte":1391,
"vendicare":4316,
"designare":3236,
"lui":243,
"Bibbia":4335,
"russo":1063,
"opportunità":2782,
"abbassare":3991,
"disciplina":2098,
"Calabria":2539,
"distinguere":1101,
"preda":3490,
"assoluto":743,
"ligneo":4475,
"villa":906,
"funzionare":1854,
"concedere":1348,
"precisare":3677,
"comprendere":352,
"tvblog":2739,
"motivazione":3255,
"imprigionare":4108,
"destino":2444,
"Stati":493,
"esito":3769,
"capitolo":1811,
"Giovanni":469,
"son":4095,
"tal":1863,
"scolastico":2754,
"Ravenna":5098,
"neppure":2805,
"equipaggiare":3217,
"distruzione":2114,
"Venezia":1028,
"magnetico":3310,
"zio":2414,
"Prodi":4820,
"segnalare":1544,
"catalogo":3948,
"sottoporre":1287,
"caratteristica":542,
"Pupa":2695,
"Commons":4147,
"confondere":2885,
"comunemente":2726,
"user":2419,
"perduto":2339,
"mago":4165,
"leone":3796,
"corteo":4094,
"tipico":733,
"freddo":1701,
"dibattito":2275,
"Contea":4723,
"negare":2224,
"ad":124,
"vestire":2358,
"PARTE":3646,
"Polonia":2382,
"Isole":3866,
"Danimarca":3868,
"raffinato":4793,
"proclamare":3058,
"troppo":562,
"Jugoslavia":5004,
"sotto":228,
"parente":2964,
"autentico":3962,
"responsabile":1312,
"portatore":5067,
"cartone":3439,
"tecnologico":2826,
"testuale":4540,
"edificare":2660,
"scarso":1578,
"simile":472,
"commissario":2970,
"europeo":445,
"piano":419,
"Black":2649,
"quota":1449,
"direzione":917,
"membro":466,
"assassinare":3729,
"blu":2016,
"percorrere":1820,
"manga":2515,
"altitudine":4360,
"sia":285,
"inchiesta":2701,
"anch'":1332,
"Politici":4579,
"La":2027,
"Castel":4575,
"telaio":3404,
"nicchia":4646,
"fibra":3794,
"macchia":4008,
"Mike":3119,
"sessione":4128,
"filosofia":1641,
"regina":1934,
"che":105,
"scultore":4067,
"sano":3901,
"Vita":4221,
"imc_italy":2530,
"supportare":2505,
"asta":4342,
"realizzare":325,
"cinquecento":4870,
"Guardia":3884,
"League":1961,
"nero":705,
"Inserisci":4629,
"istruzione":1808,
"Paesi":2089,
"occasione":568,
"web":2423,
"durante":172,
"Luis":5081,
"mi":220,
"de":215,
"residenza":2537,
"modesto":3033,
"funerale":4568,
"pausa":4295,
"Matteo":2744,
"cantautore":4842,
"centinaio":1834,
"passaggio":992,
"capace":1249,
"nutrire":2580,
"apparizione":2200,
"Bretagna":2056,
"conservatore":4223,
"risparmiare":3532,
"incendio":2886,
"nazione":1262,
"medico":832,
"particolarità":4985,
"cinema":943,
"energetico":2935,
"disponibile":963,
"Alice":3837,
"Centro":2920,
"modificare":1279,
"categoria":890,
"austriaco":1840,
"detenere":3570,
"interesse":590,
"bordo":1483,
"coppa":3613,
"indipendentemente":4799,
"allenare":3326,
"Sport":3801,
"catena":1846,
"Massimo":1979,
"Atlantico":4993,
"that":3910,
"Nazioni":3176,
"Sovietica":2994,
"mese":323,
"news":4696,
"polare":4898,
"battaglia":495,
"accettare":820,
"federazione":2514,
"cardinale":1684,
"copia":1492,
"menzionare":3010,
"necessitare":3919,
"commissionare":4189,
"punteggio":3847,
"istituire":1760,
"francese":395,
"Sam":3120,
"Enzo":4220,
"anticipo":4469,
"verde":1647,
"benissimo":4585,
"prestare":2087,
"adatto":2118,
"minoranza":3513,
"tratto":1591,
"ruolo":399,
"Mediaset":2590,
"scusare":4021,
"Sanremo":2593,
"tessuto":2268,
"esistere":416,
"sfociare":4517,
"Argentina":2776,
"serpente":4865,
"calciatore":2566,
"resto":589,
"separato":2535,
"Alfa":4051,
"temperatura":1213,
"affrontare":907,
"strato":2457,
"volto":2010,
"manifesto":3627,
"poco":182,
"Cristina":3888,
"pane":3572,
"affidare":1011,
"limitazione":4981,
"procedimento":3448,
"erigere":3005,
"tra":133,
"orizzontale":3800,
"F1":4428,
"eliminare":1248,
"album":341,
"norma":1468,
"affinché":2950,
"colonnello":4028,
"senso":539,
"cerimonia":2597,
"difendere":1331,
"Monaco":2825,
"tutto":131,
"vicino":442,
"muscolo":5094,
"ridere":2464,
"etc":4426,
"assedio":2446,
"ove":2287,
"by":3463,
"informatico":4984,
"causare":995,
"Piemonte":3064,
"laico":3719,
"dipendenza":3721,
"trattato":3291,
"caffè":4248,
"periodico":2807,
"autem":4257,
"rapporto":487,
"Windows":2710,
"percentuale":2722,
"ferito":2998,
"denied":2524,
"Austria":1883,
"altrettanto":2068,
"ammirare":3110,
"musicista":1536,
"cercare":330,
"manuale":3300,
"monastero":1847,
"Londra":1254,
"dipendente":3220,
"amare":1050,
"Cesaroni":2817,
"non":114,
"quest'":406,
"incapace":4232,
"vivere":328,
"alba":4656,
"pomeriggio":2551,
"qualità":805,
"collo":3584,
"pure":1032,
"prendere":208,
"potere":126,
"icona":4805,
"scomparire":1915,
"definire":474,
"seguente":659,
"appartenente":2747,
"parentesi":4801,
"corruzione":4456,
"insegnare":1598,
"bombardamento":2949,
"derivare":634,
"curiosità":1507,
"Casa":2178,
"rurale":3691,
"result":2500,
"si":110,
"salto":2341,
"patologia":4381,
"rame":2591,
"viaggiatore":4090,
"Unità":3771,
"esordire":2743,
"nave":649,
"martirio":4979,
"D.":4645,
"van":4390,
"immergere":3957,
"sostenitore":2832,
"fungo":4906,
"accusare":1514,
"requisito":4445,
"Niccolò":4843,
"Franco":1837,
"guardare":945,
"consapevolezza":3929,
"pasta":4361,
"strega":4884,
"diretto":664,
"istituto":1337,
"sogno":1624,
"Cinema":3346,
"km/h":3507,
"elementare":3292,
"diffuso":1487,
"paziente":1794,
"frattempo":1239,
"svolgere":507,
"torre":1303,
"bronzo":2425,
"sottoscrivere":4329,
"corso":362,
"onore":1276,
"notevole":926,
"brano":654,
"Cristo":1690,
"vicenda":1108,
"incrementare":3958,
"fumetto":1580,
"Real":3714,
"domenica":1134,
"rivolgere":1297,
"clandestino":4247,
"conquistare":865,
"stimare":2567,
"sperimentare":3241,
"curioso":2900,
"papa":1178,
"detto":4007,
"eroe":2021,
"preoccupare":2632,
"diffusione":1466,
"grano":4010,
"pazzo":3756,
"Monte":1323,
"opzionale":2976,
"Group":5015,
"proibire":4284,
"medaglia":1698,
"anima":2134,
"Unite":4175,
"modifica":1629,
"reality":3128,
"ciascuno":2482,
"qualunque":2085,
"progettare":1379,
"attraverso":408,
"adattamento":3248,
"soprannome":4625,
"fisso":2873,
"pressione":1359,
"qualcuno":1220,
"innovazione":2688,
"scoppiare":3287,
"conseguenza":941,
"tradire":3733,
"sostituzione":2956,
"alleato":1826,
"riva":2836,
"laureare":2901,
"biblico":5019,
"convinzione":3987,
"ripresa":2291,
"tesoro":3886,
"arabo":1667,
"velocemente":3953,
"olio":1969,
"autore":444,
"modalità":1740,
"guida":812,
"segnale":1655,
"altrimenti":2403,
"pubblicare":246,
"supporto":1491,
"sicuro":741,
"opzione":4110,
"soglia":4292,
"fiction":2699,
"Bill":3367,
"ricoprire":1447,
"sud":510,
"febbre":5070,
"aggiuntivo":4903,
"allargare":3641,
"XV":2220,
"Access":2130,
"rovinare":5024,
"variare":1608,
"rivale":2609,
"appartamento":3169,
"mysql_errno":2522,
"interno":231,
"viale":4377,
"appassionati":3917,
"stabilimento":3126,
"Live":3390,
"stavano":3701,
"Mary":3436,
"corrispondenza":2725,
"espellere":3840,
"air":2758,
"decisione":1193,
"qualificazione":3514,
"prefetto":4787,
"governatore":2456,
"annoverare":4263,
"promotore":4966,
"share":2765,
"premio":706,
"ala":1475,
"mamma":2678,
"corpo":432,
"mescolare":4406,
"ricavare":2053,
"impulso":3967,
"Rock":3216,
"considerazione":1849,
"esitare":4677,
"ammettere":1482,
"brillante":2792,
"Norvegia":4343,
"di":100,
"acustico":4972,
"malvagio":4596,
"ogni":180,
"chimica":2878,
"password":<PASSWORD>,
"soltanto":918,
"York":970,
"revisione":4626,
"pastore":4088,
"osservare":1071,
"scorta":3692,
"colpevole":3671,
"Peter":1991,
"debutto":2213,
"militari":2870,
"prezzo":1021,
"terrore":4830,
"rosso":839,
"cena":3642,
"sostanza":1418,
"inferiore":986,
"Michele":1657,
"terminale":3941,
"imbarcazione":4311,
"locomotiva":5097,
"bosco":2458,
"Marche":5008,
"medesimo":1986,
"talento":2314,
"componenti":1713,
"this":3892,
"scorso":1318,
"ordinario":3038,
"diplomatico":3218,
"fortificazione":4157,
"mm":1243,
"Bergamo":3317,
"ispirazione":3042,
"temporale":3190,
"Microsoft":3401,
"coro":2862,
"Pace":4538,
"entità":2851,
"Banca":3396,
"capire":610,
"vecchio":527,
"disegno":1355,
"acquisizione":4138,
"Corriere":4669,
"nominare":737,
"loro":145,
"Alexander":4741,
"paese":282,
"Principe":3392,
"General":5055,
"Francesca":3667,
"nuova":1717,
"distribuire":1547,
"RSS":4846,
"tetto":2790,
"narrazione":4511,
"with":2888,
"batterista":3699,
"Paese":1939,
"consueto":4989,
"mira":4306,
"visitare":2036,
"concorso":1857,
"rifugiare":3309,
"volontà":1534,
"velivolo":2343,
"Angelo":2076,
"parlamento":2394,
"Rosa":2798,
"Gaetano":4334,
"arma":626,
"gomma":3712,
"ottenere":266,
"Email":3207,
"cortile":3848,
"utilità":4909,
"attenzione":1037,
"applicare":1371,
"murario":4964,
"conservazione":3160,
"osservazione":1584,
"Lucca":4714,
"delicato":3433,
"repressione":4180,
"Chicago":3574,
"Atene":3508,
"sì":1329,
"garanzia":3301,
"collaborare":1412,
"Van":3204,
"febbraio":639,
"referendum":3645,
"oceano":4615,
"convincere":977,
"intitolare":1191,
"avanguardia":4519,
"costituzionale":3242,
"telefonata":4818,
"prezioso":2346,
"laurea":2613,
"Grand":3376,
"rifugio":3073,
"contro":189,
"copertina":2959,
"barca":3666,
"sindacale":5022,
"reazione":1380,
"mar":1422,
"associare":1562,
"altare":1615,
"memoria":1030,
"C.":3452,
"vocale":2687,
"recuperare":1704,
"un":108,
"avanzata":3906,
"dichiarare":663,
"film":184,
"at":3051,
"estate":967,
"fare":123,
"capo":583,
"dominare":1896,
"godere":1832,
"preoccupazione":4432,
"Eva":4970,
"scultura":2428,
"riempire":3198,
"piattaforma":2297,
"sbarcare":4031,
"deporre":3697,
"grazie":326,
"pur":790,
"tifoso":2973,
"ma":121,
"assicurare":1564,
"ritenere":613,
"affiancare":1879,
"veste":2918,
"soap":4702,
"verificare":1235,
"felice":2274,
"architettonico":2694,
"Factor":2653,
"anteriore":1757,
"corda":2778,
"Warning":587,
"et":558,
"moto":1308,
"persiano":4701,
"inno":4927,
"polemica":2374,
"Amore":3529,
"Daniele":3308,
"narrare":2032,
"localizzare":5018,
"probabilità":2938,
"Corona":5034,
"procedere":1891,
"sorgere":1118,
"cacciatore":4260,
"progetto":232,
"Sicilia":1489,
"lug":2393,
"resa":3127,
"titolo":281,
"Et":3476,
"coda":1682,
"camminare":3715,
"smettere":3068,
"squadra":301,
"distretto":2149,
"girare":1109,
"ritorno":752,
"adiacente":4385,
"clamoroso":4994,
"fascio":5001,
"costo":924,
"campanile":3364,
"intendere":972,
"solista":2543,
"Liguria":4839,
"ridicolo":5092,
"riunione":3093,
"venerdì":2127,
"fedele":3074,
"primario":2075,
"immigrato":3992,
"Pio":2625,
"conseguire":2300,
"effettuare":687,
"critico":1403,
"emissione":2565,
"annesso":3498,
"distribuzione":1527,
"essenza":5011,
"matematico":2175,
"stanco":4932,
"from":4312,
"passo":1004,
"albergo":3457,
"pinna":4988,
"invocare":5086,
"prossimo":783,
"tre":188,
"nucleare":1856,
"racchiudere":4006,
"abilità":2060,
"Unito":2157,
"scambio":2137,
"onda":782,
"va":2895,
"cover":3252,
"Urss":4976,
"lavorio":685,
"viso":4374,
"registro":3131,
"offerta":4634,
"accademico":3582,
"tela":2406,
"costume":2340,
"attaccare":1056,
"sorte":4063,
"Jackson":3651,
"Max":2616,
"Florida":5057,
"economico":518,
"Roma":277,
"campione":1007,
"cura":1123,
"laboratorio":2216,
"fuga":1948,
"donazione":4857,
"marino":3652,
"Cultura":3545,
"meta":3377,
"indiano":2363,
"inutile":2352,
"riscuotere":3368,
"borgo":2372,
"facilmente":1732,
"latte":3709,
"cane":1905,
}
|
StarcoderdataPython
|
3360944
|
from .viedit import ViEdit
version = "0.1.3"
|
StarcoderdataPython
|
178943
|
import tensorflow as tf
import sys
if __name__ == "__main__":
if len(sys.argv) >= 2:
raw_dataset = tf.data.TFRecordDataset(sys.argv[1])
for raw_record in raw_dataset.take(1):
example = tf.train.Example()
example.ParseFromString(raw_record.numpy())
print(example)
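# Usage sketch (illustrative file names; the script prints the first
# serialized tf.train.Example found in the given TFRecord file):
#   python inspect_tfrecord.py data/train.tfrecord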
|
StarcoderdataPython
|
3274185
|
<reponame>MUzzell/the_pokemon_api<filename>backend/backend/query_server.py
import re
from .base_server import BaseServer
STATS_REGEX = re.compile("([a-zA-Z0-9]+)([<>=]{1,2})(\\d+)")
class QueryServer(BaseServer):
def __init__(self, query_queue, pokedex):
super(QueryServer, self).__init__(query_queue)
self.query_queue = query_queue
self.pokedex = pokedex
self.q_func = {
'ID': self._get_by_id,
'NAME': self._get_by_name,
'TYPE': self._get_by_type,
'GEN': self._get_by_gen,
'LEGEND': self._get_by_legendary,
'STATS': self._get_by_stats
}
def _request_received(self, q_type, arg):
try:
result = self.q_func[q_type](arg)
except ValueError:
return 403, "Bad input"
except Exception:
return 500, "Internal server error"
else:
if not result:
return 404, "Not found"
else:
return 200, result
def _get_by_id(self, arg):
arg = arg.strip()
if not arg:
raise ValueError("Bad input")
return self.pokedex.get_pokemon_by_id(arg)
def _get_by_name(self, arg):
arg = arg.strip()
if not arg:
raise ValueError("Bad input")
return self.pokedex.get_pokemon_by_name(arg)
def _get_by_gen(self, arg):
arg = arg.strip()
if not arg:
raise ValueError("Bad input")
return self.pokedex.get_pokemon_by_generation(arg)
def _get_by_legendary(self, arg):
if arg.lower().strip() in ['0', 'f', 'false']:
arg = False
elif arg.lower().strip() in ['1', 't', 'true']:
arg = True
else:
raise ValueError("Bad input")
return self.pokedex.get_pokemon_by_legendary(arg)
def _get_by_type(self, arg):
p_types = [a for a in arg.split(',') if a.strip()]
if p_types:
return self.pokedex.get_pokemon_by_type(p_types)
else:
raise ValueError("Bad input")
def _get_by_stats(self, arg):
stats = []
for match in STATS_REGEX.finditer(arg):
if not match:
continue
stats.append((match.group(1), match.group(2), int(match.group(3))))
if stats:
return self.pokedex.get_pokemon_by_stats(stats)
return None
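# Minimal usage sketch (assumption: `queue` and `pokedex` are supplied by the
# application; only the dispatch behaviour defined above is exercised):
#
#   server = QueryServer(queue, pokedex)
#   server._request_received('NAME', 'pikachu')               # (200, result) or (404, "Not found")
#   server._request_received('STATS', 'attack>50,speed<=120') # parsed by STATS_REGEX
#   server._request_received('NAME', '')                      # (403, "Bad input") via ValueError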
|
StarcoderdataPython
|
22525
|
<filename>02_crowsnest/ragz_crowsnest.py
#!/usr/bin/env python3
"""
Date : 2021-09-06
Purpose: learning to work with strings
"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Crow\'s Nest -- choose the correct article',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('word',
metavar='word',
help='A word')
return parser.parse_args()
# --------------------------------------------------
def get_article(user_input):
"""Determine which article to use"""
# vowels = ['a', 'e', 'i', 'o', 'u']
if user_input[0] in 'aeiouAEIOU':
solution = "an"
else:
solution = "a"
return solution
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
word = args.word
article = get_article(word)
print("Ahoy, Captain, {} {} off the larboard bow!".format(article, word))
# --------------------------------------------------
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1696966
|
<gh_stars>0
from __future__ import annotations
from typing import Any, TypeVar, Set, Dict, Tuple, Optional
from grapl_analyzerlib.node_types import (
EdgeT,
PropType,
PropPrimitive,
EdgeRelationship,
)
from grapl_analyzerlib.nodes.entity import EntityQuery, EntityView, EntitySchema
from grapl_analyzerlib.queryable import with_str_prop, with_int_prop
from grapl_analyzerlib.schema import Schema
from grapl_analyzerlib.comparators import IntOrNot, StrOrNot, OneOrMany
NCQ = TypeVar("NCQ", bound="NetworkConnectionQuery")
NCV = TypeVar("NCV", bound="NetworkConnectionView")
def default_network_connection_properties():
return {
"src_ip_address": PropType(PropPrimitive.Str, False),
"src_port": PropType(PropPrimitive.Str, False),
"dst_ip_address": PropType(PropPrimitive.Str, False),
"dst_port": PropType(PropPrimitive.Int, False),
"created_timestamp": PropType(PropPrimitive.Int, False),
"terminated_timestamp": PropType(PropPrimitive.Int, False),
"last_seen_timestamp": PropType(PropPrimitive.Int, False),
}
def default_network_connection_edges() -> Dict[str, Tuple[EdgeT, str]]:
from grapl_analyzerlib.nodes.ip_port import IpPortSchema
return {
"inbound_network_connection_to": (
EdgeT(NetworkConnectionSchema, IpPortSchema, EdgeRelationship.ManyToOne),
"inbound_network_connections_from",
)
}
class NetworkConnectionSchema(EntitySchema):
def __init__(self):
super(NetworkConnectionSchema, self).__init__(
default_network_connection_properties(),
default_network_connection_edges(),
lambda: NetworkConnectionView,
)
@staticmethod
def self_type() -> str:
return "NetworkConnection"
class NetworkConnectionQuery(EntityQuery[NCV, NCQ]):
@with_int_prop("port")
def with_port(
self,
*,
eq: Optional[IntOrNot] = None,
gt: Optional[IntOrNot] = None,
ge: Optional[IntOrNot] = None,
lt: Optional[IntOrNot] = None,
le: Optional[IntOrNot] = None,
):
pass
@with_str_prop("ip_address")
def with_ip_address(
self,
*,
eq: Optional[StrOrNot] = None,
contains: Optional[OneOrMany[StrOrNot]] = None,
starts_with: Optional[StrOrNot] = None,
ends_with: Optional[StrOrNot] = None,
regexp: Optional[OneOrMany[StrOrNot]] = None,
distance_lt: Optional[Tuple[str, int]] = None,
):
pass
def with_inbound_network_connection_to(self, *inbound_network_connection_to):
return self.with_to_neighbor(
IpPortQuery,
"inbound_network_connection_to",
"inbound_network_connections_from",
inbound_network_connection_to,
)
@classmethod
def node_schema(cls) -> "Schema":
return NetworkConnectionSchema()
class NetworkConnectionView(EntityView[NCV, NCQ]):
"""
.. list-table::
:header-rows: 1
* - Predicate
- Type
- Description
* - node_key
- string
- A unique identifier for this node.
* - created_timestamp
- int
- Time the network connection was created (in millis-since-epoch).
* - terminated_timestamp
- int
- Time the network connection was terminated (in millis-since-epoch).
* - last_seen_timestamp
- int
- Time the network connection was last seen (in millis-since-epoch)
* - src_ip_address
- string
- IP Address of the network connection's source.
* - src_port
- string
- Port of the network connection's source.
* - dst_ip_address
- string
- IP Address of the network connection's destination.
* - dst_port
- string
- Port of the network connection's destination.
"""
queryable = NetworkConnectionQuery
def __init__(
self,
uid: str,
node_key: str,
graph_client: Any,
node_types: Set[str],
port: Optional[int] = None,
ip_address: Optional[str] = None,
**kwargs,
):
super().__init__(uid, node_key, graph_client, node_types, **kwargs)
self.node_types = set(node_types)
self.port = port
self.ip_address = ip_address
def get_port(self, cached=True):
return self.get_int("port", cached=cached)
def get_ip_address(self, cached=True):
return self.get_str("ip_address", cached=cached)
def get_inbound_network_connection_to(
self, *inbound_network_connection_to, cached=False
):
return self.get_neighbor(
IpPortQuery,
"inbound_network_connection_to",
"inbound_network_connections_from",
inbound_network_connection_to,
cached=cached,
)
@classmethod
def node_schema(cls) -> "Schema":
return NetworkConnectionSchema()
from grapl_analyzerlib.nodes.ip_port import IpPortQuery, IpPortView
NetworkConnectionSchema().init_reverse()
class NetworkConnectionExtendsIpPortQuery(IpPortQuery):
def with_inbound_network_connections_from(self, *inbound_network_connections_from):
return self.with_to_neighbor(
NetworkConnectionQuery,
"inbound_network_connections_from",
"inbound_network_connection_to",
inbound_network_connections_from,
)
class NetworkConnectionExtendsIpPortView(IpPortQuery):
def get_inbound_network_connections_from(
self, *inbound_network_connections_from, cached=False
):
return self.get_neighbor(
NetworkConnectionQuery,
"inbound_network_connections_from",
"inbound_network_connection_to",
inbound_network_connections_from,
cached=cached,
)
IpPortQuery = IpPortQuery.extend_self(NetworkConnectionExtendsIpPortQuery)
IpPortView = IpPortView.extend_self(NetworkConnectionExtendsIpPortView)
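# Usage sketch (assumptions: a graph client is configured elsewhere, and the
# with_* helpers return the query object for chaining, as elsewhere in
# grapl_analyzerlib):
#
#   q = (
#       NetworkConnectionQuery()
#       .with_port(eq=80)
#       .with_ip_address(starts_with="10.")
#       .with_inbound_network_connection_to(IpPortQuery())
#   )
#   # The reverse edge is reachable from the extended IpPortQuery:
#   IpPortQuery().with_inbound_network_connections_from(NetworkConnectionQuery())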
|
StarcoderdataPython
|
3232477
|
<filename>src/async_kinesis_client/kinesis_producer.py
import logging
import time
import aioboto3
from .retriable_operations import RetriableKinesisProducer
log = logging.getLogger(__name__.split('.')[-2])
# Following constants are originating from here:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kinesis.html#Kinesis.Client.put_records
MAX_RECORDS_IN_BATCH = 500
MAX_RECORD_SIZE = 1024 * 1024 # 1 Mb
MAX_BATCH_SIZE = 5 * MAX_RECORD_SIZE # 5 Mb
def _get_default_partition_key():
return '{0}{1}'.format(time.process_time(), time.time())
class AsyncKinesisProducer:
def __init__(self, stream_name, ordered=True, custom_kinesis_client=None):
self.stream_name = stream_name
self.ordered = ordered
self.seq = '0'
self.record_buf = []
self.buf_size = 0
# Allow a custom kinesis client to be passed in. This allows for setting of any additional parameters in
# the client without needing to track them in this library.
if custom_kinesis_client is not None:
client = custom_kinesis_client
else:
client = aioboto3.client('kinesis')
self.kinesis_client = RetriableKinesisProducer(client=client)
log.debug("Configured kinesis producer for stream '%s'; ordered=%s",
stream_name, ordered)
async def put_record(self, record, partition_key=None, explicit_hash_key=None):
"""
Put single record into Kinesis stream
:param record: record to put, bytes
:param partition_key: partition key to determine shard; if none, time-based key is used
:param explicit_hash_key: hash value used to determine the shard explicitly, overriding partition key
:return: response from kinesis client, see boto3 doc
"""
if partition_key is None:
partition_key = _get_default_partition_key()
kwargs = {
'StreamName': self.stream_name,
'Data': record,
'PartitionKey': partition_key,
}
if self.ordered:
kwargs['SequenceNumberForOrdering'] = self.seq
if explicit_hash_key:
kwargs['ExplicitHashKey'] = explicit_hash_key
resp = await self.kinesis_client.put_record(**kwargs)
if self.ordered:
self.seq = resp.get('SequenceNumber')
return resp
async def put_records(self, records):
"""
Put list of records into Kinesis stream
        Records are buffered until the batch outgrows the maximum allowed size (500 records or 5 Mb of data including
        partition keys) or until explicitly flushed (see flush() below)
:param records: iterable with records to put; has following structure:
records=[
{
'Data': b'bytes',
'ExplicitHashKey': 'string',
'PartitionKey': 'string'
},
],
If no 'PartitionKey' given, default time-based key will be used
See https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kinesis.html#Kinesis.Client.put_records
for details
:return: Empty list if no records were flushed, list of responses from kinesis client
otherwise
        Raises ValueError if a single record exceeds 1 Mb
Currently application should check for ProvisionedThroughputExceededException
in response structure itself.
"""
resp = []
n = 1
for datum in records:
if len(self.record_buf) == MAX_RECORDS_IN_BATCH:
resp.append(await self.flush())
data = datum.get('Data')
if not (isinstance(data, bytes) or isinstance(data, bytearray)):
raise TypeError('Record # {} is of type {}; accepted types are "bytes" and "bytearray"'.format(
n, type(data)
))
record_size = len(data)
# boto3 docs say that combined size of record and partition key should not exceed 1 MB,
# while in reality, at least with boto3==1.9.49 and botocore==1.12.49, the key size
# is not taken into account
if record_size > MAX_RECORD_SIZE:
raise ValueError('Record # {} exceeded max record size of {}; size={}; record={}'.format(
n, MAX_RECORD_SIZE, record_size, datum))
if datum.get('PartitionKey') is None:
datum['PartitionKey'] = _get_default_partition_key()
# The same applies to batch size - only record size is counted
if self.buf_size + record_size > MAX_BATCH_SIZE:
resp.append(await self.flush())
self.record_buf.append(datum)
self.buf_size += record_size
n += 1
return resp
async def flush(self):
if len(self.record_buf) == 0:
return
resp = await self.kinesis_client.put_records(
Records=self.record_buf,
StreamName=self.stream_name
)
self.record_buf = []
self.buf_size = 0
return resp
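# Usage sketch (assumptions: AWS credentials are configured and the stream
# exists; names are illustrative):
#
#   import asyncio
#
#   async def main():
#       producer = AsyncKinesisProducer(stream_name='my-stream')
#       await producer.put_record(b'a single record')
#       await producer.put_records([{'Data': b'one'}, {'Data': b'two'}])
#       await producer.flush()  # push whatever is still buffered
#
#   asyncio.run(main())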
|
StarcoderdataPython
|
3207745
|
<reponame>vaibhav0000patel/look-somewhere-else<gh_stars>0
from win32api import *
from win32gui import *
import win32con
import sys, os
import time
from random import randint
class WindowsBalloonTip:
def __init__(self, title, msg):
message_map = {
win32con.WM_DESTROY: self.OnDestroy,
}
# Register the Window class.
wc = WNDCLASS()
hinst = wc.hInstance = GetModuleHandle(None)
wc.lpszClassName = "PythonTaskbar"
# Could also specify a wndproc.
wc.lpfnWndProc = message_map
class_atom = RegisterClass(wc)
# Create the Window.
style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
self.hwnd = CreateWindow(
class_atom,
"Taskbar",
style,
0, 0,
win32con.CW_USEDEFAULT,
win32con.CW_USEDEFAULT,
0, 0,
hinst,
None
)
UpdateWindow(self.hwnd)
icon_path_name = os.path.abspath(os.path.join( sys.path[0], "Googleeyes.ico" ))
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
try:
hicon = LoadImage( hinst, icon_path_name, win32con.IMAGE_ICON, 0, 0, icon_flags )
except Exception as e:
hicon = LoadIcon( 0, win32con.IDI_APPLICATION )
flags = NIF_ICON | NIF_MESSAGE | NIF_TIP
nid = (self.hwnd, 0, flags, win32con.WM_USER+20, hicon, "tooltip")
Shell_NotifyIcon(NIM_ADD, nid)
Shell_NotifyIcon(NIM_MODIFY, (
self.hwnd,
0,
NIF_INFO,
win32con.WM_USER + 20,
hicon,
"Balloon tooltip",
msg,
200,
title,
NIIF_NOSOUND
)
)
# self.show_balloon(title, msg)
time.sleep(5)
DestroyWindow(self.hwnd)
def OnDestroy(self, hwnd, msg, wparam, lparam):
nid = (self.hwnd, 0)
Shell_NotifyIcon(NIM_DELETE, nid)
# Terminate the app.
PostQuitMessage(0)
def balloon_tip(title, msg):
WindowsBalloonTip(title, msg)
if __name__ == '__main__':
messages = [
"The time has come when I will have to ask you to move your eyes as constantly staring at your screen would harm them!",
"It's been 15 minutes!! How could you still be looking at your screen!!",
"See, Let me shout you this again. MOVE YOUR EYES! STOP STARING AT YOUR SCREEN.",
"This is the time when you should give your eyes some rest."
]
balloon_tip( "Hey! You studious nerd!", messages[randint(0,len(messages)-1)])
|
StarcoderdataPython
|
4837517
|
<filename>untypy/impl/dummy_delayed.py
from typing import Any, Optional
from untypy.error import UntypyTypeError
from untypy.interfaces import TypeChecker, CreationContext, TypeCheckerFactory, ExecutionContext
class DummyDelayedType:
"""
This class is used for raising delayed type checking errors.
"""
pass
class DummyDelayedFactory(TypeCheckerFactory):
def create_from(self, annotation: Any, ctx: CreationContext) -> Optional[TypeChecker]:
if annotation is DummyDelayedType:
return DummyDelayedChecker()
else:
return None
class DummyDelayedChecker(TypeChecker):
def check_and_wrap(self, arg: Any, ctx: ExecutionContext) -> Any:
return DummyDelayedWrapper(ctx)
def describe(self) -> str:
return "DummyDelayedType"
def base_type(self) -> list[Any]:
return []
class DummyDelayedWrapper:
upper: ExecutionContext
def __init__(self, upper: ExecutionContext):
self.upper = upper
def use(self):
raise self.upper.wrap(UntypyTypeError(
"<omitted>",
"DummyDelayedType"
))
|
StarcoderdataPython
|
1699189
|
# sway
from __future__ import print_function, division
import zipfile,re,traceback,random,sys
sys.dont_write_bytecode = True
##################################################
# test engine
class ok:
tries = fails = 0 # tracks the record so far
def score(i):
t,f= ok.tries, ok.fails
return "# TRIES= %s FAIL= %s %%PASS = %s%%" % (
t,f,int(round(t*100/(t+f+0.001))))
def __init__(i,*tests):
    for test in tests: i.test(test)  # map() is lazy under Python 3, so iterate explicitly
print(i.score())
def test(i,f):
print("### ",f.__name__)
ok.tries += 1
try: f()
    except Exception as e:
ok.fails += 1
i.report(f,e)
def report(i,test,e):
_, _, tb = sys.exc_info()
f, line, fun, what = traceback.extract_tb(tb)[-1]
print('{}: line {}, in {} ==> {} {}'.format(
f, line, fun, what,e))
def _ok():
def oa():
assert 1==1
assert 2==1
assert 5==1 # never reached
def ob(): # called, even though a() has errors
assert 10==10
def oc():
assert 3==3
assert 3==1
ok(oa,ob,oc)
class o:
def __init__(i,**d) : i.add(**d)
def __setitem__(i,k,v): i.__dict__[k] = v
def __getitem__(i,k) : print(k); return i.__dict__[k]
def __repr__(i) : return 'o'+str(i.__dict__)
def add(i,**d) : return i.__dict__.update(d)
def items(i) : return i.__dict__.items()
def _o():
def oa():
x=o(name='tim',age=55)
x['name'] = 'tom'
assert x.name == 'tom'
x.name = 'tony'
assert x.name == 'tony'
assert str(x) == "o{'age': 55, 'name': 'tony'}"
ok(oa)
####################################################
# option controls
class settings:
funs = o()
all = o()
def __init__(i,f):
what = f.__name__
def g(**d):
tmp = f()
tmp.add(**d)
settings.all[what] = tmp
return tmp
settings.funs[what] = g
settings.all[what] = g()
@staticmethod
def reset(seed=1):
for k,v in settings.funs.items():
settings.all[k] = v()
random.seed(seed)
def setting(f):
settings(f)
return settings.funs[f.__name__]
the=settings.all
reset=settings.reset
__name__ == '__main__' and ok(_ok,_o)
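# Usage sketch for the settings machinery above (names are illustrative):
#
#   @setting
#   def SIM(): return o(runs=20, verbose=False)
#
#   the.SIM.runs   # -> 20
#   SIM(runs=5)    # rebuilds the group with an override; the.SIM.runs -> 5
#   reset()        # restores every group to factory defaults and re-seeds random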
|
StarcoderdataPython
|
96500
|
from string import ascii_letters, digits
from random import choice
CARACTERES = ascii_letters + digits
class Clave:
    def __init__(self, longitud: int = 12) -> None:
        """Generate a random key of `longitud` characters."""
        self.__clave = self.__crear_clave(longitud)
    def __crear_clave(self, longitud: int) -> str:
        """Return a random string of `longitud` characters drawn from CARACTERES."""
        return "".join([choice(CARACTERES) for _ in range(longitud)])
    def encriptado_rot_n(self, n: int) -> None:
        """Rotate each character of the key by `n` positions within CARACTERES."""
nueva_cadena = ""
for caracter in self.__clave:
indice = CARACTERES.index(caracter)
indice_cambiado = (indice + n) % len(CARACTERES)
nueva_cadena += CARACTERES[indice_cambiado]
self.__clave = nueva_cadena
    def __str__(self) -> str:
return f"Clave -> {self.__clave}"
clave = Clave()
print(clave)
clave.encriptado_rot_n(13)
print(clave)
clave.encriptado_rot_n(-13)
print(clave)
|
StarcoderdataPython
|
1757715
|
"""
BMI203: Biocomputing algorithms Winter 2022
Assignment 6: Logistic regression
"""
from regression import (logreg, utils)
__version__ = '0.1.0'
|
StarcoderdataPython
|
96954
|
from .base import BaseDevice
import random
default_protocol_config = {
    "protocol_config": [{
        "nbns": {
            "frequency": random.randint(30, 60)
        },
        "nbdgm": {
            "frequency": random.randint(30, 60),
            "type": "browser",
            "cmd": "announcement",
            "server_types": [
                "workstation",
                "server",
                "print",
                "xenix",
                "nt_workstation",
                "dfs"
            ]
        }
    }, {
        # The original dict repeated the "nbdgm" key, so this election entry
        # silently overwrote the announcement entry above. Splitting them into
        # two protocol_config items keeps both (an assumption about how
        # BaseDevice consumes the list).
        "nbdgm": {
            "frequency": random.randint(30, 60),
            "type": "browser",
            "cmd": "election"
        }
    }]
}
"""
"uuid": uuid.uuid1(),
"protocol_config": [{
"mdns": {
"frequency": random.randint(30, 60),
"type": "response",
"an": [{
"type": "PTR",
"rname": '_http._tcp.local',
"rdata": '{manufacturer} {model}._http._tcp.local',
"rclass": 'IN',
"ttl": 4500
}],
"ar": [
{
"type": "A",
"rname": '{hostname}.local',
"rdata": '{ipv4}',
"rclass": 'IN',
"ttl": 120
},
{
"type": "SRV",
"rname": '{manufacturer} {model}._http._tcp.local',
"rclass": 'IN',
"port": 80,
"target": '{hostname}.local',
"ttl": 120
},
{
"type": "TXT",
"rname": '{manufacturer} {model}._http._tcp.local',
"rdata": None,
"rclass": 'IN',
"ttl": 4500
}
],
},
"""
class NAS(BaseDevice):
def __init__(self, user_config):
super(NAS, self).__init__(default_protocol_config, user_config)
|
StarcoderdataPython
|
1673546
|
import functools
import operator
import sys
import warnings
import numbers
from collections import namedtuple
import inspect
import math
import numpy as np
try:
from numpy.random import Generator as Generator
except ImportError:
class Generator(): # type: ignore[no-redef]
pass
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
"""
np.where(cond, x, fillvalue) always evaluates x even where cond is False.
This one only evaluates f(arr1[cond], arr2[cond], ...).
Examples
--------
>>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
>>> def f(a, b):
... return a*b
>>> _lazywhere(a > 2, (a, b), f, np.nan)
array([ nan, nan, 21., 32.])
Notice, it assumes that all `arrays` are of the same shape, or can be
broadcasted together.
"""
cond = np.asarray(cond)
if fillvalue is None:
if f2 is None:
raise ValueError("One of (fillvalue, f2) must be given.")
else:
fillvalue = np.nan
else:
if f2 is not None:
raise ValueError("Only one of (fillvalue, f2) can be given.")
args = np.broadcast_arrays(cond, *arrays)
cond, arrays = args[0], args[1:]
temp = tuple(np.extract(cond, arr) for arr in arrays)
tcode = np.mintypecode([a.dtype.char for a in arrays])
out = np.full(np.shape(arrays[0]), fill_value=fillvalue, dtype=tcode)
np.place(out, cond, f(*temp))
if f2 is not None:
temp = tuple(np.extract(~cond, arr) for arr in arrays)
np.place(out, ~cond, f2(*temp))
return out
def _lazyselect(condlist, choicelist, arrays, default=0):
"""
Mimic `np.select(condlist, choicelist)`.
Notice, it assumes that all `arrays` are of the same shape or can be
broadcasted together.
All functions in `choicelist` must accept array arguments in the order
given in `arrays` and must return an array of the same shape as broadcasted
`arrays`.
Examples
--------
>>> x = np.arange(6)
    >>> np.select([x < 3, x > 3], [x**2, x**3], default=0)
array([ 0, 1, 4, 0, 64, 125])
>>> _lazyselect([x < 3, x > 3], [lambda x: x**2, lambda x: x**3], (x,))
array([ 0., 1., 4., 0., 64., 125.])
>>> a = -np.ones_like(x)
>>> _lazyselect([x < 3, x > 3],
... [lambda x, a: x**2, lambda x, a: a * x**3],
... (x, a), default=np.nan)
array([ 0., 1., 4., nan, -64., -125.])
"""
arrays = np.broadcast_arrays(*arrays)
tcode = np.mintypecode([a.dtype.char for a in arrays])
out = np.full(np.shape(arrays[0]), fill_value=default, dtype=tcode)
for index in range(len(condlist)):
func, cond = choicelist[index], condlist[index]
        if np.all(cond is False):  # identity check: only skips when cond is the literal `False`
continue
cond, _ = np.broadcast_arrays(cond, arrays[0])
temp = tuple(np.extract(cond, arr) for arr in arrays)
np.place(out, cond, func(*temp))
return out
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""Allocate a new ndarray with aligned memory.
Primary use case for this currently is working around a f2py issue
in NumPy 1.9.1, where dtype.alignment is such that np.zeros() does
not necessarily create arrays aligned up to it.
"""
dtype = np.dtype(dtype)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + align + 1, np.uint8)
offset = buf.__array_interface__['data'][0] % align
if offset != 0:
offset = align - offset
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
def _prune_array(array):
"""Return an array equivalent to the input array. If the input
array is a view of a much larger array, copy its contents to a
newly allocated array. Otherwise, return the input unchanged.
"""
if array.base is not None and array.size < array.base.size // 2:
return array.copy()
return array
def prod(iterable):
"""
Product of a sequence of numbers.
Faster than np.prod for short lists like array shapes, and does
not overflow if using Python integers.
"""
product = 1
for x in iterable:
product *= x
return product
def float_factorial(n: int) -> float:
"""Compute the factorial and return as a float
Returns infinity when result is too large for a double
"""
return float(math.factorial(n)) if n < 171 else np.inf
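# For example, float_factorial(170) ~= 7.257e306, the largest factorial a
# double can hold; float_factorial(171) returns np.inf instead of letting
# float(math.factorial(171)) raise OverflowError.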
class DeprecatedImport:
"""
Deprecated import with redirection and warning.
Examples
--------
Suppose you previously had in some module::
from foo import spam
If this has to be deprecated, do::
spam = DeprecatedImport("foo.spam", "baz")
to redirect users to use "baz" module instead.
"""
def __init__(self, old_module_name, new_module_name):
self._old_name = old_module_name
self._new_name = new_module_name
__import__(self._new_name)
self._mod = sys.modules[self._new_name]
def __dir__(self):
return dir(self._mod)
def __getattr__(self, name):
warnings.warn("Module %s is deprecated, use %s instead"
% (self._old_name, self._new_name),
DeprecationWarning)
return getattr(self._mod, name)
# copy-pasted from scikit-learn utils/validation.py
# change this to scipy.stats._qmc.check_random_state once numpy 1.16 is dropped
def check_random_state(seed):
"""Turn `seed` into a `np.random.RandomState` instance.
Parameters
----------
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
Random number generator.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
try:
# Generator is only available in numpy >= 1.17
if isinstance(seed, np.random.Generator):
return seed
except AttributeError:
pass
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def _asarray_validated(a, check_finite=True,
sparse_ok=False, objects_ok=False, mask_ok=False,
as_inexact=False):
"""
Helper function for SciPy argument validation.
Many SciPy linear algebra functions do support arbitrary array-like
input arguments. Examples of commonly unsupported inputs include
matrices containing inf/nan, sparse matrix representations, and
matrices with complicated elements.
Parameters
----------
a : array_like
The array-like input.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
sparse_ok : bool, optional
True if scipy sparse matrices are allowed.
objects_ok : bool, optional
        True if arrays with dtype('O') are allowed.
mask_ok : bool, optional
True if masked arrays are allowed.
as_inexact : bool, optional
True to convert the input array to a np.inexact dtype.
Returns
-------
ret : ndarray
The converted validated array.
"""
if not sparse_ok:
import scipy.sparse
if scipy.sparse.issparse(a):
msg = ('Sparse matrices are not supported by this function. '
'Perhaps one of the scipy.sparse.linalg functions '
'would work instead.')
raise ValueError(msg)
if not mask_ok:
if np.ma.isMaskedArray(a):
raise ValueError('masked arrays are not supported')
toarray = np.asarray_chkfinite if check_finite else np.asarray
a = toarray(a)
if not objects_ok:
if a.dtype is np.dtype('O'):
raise ValueError('object arrays are not supported')
if as_inexact:
if not np.issubdtype(a.dtype, np.inexact):
a = toarray(a, dtype=np.float_)
return a
def _validate_int(k, name, minimum=None):
"""
Validate a scalar integer.
    This function can be used to validate an argument to a function
that expects the value to be an integer. It uses `operator.index`
to validate the value (so, for example, k=2.0 results in a
TypeError).
Parameters
----------
k : int
The value to be validated.
name : str
The name of the parameter.
minimum : int, optional
An optional lower bound.
"""
try:
k = operator.index(k)
except TypeError:
raise TypeError(f'{name} must be an integer.') from None
if minimum is not None and k < minimum:
raise ValueError(f'{name} must be an integer not less '
f'than {minimum}') from None
return k
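# For example: _validate_int(3, 'k') returns 3, _validate_int(2.0, 'k') raises
# TypeError, and _validate_int(-1, 'k', minimum=0) raises ValueError.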
# Add a replacement for inspect.getfullargspec()/
# The version below is borrowed from Django,
# https://github.com/django/django/pull/4846.
# Note an inconsistency between inspect.getfullargspec(func) and
# inspect.signature(func). If `func` is a bound method, the latter does *not*
# list `self` as a first argument, while the former *does*.
# Hence, cook up a common ground replacement: `getfullargspec_no_self` which
# mimics `inspect.getfullargspec` but does not list `self`.
#
# This way, the caller code does not need to know whether it uses a legacy
# .getfullargspec or a bright and shiny .signature.
FullArgSpec = namedtuple('FullArgSpec',
['args', 'varargs', 'varkw', 'defaults',
'kwonlyargs', 'kwonlydefaults', 'annotations'])
def getfullargspec_no_self(func):
"""inspect.getfullargspec replacement using inspect.signature.
If func is a bound method, do not list the 'self' parameter.
Parameters
----------
func : callable
A callable to inspect
Returns
-------
fullargspec : FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
kwonlydefaults, annotations)
NOTE: if the first argument of `func` is self, it is *not*, I repeat
*not*, included in fullargspec.args.
This is done for consistency between inspect.getargspec() under
Python 2.x, and inspect.signature() under Python 3.x.
"""
sig = inspect.signature(func)
args = [
p.name for p in sig.parameters.values()
if p.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.POSITIONAL_ONLY]
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
varkw = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
varkw = varkw[0] if varkw else None
defaults = tuple(
p.default for p in sig.parameters.values()
if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and
p.default is not p.empty)
) or None
kwonlyargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.KEYWORD_ONLY
]
kwdefaults = {p.name: p.default for p in sig.parameters.values()
if p.kind == inspect.Parameter.KEYWORD_ONLY and
p.default is not p.empty}
annotations = {p.name: p.annotation for p in sig.parameters.values()
if p.annotation is not p.empty}
return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
kwdefaults or None, annotations)
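# A quick check of the "no self" behavior (a sketch; run as a script):
if __name__ == '__main__':
    class _Demo:
        def method(self, a, b=1, *args, c, **kwargs):
            pass
    spec = getfullargspec_no_self(_Demo().method)
    print(spec.args)                 # ['a', 'b'] -- 'self' is omitted for bound methods
    print(spec.kwonlyargs)           # ['c']
    print(spec.varargs, spec.varkw)  # 'args' 'kwargs'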
class MapWrapper:
"""
Parallelisation wrapper for working with map-like callables, such as
`multiprocessing.Pool.map`.
Parameters
----------
pool : int or map-like callable
        If `pool` is an integer, then it specifies the number of worker
        processes to use for parallelization. If ``int(pool) == 1``, then no parallel
processing is used and the map builtin is used.
If ``pool == -1``, then the pool will utilize all available CPUs.
If `pool` is a map-like callable that follows the same
calling sequence as the built-in map function, then this callable is
used for parallelization.
"""
def __init__(self, pool=1):
self.pool = None
self._mapfunc = map
self._own_pool = False
if callable(pool):
self.pool = pool
self._mapfunc = self.pool
else:
from multiprocessing import Pool
# user supplies a number
if int(pool) == -1:
# use as many processors as possible
self.pool = Pool()
self._mapfunc = self.pool.map
self._own_pool = True
elif int(pool) == 1:
pass
elif int(pool) > 1:
# use the number of processors requested
self.pool = Pool(processes=int(pool))
self._mapfunc = self.pool.map
self._own_pool = True
else:
raise RuntimeError("Number of workers specified must be -1,"
" an int >= 1, or an object with a 'map' "
"method")
def __enter__(self):
return self
def terminate(self):
if self._own_pool:
self.pool.terminate()
def join(self):
if self._own_pool:
self.pool.join()
def close(self):
if self._own_pool:
self.pool.close()
def __exit__(self, exc_type, exc_value, traceback):
if self._own_pool:
self.pool.close()
self.pool.terminate()
def __call__(self, func, iterable):
# only accept one iterable because that's all Pool.map accepts
try:
return self._mapfunc(func, iterable)
except TypeError as e:
# wrong number of arguments
raise TypeError("The map-like callable must be of the"
" form f(func, iterable)") from e
def rng_integers(gen, low, high=None, size=None, dtype='int64',
endpoint=False):
"""
Return random integers from low (inclusive) to high (exclusive), or if
endpoint=True, low (inclusive) to high (inclusive). Replaces
`RandomState.randint` (with endpoint=False) and
`RandomState.random_integers` (with endpoint=True).
Return random integers from the "discrete uniform" distribution of the
specified dtype. If high is None (the default), then results are from
0 to low.
Parameters
----------
gen : {None, np.random.RandomState, np.random.Generator}
Random number generator. If None, then the np.random.RandomState
singleton is used.
low : int or array-like of ints
Lowest (signed) integers to be drawn from the distribution (unless
high=None, in which case this parameter is 0 and this value is used
for high).
high : int or array-like of ints
If provided, one above the largest (signed) integer to be drawn from
the distribution (see above for behavior if high=None). If array-like,
must contain integer values.
    size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., (m, n, k), then m * n * k
samples are drawn. Default is None, in which case a single value is
returned.
dtype : {str, dtype}, optional
Desired dtype of the result. All dtypes are determined by their name,
i.e., 'int64', 'int', etc, so byteorder is not available and a specific
precision may have different C types depending on the platform.
        The default value is 'int64'.
endpoint : bool, optional
If True, sample from the interval [low, high] instead of the default
        [low, high). Defaults to False.
Returns
-------
out: int or ndarray of ints
size-shaped array of random integers from the appropriate distribution,
or a single such random int if size not provided.
"""
if isinstance(gen, Generator):
return gen.integers(low, high=high, size=size, dtype=dtype,
endpoint=endpoint)
else:
if gen is None:
# default is RandomState singleton used by np.random.
gen = np.random.mtrand._rand
if endpoint:
# inclusive of endpoint
# remember that low and high can be arrays, so don't modify in
# place
if high is None:
return gen.randint(low + 1, size=size, dtype=dtype)
if high is not None:
return gen.randint(low, high=high + 1, size=size, dtype=dtype)
# exclusive
return gen.randint(low, high=high, size=size, dtype=dtype)
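# Example draws exercising both generator code paths above (a sketch):
if __name__ == '__main__':
    rs = np.random.RandomState(0)
    print(rng_integers(rs, 0, 10, size=5))                 # from [0, 10)
    print(rng_integers(rs, 0, 10, size=5, endpoint=True))  # from [0, 10]
    gen = np.random.default_rng(0)                         # numpy >= 1.17
    print(rng_integers(gen, 0, 10, size=5))                # Generator branch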
|
StarcoderdataPython
|
10979
|
from keras.optimizers import RMSprop
from keras.layers import Input, Embedding, Dense, LSTM, Bidirectional, GRU
from keras.layers import concatenate, Reshape, SpatialDropout1D
from keras.models import Model
from keras import backend as K
from .AttentionWeightedAverage import AttentionWeightedAverage
def textgenrnn_model(num_classes, cfg, context_size=None,
weights_path=None,
dropout=0.0,
optimizer=RMSprop(lr=4e-3, rho=0.99)):
'''
Builds the model architecture for textgenrnn and
loads the specified weights for the model.
'''
input = Input(shape=(cfg['max_length'],), name='input')
embedded = Embedding(num_classes, cfg['dim_embeddings'],
input_length=cfg['max_length'],
name='embedding')(input)
if dropout > 0.0:
embedded = SpatialDropout1D(dropout, name='dropout')(embedded)
rnn_layer_list = []
for i in range(cfg['rnn_layers']):
prev_layer = embedded if i == 0 else rnn_layer_list[-1]
if cfg.get('rnn_type') == 'gru':
rnn_layer_list.append(new_rnn_gru(cfg, i + 1)(prev_layer))
else:
rnn_layer_list.append(new_rnn(cfg, i + 1)(prev_layer))
seq_concat = concatenate([embedded] + rnn_layer_list, name='rnn_concat')
attention = AttentionWeightedAverage(name='attention')(seq_concat)
output = Dense(num_classes, name='output', activation='softmax')(attention)
if context_size is None:
model = Model(inputs=[input], outputs=[output])
if weights_path is not None:
model.load_weights(weights_path, by_name=True)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
else:
context_input = Input(
shape=(context_size,), name='context_input')
context_reshape = Reshape((context_size,),
name='context_reshape')(context_input)
merged = concatenate([attention, context_reshape], name='concat')
main_output = Dense(num_classes, name='context_output',
activation='softmax')(merged)
model = Model(inputs=[input, context_input],
outputs=[main_output, output])
if weights_path is not None:
model.load_weights(weights_path, by_name=True)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
loss_weights=[0.8, 0.2])
return model
'''
Create a new LSTM layer per parameters. Unfortunately,
each combination of parameters must be hardcoded.
The normal LSTMs use sigmoid recurrent activations
for parity with CuDNNLSTM:
https://github.com/keras-team/keras/issues/8860
'''
def new_rnn(cfg, layer_num):
use_cudnnlstm = K.backend() == 'tensorflow' and len(K.tensorflow_backend._get_available_gpus()) > 0
if use_cudnnlstm:
from keras.layers import CuDNNLSTM
if cfg['rnn_bidirectional']:
return Bidirectional(CuDNNLSTM(cfg['rnn_size'],
return_sequences=True),
name='rnn_{}'.format(layer_num))
return CuDNNLSTM(cfg['rnn_size'],
return_sequences=True,
name='rnn_{}'.format(layer_num))
else:
if cfg['rnn_bidirectional']:
return Bidirectional(LSTM(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid'),
name='rnn_{}'.format(layer_num))
return LSTM(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid',
name='rnn_{}'.format(layer_num))
def new_rnn_gru(cfg, layer_num):
use_cudnngru = K.backend() == 'tensorflow' and len(K.tensorflow_backend._get_available_gpus()) > 0
if use_cudnngru:
from keras.layers import CuDNNGRU
if cfg['rnn_bidirectional']:
return Bidirectional(CuDNNGRU(cfg['rnn_size'],
return_sequences=True),
name='rnn_{}'.format(layer_num))
return CuDNNGRU(cfg['rnn_size'],
return_sequences=True,
name='rnn_{}'.format(layer_num))
else:
if cfg['rnn_bidirectional']:
return Bidirectional(GRU(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid',
reset_after=True),
name='rnn_{}'.format(layer_num))
return GRU(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid',
reset_after=True,
name='rnn_{}'.format(layer_num))
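# A minimal configuration sketch for exercising textgenrnn_model above. The
# cfg keys mirror the ones this module reads; the values are illustrative and
# are not textgenrnn's shipped defaults. Run via ``python -m`` inside the
# package so the relative import of AttentionWeightedAverage resolves:
if __name__ == '__main__':
    cfg = {
        'max_length': 40,
        'dim_embeddings': 100,
        'rnn_layers': 2,
        'rnn_size': 128,
        'rnn_bidirectional': False,
        'rnn_type': 'lstm',
    }
    model = textgenrnn_model(num_classes=500, cfg=cfg, dropout=0.1)
    model.summary()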
|
StarcoderdataPython
|
129335
|
#!/usr/bin/env python3
# load needed modules
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Dropout, GRU , BatchNormalization
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
# load feature data
X = np.load('X_egemaps.npy')
y = np.load('y_egemaps.npy')
# z-score normalization
ori_aud_features = X
norm_aud_features = []
for aud_original in ori_aud_features:
aud_original_np = np.asarray(aud_original)
z_norm_aud = (aud_original_np - aud_original_np.mean()) / aud_original_np.std()
norm_aud_features.append(np.around(z_norm_aud, 6))
X = np.array(norm_aud_features)
train_x, test_x, train_y, test_y = train_test_split(X, y, test_size=0.33, random_state=42)
# DNN layer units
n_dim = np.array(train_x).shape[2]
n_classes = np.array(train_y).shape[1]
## normalize data
#mean = train_x.reshape(504172, 23).mean(axis=0)
#train_x -= mean
#std = train_x.reshape(504172, 23).std(axis=0)
#train_x /= std
#test_x -= mean
#test_x /= std
## function to define model
#def create_model():
# model = Sequential()
# # layer 1
# model.add(BatchNormalization(axis=-1, input_shape=(523, 23)))
# model.add(GRU(n_dim, activation='relu', #input_shape=(523, 23),
# dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
# model.add(GRU(100, activation='relu', return_sequences=True,
# dropout=0.2, recurrent_dropout=0.2))
# model.add(GRU(100, activation='relu', dropout=0.2, recurrent_dropout=0.2))
# #model.add(Dense(128, activation='relu'))
# model.add(Dense(n_classes, activation='softmax'))
# # model compilation
# model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# return model
def dense_model(activation_function='relu', init_type='normal', optimiser='adam', dropout_rate=0.2):
model = Sequential()
# layer 1
model.add(BatchNormalization(axis=-1, input_shape=(523, 23)))
model.add(Flatten())
model.add(Dense(100, kernel_initializer=init_type, activation=activation_function)) #, input_shape=(523, 23)))
# layer 2
model.add(Dense(200, kernel_initializer=init_type, activation=activation_function))
model.add(Dropout(dropout_rate))
# layer 3
model.add(Dense(100, kernel_initializer=init_type, activation=activation_function))
model.add(Dropout(dropout_rate))
#layer4
#model.add(Dense(50, kernel_initializer=init_type, activation=activation_function))
#model.add(Dropout(dropout_rate))
#model.add(Flatten())
# output layer
model.add(Dense(n_classes, kernel_initializer=init_type, activation='softmax'))
# model compilation
model.compile(loss='categorical_crossentropy', optimizer=optimiser, metrics=['accuracy'])
return model
# create the model
model = dense_model()
print(model.summary())
# train the model
hist = model.fit(train_x, train_y, epochs=300, validation_data=(test_x, test_y), batch_size=32)
# evaluate model, test data may differ from validation data
evaluate = model.evaluate(test_x, test_y, batch_size=32)
print(evaluate)
|
StarcoderdataPython
|
128321
|
<reponame>WojciechMula/parsing-int-series<gh_stars>10-100
from generator import Generator
from table import Table
if __name__ == '__main__':
gen = Generator()
freq = {}
for bi in gen.run():
k = bi.total_skip
freq[k] = freq.get(k, 0) + 1
table = Table()
table.add_header(["bytes processed", "patterns", "%", "cumulative %"])
cumulative = 0
for total_skip in sorted(freq.keys()):
count = freq[total_skip]
cumulative += count
table.add_row([
'%d' % total_skip,
'%d' % count,
'%0.2f%%' % (100 * count/65536.0),
'%0.2f%%' % (100 * cumulative/65536.0),
])
    print(table)
|
StarcoderdataPython
|
1752716
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import textwrap
from dataclasses import dataclass
from typing import Generic, Optional, Sequence, Type, get_type_hints
from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem, LineOriented
from pants.engine.rules import goal_rule
from pants.engine.target import (
AsyncField,
BoolField,
DictStringToStringField,
DictStringToStringSequenceField,
Field,
FloatField,
IntField,
PrimitiveField,
RegisteredTargetTypes,
ScalarField,
SequenceField,
StringField,
StringOrStringSequenceField,
StringSequenceField,
Target,
)
from pants.engine.unions import UnionMembership
from pants.option.global_options import GlobalOptions
from pants.util.objects import get_docstring, get_docstring_summary, pretty_print_type_hint
class TargetTypesOptions(LineOriented, GoalSubsystem):
"""List all the registered target types, including custom plugin types."""
name = "target-types"
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--details",
type=str,
metavar="target_type",
help="List all of the target type's registered fields.",
)
class TargetTypes(Goal):
subsystem_cls = TargetTypesOptions
@dataclass(frozen=True)
class AbbreviatedTargetInfo:
alias: str
description: Optional[str]
v1_only: bool
@classmethod
def create(cls, target_type: Type[Target]) -> "AbbreviatedTargetInfo":
return cls(
alias=target_type.alias,
description=get_docstring_summary(target_type),
v1_only=target_type.v1_only,
)
def format_for_cli(self, console: Console, *, longest_target_alias: int) -> str:
chars_before_description = longest_target_alias + 2
alias = console.cyan(f"{self.alias}".ljust(chars_before_description))
if not self.description:
description = "<no description>"
else:
description_lines = textwrap.wrap(self.description, 80 - chars_before_description)
if len(description_lines) > 1:
description_lines = [
description_lines[0],
*(f"{' ' * chars_before_description}{line}" for line in description_lines[1:]),
]
description = "\n".join(description_lines)
return f"{alias}{description}\n"
@dataclass(frozen=True)
class FieldInfo:
alias: str
description: Optional[str]
type_hint: str
required: bool
default: Optional[str]
v1_only: bool
@classmethod
def create(cls, field: Type[Field]) -> "FieldInfo":
# NB: It is very common (and encouraged) to subclass Fields to give custom behavior, e.g.
# `PythonSources` subclassing `Sources`. Here, we set `fallback_to_ancestors=True` so that
# we can still generate meaningful documentation for all these custom fields without
# requiring the Field author to rewrite the docstring.
#
        # However, if the original `Field` author did not define a docstring, then this means we
# would typically fall back to the docstring for `AsyncField`, `PrimitiveField`, or a
# helper class like `StringField`. This is a quirk of this heuristic and it's not
# intentional since these core `Field` types have documentation oriented to the custom
# `Field` author and not the end user filling in fields in a BUILD file target.
description = (
get_docstring(
field,
flatten=True,
fallback_to_ancestors=True,
ignored_ancestors={
*Field.mro(),
AsyncField,
PrimitiveField,
BoolField,
DictStringToStringField,
DictStringToStringSequenceField,
FloatField,
Generic, # type: ignore[arg-type]
IntField,
ScalarField,
SequenceField,
StringField,
StringOrStringSequenceField,
StringSequenceField,
},
)
or ""
)
if issubclass(field, PrimitiveField):
raw_value_type = get_type_hints(field.compute_value)["raw_value"]
elif issubclass(field, AsyncField):
raw_value_type = get_type_hints(field.sanitize_raw_value)["raw_value"]
else:
raw_value_type = get_type_hints(field.__init__)["raw_value"]
type_hint = pretty_print_type_hint(raw_value_type)
# Check if the field only allows for certain choices.
if issubclass(field, StringField) and field.valid_choices is not None:
valid_choices = sorted(
field.valid_choices
if isinstance(field.valid_choices, tuple)
else (choice.value for choice in field.valid_choices)
)
type_hint = " | ".join([*(repr(c) for c in valid_choices), "None"])
if field.required:
# We hackily remove `None` as a valid option for the field when it's required. This
# greatly simplifies Field definitions because it means that they don't need to
# override the type hints for `PrimitiveField.compute_value()` and
# `AsyncField.sanitize_raw_value()` to indicate that `None` is an invalid type.
type_hint = type_hint.replace(" | None", "")
return cls(
alias=field.alias,
description=description,
type_hint=type_hint,
required=field.required,
default=repr(field.default) if not field.required else None,
v1_only=field.v1_only,
)
def format_for_cli(self, console: Console) -> str:
field_alias = console.magenta(f"{self.alias}")
indent = " "
required_or_default = "required" if self.required else f"default: {self.default}"
type_info = console.cyan(f"{indent}type: {self.type_hint}, {required_or_default}")
lines = [field_alias, type_info]
if self.description:
lines.extend(f"{indent}{line}" for line in textwrap.wrap(self.description, 80))
return "\n".join(f"{indent}{line}" for line in lines)
@dataclass(frozen=True)
class VerboseTargetInfo:
alias: str
description: Optional[str]
fields: Sequence[FieldInfo]
@classmethod
def create(
cls, target_type: Type[Target], *, union_membership: UnionMembership
) -> "VerboseTargetInfo":
return cls(
alias=target_type.alias,
description=get_docstring(target_type),
fields=[
FieldInfo.create(field)
for field in target_type.class_field_types(union_membership=union_membership)
],
)
def format_for_cli(self, console: Console, *, v1_disabled: bool) -> str:
output = [console.green(f"{self.alias}\n{'-' * len(self.alias)}\n")]
if self.description:
output.append(f"{self.description}\n")
output.extend(
[
"Valid fields:\n",
*sorted(
f"{field.format_for_cli(console)}\n"
for field in self.fields
if not field.alias.startswith("_") and (not v1_disabled or not field.v1_only)
),
]
)
return "\n".join(output).rstrip()
@goal_rule
def list_target_types(
registered_target_types: RegisteredTargetTypes,
union_membership: UnionMembership,
target_types_options: TargetTypesOptions,
global_options: GlobalOptions,
console: Console,
) -> TargetTypes:
v1_disabled = not global_options.options.v1
with target_types_options.line_oriented(console) as print_stdout:
if target_types_options.values.details:
alias = target_types_options.values.details
target_type = registered_target_types.aliases_to_types.get(alias)
if target_type is None:
raise ValueError(
f"Unrecognized target type {repr(alias)}. All registered "
f"target types: {list(registered_target_types.aliases)}"
)
verbose_target_info = VerboseTargetInfo.create(
target_type, union_membership=union_membership
)
print_stdout("")
print_stdout(verbose_target_info.format_for_cli(console, v1_disabled=v1_disabled))
else:
title_text = "Target types"
title = console.green(f"{title_text}\n{'-' * len(title_text)}")
target_infos = [
AbbreviatedTargetInfo.create(target_type)
for target_type in registered_target_types.types
]
longest_target_alias = max(
len(target_type.alias) for target_type in registered_target_types.types
)
lines = [
f"\n{title}\n",
textwrap.fill(
"Use `./pants target-types --details=$target_type` to get detailed "
"information for a particular target type.",
80,
),
"\n",
*(
target_info.format_for_cli(console, longest_target_alias=longest_target_alias)
for target_info in target_infos
if not target_info.alias.startswith("_")
and (not v1_disabled or not target_info.v1_only)
),
]
print_stdout("\n".join(lines).rstrip())
return TargetTypes(exit_code=0)
def rules():
return [list_target_types]
|
StarcoderdataPython
|
192020
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines and generates Gin configs and sweeps programmatically."""
import functools
def gin_load(hparam_fn, clear_config = False):
"""Load specified set of hyperparameter functions into gin.
If multiple functions are specified (space separated), they will be combined.
Args:
hparam_fn: a string containing optionally multiple space separated names of
configs in the configs directory which return dictionaries mapping gin
configurable names to values.
clear_config: whether to clear the gin config before loading.
"""
if not hparam_fn:
return
# pylint: disable=g-import-not-at-top
import gin
if clear_config:
gin.clear_config()
print('=== %s ===' % hparam_fn)
if ' ' in hparam_fn:
for fn in hparam_fn.split():
gin_load(fn)
return
  hparam_fn_name = hparam_fn
  hparam_fn = globals().get(hparam_fn, None)
  if hparam_fn is not None:
    hparams = hparam_fn()
  else:
    raise ValueError(f'Unable to find sweep "{hparam_fn_name}".')
items = list(hparams.items())
def _fmt(v):
if isinstance(v, str):
if v.startswith('@'):
return v
else:
return '"%s"' % v
return v
bindings = ['%s = %s' % (k, _fmt(v)) for (k, v) in items]
print('\n'.join(bindings))
gin.parse_config(bindings)
def _transformer(
emb_dim=512,
num_heads=8,
num_layers=6,
qkv_dim=512,
mlp_dim=2048,
dropout_rate=None,
attention_dropout_rate=None,
nonlinearity='gelu',
):
"""Transformer config."""
configs = {
'models.build_transformer_config.emb_dim': emb_dim,
'models.build_transformer_config.num_heads': num_heads,
'models.build_transformer_config.num_decoder_layers': num_layers,
'models.build_transformer_config.num_encoder_layers': num_layers,
'models.build_transformer_config.qkv_dim': qkv_dim,
'models.build_transformer_config.mlp_dim': mlp_dim,
'models.build_transformer_config.mlp_activations': (nonlinearity,),
}
if dropout_rate is not None:
configs['models.build_transformer_config.dropout_rate'] = dropout_rate
if attention_dropout_rate is not None:
configs[
'models.build_transformer_config.attention_dropout_rate'] = attention_dropout_rate
return configs
def _model_gpt(size=0, dropout_rate=0.0, attention_dropout_rate=0.0):
"""Configs for a variety of Transformer model sizes."""
num_layers = [1, 3, 6, 12, 24, 36, 48][size]
dim = [64, 128, 512, 768, 1024, 1280, 1600][size]
num_heads = int(dim / 64) # Always dim 64 per head
return _transformer(
emb_dim=dim,
num_heads=num_heads,
num_layers=num_layers,
qkv_dim=dim,
mlp_dim=dim * 4,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate)
# Published GPT sizes.
gpt_extra_tiny = functools.partial(_model_gpt, size=0)
gpt_tiny = functools.partial(_model_gpt, size=1)
gpt_small = functools.partial(_model_gpt, size=2)
gpt_base = functools.partial(_model_gpt, size=3)
gpt_large = functools.partial(_model_gpt, size=4)
def lm1b():
"""Sets up diffusion to run with LM1B."""
return {
'run_experiment.dataset_name': 'lm1b',
'datasets.load.max_length': 128,
'datasets.load.pack': True,
'discrete_diffusion_loss_fn.mask_padding': False,
'discrete_diffusion_loss_fn.normalize_without_padding': False,
'discrete_diffusion_predict_fn.mask_padding': False,
}
def text8():
"""Sets up diffusion to run with LM1B."""
params = {
'run_experiment.dataset_name': 'text8',
'datasets.load.max_length': 256,
'datasets.load.sample_crop_train': True,
'discrete_diffusion_loss_fn.mask_padding': False,
'discrete_diffusion_loss_fn.normalize_without_padding': False,
'discrete_diffusion_predict_fn.mask_padding': False,
}
return params
def diffusion_length(length=40):
return {
'CategoricalDiffusionModel.num_steps': length,
'create_discrete_diffusion.num_steps': length,
}
def uniform_diffusion():
params = {
'discrete_diffusion_loss_fn.hybrid_lambda': 0.0,
'create_discrete_diffusion.kind': 'band-diagonal',
}
return params
def mask_diffusion():
params = {
'discrete_diffusion_loss_fn.hybrid_lambda': 0.01,
'create_discrete_diffusion.kind': 'mask',
'datasets.load.num_extra_tokens': 1,
}
return params
def diffusion():
"""Trains a BERT model with gradient clipping."""
params = {
'run_experiment.task_name':
'diffusion',
'run_experiment.model_cls':
'@CategoricalDiffusionModel',
'run_experiment.max_train_steps':
10000,
'CategoricalDiffusionModel.use_timestep_embeddings':
True,
'CategoricalDiffusionModel.use_film_layers':
False,
'run_experiment.batch_size_per_device':
8,
'discrete_diffusion_loss_fn.predict_x0':
True,
'discrete_diffusion_predict_fn.predict_x0':
True,
'discrete_diffusion_loss_fn.compute_elbo':
True,
'run_experiment.num_predict_steps':
1,
'run_experiment.num_eval_steps':
10,
'run_experiment.validate_every':
25000,
'create_discrete_diffusion.update_every':
200,
'trainers.Trainer.learning_rate_fn':
'@learning_rate/utils.create_learning_rate_scheduler',
'learning_rate/utils.create_learning_rate_scheduler.factors':
'linear_warmup_from * constant',
'learning_rate/utils.create_learning_rate_scheduler.base_learning_rate':
2e-4,
'learning_rate/utils.create_learning_rate_scheduler.warmup_steps':
5000,
'trainers.Trainer.grad_clip':
0.25
}
return params
def lm1b_tiny():
config = diffusion()
config.update(gpt_extra_tiny())
config.update(lm1b())
config.update(mask_diffusion())
config.update(diffusion_length(32))
return config
def lm1b_base():
config = diffusion()
config.update(gpt_base())
config.update(lm1b())
config.update(mask_diffusion())
config.update(diffusion_length(1000))
return config
def text8_tiny():
config = diffusion()
config.update(gpt_extra_tiny())
config.update(text8())
config.update(mask_diffusion())
config.update(diffusion_length(32))
return config
def text8_base():
config = diffusion()
config.update(gpt_base())
config.update(text8())
config.update(mask_diffusion())
config.update(diffusion_length(1000))
return config
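# Example: loading one of the sweeps above into gin (a sketch; requires the
# gin-config package and registration of the configurables named in the
# bindings, e.g. models.build_transformer_config):
if __name__ == '__main__':
    gin_load('text8_tiny', clear_config=True)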
|
StarcoderdataPython
|
113613
|
'''
Created on Dec 11, 2015
@author: <NAME>
'''
if __name__ == '__main__': pass
import socket
import netaddr
import nmap
import sys
wdtvlive = '192.168.1.64' ## port 80, 139, 443 open
uraniaAddy = '192.168.1.73'
network = "192.168.1.250/24"
deliaAddy = '192.168.1.65'
userPC = '192.168.1.68'
########################### findHosts()#####################
def findHosts(innerNetwork):
# from http://subneter.com/network-scan-python-nmap/
# find all hosts in network-return host list in str format
# @variable network: The network
# @return hostList: All hosts in the network
ipNet = netaddr.IPNetwork(innerNetwork)
hosts = list(ipNet)
#Removing the net and broad address if prefix is under 31 bits
if len(hosts) > 2:
hosts.remove(ipNet.broadcast)
        #hosts.remove(ipNet.network)
hostList = [str(host) for host in hosts]
print "Checking these available host ip's: " + str(hostList)
print ""
myrange= range(65, 155)
lenList = len(hostList)
## for i in range( lenList ):
for i in hostList[ hostList.index('192.168.1.55'):] :
print "val of i: " + i
checkUp(i)
############portCheck()####################################
def portCheck(remoteServerIP) :
hostName = socket.gethostbyaddr(remoteServerIP)
print "domain name is: " + str(socket.getfqdn(remoteServerIP))
print "hostname is %s" % str(hostName[0])
for tport in range(1,1025):
print "working on port: " + str(tport)
msock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
msock.settimeout(2)
result = msock.connect_ex((remoteServerIP, tport))
sys.stdout.write(str(tport)+" ")
print "result of sock connect on port {0} - {1}".format(str(tport),str(result))
if result == 0 :
print ''
print "Port {}: \t Open".format(tport)
msock.close()
########################### socketStylen()#####################
def socketStylen():
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind(('localhost', 8089))
serversocket.listen(5) # become a server socket, maximum 5 connections
print "Starting pSockets"
print "serversocket looks like this" + str(serversocket)
while True:
connection, address = serversocket.accept()
buf = connection.recv(64)
print "Waiting for connection"
if len(buf) > 0:
print "Connection received. Something connected on serversocket"
print "value of connection: " + str(connection)
print "value of address: " + str(address)
print buf
break
########################### checkUp()#####################
def checkUp(hostAddy):
myout = 0
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
mysock.settimeout(4)
mysock.connect((hostAddy, 135)) ## was 22
hostName = socket.gethostbyaddr(hostAddy)
print "Port 135 reachable on: " + str(hostAddy) + " " + str(hostName)
except socket.error as err:
aaa = str(err)
print "aaa : " + aaa
if "timed out" in aaa :
print "timed out!!!"
myout = 1
if myout == 0 :
print "connect string: %s" % err
mysock.close()
########################### end checkUp #####################
#################checkPort()###############################
def checkPort(hostAddy):
myout = 0
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
mysock.settimeout(1)
mysock.connect((hostAddy, 135)) ##22
print "Port 22 reachable on %s" % hostAddy
except socket.error as err:
aaa = str(err)
#print "aaa : " + aaa
if "timed out" in aaa :
print "timed out!!!"
myout = 1
if myout == 0 :
print "connect string: %s" % err
mysock.close()
######################## tryNM()##########################
#import nmap
def tryNM(myIP) :
#myIP = '192.168.1.68'
hostName = socket.gethostbyaddr(myIP)
print "------------------ Starting -------------"
print "hostname is %s" % str(hostName[0])
nm = nmap.PortScanner()
print "going to print live ports on ip: " + myIP
mylist = nm.scan(myIP, '22-443')
for i in mylist :
print mylist[i]
print "going to print scaninfo: "
print str(nm.scaninfo())
print "going to print all_hosts: "
print str(nm.all_hosts())
print "going to print state: "
print str(nm[myIP].state())
print "going to print all_protocols: "
print str(nm[myIP].all_protocols() )
print "going to print tcp-keys: "
print str(nm[myIP]['tcp'].keys() )
print "going to print has_tcp22: "
print str(nm[myIP].has_tcp(22) )
print "going to print has_tcp23: "
print str(nm[myIP].has_tcp(23) )
for host in nm.all_hosts():
print('Host : %s (%s)' % (host, nm[host].hostname()))
print('State : %s' % nm[host].state())
for proto in nm[host].all_protocols():
print('Protocol : %s' % proto)
lport = nm[host][proto].keys()
lport.sort()
for pport in lport:
#''
print ('port : %s state : %s' % (pport, nm[host][proto][pport]['state']) )
print "all done with network map"
############# execute section ######################################
## uncomment functions in this section to run them
print "Going to show info about hosts alive in local network."
# hosts = findHosts(network)
checkUp(userPC)
portCheck(userPC)
tryNM(userPC)
tryNM(deliaAddy)
|
StarcoderdataPython
|
1754986
|
<filename>gtsfm/utils/graph.py
"""Utilities for performing graph operations.
Authors: <NAME>
"""
from typing import List, Tuple
import networkx as nx
def get_nodes_in_largest_connected_component(edges: List[Tuple[int, int]]) -> List[int]:
"""Finds the nodes in the largest connected component of the bidirectional graph defined by the input edges.
Args:
edges: edges of the bi-directional graph.
Returns:
Nodes in the largest connected component of the input graph.
"""
if len(edges) == 0:
return []
input_graph = nx.Graph()
input_graph.add_edges_from(edges)
# get the largest connected component
largest_cc = max(nx.connected_components(input_graph), key=len)
subgraph = input_graph.subgraph(largest_cc).copy()
return list(subgraph.nodes())
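# A small demonstration of the helper above (a sketch; run as a script):
if __name__ == "__main__":
    # Two components: {0, 1, 2} with three nodes and {7, 8} with two.
    edges = [(0, 1), (1, 2), (7, 8)]
    print(sorted(get_nodes_in_largest_connected_component(edges)))  # [0, 1, 2]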
|
StarcoderdataPython
|
1712236
|
import datetime as dt
import dateutil.parser
import itertools
import types
import uuid
from .utils import tzadd, tznow
import logging
logger = logging.getLogger(__name__)
from collections.abc import Iterator
class Field:
"""
Base class for all field types. it tries to hold all the functionality
so derived classes only need to override methods in special circumstances.
Field objects are instantiated during model creation. ``i = IntField()``
All Model instances share the same instantiated Field objects in their
Meta class. ie: ``id(MyModel().Meta.fields['i']) == id(MyModel().Meta.fields['i'])``
Fields are python descriptors (@property is also a descriptor). So when a field
is get/set the actual value is stored in the parent model instance.
The actual Field() object is accessable via model().Meta.fields[field_name] or
via dynamic lookup of <field_name>__field. eg. m.email__field.
"""
_counter = itertools.count() # keeps track of declaration order in the Models
def __init__(self, field_type, **kw):
"""
:param field_type: the type this field should hold
:type field_type: str/int/float/etc
:param kw:
* primary_key: is this field a primary key of parent model
* indexed: is this field indexed (not implemented yet)
"""
self._order = next(Field._counter) # DO NOT TOUCH, deleted in MetaModel
assert field_type is not None
self._field_type = field_type
self._properties = ['primary_key', 'indexed',
'auto_increment', 'auto_now', 'auto_now_add', 'allowed_choices']
# create a getter property based on _properties list
# self.property_name returns self._property_name
for name in self._properties:
val = kw.pop(name, None)
name = '_' + name
setattr(self, name, val)
fget = lambda self, name=name: getattr(self, name)
setattr( self.__class__, name[1:], property(fget=fget) )
assert len(kw) == 0, "unhandeled kwargs: {}".format(str(kw))
def __get__(self, model, owner):
"""
Field is a descriptor `python.org <https://docs.python.org/2/howto/descriptor.html>`_.
return the value stored in model's __dict__ (stored via __set__ below)
"""
if model is None:
return self
return model.__dict__[self._name]
def __set__(self, model, value):
"""
Cast the value via individual Field rules and then store the value
in model instance.
This allows the same Field instance to "save" multiple values because
the actual value is in a different model instance.
"""
# WARNING: if it existed, Model.__setattr__ would intercept this method
value = self.cast(value)
model.set_field(self, value)
def __repr__(self):
# name is set via MetaModel during Model creation
name = getattr(self, 'name', '')
return "<{}: {}>".format(self.__class__.__name__, name)
@property
def field_type(self):
"""
**property**: return ``type`` of this field (int, str, etc)
"""
return self._field_type
@property
def properties(self):
"""
**property**: return list of possible Field properties
"""
return self._properties
@property
def default_value(self):
"""
**property**: what value does this Field default to during
model instantiation
"""
return None
def cast(self, value):
"""
Whenever a field value is set, the given value passes through
this (or derived class) function. This allows validation plus
helpful conversion.
::
int_field = "1" # converts to int("1")
date_field = "Jan 1 2017" # coverted to datetime()
"""
if value is None:
return None
# simple cast, eg. int(value)
return self._field_type(value)
def dumps(self, value):
"""
called during json serialization, if json module is unable to
deal with given Field.field_type, convert to a known type here.
"""
return value
def loads(self, value):
"""
called during json serialization, if json module is unable to
deal with given Field.field_type, convert to a known type here.
"""
return value
class IntField(Field):
def __init__(self, **kw):
super().__init__(int, **kw)
@property
def default_value(self):
"""
IntField implements auto_increment, useful for a primary_key. The
value is incremented during model instantiation.
"""
if self.auto_increment:
val = getattr(self.meta, '_auto_inc__' + self.name, 0) + 1
setattr( self.meta, '_auto_inc__' + self.name, val )
return val
return None
class BoolField(Field):
def __init__(self, **kw):
super().__init__(bool, **kw)
def cast(self, value):
if value is None:
return None
if isinstance(value, str):
# if value is an empty string then consider that as not yet
# set vs False. This may be a mistake.
if not value:
return None
if value.lower() in ['false', '0', 'no', 'n']:
return False
return bool(value)
class FloatField(Field):
def __init__(self, **kw):
super().__init__(float, **kw)
class StringField(Field):
"""
holds a unicode string
"""
def __init__(self, **kw):
super().__init__(str, **kw)
def cast(self, value):
if value is None:
return None
if type(value) is not self.field_type:
return self.field_type(value)
return value
class DateTimeField(Field):
def __init__(self, **kw):
super().__init__(dt.datetime, **kw)
def cast(self, value):
"""
make sure date always has a time zone
"""
if value is None:
return None
if isinstance(value, str):
if value == 'now':
value = tznow()
else:
return self.loads(value)
if type(value) is not self.field_type:
value = self.field_type(value)
return tzadd( value )
    def dumps(self, value):
if value is None:
return 'null'
return value.isoformat()
    def loads(self, value):
if value is None or value == 'null':
return None
# assume date is in isoformat, this preserves timezone info
if isinstance(value, str):
value = dateutil.parser.parse(value)
if value.tzinfo is None:
value = tzadd( value )
return value
class SetField(Field):
def __init__(self, **kw):
super().__init__(set, **kw)
class UUIDField(Field):
"""
stored as a string
"""
def __init__(self, uuid_ver=uuid.uuid4, **kw):
self.uuid_ver = uuid_ver
super().__init__(str, **kw)
def __set__(self, model, value):
"""
Cast the value via individual Field rules and then store the value
in model instance.
This allows the same Field instance to "save" multiple values because
the actual value is in a different model instance.
"""
raise RuntimeError("UUIDFields are not settable after creation")
@property
def default_value(self):
"""
returns an instance of UUID, type based on uuid_ver function
"""
return str(self.uuid_ver())
class ForeignKey(Field):
"""
A ForeignKey is a special type of field. It stores the same value
as a primary key in another field. When the model gets/sets a
ForeignKey the appropriate lookup is done in the remote manager
to return the remote instance.
"""
def __init__(self, foreign_model, **kw):
"""
:param foreign_model: the Model that this field is referencing
:type foreign_model: :class:`alkali.model.Model`
:param kw:
* primary_key: is this field a primary key of parent model
"""
from .model import Model
# TODO treat foreign_model as model name and lookup in database
# if isinstance(foreign_model, str):
# foreign_model = <db>.get_model(foreign_model)
self.foreign_model = foreign_model
# a Model is an instance of MetaModel so type(foreign_model) == MetaModel
# an instance of a Model is of course a Model. type(Model()) == Model
assert issubclass(self.foreign_model, Model), "foreign_model isn't a Model"
# test for appropriate primary key
assert len(self.foreign_model.Meta.pk_fields) == 1, \
"compound foreign key is not currently allowed"
super(ForeignKey, self).__init__(self.foreign_model, **kw)
def __get__(self, model, owner):
":rtype: Model instance"
if model is None:
return self
fk_value = model.__dict__[self._name]
return self.lookup(fk_value)
# don't require a __set__ because Model.set_field() calls our cast() method
@property
def pk_field(self):
":rtype: :func:`IField.field_type`, eg: IntField"
        pks = list(self.foreign_model.Meta.pk_fields.values())
        return pks[0]
def lookup(self, pk):
"""
given a pk, return foreign_model instance
"""
if pk is None:
return None
return self.foreign_model.objects.get(pk)
def cast(self, value):
"""
return the primary_key value of the foreign model
"""
if value is None:
return None
if isinstance(value, self.foreign_model):
return value.pk
if isinstance(value, self.pk_field.field_type):
return value
return self.pk_field.cast(value)
def dumps(self, value):
from .model import Model
if not isinstance(value, Model):
raise RuntimeError("ForeignKey value is not a Model")
return self.pk_field.dumps(value.pk)
# def loads() is not required because the Storage loader is probably
# reading json strings and then using the Model.__init__() to feed
# it key-value pairs. ie: we don't know that it's a ForeignKey on disk.
class OneToOneField(ForeignKey):
"""
"""
# TODO maybe use reify
# I forsee a problem where you load the primary table and either
# create the OneToOneField table entries and replace the as the real file is loaded
# or maybe you have some wierd race condition in the other direction
pass
# TODO class ManyToManyField
class ChoicesField(Field):
"""
holds a list or dict of valid options that can be selected
list contains elements in str format and dict as well.
"""
@classmethod
    def check_iter_empty(cls, iterable: Iterator):
'''checks if iterable is empty(true) or not(false)'''
try:
iter(iterable).__next__()
except StopIteration:
return True
return False
def __init__(self, **kw):
super(ChoicesField, self).__init__(field_type = Iterator, **kw)
        choices = kw.get('allowed_choices')  # a list or dict of str choices
if self.check_iter_empty(choices):
raise RuntimeError("allowed_choices kwarg cannot be empty")
elif type(choices) is list:
if all(isinstance(n, str) for n in choices) is False:
raise RuntimeError("All fields in list must be str {}".format(choices))
        elif type(choices) is dict:
if all(isinstance(n, str) for n in choices.values()) is False:
raise RuntimeError("All values in dict must be str {}".format(choices.values()))
def cast(self, value):
if value is None:
return None
elif value in self.allowed_choices and isinstance(self.allowed_choices, list):
return value
elif isinstance(self.allowed_choices, dict) and (value in self.allowed_choices.keys()):
return self.allowed_choices.get(value)
        else:
            raise RuntimeError("Value is not allowed.")
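# A small standalone sketch of Field casting (fields normally live on a
# Model; this only exercises cast/dumps directly. Run via ``python -m``
# inside the package so the relative import of tzadd/tznow resolves):
if __name__ == '__main__':
    f = DateTimeField()
    value = f.cast('2017-01-01T12:00:00')    # parsed and given a timezone
    print(f.dumps(value))
    c = ChoicesField(allowed_choices=['red', 'green'])
    print(c.cast('red'))                     # -> 'red'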
|
StarcoderdataPython
|
3237249
|
<filename>app/test/migrations/0002_auto_20210917_0834.py
# Generated by Django 3.2.6 on 2021-09-17 08:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('test', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='message',
name='created',
            field=models.DateTimeField(auto_now_add=True, db_column='created', help_text='creation date', null=True),
),
migrations.AddField(
model_name='message',
name='updated',
            field=models.DateTimeField(auto_now=True, db_column='updated', help_text='last modified date'),
),
]
|
StarcoderdataPython
|
131715
|
<filename>wefe/word_embedding_model.py
from gensim.models.keyedvectors import BaseKeyedVectors
class WordEmbeddingModel:
"""A container for Word Embedding pre-trained models.
It can hold gensim's KeyedVectors or gensim's api loaded models.
It includes the name of the model and some vocab prefix if needed.
"""
def __init__(self,
word_embedding: BaseKeyedVectors,
model_name: str = None,
vocab_prefix: str = None):
"""Initializes the WordEmbeddingModel container.
Parameters
----------
        word_embedding : BaseKeyedVectors
            An instance of word embedding loaded through gensim's KeyedVectors
            interface or gensim's api.
        model_name : str, optional
            The name of the model, by default None (stored as
            'Unnamed word embedding model').
vocab_prefix : str, optional.
A prefix that will be concatenated with all word in the model
vocab, by default None.
Raises
------
TypeError
if word_embedding is not a KeyedVectors instance.
TypeError
if model_name is not None and not instance of str.
TypeError
if vocab_prefix is not None and not instance of str.
Examples
--------
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Word2Vec
>>> from wefe.word_embedding_model import WordEmbeddingModel
>>> dummy_model = Word2Vec(common_texts, size=10, window=5,
... min_count=1, workers=1).wv
>>> model = WordEmbeddingModel(dummy_model, 'Dummy model dim=10',
... vocab_prefix='/en/')
>>> print(model.model_name_)
Dummy model dim=10
>>> print(model.vocab_prefix_)
/en/
Attributes
----------
model_ : KeyedVectors
The object that contains the model.
model_name_ : str
The name of the model.
vocab_prefix_ : str
A prefix that will be concatenated with each word of the vocab
of the model.
"""
if not isinstance(word_embedding, BaseKeyedVectors):
raise TypeError('word_embedding must be an instance of a gensim\'s'
' KeyedVectors. Given: {}'.format(word_embedding))
else:
self.model_ = word_embedding
if model_name is None:
self.model_name_ = 'Unnamed word embedding model'
elif not isinstance(model_name, str):
raise TypeError(
'model_name must be a string. Given: {}'.format(model_name))
else:
self.model_name_ = model_name
if vocab_prefix is None:
self.vocab_prefix_ = ''
elif not isinstance(vocab_prefix, str):
raise TypeError(
'vocab_prefix parameter must be a string. Given: {}'.format(
vocab_prefix))
else:
self.vocab_prefix_ = vocab_prefix
def __eq__(self, other):
if self.model_ != other.model_:
return False
if self.model_name_ != other.model_name_:
return False
return True
|
StarcoderdataPython
|
3212389
|
#!/cluster/home2/mglerner/anaconda3/bin/python
#!/usr/bin/env python
import sys,os,glob
if __name__ == '__main__':
from pypat import tool_utils
import glob
from optparse import OptionParser
usage = tool_utils.usage + """
Please also make sure that the imagemagick utility convert
is installed and in your path.
This will spit out an html file that will show you your images. If you
need to look at the images on another machine, tar up the html file and
output-dir/images together and move that to the other machine.
The html file will be called <struct>BigAnimatedMovies.html.
"""
    parser = OptionParser(option_class=tool_utils.MyOption, usage=usage)
tool_utils.add_standard_options(parser)
parser.add_option('--plot-types',dest="plottypes",
#default='ca avg max min abs straight mainheavy allheavy sidechainhbond hbond'.split(),
default='straight mainheavy'.split(),
type="strlist",
help="Comma-separated list of plot types. [default: %default]",
)
parser.add_option('--no-slow-movies',dest="slowmovies",
default=True,
action="store_false",
help="Set this if you do not want to generate the movies that have 0.5s spacing between the frames.",
)
parser.add_option('--movie-link',dest="movielink",
default='fast',
help="'fast' if you want the thumbnails to link to the fast images, anything else for the slow ones. [default: %default]",
)
options,args = parser.parse_args()
html_txt = '''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>Correlated Dynamics Movies</title>
</head>
<body>
<h1>Correlated Dynamics Movies</h1>
    These are animated gifs of a sliding 1NS window throughout the MD simulation. So, e.g., %s 2.2NS is the correlated dynamics of the %s structure from 1.7NS to 2.7NS (the center of the window is 2.2NS).
<ul>
<li><b>straight</b> every atom is shown</li>
<li><b>mainheavy</b> mainchain heavy (nonhydrogen) atoms</li>
<li><b>allheavy</b> all heavy (nonhydrogen) atoms</li>
<li><b>hbond</b> NOSP vs. hydrogen atoms</li>
<li><b>sidechainhbond</b> sidechain NOSP vs. sidechain hydrogen atoms</li>
<li><b>ca</b> alpha carbons plotted against eachother, one per residue. This is the standard plot in the literature.</li>
<li><b>abs</b> the largest absolute value for each residue-residue pair</li>
<li><b>min</b> the minimum value for each residue-residue pair</li>
<li><b>max</b> the maximum value for each residue-residue pair</li>
</ul>
The full-sized animated gifs, which you can see by clicking on the versions shown here,
will play more slowly than the thumbnails. If you wish to see the speeded-up version,
remove the "0.5" from the filename. E.g. look at animated_%s_resi_mainheavy_correl.gif
instead of animated_0.5_%s_resi_mainheavy_correl.gif.
<table>
<tr>
<td><b>%s</b> (closed loop starting structure)</td>
</tr>
''' % (options.structurename,
options.structurename,
options.structurename,
options.structurename,
options.structurename,)
for plot_type in options.plottypes:
# New naming conventions mean that these are zero-padded and will be in the correct order.
#filenames = glob.glob(os.path.join(options.outputdir,'images',options.structurename+" NS??? resi "+plot_type+" correl*",)) + glob.glob(os.path.join(options.outputdir,'images',options.structurename+" NS???? resi "+plot_type+" correl*",)) + glob.glob(os.path.join(options.outputdir,'images',options.structurename+" NS????? resi "+plot_type+" correl*",)) #does anyone do any 100ns+ simulations??
pattern = os.path.join(options.outputdir,'images',options.structurename+"* "+plot_type+" *correl*.png")
filenames = glob.glob(pattern)
if not filenames:
filenames = glob.glob(os.path.join(options.outputdir,'images',options.structurename+" NS* resi "+plot_type+" correl*",))
if not filenames:
filenames = glob.glob(os.path.join(options.outputdir,'images',options.structurename+"* ns*"+plot_type+" correl*",))
if not filenames:
print("COULD NOT FIND files for",pattern)
continue
#ture, 55.00 ns - 65.00 n
filenames = [(float(i.split(' ns ')[0].split()[-1]),i) for i in filenames]
filenames.sort()
filenames = [fn for (i,fn) in filenames]
#filenames.sort()
prog = 'convert'
print(filenames)
#sys.exit()
print([os.path.join(options.outputdir,'images',"animated_"+options.structurename+"_resi_"+plot_type+"_correl.gif",),])
args = ['-loop','0',] + filenames + [os.path.join(options.outputdir,'images',"animated_"+options.structurename+"_resi_"+plot_type+"_correl.gif",),]
tool_utils.run(prog,args,verbose=True)
prog,args = 'convert',('-resize','256x',
os.path.join(options.outputdir,'images',"animated_"+options.structurename+"_resi_"+plot_type+"_correl.gif",),
os.path.join(options.outputdir,'images',"animated_"+options.structurename+"_resi_"+plot_type+"_correl_thumb.gif",),
)
tool_utils.run(prog,args,verbose=True)
if options.slowmovies:
prog,args = 'convert',('-delay','50',
os.path.join(options.outputdir,'images',"animated_"+options.structurename+"_resi_"+plot_type+"_correl.gif",),
os.path.join(options.outputdir,'images',"animated_0.5_"+options.structurename+"_resi_"+plot_type+"_correl.gif",),
)
tool_utils.run(prog,args,verbose=True)
if (options.movielink == 'fast') or not options.slowmovies:
html_txt += ''' <tr>
<td><a href="images/animated_%s_resi_%s_correl.gif"><img src="images/animated_%s_resi_%s_correl_thumb.gif"/></a></td>
</tr>\n'''%(
options.structurename,
plot_type,
options.structurename,
plot_type,
)
else:
html_txt += ''' <tr>
<td><a href="images/animated_0.5_%s_resi_%s_correl.gif"><img src="images/animated_%s_resi_%s_correl_thumb.gif"/></a></td>
</tr>\n'''%(
options.structurename,
plot_type,
options.structurename,
plot_type,
)
html_txt += ''' </table>
<hr>
</body>
</html>
'''
fname =os.path.join(options.outputdir,options.structurename+'BigAnimatedMovies.html')
f = open(fname,'w')
print("WRITING",fname)
f.write(html_txt)
f.close()
|
StarcoderdataPython
|
3389492
|
import os
import sys
from .get_search_path import get_search_path
from .run_all import run_all
search_path = get_search_path()
if not os.path.exists(search_path):
sys.stderr.write('Failed to locate "%s"\n' % search_path)
exit(1)
exit(run_all(search_path))
|
StarcoderdataPython
|
1767810
|
<gh_stars>0
class AppTestMagic:
spaceconfig = dict(usemodules=['__pypy__'])
def test_save_module_content_for_future_reload(self):
import sys, __pypy__, imp
d = sys.dont_write_bytecode
sys.dont_write_bytecode = "hello world"
__pypy__.save_module_content_for_future_reload(sys)
sys.dont_write_bytecode = d
imp.reload(sys)
assert sys.dont_write_bytecode == "hello world"
#
sys.dont_write_bytecode = d
__pypy__.save_module_content_for_future_reload(sys)
|
StarcoderdataPython
|
1673091
|
<filename>notion_extensions/base/props/block/quote.py
from typing import Dict, Optional, Union
from .block import Block
from .children import Children
from ..common import Text, RichText
__all__ = [
"Quote",
]
class Quote(Block):
"""
Quote
Quote property values of block
Attributes
----------
text : RichText
text
children : Children
children
Methods
-------
clear()
Clear data of title
json()
Return this class as dictionary
"""
TEMPLATE: Dict[str, Union[str, Dict]] = {
"type": "quote",
"quote": {
"text": [],
},
}
def __init__(
self,
*text: Union[Text, RichText],
children: Optional[Children] = None,
):
"""
Parameters
----------
*text : Text or RichText
text
children : Children, optional
children
"""
super().__init__()
base = [] # Aggregate Texts
for t in text:
if isinstance(t, RichText):
base.extend(list(t[t.key]))
elif isinstance(t, Text):
base.append(t)
else:
raise ValueError(
f"Expected type is `RichText` or `Text`, but {type(t)} is given"
)
self.__text = RichText(key="text", *base)
self["quote"].update(self.__text) # Add Texts with RichText Style
if children is not None:
self["quote"].update(children) # if children exists, Add Chilren
@property
def text(self) -> RichText:
return self.__text
@text.setter
def text(self, value: RichText) -> None:
if value.key != "text":
raise ValueError("RichText's key is must be `text`")
self.__text = value
self["quote"].update(self.__text)
@property
def children(self) -> Children:
return self["quote"]["children"]
@children.setter
def children(self, value: Children) -> None:
self["quote"].update(value)
|
StarcoderdataPython
|
3332993
|
<reponame>RajdeepJuneja/ga-learner-dsmp-repo<gh_stars>0
# --------------
#Code starts here
def read_file(file_path_1):
file1 = open(file_path_1,'r')
sentence_1 = file1.readline()
file1.close()
return sentence_1
message_1 = read_file(file_path_1)
print(message_1)
def read_file(file_path_2):
file2 = open(file_path_2,'r')
sentence_2 = file2.readline()
file2.close()
return sentence_2
message_2 = read_file(file_path_2)
print(message_2)
def fuse_msg(message_a,message_b):
quotient = int(message_b) // int(message_a)
return str(quotient)
secret_msg_1 = fuse_msg(message_1, message_2)
print(secret_msg_1)
# --------------
##File path for the file
file_path
#Code starts here
#Function to read file
def read_file(path):
#Opening of the file located in the path in 'read' mode
file = open(path, 'r')
#Reading of the first line of the file and storing it in a variable
sentence=file.readline()
#Closing of the file
file.close()
#Returning the first line of the file
return sentence
#Calling the function to read file
sample_message=read_file(file_path)
#Printing the line of the file
print(sample_message)
#Code ends here
# --------------
#Code starts here
message_3=read_file(file_path_3)
print(message_3)
def substitute_msg(message_c):
if message_c == 'Red':
sub = 'Army General'
elif message_c == 'Green':
sub = 'Data Scientist'
else:
sub = 'Marine Biologist'
return sub
secret_msg_2 = substitute_msg(message_3)
print(secret_msg_2 )
# --------------
#Code starts here
message_6 = read_file(file_path_6)
print(message_6)
def extract_msg(message_f):
a_list = message_f.split()
even_word = lambda x:len(x) % 2 == 0
b_list =list(filter(even_word,a_list))
final_msg =" ".join(b_list)
return final_msg
secret_msg_4 = extract_msg(message_6)
# --------------
# File path for message 4 and message 5
file_path_4
file_path_5
#Code starts here
message_4=read_file(file_path_4)
message_5=read_file(file_path_5)
print(message_4)
print(message_5)
def compare_msg(message_d ,message_e):
a_list=message_d.split()
b_list=message_e.split()
c_list=[i for i in a_list if i not in b_list]
final_msg = " ".join(c_list)
return final_msg
secret_msg_3 = compare_msg(message_4 , message_5)
# --------------
#Secret message parts in the correct order
message_parts=[secret_msg_3, secret_msg_1, secret_msg_4, secret_msg_2]
final_path= user_data_dir + '/secret_message.txt'
#Code starts here
secret_msg = " ".join(message_parts)
def write_file(secret_msg, path):
    # Use a context manager so the file handle is closed correctly
    with open(path, 'a+') as outfile:
        outfile.write(secret_msg)
write_file(secret_msg,final_path)
print(secret_msg)
|
StarcoderdataPython
|
1619922
|
<reponame>jhh67/chapel
# RUN: %{python} %s %{inputs}/unparsed-requirements
import sys
from lit.Test import Result, Test, TestSuite
from lit.TestRunner import parseIntegratedTestScript
from lit.TestingConfig import TestingConfig
config = TestingConfig(None, "config", [".txt"], None, [], [], False, sys.argv[1], sys.argv[1], [], [], True)
suite = TestSuite("suite", sys.argv[1], sys.argv[1], config)
test = Test(suite, ["test.py"], config)
test.requires = ["meow"]
test.unsupported = ["alpha"]
test.xfails = ["foo"]
parseIntegratedTestScript(test)
error_count = 0
if test.requires != ["meow", "woof", "quack"]:
error_count += 1
if test.unsupported != ["alpha", "beta", "gamma"]:
error_count += 1
if test.xfails != ["foo", "bar", "baz"]:
error_count += 1
sys.exit(error_count)
|
StarcoderdataPython
|
1623171
|
"""
This is the ViewCLI class, used to interface this program via
the console (e.g. terminal, commandline, or shell).
This class can be used to demonstrate the functionality of the ViewBraille
class on a computer via the shell.
Despite being referred to as a 'view' it also includes support for input.
"""
class ViewCLI:
@staticmethod
def option_select(in_options):
"""Given a list of strings, prints the list and waits for an integer in.
"""
ViewCLI.str_print("")
for n, i in enumerate(in_options):
ViewCLI.str_print(str(n) + " : " + i)
        try:
            ans = int(ViewCLI.str_input("\n> "))
        except ValueError:  # If a bad value was provided, fall back to None
            ans = None
        if ans is None or ans < 0 or ans >= len(in_options):
            ans = None
return(ans)
@staticmethod
def str_print(in_str, ender="\n"):
"""Prints a provided string
This is the most basic type of printing in this program.
Replaces `print()`.
"""
print(in_str, end=ender)
@staticmethod
def str_input(inputter="\n> "):
"""Gets and then returns a string from the user.
The returned string should always be in alphabetical, not braille.
This is because the program may need to logically understand the input.
This is the most basic type of inputting in this program.
Replaces `input()`.
"""
x = input(inputter)
# TODO process a translation depending on the type of input received
return(x)
|
StarcoderdataPython
|
1627499
|
from shiftschema.validators.abstract_validator import AbstractValidator
from shiftschema.result import Error
class Required(AbstractValidator):
"""
Required validator
Checks that value was provided. Can operate on strings or entities with an
option to allow False to be a valid value.
"""
value_required = '%value_required%'
allow_false = False
allow_zero = False
allow_empty_string = False
def __init__(
self,
allow_false=False,
allow_zero=False,
allow_empty_string=False,
message=None
):
"""
Initialize validator
Accepts an optional custom error message.
        :param allow_false: bool, whether to allow False as value
:param allow_zero: bool, whether to allow 0 as value
:param allow_empty_string: bool, whether to allow '' as value
:param message: str, custom error message
:return: None
"""
self.allow_false = allow_false
self.allow_zero = allow_zero
self.allow_empty_string = allow_empty_string
if message is not None:
self.value_required = message
def validate(self, value, model=None, context=None):
"""
Validate
Perform value validation and return result
:param value: value to check
:param model: parent model being validated
:param context: object or None, validation context
:return: shiftschema.results.SimpleResult
"""
# ok if non-empty string
if type(value) is str and value != '':
return Error()
# ok if has value
if bool(value):
return Error()
        # ok if value is False and False is allowed
        if value is False and self.allow_false:
            return Error()
        # ok if value is 0 and zero is allowed
        if value == 0 and self.allow_zero:
            return Error()
        # ok if value is '' and empty string is allowed
        if value == '' and self.allow_empty_string:
            return Error()
# error otherwise
return Error(self.value_required)
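# A short usage sketch (inferred from the checks above: an empty Error()
# means the value passed validation):
#
#   Required().validate('hello')                    # -> Error(), i.e. valid
#   Required().validate('')                         # -> Error('%value_required%')
#   Required(allow_empty_string=True).validate('')  # -> Error(), i.e. valid
#   Required(allow_zero=True).validate(0)           # -> Error(), i.e. valid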
|
StarcoderdataPython
|
40805
|
<gh_stars>0
### This sample shows how to programmatically create and post an annotation into a document, and how to delete the annotation
# Import of classes from libraries
from pyramid.renderers import render_to_response
from groupdocs.ApiClient import ApiClient
from groupdocs.AntApi import AntApi
from groupdocs.StorageApi import StorageApi
from groupdocs.GroupDocsRequestSigner import GroupDocsRequestSigner
from groupdocs.FileStream import FileStream
# Check that a value is neither None nor empty
def IsNotNull(value):
return value is not None and len(value) > 0
# Set variables and get POST data
def sample11(request):
clientId = request.POST.get('client_id')
privateKey = request.POST.get('private_key')
inputFile = request.POST.get('file')
url = request.POST.get('url')
basePath = request.POST.get('server_type')
fileId = request.POST.get('fileId')
guid = ""
iframe = ""
annotationType = request.POST.get('annotation_type')
# Checking required parameters
if IsNotNull(clientId) == False or IsNotNull(privateKey) == False or IsNotNull(annotationType) == False:
return render_to_response('__main__:templates/sample11.pt',
            { 'error' : 'You did not enter all parameters' })
### Create Signer, ApiClient and Annotation Api objects
# Create signer object
signer = GroupDocsRequestSigner(privateKey)
# Create apiClient object
apiClient = ApiClient(signer)
# Create Annotation object
ant = AntApi(apiClient)
api = StorageApi(apiClient)
if basePath == "":
basePath = 'https://api.groupdocs.com/v2.0'
#Set base path
ant.basePath = basePath
api.basePath = basePath
if url != "":
try:
            # Upload file to current user storage using the entered URL of the file
upload = api.UploadWeb(clientId, url)
guid = upload.result.guid
fileId = ""
except Exception, e:
return render_to_response('__main__:templates/sample11.pt',
{ 'error' : str(e) })
if inputFile != "":
try:
#A hack to get uploaded file size
inputFile.file.seek(0, 2)
fileSize = inputFile.file.tell()
inputFile.file.seek(0)
fs = FileStream.fromStream(inputFile.file, fileSize)
####Make a request to Storage API using clientId
#Upload file to current user storage
response = api.Upload(clientId, inputFile.filename, fs)
guid = response.result.guid
fileId = ""
except Exception, e:
return render_to_response('__main__:templates/sample11.pt',
{ 'error' : str(e) })
if fileId != '':
guid = fileId
# Delete annotation if Delete Button clicked
if request.POST.get('delete_annotation') == "1":
try:
ant.DeleteAnnotation(clientId, request.POST.get('annotationId'))
except Exception, e:
return render_to_response('__main__:templates/sample11.pt',
{ 'error' : str(e) })
# Required parameters
allParams = ['box_x', 'box_y', 'text']
    # Add required parameters depending on annotation type ['text' or 'area']
if annotationType == "text":
allParams = allParams + ['box_width', 'box_height', 'annotationPosition_x', 'annotationPosition_y', 'range_position', 'range_length']
elif annotationType == "area":
allParams = allParams + ['box_width', 'box_height']
# Checking required parameters
for param in allParams:
needParam = request.POST.get(param)
if IsNotNull(needParam) == False:
return render_to_response('__main__:templates/sample11.pt',
                { 'error' : 'You did not enter all parameters' })
types = {'text' : "0", "area" : "1", "point" : "2"}
# construct requestBody
requestBody = {
"type": types[request.POST.get('annotation_type')],
"replies": [ { "text": request.POST.get('text') } ],
}
# construct requestBody depends on annotation type
# text annotation
if annotationType == "text":
requestBody = dict(requestBody.items() + {
"box": {
"x" : request.POST.get('box_x'),
"y" : request.POST.get('box_y'),
"width" : request.POST.get('box_width'),
"height" : request.POST.get('box_height')
},
"textRange":{
"position" : request.POST.get('range_position'),
"length" : request.POST.get('range_length')
},
"annotationPosition": {
"x" : request.POST.get('annotationPosition_x'),
"y" : request.POST.get('annotationPosition_y')
},
}.items())
# area annotation
elif annotationType == "area":
requestBody = dict(requestBody.items() + {
"box": {
"x" : request.POST.get('box_x'),
"y" : request.POST.get('box_y'),
"width" : request.POST.get('box_width'),
"height" : request.POST.get('box_height')
},
"annotationPosition": {
"x" : "0",
"y" : "0"
},
}.items())
# point annotation
elif annotationType == "point":
requestBody = dict(requestBody.items() + {
"box": {
"x" : request.POST.get('box_x'),
"y" : request.POST.get('box_y'),
"width" : "0",
"height" : "0"
},
"annotationPosition": {
"x" : "0",
"y" : "0"
},
}.items())
try:
# Make a request to Annotation API using clientId, fileId and requestBody
response = ant.CreateAnnotation(clientId, guid, requestBody)
if response.status == "Ok":
if response.result:
#Generation of iframe URL using fileGuId
if basePath == "https://api.groupdocs.com/v2.0":
iframe = 'https://apps.groupdocs.com/document-annotation2/embed/' + response.result.documentGuid
#iframe to dev server
elif basePath == "https://dev-api.groupdocs.com/v2.0":
iframe = 'https://dev-apps.groupdocs.com/document-annotation2/embed/' + response.result.documentGuid
#iframe to test server
elif basePath == "https://stage-api.groupdocs.com/v2.0":
iframe = 'https://stage-apps.groupdocs.com/document-annotation2/embed/' + response.result.documentGuid
#Iframe to realtime server
elif basePath == "http://realtime-api.groupdocs.com":
iframe = 'https://realtime-apps.groupdocs.com/document-annotation2/embed/' + response.result.documentGuid
iframe = signer.signUrl(iframe)
except Exception, e:
return render_to_response('__main__:templates/sample11.pt',
{ 'error' : str(e) })
    # If request was successful - set variables for template
return render_to_response('__main__:templates/sample11.pt',
{ 'userId' : clientId,
'privateKey' : privateKey,
'fileId' : fileId,
'annotationType' : annotationType,
'annotationText' : request.POST.get('text'),
'annotationId' : response.result.annotationGuid,
'iframe' : iframe,
'status' : response.status
},
request=request)
|
StarcoderdataPython
|
3214166
|
class TwoSum:
"""
Given an array of integers arr and an integer target,
return indices of the two numbers such that they add up to target.
You may assume that each input would have exactly one solution,
and you may not use the same element twice.
"""
def __init__(self, arr, k):
self.arr = arr
self.k = k
"""
    Brute force: check all pairs (i, j) for an n-element arr, where i in [0, n-2] and j in [i+1, n-1]
"""
def bruteForce(self):
for i in range(len(self.arr)-1):
for j in range(i+1, len(self.arr)):
if (self.arr[i] + self.arr[j] == self.k):
return [i, j]
return False
"""
    Use a dictionary: iterate over arr, populating the dictionary with each element we come across;
    if (k - arr[i]) in _dict:
        return [_dict[k - arr[i]], i]
"""
def linear(self):
_dict = {}
for i in range(len(self.arr)):
if (self.k-self.arr[i] in _dict):
                return [_dict[self.k - self.arr[i]], i]
_dict[self.arr[i]] = i
return []
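# Quick usage example (both strategies return the indices of the pair
# that sums to the target):
if __name__ == "__main__":
    ts = TwoSum([2, 7, 11, 15], 9)
    print(ts.bruteForce())  # [0, 1], since 2 + 7 == 9
    print(ts.linear())      # [0, 1]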
|
StarcoderdataPython
|
19962
|
<filename>talos/distribute/distribute_run.py
import json
import threading
from .distribute_params import run_scan_with_split_params
from .distribute_utils import return_current_machine_id, ssh_connect, ssh_file_transfer, ssh_run
from .distribute_database import update_db
def run_central_machine(self, n_splits, run_central_node):
'''
Parameters
----------
    n_splits | `int` | number of splits of the parameter grid
    run_central_node | `bool` | whether the central machine also runs a split
Returns
-------
None.
'''
# runs the experiment in central machine
machine_id = 0
run_scan_with_split_params(self, n_splits, run_central_node, machine_id)
def distribute_run(self):
'''
Parameters
----------
    run_central_node | `bool` | read from the config; the default is False.
    db_machine_id | `int` | the default is 0. Indicates the centralised store
    where the data gets merged.
Returns
-------
None.
'''
# run the Scan script in distributed machines
config = self.config_data
if 'run_central_node' in config.keys():
run_central_node = config['run_central_node']
else:
run_central_node = False
update_db_n_seconds = 5
if 'DB_UPDATE_INTERVAL' in config['database'].keys():
update_db_n_seconds = int(config['database']['DB_UPDATE_INTERVAL'])
n_splits = len(config['machines'])
if run_central_node:
n_splits += 1
current_machine_id = str(return_current_machine_id(self))
if current_machine_id == str(0):
clients = ssh_connect(self)
for machine_id, client in clients.items():
new_config = config
new_config['current_machine_id'] = machine_id
with open('tmp/remote_config.json', 'w') as outfile:
json.dump(new_config, outfile)
ssh_file_transfer(self, client, machine_id)
threads = []
if run_central_node:
t = threading.Thread(
target=run_central_machine,
args=(self, n_splits, run_central_node),
)
t.start()
threads.append(t)
t = threading.Thread(
target=update_db,
args=([self, update_db_n_seconds, current_machine_id]),
)
t.start()
threads.append(t)
for machine_id, client in clients.items():
t = threading.Thread(
target=ssh_run,
args=(self,
client,
machine_id,
),
)
t.start()
threads.append(t)
for t in threads:
t.join()
|
StarcoderdataPython
|
1640123
|
import torch
import torch.nn as nn
import numpy as np
np.random.seed(0)
from model.generate_anchor import generate_anchors
from model.bbox_transform import clip_boxes
from model.ellipse_transform import ellipse_transform_inv, ellipse2box
from nms.cpu_nms import cpu_nms
from nms.gpu_nms import gpu_nms
def _filter_boxes(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = ((ws >= min_size) & (hs >= min_size)).nonzero().view(-1)
return keep
class EllipseProposalLayer(nn.Module):
def __init__(self, cfg):
super(EllipseProposalLayer, self).__init__()
self._cfg = dict(cfg)
self._preprocess()
def _preprocess(self):
# pre-computing stuff for making anchor later
self._im_info = (self._cfg['MAX_SIZE'], self._cfg['MAX_SIZE'])
base_anchors = generate_anchors(
base_size=self._cfg['RPN_FEAT_STRIDE'],
ratios=[1],
scales=np.array(self._cfg['ANCHOR_SCALES'], dtype=np.float32))
num_anchors = base_anchors.shape[0]
feat_stride = self._cfg['RPN_FEAT_STRIDE']
feat_width = self._cfg['MAX_SIZE'] // self._cfg['RPN_FEAT_STRIDE']
feat_height = feat_width
shift_x = np.arange(0, feat_width) * feat_stride
shift_y = np.arange(0, feat_height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(),
shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = num_anchors
K = shifts.shape[0]
anchors = base_anchors.reshape((1, A, 4)) + \
shifts.reshape((1, K, 4)).transpose((1, 0, 2))
anchors = anchors.reshape((K * A, 4))
self._feat_height = feat_height
self._feat_width = feat_width
self._anchors = torch.from_numpy(anchors).float()
def cuda(self, device=None):
self._anchors = self._anchors.cuda(device)
return self._apply(lambda t: t.cuda(device))
def forward(self, out_cls, out_ellipse):
"""
out_cls: (feat_height, feat_width, anchors, 2) FloatVariable
out_ellipse: (feat_height, feat_width, anchors, 5) FloatVariable
"""
scores = nn.functional.softmax(
out_cls, dim=3)[..., 1].contiguous().data.view(-1, 1)
ellipse_deltas = out_ellipse.data.view(-1, 5)
# 1. Generate proposals from ellipse deltas and shifted anchors
# Convert anchors into proposals via ellipse transformations
# Convert ellipse into bbox proposals
ellipses = ellipse_transform_inv(self._anchors, ellipse_deltas)
boxes = ellipse2box(ellipses, self._cfg['ELLIPSE_PAD'])
# 2. clip predicted boxes to image
boxes = clip_boxes(boxes, self._im_info[:2])
# 3. remove predicted boxes with either height or width < threshold
# (NOTICE: convert min_size to input image scale stored in im_info[2])
keep = _filter_boxes(boxes, self._cfg['TEST.RPN_MIN_SIZE'])
boxes = boxes[keep, :]
ellipses = ellipses[keep, :]
scores = scores[keep]
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
_, order = torch.sort(scores.view(-1), dim=0, descending=True)
if self._cfg['TEST.RPN_PRE_NMS_TOP_N'] > 0:
order = order[:self._cfg['TEST.RPN_PRE_NMS_TOP_N']]
boxes = boxes[order, :]
ellipses = ellipses[order, :]
scores = scores[order]
# 6. apply nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
if self._cfg['USE_GPU_NMS']:
nms = gpu_nms
else:
nms = cpu_nms
dets = np.hstack((boxes.cpu().numpy(), scores.cpu().numpy()))
keep = nms(dets, self._cfg['TEST.RPN_NMS_THRESH'])
keep = torch.from_numpy(np.array(keep)).type_as(scores).long()
if self._cfg['TEST.RPN_POST_NMS_TOP_N'] > 0:
keep = keep[:self._cfg['TEST.RPN_POST_NMS_TOP_N']]
boxes = boxes[keep, :]
ellipses = ellipses[keep, :]
scores = scores[keep].view(-1)
return (boxes, ellipses, scores)
|
StarcoderdataPython
|
1677241
|
<filename>Artifacts/Results/plotcopa.py<gh_stars>1-10
import os
import numpy as np
import matplotlib.pyplot as plt
import sys
from glob import glob
import pandas as pd
import seaborn as sns
import math
import matplotlib.gridspec as gridspec
from matplotlib.patches import Ellipse
plt.rcParams['text.latex.preamble']=[r'\boldmath']
params = {
'font.size' : 20,
'legend.fontsize': 16,
'text.latex.unicode': True,
}
plt.rcParams.update(params)
plt.rcParams['ytick.labelsize'] = 20
plt.rcParams['xtick.labelsize'] = 20
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
if not os.path.exists('figures'):
os.makedirs('figures')
def simplify_cdf(data):
'''Return the cdf and data to plot
Remove unnecessary points in the CDF in case of repeated data
'''
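    # NOTE: the early return below short-circuits the de-duplication code
    # beneath it, so the full, unsimplified CDF is returned and the rest of
    # this function is unreachable.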
yvals_1 = np.arange(len(data))/float(len(data))
return yvals_1, data
data_len = len(data)
assert data_len != 0
cdf = np.arange(data_len) / data_len
simple_cdf = [0]
simple_data = [data[0]]
if data_len > 1:
simple_cdf.append(1.0 / data_len)
simple_data.append(data[1])
for cdf_value, data_value in zip(cdf, data):
if data_value == simple_data[-1]:
simple_cdf[-1] = cdf_value
else:
simple_cdf.append(cdf_value)
simple_data.append(data_value)
assert len(simple_cdf) == len(simple_data)
# to have cdf up to 1
simple_cdf.append(1)
simple_data.append(data[-1])
return simple_cdf, simple_data
def cdfplot(data_in):
"""Plot the cdf of a data array
Wrapper to call the plot method of axes
"""
# cannot shortcut lambda, otherwise it will drop values at 0
data = sorted(filter(lambda x: (x is not None and ~np.isnan(x)
and ~np.isinf(x)),
data_in))
data_len = len(data)
simple_cdf, simple_data = simplify_cdf(data)
return simple_data, simple_cdf
def scale(a):
return a/1000000.0
def parse_throughput(filename):
times = []
pktsize = []
throughput_file = open(filename,"r")
tokens = throughput_file.readline().strip().split()
sTime = float(tokens[0])
firstTime = sTime
bucket = []
bucket.append(float(tokens[1]))
for line in throughput_file:
tokens = line.strip().split()
if float(tokens[0])< sTime+1.0:
bucket.append(float(tokens[1]))
else:
pktsize.append(sum(bucket)*8/1000000.0)
bucket = []
times.append(sTime-firstTime)
while float(tokens[0])-sTime > 1.0:
sTime += 1.0
throughput_file.close()
return pktsize, times
def parse_delay_copa (filename):
delays = []
times = []
cnt = 0
shift = 0
delay_file = open(filename,"r")
for line in delay_file:
tokens = line.strip().split(",")
if tokens[0] == '--RTT--':
delays.append((float(tokens[2])))
if len(times) >0 and float(tokens[1])+shift < times[-1]:
shift = times[-1]
times.append(float(tokens[1])+shift)
delay_file.close()
return delays, times
def parse_delay(filename):
delays = []
times = []
cnt = 0
delay_file = open(filename,"r")
tokens = delay_file.readline().strip().split(",")
sTime = float(tokens[0])
delays.append((float(tokens[1])))
times.append((float(tokens[0])-sTime))
for line in delay_file:
tokens = line.strip().split(",")
if float(tokens[1]) > 1000.0:
print line.strip()
if float(tokens[1]) < 10000.0:
delays.append((float(tokens[1])))
times.append((float(tokens[0])-sTime))
delay_file.close()
return delays, times
for trace in [sys.argv[1]]:
totalThroughput = {'copa':[], 'alccCopa':[]}
totalDelay = {'copa':[], 'alccCopa':[]}
for i in range(1,int(sys.argv[2])+1): #21
print i
try:
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8,5), facecolor='w', sharex=True)
ax = plt.gca()
# plotting the trace file
f1 = open ("../../channelTraces/{}".format(trace),"r")
BW = []
nextTime = 2900
cnt = 0
for line in f1:
if int(line.strip()) > nextTime:
BW.append(cnt*1492*8)
cnt = 0
nextTime+=1000
else:
cnt+=1
f1.close()
ax1.fill_between(range(len(BW)), 0, list(map(scale,BW)),color='#D3D3D3')
colors = {"copa":"b", "alccCopa":"r"}
tsharkF = {"copa":"src", "alccCopa":"dst"}
for algo in ["copa","alccCopa"]:
os.system("tshark -r ./{0}/{1}{2}/log.pcap -T fields -e frame.time_epoch -e frame.len 'ip.{3}==172.16.17.32' > ./{0}/{1}{2}/throughput.csv".format(algo,trace,i,tsharkF[algo]))
if algo == 'copa':
delays, delayTimes = parse_delay_copa("./{0}/{1}{2}/info.out".format(algo,trace,i))
else:
delays, delayTimes = parse_delay(glob("./{0}/{1}{2}/*/".format(algo,trace,i))[0]+"Receiver.out")
ax2.plot(delayTimes, delays, color=colors[algo], lw=3, rasterized=True, label=algo)
totalDelay[algo] += delays
# plotting throughput
throughputDL = []
timeDL = []
throughputDL, timeDL = parse_throughput("./{0}/{1}{2}/throughput.csv".format(algo,trace,i))
ax1.plot(timeDL, throughputDL, color=colors[algo], lw=3, rasterized=True, label=algo)
totalThroughput[algo] += throughputDL
ax1.set_ylabel("Throughput\n(Mbps)")
ax1.legend(loc='best', prop={'size':12})
ax1.set_xlim([0,300])
ax2.set_xlim([0,300])
ax2.set_xlabel('Time (s)')
ax2.set_ylabel('Delay (ms)')
ax2.set_yscale('log',basey=10)
ax2.grid(True, which="both")
plt.savefig('./figures/{0}_{1}{2}.png'.format(algo,trace,i),bbox_inches='tight')
except:
print ("Error in", trace, i)
continue
sns.set_style("white")
fig, (ax1, ax2) = plt.subplots(2, figsize=(8,6), facecolor="w")
fig.tight_layout(pad=2.0)
colors= {'alccCopa':"red", "copa":"blue"}
for name in ["alccCopa","copa"]:
print totalThroughput
sns.kdeplot(totalThroughput[name] , color=colors[name], shade=True, ax = ax1, lw=3)
sns.kdeplot(totalDelay[name] , color=colors[name], label=name, shade=True, ax = ax2, lw=3)
ax1.set_ylabel('Probability')
ax1.set_xlabel('Throughput (Mbps)')
ax2.set_ylabel('Probability')
ax2.set_xlabel('Delay (ms)')
plt.xlim([0,800])
plt.legend(loc='best')
ax1.grid()
ax2.grid()
plt.savefig("./figures/copa_pdf_{0}.png".format(trace),dpi=300,bbox_inches='tight')
plt.close()
fig, (ax1, ax2) = plt.subplots(2)
fig.tight_layout(pad=2.0)
colors= {"alccCopa":"red", 'copa':"blue"}
for name in ["alccCopa","copa"]:
a,b = cdfplot(totalThroughput[name])
ax1.plot(a, b, label=name, color=colors[name])
a,b = cdfplot(totalDelay[name])
ax2.plot(a, b, label=name, color=colors[name])
ax1.set_ylabel('Throughput CDF')
ax1.set_xlabel('Throughput (Mbps)')
ax2.set_ylabel('Delay CDF')
ax2.set_xlabel('Delay (ms)')
plt.xlim([0,800])
plt.legend(loc='best')
ax1.grid()
ax2.grid()
#plt.show()
plt.savefig("./figures/copa_cdf_{0}.png".format(trace),dpi=300,bbox_inches='tight')
plt.close()
def simple_cdf(data):
data_sorted = np.sort(data)
# calculate the proportional values of samples
cdf = 1. * np.arange(len(data)) / (len(data) - 1)
tmp = []
for k in range(len(cdf)):
if cdf[k] >= 0.25:
tmp.append(data_sorted[k])
break
for k in range(len(cdf)):
if cdf[k] >= 0.5:
tmp.append(data_sorted[k])
break
for k in range(len(cdf)):
if cdf[k] >= 0.75:
tmp.append(data_sorted[k])
break
return tmp
labels = ["alccCopa","copa"]
colors=['r','b']
fig, (ax) = plt.subplots(1, figsize=(8,5), facecolor="w")
overallThroughput = []
overallDelay = []
for name in labels:
overallThroughput.append(simple_cdf(totalThroughput[name]))
overallDelay.append(simple_cdf(totalDelay[name]))
for i in range(len(labels)):
x = (overallDelay[i][2]+overallDelay[i][0])/2.0
y = (overallThroughput[i][2]+overallThroughput[i][0])/2.0
ellipse = Ellipse(xy=(x,y), width=(overallDelay[i][2]-overallDelay[i][0]),
height=(overallThroughput[i][2]-overallThroughput[i][0]), edgecolor=colors[i], fc='None', lw=3,
alpha=.9, label=labels[i])
print labels[i], overallThroughput[i][2], overallThroughput[i][0]
print labels[i], overallDelay[i][2], overallDelay[i][0]
ax.add_patch(ellipse)
plt.plot(overallDelay[i][1],overallThroughput[i][1],marker='x',mew=3,color=colors[i])
plt.legend(loc='best')
plt.grid()
plt.ylabel("Throughput (Mbps)")
plt.xlabel("Delay (s)")
try:
plt.xlim([0, 1.2*max(overallDelay[0][2],overallDelay[1][2])])
plt.ylim([0, 1.2*max(overallThroughput[0][2],overallThroughput[1][2])])
except:
pass
plt.savefig("./figures/copa_overall_"+trace+'.png',bbox_inches='tight')
|
StarcoderdataPython
|
155921
|
<filename>src/slave/slave.py
import zmq as mySocket
import sys
import threading
import json
import time
import base64
import zlib
from json_to_txt import make as json_txt
from save_file import save_txt
from mapper import mapper
from sorting import sorting
from reducer import reducer
def descompacta(text):
return zlib.decompress(text)
def compacta(text):
return zlib.compress(text)
class ClientTask(threading.Thread):
def __init__(self, id):
self.id = id
threading.Thread.__init__(self)
def run(self):
context = mySocket.Context()
socket = context.socket(mySocket.DEALER)
identity = u'worker-%d' % self.id
socket.identity = identity.encode('ascii')
socket.connect('tcp://192.168.0.3:5576')
        print('Client %s INITIALIZED' % (identity))
poll = mySocket.Poller()
poll.register(socket, mySocket.POLLIN)
socket.send_json('Hit')
        msg = socket.recv() # convert from String to JSON
        descompactado = descompacta(msg) # decompress the text
        decifrado = base64.b64decode(descompactado) # decode the message
        msg = eval(decifrado.decode('utf-8'))
        print(msg)
        # save the json text into the text file
save_txt(
json_txt(
msg
)
)
        # Next step is to map with 'mapper.py'
        mapper('cache/data.txt')
        # Now we have to do the sorting
        sorting('cache/mapper_output.txt')
        # Now it is time to apply 'reducer.py'
reducer('cache/mapper_output.txt')
results = [line for line in open('cache/reducer_output.txt')]
cifrado = base64.b64encode(str(
results
).encode('utf-8'))
compactado = compacta(cifrado)
        socket.send(compactado) # Send the reducing result to the Master
socket.close()
context.term()
def main():
client = ClientTask(200)
client.start()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1663547
|
""" A list of (Google) search terms for a variant """
from collections import defaultdict
from typing import Tuple, List
from pyhgvs import HGVSName, InvalidHGVSName
def _get_gene_and_terms(vta, c_hgvs=True, dbsnp=True) -> Tuple[str, List]:
from annotation.models import VariantTranscriptAnnotation
if vta.gene:
gene_symbol = str(vta.gene.get_gene_symbol(vta.version.genome_build))
else:
gene_symbol = None
terms = []
hgvs_name = None
try:
hgvs_name = HGVSName(vta.hgvs_c)
except (NotImplementedError, InvalidHGVSName):
pass
if c_hgvs and hgvs_name:
# "7051G>A" | "7051G->A" | "7051G-->A" | "7051G/A"
if hgvs_name.mutation_type == ">":
cdna_coords = hgvs_name.format_cdna_coords()
ref, alt = hgvs_name.get_ref_alt()
for change_symbol in [">", "->", "-->", "/"]:
terms.append(f"{cdna_coords}{ref}{change_symbol}{alt}")
if vta.hgvs_p:
# pyHGVS doesn't handle p.HGVS very well, so can't use HGVSName.format_protein() etc
protein_aa3 = vta.hgvs_p.split(":p.")[1]
terms.append(protein_aa3)
protein_aa1 = VariantTranscriptAnnotation.amino_acid_3_to_1(protein_aa3)
terms.append(protein_aa1)
if dbsnp and vta.dbsnp_rs_id:
terms.append(vta.dbsnp_rs_id)
if hgvs_name and hgvs_name.mutation_type in ('ins', 'del', 'dup'):
# "del ex 20" and "del exon 20"
ex_in_terms = [(vta.intron, ["intron", "in"]),
(vta.exon, ["exon", "ex"])]
for val, in_terms in ex_in_terms:
if val:
num = val.split("/")[0] # looks like: "20/24"
for t in in_terms:
terms.append(f"{hgvs_name.mutation_type} {t} {num}")
return gene_symbol, terms
def _get_search_terms(variant_transcripts_list: List, formatter: str = None, **kwargs):
gene_terms = defaultdict(set)
for vta in variant_transcripts_list:
gene_symbol, terms = _get_gene_and_terms(vta, **kwargs)
gene_terms[gene_symbol].update(terms)
searches = []
for gene_symbol, terms in gene_terms.items():
if formatter:
gene_symbol = formatter % gene_symbol
terms = [formatter % s for s in terms]
and_terms = [gene_symbol]
optional_or = " OR ".join(terms)
if optional_or:
and_terms.append(f"({optional_or})")
search_terms = " AND ".join(and_terms)
searches.append(search_terms)
if len(searches) == 1:
return searches[0]
return " OR ".join(["(%s)" % s for s in searches])
def get_variant_search_terms(variant_transcripts_list: List):
"""
Examples:
BRCA1 AND ("5194-?_5277+?del" OR "del exon 20" OR "His1732_Lys1759del" OR "H1732_K1759del" OR "del ex 20")
BRCA1 AND ("5207T>C" OR "5326T>C" OR "Val1736Ala" OR "V1736A")
BRCA1+[("736T>G")or("855T>G")or("L246V")or("Leu246Val")]
BRCA2 AND ("10110" OR "10338" OR "Arg3370Arg" OR "R3370R" OR "Arg3370=" OR "Arg3370" OR "R3370")
BRCA2 AND ("6339") BRCA2:c.6339C>T BRCA2 AND (p.Asn2113")
"BRCA2" ("7051G>A" | "7051G->A" | "7051G-->A" | "7051G/A" | "Ala2351Thr" | "A2351T" | rs80358930)
BRCA2 AND ("3137A" OR "3365A" OR "Glu1046Gly" OR "E1046G")
"PTEN" ("1000A>T" | "1000A->T" | "1000A-->T" | "1000A/T" | "Asn334Tyr" | "N334Y")
PTEN AND ("1000A" OR "Asn334Tyr" OR "N334Y")
"""
return _get_search_terms(variant_transcripts_list, formatter='"%s"')
def get_variant_pubmed_search_terms(variant_transcripts_list: List):
""" Examples:
(CFTR) AND ((Arg117His) OR (R117H))
PubMed doesn't like rsIds or c.HGVS """
return _get_search_terms(variant_transcripts_list, formatter='(%s)', c_hgvs=False, dbsnp=False)
|
StarcoderdataPython
|
4813735
|
"""Idea: self-organization, two pathways:
- horizontal
- vertical (stacked, deep, onion, ...)
Moved to and developed in smp_growth project
"""
import time, sys, argparse
import numpy as np
import matplotlib.pyplot as plt
types = ["RBF", "tanh", "local_linear_reg_rls", "randomBF", "res", "kerasmodel"]
class unitID(object):
def __init__(self, ndim_in = 1):
self.ndim_in = ndim_in
self.a = np.zeros((1, self.ndim_in))
def activate(self, x):
self.a = x
return self.a
class unitRBF(object):
def __init__(self, ndim_in = 1):
self.ndim_in = ndim_in
self.eta1 = 0.01
# self.w = np.zeros((ndim_in, 1))
self.w = np.random.uniform(-1e-1, 1e-1, size=(self.ndim_in, 1))
self.a = np.zeros((1,self.ndim_in))
def activate(self, x1, x2):
print "x1.shape = %s, x2.shape = %s" % (x1.shape, x2.shape)
# x = np.vstack((x1.T, x2.T))
self.a = self.w - x1.T
# self.a = # np.abs(diff)
self.a += x2.T
self.dw = self.a * self.eta1
self.w -= self.dw
print "self.a.shape", self.a.shape, self.w.shape, self.dw.shape, np.linalg.norm(self.dw)
return self.a
class Nv1(object):
def __init__(self, maxsize = 10, unittype = "RBF", ndim_in = 1):
self.maxsize = maxsize
self.ndim_in = ndim_in
        # Build a separate unit instance per layer; list multiplication would
        # alias one shared unitRBF across layers
        self.layers = [unitID(ndim_in = self.ndim_in)] + [unitRBF(ndim_in = self.ndim_in) for _ in range(2)] + [None for i in range(2, self.maxsize)]
# layer activations: input layer + number of layers (maxsize) + self.activation
self.al = np.zeros((self.maxsize + 2, ndim_in * 2))
self.a = np.zeros((1,1))
def activate(self, x):
print "x.shape", x.shape, self.layers[1].w.shape
self.al[[0],:self.ndim_in] = x.copy()
for li in range(1, len(self.layers)):
print "li = %d, l = %s" % (li, self.al[li])
            if self.layers[li] is not None:
self.al[[li]] = self.layers[li].activate(self.al[[li-1]], self.al[[li+1]])
class Nv2(object):
def __init__(self, maxsize = 10, unittype = "RBF", ndim_in = 1):
self.maxsize = maxsize
self.ndim_in = ndim_in
        self.layers = [unitID(ndim_in = self.ndim_in), unitRBF(ndim_in = self.ndim_in)] + [None for i in range(1, self.maxsize)]
# layer activations: input layer + number of layers (maxsize) + self.activation
self.al = np.zeros((self.maxsize + 2, ndim_in))
self.a = np.zeros((1,1))
self.r = np.zeros((self.maxsize + 2, ))
self.r_ = np.zeros_like(self.r)
def activate(self, x):
print "x.shape", x.shape, self.layers[1].w.shape
self.al[[0]] = x.copy()
# forward
for li in range(1, len(self.layers)):
print "li = %d, l = %s" % (li, self.al[li])
if self.layers[li] is not None:
if self.layers[li+1] is not None:
x2 = self.al[[li+1]]
else:
x2 = np.zeros((self.al[[li]].shape))
self.al[[li]] = self.layers[li].activate(self.al[[li-1]], x2).T
# backward
for li in range(len(self.layers) - 1, 1, -1):
print "li = %d, l = %s" % (li, self.al[li])
if self.layers[li] is not None:
if self.layers[li+1] is not None:
x2 = self.al[[li+1]]
else:
x2 = np.zeros((self.al[[li]].shape))
self.al[[li]] = self.layers[li].activate(self.al[[li-1]], x2).T
self.r = np.sum(np.abs(self.al), axis=1)
        print "self.r = %s" % (self.r)
self.r_ = 0.99 * self.r_ + 0.01 * self.r
class Nv3(object):
def __init__(self, maxsize = 10):
self.maxsize = 10
def generate_data_1(numsteps):
from pypr.clustering import *
# from numpy import *
centroids=[np.array([0,-3]), np.array([1.5, -1])]
ccov = [np.array([[0.1,-0.3],[0.3,0.12]]), np.diag([0.1, 0.4])]
mc = [0.5, 0.5]
samples = numsteps
return gmm.sample_gaussian_mixture(centroids = centroids, ccov = ccov, mc = mc, samples=samples)
def train_network(x):
# L1 activate
# for L in L-exhausted
# activate L on x
# if capacity_L > theta:
# train L(x)
# track residuals r_L
# if accum residual L_{-1} > theta
# spawn new layer
# aspects:
# - backward skip connections: upper L activation propagates down to lower L input (recurrence, how to handle that)
# - activation cycle: activate all the way to the top, back-propagate all the way back down
# - regain capacity: if residual gets low enough, can learn again
# v1: learn L1, fixate forever, learn L2, ..., accumulate forward skip conns (inputs getting wider, put PCA/ICA/SFA on connections)
# v2: learn L1 ...
# units: RBF, local linear reg (RLS), randomBF, reservoir, full-scale deep network
pass
def main(args):
print "args", args
numsteps = 1500
maxsize = 3
# generate network
# net = Nv1(maxsize = 10, unittype = "RBF", ndim_in = 2)
net = Nv2(maxsize = maxsize, unittype = "RBF", ndim_in = 2)
print "net", net
# generate data
d = generate_data_1(numsteps)
print "d.shape", d.shape
# plt.plot(d[:,0], d[:,1], "ro", alpha=0.5)
plt.subplot(211)
plt.plot(d)
plt.subplot(212)
plt.hist2d(d[:,0], d[:,1], bins=16)
plt.colorbar()
plt.show()
# log data
net_r = np.zeros((numsteps, maxsize+2))
net_r_ = np.zeros((numsteps, maxsize+2))
# loop over data
for i in range(numsteps):
net.activate(d[[i]])
net_r[i] = net.r
net_r_[i] = net.r_
plt.subplot(211)
plt.plot(net_r)
plt.subplot(212)
plt.plot(net_r_)
plt.show()
# train network
if __name__ == "__main__":
parser = argparse.ArgumentParser()
args = parser.parse_args()
main(args)
|
StarcoderdataPython
|
4813439
|
<gh_stars>0
#! /usr/bin/env python
# -*- coding: utf-8 -*-
FASL_GRAPH_DEF_TYPE = 1
FASL_GRAPH_REF_TYPE = 2
FASL_FALSE_TYPE = 3
FASL_TRUE_TYPE = 4
FASL_NULL_TYPE = 5
FASL_VOID_TYPE = 6
FASL_EOF_TYPE = 7
FASL_INTEGER_TYPE = 8
FASL_FLONUM_TYPE = 9
FASL_SINGLE_FLONUM_TYPE = 10
FASL_RATIONAL_TYPE = 11
FASL_COMPLEX_TYPE = 12
FASL_CHAR_TYPE = 13
FASL_SYMBOL_TYPE = 14
FASL_UNREADABLE_SYMBOL_TYPE = 15
FASL_UNINTERNED_SYMBOL_TYPE = 16
FASL_KEYWORD_TYPE = 17
FASL_STRING_TYPE = 18
FASL_IMMUTABLE_STRING_TYPE = 19
FASL_BYTES_TYPE = 20
FASL_IMMUTABLE_BYTES_TYPE = 21
FASL_PATH_TYPE = 22
FASL_RELATIVE_PATH_TYPE = 23
FASL_PREGEXP_TYPE = 24
FASL_REGEXP_TYPE = 25
FASL_BYTE_PREGEXP = 26
FASL_BYTE_REGEXP_TYPE = 27
FASL_LIST_TYPE = 28
FASL_LIST_STAR_TYPE = 29
FASL_PAIR_TYPE = 30
FASL_VECTOR_TYPE = 31
FASL_IMMUTABLE_VECTOR_TYPE = 32
FASL_BOX_TYPE = 33
FASL_IMMUTABLE_BOX_TYPE = 34
FASL_PREFAB_TYPE = 35
FASL_HASH_TYPE = 36
FASL_IMMUTABLE_HASH_TYPE = 37
FASL_SRCLOC = 38
FASL_EXTFLONUM_TYPE = 39
# 100 to 255 is used for small integers:
FASL_SMALL_INTEGER_START = 100
FASL_LOWEST_SMALL_INTEGER = -10
FASL_HIGHEST_SMALL_INTEGER = 255 - ((FASL_SMALL_INTEGER_START - FASL_LOWEST_SMALL_INTEGER) - 1)
FASL_PREFIX = "racket/fasl:"
FASL_PREFIX_LENGTH = len(FASL_PREFIX)
FASL_HASH_EQ_VARIANT = 0
FASL_HASH_EQUAL_VARIANT = 1
FASL_HASH_EQV_VARIANT = 2
#################################################
class Fasl(object):
_attrs_ = ["GLOBAL_SHARED_COUNT", "SHARED", "current_relative_dir"]
_immutable_fields_ = ["current_relative_dir"]
def __init__(self, relative_dir=None):
self.GLOBAL_SHARED_COUNT = -1
self.SHARED = []
self.current_relative_dir = relative_dir
def to_sexp_from_file(self, file_name):
from pycket.values_string import W_String
from pycket.prims.input_output import open_infile
port = open_infile(W_String.make(file_name), "rb")
return self.to_sexp_from_w_port(port)
def to_sexp_from_w_port(self, port):
prefix = port.read(FASL_PREFIX_LENGTH)
if prefix != FASL_PREFIX:
raise Exception("unrecognized prefix : %s " % prefix)
shared_count = self.read_fasl_integer_stream(port)
self.GLOBAL_SHARED_COUNT = shared_count
self.SHARED = [None]*shared_count
length = self.read_fasl_integer_stream(port)
fasl_string = port.read(length)
pos = 0
sexp, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
return sexp
def read_multi_double_into_rpython_list(self, fasl_string, pos, length):
keys = [None]*length
vals = [None]*length
for i in range(length):
k, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
v, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
keys[i] = k
vals[i] = v
return keys, vals, pos
def read_multi_into_rpython_list(self, fasl_string, pos, length):
vals = [None]*length
for i in range(length):
element, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
vals[i] = element
return vals, pos
def read_fasl_string(self, fasl_string, pos, length=-1):
if length < 0:
length, pos = self.read_fasl_integer(fasl_string, pos)
return self.read_bytes_exactly(fasl_string, pos, length)
# TODO: check utf-8
def read_fasl_bytes(self, fasl_string, pos):
bytes_len, pos = self.read_fasl_integer(fasl_string, pos)
return self.read_bytes_exactly(fasl_string, pos, bytes_len)
def read_byte_no_eof(self, fasl_string, pos):
return ord(fasl_string[pos]), pos+1
def read_bytes_exactly(self, fasl_string, pos, n):
if pos+n > len(fasl_string):
raise Exception("truncated stream")
return self.get_slice(fasl_string, pos, pos+n), pos+n
def read_fasl_integer(self, fasl_string, pos):
b, pos = self.read_byte_no_eof(fasl_string, pos)
return self.fasl_integer_inner(fasl_string, pos, b)
def fasl_integer_inner(self, fasl_string, pos, b):
from pycket import values as v
from pycket.prims.numeric import _integer_bytes_to_integer
from pycket.prims.string import _str2num
from pycket.values_string import W_String
if b <= 127:
return b, pos
elif b >= 132:
return b-256, pos
elif b == 128:
num_str, pos = self.read_bytes_exactly(fasl_string, pos, 2)
return _integer_bytes_to_integer(list(num_str), v.w_true, v.w_false).toint(), pos
elif b == 129:
num_str, pos = self.read_bytes_exactly(fasl_string, pos, 4)
return _integer_bytes_to_integer(list(num_str), v.w_true, v.w_false).toint(), pos
elif b == 130:
num_str, pos = self.read_bytes_exactly(fasl_string, pos, 8)
return _integer_bytes_to_integer(list(num_str), v.w_true, v.w_false).toint(), pos
elif b == 131:
length, pos = self.read_fasl_integer(fasl_string, pos)
num_str, pos = self.read_fasl_string(fasl_string, pos, length)
if len(num_str) != length:
raise Exception("fasl: truncated stream at number")
return _str2num(W_String.fromstr_utf8(num_str).as_str_utf8(), 16).toint(), pos
else:
raise Exception("fasl: internal error on integer mode")
def read_bytes_exactly_stream(self, stream, n):
bytes = stream.read(n)
if len(bytes) != n:
raise Exception("truncated stream")
return bytes
def read_fasl_integer_stream(self, stream):
from pycket import values as v
from pycket.prims.numeric import _integer_bytes_to_integer
from pycket.prims.string import _str2num
from pycket.values_string import W_String
        _b = stream.read(1)  # keep as a string so the eof check below works
if not _b:
raise Exception("truncated stream - got eof")
b = ord(_b)
if b <= 127:
return b
elif b >= 132:
return b-256
elif b == 128:
num_str = self.read_bytes_exactly_stream(stream, 2)
return _integer_bytes_to_integer(list(num_str), v.w_true, v.w_false).toint()
elif b == 129:
num_str = self.read_bytes_exactly_stream(stream, 4)
return _integer_bytes_to_integer(list(num_str), v.w_true, v.w_false).toint()
elif b == 130:
num_str = self.read_bytes_exactly_stream(stream, 8)
return _integer_bytes_to_integer(list(num_str), v.w_true, v.w_false).toint()
elif b == 131:
length = self.read_fasl_integer_stream(stream)
assert isinstance(length, int)
num_str = self.read_bytes_exactly_stream(stream, length)
if len(num_str) != length:
raise Exception("fasl: truncated stream at number")
return _str2num(W_String.fromstr_utf8(num_str).as_str_utf8(), 16).toint()
else:
raise Exception("fasl: internal error on integer mode")
def get_slice(self, string, start, stop):
assert stop > 0 and start >= 0
return string[start:stop]
    # let's not worry about CPS'ing this right now;
    # we probably won't have any sexp deeper than the stack anyway
def fasl_to_sexp_recursive(self, fasl_string, pos):
from pycket import values as v
from pycket.values_string import W_String
from pycket.values_regex import W_Regexp, W_PRegexp, W_ByteRegexp, W_BytePRegexp
from pycket.vector import W_Vector
from pycket.values_struct import W_Struct
from pycket.hash import simple as hash_simple
from pycket.hash.equal import W_EqualHashTable
from pycket.prims.numeric import float_bytes_to_real
from pycket.prims.string import _str2num
from rpython.rlib.rbigint import rbigint
from pycket.prims.input_output import build_path, bytes_to_path_element
from pycket.ast_vs_sexp import to_rpython_list
typ, pos = self.read_byte_no_eof(fasl_string, pos)
if typ == FASL_GRAPH_DEF_TYPE:
position, pos = self.read_fasl_integer(fasl_string, pos)
val, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
if position >= self.GLOBAL_SHARED_COUNT:
raise Exception("fasl: bad graph index")
self.SHARED[position] = val
return val, pos
elif typ == FASL_GRAPH_REF_TYPE:
position, pos = self.read_fasl_integer(fasl_string, pos)
if position >= self.GLOBAL_SHARED_COUNT:
raise Exception("fasl: bad graph index")
return self.SHARED[position], pos
elif typ == FASL_FALSE_TYPE:
return v.w_false, pos
elif typ == FASL_TRUE_TYPE:
return v.w_true, pos
elif typ == FASL_NULL_TYPE:
return v.w_null, pos
elif typ == FASL_VOID_TYPE:
return v.w_void, pos
elif typ == FASL_EOF_TYPE:
return v.eof_object, pos
elif typ == FASL_INTEGER_TYPE:
num, pos = self.read_fasl_integer(fasl_string, pos)
if isinstance(num, rbigint):
return v.W_Bignum(num), pos
return v.W_Fixnum(num), pos
elif typ == FASL_FLONUM_TYPE:
num_str, pos = self.read_bytes_exactly(fasl_string, pos, 8)
return float_bytes_to_real(list(num_str), v.w_false), pos
elif typ == FASL_SINGLE_FLONUM_TYPE:
num_str, pos = self.read_bytes_exactly(fasl_string, pos, 4)
real = float_bytes_to_real(list(num_str), v.w_false)
return real.arith_exact_inexact(), pos
elif typ == FASL_EXTFLONUM_TYPE:
bstr_len, pos = self.read_fasl_integer(fasl_string, pos)
num_str, pos = self.read_bytes_exactly(fasl_string, pos, bstr_len)
return _str2num(W_String.fromstr_utf8(num_str).as_str_utf8(), 10), pos
elif typ == FASL_RATIONAL_TYPE:
num, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
den, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
return v.W_Rational.make(num, den), pos
elif typ == FASL_COMPLEX_TYPE:
re, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
im, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
return v.W_Complex.from_real_pair(re, im), pos
elif typ == FASL_CHAR_TYPE:
_chr, pos = self.read_fasl_integer(fasl_string, pos)
return v.W_Character(unichr(_chr)), pos
elif typ == FASL_SYMBOL_TYPE:
sym_str, pos = self.read_fasl_string(fasl_string, pos)
return v.W_Symbol.make(sym_str), pos
elif typ == FASL_UNREADABLE_SYMBOL_TYPE:
sym_str, pos = self.read_fasl_string(fasl_string, pos)
return v.W_Symbol.make_unreadable(sym_str), pos
elif typ == FASL_UNINTERNED_SYMBOL_TYPE:
sym_str, pos = self.read_fasl_string(fasl_string, pos)
return v.W_Symbol(sym_str), pos
elif typ == FASL_KEYWORD_TYPE:
key_str, pos = self.read_fasl_string(fasl_string, pos)
return v.W_Keyword.make(key_str), pos
elif typ == FASL_STRING_TYPE:
str_str, pos = self.read_fasl_string(fasl_string, pos)
return W_String.make(str_str), pos
elif typ == FASL_IMMUTABLE_STRING_TYPE:
str_str, pos = self.read_fasl_string(fasl_string, pos)
return W_String.make(str_str).make_immutable(), pos
elif typ == FASL_BYTES_TYPE:
byts, pos = self.read_fasl_bytes(fasl_string, pos)
return v.W_Bytes.from_string(byts, immutable=False), pos
elif typ == FASL_IMMUTABLE_BYTES_TYPE:
byts, pos = self.read_fasl_bytes(fasl_string, pos)
return v.W_Bytes.from_string(byts), pos
elif typ == FASL_PATH_TYPE:
byts, pos = self.read_fasl_bytes(fasl_string, pos)
return v.W_Path(byts), pos
elif typ == FASL_RELATIVE_PATH_TYPE:
wrt_dir = self.current_relative_dir
p_w_lst, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
p_r_lst, _ = to_rpython_list(p_w_lst)
rel_elems = [bytes_to_path_element(p) if isinstance(p, v.W_Bytes) else p for p in p_r_lst]
if wrt_dir:
return build_path([wrt_dir] + rel_elems), pos
elif rel_elems == []:
return build_path([v.W_Symbol.make("same")]), pos
else:
return build_path(rel_elems), pos
elif typ == FASL_PREGEXP_TYPE:
str_str, pos = self.read_fasl_string(fasl_string, pos)
return W_PRegexp(str_str), pos
elif typ == FASL_REGEXP_TYPE:
str_str, pos = self.read_fasl_string(fasl_string, pos)
return W_Regexp(str_str), pos
elif typ == FASL_BYTE_PREGEXP:
str_str, pos = self.read_fasl_string(fasl_string, pos)
return W_BytePRegexp(str_str), pos
elif typ == FASL_BYTE_REGEXP_TYPE:
str_str, pos = self.read_fasl_string(fasl_string, pos)
return W_ByteRegexp(str_str), pos
elif typ == FASL_LIST_TYPE:
list_len, pos = self.read_fasl_integer(fasl_string, pos)
lst, pos = self.read_multi_into_rpython_list(fasl_string, pos, list_len)
return v.to_list(lst), pos
elif typ == FASL_PAIR_TYPE:
car, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
cdr, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
return v.W_Cons.make(car, cdr), pos
elif typ == FASL_LIST_STAR_TYPE:
list_len, pos = self.read_fasl_integer(fasl_string, pos)
# list_len is the length of the proper part
lst, pos = self.read_multi_into_rpython_list(fasl_string, pos, list_len)
# read the last element
return_list, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
for i in range(list_len-1, -1, -1):
return_list = v.W_Cons.make(lst[i], return_list)
return return_list, pos
elif typ == FASL_VECTOR_TYPE or typ == FASL_IMMUTABLE_VECTOR_TYPE:
vec_len, pos = self.read_fasl_integer(fasl_string, pos)
storage, pos = self.read_multi_into_rpython_list(fasl_string, pos, vec_len)
if typ == FASL_IMMUTABLE_VECTOR_TYPE:
return W_Vector.fromelements(storage, immutable=True), pos
return W_Vector.fromelements(storage), pos
elif typ == FASL_BOX_TYPE:
element, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
return v.W_MBox(element), pos
elif typ == FASL_IMMUTABLE_BOX_TYPE:
element, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
return v.W_IBox(element), pos
elif typ == FASL_PREFAB_TYPE:
key, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
length, pos = self.read_fasl_integer(fasl_string, pos)
vals, pos = self.read_multi_into_rpython_list(fasl_string, pos, length)
return W_Struct.make_prefab(key, vals), pos
elif typ == FASL_HASH_TYPE:
variant, pos = self.read_byte_no_eof(fasl_string, pos)
length, pos = self.read_fasl_integer(fasl_string, pos)
keys, vals, pos = self.read_multi_double_into_rpython_list(fasl_string, pos, length)
if variant == FASL_HASH_EQ_VARIANT:
return hash_simple.make_simple_mutable_table(hash_simple.W_EqMutableHashTable, keys, vals), pos
elif variant == FASL_HASH_EQV_VARIANT:
return hash_simple.make_simple_mutable_table(hash_simple.W_EqvMutableHashTable, keys, vals), pos
else: # variant == FASL_HASH_EQUAL_VARIANT:
return W_EqualHashTable(keys, vals, immutable=False), pos
elif typ == FASL_IMMUTABLE_HASH_TYPE:
variant, pos = self.read_byte_no_eof(fasl_string, pos)
length, pos = self.read_fasl_integer(fasl_string, pos)
keys, vals, pos = self.read_multi_double_into_rpython_list(fasl_string, pos, length)
if variant == FASL_HASH_EQ_VARIANT:
return hash_simple.make_simple_immutable_table(hash_simple.W_EqImmutableHashTable, keys, vals), pos
elif variant == FASL_HASH_EQV_VARIANT:
return hash_simple.make_simple_immutable_table(hash_simple.W_EqvImmutableHashTable, keys, vals), pos
else: # variant == FASL_HASH_EQUAL_VARIANT:
return W_EqualHashTable(keys, vals, immutable=True), pos
elif typ == FASL_SRCLOC:
# difficult to create an instance of srcloc struct so defer that to the runtime
source, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
line, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
column, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
position, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
span, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
return v.to_list([v.W_Symbol.make("srcloc"), source, line, column, position, span]), pos
else:
if typ >= FASL_SMALL_INTEGER_START:
return v.W_Fixnum((typ-FASL_SMALL_INTEGER_START)+FASL_LOWEST_SMALL_INTEGER), pos
else:
raise Exception("unrecognized fasl tag : %s" % typ)
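# A brief usage sketch (assumption: this runs inside the pycket/RPython
# runtime that provides the `pycket` modules imported lazily above;
# "compiled.fasl" is a hypothetical file name):
#
#   fasl = Fasl(relative_dir=None)
#   sexp = fasl.to_sexp_from_file("compiled.fasl")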
|
StarcoderdataPython
|
1723788
|
<reponame>Saruni0305/oop-work-2
import requests
import json
import re
import hashlib
from django.conf import settings
MAILCHIMP_API_KEY = getattr(settings, 'MAILCHIMP_API_KEY', None)
if MAILCHIMP_API_KEY is None:
raise NotImplementedError("MAILCHIMP_API_KEY must be set in the settings")
MAILCHIMP_DATA_CENTER = getattr(settings, 'MAILCHIMP_DATA_CENTER', None)
if MAILCHIMP_DATA_CENTER is None:
raise NotImplementedError("MAILCHIMP_DATA_CENTER must be set in the settings, something like us17")
MAILCHIMP_EMAIL_LIST_ID = getattr(settings, 'MAILCHIMP_EMAIL_LIST_ID', None)
if MAILCHIMP_EMAIL_LIST_ID is None:
raise NotImplementedError("MAILCHIMP_EMAIL_LIST_ID must be set in the settings, something like us17")
def check_email(email):
if not re.match(r'.+@.+\..+', email):
        raise ValueError('String passed is not a valid email address')
return email
def get_subscriber_hash(member_email):
"""
    This makes an email hash, which is required by the Mailchimp API
"""
# .encode() returns a bytes representation of the Unicode string
member_email = check_email(member_email).lower().encode()
m = hashlib.md5(member_email)
return m.hexdigest()
class Mailchimp(object):
""" Class for handling mailchimp API calls: known as an API Wrapper
See docs: https://developer.mailchimp.com/documentation/mailchimp/reference/lists/members/
"""
def __init__(self):
super(Mailchimp, self).__init__()
self.key = MAILCHIMP_API_KEY
self.api_url = 'https://{dc}.api.mailchimp.com/3.0'.format(dc=MAILCHIMP_DATA_CENTER)
self.list_id = MAILCHIMP_EMAIL_LIST_ID
self.list_endpoint = '{api_url}/lists/{list_id}'.format(
api_url=self.api_url,
list_id=self.list_id
)
def get_members_endpoint(self):
return self.list_endpoint + '/members/'
def check_valid_status(self, status):
# pending means user did not confirm his email id
# cleaned means email bounced and has been removed from the list
choices = ['subscribed', 'unsubscribed', 'cleaned', 'pending']
if status not in choices:
raise ValueError('Not a valid email status choice')
return status
def check_subscription_status(self, email):
# Things needed: endpoint(url), method, data, auth
hashed_email = get_subscriber_hash(email)
# it is unsafe to send data in url directly, so the api uses the hashed form for security
endpoint = self.get_members_endpoint() + '/' + hashed_email
r = requests.get(endpoint, auth=("", self.key))
# we send the status_code in order to check for errors in django while making the call
return r.status_code, r.json()
def change_subscription_status(self, email, status='unsubscribed'):
# Things needed: endpoint(url), method, data, auth
hashed_email = get_subscriber_hash(email)
# it is unsafe to send data in url directly, so the api uses the hashed form for security
endpoint = self.get_members_endpoint() + '/' + hashed_email
data = {
'email_address': email,
'status': self.check_valid_status(status)
}
r = requests.put(endpoint, auth=("", self.key), data=json.dumps(data))
return r.status_code, r.json()
def subscribe(self, email):
return self.change_subscription_status(email, status='subscribed')
def unsubscribe(self, email):
return self.change_subscription_status(email, status='unsubscribed')
def pending(self, email):
return self.change_subscription_status(email, status='pending')
def add_email(self, email):
# Things needed: endpoint(url), method, data, auth
""" The PUT method in change_subscription_status can add an email to the list directly if
        it does not exist, so this function has become redundant. """
status = self.check_valid_status('subscribed')
data = {
'email_address': email,
'status': status
}
endpoint = self.get_members_endpoint()
r = requests.post(endpoint, auth=("", self.key), data=json.dumps(data))
return r.status_code, r.json()
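# A short usage sketch (assumes the MAILCHIMP_* settings are configured;
# the address below is a placeholder):
#
#   client = Mailchimp()
#   code, data = client.subscribe("user@example.com")
#   code, data = client.check_subscription_status("user@example.com")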
|
StarcoderdataPython
|
3306082
|
<filename>fs_image/compiler/tests/test_image_layer.py
#!/usr/bin/env python3
import os
import sys
import unittest
from contextlib import contextmanager
from artifacts_dir import ensure_per_repo_artifacts_dir_exists
from btrfs_diff.tests.render_subvols import render_sendstream
from btrfs_diff.tests.demo_sendstreams_expected import render_demo_subvols
from subvol_utils import Subvol
from volume_for_repo import get_volume_for_current_repo
from ..subvolume_on_disk import SubvolumeOnDisk
TARGET_ENV_VAR_PREFIX = 'test_image_layer_path_to_'
TARGET_TO_PATH = {
target[len(TARGET_ENV_VAR_PREFIX):]: path
for target, path in os.environ.items()
if target.startswith(TARGET_ENV_VAR_PREFIX)
}
class ImageLayerTestCase(unittest.TestCase):
def setUp(self):
self.subvolumes_dir = os.path.join(
get_volume_for_current_repo(
1e8, ensure_per_repo_artifacts_dir_exists(sys.argv[0]),
),
'targets',
)
# More output for easier debugging
unittest.util._MAX_LENGTH = 12345
self.maxDiff = 12345
@contextmanager
def target_subvol(self, target):
with self.subTest(target):
with open(TARGET_TO_PATH[target]) as infile:
yield SubvolumeOnDisk.from_json_file(
infile, self.subvolumes_dir,
)
def _check_hello(self, subvol_path):
with open(os.path.join(subvol_path, 'hello_world')) as hello:
self.assertEqual('', hello.read())
def _check_parent(self, subvol_path):
self._check_hello(subvol_path)
# :parent_layer
for path in [
'usr/share/rpm_test/hello_world.tar',
'foo/bar/even_more_hello_world.tar',
]:
self.assertTrue(
os.path.isfile(os.path.join(subvol_path, path)),
path,
)
# :feature_dirs not tested by :parent_layer
self.assertTrue(
os.path.isdir(os.path.join(subvol_path, 'foo/bar/baz')),
)
def _check_child(self, subvol_path):
self._check_parent(subvol_path)
for path in [
# :feature_tar_and_rpms
'foo/borf/hello_world',
'foo/hello_world',
'usr/share/rpm_test/mice.txt',
# :child_layer
'foo/extracted_hello/hello_world',
'foo/more_extracted_hello/hello_world',
]:
self.assertTrue(os.path.isfile(os.path.join(subvol_path, path)))
for path in [
# :feature_tar_and_rpms ensures these are absent
'usr/share/rpm_test/carrot.txt',
'usr/share/rpm_test/milk.txt',
]:
self.assertFalse(os.path.exists(os.path.join(subvol_path, path)))
def test_image_layer_targets(self):
# Future: replace these checks by a more comprehensive test of the
# image's data & metadata using our `btrfs_diff` library.
with self.target_subvol('hello_world_base') as sod:
self._check_hello(sod.subvolume_path())
with self.target_subvol('parent_layer') as sod:
self._check_parent(sod.subvolume_path())
# Cannot check this in `_check_parent`, since that gets called
# by `_check_child`, but the RPM gets removed in the child.
self.assertTrue(os.path.isfile(os.path.join(
sod.subvolume_path(), 'usr/share/rpm_test/carrot.txt',
)))
with self.target_subvol('child_layer') as sod:
self._check_child(sod.subvolume_path())
def test_layer_from_demo_sendstreams(self):
# `btrfs_diff.demo_sendstream` produces a subvolume send-stream with
# fairly thorough coverage of filesystem features. This test grabs
# that send-stream, receives it into an `image_layer`, and validates
# that the send-stream of the **received** volume has the same
# rendering as the original send-stream was supposed to have.
#
# In other words, besides testing `image_layer`'s `from_sendstream`,
# this is also a test of idempotence for btrfs send+receive.
#
# Notes:
# - `compiler/tests/TARGETS` explains why `mutate_ops` is not here.
# - Currently, `mutate_ops` also uses `--no-data`, which would
# break this test of idempotence.
for op in ['create_ops']:
with self.target_subvol(op) as sod:
self.assertEqual(
render_demo_subvols(**{op: True}),
render_sendstream(
Subvol(sod.subvolume_path(), already_exists=True)
.mark_readonly_and_get_sendstream(),
),
)
|
StarcoderdataPython
|
3254821
|
"""============================================================================
Player input
Register user input according to key binding
============================================================================"""
import pygame
from pygame import *
MOUSE_LEFT = 1
MOUSE_WHEEL = 2
MOUSE_RIGHT = 3
key_bind = {
pygame.K_a : 'left', # A
pygame.K_LEFT : 'left',
pygame.K_d : 'right', # D
pygame.K_RIGHT : 'right',
pygame.K_w : 'up', # A
pygame.K_UP : 'up',
    pygame.K_s : 'down', # S
pygame.K_DOWN : 'down',
pygame.K_SPACE : 'fire',
pygame.K_RETURN : 'fire',
}
mouse_bind = {
MOUSE_LEFT : 'fire',
MOUSE_RIGHT : 'fire',
}
fire_rate = 3
class PlayerInput:
def __init__(self, game_state):
self.game_state = game_state
self.game_state.tech_screen_on = False
self.reset()
def reset(self):
self.game_state.key = {}
for event, key in key_bind.items():
self.game_state.key[key] = False
for event, key in mouse_bind.items():
self.game_state.key[key] = False
self.game_state.mouse = ( 0,0 )
self.game_state.stop = False
self.game_state.suspend = False
def update(self):
event_list = pygame.event.get()
for event in event_list:
if event.type == pygame.QUIT:
self.game_state.stop = True
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
#self.suspend = not self.suspend
self.game_state.stop = True
if event.type == pygame.KEYDOWN and event.key == pygame.K_F3:
self.game_state.tech_screen_on = not self.game_state.tech_screen_on
if not self.game_state.suspend:
if event.type == pygame.MOUSEMOTION :
self.game_state.mouse = event.pos
if event.type == pygame.KEYDOWN and event.key in key_bind:
self.game_state.key[key_bind[event.key]] = True
elif event.type == pygame.KEYUP and event.key in key_bind:
self.game_state.key[key_bind[event.key]] = False
if event.type == pygame.MOUSEBUTTONDOWN and event.button in mouse_bind:
self.game_state.key[mouse_bind[event.button]] = True
elif event.type == pygame.MOUSEBUTTONUP and event.button in mouse_bind:
self.game_state.key[mouse_bind[event.button]] = False
|
StarcoderdataPython
|
12670
|
<gh_stars>0
"""Implementation of the unary-operator-replacement operator.
"""
import ast
from .operator import Operator
from ..util import build_mutations
# None indicates we want to delete the operator
OPERATORS = (ast.UAdd, ast.USub, ast.Invert, ast.Not, None)
def _to_ops(from_op):
"""
The sequence of operators which `from_op` could be mutated to.
"""
for to_op in OPERATORS:
if to_op and isinstance(from_op, ast.Not):
# 'not' can only be removed but not replaced with
# '+', '-' or '~' b/c that may lead to strange results
pass
elif isinstance(from_op, ast.UAdd) and (to_op is None):
# '+1' => '1' yields equivalent mutations
pass
else:
yield to_op
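# Concretely: _to_ops(ast.Not()) yields only None (the 'not' is deleted), while
# _to_ops(ast.UAdd()) yields every replacement operator but never None, since
# '+1' -> '1' would be an equivalent mutation.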
class MutateUnaryOperator(Operator):
"""An operator that modifies unary operators."""
def visit_UnaryOp(self, node): # pylint: disable=invalid-name
"""
http://greentreesnakes.readthedocs.io/en/latest/nodes.html#UnaryOp
"""
return self.visit_mutation_site(
node,
len(build_mutations([node.op], _to_ops)))
def mutate(self, node, idx):
"Perform the `idx`th mutation on node."
_, to_op = build_mutations([node.op], _to_ops)[idx]
if to_op:
node.op = to_op()
return node
return node.operand
|
StarcoderdataPython
|
1633220
|
<reponame>charutomo/SOC
import math
class Vector:
"""Base 2D Object.
Attributes
----------
x: float
The x coordinate
y: float
The y coordinate
"""
def __init__(self, _x, _y):
"""Constructor
Parameters
----------
_x: float
The x coordinate
_y: float
The y coordinate
"""
self.x = _x
self.y = _y
def __eq__(self, _other):
"""Equality Operator Override
Override uses math.isclose for both x and y coordinates
Parameters
----------
_other: Vector
The vector to compare to
Returns
-------
A boolean value indicating whether the two vectors are approximately equal.
Also returns False if _other is None
"""
        return _other is not None and math.isclose(self.x, _other.x) and math.isclose(self.y, _other.y)
def ToTuple(self):
"""Converts and returns the Vector as a Tuple."""
return (self.x, self.y)
def ToString(self):
"""Converts and returns the Vector as a String."""
return "(" + str(self.x) + ", " + str(self.y) + ")"
def Print(self):
"""Prints the Vector"""
print(self.ToString())
# Static Methods
@staticmethod
def Midpoint(_vectorA, _vectorB):
return Vector((_vectorA.x + _vectorB.x) / 2.0, (_vectorA.y + _vectorB.y) / 2.0)
@staticmethod
def EuclideanDistance(_vectorA, _vectorB):
"""Calculates the distance between 2 vectors
Parameters
----------
_vectorA: Vector
The first vector to calculate with.
_vectorB: Vector
The second vector to calculate with.
Returns
-------
Returns the distance as a float value.
"""
return ((_vectorA.x - _vectorB.x) ** 2 + (_vectorA.y - _vectorB.y) ** 2) ** (1/2)
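# A minimal usage sketch:
#     a, b = Vector(0.0, 0.0), Vector(3.0, 4.0)
#     Vector.EuclideanDistance(a, b)   # 5.0
#     Vector.Midpoint(a, b).ToTuple()  # (1.5, 2.0)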
|
StarcoderdataPython
|
3356973
|
import boto3
import glob
import gzip
import os
s3 = boto3.client('s3')
TPCH_TABLE_NAMES = ['customer', 'lineitem', 'nation',
'orders', 'part', 'partsupp', 'region', 'supplier']
def check_region(region):
if os.environ['AWS_REGION'] != region:
raise Exception(
f"Your stack is in {os.environ['AWS_REGION']} but the bucket is in {region}")
def copy_cf():
existing_files = glob.glob("/mnt/data/*")
print('existing_files:', existing_files)
bucket = 'cloudfuse-taxi-data'
for file_name in TPCH_TABLE_NAMES:
local_path = f'/mnt/data/{file_name}.tbl'
key = f'tpch/tbl-s1/{file_name}.tbl'
if local_path in existing_files:
print(f'{local_path} already exists')
continue
try:
s3.download_file(bucket, key, local_path)
except Exception as e:
print(e)
print(
f'Error getting object {key} from bucket {bucket}. Make sure they exist and your bucket is in the same region as this function.')
raise e
def copy_memsql():
existing_files = glob.glob("/mnt/data/**", recursive=True)
print('existing_files:', existing_files)
bucket = 'memsql-tpch-dataset'
for table_name in TPCH_TABLE_NAMES:
s3ls_res = s3.list_objects_v2(
Bucket=bucket,
Prefix=f'sf_100/{table_name}',
)
for (i, ls_key) in enumerate(s3ls_res['Contents']):
key = ls_key['Key']
if key.endswith('/'):
continue
partition_name = f'{table_name}/{i:03d}.tbl'
tmp_local_path = f'/mnt/data/tmp/{partition_name}'
local_path = f'/mnt/data/{partition_name}'
if local_path in existing_files:
print(f'{local_path} already exists')
continue
print(f'starting dl of: {key}')
obj = s3.get_object(
Bucket=bucket,
Key=key,
)
os.makedirs(os.path.dirname(tmp_local_path), exist_ok=True)
with open(tmp_local_path, 'wb') as f:
with gzip.GzipFile(fileobj=obj["Body"]) as gzipfile:
for chunk in gzipfile:
f.write(chunk)
os.makedirs(os.path.dirname(local_path), exist_ok=True)
os.rename(tmp_local_path, local_path)
print(f'{key} downloaded as {local_path}')
def lambda_handler(event, context):
bucket = event.get('bucket', 'memsql-tpch-dataset')
mode = event.get('mode', 'copy')
if mode == 'copy':
if bucket == 'memsql-tpch-dataset':
check_region('us-east-1')
copy_memsql()
elif bucket == 'cloudfuse-taxi-data':
check_region('us-east-2')
copy_cf()
else:
raise Exception(f'Unknown bucket: {bucket}')
elif mode == 'delete':
for filename in glob.glob("/mnt/data/**/*"):
try:
os.remove(filename)
print('deleting', filename)
            except OSError:
                print('skipping', filename)
elif mode == 'list':
for filename in glob.glob("/mnt/data/**/*"):
print(os.stat(filename).st_size, '-->', filename)
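# Example invocation events (illustrative, matching the keys read above):
#     {"mode": "copy", "bucket": "memsql-tpch-dataset"}
#     {"mode": "delete"}
#     {"mode": "list"}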
|
StarcoderdataPython
|
3342470
|
# sorting algorithm -> bubblesort
# About bubblesort: Best case O(n), Average O(n^2), Worst case O(n^2)
# @author unobatbayar
# Thanks to HackerRank's bubblesort tutorial
title = 'Welcome to Bubblesort Algorithm!'
print(title + '\n' + 'Enter unsorted data set: ')
user_input = input()
array = user_input.split()
def bubble_sort(array):
    unsorted = True
    unsorted_last = len(array) - 1
    while unsorted:
        unsorted = False
        for x in range(unsorted_last):
            if array[x] > array[x+1]:
                array[x], array[x+1] = array[x+1], array[x]
                unsorted = True
        unsorted_last -= 1  # the tail is sorted after each pass, so shrink the range
bubble_sort(array)
print(array)
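# Note: input() yields strings, so ['10', '2'] sorts lexicographically.
# A minimal sketch, assuming numeric input is intended:
#     array = [int(x) for x in user_input.split()]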
|
StarcoderdataPython
|
4838718
|
<gh_stars>0
import unittest
import torch
from torch.utils.data import DataLoader
from datasets.utils import set_progress_bar_enabled
import warnings
from trigger_attack.trigger import Trigger
from trigger_attack.preprocessing import ner as nerPreprocess
from trigger_attack.preprocessing import sc as scPreprocess
from trigger_attack.preprocessing import qa as qaPreprocess
from trigger_attack.loss_functions import sc as scLoss
from trigger_attack.loss_functions import ner as nerLoss
from trigger_attack.loss_functions import qa as qaLoss
import tools
import constants
class LossTest(unittest.TestCase):
def setUp(self):
if not hasattr(self, 'expected_losses'):
self.skipTest('parent class')
warnings.filterwarnings("ignore")
set_progress_bar_enabled(False)
dataset = tools.load_dataset(self.testing_data_paths.suspicious_model_filepath)
self.trigger_models = tools.load_trigger_models(
self.testing_data_paths.suspicious_model_filepath,
self.testing_data_paths.clean_models_filepaths)
tokenizer = tools.load_tokenizer(self.testing_data_paths.tokenizer_filepath)
self.preprocessor = self.preprocessor_class(
dataset, self.trigger, self.trigger_models, tokenizer)
dataset = self.preprocessor.preprocess_data()
self.dataloader = DataLoader(dataset, batch_size=self.batch_size)
self.loss_fn = self.loss_class()
@torch.no_grad()
def test_suspicious_loss(self):
for batch in self.dataloader:
device = self.trigger_models.device
if 'valid_mask' in batch:
batch['valid_mask'] = batch['valid_mask'].to(device, non_blocking=True)
batch['baseline_probabilities'] = batch['baseline_probabilities'].to(device, non_blocking=True)
all_logits = self.trigger_models(batch)
loss = self.loss_fn._calculate_suspicious_loss(all_logits, batch, self.target_label)
break
self.assertAlmostEqual(self.expected_losses['suspicious'], loss.item(), places=3)
@torch.no_grad()
def test_clean_loss(self):
for batch in self.dataloader:
device = self.trigger_models.device
if 'valid_mask' in batch:
batch['valid_mask'] = batch['valid_mask'].to(device, non_blocking=True)
batch['baseline_probabilities'] = batch['baseline_probabilities'].to(device, non_blocking=True)
all_logits = self.trigger_models(batch)
loss = self.loss_fn._calculate_clean_loss(all_logits, batch, self.target_label)
break
self.assertAlmostEqual(self.expected_losses['clean'], loss.item(), places=3)
@torch.no_grad()
def test_loss(self):
for batch in self.dataloader:
device = self.trigger_models.device
if 'valid_mask' in batch:
batch['valid_mask'] = batch['valid_mask'].to(device, non_blocking=True)
batch['baseline_probabilities'] = batch['baseline_probabilities'].to(device, non_blocking=True)
all_logits = self.trigger_models(batch)
loss = self.loss_fn.calculate_loss(all_logits, batch, self.target_label)
break
self.assertAlmostEqual(self.expected_losses['total'], loss.item(), places=3)
class TestSCLosses(LossTest):
testing_data_paths = constants.sc_paths
source_label = [0]
target_label = [1]
trigger = Trigger(
torch.tensor([1]*10), location='start', source_labels=source_label)
batch_size = 16
expected_losses = {
'suspicious': 4.9418,
'clean': 0.0098,
'total': 4.9516
}
preprocessor_class = scPreprocess.SCDatasetPreprocessor
loss_class = scLoss.SCLoss
class TestNERLosses(LossTest):
testing_data_paths = constants.ner_paths
source_label = [3, 4]
target_label = [5, 6]
trigger = Trigger(torch.tensor(
[1]*10), 'both', source_labels=source_label)
batch_size = 16
expected_losses = {
'clean': 0.0,
'suspicious': 3.6582,
'total': 3.6582
}
preprocessor_class = nerPreprocess.NERDatasetPreprocessor
loss_class = nerLoss.NERLoss
class TestQALosses(LossTest):
testing_data_paths = constants.qa_paths
target_label = None
trigger = Trigger(
torch.tensor([1]*10), location='both', source_labels=None)
batch_size = 16
expected_losses = {
'suspicious': 12.4365,
'clean': 2.5116e-05,
'total': 12.436
}
preprocessor_class = qaPreprocess.QADatasetPreprocessor
loss_class = qaLoss.QALoss
if __name__ == '__main__':
unittest.main(verbosity=3)
|
StarcoderdataPython
|
3350912
|
<filename>ANSA_AUTOMESHER/GenerateGBMR.py
import ansa
import os
from ansa import *
@session.defbutton("QFSAE_TOOLS","GenerateGBMR")
def GenerateGBMR():
#Determine the file name from the database name as well as its path
current_model = ansa.base.DataBaseName()
model_path = current_model.split("/")
file_base = model_path[-1].split('.')[0]
base_path = ''
i = 0
    # `x in 'CAD'` is a substring test (e.g. 'CA' would match), so compare against a tuple
    while model_path[i] not in ('CAD', 'CFD'):
        base_path = base_path + model_path[i] + os.sep
        i += 1
    base_path = base_path + 'CFD' + os.sep + 'IMPORT' + os.sep
    # raw strings avoid accidental escape sequences in Windows paths
    if os.path.exists(r'D:\Onedrive'):
        mesh.ReadMeshParams(r'D:\Onedrive\Documents\Queens\Formula SAE\Q22\Aero\CFD\MACROS\AUTOMESHER\setToTria.ansa_mpar')
    else:
        mesh.ReadMeshParams(r'E:\Onedrive\Documents\Queens\Formula SAE\Q22\Aero\CFD\MACROS\AUTOMESHER\setToTria.ansa_mpar')
ents = base.CollectEntities(base.CurrentDeck(), None, search_types ="__PROPERTIES__" , filter_visible = True)
mesh.Wrap(ents, 25.0 , 'out', 'smooth', 10, 'no', 10.0, -1, 'wrap1', 10)
base.DeleteEntity(ents,True,True)
base.OutputStereoLithography(base_path + file_base + '.stl', 'all', 'ascii')
os.remove(current_model + '.log')
if __name__ == '__main__':
GenerateGBMR ()
|
StarcoderdataPython
|
3376281
|
# Copyright (c) IBM Corporation 2020
# Apache License, Version 2.0 (see https://opensource.org/licenses/Apache-2.0)
import re
import sys
from ansible_doc_extractor.cli import main
from ansible.utils.collection_loader import AnsibleCollectionLoader
if __name__ == '__main__':
# allow doc-extractor to import code from collections so doc_fragment plugins work
sys.meta_path.insert(0, AnsibleCollectionLoader())
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
StarcoderdataPython
|
3375691
|
<filename>gui.py
import tkinter as tk
import threading
from tkinter import scrolledtext
from tkinter import messagebox
ENCODING = 'utf-8'
class GUI(threading.Thread):
def __init__(self, client):
super().__init__(daemon=False, target=self.run)
self.font = ('Helvetica', 13)
self.client = client
self.login_window = None
self.main_window = None
def run(self):
self.login_window = LoginWindow(self, self.font)
self.main_window = ChatWindow(self, self.font)
self.notify_server(self.login_window.login, 'login')
self.main_window.run()
@staticmethod
def display_alert(message):
"""Display alert box"""
messagebox.showinfo('Error', message)
def update_login_list(self, active_users):
"""Update login list in main window with list of users"""
self.main_window.update_login_list(active_users)
def display_message(self, message):
"""Display message in ChatWindow"""
self.main_window.display_message(message)
def send_message(self, message):
"""Enqueue message in client's queue"""
self.client.queue.put(message)
def set_target(self, target):
"""Set target for messages"""
self.client.target = target
def notify_server(self, message, action):
"""Notify server after action was performed"""
data = action + ";" + message
data = data.encode(ENCODING)
self.client.notify_server(data, action)
def login(self, login):
self.client.notify_server(login, 'login')
def logout(self, logout):
self.client.notify_server(logout, 'logout')
class Window(object):
def __init__(self, title, font):
self.root = tk.Tk()
self.title = title
self.root.title(title)
self.font = font
class LoginWindow(Window):
def __init__(self, gui, font):
super().__init__("Login", font)
self.gui = gui
self.label = None
self.entry = None
self.button = None
self.login = None
self.build_window()
self.run()
def build_window(self):
"""Build login window, , set widgets positioning and event bindings"""
self.label = tk.Label(self.root, text='Enter your login', width=20, font=self.font)
self.label.pack(side=tk.LEFT, expand=tk.YES)
self.entry = tk.Entry(self.root, width=20, font=self.font)
self.entry.focus_set()
self.entry.pack(side=tk.LEFT)
self.entry.bind('<Return>', self.get_login_event)
self.button = tk.Button(self.root, text='Login', font=self.font)
self.button.pack(side=tk.LEFT)
self.button.bind('<Button-1>', self.get_login_event)
def run(self):
"""Handle login window actions"""
self.root.mainloop()
self.root.destroy()
def get_login_event(self, event):
"""Get login from login box and close login window"""
self.login = self.entry.get()
self.root.quit()
class ChatWindow(Window):
def __init__(self, gui, font):
super().__init__("Python Chat", font)
self.gui = gui
self.messages_list = None
self.logins_list = None
self.entry = None
self.send_button = None
self.exit_button = None
self.lock = threading.RLock()
self.target = ''
self.login = self.gui.login_window.login
self.build_window()
def build_window(self):
"""Build chat window, set widgets positioning and event bindings"""
# Size config
self.root.geometry('750x500')
self.root.minsize(600, 400)
# Frames config
main_frame = tk.Frame(self.root)
main_frame.grid(row=0, column=0, sticky=tk.N + tk.S + tk.W + tk.E)
self.root.rowconfigure(0, weight=1)
self.root.columnconfigure(0, weight=1)
# List of messages
frame00 = tk.Frame(main_frame)
frame00.grid(column=0, row=0, rowspan=2, sticky=tk.N + tk.S + tk.W + tk.E)
# List of logins
frame01 = tk.Frame(main_frame)
frame01.grid(column=1, row=0, rowspan=3, sticky=tk.N + tk.S + tk.W + tk.E)
# Message entry
frame02 = tk.Frame(main_frame)
frame02.grid(column=0, row=2, columnspan=1, sticky=tk.N + tk.S + tk.W + tk.E)
# Buttons
frame03 = tk.Frame(main_frame)
frame03.grid(column=0, row=3, columnspan=2, sticky=tk.N + tk.S + tk.W + tk.E)
main_frame.rowconfigure(0, weight=1)
main_frame.rowconfigure(1, weight=1)
main_frame.rowconfigure(2, weight=8)
main_frame.columnconfigure(0, weight=1)
main_frame.columnconfigure(1, weight=1)
# ScrolledText widget for displaying messages
self.messages_list = scrolledtext.ScrolledText(frame00, wrap='word', font=self.font)
self.messages_list.insert(tk.END, 'Welcome to Python Chat\n')
self.messages_list.configure(state='disabled')
# Listbox widget for displaying active users and selecting them
self.logins_list = tk.Listbox(frame01, selectmode=tk.SINGLE, font=self.font,
exportselection=False)
self.logins_list.bind('<<ListboxSelect>>', self.selected_login_event)
# Entry widget for typing messages in
self.entry = tk.Text(frame02, font=self.font)
self.entry.focus_set()
self.entry.bind('<Return>', self.send_entry_event)
# Button widget for sending messages
self.send_button = tk.Button(frame03, text='Send', font=self.font)
self.send_button.bind('<Button-1>', self.send_entry_event)
# Button for exiting
self.exit_button = tk.Button(frame03, text='Exit', font=self.font)
self.exit_button.bind('<Button-1>', self.exit_event)
# Positioning widgets in frame
self.messages_list.pack(fill=tk.BOTH, expand=tk.YES)
self.logins_list.pack(fill=tk.BOTH, expand=tk.YES)
self.entry.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
self.send_button.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
self.exit_button.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
# Protocol for closing window using 'x' button
self.root.protocol("WM_DELETE_WINDOW", self.on_closing_event)
def run(self):
"""Handle chat window actions"""
self.root.mainloop()
self.root.destroy()
def selected_login_event(self, event):
"""Set as target currently selected login on login list"""
target = self.logins_list.get(self.logins_list.curselection())
self.target = target
self.gui.set_target(target)
def send_entry_event(self, event):
"""Send message from entry field to target"""
text = self.entry.get(1.0, tk.END)
if text != '\n':
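            # Wire format expected by the server (assumed from context): "msg;<sender>;<target>;<text>"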
message = 'msg;' + self.login + ';' + self.target + ';' + text[:-1]
print(message)
self.gui.send_message(message.encode(ENCODING))
self.entry.mark_set(tk.INSERT, 1.0)
self.entry.delete(1.0, tk.END)
self.entry.focus_set()
else:
messagebox.showinfo('Warning', 'You must enter non-empty message')
with self.lock:
self.messages_list.configure(state='normal')
if text != '\n':
self.messages_list.insert(tk.END, text)
self.messages_list.configure(state='disabled')
self.messages_list.see(tk.END)
return 'break'
def exit_event(self, event):
"""Send logout message and quit app when "Exit" pressed"""
self.gui.notify_server(self.login, 'logout')
self.root.quit()
def on_closing_event(self):
"""Exit window when 'x' button is pressed"""
self.exit_event(None)
def display_message(self, message):
"""Display message in ScrolledText widget"""
with self.lock:
self.messages_list.configure(state='normal')
self.messages_list.insert(tk.END, message)
self.messages_list.configure(state='disabled')
self.messages_list.see(tk.END)
def update_login_list(self, active_users):
"""Update listbox with list of active users"""
self.logins_list.delete(0, tk.END)
for user in active_users:
self.logins_list.insert(tk.END, user)
self.logins_list.select_set(0)
self.target = self.logins_list.get(self.logins_list.curselection())
|
StarcoderdataPython
|
3205063
|
from django.contrib import admin
from .models import Prpdutos,Cliente
class Produtoadmin(admin.ModelAdmin):
list_display = ('nome','preco','estoque')
admin.site.register(Prpdutos,Produtoadmin)
admin.site.register(Cliente)
|
StarcoderdataPython
|
3278389
|
<filename>py/py_0190_maximising_a_weighted_product.py<gh_stars>0
# Solution of;
# Project Euler Problem 190: Maximising a weighted product
# https://projecteuler.net/problem=190
#
# Let S_m = (x_1, x_2, . . . , x_m) be the m-tuple of positive real numbers
# with x_1 + x_2 + . . . + x_m = m for which P_m = x_1 * x_2^2 * . . . * x_m^m
# is maximised. For example, it can be verified that [P_10] = 4112 ([ ] is the
# integer part function). Find Σ[P_m] for 2 ≤ m ≤ 15.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
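# A minimal closed-form sketch (not part of the timed harness): maximising
# sum(k*ln(x_k)) subject to sum(x_k) = m via Lagrange multipliers gives
# x_k = 2k/(m+1), hence P_m = prod((2k/(m+1))**k).
def weighted_product_sum(lo=2, hi=15):
    total = 0
    for m in range(lo, hi + 1):
        p = 1.0
        for k in range(1, m + 1):
            p *= (2 * k / (m + 1)) ** k
        total += int(p)  # [P_m], the integer part
    return total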
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 190
timed.caller(dummy, n, i, prob_id)
|
StarcoderdataPython
|
3324066
|
<reponame>viniciusd/DCO1008---Digital-Signal-Processing<gh_stars>0
import numpy as np
from scipy import fftpack
class Fft:
def __init__(self, x, *, sample_rate=None, padded=False):
if sample_rate is None:
raise ValueError('You must determine the sample rate')
fs = sample_rate
if padded:
padding_to = int(2**np.ceil(np.log2(len(x))))
x = np.pad(x, (0, padding_to-len(x)), 'constant')
        n = len(x)
        X = fftpack.fft(x)
        self.hz = fftpack.fftshift(fftpack.fftfreq(n, 1/fs))
        # shift magnitude/phase so they align with the shifted frequency axis
        self.abs = fftpack.fftshift(np.abs(X))
        self.phase = fftpack.fftshift(np.angle(X))
        self.values = X
        self.samples = n
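# A minimal usage sketch (hypothetical signal at a 1 kHz sample rate):
#     t = np.arange(0, 1, 1/1000)
#     spec = Fft(np.sin(2*np.pi*50*t), sample_rate=1000, padded=True)
#     peak_hz = abs(spec.hz[np.argmax(spec.abs)])  # ~50.0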
|
StarcoderdataPython
|
1779150
|
<filename>api/models/gcd/series.py
from django.db import models
from api.models.gcd.country import GCDCountry
from api.models.gcd.language import GCDLanguage
from api.models.gcd.image import GCDImage
from api.models.gcd.publisher import GCDPublisher
class GCDSeries(models.Model):
class Meta:
app_label = 'api'
ordering = ['sort_name', 'year_began']
db_table = 'gcd_series'
# Core series fields.
series_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255, db_index=True)
sort_name = models.CharField(max_length=255, db_index=True)
# The "format" field is a legacy field that is being split into
# color, dimensions, paper_stock, binding, and publishing_format
format = models.CharField(max_length=255, default=u'')
color = models.CharField(max_length=255, default=u'')
dimensions = models.CharField(max_length=255, default=u'')
paper_stock = models.CharField(max_length=255, default=u'')
binding = models.CharField(max_length=255, default=u'')
publishing_format = models.CharField(max_length=255, default=u'')
tracking_notes = models.TextField(null=True, blank=True)
notes = models.TextField(null=True, blank=True)
publication_notes = models.TextField(null=True, blank=True)
keywords = models.TextField(null=True, blank=True)
year_began = models.IntegerField(db_index=True)
year_ended = models.IntegerField(null=True, default=0, blank=True)
year_began_uncertain = models.BooleanField(default=False, blank=True)
year_ended_uncertain = models.BooleanField(default=False, blank=True)
publication_dates = models.CharField(max_length=255)
# Fields for handling the presence of certain issue fields
has_barcode = models.BooleanField(default=False)
has_indicia_frequency = models.BooleanField(default=False)
has_isbn = models.BooleanField(default=False)
has_issue_title = models.BooleanField(default=False)
has_volume = models.BooleanField(default=False)
has_rating = models.BooleanField(default=False)
is_current = models.BooleanField(default=False)
is_comics_publication = models.BooleanField(default=False)
is_singleton = models.BooleanField(default=False)
issue_count = models.IntegerField(null=True, default=0, blank=True)
# Fields related to cover image galleries.
has_gallery = models.BooleanField(default=False, db_index=True)
# Fields related to indexing activities.
# Only "reserved" is in active use. "open_reserve" is a legacy field
# used only by migration scripts.
reserved = models.BooleanField(default=False, db_index=True)
open_reserve = models.IntegerField(null=True)
# Fields related to change management.
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
deleted = models.BooleanField(default=False, db_index=True)
# Country and Language info.
country = models.ForeignKey(
GCDCountry,
blank=True,
null=True
)
language = models.ForeignKey(
GCDLanguage,
blank=True,
null=True
)
# Cover
cover = models.ImageField(
upload_to='cover/series',
null=True,
blank=True
)
# Fields related to the publishers table.
    publication_type_id = models.IntegerField(null=True, blank=True)
publisher = models.ForeignKey(GCDPublisher)
images = models.ManyToManyField(GCDImage, blank=True)
# Put them in here to simplify REST Framework
publisher_name = models.CharField(max_length=255, db_index=True)
def _date_uncertain(self, flag):
return u' ?' if flag else u''
def __str__(self):
return '%s (%s%s series)' % (self.name, self.year_began, self._date_uncertain(self.year_began_uncertain))
|
StarcoderdataPython
|
1634277
|
from pathlib import Path
import argparse
import json
import glob
import sys
from matplotlib import pyplot as plt
import numpy as np
def roc_graphs(fprs, tprs, names, aucs, savename, minx=0.85):
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
plt.figure() # figsize=(10, 10)
ax = plt.axes(xscale='log', xlim=[1e-4, 1.0], ylim=[minx-0.05, 1])
for i, (fpr, tpr, name, auc) in enumerate(zip(fprs, tprs, names, aucs)):
fpr = np.flipud(fpr)
tpr = np.flipud(tpr)
auc *= 100
ax.plot(fpr, tpr, color=colors[i], lw=2,
label=f'{name} (AUC: {auc:.2f}%)')
plt.grid(b=True, which='major', axis='x',
color='#666666', linestyle='dashed', alpha=0.6)
plt.grid(b=True, which='minor', axis='x',
color='#666666', linestyle='dotted', alpha=0.4)
plt.grid(b=True, which='major', axis='y',
color='#999999', linestyle='solid', alpha=0.1)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
ax.legend(loc='lower right', fontsize=10,
fancybox=True).get_frame().set_alpha(0.5)
plt.savefig(savename + '_roc.pdf', bbox_inches='tight')
def acc_plot(accs, names, savename):
plt.figure() # figsize=(8, 3)
accs = np.array(accs) * 100
minacc = min(accs)
maxacc = max(accs)
ax = plt.axes(xlim=[minacc-0.5, maxacc+0.5])
bars = ax.barh(names, accs)
plt.grid(b=True, which='major', axis='x',
color='#666666', linestyle='dashed', alpha=0.6)
plt.title('Thresholded Accurracy')
plt.xlabel('Accuracy (%)\nHigher is better')
for name, acc in zip(names, accs):
plt.text(s=f'{acc:.2f}%', x=acc-0.175, y=name, color="r",
verticalalignment="center", size=9)
bars[np.argmax(accs)].set_color('green')
plt.tight_layout()
plt.savefig(savename + '_acc.pdf')
def inftime_plot(inftimes, names, savename):
plt.figure() # figsize=(8, 3)
inftimes = np.array(inftimes) * 1000
mintime = np.min(inftimes)
maxtime = np.max(inftimes)
ax = plt.axes(xlim=[mintime-5, maxtime+1])
bars = ax.barh(names, inftimes)
plt.grid(b=True, which='major', axis='x',
color='#666666', linestyle='dashed', alpha=0.6)
plt.title('Inference time')
plt.xlabel('Inference time (ms)\nLower is better')
for name, time in zip(names, inftimes):
plt.text(s=f'{time:.2f}ms', x=time-1.75, y=name, color="r",
verticalalignment="center", size=9)
bars[np.argmin(inftimes)].set_color('green')
plt.tight_layout()
plt.savefig(savename + '_time.pdf')
def tpr_at_fpr_plot(tpr_at_fprs, names, savename):
plt.figure() # figsize=(8, 3)
tpr_at_fprs = np.array(tpr_at_fprs)
minval = np.min(tpr_at_fprs)
maxval = np.max(tpr_at_fprs)
ax = plt.axes(xlim=[minval-0.05, maxval+0.05])
bars = ax.barh(names, tpr_at_fprs)
plt.grid(b=True, which='major', axis='x',
color='#666666', linestyle='dashed', alpha=0.6)
plt.title('Verification TAR (@FAR=1e-4)')
plt.xlabel('TAR (@FAR=1e-4)')
for name, val in zip(names, tpr_at_fprs):
plt.text(s=f'{val:.4f}', x=val-0.015, y=name, color="r",
verticalalignment="center", size=9)
bars[np.argmax(tpr_at_fprs)].set_color('green')
plt.tight_layout()
plt.savefig(savename + '_TAR.pdf')
def main():
parser = argparse.ArgumentParser(
description="Compare evaluation results",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--src", "-s", type=str, required=True,
help="path to dir with evaluation .json results")
parser.add_argument("--name", "-n", type=str, required=True,
help="name for output file")
args = parser.parse_args()
fprs, tprs, accs, theshs, aucs, inftimes = [], [], [], [], [], []
names, tpr_at_fprs = [], []
min_tpr = 1.
src_path = Path(args.src)
jpaths = glob.glob(str(src_path / '*.json'))
if len(jpaths) > 7:
print('Cannot compare more than 7!')
sys.exit()
for jpath in jpaths:
with open(jpath, 'r') as jfile:
jsondata = json.load(jfile)
name = Path(jpath).stem
names.append(name)
fprs.append(jsondata['roc']['fpr'])
tprs.append(jsondata['roc']['tpr'])
kfold_acc = jsondata['kfold']['acc']
roc_acc = jsondata['roc']['acc']
if kfold_acc > roc_acc:
accs.append(kfold_acc)
theshs.append(jsondata['kfold']['th'])
else:
accs.append(roc_acc)
theshs.append(jsondata['roc']['th'])
aucs.append(jsondata['roc']['auc'])
inftimes.append(jsondata['inference_time'])
tpr_at_fprs.append(jsondata['roc']['tpr_at_fpr']['0.0001'])
min_tpr = min(jsondata['roc']['tpr_at_fpr']['1e-06'], min_tpr)
Path(args.name).parent.mkdir(parents=True, exist_ok=True)
roc_graphs(fprs, tprs, names, aucs, args.name, min_tpr)
acc_plot(accs, names, args.name)
tpr_at_fpr_plot(tpr_at_fprs, names, args.name)
inftime_plot(inftimes, names, args.name)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3907
|
# 2. Repeat Strings
# Write a Program That Reads a list of strings. Each string is repeated N times, where N is the length of the string. Print the concatenated string.
strings = input().split()
output_string = ""
for string in strings:
N = len(string)
output_string += string * N
print(output_string)
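# Example: input "ab cde" -> "ababcdecdecde" ("ab" twice, "cde" three times).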
|
StarcoderdataPython
|
89903
|
import torch as th
import math
import numpy as np
from video_loader import VideoLoader
from torch.utils.data import DataLoader
import argparse
from preprocessing import Preprocessing
from random_sequence_shuffler import RandomSequenceSampler
import torch.nn.functional as F
from tqdm import tqdm
import os
import clip
parser = argparse.ArgumentParser(description='Easy video feature extractor')
parser.add_argument(
'--csv',
type=str,
help='input csv with video input path')
parser.add_argument('--batch_size', type=int, default=64,
help='batch size')
parser.add_argument(
'--clip_len', type=float, default=3/2,
help='decoding length of clip (in seconds)')
parser.add_argument(
'--overwrite', action='store_true',
help='allow overwrite output files')
parser.add_argument('--half_precision', type=int, default=1,
help='output half precision float')
parser.add_argument('--num_decoding_thread', type=int, default=4,
help='Num parallel thread for video decoding')
parser.add_argument('--model_version', type=str, default="ViT-B/32",
choices=["ViT-B/32", "RN50x4"],
                    help='CLIP model version to use')
args = parser.parse_args()
# model_version = "RN50x4" # "RN50x4" # "ViT-B/32"
output_feat_size = 512 if args.model_version == "ViT-B/32" else 640
dataset = VideoLoader(
args.csv,
framerate=1/args.clip_len,
size=224 if args.model_version == "ViT-B/32" else 288,
centercrop=True,
overwrite=args.overwrite,
model_version=args.model_version
)
n_dataset = len(dataset)
sampler = RandomSequenceSampler(n_dataset, 10)
loader = DataLoader(
dataset,
batch_size=1,
shuffle=False,
num_workers=args.num_decoding_thread,
sampler=sampler if n_dataset > 10 else None,
)
preprocess = Preprocessing()
model, _ = clip.load(args.model_version, device="cuda")
total_num_frames = 0
with th.no_grad():
for k, data in enumerate(tqdm(loader)):
if data == {}:
print("problematic video file")
continue
input_file = data['input'][0]
output_file = data['output'][0]
if args.model_version == "RN50x4":
output_file = output_file.replace(
"clip-vit_features", "clip-rn50x4_features")
if os.path.isfile(output_file):
# print(f'Video {input_file} already processed.')
continue
elif not os.path.isfile(input_file):
print(f'{input_file}, does not exist.\n')
elif len(data['video'].shape) > 4:
video = data['video'].squeeze(0)
if len(video.shape) == 4:
video = preprocess(video)
n_chunk = len(video)
features = th.cuda.FloatTensor(
n_chunk, output_feat_size).fill_(0)
n_iter = int(math.ceil(n_chunk / float(args.batch_size)))
for i in range(n_iter):
min_ind = i * args.batch_size
max_ind = (i + 1) * args.batch_size
video_batch = video[min_ind:max_ind].cuda()
batch_features = model.encode_image(video_batch)
features[min_ind:max_ind] = batch_features
features = features.cpu().numpy()
if args.half_precision:
features = features.astype('float16')
            total_num_frames += features.shape[0]
# safeguard output path before saving
dirname = os.path.dirname(output_file)
if not os.path.exists(dirname):
print(f"Output directory {dirname} does not exists, creating...")
os.makedirs(dirname)
np.savez(output_file, features=features)
else:
print(f'{input_file}, failed at ffprobe.\n')
print(f"Total number of frames: {totatl_num_frames}")
|
StarcoderdataPython
|
35396
|
<filename>formulario/urls.py
from django.conf.urls import include, url
from formulario import views
urlpatterns = [
url(r'^form/registro/(?P<pk>\d+)/$', views.RegistroSupraForm.as_view(), name='form_registro'),
url(r'^form/registro/create/$', views.RegistroCreateSupraForm.as_view(), name='form_crear_registro'),
url(r'^list/campo/$', views.CampoListView.as_view(), name='campo_list'),
]
|
StarcoderdataPython
|
1659994
|
"""
Balanced strings are those who have equal quantity of 'L' and 'R' characters.
Given a balanced string s split it in the maximum amount of balanced strings.
Return the maximum amount of splitted balanced strings.
Example:
Input: s = "RLRRLLRLRL"
Output: 4
Explanation: s can be split into "RL", "RRLL", "RL", "RL", each substring
contains same number of 'L' and 'R'.
Example:
Input: s = "RLLLLRRRLR"
Output: 3
Explanation: s can be split into "RL", "LLLRRR", "LR", each substring
contains same number of 'L' and 'R'.
Constraints:
- 1 <= s.length <= 1000
- s[i] = 'L' or 'R'
"""
#Difficulty: Easy
#40 / 40 test cases passed.
#Runtime: 36 ms, faster than 42.16% of Python3 online submissions for Split a String in Balanced Strings.
#Memory Usage: 13.9 MB, less than 37.65% of Python3 online submissions for Split a String in Balanced Strings.
class Solution:
def balancedStringSplit(self, s: str) -> int:
count = 0
stack = []
d = {'R' : 'L', 'L' : 'R'}
for c in s:
if c not in stack:
stack.append(d[c])
continue
stack.pop()
if not stack:
count += 1
return count
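# An equivalent single-pass sketch (same constraints assumed): keep a running
# R/L balance and count the zero crossings; no stack is needed.
class SolutionCounter:
    def balancedStringSplit(self, s: str) -> int:
        count = balance = 0
        for c in s:
            balance += 1 if c == 'R' else -1
            if balance == 0:
                count += 1
        return count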
|
StarcoderdataPython
|
4836259
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
bonds = {
'C-C': 1,
'C-H': 5,
}
linear = False
externalSymmetry = 1
spinMultiplicity = 2
opticalIsomers = 1
energy = {
'CBS-QB3': Log('ethyl_cbsqb3.log'),
'Klip_2': -78.98344186,
}
geometry = Log('ethyl_b3lyp.log')
frequencies = Log('ethyl_b3lyp.log')
"""pivot are the two atoms that are attached to the rotor
top contains the atoms that are being rotated including one of the atoms from pivots
symmetry is the symmetry number of the scan
fit is fit of the scan data. It defaults to 'best', but can also be assigned as 'cosine' or 'fourier'
Principally, the rotor symmetry can be automatically determined by Arkane, but could also be given by the user
(then the user's input overrides Arkane's determination):
rotors = [HinderedRotor(scanLog=Log('ethyl_scan_72.log'), pivots=[1,2], top=[1,3,4], symmetry=6, fit='best')]"""
rotors = [HinderedRotor(scanLog=Log('ethyl_scan_72.log'), pivots=[1,2], top=[1,3,4])]
|
StarcoderdataPython
|
4829449
|
"""
The query counts a “hit” every time it finds a page containing a particular term
from a lexicon, and it groups the results by book.
"""
from operator import add
from defoe import query_utils
from defoe.nls.query_utils import preprocess_clean_page, clean_page_as_string
from defoe.nls.query_utils import get_sentences_list_matches
import yaml, os
def do_query(archives, config_file=None, logger=None, context=None):
"""
    The query counts a “hit” every time it finds a page containing a particular
    term from a lexicon and groups the results by book title.
config_file must be the path to a lexicon file with a list of the keywords
to search for, one per line.
Also the config_file can indicate the preprocess treatment, along with the defoe
path, and the type of operating system.
If a term appears several times in the same page, it will be still counted as “1”.
Example:
'''Twas on the morn of sweet May Day':
- - neu
- 1
- - blaw
- 4
That means that neu appears in one page of the book 'Twas on the morn of sweet May Day'.
And blaw appears in 4 pages of the same book.
:param archives: RDD of defoe.nls.archive.Archive
:type archives: pyspark.rdd.PipelinedRDD
:param config_file: query configuration file
:type config_file: str or unicode
:param logger: logger (unused)
:type logger: py4j.java_gateway.JavaObject
    :return: number of occurrences of keywords grouped by title
:rtype: dict
"""
with open(config_file, "r") as f:
        config = yaml.safe_load(f)
if "os_type" in config:
if config["os_type"] == "linux":
os_type = "sys-i386-64"
else:
os_type= "sys-i386-snow-leopard"
else:
os_type = "sys-i386-64"
if "defoe_path" in config :
defoe_path= config["defoe_path"]
else:
defoe_path = "./"
preprocess_type = query_utils.extract_preprocess_word_type(config)
data_file = query_utils.extract_data_file(config, os.path.dirname(config_file))
keysentences = []
with open(data_file, 'r') as f:
for keysentence in list(f):
k_split = keysentence.split()
sentence_word = [query_utils.preprocess_word(
word, preprocess_type) for word in k_split]
sentence_norm = ''
for word in sentence_word:
if sentence_norm == '':
sentence_norm = word
else:
sentence_norm += " " + word
keysentences.append(sentence_norm)
# [(year, document), ...]
documents = archives.flatMap(
lambda archive: [(document.title, document) for document in list(archive)])
# [(year, page_string)
clean_pages = documents.flatMap(
lambda title_document: [(title_document[0],
clean_page_as_string(page, defoe_path, os_type))
for page in title_document[1]])
pages = clean_pages.flatMap(
lambda cl_page: [(cl_page[0],
preprocess_clean_page(cl_page[1], preprocess_type))])
# [(year, page_string)
# [(year, page_string)
filter_pages = pages.filter(
lambda title_page: any(
keysentence in title_page[1] for keysentence in keysentences))
# [(year, [keysentence, keysentence]), ...]
matching_pages = filter_pages.map(
lambda title_page: (title_page[0],
get_sentences_list_matches(
title_page[1],
keysentences)))
# [[(year, keysentence), 1) ((year, keysentence), 1) ] ...]
matching_sentences = matching_pages.flatMap(
lambda title_sentence: [((title_sentence[0], sentence), 1)
for sentence in title_sentence[1]])
# [((year, keysentence), num_keysentences), ...]
# =>
# [(year, (keysentence, num_keysentences)), ...]
# =>
# [(year, [keysentence, num_keysentences]), ...]
result = matching_sentences\
.reduceByKey(add)\
.map(lambda titlesentence_count:
(titlesentence_count[0][0],
(titlesentence_count[0][1], titlesentence_count[1]))) \
.groupByKey() \
.map(lambda title_sentencecount:
(title_sentencecount[0], list(title_sentencecount[1]))) \
.collect()
return result
|
StarcoderdataPython
|
4809287
|
<gh_stars>1-10
#!/usr/bin/env python
from __future__ import division
import rospy
from visualization_msgs.msg import Marker
from utils.math_utils import int_or_float
from utils.markers import car_marker
VTD_CAR_X = 4.22100019455 # parameters corresponding to VTD simulated car
VTD_CAR_Y = 1.76199996471
VTD_CAR_dX = 1.3654999733
class VehicleVisualization(object):
def __init__(self, name, X, Y, dX, color):
rospy.init_node("vehicle_visualization", anonymous=True)
self.pub = rospy.Publisher("/{0}/visualization".format(name), Marker, queue_size=10)
self.marker = car_marker(name, color=color, CAR_X=X, CAR_Y=Y, CAR_dX=dX)
def run(self):
rate = rospy.Rate(100)
while not rospy.is_shutdown():
self.marker.header.stamp = rospy.Time.now()
self.pub.publish(self.marker)
rate.sleep()
if __name__ == "__main__":
args = rospy.myargv()
name = args[1]
X = float(args[2]) if len(args) > 2 else VTD_CAR_X
Y = float(args[3]) if len(args) > 3 else VTD_CAR_Y
dX = float(args[4]) if len(args) > 4 else VTD_CAR_dX
color = args[5] if len(args) > 5 else "red"
color = (int_or_float(args[5]),
int_or_float(args[6]),
int_or_float(args[7])) if len(args) > 7 else color
vv = VehicleVisualization(name, X, Y, dX, color)
vv.run()
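# Example (illustrative): python vehicle_visualization.py ego 4.221 1.762 1.365 red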
|
StarcoderdataPython
|
1661976
|
<gh_stars>0
"""module for parse content"""
import collections
File = collections.namedtuple("File", "name path alg hash")
def parse(content, path_to_files):
"""
params:
content - list of string "name algorithm given_hash"
path_to_files - path to dir with files
return:
list of namedtuple("File", "name path alg hash")
"""
files = []
if path_to_files and path_to_files.strip()[-1] != "/":
path_to_files += "/"
for line in content:
line = line.split()
files.append(File(line[0], path_to_files + line[0], line[1], line[2]))
return files
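# A minimal usage sketch (hypothetical checksum line):
#     files = parse(["data.txt md5 d41d8cd98f00b204e9800998ecf8427e"], "/tmp")
#     files[0].path  # "/tmp/data.txt"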
|
StarcoderdataPython
|
4834621
|
import numpy as np
from .base import ClassifierModule
from .bert import BERTClassifier
from ..model.bert import BERTConfig
from ..model.fastbert import FastBERTClsDistillor, convert_ignore_cls
from ..token import WordPieceTokenizer
from ..third import tf
from .. import com
class FastBERTClassifier(BERTClassifier, ClassifierModule):
""" Single-label classifier on FastBERT, a distillation model. """
_INFER_ATTRIBUTES = BERTClassifier._INFER_ATTRIBUTES
def __init__(
self,
config_file,
vocab_file,
max_seq_length=128,
label_size=None,
init_checkpoint=None,
output_dir=None,
gpu_ids=None,
drop_pooler=False,
cls_model="self-attention",
do_lower_case=True,
truncate_method="LIFO",
):
self.__init_args__ = locals()
super(ClassifierModule, self).__init__(init_checkpoint, output_dir, gpu_ids)
self.batch_size = 0
self.max_seq_length = max_seq_length
self.label_size = label_size
self.truncate_method = truncate_method
self._cls_model = cls_model
self._ignore_cls = [0]
self._speed = 0.1
self._drop_pooler = drop_pooler
self._id_to_label = None
self.bert_config = BERTConfig.from_json_file(config_file)
self.tokenizer = WordPieceTokenizer(vocab_file, do_lower_case)
self.decay_power = "unsupported"
assert label_size, ("`label_size` can't be None.")
if "[CLS]" not in self.tokenizer.vocab:
self.tokenizer.add("[CLS]")
self.bert_config.vocab_size += 1
tf.logging.info("Add necessary token `[CLS]` into vocabulary.")
if "[SEP]" not in self.tokenizer.vocab:
self.tokenizer.add("[SEP]")
self.bert_config.vocab_size += 1
tf.logging.info("Add necessary token `[SEP]` into vocabulary.")
def predict(self, X=None, X_tokenized=None, batch_size=8, speed=0.1, ignore_cls="0"):
""" Inference on the model.
Args:
            X: list. A list object consisting of untokenized inputs.
            X_tokenized: list. A list object consisting of tokenized inputs.
                Either `X` or `X_tokenized` should be None.
batch_size: int. The size of batch in each step.
speed: float. Threshold for leaving model in advance, which
should be within [0, 1].
ignore_cls: list. A list object of integers that stands for
the classifiers to ignore. The more classifier ignored, the
faster inference is.
Returns:
A dict object of model outputs.
"""
ignore_cls = convert_ignore_cls(ignore_cls)
if ignore_cls != self._ignore_cls:
self._ignore_cls = ignore_cls
self._session_mode = None
if speed != self._speed:
self._speed = speed
self._session_mode = None
return super(ClassifierModule, self).predict(X, X_tokenized, batch_size)
def score(self, X=None, y=None, sample_weight=None, X_tokenized=None, batch_size=8, speed=0.1, ignore_cls="0"):
""" Inference on the model with scoring.
Args:
            X: list. A list object consisting of untokenized inputs.
            y: list. A list object consisting of labels.
            sample_weight: list. A list object of float-convertible values.
            X_tokenized: list. A list object consisting of tokenized inputs.
                Either `X` or `X_tokenized` should be None.
batch_size: int. The size of batch in each step.
speed: float. Threshold for leaving model in advance, which
should be within [0, 1].
ignore_cls: list. A list object of integers that stands for
the classifiers to ignore. The more classifier ignored, the
faster inference is.
Returns:
A dict object of output metrics.
"""
ignore_cls = convert_ignore_cls(ignore_cls)
if ignore_cls != self._ignore_cls:
self._ignore_cls = ignore_cls
self._session_mode = None
if speed != self._speed:
self._speed = speed
self._session_mode = None
return super(ClassifierModule, self).score(
X, y, sample_weight, X_tokenized, batch_size)
def export(self, export_dir, speed=0.1, ignore_cls="0", rename_inputs=None, rename_outputs=None, ignore_outputs=None):
""" Export model into SavedModel files.
Args:
export_dir: str. Directory to which the model is saved.
speed: float. Threshold for leaving model in advance, which
should be within [0, 1].
ignore_cls: list. A list object of integers that stands for
the classifiers to ignore. The more classifier ignored, the
faster inference is.
rename_inputs: dict. Mapping of original name to target name.
rename_outputs: dict. Mapping of original name to target name.
ignore_outputs: list. Name of outputs to ignore.
Returns:
None
"""
ignore_cls = convert_ignore_cls(ignore_cls)
if ignore_cls != self._ignore_cls:
self._ignore_cls = ignore_cls
self._session_mode = None
if speed != self._speed:
self._speed = speed
self._session_mode = None
return super(ClassifierModule, self).export(export_dir, rename_inputs, rename_outputs, ignore_outputs)
def convert(self, X=None, y=None, sample_weight=None, X_tokenized=None, is_training=False, is_parallel=False):
self._assert_legal(X, y, sample_weight, X_tokenized)
if is_training:
assert y is None, "Training of %s is unsupervised. `y` should be None." % self.__class__.__name__
n_inputs = None
data = {}
# convert X
if X or X_tokenized:
tokenized = False if X else X_tokenized
input_ids, input_mask, segment_ids = self._convert_X(X_tokenized if tokenized else X, tokenized=tokenized)
data["input_ids"] = np.array(input_ids, dtype=np.int32)
data["input_mask"] = np.array(input_mask, dtype=np.int32)
data["segment_ids"] = np.array(segment_ids, dtype=np.int32)
n_inputs = len(input_ids)
if n_inputs < self.batch_size:
self.batch_size = max(n_inputs, len(self._gpu_ids))
if y:
# convert y and sample_weight
label_ids = self._convert_y(y)
data["label_ids"] = np.array(label_ids, dtype=np.int32)
# convert sample_weight
if is_training or y:
sample_weight = self._convert_sample_weight(sample_weight, n_inputs)
data["sample_weight"] = np.array(sample_weight, dtype=np.float32)
return data
def _forward(self, is_training, split_placeholders, **kwargs):
model = FastBERTClsDistillor(
bert_config=self.bert_config,
is_training=is_training,
input_ids=split_placeholders["input_ids"],
input_mask=split_placeholders["input_mask"],
segment_ids=split_placeholders["segment_ids"],
sample_weight=split_placeholders.get("sample_weight"),
drop_pooler=self._drop_pooler,
speed=self._speed,
ignore_cls=[] if is_training else self._ignore_cls,
cls_model=self._cls_model,
label_size=self.label_size,
**kwargs,
)
return model.get_forward_outputs()
def _get_fit_ops(self, as_feature=False):
return [self._tensors["losses"]]
def _get_fit_info(self, output_arrays, feed_dict, as_feature=False):
# loss
batch_losses = output_arrays[0]
loss = np.mean(batch_losses)
info = ""
info += ", distill loss %.6f" % loss
return info
def _get_predict_ops(self):
return [self._tensors["probs"]]
def _get_predict_outputs(self, batch_outputs):
n_inputs = len(list(self.data.values())[0])
output_arrays = list(zip(*batch_outputs))
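        # _uncertainty below is the binary entropy of the top probability,
        # normalized by log(1/label_size) (both terms are negative, so the
        # ratio is positive); a sample whose value drops below `speed` is
        # treated as having exited at an earlier classifier.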
def _uncertainty(prob):
if prob < 1e-20 or 1 - prob < 1e-20:
prob = 1e-20
return (prob * np.log(prob) + (1 - prob) * np.log(1 - prob)) / np.log(1 / self.label_size)
def _permutate(batch_probs):
n_device = max(len(self._gpu_ids), 1)
d_batch_size = self.batch_size // n_device
probs = np.zeros((self.batch_size, self.label_size))
sources = np.zeros((self.batch_size), dtype=np.int32)
max_loop = self.bert_config.num_hidden_layers + 1 - len(self._ignore_cls)
keep_cls = [
cls_idx for cls_idx in list(range(self.bert_config.num_hidden_layers + 1))
if cls_idx not in self._ignore_cls
]
i = 0
for d in range(n_device):
unfinished = [k + i for k in range(d_batch_size)]
for loop in range(max_loop):
source = keep_cls[loop]
next_unfinished = []
for k in range(len(unfinished)):
if _uncertainty(batch_probs[i][0]) < self._speed or loop == max_loop - 1:
probs[unfinished[k]] = batch_probs[i]
sources[unfinished[k]] = source
else:
next_unfinished.append(unfinished[k])
i += 1
unfinished = next_unfinished
assert i == len(batch_probs)
return probs, sources
# probs
probs_arrays = []
sources_arrays = []
for batch_probs in output_arrays[0]:
probs_array, sources_array = _permutate(batch_probs)
probs_arrays.append(probs_array)
sources_arrays.append(sources_array)
probs = com.transform(probs_arrays, n_inputs)
sources = com.transform(sources_arrays, n_inputs).tolist()
# preds
preds = np.argmax(probs, axis=-1).tolist()
if self._id_to_label:
preds = [self._id_to_label[idx] for idx in preds]
outputs = {}
outputs["preds"] = preds
outputs["probs"] = probs
outputs["sources"] = sources
return outputs
def _get_score_ops(self):
return [self._tensors["probs"]]
def _get_score_outputs(self, batch_outputs):
n_inputs = len(list(self.data.values())[0])
output_arrays = list(zip(*batch_outputs))
def _uncertainty(prob):
if prob < 1e-20 or 1 - prob < 1e-20:
prob = 1e-20
return (prob * np.log(prob) + (1 - prob) * np.log(1 - prob)) / np.log(1 / self.label_size)
def _permutate(batch_probs):
n_device = max(len(self._gpu_ids), 1)
d_batch_size = self.batch_size // n_device
probs = np.zeros((self.batch_size, self.label_size))
sources = np.zeros((self.batch_size), dtype=np.int32)
max_loop = self.bert_config.num_hidden_layers + 1 - len(self._ignore_cls)
keep_cls = [
cls_idx for cls_idx in list(range(self.bert_config.num_hidden_layers + 1))
if cls_idx not in self._ignore_cls
]
i = 0
for d in range(n_device):
unfinished = [k + i for k in range(d_batch_size)]
for loop in range(max_loop):
source = keep_cls[loop]
next_unfinished = []
for k in range(len(unfinished)):
if _uncertainty(batch_probs[i][0]) < self._speed or loop == max_loop - 1:
probs[unfinished[k]] = batch_probs[i]
sources[unfinished[k]] = source
else:
next_unfinished.append(unfinished[k])
i += 1
unfinished = next_unfinished
assert i == len(batch_probs)
return probs, sources
def _transform(output_arrays):
if len(output_arrays[0].shape) > 1:
return np.vstack(output_arrays)[:n_inputs]
return np.hstack(output_arrays)[:n_inputs]
# accuracy
probs_arrays = []
for batch_probs in output_arrays[0]:
probs_array, _ = _permutate(batch_probs)
probs_arrays.append(probs_array)
probs = _transform(probs_arrays)
preds = np.argmax(probs, axis=-1)
labels = self.data["label_ids"]
accuracy = np.mean(preds == labels)
# loss
losses = [-np.log(probs[i][label]) for i, label in enumerate(labels)]
sample_weight = self.data["sample_weight"]
losses = np.array(losses) * sample_weight
loss = np.mean(losses)
outputs = {}
outputs["accuracy"] = accuracy
outputs["loss"] = loss
return outputs
|
StarcoderdataPython
|
1701782
|
from typing import (
Dict,
Tuple,
)
import logging
import toposort
from haoda import ir, util
from haoda.ir.arithmetic import base
_logger = logging.getLogger().getChild(__name__)
GRAMMAR = r'''
SodaProgram:
(
('border' ':' border=BorderStrategies)?
('burst' 'width' ':' burst_width=INT)
('cluster' ':' cluster=ClusterStrategies)?
('iterate' ':' iterate=INT)
('kernel' ':' app_name=ID)
('unroll' 'factor' ':' unroll_factor=INT)
(input_stmts=InputStmt)+
(param_stmts=ParamStmt)*
(local_stmts=LocalStmt)*
(output_stmts=OutputStmt)+
)#;
YesOrNo: 'yes'|'no';
BorderStrategies: 'ignore'|'preserve';
ClusterStrategies: 'none'|'fine'|'coarse'|'full';
Comment: /\s*#.*$/;
InputStmt: 'input' ('dram' dram=INT ('.' dram=INT)*)? haoda_type=Type ':' name=ID ('(' (tile_size=INT ',')* '*' ')')?;
LocalStmt: 'local' haoda_type=Type ':' (let=Let)* ref=Ref '=' expr=Expr;
OutputStmt: 'output' ('dram' dram=INT ('.' dram=INT)*)? haoda_type=Type ':' (let=Let)* ref=Ref '=' expr=Expr;
ParamStmt: 'param' ('dram' dram=INT ('.' dram=INT)*)? haoda_type=Type (',' attr=ParamAttr)* ':' name=ID ('[' size=INT ']')*;
ParamAttr: 'dup' dup=Int | partitioning=Partitioning;
Partitioning:
'partition' strategy='complete' ('dim' '=' dim=Int)? |
'partition' strategy='cyclic' 'factor' '=' factor=Int ('dim' '=' dim=Int)?;
''' + ir.GRAMMAR
class InputStmt(ir.Node):
"""Node for input statement, represents a tiled input tensor.
Attributes:
haoda_type: Type of this input tensor.
dram: [int], dram id used to read this input
name: str, name of this input tensor.
tile_size: list of tile sizes. The last dimension should be 0.
"""
SCALAR_ATTRS = 'haoda_type', 'name'
LINEAR_ATTRS = ('tile_size', 'dram',)
def __init__(self, **kwargs):
super().__init__(**kwargs)
# pylint: disable=access-member-before-definition
if not self.dram:
self.dram = (0,)
self.tile_size += (0,)
def __str__(self):
result = 'input {}: {}'.format(self.haoda_type, self.name)
if self.tile_size[:-1]:
result += '({}, *)'.format(', '.join(map(str, self.tile_size[:-1])))
return result
class LocalStmtOrOutputStmt(ir.Node):
SCALAR_ATTRS: Tuple[str, ...]
LINEAR_ATTRS: Tuple[str, ...]
SCALAR_ATTRS = 'haoda_type', 'ref', 'expr'
LINEAR_ATTRS = ('let',)
def __init__(self, **kwargs):
# inform mypy of the attributes
self.haoda_type = None
self.ref = None
self.expr = None
self.let = ()
super().__init__(**kwargs)
var_types = {}
# pylint: disable=access-member-before-definition
for let in self.let:
var_types[let.name] = let.haoda_type
def set_var_type(obj, var_types):
if isinstance(obj, ir.Var) and obj.name in var_types:
obj.haoda_type = var_types[obj.name]
return obj
self.let = tuple(_.visit(set_var_type, var_types) for _ in self.let)
self.expr = self.expr.visit(set_var_type, var_types)
self.stencil = kwargs.pop('stencil', None)
@property
def name(self):
return self.ref.name
def __str__(self):
if self.let:
let = '\n {}\n '.format('\n '.join(map(str, self.let)))
else:
let = ''
return '{} {}:{} {} = {}'.format(type(self).__name__[:-4].lower(),
self.haoda_type, let, self.ref,
ir.unparenthesize(self.expr))
@property
def symbol_table(self) -> Dict[str, str]:
# types of lets are local to this statement
# must **not** modify self.stencil.symbol_table in-place
symbol_table = self.stencil.symbol_table.copy()
lets = {let.name: let for let in self.let}
for var in toposort.toposort_flatten({
let.name: {var.name for var in ir.visitor.get_vars(let)}
for let in self.let}):
symbol_table[var] = base.propagate_type(lets[var],
symbol_table).haoda_type
return symbol_table
def propagate_type(self, dummy=None) -> None:
"""Propagate haoda type of the nodes in this statement.
    The symbol table mapping tensor names to haoda types is taken from
    `self.symbol_table`; the `dummy` parameter is unused.
Returns:
None.
"""
symbol_table = self.symbol_table
self.expr = base.propagate_type(self.expr, symbol_table)
if self.expr.haoda_type != self.haoda_type:
self.expr = ir.Cast(expr=self.expr, haoda_type=self.haoda_type)
self.let = tuple(base.propagate_type(let, symbol_table) for let in self.let)
class LocalStmt(LocalStmtOrOutputStmt):
pass
class OutputStmt(LocalStmtOrOutputStmt):
LINEAR_ATTRS = LocalStmtOrOutputStmt.LINEAR_ATTRS + ('dram',)
def __init__(self, **kwargs):
super().__init__(**kwargs)
# pylint: disable=access-member-before-definition
if not self.dram:
self.dram = (0,)
class ParamStmt(ir.Node):
SCALAR_ATTRS = 'haoda_type', 'attr', 'name', 'size'
LINEAR_ATTRS = ('dram',)
def __str__(self):
return 'param {}{}: {}{}'.format(
self.haoda_type, ''.join(map(', {}'.format, self.attr)),
self.name, ''.join(map('[{}]'.format, self.size)))
class ParamAttr(ir.Node):
SCALAR_ATTRS = 'dup', 'partitioning'
def __str__(self):
if self.dup is not None:
return 'dup {}'.format(self.dup)
result = 'partition {0.strategy}'.format(self.partitioning)
if self.partitioning.strategy == 'cyclic':
result += ' factor={}'.format(self.partitioning.factor)
if self.partitioning.dim is not None:
result += ' dim={}'.format(self.partitioning.dim)
return result
class SodaProgram(ir.Node):
SCALAR_ATTRS = ('border', 'burst_width', 'cluster', 'iterate', 'app_name',
'unroll_factor', 'input_stmts', 'param_stmts', 'local_stmts',
'output_stmts')
def __init__(self, **kwargs):
super().__init__(**kwargs)
for node in self.input_stmts:
if hasattr(self, 'tile_size'):
# pylint: disable=access-member-before-definition
if self.tile_size != node.tile_size:
msg = ('tile size %s doesn\'t match previous one %s' %
# pylint: disable=access-member-before-definition
(node.tile_size, self.tile_size))
raise util.SemanticError(msg)
elif node.tile_size[:-1]:
self.tile_size = node.tile_size
self.dim = len(self.tile_size)
# deal with 1D case
if not hasattr(self, 'tile_size'):
# pylint: disable=undefined-loop-variable
self.tile_size = node.tile_size
self.dim = len(self.tile_size)
def __str__(self):
return '\n'.join(filter(None, (
'border: {}'.format(self.border),
'burst width: {}'.format(self.burst_width),
'cluster: {}'.format(self.cluster),
'iterate: {}'.format(self.iterate),
'kernel: {}'.format(self.app_name),
'unroll factor: {}'.format(self.unroll_factor),
'\n'.join(map(str, self.input_stmts)),
'\n'.join(map(str, self.param_stmts)),
'\n'.join(map(str, self.local_stmts)),
'\n'.join(map(str, self.output_stmts)))))
CLASSES = (
InputStmt,
LocalStmt,
OutputStmt,
ir.Let,
ir.Ref,
ir.Expr,
ir.LogicAnd,
ir.BinaryOr,
ir.Xor,
ir.BinaryAnd,
ir.EqCmp,
ir.LtCmp,
ir.AddSub,
ir.MulDiv,
ir.Unary,
ir.Operand,
ir.Cast,
ir.Call,
ir.Var,
ParamStmt,
ParamAttr,
SodaProgram,
)
|
StarcoderdataPython
|
3258683
|
<reponame>eng-tools/eqdes
import numpy as np
from sfsimodels import loader as ml
from sfsimodels import output as mo
from eqdes import models as em
import sfsimodels as sm
from eqdes import dbd_tools as dt
from eqdes import nonlinear_foundation as nf
from eqdes import moment_equilibrium
import geofound as gf
from eqdes import fns
from eqdes.extensions.exceptions import DesignError
from eqdes.nonlinear_foundation import calc_moment_capacity_via_millen_et_al_2020, calc_fd_rot_via_millen_et_al_2020, \
calc_fd_rot_via_millen_et_al_2020_w_tie_beams
def assess_rc_frame(fb, hz, theta_max, otm_max, **kwargs):
"""
Displacement-based assessment of a frame building
:param fb: FrameBuilding Object
:param hz: Hazard Object
:param theta_max: [degrees], maximum structural interstorey drift
:param otm_max: [N], maximum overturning moment
:param kwargs:
:return:
"""
af = em.AssessedRCFrame(fb, hz)
af.otm_max = otm_max
af.theta_max = theta_max
verbose = kwargs.get('verbose', af.verbose)
ductility_reduction_factors = 100
theta_c = theta_max
for i in range(ductility_reduction_factors):
mu_reduction_factor = 1.0 - float(i) / ductility_reduction_factors
theta_c = theta_max * mu_reduction_factor
displacements = dt.displacement_profile_frame(theta_c, af.heights, af.hm_factor)
af.delta_max, af.mass_eff, af.height_eff = dt.equivalent_sdof(af.storey_mass_p_frame, displacements, af.heights)
af.theta_y = dt.conc_frame_yield_drift(af.fye, af.concrete.e_mod_steel, af.av_bay, af.av_beam)
af.delta_y = dt.yield_displacement(af.theta_y, af.height_eff)
af.mu = dt.ductility(af.delta_max, af.delta_y)
if i == 0:
af.max_mu = af.mu
af.xi = dt.equivalent_viscous_damping(af.mu)
af.eta = dt.reduction_factor(af.xi)
otm = otm_max * dt.bilinear_load_factor(af.mu, af.max_mu, af.post_yield_stiffness_ratio)
af.v_base = otm / af.height_eff
af.k_eff = af.v_base / af.delta_max
af.t_eff = dt.effective_period_from_stiffness(af.mass_eff, af.k_eff)
af.delta_demand = dt.displacement_from_effective_period(af.eta, af.hz.corner_disp,
af.t_eff, af.hz.corner_period)
if verbose > 1:
print('Delta_D: ', af.delta_max)
print('Effective mass: ', af.mass_eff)
print('Effective height: ', af.height_eff)
print('Mu: ', af.mu)
print('theta yield', af.theta_y)
print('xi: ', af.xi)
print('Reduction Factor: ', af.eta)
print('t_eff', af.t_eff)
if af.delta_demand > af.delta_max: # failure occurs
af.mu = af.delta_demand / af.delta_y
# af.delta_demand
break
else:
if verbose > 1:
print("drift %.2f is not compatible" % theta_c)
af.assessed_drift = theta_c
af.storey_forces = dt.calculate_storey_forces(af.storey_mass_p_frame, displacements, af.v_base, btype='frame')
return af
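# A minimal usage sketch (hypothetical objects: `fb` is an sfsimodels-style
# FrameBuilding and `hz` a hazard object with corner_disp/corner_period set):
# af = assess_rc_frame(fb, hz, theta_max=0.02, otm_max=8.0e6, verbose=2)
# print(af.assessed_drift, af.v_base)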
def assess_rc_frame_w_sfsi_via_millen_et_al_2020(dfb, hz, sl, fd, theta_max, otm_max=None, mcbs=None, **kwargs):
"""
Displacement-based assessment of a frame building considering SFSI
:param dfb: DesignedRCFrameBuilding Object
:param hz: Hazard Object
:param sl: Soil Object
:param fd: Foundation Object
:param theta_max: [degrees], maximum structural interstorey drift
:param otm_max: [N], Maximum overturning moment
:param mcbs: [Nm], Column base moments (required if foundation is PadFoundation)
:param kwargs:
:return:
"""
horz2vert_mass = kwargs.get('horz2vert_mass', 1.0)
af = em.AssessedSFSIRCFrame(dfb, hz, sl, fd)
af.theta_max = theta_max
verbose = kwargs.get('verbose', af.verbose)
af.static_values()
# add foundation to heights
heights = list(af.heights)
heights.insert(0, 0)
heights = np.array(heights) + af.fd.height
# add foundation to masses
storey_masses = list(af.storey_masses)
storey_masses.insert(0, af.fd.mass)
storey_masses = np.array(storey_masses)
af.storey_mass_p_frame = storey_masses / af.n_seismic_frames
ductility_reduction_factors = 100
iterations_ductility = kwargs.get('iterations_ductility', ductility_reduction_factors)
iterations_rotation = kwargs.get('iterations_rotation', 20)
theta_c = theta_max
# if m_col_base is greater than m_foot then
if otm_max is None:
otm_max = moment_equilibrium.calc_otm_capacity(af)
af.theta_y = dt.conc_frame_yield_drift(af.fye, af.concrete.e_mod_steel, af.av_bay, af.av_beam)
for i in range(iterations_ductility):
mu_reduction_factor = 1.0 - float(i) / ductility_reduction_factors
theta_c = theta_max * mu_reduction_factor
displacements = dt.displacement_profile_frame(theta_c, heights, af.hm_factor, foundation=True,
fd_height=af.fd.height, theta_f=af.theta_f)
af.delta_max, af.mass_eff, af.height_eff = dt.equivalent_sdof(af.storey_mass_p_frame, displacements, heights)
af.delta_y = dt.yield_displacement(af.theta_y, af.height_eff - af.fd.height)
approx_delta_f = af.theta_f * af.height_eff
af.delta_ss = af.delta_max - approx_delta_f
af.mu = dt.ductility(af.delta_ss, af.delta_y)
if i == 0:
af.max_mu = af.mu
af.xi = dt.equivalent_viscous_damping(af.mu)
eta_ss = dt.reduction_factor(af.xi)
otm = otm_max * dt.bilinear_load_factor(af.mu, af.max_mu, af.post_yield_stiffness_ratio)
# Foundation behaviour
eta_fshear = nf.foundation_shear_reduction_factor()
af.delta_fshear = af.v_base / (0.5 * af.k_f0_shear)
moment_f = otm
found_rot_tol = 0.00001
bhr = (af.fd.width / af.height_eff)
n_ult = af.fd_bearing_capacity
psi = 0.75 * np.tan(sl.phi_r)
found_rot = nf.calc_fd_rot_via_millen_et_al_2020(af.k_f_0, af.fd.length, af.total_weight, n_ult,
psi, moment_f, af.height_eff)
norm_rot = found_rot / af.theta_pseudo_up
cor_norm_rot = nf.calculate_corrected_normalised_rotation(norm_rot, bhr)
eta_frot = nf.foundation_rotation_reduction_factor(cor_norm_rot)
af.delta_frot = af.theta_f * af.height_eff
af.delta_f = af.delta_frot + af.delta_fshear
af.delta_max = af.delta_ss + af.delta_f
eta_sys = nf.system_reduction_factor(af.delta_ss, af.delta_frot, af.delta_fshear, eta_ss, eta_frot, eta_fshear)
af.eta = eta_sys
af.theta_f = found_rot
af.v_base = otm / (af.height_eff - af.fd.height) # Assume hinge at top of foundation.
af.k_eff = af.v_base / af.delta_max
af.t_eff = dt.effective_period_from_stiffness(af.mass_eff, af.k_eff)
if verbose > 1:
print('Delta_max: ', af.delta_max)
print('Effective mass: ', af.mass_eff)
print('Effective height: ', af.height_eff)
print('Mu: ', af.mu)
print('theta yield', af.theta_y)
print('xi: ', af.xi)
print('Reduction Factor: ', af.eta)
print('t_eff', af.t_eff)
af.delta_demand = dt.displacement_from_effective_period(af.eta, af.hz.corner_disp,
af.t_eff, af.hz.corner_period)
if af.delta_demand > af.delta_max: # failure occurs
af.mu = (af.delta_demand - af.delta_f) / af.delta_y
# af.delta_demand
break
else:
if verbose > 1:
print("drift %.2f is not compatible" % theta_c)
if fd.type == 'pad_foundation':
# assert isinstance(fd, em.PadFoundation)
ip_axis = 'length'
af.storey_forces = dt.calculate_storey_forces(af.storey_mass_p_frame, displacements, af.v_base, btype='frame')
# moment_beams_cl, moment_column_bases, axial_seismic = moment_equilibrium.assess(af, af.storey_forces, mom_ratio)
mom_ratio = 0.6 # TODO: need to validate !
if mcbs is None:
mcbs = af.get_column_base_moments()
# TODO: need to account for minimum column base moment which shifts mom_ratio
h1 = af.interstorey_heights[0]
h_eff = h1 * mom_ratio + fd.height
pad = af.fd.pad
pad.n_ult = af.soil_q * pad.area
col_loads = af.get_column_vert_loads()
col_loads = np.mean(col_loads, axis=1)  # average across the multiple frames
ext_nloads = col_loads[0]
int_nloads = np.max(col_loads[1:-1])
m_foot_int = np.max(mcbs[1:-1]) * h_eff / (h1 * mom_ratio)
pad.n_load = int_nloads
tie_beams = None
if hasattr(fd, f'tie_beam_in_{ip_axis}_dir'):
tie_beams = getattr(fd, f'tie_beam_in_{ip_axis}_dir')
if tie_beams is not None:
tb_sect = getattr(fd, f'tie_beam_in_{ip_axis}_dir').s[0]
tb_length = (fd.length - (fd.pad_length * fd.n_pads_l)) / (fd.n_pads_l - 1)
assert isinstance(tb_sect, sm.sections.RCBeamSection)
# See supporting_docs/tie-beam-stiffness-calcs.pdf
k_ties = (6 * tb_sect.i_rot_ww_cracked * tb_sect.mat.e_mod_conc) / tb_length
else:
k_ties = 0
l_in = getattr(pad, ip_axis)
k_f_0_pad = gf.stiffness.calc_rotational_via_gazetas_1991(sl, pad, ip_axis=ip_axis)
rot_ipad = calc_fd_rot_via_millen_et_al_2020_w_tie_beams(k_f_0_pad, l_in, int_nloads, pad.n_ult, psi,
m_foot_int, h_eff, 2 * k_ties)
# TODO: change to cycle through all
# Exterior footings
if rot_ipad is None: # First try moment ratio of 0.5
# m_cap = pad.n_load * getattr(pad, ip_axis) / 2 * (1 - pad.n_load / pad.n_ult)
m_cap = calc_moment_capacity_via_millen_et_al_2020(l_in, pad.n_load, pad.n_ult, psi, h_eff)
raise DesignError(f"Assessment failed - interior footing moment demand ({m_foot_int/1e3:.3g})"
f" kNm exceeds capacity (~{m_cap/1e3:.3g} kNm)")
m_foot_ext = np.max(mcbs[np.array([0, -1])]) * h_eff / (h1 * mom_ratio)
pad.n_load = ext_nloads
# rot_epad = check_local_footing_rotations(sl, pad, m_foot_ext, h_eff, ip_axis=ip_axis, k_ties=k_ties)
rot_epad = calc_fd_rot_via_millen_et_al_2020_w_tie_beams(k_f_0_pad, l_in, ext_nloads, pad.n_ult, psi,
m_foot_ext, h_eff, k_ties)
if rot_epad is None:
m_cap = pad.n_load * getattr(pad, ip_axis) / 2 * (1 - pad.n_load / pad.n_ult)
raise DesignError(f"Assessment failed - interior footing moment demand ({m_foot_ext/1e3:.3g})"
f" kNm exceeds capacity (~{m_cap/1e3:.3g} kNm)")
if max([rot_ipad, rot_epad]) - found_rot > theta_c - af.theta_y:
# footing should be increased or design drift increased
pad_rot = max([rot_ipad, rot_epad])
plastic_rot = theta_c - af.theta_y
raise DesignError(f"Assessment failed - footing rotation ({pad_rot:.3g}) "
f"exceeds plastic rotation (~{plastic_rot:.3g})")
af.m_foot = np.zeros(af.n_bays + 1)
af.m_foot[0] = m_foot_ext
af.m_foot[-1] = m_foot_ext
af.m_foot[1:-1] = m_foot_int
af.theta_f = found_rot
af.assessed_drift = theta_c
af.storey_forces = dt.calculate_storey_forces(af.storey_mass_p_frame, displacements, af.v_base, btype='frame')
return af
def calc_base_moment_rotation(af, fd, sl, theta_col_y, mom_ratio=0.6, peak_rot=0.1, mcbs=None):
if mcbs is None: # provide mcbs if mu<1
mcbs = fns.get_column_base_moments(af)
# Assume column rotation at moment capacity is at 5x column yield rotation and has post-yield stiffness ratio of 0.01
b = 0.01
mu_peak = 5
mybs = mcbs / (1 + (mu_peak - 1) * b)
k_cols = mybs / theta_col_y
if fd.type == 'pad_foundation':
ip_axis = fd.ip_axis
tie_beams = getattr(fd, f'tie_beam_in_{ip_axis}_dir')
pad = af.fd.pad
pad.n_ult = af.soil_q * pad.area
psi = 0.75 * np.tan(np.radians(sl.phi))
l_in = getattr(pad, ip_axis)
h1 = af.interstorey_heights[0]
# NOTE: could validate at the end that the mom_ratio is correct
h_eff = h1 * mom_ratio + fd.height
# k_f_0_pad = gf.stiffness.calc_rotational_via_gazetas_1991(sl, pad, ip_axis=ip_axis)
col_loads = af.get_column_vert_loads()
col_loads = np.mean(col_loads, axis=1)  # average across frames since loads are reported per frame
if tie_beams is not None:
tb_sect = getattr(fd, f'tie_beam_in_{ip_axis}_dir').s[0]
tb_length = (fd.length - (fd.pad_length * fd.n_pads_l)) / (fd.n_pads_l - 1)
assert isinstance(tb_sect, sm.sections.RCBeamSection)
# See supporting_docs/tie-beam-stiffness-calcs.pdf
k_ties = (6 * tb_sect.i_rot_ww_cracked * tb_sect.mat.e_mod_conc) / tb_length
else:
k_ties = 0
rot_f_vals = []
rot_col_vals = []
mom_combo_vals = []
mom_f_vals = []
for i in range(len(col_loads)):
nload = col_loads[i] + fd.pad.mass * 9.8
m_cap = calc_moment_capacity_via_millen_et_al_2020(l_in, nload, pad.n_ult, psi, h_eff)
mom_f = np.linspace(0, 0.99 * m_cap, 100)
l_in = getattr(pad, ip_axis)
k_f_0_pad = gf.stiffness.calc_rotational_via_gazetas_1991(sl, pad, ip_axis=ip_axis)
rot_fs = calc_fd_rot_via_millen_et_al_2020(k_f_0_pad, l_in, nload, pad.n_ult, psi, mom_f, h_eff, mval=-1)
# TODO: build extra rot here
if max(rot_fs) < peak_rot:
extra_rot_points = int((peak_rot - max(rot_fs)) / (peak_rot / 20))
extra_rots = np.linspace(max(rot_fs), peak_rot, extra_rot_points + 1)[1:]
extra_moms = m_cap * np.ones_like(extra_rots)
rot_fs = np.concatenate([rot_fs, extra_rots])
mom_f = np.concatenate([mom_f, extra_moms])
# else:
# inds = np.where(rot_fs < peak_rot)
# rot_fs = rot_fs[inds]
# mom_f = mom_f[inds]
if i == 0 or i == len(col_loads) - 1:
k_tbs = k_ties
else:
k_tbs = 2 * k_ties
mom_f_plus_tbs = k_tbs * rot_fs + mom_f
mom_ft_at_cb = mom_f_plus_tbs * (h1 * mom_ratio) / h_eff
if mybs[i] > mom_ft_at_cb[-1]:
rot_f_at_col_yield = np.interp(0.99 * mom_ft_at_cb[-1], mom_ft_at_cb, rot_fs)
else:
rot_f_at_col_yield = np.interp(mybs[i], mom_ft_at_cb, rot_fs)
mom_f_at_col_yield = np.interp(rot_f_at_col_yield, rot_fs, mom_f)
ind = sm.interp_left(rot_f_at_col_yield, rot_fs) + 1
rot_fs = np.insert(rot_fs, ind, rot_f_at_col_yield)
mom_ft_at_cb = np.insert(mom_ft_at_cb, ind, mybs[i])
mom_f = np.insert(mom_f, ind, mom_f_at_col_yield)
# TODO: insert mybs here
rot_col = np.where(mom_ft_at_cb < mybs[i], mom_ft_at_cb / k_cols[i], theta_col_y + (mom_ft_at_cb - mybs[i]) / (k_cols[i] * b))
rot_combo = rot_col + rot_fs
# mom_c = np.interp(theta_col_p, rot_combo, mom_f_plus_tbs)
# mcbs[i] = mom_c
mom_f_vals.append(mom_f)
mom_combo_vals.append(mom_ft_at_cb)
rot_f_vals.append(rot_fs)
rot_col_vals.append(rot_col)
return np.array(rot_f_vals, dtype=float), np.array(rot_col_vals, dtype=float), \
np.array(mom_f_vals, dtype=float), np.array(mom_combo_vals, dtype=float)
def push_over_rc_frame_w_sfsi_via_millen_et_al_2021(dfb, sl, fd, theta_max, mcbs=None, **kwargs):
"""
Displacement-based assessment of a frame building considering SFSI
:param dfb: DesignedRCFrameBuilding Object
:param sl: Soil Object
:param fd: Foundation Object
:param theta_max: [degrees], maximum structural interstorey drift
:param mcbs: [Nm], Column base moments (required if foundation is PadFoundation)
:param kwargs:
:return:
"""
# NOTE: The global rotation is only considered based on A*d^2, and the local rotation is dealt with separately.
horz2vert_mass = kwargs.get('horz2vert_mass', 1.0)
af = em.AssessedSFSIRCFrame(dfb, sm.SeismicHazard(), sl, fd)
af.theta_max = theta_max
verbose = kwargs.get('verbose', af.verbose)
af.static_values()
# add foundation to heights
heights = list(af.heights)
heights.insert(0, 0)
heights = np.array(heights) + af.fd.height
# add foundation to masses
storey_masses = list(af.storey_masses)
storey_masses.insert(0, af.fd.mass)
storey_masses = np.array(storey_masses)
af.storey_mass_p_frame = storey_masses / af.n_seismic_frames
ductility_reduction_factors = 100
iterations_ductility = kwargs.get('iterations_ductility', ductility_reduction_factors)
iterations_rotation = kwargs.get('iterations_rotation', 20)
af.theta_y = dt.conc_frame_yield_drift(af.fye, af.concrete.e_mod_steel, af.av_bay, af.av_beam)
theta_col_y = af.theta_y * 0.8
m_col_bases = fns.get_column_base_moments(af)
mom_ratio = 0.6
displacements = dt.displacement_profile_frame(theta_max, af.heights, af.hm_factor)
delta_max_fb, mass_eff_fb, height_eff_fb = dt.equivalent_sdof(af.storey_mass_p_frame[1:], displacements, af.heights)
delta_y_fb = dt.yield_displacement(af.theta_y, height_eff_fb)
af.max_mu = delta_max_fb / delta_y_fb
theta_p = 0.8 * af.theta_y
max_drift_duct = theta_max / af.theta_y
ducts = np.linspace(0.1, max_drift_duct, 10)
peak_rot = af.theta_y * ducts[-1]
# TODO: if m_col_base is greater than m_foot then
# Make sure that the peak rotation and the col yield rotation are included in the rot_col_vals output else interp fails later
rot_f_vals, rot_col_vals, mom_f_vals, mom_combo_vals = calc_base_moment_rotation(af, fd, sl, theta_col_y,
mom_ratio=mom_ratio, peak_rot=0.1)
otm_max = moment_equilibrium.calc_otm_capacity(af, mcbs=m_col_bases)
otm_max_from_beams = otm_max - np.sum(m_col_bases)
disps = []
vbases = []
mfs = []
nfs = []
for i in range(len(ducts)):
theta_c = af.theta_y * ducts[i]
if ducts[i] < 0.8:
pass
# Assume that column yield occurs at 80% of yield drift
# theta_c = af.theta_y * ducts[i] / 0.8
# Reduce column base moments
displacements = dt.displacement_profile_frame(theta_c, heights, af.hm_factor, foundation=True,
fd_height=af.fd.height, theta_f=af.theta_f)
af.delta_max, af.mass_eff, af.height_eff = dt.equivalent_sdof(af.storey_mass_p_frame, displacements, heights)
af.delta_y = dt.yield_displacement(af.theta_y, af.height_eff - af.fd.height)
approx_delta_f = af.theta_f * af.height_eff
af.delta_ss = af.delta_max - approx_delta_f
af.mu = dt.ductility(af.delta_ss, af.delta_y)
af.xi = dt.equivalent_viscous_damping(af.mu)
eta_ss = dt.reduction_factor(af.xi)
mcbs_w_sfsi = np.zeros_like(m_col_bases, dtype=float)
for j in range(len(m_col_bases)):  # separate index to avoid clobbering the outer loop's `i`
mcbs_w_sfsi[j] = np.interp(theta_c, rot_f_vals[j] + rot_col_vals[j], mom_combo_vals[j])
mu_factor = dt.bilinear_load_factor(af.mu, af.max_mu, af.post_yield_stiffness_ratio)
otm_from_beams = otm_max_from_beams * mu_factor
otm_at_col_base = otm_from_beams + np.sum(mcbs_w_sfsi) # TODO: could split 1st order and 2nd up
print('otm_at_col_base: ', otm_at_col_base, otm_from_beams, np.sum(mcbs_w_sfsi), theta_c, mom_combo_vals[2][-1])
# Foundation behaviour
eta_fshear = nf.foundation_shear_reduction_factor()
af.delta_fshear = af.v_base / (0.5 * af.k_f0_shear)
moment_f = otm_at_col_base * af.height_eff / (af.height_eff - af.fd.height)
found_rot_tol = 0.00001
bhr = (af.fd.width / af.height_eff)
n_ult = af.fd_bearing_capacity
psi = 0.75 * np.tan(sl.phi_r)
found_rot = nf.calc_fd_rot_via_millen_et_al_2020(af.k_f_0, af.fd.length, af.total_weight, n_ult,
psi, moment_f, af.height_eff)
norm_rot = found_rot / af.theta_pseudo_up
cor_norm_rot = nf.calculate_corrected_normalised_rotation(norm_rot, bhr)
eta_frot = nf.foundation_rotation_reduction_factor(cor_norm_rot)
af.delta_frot = af.theta_f * af.height_eff
af.delta_f = af.delta_frot + af.delta_fshear
af.delta_max = af.delta_ss + af.delta_f
eta_sys = nf.system_reduction_factor(af.delta_ss, af.delta_frot, af.delta_fshear, eta_ss, eta_frot, eta_fshear)
af.eta = eta_sys
af.theta_f = found_rot
af.v_base = otm_at_col_base / (af.height_eff - af.fd.height) # Assume hinge at top of foundation.
af.k_eff = af.v_base / af.delta_max
af.t_eff = dt.effective_period_from_stiffness(af.mass_eff, af.k_eff)
if verbose > 1:
print('Delta_max: ', af.delta_max)
print('Effective mass: ', af.mass_eff)
print('Effective height: ', af.height_eff)
print('Mu: ', af.mu)
print('theta yield', af.theta_y)
print('xi: ', af.xi)
print('Reduction Factor: ', af.eta)
print('t_eff', af.t_eff)
disps.append(af.delta_max)
vbases.append(af.v_base)
h1 = af.interstorey_heights[0]
h_eff = h1 * mom_ratio + fd.height
mom_f = np.zeros(af.n_cols)
for cc in range(af.n_cols):
mom_f[cc] = np.interp(mcbs_w_sfsi[cc], mom_combo_vals[cc], mom_f_vals[cc])
mf = mom_f #* h_eff / h1 * mom_ratio
mfs.append(mf)
axial_seismic = moment_equilibrium.calc_seismic_axial_load_limit(af)
col_loads = af.get_column_vert_loads()
col_loads = np.mean(col_loads, axis=1)
nfloads = col_loads + fd.pad.mass * 9.8
nfloads += axial_seismic * mu_factor
# nfloads[-1] += -axial_seismic[-1] * mu_factor
nfs.append(nfloads)
return np.array(vbases), np.array(disps), np.array(mfs), np.array(nfs)
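# A minimal usage sketch (hypothetical objects following this module's types):
# vbases, disps, mfs, nfs = push_over_rc_frame_w_sfsi_via_millen_et_al_2021(
#     dfb, sl, fd, theta_max=0.03)
# Plotting `vbases` against `disps` gives the SFSI push-over curve.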
|
StarcoderdataPython
|
188048
|
<reponame>Nexusoft/LLL-OS<gh_stars>1-10
#
# Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
#
# SPDX-License-Identifier: GPL-2.0-only
#
''' generate a yaml file with memory region info from the device tree '''
import argparse
import yaml
from hardware import config, fdt
from hardware.utils import memory, rule
def get_kernel_devices(tree: fdt.FdtParser, rules: rule.HardwareYaml):
kernel_devices = tree.get_kernel_devices()
groups = []
for dev in kernel_devices:
dev_rule = rules.get_rule(dev)  # avoid shadowing the imported `rule` module
groups += dev_rule.get_regions(dev)
return groups
def run(tree: fdt.FdtParser, hardware: rule.HardwareYaml, config: config.Config,
args: argparse.Namespace):
if not args.yaml_out:
raise ValueError('you need to provide a yaml-out to use the yaml output method')
phys_mem, reserved, _ = memory.get_physical_memory(tree, config)
kernel_devs = get_kernel_devices(tree, hardware)
dev_mem = memory.get_addrspace_exclude(list(reserved) + phys_mem + kernel_devs, config)
yaml.add_representer(int, lambda dumper, data: yaml.ScalarNode(
'tag:yaml.org,2002:int', hex(data)))
yaml_obj = {
'devices': [{'start': r.base, 'end': r.base + r.size} for r in dev_mem if r.size > 0],
'memory': [{'start': r.base, 'end': r.base + r.size} for r in phys_mem if r.size > 0]
}
yaml.dump(yaml_obj, args.yaml_out)
args.yaml_out.close()
def add_args(parser):
parser.add_argument('--yaml-out', help='output file for memory yaml',
type=argparse.FileType('w'))
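# For illustration, the emitted YAML has the following shape (hypothetical
# addresses; ints are dumped as hex by the representer above):
# devices:
# - start: 0x9000000
#   end: 0x9001000
# memory:
# - start: 0x40000000
#   end: 0x80000000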
|
StarcoderdataPython
|
77060
|
<filename>umake/test.py<gh_stars>10-100
#!/usr/bin/env python
#coding: utf-8
from umake import CMake
#cmake = CMake('3.15', 'hello')
#cmake.add_library('hello', ['src/hello.h', 'src/hello.cpp'])
#cmake.add_executable('demo', ['src/main.cpp'])
#cmake.target_link_libraries('demo', ['hello'])
cmake = CMake('3.15', 'hello')
hello = CMake.Target.Library('hello')
hello.add_dep_files([
'inc/hello.h',
'src/hello.cpp'
])
hello.add_include_dir('inc', 'PUBLIC')
demo = CMake.Target.Executable('demo')
demo.add_dep_file(
'test/main.cpp'
)
demo.add_include_dir('inc', 'PUBLIC')
demo.add_dep_lib('hello')
#opencv_pkg = CMake.find_package('OpenCV')
#demo.add_dep_lib(opencv_pkg)
cmake.add_target(hello)
cmake.add_target(demo)
cmake.dump()
|
StarcoderdataPython
|
3219178
|
import utils
import pytest
import time
from deepdiff import DeepDiff
@pytest.mark.sanity
def test_single_interface_connected_multiple_interfaces():
"""
Deploy single otg duplicate interfaces kne topology,
- namespace - 1: ixia-c
Validate,
- kne_cli error
- total pods count - 0
- total service count - 0
- operator pod health
"""
namespace1 = 'ixia-c'
namespace1_config = 'single_otg_duplicate_interface.txt'
try:
op_rscount = utils.get_operator_restart_count()
print("[Namespace:{}]Deploying KNE topology".format(
namespace1
))
_, err = utils.create_kne_config(namespace1_config, namespace1)
expected_err = "could not find peer for node otg pod otg-port-eth1 link UID 0"
err = err.split("\n")[-2]
assert expected_err in err, "Expected error mismatch!!!"
utils.ixia_c_pods_ok(namespace1, [])
utils.ixia_c_services_ok(namespace1, [])
op_rscount = utils.ixia_c_operator_ok(op_rscount)
print("[Namespace:{}]Deleting KNE topology".format(
namespace1
))
utils.delete_kne_config(namespace1_config, namespace1)
utils.ixia_c_pods_ok(namespace1, [])
utils.ixia_c_services_ok(namespace1, [])
op_rscount = utils.ixia_c_operator_ok(op_rscount)
finally:
utils.delete_kne_config(namespace1_config, namespace1)
utils.ixia_c_pods_ok(namespace1, [])
utils.ixia_c_services_ok(namespace1, [])
utils.wait_for(
lambda: utils.topology_deleted(namespace1),
'topology deleted',
timeout_seconds=30
)
utils.delete_namespace(namespace1)
|
StarcoderdataPython
|
1748823
|
<gh_stars>0
'''
* @Author: csy
* @Date: 2019-04-28 13:56:20
* @Last Modified by: csy
* @Last Modified time: 2019-04-28 13:56:20
'''
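# range(1, 5) yields 1 through 4 - the stop value is exclusive.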
for value in range(1, 5):
print(value)
numbers = list(range(1, 6))
print(numbers)
|
StarcoderdataPython
|
37016
|
from datetime import datetime
class Price:
date: datetime = datetime(1, 1, 1)
currency: str = 'BRL'
symbol: str = ''
current: float = 0
open: float = 0
close: float = 0
low: float = 0
high: float = 0
volume: float = 0
interval: str = ''
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
|
StarcoderdataPython
|
103684
|
#!/usr/bin/env python3
import os
import time
import subprocess
import random
import inquirer
import stat
import wget
from libsw import php, nginx, user, bind, cert, db, settings, input_util
from getpass import getpass
from mysql import connector
from pwd import getpwnam
def list_installations():
"""
List all domains with a valid WordPress installation
"""
sites = nginx.enabled_sites()
wp_sites = []
for site in sites:
if is_wordpress_installation(site):
wp_sites.append(site)
return wp_sites
def select_installation(query_message):
"""
Have the user select from a list of all domains with enabled vhost files.
Args:
query_message - The message to display to the user in the prompt
"""
domain_list = list_installations()
questions = [
inquirer.List('f',
message=query_message,
choices=domain_list
)
]
domain = inquirer.prompt(questions)['f']
return domain
def is_wordpress_installation(domain):
"""
Check if a domain has a valid WordPress installation
Args:
domain - The domain associated with the installation
"""
sys_user = nginx.user_from_domain(domain)
webroot = user.webroot(sys_user)
if os.path.exists(webroot + 'wp-content') and \
os.path.exists(webroot + 'wp-includes') and \
os.path.exists(webroot + 'wp-config.php'):
return True
return False
def wp_cron_disabled(domain):
"""
Check if a domain has it's built in cron disabled
Args:
domain - The domain associated with the installation
"""
sys_user = nginx.user_from_domain(domain)
webroot = user.webroot(sys_user)
output = subprocess.getoutput('sudo -u "' + sys_user + '" -i wp config get --path="' + webroot + '" DISABLE_WP_CRON')
output = output.lower()
if output == 'true':
return True
return False
def sys_cron_enabled(domain):
"""
Check if a domain has a system cron
Args:
domain - The domain associated with the installation
"""
sys_user = nginx.user_from_domain(domain)
output = subprocess.getoutput('sudo -u "' + sys_user + '" -i crontab -l 2>/dev/null | grep -Ev "^[ \s]*#"')
if output.find('/wp-cron.php') == -1:
return False
return True
# for line in output:
# if line.endswith('/wp-cron.php'):
# return True
# return False
def get_version(domain):
"""
Check the WordPress version for a domain
Args:
domain - The domain associated with the installation
"""
sys_user = nginx.user_from_domain(domain)
webroot = user.webroot(sys_user)
return subprocess.getoutput('sudo -u "' + sys_user + '" -i wp core version --path="' + webroot + '"')
def get_db_info(sys_user, webroot=False):
"""
Get the database name, user and password for an existing WordPress
installation.
Args:
sys_user - The system user that the WordPress site is stored in
webroot - (optional) the webroot for the WordPress installation
"""
if webroot == False:
webroot = user.webroot(sys_user)
db_user = subprocess.getoutput('sudo -u "' + sys_user + '" -i wp config get --path="' + webroot + '" DB_USER')
name = subprocess.getoutput('sudo -u "' + sys_user + '" -i wp config get --path="' + webroot + '" DB_NAME')
password = subprocess.getoutput('sudo -u "' + sys_user + '" -i wp config get --path="' + webroot + '" DB_PASSWORD')
return (name, db_user, password)
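# Usage sketch (hypothetical user; note the return order is name, user, password):
# db_name, db_user, db_pass = get_db_info('examplesite')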
def update_config(sys_user, db_name, db_user, db_password, path=False):
"""
Update the database name, user and password for a WordPress installation.
Args:
sys_user - The system user that the WordPress site is stored in
db_name - The new database name
db_user - The new database user
db_password - The new database password
path - (optional) the webroot for the WordPress installation
"""
if path == False:
path = user.home_dir(sys_user) + 'public_html/'
set_config_value('DB_USER', db_user, sys_user, path)
set_config_value('DB_PASSWORD', db_password, sys_user, path)
set_config_value('DB_NAME', db_name, sys_user, path)
def set_config_value(name, value, sys_user, path):
"""
Update a text value in a WordPress installation's configuration file.
Args:
name - The name of the configuration setting
value - The new value for the setting
sys_user - The system user that the WordPress site is stored in
path - The webroot for the WordPress installation; pass False to use
the user's default public_html directory
"""
if path == False:
path = user.home_dir(sys_user) + 'public_html/'
os.system('sudo -u "' + sys_user + '" -i wp config set ' +
' --path="' + path + '" "' + name + '" "' + value + '"')
def install_files(sys_user, db_name, db_user, db_password, path=False):
"""
Download Wordpress for a given system user. Then set the database name, user
and password for the new WordPress installation.
Args:
sys_user - The system user that the WordPress site is stored in
db_name - The existing database name
db_user - The existing database user
db_password - <PASSWORD>
path - (optional) the webroot for the WordPress installation
"""
if path == False:
path = user.home_dir(sys_user) + 'public_html/'
# Set environment
pwd = os.getcwd()
whoami = os.geteuid()
os.seteuid(getpwnam(sys_user).pw_uid)
os.chdir(path)
# Download WordPress
os.system("su - '" + sys_user + "' -c \"wp core download --path='" + path + "'\"")
# Configure WordPress
command = "su - '" + sys_user + "' -c \"wp config create --skip-check" + \
" --path='" + path + "'" + \
" --dbname='" + db_name + "'" + \
" --dbuser='" + db_user + "'" + \
" --dbpass='" + db_password + "'" + \
"\""
print(command)
os.system(command)
# Reset environment
os.seteuid(whoami)
os.chdir(pwd)
def cert_try_loop(domain, username):
"""
Try up to 5 times to get a certificate for a domain.
Args:
domain - The domain to generate the certificate for
username - The system user the domain belongs to
"""
cert_try = 0
no_cert = True
time.sleep(2)
while no_cert and cert_try < 5:
cert_try += 1
no_cert = not cert.create_std_le_certs(domain, username)
if no_cert:
wait = 30 * cert_try
print('Cert Failed. Waiting ' + str(wait) + ' seconds and then trying again...')
time.sleep(wait)
if no_cert:
cert_try += 1
no_cert = not cert.create_std_le_certs(domain, username)
if no_cert:
print('Cert Failed. Investigate, then wait at least 30 seconds; then to try again run: sw nginx addssl ' + domain)
return not no_cert
def make_site(username, domain, php_version, db_conn):
"""
Create a new WordPress website.
Args:
username - The existing system user for the site
domain - The domain name for the site
php_version - The php version to user for the site (subversion)
db_conn - An open database connection with rights to create the database
and user
"""
user.make_user(username)
bind.make_zone(domain)
bind.rebuild_zone_index()
nginx.make_vhost(username, domain)
php.make_vhost(username, domain, php_version)
database_name = username[:18]
database_user = database_name
database_pass = input_util.random_string()
print("Setting db password to: " + database_pass)
db.create_database_with_user(database_name, database_user, database_pass, db_conn)
install_files(username, database_name, database_user, database_pass)
has_cert = cert_try_loop(domain, username)
if has_cert:
nginx.add_ssl_to_site_hosts(domain)
return has_cert
def clone_site(old_site, new_user, new_domain, db_conn):
"""
Create a new WordPress website cloned from an existing site.
Args:
old_site - The domain name for the site to be cloned from
new_user - The non-existing system user for the cloned site
new_domain - The domain name for the cloned site
db_conn - An open database connection with rights to create the database
and user
"""
php_version = php.get_site_version(old_site)
old_user = php.user_from_domain(old_site)
user.make_user(new_user)
bind.make_zone(new_domain)
bind.rebuild_zone_index()
nginx.make_vhost(new_user, new_domain)
for rule_id in nginx.get_bypassed_modsec_rules(old_site):
nginx.bypass_modsec_rule(new_domain, rule_id)
php.make_vhost(new_user, new_domain, php_version)
db_name = new_user[:18]
db_user = db_name
db_pass = input_util.random_string(20, False)
print("Setting db password to: " + db_pass)
db.create_database_with_user(db_name, db_user, db_pass, db_conn)
old_db, old_db_user, old_pass = get_db_info(old_user)  # returns (name, user, password)
db.clone(old_db, db_name, db_conn)
old_dir = user.webroot(old_user)
new_dir = user.webroot(new_user)
print('Copying site files...')
os.system("cp -a '" + old_dir + ".' '" + new_dir + "'")
print('Copy complete, fixing permissions...')
os.system("find '" + new_dir + "' -user '" + old_user + "' -exec chown '" + new_user + "' {} \;")
os.system("find '" + new_dir + "' -group '" + old_user + "' -exec chgrp '" + new_user + "' {} \;")
print('Permissions fixed')
os.system("sed -i 's~" + old_dir + "~" + new_dir + "~g' " + new_dir + "wp-config.php")
update_config(new_user, db_name, db_user, db_pass)
os.system("sudo -u '" + new_user + "' -i wp search-replace --path='" + new_dir + "' '" + old_site + "' '" + new_domain + "'")
os.system("sudo -u '" + new_user + "' -i wp cache flush --path='" + new_dir + "'")
has_cert = cert_try_loop(new_domain, new_user)
if has_cert:
nginx.add_ssl_to_site_hosts(new_domain)
return has_cert
def wizard_make_site():
"""
Create a new WordPress site, prompting the user for all needed information.
"""
print('Your domain should already be using this server as its nameservers.')
print('Wait at least five minutes after changing nameservers to continue with this script.')
username = user.select_new_username()
domain = input('New Domain: ')
php_version = php.select_version()
mydb = db.get_connection()
is_ssl = make_site(username, domain, php_version, mydb)
add_cron(username)
protocol = 'http'
if is_ssl:
protocol = 'https'
print('Now go to ' + protocol + '://' + domain + ' to complete the WordPress setup wizard.')
def wizard_clone_site():
"""
Clone an existing WordPress site, prompting the user for all needed
information.
"""
print('Enter information for new site:')
new_user = user.select_new_username()
new_domain = input('New Domain: ')
old_site = php.select_conf('Select site to clone from: ')['file']
mydb = db.get_connection()
is_ssl = clone_site(old_site, new_user, new_domain, mydb)
add_cron(new_user)
protocol = 'http'
if is_ssl:
protocol = 'https'
print('Now go to ' + protocol + '://' + new_domain + ' to check the cloned site.')
def add_cron(sys_user):
"""
Disable the fake cron job in WordPress and create a real one with the system
cron daemon. (speeds up page loads)
"""
user_info = getpwnam(sys_user)
crons = subprocess.getoutput("su - " + sys_user + " -c 'crontab -l 2>/dev/null'")
found = False
for line in crons.splitlines():  # getoutput returns a single string
if line.startswith('#'):
continue
if line.find('wp-cron.php') != -1:
found = True
break
if not found:
minute = random.randint(0,59)
cron = str(minute) + ' 0 * * * ~/.local/bin/php ~/public_html/wp-cron.php'
command = "su - " + sys_user + " -c \"crontab -l 2>/dev/null | { cat; echo '" + cron + "'; } | crontab -\" "
#print(command)
subprocess.getoutput(command)
print('Created system cron')
subprocess.getoutput("su - " + sys_user + " -c \"wp config set --path='~/public_html/' 'DISABLE_WP_CRON' true\" ")
print('Disabled WordPress cron')
return not found
def create_one_time_login(domain):
"""
Create a PHP file to give a one-time login into a WordPress site without a
password. There is no safety measure to remove this link if it is not used.
Args:
domain - The domain that needs a one-time login
"""
sys_user = nginx.user_from_domain(domain)
passcode = input_util.random_string(40, False)
passname = input_util.random_string(40, False)
docroot = nginx.docroot_from_domain(domain)
target_file = docroot + 'wp-admin/wp-autologin-' + passname + '.php'
site_url = get_site_url(sys_user, docroot)
# Set environment
whoami = os.geteuid()
os.seteuid(getpwnam(sys_user).pw_uid)
with open(settings.get('install_path') + 'etc/wp-autologin.php', 'r') as template:
with open(target_file, 'w') as php_file:
for line in template:
line = line.replace('PASSWORDD', passcode, 10000)
php_file.write(line)
# Reset environment
os.seteuid(whoami)
print('Go to: ' + site_url + 'wp-admin/wp-autologin-' + passname + '.php?pass=' + passcode)
def get_outdated(domain):
docroot = nginx.docroot_from_domain(domain)
sys_user = nginx.user_from_domain(domain)
core = subprocess.getoutput("su - " + sys_user + " -c 'wp core check-update --path=\"" + docroot + "\" --fields=update_type --format=csv 2>/dev/null | tail -n +2'")
themes = subprocess.getoutput("su - " + sys_user + " -c 'wp theme list --path=\"" + docroot + "\" --update=available --fields=name --format=csv 2>/dev/null | tail -n +2'")
plugins = subprocess.getoutput("su - " + sys_user + " -c 'wp plugin list --path=\"" + docroot + "\" --update=available --fields=name --format=csv 2>/dev/null | tail -n +2'")
return [core, themes.splitlines(), plugins.splitlines()]
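# Sketch of the returned structure (hypothetical values):
# get_outdated('example.com') -> ['major', ['twentytwenty'], ['akismet', 'wp-super-cache']]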
def get_site_option(sys_user, docroot, option):
value = subprocess.getoutput("su - " + sys_user + " -c 'wp option get " + option + " --path=\"" + docroot + "\"' ")
return value
def get_site_url(sys_user, docroot):
url = get_site_option(sys_user, docroot, 'siteurl')
if url[-1] != '/':
url += '/'
return url
def get_site_home(sys_user, docroot):
home = get_site_option(sys_user, docroot, 'home')
if home[-1] != '/':
home += '/'
return home
def install_wp_cli():
install_directory = '/opt/wp-cli/'
download_url = 'https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar'
save_file = install_directory + 'wp-cli.phar'
bin_path = '/usr/local/bin/wp'
if not os.path.exists(install_directory):
os.makedirs(install_directory)
wget.download(download_url, save_file)
old_mode = os.stat(save_file)
os.chmod(save_file, old_mode.st_mode | stat.S_IEXEC)
if not os.path.exists(bin_path):
os.makedirs('/usr/local/bin', exist_ok=True)  # the directory may already exist
os.symlink(save_file, bin_path)
|
StarcoderdataPython
|
3382685
|
<reponame>priyamshah112/Project-Descripton-Blog
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from parler.utils.context import switch_language
from aldryn_categories.models import Category
from aldryn_categories.fields import (
CategoryForeignKey,
CategoryManyToManyField,
CategoryModelChoiceField,
CategoryMultipleChoiceField,
CategoryOneToOneField,
)
from .base import CategoryTestCaseMixin
class TestCategoryField(CategoryTestCaseMixin, TestCase):
def test_category_model_choice_field(self):
root = Category.add_root(name="root")
root.save()
child1 = root.add_child(name="child1")
child2 = root.add_child(name="child2")
grandchild1 = child1.add_child(name="grandchild1")
bad_grandchild = child1.add_child(
name='bad grandchild<script>alert("bad stuff");</script>')
field = CategoryModelChoiceField(None)
self.assertEqual(
field.label_from_instance(child2),
" child2",
)
self.assertEqual(
field.label_from_instance(grandchild1),
" grandchild1",
)
self.assertEqual(
field.label_from_instance(bad_grandchild),
' bad grandchild<script>alert'
'("bad stuff");</script>',
)
# Tests that the field correctly throws an ImproperlyConfigured
# exception if the given object is not a Category (or something that
# acts like one)
with self.assertRaises(ImproperlyConfigured):
field.label_from_instance(object)
# Check that using an untranslated language does not raise exceptions
with switch_language(child1, 'it'):
try:
field.label_from_instance(child1)
except ImproperlyConfigured:
self.fail("Translating to an unavailable language should not "
"result in an exception.")
def test_category_multiple_choice_field(self):
root = Category.add_root(name="root")
root.save()
child1 = root.add_child(name="child1")
child2 = root.add_child(name="child2")
grandchild1 = child1.add_child(name="grandchild1")
bad_grandchild = child1.add_child(
name='bad grandchild<script>alert("bad stuff");</script>')
root = self.reload(root)
child1 = self.reload(child1)
field = CategoryMultipleChoiceField(None)
self.assertEqual(
field.label_from_instance(child2),
" child2",
)
self.assertEqual(
field.label_from_instance(grandchild1),
" grandchild1",
)
self.assertEqual(
field.label_from_instance(bad_grandchild),
' bad grandchild<script>alert'
'("bad stuff");</script>',
)
# Tests that the field correctly throws an ImproperlyConfigured
# exception if the given object is not a Category (or something that
# acts like one)
with self.assertRaises(ImproperlyConfigured):
field.label_from_instance(object)
# Check that using an untranslated language does not raise exceptions
with switch_language(child1, 'it'):
try:
field.label_from_instance(child1)
except ImproperlyConfigured:
self.fail("Translating to an unavailable language should not "
"result in an exception.")
def test_category_fk_field(self):
field = CategoryForeignKey(Category)
form_field = field.formfield()
self.assertTrue(isinstance(form_field, CategoryModelChoiceField))
field_type = field.get_internal_type()
self.assertEqual(field_type, 'ForeignKey')
def test_category_one_to_one_field(self):
field = CategoryOneToOneField(Category)
form_field = field.formfield()
self.assertTrue(isinstance(form_field, CategoryModelChoiceField))
field_type = field.get_internal_type()
self.assertEqual(field_type, 'ForeignKey')
def test_category_many_to_many_field(self):
field = CategoryManyToManyField(Category)
form_field = field.formfield()
self.assertTrue(isinstance(form_field, CategoryMultipleChoiceField))
field_type = field.get_internal_type()
self.assertEqual(field_type, 'ManyToManyField')
|
StarcoderdataPython
|
109878
|
<reponame>sheagk/leetcode_solutions
## https://leetcode.com/problems/find-all-duplicates-in-an-array/
## pretty simple solution -- use a set to keep track of the numbers
## that have already appeared (because lookup time is O(1) given
## the implementation in python via a hash table). Gives me an O(N)
## runtime
## runtime is 79th percentile; memory is 19th percentile
from typing import List

class Solution:
def findDuplicates(self, nums: List[int]) -> List[int]:
already_appeared = set([])
twice = []
while len(nums):
n = nums.pop()
if n in already_appeared:
twice.append(n)
else:
already_appeared.add(n)
return twice
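# Example (a sketch; note that findDuplicates consumes `nums` and the result
# order follows pop() from the end of the list):
# Solution().findDuplicates([4, 3, 2, 7, 8, 2, 3, 1])  # -> [2, 3]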
|
StarcoderdataPython
|
1722670
|
<reponame>veot/ifcbdb
# Generated by Django 2.1.7 on 2019-05-28 06:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0018_auto_20190508_1659'),
]
operations = [
migrations.AlterField(
model_name='bin',
name='concentration',
field=models.FloatField(default=-9999999),
),
migrations.AlterField(
model_name='bin',
name='humidity',
field=models.FloatField(default=-9999999),
),
migrations.AlterField(
model_name='bin',
name='look_time',
field=models.FloatField(default=-9999999),
),
migrations.AlterField(
model_name='bin',
name='ml_analyzed',
field=models.FloatField(default=-9999999),
),
migrations.AlterField(
model_name='bin',
name='run_time',
field=models.FloatField(default=-9999999),
),
migrations.AlterField(
model_name='bin',
name='temperature',
field=models.FloatField(default=-9999999),
),
]
|
StarcoderdataPython
|
27709
|
<gh_stars>0
# Generated by Django 3.2.9 on 2021-12-13 21:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('awards', '0004_auto_20211213_1253'),
]
operations = [
migrations.RenameField(
model_name='rating',
old_name='avg_rate',
new_name='average',
),
]
|
StarcoderdataPython
|
1700654
|
__VERSION__ = "0.17.1"
|
StarcoderdataPython
|
1657701
|
import requests
import json
class runner:
def __init__(self):
self._apiUrl = 'https://api.jdoodle.com/v1/execute'
self._apiID = 'your id here' # change it to your api id
self._apiSecret = 'your secret here' # change it to your api secret
def sendCode(self, source_code, stdin):
headers = {'content-type': 'application/json'}
payload = json.dumps({'clientId': self._apiID,
'clientSecret': self._apiSecret,
'script': source_code,
'stdin': stdin,
'language': 'java',
'versionIndex': '3'})
myPost = requests.post(self._apiUrl, headers=headers, data=payload)
return myPost.json()
def checkValue(self, source_code, myInput):
# wrap the student's code in a class with a main() so several inputs can be checked in a single run
myModCode = 'public class main{' + \
source_code +\
'public static void main (String[] args){'
# call multiple times of the student's function to get all results in single run
for item in myInput:
myModCode += 'System.out.println(StudentAnswer(\"' + item + '\"));'
myModCode += '}}'
# send code and return result
return self.sendCode(myModCode, '')
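# Usage sketch (assumes valid JDoodle credentials are set in __init__ above):
# r = runner()
# result = r.checkValue('static String StudentAnswer(String s){ return s; }', ['abc'])
# print(result.get('output'))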
|
StarcoderdataPython
|
3230291
|
<reponame>amjadmajid/rosbook<filename>chessbot/r2_chess_pgn.py
#!/usr/bin/env python
import sys, rospy, tf, moveit_commander, random
from geometry_msgs.msg import Pose, Point, Quaternion
import pgn
class R2ChessboardPGN:
def __init__(self):
self.left_arm = moveit_commander.MoveGroupCommander("left_arm")
self.left_hand = moveit_commander.MoveGroupCommander("left_hand")
def setGrasp(self, state):
if state == "pre-pinch":
vec = [ 0.3, 0, 1.57, 0, # index
-0.1, 0, 1.57, 0, # middle
0, 0, 0, # ring
0, 0, 0, # pinkie
0, 1.1, 0, 0] # thumb
elif state == "pinch":
vec = [ 0, 0, 1.57, 0,
0, 0, 1.57, 0,
0, 0, 0,
0, 0, 0,
0, 1.1, 0, 0]
elif state == "open":
vec = [0] * 18
else:
raise ValueError("unknown hand state: %s" % state)
self.left_hand.set_joint_value_target(vec)
self.left_hand.go(True)
def setPose(self, x, y, z, phi, theta, psi):
orient = \
Quaternion(*tf.transformations.quaternion_from_euler(phi, theta, psi))
pose = Pose(Point(x, y, z), orient)
self.left_arm.set_pose_target(pose)
self.left_arm.go(True)
def setSquare(self, square, height_above_board):
if len(square) != 2 or not square[1].isdigit():
raise ValueError(
"expected a chess rank and file like 'b3' but found %s instead" %
square)
print "going to %s" % square
rank_y = -0.24 - 0.05 * int(square[1])
file_x = 0.5 - 0.05 * (ord(square[0]) - ord('a'))
z = float(height_above_board) + 1.0
self.setPose(file_x, rank_y, z, 3.14, 0.3, -1.57)
def playGame(self, pgn_filename):
game = pgn.loads(open(pgn_filename).read())[0]
self.setGrasp("pre-pinch")
self.setSquare("a1", 0.15)
for move in game.moves:
self.setSquare(move[0:2], 0.10)
self.setSquare(move[0:2], 0.015)
self.setGrasp("pinch")
self.setSquare(move[0:2], 0.10)
self.setSquare(move[2:4], 0.10)
self.setSquare(move[2:4], 0.015)
self.setGrasp("pre-pinch")
self.setSquare(move[2:4], 0.10)
if __name__ == '__main__':
moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node('r2_chess_pgn',anonymous=True)
argv = rospy.myargv(argv=sys.argv) # filter out any arguments used by ROS
if len(argv) != 2:
print "usage: r2_chess_pgn.py PGNFILE"
sys.exit(1)
print "playing %s" % argv[1]
r2pgn = R2ChessboardPGN()
r2pgn.playGame(argv[1])
moveit_commander.roscpp_shutdown()
|
StarcoderdataPython
|
193483
|
<reponame>jayatsandia/svp_energy_lab
"""
Copyright (c) 2017, Sandia National Labs and SunSpec Alliance
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the names of the Sandia National Labs and SunSpec Alliance nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Questions can be directed to <EMAIL>
"""
import os
import time
import socket
import serial
import visa
import dcsim
chroma_info = {
'name': os.path.splitext(os.path.basename(__file__))[0],
'mode': 'Chroma 62000P'
}
def dcsim_info():
return chroma_info
def params(info, group_name=None):
gname = lambda name: group_name + '.' + name
pname = lambda name: group_name + '.' + GROUP_NAME + '.' + name
mode = chroma_info['mode']
info.param_add_value(gname('mode'), mode)
info.param_group(gname(GROUP_NAME), label='%s Parameters' % mode,
active=gname('mode'), active_value=mode, glob=True)
# Constant current/voltage modes
info.param(pname('v_max'), label='Max Voltage', default=300.0)
info.param(pname('v'), label='Voltage', default=50.0)
info.param(pname('i_max'), label='Max Current', default=100.0)
info.param(pname('i'), label='Power Supply Current', default=21.0)
# Comms
info.param(pname('comm'), label='Communications Interface', default='USB',
values=['USB', 'Serial', 'TCP/IP', 'GPIB'])
# USB
info.param(pname('usb_name'), label='USB Device String', active=pname('comm'),
active_value=['USB'], default='USB0::0x1698::0x0837::008000000452::INSTR')
# Serial
info.param(pname('serial_port'), label='Serial Port', active=pname('comm'),
active_value=['Serial'], default='com7')
# IP
info.param(pname('ip_addr'), label='IP Address', active=pname('comm'),
active_value=['TCP/IP'], default='192.168.1.10')
info.param(pname('ip_port'), label='IP Port', active=pname('comm'),
active_value=['TCP/IP'], default=5025)
# GPIB
# parameters for tuning unintentional islanding tests
# info.param(pname('volts'), label='Voltage', default=220)
# info.param(pname('freq'), label='Frequency', default=50)
GROUP_NAME = 'chroma_62000P'
class DCSim(dcsim.DCSim):
"""
Implementation for Chroma Programmable DC power supply 62050P-100-100.
Valid parameters:
mode - 'Chroma 62000P/63200'
auto_config - ['Enabled', 'Disabled']
v_nom
v_max
i_max
freq
profile_name
serial_port
baudrate
timeout
write_timeout
ip_addr
ip_port
"""
def __init__(self, ts, group_name):
self.buffer_size = 1024
self.conn = None
self.conn_load = None
dcsim.DCSim.__init__(self, ts, group_name)
# power supply parameters
self.v_max_param = self._param_value('v_max')
self.v_param = self._param_value('v')
self.i_max_param = self._param_value('i_max')
self.i_param = self._param_value('i')
self.comm = self._param_value('comm')
self.serial_port = self._param_value('serial_port')
self.ipaddr = self._param_value('ip_addr')
self.ipport = self._param_value('ip_port')
self.usb_name = self._param_value('usb_name')
self.baudrate = 115200
self.timeout = 5
self.write_timeout = 2
self.cmd_str = ''
self._cmd = None
self._query = None
# Establish communications with the DC power supply
if self.comm == 'Serial':
self.open() # open communications
self._cmd = self.cmd_serial
self._query = self.query_serial
elif self.comm == 'TCP/IP':
self._cmd = self.cmd_tcp
self._query = self.query_tcp
elif self.comm == 'USB':
# rm = visa.ResourceManager()
# self.ts.log_debug(rm.list_resources())
self._cmd = self.cmd_usb
self._query = self.query_usb
def _param_value(self, name):
return self.ts.param_value(self.group_name + '.' + GROUP_NAME + '.' + name)
# Serial commands for power supply
def cmd_serial(self, cmd_str):
self.cmd_str = cmd_str
try:
if self.conn is None:
raise dcsim.DCSimError('Communications port to power supply not open')
self.conn.flushInput()
self.conn.write(cmd_str)
except Exception, e:
raise dcsim.DCSimError(str(e))
# Serial queries for power supply
def query_serial(self, cmd_str):
resp = ''
more_data = True
self.cmd_serial(cmd_str)
while more_data:
try:
count = self.conn.inWaiting()
if count < 1:
count = 1
data = self.conn.read(count)
if len(data) > 0:
for d in data:
resp += d
if d == '\n':
more_data = False
break
else:
raise dcsim.DCSimError('Timeout waiting for response')
except dcsim.DCSimError:
raise
except Exception, e:
raise dcsim.DCSimError('Timeout waiting for response - More data problem')
return resp
# TCP commands for power supply
def cmd_tcp(self, cmd_str):
try:
if self.conn is None:
self.ts.log('ipaddr = %s ipport = %s' % (self.ipaddr, self.ipport))
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.settimeout(self.timeout)
self.conn.connect((self.ipaddr, self.ipport))
# print 'cmd> %s' % (cmd_str)
self.conn.send(cmd_str)
except Exception, e:
raise dcsim.DCSimError(str(e))
# TCP queries for power supply and load
def query_tcp(self, cmd_str):
resp = ''
more_data = True
self._cmd(cmd_str)
while more_data:
try:
data = self.conn.recv(self.buffer_size)
if len(data) > 0:
for d in data:
resp += d
if d == '\n': #\r
more_data = False
break
except Exception, e:
raise dcsim.DCSimError('Timeout waiting for response')
return resp
# USB queries for power supply
def query_usb(self, cmd_str):
# setup connection if not already established.
try:
if self.conn is None:
self.ts.log('USB device = %s' % self.usb_name)
rm = visa.ResourceManager()
self.conn = rm.open_resource(self.usb_name)
resp = self.conn.query(cmd_str)
#self.ts.log_debug('cmd_str = %s, resp = %s' % (cmd_str, resp))
except Exception, e:
raise dcsim.DCSimError('Timeout waiting for response')
return resp
# USB commands for power supply
def cmd_usb(self, cmd_str):
try:
if self.conn is None:
self.ts.log('USB device = %s' % self.usb_name)
rm = visa.ResourceManager()
self.conn = rm.open_resource(self.usb_name)
#self.conn.write('*RST\n')
self.conn.write('*CLS\n')
self.conn.write(cmd_str)
except Exception, e:
raise dcsim.DCSimError(str(e))
# Commands for power supply
def cmd(self, cmd_str):
self.cmd_str = cmd_str
# self.ts.log_debug('cmd_str = %s' % cmd_str)
try:
self._cmd(cmd_str)
resp = self._query('SYSTem:ERRor?\n') #\r
#self.ts.log_debug('error resp = %s' % resp)
if len(resp) > 0:
if resp[0] != '0':
raise dcsim.DCSimError(resp + ' ' + self.cmd_str)
except Exception, e:
raise dcsim.DCSimError(str(e))
# Queries for power supply
def query(self, cmd_str):
# self.ts.log_debug('query cmd_str = %s' % cmd_str)
try:
resp = self._query(cmd_str).strip()
except Exception, e:
raise dcsim.DCSimError(str(e))
return resp
def info(self):
"""
Return information string for the device.
"""
return self.query('*IDN?\n')
def config(self):
"""
Perform any configuration for the simulation based on the previously
provided parameters.
"""
# Setup the power supply
self.ts.log('Battery power supply model: %s' % self.info().strip())
# set voltage limits
v_max_set = self.voltage_max()
if v_max_set != self.v_max_param:
v_max_set = self.voltage_max(voltage=self.v_max_param)
v_min_set = self.voltage_min()
if v_min_set != 0:
v_min_set = self.voltage_min(voltage=20.)
self.ts.log('Battery power supply voltage range: [%s, %s] volts' % (v_min_set, v_max_set))
v_set = self.voltage()
if v_set != self.v_param:
self.ts.log_debug('Power supply voltage is %s, should be %s' % (v_set, self.v_param))
v_set = self.voltage(voltage=self.v_param)
self.ts.log('Battery power supply voltage: %s volts' % v_set)
i_max_set = self.current_max()
if i_max_set != self.i_max_param:
i_max_set = self.current_max(self.i_max_param)
self.ts.log('Battery power supply max current: %s Amps' % i_max_set)
# set current
self.current_min(current=0.) # get the current limit out of the way.
i = self.i_param
i_set = self.current()
self.ts.log_debug('Power supply current is %s, should be %s' % (i_set, i))
if i_set != i:
i_set = self.current(current=i)
self.ts.log_debug('Power supply current is %0.3f, should be %0.3f' % (i_set, i))
if i_set == 0.0:
self.ts.log_warning('Make sure the DC switch is closed!')
self.ts.log('Battery power supply current settings: i = %s' % i_set)
''' Not implemented
output_mode_set = self.output_mode()
self.ts.log('Battery power supply mode is %s' % output_mode_set)
if output_mode_set == 'CVCC':
self.output_mode(mode='CVCC')
'''
# set power supply output
self.output(start=True)
outputting = self.output()
if outputting == 'ON':
self.ts.log_warning('Battery power supply output is started!')
# Power Supply Functions
def open(self):
"""
Open the communications resources associated with the grid simulator.
"""
try:
self.conn = serial.Serial(port=self.serial_port, baudrate=self.baudrate, bytesize=8, stopbits=1,
xonxoff=0, timeout=self.timeout, writeTimeout=self.write_timeout)
time.sleep(2)
#self.cmd('CONFigure:REMote ON\n')
except Exception, e:
raise dcsim.DCSimError(str(e))
def close(self):
"""
Close any open communications resources associated with the grid
simulator.
"""
if self.conn:
if self.comm == 'Serial':
self.cmd('CONFigure:REMote OFF\n')
self.conn.close()
def output(self, start=None):
"""
Start/stop power supply output
start: if False stop output, if True start output
"""
if start is not None:
if start is True:
self.cmd('CONF:OUTP ON\n')
else:
self.cmd('CONF:OUTP OFF\n')
output = self.query('CONF:OUTP?\n')
return output
def output_mode(self, mode=None):
"""
Start/stop power supply output
mode: 'CVCC' constant voltage constant current
"""
if mode is not None:
self.cmd('OUTPut:MODE %s' % mode)
mode = self.query('OUTPut:MODE?\n')
return mode
def current(self, current=None):
"""
Set the value for current if provided. If none provided, obtains the value for current.
"""
if current is not None:
self.cmd('SOUR:CURR %0.1f\n' % current)
i = self.query('SOUR:CURR?\n')
return float(i)
def current_max(self, current=None):
"""
Set the value for max current if provided. If none provided, obtains the value for max current.
"""
if current is not None:
self.cmd('SOUR:CURR:LIMIT:HIGH %0.1f\n' % current)
self.cmd('SOUR:CURR:PROT:HIGH %0.1f\n' % current)
i1 = self.query('SOUR:CURR:LIMIT:HIGH?\n')
i2 = self.query('SOUR:CURR:PROT:HIGH?\n')
return min(float(i1), float(i2))
def current_min(self, current=None):
"""
Set the value for min current if provided. If none provided, obtains
the value for min current.
"""
if current is not None:
self.cmd('SOUR:CURR:LIMIT:LOW %0.1f\n' % current)
i = self.query('SOUR:CURR:LIMIT:LOW?\n')
return float(i)
def voltage(self, voltage=None):
"""
Set the value for voltage. If none provided, obtains the value for voltage.
"""
if voltage is not None:
self.cmd('SOUR:VOLT %0.1f\n' % voltage)
v = self.query('SOUR:VOLT?\n')
return float(v)
def voltage_max(self, voltage=None):
"""
Set the value for max voltage if provided. If none provided, obtains
the value for max voltage.
"""
if voltage is not None:
self.cmd('SOUR:VOLT:LIMIT:HIGH %0.1f\n' % voltage)
self.cmd('SOUR:VOLT:PROT:HIGH %0.1f\n' % voltage)
v1 = self.query('SOUR:VOLT:LIMIT:HIGH?\n')
v2 = self.query('SOUR:VOLT:PROT:HIGH?\n')
return min(float(v1), float(v2))
def voltage_min(self, voltage=None):
"""
Set the value for min voltage if provided. If none provided, obtains
the value for min voltage.
"""
if voltage is not None:
self.cmd('SOUR:VOLT:LIMIT:LOW %0.1f\n' % voltage)
v = self.query('SOUR:VOLT:LIMIT:LOW?\n')
return float(v)
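# Usage sketch (hypothetical; the concrete class name and constructor
# arguments depend on the surrounding module and are assumptions here):
# ps = DCPowerSupply(...)
# ps.open() # open the serial/VISA connection
# ps.voltage(voltage=48.0) # sends 'SOUR:VOLT 48.0'
# ps.current_max(10.0) # sends 'SOUR:CURR:LIMIT:HIGH 10.0' and 'SOUR:CURR:PROT:HIGH 10.0'
# ps.output(start=True) # sends 'CONF:OUTP ON'
# print(ps.info()) # '*IDN?' identification string
# ps.close()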
|
StarcoderdataPython
|
1607481
|
<filename>lib/tpn/invariant.py<gh_stars>1-10
#===============================================================================
# Imports
#===============================================================================
import os
import re
import inspect
import datetime
import linecache
import itertools
from .util import (
endswith,
isiterable, # assumed available in .util; used by Invariant.__check_existing
)
from os.path import (
isdir,
isfile,
exists,
abspath,
dirname,
basename,
normpath,
)
#===============================================================================
# Globals
#===============================================================================
SUFFIXES = (
'Option',
'Error',
'Arg',
)
# Quick hack for 3.x support.
try:
STRING_TYPES = (str, unicode)
except NameError:
STRING_TYPES = (str,)
#===============================================================================
# Base Invariant Class
#===============================================================================
class Invariant(BaseException):
_arg = None
_type = None
_name = None
_help = None
_maxlen = None
_minlen = None
_action = None
_default = None
_metavar = None
_opt_long = None
_opt_type = None
_opt_short = None
_mandatory = True
_type_desc = None
_capitalized_name = None
__filter = lambda _, n: (n[0] != '_' and n not in ('message', 'args'))
__keys = lambda _, n: (
n[0] != '_' and n not in {
'args',
'actual',
'message',
'expected',
}
)
def __init__(self, obj, name):
self._obj = obj
self._name = name
self.actual = None
self.dst_attr = None
self.dst_value = None
self._existing = None
self._existing_str = None
n = self.__class__.__name__.replace('Error', '').lower()
if n.endswith('arg'):
n = n[:-3]
if hasattr(self, '_regex'):
self._pattern = re.compile(self._regex)
if not hasattr(self, 'expected'):
self.expected = "%s to match regex '%s'" % (n, self._regex)
self._test = self._test_regex
if not hasattr(self, '_test'):
self._test = self._test_simple_equality
if not self._opt_type and self._type:
if self._type in STRING_TYPES:
self._opt_type = 'string'
elif self._type == int:
self._opt_type = 'int'
elif self._type == float:
self._opt_type = 'float'
elif self._type == complex:
self._opt_type = 'complex'
if not self._type_desc and self._opt_type:
self._type_desc = self._opt_type
if not self._type_desc:
self._type_desc = self._type.__name__
if not self._metavar:
self._metavar = name.upper()
s = None
l = None
long_opts = obj._long_opts
short_opts = obj._short_opts
if self._arg:
a = self._arg
assert len(a) >= 2, a
if '/' in a:
(s, l) = a.split('/')
if s.startswith('-'):
s = s[1:]
if l.startswith('--'):
l = l[2:]
assert s, s
assert l, l
else:
if a[0] == '-' and a[1] != '-':
s = a[1:]
else:
assert a.startswith('--') and len(a) >= 4, a
l = a[2:]
else:
l = name.replace('_', '-')
chars = [ (c, c.upper()) for c in list(name) ]
for c in itertools.chain.from_iterable(chars):
if c not in short_opts:
s = c
break
if l:
assert l not in long_opts, (l, long_opts)
long_opts[l] = self
if s:
assert s not in short_opts, (s, short_opts)
short_opts[s] = self
self._opt_long = l
self._opt_short = s
tokens = re.findall('[A-Z][^A-Z]*', self.__class__.__name__)
if tokens[-1] == 'Error':
tokens = tokens[:-1]
elif tokens[-1] == 'Arg':
tokens = tokens[:-1]
self._capitalized_name = ' '.join(tokens)
def _test_regex(self):
return bool(self._pattern.match(self.actual))
def _test_simple_equality(self):
return (self.actual == self.expected)
def __save(self, value, force, retval):
assert force in (True, False)
assert retval in (True, False)
try:
setattr(self._obj, '_' + self._name, value)
except AttributeError:
if force:
raise
return retval
def _try_save(self, value, retval=True):
force = False
return self.__save(value, force, retval)
def _save(self, value, retval=True):
force = True
return self.__save(value, force, retval)
def __check_existing(self):
obj = self._obj
name = self._name
actual = self.actual
check_existing = (
not hasattr(self, '_check_existing_') or (
hasattr(self, '_check_existing_') and
self._check_existing_
)
)
has_existing = (
hasattr(obj, '_existing') and
obj._existing
)
if not has_existing:
return
ex_obj = obj._existing
existing = getattr(ex_obj, name)
existing_str = existing
actual_str = str(actual)
if isiterable(existing):
existing_str = ','.join(str(e) for e in existing)
elif isinstance(existing, datetime.date):
existing_str = existing.strftime(self._date_format)
elif isinstance(existing, datetime.datetime):
existing_str = existing.strftime(self._datetime_format)
elif not isinstance(existing, str):
existing_str = str(existing)
self._existing = existing
self._existing_str = existing_str
if not check_existing:
return
if existing_str == actual_str:
message = "%s already has a value of '%s'"
BaseException.__init__(self, message % (name, actual_str))
raise self
def _validate(self, new_value):
self.actual = new_value
self.__check_existing()
result = self._test()
if result not in (True, False):
raise RuntimeError(
"invalid return value from %s's validation "
"routine, expected True/False, got: %s (did "
"you forget to 'return True'?)" % (self._name, repr(result))
)
if not result:
if hasattr(self, 'message') and self.message:
message = self.message
prefix = ''
else:
keys = [
'expected',
'actual'
] + sorted(filter(self.__keys, dir(self)))
f = lambda k: 'got' if k == 'actual' else k
items = ((f(k), repr(getattr(self, k))) for k in keys)
message = ', '.join('%s: %s' % (k, v) for (k, v) in items)
prefix = "%s is invalid: " % self._name
BaseException.__init__(self, prefix + message)
raise self
obj = self._obj
dst_attr = self.dst_attr or ('_' + self._name)
if self.dst_value and hasattr(obj, dst_attr):
setattr(obj, dst_attr, self.dst_value)
#===============================================================================
# Common Invariants
#===============================================================================
class BoolInvariant(Invariant):
expected = None
_type = bool
_metavar = None
_action = 'store_true'
def _test(self):
return True
class StringInvariant(Invariant):
_type = str
_type_desc = 'string'
_maxlen = 1024
_minlen = 2
@property
def expected(self):
assert isinstance(self._maxlen, int), (self._maxlen,type(self._maxlen))
assert self._maxlen > 0, self._maxlen
return '%s with length between %d and %d characters' % (
self._type_desc,
self._minlen,
self._maxlen
)
def _test(self):
if not isinstance(self.actual, self._type):
return False
l = len(self.actual)
return (
l >= self._minlen and
l <= self._maxlen
)
try:
class UnicodeInvariant(StringInvariant):
_type = unicode
_type_desc = 'unicode string'
except NameError:
UnicodeInvariant = StringInvariant
class PositiveIntegerInvariant(Invariant):
_type = int
_min = 1
_max = None
expected = "an integer greater than 0"
def _test(self):
try:
i = int(self.actual)
if self._min:
assert i >= self._min
if self._max:
assert i <= self._max
return self._try_save(i)
except:
return False
class AscendingCSVSepPositiveIntegersInvariant(Invariant):
_type = str
expected = (
"one or more positive integers separated by ',' "
"in ascending order"
)
def _test(self):
numbers = None
try:
numbers = [ int(i) for i in self.actual.split(',') ]
sorted_numbers = sorted(numbers)
assert numbers == sorted_numbers
except (ValueError, AssertionError):
return False
assert numbers
try:
setattr(self._obj, '_' + self._name, numbers)
except AttributeError:
pass
return True
class NonNegativeIntegerInvariant(Invariant):
_type = int
expected = "an integer greater than or equal to 0"
def _test(self):
try:
return (int(self.actual) >= 0)
except:
return False
class FloatInvariant(Invariant):
_type = float
_min = None
_max = None
expected = "a float"
def _test(self):
try:
f = float(self.actual)
if self._min:
assert f >= self._min
if self._max:
assert f <= self._max
return True
except:
return False
class NonEmptyDictInvariant(Invariant):
_type = dict
expected = "a non-empty dict"
def _test(self):
try:
d = self.actual
return isinstance(d, dict) and d
except:
return False
class MonthDayRangeInvariant(StringInvariant):
_minlen = 3
_maxlen = 5
expected = "a month range in the format n-m, i.e. '1-15'"
def _test(self):
if not StringInvariant._test(self):
return False
try:
(s, e) = (int(i) for i in self.actual.split('-'))
assert s < e
assert s >= 1 and s <= 27
assert e >= 2 and e <= 31
except:
return False
return True
class SetInvariant(Invariant):
_type = str
_expected_fmt = "a member of the following set: %s"
def _test(self):
set_str = ', '.join(("'%s'" % s for s in self._set))
self.expected = self._expected_fmt % set_str
try:
self.dst_value = set((self.actual,))
assert ((self._set & self.dst_value) == self.dst_value)
except (ValueError, AssertionError):
return False
return True
class MultipleSetInvariant(Invariant):
_type = str
_expected_fmt = (
"one or more values (csv separated if more than one) "
"from the following set: %s"
)
def _test(self):
set_str = ', '.join(("'%s'" % s for s in self._set))
self.expected = self._expected_fmt % set_str
try:
self.dst_value = set(self.actual.split(','))
assert ((self._set & self.dst_value) == self.dst_value)
except (ValueError, AssertionError):
return False
return True
class PathInvariant(StringInvariant):
_minlen = 1
_maxlen = 1024
_allow_dash = False
_endswith = None
@property
def expected(self):
if self._endswith:
return "a valid, existing path ending with '%s'" % self._endswith
else:
return "a valid path name"
def _test(self):
if not StringInvariant._test(self):
return False
if self._endswith and not self.actual.endswith(self._endswith):
return False
if self._allow_dash and self.actual == '-':
return True
p = abspath(self.actual)
if not isfile(p):
return False
return self._try_save(p)
class YMDPathInvariant(PathInvariant):
def _test(self):
dst_name = '_' + self._name + '_ymd'
assert hasattr(self._obj, dst_name), dst_name
if not PathInvariant._test(self):
return False
path = self.actual
n = basename(path)
ix = n.find('2')
ymd = n[ix:ix+len('yyyy-mm-dd')]
setattr(self._obj, dst_name, ymd)
return True
class OutPathInvariant(StringInvariant):
expected = "a valid path name (path does not have to exist)"
# If the base directory doesn't exist and _mkdir is True, create the
# directory.
_mkdir = True
_minlen = 1
_maxlen = 1024
def _test(self):
if not StringInvariant._test(self):
return False
try:
path = self.actual
base = abspath(dirname(path))
if base:
if not exists(base) and self._mkdir:
os.makedirs(base)
except:
return False
else:
return True
class MultiplePathsInvariant(StringInvariant):
expected = "one or more valid path names"
_minlen = 1
_maxlen = 1024
_endswith = None
def _test(self):
paths = []
actual = self.actual.split(',')
for path in actual:
path = normpath(abspath(path))
if not StringInvariant._test(self):
return False
if not exists(path):
return False
if self._endswith:
if not endswith(path, self._endswith):
return False
paths.append(path)
setattr(self._obj, '_' + self._name, paths)
return True
class DirectoryInvariant(StringInvariant):
expected = "a valid directory name"
_minlen = 1
_maxlen = 1024
_allow_dash = False
def _test(self):
if not StringInvariant._test(self):
return False
if self._allow_dash and self.actual == '-':
return True
p = abspath(self.actual)
if not isdir(p):
return False
return self._try_save(p)
class ExistingDirectoryInvariant(StringInvariant):
expected = "an existing directory name"
_minlen = 1
_maxlen = 1024
def _test(self):
if not StringInvariant._test(self):
return False
p = abspath(self.actual)
if not isdir(self.actual):
return False
return self._try_save(p)
class MkDirectoryInvariant(DirectoryInvariant):
expected = "a valid directory name"
_minlen = 1
_maxlen = 1024
_allow_dash = False
def _test(self):
if not DirectoryInvariant._test(self):
p = abspath(self.actual)
os.makedirs(p)
return True
class DateInvariant(Invariant):
_type_desc = 'datetime'
_date_format = '%Y-%m-%d'
_date_format_str = 'YYYY-MM-DD'
@property
def expected(self):
return "a date in the format '%s'" % self._date_format_str
def _store(self, value):
attr = '_' + self._name
setattr(self._obj, attr, value)
def _test(self):
fmt = self._date_format
strptime = lambda d: datetime.datetime.strptime(d, fmt)
try:
date = strptime(self.actual)
except ValueError:
return False
self._store(datetime.date(date.year, date.month, date.day))
return True
class EndDateInvariant(DateInvariant):
def _test(self):
if not DateInvariant._test(self):
return False
obj = self._obj
start_date = obj._start_date
end_date = obj._end_date
if start_date:
if start_date > end_date:
self.message = (
"end date (%s) is earlier than "
"start date (%s)" % (
self.actual,
start_date.strftime(self._date_format),
)
)
return False
return True
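# A minimal sketch (not part of the original module) of how a concrete
# invariant is typically declared against these base classes; the class
# name and attribute values below are illustrative assumptions:
# class WidthArg(PositiveIntegerInvariant):
#     _min = 1
#     _max = 4096
#     _help = 'width of the output image in pixels'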
#===============================================================================
# Networking Invariants
#===============================================================================
class PortInvariant(PositiveIntegerInvariant):
_min = 1
_max = 65535
expected = "a TCP/IP port (integer between 1 and 65535)"
class NonEphemeralPortInvariant(PortInvariant):
_min = 1025
expected = "an non-ephemeral port (i.e. between 1024 and 65535)"
#===============================================================================
# Invariant Aware Object
#===============================================================================
class InvariantDetail(object):
name = None
long = None
type = None
short = None
target = None
@classmethod
def create_from_invariant(cls, invariant):
i = invariant
d = InvariantDetail()
d.name = i._name
# Fill the remaining fields from the invariant's option metadata (an
# assumption based on the attribute names above); 'target' has no obvious
# source attribute and is left unset.
d.long = i._opt_long
d.type = i._type
d.short = i._opt_short
return d
class InvariantAwareObject(object):
_existing_ = None
__filter = lambda _, n: (n[0] != '_' and endswith(n, SUFFIXES))
__convert = lambda _, n: '_'.join(t.lower() for t in n[:-1])
__pattern = re.compile('[A-Z][^A-Z]*')
__inner_classes_pattern = re.compile(
r' class ([^\s]+(%s))\(.*' % (
'|'.join(s for s in SUFFIXES)
)
)
def __init__(self, *args, **kwds):
self._long_opts = kwds.get('long_opts', {})
self._short_opts = kwds.get('short_opts', {})
f = self.__filter
c = self.__convert
p = self.__pattern
classes = dict(
(c(t), getattr(self, n)) for (n, t) in [
(n, p.findall(n)) for n in filter(f, dir(self))
]
)
names = dict((v.__name__, k) for (k, v) in classes.items())
cls = self.__class__
classname = cls.__name__
filename = inspect.getsourcefile(cls)
lines = linecache.getlines(filename)
lines_len = len(lines)
prefix = 'class %s(' % classname
found = None
for i in range(lines_len):
line = lines[i]
if prefix in line:
found = i
break
if found is None:
raise IOError('could not find source code for class')
block = inspect.getblock(lines[found:])
text = ''.join(block)
inner = self.__inner_classes_pattern
order = [
(i, names[n[0]]) for (i, n) in enumerate(inner.findall(text))
]
instances = {
name: cls(self, name)
for (cls, name) in [
(classes[name], name)
for (_, name) in order
]
}
self._invariants = instances
self._invariant_names = names
self._invariant_order = order
self._invariant_classes = classes
self._invariants_processed = list()
def __setattr__(self, name, new_value):
object.__setattr__(self, name, new_value)
if hasattr(self, '_invariants') and name in self._invariants:
invariant = self._invariants[name]
existing = (self._existing_ or (None, None))
(existing_obj_attr, dependencies) = existing
if not dependencies or name in dependencies:
invariant._validate(new_value)
self._invariants_processed.append(invariant)
return
# All necessary dependencies have been set (assuming the class has
# correctly ordered the invariant classes), so set the object's
# '_existing' attribute, which signals to future invariants that
# they are to begin checking existing values during the the normal
# setattr interception and validation.
if not self._existing:
self._existing = getattr(self, existing_obj_attr)
invariant._validate(new_value)
# And make a note of the existing value as well as the new value
# in the 'processed' list. This is done for convenience of the
# subclass that may want easy access to the old/new values down
# the track (i.e. for logging purposes).
old_value = invariant._existing_str
self._invariants_processed.append(invariant)
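# Usage sketch (illustrative, not from the original module): inner classes
# named with one of the SUFFIXES become validated attributes, e.g. 'PortArg'
# maps to attribute 'port', and assignment triggers validation:
# class Config(InvariantAwareObject):
#     class PortArg(PortInvariant):
#         _help = 'TCP port to listen on'
# c = Config()
# c.port = 8080 # validated by PortInvariant
# c.port = 99999 # raises PortInvariant ("port is invalid: ...")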
# vim:set ts=8 sw=4 sts=4 tw=78 et:
|
StarcoderdataPython
|
1665010
|
<reponame>ngarneau/awd-lstm-lm
import logging
import argparse
import time
import math
import os
import hashlib
import numpy as np
import torch
import torch.nn as nn
import data
from data import SentenceLoader
import model as m
from utils import batchify, get_batch, repackage_hidden
from splitcross import SplitCrossEntropyLoss
from pytoune.framework import Experiment as PytouneExperiment
from pytoune.framework.callbacks import ClipNorm, ReduceLROnPlateau
from callbacks import *
def get_source_directory(directory_name):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), directory_name)
def get_experiment_directory(directory_name):
default_dir = get_source_directory('./results')
dest_directory = os.environ.get('RESULTS_DIR', default_dir)
return os.path.join(dest_directory, directory_name)
def main():
randomhash = ''.join(str(time.time()).split('.'))
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank RNN/LSTM Language Model')
parser.add_argument('--data', type=str, default='data/penn/', help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM', help='type of recurrent net (LSTM, QRNN, GRU)')
parser.add_argument('--emsize', type=int, default=400, help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=1150, help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=3, help='number of layers')
parser.add_argument('--lr', type=float, default=30, help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25, help='gradient clipping')
parser.add_argument('--epochs', type=int, default=8000, help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=80, metavar='N', help='batch size')
parser.add_argument('--bptt', type=int, default=70, help='sequence length')
parser.add_argument('--dropout', type=float, default=0.4, help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.3, help='dropout for rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.65, help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0.1, help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--wdrop', type=float, default=0.5, help='amount of weight dropout to apply to the RNN hidden to hidden matrix')
parser.add_argument('--seed', type=int, default=1111, help='random seed')
parser.add_argument('--nonmono', type=int, default=5, help='non-monotone interval used to decide when to switch to ASGD')
parser.add_argument('--cuda', action='store_false', help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N', help='report interval')
parser.add_argument('--save', type=str, default=randomhash+'.pt', help='path to save the final model')
parser.add_argument('--alpha', type=float, default=2, help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1, help='beta slowness regularization applied on RNN activation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=1.2e-6, help='weight decay applied to all weights')
parser.add_argument('--resume', type=str, default='', help='path of model to resume')
parser.add_argument('--optimizer', type=str, default='sgd', help='optimizer to use (sgd, adam)')
parser.add_argument('--when', nargs="+", type=int, default=[-1], help='When (which epochs) to divide the learning rate by 10 - accepts multiple')
args = parser.parse_args()
args.tied = True
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
fn = 'corpus.{}.data'.format(hashlib.md5(args.data.encode()).hexdigest())
if os.path.exists(fn):
print('Loading cached dataset...')
corpus = torch.load(fn)
else:
print('Producing dataset...')
corpus = data.Corpus(args.data)
torch.save(corpus, fn)
eval_batch_size = 20
test_batch_size = 1
train_data = batchify(corpus.train, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
train_loader = SentenceLoader(train_data, args.bptt)
valid_loader = SentenceLoader(val_data, args.bptt, False)
test_loader = SentenceLoader(test_data, args.bptt, False)
ntokens = len(corpus.dictionary)
model = m.RNNModel(
args.model,
ntokens,
args.emsize,
args.nhid,
args.nlayers,
args.dropout,
args.dropouth,
args.dropouti,
args.dropoute,
args.wdrop,
args.tied,
args.alpha,
args.beta,
args.batch_size
)
if args.model == 'QRNN': model.reset()
###
params = list(model.parameters())
total_params = sum(x.size()[0] * x.size()[1] if len(x.size()) > 1 else x.size()[0] for x in params if x.size())
print('Args:', args)
print('Model total parameters:', total_params)
optimizer = None
if args.optimizer == 'sgd':
optimizer = torch.optim.SGD(params, lr=args.lr, weight_decay=args.wdecay)
if args.optimizer == 'adam':
optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.wdecay)
device = None
device_id = 0
if torch.cuda.is_available():
torch.cuda.set_device(device_id) # Fix bug where memory is allocated on GPU0 when asked to use GPU1.
device = torch.device('cuda:%d' % device_id)
logging.info("Training on GPU %d" % device_id)
else:
logging.info("Training on CPU")
dataset = args.data.split('/')[-1]
model_name = "AWD_{}_{}".format(args.model, dataset)
expt_name = './expt_{}'.format(model_name)
expt_dir = get_experiment_directory(expt_name)
expt = PytouneExperiment(
expt_dir,
model,
device=device,
optimizer=optimizer,
monitor_metric='val_loss',
monitor_mode='min'
)
callbacks = [
HiddenInitCallback(args.batch_size, eval_batch_size),
HiddenRepackagingCallback(),
ClipNorm(params, args.clip),
# EvaluationCallback(),
ASGDOptimizerSwitchCallback(args),
ReduceLROnPlateau(monitor='val_loss', mode='min', patience=20, factor=0.5, threshold_mode='abs', threshold=1e-3, verbose=True),
AdaptativeLRSchedulerCallback(train_loader),
]
try:
expt.train(train_loader, valid_loader, callbacks=callbacks, seed=args.seed)
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
print("Testing on test set...")
expt.test(test_loader)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
main()
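# Example invocation (script name and data path are placeholders; any flag
# omitted falls back to the argparse defaults set above):
# python main.py --data data/penn --model LSTM --batch_size 80 --bptt 70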
|
StarcoderdataPython
|
1786006
|
from django.apps import AppConfig
class AIModelConfig(AppConfig):
name = 'aimodel'
verbose_name = "II-20: AI Model config"
|
StarcoderdataPython
|
3305572
|
#!/usr/bin/env python
# -*- coding: utf-8 -*--
# Copyright (c) 2021 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
"""
The module that represents an IpAddressV6 feature type.
Classes:
IpAddressV6
The IpAddressV6 feature type.
"""
import re
import pandas as pd
from ads.feature_engineering.feature_type.base import FeatureType
from ads.feature_engineering.utils import _count_unique_missing
from ads.feature_engineering import schema
PATTERN = re.compile(
r"\s*(?!.*::.*::)(?:(?!:)|:(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)){3})\s*",
re.VERBOSE | re.IGNORECASE | re.DOTALL,
)
def default_handler(data: pd.Series, *args, **kwargs) -> pd.Series:
"""Processes given data and indicates if the data matches requirements.
Parameters
----------
data: :class:`pandas.Series`
The data to process.
Returns
-------
:class:`pandas.Series`
The logical list indicating if the data matches requirements.
"""
return data.apply(
lambda x: True
if not pd.isnull(x) and PATTERN.match(str(x)) is not None
else False
)
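# Quick check (illustrative; mirrors the doctest in the class below):
# import pandas as pd
# default_handler(pd.Series(['2001:db8::', 'not-an-ip'])) # -> [True, False]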
class IpAddressV6(FeatureType):
"""
Type representing IP Address V6.
Attributes
----------
description: str
The feature type description.
name: str
The feature type name.
warning: FeatureWarning
Provides functionality to register warnings and invoke them.
validator
Provides functionality to register validators and invoke them.
Methods
--------
feature_stat(x: pd.Series) -> pd.DataFrame
Generates feature statistics.
Example
-------
>>> from ads.feature_engineering.feature_type.ip_address_v6 import IpAddressV6
>>> import pandas as pd
>>> import numpy as np
>>> s = pd.Series(['192.168.0.1', '2001:db8::', '', np.NaN, None], name='ip_address')
>>> s.ads.feature_type = ['ip_address_v6']
>>> IpAddressV6.validator.is_ip_address_v6(s)
0 False
1 True
2 False
3 False
4 False
Name: ip_address, dtype: bool
"""
description = "Type representing IP Address V6."
@staticmethod
def feature_stat(x: pd.Series) -> pd.DataFrame:
"""Generates feature statistics.
Feature statistics include (total)count, unique(count) and missing(count).
Examples
--------
>>> s = pd.Series(['2002:db8::', '2001:db8::', '2001:db8::', '2002:db8::', np.NaN, None], name='ip_address')
>>> s.ads.feature_type = ['ip_address_v6']
>>> s.ads.feature_stat()
Metric Value
0 count 6
1 unique 2
2 missing 2
Returns
-------
Pandas Dataframe
Summary statistics of the Series provided.
"""
return _count_unique_missing(x)
@classmethod
def feature_domain(cls, x: pd.Series) -> schema.Domain:
"""
Generate the domain of the data of this feature type.
Examples
--------
>>> s = pd.Series(['2002:db8::', '2001:db8::', '2001:db8::', '2002:db8::', np.NaN, None], name='ip_address_v6')
>>> s.ads.feature_type = ['ip_address_v6']
>>> s.ads.feature_domain()
constraints: []
stats:
count: 6
missing: 2
unique: 2
values: IpAddressV6
Returns
-------
ads.feature_engineering.schema.Domain
Domain based on the IpAddressV6 feature type.
"""
return schema.Domain(
cls.__name__,
cls.feature_stat(x).to_dict()[x.name],
[],
)
IpAddressV6.validator.register("is_ip_address_v6", default_handler)
|
StarcoderdataPython
|
4839110
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import math
import sys
import argparse
import rospy
import time
import tf2_ros
from lxml import etree
from collections import deque
import numpy
import yaml
import underworlds
from std_msgs.msg import String
from underworlds.types import Entity, Mesh, Camera, MESH, Situation
from underworlds.helpers import transformations
from underworlds.tools.loader import ModelLoader
from underworlds.tools.primitives_3d import Box
EPSILON = 0.02
TF_CACHE_TIME = 5.0
DEFAULT_CLIP_PLANE_NEAR = 0.001
DEFAULT_CLIP_PLANE_FAR = 1000.0
DEFAULT_HORIZONTAL_FOV = 57.20
DEFAULT_ASPECT = 1.291196388
# just for convenience
def strip_leading_slash(s):
return s[1:] if s.startswith("/") else s
# just for convenience
def transformation_matrix(t, q):
translation_mat = transformations.translation_matrix(t)
rotation_mat = transformations.quaternion_matrix(q)
return numpy.dot(translation_mat, rotation_mat)
class RobotMonitor(object):
"""
"""
def __init__(self, ctx, source_world, target_world, urdf_file_path, model_dir_path, robot_name, perspective_frame,
cam_rot, reference_frame):
"""
The constructor method
@param ctx: The underworlds context
@param source_world: The name of the source world
@param target_world: The name of the target world
@param urdf_file_path: The absolute path of the robot URDF model
@param model_dir_path: The absolute path of the meshes directory
@param robot_name: The name of the robot agent
@param perspective_frame: The name of the robot perspective (gaze) frame
@param cam_rot: The camera rotation offset matrix
@param reference_frame: The reference frame of the system
"""
self.ctx = ctx
self.source = ctx.worlds[source_world]
self.source_world_name = source_world
self.target = ctx.worlds[target_world]
self.target_world_name = target_world
self.tfBuffer = tf2_ros.Buffer(rospy.Duration(TF_CACHE_TIME), debug=False)
self.listener = tf2_ros.TransformListener(self.tfBuffer)
self.node_mapping = {self.source.scene.rootnode.id: self.target.scene.rootnode.id}
self.situation_mapping = {}
self.already_created_node_ids = {}
self.time_table = {}
self.urdf_file_path = urdf_file_path
self.model_dir_path = model_dir_path
self.situation_map = {}
self.robot_name = robot_name
rospy.set_param('robot_name', robot_name)
self.perspective_frame = perspective_frame
self.reference_frame = reference_frame
self.cam_rot = cam_rot
# The map of the parent frames ordered by frame name
self.parent_frames_map = {}
self.model_map = {}
self.relations_map = {}
self.ros_pub = {"situation_log": rospy.Publisher("robot_monitor/log", String, queue_size=5)}
self.previous_nodes_to_update = []
self.aabb_map = {}
self.frames_transform = {}
self.current_situations_map = {}
self.parent_frames_map[reference_frame] = "root"
self.parent_frames_map["base_footprint"] = reference_frame
self.load_urdf()
def load_urdf(self):
"""
This function read the URDF file given in constructor and save the robot structure
@return : None
"""
urdf_tree = etree.parse(self.urdf_file_path)
urdf_root = urdf_tree.getroot()
for link in urdf_root.iter("link"):
if link.find("visual") is not None:
if link.find("visual").find("geometry").find("mesh") is not None:
path = link.find("visual").find("geometry").find("mesh").get("filename").split("/")
if link.find("visual").find("geometry").find("mesh").get("scale"):
scale_str = link.find("visual").find("geometry").find("mesh").get("scale").split(" ")
scale = float(scale_str[0]) * float(scale_str[1]) * float(scale_str[2])
else:
scale = 0.1
count = 0
path_str = ""
element = path[len(path)-1]
while count < len(path):
if element == "meshes":
break
else:
path_str = "/" + element + path_str
count += 1
element = path[len(path)-1-count]
filename = self.model_dir_path + path_str
try:
nodes_loaded = ModelLoader().load(filename, self.ctx, world=self.target_world_name, root=None,
only_meshes=True, scale=scale)
for n in nodes_loaded:
if n.type == MESH:
self.model_map[link.get("name")] = n.properties["mesh_ids"]
self.aabb_map[link.get("name")] = n.properties["aabb"]
except Exception as e:
pass
else:
if link.find("visual").find("geometry").find("box") is not None:
mesh_ids = []
sizes = link.find("visual").find("geometry").find("box").get("size").split(" ")
box = Box.create(float(sizes[0]), float(sizes[1]), float(sizes[2]))
self.ctx.push_mesh(box)
mesh_ids.append([box.id])
self.model_map[link.get("name")] = mesh_ids
def start_n2_situation(self, predicate, subject_name, object_name):
description = predicate+"("+subject_name+","+object_name+")"
sit = Situation(desc=description)
sit.starttime = time.time()
self.current_situations_map[description] = sit
self.ros_pub["situation_log"].publish("START " + description)
self.target.timeline.update(sit)
return sit.id
def start_n1_situation(self, predicate, subject_name):
description = predicate+"("+subject_name+")"
sit = Situation(desc=description)
sit.starttime = time.time()
self.current_situations_map[description] = sit
self.ros_pub["situation_log"].publish("START " + description)
self.target.timeline.update(sit)
return sit.id
def end_n1_situation(self, predicate, subject_name):
description = predicate+"("+subject_name+")"
sit = self.current_situations_map[description]
self.ros_pub["situation_log"].publish("END "+description)
try:
self.target.timeline.end(sit)
except Exception as e:
rospy.logwarn("[robot_monitor] Exception occurred : "+str(e))
def end_n2_situation(self, predicate, subject_name, object_name):
description = predicate+"("+subject_name+","+object_name+")"
sit = self.current_situations_map[description]
self.ros_pub["situation_log"].publish("END "+description)
try:
self.target.timeline.end(sit)
except Exception as e:
rospy.logwarn("[robot_monitor] Exception occurred : "+str(e))
def filter(self):
nodes_to_update = []
for node in self.source.scene.nodes:
if node != self.source.scene.rootnode:
new_node = node.copy()
if node.id in self.node_mapping:
new_node.id = self.node_mapping[node.id]
if new_node in self.target.scene.nodes:
if not numpy.allclose(self.target.scene.nodes[new_node.id].transformation, node.transformation,
rtol=0, atol=EPSILON):
nodes_to_update.append(node)
else:
self.node_mapping[node.id] = new_node.id
self.frames_transform[new_node.name] = new_node.transformation
nodes_to_update.append(new_node)
if nodes_to_update:
for node in nodes_to_update:
if node.parent == self.source.scene.rootnode.id:
self.target.scene.nodes.update(node)
node.parent = self.node_mapping[node.parent] if node.parent in self.node_mapping \
else self.target.scene.rootnode.id
self.target.scene.nodes.update(nodes_to_update)
situations_to_update = []
for situation in self.source.timeline:
new_situation = situation.copy()
if situation in self.situation_mapping:
new_situation.id = self.situation_mapping[situation.id]
else:
self.situation_mapping[situation.id] = new_situation.id
situations_to_update.append(new_situation)
if situations_to_update:
self.target.timeline.update(situations_to_update)
def monitor_robot(self):
"""
This method read the frames of the robot if they exist in /tf and then update the poses/3D models of
the robot in the output world
@return : None
"""
try:
nodes_to_update = []
node = Camera(name=self.robot_name)
node.properties["clipplanenear"] = DEFAULT_CLIP_PLANE_NEAR
node.properties["clipplanefar"] = DEFAULT_CLIP_PLANE_FAR
node.properties["horizontalfov"] = math.radians(DEFAULT_HORIZONTAL_FOV)
node.properties["aspect"] = DEFAULT_ASPECT
msg = self.tfBuffer.lookup_transform(self.reference_frame, self.perspective_frame, rospy.Time(0))
trans = [msg.transform.translation.x, msg.transform.translation.y, msg.transform.translation.z]
rot = [msg.transform.rotation.x, msg.transform.rotation.y, msg.transform.rotation.z, msg.transform.rotation.w]
transform = transformation_matrix(trans, rot)
node.transformation = numpy.dot(transform, self.cam_rot)
if node.name in self.already_created_node_ids:
node.id = self.already_created_node_ids[node.name]
if not numpy.allclose(self.frames_transform[node.name], node.transformation, rtol=0, atol=EPSILON):
self.frames_transform[node.name] = node.transformation
nodes_to_update.append(node)
else:
self.already_created_node_ids[node.name] = node.id
self.frames_transform[node.name] = node.transformation
nodes_to_update.append(node)
for frame in self.model_map:
node = Mesh(name=frame)
node.properties["mesh_ids"] = [mesh_id for mesh_id in self.model_map[frame]]
node.properties["aabb"] = self.aabb_map[frame]
msg = self.tfBuffer.lookup_transform(self.perspective_frame, frame, rospy.Time(0))
trans = [msg.transform.translation.x, msg.transform.translation.y, msg.transform.translation.z]
rot = [msg.transform.rotation.x, msg.transform.rotation.y, msg.transform.rotation.z,
msg.transform.rotation.w]
node.transformation = transformation_matrix(trans, rot)
node.parent = self.already_created_node_ids[self.robot_name]
if node.name in self.already_created_node_ids:
node.id = self.already_created_node_ids[frame]
if not numpy.allclose(self.frames_transform[node.name], node.transformation, rtol=0, atol=EPSILON):
self.frames_transform[node.name] = node.transformation
nodes_to_update.append(node)
else:
self.already_created_node_ids[node.name] = node.id
self.frames_transform[node.name] = node.transformation
nodes_to_update.append(node)
for node in self.source.scene.nodes:
if node != self.source.scene.rootnode:
new_node = node.copy()
if node.id in self.node_mapping:
new_node.id = self.node_mapping[node.id]
if new_node in self.target.scene.nodes:
if not numpy.allclose(self.target.scene.nodes[new_node.id].transformation, node.transformation,
rtol=0, atol=EPSILON):
nodes_to_update.append(node)
else:
self.node_mapping[node.id] = new_node.id
self.frames_transform[new_node.name] = new_node.transformation
nodes_to_update.append(new_node)
if nodes_to_update:
self.target.scene.nodes.update(nodes_to_update)
self.previous_nodes_to_update = nodes_to_update
except (tf2_ros.TransformException, tf2_ros.LookupException, tf2_ros.ConnectivityException,
tf2_ros.ExtrapolationException):
pass
def run(self):
while not rospy.is_shutdown():
self.filter()
self.monitor_robot()
if __name__ == "__main__":
sys.argv = [arg for arg in sys.argv if "__name" not in arg and "__log" not in arg]
sys.argc = len(sys.argv)
parser = argparse.ArgumentParser(description="Add in the given output world, the nodes from input "
"world and the robot agent from ROS")
parser.add_argument("input_world", help="Underworlds input world")
parser.add_argument("output_world", help="Underworlds output world")
parser.add_argument("urdf_file_path", help="The path of the urdf file")
parser.add_argument("model_dir_path", help="The path of the robot mesh directory")
parser.add_argument("robot_name", help="The robot name")
parser.add_argument("perspective_frame", help="The name of the robot head gaze frame")
parser.add_argument("--cam_rot", default="0.0_0.0_0.0",
help="The camera rotation offset :\"<rx>_<ry>_<rz>\" in [°] ")
parser.add_argument("--reference", default="map", help="The reference frame")
args = parser.parse_args()
rospy.init_node("robot_filter", anonymous=False)
with underworlds.Context("Robot filter") as ctx:
rx, rz, ry = [math.radians(float(i)) for i in args.cam_rot.split("_")]
rot = transformations.euler_matrix(rx, rz, ry, 'rxyz')
RobotMonitor(ctx, args.input_world, args.output_world, args.urdf_file_path, args.model_dir_path,
args.robot_name, args.perspective_frame, rot, args.reference).run()
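# Example launch (package, world names, paths and frames are placeholders):
# rosrun <package> robot_monitor.py world_in world_out /path/to/robot.urdf \
#     /path/to/meshes pr2 head_gaze_frame --cam_rot 0.0_0.0_0.0 --reference map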
|
StarcoderdataPython
|
3323063
|
<reponame>metalibm/metalibm
# -*- coding: utf-8 -*-
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2019 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
#
# created: Mar 21st, 2019
# last-modified: Mar 21st, 2019
#
# description: toolbox for approximation construct
#
###############################################################################
import sollya
from sollya import Interval, sup, inf, guessdegree
S2 = sollya.SollyaObject(2)
from metalibm_core.core.ml_table import ML_NewTable
from metalibm_core.utility.num_utils import fp_next
from metalibm_core.core.polynomials import (
Polynomial, PolynomialSchemeEvaluator, SollyaError)
from metalibm_core.core.ml_operations import (
Constant, FMA, TableLoad, BitLogicAnd, BitLogicRightShift,
Multiplication, Subtraction,
TypeCast, Conversion,
Max, Min,
NearestInteger,
)
from metalibm_core.core.ml_formats import (
ML_Binary32, ML_Binary64, ML_SingleSingle, ML_DoubleDouble)
from metalibm_core.utility.debug_utils import debug_multi
from metalibm_core.utility.log_report import Log
from metalibm_core.utility.axf_utils import (
AXF_SimplePolyApprox, AXF_UniformPiecewiseApprox,
AbsoluteApproxError, AXF_ApproxError,
RelativeApproxError,
AXF_Polynomial,
AXF_GenericPolynomialSplit)
from metalibm_core.utility.ml_template import precision_parser
def get_extended_fp_precision(precision):
""" return the extended counterart of @p precision """
ext_precision = {
ML_Binary32: ML_SingleSingle,
ML_Binary64: ML_DoubleDouble,
}[precision]
return ext_precision
def load_piecewese_poly_params_from_axf(axf_approx, indexing):
""" load paramater for a generic polynomial split from an AXF structure """
# indexing = eval(axf_approx.indexing)
max_degree = axf_approx.max_degree
coeff_precision = axf_approx.precision
poly_table = ML_NewTable(dimensions=[indexing.split_num, max_degree+1], storage_precision=coeff_precision, const=True)
offset_table = ML_NewTable(dimensions=[indexing.split_num], storage_precision=coeff_precision, const=True)
# TODO/FIXME: means to select and/or compare between relative and absolute errors
max_error = RelativeApproxError(0.0)
for sub_index in range(indexing.split_num):
offset, approx_interval = indexing.get_offseted_sub_interval(sub_index)
offset_table[sub_index] = offset
local_approx = axf_approx.approx_list[sub_index]
poly_object = local_approx.poly
approx_error = local_approx.approx_error
for monomial_index in range(max_degree+1):
if monomial_index in poly_object.coeff_map:
poly_table[sub_index][monomial_index] = poly_object.coeff_map[monomial_index]
else:
poly_table[sub_index][monomial_index] = 0
# TODO/FIXME: must implement proper error storage/comparaison
# mechanism to process cases when we want to
# compare a relative to an absolute error
if approx_error.error_type != max_error.error_type:
Log.report(Log.Warning, "comparing two errors of different types")
max_error = approx_error if approx_error.value > max_error.value else max_error # i.e. max(approx_error, max_error)
return offset_table, max_degree, poly_table, max_error
def generate_parameters_piecewise_poly_approx(offset_fct, indexing, target_eps, coeff_precision, max_degree=None, axf_export=False, error_target_type=sollya.relative):
""" generate the parameters (table) for a generic piecewise polynomial
approximation """
# computing degree for a different polynomial approximation on each
# sub-interval
if max_degree is None:
poly_degree_list = [int(sup(guessdegree(offset_fct(offset), sub_interval, target_eps))) for offset, sub_interval in indexing.get_offseted_sub_list()]
max_degree = max(poly_degree_list)
else:
poly_degree_list = [max_degree for index in range(indexing.split_num)]
Log.report(Log.Debug, "generate_parameters_piecewise_poly_approx max_degree={}", max_degree)
# tabulating polynomial coefficients on split_num sub-interval of interval
poly_table = ML_NewTable(dimensions=[indexing.split_num, max_degree+1], storage_precision=coeff_precision, const=True)
offset_table = ML_NewTable(dimensions=[indexing.split_num], storage_precision=coeff_precision, const=True)
ErrorCtor = AbsoluteApproxError if error_target_type is sollya.absolute else RelativeApproxError
max_error = ErrorCtor(0.0)
# object for AXF export
if axf_export:
# TODO/FIXME/ using offset_fct evaluation at 0 to provide a dumpable
# function. We may prefer an non-evaluated offset_fct
# transcription
axf_error = AXF_ApproxError.from_AE(ErrorCtor(target_eps))
axf_approx = AXF_GenericPolynomialSplit(offset_fct(0), coeff_precision, indexing.interval, indexing, max_degree, axf_error)
else:
axf_approx = None
for sub_index in range(indexing.split_num):
poly_degree = poly_degree_list[sub_index]
offset, approx_interval = indexing.get_offseted_sub_interval(sub_index)
offset_table[sub_index] = offset
if poly_degree == 0:
# managing constant approximation separately since it seems
# to break sollya
local_approx = coeff_precision.round_sollya_object(offset_fct(offset)(inf(approx_interval)))
poly_table[sub_index][0] = local_approx
for monomial_index in range(1, max_degree+1):
poly_table[sub_index][monomial_index] = 0
if error_target_type is sollya.absolute:
approx_error_value = sup(abs(sollya.infnorm(offset_fct(offset) - local_approx, approx_interval)))
elif error_target_type is sollya.relative:
approx_error_value = sup(abs(sollya.infnorm((offset_fct(offset) - local_approx) / offset_fct(offset), approx_interval)))
else:
raise NotImplementedError
approx_error = ErrorCtor(approx_error_value)
if axf_export:
axf_poly = AXF_Polynomial.from_poly(Polynomial({0: local_approx}))
# axf_error = AXF_ApproxError.from_AE(AbsoluteApproxError(approx_error))
axf_error = AXF_ApproxError.from_AE(approx_error)
axf_approx.approx_list.append(
AXF_SimplePolyApprox(axf_poly,
offset_fct(offset), [0], [coeff_precision],
approx_interval,
approx_error=axf_error))
else:
try:
if 0 in approx_interval and offset_fct(offset)(0) == 0.0:
# if 0 is within the local interval and that the function has a zero at zero,
# we force the first coefficient to be 0
# NOTES: having a zero makes it difficult to target relative error
assert error_target_type is sollya.absolute
poly_object, approx_error_value = Polynomial.build_from_approximation_with_error(
offset_fct(offset), list(range(1,poly_degree+1)), [coeff_precision]*(poly_degree),
approx_interval, error_target_type)
approx_error = AbsoluteApproxError(approx_error_value)
else:
poly_object, approx_error_value = Polynomial.build_from_approximation_with_error(
offset_fct(offset), poly_degree, [coeff_precision]*(poly_degree+1),
approx_interval, error_target_type)
# TODO/FIXME: not sure build_from_approximation_with_error
# is returning an error of the proper
# <error_target_type> type
approx_error = ErrorCtor(approx_error_value)
except SollyaError as err:
# try to see if function is constant on the interval (possible
# failure cause for fpminmax)
subint_low = inf(approx_interval)
local_function = offset_fct(offset)
local_interval = approx_interval
cst_value = coeff_precision.round_sollya_object(local_function(subint_low), sollya.RN)
accuracy = target_eps
diff_with_cst_range = sollya.supnorm(cst_value, local_function, local_interval, sollya.absolute, accuracy)
diff_with_cst = sup(abs(diff_with_cst_range))
if diff_with_cst < target_eps:
Log.report(Log.Info, "constant polynomial detected")
poly_object = Polynomial([cst_value] + [0] * poly_degree)
if error_target_type is sollya.absolute:
approx_error_value = diff_with_cst
elif error_target_type is sollya.relative:
approx_error_value = diff_with_cst / sollya.infnorm(local_function, local_interval)
else:
raise NotImplementedError
approx_error = ErrorCtor(approx_error_value)
else:
Log.report(Log.Error, "degree: {} for index {}, diff_with_cst={} (vs error_threshold={}) ", poly_degree, sub_index, diff_with_cst, target_eps, error=err)
for monomial_index in range(max_degree+1):
if monomial_index <= poly_degree:
if monomial_index in poly_object.coeff_map:
coeff_value = poly_object.coeff_map[monomial_index]
else:
coeff_value = 0
poly_table[sub_index][monomial_index] = coeff_value
else:
poly_table[sub_index][monomial_index] = 0
if axf_export:
axf_poly = AXF_Polynomial.from_poly(poly_object)
# axf_error = AXF_ApproxError.from_AE(RelativeApproxError(approx_error))
axf_error = AXF_ApproxError.from_AE(approx_error)
axf_approx.approx_list.append(
AXF_SimplePolyApprox(axf_poly,
offset_fct(offset), list(range(poly_degree+1)),
[coeff_precision]*(poly_degree+1),
approx_interval,
approx_error=axf_error))
max_error = max(approx_error, max_error)
# if an axf approx is being exported, we need to update the stored
# approximation error
if not axf_approx is None:
axf_approx.approx_error = AXF_ApproxError.from_AE(max_error)
return offset_table, max_degree, poly_table, max_error, axf_approx
def generate_piecewise_poly_approx_from_params(offset_table, max_degree, poly_table, indexing, coeff_precision, vx):
""" generate the ML node graph of approximation of a function
from the parameter of a generic piecewise polynomial approximation """
# indexing function: derive index from input @p vx value
poly_index = indexing.get_index_node(vx)
poly_index.set_attributes(tag="poly_index", debug=debug_multi)
ext_precision = get_extended_fp_precision(coeff_precision)
# building polynomial evaluation scheme
offset = TableLoad(offset_table, poly_index, precision=coeff_precision, tag="offset", debug=debug_multi)
poly = TableLoad(poly_table, poly_index, max_degree, precision=coeff_precision, tag="poly_init", debug=debug_multi)
red_vx = Subtraction(vx, offset, precision=vx.precision, tag="red_vx", debug=debug_multi)
for monomial_index in range(max_degree, -1, -1):
coeff = TableLoad(poly_table, poly_index, monomial_index, precision=coeff_precision, tag="poly_%d" % monomial_index, debug=debug_multi)
#fma_precision = coeff_precision if monomial_index > 1 else ext_precision
fma_precision = coeff_precision
# TODO/FIXME: only using Horner evaluation scheme
poly = FMA(red_vx, poly, coeff, precision=fma_precision)
#return Conversion(poly, precision=coeff_precision)
#return poly.hi
return poly
def generate_piecewise_poly_approx(offset_fct, indexing, target_eps, coeff_precision,
vx, max_degree=None, error_target_type=sollya.relative,
axf_export=False):
""" generate the meta approximation for @p offset_fct over several
intervals defined by @p indexing object
For each sub-interval, a polynomial approximation with
maximal_error @p target_eps is tabulated, and evaluated using format
@p coeff_precision.
The input variable is @p vx """
offset_table, max_degree, poly_table, max_error, axf_approx = generate_parameters_piecewise_poly_approx(offset_fct, indexing,
target_eps, coeff_precision,
max_degree=max_degree,
error_target_type=error_target_type,
axf_export=axf_export)
Log.report(Log.Debug, "max approx error is {}", max_error)
poly = generate_piecewise_poly_approx_from_params(offset_table, max_degree, poly_table, indexing, coeff_precision, vx)
return poly, axf_approx
def search_bound_threshold(fct, limit, start_point, end_point, precision):
""" This function assume that <fct> is monotonic and increasing
search by dichotomy the minimal x, floating-point number in
@p precision, such that x >= start_point and x <= end_point
and round(fct(x)) = limit. """
assert precision.round_sollya_object(fct(start_point)) < limit
assert precision.round_sollya_object(fct(end_point)) >= limit
assert start_point < end_point
left_bound = start_point
right_bound = end_point
while left_bound != right_bound and fp_next(left_bound, precision) != right_bound:
mid_point = precision.round_sollya_object((left_bound + right_bound) / S2, round_mode=sollya.RU)
mid_point_img = precision.round_sollya_object(fct(mid_point), round_mode=sollya.RU)
if mid_point_img >= limit:
right_bound = mid_point
elif mid_point_img < limit:
left_bound = mid_point
else:
Log.report(Log.Error, "function must be increasing in search_bound_threshold")
return left_bound
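# Usage sketch (hypothetical values): find the smallest binary32 x in [1, 10]
# whose rounded exp(x) first reaches 100:
# import sollya
# from metalibm_core.core.ml_formats import ML_Binary32
# x0 = search_bound_threshold(sollya.exp, 100, 1, 10, ML_Binary32)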
def search_bound_threshold_mirror(fct, limit, start_point, end_point, precision):
""" This function assume that <fct> is monotonic and decreasing
search by dichotomy the maximal x, floating-point number in
@p precision, such that x >= start_point and x <= end_point
and round(fct(x)) >= limit. """
assert precision.round_sollya_object(fct(start_point)) >= limit
assert precision.round_sollya_object(fct(end_point)) < limit
assert start_point < end_point
left_bound = start_point
right_bound = end_point
while left_bound != right_bound and fp_next(left_bound, precision) != right_bound:
mid_point = precision.round_sollya_object((left_bound + right_bound) / S2, round_mode=sollya.RU)
mid_point_img = precision.round_sollya_object(fct(mid_point), round_mode=sollya.RU)
if mid_point_img >= limit:
left_bound = mid_point
elif mid_point_img < limit:
right_bound = mid_point
else:
Log.report(Log.Error, "function must be increasing in search_bound_threshold")
return left_bound
|
StarcoderdataPython
|
23684
|
<gh_stars>1-10
# JSON engine 21 9 16
# database
# eng.json
# engine
# eng.py
import os
import json
path = os.getcwd() + '\\json_engine_database\\'
path_string = ''
def set_path(string):
global path
path = os.getcwd() + string
def dictionary_kv(dictionary, key, value):
dictionary[key] = value
return dictionary
def set_path_string(args,create_flag):
global path_string
if (args):
path_string = str(args[0]) + '\\'
if os.path.exists(path + path_string)==False:
if create_flag == True:
os.makedirs(path + path_string)
else:
return False
return path_string
def create(dictionary, *args):
path_string = set_path_string(args,True)
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump(dictionary, outfile, indent=4)
def retrieve(*args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'r') as f:
return(json.load(f))
def retrieve_k(key, *args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'r') as f:
data = json.load(f)
if key in data:
return data[key]
else:
return False
def update(dictionary, *args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump(dictionary, outfile, indent=4)
return True
def update_kv(key, value, *args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump({key: value}, outfile, indent=4)
return True
def patch(dictionary, *args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'r') as f:
data=(json.load(f))
data.update(dictionary)
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump(data, outfile, indent=4)
return True
def patch_kv(key, value, *args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'r') as f:
data=(json.load(f))
data.update({key: value})
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump(data, outfile, indent=4)
return True
def delete(*args):
if (args):
path_string = str(args[0]) + '\\'
if os.path.exists(path + path_string + 'eng.json'):
os.remove(path + path_string + 'eng.json')
os.rmdir(path + path_string)
return True
else:
return False
def delete_k(key, *args):
if (args):
path_string = str(args[0]) + '\\'
if os.path.exists(path + path_string + 'eng.json'):
# Load once: a second json.load on the same handle would fail because
# the file pointer is already at EOF after the first read.
with open(path + path_string + 'eng.json', 'r') as f:
data = json.load(f)
if key in data:
data.pop(key)
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump(data, outfile, indent=4)
return True
else:
return False
else:
return False
def display(*args):
if (args):
path_string = str(args[0]) + '\\'
if os.path.exists(path + path_string + 'eng.json'):
with open(path + path_string + 'eng.json', 'r') as f:
print(json.load(f))
return True
else:
print('The selected file does not exist')
return False
def display_key(key, *args):
if (args):
path_string = str(args[0]) + '\\'
if os.path.exists(path + path_string + 'eng.json'):
with open(path + path_string + 'eng.json', 'r') as f:
if key in json.load(f):
print(key + ' ' + str(json.load(f)[key]))
return True
else:
print('The selected file does not exist')
return False
def display_nkv(key, *args):
if (args):
path_string = str(args[0]) + '\\'
if os.path.exists(path + path_string + 'eng.json'):
with open(path + path_string + 'eng.json', 'r') as f:
if key in json.load(f):
data = json.load(f)
data.pop(key,'key not found')
print(data)
return True
else:
print('The selected file does not exist')
return False
def display_ind(*args):
if (args):
path_string = str(args[0]) + '\\'
if os.path.exists(path + path_string + 'eng.json'):
with open(path + path_string + 'eng.json', 'r') as f:
print(json.dumps(json.load(f), indent=4))
else:
print('The selected file does not exist')
def display_ind_nkv(key, *args):
if (args):
path_string = str(args[0]) + '\\'
if os.path.exists(path + path_string + 'eng.json'):
with open(path + path_string + 'eng.json', 'r') as f:
data = json.load(f)
data.pop(key,'key not found')
print(json.dumps(data, indent=4))
else:
print('The selected file does not exist')
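# A minimal usage sketch (the record name 'users' and the data are made up):
if __name__ == '__main__':
    create({'alice': 1, 'bob': 2}, 'users')   # writes json_engine_database\users\eng.json
    patch_kv('carol', 3, 'users')             # merges one key into the file
    print(retrieve('users'))                  # {'alice': 1, 'bob': 2, 'carol': 3}
    delete('users')                           # removes the file and its folder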
|
StarcoderdataPython
|
3351906
|
import spotipy
from settings import settings
import spotipy.util as util
from time import sleep
class Spotify:
def __init__(self):
self.spotify = spotipy.Spotify()
self.tokens = {}
self.username = settings['username']
def get_spotify(self, scope):
s = self.tokens.get(scope)
if s:
return s
else:
return self.get_token(scope)
    def get_token(self, scope):
        token = util.prompt_for_user_token(self.username, scope, settings['client_id'], settings['client_secret'], settings['redirect_uri'])
        if token:
            try:
                # cache the client under the requested scope (the original
                # stored it under the literal key 'scope')
                s = self.tokens[scope] = spotipy.Spotify(auth=token)
                return s
            except Exception:
                print("spotify api error, sleep 5s...")
                sleep(5)
                return self.get_token(scope)
        else:
            return None
    def add_tracks(self, tracks, playlist):
        scope = 'playlist-modify-public playlist-modify-private'
        s = self.get_spotify(scope)
        try:
            return s.user_playlist_add_tracks(self.username, playlist, tracks)
        except Exception:
            print("spotify api error, sleep 5s...")
            sleep(5)
            return self.add_tracks(tracks, playlist)
    def removes_tracks(self, tracks, playlist):
        scope = 'playlist-modify-public playlist-modify-private'
        s = self.get_spotify(scope)
        try:
            return s.user_playlist_remove_all_occurrences_of_tracks(self.username, playlist, tracks)
        except Exception:
            print("spotify api error, sleep 5s...")
            sleep(5)
            return self.removes_tracks(tracks, playlist)
    def search_track(self, title, artist):
        q = 'artist:' + artist + " track:" + title
        print(q)
        try:
            results = self.spotify.search(q=q, type='track')
        except Exception:
            print("spotify api error, sleep 5s...")
            sleep(5)
            # return the retried result, otherwise `results` is unbound below
            return self.search_track(title, artist)
        try:
            tracks = results['tracks']['items']
        except (KeyError, TypeError):
            return None
        if len(tracks) > 0:
            return tracks[0]
        else:
            return None
def search_tracks(self, data):
tracks = []
for i in data:
track = self.search_track(i['title'], i['artist'])
if track:
tracks.append(track['uri'])
return tracks
    def get_playlist_tracks(self, playlist):
        scope = 'playlist-modify-public playlist-modify-private'
        s = self.get_spotify(scope)
        try:
            p = s.user_playlist_tracks(self.username, playlist, "items.track.id")
        except Exception:
            print("spotify api error, sleep 5s...")
            sleep(5)
            # retry and return, otherwise `p` is unbound below
            return self.get_playlist_tracks(playlist)
        tracks = []
        for i in p['items']:
            tracks.append(i['track']['id'])
        return tracks
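# Usage sketch (the playlist id and track metadata are made up; `settings`
# must supply a real username, client_id, client_secret and redirect_uri):
if __name__ == '__main__':
    client = Spotify()
    uris = client.search_tracks([{'title': 'Karma Police', 'artist': 'Radiohead'}])
    client.add_tracks(uris, '37i9dQZF1DXcBWIGoYBM5M')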
|
StarcoderdataPython
|
3397104
|
<reponame>AccSrd/multimodal-Parkinson-data-processing<filename>Scripts/prep2seg.py
"""
************************************************************************************************
*********************** Preprocessed --> Segmented ***********************************
************************************************************************************************
Divides the data into a single piece per task and puts the multimodal data together.
Input File -->
(1) EEG.txt / EMG.txt
Data Format: [NaN,TIME,data*25(EEG)/data*5(EMG)], 500Hz
(2) LShank.csv / RShank.csv / Arm.csv / Waist.csv
Data Format(Arm.csv as exp.): [NaN,TIME,OriTIME,ArmTIME,ArmACCX,ArmACCY,ArmACCZ,ArmGYROX,ArmGYROY,ArmGYROZ,SC(NC in others)], 500Hz
Output File -->
task_1_data.txt / task_2_data.txt / ...
Data Format: [NaN,TIME,FP1,FP2,F3,F4,C3,C4,P3,P4,O1,O2,F7,F8,Fz,Cz,Pz,FC1,FC2,CP1,CP2,FC5,FC6,CP5,CP6,EMG1,EMG2,IO,EMG3,EMG4,
LShankACCX,LShankACCY,LShankACCZ,LShankGYROX,LShankGYROY,LShankGYROZ,NC,RShankACCX,RShankACCY,RShankACCZ,RShankGYROX,
RShankGYROY,RShankGYROZ,NC,WaistACCX,WaistACCY,WaistACCZ,WaistGYROX,WaistGYROY,WaistGYROZ,NC,ArmACCX,ArmACCY,ArmACCZ,
ArmGYROX,ArmGYROY,ArmGYROZ,SC], 500Hz
"""
import sys
sys.path.append("..")
sys.path.append(".")
from config import settings
import formatting
import function
import pandas as pd
import os
import csv
#Cut the ACC/SC data file
def CutGait(sensor_loc, task_id, CUT_TIME):
StartTimeRow, EndTimeRow = 0, 0
StartHour, StartMin, StartSec, EndHour, EndMin, EndSec = function.SplitCutTime(CUT_TIME)
StartTime = StartHour[task_id-1]+':'+StartMin[task_id-1]+':'+StartSec[task_id-1]+'.002'
EndTime = EndHour[task_id-1]+':'+EndMin[task_id-1]+':'+EndSec[task_id-1]+'.002'
col_name = formatting.get_column_name_prep(sensor_loc)
input_path = os.path.join(settings.DATA_PATH_Preprocessed, sensor_loc + settings.FILE_SUFFIX_Preprocessed)
mid_path = os.path.join(settings.DATA_PATH_Preprocessed, sensor_loc + settings.FILE_SUFFIX_PreprocessedMid)
#For nonexistent files, write 0 in the corresponding position
if not os.path.exists(input_path):
new_f = open(input_path,'w')
writer = csv.writer(new_f)
writer.writerow(['0']*len(col_name))
new_f.close()
Dataframe = pd.read_csv(input_path, header=None, index_col=False, names=col_name)
Dataframe = Dataframe.drop([sensor_loc, '?'], axis=1)
os.remove(input_path)
print(f'There is no [{sensor_loc}.csv] for task {task_id}, thus an empty one has been utilized')
else:
origin_f = open(input_path,'r')
reader = csv.reader(origin_f)
column = [row[1] for row in reader]
#Find the start and the end lines of the data
for j in range(0,len(column)):
if StartTime in column[j]:
StartTimeRow = j-1
if EndTime in column[j]:
EndTimeRow = j-1
origin_f.close()
origin_f = open(input_path,'r')
new_f = open(mid_path, 'w')
reader = csv.reader(origin_f)
writer = csv.writer(new_f)
#Select the part between StartTimeRow and EndTimeRow and save it to the corresponding dataframe
for i,row in enumerate(reader):
if ((i >= StartTimeRow) and (i <= EndTimeRow)):
writer.writerow(row)
origin_f.close()
new_f.close()
Dataframe = pd.read_csv(mid_path, header=None, index_col=False, names=col_name)
Dataframe = Dataframe.drop([sensor_loc, '?'], axis=1)
os.remove(mid_path)
function.ZeroDetc(StartTimeRow, EndTimeRow, sensor_loc, task_id)
return Dataframe
#Cut the EEG/EMG data file
def CutEEG(sensor_loc, task_id, CUT_TIME):
StartTimeRow, EndTimeRow = 0, 0
StartHour, StartMin, StartSec, EndHour, EndMin, EndSec = function.SplitCutTime(CUT_TIME)
StartTime = StartHour[task_id-1]+':'+StartMin[task_id-1]+':'+StartSec[task_id-1]+'.002'
EndTime = EndHour[task_id-1]+':'+EndMin[task_id-1]+':'+EndSec[task_id-1]+'.002'
col_name = formatting.get_column_name_prep(sensor_loc)
input_path = os.path.join(settings.DATA_PATH_Preprocessed, sensor_loc + settings.FILE_SUFFIX_Preprocessed)
mid_path = os.path.join(settings.DATA_PATH_Preprocessed, sensor_loc + settings.FILE_SUFFIX_PreprocessedMid)
    # Missing EEG/EMG recordings are not expected in practice; handled
    # defensively anyway, mirroring the empty-file fallback in CutGait
if not os.path.exists(input_path):
new_f = open(input_path, 'w')
writer = csv.writer(new_f)
writer.writerow(['0']*len(col_name))
new_f.close()
Dataframe = pd.read_csv(input_path, header=None, index_col=False, names=col_name)
if sensor_loc != 'EEG':
Dataframe = Dataframe.drop([sensor_loc+'TIME'], axis=1)
Dataframe = Dataframe.drop(['?'], axis=1)
os.remove(input_path)
print(f'There is no [{sensor_loc}.csv] for task {task_id}, thus an empty one has been utilized')
#Same method as the ACC/SC cutting
else:
origin_f = open(input_path,'r')
reader = csv.reader(origin_f)
column = [row[1] for row in reader]
for j in range(0,len(column)):
if StartTime in column[j]:
StartTimeRow = j-1
if EndTime in column[j]:
EndTimeRow = j-1
origin_f.close()
origin_f = open(input_path,'r')
new_f = open(mid_path, 'w')
reader = csv.reader(origin_f)
writer = csv.writer(new_f)
for i,row in enumerate(reader):
if ((i >= StartTimeRow) and (i <= EndTimeRow)):
writer.writerow(row)
origin_f.close()
new_f.close()
Dataframe = pd.read_csv(mid_path, header=None, index_col=False, names=col_name)
if sensor_loc != 'EEG':
Dataframe = Dataframe.drop([sensor_loc+'TIME'], axis=1)
Dataframe = Dataframe.drop(['?'], axis=1)
Dataframe = Dataframe.loc[:,~Dataframe.columns.str.contains('^Unnamed')]
os.remove(mid_path)
function.ZeroDetc(StartTimeRow, EndTimeRow, sensor_loc, task_id)
return Dataframe
def CutData(task_id, CUT_TIME):
TASK_NAME = 'task_' + str(task_id)
output_path = os.path.join(settings.DATA_PATH_Cut, TASK_NAME + settings.FILE_SUFFIX_CUT)
# The gait files are cut separately and stored in a dataframe
Dataframe_Gait_LS = CutGait('LShank', task_id, CUT_TIME)
Dataframe_Gait_RS = CutGait('RShank', task_id, CUT_TIME)
Dataframe_Gait_WST = CutGait('Waist', task_id, CUT_TIME)
Dataframe_Gait_ARM = CutGait('Arm', task_id, CUT_TIME)
Dataframe_Gait = pd.concat([Dataframe_Gait_LS, Dataframe_Gait_RS, Dataframe_Gait_WST, Dataframe_Gait_ARM], axis=1)
Dataframe_Gait = formatting.reformat_col_gait(Dataframe_Gait)
# The EEG and EMG file are cut and stored in a dataframe
Dataframe_EEG = CutEEG('EEG', task_id, CUT_TIME)
Dataframe_EMG = CutEEG('EMG', task_id, CUT_TIME)
# Put multimodal data together.
Dataframe = pd.concat([Dataframe_EEG, Dataframe_EMG, Dataframe_Gait],axis=1)
Dataframe = Dataframe.fillna(value=0)
Dataframe = Dataframe.drop(settings.PREP_COLNAME_DROP, axis=1)
Dataframe = Dataframe.rename(columns={('EEGTIME'): 'TIME'})
Dataframe.to_csv(output_path)
print(f'{TASK_NAME} Segmented Finished')
def prep2seg(TASK_NUMBER, CUT_TIME):
if not os.path.exists(settings.DATA_PATH_Cut):
function.mkdir(settings.DATA_PATH_Cut)
for task_id in range(1, TASK_NUMBER + 1):
CutData(task_id, CUT_TIME)
print('============================')
print('Preprocessed Data -> Segmented Data: Finished')
print('============================\n')
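# Usage sketch (the values are assumptions: TASK_NUMBER is the number of task
# segments, and CUT_TIME must be in whatever format function.SplitCutTime
# expects, i.e. per-task start/end hours, minutes and seconds):
# prep2seg(TASK_NUMBER=10, CUT_TIME=CUT_TIME)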
|
StarcoderdataPython
|
1767694
|
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2019-12-28 13:13:28
# @Last Modified by: <NAME>
# @Last Modified time: 2019-12-28 13:13:28
|
StarcoderdataPython
|
3266238
|
BUILDING_LABEL = 3
OTHER_LABEL = 0
IRRELEVANT_LABELS_ADE = [27, 62, 91, 114, 129]
MAPPING_DICT_ADE = {
1: [10, 14, 17, 30, 35, 47, 69, 95],
2: [5, 18, 67, 73],
3: [1, 2, 4, 6, 26, 49, 85],
4: [87, 89, 115],
5: [33],
6: [3],
7: [7, 55],
8: [12, 53, 92],
9: [9, 15, 19],
10: [21, 84, 104, 117, 128, 134],
11: [13],
}
IRRELEVANT_LABELS_CITYSCAPES = []
MAPPING_DICT_CITYSCAPES = {
1: [6, 22],
2: [21],
3: [11],
4: [],
5: [12, 13, 14],
6: [23],
7: [7, 9],
8: [8],
9: [],
10: [26, 27, 28, 29, 30, 31, 32, 33, -1],
11: [24, 25],
}
IRRELEVANT_LABELS_BARAK = []
MAPPING_DICT_BARAK = {
1: [[107, 142, 35]],
2: [[0, 91, 46]],
3: [[70, 70, 70]],
4: [],
5: [],
6: [[180, 130, 70]],
7: [],
8: [],
9: [],
10: [],
11: [],
}
TEVEL_SHAPE = (700, 560)
TRAIN_PERCENT = 0.7
VAL_PERCENT = 0.2
TEST_PERCENT = 0.1
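# Illustrative helper (an addition, not part of the original constants): it
# collapses a raw ADE20K label map into the reduced class set above, with ids
# not listed anywhere falling back to OTHER_LABEL.
import numpy as np

def remap_ade(label_map):
    """Remap raw ADE20K ids to the reduced label set via MAPPING_DICT_ADE."""
    out = np.full_like(label_map, OTHER_LABEL)
    for target, sources in MAPPING_DICT_ADE.items():
        # mark every pixel whose raw id belongs to this target class
        out[np.isin(label_map, sources)] = target
    return out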
|
StarcoderdataPython
|
187298
|
from django.shortcuts import render
from django.http import HttpResponse
# Include the `fusioncharts.py` file, which has the functions required to embed the charts in an HTML page
from ..fusioncharts import FusionCharts
# Loading Data from a Static JSON String
# This example shows a mscombi2d chart whose data is passed in JSON string format.
# The `chart` method is defined to load chart data from a JSON string.
def chart(request):
# Create an object for the mscombi2d chart using the FusionCharts class constructor
mscombi2d = FusionCharts("mscombi2d", "ex1" , "600", "400", "chart-1", "json",
# The data is passed as a string in the `dataSource` as parameter.
"""{
"chart": {
"caption": "App Publishing Trend",
"subCaption": "2012-2016",
"xAxisName": "Years",
"formatnumberscale": "0",
"numberSuffix": "K",
"showvalues":"0",
"theme": "fint"
},
"categories": [{
"category": [{
"label": "2012"
}, {
"label": "2013"
}, {
"label": "2014"
}, {
"label": "2015"
}, {
"label": "2016"
}]
}],
"dataset": [{
"seriesname": "iOS App Store",
"renderAs": "Spline",
"data": [{
"value": "125"
}, {
"value": "300"
}, {
"value": "480"
}, {
"value": "800"
}, {
"value": "1100"
}]
}, {
"seriesname": "Google Play Store",
"renderAs": "SplineArea",
"data": [{
"value": "70"
}, {
"value": "150"
}, {
"value": "350"
}, {
"value": "600"
},{
"value": "1400"
}]
}, {
"seriesname": "Amazon AppStore",
"data": [{
"value": "10"
}, {
"value": "100"
}, {
"value": "300"
}, {
"value": "600"
},{
"value": "900"
}]
}]
}""")
    # Return the complete JavaScript and HTML code used to render the chart in the browser.
return render(request, 'index.html', {'output' : mscombi2d.render()})
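# To expose this view, a URL pattern is needed in urls.py; a minimal sketch
# (the module path `myapp.views` is an assumption):
#
#     from django.urls import path
#     from myapp import views
#
#     urlpatterns = [
#         path('chart/', views.chart, name='chart'),
#     ]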
|
StarcoderdataPython
|
183260
|
<reponame>M0Rf30/mopidy-cd
from __future__ import unicode_literals
import os
from mopidy import config, ext
__version__ = '0.5.1'
class Extension(ext.Extension):
dist_name = 'Mopidy-Cd'
ext_name = 'cd'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def setup(self, registry):
from .backend import CdBackend
registry.add('backend', CdBackend)
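# For reference, a minimal ext.conf that get_default_config() would read,
# following the usual Mopidy convention (these contents are an assumption,
# not copied from the package):
#
#     [cd]
#     enabled = true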
|
StarcoderdataPython
|
1716992
|
from django import urls
from django.db import models
from django.utils import html
from django.utils.translation import ugettext_lazy as _
from djfw.wysibb.templatetags import bbcodes
from tulius.forum.comments import models as comment_models
from tulius.gameforum.threads import models as thread_models
from tulius.stories import models as story_models
from tulius.forum.comments import signals
class Comment(comment_models.AbstractComment):
role = models.ForeignKey(
story_models.Role, models.PROTECT,
null=True,
blank=True,
related_name='comments',
)
edit_role = models.ForeignKey(
story_models.Role, models.PROTECT,
null=True,
blank=True,
related_name='edited_comments',
)
parent: thread_models.Thread = models.ForeignKey(
thread_models.Thread, models.PROTECT,
null=False,
blank=False,
related_name='comments',
verbose_name=_('thread')
)
def get_absolute_url(self):
return urls.reverse(
'game_forum_api:comment', kwargs={
'pk': self.pk,
'variation_id': self.parent.variation_id,
})
def to_elastic_search(self, data):
super().to_elastic_search(data)
data['variation_id'] = self.parent.variation_id
data['role_id'] = self.role_id or 0
@classmethod
def to_elastic_mapping(cls, fields):
super().to_elastic_mapping(fields)
fields['variation_id'] = {'type': 'integer'}
def to_json(self, user, detailed=False):
""" Override original method to avoid resolving "user" foreign key. """
data = {
'id': self.pk,
'thread': {
'id': self.parent_id,
                # parent is resolved anyway below (for the 'user' field), so this adds no extra query
'url': self.parent.get_absolute_url()
},
'page': self.page,
'user': self.parent.variation.role_to_json(
self.role_id, user, detailed=detailed),
'create_time': self.create_time,
}
data = {
**data,
'url': self.get_absolute_url() if self.pk else None,
'title': html.escape(self.title),
'body': bbcodes.bbcode(self.body),
'edit_right': self.edit_right(user),
'is_thread': self.is_thread(),
'edit_time': self.edit_time,
'editor': self.parent.variation.role_to_json(
self.edit_role_id, user, detailed=True
) if self.edit_time else None,
'media': self.media,
'reply_id': self.reply_id,
}
signals.to_json.send(
self.__class__, comment=self, data=data, user=user)
return data
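# A sketch of hooking the to_json signal sent above (assumes signals.to_json
# is a django.dispatch.Signal and that comments expose a user_id, both
# unverified here):
#
#     from django.dispatch import receiver
#
#     @receiver(signals.to_json, sender=Comment)
#     def annotate_comment_json(sender, comment, data, user, **kwargs):
#         data['is_own'] = bool(user and user.pk == comment.user_id)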
|
StarcoderdataPython
|
3296580
|
# encoding: utf-8
"""
"""
__author__ = '<NAME>'
__date__ = '13 Feb 2020'
__copyright__ = 'Copyright 2018 United Kingdom Research and Innovation'
__license__ = 'BSD - see LICENSE file in top-level package directory'
__contact__ = '<EMAIL>'
from collections import namedtuple
def get_file_subset(path_gen, max_number):
"""
Get a subset of file from a generator
:param path_gen: pathlib.Path.glob
:param max_number: int
:return: list of pathlib.Path objects
"""
filelist = []
while len(filelist) < max_number:
try:
next_path = next(path_gen)
if next_path.is_file():
filelist.append(next_path)
except StopIteration:
break
return filelist
TaggedDataset = namedtuple('TaggedDataset', ['drs', 'labels', 'uris'])
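# Usage sketch (the directory and glob pattern are made up):
if __name__ == '__main__':
    from pathlib import Path
    first_ten = get_file_subset(Path('/data/archive').glob('**/*.nc'), 10)
    tagged = TaggedDataset(drs='cmip5', labels=['tas'], uris=[p.as_uri() for p in first_ten])
    print(tagged)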
|
StarcoderdataPython
|