id | text | dataset_id |
---|---|---|
3351688
|
"""
Snippets for file handling, reading, and parsing.
<NAME>, 2013-2014
"""
import os
def finding_max_nsets(array_of_files, num_of_files):
"""
Find max number of sets
"""
    max_num_set = 0
for n in range(num_of_files):
filename = str(array_of_files[n])
nset, nl = parsing_name_file(filename)
if nset > max_num_set:
max_num_set = nset
return max_num_set
def counting_number_of_files(path):
"""
Count the number of data files in the folder.
"""
count = 0
for f in os.listdir(path):
if os.path.isfile(os.path.join(path, f)):
count += 1
return count
def reading_name_files(path):
"""
Read all the names of the data file and save them in an array.
"""
array_filenames = []
for root, dirs, files in os.walk(path):
for name in files:
            filename = name  # os.path.join() with a single argument is a no-op; only the bare file name is needed by parsing_name_file
array_filenames.append(filename)
return array_filenames
def parsing_name_file(filename):
"""
Parse the name of the data file to extract l and set number.
"""
str1 = filename.split("t")
str2 = str1[1].split("l")
str3 = str2[1].split(".")
str4 = str3[0][1:]
str5 = '0.'+ str4
nset = int(str2[0])
nl = float(str5)
return nset, nl
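# Illustration of the file-name pattern the splits above assume (the pattern itself is an
# assumption, it is not documented in the original snippet): a name such as "set3l075.dat",
# where the digits after "t" give the set number and the digits after "l" (first digit
# dropped, "0." prefixed) give the l value:
assert parsing_name_file("set3l075.dat") == (3, 0.75)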
def parsing_data(path):
"""
This is the calling module.
"""
num_of_files = counting_number_of_files(path)
array_of_files = reading_name_files(path)
max_num_set = finding_max_nsets(array_of_files, num_of_files)
return num_of_files, array_of_files, max_num_set
|
StarcoderdataPython
|
1707588
|
sample = """35
20
15
25
47
40
62
55
65
95
102
117
150
182
127
219
299
277
309
576"""
full_input = """33
18
22
44
49
15
12
38
41
46
3
42
37
19
13
7
21
29
34
40
39
35
27
25
48
87
10
16
17
45
18
30
20
22
23
73
24
26
28
53
31
37
51
32
33
34
36
35
54
27
38
39
40
74
70
41
93
144
45
63
87
66
65
55
58
62
72
105
59
76
61
67
68
77
106
78
79
183
86
117
96
100
108
104
123
113
114
116
129
119
178
120
196
126
177
184
135
182
192
201
174
360
186
190
210
248
214
282
217
227
229
304
235
370
308
261
302
300
542
376
309
317
356
364
375
407
396
400
404
585
599
514
444
907
456
633
687
496
665
561
563
832
1056
626
673
681
720
731
840
819
796
800
1033
1117
900
958
940
952
1017
1019
1748
1177
1422
1124
1578
1189
1299
1678
1307
1354
2702
2476
1689
3775
1740
2813
2706
1977
1898
1840
2630
2864
1971
2439
2143
2932
2301
2313
2423
2488
2496
3194
2661
2996
3043
4416
3429
3529
3580
3638
3738
3983
4830
3811
4114
4724
5782
4394
4444
7143
6623
6882
7267
5149
7823
5855
5657
5704
6425
7026
6958
9666
7109
7218
10249
8535
7794
11512
14327
9969
15906
8838
10101
9593
12082
10806
10853
13451
17489
13073
11361
12129
17210
13383
13984
30562
16329
14903
15753
19306
16632
23920
26456
26113
18431
27032
19644
27882
20399
37885
21659
27976
24812
23490
24744
25345
25512
42785
27367
29737
30656
45579
31535
32385
35063
52788
38075
38830
44544
40090
40043
55249
57897
42058
45149
52712
48234
48302
72946
50089
64342
52879
103551
57104
72428
62191
79837
63920
71215
73138
80133
84587
82148
156430
90132
152323
113146
90360
197733
100946
105591
101113
98391
288093
154280
109983
204497
164424
152975
137058
126111
191473
181079
144353
153271
170265
166735
183261
180492
188523
236094
235449
188751
202059
262815
305610
199504
208374
247041
312042
254336
440591
353175
442859
263169
270464
616344
297624
311088
320006
402829
377274
363753
369015
501377
388255
390810
397125
401563
407878
446545
455415
471543
732768
524800
751679
1043856
533633
626922
568088
581552
608712
699343
631094
697280
779065
926958
921925
868668
785380
969651
856978
798688
809441
989430
901960
1036967
996343
1285312
1133512
1101721
1195010
1115185
1149640
1330437
1911355
1239806
1328374
1912577
1476345
3239729
1654048
1655666
1594821
1584068
1608129
1758938
1700648
1711401
2365341
1898303
2895658
2098064
2216906
4654596
3293074
2769233
2354991
3039775
2834627
3238116
3810880
3132011
3060413
3178889
3284716
3192197
3202950
3295469
4253294
3928307
3412049
4720332
4993722
4867297
3996367
4571897
8152013
4986139
5124224
5189618
6472462
5394766
5874402
5895040
7003077
6192424
7385305
8181601
6476913
6395147
6487666
8716699
6707518
7408416
8398188
7983946
8568264
8863664
8982506
9120591
13184431
10860541
10110363
10313842
10584384
11289806
15470172
11769442
12382706
16579789
15056088
12872060
13896082
12964579
13102665
25185888
24474237
14115934
20633106
16382134
16552210
23098440
19092869
18103097
24209924
24392471
27842016
24254385
28687481
24152148
23059248
38150467
24641502
25347285
25836639
42357482
36023827
26067244
27080513
35645079
49033973
30498068
39463219
42619454
32934344
34655307
29221323
37195966
60665329
62725592
47313633
50901264
47211396
82958712
47700750
48406533
49988787
50708746
51183924
51903883
53147757
88497192
77789259
56301836
86546343
67694034
59719391
70130310
81340877
101892670
63876630
66417289
84407362
96107283
94525029
146096070
94912146
95617929
103087807
101554290
180025291
103136544
102612629
110903315
105051640
109449593
116021227
120178466
122719125
178932391
197661573
123596021
167971579
130293919
283113098
199576669
245349680
199243827
189437175
213515944
190530075
196466436
197172219
233045614
390106744
223315010
225331754
207664269
214501233
396748888
225470820
291567600
312156300
478395294
343809863
253889940
298265498
509328519
319731094
472550273
379967250
385903611
386609394
479221694
711440908
545201914
393638655
629794502
422165502
439972053
680214542
461554209
433135089
468391173
1024423608
479360760
545457540
552155438
854294784
573621034
617996592
678232748
705634705
699698344
938840569
773605905
772513005
780248049
815804157
1170152030
826773744
855192864
1418201329
862137555
1794033433
1024818300
894689298
1051131681
1163454132
1031516198
1052981794
1119078574
1125776472
1191617626
1450745753
1485882754
1377931092
1405333049
1472211349
2266130803
1546118910
1552761054
2576522225
1642577901
1717330419
1681966608
1749882162
2300022347
1886955855
2216435926
1919507598
2242749307
2084497992
2150594772
2157292670
2172060368
2244855046
2317394098
2569548718
2891215803
2783264141
2850142441
2877544398
3762554836
3098879964
4533830024
3195338955
4384520339
5066578367
3399297027
3568922463
3636838017
8170668041
6514382415
4373728596
4467988870
4322655140
6446466861
4307887442
5674479944
4416915414
7406767406
5100658239
5352812859
5978603096
7105919281
6498176991
8669580702
6294218919
6735717981
11048212439
8021358356
6968219490
11573908151
7036135044
7205760480
7944725459
9423313379
8630542582
8790644010
17868127070
8724802856
9408545681
9982367386
10453471098
9517573653
11079261335
12792395910
18253972919
12272822015
13400138200
13233894972
13029936900
13262438409
14912944949
14004354534
27677286298
17188127866
20436273244
14241895524
21790395668
20502574714
17421186592
19499941039
17515446866
25912930722
23687366070
18926119334
20435838484
25672960215
24109198235
44865414164
25065217925
25302758915
25506716987
26263831872
30683625001
27034291434
27504333933
28246250058
31192482400
48104811593
36921127631
33168014858
31663082116
42486404517
34936633458
43188407081
50683461724
36441566200
43991337259
81786541795
39361957818
44545036719
49174416160
49411957150
61876107401
50367976840
53549008973
51770548859
53298123306
54538625367
55280541492
59909332174
84348590608
87706640225
64831096974
82346543840
71025039934
66599715574
71378199658
126331578514
75803524018
121746176498
93956993869
115923236377
89729934658
83906994537
93719452879
143131410029
99779933990
102138525699
136119139467
125563665301
106309174226
107836748673
109819166859
115189873666
126509047748
185622690877
147177640814
131430812548
287761216576
137624755508
192931294088
165533458676
159710518555
177626447416
201793742542
173636929195
183449387537
183686928527
186045520236
193499386869
206089108216
217655915532
208447699925
407882850758
423745023748
232818221974
239267561221
300145976943
241698921414
314880200085
269055568056
330627028351
291141331103
353209905424
343159906092
392134628452
325243977231
351263376611
363671967652
357086316732
416505150501
367136316064
399588495085
394493220161
745756596772
414536808141
426103615457
441265921899
472085783195
584858827506
474517143388
592477466645
709373420246
510754489470
783641466565
560196899159
841653459452
616385308334
694423282703
719737197392
676507353842
886622591336
720758284384
724222632796
1071000574003
810998370662
1010962442963
794081715246
1208618523387
1150639342145
1001462821058
867369537356
915783065287
946602926583
985271632858
1034714042547
1176582207493
1070951388629
1127139797804
1587106734748
1812461191720
2502889800035
1292892662176
1370930636545
1396244551234
1607380875720
3537603842582
1444980917180
1795174021425
1938370111359
1661451252602
1709864780533
2086974377422
1950497107834
1813972463939
1783152602643
2105665431176
1986734453916
1931874559441
2019985675405
3108456766934
2198091186433
2788591050406
2420032459980
2689137213410
3769887056559
4136461297792
2663823298721
2767175187779
3003625426954
4056162539010
3106432169782
3228133519823
3371316033135
3733649710477
4887228399843
3493017383176
3597125066582
5109290858130
3803138278048
5917270733233
4125651106581
3951860234846
4129965745874
4218076861838
4618123646413
4861914485154
5109169673390
7773114156851
6156840681897
6840081880259
5430998486500
5667448725675
6500824898256
6110057596736
6721150902999
10235708703317
7446210381661
6968441099717
7090142449758
8743774752994
7296155661224
7400263344630
8912307951438
8665052763202
8077511341427
8081825980720
9079991346992
15069148633335
10374917543735
13510320941366
9971084158544
11219227270126
14917593221686
12271080366759
11098447212175
12168273623931
11777506322411
12610882494992
12831208499735
13689592002716
14058583549475
14264596760941
14368704444347
14386298110982
14696419005854
23369527578934
20231471844365
16159337322147
24824390085349
25915646275980
26997180605974
25678594565297
22242164525303
22802292658279
22139357782475
21069531370719
22317674482301
22875953534586
25442090994727
28961015766795
25836089871886
26042103083352
26300474497708
40284350720327
28755002555329
28323180310416
35334128131660
51203180292098
36525655893457
45044457183582
45193628016887
36390809166512
42074983598127
48153764354187
43311695896022
49176428032294
43208889153194
43387205853020
43871824028998
44457032264776
73989854226073
47759765477028
48318044529313
51278180866613
54591092427215
69044979025080
52342577581060
54623654808124
57078182865745
76082945787444
92610796618963
71724937298172
72916465059969
78465792764639
79599698319706
81584437183399
79702505062534
122092893092263
86596095006214
86520585049216
87080713182192
96799609845836
87259029882018
99080687072900"""
|
StarcoderdataPython
|
1698981
|
<gh_stars>0
import re
test = re.compile(
    r'^'
    r'(?!.*(\d)(-?\1){3})'   # reject any digit repeated 4+ times in a row (hyphens in between are ignored)
    r'[456]'                 # the number must start with 4, 5 or 6
    r'\d{3}'                 # the rest of the first group of 4 digits
    r'(?:-?\d{4}){3}'        # three more groups of 4 digits, each optionally preceded by '-'
    r'$'
)
for _ in range(int(input())):
print("Valid" if test.search(input().strip()) else "Invalid")
|
StarcoderdataPython
|
8318
|
<reponame>richo/groundstation
from broadcast_ping import BroadcastPing
EVENT_TYPES = {
"PING": BroadcastPing,
}
class UnknownBroadcastEvent(Exception):
pass
def new_broadcast_event(data):
event_type, payload = data.split(" ", 1)
if event_type not in EVENT_TYPES:
raise UnknownBroadcastEvent(event_type)
return EVENT_TYPES[event_type](payload)
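if __name__ == "__main__":
    # Minimal dispatch sketch (the wire format is assumed, not documented here):
    # the word before the first space selects the handler class and the remainder is
    # passed to it as the payload, e.g. "PING <payload>" -> BroadcastPing(<payload>).
    try:
        new_broadcast_event("BOGUS whatever")
    except UnknownBroadcastEvent as exc:
        print("unknown broadcast event type:", exc)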
|
StarcoderdataPython
|
3314933
|
import os
import sys
from collections import *
from copy import deepcopy
from itertools import *
# change to dir of script
os.chdir(os.path.dirname(os.path.abspath(__file__)))
input_file = "input.txt"
if "s" in sys.argv:
input_file = "input_small.txt"
try:
with open(input_file) as f:
data = f.read() # entire file as string
lines = data.splitlines()
except:
print("no " + input_file)
data, lines = "", []
def ans(answer):
# store answer to clipboard
from distutils.spawn import find_executable
xclip_path = find_executable("xclip")
if xclip_path:
os.system(f'echo "{answer}"| {xclip_path} -selection clipboard -in')
print("\t", answer, "| in clipboard\n")
else:
print(f"\t {answer} | (answer)\n")
############### end of boilerplate ############################################
### PART 1 ###
def pos_moves(nums, goal):
"how far is each num from goal in total?"
moves = 0
for num in nums:
moves += abs(goal - num)
return moves
def part1(nums):
"check every possible position in the range of positions. take min"
return min([pos_moves(nums, m) for m in range(min(nums), max(nums) + 1)])
### PART 2 ###
def pos_moves2(nums, goal):
"moving now has a different cost to p (triangular number cost)"
def tri(n):
"triangular number"
return int((n * (n + 1)) / 2)
fuel = 0
for num in nums:
fuel += tri(abs(goal - num))
return fuel
def part2(nums):
"check every possible position in the range of positions. take min"
return min([pos_moves2(nums, m) for m in range(min(nums), max(nums) + 1)])
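# Sanity check against the puzzle's published sample input (AoC 2021 day 7);
# 37 and 168 are the sample answers stated in the puzzle text, assumed here.
_sample = [16, 1, 2, 0, 4, 2, 7, 1, 2, 14]
assert part1(_sample) == 37
assert part2(_sample) == 168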
if __name__ == "__main__":
data = [int(i) for i in lines[0].split(",")]
ans(part1(data.copy())) # 342641
ans(part2(data.copy())) # 93006301
|
StarcoderdataPython
|
3373355
|
# Copyright 2021 The Data Text Grid Reader Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List
from _pytest.fixtures import FixtureRequest
from pytest import fixture
from sqlalchemy import create_engine
from sqlalchemy.engine.base import Connection
from sqlalchemy.engine.cursor import CursorResult
class DatabaseTestBase:
_db_connection = None
@staticmethod
def get_required_env_var(variable_name: str):
value = os.getenv(variable_name)
if not value:
raise ValueError(f'Required environment variable "{variable_name}" not found.')
return value
@fixture(autouse=True, scope="module")
def __before_and_after_all_tests(self, db_connection: Connection) -> None:
# Setup:
DatabaseTestBase._db_connection = db_connection
yield # Test functions will run at this point.
# Teardown:
@classmethod
def execute(cls, query: str) -> CursorResult:
return cls._db_connection.execute(query)
@classmethod
def execute_as_list(cls, query: str) -> List[dict]:
return [dict(row) for row in cls.execute(query)]
@fixture(scope="module")
def db_connection(self, request: FixtureRequest, db_connection_uri) -> Connection:
"""
Create a SQLAlchemy Connection fixture for testing.
:param request: pytest FixtureRequest object
:param db_connection_uri: A SQLAlchemy connection URI
:return: A SQLAlchemy Connection
"""
if not db_connection_uri:
            raise ValueError('db_connection_uri is required.')
db_engine = create_engine(db_connection_uri)
connection = db_engine.connect()
request.addfinalizer(lambda: connection.close()) # Close connection after all tests have run
return connection
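# Minimal usage sketch (kept as a comment; the table name and the conftest-provided
# `db_connection_uri` fixture below are assumptions, not part of this module):
#
#   class TestUsers(DatabaseTestBase):
#       def test_count_users(self):
#           rows = self.execute_as_list("SELECT COUNT(*) AS n FROM users")
#           assert rows[0]["n"] >= 0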
|
StarcoderdataPython
|
4840387
|
# -*- coding: utf-8 -*-
# Merge offline TS files downloaded by Tencent Video Speed Edition (腾讯视频极速版)
import os
import sys
import logging
import inspect
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from m3u8downloader.downloader import M3U8Downloader
if __name__ == '__main__':
LOG_LEVEL = logging.INFO
log = logging.getLogger()
log.setLevel(LOG_LEVEL)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(LOG_LEVEL)
formatter = logging.Formatter('%(asctime)s - %(module)s.%(funcName)s:%(lineno)d - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
if len(sys.argv) < 3:
print("python combine-ts.py hls video.ts")
else:
downloader = M3U8Downloader(log)
tsPath = sys.argv[1]
tsFilePath = sys.argv[2]
baseFolderName = tsPath.split("/")[-1]
tsFileList = []
moreFile = True
index = 0
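        # Expected on-disk layout, inferred from the loop below (it is not documented
        # in the original script): <tsPath>/<base>_0_29/0.ts .. 29.ts,
        # <tsPath>/<base>_30_59/30.ts .. 59.ts, and so on; iteration stops at the
        # first missing "<base>_<i>_<i+29>" folder.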
while moreFile:
tsFolder = baseFolderName + "_" + str(index)
index = index + 30
tsFolder = tsFolder + "_" + str(index - 1)
tsFolder = os.path.join(tsPath, tsFolder)
if os.path.exists(tsFolder):
moreFile = True
for i in range(index - 30, index):
ts = os.path.join(tsFolder, str(i) + ".ts")
if os.path.exists(ts):
tsFileList.append(ts)
else:
moreFile = False
downloader.combineTS(tsFileList, tsFilePath)
|
StarcoderdataPython
|
1660663
|
import torch
from torch.nn import functional as F
def deep_gambler_loss(outputs, targets, reward):
outputs = F.softmax(outputs, dim=1)
outputs, reservation = outputs[:,:-1], outputs[:,-1]
# gain = torch.gather(outputs, dim=1, index=targets.unsqueeze(1)).squeeze()
gain = outputs[torch.arange(targets.shape[0]), targets]
doubling_rate = (gain.add(reservation.div(reward))).log()
return -doubling_rate.mean()
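# Note on the formula above (this appears to follow the "Deep Gamblers" style of
# selective loss; treat the description as an interpretation, not the authors' docs):
# the model keeps probability `gain` on the true class and `reservation` on an extra
# abstention output, and each example contributes -log(gain + reservation / reward),
# so a larger `reward` makes abstaining less attractive.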
class SelfAdativeTraining():
def __init__(self, num_examples=50000, num_classes=10, mom=0.9):
self.prob_history = torch.zeros(num_examples, num_classes)
self.updated = torch.zeros(num_examples, dtype=torch.int)
self.mom = mom
self.num_classes = num_classes
def _update_prob(self, prob, index, y):
onehot = torch.zeros_like(prob)
onehot[torch.arange(y.shape[0]), y] = 1
prob_history = self.prob_history[index].clone().to(prob.device)
        # if not initialized yet, use the one-hot label to initialize the running vector
cond = (self.updated[index] == 1).to(prob.device).unsqueeze(-1).expand_as(prob)
prob_mom = torch.where(cond, prob_history, onehot)
# momentum update
prob_mom = self.mom * prob_mom + (1 - self.mom) * prob
self.updated[index] = 1
self.prob_history[index] = prob_mom.to(self.prob_history.device)
return prob_mom
def __call__(self, logits, y, index):
prob = F.softmax(logits.detach()[:, :self.num_classes], dim=1)
prob = self._update_prob(prob, index, y)
soft_label = torch.zeros_like(logits)
soft_label[torch.arange(y.shape[0]), y] = prob[torch.arange(y.shape[0]), y]
soft_label[:, -1] = 1 - prob[torch.arange(y.shape[0]), y]
soft_label = F.normalize(soft_label, dim=1, p=1)
loss = torch.sum(-F.log_softmax(logits, dim=1) * soft_label, dim=1)
return torch.mean(loss)
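if __name__ == "__main__":
    # Minimal usage sketch (batch and model sizes are assumed): `index` identifies each
    # training example so that its running soft label can be tracked across epochs.
    criterion = SelfAdativeTraining(num_examples=100, num_classes=10, mom=0.9)
    logits = torch.randn(8, 11)              # 10 class logits + 1 abstention logit
    y = torch.randint(0, 10, (8,))
    index = torch.arange(8)                  # dataset indices of this batch
    print(deep_gambler_loss(logits, y, reward=2.2))
    print(criterion(logits, y, index))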
|
StarcoderdataPython
|
3385326
|
<gh_stars>0
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# pylint: disable=C0103,C0114
import sys
import tblob
with open(sys.argv[1], 'rb') as blob_file:
tparser = tblob.tblob()
blob = tparser.parse_blob(blob_file.read())
print(blob)
|
StarcoderdataPython
|
37048
|
class Calculador_de_impostos:
def realiza_calculo(self, orcamento, imposto):
imposto_calculado = imposto.calcula(orcamento)
print(imposto_calculado)
if __name__ == '__main__':
from orcamento import Orcamento, Item
from impostos import ISS, ICMS, ICPP, IKCV
orcamento = Orcamento()
orcamento.adiciona_item(Item('ITEM 1', 50))
orcamento.adiciona_item(Item('ITEM 2', 200))
orcamento.adiciona_item(Item('ITEM 3', 250))
calculador_de_impostos = Calculador_de_impostos()
print('ISS e ICMS')
calculador_de_impostos.realiza_calculo(orcamento, ICMS(ISS()))
print('ICPP e IKCV')
calculador_de_impostos.realiza_calculo(orcamento, IKCV(ICPP()))
|
StarcoderdataPython
|
1739229
|
<reponame>dannyvi/py-fly-compile-c<filename>compfly/parse/loader.py
"""Loader do the preceding part of constructing a parser(syntax analyzer).
It read rules from a .grammar definition file, and execute a analyze procedure.
The procedure of loading is below:
1. seperate file by --------------- seperate line into **definitions** and
**rules** part.
2. augment syntax by adding ``startsup -> start "$"`` with an "$" as the end.
3. get definition funcs in the **rule** part into namespace env.
4. strip comments from the **definitions** part.
5. separate the productions from **definition**.
6. get none terminals by reading the head of the productions.
7. get terminals and values by reading from the right part of the productions.
8. generate grammar list.
9. eliminating null productions.
The main function is ``load_grammar`` and it
returns a ``grammar list``, a ``symbol list``, and an ``env``.
"""
import re
from functools import reduce
from .atoms import Production, NTerm, Term, Value, Code, Null
def strip_comments(stream):
"""Strip comments, tail comments, but keep # in quotations."""
switch = '\'"'
quoted = False
quotation = None
triplet = 0
mulline = False
commented = False
code = ''
for num, i in enumerate(stream):
if triplet:
code += i
triplet -= 1
continue
if i in switch:
if not quoted:
quoted = True
quotation = i
if stream[num+1] == stream[num+2] == i:
triplet = 2
mulline = True
else:
if i == quotation:
if mulline:
if stream[num+1] == stream[num+2] == i:
triplet = 2
mulline = False
quoted = False
quotation = None
else:
quoted = False
quotation = None
elif i == '#':
if not quoted:
commented = True
elif i == '\n' and commented and not mulline:
commented = False
if commented:
code += ' '
else:
code += i
return code
def get_none_terminals(prod_str_list):
n_terms = []
spec = re.compile(r"""(?s)^\s*?(?P<Head>\w+) # head Nterm
.*? # any character
(?:(?P<Multi>(?:\|\s*{{.*?}}.*?{{.*?}}\s*)$)|
(?P<Nullable>(?:\|\s*|\|\s*{{.*?}}\s*)?$))""",
re.X)
for p in prod_str_list:
res = re.match(spec, p).groups()
nterm = NTerm(res[0], bool(res[2]))
if nterm not in n_terms:
n_terms.append(nterm)
else:
if nterm.nullable:
n_terms[n_terms.index(nterm)] = nterm
return n_terms
def decompose_prod(prod, n_terms, values, terms, codes):
"""Decompose productions and return productions, terms, values and codes.
Incrementally add productions, values, terms, and codes that are discovered
in the prod parameter.
    :param prod: a production string that may use '|' to combine several bodies under one head.
    :param n_terms: the complete list of non-terminals.
    :param values: the values already collected from preceding productions.
    :param terms: the terminals already collected from preceding productions.
    :param codes: the code actions already collected from preceding productions.
:return: a list of (productions, null_prods, values, terms, codes)
"""
p = r"(?s)^\s*?(\w+)\s*(.*)$"
h_str, units = re.match(p, prod).groups()
head = n_terms[n_terms.index(NTerm(h_str))]
bodies = re.split(r"\|", units)
productions = []
null_prods = []
spec = [r"(?s){{\s*(?P<Code>.+?)\s*}}",
r"(?P<Spaces>\s+)",
r"(?P<quote>[\"'])(?P<Value>\S+)(?P=quote)",
r"(?P<Term_NTerm>\w+)",
]
pattern = "|".join(spec)
for body in bodies:
formlist = []
for symbol in re.finditer(pattern, body):
kind = symbol.lastgroup
value = symbol.group(kind)
if kind == "Value":
v = Value(value)
formlist.append(v)
if v not in values:
values.append(v)
elif kind == "Term_NTerm":
if NTerm(value) in n_terms:
t = n_terms[n_terms.index(NTerm(value))]
formlist.append(t)
else:
t = Term(value)
formlist.append(t)
if t not in terms:
terms.append(t)
elif kind == "Code":
t = Code(f"CD{str(len(codes))}", value)
formlist.append(t)
codes.append(t)
nullprod = Production.cons(t, (Null(),))
null_prods.append(nullprod)
production = Production.cons(head, tuple(formlist))
productions.append(production)
return productions, null_prods, values, terms, codes
def eliminate_null_production(grammar):
new_gram = []
grams = list(map(lambda p: p.remove_null(), grammar))
for i in grammar:
prods = i.remove_null()
for p in prods:
if p not in new_gram:
new_gram.append(p)
return new_gram
def substitue_ref(production):
p = production.production
total = len(p)
for num, atom in enumerate(p):
if isinstance(atom, Code):
startp = -num
format_t = [f'Stack[{i}]' for i in range(startp, 0)]
atom.source = atom.source.format(*format_t)
def load_grammar(grammar_file):
"""Read rules from grammar file, parse and return intermediates.
For example::
from compfly.parse.loader import load_grammar
gram_file = "a.grammar" # the file with correct path needed
grammar, symbols, env = load_grammar(gram_file)
:param grammar_file: grammar rules in \*.grammar file.
:return: a tuple contains a grammar list, a symbols list, and an env.
"""
grammar = []
env = {}
with open(grammar_file) as f:
        # 1. separate the file at the ----------------- separator line
raw_grammar, definitions = re.split(r"(?m)^[\s-]+[-]+[\s-]+$", f.read())
# 2. augment syntax
aug_grammar = 'startsup :== start "$" \n' + raw_grammar
aug_definitions = definitions + '\n\ndef startsup(f):\n return f()'
# 3. get definition funcs into namespace
exec(aug_definitions, env)
# 4. strip comments
pure_grammar = strip_comments(aug_grammar)
# 5. get productions with | operators. alter to prefixed expression.
prefixed = re.sub(r"(?m)^(\w+)(\s*?)(:==)", r"\3\2\1", pure_grammar)
prods = [i for i in re.split(":==", prefixed) if i]
# 6. get none terminals list
n_terms = get_none_terminals(prods)
# 7. get terminals and terminal values list. And all symbols list
values, terms, codes, null_prod_list = [], [], [], []
# 8. generate grammar list, contains production rules.
for prod in prods:
result = decompose_prod(prod, n_terms, values, terms, codes)
p_list, n_list, values, terms, codes = result
null_prod_list.extend(n_list)
grammar.extend(p_list)
grammar.extend(null_prod_list)
symbols = [Null()] + values + terms + n_terms + codes
# 9. eliminate null productions.
new_grammar = eliminate_null_production(grammar)
for i in new_grammar:
substitue_ref(i)
return new_grammar, symbols, env
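# Sketch of the *.grammar layout this loader appears to expect, inferred from the
# regexes above (the rule names and actions below are illustrative, not from the project):
#
#   start :== expr {{ print({0}) }}
#   expr  :== expr "+" expr {{ ... }}
#         |   num {{ ... }}
#   ---------------------------------
#   def expr(...): ...
#
# i.e. productions written with ":==" above a dashed separator line, Python action
# functions below it, "{{ ... }}" blocks as embedded semantic actions, and quoted
# tokens treated as terminal values.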
|
StarcoderdataPython
|
1642266
|
# Generated by Django 3.0.10 on 2020-09-11 12:00
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("admin_tools_stats", "0007_auto_20200205_1054"),
]
operations = [
migrations.AddField(
model_name="criteriatostatsm2m",
name="default_option",
field=models.CharField(
blank=True,
default="",
help_text="Works only with Chart filter criteri",
max_length=255,
verbose_name="Default filter criteria option",
),
),
migrations.AddField(
model_name="dashboardstats",
name="default_multiseries_criteria",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="default_choices_stats",
to="admin_tools_stats.CriteriaToStatsM2M",
),
),
]
|
StarcoderdataPython
|
4830664
|
<reponame>Atelis/DirectML<gh_stars>100-1000
#!/usr/bin/env python
# Copyright (c) Microsoft Corporation. All rights reserved.
import matplotlib.pyplot as plt
import json
import os
import re
import argparse
script_root = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument(
"--traces_dir", "-t",
default=os.path.join(script_root, "traces"),
help="Path to traces directory")
args = parser.parse_args()
# Parse trace summaries
trace_environments = {}
for trace_dir in os.listdir(args.traces_dir):
trace_dir_full = os.path.join(args.traces_dir, trace_dir)
if os.path.isdir(trace_dir_full):
        trace_env = re.sub(r"(\w+)_(\d+)_(NCHW|NHWC).*", r"\1", trace_dir)
        trace_batch_size = re.sub(r"(\w+)_(\d+)_(NCHW|NHWC).*", r"\2", trace_dir)
        trace_data_format = re.sub(r"(\w+)_(\d+)_(NCHW|NHWC).*", r"\3", trace_dir)
        trace_env_with_format = f"{trace_env}_{trace_data_format}"
        if trace_env_with_format not in trace_environments:
trace_environments[trace_env_with_format] = {"medians":[], "batch_sizes":[], "measurements":[]}
trace_summary_path = os.path.join(trace_dir_full, "summary.json")
with open(trace_summary_path, "r") as f:
trace_summary = json.load(f)
trace_environments[trace_env_with_format]["batch_sizes"].append(int(trace_batch_size))
trace_environments[trace_env_with_format]["medians"].append(float(trace_summary["median"]))
trace_environments[trace_env_with_format]["measurements"].append(trace_summary["measurements"])
# Plot median times
for trace_environment_name in trace_environments:
trace_environment = trace_environments[trace_environment_name]
medians, batch_sizes = zip(*sorted(zip(trace_environment["medians"], trace_environment["batch_sizes"])))
line, = plt.plot(batch_sizes, medians)
line.set_label(trace_environment_name)
print(trace_environment_name)
print(medians)
plt.xlabel("Batch Size")
plt.ylabel("ms")
plt.title("Training Step Time")
plt.legend()
plt.show()
|
StarcoderdataPython
|
186212
|
"""
test_tools: tests function in denovo.tools
<NAME> <<EMAIL>>
Copyright 2020-2021, <NAME>
License: Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0)
ToDo:
Add tests for the remaining functions in tools.
"""
import dataclasses
import sys
from typing import (Any, Callable, ClassVar, Dict, Hashable, Iterable, Mapping,
MutableMapping, MutableSequence, Optional, Sequence, Type,
Union)
import denovo
@dataclasses.dataclass
class TestClass(object):
a_dict: Dict = dataclasses.field(default_factory = dict)
    a_list: MutableSequence = dataclasses.field(default_factory = list)
a_none: None = None
def test_how_soon_is_now():
current_datetime = denovo.tools.how_soon_is_now()
assert isinstance(current_datetime, str)
return
def test_instancify():
an_instance = TestClass()
result = denovo.tools.instancify(item = TestClass, a_list = ['a'])
assert result.a_list == ['a']
result = denovo.tools.instancify(item = an_instance, a_list = ['a'])
assert result.a_list == ['a']
return
def test_listify():
some_list = ['a', 'b', 'c']
another_list = ['d']
a_string = 'tree'
an_int = 4
result = denovo.convert.listify(item = some_list)
assert result == ['a', 'b', 'c']
result = denovo.convert.listify(item = another_list)
assert result == ['d']
result = denovo.convert.listify(item = a_string)
assert result == ['tree']
result = denovo.convert.listify(item = an_int)
assert result == [4]
return
def test_namify():
an_instance = TestClass()
result = denovo.unit.get_name(item = an_instance)
assert result == 'test_class'
an_instance.name = 'huh'
result = denovo.unit.get_name(item = an_instance)
assert result == 'huh'
return
def test_numify():
a_string = '5'
an_int = 4
another_string = 'tree'
result = denovo.tools.numify(item = a_string)
assert result == 5
result = denovo.tools.numify(item = an_int)
assert result == 4
result = denovo.tools.numify(item = another_string)
assert result == 'tree'
return
if __name__ == '__main__':
testables = denovo.test.get_testables(module = denovo.tools)
denovo.test.run_tests(testables = testables,
module = sys.modules[__name__])
|
StarcoderdataPython
|
131155
|
import re
import copy
import pickle
import numpy as np
from collections import OrderedDict
import torch
from torch.autograd import Variable
import global_variables as g
def save_checkpoint(state, filename='./checkpoints/checkpoint.pth.tar'):
print('save model!', filename)
torch.save(state, filename)
def save_pickle(d, path):
print('save pickle to', path)
with open(path, mode='wb') as f:
pickle.dump(d, f)
def load_pickle(path):
print('load', path)
with open(path, mode='rb') as f:
return pickle.load(f)
def get_entities(fpath):
entities = OrderedDict({'R_cuisine': [], 'R_location': [], 'R_price': [], 'R_number': []})
with open(fpath, 'r') as file:
lines = file.readlines()
for l in lines:
wds = l.rstrip().split(' ')[2].split('\t')
slot_type = wds[0] # ex) R_price
slot_val = wds[1] # ex) cheap
# if slot_type not in entities:
# entities[slot_type] = []
if slot_type in entities:
if slot_val not in entities[slot_type]:
entities[slot_type].append(slot_val)
return entities
def load_embd_weights(word2vec, vocab_size, embd_size, w2i):
embedding_matrix = np.zeros((vocab_size, embd_size))
print('embed_matrix.shape', embedding_matrix.shape)
found_ct = 0
for word, idx in w2i.items():
# words not found in embedding index will be all-zeros.
if word in word2vec.wv:
embedding_matrix[idx] = word2vec.wv[word]
found_ct += 1
print(found_ct, 'words are found in word2vec. vocab_size is', vocab_size)
return torch.from_numpy(embedding_matrix).type(torch.FloatTensor)
def preload(fpath, vocab, system_acts):
with open(fpath, 'r') as f:
lines = f.readlines()
for idx, l in enumerate(lines):
l = l.rstrip()
if l != '':
ls = l.split("\t")
t_u = ls[0].split(' ', 1)
# turn = t_u[0]
uttr = t_u[1].split(' ')
if len(ls) == 2: # includes user and system utterance
for w in uttr:
if w not in vocab:
vocab.append(w)
if len(ls) == 2: # includes user and system utterance
sys_act = ls[1]
sys_act = re.sub(r'resto_\S+', '', sys_act)
if sys_act.startswith('api_call'): sys_act = 'api_call'
if sys_act not in system_acts: system_acts.append(sys_act)
vocab = sorted(vocab)
system_acts = sorted(system_acts)
return vocab, system_acts
def load_data(fpath, entities, w2i, system_acts):
'''
store data as dialog (multi turns)
'''
data = []
with open(fpath, 'r') as f:
lines = f.readlines()
# x: user uttr, y: sys act, c: context, b: BoW, p: previous sys act, f: action filter
x, y, c, b, p, f = [], [], [], [], [], []
context = [0] * len(entities.keys())
for idx, l in enumerate(lines):
l = l.rstrip()
if l == '':
data.append((x, y, c, b, p, f))
# reset
x, y, c, b, p, f = [], [], [], [], [], []
context = [0] * len(entities.keys())
else:
ls = l.split("\t")
t_u = ls[0].split(' ', 1)
# turn = t_u[0]
uttr = t_u[1].split(' ')
update_context(context, uttr, entities)
act_filter = generate_act_filter(len(system_acts), context)
bow = get_bow(uttr, w2i)
sys_act = g.SILENT
if len(ls) == 2: # includes user and system utterance
sys_act = ls[1]
sys_act = re.sub(r'resto_\S+', '', sys_act)
if sys_act.startswith('api_call'): sys_act = 'api_call'
else:
continue # TODO
x.append(uttr)
if len(y) == 0:
p.append(g.SILENT)
else:
p.append(y[-1])
y.append(sys_act)
c.append(copy.deepcopy(context))
b.append(bow)
f.append(act_filter)
return data, system_acts
def update_context(context, sentence, entities):
for idx, (ent_key, ent_vals) in enumerate(entities.items()):
for w in sentence:
if w in ent_vals:
context[idx] = 1
def generate_act_filter(action_size, context):
mask = [0] * action_size
# TODO hard coding
# 0 <SILENT>
# 1 any preference on a type of cuisine
# 2 api_call
# 3 great let me do the reservation
# 4 hello what can i help you with today
# 5 here it is
# 6 how many people would be in your party
# 7 i'm on it
# 8 is there anything i can help you with
# 9 ok let me look into some options for you
# 10 sure is there anything else to update
# 11 sure let me find an other option for you
# 12 what do you think of this option:
# 13 where should it be
# 14 which price range are looking for
# 15 you're welcome
# context: {'R_cuisine': [], 'R_location': [], 'R_price': [], 'R_number': []}
mask[0] = 1
mask[7] = 1
mask[8] = 1
if context == [0, 0, 0, 0]:
mask[4] = 1
if context == [1, 1, 1, 1]:
mask[2] = 1
mask[3] = 1
mask[5] = 1
mask[8] = 1
mask[9] = 1
mask[10] = 1
mask[11] = 1
mask[12] = 1
mask[15] = 1
if context[0] == 0: # R_cuisine
mask[1] = 1
if context[1] == 0: # R_location
mask[13] = 1
if context[2] == 0: # R_price
mask[14] = 1
if context[3] == 0: # R_number
mask[6] = 1
return mask
def get_bow(sentence, w2i):
bow = [0] * len(w2i)
for word in sentence:
if word in w2i:
bow[w2i[word]] += 1
return bow
def add_padding(data, seq_len):
pad_len = max(0, seq_len - len(data))
data += [0] * pad_len
data = data[:seq_len]
return data
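# Tiny self-check for the two helpers above (toy vocabulary, not project data):
if __name__ == "__main__":
    _w2i = {'<UNK>': 0, 'hello': 1, 'world': 2}
    print(get_bow(['hello', 'hello', 'world'], _w2i))   # -> [0, 2, 1]
    print(add_padding([1, 2], 5))                       # -> [1, 2, 0, 0, 0]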
def make_word_vector(uttrs_list, w2i, dialog_maxlen, uttr_maxlen):
dialog_list = []
for uttrs in uttrs_list:
dialog = []
for sentence in uttrs:
sent_vec = [w2i[w] if w in w2i else w2i[g.UNK] for w in sentence]
sent_vec = add_padding(sent_vec, uttr_maxlen)
dialog.append(sent_vec)
for _ in range(dialog_maxlen - len(dialog)):
dialog.append([0] * uttr_maxlen)
dialog = torch.LongTensor(dialog[:dialog_maxlen])
dialog_list.append(dialog)
return to_var(torch.stack(dialog_list, 0))
def to_var(x):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)
def padding(data, default_val, maxlen, pad_seq_len):
for i, d in enumerate(data):
pad_len = maxlen - len(d)
for _ in range(pad_len):
data[i].append([default_val] * pad_seq_len)
return to_var(torch.FloatTensor(data))
def get_data_from_batch(batch, w2i, act2i):
uttrs_list = [d[0] for d in batch]
dialog_maxlen = max([len(uttrs) for uttrs in uttrs_list])
uttr_maxlen = max([len(u) for uttrs in uttrs_list for u in uttrs])
uttr_var = make_word_vector(uttrs_list, w2i, dialog_maxlen, uttr_maxlen)
batch_labels = [d[1] for d in batch]
labels_var = []
for labels in batch_labels:
vec_labels = [act2i[l] for l in labels]
pad_len = dialog_maxlen - len(labels)
for _ in range(pad_len):
vec_labels.append(act2i[g.SILENT])
labels_var.append(torch.LongTensor(vec_labels))
labels_var = to_var(torch.stack(labels_var, 0))
batch_prev_acts = [d[4] for d in batch]
prev_var = []
for prev_acts in batch_prev_acts:
vec_prev_acts = []
for act in prev_acts:
tmp = [0] * len(act2i)
tmp[act2i[act]] = 1
vec_prev_acts.append(tmp)
pad_len = dialog_maxlen - len(prev_acts)
for _ in range(pad_len):
vec_prev_acts.append([0] * len(act2i))
prev_var.append(torch.FloatTensor(vec_prev_acts))
prev_var = to_var(torch.stack(prev_var, 0))
context = copy.deepcopy([d[2] for d in batch])
context = padding(context, 1, dialog_maxlen, len(context[0][0]))
bow = copy.deepcopy([d[3] for d in batch])
bow = padding(bow, 0, dialog_maxlen, len(bow[0][0]))
act_filter = copy.deepcopy([d[5] for d in batch])
act_filter = padding(act_filter, 0, dialog_maxlen, len(act_filter[0][0]))
return uttr_var, labels_var, context, bow, prev_var, act_filter
|
StarcoderdataPython
|
3245475
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from data_structures.linked_list import BaseNode, InsertPositions, LinkedList, SearchPositions
class Node(BaseNode):
"""Class implementing a `node` in a singly linked list
Each node contains following parts:
- value
- a pointer to the next node
"""
pass
class SinglyLinkedList(LinkedList):
"""Singly linked list that can only be traversed in a forward direction.
A singly linked list could also be circularly linked where the last node
    points to the head node as its next node.
"""
circular = False #: boolean flag indicating whether the linked list is circular
def make_circular(self):
"""Method to circularly link a singly linked list.
:return:
"""
if not self.is_empty() and self.get_tail():
self.get_tail().set_next(self.get_head())
def _insert_before(self, data, reference_value):
"""Method to insert a node before the node with the `reference_value` in the linked list.
This implementation of insert has a worst case complexity O(n). This is because the insert
method has to first seek the position of the reference value before performing an insert. At
best, the reference value is the head of the list and at worst it's the last item.
:param data: data item to be inserted into the linked list
:param reference_value: reference value for inserting between nodes
:return:
"""
new_node = Node(data=data)
#: if linked list is empty, reference_value won't exist hence initialize with node
if self.is_empty():
return self.initialize(node=new_node)
node_before = self.search(data=reference_value, position=SearchPositions.BEFORE)
if node_before:
#: if node with reference_value is found, set new_node as it's next node
#: no need to reset list tail as new node is inserted before another node
#: because even in the event that that node was the tail, it will not be
            #: displaced from its position
new_node.set_next(node_before.get_next())
node_before.set_next(new_node)
else:
#: if node with reference_value is not found, default to inserting to the front of the
#: linked list as it's a constant time operation
self.insert(data, position=InsertPositions.BEGINNING)
def _insert_after(self, data, reference_value):
"""Method to insert a node after the node with the `reference_value` in the linked list.
This implementation of insert has a worst case complexity O(n). This is because the insert
method has to first seek the position of the reference value before performing an insert.
At best, the reference value is the head of the list and at worst it's the last item.
:param data: data item to be inserted into the linked list
:param reference_value: reference value for inserting between nodes
:return:
"""
new_node = Node(data=data)
#: if linked list is empty, reference_value won't exist hence initialize with node
if self.is_empty():
return self.initialize(node=new_node)
reference_node = self.search(data=reference_value)
if reference_node:
#: if node with reference_value is found, set new_node as it's next node
new_node.set_next(reference_node.get_next())
reference_node.set_next(new_node)
#: we need to reset the list's tail as new node is inserted after the reference
            #: node which is displaced from its position as the last node in the list
if reference_node is self.get_tail():
self.set_tail(new_node)
else:
#: if node with reference_value is not found, default to inserting to the front of the
#: linked list as it's a constant time operation
self.insert(data, position=InsertPositions.BEGINNING)
def _insert_beginning(self, data, reference_value=None):
"""Method to insert a node at the beginning of the linked list
This implementation of insert is constant O(1) (efficient!). This is because the insert
method, no matter what, will always take the same amount of time: it can only take one data
point, it can only ever create one node, and the new node does not need to interact with all
the other nodes in the list, the inserted node will only ever interact with the head.
:param data: data item to be inserted into the linked list
:param reference_value: reference value for inserting between nodes
:return:
"""
new_node = Node(data=data)
new_node.set_next(self.get_head())
self.set_head(new_node)
#: set the tail of the list if it was an empty list
if not self.get_tail() or not self.get_head().get_next():
self.set_tail(new_node)
def _insert_end(self, data, reference_value=None):
"""Method to insert a node at the end of the linked list.
This implementation of insert has a complexity O(n). This is because the insert method has
to first seek the last node before performing an insert which would involve traversing the
size of linked list.
:param data: data item to be inserted into the linked list
:param reference_value: reference value for inserting between nodes
:return:
"""
new_node = Node(data=data)
#: if linked list is empty, reference_value won't exist hence initialize with node
if self.is_empty():
return self.initialize(node=new_node)
self.get_tail().set_next(new_node)
#: set new node as the tail of the list
self.set_tail(new_node)
def insert(self, data, position=InsertPositions.BEGINNING, reference_value=None):
"""Method to positionally insert a node into the linked list.
A node can be inserted at the beginning, end, after or before a particular node.
The default behaviour is to insert at the beginning because that implementation is of
constant time complexity O(1).
:param data: data item to be inserted into the linked list
:param position: where in the linked list to do the insert i.e beginning, end, after, before
:param reference_value: reference value for inserting between nodes i.e. after or before
:return:
"""
getattr(self, "_insert_{}".format(position.lower()), self._insert_beginning)(
data, reference_value
)
#: since this is the entry point for all inserts; push, append, insert before and after,
#: we want to control the list's circular property on insert from this point by setting
#: the tail node's next pointer to the head node after every insert operation
if self.circular:
self.make_circular()
def push(self, data):
"""Method to insert a node at the beginning of a linked list; hence the semantics.
:param data: data item to be inserted into the linked list
:return:
"""
self.insert(data, position=InsertPositions.BEGINNING)
def append(self, data):
"""Method to insert a node at the end of a linked list; hence the semantics.
:param data: data item to be inserted into the linked list
:return:
"""
self.insert(data, position=InsertPositions.END)
def _node_before(self, data):
"""Method to traverse through a linked list whilst looking for the node with the data item
then determine the node pointing to it i.e node before it.
Search is linear beginning at the head of the linked list thus in the event a data item
appears more than once in the list, the first encountered is referenced.
Search is actually very similar to size, but instead of traversing the whole list of nodes
it checks at each stop to see whether the current node has the requested data.
The time complexity of search is O(n) in the worst case.
:param data: item to look for in the linked list
        :return: node before the node holding the data item
"""
head = self.get_head()
previous, current, found, result = None, head, False, None
#: while taking care to ensure that we are not circling back
#: in a circular linked list hence iterating infinitely
while not found and current:
if current.get_data() == data:
result, found = previous, True
break
previous, current = current, current.get_next()
#: how we know we have circled back in a circular linked list
if not found and current is head:
current = None
return result
def _node_with(self, data):
"""Method to traverse through a linked list whilst looking for an item.
Search is linear beginning at the head of the linked list thus in the event a data item
appears more than once in the list, the first encountered is referenced.
Search is actually very similar to size, but instead of traversing the whole list of nodes
it checks at each stop to see whether the current node has the requested data.
The time complexity of search is O(n) in the worst case.
:param data: item to look for in the linked list
:return: node holding the data item
"""
head = self.get_head()
current, found, result = head, False, None
#: while taking care to ensure that we are not circling back
#: in a circular linked list hence iterating infinitely
while not found and current:
if current.get_data() == data:
result, found = current, True
break
            current = current.get_next()
#: how we know we have circled back in a circular linked list
if not found and current is head:
current = None
return result
def _node_after(self, data):
"""Method to traverse through a linked list whilst looking for the node with the data item
then determine the node it points to i.e node after it.
Search is linear beginning at the head of the linked list thus in the event a data item
appears more than once in the list, the first encountered is referenced.
Search is actually very similar to size, but instead of traversing the whole list of nodes
it checks at each stop to see whether the current node has the requested data.
The time complexity of search is O(n) in the worst case.
:param data: item to look for in the linked list
:return: node after the node holding the data item
"""
current = self._node_with(data)
next_node = None
if current:
next_node = current.get_next()
return next_node
def search(self, data, position=SearchPositions.CURRENT):
"""Method to traverse through a linked list whilst looking for an item.
This method returns either the node holding the data item, the node before or the node after
depending on the configuration chosen. It defaults to returning the node holding the item.
Search is linear beginning at the head of the linked list thus in the event a data item
appears more than once in the list, the first encountered is referenced.
Search is actually very similar to size, but instead of traversing the whole list of nodes
it checks at each stop to see whether the current node has the requested data.
The time complexity of search is O(n) in the worst case.
:param data: item to look for in the linked list
:param position: node relative to the node holding data item to return
:return: node holding the data item
"""
return getattr(self, "_node_{}".format(position.lower()), self._node_with)(data)
def delete(self, data):
"""Method to traverse through a linked list whilst looking for an item to delete it.
Delete is very similar to search. The delete method traverses the list in the same way that
search does, but in addition to keeping track of the current node, the delete method also
remembers the last node it visited. When delete finally arrives at the node it wants to
delete, it simply removes that node from the chain by “leap frogging” it. This mean that
when the delete method reaches the node it wants to delete, it looks at the last node it
visited (the ‘previous’ node), and resets that previous node’s pointer so that, rather than
pointing to the soon-to-be-deleted node, it will point to the next node in line. Since no
nodes are pointing to the poor node that is being deleted, it is effectively removed from
the list!
The time complexity for delete is also O(n), because in the worst case it will visit every
node, interacting with each node a fixed number of times.
:param data: item to delete from the linked list
:return:
"""
head = self.get_head()
previous, current, found, result = None, head, False, None
#: while taking care to ensure that we are not circling back
#: in a circular linked list hence iterating infinitely
while not found and current:
if current.get_data() == data:
result, found = current, True
break
previous, current = current, current.get_next()
#: how we know we have circled back in a circular linked list
if not found and current is head:
previous = current = None
#: can't delete a node if it does not exist
if not found:
return
next_node = result.get_next()
if result == next_node:
self.set_head(None)
self.set_tail(None)
return
if previous:
previous.set_next(next_node)
if not previous:
self.set_head(next_node)
#: if we have just deleted the last node
if not next_node:
self.set_tail(previous)
#: since this is the entry point for all delete operations we want to control the list's
#: circular property on deletion from this point by setting the tail node's next pointer
# to the head node after every delete operation
if self.circular:
self.make_circular()
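# Illustrative usage (a comment-only sketch: it assumes the LinkedList base class can be
# constructed with no arguments and that InsertPositions also defines AFTER, neither of
# which is shown in this file):
#
#   sll = SinglyLinkedList()
#   sll.push(1); sll.append(3)                                  # 1 -> 3
#   sll.insert(2, InsertPositions.AFTER, reference_value=1)     # 1 -> 2 -> 3
#   sll.search(2).get_data()                                    # 2
#   sll.delete(2)                                               # 1 -> 3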
|
StarcoderdataPython
|
1687800
|
import logging
from io import StringIO
from typing import Union
from asyncio import TimeoutError
import discord
from discord.ext import commands
from bot import constants
from bot.cogs.utils.embed_handler import authored, failure, success, info
from bot.cogs.utils.checks import check_if_it_is_tortoise_guild
from bot.cogs.utils.message_logger import MessageLogger
logger = logging.getLogger(__name__)
class UnsupportedFileExtension(Exception):
pass
class UnsupportedFileEncoding(ValueError):
pass
class TortoiseDM(commands.Cog):
def __init__(self, bot):
self.bot = bot
# Key is user id value is mod/admin id
self.active_mod_mails = {}
self.pending_mod_mails = set()
self.active_event_submissions = set()
self.active_bug_reports = set()
self.active_suggestions = set()
# Keys are custom emoji IDs, sub-dict message is the message appearing in the bot DM
# and callable is the method to call when that option is selected.
self._options = {
constants.mod_mail_emoji_id: {
"message": "Contact staff (mod mail)",
"callable": self.create_mod_mail
},
constants.event_emoji_id: {
"message": "Event submission",
"callable": self.create_event_submission
},
constants.bug_emoji_id: {
"message": "Bug report",
"callable": self.create_bug_report
},
constants.suggestions_emoji_id: {
"message": "Make a suggestion",
"callable": self.create_suggestion
}
}
# User IDs for which the trigger_typing() is active, so we don't spam the method.
self._typing_active = set()
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
if payload.guild_id is not None:
return # Only allow in DMs
user_id = payload.user_id
if user_id == self.bot.user.id:
return # Ignore the bot
elif self.is_any_session_active(user_id):
return
for emoji_id, sub_dict in self._options.items():
emoji = self.bot.get_emoji(emoji_id)
if emoji == payload.emoji:
user = self.bot.get_user(user_id)
await sub_dict["callable"](user)
break
@commands.Cog.listener()
async def on_message(self, message):
if message.author == self.bot.user:
return
elif message.guild is not None:
return # Functionality only active in DMs
if self.is_any_session_active(message.author.id):
return
else:
await self.send_dm_options(output=message.author)
@commands.Cog.listener()
async def on_typing(self, channel, user, _when):
if not isinstance(channel, discord.DMChannel):
return
elif not self.is_any_session_active(user.id):
return
elif user.id in self._typing_active:
return
destination_id = self.active_mod_mails.get(user.id)
if destination_id is None:
            # If it's None, then no user with that ID has an open mod mail request.
            # However, the ID could still belong to the mod/admin attending a mod mail.
destination_id = self._get_dict_key_by_value(user.id)
if destination_id is None:
                # If it's None again, the ID belongs to neither a user nor a mod/admin.
return
self._typing_active.add(user.id)
destination_user = self.bot.get_user(destination_id)
# Per docs: Active for 10s or until first message
await destination_user.trigger_typing()
self._typing_active.remove(user.id)
def _get_dict_key_by_value(self, value: int) -> int:
for key, v in self.active_mod_mails.items():
if v == value:
return key
async def send_dm_options(self, *, output):
tortoise_guild = self.bot.get_guild(constants.tortoise_guild_id)
emoji_map = {self.bot.get_emoji(emoji_id): sub_dict['message'] for emoji_id, sub_dict in self._options.items()}
msg_options = "\n\n".join(f"{emoji} {message}" for emoji, message in emoji_map.items())
embed = discord.Embed(description=msg_options)
embed.set_footer(text=f"Tortoise Community{constants.embed_space * 100}")
embed.set_thumbnail(url=str(tortoise_guild.icon_url))
msg = await output.send(embed=embed)
for emoji in emoji_map.keys():
if emoji is None:
logger.warning("Sending DM options failed as emoji is not found.")
return
else:
await msg.add_reaction(emoji)
def is_any_session_active(self, user_id: int) -> bool:
# If the mod mail or anything else is active don't clutter the active session
return any(
user_id in active for active in (
self.active_mod_mails.keys(),
self.active_mod_mails.values(),
self.active_event_submissions,
self.active_bug_reports,
self.active_suggestions
)
)
async def create_mod_mail(self, user: discord.User):
if user.id in self.pending_mod_mails:
await user.send(embed=failure("You already have a pending mod mail, please be patient."))
return
mod_mail_report_channel = self.bot.get_channel(constants.mod_mail_report_channel_id)
submission_embed = authored(f"`{user.id}` submitted for mod mail.", author=user)
await mod_mail_report_channel.send(embed=submission_embed)
self.pending_mod_mails.add(user.id)
await user.send(embed=success("Mod mail was sent to admins, please wait for one of the admins to accept."))
async def create_event_submission(self, user: discord.User):
user_reply = await self._get_user_reply(self.active_event_submissions, user)
if user_reply is None:
return
code_submissions_channel = self.bot.get_channel(constants.code_submissions_channel_id)
await code_submissions_channel.send(
f"User `{user}` ID:{user.id} submitted code submission: "
f"{user_reply}"
)
await user.send(embed=success("Event submission successfully submitted."))
self.active_event_submissions.remove(user.id)
async def create_bug_report(self, user: discord.User):
user_reply = await self._get_user_reply(self.active_bug_reports, user)
if user_reply is None:
return
bug_report_channel = self.bot.get_channel(constants.bug_reports_channel_id)
await bug_report_channel.send(f"User `{user}` ID:{user.id} submitted bug report: {user_reply}")
await user.send(embed=success("Bug report successfully submitted, thank you."))
self.active_bug_reports.remove(user.id)
async def create_suggestion(self, user: discord.User):
user_reply = await self._get_user_reply(self.active_suggestions, user)
if user_reply is None:
return
user_suggestions_channel = self.bot.get_channel(constants.suggestions_channel_id)
await user_suggestions_channel.send(f"User `{user}` ID:{user.id} submitted suggestion: {user_reply}")
await user.send(embed=success("Suggestion successfully submitted, thank you."))
self.active_suggestions.remove(user.id)
async def _get_user_reply(self, container: set, user: discord.User) -> Union[str, None]:
"""
Helper method to get user reply, only deals with errors.
Uses self._wait_for method so it can get both the user message reply and text from attachment file.
:param container: set, container holding active user sessions by having their IDs in it.
:param user: Discord user to wait reply from
:return: Union[str, None] string representing user reply, can be None representing invalid reply.
"""
user_reply = await self._wait_for(container, user)
if user_reply is None:
return None
try:
possible_attachment = await self.get_message_txt_attachment(user_reply)
except (UnsupportedFileExtension, UnsupportedFileEncoding) as e:
await user.send(embed=failure(f"Error: {e} , canceling."))
container.remove(user.id)
return
user_reply_content = user_reply.content if possible_attachment is None else possible_attachment
if len(user_reply_content) < 10:
await user.send(embed=failure("Too short - seems invalid, canceling."))
container.remove(user.id)
return None
else:
return user_reply_content
async def _wait_for(self, container: set, user: discord.User) -> Union[discord.Message, None]:
"""
        Simple custom wait_for: waits up to 5 minutes for a user reply, supports
        cancelling the wait, handles errors, and maintains the containers that mark
        users as currently busy (event submission, bug report, etc.).
:param container: set, container holding active user sessions by having their IDs in it.
:param user: Discord user to wait reply from
:return: Union[Message, None] message representing user reply, can be none representing invalid reply.
"""
def check(msg):
return msg.guild is None and msg.author == user
container.add(user.id)
await user.send(
            embed=info(
                "Reply with a single message, a link to a paste service, or upload a utf-8 `.txt` file.\n"
                "You have 5 minutes; type `cancel` to cancel right away.", user
)
)
try:
user_reply = await self.bot.wait_for("message", check=check, timeout=300)
except TimeoutError:
await user.send(embed=failure("You took too long to reply."))
container.remove(user.id)
return
if user_reply.content.lower() == "cancel":
await user.send(embed=success("Successfully canceled."))
container.remove(user.id)
return
return user_reply
@classmethod
async def get_message_txt_attachment(cls, message: discord.Message) -> Union[str, None]:
"""
        Only `.txt` file attachments with utf-8 encoding are supported.
:param message: message object to extract attachment from.
:return: Union[str, None]
:raise UnsupportedFileExtension: If file type is other than .txt
        :raise UnsupportedFileEncoding: If decoding the file as utf-8 fails
"""
try:
attachment = message.attachments[0]
except IndexError:
return None
if not attachment.filename.endswith(".txt"):
raise UnsupportedFileExtension("Only `.txt` files supported")
try:
content = (await attachment.read()).decode("utf-8")
except UnicodeDecodeError:
raise UnsupportedFileEncoding("Unsupported file encoding, please only use utf-8")
return content
@commands.command()
@commands.has_permissions(administrator=True)
@commands.check(check_if_it_is_tortoise_guild)
async def attend(self, ctx, user_id: int):
# Time to wait for FIRST USER reply. Useful if mod attends but user is away.
first_timeout = 10_800
        # Flag for the above variable. False means there have been no messages from the user.
first_timeout_flag = False
        # After the user sends the first reply this is the timeout we use.
regular_timeout = 600
user = self.bot.get_user(user_id)
mod = ctx.author
# Keep a log of all messages in mod-mail
log = MessageLogger(mod.id, user.id)
mod_mail_report_channel = self.bot.get_channel(constants.mod_mail_report_channel_id)
if user is None:
            await ctx.send(embed=failure("That user cannot be found or you entered an incorrect ID."))
return
elif user_id not in self.pending_mod_mails:
await ctx.send(embed=failure("That user is not registered for mod mail."))
return
elif self.is_any_session_active(mod.id):
            await ctx.send(embed=failure("You already have an active session (report/mod mail etc.)."))
return
self.pending_mod_mails.remove(user_id)
self.active_mod_mails[user_id] = mod.id
await user.send(
embed=authored(
(
"has accepted your mod mail request.\n"
"Reply here in DMs to chat with them.\n"
"This mod mail will be logged, by continuing you agree to that.\n"
"Type `close` to close this mod mail."
),
author=mod
)
)
await mod.send(
        embed=success(
            f"You have accepted `{user}`'s mod mail request.\n"
"Reply here in DMs to chat with them.\n"
"This mod mail will be logged.\n"
"Type `close` to close this mod mail."
)
)
await ctx.send(embed=success("Mod mail initialized, check your DMs."), delete_after=10)
def mod_mail_check(msg):
return msg.guild is None and msg.author.id in (user_id, mod.id)
_timeout = first_timeout
while True:
try:
mail_msg = await self.bot.wait_for("message", check=mod_mail_check, timeout=_timeout)
log.add_message(mail_msg)
except TimeoutError:
timeout_embed = failure("Mod mail closed due to inactivity.")
log.add_embed(timeout_embed)
await mod.send(embed=timeout_embed)
await user.send(embed=timeout_embed)
del self.active_mod_mails[user_id]
await mod_mail_report_channel.send(file=discord.File(StringIO(str(log)), filename=log.filename))
break
# Deal with dynamic timeout.
if mail_msg.author == user and not first_timeout_flag:
first_timeout_flag = True
_timeout = regular_timeout
# Deal with canceling mod mail
if mail_msg.content.lower() == "close":
close_embed = success(f"Mod mail successfully closed by {mail_msg.author}.")
log.add_embed(close_embed)
await mod.send(embed=close_embed)
await user.send(embed=close_embed)
del self.active_mod_mails[user_id]
await mod_mail_report_channel.send(file=discord.File(StringIO(str(log)), filename=log.filename))
break
# Deal with user-mod communication
if mail_msg.author == user:
await mod.send(mail_msg.content)
elif mail_msg.author == mod:
await user.send(mail_msg.content)
def setup(bot):
bot.add_cog(TortoiseDM(bot))
|
StarcoderdataPython
|
3245662
|
<reponame>neo2100/BioNIR_Pipeline
# BioNIR Pipeline class
# Pipeline order:
# 1- documentRetrieval (PubMed APIs or BM25+DB)
# 2- preprocessing: (a- sentence splitting, b- co-reference resolution, c- abbreviation resolution, d- sentence simplification)
# 3- embedding (SBERT or BioASQ SBERT)
# 4- pooling (MEAN, MAX, or CLS, hyperparameters)
# 5- ranking (vectorSimilarity)
# Define different pipelines, connect them to evaluators, and run tests.
# Input: retrievalType (LocalBM25 or PubMed), query, and max number of documents
# Input: pipe types, query, max number of documents, and ranked snippets
# Output: a list of ranked documents containing:
## id, directLink, type, text, rank, score
from .documentRetrieval.documentRetrieval import DocumentRetrieval
from .preprocessing.preprocessing import Preprocessing
from .embedding.embedding import Embedding
from .ranking.ranking import Ranking
from .utility.utility import Utility
class Pipe:
def __init__(self, pipeName, parameters):
pipeType = pipeName.split(" as ")
if len(pipeType) < 2:
            return print("ERROR: please use standard pipe names, e.g., PubMed as documentRetrieval")
if pipeType[1] == "documentRetrieval":
self.pipe = DocumentRetrieval(pipeType[0], parameters)
elif pipeType[1] == "preprocessing":
self.pipe = Preprocessing(pipeType[0], parameters)
elif pipeType[1] == "embedding":
self.pipe = Embedding(pipeType[0], parameters)
elif pipeType[1] == "ranking":
self.pipe = Ranking(pipeType[0], parameters)
        elif pipeType[1] == "utility":
            self.pipe = Utility(pipeType[0], parameters)
        else:
            # previously an unknown pipe type silently left self.pipe unset; fail loudly instead
            return print("ERROR: unknown pipe type: " + pipeType[1])
def execute(self, input):
return self.pipe.execute(input)
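# Hedged usage sketch (not part of the original module): chains Pipe stages in the order
# described in the header comments. The concrete pipe names and parameter dicts below are
# assumptions for illustration; the real names must match what the wrapped classes accept.
if __name__ == "__main__":
    pipeline = [
        Pipe("PubMed as documentRetrieval", {"maxDocuments": 10}),
        Pipe("SBERT as embedding", {"pooling": "MEAN"}),
        Pipe("vectorSimilarity as ranking", {}),
    ]
    data = "What gene is mutated in cystic fibrosis?"  # the query
    for stage in pipeline:
        data = stage.execute(data)  # each stage's output feeds the next stage
    print(data)  # expected: a list of ranked documents/snippets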
|
StarcoderdataPython
|
1688114
|
"""Custom methods for device entities."""
from six.moves.urllib.parse import urlparse
from io import StringIO
def download_report_file(self, absolute_url):
"""Download a report file.
:param self: Instance of the entity for which this is being called.
:type self: mbed_cloud.foundation.DeviceEnrollmentBulkCreate or mbed_cloud.foundation.DeviceEnrollmentBulkDelete
:param absolute_url: URL of the report file in Pelion Device Management.
:type absolute_url: str
:rtype: StringIO - contents of the downloaded file.
"""
if absolute_url:
relative_path = urlparse(absolute_url).path
response = self._client.call_api(method="get", path=relative_path)
return StringIO(response.text)
else:
        # If the URL is not available, return an empty stream
return StringIO()
def download_full_report_file(self, foreign_key):
"""Download a full report file.
:param self: Instance of the entity for which this is a custom method.
:type self: mbed_cloud.foundation.DeviceEnrollmentBulkCreate or mbed_cloud.foundation.DeviceEnrollmentBulkDelete
:param foreign_key: Class name.
:rtype: StringIO - contents of the downloaded file.
"""
return download_report_file(self, self.full_report_file)
def download_errors_report_file(self, foreign_key):
"""Download an error report file.
:param self: Instance of the entity for which this is a custom method.
:type self: mbed_cloud.foundation.DeviceEnrollmentBulkCreate or mbed_cloud.foundation.DeviceEnrollmentBulkDelete
:param foreign_key: Class name.
:rtype: StringIO - contents of the downloaded file.
"""
return download_report_file(self, self.errors_report_file)
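# Hedged usage sketch (illustration only, not part of the SDK module): `bulk_entity` is assumed
# to be an already-retrieved DeviceEnrollmentBulkCreate/Delete instance with its report URLs
# populated; in the SDK these functions are bound onto the entity as custom methods.
def print_full_report(bulk_entity):
    report = download_full_report_file(bulk_entity, "DeviceEnrollmentBulkCreate")
    print(report.read())  # StringIO contents of the downloaded report (empty if no URL)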
|
StarcoderdataPython
|
82188
|
import sys
from torch.utils.data import Dataset, DataLoader
import os
import os.path as osp
import glob
import numpy as np
import random
import cv2
import pickle as pkl
import json
import h5py
import torch
import matplotlib.pyplot as plt
from lib.utils.misc import process_dataset_for_video
class SurrealDataset(Dataset):
def __init__(self, config, is_train=True):
self.is_train = is_train
self.frame_interval = config.DATA.FRAME_INTERVAL
# randomization will lead to inferior performance
# since diff will only be used when training
self.data_path = config.DATA.TRAIN_PATH if self.is_train else config.DATA.VALID_PATH
self.use_same_norm_2d = config.DATA.USE_SAME_NORM_2D
self.use_same_norm_3d = config.DATA.USE_SAME_NORM_2D
self.seed_set = False
self.head_root_distance = 1 / config.TRAIN.CAMERA_SKELETON_DISTANCE
        # whether to use dataset adapted from Kinetics
self.use_gt = config.USE_GT
self.min_diff_dist = config.DATA.MIN_DIFF_DIST
self.bound_azim = config.TRAIN.BOUND_AZIM # y axis rotation
self.bound_elev = config.TRAIN.BOUND_ELEV
self._load_data_set()
def get_seqnames(self):
return self.sequence_names
def _load_data_set(self):
# self.v3d_2d_to_ours = np.arange(17)
if self.is_train:
print('start loading surreal {} data.'.format("train" if self.is_train else "test"))
key = "original_joint_2d_gt" if self.use_gt else "joint_2d_pre"
assert self.use_gt
fp = h5py.File(self.data_path, "r")
self.kp2ds = np.array(fp[key])
self.kp2ds[:, :, 0] = (self.kp2ds[:, :, 0] - 160.0) / 160.0
self.kp2ds[:, :, 1] = (self.kp2ds[:, :, 1] - 160.0) / 160.0
# locate root at the origin
# self.kp2ds[:, 12] = (self.kp2ds[:, 8] + self.kp2ds[:, 9]) / 2
self.kp2ds = self.kp2ds - self.kp2ds[:, 13:14]
self.kp2ds[:, 13] = 1e-5
# imagenames will be used to sample frames
self.imagenames = [name.decode() for name in fp['imagename'][:]]
if 'seqname' not in fp.keys():
fp.close()
print("Process corresponding dataset...")
process_dataset_for_video(self.data_path, is_surreal=True)
fp = h5py.File(self.data_path, "r")
self.sequence_lens = np.array(fp['seqlen'])
self.sequence_names = [name.decode() for name in fp['seqname'][:]]
self.indices_in_seq = np.array(fp['index_in_seq'])
        # normalize again so that the mean distance of head and root is 1/c
if not self.use_same_norm_2d:
factor_gt = self.head_root_distance / (np.tile(np.linalg.norm(self.kp2ds[:, -1] - self.kp2ds[:, 13], axis=1).reshape(-1, 1, 1), (1, 17, 2)) + 1e-8)
else:
factor_gt = self.head_root_distance / np.linalg.norm(self.kp2ds[:, -1] - self.kp2ds[:, 13], axis=1).mean()
self.kp2ds = self.kp2ds * factor_gt
self.kp3ds = np.array(fp['joint_3d_gt'])
# self.kp3ds[:, 12] = (self.kp3ds[:, 8] + self.kp3ds[:, 9]) / 2
factor_3d = np.linalg.norm(self.kp3ds[:, -1] - self.kp3ds[:, 13], axis=1).mean()
factor_filename = "../data/surreal_{}_factor_3d.pkl".format("train" if self.is_train else "test")
if not self.use_same_norm_3d and not osp.exists(factor_filename):
factor_3d = (np.tile(np.linalg.norm(self.kp3ds[:, -1] - self.kp3ds[:, 13], axis=1).reshape(-1, 1, 1), (1, 17, 3)) + 1e-8)
with open(factor_filename, "wb") as f:
pkl.dump(factor_3d, f)
fp.close()
print('finished load surreal {} data, total {} samples'.format("train" if self.is_train else "test", \
self.kp2ds.shape[0]))
# get random diff1
self.diff_indices = []
for index in range(self.kp2ds.shape[0]):
index_in_seq = self.indices_in_seq[index]
seq_len = self.sequence_lens[index]
if seq_len == 1:
                diff_index = index
elif index_in_seq + self.frame_interval < seq_len:
diff_index = index + self.frame_interval
else:
diff_index = index - self.frame_interval
self.diff_indices.append(diff_index)
# generate the rotation factors
num_examples = self.kp2ds.shape[0]
np.random.seed(2019)
rotation_y = (2 * np.random.random_sample((num_examples, 1)) - 1) * self.bound_azim
rotation_x = (2 * np.random.random_sample((num_examples, 1)) - 1) * self.bound_elev
rotation_z = (2 * np.random.random_sample((num_examples, 1)) - 1) * self.bound_elev / 2
rotation_1 = np.concatenate((rotation_y, rotation_x, rotation_z), axis=1)
rotation_2 = rotation_1.copy()
rotation_2[:, 0] = rotation_2[:, 0] + np.pi
self.rotation = np.concatenate((rotation_1, rotation_2), axis=0)
np.random.shuffle(self.rotation)
self.rotation = torch.from_numpy(self.rotation).float()
self.kp2ds = torch.from_numpy(self.kp2ds).float()
self.kp3ds = torch.from_numpy(self.kp3ds).float()
def __len__(self):
return self.kp2ds.shape[0]
def __getitem__(self, index):
if not self.seed_set:
self.seed_set = True
random.seed(index)
np.random.seed(index)
seq_len = self.sequence_lens[index]
index_in_seq = self.indices_in_seq[index]
kps_3d = self.kp3ds[index]
rot = self.rotation[index]
# index in its sequence
kps_2d = self.kp2ds[index]
kps_3d = self.kp3ds[index]
diff1 = self.kp2ds[self.diff_indices[index]]
if seq_len == 1:
diff_dist = 0
else:
diff_dist = np.random.randint(-index_in_seq, seq_len-index_in_seq)
while abs(diff_dist) < self.min_diff_dist:
diff_dist = np.random.randint(-index_in_seq, seq_len-index_in_seq)
diff2_index = index + diff_dist
diff2 = self.kp2ds[diff2_index]
# current form: F * J * 2
        # we need to swap the last two axes, so that the item will be in the form J * 2 * F where
# J is the number of keypoints and F is the number of frames
# kps_2d = kps_2d.permute(1, 2, 0).contiguous()
# diff = self.diff[all_indices].permute(1, 2, 0).contiguous()
kps_2d = self.kp2ds[index]
rot = self.rotation[index]
# the flag will always be 1 when no extra data is used
# flag = self.flags[index]
        # for validation, simply ignore scale
scale = 0
return kps_2d, kps_3d, rot, diff1, diff2, scale
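# Hedged usage sketch (not part of the original file): `config` is assumed to be the project's
# config object with the DATA.*, TRAIN.* and USE_GT fields filled in; batch size is illustrative.
def build_surreal_loader(config, is_train=True, batch_size=64):
    dataset = SurrealDataset(config, is_train=is_train)
    # each item is (kps_2d, kps_3d, rot, diff1, diff2, scale)
    return DataLoader(dataset, batch_size=batch_size, shuffle=is_train, num_workers=4)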
|
StarcoderdataPython
|
4834307
|
# Uses RSI to determine if a stock is overbought or oversold
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
stock = pd.read_csv('AAPL.csv')
stock.set_index(pd.DatetimeIndex(stock['Date']),inplace=True)
delta = stock['Adj Close'].diff(1)
delta = delta.dropna()
up = delta.copy()
down = delta.copy()
up[up<0] = 0
down[down>0] = 0
period=14
AVG_Gain = up.rolling(window=period).mean()
AVG_Loss = abs(down.rolling(window=period).mean())
RS = AVG_Gain/AVG_Loss
RSI = 100.0 - (100.0/(1.0+RS))
new_df = pd.DataFrame()
new_df['Adj Close'] = stock['Adj Close']
new_df['RSI'] = RSI
#visualizing
plt.figure(figsize=(12.2,5.8))
plt.plot(new_df.index,new_df['Adj Close'],)
plt.title('Adj. Close Price History')
plt.xlabel('Date',fontsize=18)
plt.ylabel('Adj. Close Price USD($)',fontsize=18)
plt.legend(new_df.columns.values,loc='upper left')
plt.show()
plt.figure(figsize=(12.2,5.8))
plt.title('RSI Plot')
plt.plot(new_df.index,new_df['RSI'])
plt.axhline(0,linestyle='--',alpha=0.5,color='gray')
plt.axhline(10,linestyle='--',alpha=0.5,color='orange')
plt.axhline(20,linestyle='--',alpha=0.5,color='green')
plt.axhline(30,linestyle='--',alpha=0.5,color='red')
plt.axhline(70,linestyle='--',alpha=0.5,color='red')
plt.axhline(80,linestyle='--',alpha=0.5,color='green')
plt.axhline(90,linestyle='--',alpha=0.5,color='orange')
plt.axhline(100,linestyle='--',alpha=0.5,color='gray')
plt.show()
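# Hedged follow-up sketch (not in the original script): turns the RSI column into the
# overbought/oversold call promised in the header comment, using the conventional 70/30
# thresholds that are also drawn on the plot above.
new_df['Signal'] = 'neutral'
new_df.loc[new_df['RSI'] > 70, 'Signal'] = 'overbought'
new_df.loc[new_df['RSI'] < 30, 'Signal'] = 'oversold'
print(new_df.tail())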
|
StarcoderdataPython
|
1671773
|
<reponame>Ravoxsg/transformers
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch REALM model. """
import copy
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import RealmConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
RealmEmbedder,
RealmForOpenQA,
RealmKnowledgeAugEncoder,
RealmReader,
RealmRetriever,
RealmScorer,
RealmTokenizer,
)
class RealmModelTester:
def __init__(
self,
parent,
batch_size=13,
retriever_proj_size=128,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
span_hidden_size=50,
max_span_width=10,
reader_layer_norm_eps=1e-3,
reader_beam_size=4,
reader_seq_len=288 + 32,
num_block_records=13353718,
searcher_beam_size=8,
searcher_seq_len=64,
num_labels=3,
num_choices=4,
num_candidates=10,
scope=None,
):
# General config
self.parent = parent
self.batch_size = batch_size
self.retriever_proj_size = retriever_proj_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
# Reader config
self.span_hidden_size = span_hidden_size
self.max_span_width = max_span_width
self.reader_layer_norm_eps = reader_layer_norm_eps
self.reader_beam_size = reader_beam_size
self.reader_seq_len = reader_seq_len
# Searcher config
self.num_block_records = num_block_records
self.searcher_beam_size = searcher_beam_size
self.searcher_seq_len = searcher_seq_len
self.num_labels = num_labels
self.num_choices = num_choices
self.num_candidates = num_candidates
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        candidate_input_ids = ids_tensor([self.batch_size, self.num_candidates, self.seq_length], self.vocab_size)
reader_input_ids = ids_tensor([self.reader_beam_size, self.reader_seq_len], self.vocab_size)
input_mask = None
        candidate_input_mask = None
reader_input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
            candidate_input_mask = random_attention_mask([self.batch_size, self.num_candidates, self.seq_length])
reader_input_mask = random_attention_mask([self.reader_beam_size, self.reader_seq_len])
token_type_ids = None
candidate_token_type_ids = None
reader_token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
candidate_token_type_ids = ids_tensor(
[self.batch_size, self.num_candidates, self.seq_length], self.type_vocab_size
)
reader_token_type_ids = ids_tensor([self.reader_beam_size, self.reader_seq_len], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
# inputs with additional num_candidates axis.
        scorer_encoder_inputs = (candidate_input_ids, candidate_input_mask, candidate_token_type_ids)
# reader inputs
reader_inputs = (reader_input_ids, reader_input_mask, reader_token_type_ids)
return (
config,
input_ids,
token_type_ids,
input_mask,
scorer_encoder_inputs,
reader_inputs,
sequence_labels,
token_labels,
choice_labels,
)
def get_config(self):
return RealmConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
retriever_proj_size=self.retriever_proj_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
num_candidates=self.num_candidates,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
def create_and_check_embedder(
self,
config,
input_ids,
token_type_ids,
input_mask,
scorer_encoder_inputs,
reader_inputs,
sequence_labels,
token_labels,
choice_labels,
):
model = RealmEmbedder(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.projected_score.shape, (self.batch_size, self.retriever_proj_size))
def create_and_check_encoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
scorer_encoder_inputs,
reader_inputs,
sequence_labels,
token_labels,
choice_labels,
):
model = RealmKnowledgeAugEncoder(config=config)
model.to(torch_device)
model.eval()
relevance_score = floats_tensor([self.batch_size, self.num_candidates])
result = model(
scorer_encoder_inputs[0],
attention_mask=scorer_encoder_inputs[1],
token_type_ids=scorer_encoder_inputs[2],
relevance_score=relevance_score,
labels=token_labels,
)
self.parent.assertEqual(
result.logits.shape, (self.batch_size * self.num_candidates, self.seq_length, self.vocab_size)
)
def create_and_check_reader(
self,
config,
input_ids,
token_type_ids,
input_mask,
scorer_encoder_inputs,
reader_inputs,
sequence_labels,
token_labels,
choice_labels,
):
model = RealmReader(config=config)
model.to(torch_device)
model.eval()
relevance_score = floats_tensor([self.reader_beam_size])
result = model(
reader_inputs[0],
attention_mask=reader_inputs[1],
token_type_ids=reader_inputs[2],
relevance_score=relevance_score,
)
self.parent.assertEqual(result.block_idx.shape, ())
self.parent.assertEqual(result.candidate.shape, ())
self.parent.assertEqual(result.start_pos.shape, ())
self.parent.assertEqual(result.end_pos.shape, ())
def create_and_check_scorer(
self,
config,
input_ids,
token_type_ids,
input_mask,
scorer_encoder_inputs,
reader_inputs,
sequence_labels,
token_labels,
choice_labels,
):
model = RealmScorer(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
candidate_input_ids=scorer_encoder_inputs[0],
candidate_attention_mask=scorer_encoder_inputs[1],
candidate_token_type_ids=scorer_encoder_inputs[2],
)
self.parent.assertEqual(result.relevance_score.shape, (self.batch_size, self.num_candidates))
self.parent.assertEqual(result.query_score.shape, (self.batch_size, self.retriever_proj_size))
self.parent.assertEqual(
result.candidate_score.shape, (self.batch_size, self.num_candidates, self.retriever_proj_size)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
scorer_encoder_inputs,
reader_inputs,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class RealmModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
RealmEmbedder,
RealmKnowledgeAugEncoder,
# RealmScorer is excluded from common tests as it is a container model
# consisting of two RealmEmbedders & a simple inner product calculation.
# RealmScorer
)
if is_torch_available()
else ()
)
all_generative_model_classes = ()
# disable these tests because there is no base_model in Realm
test_save_load_fast_init_from_base = False
test_save_load_fast_init_to_base = False
def setUp(self):
self.test_pruning = False
self.model_tester = RealmModelTester(self)
self.config_tester = ConfigTester(self, config_class=RealmConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_embedder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_embedder(*config_and_inputs)
def test_encoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_encoder(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_embedder(*config_and_inputs)
self.model_tester.create_and_check_encoder(*config_and_inputs)
def test_retriever(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_scorer(*config_and_inputs)
def test_training(self):
if not self.model_tester.is_training:
return
config, *inputs = self.model_tester.prepare_config_and_inputs()
input_ids, token_type_ids, input_mask, scorer_encoder_inputs = inputs[0:4]
config.return_dict = True
tokenizer = RealmTokenizer.from_pretrained("google/realm-orqa-nq-openqa")
# RealmKnowledgeAugEncoder training
model = RealmKnowledgeAugEncoder(config)
model.to(torch_device)
model.train()
inputs_dict = {
"input_ids": scorer_encoder_inputs[0].to(torch_device),
"attention_mask": scorer_encoder_inputs[1].to(torch_device),
"token_type_ids": scorer_encoder_inputs[2].to(torch_device),
"relevance_score": floats_tensor([self.model_tester.batch_size, self.model_tester.num_candidates]),
}
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs = inputs_dict
loss = model(**inputs).loss
loss.backward()
# RealmForOpenQA training
openqa_config = copy.deepcopy(config)
openqa_config.vocab_size = 30522 # the retrieved texts will inevitably have more than 99 vocabs.
openqa_config.num_block_records = 5
openqa_config.searcher_beam_size = 2
block_records = np.array(
[
b"This is the first record.",
b"This is the second record.",
b"This is the third record.",
b"This is the fourth record.",
b"This is the fifth record.",
],
dtype=np.object,
)
retriever = RealmRetriever(block_records, tokenizer)
model = RealmForOpenQA(openqa_config, retriever)
model.to(torch_device)
model.train()
inputs_dict = {
"input_ids": input_ids[:1].to(torch_device),
"attention_mask": input_mask[:1].to(torch_device),
"token_type_ids": token_type_ids[:1].to(torch_device),
"answer_ids": input_ids[:1].tolist(),
}
inputs = self._prepare_for_class(inputs_dict, RealmForOpenQA)
loss = model(**inputs).reader_output.loss
loss.backward()
@slow
def test_embedder_from_pretrained(self):
model = RealmEmbedder.from_pretrained("google/realm-cc-news-pretrained-embedder")
self.assertIsNotNone(model)
@slow
def test_encoder_from_pretrained(self):
model = RealmKnowledgeAugEncoder.from_pretrained("google/realm-cc-news-pretrained-encoder")
self.assertIsNotNone(model)
@slow
def test_open_qa_from_pretrained(self):
model = RealmForOpenQA.from_pretrained("google/realm-orqa-nq-openqa")
self.assertIsNotNone(model)
@slow
def test_reader_from_pretrained(self):
model = RealmReader.from_pretrained("google/realm-orqa-nq-reader")
self.assertIsNotNone(model)
@slow
def test_scorer_from_pretrained(self):
model = RealmScorer.from_pretrained("google/realm-cc-news-pretrained-scorer")
self.assertIsNotNone(model)
@require_torch
class RealmModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_embedder(self):
retriever_projected_size = 128
model = RealmEmbedder.from_pretrained("google/realm-cc-news-pretrained-embedder")
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
output = model(input_ids)[0]
expected_shape = torch.Size((1, retriever_projected_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor([[-0.0714, -0.0837, -0.1314]])
self.assertTrue(torch.allclose(output[:, :3], expected_slice, atol=1e-4))
@slow
def test_inference_encoder(self):
num_candidates = 2
vocab_size = 30522
model = RealmKnowledgeAugEncoder.from_pretrained(
"google/realm-cc-news-pretrained-encoder", num_candidates=num_candidates
)
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
relevance_score = torch.tensor([[0.3, 0.7]], dtype=torch.float32)
output = model(input_ids, relevance_score=relevance_score)[0]
expected_shape = torch.Size((2, 6, vocab_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor([[[-11.0888, -11.2544], [-10.2170, -10.3874]]])
self.assertTrue(torch.allclose(output[1, :2, :2], expected_slice, atol=1e-4))
@slow
def test_inference_open_qa(self):
from transformers.models.realm.retrieval_realm import RealmRetriever
config = RealmConfig()
tokenizer = RealmTokenizer.from_pretrained("google/realm-orqa-nq-openqa")
retriever = RealmRetriever.from_pretrained("google/realm-orqa-nq-openqa")
model = RealmForOpenQA.from_pretrained(
"google/realm-orqa-nq-openqa",
retriever=retriever,
config=config,
)
question = "Who is the pioneer in modern computer science?"
question = tokenizer(
[question],
padding=True,
truncation=True,
max_length=model.config.searcher_seq_len,
return_tensors="pt",
).to(model.device)
predicted_answer_ids = model(**question).predicted_answer_ids
predicted_answer = tokenizer.decode(predicted_answer_ids)
self.assertEqual(predicted_answer, "alan mathison turing")
@slow
def test_inference_reader(self):
config = RealmConfig(reader_beam_size=2, max_span_width=3)
model = RealmReader.from_pretrained("google/realm-orqa-nq-reader", config=config)
concat_input_ids = torch.arange(10).view((2, 5))
concat_token_type_ids = torch.tensor([[0, 0, 1, 1, 1], [0, 0, 1, 1, 1]], dtype=torch.int64)
relevance_score = torch.tensor([0.3, 0.7], dtype=torch.float32)
output = model(
concat_input_ids, token_type_ids=concat_token_type_ids, relevance_score=relevance_score, return_dict=True
)
block_idx_expected_shape = torch.Size(())
start_pos_expected_shape = torch.Size((1,))
end_pos_expected_shape = torch.Size((1,))
self.assertEqual(output.block_idx.shape, block_idx_expected_shape)
self.assertEqual(output.start_pos.shape, start_pos_expected_shape)
self.assertEqual(output.end_pos.shape, end_pos_expected_shape)
expected_block_idx = torch.tensor(1)
expected_start_pos = torch.tensor(3)
expected_end_pos = torch.tensor(3)
self.assertTrue(torch.allclose(output.block_idx, expected_block_idx, atol=1e-4))
self.assertTrue(torch.allclose(output.start_pos, expected_start_pos, atol=1e-4))
self.assertTrue(torch.allclose(output.end_pos, expected_end_pos, atol=1e-4))
@slow
def test_inference_scorer(self):
num_candidates = 2
model = RealmScorer.from_pretrained("google/realm-cc-news-pretrained-scorer", num_candidates=num_candidates)
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
candidate_input_ids = torch.tensor([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
output = model(input_ids, candidate_input_ids=candidate_input_ids)[0]
expected_shape = torch.Size((1, 2))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor([[0.7410, 0.7170]])
self.assertTrue(torch.allclose(output, expected_slice, atol=1e-4))
|
StarcoderdataPython
|
1767787
|
from rest_framework.permissions import BasePermission
class CanSubscribe(BasePermission):
pass
|
StarcoderdataPython
|
3320127
|
<gh_stars>0
# pylint: disable=missing-module-docstring
#
# Copyright (C) 2020 by UsergeTeam@Github, < https://github.com/UsergeTeam >.
#
# This file is part of < https://github.com/UsergeTeam/Userge > project,
# and is released under the "GNU v3.0 License Agreement".
# Please see < https://github.com/uaudith/Userge/blob/master/LICENSE >
#
# All rights reserved.
__all__ = ['Command']
import re
from typing import Union, Dict, List
from pyrogram import filters
from userge import Config
from .filter import Filter
from ... import client as _client # pylint: disable=unused-import
class Command(Filter):
""" command class """
def __init__(self, about: str, trigger: str, pattern: str,
**kwargs: Union['_client.Userge', int, str, bool]) -> None:
self.about = about
self.trigger = trigger
self.pattern = pattern
super().__init__(**Filter._parse(**kwargs)) # pylint: disable=protected-access
@classmethod
def parse(cls, command: str, # pylint: disable=arguments-differ
about: Union[str, Dict[str, Union[str, List[str], Dict[str, str]]]],
trigger: str, name: str, filter_me: bool,
**kwargs: Union['_client.Userge', int, bool]) -> 'Command':
""" parse command """
pattern = f"^(?:\\{trigger}|\\{Config.SUDO_TRIGGER}){command.lstrip('^')}" if trigger \
else f"^{command.lstrip('^')}"
if [i for i in '^()[]+*.\\|?:$' if i in command]:
match = re.match("(\\w[\\w_]*)", command)
cname = match.groups()[0] if match else ''
cname = name or cname
cname = trigger + cname if cname else ''
else:
cname = trigger + command
cname = name or cname
pattern += r"(?:\s([\S\s]+))?$"
filters_ = filters.regex(pattern=pattern)
if filter_me:
outgoing_flt = filters.create(
lambda _, __, m:
m.via_bot is None
and not (m.from_user and m.from_user.is_bot)
and (m.outgoing or (m.from_user and m.from_user.is_self))
and not (m.chat and m.chat.type == "channel" and m.edit_date)
and (m.text and m.text.startswith(trigger) if trigger else True))
incoming_flt = filters.create(
lambda _, __, m:
m.via_bot is None
and not m.outgoing
and trigger
and m.from_user and m.text
and ((m.from_user.id in Config.OWNER_ID)
or (Config.SUDO_ENABLED and (m.from_user.id in Config.SUDO_USERS)
and (cname.lstrip(trigger) in Config.ALLOWED_COMMANDS)))
and m.text.startswith(Config.SUDO_TRIGGER))
filters_ = filters_ & (outgoing_flt | incoming_flt)
return cls(_format_about(about), trigger, pattern, filters=filters_, name=cname, **kwargs)
def __repr__(self) -> str:
return f"<command {self.name}>"
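# Illustrative note (assumed trigger values, not from the source): for the command string "ping",
# trigger "." and Config.SUDO_TRIGGER "!", Command.parse() above builds the pattern
#     ^(?:\.|\!)ping(?:\s([\S\s]+))?$
# i.e. either trigger, then the command name, then an optional whitespace-separated argument block.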
def _format_about(about: Union[str, Dict[str, Union[str, List[str], Dict[str, str]]]]) -> str:
if not isinstance(about, dict):
return about
tmp_chelp = ''
if 'header' in about and isinstance(about['header'], str):
        tmp_chelp += f"<i><b>{about['header'].title()}</b></i>"
del about['header']
if 'description' in about and isinstance(about['description'], str):
tmp_chelp += ("\n\n📝 <u><b>Description</b></u> :\n\n "
f"<i>{about['description'].capitalize()}</i>")
del about['description']
if 'flags' in about:
tmp_chelp += "\n\n⛓ <u><b>Available Flags</b></u> :\n"
if isinstance(about['flags'], dict):
for f_n, f_d in about['flags'].items():
tmp_chelp += f"\n ▫ <code>{f_n}</code> : <i>{f_d.lower()}</i>"
else:
tmp_chelp += f"\n {about['flags']}"
del about['flags']
if 'options' in about:
tmp_chelp += "\n\n🕶 <u><b>Available Options</b></u> :\n"
if isinstance(about['options'], dict):
for o_n, o_d in about['options'].items():
tmp_chelp += f"\n ▫ <code>{o_n}</code> : <i>{o_d.lower()}</i>"
else:
tmp_chelp += f"\n {about['options']}"
del about['options']
if 'types' in about:
tmp_chelp += "\n\n🎨 <u><b>Supported Types</b></u> :\n\n"
if isinstance(about['types'], list):
for _opt in about['types']:
tmp_chelp += f" <code>{_opt}</code> ,"
else:
tmp_chelp += f" {about['types']}"
del about['types']
if 'usage' in about:
tmp_chelp += f"\n\n✒ <u><b>Usage</b></u> :\n\n<code>{about['usage']}</code>"
del about['usage']
if 'examples' in about:
tmp_chelp += "\n\n✏ <u><b>Examples</b></u> :"
if isinstance(about['examples'], list):
for ex_ in about['examples']:
tmp_chelp += f"\n\n <code>{ex_}</code>"
else:
tmp_chelp += f"\n\n <code>{about['examples']}</code>"
del about['examples']
if 'others' in about:
tmp_chelp += f"\n\n📎 <u><b>Others</b></u> :\n\n{about['others']}"
del about['others']
if about:
for t_n, t_d in about.items():
tmp_chelp += f"\n\n⚙ <u><b>{t_n.title()}</b></u> :\n"
if isinstance(t_d, dict):
for o_n, o_d in t_d.items():
tmp_chelp += f"\n ▫ <code>{o_n}</code> : <i>{o_d.lower()}</i>"
elif isinstance(t_d, list):
tmp_chelp += '\n'
for _opt in t_d:
tmp_chelp += f" <code>{_opt}</code> ,"
else:
tmp_chelp += '\n'
tmp_chelp += t_d
return tmp_chelp.replace('{tr}', Config.CMD_TRIGGER)
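# Hedged illustration (not part of the module): a minimal `about` dict of the shape
# _format_about() accepts; keys not shown here (flags/options/types/others/...) are optional.
_EXAMPLE_ABOUT = {
    'header': 'ping yourself',
    'description': 'check how long the bot takes to respond',
    'usage': '{tr}ping',
    'examples': ['{tr}ping'],
}
# _format_about(_EXAMPLE_ABOUT) returns HTML-formatted help text with the '{tr}'
# placeholder replaced by Config.CMD_TRIGGER.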
|
StarcoderdataPython
|
3315759
|
<gh_stars>1-10
#########
# Copyright (c) 2018 Lumina Communication Systems Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
# from setuptools import find_packages
setup(
# Do not use underscores in the plugin name.
name='cloudify-lfm-plugin',
version='0.1.0',
author='<NAME>',
author_email='<EMAIL>',
url="https://github.com/luminanetworks/cloudify-lfm-plugin",
description='A Cloudify Plugin that provisions services in Lumina Flow Manager',
long_description=open('README.md').read(),
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# This must correspond to the actual packages in the plugin.
# packages=find_packages(exclude=['tests*']),
packages=[
'cloudify_fm',
'cloudify_fm.common',
],
license='LICENSE',
zip_safe=False,
install_requires=[
'lfmcli>=2.0.0',
'cloudify-plugins-common==4.3.1', # latest: >=4.4
'requests==2.19.1',
'Cerberus==1.2'
# 'cloudify-dsl-parser==4.3.1',
# 'cloudify-rest-client==4.3.1',
# 'cloudify-plugins-common==4.3.1'
],
# test_requires=[
# 'cloudify-common>=4.4',
# # 'cloudify-plugins-common>=4.3',
# 'pyyaml',
# 'nose',
# 'requests'
# ]
)
|
StarcoderdataPython
|
139883
|
<reponame>dbbxzw-610/bilibili-live-push
import asyncio
import time
from typing import *
class EchoFormat:
th: List[str] = []
td: List[List[str]] = []
first_echo: bool = True
last_update: float = time.time()
def __init__(self) -> None:
pass
def init_th(self, *arg) -> None:
self.th.clear()
for i, x in enumerate(arg): # type: str
if isinstance(x, str) is True:
self.th.append(x)
else:
print("非str类型", i, ":", x)
def del_th(self) -> None:
self.th.clear()
def create_td(self) -> Tuple[int, list]:
self.td.append([])
while len(self.td[len(self.td) - 1]) < len(self.th):
self.td[len(self.td) - 1].append("")
return len(self.td) - 1, self.td[len(self.td) - 1]
def update_td(self, index: int, *arg):
for i, x in enumerate(arg): # type: int, str
if i < len(self.th):
self.td[index][i] = x
self.last_update = time.time()
def update_element(self, row: int, column: int, arg: str):
        # rows run horizontally, columns vertically
        # row: the row (horizontal) index
        # column: the column (vertical) index
self.td[row][column] = arg
self.last_update = time.time()
def str_width(self) -> List[int]:
str_len = [str_width(v) + 3 for v in self.th]
for v in self.td: # type: int, List[str]
for i, x in enumerate(v):
if i >= len(str_len):
break
str_len[i] = (
str_len[i] if str_len[i] > str_width(x) + 3 else str_width(x) + 3
)
return str_len
def echo(self):
if not self.first_echo:
print("\033[%sA" % (len(self.td) + 2))
else:
self.first_echo = False
str_len = self.str_width()
# th
print(build_format(str_len, self.th) % tuple(self.th))
# td
for v in self.td:
print(build_format(str_len, v) % tuple(v))
async def loop(self):
last_update: float = 0
while True:
if last_update >= self.last_update:
await asyncio.sleep(1)
continue
self.echo()
last_update = self.last_update
def build_format(str_len: List[int], str_list: List[str]) -> str:
str_format = ""
for i, v in enumerate(str_list): # type: str
str_format += "%-{}.{}s".format(
str_len[i] - alpha_len(v), str_len[i] - alpha_len(v)
)
str_format += "\033[K"
return str_format
def str_width(text: str) -> int:
count = len(text)
for i in text:
if "\u4e00" <= i <= "\u9fff":
count += 1
return count
def alpha_len(text: str) -> int:
count = 0
for i in text:
if "\u4e00" <= i <= "\u9fff":
count += 1
return count
if __name__ == "__main__":
ef = EchoFormat()
ef.init_th("#", "直播间ID", "真实ID", "主播名", "开播/下播", "实时状态")
i, v = ef.create_td()
v.append("1")
v.append("2")
v.append("3")
v.append("4")
v.append("5")
v.append("6")
ef.echo()
|
StarcoderdataPython
|
1676246
|
<reponame>smalldragonvt/curlify
# coding: utf-8
import os
from setuptools import setup, Command
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='curlify',
version='2.2.1',
py_modules=[
'curlify',
],
include_package_data=True,
install_requires=[
'requests',
],
license='MIT License',
description='Library to convert python requests object to curl command.',
author='<NAME>',
author_email='<EMAIL>',
platforms='any',
url='https://github.com/oeegor/curlify',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
cmdclass={
'clean': CleanCommand,
},
)
|
StarcoderdataPython
|
109465
|
import torch
try:
from torch.utils.data import IterableDataset
except ImportError:
class IterableDataset:
pass
class BatchChecker:
def __init__(self, data, init_counter=0):
self.counter = init_counter
self.data = data
self.true_batch = None
def check(self, batch):
self.true_batch = self.data[self.counter % len(self.data)]
self.counter += 1
res = self.true_batch == batch
return res.all() if not isinstance(res, bool) else res
class IterationCounter:
def __init__(self, start_value=1):
self.current_iteration_count = start_value
def __call__(self, engine):
assert engine.state.iteration == self.current_iteration_count
self.current_iteration_count += 1
class EpochCounter:
def __init__(self, start_value=1):
self.current_epoch_count = start_value
def __call__(self, engine):
assert engine.state.epoch == self.current_epoch_count
self.current_epoch_count += 1
def setup_sampler(sampler_type, num_iters, batch_size):
if sampler_type is None:
return None, batch_size
if sampler_type == "weighted":
from torch.utils.data.sampler import WeightedRandomSampler
w = torch.ones(num_iters * batch_size, dtype=torch.float)
for i in range(num_iters):
w[batch_size * i : batch_size * (i + 1)] += i * 1.0
return WeightedRandomSampler(w, num_samples=num_iters * batch_size, replacement=True), batch_size
if sampler_type == "distributed":
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
num_replicas = 1
rank = 0
if dist.is_available() and dist.is_initialized():
num_replicas = dist.get_world_size()
rank = dist.get_rank()
dataset = torch.zeros(num_iters * batch_size)
return DistributedSampler(dataset, num_replicas=num_replicas, rank=rank), batch_size // num_replicas
class MyIterableDataset(IterableDataset):
def __init__(self, start, end):
super(MyIterableDataset).__init__()
        assert end > start, "this example code only works with end > start"
self.start = start
self.end = end
def __iter__(self):
return iter(range(self.start, self.end))
def get_iterable_dataset(*args, **kwargs):
return MyIterableDataset(*args, **kwargs)
|
StarcoderdataPython
|
4802186
|
<reponame>ttngu207/Economo-2018
import re
import os
import sys
from datetime import datetime
import numpy as np
import scipy.io as sio
import datajoint as dj
import h5py as h5
from . import reference, subject, utilities
schema = dj.schema(dj.config['custom'].get('database.prefix', '') + 'acquisition')
@schema
class Session(dj.Manual):
definition = """
-> subject.Subject
session_time: datetime # session time
session_id: smallint
---
session_directory = "": varchar(256)
session_note = "": varchar(256)
"""
class Experimenter(dj.Part):
definition = """
-> master
-> reference.Experimenter
"""
@schema
class TrialSet(dj.Manual):
definition = """
-> Session
---
trial_counts: int # total number of trials
"""
class Trial(dj.Part):
definition = """
-> master
trial_id: smallint # id of this trial in this trial set
---
start_time = null: float # start time of this trial, with respect to the starting point of this session
-> reference.TrialType
-> reference.TrialResponse
trial_stim_present: bool # is this a stim or no-stim trial
trial_is_good: bool # good/bad status of trial (bad trials are not analyzed)
"""
class EventTime(dj.Part):
definition = """ # experimental paradigm event timing marker(s) for this trial, relative to trial start time
-> master.Trial
-> reference.ExperimentalEvent.proj(trial_event="event")
---
event_time = null: float # (in second) event time with respect to this trial's start time
"""
|
StarcoderdataPython
|
3300443
|
import ast
import sys
from optparse import OptionParser, OptionGroup
from logger import Logger
from check import check_mod, check_expr
from infer import infer_expr
from ptype import PType
from parse_file import parse_type_decs
from ast_extensions import TypeDecASTModule
import check
import parse_file
import infer
log = check.log = parse_file.log = infer.log = Logger()
# Invoked like:
# python pyty.py <source_file.py>
usage = "usage: %prog [options]\n\nDo not mix options from different modes."
parser = OptionParser(usage=usage)
f_group = OptionGroup(parser, "File Mode",
"Use Pyty to typecheck source code files.")
f_group.add_option("-f", "--file", dest="filename",
help="file to typecheck", metavar="FIL")
parser.add_option_group(f_group)
e_group = OptionGroup(parser, "Expression Mode",
"Use Pyty to typecheck expressions (under the "
"empty environment). Both options are required.")
e_group.add_option("-e", "--expr", dest="expr",
help="string of expression", metavar="EXP")
e_group.add_option("-t", "--type", dest="type",
help="type to typecheck against", metavar="TYP")
parser.add_option_group(e_group)
i_group = OptionGroup(parser, "Inference Mode",
"Use Pyty to infer the type of an expression (under the "
"empty environment).")
i_group.add_option("-i", "--inf", dest="infer_expr",
help="string of expression", metavar="EXP")
parser.add_option_group(i_group)
(opt, args) = parser.parse_args()
if opt.filename and not opt.expr and not opt.type and not opt.infer_expr:
file_name = opt.filename
try:
# FIXME: this is copied from unit_test_core, should be abstracted
# away somewhere, but don't know the best way to deal with logging.
with open(file_name, 'r') as f:
text = f.read()
untyped_ast = ast.parse(text)
typedecs = parse_type_decs(file_name)
typed_ast = TypeDecASTModule(untyped_ast, typedecs)
if check_mod(typed_ast.tree):
print "Typechecked correctly!"
else:
print "Did not typecheck."
except IOError as e:
print "File not found: %s" % e.filename
elif opt.expr and opt.type and not opt.filename and not opt.infer_expr:
e = ast.parse(opt.expr).body[0].value
t = PType.from_str(opt.type)
template = ("YES! -- %s typechecks as type %s" if check_expr(e, t, {}) else
"NO! --- %s does not typecheck as type %s")
print template % (opt.expr, t)
elif opt.infer_expr and not opt.filename and not opt.expr and not opt.type:
e = ast.parse(opt.infer_expr).body[0].value
print "%s -- is the inferred type of %s" % (infer_expr(e, {}),
opt.infer_expr)
else:
parser.print_help()
|
StarcoderdataPython
|
3264384
|
<reponame>cloudcalvin/spira<filename>spira/param/__init__.py<gh_stars>0
from .field.typed_integer import IntegerField
from .field.typed_string import StringField
# from .field.typed_float import FloatField
from .field.typed_bool import BoolField
from .field.typed_list import ListField
from .field.layer_list import LayerListProperty
from .field.typed_color import ColorField
from .field.typed_point import PointField
from spira.core.descriptor import DataField
from spira.core.descriptor import DataFieldDescriptor
from spira.core.descriptor import FunctionField
import numpy as np
class MidPointField(DataFieldDescriptor):
from .field.point import Point
__type__ = Point
def __init__(self, default=Point(0,0), **kwargs):
if isinstance(default, self.__type__):
kwargs['default'] = [default.x, default.y]
elif isinstance(default, (list, set, tuple, np.ndarray)):
kwargs['default'] = default
super().__init__(**kwargs)
def get_stored_value(self, obj):
value = obj.__store__[self.__name__]
if not isinstance(value, (list, set, tuple, np.ndarray)):
            raise ValueError('Stored MidPoint value must be list-like to be retrieved.')
return list(value)
def __set__(self, obj, value):
if isinstance(value, self.__type__):
value = self.__type__()
elif isinstance(value, (list, set, tuple, np.ndarray)):
value = self.__type__(value)
else:
            raise TypeError("Invalid type in setting value " +
                            "of {} (expected {}): {}"
                            .format(self.__class__, self.__type__, type(value)))
# if (value.x > 0) and (value.y > 0):
# if (value.x/100 < 1.0) and (value.y/100 < 1.0):
# from spira.gdsii.utils import SCALE_UP
# from spira.gdsii.utils import SCALE_DOWN
# value.x = SCALE_UP*value.x
# value.y = SCALE_UP*value.y
obj.__store__[self.__name__] = [value.x, value.y]
def PolygonField(shape=[]):
from spira.gdsii.elemental.polygons import Polygons
F = Polygons(shape)
return DataFieldDescriptor(default=F)
def ShapeField(points=[]):
from spira.lgm.shapes.shape import Shape
F = Shape(points)
return DataFieldDescriptor(default=F)
def LayerField(name='', number=0, datatype=0, **kwargs):
from spira.gdsii.layer import Layer
F = Layer(name=name, number=number, datatype=datatype, **kwargs)
return DataFieldDescriptor(default=F, **kwargs)
# def FloatField(default=0.0, **kwargs):
def FloatField(**kwargs):
from .variables import FLOAT
return DataFieldDescriptor(constraint=FLOAT, **kwargs)
def CellField(name=None, elementals=None, library=None):
from spira.gdsii.cell import Cell
F = Cell(name=name, elementals=elementals, library=library)
return DataFieldDescriptor(default=F)
def PhysicalLayerField(layer=None, purpose=None):
from spira.rdd.layer import PhysicalLayer
F = PhysicalLayer(layer=layer, purpose=purpose)
return DataFieldDescriptor(default=F)
class ElementListField(DataFieldDescriptor):
from spira.core.lists import ElementList
__type__ = ElementList
def __init__(self, default=[], **kwargs):
kwargs['default'] = self.__type__(default)
super().__init__(**kwargs)
def __repr__(self):
return ''
def __str__(self):
return ''
def call_param_function(self, obj):
f = self.get_param_function(obj)
value = f(self.__type__())
if value is None:
value = self.__type__()
obj.__store__[self.__name__] = value
return value
class PointArrayField(DataFieldDescriptor):
import numpy as np
__type__ = np.array([])
def call_param_function(self, obj):
f = self.get_param_function(obj)
value = f([])
if value is None:
value = self.__operations__([])
else:
value = self.__operations__(value)
obj.__store__[self.__name__] = value
return value
# if (value is None):
# value = self.__process__([])
# else:
# value = self.__process__([c.convert_to_array() if isinstance(c, Coord) else c for c in value])
# return value
def __operations__(self, points):
# from spira.gdsii.utils import scale_polygon_up as spu
# return spu(points)
return points
def __set__(self, obj, points):
# from spira.gdsii.utils import scale_polygon_up as spu
# pp = spu(self.__operations__(points))
# obj.__store__[self.__name__] = pp
obj.__store__[self.__name__] = points
# def __process__(self, points):
# if isinstance(points, Shape):
# return array(points.points)
# elif isinstance(points, (list, ndarray)):
# if len(points):
# element = points[0]
# if isinstance(element, (ndarray, list)):
# points_as_array = array(points, copy=False)
# else:
# points_as_array = array([(c[0], c[1]) for c in points])
# return points_as_array
# else:
# return ndarray((0, 2))
# elif isinstance(points, Coord2):
# return array([[points.x, points.y]])
# elif isinstance(points, tuple):
# return array([[points[0], points[1]]])
# else:
# raise TypeError("Invalid type of points in setting value of PointsDefinitionProperty: " + str(type(points)))
# def __set__(self, obj, points):
# points = self.__process__(points)
# self.__externally_set_property_value_on_object__(obj, points)
class PortListField(DataFieldDescriptor):
pass
# from spira.gdsii.lists.port_list import PortList
# __type__ = PortList
# def __init__(self, default=[], **kwargs):
# kwargs['default'] = self.__type__(default)
# super().__init__(**kwargs)
# def call_param_function(self, obj):
# f = self.get_param_function(obj)
# value = f(self.__type__())
# if value is None:
# value = self.__type__()
# obj.__store__[self.__name__] = value
# return value
|
StarcoderdataPython
|
3348698
|
#%%
msg="hello world"
print(msg)
#%%
|
StarcoderdataPython
|
3259565
|
import numpy as np
from tacs import TACS, elements, constitutive, functions
from static_analysis_base_test import StaticTestCase
import os
'''
Load in a bdf file with tetrahedral elements, apply a load,
and test KSFailure, StructuralMass, and Compliance functions and sensitivities.
This test is based on the "tetrahedral" script under the examples directory.
'''
FUNC_REFS = np.array([1.2622791020763084, 172800.0, 16.257419866831018])
base_dir = os.path.dirname(os.path.abspath(__file__))
bdf_file = os.path.join(base_dir, "./input_files/5x5x5_cube.bdf")
# KS function weight
ksweight = 10.0
class ProblemTest(StaticTestCase.StaticTest):
N_PROCS = 2 # this is how many MPI processes to use for this TestCase.
def setup_assembler(self, comm, dtype):
"""
Setup mesh and tacs assembler for problem we will be testing.
"""
# Overwrite default tolerances from base class
if dtype == complex:
self.rtol = 1e-11
self.atol = 1e-8
self.dh = 1e-50
else:
self.rtol = 1e-2
self.atol = 1e-4
self.dh = 1e-8
        # Create the mesh loader object on MPI_COMM_WORLD. The
# TACSAssembler object will be created on the same comm MPI_Comm
mesh = TACS.MeshLoader(comm)
# Create the isotropic material class
rho = 2700.0
specific_heat = 921.096
E = 70e3
nu = 0.3
ys = 270.0
cte = 24.0e-6
kappa = 230.0
props = constitutive.MaterialProperties(rho=rho, specific_heat=specific_heat, E=E, nu=nu, ys=ys,
alpha=cte, kappa=kappa)
# Create the stiffness object
stiff = constitutive.SolidConstitutive(props, t=1.0, tNum=0)
# Create model (need class)
model = elements.LinearElasticity3D(stiff)
vars_per_node = model.getVarsPerNode()
# Set up the basis function
linear_basis = elements.LinearTetrahedralBasis()
quad_basis = elements.QuadraticTetrahedralBasis()
# Create the element type (need 3D element class)
linear_element = elements.Element3D(model, linear_basis)
quad_element = elements.Element3D(model, quad_basis)
# Read in bdf file
fail = mesh.scanBDFFile(bdf_file)
if fail is True:
raise IOError("Failed to read in the BDF file")
# Add the elements to the mesh loader class
for i in range(mesh.getNumComponents()):
elem_descript = mesh.getElementDescript(i)
if elem_descript in ["CTETRA", "CTETRA4"]:
elem = linear_element
elif elem_descript == "CTETRA10":
elem = quad_element
else:
elem = None
if elem is not None:
mesh.setElement(i, elem)
# Now, create the TACSAssembler object
assembler = mesh.createTACS(vars_per_node)
return assembler
def setup_tacs_vecs(self, assembler, force_vec, dv_pert_vec, ans_pert_vec, xpts_pert_vec):
"""
Setup user-defined vectors for analysis and fd/cs sensitivity verification
"""
local_num_nodes = assembler.getNumOwnedNodes()
vars_per_node = assembler.getVarsPerNode()
# Create force vector
f_array = force_vec.getArray()
# Set uniform force on all nodes
f_array[:] = -10.0
# Create temporary dv vec for doing fd/cs
dv_pert_vec.getArray()[:] = 1.0
# Create temporary state variable vec for doing fd/cs
np.random.seed(30) # Seed random numbers for deterministic/repeatable tests
rand_data = np.random.rand(vars_per_node * local_num_nodes).astype(self.dtype)
ans_pert_vec.getArray()[:] = rand_data
        # Define random perturbation array for the node locations (used for the fd/cs checks)
np.random.seed(30) # Seed random numbers for deterministic/repeatable tests
rand_data = np.random.rand(3 * local_num_nodes).astype(self.dtype)
xpts_pert_vec.getArray()[:] = rand_data
return
def setup_funcs(self, assembler):
"""
Create a list of functions to be tested and their reference values for the problem
"""
func_list = [functions.KSFailure(assembler, ksWeight=ksweight),
functions.StructuralMass(assembler),
functions.Compliance(assembler)]
return func_list, FUNC_REFS
|
StarcoderdataPython
|
3390422
|
# -*- coding: utf-8 -*-
# environment vars
import os
import logging
# webapp utilities
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
# import project modules
import config
from utilities import *
from kudos import *
class Handler(webapp.RequestHandler):
"""For app requests."""
def get(self, request):
"""Callback for GET requests.
Args:
- request: the request. [Sigh.]
"""
options = parse_the_bloody(request) # Does what it says.
server = Kudos(options)
response = server.respond()
logging.info('Some bloke tried to initiate a ' + server.action)
if response['return_type'] == 'text':
self.response.headers['Content-Type'] = 'text/html'
result = response['result']
elif response['return_type'] == 'html':
self.response.headers['Content-Type'] = 'text/html'
result = hitman(response) # Renders template. No bloodshed.
self.response.out.write(result) # Flush response to her
return
# Map url's to handlers
urls = [
(r'/(.*)', Handler), # handles actions
]
application = webapp.WSGIApplication(urls, debug=config._DEBUG)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
185629
|
#!/usr/bin/env python
import os
import subprocess
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from whoosh.index import open_dir
from whoosh.qparser import QueryParser
from archivo.models import Documento
class Command(BaseCommand):
help = 'Buscar documentos en hipatia'
def add_arguments(self, parser):
parser.add_argument('words', nargs='+')
def handle(self, *args, **kwargs):
words = kwargs['words']
self.stdout.write('Buscado por: {}'.format(' '.join(words)))
ix = open_dir(settings.INDEX_DIR)
query_parser = QueryParser("content", schema=ix.schema)
q = query_parser.parse(' '.join(words))
with ix.searcher() as s:
results = s.search(q)
self.stdout.write('Encontrados {} documentos'.format(len(results)))
for item in results:
id_documento = int(item['id_documento'])
doc = Documento.objects.get(pk=id_documento)
print(id_documento, doc, file=self.stdout)
|
StarcoderdataPython
|
1651263
|
<reponame>openstack/murano-pkg-check
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import os
import oslotest.base
import testscenarios
import yaml
from muranopkgcheck import consts
from muranopkgcheck import manager
from muranopkgcheck import pkg_loader
class DictLoader(pkg_loader.BaseLoader):
@classmethod
def _try_load(cls, pkg):
if consts.MANIFEST_PATH in pkg:
return cls(pkg)
return None
def __init__(self, pkg):
super(DictLoader, self).__init__('')
self.pkg = pkg
def open_file(self, path, mode='r'):
if self.pkg[path]['format'] == 'raw':
sio = io.BytesIO(self.pkg[path]['content'].encode())
setattr(sio, 'name', path)
elif self.pkg[path]['format'] == 'yaml':
content = yaml.safe_dump(self.pkg[path]['content'])
sio = io.BytesIO(content.encode())
setattr(sio, 'name', path)
else:
raise ValueError('Unknown type of content')
return sio
def list_files(self, subdir=None):
files = self.pkg.keys()
if subdir is None:
return files
subdir_len = len(subdir)
return [file_[subdir_len:].lstrip('/') for file_ in files
if file_.startswith(subdir)]
def exists(self, name):
return name in self.pkg
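# --- Hedged illustration (not one of the real YAML test cases; field names are examples only) ---
# DictLoader expects `pkg` to map file paths to {'format': 'raw'|'yaml', 'content': ...},
# and _try_load only accepts a dict that contains consts.MANIFEST_PATH, e.g. something like:
#   {consts.MANIFEST_PATH: {'format': 'yaml', 'content': {'FullName': 'demo.app'}},
#    'UI/ui.yaml':         {'format': 'raw',  'content': 'Name: demo'}}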
class DictFormatter(manager.Formatter):
def format(self, error):
return sorted([{'code': e.code, 'msg': e.message} for e in error],
key=lambda item: item['code'])
def load_cases():
cases_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'cases')
cases_files = [os.path.join(cases_path, f) for f in os.listdir(cases_path)
if os.path.isfile(os.path.join(cases_path, f))]
cases = []
for cases_file in cases_files:
with open(cases_file) as f:
cases.extend(list(yaml.load_all(f)))
return cases
cases = load_cases()
class TestCase(testscenarios.WithScenarios, oslotest.base.BaseTestCase):
"""Test case base class for all unit tests."""
scenarios = cases
def test_foo(self):
m = manager.Manager(self.pkg, loader=DictLoader)
errors = m.validate()
fmt = DictFormatter()
self.assertEqual(self.expected, fmt.format(errors))
|
StarcoderdataPython
|
1646662
|
<filename>test/programytest/extensions/geocode/test_geocode.py
import unittest
import os
import json
from programy.extensions.geocode.geocode import GeoCodeExtension
from programy.utils.geo.google import GoogleMaps
from programy.context import ClientContext
from programytest.aiml_tests.client import TestClient
class MockGoogleMaps(GoogleMaps):
def __init__(self, data_file_name):
self._data_file_name = data_file_name
def _get_response_as_json(self, url):
with open(self._data_file_name, "r") as data_file:
return json.load(data_file)
class MockGeoCodeExtension(GeoCodeExtension):
def __init__(self, geo_locator):
self._geo_locator = geo_locator
def get_geo_locator(self):
return self._geo_locator
class GeoCodeExtensionTests(unittest.TestCase):
def setUp(self):
client = TestClient()
self.context = client.create_client_context("testid")
def test_geocode_postcode1(self):
filename = os.path.dirname(__file__) + os.sep + "google_latlong.json"
self.assertTrue(os.path.isfile(filename))
geo_locator = MockGoogleMaps(filename)
self.assertIsNotNone(geo_locator)
geocode = MockGeoCodeExtension(geo_locator)
self.assertIsNotNone(geocode)
result = geocode.execute(self.context, "POSTCODE1 KY39UR")
self.assertIsNotNone(result)
self.assertEquals("LATITUDE DEC 56 FRAC 0720397 LONGITUDE DEC -3 FRAC 1752001", result)
def test_geocode_postcode2(self):
filename = os.path.dirname(__file__) + os.sep + "google_latlong.json"
self.assertTrue(os.path.isfile(filename))
geo_locator = MockGoogleMaps(filename)
self.assertIsNotNone(geo_locator)
geocode = MockGeoCodeExtension(geo_locator)
self.assertIsNotNone(geocode)
result = geocode.execute(self.context, "POSTCODE2 KY3 9UR")
self.assertIsNotNone(result)
self.assertEquals("LATITUDE DEC 56 FRAC 0720397 LONGITUDE DEC -3 FRAC 1752001", result)
def test_geocode_location(self):
filename = os.path.dirname(__file__) + os.sep + "google_latlong.json"
self.assertTrue(os.path.isfile(filename))
geo_locator = MockGoogleMaps(filename)
self.assertIsNotNone(geo_locator)
geocode = MockGeoCodeExtension(geo_locator)
self.assertIsNotNone(geocode)
result = geocode.execute(self.context, "LOCATION KINGHORN")
self.assertIsNotNone(result)
self.assertEquals("LATITUDE DEC 56 FRAC 0720397 LONGITUDE DEC -3 FRAC 1752001", result)
|
StarcoderdataPython
|
1687428
|
<gh_stars>1-10
class Decoder(object):
"""
A class used to represent a Decoder
"""
@staticmethod
def validate(text):
"""Validate string format for this cipher.
:param text: The cipher-text
:type text: str
:returns: Either the text is in the cipher format or not
:rtype: bool
:raise NotImplementedError: If the validate function is not set in the decoder
"""
raise NotImplementedError
@staticmethod
def decode(text):
"""Decode the text by the cipher
If there are multiple ways to decode the text, return all of them
:param text: The cipher-text
:type text: str
:returns: List of the plain-texts (or plain-text) after decode
:rtype: list
:raise NotImplementedError: If the decode function is not set in the decoder
"""
raise NotImplementedError
@classmethod
def safe_decode(cls, text):
"""Validate the format of the text and decode it
First check if the text is in the format of the cipher, if so decode it.
If the text is not in the format, return empty list
:param text: The cipher-text
:type text: str
:returns: List of the plain-texts (or plain-text) after decode
:rtype: list
"""
if cls.validate(text): # Check the text is in the cipher format
return cls.decode(text) # Decode it
else:
return [] # The text is not in the cipher format
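# --- Hedged usage sketch (not part of the original module) ---
# A minimal concrete decoder built on the base class above; it treats any non-empty
# string as "in format" and returns a single candidate plain-text (the reversed string).
class ReverseDecoder(Decoder):
    @staticmethod
    def validate(text):
        # Any non-empty string counts as valid for this toy cipher.
        return isinstance(text, str) and len(text) > 0

    @staticmethod
    def decode(text):
        # One candidate plain-text: the reversed string.
        return [text[::-1]]

# ReverseDecoder.safe_decode("dlrow olleh") -> ["hello world"]
# ReverseDecoder.safe_decode("")            -> []  (validate fails, decode is never called)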
|
StarcoderdataPython
|
75229
|
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Serializers for DataFile Scraper"""
from pathlib import Path
from typing import Optional, Tuple
from .utils import get_metadata_from_db, ManifestRow
def metadata_from_manifest(manifest_row: ManifestRow, manifest_dir: Path):
data = manifest_row._asdict()
data["file_dir"] = str(manifest_dir)
return data
def metadata_from_db(
metadata_db_url: str, species: str, ens_release: int
) -> Tuple[Optional[dict], Optional[str]]:
ens_metadatas, err = get_metadata_from_db(metadata_db_url, species, ens_release)
if err:
return None, err
data = ens_metadatas[0]._asdict()
data["release_date"] = data["release_date"].isoformat()
return data, None
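# --- Hedged usage sketch (the URL, species and release below are placeholders, not from the source) ---
# The helpers above follow a (data, err) return convention, so a caller would branch on the error first:
#   data, err = metadata_from_db("mysql://user:pass@host/metadata", "homo_sapiens", 104)
#   if err:
#       raise RuntimeError(err)
#   print(data["release_date"])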
|
StarcoderdataPython
|
4830235
|
<filename>software/multifluids_icferst/tools/genpvtu.py
#!/usr/bin/env python
import os
import sys
import tempfile
import vtk
import fluidity.diagnostics.debug as debug
import fluidity.diagnostics.filehandling as filehandling
import fluidity.diagnostics.fluiditytools as fluiditytools
import fluidity.diagnostics.vtutools as vtktools
if not len(sys.argv) == 2:
print "Usage: genpvtu basename"
sys.exit(1)
basename = sys.argv[1]
debug.dprint("vtu basename: " + basename)
nPieces = fluiditytools.FindMaxVtuId(basename) + 1
debug.dprint("Number of pieces: " + str(nPieces))
# Write to a temporary directory so that the first piece isn't overwritten
tempDir = tempfile.mkdtemp()
# Create the parallel writer
writer = vtk.vtkXMLPUnstructuredGridWriter()
writer.SetNumberOfPieces(nPieces)
writer.WriteSummaryFileOn()
pvtuName = basename + ".pvtu"
writer.SetFileName(os.path.join(tempDir, pvtuName))
# Load in the first piece, so that the parallel writer has something to do (and
# knows which fields we have)
pieceName = fluiditytools.VtuFilenames(basename, 0)[0]
pieceVtu = vtktools.vtu(pieceName)
if vtk.vtkVersion.GetVTKMajorVersion() <= 5:
writer.SetInput(0, pieceVtu.ugrid)
else:
writer.SetInputData(0, pieceVtu.ugrid)
# Write
writer.Write()
# Move the output back and clean up
filehandling.Move(os.path.join(tempDir, pvtuName), pvtuName)
filehandling.Rmdir(tempDir, force = True)
|
StarcoderdataPython
|
3380258
|
<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from .compat import isstr
from .sql import sql_escape
from .log import configure_logging, default_logger
from .argparser import configure_parser, default_parser
from .csv import flatten, unflatten
__all__ = [
'isstr',
'sql_escape',
'configure_logging', 'default_logger',
'configure_parser', 'default_parser',
'flatten', 'unflatten'
]
|
StarcoderdataPython
|
162933
|
<reponame>AtjonTV/Python-1.4
# Temporary file name allocation
#
# XXX This tries to be not UNIX specific, but I don't know beans about
# how to choose a temp directory or filename on MS-DOS or other
# systems so it may have to be changed...
import os
# Parameters that the caller may set to override the defaults
tempdir = None
template = None
# Function to calculate the directory to use
def gettempdir():
global tempdir
if tempdir is not None:
return tempdir
attempdirs = ['/usr/tmp', '/tmp', os.getcwd(), os.curdir]
if os.name == 'nt':
attempdirs.insert(0, 'C:\\TEMP')
attempdirs.insert(0, '\\TEMP')
if os.environ.has_key('TMPDIR'):
attempdirs.insert(0, os.environ['TMPDIR'])
testfile = gettempprefix() + 'test'
for dir in attempdirs:
try:
filename = os.path.join(dir, testfile)
fp = open(filename, 'w')
fp.write('blat')
fp.close()
os.unlink(filename)
tempdir = dir
break
except IOError:
pass
if tempdir is None:
msg = "Can't find a usable temporary directory amongst " + `attempdirs`
raise IOError, msg
return tempdir
# Function to calculate a prefix of the filename to use
def gettempprefix():
global template
if template == None:
if os.name == 'posix':
template = '@' + `os.getpid()` + '.'
else:
template = 'tmp' # XXX might choose a better one
return template
# Counter for generating unique names
counter = 0
# User-callable function to return a unique temporary file name
def mktemp():
global counter
dir = gettempdir()
pre = gettempprefix()
while 1:
counter = counter + 1
file = os.path.join(dir, pre + `counter`)
if not os.path.exists(file):
return file
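# Example use (same vintage syntax as the rest of this module; note that mktemp() only
# returns a fresh name -- the caller is still responsible for actually creating the file):
#       name = mktemp()
#       fp = open(name, 'w')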
|
StarcoderdataPython
|
1634939
|
#!/usr/bin/env python3
"""concatenates two matrices"""
def cat_matrices2D(mat1, mat2, axis=0):
"""cat_matrices2D: concatenates two matrices along a specific axis
Args:
mat1: First matrix to concatenate
mat2: Second matrix to concatenate
axis (optional): Defaults to 0.
"""
result = []
# Check axis 1
if axis == 1 and len(mat1) == len(mat2):
for count, value in enumerate(mat1):
result.append(value + mat2[count])
return result
# Check axis 0
elif axis == 0 and len(mat1[0]) == len(mat2[0]):
for row in mat1:
result.append(list(row))
for row in mat2:
result.append(list(row))
return result
return None
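# --- Hedged usage sketch (not part of the original file) ---
if __name__ == "__main__":
    m1 = [[1, 2], [3, 4]]
    m2 = [[5, 6]]
    m3 = [[7], [8]]
    print(cat_matrices2D(m1, m2))          # [[1, 2], [3, 4], [5, 6]]
    print(cat_matrices2D(m1, m3, axis=1))  # [[1, 2, 7], [3, 4, 8]]
    print(cat_matrices2D(m1, m3))          # None (shapes incompatible along axis 0)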
|
StarcoderdataPython
|
3393985
|
from pathlib import Path
ROOT_DIR = Path(__file__).parent.parent.resolve()
DATA_DIR = ROOT_DIR.joinpath('data')
RESULTS_DIR = ROOT_DIR.joinpath('results')
PLOTS_DIR = ROOT_DIR.joinpath('plots')
UMAPS_DIR = ROOT_DIR.joinpath('umaps')
SCORES_PATH = RESULTS_DIR.joinpath('scores.csv')
TIMES_PATH = RESULTS_DIR.joinpath('times.csv')
def create_required_folders():
for _dir in (DATA_DIR, RESULTS_DIR, PLOTS_DIR, UMAPS_DIR):
_dir.mkdir(exist_ok=True)
return
|
StarcoderdataPython
|
3244750
|
import glob
import os
from PIL import Image
from minio import Minio
from minio.error import ResponseError
import logging
import confluent_kafka
import pymongo
import json
from bson.objectid import ObjectId
minio_endpoint = os.environ["MINIO_ENDPOINT"]
minio_access_key = os.environ["MINIO_ACCESSKEY"]
minio_secret_key = os.environ["MINIO_SECRETKEY"]
minio_client = Minio(minio_endpoint,
minio_access_key,
minio_secret_key,
secure=False)
mongodb_user = os.environ["MONGODB_ROOT_USER"]
mongodb_password = os.environ["MONGODB_ROOT_PASSWORD"]
mongodb_server = os.environ["MONGODB_SERVER"]
mongodb_db = os.environ["MONGODB_DATABASE"]
metadata_collection = os.environ["MONGODB_METADATA_TABLE"]
kafka_bootstrap_servers = os.environ["KAFKA_BOOTSTRAP_SERVERS"]
thumbnails_bucket_event_topic = os.environ["THUMBNAILS_EVENT_TOPIC"]
mongo_client = pymongo.MongoClient(
"mongodb://{}:{}@{}".format(mongodb_user, mongodb_password, mongodb_server))
mongodb_db = mongo_client[mongodb_db]
metadata_collection = mongodb_db[metadata_collection]
# Create logger for consumer (logs will be emitted when poll() is called)
logger = logging.getLogger('consumer')
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)-15s %(levelname)-8s %(message)s'))
logger.addHandler(handler)
def start_consumer():
conf = {
'bootstrap.servers': kafka_bootstrap_servers,
'group.id': 'thumbnailer',
'auto.offset.reset': 'earliest',
'enable.partition.eof': 'false'
}
consumer = confluent_kafka.Consumer(conf, logger=logger)
consumer.subscribe([thumbnails_bucket_event_topic])
try:
while True:
msg = consumer.poll()
if msg is None:
continue
if msg.error():
raise confluent_kafka.KafkaException("Consumer error: {}".format(msg.error()))
else:
object_id = json.loads(msg.value().decode('utf-8'))["ObjectId"]
logger.info("ObjectId obtained: {}, extracting object bucket and key info ...".format(object_id))
query_results = metadata_collection.find_one({"_id": ObjectId(object_id)},
{"s3.bucket.name": 1, "s3.object.key": 1, "_id": 0})
local_image = download(query_results["s3"]["bucket"]["name"], query_results["s3"]["object"]["key"])
thumbnail = generate_thumbnail(local_image)
upload(thumbnail, "thumbnails", thumbnail)
metadata_collection.update_one({"_id": ObjectId(object_id)},
{"$set": {"thumbnails": "{}/{}".format("thumbnails", thumbnail)}})
finally:
consumer.close()
# Get a full object
def download(bucket, object_key):
logger.info("Downloading image from {}/{}".format(bucket, object_key))
try:
data = minio_client.get_object(bucket, object_key)
with open(os.path.split(object_key)[1], 'wb') as file_data:
for d in data.stream(32 * 1024):
file_data.write(d)
return os.path.split(object_key)[1]
except ResponseError as err:
print(err)
def upload(local_file, bucket, object_key):
logger.info("Uploading thumbnails for {} to {}/{}".format(local_file, bucket, object_key))
try:
with open(local_file, 'rb') as file_data:
file_stat = os.stat(local_file)
minio_client.put_object(bucket, object_key, file_data,
file_stat.st_size, content_type='image/jpeg')
except ResponseError as err:
logger.error(err)
finally:
cleanup()
def generate_thumbnail(image_file):
logger.info("Generating thumbnails for {}".format(image_file))
size = 128, 128
file_name, ext = os.path.splitext(image_file)
im = Image.open(image_file)
im.thumbnail(size)
im.save(file_name + ".thumbnail", "JPEG")
return file_name + ".thumbnail"
def cleanup():
for file in glob.glob("*.thumbnail"):
os.remove(file)
for file in glob.glob("*.jpg"):
os.remove(file)
def main():
"""Launcher."""
# print("Downloading...")
# local_image = download("images", "100501.jpg")
# print("Generating thumbnail")
# thumbnail = generate_thumbnail(local_image)
# print("Uploading...")
# upload(thumbnail, "thumbnails", thumbnail)
start_consumer()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3397897
|
<filename>pressure_adapt.py<gh_stars>0
import os
import pandas as pd
import torch
import torch.nn.functional as F
import numpy as np
import time
from featurization.data_utils import load_data_from_df, construct_loader_gf_pressurever, construct_dataset_gf_pressurever, data_prefetcher
from models.transformer import make_model
from argparser import parse_finetune_args
import pickle
from utils import *
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train(model, epoch, train_loader, optimizer, scheduler, adapter_dim):
model.train()
loss = 0
loss_all = 0
prefetcher = data_prefetcher(train_loader)
batch_idx = 0
data = prefetcher.next()
while data is not None:
lr = scheduler.optimizer.param_groups[0]['lr']
adjacency_matrix, node_features, distance_matrix, global_features, y = data
batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0
optimizer.zero_grad()
output = model(node_features, batch_mask, adjacency_matrix, distance_matrix, global_features)
loss = F.mse_loss(output.reshape(-1), y.reshape(-1))
loss.backward()
step_loss = loss.cpu().detach().numpy()
loss_all += step_loss
optimizer.step()
scheduler.step()
print(f'After Step {batch_idx} of Epoch {epoch}, Loss = {step_loss}, Lr = {lr}')
batch_idx += 1
data = prefetcher.next()
return loss_all / len(train_loader.dataset)
def test(model, data_loader, mean, std, adapter_dim):
model.eval()
error = 0
prefetcher = data_prefetcher(data_loader)
batch_idx = 0
data = prefetcher.next()
futures, ys = None, None
while data is not None:
adjacency_matrix, node_features, distance_matrix, global_features, y = data
batch_mask = torch.sum(torch.abs(node_features), dim=-1) != 0
output = model(node_features, batch_mask, adjacency_matrix, distance_matrix, global_features)
output = output.reshape(y.shape).cpu().detach().numpy()
y = y.cpu().detach().numpy()
ys = y if ys is None else np.concatenate([ys,y], axis=0)
futures = output if futures is None else np.concatenate([futures,output], axis=0)
batch_idx += 1
data = prefetcher.next()
futures = np.array(futures) * std + mean
ys = np.array(ys) * std + mean
mae = np.mean(np.abs(futures - ys), axis=0)
rmse = np.sqrt(np.mean((futures - ys)**2, axis=0))
# pcc = np.corrcoef(futures,ys)[0][1]
pcc = np.array([np.corrcoef(futures[:,i],ys[:,i])[0][1] for i in range(adapter_dim)])
smape = 2 * np.mean(np.abs(futures-ys)/(np.abs(futures)+np.abs(ys)), axis=0)
return {'MAE':mae, 'RMSE':rmse, 'PCC':pcc, 'sMAPE':smape}
def get_RdecayFactor(warmup_step):
def warmupRdecayFactor(step):
if step < warmup_step:
return step / warmup_step
else:
return (warmup_step / step) ** 0.5
return warmupRdecayFactor
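# Hedged sanity note (not part of the original script): with warmup_step = 4 the factor
# returned above ramps linearly up to 1.0 and then decays as sqrt(warmup_step / step):
#   warmupRdecayFactor(2) == 0.5, warmupRdecayFactor(4) == 1.0, warmupRdecayFactor(16) == 0.5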
if __name__ == '__main__':
model_params = parse_finetune_args()
batch_size = model_params['batch_size']
device_ids = [0,1,2,3]
logger = get_logger(model_params['save_dir'] + f"/{model_params['gas_type']}_{model_params['pressure']}")
X, f, y, p = load_data_from_df(model_params['data_dir'],gas_type=model_params['gas_type'], pressure='all',add_dummy_node = True,use_global_features = True)
tar_idx = np.where(p==model_params['pressure'])[0][0]
print(f'Loaded {len(X)} data.')
logger.info(f'Loaded {len(X)} data.')
y = np.array(y)
mean = y[...,tar_idx].mean()
std = y[...,tar_idx].std()
y = (y - mean) / std
f = np.array(f)
fmean = f.mean(axis=0)
fstd = f.std(axis=0)
f = (f - fmean) / fstd
with open(os.path.join(model_params['save_dir'] + f"/{model_params['gas_type']}_{model_params['pressure']}",f'offset.p'),'wb') as file:
pickle.dump((model_params['pressure'], mean, std, fmean, fstd), file)
printParams(model_params,logger)
fold_num = model_params['fold']
epoch_num = model_params['epoch']
test_errors = []
idx_list = np.arange(len(X))
set_seed(model_params['seed'])
np.random.shuffle(idx_list)
X = applyIndexOnList(X,idx_list)
f = f[idx_list]
y = y[idx_list]
test_errors = []
for fold_idx in range(1, fold_num + 1):
set_seed(model_params['seed'])
ori_state = CheckpointHandler(model_params['ori_dir']+f'/Fold-{fold_idx}').checkpoint_avg()
ori_params = ori_state['params']
ori_params['adapter_finetune'] = True
model = make_model(**ori_params)
model.set_adapter_dim(model_params['adapter_dim'])
model = torch.nn.DataParallel(model, device_ids=device_ids)
model.load_state_dict(ori_state['model'],strict=False)
model = model.to(device)
lr = model_params['lr']
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,lr_lambda = get_RdecayFactor(ori_params['warmup_step']))
best_val_error = 0
best_val_error_s = 0
test_error = 0
best_epoch = -1
train_idx, val_idx, test_idx = splitdata(len(X),fold_num, fold_idx)
train_set = construct_dataset_gf_pressurever(applyIndexOnList(X,train_idx), f[train_idx], y[train_idx],p, is_train=True, mask_point=model_params['pressure'])
val_set = construct_dataset_gf_pressurever(applyIndexOnList(X,val_idx), f[val_idx], y[val_idx],p, is_train=True, mask_point=model_params['pressure'])
test_set = construct_dataset_gf_pressurever(applyIndexOnList(X,test_idx), f[test_idx], y[test_idx],p, is_train=True, mask_point=model_params['pressure'])
ckpt_handler = CheckpointHandler(model_params['save_dir'] + f"/{model_params['gas_type']}_{model_params['pressure']}/Fold-{fold_idx}")
for epoch in range(1,epoch_num + 1):
train_adapter_dim = model_params['adapter_dim']
train_loader = construct_loader_gf_pressurever(train_set,batch_size)
loss = train(model, epoch, train_loader,optimizer,scheduler, train_adapter_dim)
val_loader = construct_loader_gf_pressurever(val_set, batch_size, shuffle=False)
val_error = test(model, val_loader, mean, std, train_adapter_dim)['MAE']
val_error_ = np.mean(val_error)
ckpt_handler.save_model(model,ori_params,epoch,val_error_)
if best_val_error == 0 or val_error_ <= best_val_error:
print("Enter test step.\n")
best_epoch = epoch
best_val_error = val_error_
test_loader = construct_loader_gf_pressurever(test_set, batch_size, shuffle=False)
test_error = test(model, test_loader, mean, std, train_adapter_dim)
for idx, pres in enumerate(p):
for _ in test_error.keys():
print('Fold: {:02d}, Epoch: {:03d}, Pressure: {}, Test {}: {:.7f}'.format(fold_idx, epoch, pres, _, test_error[_][idx]))
logger.info('Fold: {:02d}, Epoch: {:03d}, Pressure: {}, Test {}: {:.7f}'.format(fold_idx, epoch, pres, _, test_error[_][idx]))
lr = scheduler.optimizer.param_groups[0]['lr']
p_str = 'Fold: {:02d}, Epoch: {:03d}, Val MAE: {:.7f}, Best Val MAE: {:.7f}'.format(fold_idx, epoch, val_error_, best_val_error)
print(p_str)
logger.info(p_str)
for idx, pres in enumerate(p):
for _ in test_error.keys():
print('Fold: {:02d}, Epoch: {:03d}, Pressure: {}, Test {}: {:.7f}'.format(fold_idx, epoch, pres, _, test_error[_][idx]))
logger.info('Fold: {:02d}, Epoch: {:03d}, Pressure: {}, Test {}: {:.7f}'.format(fold_idx, epoch, pres, _, test_error[_][idx]))
test_errors.append(test_error)
for idx, pres in enumerate(p):
for _ in test_errors[0].keys():
mt_list = [__[_][idx] for __ in test_errors]
p_str = 'Pressure {}, Test {} of {:02d}-Folds: {:.7f}({:.7f})'.format(pres, _, fold_num, np.mean(mt_list), np.std(mt_list))
print(p_str)
logger.info(p_str)
|
StarcoderdataPython
|
1629322
|
<gh_stars>0
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC, expected_conditions
from selenium.webdriver.support.wait import WebDriverWait as wait
from fixtures.params import DEFAULT_PASSWORD
def login(driver, username = 'admin', password = DEFAULT_PASSWORD):
driver.find_element_by_id('txtUsername').send_keys(username)
driver.find_element_by_id('txtPassword').send_keys(password)
driver.find_element_by_id('btnLogin').click()
def welcome_message_method(driver):
condition = wait(driver, 2).until(EC.presence_of_element_located((By.ID, 'welcome')))
return condition.text
def error_message_method(driver):
return driver.find_element_by_id('spanMessage').text
def logout(driver):
driver.find_element_by_id('welcome').click()
wait(driver, 2).until(expected_conditions.visibility_of_element_located((By.LINK_TEXT, 'Logout'))).click()
|
StarcoderdataPython
|
5793
|
class ICFSError(IOError):
"""Error while making any filesystem API requests."""
|
StarcoderdataPython
|
3323702
|
<filename>websclaping1.py
# import requests
# from bs4 import BeautifulSoup
#
# url='https://news.yahoo.co.jp/flash?p=1'
# res=requests.get(url)
# soup=BeautifulSoup(res.content)
# parent=soup.find('div','newsFeed')
# targets=parent.findAll('div','newsFeed_item_title')
#
# for target in targets:
# print(target.text)
import requests
from bs4 import BeautifulSoup
url='https://news.yahoo.co.jp/flash?p=1'
res=requests.get(url)
soup=BeautifulSoup(res.content)
parents=soup.findAll('div','flashSummary')
# print(parents)
for parent in parents:
targets=parent.findAll('p','flashSummary_title')
for target in targets:
print(target.text)
# targets=parent.findAll('p','flashSummary_title')
|
StarcoderdataPython
|
145911
|
import datetime
from unittest.mock import MagicMock
import pytest
from bloop.models import BaseModel, Column
from bloop.stream.coordinator import Coordinator
from bloop.stream.stream import Stream
from bloop.types import Integer, String
from bloop.util import ordered
from . import build_shards
@pytest.fixture
def coordinator():
# MagicMock because we're testing __next__
return MagicMock(spec=Coordinator)
@pytest.fixture
def stream(coordinator, engine):
stream = Stream(model=Email, engine=engine)
stream.coordinator = coordinator
return stream
class Email(BaseModel):
class Meta:
stream = {
"include": {"new", "old"},
"arn": "stream-arn"
}
id = Column(Integer, hash_key=True)
data = Column(String)
def test_repr(stream):
assert repr(stream) == "<Stream[Email]>"
def test_iter(stream):
"""stream is both an Iterable and an Iterator"""
assert iter(stream) is stream
def test_token(engine):
engine.bind(Email)
shards = build_shards(3, {0: [1, 2]}, stream_arn=Email.Meta.stream["arn"])
shards[1].iterator_type = "latest"
shards[2].iterator_type = "at_sequence"
shards[2].sequence_number = "sequence-number"
stream = Stream(model=Email, engine=engine)
stream.coordinator.roots.append(shards[0])
stream.coordinator.active.extend(shards[1:])
assert ordered(stream.token) == ordered({
"stream_arn": "stream-arn",
"active": ["shard-id-1", "shard-id-2"],
"shards": [
{"shard_id": "shard-id-0"},
{"shard_id": "shard-id-1", "parent": "shard-id-0", "iterator_type": "latest"},
{"shard_id": "shard-id-2", "parent": "shard-id-0",
"iterator_type": "at_sequence", "sequence_number": "sequence-number"},
]
})
def test_heartbeat(stream, coordinator):
stream.heartbeat()
coordinator.heartbeat.assert_called_once_with()
def test_move_to(stream, coordinator):
stream.move_to("latest")
coordinator.move_to.assert_called_once_with("latest")
def test_next_no_record(stream, coordinator):
coordinator.__next__.return_value = None
# Explicit marker so we don't get next's default value
missing = object()
record = next(stream, missing)
assert record is None
def test_next_unpacks(stream, coordinator):
now = datetime.datetime.now(datetime.timezone.utc)
meta = {
"created_at": now,
"sequence_number": "sequence-number",
"event": {
"id": "event-id",
"type": "event-type",
"version": "event-version"
}
}
coordinator.__next__.return_value = {
# Impossible to have old and key, but for the sake of testing
# an object that's partially/fully loaded
"old": {
"id": {"N": "0"},
"data": {"S": "some-data"}
},
"key": {
# Omitted because the model only includes "new"
"id": {"N": "343"}
},
"new": None,
"meta": meta
}
record = next(stream)
assert record["old"].id == 0
assert record["old"].data == "some-data"
assert record["new"] is None
assert record["key"] is None
assert not hasattr(record["key"], "data")
|
StarcoderdataPython
|
3359039
|
<filename>cassie/misc/rewards/rnn_dyn_random_reward.py
import numpy as np
def jonah_RNN_reward(self):
qpos = np.copy(self.sim.qpos())
qvel = np.copy(self.sim.qvel())
ref_pos, ref_vel = self.get_ref_state(self.phase)
# TODO: should be variable; where do these come from?
# TODO: see magnitude of state variables to gauge contribution to reward
weight = [0.15, 0.15, 0.1, 0.05, 0.05, 0.15, 0.15, 0.1, 0.05, 0.05]
joint_error = 0
com_error = 0
orientation_error = 0
spring_error = 0
# each joint pos
for i, j in enumerate(self.pos_idx):
target = ref_pos[j]
actual = qpos[j]
joint_error += 50 * weight[i] * (target - actual) ** 2
# center of mass: x, y, z
for j in [0, 1, 2]:
target = ref_pos[j]
actual = qpos[j]
# NOTE: in Xie et al y target is 0
com_error += 10 * (target - actual) ** 2
actual_q = qpos[3:7]
target_q = ref_pos[3:7]
#target_q = [1, 0, 0, 0]
orientation_error = 5 * (1 - np.inner(actual_q, target_q) ** 2)
# left and right shin springs
for i in [15, 29]:
target = ref_pos[i] # NOTE: in Xie et al spring target is 0
actual = qpos[i]
spring_error += 1000 * (target - actual) ** 2
reward = 0.200 * np.exp(-joint_error) + \
0.450 * np.exp(-com_error) + \
0.300 * np.exp(-orientation_error) + \
0.050 * np.exp(-spring_error)
return reward
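# In compact form, the reward assembled above is a weighted sum of exponentials,
#   r = 0.200*exp(-E_joint) + 0.450*exp(-E_com) + 0.300*exp(-E_orient) + 0.050*exp(-E_spring),
# where each error term already carries its own scaling (50*w_i, 10, 5 and 1000 respectively).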
|
StarcoderdataPython
|
3313916
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright (c) 2017, taher and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class StockOUT(Document):
def on_submit(self):
frappe.errprint("in on on_submit")
# it = self.get('items')
# frappe.errprint(it)
se = frappe.new_doc("Stock Entry")
se.purpose = "Material Receipt"
for items in self.get('items'):
row = se.append("items",{})
row.t_warehouse = items.warehouse
row.item_code = items.item
row.qty = -items.quantity
row.expense_account = self.expense_account
se.insert(ignore_permissions=True)
se.submit()
|
StarcoderdataPython
|
1751722
|
from enum import Enum
class ReadCommand(Enum):
exhaustTemperature = bytes([0x04, 0x04, 0x14, 0x75])
supplyTemperature = bytes([0x04, 0x04, 0x14, 0x73])
extractTemperature = bytes([0x04, 0x04, 0x14, 0x74])
outdoorTemperature = bytes([0x04, 0x04, 0x14, 0x72])
humidity = bytes([0x01, 0x04, 0x14, 0x70])
bypass = bytes([0x00, 0x04, 0x14, 0x60])
filterPercent = bytes([0x01, 0x04, 0x14, 0x6a])
boost = bytes([0x01, 0x04, 0x15, 0x30])
supply_fan_speed = bytes([0x00, 0x04, 0x14, 0x50])
exhaust_fan_speed = bytes([0x00, 0x04, 0x14, 0x51])
fan_step = bytes([0x00, 0x04, 0x17, 0x81])
away_mode = bytes([0x01, 0x04, 0x15, 0x22])
battery_percent = bytes([0x01, 0x04, 0x03, 0x0f])
automatic_bypass = bytes([0x01, 0x04, 0x17, 0x06])
operation_mode = bytes([0x01, 0x04, 0x14, 0x12])
class UpdateCommand(Enum):
boost_activate = bytes([0x01, 0x06, 0x15, 0x30, 0x01])
boost_deactivate = bytes([0x01, 0x06, 0x15, 0x30, 0x00])
bypass_activate = bytes([0x00, 0x06, 0x14, 0x60, 0x01])
bypass_deactivate = bytes([0x00, 0x06, 0x14, 0x60, 0x00])
automatic_bypass_deactivate = bytes([0x01, 0x06, 0x17, 0x06, 0x01])
automatic_bypass_activate = bytes([0x01, 0x06, 0x17, 0x06, 0x00])
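# --- Hedged usage note (not part of the original module; wire framing/checksums are not shown here) ---
# Each member simply stores a raw request payload; callers retrieve it via `.value`, e.g.
#   ReadCommand.outdoorTemperature.value == bytes([0x04, 0x04, 0x14, 0x72])
#   UpdateCommand.boost_activate.value   == bytes([0x01, 0x06, 0x15, 0x30, 0x01])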
|
StarcoderdataPython
|
22514
|
from . import models
from . import serializers
from rest_framework import viewsets, permissions
class CompetitionViewSet(viewsets.ModelViewSet):
"""ViewSet for the Competition class"""
queryset = models.Competition.objects.all()
serializer_class = serializers.CompetitionSerializer
permission_classes = [permissions.IsAuthenticated]
class TrainingViewSet(viewsets.ModelViewSet):
"""ViewSet for the Training class"""
queryset = models.Training.objects.all()
serializer_class = serializers.TrainingSerializer
permission_classes = [permissions.IsAuthenticated]
class CompetitorViewSet(viewsets.ModelViewSet):
"""ViewSet for the Competitor class"""
queryset = models.Competitor.objects.all()
serializer_class = serializers.CompetitorSerializer
permission_classes = [permissions.IsAuthenticated]
class TrainingpresenceViewSet(viewsets.ModelViewSet):
"""ViewSet for the Trainingpresence class"""
queryset = models.Trainingpresence.objects.all()
serializer_class = serializers.TrainingpresenceSerializer
permission_classes = [permissions.IsAuthenticated]
class DriverViewSet(viewsets.ModelViewSet):
"""ViewSet for the Driver class"""
queryset = models.Driver.objects.all()
serializer_class = serializers.DriverSerializer
permission_classes = [permissions.IsAuthenticated]
class EventViewSet(viewsets.ModelViewSet):
"""ViewSet for the Event class"""
queryset = models.Event.objects.all()
serializer_class = serializers.EventSerializer
permission_classes = [permissions.IsAuthenticated]
class ResultViewSet(viewsets.ModelViewSet):
"""ViewSet for the Result class"""
queryset = models.Result.objects.all()
serializer_class = serializers.ResultSerializer
permission_classes = [permissions.IsAuthenticated]
class LocationViewSet(viewsets.ModelViewSet):
"""ViewSet for the Location class"""
queryset = models.Location.objects.all()
serializer_class = serializers.LocationSerializer
permission_classes = [permissions.IsAuthenticated]
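# --- Hedged wiring sketch (the project's real urls.py is not shown; prefixes below are illustrative) ---
# These ModelViewSets are typically exposed through a DRF router, e.g.:
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r'competitions', CompetitionViewSet)
#   router.register(r'trainings', TrainingViewSet)
#   urlpatterns = router.urls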
|
StarcoderdataPython
|
112904
|
<filename>pyradmon/args.py
#!/usr/bin/env python
# PyRadmon - Python Radiance Monitoring Tool
# Copyright 2014 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Argument Parsing Library -
# library for parsing command line arguments
#
import argparse
import textwrap
import os
import sys
from _version import __version__
from core import *
import config
import config_printer
from config import SPECIAL_FIELDS
import log
import logging
try:
from collections import OrderedDict
except:
try:
from ordereddict import OrderedDict
except:
print "ERROR: OrderedDict not found! It is required to run this script."
sys.exit(1)
def add_args(parser, inherit, opts):
for opt in opts:
if inherit:
opts[opt]['help'] = argparse.SUPPRESS
opts[opt]['default'] = argparse.SUPPRESS
opt_opt = dict(opts[opt])
parser.add_argument(opt, **opt_opt)
def add_list_args(parser, inherit = False):
opts = OrderedDict()
opts['--data-single-date'] = \
{
'action' : 'store',
'metavar' : 'DATE',
'dest' : 'data_single_date',
'help' : 'Use single date. Format should be "YYYY-MM-DD HHz". Negates the options below specifying dates and times.',
}
opts['--data-path-format'] = \
{
'action' : 'store',
'metavar' : 'PATH_FORMAT',
'dest' : 'data_path_format',
'help' : 'Specify the path format for data.',
}
opts['--data-experiment-id'] = \
{
'action' : 'store',
'metavar' : 'EXPERIMENT_ID',
'dest' : 'data_experiment_id',
'help' : 'Specify the experiment ID for data.',
}
opts['--data-start-date'] = \
{
'action' : 'store',
'metavar' : 'DATE',
'dest' : 'data_start_date',
'help' : 'Specify the start date for data. Format should be "YYYY-MM-DD HHz".',
}
opts['--data-end-date'] = \
{
'action' : 'store',
'metavar' : 'DATE',
'dest' : 'data_end_date',
'help' : 'Specify the end date for data. Format should be "YYYY-MM-DD HHz".',
}
opts['--data-instrument-sat'] = \
{
'action' : 'store',
'metavar' : 'INSTRUMENT_SAT',
'dest' : 'data_instrument_sat',
'help' : 'Specify the instrument and satellite ID for data.',
}
opts['--data-step'] = \
{
'action' : 'store',
'metavar' : 'STEP_TYPE',
'dest' : 'data_step',
'help' : 'Specify the step/type for the data. "anl" and "ges" are allowed. If you wish to specify more than one, use a pipe to separate them, e.g. "anl|ges".',
}
opts['--data-time-delta'] = \
{
'action' : 'store',
'metavar' : 'TIME_DELTA',
'dest' : 'data_time_delta',
'help' : """Specify the time interval for data. The time format is
expressed using the sleep command's format, "#U", where # is
a number and U is a letter representing a unit of time.""",
}
add_args(parser, inherit, opts)
def add_dump_args(parser, inherit = False):
opts = OrderedDict()
opts['--dump-columns'] = \
{
'action' : 'store',
'metavar' : 'COLUMNS',
'dest' : 'dump_columns',
'help' : 'Specify the columns to dump/use, separated by commas.',
}
opts['--dump-all-channels'] = \
{
'action' : 'store_true',
'dest' : 'dump_all_channels',
'help' : 'Specify to dump all channels. Negates the option below specifying channels to use.',
}
opts['--dump-channels'] = \
{
'action' : 'store',
'metavar' : 'CHANNELS',
'dest' : 'dump_channels',
'help' : 'Specify the channels to dump/use, separated by commas. Ranges are also acceptable.',
}
opts['--dump-assim-only'] = \
{
'action' : 'store_true',
'dest' : 'dump_assim_only',
'help' : 'Specify to use only assimilated data (iuse = 1).',
}
opts['--dump-suppress-warnings'] = \
{
'action' : 'store_true',
'dest' : 'dump_suppress_warnings',
'help' : 'Specify whether to suppress data warnings or not. This will hide important warnings about data inconsistencies, so only enable if you are 100%% sure that your data is valid!',
}
add_args(parser, inherit, opts)
def add_plot_args(parser, inherit = False):
opts = OrderedDict()
opts['--plot-define-plots'] = \
{
'action' : 'append',
'metavar' : 'PLOTS',
'dest' : 'plot_define_plots',
'help' : 'Define plots. Uses the value list system, specified by "plot1,plot2,plot3,...".',
}
opts['--plot-define-subplots'] = \
{
'action' : 'append',
'metavar' : 'SUBPLOTS',
'dest' : 'plot_define_subplots',
'help' : 'Define subplots. Uses the key-value pair system, specified by "plot1:subplot1,subplotid2,...;".',
}
opts['--plot-define-axes'] = \
{
'action' : 'append',
'metavar' : 'AXES',
'dest' : 'plot_define_axes',
'help' : 'Define axes for the subplot. Uses the key-value pair system, specified by "plot1|subplot1|y:ticks=5,label="test";...".',
}
opts['--plot-define-data'] = \
{
'action' : 'append',
'metavar' : 'DATA',
'dest' : 'plot_define_data',
'help' : 'Define data to be plotted in the subplot. Uses the key-value pair system, specified by "plot1|subplot1|x:data_field_1;plot1|subplot1|y:...".',
}
opts['--plot-define-title'] = \
{
'action' : 'append',
'metavar' : 'TITLE',
'dest' : 'plot_define_title',
'help' : 'Define the title for the plot, and optionally, subplot and legend. Uses the key-value pair system, specified by "plot1:title;plot1|subplot1:title;plot1|subplot1|legend:title;...".',
}
opts['--plot-define-output'] = \
{
'action' : 'append',
'metavar' : 'OUTPUT_FILE',
'dest' : 'plot_define_output',
'help' : 'Define the output file for the plot. Uses the key-value pair system, specified by "plot1:output_file.png;...".',
}
opts['--plot-define-settings'] = \
{
'action' : 'append',
'metavar' : 'SETTINGS',
'dest' : 'plot_define_settings',
'help' : 'Define the settings for the plot. Uses the key-value pair system, specified by "plot1:target_size=595x700,dpi=50;...".',
}
opts['--plot-define-custom-vars'] = \
{
'action' : 'append',
'metavar' : 'CUSTOM_VARS',
'dest' : 'plot_define_custom_vars',
'help' : 'Define the custom variables for use in the output file and title. Uses the key-value pair system, specified by "myvar:123,myvar2:abc,...".',
}
opts['--plot-make-dirs'] = \
{
'action' : 'store_true',
'dest' : 'plot_make_dirs',
'help' : 'Make directories if the specified output path does not exist.',
}
add_args(parser, inherit, opts)
def add_config_args(parser, inherit = False):
opts = OrderedDict()
opts['--config-display'] = \
{
'action' : 'store_true',
'dest' : 'config_display',
'help' : 'Displays the configuration.',
}
opts['--config-load'] = \
{
'action' : 'store',
'metavar' : 'FILE',
'dest' : 'config_load',
'help' : 'Load an existing configuration file. Note that this will override the main --config-file argument.',
}
opts['--config-save'] = \
{
'action' : 'store',
'metavar' : 'FILE',
'dest' : 'config_save',
'help' : 'Save the currently loaded configuration to a file. Note that saved configuration may not be placed in order.',
}
add_args(parser, inherit, opts)
def make_argparser():
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, \
epilog = textwrap.dedent("""\
Logging levels, from least to most importance:
DEBUG, INFO, WARNING, ERROR, CRITICAL
Setting a logging level will show messages that meet and
exceed that level. For instance, setting INFO will show
INFO, WARNING, ERROR, and CRITICAL messages.
Priority modes:
DUMMYMP_GENEROUS - Generous mode. Very conservative about using any
CPU, and ensures that no one else is disrupted.
Note that this is VERY GENEROUS - if all CPUs are
taken, DummyMP will wait until there are
available CPUs! (All other modes will run a
single process, regardless of CPU usage.) This is
the slowest mode!
DUMMYMP_NORMAL - Normal mode. Careful not to take up too much
resources on the CPU, but it will try to get
things done. This is faster than GENEROUS, but it
isn't the fastest. This mode is the default and
is recommended for most conditions.
DUMMYMP_AGGRESSIVE - Aggressive mode. This mode considers other users,
but it may spawn processes anyway depending on
how other processes behave. This is faster than
NORMAL, and is recommended for semi-important
conditions.
DUMMYMP_EXTREME - Extreme mode. This mode somewhat considers other
users, but unless the other processes are using
a significant portion of the CPU, it will spawn
spawn processes anyway. This is faster than
AGGRESSIVE, and is recommended for important
conditions.
DUMMYMP_NUCLEAR - Nuclear mode. This mode does NOT consider other
users, and just runs as many processes as it can
allow (total number of cores). This is much
faster than EXTREME, and is recommended for
really important conditions. Note that this may
earn you very angry co-workers knocking down your
door with pitchforks, so use sparingly!
Quick Start Examples:
List data available:
%(prog)s --config-file=config.yaml list
Dump data:
%(prog)s --config-file=config.yaml dump
Make plots:
%(prog)s --config-file=config.yaml plot
Print configuration:
%(prog)s --config-file=config.yaml config
Make plots and log output:
%(prog)s --config-file=config.yaml --logging-output="stdout,file" \\
--logging-file="mylog.txt" plot
"""),
version = 'PyRadmon v' + __version__) # Old: %(prog)s evals to pyradmon.py
subparsers = parser.add_subparsers(metavar="verb", dest = "verb", help='Description')
main_opts = OrderedDict()
main_opts['--config-file'] = \
{
'action' : 'store',
'dest' : 'config_file',
'metavar' : 'FILE',
'help' : 'Load a configuration file for PyRadmon to use. Synonymous to --config-load, but for all verbs and options.',
}
main_opts['--config-unset'] = \
{
'action' : 'append',
'dest' : 'config_unset',
'metavar' : 'VARS',
'help' : 'Unset (or remove) variables after all configuration settings (config file and command line) have been loaded. Format for VARS is "VARIABLE1;VARIABLE2;...". For core PyRadmon configuration, the variable syntax is "config.key". For plot configuration, the variable syntax follows the plot argument hierarchy format at any level, e.g. "plot|subplot|attr|subattr", "plot|subplot", or even "plot". (See plot help for more details regarding the plot hierarchy format.) Note that removing certain variables can cause PyRadmon to not run.',
}
main_opts['--logging-output'] = \
{
'action' : 'store',
'metavar' : 'OUTPUT',
'dest' : 'logging_output',
'help' : 'Specify where to output to. Options include file, stdout, and stderr. Multiple outputs can be specified, separated with a comma.',
}
main_opts['--logging-file'] = \
{
'action' : 'store',
'metavar' : 'FILE',
'dest' : 'logging_file',
'help' : 'If outputting to a file, specify the file path to save to.',
}
main_opts['--logging-level'] = \
{
'action' : 'store',
'metavar' : 'LOG_LEVEL',
'dest' : 'logging_level',
'help' : 'Set the logging level for PyRadmon.',
}
main_opts['--mp-disable'] = \
{
'action' : 'store_true',
'dest' : 'mp_disable',
'help' : 'Disable multiprocessing (mp) optimizations in PyRadmon.',
}
main_opts['--mp-priority-mode'] = \
{
'action' : 'store',
'metavar' : 'PRIORITY_MODE',
'dest' : 'mp_priority_mode',
'help' : 'Set the priority mode for the multiprocessing (mp) optimizations in PyRadmon. Options are GENEROUS, NORMAL, AGGRESSIVE, EXTREME, and NUCLEAR. GENEROUS yields to other CPU hungry processes, while NUCLEAR spawns as many processes as it can regardless of CPU usage.',
}
main_opts['--mp-cpu-limit'] = \
{
'action' : 'store',
'metavar' : 'NUM_CPUS',
'dest' : 'mp_cpu_limit',
'help' : 'Limit the number of CPUs that the multiprocessing (mp) optimizations in PyRadmon can use.',
}
add_args(parser, False, main_opts)
# A list command
list_parser = subparsers.add_parser('list', help='Lists the data set available for use.', \
description = 'Lists the data set available for use.',
epilog = textwrap.dedent("""\
For --data-time-delta:
Available time units (case-sensitive):
s: seconds m: minutes h: hours d: days
w: weeks M: months y: years
For instance, to specify 5 months, 3 minutes, 2 seconds:
"5M 3m 2s"
Multiple elements with same time units will be summed up,
regardless of position or order. "3m 2s 5m" == "8m 2s"
Years is not a native time delta, so this uses the
conversion "1 year = 365 days".
Note that specifying years may have some caveats, see:
http://stackoverflow.com/a/765990
"""), \
formatter_class=argparse.RawDescriptionHelpFormatter)
add_list_args(list_parser, False)
# [dump]
dump_parser = subparsers.add_parser('dump', help='Dumps data from the data set.', \
description = textwrap.dedent("""\
Dumps data from the data set.
Inherits arguments from [list]. [list] arguments may be required to use
[dump]. For information on [list] arguments, see the [list] help.
"""), \
formatter_class=argparse.RawDescriptionHelpFormatter)
add_list_args(dump_parser, True)
add_dump_args(dump_parser, False)
# [plot]
plot_parser = subparsers.add_parser('plot', help='Creates a plot given plot parameters.', \
description = textwrap.dedent("""\
Creates a plot given plot parameters.
Inherits arguments from [list] and [dump]. [list] and [dump] arguments
may be required to use [plot]. For information on [list] and [dump]
arguments, see their respective help.
NOTE: These options are advanced - although you could (potentially)
plot using these options, it would probably be very painful!
All of these options can be defined via config file, which is
much easier and less verbose! These options are meant to be
used in conjunction with a config file for overriding certain
configuration options and testing them.
"""), \
epilog = textwrap.dedent("""\
Definitions follow either a simple value list system or a simple
key-value pair system.
Value List System:
The value list system looks like:
"value1,value2,value3,..."
For the value list system, values are separated with commas.
Key-Value Pair System:
The key-value pair system looks like:
"key1:value1,value2;key2:value3,value4,..."
For the key-value pair system, a key is followed by a colon,
values are separated with commas, and key-value pairs are
separated by semicolons.
If settings are being set, values can look like subkey=value.
It is a pair with a sub-key and a sub-value, separated by an
equal sign.
These are called sub-key-value pairs, and are in place of values
in a key-value system.
They can look like this:
"key1:skey1=sval1,skey2=sval2;..."
Key/Value Naming:
Plots and subplots are indicated above with plot# and subplot#,
respectively. They are simply IDs for the plots and subplots.
You define them with --plot-define-plots and --plot-define-subplots.
They can take on any name - for instance:
--plot-define-plots="a,b" --plot-define-subplots="a:1,2;b:3,4"
Hierarchy is done by starting with the highest hierarchy, and then
adding additional lower levels to the right, separated by pipes.
For instance:
high|medium|low
In the case of plots and subplots, the hierarchy is as follows:
plot|subplot|attribute
This is the naming scheme for keys in key/value pairs.
Argument Definitions:
Note that arguments can be specified multiple times to reduce
the length of one argument. For instance:
--plot-define-plots="plot1" --plot-define-plots="plot2"
is the same as
--plot-define-plots="plot1,plot2"
This works regardless of argument order. This is encouraged, since
this helps with argument organization. (In particular, arguments
dealing with a specific plot or subplot can be grouped together.)
--plot-define-plots
Value list of plot IDs.
This argument is REQUIRED to produce a plot.
Example:
"plot1,abc2,superplot3"
--plot-define-subplots
Key-value pairs of plot IDs and subplot IDs.
This argument is REQUIRED to produce a plot.
Example (using above plot IDs):
"plot1:sub1,sub2;abc2:abc,def;superplot3:s1,s2"
--plot-define-axes
Key-value pairs of plot/subplot IDs, and axes properties
defined as sub-key-value pairs. This is the first argument to
use key hierarchy naming.
Available hierarchy attributes:
plot|subplot|x
X axis settings.
plot|subplot|y
Y axis settings.
Available attributes (sub-key-value pairs):
ticks=#
Define number of ticks.
label=LABEL
Define the axis label.
Example (using above info):
"plot1|sub1|x:ticks=5,label=Hello world"
--plot-define-data
Key-value pairs of plot/subplot IDs, with data fields as
values. This also uses key hierarchy naming.
This argument is REQUIRED to produce a plot.
Available hierarchy attributes:
plot|subplot|x
X plotting settings.
plot|subplot|y
Y plotting settings.
Values:
Data field elements. Specified using a different hierarchy
system:
STEP/TYPE|FIELD
STEP/TYPE is either "ges" or "anl", but not both.
For instance: ges|bc_total|mean
Example (using above info):
"abc2|abc|x:timestamp;abc2|abc|y:ges|bc_total|mean,ges|bc_total|stddev"
--plot-define-title
Key-value pairs of plot/subplot IDs, with titles as values. This
also uses key hierarchy naming, but in a slightly different way.
Available hierarchy attributes:
plot|title
Title for the whole plot.
plot|subplot|title
Title for the subplot.
plot|subplot|legend
Title for the subplot's legend.
Values:
The title for the specified title attribute.
Example (using above info):
"abc2|title:Title;abc2|abc|title:SubTitle;abc2|abc|legend:LegendTitle"
--plot-define-output
Key-value pairs of plot and output paths.
Example (using above info):
"plot1:myplot1.png;abc2:abcplot.png;superplot3:super.png"
--plot-define-settings
Key-value pairs of plot IDs, and settings defined as
sub-key-value pairs.
This argument is REQUIRED to produce a plot.
Available attributes (sub-key-value pairs):
target_size=#x#
Define the output plot target size.
dpi=#
Define the output plot DPI.
Example (using above info):
"plot1:target_size=595x700,dpi=50"
--plot-define-custom-vars
Key-value pairs of custom variables to be used in the plot
title and output file name. Variables are case insensitive.
The suggested variable definition and usage standard is
using lowercase for variable definition, and uppercase for
variable usage in the title and/or output file name.
Example:
"expid:exp_2;author:theauthor"
Applying custom variables to plot title and output file name:
--plot-define-title "plot1:Experiment %EXPID% - By %AUTHOR%"
Result:
Experiment exp_2 - By theauthor
--plot-define-output "plot1:plot_%EXPID%-%AUTHOR%.png"
Result:
plot_exp_2-theauthor.png
--plot-make-dirs
If specified, automatically make non-existent directories,
as needed. No additional arguments or options needed.
NOTE: These options are advanced - although you could (potentially)
plot using these options, it would probably be very painful!
All of these options can be defined via config file, which is
much easier and less verbose! These options are meant to be
used in conjunction with a config file for overriding certain
configuration options and testing them.
"""),
formatter_class=argparse.RawDescriptionHelpFormatter)
add_list_args(plot_parser, True)
add_dump_args(plot_parser, True)
add_plot_args(plot_parser, False)
# [config]
config_parser = subparsers.add_parser('config', help='Displays, loads, and saves the configuration.', \
description = textwrap.dedent("""\
Displays, loads, and saves the configuration.
Inherits arguments from [list], [dump], and [plot]. [list], [dump],
and [plot] arguments may be required to use [config]. For information
on [list], [dump], or [plot] arguments, see their respective help.
"""), \
formatter_class=argparse.RawDescriptionHelpFormatter)
add_list_args(config_parser, True)
add_dump_args(config_parser, True)
add_plot_args(config_parser, True)
add_config_args(config_parser, False)
return parser
def parse_to_config(parse):
# Init a few variables:
pyradmon_config = {}
plot_dict = {}
## Core args
# First, examine the core arguments.
if isset_obj("config_file", parse):
# OK, check if the file exists.
if os.path.isfile(parse.config_file):
# Attempt to load
res = config.load(parse.config_file)
if res == None:
print "ERROR: Could not open configuration file!"
return (None, None, None)
pyradmon_config = res[0]
plot_dict = res[1]
else:
print "ERROR: Configuration file path does not exist!"
return (None, None, None)
# Validate configuration
config.validate(pyradmon_config, plot_dict)
# Now check for logging args!
file_enabled = False
if isset_obj("logging_output", parse):
logging_output = parse.logging_output
if logging_output != "":
logging_output = logging_output.split(",")
logging_output = [x.strip() for x in logging_output]
final_logging_output = []
for log_o in logging_output:
if log_o == 'file':
file_enabled = True
elif log_o == 'stdout':
final_logging_output.append(sys.stdout)
elif log_o == 'stderr':
final_logging_output.append(sys.stderr)
else:
print "ERROR: Invalid logging output! Valid output: stdout, stderr, file"
return (None, None, None)
logging_output = final_logging_output
else:
logging_output = [ sys.stdout ]
if logging_output and file_enabled and isset_obj("logging_file", parse):
logging_file = parse.logging_file
else:
logging_file = None
if isset_obj("logging_level", parse):
logging_level = parse.logging_level
logging_level = logging_level.strip()
if logging_level == "INFO":
logging_level = logging.INFO
elif logging_level == "WARNING":
logging_level = logging.WARNING
elif logging_level == "ERROR":
logging_level = logging.ERROR
elif logging_level == "CRITICAL":
logging_level = logging.CRITICAL
elif logging_level == "DEBUG":
logging_level = logging.DEBUG
else:
print "ERROR: Invalid logging level specified!"
print "Valid levels: INFO, WARNING, ERROR, CRITICAL, DEBUG"
return (None, None, None)
else:
logging_level = logging.INFO
if isset_obj("mp_disable", parse):
pyradmon_config['mp_disable'] = parse.mp_disable
if isset_obj("mp_priority_mode", parse):
mp_priority_mode = parse.mp_priority_mode
mp_priority_mode = mp_priority_mode.strip()
if mp_priority_mode == "GENEROUS":
pyradmon_config['mp_priority_mode'] = "GENEROUS"
elif mp_priority_mode == "NORMAL":
pyradmon_config['mp_priority_mode'] = "NORMAL"
elif mp_priority_mode == "AGGRESSIVE":
pyradmon_config['mp_priority_mode'] = "AGGRESSIVE"
elif mp_priority_mode == "EXTREME":
pyradmon_config['mp_priority_mode'] = "EXTREME"
elif mp_priority_mode == "NUCLEAR":
pyradmon_config['mp_priority_mode'] = "NUCLEAR"
else:
print "ERROR: Invalid multiprocessing (mp) priority mode specified!"
print "Valid levels: GENEROUS, NORMAL, AGGRESSIVE, EXTREME, NUCLEAR"
return (None, None, None)
else:
pyradmon_config['mp_priority_mode'] = "NORMAL"
if isset_obj("mp_cpu_limit", parse):
if (parse.mp_cpu_limit).isdigit():
pyradmon_config['mp_cpu_limit'] = int(parse.mp_cpu_limit)
else:
print "ERROR: Invalid multiprocessing (mp) CPU limit! The CPU limit"
print "must specify an integer number of CPUs to limit use to."
return (None, None, None)
# We're ready - let's set up logging!
logger = log.init(logging_level, logging_output, logging_file)
# From now on, we'll stick to using the log module to print stuff
# out.
## Config args, part 1
if parse.verb == "config":
# We will only read --config-load here. All others will be
# checked at the end.
if isset_obj("config_load", parse):
if isset_obj("config_file", parse):
warn("Detected --config-load when --config-file is already specified! --config-load will override the configuration file specified in --config-file. You should only specify one argument, preferrably --config-file.")
# OK, check if the file exists.
if os.path.isfile(parse.config_load):
# Attempt to load
res = config.load(parse.config_load)
if res == None:
critical("ERROR: Could not open configuration file!")
return (None, None, None)
pyradmon_config = res[0]
plot_dict = res[1]
else:
critical("ERROR: Configuration file path does not exist!")
return (None, None, None)
# Validate configuration
config.validate(pyradmon_config, plot_dict)
## Plot args
# FUN TIME
if parse.verb == "plot" or parse.verb == "config":
if isset_obj("plot_define_plots", parse):
plots = ",".join(parse.plot_define_plots).split(",")
# Cleanup
plots = [x.strip() for x in plots]
for plot in plots:
if not plot in plot_dict:
if plot == "":
warn("Invalid plot ID detected - plot ID can't be blank!")
warn("This plot definition will be skipped.")
continue
plot_dict[plot] = {}
if isset_obj("plot_define_subplots", parse):
# "plot1:sub1,sub2;abc2:abc,def;superplot3:s1,s2"
subplots_def = ";".join(parse.plot_define_subplots).split(";")
# Cleanup
subplots_def = [x.strip() for x in subplots_def]
for subplot_def in subplots_def:
# Chunk: plot1:sub1,sub2
subplot_def_split = subplot_def.split(":")
subplot_def_split = [x.strip() for x in subplot_def_split]
# SAAAAAAAANITY CHECK!!!!!!!1111
# Sanity check 1: do we have 2 elements?
if len(subplot_def_split) != 2:
warn("Invalid subplot definition detected - invalid key-value pair '%s'!" % subplot_def)
warn("(Key-value pair should be key:value - make sure there are no extra colons!)")
warn("This subplot definition will be skipped.")
continue
# OK, now separate it out!
subplot_def_plot = subplot_def_split[0]
subplot_def_subplots = subplot_def_split[1]
# Sanity check 2: does the plot named exist?!?
if not subplot_def_plot in plot_dict:
warn("Invalid subplot definition detected - the plot specified, '%s', does not exist!" % subplot_def_plot)
warn("Ensure spelling is correct. If it is a new plot, make sure it is defined.")
warn("This subplot definition will be skipped.")
continue
# OK, let's process subplots.
subplot_def_subplots = subplot_def_subplots.split(",")
subplot_def_subplots = [x.strip() for x in subplot_def_subplots]
# Prep plot_dict
if not "plots" in plot_dict[subplot_def_plot]:
plot_dict[subplot_def_plot]["plots"] = []
# Add away!
for subplot_def_subplot in subplot_def_subplots:
plot_dict[subplot_def_plot]["plots"].append({ subplot_def_subplot : {} })
# Done!
if isset_obj("plot_define_axes", parse):
# "plot1|sub1|x:ticks=5,label=Hello world"
axes_def = ";".join(parse.plot_define_axes).split(";")
# Cleanup
axes_def = [x.strip() for x in axes_def]
for axis_def in axes_def:
# Chunk: plot1|sub1|x:ticks=5,label=Hello world
axis_def_split = axis_def.split(":")
axis_def_split = [x.strip() for x in axis_def_split]
# SAAAAAAAANITY CHECK!!!!!!!1111
# Sanity check 1: do we have 2 elements?
if len(axis_def_split) != 2:
warn("Invalid axis definition detected - invalid key-value pair '%s'!" % axis_def)
warn("(Key-value pair should be key:value - make sure there are no extra colons!)")
warn("This axis definition will be skipped.")
continue
# OK, now separate it out!
# Chunk: plot1|sub1|x --> [plot1, sub1, x]
axis_def_plot_subplot_axis = axis_def_split[0].split("|")
# Chunk: ticks=5,label=Hello world
axis_def_attrs = axis_def_split[1]
# Sanity check 2: does the plot/subplot/axis key have 3 elements?
if len(axis_def_plot_subplot_axis) != 3:
warn("Invalid axis definition detected - the key is invalid! It should only have")
warn("3 elements - plot|subplot|x/y!")
warn("This axis definition will be skipped.")
continue
# OK, let's separate that out!
axis_def_plot = axis_def_plot_subplot_axis[0]
axis_def_subplot = axis_def_plot_subplot_axis[1]
axis_def_axis = axis_def_plot_subplot_axis[2].lower()
# Sanity check 3: does the plot/subplot named exist?!?
if not axis_def_plot in plot_dict:
warn("Invalid axis definition detected - the plot specified, '%s', does not exist!" % axis_def_plot)
warn("Ensure spelling is correct. If it is a new plot, make sure it is defined.")
warn("This axis definition will be skipped.")
continue
# OK, plot exists. How about subplot?
# We have to do some strange magic here...
axis_def_subplot_found = False
axis_def_subplot_dat = None
for axis_def_subplot_dict in plot_dict[axis_def_plot]['plots']:
if axis_def_subplot in axis_def_subplot_dict:
axis_def_subplot_dat = axis_def_subplot_dict[axis_def_subplot]
axis_def_subplot_found = True
break
if not axis_def_subplot_found:
warn("Invalid axis definition detected - the subplot specified, '%s', does not exist!" % axis_def_subplot)
warn("Ensure spelling is correct. If it is a new subplot, make sure it is defined and")
warn("in the right subplot. This axis definition will be skipped.")
continue
# Sanity check 4: Is the axis valid?
if axis_def_axis != "x" and axis_def_axis != "y":
warn("Invalid axis definition detected - the axis specified, '%s', is invalid!" % axis_def_axis)
warn("'x' and 'y' are the only axes allowed. This axis definition will be skipped.")
continue
# OK, let's setup shop.
if not "axes" in axis_def_subplot_dat:
axis_def_subplot_dat["axes"] = {}
if not axis_def_axis in axis_def_subplot_dat["axes"]:
axis_def_subplot_dat["axes"][axis_def_axis] = {}
# OK, let's process attributes.
axis_def_attrs = axis_def_attrs.split(",")
axis_def_attrs = [x.strip().split("=") for x in axis_def_attrs]
# Sanity check 5: Are these valid key-value pairs?
kvpair_bad = False
for kvpair in axis_def_attrs:
if len(kvpair) != 2:
warn("Invalid axis definition detected - the key/value subpair, '%s', is invalid!" % '='.join(kvpair))
kvpair_bad = True
if type(kvpair[0]) != str:
warn("Invalid axis definition detected - the key '%s' in the subpair, '%s', is a non-string!" % (str(kvpair[0]), '='.join(kvpair)))
if kvpair_bad:
warn("(Key-value subpair should be key=value - make sure there are no extra =s!)")
warn("This axis definition will be skipped.")
continue
# Whew, so much sanity lost from sanity checking!
# Install attributes!
for kvpair in axis_def_attrs:
if kvpair[0] == 'ticks':
axis_def_subplot_dat["axes"][axis_def_axis][kvpair[0]] = int(kvpair[1])
else:
axis_def_subplot_dat["axes"][axis_def_axis][kvpair[0]] = kvpair[1]
# Done!!!!!1111
if isset_obj("plot_define_data", parse):
# "abc2|abc|x:timestamp;abc2|abc|y:ges|bc_total|mean,ges|bc_total|stddev"
data_defs = ";".join(parse.plot_define_data).split(";")
# Cleanup
data_defs = [x.strip() for x in data_defs]
for data_def in data_defs:
# Chunk: abc2|abc|x:timestamp
# Chunk: abc2|abc|y:ges|bc_total|mean,ges|bc_total|stddev
data_def_split = data_def.split(":")
data_def_split = [x.strip() for x in data_def_split]
# SAAAAAAAANITY CHECK!!!!!!!1111
# Sanity check 1: do we have 2 elements?
if len(data_def_split) != 2:
warn("Invalid data definition detected - invalid key-value pair '%s'!" % data_def)
warn("(Key-value pair should be key:value - make sure there are no extra colons!)")
warn("This data definition will be skipped.")
continue
# OK, now separate it out!
# Chunk: abc2|abc|x --> [abc2, abc, x]
data_def_plot_subplot_attr = data_def_split[0].split("|")
# Chunk: timestamp
data_def_attrs = data_def_split[1]
# Sanity check 2: does the plot/subplot/axis key have 3 elements?
if len(data_def_plot_subplot_attr) != 3:
warn("Invalid data definition detected - the key is invalid! It should only have")
warn("3 elements - plot|subplot|x/y!")
warn("This data definition will be skipped.")
continue
# OK, let's separate that out!
data_def_plot = data_def_plot_subplot_attr[0]
data_def_subplot = data_def_plot_subplot_attr[1]
data_def_attr_name = data_def_plot_subplot_attr[2].lower()
# Sanity check 3: does the plot/subplot named exist?!?
if not data_def_plot in plot_dict:
warn("Invalid data definition detected - the plot specified, '%s', does not exist!" % data_def_plot)
warn("Ensure spelling is correct. If it is a new plot, make sure it is defined.")
warn("This data definition will be skipped.")
continue
# OK, plot exists. How about subplot?
# We have to do some strange magic here...
data_def_subplot_found = False
data_def_subplot_dat = None
for data_def_subplot_dict in plot_dict[data_def_plot]['plots']:
if data_def_subplot in data_def_subplot_dict:
data_def_subplot_dat = data_def_subplot_dict[data_def_subplot]
data_def_subplot_found = True
break
if not data_def_subplot_found:
warn("Invalid data definition detected - the subplot specified, '%s', does not exist!" % data_def_subplot)
warn("Ensure spelling is correct. If it is a new subplot, make sure it is defined and")
warn("in the right subplot. This data definition will be skipped.")
continue
# Sanity check 4: Is the attr field valid?
valid_attrs = [ "x", "y", "colors", "labels" ]
if not data_def_attr_name in valid_attrs:
warn("Invalid data definition detected - the attribute specified, '%s', is invalid!" % data_def_data)
warn("%s are the only attributes allowed. This data definition will be skipped." % (",".join(valid_attrs[:-1]) + ", and " + valid_attrs[-1]))
continue
# OK, let's setup shop.
if not "data" in data_def_subplot_dat:
data_def_subplot_dat["data"] = {}
# OK, let's process attributes.
data_def_attrs = data_def_attrs.split(",")
# Whew, so much sanity lost from sanity checking!
# (But not as bad as the axes part...)
# Install attributes!
if len(data_def_attrs) == 1:
data_def_subplot_dat["data"][data_def_attr_name] = data_def_attrs[0]
else:
if not data_def_attr_name in data_def_subplot_dat["data"]:
data_def_subplot_dat["data"][data_def_attr_name] = []
for attr_val in data_def_attrs:
data_def_subplot_dat["data"][data_def_attr_name].append(attr_val)
# Done!
if isset_obj("plot_define_title", parse):
# "abc2|title:Title;abc2|abc|title:SubTitle;abc2|abc|legend:LegendTitle"
title_defs = ";".join(parse.plot_define_title).split(";")
# Cleanup
title_defs = [x.strip() for x in title_defs]
for title_def in title_defs:
# Chunk: abc2|title:Title
# Chunk: abc2|abc|title:SubTitle
# Chunk: abc2|abc|legend:LegendTitle
title_def_split = title_def.split(":")
title_def_split = [x.strip() for x in title_def_split]
# SAAAAAAAANITY CHECK!!!!!!!1111
# Sanity check 1: do we have 2 elements?
if len(title_def_split) != 2:
warn("Invalid title definition detected - invalid key-value pair '%s'!" % title_def)
warn("(Key-value pair should be key:value - make sure there are no extra colons!)")
warn("This title definition will be skipped.")
continue
# OK, now separate it out!
# Chunk: abc2|title --> [abc2, title]
# Chunk: abc2|abc|title --> [abc2, abc, title]
# Chunk: abc2|abc|legend --> [abc2, abc, legend]
title_def_plot_subplot_attr = title_def_split[0].split("|")
# Chunk: Title
# Chunk: SubTitle
# Chunk: LegendTitle
title_def_title = title_def_split[1]
# Sanity check 2: does the plot(/subplot)/attr key have 2 or 3 elements?
if (len(title_def_plot_subplot_attr) != 3) and (len(title_def_plot_subplot_attr) != 2):
warn("Invalid title definition detected - the key is invalid! It should only have")
warn("2-3 elements - plot|title or plot|subplot|title/legend!")
warn("This title definition will be skipped.")
continue
# OK, let's separate that out!
# ...except this varies, so let's take it a bit slow.
title_def_plot_subplot = [ title_def_plot_subplot_attr[0] ]
if len(title_def_plot_subplot_attr) == 3:
title_def_plot_subplot.append(title_def_plot_subplot_attr[1])
title_def_attr_name = title_def_plot_subplot_attr[-1].lower()
# Sanity check 3: does the plot/subplot named exist?!?
if not title_def_plot_subplot[0] in plot_dict:
warn("Invalid title definition detected - the plot specified, '%s', does not exist!" % title_def_plot)
warn("Ensure spelling is correct. If it is a new plot, make sure it is defined.")
warn("This title definition will be skipped.")
continue
# OK, plot exists. How about subplot?
# We have to do some strange magic here...
# ...but only if we need to.
if len(title_def_plot_subplot) == 2:
title_def_subplot_found = False
title_def_subplot_dat = None
for title_def_subplot_dict in plot_dict[title_def_plot_subplot[0]]['plots']:
if title_def_plot_subplot[1] in title_def_subplot_dict:
title_def_subplot_dat = title_def_subplot_dict[title_def_plot_subplot[1]]
title_def_subplot_found = True
break
if not title_def_subplot_found:
warn("Invalid title definition detected - the subplot specified, '%s', does not exist!" % title_def_subplot)
warn("Ensure spelling is correct. If it is a new subplot, make sure it is defined and")
warn("in the right subplot. This title definition will be skipped.")
continue
# Sanity check 4: Is the attr field valid?
# Title is valid for both, but legend is only valid for
# plot|subplot.
if title_def_attr_name != "title" and not ( (len(title_def_plot_subplot) == 2) and (title_def_attr_name == "legend") ):
warn("Invalid title definition detected - the attribute specified, '%s', is invalid!" % title_def_title)
warn("legend (for plot|subplot) and title are the only attributes allowed.")
warn("This title definition will be skipped.")
continue
# OK, let's go!
if len(title_def_plot_subplot) == 1:
plot_dict[title_def_plot_subplot[0]]["title"] = title_def_title
else:
if title_def_attr_name == "legend":
if not isset_obj("legend", title_def_subplot_dat):
title_def_subplot_dat["legend"] = {}
title_def_subplot_dat["legend"]["title"] = title_def_title
else:
title_def_subplot_dat["title"] = title_def_title
# Done!
if isset_obj("plot_define_output", parse):
# "plot1:myplot1.png;abc2:abcplot.png;superplot3:super.png"
output_defs = ";".join(parse.plot_define_output).split(";")
# Cleanup
output_defs = [x.strip() for x in output_defs]
for output_def in output_defs:
# Chunk: plot1:myplot1.png
output_def_split = output_def.split(":")
output_def_split = [x.strip() for x in output_def_split]
# SAAAAAAAANITY CHECK!!!!!!!1111
# Sanity check 1: do we have 2 elements?
if len(output_def_split) != 2:
warn("Invalid output definition detected - invalid key-value pair '%s'!" % output_def)
warn("(Key-value pair should be key:value - make sure there are no extra colons!)")
warn("This output definition will be skipped.")
continue
# OK, now separate it out!
# Chunk: plot1
output_def_plot = output_def_split[0]
# Chunk: myplot1.png
output_def_output = output_def_split[1]
# Sanity check 2: does the plot named exist?!?
if not output_def_plot in plot_dict:
warn("Invalid output definition detected - the plot specified, '%s', does not exist!" % output_def_plot)
warn("Ensure spelling is correct. If it is a new plot, make sure it is defined.")
warn("This output definition will be skipped.")
continue
# OK, plot exists. This is super easy, so we're free to
# fly from here!
plot_dict[output_def_plot]["output"] = output_def_output
# Done!
if isset_obj("plot_define_settings", parse):
# "plot1:target_size=595x700,dpi=50"
settings_def = ";".join(parse.plot_define_settings).split(";")
# Cleanup
settings_def = [x.strip() for x in settings_def]
for settings_def in settings_def:
# Chunk: plot1:target_size=595x700,dpi=50
settings_def_split = settings_def.split(":")
settings_def_split = [x.strip() for x in settings_def_split]
# SAAAAAAAANITY CHECK!!!!!!!1111
# Sanity check 1: do we have 2 elements?
if len(settings_def_split) != 2:
warn("Invalid settings definition detected - invalid key-value pair '%s'!" % settings_def)
warn("(Key-value pair should be key:value - make sure there are no extra colons!)")
warn("This settings definition will be skipped.")
continue
# OK, now separate it out!
# Chunk: plot1
settings_def_plot = settings_def_split[0]
# Chunk: target_size=595x700,dpi=50
settings_def_attrs = settings_def_split[1]
# Sanity check 2: does the plot/subplot named exist?!?
if not settings_def_plot in plot_dict:
warn("Invalid settings definition detected - the plot specified, '%s', does not exist!" % settings_def_plot)
warn("Ensure spelling is correct. If it is a new plot, make sure it is defined.")
warn("This settings definition will be skipped.")
continue
# OK, let's setup shop.
if not "settings" in plot_dict[settings_def_plot]:
plot_dict[settings_def_plot]["settings"] = {}
# OK, let's process attributes.
settings_def_attrs = settings_def_attrs.split(",")
settings_def_attrs = [x.strip().split("=") for x in settings_def_attrs]
# Sanity check 3: Are these valid key-value pairs?
kvpair_bad = False
for kvpair in settings_def_attrs:
if len(kvpair) != 2:
warn("Invalid settings definition detected - the key/value subpair, '%s', is invalid!" % '='.join(kvpair))
kvpair_bad = True
if type(kvpair[0]) != str:
warn("Invalid settings definition detected - the key '%s' in the subpair, '%s', is a non-string!" % (str(kvpair[0]), '='.join(kvpair)))
if kvpair_bad:
warn("(Key-value subpair should be key=value - make sure there are no extra =s!)")
warn("This settings definition will be skipped.")
continue
# Whew, so much sanity lost from sanity checking!
# Install attributes!
for kvpair in settings_def_attrs:
if kvpair[0] == 'target_size':
size_arr = [ int(x) for x in kvpair[1].lower().split("x") ]
if len(size_arr) != 2:
warn("Key-value pair for target_size is invalid - should be INTxINT!")
warn("Skipping key-value pair.")
continue
plot_dict[settings_def_plot]["settings"]["target_size"] = size_arr
elif kvpair[0] == 'dpi':
if not kvpair[1].isdigit():
warn("Key-value pair for dpi is invalid - value is not an int!")
warn("Skipping key-value pair.")
continue
plot_dict[settings_def_plot]["settings"][kvpair[0]] = int(kvpair[1])
else:
# Strange stuff
warn("Key '%s' in not defined - setting anyway." % str(kvpair[0]))
plot_dict[settings_def_plot]["settings"][kvpair[0]] = kvpair[1]
# Done!!!!!1111
if isset_obj("plot_define_custom_vars", parse):
# "customvar1:value1;customvar2:value2"
custom_var_defs = ";".join(parse.plot_define_custom_vars).split(";")
# Cleanup
custom_var_defs = [x.strip() for x in custom_var_defs]
for custom_var_def in custom_var_defs:
# Chunk: customvar1:value1
custom_var_def_split = custom_var_def.split(":")
custom_var_def_split = [x.strip() for x in custom_var_def_split]
# Sanity check!
# Sanity check 1: do we have 2 elements?
if len(custom_var_def_split) != 2:
warn("Invalid custom variable definition detected - invalid key-value pair '%s'!" % output_def)
warn("(Key-value pair should be key:value - make sure there are no extra colons!)")
warn("This custom variable definition will be skipped.")
continue
# OK, now separate it out!
# Chunk: customvar1
custom_var_def_var = custom_var_def_split[0]
# Chunk: value1
custom_var_def_val = custom_var_def_split[1]
# OK, we got everything. This is super easy, so we're free to
# leap from here!
if "custom_vars" not in pyradmon_config:
pyradmon_config["custom_vars"] = {}
pyradmon_config["custom_vars"][custom_var_def_var] = custom_var_def_val
# Done!
if isset_obj("plot_make_dirs", parse) and parse.plot_make_dirs:
pyradmon_config["make_dirs"] = parse.plot_make_dirs
# Done!
## Dump args
if parse.verb == "dump" or parse.verb == "plot" or parse.verb == "config":
# --dump-columns
if isset_obj("dump_columns", parse):
for data_column in (parse.dump_columns).split(","):
data_column = data_column.strip()
if not data_column in SPECIAL_FIELDS:
if not ((data_column.startswith("ges|") or data_column.startswith("anl|"))):
die("ERROR: Invalid data column '%s' specified in --dump-columns! Must have ges| or anl| as a prefix." % data_column)
pyradmon_config["data_columns"] = parse.dump_columns
# --dump-all-channels
if isset_obj("dump_all_channels", parse) and parse.dump_all_channels:
pyradmon_config["data_all_channels"] = parse.dump_all_channels
# --dump-channels
if isset_obj("dump_channels", parse):
if isset_obj("dump_all_channels", parse) and parse.dump_all_channels:
die("ERROR: You can not specify --dump-all-channels and --dump-channels at the same time!")
for data_channel in (parse.dump_channels).split(","):
data_channel = data_channel.strip()
if not ( (data_channel.isdigit()) or \
( (len(data_channel.split("-")) == 2) and data_channel.split("-")[0].isdigit() and data_channel.split("-")[1].isdigit() ) ):
die("ERROR: Invalid data channel '%s' specified in --dump-channels! Must be a number or a numeric range (#-#)." % data_channel)
pyradmon_config["data_channels"] = parse.dump_channels
# --dump-assim-only
if isset_obj("dump_assim_only", parse):
pyradmon_config["data_assim_only"] = parse.dump_assim_only
# --dump-suppress-warnings
if isset_obj("dump_suppress_warnings", parse):
pyradmon_config["data_suppress_warnings"] = parse.dump_suppress_warnings
## List args
if parse.verb == "list" or parse.verb == "dump" or parse.verb == "plot" or parse.verb == "config":
# --data-single-date (action flag)
if isset_obj("data_single_date", parse):
pyradmon_config["data_start_date"] = parse.data_single_date
pyradmon_config["data_end_date"] = parse.data_single_date
# --data-path-format
if isset_obj("data_path_format", parse):
pyradmon_config["data_path_format"] = parse.data_path_format
# --data-experiment-id
if isset_obj("data_experiment_id", parse):
pyradmon_config["data_experiment_id"] = parse.data_experiment_id
# --data-start-date
if isset_obj("data_start_date", parse):
if isset_obj("data_single_date", parse):
die("ERROR: You can not specify --data-single-date and --data-start-date at the same time!")
# FORMAT: YYYY-MM-DD HHz
pyradmon_config["data_start_date"] = parse.data_start_date
if not (pyradmon_config["data_start_date"][:4].isdigit() and \
pyradmon_config["data_start_date"][5:7].isdigit() and \
pyradmon_config["data_start_date"][8:10].isdigit() and \
pyradmon_config["data_start_date"][11:13].isdigit()):
die("ERROR: Start date '%s' specified in --data-start-date is not valid! It must be in 'YYYY-MM-DD HHz' format!" % pyradmon_config["data_start_date"])
# --data-end-date
if isset_obj("data_end_date", parse):
if isset_obj("data_single_date", parse):
die("ERROR: You can not specify --data-single-date and --data-end-date at the same time!")
# FORMAT: YYYY-MM-DD HHz
pyradmon_config["data_end_date"] = parse.data_end_date
if not (pyradmon_config["data_end_date"][:4].isdigit() and \
pyradmon_config["data_end_date"][5:7].isdigit() and \
pyradmon_config["data_end_date"][8:10].isdigit() and \
pyradmon_config["data_end_date"][11:13].isdigit()):
die("ERROR: End date '%s' specified in --data-end-date is not valid! It must be in 'YYYY-MM-DD HHz' format!" % pyradmon_config["data_end_date"])
# --data-instrument-sat
if isset_obj("data_instrument_sat", parse):
pyradmon_config["data_instrument_sat"] = parse.data_instrument_sat
# --data-step
if isset_obj("data_step", parse):
pyradmon_config["data_step"] = parse.data_step
if not ((pyradmon_config['data_step'] == "anl") or \
(pyradmon_config['data_step'] == "ges") or \
(pyradmon_config['data_step'] == "anl|ges") or \
(pyradmon_config['data_step'] == "ges|anl")):
die("ERROR: Data step '%s' specified in --date-step is not valid! Must either be 'anl', 'ges', or the two combined with a pipe ('anl|ges')." % pyradmon_config["data_step"])
# --data-time-delta
if isset_obj("data_time_delta", parse):
pyradmon_config["data_time_delta"] = parse.data_time_delta
dtd_split = pyradmon_config['data_time_delta'].split(" ")
unit_valid = [ "s", "w", "m", "M", "h", "y", "d" ]
for dtd in dtd_split:
# s: seconds m: minutes h: hours d: days
# w: weeks M: months y: years
if not ( (dtd[-1] in unit_valid) and (dtd[:-1].isdigit()) ):
die("ERROR: Invalid delta time '%s' specified in --data-delta-time! Must be a # followed by a valid unit letter. ([s]ecs, [m]inutes, [h]ours, [d]ays, [w]eeks, [M]onths, [y]ears)" % pyradmon_config["data_delta_time"])
if isset_obj("config_unset", parse):
# "config.var1;plot1|subplot1"
config_unset_def = ";".join(parse.config_unset).split(";")
for config_def in config_unset_def:
if config_def.startswith("config."):
pyr_config_var = config_def.split("config.")[1:]
if len(pyr_config_var) > 1:
warn("WARNING: Detected strange unset entry, but continuing anyway.")
pyr_config_var = "config.".join(pyr_config_var)
else:
pyr_config_var = str(pyr_config_var[0])
info("Unsetting pyradmon_config variable %s." % pyr_config_var)
pyradmon_config.pop(pyr_config_var, None)
else:
plot_config_var = config_def.split("|")
info("Unsetting plot_dict variable %s." % "|".join(plot_config_var))
if not plot_config_var[0] in plot_dict:
warn("Invalid unset definition detected - the plot specified, '%s', does not exist!" % title_def_plot)
warn("Ensure spelling is correct. If it is a new plot, make sure it is defined.")
warn("This unset definition will be skipped.")
continue
# OK, plot exists. How about subplot?
# We have to do some strange magic here...
# ...but only if we need to.
if len(plot_config_var) >= 2:
plot_config_subplot_found = False
subplot_dat = None
for subplot_dict in plot_dict[plot_config_var[0]]['plots']:
if plot_config_var[1] in subplot_dict:
subplot_dat = subplot_dict[plot_config_var[1]]
plot_config_subplot_found = True
break
if not plot_config_subplot_found:
warn("Invalid unset definition detected - the subplot specified, '%s', does not exist!" % title_def_subplot)
warn("Ensure spelling is correct. If it is a new subplot, make sure it is defined and")
warn("in the right subplot. This unset definition will be skipped.")
continue
# Check
if len(plot_config_var) == 2:
plot_dict[plot_config_var[0]]['plots'].remove(subplot_dict)
else:
# Now recursively delete!
delete_keys_from_dict(subplot_dat, plot_config_var[2:])
else:
plot_dict.pop(plot_config_var[0], None)
#import pprint
#pprint.pprint(plot_dict)
#print plot_dict
return (pyradmon_config, plot_dict, parse)
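# Hedged illustration (not produced by the original code; values are taken from
# the example chunks in the comments above): roughly what plot_dict looks like
# after the --plot-define-* arguments have been parsed. The constant name
# EXAMPLE_PLOT_DICT is made up for this sketch.
EXAMPLE_PLOT_DICT = {
    "plot1": {
        "title": "Title",
        "output": "myplot1.png",
        "settings": {"target_size": [595, 700], "dpi": 50},
        "plots": [
            {"sub1": {
                "axes": {"x": {"ticks": 5, "label": "Hello world"}},
                "data": {"x": "timestamp",
                         "y": ["ges|bc_total|mean", "ges|bc_total|stddev"]},
                "title": "SubTitle",
                "legend": {"title": "LegendTitle"},
            }},
        ],
    },
}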
if __name__ == "__main__":
parser = make_argparser()
parse = parser.parse_args()
print parse_to_config(parse)
|
StarcoderdataPython
|
3322879
|
<gh_stars>1-10
__all__ = ["Sample", "Generator"]
import random
import itertools
import collections
from dataclasses import dataclass
from typing import Tuple, Sequence, Mapping, Optional
import torch
import utils
from utils import TensorMap
from datasets import Dialog
from datasets import DialogProcessor
from datasets import create_dataloader
from datasets import BatchData
from datasets import DialogDataset
from models import AbstractTDA
@dataclass
class Sample:
input: Dialog
output: Dialog
log_prob: float
@dataclass
class Generator:
model: AbstractTDA
processor: DialogProcessor
batch_size: int = 32
device: torch.device = torch.device("cpu")
global_step: int = 0
asv_tensor: utils.Stacked1DTensor = None
_num_instances: int = utils.private_field(default=None)
def __post_init__(self):
if self.asv_tensor is None:
self.asv_tensor = self.processor.tensorize_state_vocab("goal_state")
self.asv_tensor = self.asv_tensor.to(self.device)
def on_run_started(self):
return
def on_run_ended(self, samples: Sequence[Sample], stats: TensorMap
) -> Tuple[Sequence[Sample], TensorMap]:
return samples, stats
def on_batch_started(self, batch: BatchData) -> BatchData:
return batch
def validate_sample(self, sample: Sample):
return True
def on_batch_ended(self, samples: Sequence[Sample]) -> TensorMap:
return dict()
def generate_kwargs(self) -> dict:
return dict()
def prepare_batch(self, batch: BatchData) -> dict:
return {
"conv_lens": batch.conv_lens,
"sent": batch.sent.value,
"sent_lens": batch.sent.lens1,
"speaker": batch.speaker.value,
"goal": batch.goal.value,
"goal_lens": batch.goal.lens1,
"state": batch.state.value,
"state_lens": batch.state.lens1,
"asv": self.asv_tensor.value,
"asv_lens": self.asv_tensor.lens
}
def __call__(self, data: Optional[Sequence[Dialog]] = None,
num_instances: Optional[int] = None
) -> Tuple[Sequence[Sample], TensorMap]:
if data is None and num_instances is None:
raise ValueError(f"must provide a data source or "
f"number of instances.")
dataloader = None
if data is not None:
dataloader = create_dataloader(
dataset=DialogDataset(
data=data,
processor=self.processor
),
batch_size=self.batch_size,
drop_last=False,
shuffle=False
)
if num_instances is None:
num_instances = len(data)
self._num_instances = num_instances
self.on_run_started()
dataloader = (itertools.repeat(None) if dataloader is None
else itertools.cycle(dataloader))
cum_stats = collections.defaultdict(float)
samples = []
for batch in dataloader:
self.model.eval()
if batch is None:
batch_size = min(self.batch_size, num_instances - len(samples))
self.model.genconv_prior()
with torch.no_grad():
pred, info = self.model(
torch.tensor(batch_size).to(self.device),
**self.generate_kwargs()
)
else:
batch = batch.to(self.device)
batch_size = batch.batch_size
self.global_step += batch_size
batch = self.on_batch_started(batch)
self.model.genconv_post()
with torch.no_grad():
pred, info = self.model(self.prepare_batch(batch),
**self.generate_kwargs())
batch_samples = list(filter(self.validate_sample, (
Sample(*args) for args in
zip(map(self.processor.lexicalize_global, batch),
map(self.processor.lexicalize_global, pred),
info["logprob"])
)))
num_res = max(0, len(samples) + len(batch_samples) - num_instances)
if num_res > 0:
batch_samples = random.sample(batch_samples,
num_instances - len(samples))
batch_size = len(batch_samples)
self.global_step += batch_size
stats = self.on_batch_ended(batch_samples)
samples.extend(batch_samples)
for k, v in stats.items():
cum_stats[k] += v * batch_size
if len(samples) >= num_instances:
break
assert len(samples) == num_instances
cum_stats = {k: v / len(samples) for k, v in cum_stats.items()}
return self.on_run_ended(samples, cum_stats)
|
StarcoderdataPython
|
1715843
|
import logging
import time
from abc import ABC, abstractmethod
from contextlib import contextmanager
from functools import partial
from pathlib import Path
import numpy as np
from moviepy.video.io.ffmpeg_writer import FFMPEG_VideoWriter
from tqdm import tqdm
from PIL import Image
from tao.utils import vis
_GREEN = (18, 127, 15)
_GRAY = (218, 227, 218)
_BLACK = (0, 0, 0)
COLOR_BOX = COLOR_MASK = [255*x for x in (0.000, 0.447, 0.741)]
COLOR_TEXT = _GRAY
COLOR_TEXT_INACTIVE = _BLACK
COLOR_MASK_INACTIVE = COLOR_BOX_INACTIVE = _GRAY
WIDTH_BOX = 10
WIDTH_BOX_INACTIVE = 1
WIDTH_MASK = 2
BORDER_ALPHA_MASK = 0.9
WIDTH_MASK_INACTIVE = 1
class Tracker(ABC):
@property
def stateless(self):
return False
@abstractmethod
def init(self, image, box):
"""
Args:
image (np.array): Shape (height, width, num_channels). RGB image.
box (list of int): (x0, y0, x1, y1). 0-indexed coordinates from
top-left.
"""
pass
@abstractmethod
def update(self, image):
"""
Args:
image (np.array): Shape (height, width, num_channels). RGB image.
Returns:
box (list of int): (x0, y0, x1, y1). 0-indexed coordinates from
top-left.
score (float)
"""
pass
def track_yield(self,
img_files,
box,
yield_image=False,
**unused_extra_args):
"""
Args:
img_files (list of str/Path): Ordered list of image paths
box (list of int): (x0, y0, x1, y1). 0-indexed coordinates from
top-left.
yield_image (bool): Whether to yield the original image. Useful
if the caller wants to operate on images without having to
re-read them from disk.
Yields:
box (np.array): Shape (5, ), containing (x0, y0, x1, y1, score).
0-indexed coordinates from top-left.
tracker_time (float): Time elapsed in tracker.
extra_output (dict): Any extra tracker output, such as predicted masks.
image (optional, np.array): Image loaded from img_files; see
yield_image.
"""
for f, img_file in enumerate(img_files):
image = Image.open(img_file)
if not image.mode == 'RGB':
image = image.convert('RGB')
image = np.array(image)
start_time = time.time()
if f == 0:
self.init(image, box)
elapsed_time = time.time() - start_time
box = np.array([box[0], box[1], box[2], box[3], float('inf')])
extra_output = {}
else:
output = self.update(image)
assert len(output) in (2, 3)
box, score = output[:2]
extra_output = output[2] if len(output) == 3 else {}
elapsed_time = time.time() - start_time
box = np.array([box[0], box[1], box[2], box[3], score])
if yield_image:
yield box, elapsed_time, extra_output, image
else:
yield box, elapsed_time, extra_output
@contextmanager
def videowriter(self,
output_video,
width,
height,
fps=30,
ffmpeg_params=None):
if isinstance(output_video, Path):
output_video = str(output_video)
if ffmpeg_params is None:
ffmpeg_params = [
'-vf', "scale=trunc(iw/2)*2:trunc(ih/2)*2", '-pix_fmt',
'yuv420p'
]
with FFMPEG_VideoWriter(
output_video,
size=(width, height),
fps=fps,
ffmpeg_params=ffmpeg_params) as writer:
yield writer
def vis_single_prediction(self,
image,
box,
mask=None,
label=None,
mask_border_width=WIDTH_MASK,
mask_border_alpha=BORDER_ALPHA_MASK,
box_color=COLOR_BOX,
text_color=COLOR_TEXT,
mask_color=COLOR_MASK):
"""
Args:
image (np.array)
box (list-like): x0, y0, x1, y1, score
mask (np.array): Shape (height, width)
"""
if mask is None:
image = vis.vis_bbox(
image, (box[0], box[1], box[2] - box[0], box[3] - box[1]),
fill_color=box_color)
if label is None:
text = f'Object: {box[4]:.02f}'
else:
# text = f'{label}: {box[4]:.02f}'
text = f'{label}'
image = vis.vis_class(image, (box[0], box[1] - 2),
text,
font_scale=0.75,
text_color=text_color)
# if box[4] < 0.8: # Draw gray masks when below threshold.
# mask_color = [100, 100, 100]
if mask is not None:
image = vis.vis_mask(
image,
mask,
mask_color,
border_thick=mask_border_width,
border_alpha=mask_border_alpha)
return image
def vis_image(self,
image,
box,
mask=None,
label=None,
other_boxes=[],
other_masks=[],
other_labels=[],
vis_threshold=0.1):
"""
Args:
image (np.array)
box (list-like): x0, y0, x1, y1, score
mask (np.array): Shape (height, width)
other_boxes (list[list-like]): Contains alternative boxes that
were not selected.
other_masks (list[list-like]): Contains masks for alternative
boxes that were not selected.
"""
return self.vis_single_prediction(image, box, mask, label=label)
def track(self,
img_files,
box,
show_progress=False,
output_video=None,
output_video_fps=30,
visualize_subsample=1,
visualize_threshold=0.1,
return_masks=False,
**tracker_args):
"""
Like self.track_yield, but collects all tracking results in numpy arrays.
Args:
img_files (list of str/Path): Ordered list of image paths
box (list of int): (x0, y0, x1, y1). 0-indexed coordinates from
top-left.
output_video (str or Path, optional): If set, write a visualization video to this path.
return_masks (bool): If false, don't return masks. This is helpful
for OxUvA, where collecting all the masks may use too much
memory.
Returns:
boxes (np.array): Shape (num_frames, 5), contains
(x0, y0, x1, y1, score) for each frame. 0-indexed coordinates
from top-left.
masks (list or None): Per-frame masks when return_masks is True,
otherwise None.
times (np.array): Shape (num_frames,), contains timings for each
frame.
"""
frame_num = len(img_files)
boxes = np.zeros((frame_num, 5))
if return_masks:
masks = [None] * frame_num
times = np.zeros(frame_num)
pbar = partial(tqdm, total=len(img_files), disable=not show_progress)
if output_video is None:
for f, (box, elapsed_time, extra) in enumerate(
pbar(self.track_yield(img_files, box, **tracker_args))):
boxes[f] = box
times[f] = elapsed_time
if return_masks:
masks[f] = extra.get('mask', None)
else:
output_video = Path(output_video)
output_video.parent.mkdir(exist_ok=True, parents=True)
# Some videos don't play in Firefox and QuickTime if '-pix_fmt
# yuv420p' is not specified, and '-pix_fmt yuv420p' requires that
# the dimensions be even, so we need the '-vf scale=...' filter.
width, height = Image.open(img_files[0]).size
with self.videowriter(
output_video, width=width, height=height,
fps=output_video_fps) as writer:
track_outputs = self.track_yield(
img_files, box, yield_image=True, **tracker_args)
for f, (box, elapsed_time, extra, image) in enumerate(
pbar(track_outputs)):
mask = extra.get('mask', None)
if mask is not None and mask.shape != image.shape[:2]:
logging.warning(
f'Resizing mask (shape {mask.shape}) to match '
f'image (shape {image.shape[:2]})')
new_h, new_w = image.shape[:2]
mask = np.asarray(
Image.fromarray(mask).resize(
(new_w, new_h), resample=Image.NEAREST))
other_boxes = extra.get('other_boxes', [])
other_masks = extra.get('other_masks', [])
label = extra.get('label', None)
other_labels = extra.get('other_labels', [])
if (f % visualize_subsample) == 0:
writer.write_frame(
self.vis_image(image,
box,
mask,
label=label,
other_boxes=other_boxes,
other_masks=other_masks,
other_labels=other_labels,
vis_threshold=visualize_threshold))
boxes[f] = box
times[f] = elapsed_time
if return_masks:
masks[f] = mask
if return_masks:
return boxes, masks, times
else:
return boxes, None, times
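# Hedged illustration (not part of the original codebase): a minimal Tracker
# subclass showing the init()/update() contract that track_yield() relies on.
# It simply re-reports the initial box with a placeholder score; a real
# tracker would estimate a new box from each incoming image.
class StaticBoxTracker(Tracker):
    def init(self, image, box):
        # Remember the initial (x0, y0, x1, y1) box.
        self._box = list(box)

    def update(self, image):
        # Return the unchanged box and a dummy confidence score.
        return self._box, 1.0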
|
StarcoderdataPython
|
4824712
|
<filename>opencti/src/opencti.py
import os
import yaml
import time
import urllib.request
from datetime import datetime
from pycti import OpenCTIConnectorHelper, get_config_variable
class OpenCTI:
def __init__(self):
# Instantiate the connector helper from config
config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/config.yml"
config = (
yaml.load(open(config_file_path), Loader=yaml.SafeLoader)
if os.path.isfile(config_file_path)
else {}
)
self.helper = OpenCTIConnectorHelper(config)
# Extra config
self.opencti_sectors_file_url = get_config_variable(
"CONFIG_SECTORS_FILE_URL", ["config", "sectors_file_url"], config
)
self.opencti_geography_file_url = get_config_variable(
"CONFIG_GEOGRAPHY_FILE_URL", ["config", "geography_file_url"], config
)
self.opencti_interval = get_config_variable(
"CONFIG_INTERVAL", ["config", "interval"], config, True
)
self.update_existing_data = get_config_variable(
"CONNECTOR_UPDATE_EXISTING_DATA",
["connector", "update_existing_data"],
config,
)
def get_interval(self):
return int(self.opencti_interval) * 60 * 60 * 24
def run(self):
self.helper.log_info("Fetching OpenCTI datasets...")
while True:
try:
# Get the current timestamp and check
timestamp = int(time.time())
current_state = self.helper.get_state()
if current_state is not None and "last_run" in current_state:
last_run = current_state["last_run"]
self.helper.log_info(
"Connector last run: "
+ datetime.utcfromtimestamp(last_run).strftime(
"%Y-%m-%d %H:%M:%S"
)
)
else:
last_run = None
self.helper.log_info("Connector has never run")
# If the last_run is more than interval-1 day
if last_run is None or (
(timestamp - last_run)
> ((int(self.opencti_interval) - 1) * 60 * 60 * 24)
):
now = datetime.utcfromtimestamp(timestamp)
friendly_name = "OpenCTI datasets run @ " + now.strftime(
"%Y-%m-%d %H:%M:%S"
)
work_id = self.helper.api.work.initiate_work(
self.helper.connect_id, friendly_name
)
try:
sectors_data = urllib.request.urlopen(
self.opencti_sectors_file_url
).read()
self.helper.send_stix2_bundle(
sectors_data.decode("utf-8"),
entities_types=self.helper.connect_scope,
update=self.update_existing_data,
work_id=work_id,
)
except Exception as e:
self.helper.log_error(str(e))
try:
geography_data = urllib.request.urlopen(
self.opencti_geography_file_url
).read()
self.helper.send_stix2_bundle(
geography_data.decode("utf-8"),
entities_types=self.helper.connect_scope,
update=self.update_existing_data,
work_id=work_id,
)
except Exception as e:
self.helper.log_error(str(e))
# Store the current timestamp as a last run
message = "Connector successfully run, storing last_run as " + str(
timestamp
)
self.helper.log_info(message)
self.helper.set_state({"last_run": timestamp})
self.helper.api.work.to_processed(work_id, message)
self.helper.log_info(
"Last_run stored, next run in: "
+ str(round(self.get_interval() / 60 / 60 / 24, 2))
+ " days"
)
time.sleep(60)
else:
new_interval = self.get_interval() - (timestamp - last_run)
self.helper.log_info(
"Connector will not run, next run in: "
+ str(round(new_interval / 60 / 60 / 24, 2))
+ " days"
)
time.sleep(60)
except (KeyboardInterrupt, SystemExit):
self.helper.log_info("Connector stop")
exit(0)
except Exception as e:
self.helper.log_error(str(e))
time.sleep(60)
if __name__ == "__main__":
try:
openCTIConnector = OpenCTI()
openCTIConnector.run()
except Exception as e:
print(e)
time.sleep(10)
exit(0)
|
StarcoderdataPython
|
178367
|
# Author: <NAME>
# Date: 26/06/2018
# Project: TdaToolbox
try: from filtration.imports import *
except: from imports import *
# Time delay embedded procedure
# val refers to a 1D time-serie
# step corresponds to the time-delay
# dimension is the dimension of the time-delay embedding
# point_size refers to the marker size used in the 3D plot
# graph is a boolean, whether we want to display the result
def vectorize(val, step, dimension=3, point_size=1, graph=False):
# Working on matrix
m_i = np.arange(dimension)*(step+1)
m_j = np.arange(max(val.shape[0]-(dimension-1)*(step+1), 0))
val = val[m_i + m_j.reshape(-1,1)]
# Memory efficiency
del m_i, m_j
if graph:
# Display
if dimension == 2:
plt.figure(figsize=(18,4))
plt.title('Vectorized Signal')
plt.scatter(val[:,0], val[:,1], c='grey', marker='x')
plt.grid()
plt.show()
elif dimension == 3:
lay = go.Layout(margin=dict(l=0, r=0, b=0, t=0))
img = go.Scatter3d(x=val[:,0], y=val[:,1], z=val[:,2], mode='markers',
marker=dict(size=point_size), opacity=0.5)
fig = tools.make_subplots(rows=1, cols=1)
fig['data'].append(img)
pyo.iplot(fig)
return val
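# Hedged usage sketch (the signal below is made up for illustration): embed a
# sine wave with a delay of 5 samples into 3 dimensions. graph=False keeps the
# example free of any plotting backend.
if __name__ == "__main__":
    signal = np.sin(np.linspace(0, 8 * np.pi, 500))
    cloud = vectorize(signal, step=5, dimension=3, graph=False)
    print(cloud.shape)  # (488, 3): 500 - (3 - 1) * (5 + 1) rows of dimension 3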
|
StarcoderdataPython
|
1683029
|
"""Initial Database Creation
Revision ID: 9c57eb87e918
Revises: None
Create Date: 2016-08-11 22:23:51.191035
"""
# revision identifiers, used by Alembic.
revision = '9c57eb87e918'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('committee_meetings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('committee', sa.Enum('Evaluations', 'History', 'Social', 'Opcomm', 'R&D', 'House Improvements', 'Financial', 'Chairman', name='committees_enum'), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=False),
sa.Column('active', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('conditional',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uid', sa.String(length=32), nullable=False),
sa.Column('description', sa.String(length=512), nullable=False),
sa.Column('date_created', sa.Date(), nullable=False),
sa.Column('date_due', sa.Date(), nullable=False),
sa.Column('active', sa.Boolean(), nullable=False),
sa.Column('status', sa.Enum('Pending', 'Passed', 'Failed', name='conditional_enum'), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('current_coops',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uid', sa.String(length=32), nullable=False),
sa.Column('active', sa.Boolean(), nullable=False),
sa.Column('date_created', sa.Date(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('freshman_accounts',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=False),
sa.Column('eval_date', sa.Date(), nullable=False),
sa.Column('onfloor_status', sa.Boolean(), nullable=True),
sa.Column('room_number', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('freshman_eval_data',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uid', sa.String(length=32), nullable=False),
sa.Column('freshman_project', sa.Enum('Pending', 'Passed', 'Failed', name='freshman_project_enum'), nullable=False),
sa.Column('eval_date', sa.DateTime(), nullable=False),
sa.Column('signatures_missed', sa.Integer(), nullable=False),
sa.Column('social_events', sa.Text(), nullable=True),
sa.Column('other_notes', sa.Text(), nullable=True),
sa.Column('freshman_eval_result', sa.Enum('Pending', 'Passed', 'Failed', name='freshman_eval_enum'), nullable=False),
sa.Column('active', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('house_meetings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('date', sa.Date(), nullable=False),
sa.Column('active', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('housing_evals',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uid', sa.String(length=32), nullable=False),
sa.Column('social_attended', sa.Text(), nullable=False),
sa.Column('social_hosted', sa.Text(), nullable=False),
sa.Column('technical_attended', sa.Text(), nullable=False),
sa.Column('technical_hosted', sa.Text(), nullable=False),
sa.Column('projects', sa.Text(), nullable=False),
sa.Column('comments', sa.Text(), nullable=False),
sa.Column('points', sa.Integer(), nullable=False),
sa.Column('active', sa.Boolean(), nullable=False),
sa.Column('date_created', sa.Date(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('major_projects',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uid', sa.String(length=32), nullable=False),
sa.Column('name', sa.String(length=64), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('active', sa.Boolean(), nullable=False),
sa.Column('status', sa.Enum('Pending', 'Passed', 'Failed', name='major_project_enum'), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('onfloor_datetime',
sa.Column('uid', sa.String(length=32), nullable=False),
sa.Column('onfloor_granted', sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint('uid', 'onfloor_granted')
)
op.create_table('settings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('housing_form_active', sa.Boolean(), nullable=True),
sa.Column('intro_form_active', sa.Boolean(), nullable=True),
sa.Column('site_lockdown', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('spring_evals',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uid', sa.String(length=32), nullable=False),
sa.Column('active', sa.Boolean(), nullable=False),
sa.Column('date_created', sa.Date(), nullable=False),
sa.Column('status', sa.Enum('Pending', 'Passed', 'Failed', name='spring_eval_enum'), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('technical_seminars',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=False),
sa.Column('active', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('freshman_committee_attendance',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('fid', sa.Integer(), nullable=False),
sa.Column('meeting_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['fid'], ['freshman_accounts.id'], ),
sa.ForeignKeyConstraint(['meeting_id'], ['committee_meetings.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('freshman_hm_attendance',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('fid', sa.Integer(), nullable=False),
sa.Column('meeting_id', sa.Integer(), nullable=False),
sa.Column('excuse', sa.Text(), nullable=True),
sa.Column('attendance_status', sa.Enum('Attended', 'Excused', 'Absent', name='attendance_enum'), nullable=True),
sa.ForeignKeyConstraint(['fid'], ['freshman_accounts.id'], ),
sa.ForeignKeyConstraint(['meeting_id'], ['house_meetings.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('freshman_seminar_attendance',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('fid', sa.Integer(), nullable=False),
sa.Column('seminar_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['fid'], ['freshman_accounts.id'], ),
sa.ForeignKeyConstraint(['seminar_id'], ['technical_seminars.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('member_committee_attendance',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uid', sa.String(length=32), nullable=False),
sa.Column('meeting_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['meeting_id'], ['committee_meetings.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('member_hm_attendance',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uid', sa.String(length=32), nullable=False),
sa.Column('meeting_id', sa.Integer(), nullable=False),
sa.Column('excuse', sa.Text(), nullable=True),
sa.Column('attendance_status', sa.Enum('Attended', 'Excused', 'Absent', name='attendance_enum'), nullable=True),
sa.ForeignKeyConstraint(['meeting_id'], ['house_meetings.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('member_seminar_attendance',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uid', sa.String(length=32), nullable=False),
sa.Column('seminar_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['seminar_id'], ['technical_seminars.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('member_seminar_attendance')
op.drop_table('member_hm_attendance')
op.drop_table('member_committee_attendance')
op.drop_table('freshman_seminar_attendance')
op.drop_table('freshman_hm_attendance')
op.drop_table('freshman_committee_attendance')
op.drop_table('technical_seminars')
op.drop_table('spring_evals')
op.drop_table('settings')
op.drop_table('onfloor_datetime')
op.drop_table('major_projects')
op.drop_table('housing_evals')
op.drop_table('house_meetings')
op.drop_table('freshman_eval_data')
op.drop_table('freshman_accounts')
op.drop_table('current_coops')
op.drop_table('conditional')
op.drop_table('committee_meetings')
### end Alembic commands ###
|
StarcoderdataPython
|
1725312
|
<gh_stars>0
import sys
import os
import importlib.util
from collections import deque
from interlib.utility import ImportQueue
from interlib.utility import print_line
from interlib.utility import show_error
from importlib import import_module
def interpret(pseudo_file, python_file, keyword_dict, is_debug_on):
in_file = open(pseudo_file, "r")
py_file = open(python_file, "w")
default_stdout = sys.stdout
output_file = open('output.txt', 'w')
sys.stdout = output_file
if (os.stat(pseudo_file).st_size == 0):
print("Error: Pseudo file emtpy. Did you properly saved it first?")
# Puts all of the lines from the input file to a list
in_file_lines = []
for line in in_file:
in_file_lines.append(line)
# All variables, lists, and dictionaries are put into here from the "Create" keyword
# Key = variable name, Values = {data type, data for that variable name}
# For example, an entry in the dictionary could be {"x": {"data_type": "number", "value": 1}}
# This means that x is a variable with the value 1
all_variables = {}
# Line number where the parser is at, used for showing error exceptions.
line_numb = 0
# List of all python lines to be written to the outfile
py_lines = deque()
# Some initialization to write to the py_file
py_lines.appendleft('if __name__ == "__main__":\n')
indent = 2
parse_success = False
# Get the directory where the main pseudo file is located at
pseudo_filepath_list = pseudo_file.split("/")
# In the future, we might want to add our own values to pass to a function for specific cases. In order to generalize, we will put any important values into this dictionary and pass this dictionary around to all of the keyword functions
# NOTE: dict and list are passed by reference
# NOTE: numbers and strings are passed by value
interpret_state = {}
interpret_state["all_variables"] = all_variables
interpret_state["line_numb"] = line_numb
interpret_state["py_lines"] = py_lines
interpret_state["indent"] = indent
interpret_state["parse_success"] = parse_success
interpret_state["in_file_lines"] = in_file_lines
interpret_state["keyword_dict"] = keyword_dict
interpret_state["pseudo_indent"] = 0
interpret_state["import_queue"] = ImportQueue()
interpret_state["pseudo_filepath"] = "\\".join(pseudo_filepath_list[0:-1])
interpret_state["pseudo_file"] = pseudo_filepath_list[-1]
interpret_state["plain_import_files"] = []
interpret_state["is_debug_on"] = is_debug_on
# main loop to parse all words in the file
while interpret_state["line_numb"] < len(in_file_lines):
curr_pc = interpret_state["line_numb"]
line = in_file_lines[interpret_state["line_numb"]]
# If a line contains only whitespace, then we just add a newline
# to keep the lines consistent with both the Pseudo code and the
# Python code
if line.isspace():
py_lines.append("\n")
interpret_state["line_numb"] = interpret_state["line_numb"] + 1
continue
# Checks how many spaces are at the beginning of the line
pseudo_indent = 0
for char in line:
if char != " ":
break
pseudo_indent += 1
interpret_state["pseudo_indent"] = pseudo_indent
line_list = line.split(" ")
# Filters out each line
# Removes any newline characters if they are in the list
while "\n" in line_list:
line_list.remove("\n")
# Removes any empty strings if they are in the list
while '' in line_list:
line_list.remove("")
# Removes the newline character from the last word
if line_list[-1][-1] == "\n":
line_list[-1] = line_list[-1][:-1]
# Check if the very first character of the line is a #, Pycode, or % and if so then put the whole line into the py_lines list and parse the next line
interpret_state["parse_success"] = True
if line_list[0][0] == "#":
py_lines.append((pseudo_indent+interpret_state["indent"])*" " + line)
elif line_list[0].lower() == "pycode" or line_list[0][0] == "%":
py_line = ""
if line_list[0][0] == "%" and len(line_list[0]) > 1:
py_line += line_list[0][1:] + " "
for i in range(1, len(line_list)):
py_line += line_list[i] + " "
py_lines.append((pseudo_indent+interpret_state["indent"])*" " + py_line + "\n")
else:
interpret_state["line_list"] = line_list
# Parse the lines based on the keywords
keyword = line_list[0].lower()
try:
interpret_state["parse_success"] = keyword_dict[keyword].handler(interpret_state)
except KeyError:
if interpret_state["is_debug_on"]:
show_error()
else:
print("Error: \"" + line_list[0] + "\" keyword not known.")
print_line(interpret_state["line_numb"], line_list)
interpret_state["parse_success"] = False;
except IndexError:
if interpret_state["is_debug_on"]:
show_error()
else:
print("Pseudo code line incomplete")
print_line(interpret_state["line_numb"], line_list)
interpret_state["parse_success"] = False;
# At the end of parsing the line, increment the line counter
# if we did not change the program counter
if interpret_state["line_numb"] == curr_pc:
interpret_state["line_numb"] += 1
# Terminate loop when error occurs
if not interpret_state["parse_success"]:
in_file.close()
py_file.close()
output_file.close()
sys.stdout = default_stdout
return
# write every line into py_file
for line in py_lines:
py_file.write(line)
# Close the files
in_file.close()
py_file.close()
# Runs the output file, stores output into output.txt file
#py_cmds = "import sys\nprint(sys.path)\n"
py_cmds = ""
# Kind of stupid, but apparently the output.txt file will not
# generate the outputs because of the if __name__ == "__main__"
# line. So we need to omit this and then omit the two space
# indentation for all lines after it.
py_line_main_pos = py_lines.index('if __name__ == "__main__":\n')
py_lines.remove('if __name__ == "__main__":\n')
for i in range(0, len(py_lines)):
if i < py_line_main_pos:
py_cmds += py_lines[i]
else:
py_cmds += py_lines[i][2:]
# Print the outputs to the output.txt file
recursive_fix = {}
path_list = interpret_state["pseudo_filepath"].split("/")
for var in all_variables:
try:
filename = all_variables[var]["source"]
except:
filename = interpret_state["pseudo_file"]
if not filename == interpret_state["pseudo_file"]:
file_regular_name = filename[0:-7]
py_name = file_regular_name + ".py"
path_list.append(py_name)
full_path = "\\".join(path_list)
# Import the funcitons
spec = importlib.util.spec_from_file_location(py_name, full_path)
module = importlib.util.module_from_spec(spec)
sys.modules[py_name] = module
sys.modules[file_regular_name] = module
spec.loader.exec_module(module)
if full_path not in sys.path:
sys.path.append(full_path)
path_list.pop()
for filename in interpret_state["plain_import_files"]:
filename_py = filename + ".py"
path_to_file = interpret_state["pseudo_filepath"] + "\\" + filename_py
spec = importlib.util.spec_from_file_location(filename_py, path_to_file)
module = importlib.util.module_from_spec(spec)
sys.modules[filename_py] = module
sys.modules[filename] = module
spec.loader.exec_module(module)
if path_to_file not in sys.path:
sys.path.append(path_to_file)
try:
exec(py_cmds, recursive_fix)
except:
if interpret_state["is_debug_on"]:
show_error()
else:
print("Error: Something in your python code did not execute successfully.")
print(" Check to make sure that your pseudo code is correctly formatted.")
print(" Enable the \"Toggle Debug\" option in Help to see raised exception.")
output_file.close()
sys.stdout = default_stdout
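# Hedged sketch of the handler contract inferred from the dispatch loop above:
# keyword_dict maps each lowercase keyword to an object whose
# handler(interpret_state) -> bool appends generated Python to
# interpret_state["py_lines"] and returns False on a parse error. The class
# below is illustrative only and not part of interlib.
class EchoKeyword:
    def handler(self, interpret_state):
        # Turn "echo some words" into a Python print() at the right indent.
        words = interpret_state["line_list"][1:]
        indent = interpret_state["pseudo_indent"] + interpret_state["indent"]
        interpret_state["py_lines"].append(
            " " * indent + "print(" + repr(" ".join(words)) + ")\n")
        return True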
|
StarcoderdataPython
|
1703364
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from datetime import datetime
from gae_libs.testcase import TestCase
from libs import analysis_status
from model import result_status
from model import triage_status
from model.flake.flake_culprit import FlakeCulprit
from model.flake.master_flake_analysis import DataPoint
from model.flake.master_flake_analysis import MasterFlakeAnalysis
class MasterFlakeAnalysisTest(TestCase):
def testMasterFlakeAnalysisStatusIsCompleted(self):
for status in (analysis_status.COMPLETED, analysis_status.ERROR):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.status = status
self.assertTrue(analysis.completed)
def testMasterFlakeAnalysisStatusIsNotCompleted(self):
for status in (analysis_status.PENDING, analysis_status.RUNNING):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.status = status
self.assertFalse(analysis.completed)
def testMasterFlakeAnalysisDurationWhenNotCompleted(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.status = analysis_status.RUNNING
self.assertIsNone(analysis.duration)
def testMasterFlakeAnalysisDurationWhenStartTimeNotSet(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.status = analysis_status.COMPLETED
analysis.end_time = datetime(2015, 07, 30, 21, 15, 30, 40)
self.assertIsNone(analysis.duration)
def testMasterFlakeAnalysisDurationWhenEndTimeNotSet(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.status = analysis_status.COMPLETED
analysis.start_time = datetime(2015, 07, 30, 21, 15, 30, 40)
self.assertIsNone(analysis.duration)
def testMasterFlakeAnalysisDurationWhenCompleted(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.status = analysis_status.COMPLETED
analysis.start_time = datetime(2015, 07, 30, 21, 15, 30, 40)
analysis.end_time = datetime(2015, 07, 30, 21, 16, 15, 50)
self.assertEqual(45, analysis.duration)
def testMasterFlakeAnalysisStatusIsFailed(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.status = analysis_status.ERROR
self.assertTrue(analysis.failed)
def testMasterFlakeAnalysisStatusIsNotFailed(self):
for status in (analysis_status.PENDING, analysis_status.RUNNING,
analysis_status.COMPLETED):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.status = status
self.assertFalse(analysis.failed)
def testMasterFlakeAnalysisStatusDescriptionPending(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.status = analysis_status.PENDING
self.assertEqual('Pending', analysis.status_description)
def testMasterFlakeAnalysisStatusDescriptionRunning(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.status = analysis_status.RUNNING
self.assertEqual('Running', analysis.status_description)
def testMasterFlakeAnalysisStatusDescriptionCompleted(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.status = analysis_status.COMPLETED
self.assertEqual('Completed', analysis.status_description)
def testMasterFlakeAnalysisStatusDescriptionError(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.status = analysis_status.ERROR
self.assertEqual('Error', analysis.status_description)
def testMasterFlakeAnalysisStepTestName(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's on OS', 't')
self.assertEqual('s on OS', analysis.step_name)
self.assertEqual('s', analysis.canonical_step_name)
self.assertEqual('t', analysis.test_name)
def testMasterFlakeAnalysisUpdateTriageResultCorrect(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.UpdateTriageResult(
triage_status.TRIAGED_CORRECT, {'build_number': 100}, 'test')
self.assertEqual(analysis.result_status, result_status.FOUND_CORRECT)
def testMasterFlakeAnalysisUpdateTriageResultIncorrect(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.UpdateTriageResult(
triage_status.TRIAGED_INCORRECT, {'build_number': 100}, 'test')
self.assertEqual(analysis.result_status, result_status.FOUND_INCORRECT)
def testMasterFlakeAnalysisUpdateTriageResultCorrectCulprit(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.UpdateTriageResult(
triage_status.TRIAGED_CORRECT, {'culprit_revision': 'rev'}, 'test')
self.assertEqual(analysis.result_status, result_status.FOUND_CORRECT)
self.assertTrue(analysis.correct_culprit)
def testMasterFlakeAnalysisUpdateTriageResultIncorrectCulprit(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.UpdateTriageResult(
triage_status.TRIAGED_INCORRECT, {'culprit_revision': 'rev'}, 'test')
self.assertEqual(analysis.result_status, result_status.FOUND_INCORRECT)
self.assertFalse(analysis.correct_culprit)
def testReset(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.swarming_rerun_results = [{}]
analysis.status = analysis_status.RUNNING
analysis.correct_regression_range = True
analysis.correct_culprit = False
analysis.correct_culprit = None
analysis.data_points = [DataPoint()]
analysis.suspected_flake_build_number = 123
analysis.culprit = FlakeCulprit.Create('r', 'a1b2c3d4', 12345, 'url')
analysis.try_job_status = analysis_status.COMPLETED
analysis.Reset()
self.assertEqual([], analysis.swarming_rerun_results)
self.assertEqual(analysis_status.PENDING, analysis.status)
self.assertIsNone(analysis.correct_regression_range)
self.assertIsNone(analysis.correct_culprit)
self.assertIsNone(analysis.suspected_flake_build_number)
self.assertEqual([], analysis.data_points)
self.assertIsNone(analysis.culprit)
self.assertIsNone(analysis.try_job_status)
def testGetErrorMessage(self):
cases = [
(None, None),
('error', {'message': 'error', 'code': 'code'}),
]
for expected_error_message, error in cases:
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.error = error
self.assertEqual(expected_error_message, analysis.error_message)
def testGetIterationsToRerun(self):
cases = [
(-1, {}),
(5, {'key': 'value', 'iterations_to_rerun': 5}),
]
for expected_rerun, algorithm_parameters in cases:
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
analysis.algorithm_parameters = algorithm_parameters
self.assertEqual(expected_rerun, analysis.iterations_to_rerun)
def testGetBuildConfigurationFromKey(self):
master_name = 'm'
builder_name = 'b'
build_number = 123
step_name = 's'
test_name = 't'
key = MasterFlakeAnalysis.Create(
master_name, builder_name, build_number, step_name, test_name).key
self.assertEqual(
(None, None),
MasterFlakeAnalysis.GetBuildConfigurationFromKey(None))
self.assertEqual(
(master_name, builder_name),
MasterFlakeAnalysis.GetBuildConfigurationFromKey(key))
def testGetDataPointOfSuspectedBuildNoSuspectedFlakeBuildNumber(self):
analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
self.assertIsNone(analysis.GetDataPointOfSuspectedBuild())
def testGetDataPointOfSuspectedBuild(self):
expected_build_number = 123
data_point = DataPoint()
data_point.build_number = expected_build_number
analysis = MasterFlakeAnalysis.Create('m', 'b', 125, 's', 't')
analysis.suspected_flake_build_number = expected_build_number
analysis.data_points.append(data_point)
suspected_data_point = analysis.GetDataPointOfSuspectedBuild()
self.assertEqual(expected_build_number, suspected_data_point.build_number)
def testGetDataPointOfSuspectedBuildNoDataPoint(self):
# This scenario should not happen.
expected_build_number = 123
unexpected_build_number = 124
data_point = DataPoint()
data_point.build_number = expected_build_number
analysis = MasterFlakeAnalysis.Create('m', 'b', 125, 's', 't')
analysis.suspected_flake_build_number = unexpected_build_number
analysis.data_points.append(data_point)
self.assertIsNone(analysis.GetDataPointOfSuspectedBuild())
def testGetCommitPosition(self):
data_point = DataPoint()
data_point.blame_list = ['r1', 'r2', 'r3']
data_point.commit_position = 100
data_point.previous_build_commit_position = 97
self.assertEqual(98, data_point.GetCommitPosition('r1'))
self.assertEqual(99, data_point.GetCommitPosition('r2'))
self.assertEqual(100, data_point.GetCommitPosition('r3'))
def testGetRevisionAtCommitPosition(self):
data_point = DataPoint()
data_point.blame_list = ['r1', 'r2', 'r3']
data_point.commit_position = 100
self.assertEqual('r1', data_point.GetRevisionAtCommitPosition(98))
self.assertEqual('r2', data_point.GetRevisionAtCommitPosition(99))
self.assertEqual('r3', data_point.GetRevisionAtCommitPosition(100))
def testGetDictOfCommitPositionAndRevision(self):
data_point = DataPoint()
data_point.blame_list = ['r1', 'r2', 'r3']
data_point.commit_position = 100
expected_CLs = {
100: 'r3',
99: 'r2',
98: 'r1'
}
self.assertEqual(expected_CLs,
data_point.GetDictOfCommitPositionAndRevision())
|
StarcoderdataPython
|
1792587
|
<filename>orb_simulator/orbsim_language/orbsim_ast/and_node.py
from orbsim_language.orbsim_ast.binary_expr_node import BinaryExprNode
class AndNode(BinaryExprNode):
pass
|
StarcoderdataPython
|
43223
|
<filename>wagtail/wagtailsearch/tests.py<gh_stars>1-10
from django.test import TestCase
from django.test.client import Client
from django.utils import timezone
from django.core import management
from django.conf import settings
import datetime
import unittest
from StringIO import StringIO
from wagtail.wagtailcore import models as core_models
from wagtail.wagtailsearch import models
from wagtail.wagtailsearch.backends import get_search_backend
from wagtail.wagtailsearch.backends.base import InvalidSearchBackendError
from wagtail.wagtailsearch.backends.db import DBSearch
from wagtail.wagtailsearch.backends.elasticsearch import ElasticSearch
def find_backend(cls):
if not hasattr(settings, 'WAGTAILSEARCH_BACKENDS'):
if cls == DBSearch:
return 'default'
else:
return
for backend in settings.WAGTAILSEARCH_BACKENDS.keys():
if isinstance(get_search_backend(backend), cls):
return backend
class TestBackend(TestCase):
def __init__(self, *args, **kwargs):
super(TestBackend, self).__init__(*args, **kwargs)
self.backends_tested = []
def test_backend_loader(self):
# Test DB backend import
db = get_search_backend(backend='wagtail.wagtailsearch.backends.db.DBSearch')
self.assertIsInstance(db, DBSearch)
# Test Elastic search backend import
elasticsearch = get_search_backend(backend='wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch')
self.assertIsInstance(elasticsearch, ElasticSearch)
# Test loading a non-existent backend
self.assertRaises(InvalidSearchBackendError, get_search_backend, backend='wagtail.wagtailsearch.backends.doesntexist.DoesntExist')
# Test something that isn't a backend
self.assertRaises(InvalidSearchBackendError, get_search_backend, backend="I'm not a backend!")
def test_search(self, backend=None):
# Don't run this test directly (this will be called from other tests)
if backend is None:
raise unittest.SkipTest()
# Don't test the same backend more than once!
if backend in self.backends_tested:
return
self.backends_tested.append(backend)
# Get search backend and reset the index
s = get_search_backend(backend=backend)
s.reset_index()
# Create a couple of objects and add them to the index
testa = models.SearchTest()
testa.title = "Hello World"
testa.save()
s.add(testa)
testb = models.SearchTest()
testb.title = "Hello"
testb.save()
s.add(testb)
testc = models.SearchTestChild()
testc.title = "Hello"
testc.save()
s.add(testc)
# Refresh index
s.refresh_index()
# Ordinary search
results = s.search("Hello", models.SearchTest)
self.assertEqual(len(results), 3)
# Retrieve single result
self.assertIsInstance(results[0], models.SearchTest)
# Retrieve results through iteration
iterations = 0
for result in results:
self.assertIsInstance(result, models.SearchTest)
iterations += 1
self.assertEqual(iterations, 3)
# Retrieve results through slice
iterations = 0
for result in results[:]:
self.assertIsInstance(result, models.SearchTest)
iterations += 1
self.assertEqual(iterations, 3)
# Ordinary search on "World"
results = s.search("World", models.SearchTest)
self.assertEqual(len(results), 1)
# Searcher search
results = models.SearchTest.title_search("Hello", backend=backend)
self.assertEqual(len(results), 3)
# Ordinary search on child
results = s.search("Hello", models.SearchTestChild)
self.assertEqual(len(results), 1)
# Searcher search on child
results = models.SearchTestChild.title_search("Hello", backend=backend)
self.assertEqual(len(results), 1)
# Delete a record
testc.delete()
results = s.search("Hello", models.SearchTest)
# TODO: FIXME Deleting records doesn't seem to be deleting them from the index! (but they still get deleted on update_index)
#self.assertEqual(len(results), 2)
# Try to index something that shouldn't be indexed
# TODO: This currently fails on the DB backend
if not isinstance(s, DBSearch):
testd = models.SearchTest()
testd.title = "Don't index me!"
testd.save()
s.add(testd)
results = s.search("Don't", models.SearchTest)
self.assertEqual(len(results), 0)
# Reset the index, this should clear out the index (but doesn't have to!)
s.reset_index()
# Run update_index command
management.call_command('update_index', backend, interactive=False, stdout=StringIO())
# Should have results again now
results = s.search("Hello", models.SearchTest)
self.assertEqual(len(results), 2)
def test_db_backend(self):
self.test_search(backend='wagtail.wagtailsearch.backends.db.DBSearch')
def test_elastic_search_backend(self):
backend = find_backend(ElasticSearch)
if backend is not None:
self.test_search(backend)
else:
raise unittest.SkipTest("Cannot find an ElasticSearch search backend in configuration.")
def test_query_hit_counter(self):
# Add 10 hits to hello query
for i in range(10):
models.Query.get("Hello").add_hit()
# Check that each hit was registered
self.assertEqual(models.Query.get("Hello").hits, 10)
def test_query_string_normalisation(self):
# Get a query
query = models.Query.get("Hello World!")
# Check that it is stored correctly
self.assertEqual(str(query), "hello world")
# Check queries that should be the same
self.assertEqual(query, models.Query.get("Hello World"))
self.assertEqual(query, models.Query.get("Hello World!!"))
self.assertEqual(query, models.Query.get("hello world"))
self.assertEqual(query, models.Query.get("Hello' world"))
# Check queries that should be different
self.assertNotEqual(query, models.Query.get("HelloWorld"))
self.assertNotEqual(query, models.Query.get("Hello orld!!"))
self.assertNotEqual(query, models.Query.get("Hello"))
def test_query_popularity(self):
# Add 3 hits to unpopular query
for i in range(3):
models.Query.get("unpopular query").add_hit()
# Add 10 hits to popular query
for i in range(10):
models.Query.get("popular query").add_hit()
# Get most popular queries
popular_queries = models.Query.get_most_popular()
# Check list
self.assertEqual(popular_queries.count(), 2)
self.assertEqual(popular_queries[0], models.Query.get("popular query"))
self.assertEqual(popular_queries[1], models.Query.get("unpopular query"))
# Add 5 hits to little popular query
for i in range(5):
models.Query.get("little popular query").add_hit()
# Check list again, little popular query should be in the middle
self.assertEqual(popular_queries.count(), 3)
self.assertEqual(popular_queries[0], models.Query.get("popular query"))
self.assertEqual(popular_queries[1], models.Query.get("little popular query"))
self.assertEqual(popular_queries[2], models.Query.get("unpopular query"))
# Unpopular query goes viral!
for i in range(20):
models.Query.get("unpopular query").add_hit()
# Unpopular query should be most popular now
self.assertEqual(popular_queries.count(), 3)
self.assertEqual(popular_queries[0], models.Query.get("unpopular query"))
self.assertEqual(popular_queries[1], models.Query.get("popular query"))
self.assertEqual(popular_queries[2], models.Query.get("little popular query"))
@unittest.expectedFailure # Time based popularity isn't implemented yet
def test_query_popularity_over_time(self):
today = timezone.now().date()
two_days_ago = today - datetime.timedelta(days=2)
a_week_ago = today - datetime.timedelta(days=7)
a_month_ago = today - datetime.timedelta(days=30)
# Add 10 hits to a query that was very popular query a month ago
for i in range(10):
models.Query.get("old popular query").add_hit(date=a_month_ago)
# Add 5 hits to a query that was popular 2 days ago
for i in range(5):
models.Query.get("new popular query").add_hit(date=two_days_ago)
# Get most popular queries
popular_queries = models.Query.get_most_popular()
# Old popular query should be most popular
self.assertEqual(popular_queries.count(), 2)
self.assertEqual(popular_queries[0], models.Query.get("old popular query"))
self.assertEqual(popular_queries[1], models.Query.get("new popular query"))
# Get most popular queries for past week
past_week_popular_queries = models.Query.get_most_popular(date_since=a_week_ago)
# Only new popular query should be in this list
self.assertEqual(past_week_popular_queries.count(), 1)
self.assertEqual(past_week_popular_queries[0], models.Query.get("new popular query"))
# Old popular query gets a couple more hits!
for i in range(2):
models.Query.get("old popular query").add_hit()
# Old popular query should now be in the most popular queries
self.assertEqual(past_week_popular_queries.count(), 2)
self.assertEqual(past_week_popular_queries[0], models.Query.get("new popular query"))
self.assertEqual(past_week_popular_queries[1], models.Query.get("old popular query"))
def test_editors_picks(self):
# Get root page
root = core_models.Page.objects.first()
# Create an editors pick to the root page
models.EditorsPick.objects.create(
query=models.Query.get("root page"),
page=root,
sort_order=0,
description="First editors pick",
)
# Get editors pick
self.assertEqual(models.Query.get("root page").editors_picks.count(), 1)
self.assertEqual(models.Query.get("root page").editors_picks.first().page, root)
# Create a couple more editors picks to test the ordering
models.EditorsPick.objects.create(
query=models.Query.get("root page"),
page=root,
sort_order=2,
description="Last editors pick",
)
models.EditorsPick.objects.create(
query=models.Query.get("root page"),
page=root,
sort_order=1,
description="Middle editors pick",
)
# Check
self.assertEqual(models.Query.get("root page").editors_picks.count(), 3)
self.assertEqual(models.Query.get("root page").editors_picks.first().description, "First editors pick")
self.assertEqual(models.Query.get("root page").editors_picks.last().description, "Last editors pick")
# Add editors pick with different terms
models.EditorsPick.objects.create(
query=models.Query.get("root page 2"),
page=root,
sort_order=0,
description="Other terms",
)
# Check
self.assertEqual(models.Query.get("root page 2").editors_picks.count(), 1)
self.assertEqual(models.Query.get("root page").editors_picks.count(), 3)
def test_garbage_collect(self):
# Call garbage collector command
management.call_command('search_garbage_collect', interactive=False, stdout=StringIO())
def get_default_host():
from wagtail.wagtailcore.models import Site
return Site.objects.filter(is_default_site=True).first().root_url.split('://')[1]
def get_search_page_test_data():
params_list = []
params_list.append({})
for query in ['', 'Hello', "'", '%^W&*$']:
params_list.append({'q': query})
for page in ['-1', '0', '1', '99999', 'Not a number']:
params_list.append({'q': 'Hello', 'p': page})
return params_list
class TestFrontend(TestCase):
def setUp(self):
s = get_search_backend()
# Stick some documents into the index
testa = models.SearchTest()
testa.title = "Hello World"
testa.save()
s.add(testa)
testb = models.SearchTest()
testb.title = "Hello"
testb.save()
s.add(testb)
testc = models.SearchTestChild()
testc.title = "Hello"
testc.save()
s.add(testc)
def test_views(self):
c = Client()
# Test urls
for url in ['/search/', '/search/suggest/']:
for params in get_search_page_test_data():
r = c.get(url, params, HTTP_HOST=get_default_host())
self.assertEqual(r.status_code, 200)
# Try an extra one with AJAX
r = c.get(url, HTTP_HOST=get_default_host(), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(r.status_code, 200)
class TestAdmin(TestCase):
def setUp(self):
# Create a user
from django.contrib.auth.models import User
User.objects.create_superuser(username='test', email='<EMAIL>', password='password')
# Setup client
self.c = Client()
login = self.c.login(username='test', password='password')
self.assertEqual(login, True)
def test_editors_picks(self):
# Test index
for params in get_search_page_test_data():
r = self.c.get('/admin/search/editorspicks/', params, HTTP_HOST=get_default_host())
self.assertEqual(r.status_code, 200)
# Try an extra one with AJAX
r = self.c.get('/admin/search/editorspicks/', HTTP_HOST=get_default_host(), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(r.status_code, 200)
def test_queries_chooser(self):
for params in get_search_page_test_data():
r = self.c.get('/admin/search/queries/chooser/', params, HTTP_HOST=get_default_host())
self.assertEqual(r.status_code, 200)
# Try an extra one with AJAX
r = self.c.get('/admin/search/queries/chooser/', HTTP_HOST=get_default_host(), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(r.status_code, 200)
|
StarcoderdataPython
|
1704446
|
<reponame>aurelienline/scikit-extremes
"""
This module provides utility functions that are used within scikit-extremes
that are also useful for external consumption.
"""
import warnings as _warnings
from numpy.random import randint as _randint
import numpy as _np
import scipy.stats as _st
from scipy import optimize as _op
from scipy.special import gamma as _gamma
###############################################################################
# Bootstrap confidence intervals calculations using percentile interval method
###############################################################################
class InstabilityWarning(UserWarning):
"""Issued when results may be unstable."""
pass
# On import, make sure that InstabilityWarnings are not filtered out.
_warnings.simplefilter('always', InstabilityWarning)
_warnings.simplefilter('always', UserWarning)
def bootstrap_ci(data, statfunction=_np.average, alpha = 0.05,
n_samples = 100):
"""
Given a set of data ``data``, and a statistics function ``statfunction`` that
applies to that data, computes the bootstrap confidence interval for
``statfunction`` on that data. Data points are assumed to be delineated by
axis 0.
This function has been derived and simplified from scikits-bootstrap
package created by cgevans (https://github.com/cgevans/scikits-bootstrap).
All the credits shall go to him.
**Parameters**
data : array_like, shape (N, ...) OR tuple of array_like all with shape (N, ...)
Input data. Data points are assumed to be delineated by axis 0. Beyond this,
the shape doesn't matter, so long as ``statfunction`` can be applied to the
array. If a tuple of array_likes is passed, then samples from each array (along
axis 0) are passed in order as separate parameters to the statfunction. The
type of data (single array or tuple of arrays) can be explicitly specified
by the multi parameter.
statfunction : function (data, weights = (weights, optional)) -> value
This function should accept samples of data from ``data``. It is applied
to these samples individually.
alpha : float, optional
The percentiles to use for the confidence interval (default=0.05). The
returned values are (alpha/2, 1-alpha/2) percentile confidence
intervals.
n_samples : int or float, optional
The number of bootstrap samples to use (default=100)
**Returns**
confidences : tuple of floats
The confidence percentiles specified by alpha
**Calculation Methods**
'pi' : Percentile Interval (Efron 13.3)
The percentile interval method simply returns the 100*alphath bootstrap
sample's values for the statistic. This is an extremely simple method of
confidence interval calculation. However, it has several disadvantages
compared to the bias-corrected accelerated method.
If you want to use more complex calculation methods, please, see
`scikits-bootstrap package
<https://github.com/cgevans/scikits-bootstrap>`_.
**References**
Efron (1993): 'An Introduction to the Bootstrap', Chapman & Hall.
"""
def bootstrap_indexes(data, n_samples=10000):
"""
Given data points data, where axis 0 is considered to delineate points, return
a generator for sets of bootstrap indexes. This can be used as a list
of bootstrap indexes (with list(bootstrap_indexes(data))) as well.
"""
for _ in range(n_samples):
yield _randint(data.shape[0], size=(data.shape[0],))
alphas = _np.array([alpha / 2,1 - alpha / 2])
data = _np.array(data)
tdata = (data,)
# We don't need to generate actual samples; that would take more memory.
# Instead, we can generate just the indexes, and then apply the statfun
# to those indexes.
bootindexes = bootstrap_indexes(tdata[0], n_samples)
stat = _np.array([statfunction(*(x[indexes] for x in tdata)) for indexes in bootindexes])
stat.sort(axis=0)
# Percentile Interval Method
avals = alphas
nvals = _np.round((n_samples - 1)*avals).astype('int')
if _np.any(nvals == 0) or _np.any(nvals == n_samples - 1):
_warnings.warn("Some values used extremal samples; results are probably unstable.", InstabilityWarning)
elif _np.any(nvals<10) or _np.any(nvals>=n_samples-10):
_warnings.warn("Some values used top 10 low/high samples; results may be unstable.", InstabilityWarning)
if nvals.ndim == 1:
# All nvals are the same. Simple broadcasting
return stat[nvals]
else:
# Nvals are different for each data point. Not simple broadcasting.
# Each set of nvals along axis 0 corresponds to the data at the same
# point in other axes.
return stat[(nvals, _np.indices(nvals.shape)[1:].squeeze())]
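# Illustrative usage sketch (not part of the original module); the sample values
# below are arbitrary demonstration data:
#     sample = _np.array([4.2, 5.1, 3.9, 6.3, 5.5, 4.8, 5.0, 4.4])
#     low, high = bootstrap_ci(sample, statfunction=_np.average, alpha=0.05, n_samples=1000)
#     assert low <= _np.average(sample) <= high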
###############################################################################
# Function to estimate parameters of GEV using method of moments
###############################################################################
def gev_momfit(data):
"""
Estimate parameters of Generalised Extreme Value distribution using the
method of moments. The methodology has been extracted from appendix A.4
on EVA (see references below).
**Parameters**
data : array_like
Sample extreme data
**Returns**
tuple
tuple with the shape, location and scale parameters estimated
from the sample by the method of moments.
**References**
DHI, (2003): '`EVA(Extreme Value Analysis - Reference manual)
<http://www.tnmckc.org/upload/document/wup/1/1.3/Manuals/MIKE%2011/eva/EVA_RefManual.pdf>`_',
DHI.
"""
g = lambda n, x : _gamma(1 + n * x)
mean = _np.mean(data)
std = _np.std(data)
skew = _st.skew(data)
def minimize_skew(x):
a = -g(3, x) + 3 * g(1, x) * g(2, x) - 2 * g(1, x)**3
b = (g(2, x) - (g(1, x))**2)**1.5
c = abs(a / b - skew)
return c
c = _op.fmin(minimize_skew, 0)[0] # first guess is set to 0
scale = std * abs(c) / _np.sqrt((g(2, c) - g(1, c)**2))
loc = mean - scale * (1 - g(1, c)) / c
return c, loc, scale
###############################################################################
# Function to estimate parameters of Gumbel using method of moments
###############################################################################
def gum_momfit(data):
"""
Estimate parameters of Gumbel distribution using the
method of moments. The methodology has been extracted from Wilks
(see references below).
**Parameters**
data : array_like
Sample extreme data
**Returns**
tuple
tuple with the shape, location and scale parameters. In this
case, the shape parameter is always 0.
**References**
Wilks,D.S. (2006): '`Statistical Methods in the Atmospheric Sciences,
second edition <http://store.elsevier.com/Statistical-Methods-in-the-Atmospheric-Sciences/Daniel-Wilks/isbn-9780080456225/>`_',
Academic Press.
"""
mean = _np.mean(data)
std = _np.std(data)
euler_cte = 0.5772156649015328606065120900824024310421
scale = std * _np.sqrt(6) / _np.pi
loc = mean - scale * euler_cte
return 0, loc, scale
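# Illustrative usage sketch (not part of the original module): fit both moment
# estimators to a synthetic Gumbel-distributed sample; the location/scale values
# used to generate the sample are arbitrary demonstration choices.
#     annual_maxima = _st.gumbel_r.rvs(loc=30.0, scale=5.0, size=200)
#     c, loc, scale = gev_momfit(annual_maxima)         # fitted shape, location, scale
#     zero, loc_g, scale_g = gum_momfit(annual_maxima)  # shape fixed at 0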
|
StarcoderdataPython
|
43309
|
import time
from datetime import datetime
from serial import Serial # Library needed to open serial connection
PIN = 'a5'
PORT = 'COM11'
PORT = Serial(port=PORT, baudrate=9600, timeout=0) # Open the Serial port
def encode_command(command):
return bytearray(command, encoding='utf-8')
print('-' * 50)
print('AC:', 'Connecting to:', PORT)
print('AC:', 'Press Ctrl+C to close the program.')
try:
while True:
PORT.write(encode_command('S0|i{0}|r{0}|.'.format(PIN))) # Ask for analog status
time.sleep(0.1)
# Read until ';' is found
result = "|"
while len(result) == 0 or result[-1] != ';':
result += PORT.read().decode('utf-8')
if result[-1] == '|': # Clear string at each command start
result = ""
result = result[:-1] # Skip the ';' character
print('AC ({}):'.format(datetime.now()), '{} is at ->'.format(PIN.upper()), result)
except KeyboardInterrupt:
pass
finally:
PORT.close()
print('AC:', 'Connection closed.')
print('-' * 50)
|
StarcoderdataPython
|
3290592
|
<filename>lib/python2.7/site-packages/selectable/tests/fields.py
from django import forms
from selectable.forms import fields
from selectable.tests import ThingLookup
from selectable.tests.base import BaseSelectableTestCase
__all__ = (
'AutoCompleteSelectFieldTestCase',
'AutoComboboxSelectFieldTestCase',
'AutoCompleteSelectMultipleFieldTestCase',
'AutoComboboxSelectMultipleFieldTestCase',
)
class BaseFieldTestCase(BaseSelectableTestCase):
field_cls = None
lookup_cls = None
def get_field_instance(self, allow_new=False, limit=None):
return self.__class__.field_cls(self.__class__.lookup_cls, allow_new=allow_new, limit=limit)
def test_init(self):
field = self.get_field_instance()
self.assertEqual(field.lookup_class, self.__class__.lookup_cls)
def test_init_with_limit(self):
field = self.get_field_instance(limit=10)
self.assertEqual(field.limit, 10)
self.assertEqual(field.widget.limit, 10)
def test_clean(self):
self.fail('This test has not yet been written')
class AutoCompleteSelectFieldTestCase(BaseFieldTestCase):
field_cls = fields.AutoCompleteSelectField
lookup_cls = ThingLookup
def test_clean(self):
thing = self.create_thing()
field = self.get_field_instance()
value = field.clean([thing.name, thing.id])
self.assertEqual(thing, value)
def test_new_not_allowed(self):
field = self.get_field_instance()
value = self.get_random_string()
self.assertRaises(forms.ValidationError, field.clean, [value, ''])
def test_new_allowed(self):
field = self.get_field_instance(allow_new=True)
value = self.get_random_string()
value = field.clean([value, ''])
self.assertTrue(isinstance(value, ThingLookup.model))
class AutoComboboxSelectFieldTestCase(BaseFieldTestCase):
field_cls = fields.AutoComboboxSelectField
lookup_cls = ThingLookup
def test_clean(self):
thing = self.create_thing()
field = self.get_field_instance()
value = field.clean([thing.name, thing.id])
self.assertEqual(thing, value)
def test_new_not_allowed(self):
field = self.get_field_instance()
value = self.get_random_string()
self.assertRaises(forms.ValidationError, field.clean, [value, ''])
def test_new_allowed(self):
field = self.get_field_instance(allow_new=True)
value = self.get_random_string()
value = field.clean([value, ''])
self.assertTrue(isinstance(value, ThingLookup.model))
class AutoCompleteSelectMultipleFieldTestCase(BaseFieldTestCase):
field_cls = fields.AutoCompleteSelectMultipleField
lookup_cls = ThingLookup
def get_field_instance(self, limit=None):
return self.__class__.field_cls(self.__class__.lookup_cls, limit=limit)
def test_clean(self):
thing = self.create_thing()
field = self.get_field_instance()
value = field.clean([thing.id])
self.assertEqual([thing], value)
def test_clean_multiple(self):
thing = self.create_thing()
other_thing = self.create_thing()
field = self.get_field_instance()
ids = [thing.id, other_thing.id]
value = field.clean(ids)
self.assertEqual([thing, other_thing], value)
class AutoComboboxSelectMultipleFieldTestCase(BaseFieldTestCase):
field_cls = fields.AutoComboboxSelectMultipleField
lookup_cls = ThingLookup
def get_field_instance(self, limit=None):
return self.__class__.field_cls(self.__class__.lookup_cls, limit=limit)
def test_clean(self):
thing = self.create_thing()
field = self.get_field_instance()
value = field.clean([thing.id])
self.assertEqual([thing], value)
def test_clean_multiple(self):
thing = self.create_thing()
other_thing = self.create_thing()
field = self.get_field_instance()
ids = [thing.id, other_thing.id]
value = field.clean(ids)
self.assertEqual([thing, other_thing], value)
|
StarcoderdataPython
|
3297911
|
# Leetcode 417. Pacific Atlantic Water Flow
#
# Link: https://leetcode.com/problems/pacific-atlantic-water-flow/
# Difficulty: Medium
# Solution using DFS and sets
# Complexity:
# O(M*N) time | where M and N represent the rows and cols of the input matrix
# O(M*N) space | where M and N represent the rows and cols of the input matrix
from typing import List
class Solution:
def pacificAtlantic(self, heights: List[List[int]]) -> List[List[int]]:
ROWS, COLS = len(heights), len(heights[0])
DIRECTIONS = ((0,1), (0,-1), (1,0), (-1,0))
reach_pacific = set()
reach_atlantic = set()
def dfs(r, c, visited, old_height):
if ((r, c) in visited or
r < 0 or r >= ROWS or
c < 0 or c >= COLS or
heights[r][c] < old_height):
return
visited.add((r, c))
for dr, dc in DIRECTIONS:
dfs(r + dr, c + dc, visited, heights[r][c])
for r in range(ROWS):
dfs(r, 0, reach_pacific, 0)
dfs(r, COLS-1, reach_atlantic, 0)
for c in range(COLS):
dfs(0, c, reach_pacific, 0)
dfs(ROWS-1, c, reach_atlantic, 0)
return reach_pacific.intersection(reach_atlantic)
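# Illustrative usage sketch (not part of the original snippet); the 3x3 grid is a
# made-up example, and the result is the set of cells reachable by both oceans:
#     cells = Solution().pacificAtlantic([[1, 2, 2], [3, 8, 2], [5, 3, 1]])
#     sorted(cells)  # [(0, 1), (0, 2), (1, 1), (1, 2), (2, 0)]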
|
StarcoderdataPython
|
1728782
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gapic.utils import checks
from test_utils.test_utils import make_field, make_message
def test_is_str_field_pb():
msg_field = make_field('msg_field', message=make_message('test_msg'))
str_field = make_field('str_field', type=9)
int_field = make_field('int_field', type=5)
assert not checks.is_str_field_pb(msg_field.field_pb)
assert checks.is_str_field_pb(str_field.field_pb)
assert not checks.is_str_field_pb(int_field.field_pb)
def test_is_msg_field_pb():
msg_field = make_field('msg_field', message=make_message('test_msg'))
str_field = make_field('str_field', type=9)
int_field = make_field('int_field', type=5)
assert checks.is_msg_field_pb(msg_field.field_pb)
assert not checks.is_msg_field_pb(str_field.field_pb)
assert not checks.is_msg_field_pb(int_field.field_pb)
|
StarcoderdataPython
|
194841
|
"""Set up a fake hook for testing purposes."""
import platform
from pathlib import Path
from typing import Dict
import pluggy
import pytest
from edgetest import hookspecs
hookimpl = pluggy.HookimplMarker("edgetest")
class FakeHook:
"""Create a series of fake hooks."""
@hookimpl
def path_to_python(self, basedir: str, envname: str) -> str:
"""Return the path to the python executable."""
# return str(Path(basedir, envname, "bin", "python"))
if platform.system() == "Windows":
return str(Path(basedir) / envname / "Scripts" / "python")
else:
return str(Path(basedir) / envname / "bin" / "python")
@hookimpl
def create_environment(self, basedir: str, envname: str, conf: Dict):
"""Create the virtual environment for testing."""
pass
@pytest.fixture
def plugin_manager():
"""The plugin manager for our fake hook."""
pm = pluggy.PluginManager("edgetest")
pm.add_hookspecs(hookspecs)
pm.register(FakeHook())
return pm
|
StarcoderdataPython
|
1693706
|
<filename>brainiac_priv/brainiac_test_priv/brainiac.py
class brainiac_test_test:
def __init__(self, dark):
self.dark = dark
def test(dark):
print(dark)
|
StarcoderdataPython
|
3348242
|
from multiprocessing import Pool
from rdkit.Chem import AllChem
from autode.input_output import xyz_file_to_atoms
from autode.conformer import Conformer
from autode.conf_gen import get_simanl_atoms
from autode.conformers import conf_is_unique_rmsd, get_atoms_from_rdkit_mol_object
from autode.atoms import metals
from autode.config import Config
from autode.log import logger
from autode.mol_graphs import make_graph
# from autode.smiles import init_organic_smiles
# from autode.smiles import init_smiles
from autode.species import Species
from autode.utils import requires_atoms
class Molecule(Species):
# def _init_smiles(self, smiles):
# """Initialise a molecule from a SMILES string using RDKit if it's
# purely organic"""
# if any(metal in smiles for metal in metals):
# init_smiles(self, smiles)
# else:
# init_organic_smiles(self, smiles)
# logger.info(f'Initialisation with SMILES successful. '
# f'Charge={self.charge}, Multiplicity={self.mult}, '
# f'Num. Atoms={self.n_atoms}')
# return None
def _init_xyz_file(self, xyz_filename):
"""Initialise a molecule from a .xyz file"""
logger.info('Generating species from .xyz file')
self.set_atoms(atoms=xyz_file_to_atoms(xyz_filename))
# Override the default name with something more descriptive
if self.name == 'molecule' or self.name.endswith('.xyz'):
self.name = xyz_filename[:-4]  # strip the '.xyz' suffix (rstrip('.xyz') would also strip trailing x/y/z characters)
return None
@requires_atoms()
def _generate_conformers(self, n_confs=None):
"""
Use a simulated annealing approach to generate conformers for this
molecule.
Keyword Arguments:
n_confs (int): Number of conformers requested if None default to
autode.Config.num_conformers
"""
n_confs = n_confs if n_confs is not None else Config.num_conformers
self.conformers = []
if self.smiles is not None and self.rdkit_conf_gen_is_fine:
logger.info(f'Using RDKit to gen conformers. {n_confs} requested')
method = AllChem.ETKDGv2()
method.pruneRmsThresh = Config.rmsd_threshold
method.numThreads = Config.n_cores
logger.info('Running conformation generation with RDKit... running')
conf_ids = list(AllChem.EmbedMultipleConfs(self.rdkit_mol_obj,
numConfs=n_confs,
params=method))
logger.info(' ... done')
conf_atoms_list = [get_atoms_from_rdkit_mol_object(self.rdkit_mol_obj, conf_id) for conf_id in conf_ids]
else:
logger.info('Using simulated annealing to generate conformers')
with Pool(processes=Config.n_cores) as pool:
results = [pool.apply_async(get_simanl_atoms, (self, None, i)) for i in range(n_confs)]
conf_atoms_list = [res.get(timeout=None) for res in results]
for i, atoms in enumerate(conf_atoms_list):
conf = Conformer(name=f'{self.name}_conf{i}',
charge=self.charge,
mult=self.mult,
atoms=atoms)
# If the conformer is unique on an RMSD threshold
if conf_is_unique_rmsd(conf, self.conformers):
self.conformers.append(conf)
logger.info(f'Generated {len(self.conformers)} unique conformer(s)')
return None
def populate_conformers(self, n_confs):
"""Populate self.conformers with a list of Conformer objects"""
return self._generate_conformers(n_confs=n_confs)
def __init__(self, name='molecule', smiles=None, atoms=None,
solvent_name=None, charge=0, mult=1):
"""
Molecule class
Keyword Arguments:
name (str): Name of the molecule or a .xyz filename
smiles (str): Standard SMILES string. e.g. generated by
Chemdraw
atoms (list(autode.atoms.Atom)): List of atoms in the species
solvent_name (str): Solvent that the molecule is immersed in
charge (int): Charge on the molecule
mult (int): Spin multiplicity on the molecule
"""
logger.info(f'Generating a Molecule object for {name}')
super().__init__(name, atoms, charge, mult, solvent_name)
if name.endswith('.xyz'):
self._init_xyz_file(xyz_filename=name)
self.smiles = smiles
self.rdkit_mol_obj = None
self.rdkit_conf_gen_is_fine = True
self.conformers = None
# if smiles is not None:
# self._init_smiles(smiles)
if atoms is not None:
make_graph(self)
# class SolvatedMolecule(Molecule):
# @requires_atoms()
# def optimise(self, method):
# raise NotImplementedError
# def __init__(self, name='solvated_molecule', smiles=None, atoms=None,
# solvent_name=None, charge=0, mult=1, solvent_mol=None):
# super().__init__(name, smiles, atoms, solvent_name, charge, mult)
# self.solvent_mol = solvent_mol
# self.qm_solvent_atoms = None
# self.mm_solvent_atoms = None
class Reactant(Molecule):
pass
class Product(Molecule):
pass
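# Illustrative usage sketch (not part of the original module); 'water.xyz' is a
# hypothetical input file and 5 conformers is an arbitrary request:
#     mol = Molecule(name='water.xyz', charge=0, mult=1)
#     mol.populate_conformers(n_confs=5)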
# def reactant_to_product(reactant):
# reactant.__class__ = Product
# return reactant
# def product_to_reactant(product):
# product.__class__ = Reactant
# return product
|
StarcoderdataPython
|
14901
|
<reponame>kkcookies99/UAST
class Solution:
def XXX(self, root: TreeNode) -> bool:
stack = []
cur = root
last = float("-inf")
while cur or stack:
while cur:
stack.append(cur)
cur = cur.left
cur = stack.pop()
if cur.val > last:
last = cur.val
else:
return False
cur = cur.right
return True
|
StarcoderdataPython
|
3341063
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengj
@ date : 2020/5/26 18:54
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : <EMAIL>
-------------------------------------------------
Description :
-------------------------------------------------
"""
import time
from typing import List
# list_to_tree is a helper I wrote myself to convert a list into a tree root
from LeetCode.leetcode_utils.leetcode_list2tree import list_to_tree, TreeNode
__author__ = 'Max_Pengjb'
start_time = time.time()
# https://leetcode-cn.com/problems/game-of-life/solution/xiong-mao-shua-ti-python3-bao-xue-bao-hui-cvzhong-/
# Code block starts below
class Solution:
def wordPattern(self, pattern: str, str: str) -> bool:
word_char = {}
char_word = {}
words = str.split()
if len(pattern) != len(words):
return False
for p, word in zip(pattern, words):
if word in word_char:
if p != word_char[word]:
return False
else:
if p in char_word:
return False
else:
char_word[p] = word
word_char[word] = p
return True
pattern = "abba"
str = "dog cat cat dog"
rr = Solution().wordPattern(pattern, str)
print(rr)
# Code block ends above
end_time = time.time()
print('Running time: %s Seconds' % (end_time - start_time))
|
StarcoderdataPython
|
77945
|
import torch
import numpy as np
from onnx import numpy_helper
from thop.vision.basic_hooks import zero_ops
from .counter import counter_matmul, counter_zero_ops,\
counter_conv, counter_mul, counter_norm, counter_pow,\
counter_sqrt, counter_div, counter_softmax, counter_avgpool
def onnx_counter_matmul(diction, node):
input1 = node.input[0]
input2 = node.input[1]
input1_dim = diction[input1]
input2_dim = diction[input2]
out_size = np.append(input1_dim[0:-1], input2_dim[-1])
output_name = node.output[0]
macs = counter_matmul(input1_dim, out_size[-2:])
return macs, out_size, output_name
def onnx_counter_add(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
out_size = diction[node.input[1]]
else:
out_size = diction[node.input[0]]
output_name = node.output[0]
macs = counter_zero_ops()
# if '140' in diction:
# print(diction['140'],output_name)
return macs, out_size, output_name
def onnx_counter_conv(diction, node):
# print(node)
# bias,kernelsize,outputsize
dim_bias = 0
input_count = 0
for i in node.input:
input_count += 1
if (input_count == 3):
dim_bias = 1
dim_weight = diction[node.input[1]]
else:
dim_weight = diction[node.input[1]]
for attr in node.attribute:
# print(attr)
if(attr.name == 'kernel_shape'):
dim_kernel = attr.ints # kw,kh
if(attr.name == 'strides'):
dim_stride = attr.ints
if(attr.name == 'pads'):
dim_pad = attr.ints
if(attr.name == 'dilations'):
dim_dil = attr.ints
if(attr.name == 'group'):
group = attr.i
# print(dim_dil)
dim_input = diction[node.input[0]]
output_size = np.append(
dim_input[0:-np.array(dim_kernel).size-1], dim_weight[0])
hw = np.array(dim_input[-np.array(dim_kernel).size:])
for i in range(hw.size):
hw[i] = int((hw[i]+2*dim_pad[i]-dim_dil[i] *
(dim_kernel[i]-1)-1)/dim_stride[i]+1)
output_size = np.append(output_size, hw)
macs = counter_conv(dim_bias, np.prod(dim_kernel),
np.prod(output_size), dim_weight[1], group)
output_name = node.output[0]
# if '140' in diction:
# print("conv",diction['140'],output_name)
return macs, output_size, output_name
def onnx_counter_constant(diction, node):
# print("constant",node)
macs = counter_zero_ops()
output_name = node.output[0]
output_size = [1]
#print(macs, output_size, output_name)
return macs, output_size, output_name
def onnx_counter_mul(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
input_size = diction[node.input[1]]
else:
input_size = diction[node.input[0]]
macs = counter_mul(np.prod(input_size))
output_size = diction[node.input[0]]
output_name = node.output[0]
return macs, output_size, output_name
def onnx_counter_bn(diction, node):
input_size = diction[node.input[0]]
macs = counter_norm(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_relu(diction, node):
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
output_size = input_size
#print(macs, output_size, output_name)
# if '140' in diction:
# print("relu",diction['140'],output_name)
return macs, output_size, output_name
def onnx_counter_reducemean(diction, node):
keep_dim = 0
for attr in node.attribute:
if('axes' in attr.name):
dim_axis = np.array(attr.ints)
elif('keepdims' in attr.name):
keep_dim = attr.i
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
if (keep_dim == 1):
output_size = input_size
else:
output_size = np.delete(input_size, dim_axis)
#output_size = input_size
return macs, output_size, output_name
def onnx_counter_sub(diction, node):
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_pow(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
input_size = diction[node.input[1]]
else:
input_size = diction[node.input[0]]
macs = counter_pow(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_sqrt(diction, node):
input_size = diction[node.input[0]]
macs = counter_sqrt(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_div(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
input_size = diction[node.input[1]]
else:
input_size = diction[node.input[0]]
macs = counter_div(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_instance(diction, node):
input_size = diction[node.input[0]]
macs = counter_norm(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_softmax(diction, node):
input_size = diction[node.input[0]]
dim = node.attribute[0].i
nfeatures = input_size[dim]
batch_size = np.prod(input_size) / nfeatures
macs = counter_softmax(nfeatures, batch_size)
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_pad(diction, node):
# # TODO add constant name and output real vector
# if
# if (np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size):
# input_size = diction[node.input[1]]
# else:
# input_size = diction[node.input[0]]
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_averagepool(diction, node):
# TODO add support of ceil_mode and floor
macs = counter_avgpool(np.prod(diction[node.input[0]]))
output_name = node.output[0]
dim_pad = None
for attr in node.attribute:
# print(attr)
if(attr.name == 'kernel_shape'):
dim_kernel = attr.ints # kw,kh
elif(attr.name == 'strides'):
dim_stride = attr.ints
elif(attr.name == 'pads'):
dim_pad = attr.ints
elif(attr.name == 'dilations'):
dim_dil = attr.ints
# print(dim_dil)
dim_input = diction[node.input[0]]
hw = dim_input[-np.array(dim_kernel).size:]
if dim_pad is not None:
for i in range(hw.size):
hw[i] = int((hw[i]+2*dim_pad[i]-dim_kernel[i])/dim_stride[i]+1)
output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
else:
for i in range(hw.size):
hw[i] = int((hw[i]-dim_kernel[i])/dim_stride[i]+1)
output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
#print(macs, output_size, output_name)
return macs, output_size, output_name
def onnx_counter_flatten(diction, node):
# print(node)
macs = counter_zero_ops()
output_name = node.output[0]
axis = node.attribute[0].i
input_size = diction[node.input[0]]
output_size = np.append(input_size[axis-1], np.prod(input_size[axis:]))
# print("flatten",output_size)
return macs, output_size, output_name
def onnx_counter_gemm(diction, node):
# print(node)
# Compute Y = alpha * A' * B' + beta * C
input_size = diction[node.input[0]]
dim_weight = diction[node.input[1]]
# print(input_size,dim_weight)
macs = np.prod(input_size) * dim_weight[1] + dim_weight[0]
output_size = np.append(input_size[0:-1], dim_weight[0])
output_name = node.output[0]
return macs, output_size, output_name
def onnx_counter_maxpool(diction, node):
# TODO add support of ceil_mode and floor
# print(node)
macs = counter_zero_ops()
output_name = node.output[0]
dim_pad = None
for attr in node.attribute:
# print(attr)
if(attr.name == 'kernel_shape'):
dim_kernel = attr.ints # kw,kh
elif(attr.name == 'strides'):
dim_stride = attr.ints
elif(attr.name == 'pads'):
dim_pad = attr.ints
elif(attr.name == 'dilations'):
dim_dil = attr.ints
# print(dim_dil)
dim_input = diction[node.input[0]]
hw = dim_input[-np.array(dim_kernel).size:]
if dim_pad is not None:
for i in range(hw.size):
hw[i] = int((hw[i]+2*dim_pad[i]-dim_kernel[i])/dim_stride[i]+1)
output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
else:
for i in range(hw.size):
hw[i] = int((hw[i]-dim_kernel[i])/dim_stride[i]+1)
output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
#print(macs, output_size, output_name)
return macs, output_size, output_name
def onnx_counter_globalaveragepool(diction, node):
macs = counter_zero_ops()
output_name = node.output[0]
input_size = diction[node.input[0]]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_concat(diction, node):
# print(node)
# print(diction[node.input[0]])
axis = node.attribute[0].i
input_size = diction[node.input[0]]
for i in node.input:
dim_concat = diction[i][axis]
output_size = input_size
output_size[axis] = dim_concat
output_name = node.output[0]
macs = counter_zero_ops()
return macs, output_size, output_name
def onnx_counter_clip(diction, node):
macs = counter_zero_ops()
output_name = node.output[0]
input_size = diction[node.input[0]]
output_size = input_size
return macs, output_size, output_name
onnx_operators = {
'MatMul': onnx_counter_matmul,
'Add': onnx_counter_add,
'Conv': onnx_counter_conv,
'Mul': onnx_counter_mul,
'Constant': onnx_counter_constant,
'BatchNormalization': onnx_counter_bn,
'Relu': onnx_counter_relu,
'ReduceMean': onnx_counter_reducemean,
'Sub': onnx_counter_sub,
'Pow': onnx_counter_pow,
'Sqrt': onnx_counter_sqrt,
'Div': onnx_counter_div,
'InstanceNormalization': onnx_counter_instance,
'Softmax': onnx_counter_softmax,
'Pad': onnx_counter_pad,
'AveragePool': onnx_counter_averagepool,
'MaxPool': onnx_counter_maxpool,
'Flatten': onnx_counter_flatten,
'Gemm': onnx_counter_gemm,
'GlobalAveragePool': onnx_counter_globalaveragepool,
'Concat': onnx_counter_concat,
'Clip': onnx_counter_clip,
None: None,
}
|
StarcoderdataPython
|
4826556
|
#! /usr/bin/env python3
# We need this to define our package
from setuptools import setup
# We use this to find and deploy our unittests
import unittest
import os
# We need to know the version to backfill some dependencies
from sys import version_info, exit
# Define our list of installation dependencies
DEPENDS = ["pyjwt", "snowflake-connector-python", "furl", "cryptography"]
# If we're at version less than 3.4 - fail
if version_info[0] < 3 or version_info[1] < 4:
exit("Unsupported version of Python. Minimum version for the Ingest SDK is 3.4")
# If we're at version 3.4, backfill the typing library
elif version_info[1] == 4:
DEPENDS.append("typing")
# Python 3.5.0 and 3.5.1 have incompatible typing modules. Use typing_extensions instead.
elif version_info[1] == 5 and version_info[2] < 2:
DEPENDS.append("typing_extensions")
here = os.path.abspath(os.path.dirname(__file__))
def test_suite():
"""
Defines the test suite for the snowflake ingest SDK
"""
loader = unittest.TestLoader()
return loader.discover("tests", pattern="test_*.py")
about = {}
with open(os.path.join(here, 'snowflake', 'ingest', 'version.py'),
mode='r', encoding='utf-8') as f:
exec(f.read(), about)
__version__ = about['__version__']
if 'SF_BUILD_NUMBER' in os.environ:
__version__ += ('.' + str(os.environ['SF_BUILD_NUMBER']))
with open(os.path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='snowflake_ingest',
version=__version__,
description='Official SnowflakeDB File Ingest SDK',
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
url='https://www.snowflake.net',
packages=['snowflake.ingest',
'snowflake.ingest.utils'],
license='Apache',
keywords="snowflake ingest sdk copy loading",
package_data={
'snowflake.ingest':['*.rst', 'LICENSE']
},
# From here we describe the package classifiers
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Topic :: Database"
],
# Now we describe the dependencies
install_requires=DEPENDS,
# At last we set the test suite
test_suite="setup.test_suite"
)
|
StarcoderdataPython
|
1736567
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from cmsplugin_cascade import settings
def remove_duplicates(lst):
"""
Emulate what a Python ``set()`` does, but keeping the element's order.
"""
dset = set()
return [l for l in lst if l not in dset and not dset.add(l)]
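# Illustrative doctest-style sketch (not part of the original module):
#     remove_duplicates(['a.js', 'b.js', 'a.js', 'c.js', 'b.js'])
#     # -> ['a.js', 'b.js', 'c.js']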
def resolve_dependencies(filenames):
"""
Given a filename literal or a list of filenames and a mapping of dependencies (use
``settings.CASCADE_PLUGIN_DEPENDENCIES`` to check for details), return a list of other files
resolving the dependency. The returned list is ordered, so that files having no further
dependency come as first element and the passed in filenames come as the last element.
Use this function to automatically resolve dependencies of CSS and JavaScript files in the
``Media`` subclasses.
"""
dependencies = []
if isinstance(filenames, (list, tuple)):
for filename in filenames:
dependencies.extend(resolve_dependencies(filename))
else:
filename = filenames
dependency_list = getattr(settings, 'CASCADE_PLUGIN_DEPENDENCIES', {}).get(filename)
if dependency_list:
dependencies.extend(resolve_dependencies(dependency_list))
dependencies.append(filename)
return remove_duplicates(dependencies)
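# Illustrative sketch (not part of the original module) with a hypothetical
# dependency mapping: if CASCADE_PLUGIN_DEPENDENCIES maps 'js/ring.js' to
# 'js/underscore.js' and 'js/underscore.js' to 'js/jquery.js', then
# resolve_dependencies('js/ring.js') returns
# ['js/jquery.js', 'js/underscore.js', 'js/ring.js'] -- dependencies first,
# the requested file last, with duplicates removed.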
def rectify_partial_form_field(base_field, partial_form_fields):
"""
In base_field, reset the label and help_text attributes, since they are overridden by the
partial field. Additionally, from the list (or list of lists) of partial_form_fields,
append the bound validator methods to the given base field.
"""
base_field.label = ''
base_field.help_text = ''
for fieldset in partial_form_fields:
if not isinstance(fieldset, (list, tuple)):
fieldset = [fieldset]
for field in fieldset:
base_field.validators.append(field.run_validators)
|
StarcoderdataPython
|
97476
|
<filename>src/su/aes.py
from Crypto.Cipher import AES
from Crypto.Util import Counter
from binascii import hexlify, unhexlify
import os
__all__ = ["encrypt", "decrypt", "main"]
MODE = AES.MODE_CTR
BS = AES.block_size
KS = AES.key_size[-1]
def _pad_bytes(byte_str, size):
if len(byte_str) > size:
return byte_str[:size]
return byte_str.ljust(size)
def encrypt(key, data, output_binary=False):
byte_key = bytes(key, 'utf-8') if not isinstance(key, bytes) else key
encrypt_key = _pad_bytes(byte_key, KS)
byte_data = bytes(data, 'utf-8') if not isinstance(data, bytes) else data
counter_prefix = os.urandom(BS)
cipher = AES.new(encrypt_key, MODE, counter=Counter.new(BS * 8))
byte_result = counter_prefix + cipher.encrypt(byte_data)
if output_binary:
return byte_result
return hexlify(byte_result).decode()
def decrypt(key, data, is_hex_input=True, output_binary=False):
byte_key = bytes(key, 'utf-8') if not isinstance(key, bytes) else key
encrypt_key = _pad_bytes(byte_key, KS)
input_byte_data = bytes(data, 'utf-8') if not isinstance(data, bytes) else data
byte_data = unhexlify(input_byte_data) if is_hex_input else input_byte_data
counter_prefix = byte_data[:BS]
ciphertext = byte_data[BS:]
cipher = AES.new(encrypt_key, MODE, counter=Counter.new(BS * 8))
byte_result = cipher.decrypt(ciphertext)
if output_binary:
return byte_result
return byte_result.decode()
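# Illustrative round-trip sketch (not part of the original module); the key and
# message are arbitrary demonstration values:
#     token = encrypt('my-secret-key', 'hello world')  # hex string with random prefix
#     assert decrypt('my-secret-key', token) == 'hello world'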
def main():
from argparse import ArgumentParser
parser = ArgumentParser('A simple encrypt/decrypt lib based on AES.\n')
parser.add_argument('data')
parser.add_argument('-k', '--key', dest='key', default='<KEY>')
parser.add_argument('-o', '--output', dest='output', default='')
parser.add_argument('-d', '--decrypt', dest='is_decrypt', default=False, action='store_true')
parser.add_argument('-f', '--is_file', dest='is_file', default=False, action='store_true')
parser.add_argument('-p', '--print', dest='print', default=False, action='store_true')
parser.add_argument('-t', '--is_test', dest='is_test', default=False, action='store_true')
options = parser.parse_args()
if options.is_file:
with open(options.data, 'rb') as f:
input_data = f.read()
else:
input_data = options.data
print(f'key: {options.key}')
input_preview = f'{input_data[:10]}...{input_data[-10:]}' if len(input_data) > 30 else input_data
print(f'input: {input_preview}')
if options.is_test:
encrypted = encrypt(options.key, input_data, output_binary=options.is_file)
options.print and print('encrypted: ', encrypted)
decrypted = decrypt(options.key, encrypted, is_hex_input=not options.is_file, output_binary=options.is_file)
options.print and print('decrypted: ', decrypted)
assert input_data == decrypted
output = encrypted
print('test passed')
elif not options.is_decrypt:
output = encrypted = encrypt(options.key, input_data)
options.print and print('encrypted: ', encrypted)
else:
output = decrypted = decrypt(options.key, input_data, is_hex_input=not options.is_file,
output_binary=options.is_file)
options.print and print('decrypted: ', decrypted)
if options.output:
with open(options.output, 'wb') as f:
            f.write(output if isinstance(output, bytes) else output.encode())
print(f'output: {options.output}')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1696460
|
<filename>qlink/duplicates_merger.py<gh_stars>1-10
import json
class Merger:
RANDOM_MODE = 0
ENRICH_MODE = 1
def __init__(self, dataframe, duplicates_path, result_path, merger_mode):
self.dataframe = dataframe
with open(duplicates_path, 'r') as fp:
self.duplicates = json.load(fp)[0]['items']
self.result_path = result_path
self.merger_mode = merger_mode
def enrich_records(self, cluster):
"""
:param cluster:
:return: int, row
"""
cluster = sorted(cluster)
main_record = self.dataframe.loc[cluster[0]]
for field_name in self.dataframe.columns.values:
if main_record[field_name] in [None, 'nan']:
for other_id in cluster[1:]:
potential_value = self.dataframe.iloc[other_id][field_name]
if potential_value not in [None, 'nan']:
main_record[field_name] = potential_value
break
return cluster[0], main_record
def merge_duplicates(self):
duplicate_ids = list(map(lambda d: list(map(int, d.keys())), self.duplicates))
for cluster in duplicate_ids:
main_id, main_record = self.enrich_records(cluster)
self.dataframe.loc[main_id] = main_record
cluster.remove(main_id)
for other_id in cluster:
self.dataframe.drop(other_id, inplace=True)
def save_results(self):
self.dataframe.to_csv(self.result_path, index=False)
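# Usage sketch (file names are hypothetical; duplicates.json must follow the
# [{"items": [{...}, ...]}] layout read in __init__):
#
#   import pandas as pd
#   merger = Merger(pd.read_csv('records.csv'), 'duplicates.json',
#                   'merged.csv', Merger.ENRICH_MODE)
#   merger.merge_duplicates()
#   merger.save_results()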
|
StarcoderdataPython
|
1741918
|
<filename>voltha/leader.py<gh_stars>0
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from hash_ring import HashRing
from structlog import get_logger
from twisted.internet import reactor
from twisted.internet.base import DelayedCall
from twisted.internet.defer import inlineCallbacks, DeferredList, returnValue
from simplejson import dumps, loads
from common.utils.asleep import asleep
from common.utils.id_generation import get_next_core_id
log = get_logger()
class ConfigMappingException(Exception):
pass
class Leader(object):
"""
A single instance of this object shall exist across the whole cluster.
This is guaranteed by the coordinator which instantiates this class
only when it secured the leadership lock, as well as calling the halt()
method in cases it looses the leadership lock.
"""
ID_EXTRACTOR = '^(%s)([^/]+)$'
CORE_STORE_KEY_EXTRACTOR = '^%s/(?P<core_store_id>[^/]+)/root$'
START_TIMESTAMP_EXTRACTOR = '^.*_([0-9]+)$'
ASSIGNMENT_ID_EXTRACTOR = '^(%s)([^/]+)/core_store$'
# Public methods:
def __init__(self, coordinator):
self.coord = coordinator
self.halted = False
self.soak_time = 3 # soak till membership/workload changes settle
self.workload = []
self.members = []
self.core_store_ids = []
self.core_store_assignment = None
self.reassignment_soak_timer = None
self.core_store_reassignment_soak_timer = None
self.workload_id_match = re.compile(
self.ID_EXTRACTOR % self.coord.workload_prefix).match
self.member_id_match = re.compile(
self.ID_EXTRACTOR % self.coord.membership_prefix).match
self.core_data_id_match = re.compile(
self.CORE_STORE_KEY_EXTRACTOR % self.coord.core_store_prefix).match
self.core_match = re.compile(self.coord.container_name_regex).match
self.timestamp_match = re.compile(self.START_TIMESTAMP_EXTRACTOR).match
self.assignment_id_match = re.compile(
self.ASSIGNMENT_ID_EXTRACTOR % self.coord.assignment_prefix).match
self.members_tracking_sleep_to_prevent_flood = \
self.coord.leader_config.get((self.coord.leader_config[
'members_track_error_to_prevent_flood']), 1)
@inlineCallbacks
def start(self):
log.debug('starting')
# yield self._validate_workload()
yield self._start_tracking_assignments()
log.info('started')
def stop(self):
"""Suspend leadership duties immediately"""
log.debug('stopping')
self.halted = True
# any active cancellations, releases, etc., should happen here
if isinstance(self.reassignment_soak_timer, DelayedCall):
if not self.reassignment_soak_timer.called:
self.reassignment_soak_timer.cancel()
if isinstance(self.core_store_reassignment_soak_timer, DelayedCall):
if not self.core_store_reassignment_soak_timer.called:
self.core_store_reassignment_soak_timer.cancel()
log.info('stopped')
# Private methods:
def _start_tracking_assignments(self):
"""
We must track both the cluster member list as well as the workload
list. Upon change in either, we must rerun our sharding algorithm
and reassign work as/if needed.
"""
reactor.callLater(0, self._track_members, 0)
@inlineCallbacks
def _get_core_store_mappings(self):
try:
# Get the mapping record
(_, mappings) = yield self.coord.kv_get(
self.coord.core_store_assignment_key, recurse=True)
if mappings:
self.core_store_assignment = loads(mappings[0]['Value'])
return
else: # Key has not been created yet
# Create the key with an empty dictionary value
value = dict()
result = yield self.coord.kv_put(
self.coord.core_store_assignment_key,
dumps(value))
if not result:
raise ConfigMappingException(self.instance_id)
# Ensure the record was created
(_, mappings) = yield self.coord.kv_get(
self.coord.core_store_assignment_key, recurse=True)
self.core_store_assignment = loads(mappings[0]['Value'])
except Exception, e:
log.exception('error', e=e)
@inlineCallbacks
def _update_core_store_references(self):
try:
# Get the current set of configs keys
(_, results) = yield self.coord.kv_get(
self.coord.core_store_prefix, recurse=False, keys=True)
matches = (self.core_data_id_match(e) for e in results or [])
core_ids = [m.group(1) for m in matches if m is not None]
self.core_store_ids = core_ids
# Update the config mapping
self._get_core_store_mappings()
log.debug('core-data', core_ids=core_ids,
assignment=self.core_store_assignment)
except Exception, e:
log.exception('error-update-store', e=e)
def _sanitize_member_list(self, members):
# This method removes any duplicates from the member list using the
# voltha number from the member id and the time that voltha instance
# started, again from the member id. This method is meaningful only
# in a clustered environment (e.g. Docker swarm or Kubernetes). In
# a non-cluster environment the member id is formatted differently.
# In such a case, the method below will create an exception and
# return the member list as is.
try:
unique_members = {}
update_occurred = False
log.info('members', members=members)
for member in members:
log.info('member', member=member)
# Extract the swarm assigned number of the voltha instance
voltha_number = self.core_match(member['id']).group(1)
timestamp = self.timestamp_match(member['id']).group(1)
if voltha_number not in unique_members:
unique_members[voltha_number] = {'id': member['id'],
'timestamp': timestamp,
'host': member['host']}
else:
# Verify whether if this member has the latest timestamp. If
# yes, overwrite the previous one
if unique_members[voltha_number]['timestamp'] < timestamp:
unique_members[voltha_number] = {'id': member['id'],
'timestamp': timestamp,
'host': member['host']}
update_occurred = True
if update_occurred:
updated_members = []
for _, unique_member in unique_members.iteritems():
updated_members.append({'host': unique_member['host'],
'id': unique_member['id']})
return updated_members
else:
return members
except Exception as e:
log.exception('extraction-error', e=e)
return members
@inlineCallbacks
def _is_temporal_state(self, members):
try:
# First get the current core assignments
(_, results) = yield self.coord.kv_get(
self.coord.assignment_prefix,
recurse=True)
log.debug('core-assignments', assignment=results)
if results:
old_assignment = [
{'id': self.assignment_id_match(e['Key']).group(2),
'core': e['Value']}
for e in results]
# If there are no curr_assignments then we are starting the
# system. In this case we should keep processing
if len(old_assignment) == 0:
returnValue(False)
# Tackle the simplest scenario - #members >= #old_assignment
if members is not None and len(members) >= len(old_assignment):
returnValue(False)
# Everything else is a temporal state
log.info('temporal-state-detected', members=members,
old_assignments=old_assignment)
returnValue(True)
else:
returnValue(False)
except Exception as e:
log.exception('temporal-state-error', e=e)
returnValue(True)
@inlineCallbacks
def _track_members(self, index):
previous_index = index
try:
log.info('member-tracking-before')
is_timeout, (tmp_index, results) = yield \
self.coord.consul_get_with_timeout(
key=self.coord.membership_prefix,
recurse=True,
index=index,
timeout=10)
# Check whether we are still the leader - a new regime may be in
# place by the time we see a membership update
if self.halted:
log.info('I am no longer the leader')
return
if is_timeout:
log.debug('timeout-or-no-membership-changed')
return
# This can happen if consul went down and came back with no data
if not results:
log.error('no-active-members')
# Bail out of leadership and go for an early election
self.coord._just_lost_leadership()
return
# After timeout event the index returned from
# consul_get_with_timeout is None. If we are here it's not a
# timeout, therefore the index is a valid one.
index=tmp_index
log.info('membership-tracking-data', index=index, results=results)
if previous_index != index:
log.info('membership-updated',
previous_index=previous_index, index=index)
# Rebuild the membership, if any
# Only members with valid session are considered active
members = [{'id': self.member_id_match(e['Key']).group(2),
'host': loads(e['Value'])['host_address']}
for e in results if 'Session' in e]
if members:
updated_members = self._sanitize_member_list(members)
else:
updated_members = None
log.info('active-members', active_members=members,
sanitized_members=updated_members)
# Check if we are in a temporal state. If true wait for the
# next membership changes
temporal_state = yield self._is_temporal_state(updated_members)
if temporal_state:
log.info('temporal-state-detected')
pass # Wait for next member list change
elif updated_members != self.members:
# if the two sets are the same
# update the current set of config
yield self._update_core_store_references()
log.info('membership-changed',
prev_members=self.members,
curr_members=updated_members,
core_store_mapping=self.core_store_assignment)
self.members = updated_members
self._restart_core_store_reassignment_soak_timer()
else:
log.debug('no-membership-change', index=index)
except Exception, e:
log.exception('members-track-error', e=e)
# to prevent flood
yield asleep(self.members_tracking_sleep_to_prevent_flood)
finally:
if not self.halted:
reactor.callLater(1, self._track_members, index)
def _restart_reassignment_soak_timer(self):
if self.reassignment_soak_timer is not None:
assert isinstance(self.reassignment_soak_timer, DelayedCall)
if not self.reassignment_soak_timer.called:
self.reassignment_soak_timer.cancel()
self.reassignment_soak_timer = reactor.callLater(
self.soak_time, self._reassign_work)
def _restart_core_store_reassignment_soak_timer(self):
if self.core_store_reassignment_soak_timer is not None:
assert isinstance(self.core_store_reassignment_soak_timer, DelayedCall)
if not self.core_store_reassignment_soak_timer.called:
self.core_store_reassignment_soak_timer.cancel()
self.core_store_reassignment_soak_timer = reactor.callLater(
self.soak_time, self._reassign_core_stores)
@inlineCallbacks
def _reassign_core_stores(self):
def _get_core_data_id_from_instance(instance_name):
for id, instance in self.core_store_assignment.iteritems():
if instance and instance['id'] == instance_name:
return id
try:
log.info('core-members', curr_members=self.members,
prev_members=self.core_store_assignment)
# 1. clear the mapping for instances that are no longer running
updated_mapping = dict()
existing_active_config_members = set()
cleared_config_ids = set()
inactive_members = set()
if self.core_store_assignment:
for id, instance in self.core_store_assignment.iteritems():
if instance not in self.members:
updated_mapping[id] = None
cleared_config_ids.add(id)
if instance:
inactive_members.add(instance['id'])
else:
updated_mapping[id] = instance
existing_active_config_members.add(instance['id'])
# 2. Update the mapping with the new set
current_id = max(self.core_store_assignment) \
if self.core_store_assignment else '0000'
for instance in self.members:
if instance['id'] not in existing_active_config_members:
# Add the member to the config map
if cleared_config_ids:
# There is an empty slot
next_id = cleared_config_ids.pop()
updated_mapping[next_id] = instance
else:
# There are no empty slot, create new ids
current_id = get_next_core_id(current_id)
updated_mapping[current_id] = instance
self.core_store_assignment = updated_mapping
log.info('updated-assignment',
core_store_assignment=self.core_store_assignment,
inactive_members=inactive_members)
# 3. save the mapping into consul
yield self.coord.kv_put(self.coord.core_store_assignment_key,
dumps(self.core_store_assignment))
# 4. Assign the new workload to the newly created members
curr_members_set = set([m['id'] for m in self.members])
new_members = curr_members_set.difference(
existing_active_config_members)
for new_member_id in new_members:
yield self.coord.kv_put(
self.coord.assignment_prefix
+ new_member_id + '/' +
self.coord.core_storage_suffix,
_get_core_data_id_from_instance(new_member_id))
# 5. Remove non-existent members
for member_id in inactive_members:
yield self.coord.kv_delete(
self.coord.assignment_prefix + member_id, recurse=True)
yield self.coord.kv_delete(
self.coord.membership_prefix + member_id,
recurse=True)
except Exception as e:
log.exception('config-reassignment-failure', e=e)
self._restart_core_store_reassignment_soak_timer()
|
StarcoderdataPython
|
44679
|
#!/usr/bin/env python
import numpy
import storm_analysis
import storm_analysis.simulator.pupil_math as pupilMath
def test_pupil_math_1():
"""
Test GeometryC, intensity, no scaling.
"""
geo = pupilMath.Geometry(20, 0.1, 0.6, 1.5, 1.4)
geo_c = pupilMath.GeometryC(20, 0.1, 0.6, 1.5, 1.4)
pf = geo.createFromZernike(1.0, [[1.3, -1, 3], [1.3, -2, 2]])
z_vals = numpy.linspace(-1.0,1.0,10)
psf_py = geo.pfToPSF(pf, z_vals)
psf_c = geo_c.pfToPSF(pf, z_vals)
assert numpy.allclose(psf_c, psf_py)
def test_pupil_math_2():
"""
Test GeometryC, complex values, no scaling.
"""
geo = pupilMath.Geometry(20, 0.1, 0.6, 1.5, 1.4)
geo_c = pupilMath.GeometryC(20, 0.1, 0.6, 1.5, 1.4)
pf = geo.createFromZernike(1.0, [[1.3, -1, 3], [1.3, -2, 2]])
z_vals = numpy.linspace(-1.0,1.0,10)
psf_py = geo.pfToPSF(pf, z_vals, want_intensity = False)
psf_c = geo_c.pfToPSF(pf, z_vals, want_intensity = False)
assert numpy.allclose(psf_c, psf_py)
def test_pupil_math_3():
"""
Test GeometryC, intensity, scaling.
"""
geo = pupilMath.Geometry(20, 0.1, 0.6, 1.5, 1.4)
geo_c = pupilMath.GeometryC(20, 0.1, 0.6, 1.5, 1.4)
pf = geo.createFromZernike(1.0, [[1.3, -1, 3], [1.3, -2, 2]])
z_vals = numpy.linspace(-1.0,1.0,10)
gsf = geo.gaussianScalingFactor(1.8)
psf_py = geo.pfToPSF(pf, z_vals, scaling_factor = gsf)
psf_c = geo_c.pfToPSF(pf, z_vals, scaling_factor = gsf)
assert numpy.allclose(psf_c, psf_py)
def test_pupil_math_4():
"""
Test GeometryCVectorial, intensity, no scaling.
"""
geo = pupilMath.GeometryVectorial(20, 0.1, 0.6, 1.5, 1.4)
geo_c = pupilMath.GeometryCVectorial(20, 0.1, 0.6, 1.5, 1.4)
pf = geo.createFromZernike(1.0, [[1.3, -1, 3], [1.3, -2, 2]])
z_vals = numpy.linspace(-1.0,1.0,10)
psf_py = geo.pfToPSF(pf, z_vals)
psf_c = geo_c.pfToPSF(pf, z_vals)
assert numpy.allclose(psf_c, psf_py)
def test_pupil_math_5():
"""
Test GeometryCVectorial, intensity, scaling.
"""
geo = pupilMath.GeometryVectorial(20, 0.1, 0.6, 1.5, 1.4)
geo_c = pupilMath.GeometryCVectorial(20, 0.1, 0.6, 1.5, 1.4)
pf = geo.createFromZernike(1.0, [[1.3, -1, 3], [1.3, -2, 2]])
z_vals = numpy.linspace(-1.0,1.0,10)
gsf = geo.gaussianScalingFactor(1.8)
    psf_py = geo.pfToPSF(pf, z_vals, scaling_factor = gsf)
    psf_c = geo_c.pfToPSF(pf, z_vals, scaling_factor = gsf)
    assert numpy.allclose(psf_c, psf_py)
if (__name__ == "__main__"):
    test_pupil_math_1()
    test_pupil_math_2()
    test_pupil_math_3()
    test_pupil_math_4()
    test_pupil_math_5()
|
StarcoderdataPython
|
1711825
|
<reponame>smartdolphin/toxic-comment-classification<gh_stars>1-10
from keras import Model
from keras.layers import Input, Dense, Embedding, Conv1D
from keras.layers import GRU, Bidirectional
from keras.layers import SpatialDropout1D, GlobalMaxPooling1D, GlobalAveragePooling1D
from keras.layers import concatenate
from keras import optimizers
def classifier_model(units,
max_length,
max_features,
output_num,
embedding_size,
dropout_rate,
embedding_matrix,
filter_num,
filter_size,
non_static=False,
zero_masking=False):
inp = Input(shape=(max_length,))
emb = Embedding(max_features, embedding_size, weights=[embedding_matrix],
trainable=non_static, mask_zero=zero_masking)(inp)
x = SpatialDropout1D(dropout_rate)(emb)
x = Bidirectional(GRU(units, return_sequences=True))(x)
x = Conv1D(filter_num, kernel_size=filter_size, padding="valid", kernel_initializer="he_uniform")(x)
avg_pool = GlobalAveragePooling1D()(x)
max_pool = GlobalMaxPooling1D()(x)
x = concatenate([avg_pool, max_pool])
x = Dense(output_num, activation="sigmoid")(x)
model = Model(inputs=inp, outputs=x)
model.compile(loss='binary_crossentropy',
optimizer=optimizers.Adam(),
metrics=['accuracy'])
model.summary()
return model
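# Construction sketch (all hyperparameters below are made up; embedding_matrix
# must have shape (max_features, embedding_size)):
#
#   import numpy as np
#   model = classifier_model(units=128, max_length=200, max_features=20000,
#                            output_num=6, embedding_size=300, dropout_rate=0.2,
#                            embedding_matrix=np.zeros((20000, 300)),
#                            filter_num=64, filter_size=3)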
|
StarcoderdataPython
|
3305
|
<filename>script.py<gh_stars>0
import os
import pyfiglet
from pytube import YouTube, Playlist
file_size = 0
folder_name = ""
# Progress Bar
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='#', print_end="\r"):
percent = ("{0:." + str(decimals) + "f}").format(100 *
(iteration / float(total)))
filled_length = int(length * iteration // total)
bar = fill * filled_length + '-' * (length - filled_length)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end=print_end)
if iteration == total:
print()
# Show Progress Bar
def show_progress_bar(chunk, file_handle, bytes_remaining):
print_progress_bar(file_size - bytes_remaining, file_size, prefix='Progress:', suffix='Complete', length=50)
return
# Get Download Location
def get_download_location():
    # The same default "Downloads" folder is used on every platform.
    return os.path.join(os.path.expanduser('~'), 'Downloads')
# Get Desired Resolution
def get_resolution(video_url):
yt_obj = YouTube(video_url, on_progress_callback=show_progress_bar)
filters = yt_obj.streams.filter(progressive=True, file_extension='mp4')
print("\nAvailable Resolutions -")
for num, res in enumerate(filters, start=1):
print("\t{}. {}".format(num, str(res.resolution)))
selected_res = int(input('Please enter desired resolution : '))
filters = filters[selected_res - 1]
return filters
# Single Video Download
def download_video():
global file_size
try:
video_url = input('Provide Video Download Link : ')
filters = get_resolution(video_url)
file_size = int(filters.filesize)
download_location = get_download_location()
print("\nDownloading {}".format(str(filters.title)))
filters.download(output_path=download_location)
print("Video Downloaded. Thanks for using!!\nYou can find the video here - {}".format(download_location))
except Exception as e:
print("Some Error occured. Exception message is : ", e)
# Playlist Single Video Download
def download_playlist_video(video_url, res):
global file_size
yt_obj = YouTube(video_url, on_progress_callback=show_progress_bar)
filters = yt_obj.streams.filter(progressive=True, file_extension='mp4', resolution=res).first()
    if not filters:
        filters = yt_obj.streams.filter(
            progressive=True, file_extension='mp4').first()
    file_size = int(filters.filesize)
print("\nDownloading {}".format(str(filters.title)))
download_location = get_download_location()
filters.download(output_path="{}/{}".format(download_location, folder_name))
print("Download Complete")
# Playlist Download
def download_playlist():
global folder_name
try:
playlist_url = input('Provide Playlist Link : ')
videos_list = Playlist(playlist_url)
folder_name = videos_list.title
resolution = get_resolution(videos_list[0]).resolution
for video in videos_list:
download_playlist_video(video, resolution)
print("All Videos Downloaded. Thanks for Using!!")
except Exception as e:
print("Some Error occurred. Exception message is : ", e)
# Main Function
def main():
ascii_banner = pyfiglet.figlet_format("YT Downloader")
print(ascii_banner)
print("\t By <NAME>\n\n")
choice = int(input(
"""MENU
1.Download Single Video
2.Download Playlist\n
Enter Your Choice : """))
if choice == 1:
download_video()
elif choice == 2:
download_playlist()
else:
print("Wrong Option")
# Start of Program
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
103106
|
<filename>api/app.py
# -*- encoding: utf-8 -*-
'''
@File : app.py
@Time : 2020/04/29 01:22:13
@Author : white_walker cailiang
@Version : 1.0
@Contact : <EMAIL>
@Desc : None
'''
# here put the import lib
from flask import Flask, request, make_response
from thunder_subtitle.search import search, get_url
import mimetypes
from urllib.parse import quote
app = Flask(__name__)
@app.route('/api/state')
def api_state():
return {'state': 'ok'}, 200
@app.route('/api/subs', methods=['POST'])
def get_subs():
print('fpath: ', request.form.get('fpath'))
fpath = request.form.get('fpath')
if not fpath:
fpath = request.args.get('fpath')
try:
subs = search(fpath)
print('subs raw data: ', subs)
except Exception as e:
return {'message': "get subs found a exception: {}".format(e), 'status':'0000', 'subs': []}, 200
if not subs:
return {'message': 'not found subs', 'status':'1110', 'subs':[]}, 200
subs.sort(key=lambda x: x['rate'], reverse=True)
return {'message': 'search success', 'status':'1111', 'subs': subs}, 200
@app.route('/api/downsub', methods=['POST'])
def download_sub():
surl = request.form.get('surl')
sname = request.form.get('sname')
print("surl:", surl, "sname:", sname)
data = get_url(surl)
# with open(sname, 'wb') as f:
# f.write(data)
# return {'message': 'download success'}, 200
response = make_response(data)
# mime_type = mimetypes.guess_type(surl)
# response.headers['Content-Type'] = mime_type
response.headers['Content-Type'] = "application/text, charset=utf-8"
response.headers['Content-Disposition'] = 'attachment; filename={}'.format(quote(sname))
return response
@app.after_request
def after_request(response):
header = response.headers
header['Access-Control-Allow-Origin'] = '*'
header['Access-Control-Expose-Headers'] = '*'
header['Access-Control-Allow-Headers'] = '*'
return response
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True, threaded=True)
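# Client sketch (paths match the routes above; the host/port assume the default
# app.run() settings, and the file names are hypothetical):
#
#   import requests
#   requests.post('http://localhost:5000/api/subs', data={'fpath': 'movie.mkv'}).json()
#   requests.post('http://localhost:5000/api/downsub',
#                 data={'surl': sub_url, 'sname': 'movie.srt'}).content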
|
StarcoderdataPython
|
3289089
|
#!/usr/bin/env python
# encoding: utf-8
class Solution(object):
def maxProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
cur_min = cur_max = 0
pre_min = pre_max = 1
maxval = float("-inf")
for num in nums:
cur_min = min(pre_min*num, pre_max*num, num)
cur_max = max(pre_min*num, pre_max*num, num)
pre_min = cur_min
pre_max = cur_max
maxval = max(maxval, cur_max)
return maxval
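# A quick sanity check (a sketch; expected values follow the classic maximum
# product subarray problem):
if __name__ == "__main__":
    assert Solution().maxProduct([2, 3, -2, 4]) == 6   # best subarray is [2, 3]
    assert Solution().maxProduct([-2, 0, -1]) == 0     # a single 0 beats any negative product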
|
StarcoderdataPython
|
1741474
|
# Copyright (C) by <NAME>. See LICENSE.txt for licensing information.
import os
import imp
import traceback
#try to import module
def __try_module_import(filename):
directory, module_name = os.path.split(filename)
module_name = os.path.splitext(module_name)[0]
try:
fp, pathname, description = imp.find_module(module_name, [directory])
module = imp.load_module(module_name, fp, pathname, description)
except Exception as ex:
print 'Could not import', filename, ex
print traceback.format_exc()
#recursive search for modules in path
def __module_import(p):
if not os.path.exists(p): return
if os.path.isfile(p):
return __try_module_import(p)
if not os.path.isdir(p): return
if os.path.exists(os.path.join(p, '__init__.py')):
return __try_module_import(p)
for sub in os.listdir(p):
name, ext = os.path.splitext(sub)
#prefer .pyo over .pyc, prefer .pyc over .py
has_pyc = os.path.exists(os.path.join(p, name+'.pyc'))
has_pyo = os.path.exists(os.path.join(p, name+'.pyo'))
if ext == ".py" and (has_pyc or has_pyo): continue
if ext == ".pyc" and has_pyo: continue
__module_import(os.path.join(p, sub))
#separate the paths and load each one
def __load_modules_from_paths(paths, suffix):
if not paths: return
for path in paths.split(os.pathsep):
if not path: continue
if suffix: path = os.path.join(path, suffix)
__module_import(path)
__load_modules_from_paths(os.getenv("GRAS_ROOT", "@GRAS_ROOT@"), os.path.join("lib@LIB_SUFFIX@", "gras", "python"))
__load_modules_from_paths(os.getenv("GRAS_PATH", ""), os.path.join("lib@LIB_SUFFIX@", "gras", "python"))
__load_modules_from_paths(os.getenv("GRAS_PYTHON_PATH", ""), "")
|
StarcoderdataPython
|
1703147
|
<gh_stars>1-10
# ============================================================================================================================ #
# PROTEIN STRUCTURE PREDICTION : REINFORCEMENT LEARNING AGENTS #
# ============================================================================================================================ #
# ================= #
# IMPORT LIBRARIES #
# ================= #
import numpy as np
from collections import OrderedDict
import pickle
import tensorflow as tf
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.losses import Huber
from .utils import moving_average, softmax, Memory
# =================================== #
# CLASS DEFINITION : Q-LEARNING AGENT #
# =================================== #
class QAgent:
def __init__(self, state_size, action_size, alpha = 1.0, gamma = 0.9, epsilon = 1.0, epsilon_min = 0.05, epsilon_decay = 0.99985):
# Parameters
self.state_size = state_size # Size of the state space (not used)
self.action_size = action_size # Size of the action space
self.gamma = gamma # Discount factor
self.epsilon = epsilon # Exploration rate
self.epsilon_min = epsilon_min # Minimum exploration rate
self.epsilon_decay = epsilon_decay # Exploration decay rate
self.alpha = alpha # Learning rate of the Q-learning update rule
        self.temperature = 1.0 # Temperature constant for the Boltzmann Distributed Exploration
self.temperature_min = 0.01 # Minimum temperature
self.temperature_decay = 0.99991 # Temperature decay rate
# Q Matrix
self.Q = OrderedDict({((0,0),(0,1)) : [0, 0, 0]}) # Initial State : ((0,0), (0,1))
def act(self, state, policy='egreedy'):
"""Method to select an action according to a policy."""
if policy == 'egreedy' :
# With probability (1-epsilon), take the best action (exploit)
if np.random.uniform(0, 1) > self.epsilon: action = np.argmax(self.Q[tuple(state)])
# With probability epsilon, take random action (explore)
else: action = np.random.choice(self.action_size)
elif policy == 'boltzmann' :
# Take action according to boltzmann distribution
Q_dist = softmax(np.array(self.Q[tuple(state)])/self.temperature)
action = np.random.choice(range(self.action_size), p=Q_dist)
else : # greedy policy
action = np.argmax(self.Q[tuple(state)])
return action
def train(self, state, action, reward, next_state, done):
"""Method to update the Q entries."""
# Check if next_state is in Q, if not add it
if tuple(next_state) not in self.Q.keys():
self.Q[tuple(next_state)] = [0, 0, 0]
if done:
target = reward
else:
target = reward + self.gamma * np.max(self.Q[tuple(next_state)])
# Update Q entry of Current State
self.Q[tuple(state)][action] = (1 - self.alpha) * self.Q[tuple(state)][action] + self.alpha * target
return None
def save(self, fname='../models/model.pkl'):
"""Method to save a Q-Learning agent."""
with open(fname, 'wb') as output:
pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
return None
@staticmethod
def load(fname):
"""Method to load a Q-Learning agent."""
with open(fname, 'rb') as f:
print(fname)
agent = pickle.load(f)
return agent
# ============================================= #
# CLASS DEFINITION : DEEP Q-NETWORK (DQN) AGENT #
# ============================================= #
class DQNAgent:
def __init__(self, input_dim, nb_actions, learning_rate = 0.001, batch_size = 32, memory_capacity=10000, gamma = 0.95, epsilon = 1.0, epsilon_min = 0.05, epsilon_decay = 0.99997):
"""Initializes a Deep Q-Network (DQN) Agent."""
# Parameters
self.input_dim = input_dim # Input dimension of the neural network
        self.nb_actions = nb_actions # Number of possible actions
self.memory = Memory(capacity=memory_capacity) # Replay memory
self.gamma = gamma # Discount factor
self.epsilon = epsilon # Exploration rate
self.epsilon_min = epsilon_min # Minimum exploration rate
self.epsilon_decay = epsilon_decay # Exploration decay rate
self.learning_rate = learning_rate # Learning rate of neural network
self.batch_size = batch_size # Batch size
# Models
self.policy_network = self._build_model()
def _build_model(self):
"""Builds architecture of neural network."""
model = Sequential()
model.add(Dense(16, input_dim=self.input_dim, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(self.nb_actions, activation='linear'))
model.compile(loss=Huber(), optimizer=Adam(lr=self.learning_rate))
return model
def remember(self, state, action, next_state, reward, done):
"""Stores memory of experienced events."""
self.memory.push(state, action, next_state, reward, done)
def act(self, state, policy='egreedy'):
"""Method to select an action according to a policy."""
if policy == 'egreedy' : # e-greedy policy
if np.random.uniform(0, 1) > self.epsilon : # With probability (1-epsilon), take the best action (exploit, Greedy Policy)
action = np.argmax(self.policy_network.predict(state.reshape(1,-1)).squeeze(0))
else : # With probability epsilon, take random action (explore)
action = np.random.choice(self.nb_actions)
else : # greedy policy
action = np.argmax(self.policy_network.predict(state.reshape(1,-1)).squeeze(0))
return action
def train(self):
"""Trains the policy neural network using transitions randomly sampled from memory."""
if len(self.memory) < self.batch_size:
return {'loss':[np.nan]}
batch = self.memory.sample(self.batch_size)
input_batch = np.vstack(batch.state)
target_batch = self.policy_network.predict(input_batch)
next_Q = self.policy_network.predict(np.vstack(batch.next_state))
ix = np.arange(self.batch_size)
target_batch[ix, batch.action] = np.array(batch.reward) + (1 - np.array(batch.done)) * self.gamma * np.max(next_Q, axis=1)
history = self.policy_network.fit(input_batch, target_batch, batch_size=self.batch_size, epochs=1, verbose=0)
return history.history
def save(self, fname='../models/model.h5'):
"""Method to save the Policy Network of a DQN agent."""
self.policy_network.save(fname)
return None
@staticmethod
def load(fname):
"""Method to load a Policy Network in a new DQN agent."""
agent = DQNAgent(14, 3)
agent.policy_network = load_model(fname)
return agent
# ===================================================== #
# CLASS DEFINITION : DOUBLE DEEP Q-NETWORK (DDQN) AGENT #
# ===================================================== #
class DDQNAgent:
def __init__(self, input_dim, nb_actions, learning_rate = 0.001, batch_size = 64, target_update = 100, memory_capacity=10000, gamma = 0.95, epsilon = 1.0, epsilon_min = 0.05, epsilon_decay = 0.99997):
"""Initializes a Double Deep Q-Network (DDQN) Agent."""
# Parameters
self.input_dim = input_dim # Input dimension of the neural network
        self.nb_actions = nb_actions # Number of possible actions
        self.memory = Memory(capacity=memory_capacity) # Replay memory
        self.gamma = gamma # Discount factor
        self.epsilon = epsilon # Exploration rate
        self.epsilon_min = epsilon_min # Minimum exploration rate
        self.epsilon_decay = epsilon_decay # Exploration decay rate
        self.learning_rate = learning_rate # Learning rate of neural network
        self.batch_size = batch_size # Batch size
        self.target_update = target_update # Steps between target-network updates
# Models
self.policy_network = self._build_model()
self.target_network = self._build_model()
self.update_target_network()
def _build_model(self):
"""Builds architecture of neural network."""
model = Sequential()
model.add(Dense(16, input_dim=self.input_dim, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(self.nb_actions, activation='linear'))
model.compile(loss=Huber(), optimizer=Adam(lr=self.learning_rate))
return model
def update_target_network(self):
"""Copy the weights from Model into Target_Model"""
self.target_network.set_weights(self.policy_network.get_weights())
def remember(self, state, action, next_state, reward, done):
"""Stores memory of experienced events."""
self.memory.push(state, action, next_state, reward, done)
def act(self, state, policy='egreedy'):
"""Method to select an action according to a policy."""
if policy == 'egreedy' : # e-greedy policy
if np.random.uniform(0, 1) > self.epsilon : # With probability (1-epsilon), take the best action (exploit, Greedy Policy)
action = np.argmax(self.policy_network.predict(state.reshape(1,-1)).squeeze(0))
else : # With probability epsilon, take random action (explore)
action = np.random.choice(self.nb_actions)
else : # greedy policy
action = np.argmax(self.policy_network.predict(state.reshape(1,-1)).squeeze(0))
return action
def train(self):
"""Trains the neural network using experiences randomly sampled from memory."""
if len(self.memory) < self.batch_size:
return {'loss':[np.nan]}
batch = self.memory.sample(self.batch_size)
input_batch = np.vstack(batch.state)
target_batch = self.policy_network.predict(input_batch)
next_Q = self.target_network.predict(np.vstack(batch.next_state))
ix = np.arange(self.batch_size)
target_batch[ix, batch.action] = np.array(batch.reward) + (1 - np.array(batch.done)) * self.gamma * np.max(next_Q, axis=1)
history = self.policy_network.fit(input_batch, target_batch, batch_size=self.batch_size, epochs=1, verbose=0)
return history.history
def save(self, fname='../models/model.h5'):
"""Method to save the Policy Network of a DDQN agent."""
self.policy_network.save(fname)
return None
@staticmethod
def load(fname):
"""Method to load a Policy Network in a new DDQN agent."""
agent = DDQNAgent(14, 3)
agent.policy_network = load_model(fname)
return agent
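# Training-loop sketch (the `env` object below is hypothetical; anything with
# reset()/step() returning states, rewards and done flags in the shapes the agent
# expects would do; epsilon decay is left to the caller, as in the code above):
#
#   agent = DQNAgent(input_dim=14, nb_actions=3)
#   for episode in range(n_episodes):
#       state, done = env.reset(), False
#       while not done:
#           action = agent.act(state)
#           next_state, reward, done = env.step(action)
#           agent.remember(state, action, next_state, reward, done)
#           agent.train()
#           state = next_state
#       agent.epsilon = max(agent.epsilon_min, agent.epsilon * agent.epsilon_decay)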
|
StarcoderdataPython
|
1641964
|
from django.db import models
# Create your models here.
class FileAndImage(models.Model):
createdTime = models.DateTimeField(auto_now_add=True)
filePath = models.CharField(max_length=100,default="")
|
StarcoderdataPython
|
3359558
|
<filename>source/pkgsrc/graphics/py-imaging/patches/patch-PIL_IcnsImagePlugin.py
$NetBSD: patch-PIL_IcnsImagePlugin.py,v 1.1 2014/09/07 09:37:46 spz Exp $
Icns DOS fix -- CVE-2014-3589
from https://github.com/python-pillow/Pillow/commit/205e056f8f9b06ed7b925cf8aa0874bc4aaf8a7d
--- PIL/IcnsImagePlugin.py.orig 2009-11-01 00:44:11.000000000 +0000
+++ PIL/IcnsImagePlugin.py
@@ -115,6 +115,8 @@ class IcnsFile:
i = HEADERSIZE
while i < filesize:
sig, blocksize = nextheader(fobj)
+ if blocksize <= 0:
+ raise SyntaxError('invalid block header')
i = i + HEADERSIZE
blocksize = blocksize - HEADERSIZE
dct[sig] = (i, blocksize)
|
StarcoderdataPython
|
1740486
|
<filename>data collection.py
import threading
import time
from pynput import keyboard
import websocket
import _thread
import json
import ssl
import numpy as np
import pickle
auth_key = ""
values = []
current_keys = set()
is_esc = False
key_mapped_values = {i: [] for i in ["up-left", "up-right", "down-left", "down-right", "up", "down", "left", "right", "space", "enter"]}
key_values_seq = ""
key_pressed = ""
def on_press(key):
global current_keys
global key_pressed
global key_mapped_values
global key_values_seq
current_keys.add(key)
if len(current_keys)>0:
key_pressed = ""
if keyboard.Key.up in current_keys:
if keyboard.Key.left in current_keys:
key_pressed = "up-left"
elif keyboard.Key.right in current_keys:
key_pressed = "up-right"
else:
key_pressed = "up"
elif keyboard.Key.down in current_keys:
if keyboard.Key.left in current_keys:
key_pressed = "down-left"
elif keyboard.Key.right in current_keys:
key_pressed = "down-right"
else:
key_pressed = "down"
elif keyboard.Key.left in current_keys:
key_pressed = "left"
elif keyboard.Key.right in current_keys:
key_pressed = "right"
if keyboard.Key.space in current_keys:
key_pressed = "space"
if keyboard.Key.enter in current_keys:
key_pressed = "enter"
print(key_pressed)
# print(key_mapped_values)
# print(values)
try:
key_mapped_values[key_pressed].append(values)
key_values_seq += key_pressed + str(values) + "\n"
except Exception as e:
print("error", e)
# print(key_mapped_values)
def on_release(key):
global current_keys
global is_esc
current_keys.remove(key)
if key == keyboard.Key.esc:
print("true")
is_esc = True
save_vals()
return False # Stop listener
# ids -1: login, 0: auth, 1: headset search, 2: create session, 3: close session, 4: gyro info, 5: close gyro info
def readEmotiv(ws):
global auth_key
while auth_key == "":
pass
print("#" * 10, "Authorized through", auth_key)
payload = {
"jsonrpc": "2.0",
"method": "queryHeadsets",
"params": {},
"id": 1
}
ws.send(json.dumps(payload))
payload = {
"jsonrpc": "2.0",
"method": "createSession",
"params": {
"_auth": auth_key,
"status": "open",
"headset": "EPOCPLUS-3B9AE2E6"
},
"id": 2
}
ws.send(json.dumps(payload))
payload = {
"jsonrpc": "2.0",
"method": "subscribe",
"params": {
"_auth": auth_key,
"streams": [
"pow"
]
},
"id": 4
}
ws.send(json.dumps(payload))
def on_open(ws):
print("on_open")
print("#" * 10, "Connected to headset")
payload = {
"jsonrpc": "2.0",
"method": "getUserLogin",
"id": -1
}
ws.send(json.dumps(payload))
print("sending login info")
payload = {
"jsonrpc": "2.0",
"method": "authorize",
"params": {},
"id": 0
}
ws.send(json.dumps(payload))
# print("sending authorization req")
_thread.start_new_thread(readEmotiv, (ws,))
def on_message(ws, message):
global auth_key
global values
message = json.loads(message)
try:
if message["id"] == 0:
auth_key = message["result"]["_auth"]
print("!" * 5, "on_message", message)
except:
if message["pow"]:
# print("!" * 5, "on_data", message)
values = np.array(message["pow"])
# print(key_pressed, values)
# print(is_esc)
if is_esc:
return False
def on_error(ws, error):
print("on_error")
print(error)
def on_close(ws):
payload = {
"jsonrpc": "2.0",
"method": "unsubscribe",
"params": {
"_auth": auth_key,
"streams": [
"pow"
]
},
"id": 5
}
ws.send(json.dumps(payload))
payload = {
"jsonrpc": "2.0",
"method": "updateSession",
"params": {
"_auth": auth_key,
"status": "close"
},
"id": 3
}
ws.send(json.dumps(payload))
ws.close()
print("on_close")
def save_vals():
with open("dataset.pkl","wb") as f:
pickle.dump(key_mapped_values, f)
with open("dataseq.txt","w") as g:
g.write(key_values_seq)
def epochInput():
ws = websocket.WebSocketApp("wss://emotivcortex.com:54321", on_message=on_message, on_close=on_close,
on_error=on_error,
on_open=on_open, )
ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
ws.close()
listener = keyboard.Listener(on_press=on_press, on_release=on_release)
listener.start()
epochInput()
listener.join()
print("!!!! Finished Recording ")
for i in key_mapped_values.keys():
print(i, len(key_mapped_values[i]))
|
StarcoderdataPython
|
3240182
|
from rest_framework.test import APITestCase, APIClient
from django.urls import reverse
from rest_framework.authtoken.models import Token
class NotificationsTest(APITestCase):
"""
Test the metadata APIv2 endpoint.
"""
fixtures = ['dojo_testdata.json']
def setUp(self):
token = Token.objects.get(user__username='admin')
self.client = APIClient()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
r = self.create(
template=True,
scan_added=['alert', 'slack']
)
self.assertEqual(r.status_code, 201)
def create(self, **kwargs):
return self.client.post(reverse('notifications-list'), kwargs, format='json')
def create_test_user(self):
password = '<PASSWORD>!@#$'
r = self.client.post(reverse('user-list'), {
"username": "api-user-notification",
"password": password
}, format='json')
return r.json()["id"]
def test_notification_get(self):
r = self.client.get(reverse('notifications-list'), format='json')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json()['results'][0]['template'], False)
def test_notification_template(self):
q = {'template': True}
r = self.client.get(reverse('notifications-list'), q, format='json')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json()['results'][0]['template'], True)
def test_notification_template_multiple(self):
q = {'template': True, 'scan_added': ['alert', 'slack']}
r = self.client.post(reverse('notifications-list'), q, format='json')
self.assertEqual("Notification template already exists", r.json()["non_field_errors"][0])
def test_user_notifications(self):
"""
creates user and checks if template is assigned
"""
user = {"user": self.create_test_user()}
r = self.client.get(reverse('notifications-list'), user, format='json')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json()['results'][0]['template'], False)
self.assertIn('alert', r.json()['results'][0]['scan_added'])
self.assertIn('slack', r.json()['results'][0]['scan_added'])
|
StarcoderdataPython
|
84603
|
<reponame>ppelleti/berp
print(-1)
print(-0)
print(-(6))
print(-(12*2))
print(- -10)
|
StarcoderdataPython
|
1730461
|
<reponame>jtackaberry/stagehand
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# webradio.py - read webradio attributes
# -----------------------------------------------------------------------------
# $Id$
#
# -----------------------------------------------------------------------------
# kaa-Metadata - Media Metadata for Python
# Copyright (C) 2003-2006 <NAME>, <NAME>
#
# First Edition: <NAME> <<EMAIL>>
# Maintainer: <NAME> <<EMAIL>>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
__all__ = ['Parser']
# python imports
import urllib.parse
import string
import urllib.request, urllib.parse, urllib.error
# import kaa.metadata.audio core
from . import core
# http://172.16.58.3:80/stream/1006
ICY = { 'icy-name': 'title',
'icy-genre': 'genre',
'icy-br': 'bitrate',
'icy-url': 'caption'
}
class WebRadio(core.Music):
table_mapping = { 'ICY' : ICY }
def __init__(self, url):
core.Music.__init__(self)
tup = urllib.parse.urlsplit(url)
scheme, location, path, query, fragment = tup
if scheme != 'http':
raise core.ParseError()
# Open an URL Connection
fi = urllib.request.urlopen(url)
# grab the statusline
self.statusline = fi.readline()
try:
            statuslist = self.statusline.split()
except ValueError:
# assume it is okay since so many servers are badly configured
statuslist = ["ICY", "200"]
if statuslist[1] != "200":
if fi:
fi.close()
raise core.ParseError()
self.type = 'audio'
self.subtype = 'mp3'
# grab any headers for a max of 10 lines
linecnt = 0
tab = {}
lines = fi.readlines(512)
for linecnt in range(0,11):
icyline = lines[linecnt]
icyline = icyline.rstrip('\r\n')
if len(icyline) < 4:
break
cidx = icyline.find(':')
if cidx != -1:
# break on short line (ie. really should be a blank line)
# strip leading and trailing whitespace
tab[icyline[:cidx].strip()] = icyline[cidx+2:].strip()
if fi:
fi.close()
self._appendtable('ICY', tab)
def _finalize(self):
core.Music._finalize(self)
        self.bitrate = int(self.bitrate) * 1000
Parser = WebRadio
|
StarcoderdataPython
|
1736427
|
<gh_stars>0
# The __init__ constructor is a special method that lets us initialize attributes
# at the moment an instance is constructed. As we saw in the previous script,
# attributes are traditionally set with a set_attribute_val() method.
class MyNumber(object):
    def __init__(self, value): # Double underscores before and after a name are used for
        print('Calling __init__ method') # so-called 'private' or 'magic' methods: 'private' because they are not
        try: # meant to be called directly by the user, and 'magic' because they are
            attribute = int(value) # called automatically when a given event happens.
except ValueError:
print('\nWARNING: You cannot use \'str\' values in \'MyNumber\' class.\n'
'Setting value to 0.')
self.attribute = 0
else:
self.attribute = attribute
def get_value(self):
return self.attribute
def increment(self):
self.attribute += 1
num = MyNumber(10) # __init__ is called when an instance is initialized.
print(num.get_value())
num.increment()
num.increment()
print(num.get_value())
num2 = MyNumber('hello')
print(num2.get_value())
num2.increment()
num2.increment()
print(num2.get_value())
|
StarcoderdataPython
|
3300413
|
<reponame>Enestst/codewars
def num_key_strokes(text):
one_type = "qazwsxedcrfvtgb yhnujmik,./l;o'p][`1234567890-\="
sum = 0
for i in text:
if i in one_type: sum += 1
else: sum+=2
return sum
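# A quick sanity check (a sketch): characters reachable without Shift count as
# one key stroke, everything else (upper case, shifted symbols) counts as two.
if __name__ == "__main__":
    assert num_key_strokes("abc") == 3
    assert num_key_strokes("Hello World") == 13   # 'H' and 'W' each need Shift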
|
StarcoderdataPython
|
1654493
|
<gh_stars>1-10
default_app_config = 'peacecorps.apps.PeaceCorpsConfig'
|
StarcoderdataPython
|
3209924
|
<filename>demo_srl_utils.py
import logging
import os
import codecs
import random
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union, Dict
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available, RobertaModel, BertPreTrainedModel, XLMRobertaConfig
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
"""
A single training/test example for semantic role labeling.
Args:
guid: `str` Unique id for the example.
predicate_indicator: `List[int]` The predicate indicator for the examples.
words: `List[str]` The words of the sequence.
labels: (Optional) `List[str]` The labels for each word of the sequence. This should be specified for train and dev examples, but not for test examples.
"""
guid: str
predicate_indicator: List[int]
words: List[str]
tags: Optional[List[str]]
@dataclass
class InputFeatures:
"""
A single set of features of data.
Property names are the same names as the corresponding inputs to a model.
"""
input_ids: List[int]
attention_mask: List[int]
labels: Optional[List[int]] = None
token_type_ids: Optional[List[int]] = None
if is_torch_available():
import torch
from torch import nn
from torch.utils.data.dataset import Dataset
class SRLDataset(Dataset):
"""
Dataset for reading SRL data.
"""
features: List[InputFeatures]
pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only real label ids contribute to the loss later.
def __init__(
self,
data: List[Dict],
tokenizer: PreTrainedTokenizer,
labels: List[str],
model_type: str,
max_seq_length: Optional[int] = None,
):
# Load data features
# NOTE this is kind of hacky, but it works for now.
examples = read_prediction_input(data)
self.features = convert_examples_to_append_features(
examples,
labels,
max_seq_length,
tokenizer,
cls_token_at_end = bool(model_type in ["xlnet"]), # xlnet has a cls token at the end
cls_token = tokenizer.cls_token,
cls_token_segment_id = 2 if model_type in ["xlnet"] else 0,
sep_token = tokenizer.sep_token,
sep_token_extra = False, # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left = bool(tokenizer.padding_side == "left"),
pad_token = tokenizer.pad_token_id,
pad_token_segment_id = tokenizer.pad_token_type_id,
pad_token_label_id = self.pad_token_label_id,
)
return
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def read_prediction_input(data) -> List[InputExample]:
guid_index = 1
examples = []
for entry in data:
sentence = entry["sentence"] # .strip().split()
predicate_index = entry["index"]
if predicate_index not in range(len(sentence)):
continue
predicate = [0 if index != predicate_index else 1 for index in range(len(sentence))]
one_hot_tags = ["O" for _ in sentence]
one_hot_tags[predicate_index] = "B-V"
examples.append(InputExample(guid=f"input-{guid_index}", words=sentence, predicate_indicator=predicate, tags=one_hot_tags))
guid_index += 1
return examples
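# Input sketch for read_prediction_input (the keys follow the code above; the
# sentence and index are made up):
#
#   read_prediction_input([{"sentence": ["The", "dog", "barked"], "index": 2}])
#   -> [InputExample(guid="input-1", words=["The", "dog", "barked"],
#                    predicate_indicator=[0, 0, 1], tags=["O", "O", "B-V"])]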
def convert_examples_to_append_features(
examples: List[InputExample],
label_list: List[str],
max_seq_length: int,
tokenizer: PreTrainedTokenizer,
cls_token_at_end = False,
cls_token = "[CLS]",
cls_token_segment_id = 1,
sep_token = "[SEP]",
sep_token_extra = False,
pad_on_left = False,
pad_token = 0,
pad_token_segment_id = 0,
pad_token_label_id = -100,
sequence_a_segment_id = 0,
sequence_b_segment_id = 1,
mask_padding_with_zero = True,
) -> List[InputFeatures]:
"""
Loads a list of input examples from read_better_examples_from_file into a list of `InputFeatures`
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10_000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
label_ids = []
predicate_ids = []
predicate = []
predicate_label = ""
for word, label, pred_ind in zip(example.words, example.tags, example.predicate_indicator):
word_tokens = tokenizer.tokenize(word)
if pred_ind == 1:
predicate = word_tokens
predicate_label = label
if len(word_tokens) > 0:
tokens.extend(word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens)-1))
predicate_ids.extend([pred_ind] * len(word_tokens))
# Account for [CLS] and [SEP] with "- 2" and "- 3" for RoBERTa then additional for the predicate as the second sentence
special_tokens_count = tokenizer.num_special_tokens_to_add() + len(predicate) + 1
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length-special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
predicate_ids = predicate_ids[:(max_seq_length - special_tokens_count)]
tokens += [sep_token]
label_ids += [pad_token_label_id]
predicate_ids += [0]
if sep_token_extra:
tokens += [sep_token]
label_ids += [pad_token_label_id]
predicate_ids += [0]
segment_ids = [sequence_a_segment_id] * len(tokens)
tokens.extend(predicate)
label_ids.extend([label_map[predicate_label]] + [pad_token_label_id]*(len(predicate)-1)) # TODO what should the label id for the second sentence (the predicate) be?
predicate_ids.extend([0] * len(predicate)) # TODO or should it be 1?
segment_ids.extend([sequence_b_segment_id] * len(predicate))
tokens += [sep_token]
label_ids += [pad_token_label_id]
predicate_ids += [0]
segment_ids += [sequence_b_segment_id]
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
predicate_ids += [0]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
predicate_ids = [0] + predicate_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
predicate_ids = ([0] * padding_length) + predicate_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
predicate_ids += [0] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(input_ids) == max_seq_length
assert len(predicate_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index % 1000 == 0:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
# logger.info("predicate_ids: %s", " ".join([str(x) for x in predicate_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
if "token_type_ids" not in tokenizer.model_input_names:
segment_ids = None
# predicate_ids = None
features.append(
InputFeatures(
input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, labels=label_ids
)
)
return features
def get_labels(path: str) -> List[str]:
if path:
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
else:
return ['O', 'B-A1', 'I-A1', 'B-A0', 'I-A0', 'B-V', 'I-V']
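# Dataset construction sketch (the model name is hypothetical and AutoTokenizer
# is not imported above; it is only shown for illustration):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
#   dataset = SRLDataset(data=[{"sentence": ["The", "dog", "barked"], "index": 1}],
#                        tokenizer=tokenizer, labels=get_labels(""),
#                        model_type="xlmroberta", max_seq_length=128)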
|
StarcoderdataPython
|
1635868
|
<reponame>Valaraucoo/raven
import datetime
import os
import uuid
from django.conf import settings
from django.contrib.auth import models as auth_models
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.db import models
from django.dispatch import receiver
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from users import managers
def get_file_path(instance, filename: str) -> str:
today = datetime.date.today().strftime("%Y-%m-%d")
return os.path.join(settings.UPLOAD_FILES_DIR, today, str(uuid.uuid4()) + filename)
GENDER_CHOICES = (
('male', _('Male')),
('female', _('Female')),
('none', 'none'),
)
ROLE_CHOICES = (
('student', _('Student')),
('teacher', _('Teacher')),
)
class User(auth_models.AbstractUser):
"""
Custom User model for raven platform.
email: PK user's email, used for logging in
first_name: str user's first name
last_name: str user's last name
...
if role == 'student'
grade: FK(...) user's grade/class model
grades: FK(...) user's grades
if role == 'teacher':
running_courses: FK(...)
"""
username = None
first_name = models.CharField(max_length=30, blank=True, verbose_name=_('First name'))
last_name = models.CharField(max_length=150, blank=True, verbose_name=_('Last name'))
email = models.EmailField(unique=True, verbose_name=_('Email address'))
address = models.CharField(max_length=200, blank=True, verbose_name=_('Address'),
help_text=_('<b>Address in format</b>: [STREET NAME] [NUMBER], [CITY]'))
phone = models.CharField(max_length=9, blank=True, verbose_name=_('Phone number'))
gender = models.CharField(max_length=10, default='none', choices=GENDER_CHOICES,
verbose_name=_("User's gender"))
role = models.CharField(max_length=9, choices=ROLE_CHOICES)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
date_joined = models.DateTimeField(verbose_name=_('Date joined'), default=timezone.now)
date_birth = models.DateField(verbose_name=_('Date of birth'), blank=True, null=True,
help_text=_('<b>Birthday date in format:</b> YYYY-MM-DD'))
is_online = models.BooleanField(default=False)
description = models.TextField(null=True, blank=True, default="")
image = models.ImageField(upload_to=get_file_path, default=settings.DEFAULT_USER_IMAGE)
first_login = models.BooleanField(default=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ('first_name', 'last_name',)
objects = managers.CustomUserManager()
@property
def full_username(self) -> str:
return f"{self.first_name} {self.last_name} ({self.email})"
@property
def is_student(self) -> bool:
return self.role == 'student'
@property
def is_teacher(self) -> bool:
return self.role == 'teacher'
def __str__(self):
return self.full_username
def get_absolute_url(self):
return reverse('users:profile-detail', args=(self.pk,))
def get_image_url(self):
return self.image.url
class Teacher(User):
"""
Teacher is a submodel of the User
"""
objects = managers.TeacherUserManager()
class Meta:
proxy = True
verbose_name = _("Teacher")
verbose_name_plural = _("Teachers")
class Student(User):
"""
Student is a submodel of the User
"""
objects = managers.StudentUserManager()
class Meta:
proxy = True
verbose_name = _("Student")
verbose_name_plural = _("Students")
@receiver(user_logged_in)
def got_online(sender, user, request, **kwargs):
user.is_online = True
user.save()
@receiver(user_logged_out)
def got_offline(sender, user, request, **kwargs):
user.is_online = False
user.save()
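
# --- Usage sketch (added for illustration; not part of the original file) ---
# A hedged example of how the role helpers and proxy models might be used from
# a Django shell; the assumption that the custom managers filter on `role` is
# based only on the manager names in users/managers.py.
#
#   >>> user = User(first_name="Ada", last_name="Lovelace",
#   ...             email="ada@example.com", role="student")
#   >>> user.full_username
#   'Ada Lovelace (ada@example.com)'
#   >>> user.is_student, user.is_teacher
#   (True, False)
#   >>> Student.objects.filter(is_online=True)   # presumably only role == 'student'
#   >>> Teacher.objects.all()                    # presumably only role == 'teacher'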
|
StarcoderdataPython
|
12659
|
<reponame>johnjohndoe/c3nav
from django import template
register = template.Library()
@register.filter
def negate(value):
return -value
@register.filter
def subtract(value, arg):
return value - arg
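
# --- Usage sketch (added for illustration; not part of the original file) ---
# In a template these filters are applied as {{ value|negate }} and
# {{ value|subtract:arg }}; the registered functions can also be called
# directly, which the quick sanity check below relies on.
if __name__ == "__main__":
    assert negate(5) == -5
    assert subtract(10, 3) == 7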
|
StarcoderdataPython
|
70777
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import rosparam
from std_msgs.msg import String
from intersection_recognition.srv import Scenario
import MeCab
class ScenarioParser:
def __init__(self):
self.hz = 1
self.loop_rate = rospy.Rate(self.hz)
self.scenario_service_proxy_ = rospy.ServiceProxy('scenario', Scenario)
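        # Japanese vocabulary lists; each is index-aligned with the corresponding *_ENG list below.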
self.ACTIONS = ["直進", "前進", "右折", "左折", "向く", "停止", "曲がる"]
self.DIRECTIONS = ["前", "前方", "右", "右手", "左", "左手", "後ろ", "後方"]
self.TYPE = ["一本道", "三叉路", "行き止まり", "十字路", "突き当り", "曲がり角", "角", "通路", "交差点"]
self.ORDERS = ["つ", "番", "次"]
self.ACTIONS_ENG = ["go_straight", "go_straight", "turn_right", "turn_left", "turn_", "stop", "turn_"]
self.DIRECTIONS_ENG = ["front", "front", "right", "right", "left", "left", "back", "back"]
self.TYPE_ENG = ["straight_road", "3_way", "dead_end", "crossroads", "end", "corridor", "corridor", "corridor", "corridor"]
self.action_ = []
# self.distance_ = []
self.order_ = []
self.direction_ = []
self.type_ = []
self.condition_ = []
self.mecab = MeCab.Tagger('-Owakati -d /usr/lib/x86_64-linux-gnu/mecab/dic/mecab-ipadic-neologd')
# complement direction to turn if scenario-action is "turn_"
    def complement_scenario_action_turn_xxx(self, direction, action):
        # Default to the original action so that `result` is always bound
        # (the original code could raise UnboundLocalError on some inputs).
        result = action
        if(action == "turn_"):
            if(direction == "left" or direction == "right" or direction == "back"):
                result = action + direction
        return result
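    # Examples (added for illustration, reflecting the fixed behaviour above):
    #   complement_scenario_action_turn_xxx("left", "turn_")  -> "turn_left"
    #   complement_scenario_action_turn_xxx("front", "turn_") -> "turn_"
    #   complement_scenario_action_turn_xxx("left", "stop")   -> "stop"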
# Translate each features(for example, action and direction) of scenario from Japanese to English
# Not translate a word about "order" in scenario because order is integer
def translation_from_ja_to_en(self, types, direction, action):
if(types in self.TYPE):
type_index = self.TYPE.index(types)
return_type = self.TYPE_ENG[type_index]
else:
return_type = types
if(direction in self.DIRECTIONS):
direction_index = self.DIRECTIONS.index(direction)
return_direction = self.DIRECTIONS_ENG[direction_index]
else:
return_direction = direction
if(action in self.ACTIONS):
action_index = self.ACTIONS.index(action)
return_action = self.ACTIONS_ENG[action_index]
else:
return_action = action
return_action = self.complement_scenario_action_turn_xxx(return_direction, return_action)
return return_type, return_direction, return_action
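    # Example (added for illustration):
    #   translation_from_ja_to_en("十字路", "左", "曲がる") -> ("crossroads", "left", "turn_left")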
# get a word related to "distance"
def get_distance(self, sentence):
try:
tmp = [i for i, word in enumerate(sentence) if("m" in word)]
if(len(tmp) != 0):
i = tmp[0]
if("m" in sentence[i]):
tmp = sentence[i]
distance = int(tmp.rstrip("m"))
return distance
except Exception as e:
print("############## ERROR ##############")
print(e)
return "nothing"
return "nothing"
# get a word related to "order"
def get_order(self, sentence):
try:
tmp = [i for i, word in enumerate(sentence) for order in self.ORDERS if(order in word)]
if(len(tmp) != 0):
i = tmp[0]
if(sentence[i] == "次"):
order = 1
return int(order)
elif(sentence[i+1] == "目"):
order = sentence[i].rstrip("つ")
order = order.rstrip("番")
order = int(order)
return order
except Exception as e:
print("############## ERROR ##############")
print(e)
return 1
return 1
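    # Examples (added for illustration): ["2つ", "目", "の", "角"] -> 2,
    # ["次", "の", "通路"] -> 1, and any sentence without an order word -> 1.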
# get a word related to "direction"
def get_direction(self, sentence):
try:
tmp = [i for i, word in enumerate(sentence) for direction in self.DIRECTIONS if((direction in word) and \
(("右折" not in word) and ("左折" not in word)))]
if(len(tmp) != 0):
i = tmp[0]
direction = sentence[i]
return direction
except Exception as e:
print("############## ERROR ##############")
print(e)
return "nothing"
return "nothing"
# get a word related to "type"
def get_type(self, sentence):
try:
tmp = [i for i, word in enumerate(sentence) for road_type in self.TYPE if(road_type in word)]
if(len(tmp) != 0):
i = tmp[0]
road_type = sentence[i]
return road_type
except Exception as e:
print("############## ERROR ##############")
print(e)
return "nothing"
return "nothing"
# get a word related to "action"
def get_action(self, sentence):
try:
tmp = [i for i, word in enumerate(sentence) for action in self.ACTIONS if(action in word)]
if(len(tmp) != 0):
i = tmp[0]
action = sentence[i]
return action
except Exception as e:
print("############## ERROR ##############")
print(e)
return "nothing"
return "nothing"
# get a word related to condition
def get_condition(self, sentence):
try:
tmp = [i for i, word in enumerate(sentence) if("まで" in word)]
if(len(tmp) != 0):
i = tmp[0]
condition = sentence[i]
if("見える" in sentence[i-1]):
condition = "見えるまで"
return condition
except Exception as e:
print("############## ERROR ##############")
print(e)
return "nothing"
return "nothing"
def get_condition_and_action(self, scenario):
all_sentences = scenario.split(".")
# delete '\n'
del all_sentences[-1]
for sentence in all_sentences:
splitted_sentence = self.mecab.parse(sentence)
splitted_sentence = splitted_sentence.split(" ")
# delete '\n'
del splitted_sentence[-1]
# distance = self.get_distance(splitted_sentence)
order = self.get_order(splitted_sentence)
direction = self.get_direction(splitted_sentence)
road_type = self.get_type(splitted_sentence)
condition = self.get_condition(splitted_sentence)
action = self.get_action(splitted_sentence)
# self.distance_.append(distance)
self.order_.append(order)
self.direction_.append(direction)
self.type_.append(road_type)
self.condition_.append(condition)
self.action_.append(action)
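    # Example (added for illustration): for a sentence such as "十字路を左に曲がる."
    # this collects type="十字路", direction="左", action="曲がる", with order and
    # condition falling back to 1 and "nothing"; the exact split depends on the
    # MeCab segmentation.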
# send conditions and action
def send_scenarios(self, i):
# debug
# print("type is ", self.type_[i])
# print("order is ", self.order_[i])
# print("direction is ", self.direction_[i])
# print("action is ", self.action_[i])
scenario_order = self.order_[i]
scenario_type, scenario_direction, scenario_action = self.translation_from_ja_to_en(self.type_[i], self.direction_[i], self.action_[i])
try:
self.scenario_service_proxy_(scenario_type, scenario_order, scenario_direction, scenario_action)
except rospy.ServiceException as e:
print("Service call failed: ", e)
    def show_result(self):
        for i in range(len(self.action_)):
            print("#######################################")
            # print("距離: ", self.distance_[i])  # 距離 = distance
            print("順番: ", self.order_[i])       # 順番 = order
            print("方向: ", self.direction_[i])   # 方向 = direction
            print("通路: ", self.type_[i])        # 通路 = road type
            print("条件: ", self.condition_[i])   # 条件 = condition
            print("行動: ", self.action_[i])      # 行動 = action
            print("\n")
if __name__ == '__main__':
try:
rospy.init_node('scenario_parser', anonymous=True)
scenario_parser = ScenarioParser()
scenario_path = "/root/share/mecab/scenario.txt"
try:
scenario_path = rosparam.get_param("scenario_parser/scenario_path")
        except Exception:
            rospy.logwarn("scenario_path is not set; falling back to the default path")
with open(scenario_path) as f:
data = f.readlines()
for scenario in data:
print("#######################################")
print(scenario)
scenario_parser.get_condition_and_action(scenario)
scenario_parser.show_result()
print("#######################################\n")
rospy.wait_for_service('scenario')
for i in range(len(scenario_parser.type_)):
scenario_parser.send_scenarios(i)
print("Finish sending scenarios")
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("except occur")
pass
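
# Example scenario file content (added for illustration; the exact phrasing is an
# assumption -- get_condition_and_action() simply splits each line on "."):
#   十字路を左に曲がる.突き当りまで直進する.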
|
StarcoderdataPython
|