Dataset columns: code (string, 22 to 1.05M characters), apis (list, 1 to 3.31k entries), extract_api (string, 75 to 3.25M characters).
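Each row below pairs a code string with the list of APIs it uses and a serialized extract_api annotation. A minimal sketch of how such a row might be consumed follows; it assumes the rows are available as dictionaries keyed by the three column names, that extract_api is a Python-literal list of call records, and that the integer pairs are character offsets into the code field (the record layout is inferred from the rows below, not from a published schema).

import ast

def iter_api_calls(row):
    # extract_api is stored as a string; each record appears to start with a
    # (start, end) character span of the call and to end with its import line.
    for record in ast.literal_eval(row["extract_api"]):
        (start, end), qualified_name, *_, import_stmt = record
        yield qualified_name, row["code"][start:end], import_stmt.strip()

# Hypothetical usage: list every recorded call in the first row.
# for name, call_text, imp in iter_api_calls(dataset[0]):
#     print(f"{name}: {call_text}    ({imp})")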
# taken from http://python-packaging.readthedocs.io/en/latest/everything.html and modified a little
from setuptools import setup, find_packages

# random values
__version__ = '0.1.0'

# this part taken from https://github.com/dr-guangtou/riker
with open('requirements.txt') as infd:
    INSTALL_REQUIRES = [x.strip('\n') for x in infd.readlines()]

# code taken from above
def readme():
    with open('README.md') as f:
        return f.read()

setup(name='mlfinder',
      version=__version__,
      description='Find possible microlensing events.',
      long_description=readme(),
      classifiers=[
          'Development Status :: 3 - Alpha',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 2.7',
          'Topic :: Text Processing :: Linguistic',
      ],
      keywords='astronomy',
      url='https://github.com/JudahRockLuberto/mlfinder',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=find_packages(),
      install_requires=INSTALL_REQUIRES,
      include_package_data=True,
      zip_safe=False,
      python_requires='>=3.6')
[ "setuptools.find_packages" ]
[((982, 997), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (995, 997), False, 'from setuptools import setup, find_packages\n')]
from utils import *  # expected to supply cv2, np, the get_* helpers, the *_LIST constants,
                     # PLOT_WINDOW, output_race_status and delete_temp_file used below
import logging
import os
import time
import warnings
import subprocess
from subprocess import PIPE

formatter = '%(levelname)s : %(asctime)s : %(message)s'
logging.basicConfig(filename='mk8d.log', level=logging.INFO, format=formatter)
warnings.simplefilter('ignore')

"""################################### Parameters ###################################"""
SC_COMMAND = "screenshot OBS -t OBS -f "  # command used to take a screenshot
TEMP_IMG_FILENAME = "temp.png"            # where the captured frame is saved
WAIT_SECOND = 0.2                         # wait time between iterations (seconds)
WAITTIME_BEFORE_DELETE = 6                # grace period before deleting the images
IS_RACING_CHECK_LENGTH = 4                # reset processing if no "racing" detection for this many * WAIT_SECOND
IS_RACING_CHECK_LENGTH_RACEEND = 3        # window used to confirm the end of a race
DRAW_LAPLINE = False                      # show lap boundaries


def run_server():
    frame_num = 0
    is_racing_flag_list = [False for _ in range(IS_RACING_CHECK_LENGTH + 1)]
    is_racing_now = False
    curent_lap = 1
    lap_history = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    lap_index = []
    coin_history = [0, ]
    rank_history = [12, ]
    im_before_coin = 0  # coin count at the previous time step

    while(True):
        logging.info("[log] is_racing_now==%s" % is_racing_now)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        # Grab a screenshot from OBS
        im_before_coin = coin_history[-1]
        frame_num += 1
        res = subprocess.run("screenshot OBS -t OBS -f temp_raw.png", shell=True, stdout=PIPE, stderr=PIPE, text=True)
        time.sleep(WAIT_SECOND)
        if not res:
            continue
        frame_gray = cv2.imread("temp_raw.png", 0)
        frame_gray = frame_gray[70:1230:, 90:2180]
        frame_gray = cv2.resize(frame_gray, (400, 300))
        cv2.imwrite(TEMP_IMG_FILENAME, frame_gray)

        # Write the image recognition results to csv
        frame_gray = cv2.imread(TEMP_IMG_FILENAME, 0)
        ret1, lp = get_lap(frame_gray)
        ret2, cn = get_coinnum(frame_gray, im_before_coin)
        ret3, rk = get_rank(frame_gray)
        is_racing_flag = ret1 and ret2 and ret3
        is_racing_flag_list.append(is_racing_flag)

        # Update the current state
        lap = lp.replace('.png', '')
        coin = cn.replace('.png', '')
        rank = rk.replace('.png', '')

        # If no "racing" detection, reuse the results from the previous time step
        if not is_racing_flag:
            lap = lap_history[-1]
            coin = coin_history[-1]
            rank = rank_history[-1]
        logging.info("lap:%s coin:%s rank:%s is_racing_flag==%s" % (lap, coin, rank, is_racing_flag))

        if lap in LAP_LIST:
            lap_number = LAP_LIST.index(lap) + 1
            lap_history.append(lap_number)
            lap_stat_mode = 1
            lap_2_count = lap_history[-6:].count(2)
            lap_3_count = lap_history[-6:].count(3)
            logging.info("[lap_history] %s " % lap_history[-6:])
            if curent_lap == 1 and lap_2_count > 3 and len(lap_history) > 20:
                lap_stat_mode = 2
            elif curent_lap == 2 and lap_3_count > 4 and len(lap_history) > 40:
                lap_stat_mode = 3
            # When the lap advances, record its index
            if lap_stat_mode > curent_lap:
                curent_lap = lap_stat_mode
                lap_index.append((len(lap_history) - 10) / PLOT_WINDOW - 2.0)
        else:
            curent_lap = 1

        if coin in COIN_LIST:
            coin_history.append(COIN_LIST.index(coin))
        elif is_racing_now:
            coin_history.append(coin_history[-1])
        else:
            coin_history.append(-2)

        if rank in RANK_LIST:
            rank_history.append(RANK_LIST.index(rank) + 1)
        elif is_racing_now:
            rank_history.append(rank_history[-1] + 1)
        else:
            rank_history.append(-2)

        # Ignore anything beyond 3 laps
        if len(lap_index) > 3:
            curent_lap = 3
            lap_index = lap_index[:3]

        if not is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH:]) == 1):
            # If "racing" is detected repeatedly while not racing, switch to in-race processing
            is_racing_now = True
            curent_lap = 1
            lap_history = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
            lap_index = []
            coin_history = [0, ] + coin_history[-2:]
            rank_history = [12, ] + rank_history[-2:]
            output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE)
            logging.info("レースを開始")  # "race started"
            continue
        elif is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH_RACEEND:]) == 0):
            # If no "racing" detection for the specified time, reset the temporary files
            # and move to end-of-race processing
            is_racing_now = False
            curent_lap = 1
            lap_history = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
            lap_index = []
            coin_history = [0]
            rank_history = [12]
            time.sleep(WAITTIME_BEFORE_DELETE)  # grace period before deleting the images
            output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE)
            delete_temp_file()
            logging.info("レースを終了")  # "race ended"
            continue
        elif not is_racing_flag:
            # If the racing flag is not set, skip plotting for now
            continue
        elif is_racing_now:
            # While racing, keep the graph updated
            output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE)

    logging.info("Finish!!!!")


if __name__ == '__main__':
    delete_temp_file()
    run_server()
[ "subprocess.run", "warnings.simplefilter", "logging.basicConfig", "time.sleep", "logging.info" ]
[((178, 256), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""mk8d.log"""', 'level': 'logging.INFO', 'format': 'formatter'}), "(filename='mk8d.log', level=logging.INFO, format=formatter)\n", (197, 256), False, 'import logging\n'), ((257, 288), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (278, 288), False, 'import warnings\n'), ((5291, 5317), 'logging.info', 'logging.info', (['"""Finish!!!!"""'], {}), "('Finish!!!!')\n", (5303, 5317), False, 'import logging\n'), ((1049, 1104), 'logging.info', 'logging.info', (["('[log] is_racing_now==%s' % is_racing_now)"], {}), "('[log] is_racing_now==%s' % is_racing_now)\n", (1061, 1104), False, 'import logging\n'), ((1270, 1379), 'subprocess.run', 'subprocess.run', (['"""screenshot OBS -t OBS -f temp_raw.png"""'], {'shell': '(True)', 'stdout': 'PIPE', 'stderr': 'PIPE', 'text': '(True)'}), "('screenshot OBS -t OBS -f temp_raw.png', shell=True, stdout=\n PIPE, stderr=PIPE, text=True)\n", (1284, 1379), False, 'import subprocess\n'), ((1383, 1406), 'time.sleep', 'time.sleep', (['WAIT_SECOND'], {}), '(WAIT_SECOND)\n', (1393, 1406), False, 'import time\n'), ((2295, 2392), 'logging.info', 'logging.info', (["('lap:%s coin:%s rank:%s is_racing_flag==%s' % (lap, coin, rank,\n is_racing_flag))"], {}), "('lap:%s coin:%s rank:%s is_racing_flag==%s' % (lap, coin, rank,\n is_racing_flag))\n", (2307, 2392), False, 'import logging\n'), ((2657, 2709), 'logging.info', 'logging.info', (["('[lap_history] %s ' % lap_history[-6:])"], {}), "('[lap_history] %s ' % lap_history[-6:])\n", (2669, 2709), False, 'import logging\n'), ((4289, 4311), 'logging.info', 'logging.info', (['"""レースを開始"""'], {}), "('レースを開始')\n", (4301, 4311), False, 'import logging\n'), ((4717, 4751), 'time.sleep', 'time.sleep', (['WAITTIME_BEFORE_DELETE'], {}), '(WAITTIME_BEFORE_DELETE)\n', (4727, 4751), False, 'import time\n'), ((4955, 4977), 'logging.info', 'logging.info', (['"""レースを終了"""'], {}), "('レースを終了')\n", (4967, 4977), False, 'import logging\n')]
# Author: 西岛闲鱼
# https://github.com/globien/easy-python
# https://gitee.com/globien/easy-python
# Verify the Monty Hall problem

import random

获奖次数_不换 = 0      # counter: wins without switching
获奖次数_换 = 0        # counter: wins with switching
试验次数 = 100000     # number of trials for each strategy

for i in range(试验次数):                  # trials without switching
    door_list = ["A","B","C"]              # the three doors
    car = random.choice(door_list)         # the car is placed behind a random door
    bet = random.choice(door_list)         # the contestant picks a random door
    if bet == car:                         # no switch -- reveal the answer directly
        获奖次数_不换 = 获奖次数_不换 + 1

for i in range(试验次数):                  # trials with switching
    door_list = ["A","B","C"]              # the three doors
    car = random.choice(door_list)         # the car is placed behind a random door
    bet = random.choice(door_list)         # the contestant picks a random door
    # Now the host eliminates one door at random:
    # it is neither the contestant's door nor the door hiding the car
    host_list = ["A","B","C"]
    host_list.remove(bet)
    if car in host_list:
        host_list.remove(car)
    discard = random.choice(host_list)
    # The contestant now switches to the remaining door; check whether it wins
    door_list.remove(bet)          # drop the door already chosen
    door_list.remove(discard)      # drop the door eliminated by the host
    bet = door_list[0]             # only one door left -- switch to it!
    if bet == car:                 # switch -- reveal the answer
        获奖次数_换 = 获奖次数_换 + 1

print("Win rate without switching:", 获奖次数_不换/试验次数)
print("Win rate with switching:   ", 获奖次数_换/试验次数)
[ "random.choice" ]
[((335, 359), 'random.choice', 'random.choice', (['door_list'], {}), '(door_list)\n', (348, 359), False, 'import random\n'), ((388, 412), 'random.choice', 'random.choice', (['door_list'], {}), '(door_list)\n', (401, 412), False, 'import random\n'), ((619, 643), 'random.choice', 'random.choice', (['door_list'], {}), '(door_list)\n', (632, 643), False, 'import random\n'), ((672, 696), 'random.choice', 'random.choice', (['door_list'], {}), '(door_list)\n', (685, 696), False, 'import random\n'), ((904, 928), 'random.choice', 'random.choice', (['host_list'], {}), '(host_list)\n', (917, 928), False, 'import random\n')]
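For reference, the exact probabilities the simulation above should approach are 1/3 for staying and 2/3 for switching; a quick analytic check (not part of the original script):

# Analytic reference values for the simulation above: staying only wins when
# the first pick was the car (1/3); switching wins in the remaining cases (2/3).
from fractions import Fraction

p_stay_wins = Fraction(1, 3)
p_switch_wins = 1 - p_stay_wins
print(p_stay_wins, p_switch_wins)  # 1/3 2/3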
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-03 06:54
from __future__ import unicode_literals

from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):

    dependencies = [
        ('freefolks', '0005_auto_20180602_2334'),
    ]

    operations = [
        migrations.AddField(
            model_name='account',
            name='created_date',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='account',
            name='modified_date',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='transaction',
            name='created_date',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='transaction',
            name='date_time',
            field=models.DateTimeField(blank=True, null=True, verbose_name=b'Transaction date'),
        ),
        migrations.AddField(
            model_name='transaction',
            name='modified_date',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='account',
            name='bank_name',
            field=models.CharField(choices=[('axis', 'Axis Bank'), ('citi', 'Citi Bank'), ('hdfc', 'HDFC Bank')], max_length=50, verbose_name=b'Select your bank'),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='amount',
            field=models.DecimalField(decimal_places=2, default=0.0, max_digits=12, verbose_name=b'Transaction amount'),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='transaction_type',
            field=models.CharField(choices=[('credit', 'Credited to account'), ('debit', 'Debited from account')], max_length=50, verbose_name=b'Transaction type'),
        ),
    ]
[ "django.db.models.CharField", "django.db.models.DateTimeField", "django.db.models.DecimalField" ]
[((435, 509), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'default': 'django.utils.timezone.now'}), '(auto_now_add=True, default=django.utils.timezone.now)\n', (455, 509), False, 'from django.db import migrations, models\n'), ((673, 708), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (693, 708), False, 'from django.db import migrations, models\n'), ((839, 913), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'default': 'django.utils.timezone.now'}), '(auto_now_add=True, default=django.utils.timezone.now)\n', (859, 913), False, 'from django.db import migrations, models\n'), ((1077, 1154), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': "b'Transaction date'"}), "(blank=True, null=True, verbose_name=b'Transaction date')\n", (1097, 1154), False, 'from django.db import migrations, models\n'), ((1286, 1321), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1306, 1321), False, 'from django.db import migrations, models\n'), ((1447, 1596), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('axis', 'Axis Bank'), ('citi', 'Citi Bank'), ('hdfc', 'HDFC Bank')]", 'max_length': '(50)', 'verbose_name': "b'Select your bank'"}), "(choices=[('axis', 'Axis Bank'), ('citi', 'Citi Bank'), (\n 'hdfc', 'HDFC Bank')], max_length=50, verbose_name=b'Select your bank')\n", (1463, 1596), False, 'from django.db import migrations, models\n'), ((1718, 1823), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'default': '(0.0)', 'max_digits': '(12)', 'verbose_name': "b'Transaction amount'"}), "(decimal_places=2, default=0.0, max_digits=12,\n verbose_name=b'Transaction amount')\n", (1737, 1823), False, 'from django.db import migrations, models\n'), ((1956, 2105), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('credit', 'Credited to account'), ('debit', 'Debited from account')]", 'max_length': '(50)', 'verbose_name': "b'Transaction type'"}), "(choices=[('credit', 'Credited to account'), ('debit',\n 'Debited from account')], max_length=50, verbose_name=b'Transaction type')\n", (1972, 2105), False, 'from django.db import migrations, models\n')]
from django.db import models

from adapter import models as adapter_models


class SensorUsage(models.Model):
    value = models.FloatField()
    sensor = models.ForeignKey(adapter_models.Sensor, on_delete=models.CASCADE)
    datetime = models.DateTimeField()

    @staticmethod
    def get_usage_by_day(day, monitors):
        return SensorUsage.objects.filter(datetime__day=day.day, datetime__month=day.month,
                                          datetime__year=day.year, sensor__monitors=monitors).order_by('datetime')

    @staticmethod
    def get_usage_by_month(day, monitors):
        return SensorUsage.objects.filter(datetime__month=day.month, datetime__year=day.year,
                                          sensor__monitors=monitors).order_by('datetime')

    def __str__(self):
        return self.sensor.element + ' ' + str(self.value) + "@" + self.datetime.isoformat()


class Settings(models.Model):
    setting = models.CharField(max_length=20, unique=True)
    value = models.CharField(max_length=50, null=True, unique=False)
    description = models.CharField(max_length=240, unique=False, null=True)
[ "django.db.models.ForeignKey", "django.db.models.DateTimeField", "django.db.models.CharField", "django.db.models.FloatField" ]
[((122, 141), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (139, 141), False, 'from django.db import models\n'), ((155, 221), 'django.db.models.ForeignKey', 'models.ForeignKey', (['adapter_models.Sensor'], {'on_delete': 'models.CASCADE'}), '(adapter_models.Sensor, on_delete=models.CASCADE)\n', (172, 221), False, 'from django.db import models\n'), ((237, 259), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (257, 259), False, 'from django.db import models\n'), ((978, 1022), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'unique': '(True)'}), '(max_length=20, unique=True)\n', (994, 1022), False, 'from django.db import models\n'), ((1035, 1091), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'unique': '(False)'}), '(max_length=50, null=True, unique=False)\n', (1051, 1091), False, 'from django.db import models\n'), ((1110, 1167), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(240)', 'unique': '(False)', 'null': '(True)'}), '(max_length=240, unique=False, null=True)\n', (1126, 1167), False, 'from django.db import models\n')]
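A hypothetical usage sketch for the query helpers above; it assumes it runs alongside the models module, and `some_monitor` is a stand-in for whatever object the Sensor's `monitors` relation (defined in the adapter app, not shown here) points at.

# Hypothetical usage of SensorUsage.get_usage_by_day defined above.
import datetime

def daily_total(some_monitor):
    today = datetime.date.today()
    readings = SensorUsage.get_usage_by_day(today, monitors=some_monitor)
    return sum(r.value for r in readings)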
import requests
from json import loads


def _args_to_params(kwargs):
    """
    Creates a tuple of keyword, value tuples and changes parameter names for ESI

    :param kwargs:
    :return: ((parameter, value), (parameter, value), ...)
    :rtype: tuple
    """
    params = ()
    for parameter, value in kwargs.items():
        if value is None:
            continue
        if parameter == 'if_none_match':
            parameter = 'If-None-Match'
        if parameter == 'accept_language':
            parameter = 'Accept-Language'
        params = (*params, (parameter, value))
    return params


def request(data_source, version, HTTP_method, path, proxies=None, **kwargs):
    """
    Requests and processes ESI json file

    :param data_source: ['tranquility', 'singularity']
    :param version: ESI version ['dev', 'latest', 'legacy', 'v1', 'v2', ...]
    :param HTTP_method: ['GET', 'POST', 'PUT', 'DELETE', ...]
    :param path: endpoint
    :param proxies: Dictionary mapping protocol to the URL of the proxy
    :param kwargs: parameters for the endpoint
    :return: json.load(requests.get().text)
    :rtype: dict
    """
    headers = {'accept': 'application/json'}
    params = _args_to_params(kwargs)
    response = requests.request(HTTP_method, f'https://esi.evetech.net/{version}{path}',
                                headers=headers, params=params, proxies=proxies)
    return loads(response.text)
[ "requests.request", "json.loads" ]
[((1235, 1361), 'requests.request', 'requests.request', (['HTTP_method', 'f"""https://esi.evetech.net/{version}{path}"""'], {'headers': 'headers', 'params': 'params', 'proxies': 'proxies'}), "(HTTP_method, f'https://esi.evetech.net/{version}{path}',\n headers=headers, params=params, proxies=proxies)\n", (1251, 1361), False, 'import requests\n'), ((1465, 1485), 'json.loads', 'loads', (['response.text'], {}), '(response.text)\n', (1470, 1485), False, 'from json import loads\n')]
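Since the docstring spells out the parameters, a short usage sketch may help; it assumes the public ESI /status/ route and no proxy (note that data_source is accepted but not currently used when building the URL).

# Minimal usage sketch for request() above: fetch server status from ESI.
# Extra endpoint parameters can be passed as keyword arguments.
if __name__ == '__main__':
    status = request('tranquility', 'latest', 'GET', '/status/')
    print(status)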
''' Present an interactive function explorer with slider widgets.
Scrub the sliders to change the properties of the ``sin`` curve, or
type into the title text box to update the title of the plot.
Use the ``bokeh serve`` command to run the example by executing:
    bokeh serve sliders.py
at your command prompt. Then navigate to the URL
    http://localhost:5006/sliders
in your browser.
'''
import numpy as np

from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import ColumnDataSource, Slider, TextInput
from bokeh.plotting import figure, output_file, show

from sklearn import datasets
import hdbscan
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

### SET UP THE DATA ###
n_samples = 1500
random_state = 170

# Dataset #1
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
X1, Y1 = X[:,0], X[:,1]

# Dataset #2
transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
X2, Y2 = X_aniso[:,0], X_aniso[:,1]

# Dataset #3
X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state)
X3, Y3 = X_varied[:,0], X_varied[:,1]

# Dataset #4
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
X4, Y4 = X_filtered[:,0], X_filtered[:,1]

source1 = ColumnDataSource(data=dict(X=X1, Y=Y1))
source2 = ColumnDataSource(data=dict(X=X2, Y=Y2))
source3 = ColumnDataSource(data=dict(X=X3, Y=Y3))
source4 = ColumnDataSource(data=dict(X=X4, Y=Y4))
print(source1, source2, source3, source4)

### Set up Plot
plot = figure(plot_height=400, plot_width=400, title="Clusters",
              tools="crosshair,pan,reset,save,wheel_zoom",
              x_range=[0, 4*np.pi], y_range=[-2.5, 2.5])

plot.scatter('X', 'Y', source=source1)  # , line_width=3, line_alpha=0.6)
show(plot)
output_file('clustering.html')

'''
# Set up data
N = 200
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))

# Set up plot
plot = figure(plot_height=400, plot_width=400, title="my sine wave",
              tools="crosshair,pan,reset,save,wheel_zoom",
              x_range=[0, 4*np.pi], y_range=[-2.5, 2.5])

plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)

# Set up widgets
text = TextInput(title="title", value='my sine wave')
offset = Slider(title="offset", value=0.0, start=-5.0, end=5.0, step=0.1)
amplitude = Slider(title="amplitude", value=1.0, start=-5.0, end=5.0, step=0.1)
phase = Slider(title="phase", value=0.0, start=0.0, end=2*np.pi)
freq = Slider(title="frequency", value=1.0, start=0.1, end=5.1, step=0.1)

# Set up callbacks
def update_title(attrname, old, new):
    plot.title.text = text.value

text.on_change('value', update_title)

def update_data(attrname, old, new):
    # Get the current slider values
    a = amplitude.value
    b = offset.value
    w = phase.value
    k = freq.value

    # Generate the new curve
    x = np.linspace(0, 4*np.pi, N)
    y = a*np.sin(k*x + w) + b
    source.data = dict(x=x, y=y)

for w in [offset, amplitude, phase, freq]:
    w.on_change('value', update_data)

# Set up layouts and add to document
inputs = column(text, offset, amplitude, phase, freq)

curdoc().add_root(row(inputs, plot, width=800))
curdoc().title = "Sliders"
'''
[ "bokeh.plotting.figure", "sklearn.datasets.make_blobs", "bokeh.plotting.output_file", "bokeh.plotting.show", "numpy.dot", "numpy.vstack" ]
[((828, 886), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': 'n_samples', 'random_state': 'random_state'}), '(n_samples=n_samples, random_state=random_state)\n', (838, 886), False, 'from sklearn.datasets import make_blobs\n'), ((1008, 1033), 'numpy.dot', 'np.dot', (['X', 'transformation'], {}), '(X, transformation)\n', (1014, 1033), True, 'import numpy as np\n'), ((1105, 1197), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': 'n_samples', 'cluster_std': '[1.0, 2.5, 0.5]', 'random_state': 'random_state'}), '(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=\n random_state)\n', (1115, 1197), False, 'from sklearn.datasets import make_blobs\n'), ((1323, 1384), 'numpy.vstack', 'np.vstack', (['(X[y == 0][:500], X[y == 1][:100], X[y == 2][:10])'], {}), '((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))\n', (1332, 1384), True, 'import numpy as np\n'), ((1696, 1853), 'bokeh.plotting.figure', 'figure', ([], {'plot_height': '(400)', 'plot_width': '(400)', 'title': '"""Clusters"""', 'tools': '"""crosshair,pan,reset,save,wheel_zoom"""', 'x_range': '[0, 4 * np.pi]', 'y_range': '[-2.5, 2.5]'}), "(plot_height=400, plot_width=400, title='Clusters', tools=\n 'crosshair,pan,reset,save,wheel_zoom', x_range=[0, 4 * np.pi], y_range=\n [-2.5, 2.5])\n", (1702, 1853), False, 'from bokeh.plotting import figure, output_file, show\n'), ((1941, 1951), 'bokeh.plotting.show', 'show', (['plot'], {}), '(plot)\n', (1945, 1951), False, 'from bokeh.plotting import figure, output_file, show\n'), ((1952, 1982), 'bokeh.plotting.output_file', 'output_file', (['"""clustering.html"""'], {}), "('clustering.html')\n", (1963, 1982), False, 'from bokeh.plotting import figure, output_file, show\n')]
#!/usr/bin/env python3 """Command line tool for the tesseract-recognize API server.""" """ @version $Version: 2020.01.13$ @author <NAME> <<EMAIL>> @copyright Copyright(c) 2017-present, <NAME> <<EMAIL>> @requirements https://github.com/omni-us/pagexml/releases/download/2019.10.10/pagexml-2019.10.10-cp36-cp36m-linux_x86_64.whl @requirements jsonargparse>=2.20.0 @requirements flask-restplus>=0.12.1 @requirements prance>=0.15.0 """ import os import re import sys import json import shutil import queue import threading import tempfile import pagexml pagexml.set_omnius_schema() from time import time from functools import wraps from subprocess import Popen, PIPE, STDOUT from jsonargparse import ArgumentParser, ActionConfigFile, ActionYesNo from flask import Flask, Response, request, abort from flask_restplus import Api, Resource, reqparse from werkzeug.datastructures import FileStorage from werkzeug.exceptions import BadRequest from prance.util import url from prance.convert import convert_url def get_cli_parser(logger=True): """Returns the parser object for the command line tool.""" parser = ArgumentParser( error_handler='usage_and_exit_error_handler', logger=logger, default_env=True, description=__doc__) parser.add_argument('--cfg', action=ActionConfigFile, help='Path to a yaml configuration file.') parser.add_argument('--threads', type=int, default=4, help='Maximum number of tesseract-recognize instances to run in parallel.') parser.add_argument('--prefix', default='/tesseract-recognize', help='Prefix string for all API endpoints. Use "%%s" in string to replace by the API version.') parser.add_argument('--host', default='127.0.0.1', help='Hostname to listen on.') parser.add_argument('--port', type=int, default=5000, help='Port for the server.') parser.add_argument('--debug', action=ActionYesNo, default=False, help='Whether to run in debugging mode.') return parser def TypePageXML(value): """Parse Page XML request type. Args: value: The raw type value. Returns: dict[str, {str,PageXML}]: Dictionary including the page xml 'filename', the 'string' representation and the PageXML 'object'. """ if type(value) != FileStorage: raise ValueError('Expected pagexml to be of type FileStorage.') spxml = value.read().decode('utf-8') pxml = pagexml.PageXML() pxml.loadXmlString(spxml) return {'filename': value.filename, 'object': pxml, 'string': spxml} class ParserPageXML(reqparse.RequestParser): """Class for parsing requests including a Page XML.""" def parse_args(self, **kwargs): """Extension of parse_args that additionally does some Page XML checks.""" req_dict = super().parse_args(**kwargs) if req_dict['pagexml'] is not None and req_dict['images'] is not None: pxml = req_dict['pagexml']['object'] images_xml = set() for page in pxml.select('//_:Page'): fname = re.sub(r'\[[0-9]+]$', '', pxml.getAttr(page, 'imageFilename')) images_xml.add(fname) images_received = [os.path.basename(x.filename) for x in req_dict['images']] for fname in images_received: if fname not in images_xml: raise BadRequest('Received image not referenced in the Page XML: '+fname) if len(images_xml) != len(images_received): raise BadRequest('Expected to receive all images referenced in the Page XML ('+str(len(images_xml))+') but only got a subset ('+str(len(images_received))+')') return req_dict def write_to_tmpdir(req_dict, prefix='tesseract_recognize_api_tmp_', basedir='/tmp'): """Writes images and page xml from a request to a temporal directory. Args: req_dict (dict): Parsed Page XML request. 
prefix (str): Prefix for temporal directory name. basedir (str): Base temporal directory. Returns: The path to the temporal directory where saved. """ tmpdir = tempfile.mkdtemp(prefix=prefix, dir=basedir) if req_dict['pagexml'] is not None: fxml = os.path.basename(req_dict['pagexml']['filename']) with open(os.path.join(tmpdir, fxml), 'w') as f: f.write(req_dict['pagexml']['string']) if req_dict['images'] is not None: for image in req_dict['images']: image.save(os.path.join(tmpdir, os.path.basename(image.filename))) return tmpdir class images_pagexml_request: """Decorator class for endpoints receiving images with optionally a page xml and responding with a page xml.""" def __init__(self, api, images_help='Images with file names as referenced in the Page XML if given.', pagexml_help='Optional valid Page XML file.', options_help='Optional configuration options to be used for processing.', response_help='Resulting Page XML after processing.'): """Initializer for images_pagexml_request class. Args: api (flask_restplus.Api): The flask_restplus Api instance. images_help (str): Help for images field in swagger documentation. pagexml_help (str): Help for pagexml field in swagger documentation. options_help (str): Help for config field in swagger documentation. response_help (str): Help for pagexml response in swagger documentation. """ self.api = api self.response_help = response_help parser = ParserPageXML(bundle_errors=True) parser.add_argument('images', location='files', type=FileStorage, required=True, action='append', help=images_help) parser.add_argument('pagexml', location='files', type=TypePageXML, required=False, help=pagexml_help) parser.add_argument('options', location='form', type=str, required=False, default=[], action='append', help=options_help) self.parser = parser def __call__(self, method): """Makes a flask_restplus.Resource method expect a page xml and/or respond with a page xml.""" method = self.api.expect(self.parser)(method) method = self.api.response(200, description=self.response_help)(method) method = self.api.produces(['application/xml'])(method) @wraps(method) def images_pagexml_request_wrapper(func): req_dict = self.parser.parse_args() pxml = method(func, req_dict) return Response( pxml.toString(True), mimetype='application/xml', headers={'Content-type': 'application/xml; charset=utf-8'}) return images_pagexml_request_wrapper def run_tesseract_recognize(*args): """Runs a tesseract-recognize command using given arguments.""" cmd = ['tesseract-recognize'] cmd.extend(list(args)) proc = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) cmd_out = proc.stdout.read().decode("utf-8") proc.communicate() cmd_rc = proc.returncode return cmd_rc, cmd_out if __name__ == '__main__': ## Parse config ## parser = get_cli_parser(logger=os.path.basename(__file__)) cfg = parser.parse_args(env=True) ## Create a Flask WSGI application ## app = Flask(__name__) # pylint: disable=invalid-name app.logger = parser.logger ## Create a Flask-RESTPlus API ## api = Api(app, doc=cfg.prefix+'/swagger', version='2.0', prefix=cfg.prefix, title='tesseract-recognize API', description='An API for running tesseract-recognition jobs.') sys.modules['flask.cli'].show_server_banner = lambda *x: None # type: ignore ## Definition of endpoints ## @api.route('/openapi.json') class OpenAPI(Resource): def get(self): """Endpoint to get the OpenAPI json.""" absurl = url.absurl(request.base_url.replace(request.path, cfg.prefix+'/swagger.json')) content, _ = convert_url(absurl) return 
json.loads(content) @api.route('/version') class ServiceVersion(Resource): @api.response(200, description='Version of the running service.') @api.produces(['text/plain']) def get(self): """Endpoint to get the version of the running service.""" rc, out = run_tesseract_recognize('--version') if rc != 0: abort(500, 'problems getting version from tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain') @api.route('/help') class ServiceHelp(Resource): @api.response(200, description='Help for the running service.') @api.produces(['text/plain']) def get(self): """Endpoint to get the help for the running service.""" rc, out = run_tesseract_recognize('--help') if rc != 0: abort(500, 'problems getting help from tesseract-recognize command :: '+str(out)) return Response(out, mimetype='text/plain') num_requests = 0 @api.route('/process') class ProcessRequest(Resource): @images_pagexml_request(api) @api.doc(responses={400: 'tesseract-recognize execution failed.'}) def post(self, req_dict): """Endpoint for running tesseract-recognize on given images or page xml file.""" start_time = time() done_queue = queue.Queue() process_queue.put((done_queue, req_dict)) while True: try: thread, num_requests, pxml = done_queue.get(True, 0.05) break except queue.Empty: continue if isinstance(pxml, Exception): app.logger.error('Request '+str(num_requests)+' on thread '+str(thread)+' unsuccessful, ' +('%.4g' % (time()-start_time))+' sec. :: '+str(pxml)) abort(400, 'processing failed :: '+str(pxml)) else: app.logger.info('Request '+str(num_requests)+' on thread '+str(thread)+' successful, ' +('%.4g' % (time()-start_time))+' sec.') return pxml process_queue = queue.Queue() # type: ignore ## Processor thread function ## def start_processing(thread, process_queue): num_requests = 0 tmpdir = None while True: try: done_queue, req_dict = process_queue.get(True, 0.05) num_requests += 1 tmpdir = write_to_tmpdir(req_dict) opts = list(req_dict['options']) if len(opts) == 1 and opts[0][0] == '[': opts = json.loads(opts[0]) if req_dict['pagexml'] is not None: opts.append(os.path.join(tmpdir, os.path.basename(req_dict['pagexml']['filename']))) elif req_dict['images'] is not None: for image in req_dict['images']: opts.append(os.path.join(tmpdir, os.path.basename(image.filename))) else: raise KeyError('No images found in request.') opts.extend(['-o', os.path.join(tmpdir, 'output.xml')]) rc, out = run_tesseract_recognize(*opts) if rc != 0: raise RuntimeError('tesseract-recognize execution failed :: opts: '+str(opts)+' :: '+str(out)) pxml = pagexml.PageXML(os.path.join(tmpdir, 'output.xml')) done_queue.put((thread, num_requests, pxml)) except queue.Empty: continue except json.decoder.JSONDecodeError as ex: done_queue.put((thread, num_requests, RuntimeError('JSONDecodeError: '+str(ex)+' while parsing '+opts[0]))) except Exception as ex: done_queue.put((thread, num_requests, ex)) finally: if not cfg.debug and tmpdir is not None: shutil.rmtree(tmpdir) tmpdir = None for thread in range(cfg.threads): threading.Thread(target=start_processing, args=(thread+1, process_queue)).start() app.run(host=cfg.host, port=cfg.port, debug=cfg.debug)
[ "flask_restplus.Api", "werkzeug.exceptions.BadRequest", "shutil.rmtree", "os.path.join", "json.loads", "pagexml.PageXML", "prance.convert.convert_url", "tempfile.mkdtemp", "flask.Response", "flask.request.base_url.replace", "threading.Thread", "subprocess.Popen", "os.path.basename", "pagexml.set_omnius_schema", "functools.wraps", "queue.Queue", "jsonargparse.ArgumentParser", "flask.Flask", "time.time" ]
[((553, 580), 'pagexml.set_omnius_schema', 'pagexml.set_omnius_schema', ([], {}), '()\n', (578, 580), False, 'import pagexml\n'), ((1115, 1233), 'jsonargparse.ArgumentParser', 'ArgumentParser', ([], {'error_handler': '"""usage_and_exit_error_handler"""', 'logger': 'logger', 'default_env': '(True)', 'description': '__doc__'}), "(error_handler='usage_and_exit_error_handler', logger=logger,\n default_env=True, description=__doc__)\n", (1129, 1233), False, 'from jsonargparse import ArgumentParser, ActionConfigFile, ActionYesNo\n'), ((2511, 2528), 'pagexml.PageXML', 'pagexml.PageXML', ([], {}), '()\n', (2526, 2528), False, 'import pagexml\n'), ((4203, 4247), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': 'prefix', 'dir': 'basedir'}), '(prefix=prefix, dir=basedir)\n', (4219, 4247), False, 'import tempfile\n'), ((7258, 7337), 'subprocess.Popen', 'Popen', (['cmd'], {'shell': '(False)', 'stdin': 'PIPE', 'stdout': 'PIPE', 'stderr': 'STDOUT', 'close_fds': '(True)'}), '(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)\n', (7263, 7337), False, 'from subprocess import Popen, PIPE, STDOUT\n'), ((7673, 7688), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (7678, 7688), False, 'from flask import Flask, Response, request, abort\n'), ((7801, 7976), 'flask_restplus.Api', 'Api', (['app'], {'doc': "(cfg.prefix + '/swagger')", 'version': '"""2.0"""', 'prefix': 'cfg.prefix', 'title': '"""tesseract-recognize API"""', 'description': '"""An API for running tesseract-recognition jobs."""'}), "(app, doc=cfg.prefix + '/swagger', version='2.0', prefix=cfg.prefix,\n title='tesseract-recognize API', description=\n 'An API for running tesseract-recognition jobs.')\n", (7804, 7976), False, 'from flask_restplus import Api, Resource, reqparse\n'), ((10684, 10697), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (10695, 10697), False, 'import queue\n'), ((4303, 4352), 'os.path.basename', 'os.path.basename', (["req_dict['pagexml']['filename']"], {}), "(req_dict['pagexml']['filename'])\n", (4319, 4352), False, 'import os\n'), ((6692, 6705), 'functools.wraps', 'wraps', (['method'], {}), '(method)\n', (6697, 6705), False, 'from functools import wraps\n'), ((7554, 7580), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (7570, 7580), False, 'import os\n'), ((8415, 8434), 'prance.convert.convert_url', 'convert_url', (['absurl'], {}), '(absurl)\n', (8426, 8434), False, 'from prance.convert import convert_url\n'), ((8454, 8473), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (8464, 8473), False, 'import json\n'), ((8947, 8983), 'flask.Response', 'Response', (['out'], {'mimetype': '"""text/plain"""'}), "(out, mimetype='text/plain')\n", (8955, 8983), False, 'from flask import Flask, Response, request, abort\n'), ((9441, 9477), 'flask.Response', 'Response', (['out'], {'mimetype': '"""text/plain"""'}), "(out, mimetype='text/plain')\n", (9449, 9477), False, 'from flask import Flask, Response, request, abort\n'), ((9828, 9834), 'time.time', 'time', ([], {}), '()\n', (9832, 9834), False, 'from time import time\n'), ((9860, 9873), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (9871, 9873), False, 'import queue\n'), ((3272, 3300), 'os.path.basename', 'os.path.basename', (['x.filename'], {}), '(x.filename)\n', (3288, 3300), False, 'import os\n'), ((4371, 4397), 'os.path.join', 'os.path.join', (['tmpdir', 'fxml'], {}), '(tmpdir, fxml)\n', (4383, 4397), False, 'import os\n'), ((8322, 8390), 'flask.request.base_url.replace', 'request.base_url.replace', 
(['request.path', "(cfg.prefix + '/swagger.json')"], {}), "(request.path, cfg.prefix + '/swagger.json')\n", (8346, 8390), False, 'from flask import Flask, Response, request, abort\n'), ((12581, 12656), 'threading.Thread', 'threading.Thread', ([], {'target': 'start_processing', 'args': '(thread + 1, process_queue)'}), '(target=start_processing, args=(thread + 1, process_queue))\n', (12597, 12656), False, 'import threading\n'), ((3442, 3511), 'werkzeug.exceptions.BadRequest', 'BadRequest', (["('Received image not referenced in the Page XML: ' + fname)"], {}), "('Received image not referenced in the Page XML: ' + fname)\n", (3452, 3511), False, 'from werkzeug.exceptions import BadRequest\n'), ((4585, 4617), 'os.path.basename', 'os.path.basename', (['image.filename'], {}), '(image.filename)\n', (4601, 4617), False, 'import os\n'), ((11174, 11193), 'json.loads', 'json.loads', (['opts[0]'], {}), '(opts[0])\n', (11184, 11193), False, 'import json\n'), ((11950, 11984), 'os.path.join', 'os.path.join', (['tmpdir', '"""output.xml"""'], {}), "(tmpdir, 'output.xml')\n", (11962, 11984), False, 'import os\n'), ((12477, 12498), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (12490, 12498), False, 'import shutil\n'), ((11672, 11706), 'os.path.join', 'os.path.join', (['tmpdir', '"""output.xml"""'], {}), "(tmpdir, 'output.xml')\n", (11684, 11706), False, 'import os\n'), ((11299, 11348), 'os.path.basename', 'os.path.basename', (["req_dict['pagexml']['filename']"], {}), "(req_dict['pagexml']['filename'])\n", (11315, 11348), False, 'import os\n'), ((10605, 10611), 'time.time', 'time', ([], {}), '()\n', (10609, 10611), False, 'from time import time\n'), ((11514, 11546), 'os.path.basename', 'os.path.basename', (['image.filename'], {}), '(image.filename)\n', (11530, 11546), False, 'import os\n'), ((10335, 10341), 'time.time', 'time', ([], {}), '()\n', (10339, 10341), False, 'from time import time\n')]
import unittest from unittest.mock import ( patch, MagicMock ) from client.communication.messages import MessageCommand from client.communication.on_board import OnBoard, IncomingMessageHandler class OnBoardTest(unittest.TestCase): """ A suite of tests surrounding the OnBoard class functionality. """ def setUp(self) -> None: self._mock_message_handler = patch( 'client.communication.on_board.IncomingMessageHandler' ).start() self.addCleanup(patch.stopall) @patch('client.communication.on_board.build_command_message_with_args') def test_start_streaming_data(self, mock_build_message): """ Test the start streaming data send message. """ mock_message = 'mock_message' mock_build_message.return_value = mock_message mock_comm_link = MagicMock() mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.start_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['start', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) @patch('client.communication.on_board.build_command_message_with_args') def test_stop_streaming_data(self, mock_build_message): """ Test the start streaming data send message. """ mock_message = 'mock_message' mock_build_message.return_value = mock_message mock_comm_link = MagicMock() mock_comm_link.send = MagicMock() on_board = OnBoard(mock_comm_link) on_board.stop_streaming_sensor_data(['key1', 'key2', 'key3']) mock_build_message.assert_called_with( MessageCommand.STREAM_BIKE_SENSOR_DATA, ['stop', 'key1', 'key2', 'key3'] ) mock_comm_link.send.assert_called_with(mock_message.encode()) def test_incoming_stream_bike_sensor_data_msg(self): """ Test incoming data from the bike is dealt with properly. """ # Create some test sensor data messages. test_sensor_data_messages = [ b'stream-bike-sensor-data:AIR_TEMPERATURE:30:TIMESTAMP1', b'stream-bike-sensor-data:AIR_TEMPERATURE:20:TIMESTAMP2', ( b'stream-bike-sensor-data:' b'AIR_TEMPERATURE:10:TIMESTAMP3:' b'TYRE_PRESSURE_REAR:5:TIMESTAMP4:' b'BRAKE_FRONT_ACTIVE:50:TIMESTAMP5' ), b'stream-bike-sensor-data:AIR_TEMPERATURE:0:TIMESTAMP6', ] # Create OnBoard object. mock_comm_link = MagicMock() on_board = OnBoard(mock_comm_link) # Simulate handle_incoming_message call for the test messages. for message in test_sensor_data_messages: on_board.handle_incoming_message(message) # Verify all messages are handled correctly. expected_data = { 'AIR_TEMPERATURE': [ (b'TIMESTAMP1', b'30'), (b'TIMESTAMP2', b'20'), (b'TIMESTAMP3', b'10'), (b'TIMESTAMP6', b'0') ], 'TYRE_PRESSURE_REAR': [ (b'TIMESTAMP4', b'5') ], 'BRAKE_FRONT_ACTIVE': [ (b'TIMESTAMP5', b'50') ], } self.assertListEqual( expected_data['AIR_TEMPERATURE'], on_board.get_recorded_sensor_data('AIR_TEMPERATURE') ) self.assertListEqual( expected_data['TYRE_PRESSURE_REAR'], on_board.get_recorded_sensor_data('TYRE_PRESSURE_REAR') ) self.assertListEqual( expected_data['BRAKE_FRONT_ACTIVE'], on_board.get_recorded_sensor_data('BRAKE_FRONT_ACTIVE') ) class IncomingMessageHandlerTest(unittest.TestCase): """ A suite of tests surrounding the IncomingMessageHandler class functionality. """ def test_incoming_msg_handled(self): """ Test incoming message is handled correctly. """ test_msg = 'test:message' mock_comm_link = MagicMock() mock_comm_link.receive.side_effect = [test_msg] mock_on_board = MagicMock() msg_handler = IncomingMessageHandler(mock_on_board, mock_comm_link) # Run the thread in a try catch and ignore StopIteration errors. 
It # will error because it will exhaust the side_effect list and cause # a StopIteration error. This is a messy solution but it works to # test for now. try: msg_handler.run() except StopIteration: pass mock_on_board.handle_incoming_message.assert_called_with(test_msg)
[ "unittest.mock.patch", "unittest.mock.MagicMock", "client.communication.on_board.IncomingMessageHandler", "client.communication.on_board.OnBoard" ]
[((529, 599), 'unittest.mock.patch', 'patch', (['"""client.communication.on_board.build_command_message_with_args"""'], {}), "('client.communication.on_board.build_command_message_with_args')\n", (534, 599), False, 'from unittest.mock import patch, MagicMock\n'), ((1254, 1324), 'unittest.mock.patch', 'patch', (['"""client.communication.on_board.build_command_message_with_args"""'], {}), "('client.communication.on_board.build_command_message_with_args')\n", (1259, 1324), False, 'from unittest.mock import patch, MagicMock\n'), ((855, 866), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (864, 866), False, 'from unittest.mock import patch, MagicMock\n'), ((897, 908), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (906, 908), False, 'from unittest.mock import patch, MagicMock\n'), ((928, 951), 'client.communication.on_board.OnBoard', 'OnBoard', (['mock_comm_link'], {}), '(mock_comm_link)\n', (935, 951), False, 'from client.communication.on_board import OnBoard, IncomingMessageHandler\n'), ((1579, 1590), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1588, 1590), False, 'from unittest.mock import patch, MagicMock\n'), ((1621, 1632), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1630, 1632), False, 'from unittest.mock import patch, MagicMock\n'), ((1652, 1675), 'client.communication.on_board.OnBoard', 'OnBoard', (['mock_comm_link'], {}), '(mock_comm_link)\n', (1659, 1675), False, 'from client.communication.on_board import OnBoard, IncomingMessageHandler\n'), ((2710, 2721), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2719, 2721), False, 'from unittest.mock import patch, MagicMock\n'), ((2741, 2764), 'client.communication.on_board.OnBoard', 'OnBoard', (['mock_comm_link'], {}), '(mock_comm_link)\n', (2748, 2764), False, 'from client.communication.on_board import OnBoard, IncomingMessageHandler\n'), ((4216, 4227), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4225, 4227), False, 'from unittest.mock import patch, MagicMock\n'), ((4308, 4319), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4317, 4319), False, 'from unittest.mock import patch, MagicMock\n'), ((4343, 4396), 'client.communication.on_board.IncomingMessageHandler', 'IncomingMessageHandler', (['mock_on_board', 'mock_comm_link'], {}), '(mock_on_board, mock_comm_link)\n', (4365, 4396), False, 'from client.communication.on_board import OnBoard, IncomingMessageHandler\n'), ((392, 453), 'unittest.mock.patch', 'patch', (['"""client.communication.on_board.IncomingMessageHandler"""'], {}), "('client.communication.on_board.IncomingMessageHandler')\n", (397, 453), False, 'from unittest.mock import patch, MagicMock\n')]
import pytest
from django.contrib.auth.models import User
from django.test import Client


@pytest.fixture(scope="session")
def superuser(django_db_setup, django_db_blocker) -> User:
    with django_db_blocker.unblock():
        user: User = User.objects.get_or_create(
            username="x",
            email="<EMAIL>",
            is_staff=True,
            is_superuser=True,
        )[0]
        user.set_password("x")
        user.save()
    return user


@pytest.fixture(scope="session")
def django_client(django_db_blocker, superuser: User) -> Client:
    client = Client()
    with django_db_blocker.unblock():
        client.force_login(superuser)
    return client
[ "pytest.fixture", "django.contrib.auth.models.User.objects.get_or_create", "django.test.Client" ]
[((97, 128), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (111, 128), False, 'import pytest\n'), ((487, 518), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (501, 518), False, 'import pytest\n'), ((599, 607), 'django.test.Client', 'Client', ([], {}), '()\n', (605, 607), False, 'from django.test import Client\n'), ((250, 345), 'django.contrib.auth.models.User.objects.get_or_create', 'User.objects.get_or_create', ([], {'username': '"""x"""', 'email': '"""<EMAIL>"""', 'is_staff': '(True)', 'is_superuser': '(True)'}), "(username='x', email='<EMAIL>', is_staff=True,\n is_superuser=True)\n", (276, 345), False, 'from django.contrib.auth.models import User\n')]
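A hypothetical test consuming the fixtures above; it assumes pytest-django is configured for the project and that the Django admin is mounted at /admin/.

# Hypothetical test using the session-scoped fixtures above.
import pytest

@pytest.mark.django_db
def test_superuser_can_open_admin(django_client):
    response = django_client.get("/admin/")
    assert response.status_code == 200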
import setuptools

with open('README.md', 'r') as fh:
    long_description = fh.read()

setuptools.setup(
    name='lambdata-pkutrich',
    version='0.0.4',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Some very basic DataFrame tools.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/llpk79/DS-Unit-3-Sprint-1-Software-Engineering/tree/master/module2-oop-code-style-and-reviews',
    packages=setuptools.find_packages(),
    classifiers=['Programming Language :: Python :: 3',
                 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
                 'Operating System :: OS Independent'],
)
[ "setuptools.find_packages" ]
[((490, 516), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (514, 516), False, 'import setuptools\n')]
from rlutilities.simulation import Car, Ball from rlutilities.linear_algebra import * from analysis.throttle import * from analysis.boost import * from analysis.jump import * from rlbot.agents.base_agent import SimpleControllerState from rlbot.utils.game_state_util import CarState from util.drive import steer_toward_target from util.vec import Vec3 from util.rlutilities import to_vec3, rotation_to_euler, closest_point_on_obb from math import pi, atan, atan2, degrees def get_car_front_center(car: Car): return car.location + normalize(car.forward()) * car.hitbox().half_width[0] + normalize(car.up()) * car.hitbox().half_width[2] class Intercept(): def __init__(self, location: vec3, boost = True): self.location = location self.boost = boost self.time = None self.purpose = None # rip self.dodge = False def simulate(self, bot) -> vec3: # print('simulate intercept') # Init vars c = Car(bot.game.my_car) b = Ball(bot.game.ball) t = vec3(bot.target) intercept = self.location dt = 1.0 / 60.0 hit = False min_error = None # Drive towards intercept (moving in direction of c.forward()) c.rotation = look_at(intercept, c.up()) direction = normalize(intercept - c.location)#c.forward() advance_distance = norm(intercept - c.location) - c.hitbox().half_width[0] - b.collision_radius translation = direction * advance_distance sim_start_state: ThrottleFrame = BoostAnalysis().travel_distance(advance_distance, norm(c.velocity)) c.velocity = direction * sim_start_state.speed c.location += translation c.time += sim_start_state.time bot.ball_predictions = [vec3(b.location)] while b.time < c.time: b.step(dt) bot.ball_predictions.append(vec3(b.location)) # print(c.time, b.time) # print(c.location, b.location) # Simulate the collision and resulting for i in range(60*3): c.location += c.velocity * dt b.step(dt, c) # Check if we hit the ball yet if norm(b.location - c.location) < (c.hitbox().half_width[0] + b.collision_radius) * 1.05: hit = True # print('hit') # Measure dist from target error = t - b.location if hit and (min_error == None or norm(error) < norm(min_error)): min_error = error # Record trajectory bot.ball_predictions.append(vec3(b.location)) if not hit: return None return min_error # warning: lazy conversions and variable scope def get_controls(self, car_state: CarState, car: Car): controls = SimpleControllerState() target_Vec3 = Vec3(self.location[0], self.location[1], self.location[2]) if angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi / 2: controls.boost = False controls.handbrake = True elif angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi / 4: controls.boost = False controls.handbrake = False else: controls.boost = self.boost controls.handbrake = False # Be smart about not using boost at max speed # if Vec3(car.physics.velocity).length() > self.boost_analysis.frames[-1].speed - 10: # controls.boost = False controls.steer = steer_toward_target(car_state, target_Vec3) controls.throttle = 1 return controls @staticmethod def calculate_old(car: Car, ball: Ball, target: vec3, ball_predictions = None): # Init vars fake_car = Car(car) b = Ball(ball) # Generate predictions of ball path if ball_predictions is None: ball_predictions = [vec3(b.location)] for i in range(60*5): b.step(1.0 / 60.0) ball_predictions.append(vec3(b.location)) # Gradually converge on ball location by aiming at a location, checking time to that location, # and then aiming at the ball's NEW position. 
Guaranteed to converge (typically in <10 iterations) # unless the ball is moving away from the car faster than the car's max boost speed intercept = Intercept(b.location) intercept.purpose = 'ball' intercept.boost = True intercept_ball_position = vec3(b.location) i = 0 max_tries = 100 analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i < max_tries: # Find optimal spot to hit the ball optimal_hit_vector = normalize(target - intercept_ball_position) * b.collision_radius optimal_hit_location = intercept_ball_position - optimal_hit_vector # Find ideal rotation, unless it intersects with ground optimal_rotation = look_at(optimal_hit_vector, vec3(0, 0, 1))#axis_to_rotation(optimal_hit_vector) # this might be wrong fake_car.rotation = optimal_rotation # print(f'fake_car.location {fake_car.location}') # print(f'get_car_front_center(fake_car) {get_car_front_center(fake_car)}') fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try to position the car's front center directly on top of the best hit vector euler = rotation_to_euler(optimal_rotation) # todo put some super precise trigonometry in here to find the max angle allowed at given height if fake_car.location[2] <= fake_car.hitbox().half_width[0]: euler.pitch = 0 fake_car.rotation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try to position the car's front center directly on top of the best hit vector # Adjust vertical position if it (still) intersects with ground if fake_car.location[2] < 17.0: fake_car.location[2] = 17.0 intercept.location = get_car_front_center(fake_car) # Calculate jump time needed jump_height_time = JumpAnalysis().get_frame_by_height(intercept.location[2]).time # or solve with motion equation # car_euler = rotation_to_euler(car.rotation) # jump_pitch_time = (euler.pitch - car_euler.pitch) / 5.5 + 0.35 # disregarding angular acceleration # jump_yaw_time = (euler.yaw - car_euler.yaw) / 5.5 + 0.35 # disregarding angular acceleration # jump_roll_time = (euler.roll - car_euler.roll) / 5.5 + 0.35 # disregarding angular acceleration # jump_time = max(jump_height_time, jump_pitch_time, jump_yaw_time, jump_roll_time) jump_time = jump_height_time # todo revisit rotation time # print('jump_time', jump_time) # Calculate distance to drive before jumping (to arrive perfectly on target) total_translation = intercept.location - get_car_front_center(car) total_translation[2] = 0 total_distance = norm(total_translation) start_index = analyzer.get_index_by_speed(norm(car.velocity)) start_frame = analyzer.frames[start_index] custom_error_func = lambda frame : abs(total_distance - (frame.distance - start_frame.distance) - frame.speed * jump_time) drive_analysis = analyzer.get_frame_by_error(custom_error_func, start_index) arrival_time = drive_analysis.time - start_frame.time + jump_time # print('drive_analysis.time', drive_analysis.time) # print('drive_analysis', start_index) # arrival_time = analyzer.travel_distance(total_distance, norm(car.velocity)).time # drive_analysis = analyzer.travel_distance(norm(intercept.location - c.location), norm(c.velocity)) ball_index = int(round(arrival_time * 60)) if ball_index >= len(ball_predictions): intercept.location = ball_predictions[-1] intercept.time = len(ball_predictions) / 60.0 break ball_location = ball_predictions[ball_index] # print(f'Iteration {i} distance {norm(ball_location + vec3(optimal_hit_vector[0], optimal_hit_vector[1], 0) - 
intercept.location)}') if norm(ball_location - intercept_ball_position) <= 1: # if norm(intercept_ball_position - get_car_front_center(fake_car)) > 100: # intercept.location = ball_predictions[-1] # intercept.time = len(ball_predictions) / 60.0 # return intercept intercept.dodge = True #jump_time > 0.2 intercept.jump_time = car.time + arrival_time - jump_time intercept.dodge_preorientation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll)) intercept.dodge_delay = jump_time intercept.dodge_direction = normalize(vec2(optimal_hit_vector)) # print(f'intercept_ball_position', intercept_ball_position) # print(f'intercept.location', intercept.location) # print(f'time until jump {drive_analysis.time}') # print(f'time now {car.time}') # print(f'distance until jump {drive_analysis.distance}') # print(f'total distance to target {total_distance}') # print(f'horiz speed @ jump {drive_analysis.speed}') # print(f'time intended to be in air {jump_time}') # print(f'distance travelled in air {jump_time * drive_analysis.speed}') # print(f'distance remaining to target @ jump {total_distance - drive_analysis.distance}') # print(f'Intercept convergence in {i} iterations') # print(f'desired roll {euler.roll}') # print(f'actual roll {rotation_to_euler(c.rotation).roll}') break intercept_ball_position = vec3(ball_location) # intercept.location = vec3(ball_location) # intercept.location[2] = 0 intercept.time = arrival_time i += 1 if i >= max_tries: print(f'Warning: max tries ({max_tries}) exceeded for calculating intercept') # Intercept is only meant for ground paths (and walls/cieling are only indirectly supported) # collision_radius = c.hitbox().half_width[2] * 2 + b.collision_radius + b.collision_radius * 8 # on_ground = intercept.location[2] <= collision_radius # on_back_wall = abs(intercept.location[1]) >= 5120 - collision_radius # on_side_wall = abs(intercept.location[0]) >= 4096 - collision_radius # # on_cieling = intercept.location[2] >= 2044 - collision_radius # reachable = on_ground # or on_back_wall or on_side_wall # or on_cieling # if not reachable: # return None return intercept @staticmethod def calculate(car: Car, ball: Ball, target: vec3, ball_predictions = None): # Init vars b = Ball(ball) dt = 1.0 / 60.0 # Generate predictions of ball path if ball_predictions is None: ball_predictions = [] for i in range(60*5): b.step(dt) ball_predictions.append(vec3(b.location)) # Gradually converge on ball location by aiming at a location, checking time to that location, # and then aiming at the ball's NEW position. 
Guaranteed to converge (typically in <10 iterations) # unless the ball is moving away from the car faster than the car's max boost speed intercept = Intercept(b.location) intercept.purpose = 'ball' intercept.boost = True intercept_ball_position = vec3(b.location) collision_achieved = False last_horizontal_error = None last_horizontal_offset = None i = 0 max_tries = 101 analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis() while i < max_tries: i += 1 fake_car = Car(car) direction = normalize(intercept.location - car.location) fake_car.rotation = look_at(direction, fake_car.up()) for t in range(60*5): # Step car location with throttle/boost analysis data # Not super efficient but POITROAE frame = analyzer.travel_time(dt, norm(fake_car.velocity)) # print('in 1 frame I travel', frame.time, frame.distance, frame.speed) fake_car.location += direction * frame.distance fake_car.velocity = direction * frame.speed fake_car.time += dt ball_location = ball_predictions[t] # Check for collision p = closest_point_on_obb(fake_car.hitbox(), ball_location) if norm(p - ball_location) <= ball.collision_radius: direction_vector = p - (fake_car.location - normalize(fake_car.forward()) * 13.88) # octane center of mass direction_vector[2] = 0 target_direction_vector = target - ball_location target_direction_vector[2] = 0 intercept_ball_position = ball_location direction = atan2(direction_vector[1], direction_vector[0]) ideal_direction = atan2(target_direction_vector[1], target_direction_vector[0]) horizontal_error = direction - ideal_direction # intercept.location = vec3(ball_location) # intercept.time = fake_car.time # return intercept # Now descend the hit direction gradient # Kick off the gradient descent with an arbitrary seed value if last_horizontal_error is None: last_horizontal_error = horizontal_error last_horizontal_offset = 0 if horizontal_error > 0: horizontal_offset = 25 else: horizontal_offset = 25 intercept.location = ball_location - normalize(fake_car.left()) * horizontal_offset break # Recursive case of gradient descent if horizontal_offset == last_horizontal_offset: gradient = 0 else: gradient = (horizontal_error - last_horizontal_error) / (horizontal_offset - last_horizontal_offset) if gradient == 0: predicted_horizontal_offset = horizontal_offset else: predicted_horizontal_offset = horizontal_offset - horizontal_error / gradient # Base case (convergence) if abs(gradient) < 0.0005: print(f'convergence in {i} iterations') print(f'gradient = {gradient}') print(f'last_horizontal_offset = {last_horizontal_offset}') print(f'direction = {degrees(direction)}') print(f'ideal direction = {degrees(ideal_direction)}') print(f'target = {target}') print(f'ball_location = {ball_location}') return intercept # Edge case exit: offset maxed out max_horizontal_offset = car.hitbox().half_width[1] + ball.collision_radius if predicted_horizontal_offset > max_horizontal_offset: predicted_horizontal_offset = max_horizontal_offset elif predicted_horizontal_offset < -max_horizontal_offset: predicted_horizontal_offset = - max_horizontal_offset last_horizontal_offset = horizontal_offset last_horizontal_error = horizontal_error horizontal_offset = predicted_horizontal_offset # Return the latest intercept location and continue descending the gradient intercept.location = ball_location - normalize(fake_car.left()) * predicted_horizontal_offset print(f'iteration {i}') print(f'gradient = {gradient}') print(f'horizontal_offset = {horizontal_offset}') print(f'horizontal_error = {degrees(horizontal_error)}') # 
print(f'ideal direction = {degrees(ideal_direction)}') break # Check for arrival if norm(fake_car.location - intercept.location) < ball.collision_radius / 2: intercept.location = ball_location break if i >= max_tries: print(f'Warning: max tries ({max_tries}) exceeded for calculating intercept') return intercept
[ "rlbot.agents.base_agent.SimpleControllerState", "util.vec.Vec3", "util.rlutilities.rotation_to_euler", "math.atan2", "rlutilities.simulation.Car", "util.rlutilities.to_vec3", "util.drive.steer_toward_target", "rlutilities.simulation.Ball", "math.degrees" ]
[((968, 988), 'rlutilities.simulation.Car', 'Car', (['bot.game.my_car'], {}), '(bot.game.my_car)\n', (971, 988), False, 'from rlutilities.simulation import Car, Ball\n'), ((1001, 1020), 'rlutilities.simulation.Ball', 'Ball', (['bot.game.ball'], {}), '(bot.game.ball)\n', (1005, 1020), False, 'from rlutilities.simulation import Car, Ball\n'), ((2783, 2806), 'rlbot.agents.base_agent.SimpleControllerState', 'SimpleControllerState', ([], {}), '()\n', (2804, 2806), False, 'from rlbot.agents.base_agent import SimpleControllerState\n'), ((2829, 2887), 'util.vec.Vec3', 'Vec3', (['self.location[0]', 'self.location[1]', 'self.location[2]'], {}), '(self.location[0], self.location[1], self.location[2])\n', (2833, 2887), False, 'from util.vec import Vec3\n'), ((3549, 3592), 'util.drive.steer_toward_target', 'steer_toward_target', (['car_state', 'target_Vec3'], {}), '(car_state, target_Vec3)\n', (3568, 3592), False, 'from util.drive import steer_toward_target\n'), ((3789, 3797), 'rlutilities.simulation.Car', 'Car', (['car'], {}), '(car)\n', (3792, 3797), False, 'from rlutilities.simulation import Car, Ball\n'), ((3810, 3820), 'rlutilities.simulation.Ball', 'Ball', (['ball'], {}), '(ball)\n', (3814, 3820), False, 'from rlutilities.simulation import Car, Ball\n'), ((11255, 11265), 'rlutilities.simulation.Ball', 'Ball', (['ball'], {}), '(ball)\n', (11259, 11265), False, 'from rlutilities.simulation import Car, Ball\n'), ((5501, 5536), 'util.rlutilities.rotation_to_euler', 'rotation_to_euler', (['optimal_rotation'], {}), '(optimal_rotation)\n', (5518, 5536), False, 'from util.rlutilities import to_vec3, rotation_to_euler, closest_point_on_obb\n'), ((12284, 12292), 'rlutilities.simulation.Car', 'Car', (['car'], {}), '(car)\n', (12287, 12292), False, 'from rlutilities.simulation import Car, Ball\n'), ((2930, 2965), 'util.rlutilities.to_vec3', 'to_vec3', (['car_state.physics.location'], {}), '(car_state.physics.location)\n', (2937, 2965), False, 'from util.rlutilities import to_vec3, rotation_to_euler, closest_point_on_obb\n'), ((13536, 13583), 'math.atan2', 'atan2', (['direction_vector[1]', 'direction_vector[0]'], {}), '(direction_vector[1], direction_vector[0])\n', (13541, 13583), False, 'from math import pi, atan, atan2, degrees\n'), ((13622, 13683), 'math.atan2', 'atan2', (['target_direction_vector[1]', 'target_direction_vector[0]'], {}), '(target_direction_vector[1], target_direction_vector[0])\n', (13627, 13683), False, 'from math import pi, atan, atan2, degrees\n'), ((3108, 3143), 'util.rlutilities.to_vec3', 'to_vec3', (['car_state.physics.location'], {}), '(car_state.physics.location)\n', (3115, 3143), False, 'from util.rlutilities import to_vec3, rotation_to_euler, closest_point_on_obb\n'), ((16772, 16797), 'math.degrees', 'degrees', (['horizontal_error'], {}), '(horizontal_error)\n', (16779, 16797), False, 'from math import pi, atan, atan2, degrees\n'), ((15435, 15453), 'math.degrees', 'degrees', (['direction'], {}), '(direction)\n', (15442, 15453), False, 'from math import pi, atan, atan2, degrees\n'), ((15508, 15532), 'math.degrees', 'degrees', (['ideal_direction'], {}), '(ideal_direction)\n', (15515, 15532), False, 'from math import pi, atan, atan2, degrees\n')]
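A minimal sketch of the fixed-point intercept idea used in the sample above: aim at the predicted ball position, estimate how long it takes to reach it, then re-aim at wherever the prediction says the ball will be at that arrival time, repeating until the aim point stops moving. The 60 Hz prediction list, the constant-speed travel model, and every name below are illustrative assumptions; the original sample uses BoostAnalysis/ThrottleAnalysis lookups and full 3D hit geometry instead.

import math

def converge_intercept(car_pos, car_speed, ball_predictions, dt=1.0 / 60.0, max_tries=100, tol=1.0):
    """Fixed-point iteration: keep re-aiming at where the ball will be when the car arrives."""
    target = ball_predictions[0]
    for _ in range(max_tries):
        distance = math.dist(car_pos, target)
        arrival_time = distance / max(car_speed, 1e-6)  # constant-speed travel model (assumption)
        index = min(int(round(arrival_time / dt)), len(ball_predictions) - 1)
        new_target = ball_predictions[index]
        if math.dist(new_target, target) <= tol:  # aim point stopped moving: converged
            return new_target, arrival_time
        target = new_target
    return None  # ball outruns the car; a caller would fall back to the last prediction

# Example: a ball drifting away at 500 uu/s, chased at 1400 uu/s, settles in a handful of iterations.
predictions = [(2000.0 + 500.0 * i / 60.0, 0.0) for i in range(300)]
print(converge_intercept((0.0, 0.0), 1400.0, predictions))

As in the sample, the loop typically settles in well under ten iterations unless the ball is receding faster than the car can travel.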
#!/usr/bin/env python # coding: utf-8 from typing import List import pandas as pd import rpy2.robjects as robjects from rpy2.robjects import pandas2ri def tsfeatures_r(ts: pd.DataFrame, freq: int, features: List[str] = ["length", "acf_features", "arch_stat", "crossing_points", "entropy", "flat_spots", "heterogeneity", "holt_parameters", "hurst", "hw_parameters", "lumpiness", "nonlinearity", "pacf_features", "stability", "stl_features", "unitroot_kpss", "unitroot_pp"], **kwargs) -> pd.DataFrame: """tsfeatures wrapper using r. Parameters ---------- ts: pandas df Pandas DataFrame with columns ['unique_id', 'ds', 'y']. Long panel of time series. freq: int Frequency of the time series. features: List[str] String list of features to calculate. **kwargs: Arguments used by the original tsfeatures function. References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html """ rstring = """ function(df, freq, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) dt <- as.data.table(df) setkey(dt, unique_id) series_list <- split(dt, by = "unique_id", keep.by = FALSE) series_list <- lapply(series_list, function(serie) serie[, ts(y, frequency = freq)]) if("hw_parameters" %in% features){ features <- setdiff(features, "hw_parameters") if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list, "hw_parameters", ...)) names(hw_series_features) <- paste0("hw_", names(hw_series_features)) series_features <- suppressMessages(tsfeatures(series_list, features, ...)) series_features <- cbind(series_features, hw_series_features) } else { series_features <- suppressMessages(tsfeatures(series_list, "hw_parameters", ...)) names(series_features) <- paste0("hw_", names(series_features)) } } else { series_features <- suppressMessages(tsfeatures(series_list, features, ...)) } setDT(series_features) series_features[, unique_id := names(series_list)] } """ pandas2ri.activate() rfunc = robjects.r(rstring) feats = rfunc(ts, freq, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats = feats.rename(columns=renamer) return feats def tsfeatures_r_wide(ts: pd.DataFrame, features: List[str] = ["length", "acf_features", "arch_stat", "crossing_points", "entropy", "flat_spots", "heterogeneity", "holt_parameters", "hurst", "hw_parameters", "lumpiness", "nonlinearity", "pacf_features", "stability", "stl_features", "unitroot_kpss", "unitroot_pp"], **kwargs) -> pd.DataFrame: """tsfeatures wrapper using r. Parameters ---------- ts: pandas df Pandas DataFrame with columns ['unique_id', 'seasonality', 'y']. Wide panel of time series. features: List[str] String list of features to calculate. **kwargs: Arguments used by the original tsfeatures function. 
References ---------- https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html """ rstring = """ function(uids, seasonalities, ys, features, ...){ suppressMessages(library(data.table)) suppressMessages(library(tsfeatures)) suppressMessages(library(purrr)) series_list <- pmap( list(uids, seasonalities, ys), function(uid, seasonality, y) ts(y, frequency=seasonality) ) names(series_list) <- uids if("hw_parameters" %in% features){ features <- setdiff(features, "hw_parameters") if(length(features)>0){ hw_series_features <- suppressMessages(tsfeatures(series_list, "hw_parameters", ...)) names(hw_series_features) <- paste0("hw_", names(hw_series_features)) series_features <- suppressMessages(tsfeatures(series_list, features, ...)) series_features <- cbind(series_features, hw_series_features) } else { series_features <- suppressMessages(tsfeatures(series_list, "hw_parameters", ...)) names(series_features) <- paste0("hw_", names(series_features)) } } else { series_features <- suppressMessages(tsfeatures(series_list, features, ...)) } setDT(series_features) series_features[, unique_id := names(series_list)] } """ pandas2ri.activate() rfunc = robjects.r(rstring) uids = ts['unique_id'].to_list() seasonalities = ts['seasonality'].to_list() ys = ts['y'].to_list() feats = rfunc(uids, seasonalities, ys, features, **kwargs) pandas2ri.deactivate() renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'} feats = feats.rename(columns=renamer) return feats
[ "rpy2.robjects.pandas2ri.deactivate", "rpy2.robjects.pandas2ri.activate", "rpy2.robjects.r" ]
[((2616, 2636), 'rpy2.robjects.pandas2ri.activate', 'pandas2ri.activate', ([], {}), '()\n', (2634, 2636), False, 'from rpy2.robjects import pandas2ri\n'), ((2649, 2668), 'rpy2.robjects.r', 'robjects.r', (['rstring'], {}), '(rstring)\n', (2659, 2668), True, 'import rpy2.robjects as robjects\n'), ((2722, 2744), 'rpy2.robjects.pandas2ri.deactivate', 'pandas2ri.deactivate', ([], {}), '()\n', (2742, 2744), False, 'from rpy2.robjects import pandas2ri\n'), ((5410, 5430), 'rpy2.robjects.pandas2ri.activate', 'pandas2ri.activate', ([], {}), '()\n', (5428, 5430), False, 'from rpy2.robjects import pandas2ri\n'), ((5443, 5462), 'rpy2.robjects.r', 'robjects.r', (['rstring'], {}), '(rstring)\n', (5453, 5462), True, 'import rpy2.robjects as robjects\n'), ((5644, 5666), 'rpy2.robjects.pandas2ri.deactivate', 'pandas2ri.deactivate', ([], {}), '()\n', (5664, 5666), False, 'from rpy2.robjects import pandas2ri\n')]
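A hedged usage sketch for the tsfeatures_r wrapper defined above: build a long panel with the expected ['unique_id', 'ds', 'y'] columns and request a small feature subset. The synthetic monthly data and the chosen features are illustrative only, and the call works only when rpy2 can reach an R installation that has the tsfeatures and data.table packages available.

import numpy as np
import pandas as pd
# tsfeatures_r as defined in the sample above

rng = np.random.default_rng(0)
dates = pd.date_range("2020-01-01", periods=36, freq="MS")
panel = pd.concat(
    [pd.DataFrame({"unique_id": uid, "ds": dates, "y": rng.normal(size=len(dates)).cumsum()})
     for uid in ["series_1", "series_2"]],
    ignore_index=True,
)

# freq=12 because the toy series are monthly; any subset of the wrapper's default feature list works.
feats = tsfeatures_r(panel, freq=12, features=["entropy", "stability", "stl_features"])
print(feats.head())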
#!/usr/bin/env python3

import logging

from adb.adb import ADB

if __name__ == "__main__":
    # Logging configuration.
    logger = logging.getLogger(__name__)
    logging.basicConfig(
        format="%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()] %(message)s",
        datefmt="%d/%m/%Y %H:%M:%S",
        level=logging.INFO,
    )

    # This is an example file showing how the adb wrapper can be used.
    adb = ADB()

    # Start with a clean adb server.
    adb.kill_server()
    adb.connect()

    adb_version = adb.get_version()
    logger.info("ADB version: {0}".format(adb_version))

    connected_devices = adb.get_available_devices()
    logger.info("Connected devices: {0}".format(connected_devices))

    # Set the first device in the list as the target of the subsequent commands.
    adb.target_device = connected_devices[0]
    adb.wait_for_device()

    logger.info(
        "Message from Android device: {0}".format(adb.shell(['echo "Hello World!"']))
    )
[ "adb.adb.ADB", "logging.basicConfig", "logging.getLogger" ]
[((135, 162), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (152, 162), False, 'import logging\n'), ((167, 321), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()] %(message)s"""', 'datefmt': '"""%d/%m/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()] %(message)s',\n datefmt='%d/%m/%Y %H:%M:%S', level=logging.INFO)\n", (186, 321), False, 'import logging\n'), ((427, 432), 'adb.adb.ADB', 'ADB', ([], {}), '()\n', (430, 432), False, 'from adb.adb import ADB\n')]
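A small follow-on sketch using only the wrapper methods shown in the sample above (connect, get_available_devices, target_device, wait_for_device, shell): run one shell command on every attached device instead of just the first one. That shell() returns the command's output as printable text is an assumption drawn from the usage above; getprop itself is a standard Android command.

import logging
from adb.adb import ADB

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

adb = ADB()
adb.connect()

# Re-point the wrapper at each device in turn and query its Android version.
for device in adb.get_available_devices():
    adb.target_device = device
    adb.wait_for_device()
    release = adb.shell(["getprop ro.build.version.release"])  # assumed to return the command output
    logger.info("Device {0} reports Android {1}".format(device, release))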
import numpy as np import xarray as xr from glob import glob import observation_operators as obs import tropomi_tools as tt import scipy.linalg as la import toolbox as tx from datetime import date,datetime,timedelta def getLETKFConfig(testing=False): data = tx.getSpeciesConfig(testing) err_config = data['OBS_ERROR_MATRICES'] if '.npy' in err_config[0]: #Load error matrices from numpy files raise NotImplementedError else: #Assume list of strings errs = np.array([float(e) for e in err_config]) #Provide a list of observation operator classes in order of the species to assimilate. obs_operator_classes = [getattr(obs, s) for s in data['OBS_OPERATORS']] #If you are simulating nature (SIMULATE_NATURE=true in setup_ensemble.sh), provide the nature helper class. if data['SIMULATE_NATURE'] == "false": raise NotImplementedError #No support for real observations yet! else: nature_h_functions = [getattr(obs, h) for h in data['NATURE_H_FUNCTIONS']] inflation = float(data['INFLATION_FACTOR']) return [errs, obs_operator_classes,nature_h_functions,inflation] #This class contains useful methods for getting data from GEOS-Chem restart files and #emissions scaling factor netCDFs. After initialization it contains the necessary data #and can output it in useful ways to other functions in the LETKF procedure. class GC_Translator(object): def __init__(self, path_to_rundir,timestamp,computeStateVec = False,testing=False): #self.latinds,self.loninds = tx.getLatLonList(ensnum) self.filename = f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4' self.timestamp=timestamp self.timestring = f'minutes since {timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]} {timestamp[9:11]}:{timestamp[11:13]}:00' self.restart_ds = xr.load_dataset(self.filename) self.emis_sf_filenames = glob(f'{path_to_rundir}*_SCALEFACTOR.nc') self.testing=testing if self.testing: self.num = path_to_rundir.split('_')[-1][0:4] print(f"GC_translator number {self.num} has been called for directory {path_to_rundir} and restart {self.filename}; construction beginning") self.emis_ds_list = {} for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name] = xr.load_dataset(file) if self.testing: print(f"GC_translator number {self.num} has loaded scaling factors for {name}") if computeStateVec: self.buildStateVector() else: self.statevec = None self.statevec_lengths = None #Until state vector is initialized this variable is None if self.testing: print(f"GC_Translator number {self.num} construction complete.") #Since only one timestamp, returns in format lev,lat,lon def getSpecies3Dconc(self, species): da = np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze() if self.testing: print(f"GC_Translator number {self.num} got 3D conc for species {species} which are of dimension {np.shape(da)}.") return da def setSpecies3Dconc(self, species, conc3d): baseshape = np.shape(conc3d) conc4d = conc3d.reshape(np.concatenate([np.array([1]),baseshape])) if self.testing: print(f"GC_Translator number {self.num} set 3D conc for species {species} which are of dimension {np.shape(conc4d)}.") self.restart_ds[f'SpeciesRst_{species}'] = (["time","lev","lat","lon"],conc4d,{"long_name":f"Dry mixing ratio of species {species}","units":"mol mol-1 dry","averaging_method":"instantaneous"}) def getLat(self): return np.array(self.restart_ds['lat']) def getLon(self): return np.array(self.restart_ds['lon']) def getLev(self): return np.array(self.restart_ds['lev']) def getRestartTime(self): return np.array(self.restart_ds['time']) def 
getEmisTime(self): return np.array(list(self.emis_ds_list.values())[0]['time']) #We work with the most recent timestamp. Rest are just for archival purposes. def getEmisSF(self, species): da = self.emis_ds_list[species]['Scalar'] return np.array(da)[-1,:,:].squeeze() def getEmisLat(self, species): return np.array(self.emis_ds_list[species]['lat']) def getEmisLon(self, species): return np.array(self.emis_ds_list[species]['lon']) #Add 2d emissions scaling factors to the end of the emissions scaling factor def addEmisSF(self, species, emis2d, assim_time): timelist = self.getEmisTime() last_time = timelist[-1] #new_last_time = last_time+np.timedelta64(assim_time,'h') #Add assim time hours to the last timestamp tstr = f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000' new_last_time = np.datetime64(tstr) if tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true': START_DATE = tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START'] else: START_DATE = tx.getSpeciesConfig(self.testing)['START_DATE'] orig_timestamp = f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}' #Start date from JSON END_DATE = tx.getSpeciesConfig(self.testing)['END_DATE'] end_timestamp = f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}' #Create dataset with this timestep's scaling factors ds = xr.Dataset( {"Scalar": (("time","lat","lon"), np.expand_dims(emis2d,axis = 0),{"long_name": "Scaling factor", "units":"1"})}, coords={ "time": (["time"], np.array([new_last_time]), {"long_name": "time", "calendar": "standard", "units":f"hours since {orig_timestamp} 00:00:00"}), "lat": (["lat"], self.getEmisLat(species),{"long_name": "Latitude", "units":"degrees_north"}), "lon": (["lon"], self.getEmisLon(species),{"long_name": "Longitude", "units":"degrees_east"}) }, attrs={ "Title":"CHEEREIO scaling factors", "Conventions":"COARDS", "Format":"NetCDF-4", "Model":"GENERIC", "NLayers":"1", "History":f"The LETKF utility added new scaling factors on {str(date.today())}", "Start_Date":f"{orig_timestamp}", "Start_Time":"0", "End_Date":f"{end_timestamp}", "End_Time":"0" } ) self.emis_ds_list[species] = xr.concat([self.emis_ds_list[species],ds],dim = 'time') #Concatenate def buildStateVector(self): if self.testing: print("*****************************************************************") print(f"GC_Translator number {self.num} is starting build of statevector!") species_config = tx.getSpeciesConfig(self.testing) statevec_components = [] for spec_conc in species_config['STATE_VECTOR_CONC']: statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten()) #If no scaling factor files, append 1s because this is a nature directory if len(self.emis_sf_filenames)==0: lenones = len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS']) statevec_components.append(np.ones(lenones)) else: for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): statevec_components.append(self.getEmisSF(spec_emis).flatten()) self.statevec_lengths = np.array([len(vec) for vec in statevec_components]) self.statevec = np.concatenate(statevec_components) if self.testing: print(f"GC_Translator number {self.num} has built statevector; it is of dimension {np.shape(self.statevec)}.") print("*****************************************************************") def getLocalizedStateVectorIndices(self,latind,lonind): surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f"GC_Translator is getting localized statevec 
indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are {surr_latinds} and lon inds are {surr_loninds}.") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() if self.testing: print(f"Within a flattened 3D dummy cube, {len(dummywhere_flat)} entries are valid.") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() if self.testing: print(f"Within a flattened 2D dummy square, {len(dummy2dwhere_flat)} entries are valid.") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_flat+cur_offset)) cur_offset+=totalcount for i in range(emcount): ind_collector.append((dummy2dwhere_flat+cur_offset)) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f"There are a total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.") return statevecinds def getColumnIndicesFromFullStateVector(self,latind,lonind): if self.testing: print(f"GC_Translator is getting column statevec indices FOR FULL VECTOR at {(latind,lonind)}.") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,latind,lonind].flatten() if self.testing: print(f"Within a flattened 3D dummy cube, {len(dummywhere_flat)} entries are valid.") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[latind,lonind] if self.testing: print(f"Within a flattened 2D dummy square, {dummy2dwhere_flat} is sole valid entry.") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for i in range(conccount): ind_collector.append(dummywhere_flat+cur_offset) cur_offset+=totalcount for i in range(emcount): ind_collector.append(np.array([dummy2dwhere_flat+cur_offset])) cur_offset+=(latcount*loncount) statevecinds = np.concatenate(ind_collector) if self.testing: print(f"There are a total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.") return statevecinds def getSpeciesConcIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = 0 for ind,spec in enumerate(species_config['STATE_VECTOR_CONC']): if species == spec: return np.arange(cur_offset,cur_offset+levcount) cur_offset+=levcount return None #If loop doesn't terminate we did not find the species def getSpeciesEmisIndicesInColumn(self,species): levcount = len(self.getLev()) species_config = tx.getSpeciesConfig(self.testing) cur_offset = len(species_config['STATE_VECTOR_CONC'])*levcount for ind,spec in enumerate(species_config['CONTROL_VECTOR_EMIS']): if species == spec: return cur_offset cur_offset+=1 return None #If loop doesn't terminate we did not find the species def getColumnIndicesFromLocalizedStateVector(self,latind,lonind): surr_latinds, 
surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing) if self.testing: print(f"GC_Translator is getting column statevec indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are {surr_latinds} and lon inds are {surr_loninds}.") levcount = len(self.getLev()) latcount = len(self.getLat()) loncount = len(self.getLon()) totalcount = levcount*latcount*loncount dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount)) dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten() dummywhere_flat_column = dummy3d[:,latind,lonind].flatten() dummywhere_match = np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0] if self.testing: print(f"Within a flattened 3D dummy cube, {len(dummywhere_flat_column)} entries are valid in the column.") print(f"Matched {len(dummywhere_match)} entries in the overall flattened and subsetted column; values are {dummywhere_match}") dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount)) dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten() dummy2dwhere_flat_column = dummy2d[latind,lonind] dummy2dwhere_match = np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0] if self.testing: print(f"Within a flattened 2D dummy square, {dummy2dwhere_flat_column} is the sole valid index in the column.") print(f"Matched value in the overall flattened and subsetted square is {dummy2dwhere_match}") species_config = tx.getSpeciesConfig(self.testing) conccount = len(species_config['STATE_VECTOR_CONC']) emcount = len(species_config['CONTROL_VECTOR_EMIS']) ind_collector = [] cur_offset = 0 for i in range(conccount): ind_collector.append((dummywhere_match+cur_offset)) cur_offset+=len(dummywhere_flat) for i in range(emcount): ind_collector.append((dummy2dwhere_match+cur_offset)) cur_offset+=len(dummy2dwhere_flat) #Only one value here. localizedstatevecinds = np.concatenate(ind_collector) if self.testing: print(f"There are a total of {len(localizedstatevecinds)}/{len(self.statevec)} selected from total statevec.") return localizedstatevecinds def getStateVector(self,latind=None,lonind=None): if self.statevec is None: self.buildStateVector() if not (latind is None): #User supplied ind statevecinds = self.getLocalizedStateVectorIndices(latind,lonind) statevec_toreturn = self.statevec[statevecinds] else: #Return the whole vector statevec_toreturn = self.statevec if self.testing: print(f"GC Translator number {self.num} got statevector for inds {(latind,lonind)}; this vec has length {len(statevec_toreturn)} of total statevec {len(self.statevec)}.") return statevec_toreturn #Randomize the restart for purposes of testing. Perturbation is 1/2 of range of percent change selected from a uniform distribution. #E.g. 0.1 would range from 90% to 110% of initial values. Bias adds that percent on top of the perturbed fields (0.1 raises everything 10%). #Repeats this procedure for every species in the state vector (excluding emissions). def randomizeRestart(self,perturbation=0.1,bias=0): statevec_species = tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC'] offset = 1-perturbation scale = perturbation*2 for spec in statevec_species: conc3d = self.getSpecies3Dconc(spec) conc3d *= (scale*np.random.rand(*np.shape(conc3d)))+offset conc3d *= 1+bias self.setSpecies3Dconc(spec,conc3d) #Reconstruct all the 3D concentrations from the analysis vector and overwrite relevant terms in the xr restart dataset. 
#Also construct new scaling factors and add them as a separate array at the new timestep in each of the scaling factor netCDFs. #However, only do so for species in the control vectors of emissions and concentrations. def reconstructArrays(self,analysis_vector): species_config = tx.getSpeciesConfig(self.testing) restart_shape = np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0])) emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys()) emis_shape = np.shape(self.getEmisSF(emislist[0])) counter = 0 for spec_conc in species_config['STATE_VECTOR_CONC']: if spec_conc in species_config['CONTROL_VECTOR_CONC']: #Only overwrite if in the control vector; otherwise just increment. index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_3d = np.reshape(analysis_subset,restart_shape) #Unflattens with 'C' order in python self.setSpecies3Dconc(spec_conc,analysis_3d) #Overwrite. counter+=1 for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): #Emissions scaling factors are all in the control vector index_start = np.sum(self.statevec_lengths[0:counter]) index_end = np.sum(self.statevec_lengths[0:(counter+1)]) analysis_subset = analysis_vector[index_start:index_end] analysis_emis_2d = np.reshape(analysis_subset,emis_shape) #Unflattens with 'C' order in python self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME']) counter+=1 def saveRestart(self): self.restart_ds["time"] = (["time"], np.array([0]), {"long_name": "Time", "calendar": "gregorian", "axis":"T", "units":self.timestring}) self.restart_ds.to_netcdf(self.filename) def saveEmissions(self): for file in self.emis_sf_filenames: name = '_'.join(file.split('/')[-1].split('_')[0:-1]) self.emis_ds_list[name].to_netcdf(file) #A class that takes history files and connects them with the main state vector and observation matrices class HIST_Translator(object): def __init__(self, path_to_rundir,timeperiod,interval=None,testing=False): self.testing = testing self.spc_config = tx.getSpeciesConfig(self.testing) self.hist_dir = f'{path_to_rundir}OutputDir' self.timeperiod = timeperiod self.interval = interval def globSubDir(self,timeperiod,useLevelEdge = False): specconc_list = glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4') specconc_list.sort() ts = [datetime.strptime(spc.split('.')[-2][0:13], "%Y%m%d_%H%M") for spc in specconc_list] if self.interval: specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1]) and (t.hour % self.interval == 0)] else: specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] if useLevelEdge: le_list = glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4') le_list.sort() le_ts = [datetime.strptime(le.split('.')[-2][0:13], "%Y%m%d_%H%M") for le in le_list] le_list = [le for le,t in zip(le_list,le_ts) if (t>=timeperiod[0]) and (t<timeperiod[1])] return [specconc_list,le_list] else: return specconc_list def combineHist(self,species,useLevelEdge=False): dataset=[] if useLevelEdge: specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile,lefile in zip(specconc_list,le_list): hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}'] lev_val = xr.load_dataset(lefile)[f'Met_PEDGE'] data_val = xr.merge([hist_val, lev_val]) dataset.append(data_val) else: specconc_list=self.globSubDir(self.timeperiod,useLevelEdge) for specfile in specconc_list: hist_val = 
xr.load_dataset(specfile)[f'SpeciesConc_{species}'] dataset.append(hist_val) dataset = xr.merge(dataset) return dataset #4D ensemble interface with satellite operators. class HIST_Ens(object): def __init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False): self.testing = testing self.useLevelEdge = useLevelEdge self.spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs" subdirs = glob(f"{path_to_ensemble}/*/") subdirs.remove(f"{path_to_ensemble}/logs/") dirnames = [d.split('/')[-2] for d in subdirs] subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] endtime = datetime.strptime(timestamp, "%Y%m%d_%H%M") if fullperiod: START_DATE = self.spc_config['START_DATE'] starttime = datetime.strptime(f'{START_DATE}_0000', "%Y%m%d_%H%M") else: ASSIM_TIME = self.spc_config['ASSIM_TIME'] delta = timedelta(hours=int(ASSIM_TIME)) starttime = endtime-delta self.timeperiod = (starttime,endtime) self.ht = {} self.observed_species = self.spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens!=0: if fullperiod: self.ht[ens] = HIST_Translator(directory, self.timeperiod,interval,testing=self.testing) else: self.ht[ens] = HIST_Translator(directory, self.timeperiod,testing=self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) self.maxobs=int(self.spc_config['MAXNUMOBS']) self.interval=interval self.makeBigY() def makeSatTrans(self): self.SAT_TRANSLATOR = {} self.satSpecies = [] for spec,bool4D,boolTROPOMI in zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']): if (bool4D and boolTROPOMI): self.SAT_TRANSLATOR[spec] = tt.TROPOMI_Translator(self.testing) self.satSpecies.append(spec) def getSatData(self): self.SAT_DATA = {} for spec in self.satSpecies: self.SAT_DATA[spec] = self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval) def makeBigY(self): self.makeSatTrans() self.getSatData() self.bigYDict = {} for spec in self.satSpecies: self.bigYDict[spec] = self.getColsforSpecies(spec) #This is just a filler. 
def makeRforSpecies(self,species,latind,lonind): inds = self.getIndsOfInterest(species,latind,lonind) return np.diag(np.repeat(15,len(inds))) def makeR(self,latind,lonind): errmats = [] for spec in self.satSpecies: errmats.append(self.makeRforSpecies(spec,latind,lonind)) return la.block_diag(*errmats) def getColsforSpecies(self,species): col3D = [] firstens = self.ensemble_numbers[0] hist4D = self.ht[firstens].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']=="True": firstcol,satcol,satlat,satlon,sattime,numav = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: firstcol,satcol,satlat,satlon,sattime = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) shape2D = np.zeros(2) shape2D[0] = len(firstcol) shape2D[1]=len(self.ensemble_numbers) shape2D = shape2D.astype(int) conc2D = np.zeros(shape2D) conc2D[:,firstens-1] = firstcol for i in self.ensemble_numbers: if i!=firstens: hist4D = self.ht[i].combineHist(species,self.useLevelEdge) if self.spc_config['AV_TO_GC_GRID']=="True": col,_,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) else: col,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D) conc2D[:,i-1] = col if self.spc_config['AV_TO_GC_GRID']=="True": return [conc2D,satcol,satlat,satlon,sattime,numav] else: return [conc2D,satcol,satlat,satlon,sattime] def getIndsOfInterest(self,species,latind,lonind): loc_rad = float(self.spc_config['LOCALIZATION_RADIUS_km']) origlat,origlon = tx.getLatLonVals(self.spc_config,self.testing) latval = origlat[latind] lonval = origlon[lonind] distvec = np.array([tx.calcDist_km(latval,lonval,a,b) for a,b in zip(self.bigYDict[species][2],self.bigYDict[species][3])]) inds = np.where(distvec<=loc_rad)[0] if len(inds) > self.maxobs: inds = np.random.choice(inds, self.maxobs,replace=False) #Randomly subset down to appropriate number of observations return inds def getLocObsMeanPertDiff(self,latind,lonind): obsmeans = [] obsperts = [] obsdiffs = [] for spec in self.satSpecies: ind = self.getIndsOfInterest(spec,latind,lonind) if self.spc_config['AV_TO_GC_GRID']=="True": gccol,satcol,_,_,_,_ = self.bigYDict[spec] else: gccol,satcol,_,_,_ = self.bigYDict[spec] gccol = gccol[ind,:] satcol = satcol[ind] obsmean = np.mean(gccol,axis=1) obspert = np.zeros(np.shape(gccol)) for i in range(np.shape(gccol)[1]): obspert[:,i]=gccol[:,i]-obsmean obsdiff = satcol-obsmean obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(obsdiff) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) return [full_obsmeans,full_obsperts,full_obsdiffs] #Lightweight container for GC_Translators; used to combine columns, update restarts, and diff columns. 
class GT_Container(object): def __init__(self,timestamp,testing=False,constructStateVecs=True): self.testing = testing spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs" self.path_to_scratch = f"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch" npy_column_files = glob(f'{self.path_to_scratch}/**/*.npy',recursive=True) npy_col_names = [file.split('/')[-1] for file in npy_column_files] npy_columns = [np.load(file) for file in npy_column_files] self.columns = dict(zip(npy_col_names,npy_columns)) subdirs = glob(f"{path_to_ensemble}/*/") subdirs.remove(f"{path_to_ensemble}/logs/") dirnames = [d.split('/')[-2] for d in subdirs] subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] self.gt = {} self.nature = None self.observed_species = spc_config['OBSERVED_SPECIES'] for ens, directory in zip(subdir_numbers,subdirs): if ens==0: self.nature = GC_Translator(directory, timestamp, constructStateVecs,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, constructStateVecs,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) #Gets saved column and compares to the original files def constructColStatevec(self,latind,lonind): firstens = self.ensemble_numbers[0] col1indvec = self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble = np.zeros((len(col1indvec),len(self.ensemble_numbers))) backgroundEnsemble[:,firstens-1] = self.gt[firstens].statevec[col1indvec] for i in self.ensemble_numbers: if i!=firstens: colinds = self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind) backgroundEnsemble[:,i-1] = self.gt[i].statevec[colinds] return backgroundEnsemble def diffColumns(self,latind,lonind): filenames = list(self.columns.keys()) substr = f'lat_{latind}_lon_{lonind}.npy' search = [i for i in filenames if substr in i] saved_col = self.columns[search[0]] backgroundEnsemble = self.constructColStatevec(latind,lonind) diff = saved_col-backgroundEnsemble return [saved_col,backgroundEnsemble,diff] def compareSpeciesConc(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesConcIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} CONCENTRATION COLUMN AT INDEX {(latind,lonind)} ************************************') for i in range(np.shape(saved_col)[1]): print(f' ') print(f'{species} in ensemble member {i+1} had background concentration of {100*(backgroundEnsemble[:,i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1} had analysis concentration of {100*(saved_col[:,i]/naturecol)}% nature') print(f'This represents a percent difference of {100*(diff[:,i]/backgroundEnsemble[:,i])}%') print(f' ') def compareSpeciesEmis(self,species,latind,lonind): firstens = self.ensemble_numbers[0] colind = self.gt[firstens].getSpeciesEmisIndicesInColumn(species) saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind) saved_col = saved_col[colind,:] #Now will just be a vector of length NumEnsemble backgroundEnsemble = backgroundEnsemble[colind,:] diff = diff[colind,:] col1indvec = 
self.nature.getColumnIndicesFromFullStateVector(latind,lonind) naturecol = self.nature.statevec[col1indvec][colind] print(f'*********************************** {species} EMISSIONS SCALING AT INDEX {(latind,lonind)} ************************************') for i in range(len(saved_col)): print(f' ') print(f'{species} in ensemble member {i+1} had background emissions scaling of {100*(backgroundEnsemble[i]/naturecol)}% nature') print(f'{species} in ensemble member {i+1} had analysis emissions scaling of {100*(saved_col[i]/naturecol)}% nature') print(f'This represents a percent difference of {100*(diff[i]/backgroundEnsemble[i])}%') print(f' ') def reconstructAnalysisEnsemble(self): self.analysisEnsemble = np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers))) for name, cols in zip(self.columns.keys(),self.columns.values()): split_name = name.split('_') latind = int(split_name[-3]) lonind = int(split_name[-1].split('.')[0]) colinds = self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind) self.analysisEnsemble[colinds,:] = cols def updateRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1]) def saveRestartsAndScalingFactors(self): for i in self.ensemble_numbers: self.gt[i].saveRestart() self.gt[i].saveEmissions() #Contains a dictionary referencing GC_Translators for every run directory. #In the special case where there is a nature run present (with number 0) #store the nature run in GC_Translator object nature. #Also contains an observation operator (pass in the class you would like to use) for each species to assimilate. #Class contains function to calculate relvant assimilation variables. #SPECIAL NOTE ON FILES: we will be assuming that geos-chem stopped and left a restart at assimilation time in each run directory. #That restart will be overwritten in place (name not changed) so next run starts from the assimilation state vector. #Emissions scaling factors are most recent available (one assimilation timestep ago). New values will be appended to netCDF. 
class Assimilator(object): def __init__(self,timestamp,ensnum,corenum,testing=False): self.testing = testing self.ensnum = ensnum self.corenum = corenum self.latinds,self.loninds = tx.getLatLonList(ensnum,corenum,self.testing) if self.testing: print(f"Assimilator has been called for ens {self.ensnum} core {self.corenum}; construction beginning") print(f"This core will be handling lat and lon values {[(latval,lonval) for latval,lonval in zip(self.latinds,self.loninds)]}") spc_config = tx.getSpeciesConfig(self.testing) path_to_ensemble = f"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs" self.path_to_scratch = f"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch" self.parfilename = f'ens_{ensnum}_core_{corenum}_time_{timestamp}' subdirs = glob(f"{path_to_ensemble}/*/") subdirs.remove(f"{path_to_ensemble}/logs/") dirnames = [d.split('/')[-2] for d in subdirs] if self.testing: print(f"The following ensemble directories were detected: {dirnames}") subdir_numbers = [int(n.split('_')[-1]) for n in dirnames] ensemble_numbers = [] self.nature = None self.emcount = len(spc_config['CONTROL_VECTOR_EMIS']) self.MINNUMOBS = int(spc_config['MINNUMOBS']) self.MinimumScalingFactorAllowed = [float(s) for s in spc_config["MinimumScalingFactorAllowed"]] self.MaximumScalingFactorAllowed = [float(s) for s in spc_config["MaximumScalingFactorAllowed"]] self.InflateScalingsToXOfPreviousStandardDeviation = [float(s) for s in spc_config["InflateScalingsToXOfPreviousStandardDeviation"]] self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s) for s in spc_config["MaximumScaleFactorRelativeChangePerAssimilationPeriod"]] self.AveragePriorAndPosterior = spc_config["AveragePriorAndPosterior"] == "True" self.PriorWeightinPriorPosteriorAverage = float(spc_config["PriorWeightinPriorPosteriorAverage"]) self.forceOverrideNature=True #Set to true to ignore existing nature directory. Only for testing self.gt = {} self.observed_species = spc_config['OBSERVED_SPECIES'] if self.testing: print(f"Begin creating GC Translators with state vectors.") for ens, directory in zip(subdir_numbers,subdirs): if (ens==0) and (not self.forceOverrideNature): self.nature = GC_Translator(directory, timestamp, False,self.testing) else: self.gt[ens] = GC_Translator(directory, timestamp, True,self.testing) ensemble_numbers.append(ens) self.ensemble_numbers=np.array(ensemble_numbers) if self.testing: print(f"GC Translators created. 
Ensemble number list: {self.ensemble_numbers}") if self.nature is None: self.full4D = True #Implement me self.inflation = float(spc_config['INFLATION_FACTOR']) self.histens = HIST_Ens(timestamp,True,testing=self.testing) else: self.full4D = False error_multipliers_or_matrices, self.ObsOperatorClass_list,nature_h_functions,self.inflation = getLETKFConfig(self.testing) self.NatureHelperInstance = obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing) self.makeObsOps() if self.testing: print(f"Assimilator construction complete") def getLat(self): return self.gt[1].getLat() #Latitude of first ensemble member, who should always exist def getLon(self): return self.gt[1].getLon() def getLev(self): return self.gt[1].getLev() def makeObsOps(self): if self.testing: print(f'makeObsOps called in Assimilator') self.ObsOp = {} for i,obs_spec_key in enumerate(self.observed_species.keys()): ObsOp_instance = self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i]) self.ObsOp[obs_spec_key] = ObsOp_instance def combineEnsemble(self,latind=None,lonind=None): if self.testing: print(f'combineEnsemble called in Assimilator for lat/lon inds {(latind,lonind)}') firstens = self.ensemble_numbers[0] firstvec = self.gt[firstens].getStateVector(latind,lonind) statevecs = np.zeros((len(firstvec),len(self.ensemble_numbers))) statevecs[:,firstens-1] = firstvec for i in self.ensemble_numbers: if i!=firstens: statevecs[:,i-1] = self.gt[i].getStateVector(latind,lonind) if self.testing: print(f'Ensemble combined in Assimilator for lat/lon inds {(latind,lonind)} and has dimensions {np.shape(statevecs)}.') return statevecs def ensMeanAndPert(self,latval,lonval): if self.testing: print(f'ensMeanAndPert called in Assimilator for lat/lon inds {(latval,lonval)}') statevecs = self.combineEnsemble(latval,lonval) state_mean = np.mean(statevecs,axis = 1) #calculate ensemble mean bigX = np.zeros(np.shape(statevecs)) for i in range(np.shape(bigX)[1]): bigX[:,i] = statevecs[:,i]-state_mean if self.testing: print(f'Ensemble mean at {(latval,lonval)} has dimensions {np.shape(state_mean)} and bigX at at {(latval,lonval)} has dimensions {np.shape(bigX)}.') return [state_mean,bigX] def ensObsMeanPertDiff(self,latval,lonval): if self.testing: print(f'ensObsMeanPertDiff called in Assimilator for lat/lon inds {(latval,lonval)}') obsmeans = [] obsperts = [] obsdiffs = [] for obskey,species in zip(list(self.observed_species.keys()),list(self.observed_species.values())): obsmean,obspert = self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval) obsmeans.append(obsmean) obsperts.append(obspert) obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval)) full_obsmeans = np.concatenate(obsmeans) full_obsperts = np.concatenate(obsperts,axis = 0) full_obsdiffs = np.concatenate(obsdiffs) if self.testing: print(f'Full ObsMeans at {(latval,lonval)} has dimensions {np.shape(full_obsmeans)}; Full ObsPerts at {(latval,lonval)} has dimensions {np.shape(full_obsperts)}; and Full ObsDiffs at {(latval,lonval)} has dimensions {np.shape(full_obsdiffs)}.') return [full_obsmeans,full_obsperts,full_obsdiffs] def combineEnsembleForSpecies(self,species): if self.testing: print(f'combineEnsembleForSpecies called in Assimilator for species {species}') conc3D = [] firstens = self.ensemble_numbers[0] first3D = self.gt[firstens].getSpecies3Dconc(species) shape4D = np.zeros(4) shape4D[0:3] = np.shape(first3D) shape4D[3]=len(self.ensemble_numbers) shape4D = shape4D.astype(int) 
conc4D = np.zeros(shape4D) conc4D[:,:,:,firstens-1] = first3D for i in self.ensemble_numbers: if i!=firstens: conc4D[:,:,:,i-1] = self.gt[i].getSpecies3Dconc(species) return conc4D def ensObsMeanAndPertForSpecies(self, observation_key,species,latval,lonval): if self.testing: print(f'ensObsMeanAndPertForSpecies called for keys {observation_key} -> {species} in Assimilator for lat/lon inds {(latval,lonval)}') spec_4D = self.combineEnsembleForSpecies(species) return self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval) def obsDiffForSpecies(self,observation_key,ensvec,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called for {observation_key} in Assimilator for lat/lon inds {(latval,lonval)}') return self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval) def prepareMeansAndPerts(self,latval,lonval): if self.testing: print(f'prepareMeansAndPerts called in Assimilator for lat/lon inds {(latval,lonval)}') if self.full4D: self.ybar_background, self.Ypert_background, self.ydiff = self.histens.getLocObsMeanPertDiff(latval,lonval) else: self.ybar_background, self.Ypert_background, self.ydiff = self.ensObsMeanPertDiff(latval,lonval) self.xbar_background, self.Xpert_background = self.ensMeanAndPert(latval,lonval) if self.testing: print(f'ybar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ybar_background)}.') print(f'Ypert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Ypert_background)}.') print(f'ydiff for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ydiff)}.') print(f'xbar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.xbar_background)}.') print(f'Xpert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Xpert_background)}.') def makeR(self,latind=None,lonind=None): if self.testing: print(f"Making R for lat/lon inds {(latind,lonind)}.") if self.full4D: self.R = self.histens.makeR(latind,lonind) else: errmats = [] for species in self.observed_species: errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind)) self.R = la.block_diag(*errmats) if self.testing: print(f'R for {(latind,lonind)} has dimension {np.shape(self.R)} and value {self.R}') def makeC(self): self.C = np.transpose(self.Ypert_background) @ la.inv(self.R) if self.testing: print(f'C made in Assimilator. It has dimension {np.shape(self.C)} and value {self.C}') def makePtildeAnalysis(self): cyb = self.C @ self.Ypert_background k = len(self.ensemble_numbers) iden = (k-1)*np.identity(k)/(1+self.inflation) self.PtildeAnalysis = la.inv(iden+cyb) if self.testing: print(f'PtildeAnalysis made in Assimilator. It has dimension {np.shape(self.PtildeAnalysis)} and value {self.PtildeAnalysis}') def makeWAnalysis(self): k = len(self.ensemble_numbers) self.WAnalysis = la.sqrtm((k-1)*self.PtildeAnalysis) if self.testing: print(f'WAnalysis initialized in Assimilator. It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeWbarAnalysis(self): self.WbarAnalysis = [email protected]@self.ydiff if self.testing: print(f'WbarAnalysis made in Assimilator. It has dimension {np.shape(self.WbarAnalysis)} and value {self.WbarAnalysis}') def adjWAnalysis(self): k = len(self.ensemble_numbers) for i in range(k): self.WAnalysis[:,i]+=self.WbarAnalysis if self.testing: print(f'WAnalysis adjusted in Assimilator. 
It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}') def makeAnalysisCombinedEnsemble(self): self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background if self.testing: print(f'analysisEnsemble made in Assimilator. It has dimension {np.shape(self.analysisEnsemble)} and value {self.analysisEnsemble}') def getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True): colinds = self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval) analysisSubset = self.analysisEnsemble[colinds,:] if doBackground: backgroundSubset = np.zeros(np.shape(self.Xpert_background[colinds,:])) k = len(self.ensemble_numbers) for i in range(k): backgroundSubset[:,i] = self.Xpert_background[colinds,i]+self.xbar_background[colinds] return [analysisSubset,backgroundSubset] else: return analysisSubset def applyAnalysisCorrections(self,analysisSubset,backgroundSubset): #Get scalefactors off the end of statevector analysisScalefactor = analysisSubset[(-1*self.emcount)::,:] backgroundScalefactor = backgroundSubset[(-1*self.emcount)::,:] #Inflate scalings to the X percent of the background standard deviation, per Miyazaki et al 2015 for i in range(len(self.InflateScalingsToXOfPreviousStandardDeviation)): inflator = self.InflateScalingsToXOfPreviousStandardDeviation[i] if ~np.isnan(inflator): analysis_std = np.std(analysisScalefactor[i,:]) background_std = np.std(backgroundScalefactor[i,:]) ratio=analysis_std/background_std if ~np.isnan(ratio): #Sometimes background standard deviation is approximately 0. if ratio < inflator: new_std = inflator*background_std analysisScalefactor[i,:] = analysisScalefactor[i,:]*(new_std/analysis_std) #Apply maximum relative change per assimilation period: for i in range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)): maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i] if ~np.isnan(maxchange): relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:] relOverwrite = np.where(np.abs(relativechanges)>maxchange)[0] analysisScalefactor[i,relOverwrite] = (1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite] #Set min/max scale factor: for i in range(len(self.MinimumScalingFactorAllowed)): if ~np.isnan(self.MinimumScalingFactorAllowed[i]): minOverwrite = np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0] analysisScalefactor[i,minOverwrite] = self.MinimumScalingFactorAllowed[i] if ~np.isnan(self.MaximumScalingFactorAllowed[i]): maxOverwrite = np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0] analysisScalefactor[i,maxOverwrite] = self.MaximumScalingFactorAllowed[i] #Done with the scalings analysisSubset[(-1*self.emcount)::,:] = analysisScalefactor #Now average with prior if self.AveragePriorAndPosterior: priorweight = self.PriorWeightinPriorPosteriorAverage if (priorweight<0) or (priorweight>1): raise ValueError('Invalid prior weight; must be between 0 and 1.') posteriorweight = 1-priorweight analysisSubset = (backgroundSubset*priorweight)+(analysisSubset*posteriorweight) return analysisSubset def saveColumn(self,latval,lonval,analysisSubset): np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset) def LETKF(self): if self.testing: print(f"LETKF called! 
Beginning loop.") for latval,lonval in zip(self.latinds,self.loninds): if self.testing: print(f"Beginning LETKF loop for lat/lon inds {(latval,lonval)}.") self.prepareMeansAndPerts(latval,lonval) if len(self.ybar_background)<self.MINNUMOBS: self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background)) k = len(self.ensemble_numbers) for i in range(k): self.analysisEnsemble[:,i] = self.Xpert_background[:,i]+self.xbar_background analysisSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False) else: self.makeR(latval,lonval) self.makeC() self.makePtildeAnalysis() self.makeWAnalysis() self.makeWbarAnalysis() self.adjWAnalysis() self.makeAnalysisCombinedEnsemble() analysisSubset,backgroundSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=True) analysisSubset = self.applyAnalysisCorrections(analysisSubset,backgroundSubset) self.saveColumn(latval,lonval,analysisSubset)
[ "toolbox.getSpeciesConfig", "numpy.load", "numpy.sum", "toolbox.getLatLonList", "numpy.abs", "numpy.ones", "numpy.isnan", "numpy.shape", "numpy.mean", "numpy.arange", "glob.glob", "toolbox.calcDist_km", "toolbox.getIndsOfInterest", "observation_operators.NatureHelper", "numpy.std", "numpy.transpose", "numpy.identity", "xarray.merge", "scipy.linalg.inv", "numpy.reshape", "numpy.random.choice", "scipy.linalg.block_diag", "toolbox.getLatLonVals", "datetime.date.today", "xarray.concat", "datetime.datetime.strptime", "scipy.linalg.sqrtm", "numpy.concatenate", "tropomi_tools.TROPOMI_Translator", "numpy.datetime64", "numpy.zeros", "numpy.expand_dims", "numpy.where", "numpy.array", "numpy.sign", "xarray.load_dataset", "numpy.in1d" ]
[((261, 289), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['testing'], {}), '(testing)\n', (280, 289), True, 'import toolbox as tx\n'), ((1749, 1779), 'xarray.load_dataset', 'xr.load_dataset', (['self.filename'], {}), '(self.filename)\n', (1764, 1779), True, 'import xarray as xr\n'), ((1807, 1848), 'glob.glob', 'glob', (['f"""{path_to_rundir}*_SCALEFACTOR.nc"""'], {}), "(f'{path_to_rundir}*_SCALEFACTOR.nc')\n", (1811, 1848), False, 'from glob import glob\n'), ((2989, 3005), 'numpy.shape', 'np.shape', (['conc3d'], {}), '(conc3d)\n', (2997, 3005), True, 'import numpy as np\n'), ((3439, 3471), 'numpy.array', 'np.array', (["self.restart_ds['lat']"], {}), "(self.restart_ds['lat'])\n", (3447, 3471), True, 'import numpy as np\n'), ((3500, 3532), 'numpy.array', 'np.array', (["self.restart_ds['lon']"], {}), "(self.restart_ds['lon'])\n", (3508, 3532), True, 'import numpy as np\n'), ((3561, 3593), 'numpy.array', 'np.array', (["self.restart_ds['lev']"], {}), "(self.restart_ds['lev'])\n", (3569, 3593), True, 'import numpy as np\n'), ((3630, 3663), 'numpy.array', 'np.array', (["self.restart_ds['time']"], {}), "(self.restart_ds['time'])\n", (3638, 3663), True, 'import numpy as np\n'), ((3986, 4029), 'numpy.array', 'np.array', (["self.emis_ds_list[species]['lat']"], {}), "(self.emis_ds_list[species]['lat'])\n", (3994, 4029), True, 'import numpy as np\n'), ((4071, 4114), 'numpy.array', 'np.array', (["self.emis_ds_list[species]['lon']"], {}), "(self.emis_ds_list[species]['lon'])\n", (4079, 4114), True, 'import numpy as np\n'), ((4563, 4582), 'numpy.datetime64', 'np.datetime64', (['tstr'], {}), '(tstr)\n', (4576, 4582), True, 'import numpy as np\n'), ((5956, 6011), 'xarray.concat', 'xr.concat', (['[self.emis_ds_list[species], ds]'], {'dim': '"""time"""'}), "([self.emis_ds_list[species], ds], dim='time')\n", (5965, 6011), True, 'import xarray as xr\n'), ((6249, 6282), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (6268, 6282), True, 'import toolbox as tx\n'), ((6933, 6968), 'numpy.concatenate', 'np.concatenate', (['statevec_components'], {}), '(statevec_components)\n', (6947, 6968), True, 'import numpy as np\n'), ((7268, 7326), 'toolbox.getIndsOfInterest', 'tx.getIndsOfInterest', (['latind', 'lonind'], {'testing': 'self.testing'}), '(latind, lonind, testing=self.testing)\n', (7288, 7326), True, 'import toolbox as tx\n'), ((8237, 8270), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (8256, 8270), True, 'import toolbox as tx\n'), ((8663, 8692), 'numpy.concatenate', 'np.concatenate', (['ind_collector'], {}), '(ind_collector)\n', (8677, 8692), True, 'import numpy as np\n'), ((9642, 9675), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (9661, 9675), True, 'import toolbox as tx\n'), ((10076, 10105), 'numpy.concatenate', 'np.concatenate', (['ind_collector'], {}), '(ind_collector)\n', (10090, 10105), True, 'import numpy as np\n'), ((10353, 10386), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (10372, 10386), True, 'import toolbox as tx\n'), ((10740, 10773), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (10759, 10773), True, 'import toolbox as tx\n'), ((11136, 11194), 'toolbox.getIndsOfInterest', 'tx.getIndsOfInterest', (['latind', 'lonind'], {'testing': 'self.testing'}), '(latind, lonind, testing=self.testing)\n', (11156, 11194), True, 'import toolbox as tx\n'), ((12656, 
12689), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (12675, 12689), True, 'import toolbox as tx\n'), ((13128, 13157), 'numpy.concatenate', 'np.concatenate', (['ind_collector'], {}), '(ind_collector)\n', (13142, 13157), True, 'import numpy as np\n'), ((15023, 15056), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (15042, 15056), True, 'import toolbox as tx\n'), ((16926, 16959), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (16945, 16959), True, 'import toolbox as tx\n'), ((17138, 17188), 'glob.glob', 'glob', (['f"""{self.hist_dir}/GEOSChem.SpeciesConc*.nc4"""'], {}), "(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4')\n", (17142, 17188), False, 'from glob import glob\n'), ((18541, 18558), 'xarray.merge', 'xr.merge', (['dataset'], {}), '(dataset)\n', (18549, 18558), True, 'import xarray as xr\n'), ((18825, 18858), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (18844, 18858), True, 'import toolbox as tx\n'), ((18968, 18998), 'glob.glob', 'glob', (['f"""{path_to_ensemble}/*/"""'], {}), "(f'{path_to_ensemble}/*/')\n", (18972, 18998), False, 'from glob import glob\n'), ((19191, 19234), 'datetime.datetime.strptime', 'datetime.strptime', (['timestamp', '"""%Y%m%d_%H%M"""'], {}), "(timestamp, '%Y%m%d_%H%M')\n", (19208, 19234), False, 'from datetime import date, datetime, timedelta\n'), ((19944, 19970), 'numpy.array', 'np.array', (['ensemble_numbers'], {}), '(ensemble_numbers)\n', (19952, 19970), True, 'import numpy as np\n'), ((21071, 21094), 'scipy.linalg.block_diag', 'la.block_diag', (['*errmats'], {}), '(*errmats)\n', (21084, 21094), True, 'import scipy.linalg as la\n'), ((21600, 21611), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (21608, 21611), True, 'import numpy as np\n'), ((21724, 21741), 'numpy.zeros', 'np.zeros', (['shape2D'], {}), '(shape2D)\n', (21732, 21741), True, 'import numpy as np\n'), ((22493, 22540), 'toolbox.getLatLonVals', 'tx.getLatLonVals', (['self.spc_config', 'self.testing'], {}), '(self.spc_config, self.testing)\n', (22509, 22540), True, 'import toolbox as tx\n'), ((23578, 23602), 'numpy.concatenate', 'np.concatenate', (['obsmeans'], {}), '(obsmeans)\n', (23592, 23602), True, 'import numpy as np\n'), ((23621, 23653), 'numpy.concatenate', 'np.concatenate', (['obsperts'], {'axis': '(0)'}), '(obsperts, axis=0)\n', (23635, 23653), True, 'import numpy as np\n'), ((23673, 23697), 'numpy.concatenate', 'np.concatenate', (['obsdiffs'], {}), '(obsdiffs)\n', (23687, 23697), True, 'import numpy as np\n'), ((23992, 24025), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (24011, 24025), True, 'import toolbox as tx\n'), ((24219, 24275), 'glob.glob', 'glob', (['f"""{self.path_to_scratch}/**/*.npy"""'], {'recursive': '(True)'}), "(f'{self.path_to_scratch}/**/*.npy', recursive=True)\n", (24223, 24275), False, 'from glob import glob\n'), ((24471, 24501), 'glob.glob', 'glob', (['f"""{path_to_ensemble}/*/"""'], {}), "(f'{path_to_ensemble}/*/')\n", (24475, 24501), False, 'from glob import glob\n'), ((25083, 25109), 'numpy.array', 'np.array', (['ensemble_numbers'], {}), '(ensemble_numbers)\n', (25091, 25109), True, 'import numpy as np\n'), ((29821, 29868), 'toolbox.getLatLonList', 'tx.getLatLonList', (['ensnum', 'corenum', 'self.testing'], {}), '(ensnum, corenum, self.testing)\n', (29837, 29868), True, 'import toolbox as tx\n'), ((30139, 30172), 
'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (30158, 30172), True, 'import toolbox as tx\n'), ((30426, 30456), 'glob.glob', 'glob', (['f"""{path_to_ensemble}/*/"""'], {}), "(f'{path_to_ensemble}/*/')\n", (30430, 30456), False, 'from glob import glob\n'), ((32092, 32118), 'numpy.array', 'np.array', (['ensemble_numbers'], {}), '(ensemble_numbers)\n', (32100, 32118), True, 'import numpy as np\n'), ((34162, 34188), 'numpy.mean', 'np.mean', (['statevecs'], {'axis': '(1)'}), '(statevecs, axis=1)\n', (34169, 34188), True, 'import numpy as np\n'), ((35065, 35089), 'numpy.concatenate', 'np.concatenate', (['obsmeans'], {}), '(obsmeans)\n', (35079, 35089), True, 'import numpy as np\n'), ((35108, 35140), 'numpy.concatenate', 'np.concatenate', (['obsperts'], {'axis': '(0)'}), '(obsperts, axis=0)\n', (35122, 35140), True, 'import numpy as np\n'), ((35160, 35184), 'numpy.concatenate', 'np.concatenate', (['obsdiffs'], {}), '(obsdiffs)\n', (35174, 35184), True, 'import numpy as np\n'), ((35773, 35784), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (35781, 35784), True, 'import numpy as np\n'), ((35802, 35819), 'numpy.shape', 'np.shape', (['first3D'], {}), '(first3D)\n', (35810, 35819), True, 'import numpy as np\n'), ((35903, 35920), 'numpy.zeros', 'np.zeros', (['shape4D'], {}), '(shape4D)\n', (35911, 35920), True, 'import numpy as np\n'), ((38570, 38588), 'scipy.linalg.inv', 'la.inv', (['(iden + cyb)'], {}), '(iden + cyb)\n', (38576, 38588), True, 'import scipy.linalg as la\n'), ((38814, 38853), 'scipy.linalg.sqrtm', 'la.sqrtm', (['((k - 1) * self.PtildeAnalysis)'], {}), '((k - 1) * self.PtildeAnalysis)\n', (38822, 38853), True, 'import scipy.linalg as la\n'), ((2233, 2254), 'xarray.load_dataset', 'xr.load_dataset', (['file'], {}), '(file)\n', (2248, 2254), True, 'import xarray as xr\n'), ((4902, 4935), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (4921, 4935), True, 'import toolbox as tx\n'), ((14320, 14353), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (14339, 14353), True, 'import toolbox as tx\n'), ((15951, 15991), 'numpy.sum', 'np.sum', (['self.statevec_lengths[0:counter]'], {}), '(self.statevec_lengths[0:counter])\n', (15957, 15991), True, 'import numpy as np\n'), ((16007, 16051), 'numpy.sum', 'np.sum', (['self.statevec_lengths[0:counter + 1]'], {}), '(self.statevec_lengths[0:counter + 1])\n', (16013, 16051), True, 'import numpy as np\n'), ((16134, 16173), 'numpy.reshape', 'np.reshape', (['analysis_subset', 'emis_shape'], {}), '(analysis_subset, emis_shape)\n', (16144, 16173), True, 'import numpy as np\n'), ((16362, 16375), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (16370, 16375), True, 'import numpy as np\n'), ((17607, 17660), 'glob.glob', 'glob', (['f"""{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4"""'], {}), "(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4')\n", (17611, 17660), False, 'from glob import glob\n'), ((19313, 19367), 'datetime.datetime.strptime', 'datetime.strptime', (['f"""{START_DATE}_0000"""', '"""%Y%m%d_%H%M"""'], {}), "(f'{START_DATE}_0000', '%Y%m%d_%H%M')\n", (19330, 19367), False, 'from datetime import date, datetime, timedelta\n'), ((22729, 22757), 'numpy.where', 'np.where', (['(distvec <= loc_rad)'], {}), '(distvec <= loc_rad)\n', (22737, 22757), True, 'import numpy as np\n'), ((22799, 22849), 'numpy.random.choice', 'np.random.choice', (['inds', 'self.maxobs'], {'replace': '(False)'}), '(inds, self.maxobs, 
replace=False)\n', (22815, 22849), True, 'import numpy as np\n'), ((23312, 23334), 'numpy.mean', 'np.mean', (['gccol'], {'axis': '(1)'}), '(gccol, axis=1)\n', (23319, 23334), True, 'import numpy as np\n'), ((24361, 24374), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (24368, 24374), True, 'import numpy as np\n'), ((32593, 32714), 'observation_operators.NatureHelper', 'obs.NatureHelper', (['self.nature', 'self.observed_species', 'nature_h_functions', 'error_multipliers_or_matrices', 'self.testing'], {}), '(self.nature, self.observed_species, nature_h_functions,\n error_multipliers_or_matrices, self.testing)\n', (32609, 32714), True, 'import observation_operators as obs\n'), ((34233, 34252), 'numpy.shape', 'np.shape', (['statevecs'], {}), '(statevecs)\n', (34241, 34252), True, 'import numpy as np\n'), ((38070, 38093), 'scipy.linalg.block_diag', 'la.block_diag', (['*errmats'], {}), '(*errmats)\n', (38083, 38093), True, 'import scipy.linalg as la\n'), ((38231, 38266), 'numpy.transpose', 'np.transpose', (['self.Ypert_background'], {}), '(self.Ypert_background)\n', (38243, 38266), True, 'import numpy as np\n'), ((38269, 38283), 'scipy.linalg.inv', 'la.inv', (['self.R'], {}), '(self.R)\n', (38275, 38283), True, 'import scipy.linalg as la\n'), ((39558, 39589), 'numpy.shape', 'np.shape', (['self.Xpert_background'], {}), '(self.Xpert_background)\n', (39566, 39589), True, 'import numpy as np\n'), ((2719, 2769), 'numpy.array', 'np.array', (["self.restart_ds[f'SpeciesRst_{species}']"], {}), "(self.restart_ds[f'SpeciesRst_{species}'])\n", (2727, 2769), True, 'import numpy as np\n'), ((4588, 4621), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (4607, 4621), True, 'import toolbox as tx\n'), ((4664, 4697), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (4683, 4697), True, 'import toolbox as tx\n'), ((4742, 4775), 'toolbox.getSpeciesConfig', 'tx.getSpeciesConfig', (['self.testing'], {}), '(self.testing)\n', (4761, 4775), True, 'import toolbox as tx\n'), ((6677, 6693), 'numpy.ones', 'np.ones', (['lenones'], {}), '(lenones)\n', (6684, 6693), True, 'import numpy as np\n'), ((7728, 7752), 'numpy.arange', 'np.arange', (['(0)', 'totalcount'], {}), '(0, totalcount)\n', (7737, 7752), True, 'import numpy as np\n'), ((7978, 8011), 'numpy.arange', 'np.arange', (['(0)', '(latcount * loncount)'], {}), '(0, latcount * loncount)\n', (7987, 8011), True, 'import numpy as np\n'), ((9170, 9194), 'numpy.arange', 'np.arange', (['(0)', 'totalcount'], {}), '(0, totalcount)\n', (9179, 9194), True, 'import numpy as np\n'), ((9408, 9441), 'numpy.arange', 'np.arange', (['(0)', '(latcount * loncount)'], {}), '(0, latcount * loncount)\n', (9417, 9441), True, 'import numpy as np\n'), ((9982, 10024), 'numpy.array', 'np.array', (['[dummy2dwhere_flat + cur_offset]'], {}), '([dummy2dwhere_flat + cur_offset])\n', (9990, 10024), True, 'import numpy as np\n'), ((10504, 10548), 'numpy.arange', 'np.arange', (['cur_offset', '(cur_offset + levcount)'], {}), '(cur_offset, cur_offset + levcount)\n', (10513, 10548), True, 'import numpy as np\n'), ((11593, 11617), 'numpy.arange', 'np.arange', (['(0)', 'totalcount'], {}), '(0, totalcount)\n', (11602, 11617), True, 'import numpy as np\n'), ((11815, 11863), 'numpy.in1d', 'np.in1d', (['dummywhere_flat', 'dummywhere_flat_column'], {}), '(dummywhere_flat, dummywhere_flat_column)\n', (11822, 11863), True, 'import numpy as np\n'), ((12138, 12171), 'numpy.arange', 'np.arange', (['(0)', '(latcount * 
loncount)'], {}), '(0, latcount * loncount)\n', (12147, 12171), True, 'import numpy as np\n'), ((12350, 12402), 'numpy.in1d', 'np.in1d', (['dummy2dwhere_flat', 'dummy2dwhere_flat_column'], {}), '(dummy2dwhere_flat, dummy2dwhere_flat_column)\n', (12357, 12402), True, 'import numpy as np\n'), ((15477, 15517), 'numpy.sum', 'np.sum', (['self.statevec_lengths[0:counter]'], {}), '(self.statevec_lengths[0:counter])\n', (15483, 15517), True, 'import numpy as np\n'), ((15534, 15578), 'numpy.sum', 'np.sum', (['self.statevec_lengths[0:counter + 1]'], {}), '(self.statevec_lengths[0:counter + 1])\n', (15540, 15578), True, 'import numpy as np\n'), ((15658, 15700), 'numpy.reshape', 'np.reshape', (['analysis_subset', 'restart_shape'], {}), '(analysis_subset, restart_shape)\n', (15668, 15700), True, 'import numpy as np\n'), ((18269, 18298), 'xarray.merge', 'xr.merge', (['[hist_val, lev_val]'], {}), '([hist_val, lev_val])\n', (18277, 18298), True, 'import xarray as xr\n'), ((20339, 20374), 'tropomi_tools.TROPOMI_Translator', 'tt.TROPOMI_Translator', (['self.testing'], {}), '(self.testing)\n', (20360, 20374), True, 'import tropomi_tools as tt\n'), ((22616, 22652), 'toolbox.calcDist_km', 'tx.calcDist_km', (['latval', 'lonval', 'a', 'b'], {}), '(latval, lonval, a, b)\n', (22630, 22652), True, 'import toolbox as tx\n'), ((23356, 23371), 'numpy.shape', 'np.shape', (['gccol'], {}), '(gccol)\n', (23364, 23371), True, 'import numpy as np\n'), ((26694, 26713), 'numpy.shape', 'np.shape', (['saved_col'], {}), '(saved_col)\n', (26702, 26713), True, 'import numpy as np\n'), ((34271, 34285), 'numpy.shape', 'np.shape', (['bigX'], {}), '(bigX)\n', (34279, 34285), True, 'import numpy as np\n'), ((38512, 38526), 'numpy.identity', 'np.identity', (['k'], {}), '(k)\n', (38523, 38526), True, 'import numpy as np\n'), ((40156, 40199), 'numpy.shape', 'np.shape', (['self.Xpert_background[colinds, :]'], {}), '(self.Xpert_background[colinds, :])\n', (40164, 40199), True, 'import numpy as np\n'), ((40917, 40935), 'numpy.isnan', 'np.isnan', (['inflator'], {}), '(inflator)\n', (40925, 40935), True, 'import numpy as np\n'), ((40956, 40989), 'numpy.std', 'np.std', (['analysisScalefactor[i, :]'], {}), '(analysisScalefactor[i, :])\n', (40962, 40989), True, 'import numpy as np\n'), ((41010, 41045), 'numpy.std', 'np.std', (['backgroundScalefactor[i, :]'], {}), '(backgroundScalefactor[i, :])\n', (41016, 41045), True, 'import numpy as np\n'), ((41539, 41558), 'numpy.isnan', 'np.isnan', (['maxchange'], {}), '(maxchange)\n', (41547, 41558), True, 'import numpy as np\n'), ((41955, 42000), 'numpy.isnan', 'np.isnan', (['self.MinimumScalingFactorAllowed[i]'], {}), '(self.MinimumScalingFactorAllowed[i])\n', (41963, 42000), True, 'import numpy as np\n'), ((42180, 42225), 'numpy.isnan', 'np.isnan', (['self.MaximumScalingFactorAllowed[i]'], {}), '(self.MaximumScalingFactorAllowed[i])\n', (42188, 42225), True, 'import numpy as np\n'), ((3048, 3061), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (3056, 3061), True, 'import numpy as np\n'), ((3914, 3926), 'numpy.array', 'np.array', (['da'], {}), '(da)\n', (3922, 3926), True, 'import numpy as np\n'), ((5128, 5158), 'numpy.expand_dims', 'np.expand_dims', (['emis2d'], {'axis': '(0)'}), '(emis2d, axis=0)\n', (5142, 5158), True, 'import numpy as np\n'), ((18150, 18175), 'xarray.load_dataset', 'xr.load_dataset', (['specfile'], {}), '(specfile)\n', (18165, 18175), True, 'import xarray as xr\n'), ((18216, 18239), 'xarray.load_dataset', 'xr.load_dataset', (['lefile'], {}), '(lefile)\n', (18231, 18239), True, 
'import xarray as xr\n'), ((18448, 18473), 'xarray.load_dataset', 'xr.load_dataset', (['specfile'], {}), '(specfile)\n', (18463, 18473), True, 'import xarray as xr\n'), ((23391, 23406), 'numpy.shape', 'np.shape', (['gccol'], {}), '(gccol)\n', (23399, 23406), True, 'import numpy as np\n'), ((41091, 41106), 'numpy.isnan', 'np.isnan', (['ratio'], {}), '(ratio)\n', (41099, 41106), True, 'import numpy as np\n'), ((42021, 42094), 'numpy.where', 'np.where', (['(analysisScalefactor[i, :] < self.MinimumScalingFactorAllowed[i])'], {}), '(analysisScalefactor[i, :] < self.MinimumScalingFactorAllowed[i])\n', (42029, 42094), True, 'import numpy as np\n'), ((42246, 42319), 'numpy.where', 'np.where', (['(analysisScalefactor[i, :] > self.MaximumScalingFactorAllowed[i])'], {}), '(analysisScalefactor[i, :] > self.MaximumScalingFactorAllowed[i])\n', (42254, 42319), True, 'import numpy as np\n'), ((43427, 43458), 'numpy.shape', 'np.shape', (['self.Xpert_background'], {}), '(self.Xpert_background)\n', (43435, 43458), True, 'import numpy as np\n'), ((2900, 2912), 'numpy.shape', 'np.shape', (['da'], {}), '(da)\n', (2908, 2912), True, 'import numpy as np\n'), ((3195, 3211), 'numpy.shape', 'np.shape', (['conc4d'], {}), '(conc4d)\n', (3203, 3211), True, 'import numpy as np\n'), ((5243, 5268), 'numpy.array', 'np.array', (['[new_last_time]'], {}), '([new_last_time])\n', (5251, 5268), True, 'import numpy as np\n'), ((7074, 7097), 'numpy.shape', 'np.shape', (['self.statevec'], {}), '(self.statevec)\n', (7082, 7097), True, 'import numpy as np\n'), ((7464, 7486), 'numpy.shape', 'np.shape', (['surr_latinds'], {}), '(surr_latinds)\n', (7472, 7486), True, 'import numpy as np\n'), ((7489, 7511), 'numpy.shape', 'np.shape', (['surr_loninds'], {}), '(surr_loninds)\n', (7497, 7511), True, 'import numpy as np\n'), ((11329, 11351), 'numpy.shape', 'np.shape', (['surr_latinds'], {}), '(surr_latinds)\n', (11337, 11351), True, 'import numpy as np\n'), ((11354, 11376), 'numpy.shape', 'np.shape', (['surr_loninds'], {}), '(surr_loninds)\n', (11362, 11376), True, 'import numpy as np\n'), ((33909, 33928), 'numpy.shape', 'np.shape', (['statevecs'], {}), '(statevecs)\n', (33917, 33928), True, 'import numpy as np\n'), ((34413, 34433), 'numpy.shape', 'np.shape', (['state_mean'], {}), '(state_mean)\n', (34421, 34433), True, 'import numpy as np\n'), ((34484, 34498), 'numpy.shape', 'np.shape', (['bigX'], {}), '(bigX)\n', (34492, 34498), True, 'import numpy as np\n'), ((35266, 35289), 'numpy.shape', 'np.shape', (['full_obsmeans'], {}), '(full_obsmeans)\n', (35274, 35289), True, 'import numpy as np\n'), ((35343, 35366), 'numpy.shape', 'np.shape', (['full_obsperts'], {}), '(full_obsperts)\n', (35351, 35366), True, 'import numpy as np\n'), ((35424, 35447), 'numpy.shape', 'np.shape', (['full_obsdiffs'], {}), '(full_obsdiffs)\n', (35432, 35447), True, 'import numpy as np\n'), ((37286, 37316), 'numpy.shape', 'np.shape', (['self.ybar_background'], {}), '(self.ybar_background)\n', (37294, 37316), True, 'import numpy as np\n'), ((37395, 37426), 'numpy.shape', 'np.shape', (['self.Ypert_background'], {}), '(self.Ypert_background)\n', (37403, 37426), True, 'import numpy as np\n'), ((37494, 37514), 'numpy.shape', 'np.shape', (['self.ydiff'], {}), '(self.ydiff)\n', (37502, 37514), True, 'import numpy as np\n'), ((37592, 37622), 'numpy.shape', 'np.shape', (['self.xbar_background'], {}), '(self.xbar_background)\n', (37600, 37622), True, 'import numpy as np\n'), ((37701, 37732), 'numpy.shape', 'np.shape', (['self.Xpert_background'], {}), 
'(self.Xpert_background)\n', (37709, 37732), True, 'import numpy as np\n'), ((38163, 38179), 'numpy.shape', 'np.shape', (['self.R'], {}), '(self.R)\n', (38171, 38179), True, 'import numpy as np\n'), ((38355, 38371), 'numpy.shape', 'np.shape', (['self.C'], {}), '(self.C)\n', (38363, 38371), True, 'import numpy as np\n'), ((38671, 38700), 'numpy.shape', 'np.shape', (['self.PtildeAnalysis'], {}), '(self.PtildeAnalysis)\n', (38679, 38700), True, 'import numpy as np\n'), ((38936, 38960), 'numpy.shape', 'np.shape', (['self.WAnalysis'], {}), '(self.WAnalysis)\n', (38944, 38960), True, 'import numpy as np\n'), ((39162, 39189), 'numpy.shape', 'np.shape', (['self.WbarAnalysis'], {}), '(self.WbarAnalysis)\n', (39170, 39189), True, 'import numpy as np\n'), ((39427, 39451), 'numpy.shape', 'np.shape', (['self.WAnalysis'], {}), '(self.WAnalysis)\n', (39435, 39451), True, 'import numpy as np\n'), ((39831, 39862), 'numpy.shape', 'np.shape', (['self.analysisEnsemble'], {}), '(self.analysisEnsemble)\n', (39839, 39862), True, 'import numpy as np\n'), ((14534, 14550), 'numpy.shape', 'np.shape', (['conc3d'], {}), '(conc3d)\n', (14542, 14550), True, 'import numpy as np\n'), ((41689, 41712), 'numpy.abs', 'np.abs', (['relativechanges'], {}), '(relativechanges)\n', (41695, 41712), True, 'import numpy as np\n'), ((41773, 41811), 'numpy.sign', 'np.sign', (['relativechanges[relOverwrite]'], {}), '(relativechanges[relOverwrite])\n', (41780, 41811), True, 'import numpy as np\n'), ((5785, 5797), 'datetime.date.today', 'date.today', ([], {}), '()\n', (5795, 5797), False, 'from datetime import date, datetime, timedelta\n')]
# This is where our imports go.
from alembic.config import Config
from flask import Flask
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from flask_googlemaps import GoogleMaps
from os import environ

# make key.py with API_KEY='your_api_string'
from flaskr import config, key

alembic_cfg = Config()

# These are the configurations we need for flask and SQLite
app = Flask(__name__)
app.config.from_object(config.Config)
app.config["GOOGLEMAPS_KEY"] = key.API_KEY

if "DOCKERENV" in environ:
    app.config["SQLALCHEMY_DATABASE_URI"] = "postgresql+psycopg2://postgres:[email protected]:5432/asymptomatix"
else:
    app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///cases.db"

db = SQLAlchemy(app)
db.init_app(app)  # you can also pass the key here if you prefer

# Create all database tables
engine = create_engine("sqlite:///cases.db", echo=True)
migrate = Migrate(app, db, include_schemas=True)

from flaskr import routes
[ "alembic.config.Config", "flask.Flask", "flask_migrate.Migrate", "flask_sqlalchemy.SQLAlchemy", "sqlalchemy.create_engine" ]
[((356, 364), 'alembic.config.Config', 'Config', ([], {}), '()\n', (362, 364), False, 'from alembic.config import Config\n'), ((432, 447), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (437, 447), False, 'from flask import Flask\n'), ((759, 774), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (769, 774), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((880, 926), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///cases.db"""'], {'echo': '(True)'}), "('sqlite:///cases.db', echo=True)\n", (893, 926), False, 'from sqlalchemy import create_engine\n'), ((937, 975), 'flask_migrate.Migrate', 'Migrate', (['app', 'db'], {'include_schemas': '(True)'}), '(app, db, include_schemas=True)\n', (944, 975), False, 'from flask_migrate import Migrate\n')]
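Assuming the module above is the package's flaskr/__init__.py (its imports of flaskr.config, flaskr.key and flaskr.routes suggest as much), a minimal development entry point could look like the following sketch; the file name, host and port are placeholders and not part of the original project.

# run.py -- hypothetical development entry point for the flaskr package above.
from flaskr import app, db

if __name__ == "__main__":
    with app.app_context():
        db.create_all()  # create the SQLite tables on first run
    app.run(host="0.0.0.0", port=5000, debug=True)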
#Python wrapper / library for Einstein Analytics API import sys import browser_cookie3 import requests import json import time import datetime from dateutil import tz import pandas as pd import numpy as np import re from pandas import json_normalize from decimal import Decimal import base64 import csv import unicodecsv from unidecode import unidecode import math class salesforceEinsteinAnalytics(object): def __init__(self, env_url, browser): self.env_url = env_url try: if browser == 'chrome': cj = browser_cookie3.chrome(domain_name=env_url[8:]) #remove first 8 characters since browser cookie does not expect "https://" my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} elif browser == 'firefox': cj = browser_cookie3.firefox(domain_name=env_url[8:]) my_cookies = requests.utils.dict_from_cookiejar(cj) self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'} else: print('Please select a valid browser (chrome or firefox)') sys.exit(1) except: print('ERROR: Could not get session ID. Make sure you are logged into a live Salesforce session (chrome/firefox).') sys.exit(1) #set timezone for displayed operation start time def get_local_time(self, add_sec=None, timeFORfile=False): curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal()) if add_sec is not None: return (curr_time + datetime.timedelta(seconds=add_sec)).strftime("%I:%M:%S %p") elif timeFORfile == True: return curr_time.strftime("%m_%d_%Y__%I%p") else: return curr_time.strftime("%I:%M:%S %p") def get_dataset_id(self, dataset_name, search_type='API Name', verbose=False): params = {'pageSize': 50, 'sort': 'Mru', 'hasCurrentOnly': 'true', 'q': dataset_name} dataset_json = requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header, params=params) dataset_df = json_normalize(json.loads(dataset_json.text)['datasets']) #check if the user wants to seach by API name or label name if search_type == 'UI Label': dataset_df = dataset_df[dataset_df['label'] == dataset_name] else: dataset_df = dataset_df[dataset_df['name'] == dataset_name] #show user how many matches that they got. Might want to use exact API name if getting multiple matches for label search. if verbose == True: print('Found '+str(dataset_df.shape[0])+' matching datasets.') #if dataframe is empty then return not found message or return the dataset ID if dataset_df.empty == True: print('Dataset not found. 
Please check name or API name in Einstein Analytics.') sys.exit(1) else: dsnm = dataset_df['name'].tolist()[0] dsid = dataset_df['id'].tolist()[0] #get dataset version ID r = requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header) dsvid = json.loads(r.text)['currentVersionId'] return dsnm, dsid, dsvid def run_saql_query(self, saql, save_path=None, verbose=False): ''' This function takes a saql query as an argument and returns a dataframe or saves to csv The query can be in JSON form or can be in the UI SAQL form load statements must have the appropreate spaces: =_load_\"datasetname\"; ''' if verbose == True: start = time.time() print('Checking SAQL and Finding Dataset IDs...') print('Process started at: '+str(self.get_local_time())) saql = saql.replace('\"','\\"') #convert UI saql query to JSON format #create a dictionary with all datasets used in the query load_stmt_old = re.findall(r"(= load )(.*?)(;)", saql) load_stmt_new = load_stmt_old.copy() for ls in range(0,len(load_stmt_new)): load_stmt_old[ls] = ''.join(load_stmt_old[ls]) dsnm, dsid, dsvid = self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\"',''), verbose=verbose) load_stmt_new[ls] = ''.join(load_stmt_new[ls]) load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid) #update saql with dataset ID and version ID for i in range(0,len(load_stmt_new)): saql = saql.replace(load_stmt_old[i], load_stmt_new[i]) saql = saql.replace('\\"','\"') if verbose == True: print('Running SAQL Query...') #run query and return dataframe or save as csv payload = {"query":saql} r = requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header, data=json.dumps(payload) ) df = json_normalize(json.loads(r.text)['results']['records']) if save_path is not None: if verbose == True: print('Saving result to CSV...') df.to_csv(save_path, index=False) if verbose == True: end = time.time() print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return df else: if verbose == True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return df def restore_previous_dashboard_version(self, dashboard_id, version_num=None, save_json_path=None): ''' version number goes backwards 0 = current version 20 is max oldest version. Typically best practice to run the function and view the history first before supplying a version number. 
''' #get broken dashboard version history r = requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header) history_df = json_normalize(json.loads(r.text)['histories']) if save_json_path is not None and version_num is not None: preview_link = history_df['previewUrl'].tolist()[version_num] r_restore = requests.get(self.env_url+preview_link, headers=self.header) with open(save_json_path, 'w', encoding='utf-8') as f: json.dump(r_restore.json(), f, ensure_ascii=False, indent=4) elif version_num is not None: payload = { "historyId": history_df['id'].tolist()[version_num] } fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, data=json.dumps(payload)) else: return history_df def get_app_user_list(self, app_id=None, save_path=None, verbose=False, max_request_attempts=3): if verbose == True: start = time.time() progress_counter = 0 print('Getting app user list and access details...') print('Process started at: '+str(self.get_local_time())) if app_id is None: '''ALERT: CURRENTLY GETTING AN ERROR FOR ALL APP REQUEST ERROR = OpenSSL.SSL.SysCallError: (-1, 'Unexpected EOF') Proposed Solution is to add a try/except block to handle the error ''' attempts = 0 while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header) response = json.loads(r.text) total_size = response['totalSize'] next_page = response['nextPageUrl'] app_user_df = pd.DataFrame() break except: attempts += 1 if verbose == True: print("Unexpected error:", sys.exc_info()[0]) print("Trying again...") for app in response['folders']: attempts = 0 while attempts < max_request_attempts: try: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app["id"], headers=self.header) users = json.loads(r.text)['shares'] for u in users: app_user_df = app_user_df.append( { "AppId": app['id'], "AppName": app['name'], "UserId": u['sharedWithId'], "UserName": u['sharedWithLabel'], "AccessType": u['accessType'], "UserType": u['shareType'] }, ignore_index=True) break except: attempts += 1 if verbose == True: print("Unexpected error:", sys.exc_info()[0]) print("Trying again...") #continue to pull data from next page attempts = 0 # reset attempts for additional pages while next_page is not None: if verbose == True: progress_counter += 25 print('Progress: '+str(round(progress_counter/total_size*100,1))+'%') while attempts < max_request_attempts: try: np = requests.get(self.env_url+next_page, headers=self.header) response = json.loads(np.text) next_page = response['nextPageUrl'] break except KeyError: next_page = None print(sys.exc_info()[0]) break except: attempts += 1 if verbose == True: print("Unexpected error:", sys.exc_info()[0]) print("Trying again...") while attempts < max_request_attempts: try: for app in response['folders']: r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app["id"], headers=self.header) users = json.loads(r.text)['shares'] for u in users: app_user_df = app_user_df.append( { "AppId": app['id'], "AppName": app['name'], "UserId": u['sharedWithId'], "UserName": u['sharedWithLabel'], "AccessType": u['accessType'], "UserType": u['shareType'] }, ignore_index=True) break except: attempts += 1 if verbose == True: print("Unexpected error:", sys.exc_info()[0]) print("Trying again...") elif app_id is not None: if type(app_id) is list or type(app_id) is tuple: for app in app_id: app_user_df = pd.DataFrame() r = 
requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app, headers=self.header) response = json.loads(r.text) for u in response['shares']: app_user_df = app_user_df.append( { "AppId": app, "AppName": response['name'], "UserId": u['sharedWithId'], "UserName": u['sharedWithLabel'], "AccessType": u['accessType'], "UserType": u['shareType'] }, ignore_index=True) else: print('Please input a list or tuple of app Ids') sys.exit(1) if save_path is not None: if verbose == True: print('Saving result to CSV...') app_user_df.to_csv(save_path, index=False) if verbose == True: end = time.time() print('Dataframe saved to CSV...') print('Completed in '+str(round(end-start,3))+'sec') return app_user_df else: if verbose == True: end = time.time() print('Completed in '+str(round(end-start,3))+'sec') return app_user_df def update_app_access(self, user_dict, app_id, update_type, verbose=False): ''' update types include: addNewUsers, fullReplaceAccess, removeUsers, updateUsers ''' if verbose == True: start = time.time() print('Updating App Access...') print('Process started at: '+str(self.get_local_time())) if update_type == 'fullReplaceAccess': shares = user_dict elif update_type == 'addNewUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] #remove fields in the JSON that we don't want for s in shares: try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass shares = shares + user_dict elif update_type == 'removeUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] to_remove = [] for u in user_dict: to_remove.append(u['sharedWithId']) for s in shares: if s['sharedWithId'] in to_remove: shares.remove(s) #remove fields in the JSON that we don't want for s in shares: try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass elif update_type == 'updateUsers': r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header) response = json.loads(r.text) shares = response['shares'] to_update = [] for u in user_dict: to_update.append(u['sharedWithId']) for s in range(0,len(shares)): if shares[s]['sharedWithId'] in to_update: shares[s] = next(item for item in user_dict if item["sharedWithId"] == shares[s]['sharedWithId']) #remove fields in the JSON that we don't want for s in shares: try: del s['sharedWithLabel'] except: pass try: del s['imageUrl'] except: pass else: shares = None print('Please choose a user update operation. Options are: addNewUsers, fullReplaceAccess, removeUsers, updateUsers') sys.exit(1) if shares is not None: payload = {"shares": shares} r = requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header, data=json.dumps(payload)) if verbose == True: end = time.time() print('User Access Updated') print('Completed in '+str(round(end-start,3))+'sec') def update_dashboard_access(self, update_df, update_type, verbose=True): ''' Function to make it easier to update access using dashboard names vs finding all apps needed. 
update dataframe should have the following columns: Dashboard Id, Access Type, and User Id ''' pass def remove_non_ascii(self, df, columns=None): if columns == None: columns = df.columns else: columns = columns for c in columns: if df[c].dtype == "O": df[c] = df[c].apply(lambda x: unidecode(x).replace("?","")) def create_xmd(self, df, dataset_label, useNumericDefaults=True, default_measure_val="0.0", default_measure_fmt="0.0#", charset="UTF-8", deliminator=",", lineterminator="\r\n"): dataset_label = dataset_label dataset_api_name = dataset_label.replace(" ","_") fields = [] for c in df.columns: if df[c].dtype == "datetime64[ns]": name = c.replace(" ","_") name = name.replace("__","_") date = { "fullyQualifiedName": name, "name": name, "type": "Date", "label": c, "format": "yyyy-MM-dd HH:mm:ss" } fields.append(date) elif np.issubdtype(df[c].dtype, np.number): if useNumericDefaults == True: precision = 18 scale = 2 elif useNumericDefaults == False: precision = df[c].astype('str').apply(lambda x: len(x.replace('.', ''))).max() scale = -df[c].astype('str').apply(lambda x: Decimal(x).as_tuple().exponent).min() name = c.replace(" ","_") name = name.replace("__","_") measure = { "fullyQualifiedName": name, "name": name, "type": "Numeric", "label": c, "precision": precision, "defaultValue": default_measure_val, "scale": scale, "format": default_measure_fmt, "decimalSeparator": "." } fields.append(measure) else: name = c.replace(" ","_") name = name.replace("__","_") dimension = { "fullyQualifiedName": name, "name": name, "type": "Text", "label": c } fields.append(dimension) xmd = { "fileFormat": { "charsetName": charset, "fieldsDelimitedBy": deliminator, "linesTerminatedBy": lineterminator }, "objects": [ { "connector": "CSV", "fullyQualifiedName": dataset_api_name, "label": dataset_label, "name": dataset_api_name, "fields": fields } ] } return str(xmd).replace("'",'"') def load_df_to_EA(self, df, dataset_api_name, xmd=None, encoding='UTF-8', operation='Overwrite', useNumericDefaults=True, default_measure_val="0.0", default_measure_fmt="0.0#", charset="UTF-8", deliminator=",", lineterminator="\r\n", removeNONascii=True, ascii_columns=None, fillna=True, dataset_label=None, verbose=False): ''' field names will show up exactly as the column names in the supplied dataframe ''' if verbose == True: start = time.time() print('Loading Data to Einstein Analytics...') print('Process started at: '+str(self.get_local_time())) dataset_api_name = dataset_api_name.replace(" ","_") if fillna == True: for c in df.columns: if df[c].dtype == "O": df[c].fillna('NONE', inplace=True) elif np.issubdtype(df[c].dtype, np.number): df[c].fillna(0, inplace=True) elif df[c].dtype == "datetime64[ns]": df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'), inplace=True) if ascii_columns is not None: self.remove_non_ascii(df, columns=ascii_columns) elif removeNONascii == True: self.remove_non_ascii(df) # Upload Config Steps if xmd is not None: xmd64 = base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode() else: xmd64 = base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name, useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val, default_measure_fmt=default_measure_fmt, charset=charset, deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode() upload_config = { 'Format' : 'CSV', 'EdgemartAlias' : dataset_api_name, 'Operation' : operation, 'Action' : 'None', 'MetadataJson': xmd64 } r1 = 
requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData', headers=self.header, data=json.dumps(upload_config)) try: json.loads(r1.text)['success'] == True except: print('ERROR: Upload Config Failed') print(r1.text) sys.exit(1) if verbose == True: print('Upload Configuration Complete...') print('Chunking and Uploading Data Parts...') MAX_FILE_SIZE = 10 * 1000 * 1000 - 49 df_memory = sys.getsizeof(df) rows_in_part = math.ceil(df.shape[0] / math.ceil(df_memory / MAX_FILE_SIZE)) partnum = 0 range_start = 0 max_data_part = rows_in_part for chunk in range(0, math.ceil(df_memory / MAX_FILE_SIZE)): df_part = df.iloc[range_start:max_data_part,:] if chunk == 0: data_part64 = base64.b64encode(df_part.to_csv(index=False, quotechar='"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() else: data_part64 = base64.b64encode(df_part.to_csv(index=False, header=False, quotechar='"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode() range_start += rows_in_part max_data_part += rows_in_part partnum += 1 if verbose == True: print('\rChunk '+str(chunk+1)+' of '+str(math.ceil(df_memory / MAX_FILE_SIZE))+' completed', end='', flush=True) payload = { "InsightsExternalDataId" : json.loads(r1.text)['id'], "PartNumber" : str(partnum), "DataFile" : data_part64 } r2 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart', headers=self.header, data=json.dumps(payload)) try: json.loads(r2.text)['success'] == True except: print('\nERROR: Datapart Upload Failed') print(r2.text) sys.exit(1) if verbose == True: print('\nDatapart Upload Complete...') payload = { "Action" : "Process" } r3 = requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header, data=json.dumps(payload)) if verbose == True: end = time.time() print('Data Upload Process Started. Check Progress in Data Monitor.') print('Job ID: '+str(json.loads(r1.text)['id'])) print('Completed in '+str(round(end-start,3))+'sec') if __name__ == '__main__': pass
[ "json.dumps", "requests.utils.dict_from_cookiejar", "datetime.datetime.utcnow", "sys.exc_info", "sys.getsizeof", "pandas.DataFrame", "json.loads", "dateutil.tz.tzlocal", "re.findall", "datetime.timedelta", "requests.get", "math.ceil", "pandas.to_datetime", "numpy.issubdtype", "sys.exit", "unidecode.unidecode", "decimal.Decimal", "browser_cookie3.firefox", "time.time", "dateutil.tz.tzutc", "browser_cookie3.chrome" ]
[((1998, 2104), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/datasets')"], {'headers': 'self.header', 'params': 'params'}), "(self.env_url + '/services/data/v48.0/wave/datasets', headers=\n self.header, params=params)\n", (2010, 2104), False, 'import requests\n'), ((3753, 3790), 're.findall', 're.findall', (['"""(= load )(.*?)(;)"""', 'saql'], {}), "('(= load )(.*?)(;)', saql)\n", (3763, 3790), False, 'import re\n'), ((5416, 5539), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/dashboards/' + dashboard_id +\n '/histories')"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/dashboards/' +\n dashboard_id + '/histories', headers=self.header)\n", (5428, 5539), False, 'import requests\n'), ((17615, 17632), 'sys.getsizeof', 'sys.getsizeof', (['df'], {}), '(df)\n', (17628, 17632), False, 'import sys\n'), ((1532, 1544), 'dateutil.tz.tzlocal', 'tz.tzlocal', ([], {}), '()\n', (1542, 1544), False, 'from dateutil import tz\n'), ((2816, 2827), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2824, 2827), False, 'import sys\n'), ((2954, 3052), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/datasets/' + dsid)"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/datasets/' + dsid,\n headers=self.header)\n", (2966, 3052), False, 'import requests\n'), ((3473, 3484), 'time.time', 'time.time', ([], {}), '()\n', (3482, 3484), False, 'import time\n'), ((5738, 5800), 'requests.get', 'requests.get', (['(self.env_url + preview_link)'], {'headers': 'self.header'}), '(self.env_url + preview_link, headers=self.header)\n', (5750, 5800), False, 'import requests\n'), ((6326, 6337), 'time.time', 'time.time', ([], {}), '()\n', (6335, 6337), False, 'import time\n'), ((10763, 10774), 'time.time', 'time.time', ([], {}), '()\n', (10772, 10774), False, 'import time\n'), ((12952, 12963), 'time.time', 'time.time', ([], {}), '()\n', (12961, 12963), False, 'import time\n'), ((15916, 15927), 'time.time', 'time.time', ([], {}), '()\n', (15925, 15927), False, 'import time\n'), ((17800, 17836), 'math.ceil', 'math.ceil', (['(df_memory / MAX_FILE_SIZE)'], {}), '(df_memory / MAX_FILE_SIZE)\n', (17809, 17836), False, 'import math\n'), ((19141, 19152), 'time.time', 'time.time', ([], {}), '()\n', (19150, 19152), False, 'import time\n'), ((526, 573), 'browser_cookie3.chrome', 'browser_cookie3.chrome', ([], {'domain_name': 'env_url[8:]'}), '(domain_name=env_url[8:])\n', (548, 573), False, 'import browser_cookie3\n'), ((672, 710), 'requests.utils.dict_from_cookiejar', 'requests.utils.dict_from_cookiejar', (['cj'], {}), '(cj)\n', (706, 710), False, 'import requests\n'), ((1326, 1337), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1334, 1337), False, 'import sys\n'), ((2129, 2158), 'json.loads', 'json.loads', (['dataset_json.text'], {}), '(dataset_json.text)\n', (2139, 2158), False, 'import json\n'), ((3056, 3074), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (3066, 3074), False, 'import json\n'), ((4565, 4584), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (4575, 4584), False, 'import json\n'), ((4823, 4834), 'time.time', 'time.time', ([], {}), '()\n', (4832, 4834), False, 'import time\n'), ((4986, 4997), 'time.time', 'time.time', ([], {}), '()\n', (4995, 4997), False, 'import time\n'), ((5560, 5578), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (5570, 5578), False, 'import json\n'), ((10288, 10299), 'time.time', 'time.time', ([], {}), '()\n', (10297, 
10299), False, 'import time\n'), ((10465, 10476), 'time.time', 'time.time', ([], {}), '()\n', (10474, 10476), False, 'import time\n'), ((10981, 11080), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/folders/' + app_id)"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/folders/' + app_id,\n headers=self.header)\n", (10993, 11080), False, 'import requests\n'), ((11087, 11105), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (11097, 11105), False, 'import json\n'), ((14157, 14194), 'numpy.issubdtype', 'np.issubdtype', (['df[c].dtype', 'np.number'], {}), '(df[c].dtype, np.number)\n', (14170, 14194), True, 'import numpy as np\n'), ((17281, 17306), 'json.dumps', 'json.dumps', (['upload_config'], {}), '(upload_config)\n', (17291, 17306), False, 'import json\n'), ((17429, 17440), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (17437, 17440), False, 'import sys\n'), ((17674, 17710), 'math.ceil', 'math.ceil', (['(df_memory / MAX_FILE_SIZE)'], {}), '(df_memory / MAX_FILE_SIZE)\n', (17683, 17710), False, 'import math\n'), ((18822, 18833), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (18830, 18833), False, 'import sys\n'), ((19089, 19108), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (19099, 19108), False, 'import json\n'), ((866, 914), 'browser_cookie3.firefox', 'browser_cookie3.firefox', ([], {'domain_name': 'env_url[8:]'}), '(domain_name=env_url[8:])\n', (889, 914), False, 'import browser_cookie3\n'), ((938, 976), 'requests.utils.dict_from_cookiejar', 'requests.utils.dict_from_cookiejar', (['cj'], {}), '(cj)\n', (972, 976), False, 'import requests\n'), ((1175, 1186), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1183, 1186), False, 'import sys\n'), ((4609, 4627), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (4619, 4627), False, 'import json\n'), ((6775, 6865), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/folders')"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/folders', headers=\n self.header)\n", (6787, 6865), False, 'import requests\n'), ((6875, 6893), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (6885, 6893), False, 'import json\n'), ((6995, 7009), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7007, 7009), True, 'import pandas as pd\n'), ((10097, 10108), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (10105, 10108), False, 'import sys\n'), ((11405, 11504), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/folders/' + app_id)"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/folders/' + app_id,\n headers=self.header)\n", (11417, 11504), False, 'import requests\n'), ((11511, 11529), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (11521, 11529), False, 'import json\n'), ((12898, 12917), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (12908, 12917), False, 'import json\n'), ((16216, 16253), 'numpy.issubdtype', 'np.issubdtype', (['df[c].dtype', 'np.number'], {}), '(df[c].dtype, np.number)\n', (16229, 16253), True, 'import numpy as np\n'), ((17318, 17337), 'json.loads', 'json.loads', (['r1.text'], {}), '(r1.text)\n', (17328, 17337), False, 'import json\n'), ((18462, 18481), 'json.loads', 'json.loads', (['r1.text'], {}), '(r1.text)\n', (18472, 18481), False, 'import json\n'), ((18676, 18695), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (18686, 18695), False, 'import json\n'), ((18707, 18726), 'json.loads', 
'json.loads', (['r2.text'], {}), '(r2.text)\n', (18717, 18726), False, 'import json\n'), ((19036, 19055), 'json.loads', 'json.loads', (['r1.text'], {}), '(r1.text)\n', (19046, 19055), False, 'import json\n'), ((1467, 1493), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1491, 1493), False, 'import datetime\n'), ((1509, 1519), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (1517, 1519), False, 'from dateutil import tz\n'), ((1604, 1639), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'add_sec'}), '(seconds=add_sec)\n', (1622, 1639), False, 'import datetime\n'), ((6134, 6153), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (6144, 6153), False, 'import json\n'), ((7276, 7379), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/folders/' + app['id'])"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/folders/' + app['id'\n ], headers=self.header)\n", (7288, 7379), False, 'import requests\n'), ((8249, 8308), 'requests.get', 'requests.get', (['(self.env_url + next_page)'], {'headers': 'self.header'}), '(self.env_url + next_page, headers=self.header)\n', (8261, 8308), False, 'import requests\n'), ((8324, 8343), 'json.loads', 'json.loads', (['np.text'], {}), '(np.text)\n', (8334, 8343), False, 'import json\n'), ((9520, 9534), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9532, 9534), True, 'import pandas as pd\n'), ((9544, 9640), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/folders/' + app)"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/folders/' + app,\n headers=self.header)\n", (9556, 9640), False, 'import requests\n'), ((9649, 9667), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (9659, 9667), False, 'import json\n'), ((11958, 12057), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/folders/' + app_id)"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/folders/' + app_id,\n headers=self.header)\n", (11970, 12057), False, 'import requests\n'), ((12064, 12082), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (12074, 12082), False, 'import json\n'), ((12720, 12731), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (12728, 12731), False, 'import sys\n'), ((7385, 7403), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (7395, 7403), False, 'import json\n'), ((8734, 8837), 'requests.get', 'requests.get', (["(self.env_url + '/services/data/v48.0/wave/folders/' + app['id'])"], {'headers': 'self.header'}), "(self.env_url + '/services/data/v48.0/wave/folders/' + app['id'\n ], headers=self.header)\n", (8746, 8837), False, 'import requests\n'), ((19250, 19269), 'json.loads', 'json.loads', (['r1.text'], {}), '(r1.text)\n', (19260, 19269), False, 'import json\n'), ((8844, 8862), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (8854, 8862), False, 'import json\n'), ((13543, 13555), 'unidecode.unidecode', 'unidecode', (['x'], {}), '(x)\n', (13552, 13555), False, 'from unidecode import unidecode\n'), ((16350, 16387), 'pandas.to_datetime', 'pd.to_datetime', (['"""1900-01-01 00:00:00"""'], {}), "('1900-01-01 00:00:00')\n", (16364, 16387), True, 'import pandas as pd\n'), ((16635, 16650), 'json.dumps', 'json.dumps', (['xmd'], {}), '(xmd)\n', (16645, 16650), False, 'import json\n'), ((18343, 18379), 'math.ceil', 'math.ceil', (['(df_memory / MAX_FILE_SIZE)'], {}), '(df_memory / MAX_FILE_SIZE)\n', (18352, 18379), False, 'import math\n'), 
((7110, 7124), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7122, 7124), False, 'import sys\n'), ((8455, 8469), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8467, 8469), False, 'import sys\n'), ((7878, 7892), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7890, 7892), False, 'import sys\n'), ((8579, 8593), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8591, 8593), False, 'import sys\n'), ((9345, 9359), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (9357, 9359), False, 'import sys\n'), ((14438, 14448), 'decimal.Decimal', 'Decimal', (['x'], {}), '(x)\n', (14445, 14448), False, 'from decimal import Decimal\n')]
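A typical round trip with the salesforceEinsteinAnalytics wrapper above is to instantiate it against a live Salesforce browser session, pull data down with a SAQL query, and push a DataFrame back up as a dataset. The sketch below uses only methods defined in the class; the org URL, dataset names, field names and SAQL text are placeholders, and it assumes you are logged into Salesforce in Chrome or Firefox, as the constructor requires.

# Hypothetical usage of salesforceEinsteinAnalytics; URLs, dataset and field names
# and the SAQL text are placeholders -- only the method signatures come from the class.
import pandas as pd

ea = salesforceEinsteinAnalytics(env_url='https://yourorg.my.salesforce.com',
                            browser='chrome')

# Pull data down: load statements must use the UI form = load "dataset_name"; as noted in run_saql_query.
saql = 'q = load "opportunity_dataset"; q = group q by \'StageName\'; q = foreach q generate \'StageName\', count() as \'Count\';'
df = ea.run_saql_query(saql, verbose=True)

# Push a local DataFrame back up as a dataset (Overwrite is the default operation).
scores = pd.DataFrame({'AccountId': ['001xx0000001', '001xx0000002'], 'Score': [0.82, 0.41]})
ea.load_df_to_EA(scores, dataset_api_name='Account_Scores', verbose=True)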
""" Тестирование """ import sys sys.path.append('.') from objects import auto_type, equal_object, RegistryStore def test_auto_type(): """ Проверка преобразования значений """ assert auto_type('test') == str('test') assert auto_type('5') == 5 for i in ['true', 'y', 'yes']: assert auto_type(i) for i in ['false', 'f', 'no']: assert not auto_type(i) def test_equal_object(): """ Проверка функции идентификации объекта по атрибутам """ test_object = RegistryStore() test_object.test1 = True test_object.test2 = 'foo' test_object.test3 = 5 assert equal_object(test_object, ['test1=true', 'test2=foo', 'test3=5']) assert not equal_object(test_object, ['test1=false', 'test2=foo', 'test3=5'])
[ "sys.path.append", "objects.equal_object", "objects.auto_type", "objects.RegistryStore" ]
[((37, 57), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (52, 57), False, 'import sys\n'), ((519, 534), 'objects.RegistryStore', 'RegistryStore', ([], {}), '()\n', (532, 534), False, 'from objects import auto_type, equal_object, RegistryStore\n'), ((632, 697), 'objects.equal_object', 'equal_object', (['test_object', "['test1=true', 'test2=foo', 'test3=5']"], {}), "(test_object, ['test1=true', 'test2=foo', 'test3=5'])\n", (644, 697), False, 'from objects import auto_type, equal_object, RegistryStore\n'), ((209, 226), 'objects.auto_type', 'auto_type', (['"""test"""'], {}), "('test')\n", (218, 226), False, 'from objects import auto_type, equal_object, RegistryStore\n'), ((253, 267), 'objects.auto_type', 'auto_type', (['"""5"""'], {}), "('5')\n", (262, 267), False, 'from objects import auto_type, equal_object, RegistryStore\n'), ((323, 335), 'objects.auto_type', 'auto_type', (['i'], {}), '(i)\n', (332, 335), False, 'from objects import auto_type, equal_object, RegistryStore\n'), ((713, 779), 'objects.equal_object', 'equal_object', (['test_object', "['test1=false', 'test2=foo', 'test3=5']"], {}), "(test_object, ['test1=false', 'test2=foo', 'test3=5'])\n", (725, 779), False, 'from objects import auto_type, equal_object, RegistryStore\n'), ((390, 402), 'objects.auto_type', 'auto_type', (['i'], {}), '(i)\n', (399, 402), False, 'from objects import auto_type, equal_object, RegistryStore\n')]
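The tests above pin down the expected behaviour of objects.auto_type without showing its body: plain strings stay strings, numeric strings become integers, and yes/no style flags become booleans. An implementation consistent with those assertions might look like the inferred sketch below; the real code in objects.py may differ. Running pytest from the project root exercises both tests, which is why the file prepends '.' to sys.path.

# Inferred sketch of an auto_type() that would satisfy the tests above;
# the actual implementation in objects.py may differ.
def auto_type(value: str):
    lowered = value.lower()
    if lowered in ('true', 'y', 'yes'):
        return True
    if lowered in ('false', 'f', 'no'):
        return False
    try:
        return int(value)   # '5' -> 5
    except ValueError:
        return value        # anything else stays a plain string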
import PyQt5.QtWidgets as W


class ProfilesTable(W.QTableWidget):
    def __init__(self, parent, profiles=[]):
        super(ProfilesTable, self).__init__(parent)
        self.init()
        self.update_profiles(profiles)

    def init(self):
        self.setColumnCount(2)
        self.setHorizontalHeaderLabels(["Name", "Number of summoners"])
        self.setEditTriggers(W.QAbstractItemView.NoEditTriggers)
        self.verticalHeader().hide()
        self.setSelectionBehavior(W.QAbstractItemView.SelectRows)

    def update_profiles(self, profiles):
        self.setRowCount(len(profiles))
        i = 0
        for profile in profiles:
            self.setItem(i, 0, W.QTableWidgetItem(profile))
            self.setItem(i, 1, W.QTableWidgetItem(str(len(profiles[profile]))))
            i += 1
        self.resizeColumnsToContents()

    def get_current_profile_name(self):
        current_row = self.currentRow()
        if current_row == -1:
            print("Is empty")
            return
        profile = self.item(current_row, 0).text()
        return profile

    def delete(self, profiles):
        current_row = self.currentRow()
        if current_row == -1:
            print("Is empty")
            return
        profile = self.takeItem(current_row, 0).text()
        if profile in profiles:
            return profile
[ "PyQt5.QtWidgets.QTableWidgetItem" ]
[((677, 704), 'PyQt5.QtWidgets.QTableWidgetItem', 'W.QTableWidgetItem', (['profile'], {}), '(profile)\n', (695, 704), True, 'import PyQt5.QtWidgets as W\n')]
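A minimal usage sketch for the ProfilesTable record above. It assumes the class is saved in a module named profiles_table (hypothetical name) and that profiles is a dict mapping a profile name to its list of summoners, which is the shape update_profiles() indexes into:

import sys
import PyQt5.QtWidgets as W
from profiles_table import ProfilesTable  # hypothetical module name

app = W.QApplication(sys.argv)
# Profile name -> list of summoner names, matching what update_profiles() expects.
profiles = {"Main": ["SummonerOne", "SummonerTwo"], "Smurf": ["SummonerThree"]}
table = ProfilesTable(None, profiles)
table.show()
app.exec_()  # blocks until the window is closed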
import click from parsec.cli import pass_context, json_loads from parsec.decorators import custom_exception, json_output @click.command('get_invocations') @click.option( "--workflow_id", help="Encoded workflow ID to filter on", type=str ) @click.option( "--history_id", help="Encoded history ID to filter on", type=str ) @click.option( "--user_id", help="Encoded user ID to filter on. This must be your own user ID if your are not an admin user.", type=str ) @click.option( "--include_terminal", help="Whether to include terminal states.", default="True", show_default=True, is_flag=True ) @click.option( "--limit", help="Maximum number of invocations to return - if specified, the most recent invocations will be returned.", type=int ) @click.option( "--view", help="Level of detail to return per invocation, either 'element' or 'collection'.", default="collection", show_default=True, type=str ) @click.option( "--step_details", help="If 'view' is 'element', also include details on individual steps.", is_flag=True ) @pass_context @custom_exception @json_output def cli(ctx, workflow_id="", history_id="", user_id="", include_terminal=True, limit="", view="collection", step_details=False): """Get all workflow invocations, or select a subset by specifying optional arguments for filtering (e.g. a workflow ID). Output: A list of workflow invocations. For example:: [{'history_id': '2f94e8ae9edff68a', 'id': 'df7a1f0c02a5b08e', 'model_class': 'WorkflowInvocation', 'state': 'new', 'update_time': '2015-10-31T22:00:22', 'uuid': 'c8aa2b1c-801a-11e5-a9e5-8ca98228593c', 'workflow_id': '03501d7626bd192f'}] """ return ctx.gi.invocations.get_invocations(workflow_id=workflow_id, history_id=history_id, user_id=user_id, include_terminal=include_terminal, limit=limit, view=view, step_details=step_details)
[ "click.option", "click.command" ]
[((124, 156), 'click.command', 'click.command', (['"""get_invocations"""'], {}), "('get_invocations')\n", (137, 156), False, 'import click\n'), ((158, 243), 'click.option', 'click.option', (['"""--workflow_id"""'], {'help': '"""Encoded workflow ID to filter on"""', 'type': 'str'}), "('--workflow_id', help='Encoded workflow ID to filter on', type=str\n )\n", (170, 243), False, 'import click\n'), ((254, 332), 'click.option', 'click.option', (['"""--history_id"""'], {'help': '"""Encoded history ID to filter on"""', 'type': 'str'}), "('--history_id', help='Encoded history ID to filter on', type=str)\n", (266, 332), False, 'import click\n'), ((348, 492), 'click.option', 'click.option', (['"""--user_id"""'], {'help': '"""Encoded user ID to filter on. This must be your own user ID if your are not an admin user."""', 'type': 'str'}), "('--user_id', help=\n 'Encoded user ID to filter on. This must be your own user ID if your are not an admin user.'\n , type=str)\n", (360, 492), False, 'import click\n'), ((498, 635), 'click.option', 'click.option', (['"""--include_terminal"""'], {'help': '"""Whether to include terminal states."""', 'default': '"""True"""', 'show_default': '(True)', 'is_flag': '(True)'}), "('--include_terminal', help=\n 'Whether to include terminal states.', default='True', show_default=\n True, is_flag=True)\n", (510, 635), False, 'import click\n'), ((649, 802), 'click.option', 'click.option', (['"""--limit"""'], {'help': '"""Maximum number of invocations to return - if specified, the most recent invocations will be returned."""', 'type': 'int'}), "('--limit', help=\n 'Maximum number of invocations to return - if specified, the most recent invocations will be returned.'\n , type=int)\n", (661, 802), False, 'import click\n'), ((808, 975), 'click.option', 'click.option', (['"""--view"""'], {'help': '"""Level of detail to return per invocation, either \'element\' or \'collection\'."""', 'default': '"""collection"""', 'show_default': '(True)', 'type': 'str'}), '(\'--view\', help=\n "Level of detail to return per invocation, either \'element\' or \'collection\'."\n , default=\'collection\', show_default=True, type=str)\n', (820, 975), False, 'import click\n'), ((989, 1116), 'click.option', 'click.option', (['"""--step_details"""'], {'help': '"""If \'view\' is \'element\', also include details on individual steps."""', 'is_flag': '(True)'}), '(\'--step_details\', help=\n "If \'view\' is \'element\', also include details on individual steps.",\n is_flag=True)\n', (1001, 1116), False, 'import click\n')]
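The command in the record above forwards its options unchanged to ctx.gi.invocations.get_invocations, where ctx.gi in parsec is backed by a BioBlend GalaxyInstance. A hedged sketch of the equivalent direct BioBlend call (the server URL and API key are placeholders, and the workflow ID is taken from the docstring example above):

from bioblend.galaxy import GalaxyInstance

# Placeholder Galaxy server and API key -- substitute real values.
gi = GalaxyInstance(url="https://usegalaxy.org", key="YOUR_API_KEY")
# Same keyword arguments that the click options above collect.
invocations = gi.invocations.get_invocations(
    workflow_id="03501d7626bd192f",
    include_terminal=True,
    view="collection",
)
for invocation in invocations:
    print(invocation["id"], invocation["state"])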
import argparse import json import os import numpy as np import utils import util def main(args): config = utils.get_hocon_config(config_path="./config/main.conf", config_name="base") input_file = args.input_file if args.is_training == 0: is_training = False else: is_training = True tokenizer = util.get_tokenizer(args.tokenizer_name) max_seg_len = args.seg_len genre_dict = {genre: idx for idx, genre in enumerate(config["genres"])} dataset = [] with open(input_file, "r") as f: for line in f.readlines(): # 1データの読み込み json_data = json.loads(line) doc_key = json_data["doc_key"] # Mentions and clusters clusters = json_data["clusters"] gold_mentions = sorted(tuple(mention) for mention in util.flatten(clusters)) gold_mention_map = {mention: idx for idx, mention in enumerate(gold_mentions)} # span -> index gold_mention_cluster_map = np.zeros(len(gold_mentions)) # 0: no cluster for cluster_id, cluster in enumerate(clusters): for mention in cluster: gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id + 1 # Speakers speakers = json_data["speakers"] speaker_dict = get_speaker_dict(util.flatten(speakers), config["max_num_speakers"]) # Segments segments = json_data["segments"] sentence_map = json_data["sentence_map"] num_words = sum([len(s) for s in segments]) segment_len = np.array([len(s) for s in segments]) # BERT input IDs/mask, speaker IDs input_ids, input_mask, speaker_ids = [], [], [] for idx, (sent_tokens, sent_speakers) in enumerate(zip(segments, speakers)): sent_input_ids = tokenizer.convert_tokens_to_ids(sent_tokens) sent_input_mask = [1] * len(sent_input_ids) sent_speaker_ids = [speaker_dict[speaker] for speaker in sent_speakers] while len(sent_input_ids) < max_seg_len: sent_input_ids.append(0) sent_input_mask.append(0) sent_speaker_ids.append(0) input_ids.append(sent_input_ids) input_mask.append(sent_input_mask) speaker_ids.append(sent_speaker_ids) input_ids = np.array(input_ids) input_mask = np.array(input_mask) speaker_ids = np.array(speaker_ids) assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask)) # Genre genre = genre_dict.get(doc_key[:2], 0) # Gold spans if len(gold_mentions) > 0: gold_starts, gold_ends = zip(*gold_mentions) else: gold_starts, gold_ends = [], [] gold_starts = np.array(gold_starts) gold_ends = np.array(gold_ends) # Others tokens = json_data["tokens"] original_sentence_boundaries = json_data["original_sentence_boundaries"] # XXX gold_clusters = json_data["clusters"] subtoken_map = json_data.get("subtoken_map", None) # DataInstanceに変換 kargs = { "doc_key": doc_key, "tokens": tokens, "original_sentence_boundaries": original_sentence_boundaries, # XXX "segments": segments, "sentence_map": sentence_map, "speakers": speakers, "gold_clusters": gold_clusters, "subtoken_map": subtoken_map, # "input_ids": input_ids, "input_mask": input_mask, "speaker_ids": speaker_ids, "segment_len": segment_len, "genre": genre, "is_training": is_training, "gold_starts": gold_starts, "gold_ends": gold_ends, "gold_mention_cluster_map": gold_mention_cluster_map, } data = utils.DataInstance(**kargs) dataset.append(data) dataset = np.asarray(dataset, dtype="O") output_file = os.path.basename(input_file).replace(".jsonlines", ".npy") output_file = os.path.join(config["caches"], output_file) np.save(output_file, dataset) print("Cached %s to %s" % (input_file, output_file)) def get_speaker_dict(speakers, max_num_speakers): """ Parameters ---------- speakers: list[str] Returns ------- dict[str, int] """ speaker_dict = {"UNK": 0, "[SPL]": 1} for speaker in speakers: if 
len(speaker_dict) > max_num_speakers:
            pass # "break" to limit # speakers
        if speaker not in speaker_dict:
            speaker_dict[speaker] = len(speaker_dict)
    return speaker_dict


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_file', type=str, required=True)
    parser.add_argument("--is_training", type=int, required=True)
    parser.add_argument('--tokenizer_name', type=str, required=True)
    parser.add_argument('--seg_len', type=int, required=True)
    args = parser.parse_args()
    main(args)
[ "numpy.save", "numpy.sum", "argparse.ArgumentParser", "json.loads", "os.path.basename", "numpy.asarray", "util.get_tokenizer", "utils.DataInstance", "utils.get_hocon_config", "numpy.array", "os.path.join", "util.flatten" ]
[((116, 192), 'utils.get_hocon_config', 'utils.get_hocon_config', ([], {'config_path': '"""./config/main.conf"""', 'config_name': '"""base"""'}), "(config_path='./config/main.conf', config_name='base')\n", (138, 192), False, 'import utils\n'), ((338, 377), 'util.get_tokenizer', 'util.get_tokenizer', (['args.tokenizer_name'], {}), '(args.tokenizer_name)\n', (356, 377), False, 'import util\n'), ((4216, 4246), 'numpy.asarray', 'np.asarray', (['dataset'], {'dtype': '"""O"""'}), "(dataset, dtype='O')\n", (4226, 4246), True, 'import numpy as np\n'), ((4343, 4386), 'os.path.join', 'os.path.join', (["config['caches']", 'output_file'], {}), "(config['caches'], output_file)\n", (4355, 4386), False, 'import os\n'), ((4391, 4420), 'numpy.save', 'np.save', (['output_file', 'dataset'], {}), '(output_file, dataset)\n', (4398, 4420), True, 'import numpy as np\n'), ((4971, 4996), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4994, 4996), False, 'import argparse\n'), ((624, 640), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (634, 640), False, 'import json\n'), ((2444, 2463), 'numpy.array', 'np.array', (['input_ids'], {}), '(input_ids)\n', (2452, 2463), True, 'import numpy as np\n'), ((2489, 2509), 'numpy.array', 'np.array', (['input_mask'], {}), '(input_mask)\n', (2497, 2509), True, 'import numpy as np\n'), ((2536, 2557), 'numpy.array', 'np.array', (['speaker_ids'], {}), '(speaker_ids)\n', (2544, 2557), True, 'import numpy as np\n'), ((2932, 2953), 'numpy.array', 'np.array', (['gold_starts'], {}), '(gold_starts)\n', (2940, 2953), True, 'import numpy as np\n'), ((2978, 2997), 'numpy.array', 'np.array', (['gold_ends'], {}), '(gold_ends)\n', (2986, 2997), True, 'import numpy as np\n'), ((4140, 4167), 'utils.DataInstance', 'utils.DataInstance', ([], {}), '(**kargs)\n', (4158, 4167), False, 'import utils\n'), ((4266, 4294), 'os.path.basename', 'os.path.basename', (['input_file'], {}), '(input_file)\n', (4282, 4294), False, 'import os\n'), ((1356, 1378), 'util.flatten', 'util.flatten', (['speakers'], {}), '(speakers)\n', (1368, 1378), False, 'import util\n'), ((2590, 2608), 'numpy.sum', 'np.sum', (['input_mask'], {}), '(input_mask)\n', (2596, 2608), True, 'import numpy as np\n'), ((2622, 2640), 'numpy.sum', 'np.sum', (['input_mask'], {}), '(input_mask)\n', (2628, 2640), True, 'import numpy as np\n'), ((832, 854), 'util.flatten', 'util.flatten', (['clusters'], {}), '(clusters)\n', (844, 854), False, 'import util\n')]
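The preprocessing script in the record above expects a .jsonlines file in which every line is one JSON document carrying the keys accessed in main(). A small illustrative sketch of producing such a file — the field values are toy data rather than a real corpus, and the file name is arbitrary:

import json

# One toy document with every key that main() reads; values are illustrative only.
record = {
    "doc_key": "bc/toy_document_0",
    "tokens": ["John", "said", "he", "was", "tired", "."],
    "segments": [["[CLS]", "John", "said", "he", "was", "tired", ".", "[SEP]"]],
    "sentence_map": [0, 0, 0, 0, 0, 0, 0, 0],
    "speakers": [["[SPL]", "spk1", "spk1", "spk1", "spk1", "spk1", "spk1", "[SPL]"]],
    "clusters": [[[1, 1], [3, 3]]],  # "John" and "he" corefer (toy span indices)
    "original_sentence_boundaries": [[0, 5]],
    "subtoken_map": [0, 0, 1, 2, 3, 4, 5, 5],
}
with open("toy.jsonlines", "w") as f:
    f.write(json.dumps(record) + "\n")
# The file is then passed to the script via --input_file toy.jsonlines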
from django.test import TestCase from basin3d.models import DataSource, SamplingMedium, \ ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable class DataSourceTestCase(TestCase): def setUp(self): DataSource.objects.create(name="Foo", plugin_module="foo.bar.plugins", plugin_class="Baz", id_prefix="F") DataSource.objects.create(name="Bar", plugin_module="foo.plugins", plugin_class="Bar", id_prefix="B") def test_get(self): """Assert that the Data Sources were created""" foo = DataSource.objects.get(name="Foo") bar = DataSource.objects.get(name="Bar") self.assertEqual(bar.name, "Bar") self.assertEqual(foo.name, 'Foo') class ObservedPropertyTestCase(TestCase): """ Assert that the parameters are created """ def setUp(self): """ Load some fake data to use in the tests """ self.datasource = DataSource.objects.get(name="Alpha") self.observed_property_var = ObservedPropertyVariable( id="FOO", full_name="Groundwater Flux", categories="Hydrology,Subsurface") self.sampling_medium = SamplingMedium() def test_observed_property_create(self): """ Was the object created correctly? """ obj = ObservedProperty(description="Acetate (CH3COO)", observed_property_variable=self.observed_property_var, sampling_medium=self.sampling_medium, datasource=self.datasource) assert obj.description == "Acetate (CH3COO)" assert obj.observed_property_variable == self.observed_property_var assert obj.sampling_medium == self.sampling_medium assert obj.datasource == self.datasource def test_observed_property_variable_create(self): """ create the object and test attributes """ assert self.observed_property_var.id == "FOO" assert self.observed_property_var.full_name == "Groundwater Flux" assert self.observed_property_var.categories == "Hydrology,Subsurface" def test_datasource_observed_property_variable_create(self): """ Was the object created correctly? """ obj = DataSourceObservedPropertyVariable( datasource=self.datasource, observed_property_variable=self.observed_property_var, name="Alpha") assert obj.datasource == self.datasource assert obj.observed_property_variable == self.observed_property_var assert obj.name == "Alpha"
[ "basin3d.models.ObservedProperty", "basin3d.models.SamplingMedium", "basin3d.models.ObservedPropertyVariable", "basin3d.models.DataSource.objects.create", "basin3d.models.DataSourceObservedPropertyVariable", "basin3d.models.DataSource.objects.get" ]
[((242, 351), 'basin3d.models.DataSource.objects.create', 'DataSource.objects.create', ([], {'name': '"""Foo"""', 'plugin_module': '"""foo.bar.plugins"""', 'plugin_class': '"""Baz"""', 'id_prefix': '"""F"""'}), "(name='Foo', plugin_module='foo.bar.plugins',\n plugin_class='Baz', id_prefix='F')\n", (267, 351), False, 'from basin3d.models import DataSource, SamplingMedium, ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable\n'), ((356, 461), 'basin3d.models.DataSource.objects.create', 'DataSource.objects.create', ([], {'name': '"""Bar"""', 'plugin_module': '"""foo.plugins"""', 'plugin_class': '"""Bar"""', 'id_prefix': '"""B"""'}), "(name='Bar', plugin_module='foo.plugins',\n plugin_class='Bar', id_prefix='B')\n", (381, 461), False, 'from basin3d.models import DataSource, SamplingMedium, ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable\n'), ((553, 587), 'basin3d.models.DataSource.objects.get', 'DataSource.objects.get', ([], {'name': '"""Foo"""'}), "(name='Foo')\n", (575, 587), False, 'from basin3d.models import DataSource, SamplingMedium, ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable\n'), ((602, 636), 'basin3d.models.DataSource.objects.get', 'DataSource.objects.get', ([], {'name': '"""Bar"""'}), "(name='Bar')\n", (624, 636), False, 'from basin3d.models import DataSource, SamplingMedium, ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable\n'), ((943, 979), 'basin3d.models.DataSource.objects.get', 'DataSource.objects.get', ([], {'name': '"""Alpha"""'}), "(name='Alpha')\n", (965, 979), False, 'from basin3d.models import DataSource, SamplingMedium, ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable\n'), ((1017, 1121), 'basin3d.models.ObservedPropertyVariable', 'ObservedPropertyVariable', ([], {'id': '"""FOO"""', 'full_name': '"""Groundwater Flux"""', 'categories': '"""Hydrology,Subsurface"""'}), "(id='FOO', full_name='Groundwater Flux', categories\n ='Hydrology,Subsurface')\n", (1041, 1121), False, 'from basin3d.models import DataSource, SamplingMedium, ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable\n'), ((1173, 1189), 'basin3d.models.SamplingMedium', 'SamplingMedium', ([], {}), '()\n', (1187, 1189), False, 'from basin3d.models import DataSource, SamplingMedium, ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable\n'), ((1301, 1479), 'basin3d.models.ObservedProperty', 'ObservedProperty', ([], {'description': '"""Acetate (CH3COO)"""', 'observed_property_variable': 'self.observed_property_var', 'sampling_medium': 'self.sampling_medium', 'datasource': 'self.datasource'}), "(description='Acetate (CH3COO)', observed_property_variable\n =self.observed_property_var, sampling_medium=self.sampling_medium,\n datasource=self.datasource)\n", (1317, 1479), False, 'from basin3d.models import DataSource, SamplingMedium, ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable\n'), ((2249, 2384), 'basin3d.models.DataSourceObservedPropertyVariable', 'DataSourceObservedPropertyVariable', ([], {'datasource': 'self.datasource', 'observed_property_variable': 'self.observed_property_var', 'name': '"""Alpha"""'}), "(datasource=self.datasource,\n observed_property_variable=self.observed_property_var, name='Alpha')\n", (2283, 2384), False, 'from basin3d.models import DataSource, SamplingMedium, ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable\n')]
import functools import hashlib import json import random from urlparse import urlparse import uuid from operator import attrgetter from django import http from django.conf import settings from django.db.models import Q from django.shortcuts import get_list_or_404, get_object_or_404, redirect from django.utils.translation import trans_real as translation from django.views.decorators.cache import cache_control from django.views.decorators.csrf import csrf_exempt from django.views.decorators.vary import vary_on_headers import caching.base as caching import jingo import jinja2 import commonware.log import session_csrf from tower import ugettext as _, ugettext_lazy as _lazy import waffle from mobility.decorators import mobilized, mobile_template import amo from amo import messages from amo.decorators import login_required, post_required, write from amo.forms import AbuseForm from amo.helpers import shared_url from amo.utils import randslice, sorted_groupby, urlparams from amo.models import manual_order from amo import urlresolvers from amo.urlresolvers import reverse from abuse.models import send_abuse_report from bandwagon.models import Collection, CollectionFeature, CollectionPromo from market.forms import PriceCurrencyForm import paypal from reviews.forms import ReviewForm from reviews.models import Review, GroupedRating from session_csrf import anonymous_csrf, anonymous_csrf_exempt from sharing.views import share as share_redirect from stats.models import Contribution from translations.query import order_by_translation from versions.models import Version from .forms import ContributionForm from .models import Addon, Persona, FrozenAddon from .decorators import (addon_view_factory, can_be_purchased, has_purchased, has_not_purchased) from mkt.webapps.models import Installed log = commonware.log.getLogger('z.addons') paypal_log = commonware.log.getLogger('z.paypal') addon_view = addon_view_factory(qs=Addon.objects.valid) addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed) addon_disabled_view = addon_view_factory(qs=Addon.objects.valid_and_disabled) def author_addon_clicked(f): """Decorator redirecting clicks on "Other add-ons by author".""" @functools.wraps(f) def decorated(request, *args, **kwargs): redirect_id = request.GET.get('addons-author-addons-select', None) if not redirect_id: return f(request, *args, **kwargs) try: target_id = int(redirect_id) return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[target_id])) except ValueError: return http.HttpResponseBadRequest('Invalid add-on ID.') return decorated @addon_disabled_view def addon_detail(request, addon): """Add-ons details page dispatcher.""" if addon.is_deleted: raise http.Http404 if addon.is_disabled: return jingo.render(request, 'addons/impala/disabled.html', {'addon': addon}, status=404) if addon.is_webapp(): # Apps don't deserve AMO detail pages. raise http.Http404 # addon needs to have a version and be valid for this app. if addon.type in request.APP.types: if addon.type == amo.ADDON_PERSONA: return persona_detail(request, addon) else: if not addon.current_version: raise http.Http404 return extension_detail(request, addon) else: # Redirect to an app that supports this type. 
try: new_app = [a for a in amo.APP_USAGE if addon.type in a.types][0] except IndexError: raise http.Http404 else: prefixer = urlresolvers.get_url_prefix() prefixer.app = new_app.short return http.HttpResponsePermanentRedirect(reverse( 'addons.detail', args=[addon.slug])) @vary_on_headers('X-Requested-With') def extension_detail(request, addon): """Extensions details page.""" # If current version is incompatible with this app, redirect. comp_apps = addon.compatible_apps if comp_apps and request.APP not in comp_apps: prefixer = urlresolvers.get_url_prefix() prefixer.app = comp_apps.keys()[0].short return redirect('addons.detail', addon.slug, permanent=True) # get satisfaction only supports en-US. lang = translation.to_locale(translation.get_language()) addon.has_satisfaction = (lang == 'en_US' and addon.get_satisfaction_company) # Addon recommendations. recommended = Addon.objects.listed(request.APP).filter( recommended_for__addon=addon)[:6] # Popular collections this addon is part of. collections = Collection.objects.listed().filter( addons=addon, application__id=request.APP.id) ctx = { 'addon': addon, 'src': request.GET.get('src', 'dp-btn-primary'), 'version_src': request.GET.get('src', 'dp-btn-version'), 'tags': addon.tags.not_blacklisted(), 'grouped_ratings': GroupedRating.get(addon.id), 'recommendations': recommended, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'collections': collections.order_by('-subscribers')[:3], 'abuse_form': AbuseForm(request=request), } # details.html just returns the top half of the page for speed. The bottom # does a lot more queries we don't want on the initial page load. if request.is_ajax(): # Other add-ons/apps from the same author(s). ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6] return jingo.render(request, 'addons/impala/details-more.html', ctx) else: if addon.is_webapp(): ctx['search_placeholder'] = 'apps' return jingo.render(request, 'addons/impala/details.html', ctx) @mobilized(extension_detail) def extension_detail(request, addon): return jingo.render(request, 'addons/mobile/details.html', {'addon': addon}) def _category_personas(qs, limit): f = lambda: randslice(qs, limit=limit) key = 'cat-personas:' + qs.query_key() return caching.cached(f, key) @mobile_template('addons/{mobile/}persona_detail.html') def persona_detail(request, addon, template=None): """Details page for Personas.""" if not addon.is_public(): raise http.Http404 persona = addon.persona # this persona's categories categories = addon.categories.filter(application=request.APP.id) if categories: qs = Addon.objects.public().filter(categories=categories[0]) category_personas = _category_personas(qs, limit=6) else: category_personas = None data = { 'addon': addon, 'persona': persona, 'categories': categories, 'author_personas': persona.authors_other_addons(request.APP)[:3], 'category_personas': category_personas, } if not persona.is_new(): # Remora uses persona.author despite there being a display_username. 
data['author_gallery'] = settings.PERSONAS_USER_ROOT % persona.author if not request.MOBILE: # tags dev_tags, user_tags = addon.tags_partitioned_by_developer data.update({ 'dev_tags': dev_tags, 'user_tags': user_tags, 'review_form': ReviewForm(), 'reviews': Review.objects.valid().filter(addon=addon, is_latest=True), 'get_replies': Review.get_replies, 'search_cat': 'personas', 'abuse_form': AbuseForm(request=request), }) return jingo.render(request, template, data) class BaseFilter(object): """ Filters help generate querysets for add-on listings. You have to define ``opts`` on the subclass as a sequence of (key, title) pairs. The key is used in GET parameters and the title can be used in the view. The chosen filter field is combined with the ``base`` queryset using the ``key`` found in request.GET. ``default`` should be a key in ``opts`` that's used if nothing good is found in request.GET. """ def __init__(self, request, base, key, default, model=Addon): self.opts_dict = dict(self.opts) self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {} self.request = request self.base_queryset = base self.key = key self.model = model self.field, self.title = self.options(self.request, key, default) self.qs = self.filter(self.field) def options(self, request, key, default): """Get the (option, title) pair we want according to the request.""" if key in request.GET and (request.GET[key] in self.opts_dict or request.GET[key] in self.extras_dict): opt = request.GET[key] else: opt = default if opt in self.opts_dict: title = self.opts_dict[opt] else: title = self.extras_dict[opt] return opt, title def all(self): """Get a full mapping of {option: queryset}.""" return dict((field, self.filter(field)) for field in dict(self.opts)) def filter(self, field): """Get the queryset for the given field.""" filter = self._filter(field) & self.base_queryset order = getattr(self, 'order_%s' % field, None) if order: return order(filter) return filter def _filter(self, field): return getattr(self, 'filter_%s' % field)() def filter_featured(self): ids = self.model.featured_random(self.request.APP, self.request.LANG) return manual_order(self.model.objects, ids, 'addons.id') def filter_price(self): return self.model.objects.order_by('addonpremium__price__price', 'id') def filter_free(self): if self.model == Addon: return self.model.objects.top_free(self.request.APP, listed=False) else: return self.model.objects.top_free(listed=False) def filter_paid(self): if self.model == Addon: return self.model.objects.top_paid(self.request.APP, listed=False) else: return self.model.objects.top_paid(listed=False) def filter_popular(self): return (self.model.objects.order_by('-weekly_downloads') .with_index(addons='downloads_type_idx')) def filter_downloads(self): return self.filter_popular() def filter_users(self): return (self.model.objects.order_by('-average_daily_users') .with_index(addons='adus_type_idx')) def filter_created(self): return (self.model.objects.order_by('-created') .with_index(addons='created_type_idx')) def filter_updated(self): return (self.model.objects.order_by('-last_updated') .with_index(addons='last_updated_type_idx')) def filter_rating(self): return (self.model.objects.order_by('-bayesian_rating') .with_index(addons='rating_type_idx')) def filter_hotness(self): return self.model.objects.order_by('-hotness') def filter_name(self): return order_by_translation(self.model.objects.all(), 'name') class ESBaseFilter(BaseFilter): """BaseFilter that uses elasticsearch.""" def __init__(self, request, base, 
key, default): super(ESBaseFilter, self).__init__(request, base, key, default) def filter(self, field): sorts = {'name': 'name_sort', 'created': '-created', 'updated': '-last_updated', 'popular': '-weekly_downloads', 'users': '-average_daily_users', 'rating': '-bayesian_rating'} return self.base_queryset.order_by(sorts[field]) class HomepageFilter(BaseFilter): opts = (('featured', _lazy(u'Featured')), ('popular', _lazy(u'Popular')), ('new', _lazy(u'Recently Added')), ('updated', _lazy(u'Recently Updated'))) filter_new = BaseFilter.filter_created def home(request): # Add-ons. base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION) # This is lame for performance. Kill it with ES. frozen = list(FrozenAddon.objects.values_list('addon', flat=True)) # Collections. collections = Collection.objects.filter(listed=True, application=request.APP.id, type=amo.COLLECTION_FEATURED) featured = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_EXTENSION)[:18] popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10] hotness = base.exclude(id__in=frozen).order_by('-hotness')[:18] personas = Addon.objects.featured(request.APP, request.LANG, amo.ADDON_PERSONA)[:18] return jingo.render(request, 'addons/home.html', {'popular': popular, 'featured': featured, 'hotness': hotness, 'personas': personas, 'src': 'homepage', 'collections': collections}) @mobilized(home) def home(request): # Shuffle the list and get 3 items. rand = lambda xs: random.shuffle(xs) or xs[:3] # Get some featured add-ons with randomness. featured = Addon.featured_random(request.APP, request.LANG)[:3] # Get 10 popular add-ons, then pick 3 at random. qs = list(Addon.objects.listed(request.APP) .filter(type=amo.ADDON_EXTENSION) .order_by('-average_daily_users') .values_list('id', flat=True)[:10]) popular = rand(qs) # Do one query and split up the add-ons. addons = (Addon.objects.filter(id__in=featured + popular) .filter(type=amo.ADDON_EXTENSION)) featured = [a for a in addons if a.id in featured] popular = sorted([a for a in addons if a.id in popular], key=attrgetter('average_daily_users'), reverse=True) return jingo.render(request, 'addons/mobile/home.html', {'featured': featured, 'popular': popular}) def homepage_promos(request): from discovery.views import promos version, platform = request.GET.get('version'), request.GET.get('platform') if not (platform or version): raise http.Http404 return promos(request, 'home', version, platform) class CollectionPromoBox(object): def __init__(self, request): self.request = request def features(self): return CollectionFeature.objects.all() def collections(self): features = self.features() lang = translation.to_language(translation.get_language()) locale = Q(locale='') | Q(locale=lang) promos = (CollectionPromo.objects.filter(locale) .filter(collection_feature__in=features) .transform(CollectionPromo.transformer)) groups = sorted_groupby(promos, 'collection_feature_id') # We key by feature_id and locale, so we can favor locale specific # promos. promo_dict = {} for feature_id, v in groups: promo = v.next() key = (feature_id, translation.to_language(promo.locale)) promo_dict[key] = promo rv = {} # If we can, we favor locale specific collections. for feature in features: key = (feature.id, lang) if key not in promo_dict: key = (feature.id, '') if key not in promo_dict: continue # We only want to see public add-ons on the front page. 
c = promo_dict[key].collection c.public_addons = c.addons.all() & Addon.objects.public() rv[feature] = c return rv def __nonzero__(self): return self.request.APP == amo.FIREFOX @addon_view def eula(request, addon, file_id=None): if not addon.eula: return http.HttpResponseRedirect(addon.get_url_path()) if file_id: version = get_object_or_404(addon.versions, files__id=file_id) else: version = addon.current_version return jingo.render(request, 'addons/eula.html', {'addon': addon, 'version': version}) @addon_view def privacy(request, addon): if not addon.privacy_policy: return http.HttpResponseRedirect(addon.get_url_path()) return jingo.render(request, 'addons/privacy.html', {'addon': addon}) @addon_view def developers(request, addon, page): if addon.is_persona(): raise http.Http404() if 'version' in request.GET: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=request.GET['version'])[0] else: version = addon.current_version if 'src' in request.GET: contribution_src = src = request.GET['src'] else: page_srcs = { 'developers': ('developers', 'meet-developers'), 'installed': ('meet-the-developer-post-install', 'post-download'), 'roadblock': ('meetthedeveloper_roadblock', 'roadblock'), } # Download src and contribution_src are different. src, contribution_src = page_srcs.get(page) return jingo.render(request, 'addons/impala/developers.html', {'addon': addon, 'page': page, 'src': src, 'contribution_src': contribution_src, 'version': version}) # TODO(andym): remove this once we figure out how to process for # anonymous users. For now we are concentrating on logged in users. @login_required @addon_view @can_be_purchased @has_not_purchased @write @post_required def purchase(request, addon): log.debug('Starting purchase of addon: %s by user: %s' % (addon.pk, request.amo_user.pk)) amount = addon.premium.get_price() source = request.POST.get('source', '') uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest() # l10n: {0} is the addon name contrib_for = _(u'Purchase of {0}').format(jinja2.escape(addon.name)) # Default is USD. amount, currency = addon.premium.get_price(), 'USD' # If tier is specified, then let's look it up. 
form = PriceCurrencyForm(data=request.POST, addon=addon) if form.is_valid(): tier = form.get_tier() if tier: amount, currency = tier.price, tier.currency paykey, status, error = '', '', '' preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() try: pattern = 'addons.purchase.finished' slug = addon.slug if addon.is_webapp(): pattern = 'apps.purchase.finished' slug = addon.app_slug paykey, status = paypal.get_paykey( dict(amount=amount, chains=settings.PAYPAL_CHAINS, currency=currency, email=addon.paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern=pattern, preapproval=preapproval, qs={'realurl': request.POST.get('realurl')}, slug=slug, uuid=uuid_)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, uuid_, 'PayKey Failure', 'PAYKEYFAIL', 'There was an error getting the paykey') log.error('Error getting paykey, purchase of addon: %s' % addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, amount=amount, source=source, source_locale=request.LANG, uuid=str(uuid_), type=amo.CONTRIB_PENDING, paykey=paykey, user=request.amo_user) log.debug('Storing contrib for uuid: %s' % uuid_) # If this was a pre-approval, it's completed already, we'll # double check this with PayPal, just to be sure nothing went wrong. if status == 'COMPLETED': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase', 'PURCHASE', 'A user purchased using pre-approval') log.debug('Status is completed for uuid: %s' % uuid_) if paypal.check_purchase(paykey) == 'COMPLETED': log.debug('Check purchase is completed for uuid: %s' % uuid_) contrib.type = amo.CONTRIB_PURCHASE else: # In this case PayPal disagreed, we should not be trusting # what get_paykey said. Which is a worry. log.error('Check purchase failed on uuid: %s' % uuid_) status = 'NOT-COMPLETED' contrib.save() else: log.error('No paykey present for uuid: %s' % uuid_) log.debug('Got paykey for addon: %s by user: %s' % (addon.pk, request.amo_user.pk)) url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.POST.get('result_type') == 'json' or request.is_ajax(): return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json') # This is the non-Ajax fallback. if status != 'COMPLETED': return http.HttpResponseRedirect(url) messages.success(request, _('Purchase complete')) return http.HttpResponseRedirect(shared_url('addons.detail', addon)) # TODO(andym): again, remove this once we figure out logged out flow. @csrf_exempt @login_required @addon_view @can_be_purchased @write def purchase_complete(request, addon, status): result = '' if status == 'complete': uuid_ = request.GET.get('uuid') log.debug('Looking up contrib for uuid: %s' % uuid_) # The IPN may, or may not have come through. Which means looking for # a for pre or post IPN contributions. If both fail, then we've not # got a matching contribution. 
lookup = (Q(uuid=uuid_, type=amo.CONTRIB_PENDING) | Q(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE)) con = get_object_or_404(Contribution, lookup) log.debug('Check purchase paypal addon: %s, user: %s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10])) try: result = paypal.check_purchase(con.paykey) if result == 'ERROR': paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'Checking purchase state returned error') raise except: paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail', 'PURCHASEFAIL', 'There was an error checking purchase state') log.error('Check purchase paypal addon: %s, user: %s, paykey: %s' % (addon.pk, request.amo_user.pk, con.paykey[:10]), exc_info=True) result = 'ERROR' status = 'error' log.debug('Paypal returned: %s for paykey: %s' % (result, con.paykey[:10])) if result == 'COMPLETED' and con.type == amo.CONTRIB_PENDING: con.update(type=amo.CONTRIB_PURCHASE) context = {'realurl': request.GET.get('realurl', ''), 'status': status, 'result': result} # For mobile, bounce back to the details page. if request.MOBILE: url = urlparams(shared_url('detail', addon), **context) return http.HttpResponseRedirect(url) context.update({'addon': addon}) response = jingo.render(request, 'addons/paypal_result.html', context) response['x-frame-options'] = 'allow' return response @login_required @addon_view @can_be_purchased @has_purchased def purchase_thanks(request, addon): download = urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download} if addon.is_webapp(): installed, c = Installed.objects.safer_get_or_create( addon=addon, user=request.amo_user) data['receipt'] = installed.receipt return jingo.render(request, 'addons/paypal_thanks.html', data) @login_required @addon_view @can_be_purchased def purchase_error(request, addon): data = {'addon': addon, 'is_ajax': request.is_ajax()} return jingo.render(request, 'addons/paypal_error.html', data) @addon_view @anonymous_csrf_exempt @post_required def contribute(request, addon): webapp = addon.is_webapp() contrib_type = request.POST.get('type', 'suggested') is_suggested = contrib_type == 'suggested' source = request.POST.get('source', '') comment = request.POST.get('comment', '') amount = { 'suggested': addon.suggested_amount, 'onetime': request.POST.get('onetime-amount', '') }.get(contrib_type, '') if not amount: amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION # This is all going to get shoved into solitude. Temporary. form = ContributionForm({'amount': amount}) if not form.is_valid(): return http.HttpResponse(json.dumps({'error': 'Invalid data.', 'status': '', 'url': '', 'paykey': ''}), content_type='application/json') contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest() if addon.charity: # TODO(andym): Figure out how to get this in the addon authors # locale, rather than the contributors locale. 
name, paypal_id = (u'%s: %s' % (addon.name, addon.charity.name), addon.charity.paypal) else: name, paypal_id = addon.name, addon.paypal_id # l10n: {0} is the addon name contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name)) preapproval = None if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user: preapproval = request.amo_user.get_preapproval() paykey, error, status = '', '', '' try: paykey, status = paypal.get_paykey( dict(amount=amount, email=paypal_id, ip=request.META.get('REMOTE_ADDR'), memo=contrib_for, pattern='%s.paypal' % ('apps' if webapp else 'addons'), preapproval=preapproval, slug=addon.slug, uuid=contribution_uuid)) except paypal.PaypalError as error: paypal.paypal_log_cef(request, addon, contribution_uuid, 'PayKey Failure', 'PAYKEYFAIL', 'There was an error getting the paykey') log.error('Error getting paykey, contribution for addon: %s' % addon.pk, exc_info=True) if paykey: contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id, amount=amount, source=source, source_locale=request.LANG, annoying=addon.annoying, uuid=str(contribution_uuid), is_suggested=is_suggested, suggested_amount=addon.suggested_amount, comment=comment, paykey=paykey) contrib.save() url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey) if request.GET.get('result_type') == 'json' or request.is_ajax(): # If there was an error getting the paykey, then JSON will # not have a paykey and the JS can cope appropriately. return http.HttpResponse(json.dumps({'url': url, 'paykey': paykey, 'error': str(error), 'status': status}), content_type='application/json') return http.HttpResponseRedirect(url) @csrf_exempt @addon_view def paypal_result(request, addon, status): uuid = request.GET.get('uuid') if not uuid: raise http.Http404() if status == 'cancel': log.info('User cancelled contribution: %s' % uuid) else: log.info('User completed contribution: %s' % uuid) response = jingo.render(request, 'addons/paypal_result.html', {'addon': addon, 'status': status}) response['x-frame-options'] = 'allow' return response @addon_view @can_be_purchased @anonymous_csrf def paypal_start(request, addon=None): download = urlparse(request.GET.get('realurl', '')).path data = {'addon': addon, 'is_ajax': request.is_ajax(), 'download': download, 'currencies': addon.premium.price.currencies()} if request.user.is_authenticated(): return jingo.render(request, 'addons/paypal_start.html', data) from users.views import _login return _login(request, data=data, template='addons/paypal_start.html', dont_redirect=True) @addon_view def share(request, addon): """Add-on sharing""" return share_redirect(request, addon, addon.name, addon.summary) @addon_view def license(request, addon, version=None): if version is not None: qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES) version = get_list_or_404(qs, version=version)[0] else: version = addon.current_version if not (version and version.license): raise http.Http404 return jingo.render(request, 'addons/impala/license.html', dict(addon=addon, version=version)) def license_redirect(request, version): version = get_object_or_404(Version, pk=version) return redirect(version.license_url(), permanent=True) @session_csrf.anonymous_csrf_exempt @addon_view def report_abuse(request, addon): form = AbuseForm(request.POST or None, request=request) if request.method == "POST" and form.is_valid(): send_abuse_report(request, addon, form.cleaned_data['text']) messages.success(request, _('Abuse reported.')) return 
http.HttpResponseRedirect(addon.get_url_path()) else: return jingo.render(request, 'addons/report_abuse_full.html', {'addon': addon, 'abuse_form': form, }) @cache_control(max_age=60 * 60 * 24) def persona_redirect(request, persona_id): persona = get_object_or_404(Persona, persona_id=persona_id) to = reverse('addons.detail', args=[persona.addon.slug]) return http.HttpResponsePermanentRedirect(to)
[ "abuse.models.send_abuse_report", "amo.utils.randslice", "random.shuffle", "reviews.models.GroupedRating.get", "mobility.decorators.mobile_template", "bandwagon.models.Collection.objects.filter", "django.views.decorators.cache.cache_control", "json.dumps", "users.views._login", "django.http.HttpResponseRedirect", "tower.ugettext", "amo.models.manual_order", "reviews.models.Review.objects.valid", "reviews.forms.ReviewForm", "amo.utils.sorted_groupby", "bandwagon.models.CollectionFeature.objects.all", "amo.helpers.shared_url", "discovery.views.promos", "django.http.Http404", "caching.base.cached", "django.views.decorators.vary.vary_on_headers", "market.forms.PriceCurrencyForm", "waffle.flag_is_active", "amo.urlresolvers.reverse", "django.utils.translation.trans_real.get_language", "django.shortcuts.get_list_or_404", "amo.forms.AbuseForm", "django.http.HttpResponseBadRequest", "sharing.views.share", "django.shortcuts.get_object_or_404", "tower.ugettext_lazy", "operator.attrgetter", "functools.wraps", "paypal.check_purchase", "django.http.HttpResponsePermanentRedirect", "bandwagon.models.Collection.objects.listed", "uuid.uuid4", "django.shortcuts.redirect", "jingo.render", "mobility.decorators.mobilized", "django.db.models.Q", "mkt.webapps.models.Installed.objects.safer_get_or_create", "bandwagon.models.CollectionPromo.objects.filter", "django.utils.translation.trans_real.to_language", "amo.urlresolvers.get_url_prefix", "jinja2.escape", "paypal.paypal_log_cef" ]
[((3944, 3979), 'django.views.decorators.vary.vary_on_headers', 'vary_on_headers', (['"""X-Requested-With"""'], {}), "('X-Requested-With')\n", (3959, 3979), False, 'from django.views.decorators.vary import vary_on_headers\n'), ((6012, 6039), 'mobility.decorators.mobilized', 'mobilized', (['extension_detail'], {}), '(extension_detail)\n', (6021, 6039), False, 'from mobility.decorators import mobilized, mobile_template\n'), ((6343, 6397), 'mobility.decorators.mobile_template', 'mobile_template', (['"""addons/{mobile/}persona_detail.html"""'], {}), "('addons/{mobile/}persona_detail.html')\n", (6358, 6397), False, 'from mobility.decorators import mobilized, mobile_template\n'), ((13435, 13450), 'mobility.decorators.mobilized', 'mobilized', (['home'], {}), '(home)\n', (13444, 13450), False, 'from mobility.decorators import mobilized, mobile_template\n'), ((31068, 31103), 'django.views.decorators.cache.cache_control', 'cache_control', ([], {'max_age': '(60 * 60 * 24)'}), '(max_age=60 * 60 * 24)\n', (31081, 31103), False, 'from django.views.decorators.cache import cache_control\n'), ((2236, 2254), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (2251, 2254), False, 'import functools\n'), ((6089, 6158), 'jingo.render', 'jingo.render', (['request', '"""addons/mobile/details.html"""', "{'addon': addon}"], {}), "(request, 'addons/mobile/details.html', {'addon': addon})\n", (6101, 6158), False, 'import jingo\n'), ((6317, 6339), 'caching.base.cached', 'caching.cached', (['f', 'key'], {}), '(f, key)\n', (6331, 6339), True, 'import caching.base as caching\n'), ((7816, 7853), 'jingo.render', 'jingo.render', (['request', 'template', 'data'], {}), '(request, template, data)\n', (7828, 7853), False, 'import jingo\n'), ((12583, 12684), 'bandwagon.models.Collection.objects.filter', 'Collection.objects.filter', ([], {'listed': '(True)', 'application': 'request.APP.id', 'type': 'amo.COLLECTION_FEATURED'}), '(listed=True, application=request.APP.id, type=amo\n .COLLECTION_FEATURED)\n', (12608, 12684), False, 'from bandwagon.models import Collection, CollectionFeature, CollectionPromo\n'), ((13183, 13365), 'jingo.render', 'jingo.render', (['request', '"""addons/home.html"""', "{'popular': popular, 'featured': featured, 'hotness': hotness, 'personas':\n personas, 'src': 'homepage', 'collections': collections}"], {}), "(request, 'addons/home.html', {'popular': popular, 'featured':\n featured, 'hotness': hotness, 'personas': personas, 'src': 'homepage',\n 'collections': collections})\n", (13195, 13365), False, 'import jingo\n'), ((14320, 14416), 'jingo.render', 'jingo.render', (['request', '"""addons/mobile/home.html"""', "{'featured': featured, 'popular': popular}"], {}), "(request, 'addons/mobile/home.html', {'featured': featured,\n 'popular': popular})\n", (14332, 14416), False, 'import jingo\n'), ((14660, 14702), 'discovery.views.promos', 'promos', (['request', '"""home"""', 'version', 'platform'], {}), "(request, 'home', version, platform)\n", (14666, 14702), False, 'from discovery.views import promos\n'), ((16469, 16548), 'jingo.render', 'jingo.render', (['request', '"""addons/eula.html"""', "{'addon': addon, 'version': version}"], {}), "(request, 'addons/eula.html', {'addon': addon, 'version': version})\n", (16481, 16548), False, 'import jingo\n'), ((16724, 16786), 'jingo.render', 'jingo.render', (['request', '"""addons/privacy.html"""', "{'addon': addon}"], {}), "(request, 'addons/privacy.html', {'addon': addon})\n", (16736, 16786), False, 'import jingo\n'), ((17580, 17744), 'jingo.render', 
'jingo.render', (['request', '"""addons/impala/developers.html"""', "{'addon': addon, 'page': page, 'src': src, 'contribution_src':\n contribution_src, 'version': version}"], {}), "(request, 'addons/impala/developers.html', {'addon': addon,\n 'page': page, 'src': src, 'contribution_src': contribution_src,\n 'version': version})\n", (17592, 17744), False, 'import jingo\n'), ((18559, 18608), 'market.forms.PriceCurrencyForm', 'PriceCurrencyForm', ([], {'data': 'request.POST', 'addon': 'addon'}), '(data=request.POST, addon=addon)\n', (18576, 18608), False, 'from market.forms import PriceCurrencyForm\n'), ((24322, 24381), 'jingo.render', 'jingo.render', (['request', '"""addons/paypal_result.html"""', 'context'], {}), "(request, 'addons/paypal_result.html', context)\n", (24334, 24381), False, 'import jingo\n'), ((24891, 24947), 'jingo.render', 'jingo.render', (['request', '"""addons/paypal_thanks.html"""', 'data'], {}), "(request, 'addons/paypal_thanks.html', data)\n", (24903, 24947), False, 'import jingo\n'), ((25101, 25156), 'jingo.render', 'jingo.render', (['request', '"""addons/paypal_error.html"""', 'data'], {}), "(request, 'addons/paypal_error.html', data)\n", (25113, 25156), False, 'import jingo\n'), ((28695, 28725), 'django.http.HttpResponseRedirect', 'http.HttpResponseRedirect', (['url'], {}), '(url)\n', (28720, 28725), False, 'from django import http\n'), ((29047, 29137), 'jingo.render', 'jingo.render', (['request', '"""addons/paypal_result.html"""', "{'addon': addon, 'status': status}"], {}), "(request, 'addons/paypal_result.html', {'addon': addon,\n 'status': status})\n", (29059, 29137), False, 'import jingo\n'), ((29683, 29770), 'users.views._login', '_login', (['request'], {'data': 'data', 'template': '"""addons/paypal_start.html"""', 'dont_redirect': '(True)'}), "(request, data=data, template='addons/paypal_start.html',\n dont_redirect=True)\n", (29689, 29770), False, 'from users.views import _login\n'), ((29862, 29919), 'sharing.views.share', 'share_redirect', (['request', 'addon', 'addon.name', 'addon.summary'], {}), '(request, addon, addon.name, addon.summary)\n', (29876, 29919), True, 'from sharing.views import share as share_redirect\n'), ((30434, 30472), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Version'], {'pk': 'version'}), '(Version, pk=version)\n', (30451, 30472), False, 'from django.shortcuts import get_list_or_404, get_object_or_404, redirect\n'), ((30627, 30675), 'amo.forms.AbuseForm', 'AbuseForm', (['(request.POST or None)'], {'request': 'request'}), '(request.POST or None, request=request)\n', (30636, 30675), False, 'from amo.forms import AbuseForm\n'), ((31161, 31210), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Persona'], {'persona_id': 'persona_id'}), '(Persona, persona_id=persona_id)\n', (31178, 31210), False, 'from django.shortcuts import get_list_or_404, get_object_or_404, redirect\n'), ((31220, 31271), 'amo.urlresolvers.reverse', 'reverse', (['"""addons.detail"""'], {'args': '[persona.addon.slug]'}), "('addons.detail', args=[persona.addon.slug])\n", (31227, 31271), False, 'from amo.urlresolvers import reverse\n'), ((31283, 31321), 'django.http.HttpResponsePermanentRedirect', 'http.HttpResponsePermanentRedirect', (['to'], {}), '(to)\n', (31317, 31321), False, 'from django import http\n'), ((2929, 3015), 'jingo.render', 'jingo.render', (['request', '"""addons/impala/disabled.html"""', "{'addon': addon}"], {'status': '(404)'}), "(request, 'addons/impala/disabled.html', {'addon': addon},\n status=404)\n", (2941, 3015), False, 
'import jingo\n'), ((4227, 4256), 'amo.urlresolvers.get_url_prefix', 'urlresolvers.get_url_prefix', ([], {}), '()\n', (4254, 4256), False, 'from amo import urlresolvers\n'), ((4321, 4374), 'django.shortcuts.redirect', 'redirect', (['"""addons.detail"""', 'addon.slug'], {'permanent': '(True)'}), "('addons.detail', addon.slug, permanent=True)\n", (4329, 4374), False, 'from django.shortcuts import get_list_or_404, get_object_or_404, redirect\n'), ((4453, 4479), 'django.utils.translation.trans_real.get_language', 'translation.get_language', ([], {}), '()\n', (4477, 4479), True, 'from django.utils.translation import trans_real as translation\n'), ((5115, 5142), 'reviews.models.GroupedRating.get', 'GroupedRating.get', (['addon.id'], {}), '(addon.id)\n', (5132, 5142), False, 'from reviews.models import Review, GroupedRating\n'), ((5207, 5219), 'reviews.forms.ReviewForm', 'ReviewForm', ([], {}), '()\n', (5217, 5219), False, 'from reviews.forms import ReviewForm\n'), ((5430, 5456), 'amo.forms.AbuseForm', 'AbuseForm', ([], {'request': 'request'}), '(request=request)\n', (5439, 5456), False, 'from amo.forms import AbuseForm\n'), ((5788, 5849), 'jingo.render', 'jingo.render', (['request', '"""addons/impala/details-more.html"""', 'ctx'], {}), "(request, 'addons/impala/details-more.html', ctx)\n", (5800, 5849), False, 'import jingo\n'), ((5952, 6008), 'jingo.render', 'jingo.render', (['request', '"""addons/impala/details.html"""', 'ctx'], {}), "(request, 'addons/impala/details.html', ctx)\n", (5964, 6008), False, 'import jingo\n'), ((6236, 6262), 'amo.utils.randslice', 'randslice', (['qs'], {'limit': 'limit'}), '(qs, limit=limit)\n', (6245, 6262), False, 'from amo.utils import randslice, sorted_groupby, urlparams\n'), ((9885, 9935), 'amo.models.manual_order', 'manual_order', (['self.model.objects', 'ids', '"""addons.id"""'], {}), "(self.model.objects, ids, 'addons.id')\n", (9897, 9935), False, 'from amo.models import manual_order\n'), ((14844, 14875), 'bandwagon.models.CollectionFeature.objects.all', 'CollectionFeature.objects.all', ([], {}), '()\n', (14873, 14875), False, 'from bandwagon.models import Collection, CollectionFeature, CollectionPromo\n'), ((15245, 15292), 'amo.utils.sorted_groupby', 'sorted_groupby', (['promos', '"""collection_feature_id"""'], {}), "(promos, 'collection_feature_id')\n", (15259, 15292), False, 'from amo.utils import randslice, sorted_groupby, urlparams\n'), ((16355, 16407), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['addon.versions'], {'files__id': 'file_id'}), '(addon.versions, files__id=file_id)\n', (16372, 16407), False, 'from django.shortcuts import get_list_or_404, get_object_or_404, redirect\n'), ((16880, 16894), 'django.http.Http404', 'http.Http404', ([], {}), '()\n', (16892, 16894), False, 'from django import http\n'), ((18390, 18415), 'jinja2.escape', 'jinja2.escape', (['addon.name'], {}), '(addon.name)\n', (18403, 18415), False, 'import jinja2\n'), ((18808, 18856), 'waffle.flag_is_active', 'waffle.flag_is_active', (['request', '"""allow-pre-auth"""'], {}), "(request, 'allow-pre-auth')\n", (18829, 18856), False, 'import waffle\n'), ((21936, 21966), 'django.http.HttpResponseRedirect', 'http.HttpResponseRedirect', (['url'], {}), '(url)\n', (21961, 21966), False, 'from django import http\n'), ((21998, 22020), 'tower.ugettext', '_', (['"""Purchase complete"""'], {}), "('Purchase complete')\n", (21999, 22020), True, 'from tower import ugettext as _, ugettext_lazy as _lazy\n'), ((22059, 22093), 'amo.helpers.shared_url', 'shared_url', 
(['"""addons.detail"""', 'addon'], {}), "('addons.detail', addon)\n", (22069, 22093), False, 'from amo.helpers import shared_url\n'), ((22763, 22802), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Contribution', 'lookup'], {}), '(Contribution, lookup)\n', (22780, 22802), False, 'from django.shortcuts import get_list_or_404, get_object_or_404, redirect\n'), ((24238, 24268), 'django.http.HttpResponseRedirect', 'http.HttpResponseRedirect', (['url'], {}), '(url)\n', (24263, 24268), False, 'from django import http\n'), ((24748, 24821), 'mkt.webapps.models.Installed.objects.safer_get_or_create', 'Installed.objects.safer_get_or_create', ([], {'addon': 'addon', 'user': 'request.amo_user'}), '(addon=addon, user=request.amo_user)\n', (24785, 24821), False, 'from mkt.webapps.models import Installed\n'), ((26587, 26606), 'jinja2.escape', 'jinja2.escape', (['name'], {}), '(name)\n', (26600, 26606), False, 'import jinja2\n'), ((26639, 26687), 'waffle.flag_is_active', 'waffle.flag_is_active', (['request', '"""allow-pre-auth"""'], {}), "(request, 'allow-pre-auth')\n", (26660, 26687), False, 'import waffle\n'), ((28862, 28876), 'django.http.Http404', 'http.Http404', ([], {}), '()\n', (28874, 28876), False, 'from django import http\n'), ((29580, 29635), 'jingo.render', 'jingo.render', (['request', '"""addons/paypal_start.html"""', 'data'], {}), "(request, 'addons/paypal_start.html', data)\n", (29592, 29635), False, 'import jingo\n'), ((30737, 30797), 'abuse.models.send_abuse_report', 'send_abuse_report', (['request', 'addon', "form.cleaned_data['text']"], {}), "(request, addon, form.cleaned_data['text'])\n", (30754, 30797), False, 'from abuse.models import send_abuse_report\n'), ((30942, 31038), 'jingo.render', 'jingo.render', (['request', '"""addons/report_abuse_full.html"""', "{'addon': addon, 'abuse_form': form}"], {}), "(request, 'addons/report_abuse_full.html', {'addon': addon,\n 'abuse_form': form})\n", (30954, 31038), False, 'import jingo\n'), ((3754, 3783), 'amo.urlresolvers.get_url_prefix', 'urlresolvers.get_url_prefix', ([], {}), '()\n', (3781, 3783), False, 'from amo import urlresolvers\n'), ((4793, 4820), 'bandwagon.models.Collection.objects.listed', 'Collection.objects.listed', ([], {}), '()\n', (4818, 4820), False, 'from bandwagon.models import Collection, CollectionFeature, CollectionPromo\n'), ((12098, 12116), 'tower.ugettext_lazy', '_lazy', (['u"""Featured"""'], {}), "(u'Featured')\n", (12103, 12116), True, 'from tower import ugettext as _, ugettext_lazy as _lazy\n'), ((12143, 12160), 'tower.ugettext_lazy', '_lazy', (['u"""Popular"""'], {}), "(u'Popular')\n", (12148, 12160), True, 'from tower import ugettext as _, ugettext_lazy as _lazy\n'), ((12183, 12207), 'tower.ugettext_lazy', '_lazy', (['u"""Recently Added"""'], {}), "(u'Recently Added')\n", (12188, 12207), True, 'from tower import ugettext as _, ugettext_lazy as _lazy\n'), ((12234, 12260), 'tower.ugettext_lazy', '_lazy', (['u"""Recently Updated"""'], {}), "(u'Recently Updated')\n", (12239, 12260), True, 'from tower import ugettext as _, ugettext_lazy as _lazy\n'), ((13532, 13550), 'random.shuffle', 'random.shuffle', (['xs'], {}), '(xs)\n', (13546, 13550), False, 'import random\n'), ((14260, 14293), 'operator.attrgetter', 'attrgetter', (['"""average_daily_users"""'], {}), "('average_daily_users')\n", (14270, 14293), False, 'from operator import attrgetter\n'), ((14978, 15004), 'django.utils.translation.trans_real.get_language', 'translation.get_language', ([], {}), '()\n', (15002, 15004), True, 'from 
django.utils.translation import trans_real as translation\n'), ((15023, 15035), 'django.db.models.Q', 'Q', ([], {'locale': '""""""'}), "(locale='')\n", (15024, 15035), False, 'from django.db.models import Q\n'), ((15038, 15052), 'django.db.models.Q', 'Q', ([], {'locale': 'lang'}), '(locale=lang)\n', (15039, 15052), False, 'from django.db.models import Q\n'), ((17019, 17070), 'django.shortcuts.get_list_or_404', 'get_list_or_404', (['qs'], {'version': "request.GET['version']"}), "(qs, version=request.GET['version'])\n", (17034, 17070), False, 'from django.shortcuts import get_list_or_404, get_object_or_404, redirect\n'), ((18361, 18382), 'tower.ugettext', '_', (['u"""Purchase of {0}"""'], {}), "(u'Purchase of {0}')\n", (18362, 18382), True, 'from tower import ugettext as _, ugettext_lazy as _lazy\n'), ((19673, 19794), 'paypal.paypal_log_cef', 'paypal.paypal_log_cef', (['request', 'addon', 'uuid_', '"""PayKey Failure"""', '"""PAYKEYFAIL"""', '"""There was an error getting the paykey"""'], {}), "(request, addon, uuid_, 'PayKey Failure', 'PAYKEYFAIL',\n 'There was an error getting the paykey')\n", (19694, 19794), False, 'import paypal\n'), ((20508, 20619), 'paypal.paypal_log_cef', 'paypal.paypal_log_cef', (['request', 'addon', 'uuid_', '"""Purchase"""', '"""PURCHASE"""', '"""A user purchased using pre-approval"""'], {}), "(request, addon, uuid_, 'Purchase', 'PURCHASE',\n 'A user purchased using pre-approval')\n", (20529, 20619), False, 'import paypal\n'), ((22637, 22676), 'django.db.models.Q', 'Q', ([], {'uuid': 'uuid_', 'type': 'amo.CONTRIB_PENDING'}), '(uuid=uuid_, type=amo.CONTRIB_PENDING)\n', (22638, 22676), False, 'from django.db.models import Q\n'), ((22697, 22747), 'django.db.models.Q', 'Q', ([], {'transaction_id': 'uuid_', 'type': 'amo.CONTRIB_PURCHASE'}), '(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE)\n', (22698, 22747), False, 'from django.db.models import Q\n'), ((22982, 23015), 'paypal.check_purchase', 'paypal.check_purchase', (['con.paykey'], {}), '(con.paykey)\n', (23003, 23015), False, 'import paypal\n'), ((24183, 24210), 'amo.helpers.shared_url', 'shared_url', (['"""detail"""', 'addon'], {}), "('detail', addon)\n", (24193, 24210), False, 'from amo.helpers import shared_url\n'), ((25863, 25940), 'json.dumps', 'json.dumps', (["{'error': 'Invalid data.', 'status': '', 'url': '', 'paykey': ''}"], {}), "({'error': 'Invalid data.', 'status': '', 'url': '', 'paykey': ''})\n", (25873, 25940), False, 'import json\n'), ((26553, 26579), 'tower.ugettext', '_', (['u"""Contribution for {0}"""'], {}), "(u'Contribution for {0}')\n", (26554, 26579), True, 'from tower import ugettext as _, ugettext_lazy as _lazy\n'), ((27253, 27386), 'paypal.paypal_log_cef', 'paypal.paypal_log_cef', (['request', 'addon', 'contribution_uuid', '"""PayKey Failure"""', '"""PAYKEYFAIL"""', '"""There was an error getting the paykey"""'], {}), "(request, addon, contribution_uuid, 'PayKey Failure',\n 'PAYKEYFAIL', 'There was an error getting the paykey')\n", (27274, 27386), False, 'import paypal\n'), ((30096, 30132), 'django.shortcuts.get_list_or_404', 'get_list_or_404', (['qs'], {'version': 'version'}), '(qs, version=version)\n', (30111, 30132), False, 'from django.shortcuts import get_list_or_404, get_object_or_404, redirect\n'), ((30832, 30852), 'tower.ugettext', '_', (['"""Abuse reported."""'], {}), "('Abuse reported.')\n", (30833, 30852), True, 'from tower import ugettext as _, ugettext_lazy as _lazy\n'), ((2558, 2600), 'amo.urlresolvers.reverse', 'reverse', (['"""addons.detail"""'], {'args': '[target_id]'}), 
"('addons.detail', args=[target_id])\n", (2565, 2600), False, 'from amo.urlresolvers import reverse\n'), ((2665, 2714), 'django.http.HttpResponseBadRequest', 'http.HttpResponseBadRequest', (['"""Invalid add-on ID."""'], {}), "('Invalid add-on ID.')\n", (2692, 2714), False, 'from django import http\n'), ((3879, 3922), 'amo.urlresolvers.reverse', 'reverse', (['"""addons.detail"""'], {'args': '[addon.slug]'}), "('addons.detail', args=[addon.slug])\n", (3886, 3922), False, 'from amo.urlresolvers import reverse\n'), ((5240, 5262), 'reviews.models.Review.objects.valid', 'Review.objects.valid', ([], {}), '()\n', (5260, 5262), False, 'from reviews.models import Review, GroupedRating\n'), ((7504, 7516), 'reviews.forms.ReviewForm', 'ReviewForm', ([], {}), '()\n', (7514, 7516), False, 'from reviews.forms import ReviewForm\n'), ((7765, 7791), 'amo.forms.AbuseForm', 'AbuseForm', ([], {'request': 'request'}), '(request=request)\n', (7774, 7791), False, 'from amo.forms import AbuseForm\n'), ((15508, 15545), 'django.utils.translation.trans_real.to_language', 'translation.to_language', (['promo.locale'], {}), '(promo.locale)\n', (15531, 15545), True, 'from django.utils.translation import trans_real as translation\n'), ((20766, 20795), 'paypal.check_purchase', 'paypal.check_purchase', (['paykey'], {}), '(paykey)\n', (20787, 20795), False, 'import paypal\n'), ((23066, 23189), 'paypal.paypal_log_cef', 'paypal.paypal_log_cef', (['request', 'addon', 'uuid_', '"""Purchase Fail"""', '"""PURCHASEFAIL"""', '"""Checking purchase state returned error"""'], {}), "(request, addon, uuid_, 'Purchase Fail',\n 'PURCHASEFAIL', 'Checking purchase state returned error')\n", (23087, 23189), False, 'import paypal\n'), ((23312, 23439), 'paypal.paypal_log_cef', 'paypal.paypal_log_cef', (['request', 'addon', 'uuid_', '"""Purchase Fail"""', '"""PURCHASEFAIL"""', '"""There was an error checking purchase state"""'], {}), "(request, addon, uuid_, 'Purchase Fail',\n 'PURCHASEFAIL', 'There was an error checking purchase state')\n", (23333, 23439), False, 'import paypal\n'), ((18282, 18294), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (18292, 18294), False, 'import uuid\n'), ((26139, 26151), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (26149, 26151), False, 'import uuid\n'), ((7541, 7563), 'reviews.models.Review.objects.valid', 'Review.objects.valid', ([], {}), '()\n', (7561, 7563), False, 'from reviews.models import Review, GroupedRating\n'), ((15071, 15109), 'bandwagon.models.CollectionPromo.objects.filter', 'CollectionPromo.objects.filter', (['locale'], {}), '(locale)\n', (15101, 15109), False, 'from bandwagon.models import Collection, CollectionFeature, CollectionPromo\n')]
# Copyright 2022 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import json import os import random import numpy as np import pandas as pd from preprocess_dicom import dicom_preprocess from sklearn.model_selection import GroupKFold # density labels # 1 - fatty # 2 - scattered fibroglandular density # 3 - heterogeneously dense # 4 - extremely dense def preprocess(dicom_root, out_path, ids, images, densities, process_image=True): data_list = [] dc_tags = [] saved_filenames = [] assert len(ids) == len(images) == len(densities) for i, (id, image, density) in enumerate(zip(ids, images, densities)): if (i + 1) % 200 == 0: print(f"processing {i+1} of {len(ids)}...") dir_name = image.split(os.path.sep)[0] img_file = glob.glob( os.path.join(dicom_root, dir_name, "**", "*.dcm"), recursive=True ) assert len(img_file) == 1, f"No unique dicom image found for {dir_name}!" save_prefix = os.path.join(out_path, dir_name) if process_image: _success, _dc_tags = dicom_preprocess(img_file[0], save_prefix) else: if os.path.isfile(save_prefix + ".npy"): _success = True else: _success = False _dc_tags = [] if _success and density >= 1: # label can be 0 sometimes, excluding those cases dc_tags.append(_dc_tags) data_list.append( { "patient_id": id, "image": dir_name + ".npy", "label": int(density - 1), } ) saved_filenames.append(dir_name + ".npy") return data_list, dc_tags, saved_filenames def write_datalist(save_datalist_file, data_set): os.makedirs(os.path.dirname(save_datalist_file), exist_ok=True) with open(save_datalist_file, "w") as f: json.dump(data_set, f, indent=4) print(f"Data list saved at {save_datalist_file}") def get_indices(all_ids, search_ids): indices = [] for _id in search_ids: _indices = np.where(all_ids == _id) indices.extend(_indices[0].tolist()) return indices def main(): process_image = True # set False if dicoms have already been preprocessed out_path = "./data/preprocessed" # YOUR DEST FOLDER SHOULD BE WRITTEN HERE out_dataset_prefix = "./data/dataset" # Input folders label_root = "/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/" dicom_root = "/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM" n_clients = 3 """ Run preprocessing """ """ 1. 
Load the label data """ random.seed(0) label_files = [ os.path.join(label_root, "mass_case_description_train_set.csv"), os.path.join(label_root, "calc_case_description_train_set.csv"), os.path.join(label_root, "mass_case_description_test_set.csv"), os.path.join(label_root, "calc_case_description_test_set.csv"), ] breast_densities = [] patients_ids = [] image_file_path = [] # read annotations for label_file in label_files: print(f"add {label_file}") label_data = pd.read_csv(label_file) unique_images, unique_indices = np.unique( label_data["image file path"], return_index=True ) print( f"including {len(unique_images)} unique images of {len(label_data['image file path'])} image entries" ) try: breast_densities.extend(label_data["breast_density"][unique_indices]) except BaseException: breast_densities.extend(label_data["breast density"][unique_indices]) patients_ids.extend(label_data["patient_id"][unique_indices]) image_file_path.extend(label_data["image file path"][unique_indices]) assert len(breast_densities) == len(patients_ids) == len(image_file_path), ( f"Mismatch between label data, breast_densities: " f"{len(breast_densities)}, patients_ids: {len(patients_ids)}, image_file_path: {len(image_file_path)}" ) print(f"Read {len(image_file_path)} data entries.") """ 2. Split the data """ # shuffle data label_data = list(zip(breast_densities, patients_ids, image_file_path)) random.shuffle(label_data) breast_densities, patients_ids, image_file_path = zip(*label_data) # Split data breast_densities = np.array(breast_densities) patients_ids = np.array(patients_ids) image_file_path = np.array(image_file_path) unique_patient_ids = np.unique(patients_ids) n_patients = len(unique_patient_ids) print(f"Found {n_patients} patients.") # generate splits using roughly the same ratios as for challenge data: n_train_challenge = 60_000 n_val_challenge = 6_500 n_test_challenge = 40_000 test_ratio = n_test_challenge / ( n_train_challenge + n_val_challenge + n_test_challenge ) val_ratio = n_val_challenge / ( n_val_challenge + n_test_challenge ) # test cases will be removed at this point # use groups to avoid patient overlaps # test split n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * test_ratio))) print( f"Splitting into {n_splits} folds for test split. (Only the first fold is used.)" ) group_kfold = GroupKFold(n_splits=n_splits) for train_val_index, test_index in group_kfold.split( image_file_path, breast_densities, groups=patients_ids ): break # just use first fold test_images = image_file_path[test_index] test_patients_ids = patients_ids[test_index] test_densities = breast_densities[test_index] # train/val splits train_val_images = image_file_path[train_val_index] train_val_patients_ids = patients_ids[train_val_index] train_val_densities = breast_densities[train_val_index] n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * val_ratio))) print( f"Splitting into {n_splits} folds for train/val splits. 
(Only the first fold is used.)" ) group_kfold = GroupKFold(n_splits=n_splits) for train_index, val_index in group_kfold.split( train_val_images, train_val_densities, groups=train_val_patients_ids ): break # just use first fold train_images = train_val_images[train_index] train_patients_ids = train_val_patients_ids[train_index] train_densities = train_val_densities[train_index] val_images = train_val_images[val_index] val_patients_ids = train_val_patients_ids[val_index] val_densities = train_val_densities[val_index] # check that there is no patient overlap assert ( len(np.intersect1d(train_patients_ids, val_patients_ids)) == 0 ), "Overlapping patients in train and validation!" assert ( len(np.intersect1d(train_patients_ids, test_patients_ids)) == 0 ), "Overlapping patients in train and test!" assert ( len(np.intersect1d(val_patients_ids, test_patients_ids)) == 0 ), "Overlapping patients in validation and test!" n_total = len(train_images) + len(val_images) + len(test_images) print(20 * "-") print(f"Train : {len(train_images)} ({100*len(train_images)/n_total:.2f}%)") print(f"Val : {len(val_images)} ({100*len(val_images)/n_total:.2f}%)") print(f"Test : {len(test_images)} ({100*len(test_images)/n_total:.2f}%)") print(20 * "-") print(f"Total : {n_total}") assert n_total == len(image_file_path), ( f"mismatch between total split images ({n_total})" f" and length of all images {len(image_file_path)}!" ) """ split train/validation dataset for n_clients """ # Split and avoid patient overlap unique_train_patients_ids = np.unique(train_patients_ids) split_train_patients_ids = np.array_split(unique_train_patients_ids, n_clients) unique_val_patients_ids = np.unique(val_patients_ids) split_val_patients_ids = np.array_split(unique_val_patients_ids, n_clients) unique_test_patients_ids = np.unique(test_patients_ids) split_test_patients_ids = np.array_split(unique_test_patients_ids, n_clients) """ 3. 
Preprocess the images """ dc_tags = [] saved_filenames = [] for c in range(n_clients): site_name = f"site-{c+1}" print(f"Preprocessing training set of client {site_name}") _curr_patient_ids = split_train_patients_ids[c] _curr_indices = get_indices(train_patients_ids, _curr_patient_ids) train_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, train_patients_ids[_curr_indices], train_images[_curr_indices], train_densities[_curr_indices], process_image=process_image, ) print( f"Converted {len(train_list)} of {len(train_patients_ids)} training images" ) dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print("Preprocessing validation") _curr_patient_ids = split_val_patients_ids[c] _curr_indices = get_indices(val_patients_ids, _curr_patient_ids) val_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, val_patients_ids[_curr_indices], val_images[_curr_indices], val_densities[_curr_indices], process_image=process_image, ) print(f"Converted {len(val_list)} of {len(val_patients_ids)} validation images") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) print("Preprocessing testing") _curr_patient_ids = split_test_patients_ids[c] _curr_indices = get_indices(test_patients_ids, _curr_patient_ids) test_list, _dc_tags, _saved_filenames = preprocess( dicom_root, out_path, test_patients_ids[_curr_indices], test_images[_curr_indices], test_densities[_curr_indices], process_image=process_image, ) print(f"Converted {len(test_list)} of {len(test_patients_ids)} testing images") dc_tags.extend(_dc_tags) saved_filenames.extend(_saved_filenames) data_set = { "train": train_list, # will stay the same for both phases "test1": val_list, # like phase 1 leaderboard "test2": test_list, # like phase 2 - final leaderboard } write_datalist(f"{out_dataset_prefix}_{site_name}.json", data_set) print(50 * "=") print( f"Successfully converted a total {len(saved_filenames)} of {len(image_file_path)} images." ) # check that there were no duplicated files assert len(saved_filenames) == len( np.unique(saved_filenames) ), f"Not all generated files ({len(saved_filenames)}) are unique ({len(np.unique(saved_filenames))})!" print(f"Data lists saved wit prefix {out_dataset_prefix}") print(50 * "=") print("Processed unique DICOM tags", np.unique(dc_tags)) if __name__ == "__main__": main()
[ "json.dump", "pandas.read_csv", "random.shuffle", "os.path.dirname", "os.path.isfile", "numpy.where", "numpy.array", "random.seed", "sklearn.model_selection.GroupKFold", "preprocess_dicom.dicom_preprocess", "numpy.array_split", "numpy.intersect1d", "os.path.join", "numpy.unique" ]
[((3213, 3227), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (3224, 3227), False, 'import random\n'), ((4820, 4846), 'random.shuffle', 'random.shuffle', (['label_data'], {}), '(label_data)\n', (4834, 4846), False, 'import random\n'), ((4959, 4985), 'numpy.array', 'np.array', (['breast_densities'], {}), '(breast_densities)\n', (4967, 4985), True, 'import numpy as np\n'), ((5005, 5027), 'numpy.array', 'np.array', (['patients_ids'], {}), '(patients_ids)\n', (5013, 5027), True, 'import numpy as np\n'), ((5050, 5075), 'numpy.array', 'np.array', (['image_file_path'], {}), '(image_file_path)\n', (5058, 5075), True, 'import numpy as np\n'), ((5102, 5125), 'numpy.unique', 'np.unique', (['patients_ids'], {}), '(patients_ids)\n', (5111, 5125), True, 'import numpy as np\n'), ((5885, 5914), 'sklearn.model_selection.GroupKFold', 'GroupKFold', ([], {'n_splits': 'n_splits'}), '(n_splits=n_splits)\n', (5895, 5914), False, 'from sklearn.model_selection import GroupKFold\n'), ((6643, 6672), 'sklearn.model_selection.GroupKFold', 'GroupKFold', ([], {'n_splits': 'n_splits'}), '(n_splits=n_splits)\n', (6653, 6672), False, 'from sklearn.model_selection import GroupKFold\n'), ((8305, 8334), 'numpy.unique', 'np.unique', (['train_patients_ids'], {}), '(train_patients_ids)\n', (8314, 8334), True, 'import numpy as np\n'), ((8366, 8418), 'numpy.array_split', 'np.array_split', (['unique_train_patients_ids', 'n_clients'], {}), '(unique_train_patients_ids, n_clients)\n', (8380, 8418), True, 'import numpy as np\n'), ((8450, 8477), 'numpy.unique', 'np.unique', (['val_patients_ids'], {}), '(val_patients_ids)\n', (8459, 8477), True, 'import numpy as np\n'), ((8507, 8557), 'numpy.array_split', 'np.array_split', (['unique_val_patients_ids', 'n_clients'], {}), '(unique_val_patients_ids, n_clients)\n', (8521, 8557), True, 'import numpy as np\n'), ((8590, 8618), 'numpy.unique', 'np.unique', (['test_patients_ids'], {}), '(test_patients_ids)\n', (8599, 8618), True, 'import numpy as np\n'), ((8649, 8700), 'numpy.array_split', 'np.array_split', (['unique_test_patients_ids', 'n_clients'], {}), '(unique_test_patients_ids, n_clients)\n', (8663, 8700), True, 'import numpy as np\n'), ((1504, 1536), 'os.path.join', 'os.path.join', (['out_path', 'dir_name'], {}), '(out_path, dir_name)\n', (1516, 1536), False, 'import os\n'), ((2323, 2358), 'os.path.dirname', 'os.path.dirname', (['save_datalist_file'], {}), '(save_datalist_file)\n', (2338, 2358), False, 'import os\n'), ((2428, 2460), 'json.dump', 'json.dump', (['data_set', 'f'], {'indent': '(4)'}), '(data_set, f, indent=4)\n', (2437, 2460), False, 'import json\n'), ((2618, 2642), 'numpy.where', 'np.where', (['(all_ids == _id)'], {}), '(all_ids == _id)\n', (2626, 2642), True, 'import numpy as np\n'), ((3257, 3320), 'os.path.join', 'os.path.join', (['label_root', '"""mass_case_description_train_set.csv"""'], {}), "(label_root, 'mass_case_description_train_set.csv')\n", (3269, 3320), False, 'import os\n'), ((3330, 3393), 'os.path.join', 'os.path.join', (['label_root', '"""calc_case_description_train_set.csv"""'], {}), "(label_root, 'calc_case_description_train_set.csv')\n", (3342, 3393), False, 'import os\n'), ((3403, 3465), 'os.path.join', 'os.path.join', (['label_root', '"""mass_case_description_test_set.csv"""'], {}), "(label_root, 'mass_case_description_test_set.csv')\n", (3415, 3465), False, 'import os\n'), ((3475, 3537), 'os.path.join', 'os.path.join', (['label_root', '"""calc_case_description_test_set.csv"""'], {}), "(label_root, 'calc_case_description_test_set.csv')\n", (3487, 
3537), False, 'import os\n'), ((3734, 3757), 'pandas.read_csv', 'pd.read_csv', (['label_file'], {}), '(label_file)\n', (3745, 3757), True, 'import pandas as pd\n'), ((3798, 3857), 'numpy.unique', 'np.unique', (["label_data['image file path']"], {'return_index': '(True)'}), "(label_data['image file path'], return_index=True)\n", (3807, 3857), True, 'import numpy as np\n'), ((11575, 11593), 'numpy.unique', 'np.unique', (['dc_tags'], {}), '(dc_tags)\n', (11584, 11593), True, 'import numpy as np\n'), ((1324, 1373), 'os.path.join', 'os.path.join', (['dicom_root', 'dir_name', '"""**"""', '"""*.dcm"""'], {}), "(dicom_root, dir_name, '**', '*.dcm')\n", (1336, 1373), False, 'import os\n'), ((1596, 1638), 'preprocess_dicom.dicom_preprocess', 'dicom_preprocess', (['img_file[0]', 'save_prefix'], {}), '(img_file[0], save_prefix)\n', (1612, 1638), False, 'from preprocess_dicom import dicom_preprocess\n'), ((1668, 1704), 'os.path.isfile', 'os.path.isfile', (["(save_prefix + '.npy')"], {}), "(save_prefix + '.npy')\n", (1682, 1704), False, 'import os\n'), ((7238, 7290), 'numpy.intersect1d', 'np.intersect1d', (['train_patients_ids', 'val_patients_ids'], {}), '(train_patients_ids, val_patients_ids)\n', (7252, 7290), True, 'import numpy as np\n'), ((7377, 7430), 'numpy.intersect1d', 'np.intersect1d', (['train_patients_ids', 'test_patients_ids'], {}), '(train_patients_ids, test_patients_ids)\n', (7391, 7430), True, 'import numpy as np\n'), ((7511, 7562), 'numpy.intersect1d', 'np.intersect1d', (['val_patients_ids', 'test_patients_ids'], {}), '(val_patients_ids, test_patients_ids)\n', (7525, 7562), True, 'import numpy as np\n'), ((11316, 11342), 'numpy.unique', 'np.unique', (['saved_filenames'], {}), '(saved_filenames)\n', (11325, 11342), True, 'import numpy as np\n'), ((11418, 11444), 'numpy.unique', 'np.unique', (['saved_filenames'], {}), '(saved_filenames)\n', (11427, 11444), True, 'import numpy as np\n')]
"""Helpers to check core security.""" from datetime import timedelta from typing import List, Optional from ...const import CoreState from ...jobs.const import JobCondition, JobExecutionLimit from ...jobs.decorator import Job from ..const import ContextType, IssueType from .base import CheckBase class CheckAddonPwned(CheckBase): """CheckAddonPwned class for check.""" @Job( conditions=[JobCondition.INTERNET_SYSTEM], limit=JobExecutionLimit.THROTTLE, throttle_period=timedelta(hours=24), ) async def run_check(self) -> None: """Run check if not affected by issue.""" @Job(conditions=[JobCondition.INTERNET_SYSTEM]) async def approve_check(self, reference: Optional[str] = None) -> bool: """Approve check if it is affected by issue.""" return False @property def issue(self) -> IssueType: """Return a IssueType enum.""" return IssueType.PWNED @property def context(self) -> ContextType: """Return a ContextType enum.""" return ContextType.ADDON @property def states(self) -> List[CoreState]: """Return a list of valid states when this check can run.""" return [CoreState.RUNNING]
[ "datetime.timedelta" ]
[((505, 524), 'datetime.timedelta', 'timedelta', ([], {'hours': '(24)'}), '(hours=24)\n', (514, 524), False, 'from datetime import timedelta\n')]
from rest_framework import viewsets, mixins
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated

from core.models import Tag, Ingredient
from recipe import serializers


# tag and ingredients are attributes of a recipe
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
                            mixins.ListModelMixin,
                            mixins.CreateModelMixin):
    """Base ViewSet for user-owned recipe attributes"""
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """return objects for user only"""
        return self.queryset.filter(user=self.request.user).order_by("-name")

    def perform_create(self, serializer):
        """Create a new object"""
        return serializer.save(user=self.request.user)


class TagViewSet(BaseRecipeAttrViewSet):
    """Manage tags in the database"""
    queryset = Tag.objects.all()
    serializer_class = serializers.TagSerializer


class IngredientViewSet(BaseRecipeAttrViewSet):
    """Manage Ingredients in Database"""
    queryset = Ingredient.objects.all()
    serializer_class = serializers.IngredientSerializer
[ "core.models.Ingredient.objects.all", "core.models.Tag.objects.all" ]
[((974, 991), 'core.models.Tag.objects.all', 'Tag.objects.all', ([], {}), '()\n', (989, 991), False, 'from core.models import Tag, Ingredient\n'), ((1147, 1171), 'core.models.Ingredient.objects.all', 'Ingredient.objects.all', ([], {}), '()\n', (1169, 1171), False, 'from core.models import Tag, Ingredient\n')]
#!/usr/bin/env python3

import asyncio
import sys
import os


class EchoServer(asyncio.Protocol):
    clients = {}

    def connection_made(self, transport):
        peername = transport.get_extra_info('peername')
        print('connection from {}'.format(peername))
        self.transport = transport
        self.clients[transport] = None

    def data_received(self, data):
        # print('data received: {}'.format(data.decode()))
        for transport in self.clients:
            if transport == self.transport:
                pass
                #continue
            transport.write(data)

    def connection_lost(self, exc):
        print("connection lost")
        self.transport.close()
        del self.clients[self.transport]


server_address = sys.argv[1]

try:
    os.unlink(server_address)
except OSError:
    if os.path.exists(server_address):
        raise

loop = asyncio.get_event_loop()
#coro = loop.create_server(EchoServer, "127.0.0.1", 8888)
coro = loop.create_unix_server(EchoServer, server_address)
server = loop.run_until_complete(coro)

print('serving on {}'.format(server_address))

try:
    loop.run_forever()
except KeyboardInterrupt:
    print("exit")
finally:
    server.close()
    loop.close()
[ "asyncio.get_event_loop", "os.unlink", "os.path.exists" ]
[((882, 906), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (904, 906), False, 'import asyncio\n'), ((779, 804), 'os.unlink', 'os.unlink', (['server_address'], {}), '(server_address)\n', (788, 804), False, 'import os\n'), ((828, 858), 'os.path.exists', 'os.path.exists', (['server_address'], {}), '(server_address)\n', (842, 858), False, 'import os\n')]
# Import required packages
import sqlite3
import operations
import os
import time

# Establish connection with database
connection = sqlite3.connect("books.db")

# Instantiate cursor
cursor = connection.cursor()

# Create the books table
cursor.execute("""
    CREATE TABLE IF NOT EXISTS books (
        BookId INTEGER PRIMARY KEY,
        Title TEXT NOT NULL,
        Price DECIMAL(5, 2),
        Stock INTEGER
    )
""")

if __name__ == "__main__":
    # Print welcome screen
    operations.clear_screen()
    print("Hello \U0001f600\n")
    time.sleep(2)

    begin_page = None

    # Ask user for valid page number
    while begin_page not in range(1, 51):
        try:
            print("At what page would you like to start scraping?\n\n"
                  "Please type a number between 1 and 50.\n\n"
                  "The higher the number, the faster the program will be done.\n"
                  )
            begin_page = int(input("Number > "))
            if begin_page not in range(1, 51):
                raise Exception()
        except:
            operations.clear_screen()

    # Start scraping
    operations.clear_screen()
    print("Let the scraping begin! \U0001f600")
    time.sleep(2)
    book_urls = operations.scrape_book_urls(begin=begin_page)
    books = operations.scrape_books(book_urls)
    operations.write_to_csv_file(books)

    try:
        current_book = 1
        for book in books:
            query = """
            INSERT INTO [dbo].[Books] (title, price, stock)
            VALUES (?, ?, ?)"""
            cursor.execute(query, (book["title"], book["price"], book["stock"]))
            print(f"Saving book {current_book}/{len(books)} to the database")
            current_book += 1
            time.sleep(0.1)
    except:
        pass

    # Commit data to the database
    connection.commit()

    operations.clear_screen()
    print("Completed!\nNow type 'open books.csv' \U0001f600")
[ "operations.clear_screen", "operations.write_to_csv_file", "operations.scrape_books", "time.sleep", "sqlite3.connect", "operations.scrape_book_urls" ]
[((133, 160), 'sqlite3.connect', 'sqlite3.connect', (['"""books.db"""'], {}), "('books.db')\n", (148, 160), False, 'import sqlite3\n'), ((482, 507), 'operations.clear_screen', 'operations.clear_screen', ([], {}), '()\n', (505, 507), False, 'import operations\n'), ((544, 557), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (554, 557), False, 'import time\n'), ((1111, 1136), 'operations.clear_screen', 'operations.clear_screen', ([], {}), '()\n', (1134, 1136), False, 'import operations\n'), ((1189, 1202), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1199, 1202), False, 'import time\n'), ((1219, 1264), 'operations.scrape_book_urls', 'operations.scrape_book_urls', ([], {'begin': 'begin_page'}), '(begin=begin_page)\n', (1246, 1264), False, 'import operations\n'), ((1277, 1311), 'operations.scrape_books', 'operations.scrape_books', (['book_urls'], {}), '(book_urls)\n', (1300, 1311), False, 'import operations\n'), ((1316, 1351), 'operations.write_to_csv_file', 'operations.write_to_csv_file', (['books'], {}), '(books)\n', (1344, 1351), False, 'import operations\n'), ((1843, 1868), 'operations.clear_screen', 'operations.clear_screen', ([], {}), '()\n', (1866, 1868), False, 'import operations\n'), ((1739, 1754), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1749, 1754), False, 'import time\n'), ((1060, 1085), 'operations.clear_screen', 'operations.clear_screen', ([], {}), '()\n', (1083, 1085), False, 'import operations\n')]
from .contribution import Contribution
import numpy as np
from taurex.cache import OpacityCache


class AbsorptionContribution(Contribution):
    """
    Computes the contribution to the optical depth occuring from
    molecular absorption.
    """

    def __init__(self):
        super().__init__('Absorption')
        self._opacity_cache = OpacityCache()

    def prepare_each(self, model, wngrid):
        """
        Prepares each molecular opacity by weighting them
        by their mixing ratio in the atmosphere

        Parameters
        ----------
        model: :class:`~taurex.model.model.ForwardModel`
            Forward model

        wngrid: :obj:`array`
            Wavenumber grid

        Yields
        ------
        component: :obj:`tuple` of type (str, :obj:`array`)
            Name of molecule and weighted opacity
        """
        self.debug('Preparing model with %s', wngrid.shape)

        self._ngrid = wngrid.shape[0]

        sigma_xsec = np.zeros(shape=(model.nLayers, wngrid.shape[0]))

        # Get the opacity cache
        self._opacity_cache = OpacityCache()

        # Loop through all active gases
        for gas in model.chemistry.activeGases:

            # Clear sigma array
            sigma_xsec[...] = 0.0

            # Get the mix ratio of the gas
            gas_mix = model.chemistry.get_gas_mix_profile(gas)
            self.info('Recomputing active gas %s opacity', gas)

            # Get the cross section object relating to the gas
            xsec = self._opacity_cache[gas]

            # Loop through the layers
            for idx_layer, tp in enumerate(zip(model.temperatureProfile,
                                               model.pressureProfile)):
                self.debug('Got index,tp %s %s', idx_layer, tp)
                temperature, pressure = tp

                # Place into the array
                sigma_xsec[idx_layer] += \
                    xsec.opacity(temperature, pressure, wngrid)*gas_mix[idx_layer]

            # Temporarily assign to master cross-section
            self.sigma_xsec = sigma_xsec

            yield gas, sigma_xsec

    @property
    def sigma(self):
        """
        Returns the fused weighted cross-section of all active gases
        """
        return self.sigma_xsec
[ "numpy.zeros", "taurex.cache.OpacityCache" ]
[((344, 358), 'taurex.cache.OpacityCache', 'OpacityCache', ([], {}), '()\n', (356, 358), False, 'from taurex.cache import OpacityCache\n'), ((975, 1023), 'numpy.zeros', 'np.zeros', ([], {'shape': '(model.nLayers, wngrid.shape[0])'}), '(shape=(model.nLayers, wngrid.shape[0]))\n', (983, 1023), True, 'import numpy as np\n'), ((1087, 1101), 'taurex.cache.OpacityCache', 'OpacityCache', ([], {}), '()\n', (1099, 1101), False, 'from taurex.cache import OpacityCache\n')]
import sys sys.path.append('../') import matplotlib; matplotlib.use('macosx') import time import numpy as np import matplotlib.pyplot as plt import dolfin as dl; dl.set_log_level(40) # ROMML imports from fom.forward_solve import Fin from fom.thermal_fin import get_space from rom.averaged_affine_ROM import AffineROMFin from deep_learning.dl_model import load_parametric_model_avg, load_bn_model from gaussian_field import make_cov_chol # Tensorflow related imports from tensorflow.keras.optimizers import Adam class SolverWrapper: def __init__(self, solver, data): self.solver = solver self.data = data self.z = dl.Function(V) def cost_function(self, z_v): self.z.vector().set_local(z_v) w, y, A, B, C = self.solver.forward(self.z) y = self.solver.qoi_operator(w) reg_cost = dl.assemble(self.solver.reg) cost = 0.5 * np.linalg.norm(y - self.data)**2 # cost = cost + reg_cost return cost def gradient(self, z_v): self.z.vector().set_local(z_v) grad = self.solver.gradient(self.z, self.data) reg_grad = dl.assemble(self.solver.grad_reg)[:] # grad = grad + reg_grad return grad class ROMMLSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model = err_model self.solver_r = solver_r self.z = dl.Function(V) self.solver = solver self.data = self.solver_r.data self.cost = None self.grad = None def cost_function(self, z_v): self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) e_NN = self.err_model.predict([[z_v]])[0] self.solver._k.assign(self.z) y_romml = y_r + e_NN # self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_romml(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad class RSolverWrapper: def __init__(self, err_model, solver_r, solver): self.err_model = err_model self.solver_r = solver_r self.z = dl.Function(V) self.solver = solver self.data = self.solver_r.data self.cost = None self.grad = None def cost_function(self, z_v): self.z.vector().set_local(z_v) w_r = self.solver_r.forward_reduced(self.z) y_r = self.solver_r.qoi_reduced(w_r) self.solver._k.assign(self.z) # self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 + dl.assemble(self.solver.reg) self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 return self.cost def gradient(self, z_v): self.z.vector().set_local(z_v) self.solver._k.assign(self.z) self.grad, self.cost = self.solver_r.grad_reduced(self.z) # self.grad = self.grad + dl.assemble(self.solver.grad_reg) return self.grad resolution = 40 V = get_space(resolution) chol = make_cov_chol(V, length=1.2) solver = Fin(V, True) # Generate synthetic observations z_true = dl.Function(V) norm = np.random.randn(len(chol)) nodal_vals = np.exp(0.5 * chol.T @ norm) z_true.vector().set_local(nodal_vals) w, y, A, B, C = solver.forward(z_true) data = solver.qoi_operator(w) # Setup DL error model # err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim()) err_model = load_bn_model() # Initialize reduced order model phi = np.loadtxt('../data/basis_nine_param.txt',delimiter=",") solver_r = AffineROMFin(V, err_model, phi, True) solver_r.set_data(data) solver_romml = ROMMLSolverWrapper(err_model, solver_r, solver) solver_w = RSolverWrapper(err_model, solver_r, solver) solver_fom = SolverWrapper(solver, data) # Determine direction of gradient 
z = dl.Function(V) norm = np.random.randn(len(chol)) eps_z = np.exp(0.5 * chol.T @ norm) z.vector().set_local(eps_z) eps_norm = np.sqrt(dl.assemble(dl.inner(z,z)*dl.dx)) eps_norm = np.linalg.norm(eps_z) eps_z = eps_z/eps_norm # Determine location to evaluate gradient at norm = np.random.randn(len(chol)) z_ = np.exp(0.5 * chol.T @ norm) # Evaluate directional derivative using ROMML dir_grad = np.dot(solver_romml.gradient(z_), eps_z) print(f"Directional gradient ROMML: {dir_grad}") n_eps = 32 hs = np.power(2., -np.arange(n_eps)) err_grads = [] grads = [] pi_0 = solver_romml.cost_function(z_) for h in hs: pi_h = solver_romml.cost_function(z_ + h * eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) # err = abs(a_g - dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, "-ob", label="Error Grad") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, "-.k", label="First Order") plt.savefig('grad_test_ROMML.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, "-ob") plt.savefig('gradients_ROMML.png') plt.cla() plt.clf() err_grads = [] grads = [] pi_0 = solver_w.cost_function(z_) dir_grad = np.dot(solver_w.gradient(z_), eps_z) for h in hs: pi_h = solver_w.cost_function(z_ + h * eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) # err = abs(a_g - dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, "-ob", label="Error Grad") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, "-.k", label="First Order") plt.savefig('grad_test_ROM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, "-ob") plt.savefig('gradients_ROM.png') plt.cla() plt.clf() err_grads = [] grads = [] pi_0 = solver_fom.cost_function(z_) dir_grad = np.dot(solver_fom.gradient(z_), eps_z) for h in hs: pi_h = solver_fom.cost_function(z_ + h * eps_z) a_g = (pi_h - pi_0)/h grads.append(a_g) err = abs(a_g - dir_grad)/abs(dir_grad) err_grads.append(err) plt.loglog(hs, err_grads, "-ob", label="Error Grad") plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, "-.k", label="First Order") plt.savefig('grad_test_FOM.png', dpi=200) plt.cla() plt.clf() plt.semilogx(hs, grads, "-ob") plt.savefig('gradients_FOM.png') plt.cla() plt.clf() ##### ## Examine function behavior #### hs = np.linspace(0, 1, 500) pis = [] # grads = [] for h in hs: pi_h = solver_w.cost_function(z_ + h * eps_z) pis.append(pi_h) # grad = solver_w.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad, eps_z) # grads.append(dir_grad) pi_foms = [] # grads_fom = [] dir_grad_fom = np.dot(solver_fom.gradient(z_), eps_z) print(f"Direction gradient FOM: {dir_grad_fom}") for h in hs: pi_h = solver_fom.cost_function(z_ + h * eps_z) pi_foms.append(pi_h) # grad = solver_fom.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad, eps_z) # grads_fom.append(dir_grad) pi_rommls = [] # grads_romml = [] for h in hs: pi_h = solver_romml.cost_function(z_ + h * eps_z) pi_rommls.append(pi_h) # grad = solver_romml.gradient(z_ + h * eps_z) # dir_grad = np.dot(grad, eps_z) # grads_romml.append(dir_grad) plt.plot(hs, pi_foms) plt.savefig('func_dir_FOM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pis) plt.savefig('func_dir_ROM.png', dpi=200) plt.cla() plt.clf() plt.plot(hs, pi_rommls) plt.savefig('func_dir_ROMML.png', dpi=200) plt.cla() plt.clf() # plt.plot(hs, grads_fom) # plt.plot(hs, grads) # plt.plot(hs, grads_romml) # plt.legend(["FOM", "ROM", "ROMML"]) # plt.savefig('grad_dir.png', dpi=200)
[ "matplotlib.pyplot.loglog", "gaussian_field.make_cov_chol", "fom.thermal_fin.get_space", "matplotlib.pyplot.clf", "rom.averaged_affine_ROM.AffineROMFin", "numpy.linalg.norm", "numpy.exp", "numpy.arange", "sys.path.append", "dolfin.inner", "matplotlib.pyplot.cla", "numpy.loadtxt", "numpy.linspace", "fom.forward_solve.Fin", "matplotlib.use", "dolfin.set_log_level", "matplotlib.pyplot.semilogx", "dolfin.assemble", "matplotlib.pyplot.plot", "dolfin.Function", "deep_learning.dl_model.load_bn_model", "matplotlib.pyplot.savefig" ]
[((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((54, 78), 'matplotlib.use', 'matplotlib.use', (['"""macosx"""'], {}), "('macosx')\n", (68, 78), False, 'import matplotlib\n'), ((163, 183), 'dolfin.set_log_level', 'dl.set_log_level', (['(40)'], {}), '(40)\n', (179, 183), True, 'import dolfin as dl\n'), ((3232, 3253), 'fom.thermal_fin.get_space', 'get_space', (['resolution'], {}), '(resolution)\n', (3241, 3253), False, 'from fom.thermal_fin import get_space\n'), ((3261, 3289), 'gaussian_field.make_cov_chol', 'make_cov_chol', (['V'], {'length': '(1.2)'}), '(V, length=1.2)\n', (3274, 3289), False, 'from gaussian_field import make_cov_chol\n'), ((3299, 3311), 'fom.forward_solve.Fin', 'Fin', (['V', '(True)'], {}), '(V, True)\n', (3302, 3311), False, 'from fom.forward_solve import Fin\n'), ((3356, 3370), 'dolfin.Function', 'dl.Function', (['V'], {}), '(V)\n', (3367, 3370), True, 'import dolfin as dl\n'), ((3418, 3445), 'numpy.exp', 'np.exp', (['(0.5 * chol.T @ norm)'], {}), '(0.5 * chol.T @ norm)\n', (3424, 3445), True, 'import numpy as np\n'), ((3678, 3693), 'deep_learning.dl_model.load_bn_model', 'load_bn_model', ([], {}), '()\n', (3691, 3693), False, 'from deep_learning.dl_model import load_parametric_model_avg, load_bn_model\n'), ((3734, 3791), 'numpy.loadtxt', 'np.loadtxt', (['"""../data/basis_nine_param.txt"""'], {'delimiter': '""","""'}), "('../data/basis_nine_param.txt', delimiter=',')\n", (3744, 3791), True, 'import numpy as np\n'), ((3802, 3839), 'rom.averaged_affine_ROM.AffineROMFin', 'AffineROMFin', (['V', 'err_model', 'phi', '(True)'], {}), '(V, err_model, phi, True)\n', (3814, 3839), False, 'from rom.averaged_affine_ROM import AffineROMFin\n'), ((4064, 4078), 'dolfin.Function', 'dl.Function', (['V'], {}), '(V)\n', (4075, 4078), True, 'import dolfin as dl\n'), ((4121, 4148), 'numpy.exp', 'np.exp', (['(0.5 * chol.T @ norm)'], {}), '(0.5 * chol.T @ norm)\n', (4127, 4148), True, 'import numpy as np\n'), ((4241, 4262), 'numpy.linalg.norm', 'np.linalg.norm', (['eps_z'], {}), '(eps_z)\n', (4255, 4262), True, 'import numpy as np\n'), ((4371, 4398), 'numpy.exp', 'np.exp', (['(0.5 * chol.T @ norm)'], {}), '(0.5 * chol.T @ norm)\n', (4377, 4398), True, 'import numpy as np\n'), ((4881, 4933), 'matplotlib.pyplot.loglog', 'plt.loglog', (['hs', 'err_grads', '"""-ob"""'], {'label': '"""Error Grad"""'}), "(hs, err_grads, '-ob', label='Error Grad')\n", (4891, 4933), True, 'import matplotlib.pyplot as plt\n'), ((4934, 5009), 'matplotlib.pyplot.loglog', 'plt.loglog', (['hs', '(0.5 * err_grads[0] / hs[0] * hs)', '"""-.k"""'], {'label': '"""First Order"""'}), "(hs, 0.5 * err_grads[0] / hs[0] * hs, '-.k', label='First Order')\n", (4944, 5009), True, 'import matplotlib.pyplot as plt\n'), ((5005, 5048), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""grad_test_ROMML.png"""'], {'dpi': '(200)'}), "('grad_test_ROMML.png', dpi=200)\n", (5016, 5048), True, 'import matplotlib.pyplot as plt\n'), ((5049, 5058), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5056, 5058), True, 'import matplotlib.pyplot as plt\n'), ((5059, 5068), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5066, 5068), True, 'import matplotlib.pyplot as plt\n'), ((5070, 5100), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['hs', 'grads', '"""-ob"""'], {}), "(hs, grads, '-ob')\n", (5082, 5100), True, 'import matplotlib.pyplot as plt\n'), ((5101, 5135), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gradients_ROMML.png"""'], {}), "('gradients_ROMML.png')\n", 
(5112, 5135), True, 'import matplotlib.pyplot as plt\n'), ((5136, 5145), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5143, 5145), True, 'import matplotlib.pyplot as plt\n'), ((5146, 5155), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5153, 5155), True, 'import matplotlib.pyplot as plt\n'), ((5481, 5533), 'matplotlib.pyplot.loglog', 'plt.loglog', (['hs', 'err_grads', '"""-ob"""'], {'label': '"""Error Grad"""'}), "(hs, err_grads, '-ob', label='Error Grad')\n", (5491, 5533), True, 'import matplotlib.pyplot as plt\n'), ((5534, 5609), 'matplotlib.pyplot.loglog', 'plt.loglog', (['hs', '(0.5 * err_grads[0] / hs[0] * hs)', '"""-.k"""'], {'label': '"""First Order"""'}), "(hs, 0.5 * err_grads[0] / hs[0] * hs, '-.k', label='First Order')\n", (5544, 5609), True, 'import matplotlib.pyplot as plt\n'), ((5605, 5646), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""grad_test_ROM.png"""'], {'dpi': '(200)'}), "('grad_test_ROM.png', dpi=200)\n", (5616, 5646), True, 'import matplotlib.pyplot as plt\n'), ((5647, 5656), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5654, 5656), True, 'import matplotlib.pyplot as plt\n'), ((5657, 5666), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5664, 5666), True, 'import matplotlib.pyplot as plt\n'), ((5668, 5698), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['hs', 'grads', '"""-ob"""'], {}), "(hs, grads, '-ob')\n", (5680, 5698), True, 'import matplotlib.pyplot as plt\n'), ((5699, 5731), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gradients_ROM.png"""'], {}), "('gradients_ROM.png')\n", (5710, 5731), True, 'import matplotlib.pyplot as plt\n'), ((5732, 5741), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5739, 5741), True, 'import matplotlib.pyplot as plt\n'), ((5742, 5751), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5749, 5751), True, 'import matplotlib.pyplot as plt\n'), ((6050, 6102), 'matplotlib.pyplot.loglog', 'plt.loglog', (['hs', 'err_grads', '"""-ob"""'], {'label': '"""Error Grad"""'}), "(hs, err_grads, '-ob', label='Error Grad')\n", (6060, 6102), True, 'import matplotlib.pyplot as plt\n'), ((6103, 6178), 'matplotlib.pyplot.loglog', 'plt.loglog', (['hs', '(0.5 * err_grads[0] / hs[0] * hs)', '"""-.k"""'], {'label': '"""First Order"""'}), "(hs, 0.5 * err_grads[0] / hs[0] * hs, '-.k', label='First Order')\n", (6113, 6178), True, 'import matplotlib.pyplot as plt\n'), ((6174, 6215), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""grad_test_FOM.png"""'], {'dpi': '(200)'}), "('grad_test_FOM.png', dpi=200)\n", (6185, 6215), True, 'import matplotlib.pyplot as plt\n'), ((6216, 6225), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (6223, 6225), True, 'import matplotlib.pyplot as plt\n'), ((6226, 6235), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6233, 6235), True, 'import matplotlib.pyplot as plt\n'), ((6237, 6267), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['hs', 'grads', '"""-ob"""'], {}), "(hs, grads, '-ob')\n", (6249, 6267), True, 'import matplotlib.pyplot as plt\n'), ((6268, 6300), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gradients_FOM.png"""'], {}), "('gradients_FOM.png')\n", (6279, 6300), True, 'import matplotlib.pyplot as plt\n'), ((6301, 6310), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (6308, 6310), True, 'import matplotlib.pyplot as plt\n'), ((6311, 6320), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6318, 6320), True, 'import matplotlib.pyplot as plt\n'), ((6369, 6391), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(500)'], {}), '(0, 1, 
500)\n', (6380, 6391), True, 'import numpy as np\n'), ((7221, 7242), 'matplotlib.pyplot.plot', 'plt.plot', (['hs', 'pi_foms'], {}), '(hs, pi_foms)\n', (7229, 7242), True, 'import matplotlib.pyplot as plt\n'), ((7243, 7283), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""func_dir_FOM.png"""'], {'dpi': '(200)'}), "('func_dir_FOM.png', dpi=200)\n", (7254, 7283), True, 'import matplotlib.pyplot as plt\n'), ((7284, 7293), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (7291, 7293), True, 'import matplotlib.pyplot as plt\n'), ((7294, 7303), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7301, 7303), True, 'import matplotlib.pyplot as plt\n'), ((7305, 7322), 'matplotlib.pyplot.plot', 'plt.plot', (['hs', 'pis'], {}), '(hs, pis)\n', (7313, 7322), True, 'import matplotlib.pyplot as plt\n'), ((7323, 7363), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""func_dir_ROM.png"""'], {'dpi': '(200)'}), "('func_dir_ROM.png', dpi=200)\n", (7334, 7363), True, 'import matplotlib.pyplot as plt\n'), ((7364, 7373), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (7371, 7373), True, 'import matplotlib.pyplot as plt\n'), ((7374, 7383), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7381, 7383), True, 'import matplotlib.pyplot as plt\n'), ((7385, 7408), 'matplotlib.pyplot.plot', 'plt.plot', (['hs', 'pi_rommls'], {}), '(hs, pi_rommls)\n', (7393, 7408), True, 'import matplotlib.pyplot as plt\n'), ((7409, 7451), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""func_dir_ROMML.png"""'], {'dpi': '(200)'}), "('func_dir_ROMML.png', dpi=200)\n", (7420, 7451), True, 'import matplotlib.pyplot as plt\n'), ((7452, 7461), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (7459, 7461), True, 'import matplotlib.pyplot as plt\n'), ((7462, 7471), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7469, 7471), True, 'import matplotlib.pyplot as plt\n'), ((647, 661), 'dolfin.Function', 'dl.Function', (['V'], {}), '(V)\n', (658, 661), True, 'import dolfin as dl\n'), ((847, 875), 'dolfin.assemble', 'dl.assemble', (['self.solver.reg'], {}), '(self.solver.reg)\n', (858, 875), True, 'import dolfin as dl\n'), ((1384, 1398), 'dolfin.Function', 'dl.Function', (['V'], {}), '(V)\n', (1395, 1398), True, 'import dolfin as dl\n'), ((2421, 2435), 'dolfin.Function', 'dl.Function', (['V'], {}), '(V)\n', (2432, 2435), True, 'import dolfin as dl\n'), ((4578, 4594), 'numpy.arange', 'np.arange', (['n_eps'], {}), '(n_eps)\n', (4587, 4594), True, 'import numpy as np\n'), ((1127, 1160), 'dolfin.assemble', 'dl.assemble', (['self.solver.grad_reg'], {}), '(self.solver.grad_reg)\n', (1138, 1160), True, 'import dolfin as dl\n'), ((4208, 4222), 'dolfin.inner', 'dl.inner', (['z', 'z'], {}), '(z, z)\n', (4216, 4222), True, 'import dolfin as dl\n'), ((897, 926), 'numpy.linalg.norm', 'np.linalg.norm', (['(y - self.data)'], {}), '(y - self.data)\n', (911, 926), True, 'import numpy as np\n'), ((1930, 1965), 'numpy.linalg.norm', 'np.linalg.norm', (['(y_romml - self.data)'], {}), '(y_romml - self.data)\n', (1944, 1965), True, 'import numpy as np\n'), ((2884, 2915), 'numpy.linalg.norm', 'np.linalg.norm', (['(y_r - self.data)'], {}), '(y_r - self.data)\n', (2898, 2915), True, 'import numpy as np\n')]
import json

import pytest

from ansiblemetrics.metrics_cal import MetricsCal


class TestRoleTaskMetrics:

    def test_(self):
        metricCal = MetricsCal()
        js = json.loads(metricCal.calculate('testResources/configure.yml', 'atss'))

        assert 1 == js['bloc']['count']
        assert 1 == js['cloc']['count']
        assert 23 == js['loc']['count']
        assert 3 == js['nun']['count']
[ "ansiblemetrics.metrics_cal.MetricsCal" ]
[((148, 160), 'ansiblemetrics.metrics_cal.MetricsCal', 'MetricsCal', ([], {}), '()\n', (158, 160), False, 'from ansiblemetrics.metrics_cal import MetricsCal\n')]
'''
VARIABLES EXPECTED:
a) Trade-Off Parameter (Alpha)
b) Weight/Reputation Score (Gamma)
c) Last Time The Agent was selected (b)

RETURNS a LIST of addresses of SAMPLED AGENTS
'''

#agents_record = {"ETH_ADDRESS":[GAMMA,B_VAL]}

from dataForAgentSelection import agents_record
from collections import defaultdict,OrderedDict


def calc_sum(agents_record):
    sum_gamma = 0
    sum_b_val = 0
    for items in agents_record.keys():
        sum_gamma+=agents_record[items][0]
        sum_b_val+=agents_record[items][1]
    return sum_gamma,sum_b_val


def calc_probabilities(agents_record,trade_off_param):
    ret_mapping = defaultdict(int)
    sum_gamma,sum_b_val = calc_sum(agents_record)
    for items in agents_record.keys():
        agent_prob = (trade_off_param*(agents_record[items][0]/sum_gamma)) + ((1-trade_off_param)*(agents_record[items][1]/sum_b_val))
        ret_mapping[items] = agent_prob
    return ret_mapping


def sample_agents(number,final_structure):
    ret_list = []
    dd = OrderedDict(sorted(final_structure.items(), key = lambda x: x[1],reverse=True))
    dd = dict(dd)
    counter = 0
    for items in dd.keys():
        if counter == number:
            break
        ret_list.append(items)
        counter+=1
    return ret_list


##DRIVER##
if __name__ == '__main__':
    print("The Sampled Agents are:")
    #a_record = {"ascaadcadcac":[0.5,0.4],"ssacdcdac":[0.9,0.4],"adscdac":[0.8,0.9]}
    trade_off = 0.6
    final = calc_probabilities(agents_record,trade_off)
    print(sample_agents(6,final))
[ "collections.defaultdict", "dataForAgentSelection.agents_record.keys" ]
[((398, 418), 'dataForAgentSelection.agents_record.keys', 'agents_record.keys', ([], {}), '()\n', (416, 418), False, 'from dataForAgentSelection import agents_record\n'), ((594, 610), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (605, 610), False, 'from collections import defaultdict, OrderedDict\n'), ((673, 693), 'dataForAgentSelection.agents_record.keys', 'agents_record.keys', ([], {}), '()\n', (691, 693), False, 'from dataForAgentSelection import agents_record\n')]
import urllib.request
import os
from pathlib import Path


def get_project_root() -> Path:
    return Path(__file__).parent.parent


PROTOCOL = "http://"
FILES_URL_ROOT = PROTOCOL + "localhost:3000"
ROOT_DIR = get_project_root()


def download(uri):
    create_dirs_from_uri(uri)
    urllib.request.urlretrieve(f"{FILES_URL_ROOT}{uri}", f"{ROOT_DIR}{uri}")


def create_dirs_from_uri(path_string):
    file_path = '/'.join(path_string.split("/")[1:-1])
    if not os.path.isdir(file_path):
        os.makedirs(file_path)
[ "os.path.isdir", "pathlib.Path", "os.makedirs" ]
[((464, 488), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (477, 488), False, 'import os\n'), ((498, 520), 'os.makedirs', 'os.makedirs', (['file_path'], {}), '(file_path)\n', (509, 520), False, 'import os\n'), ((102, 116), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (106, 116), False, 'from pathlib import Path\n')]
# Faça um programa que leia um ângulo qualquer e mostre na tela o valor do seno, cosseno e tangente desse ângulo.
import math

angulo = float(input('Digite o valor do angulo:'))
coseno = math.cos(math.radians(angulo))
seno = math.sin(math.radians(angulo))
tangente = math.tan(math.radians(angulo))

print('Coseno de {} é {:.2f}'.format(angulo,coseno))
print('Seno de {} é {:.2f}'.format(angulo,seno))
print('Tangente de {} é {:.2f}'.format(angulo,tangente))
[ "math.radians" ]
[((196, 216), 'math.radians', 'math.radians', (['angulo'], {}), '(angulo)\n', (208, 216), False, 'import math\n'), ((234, 254), 'math.radians', 'math.radians', (['angulo'], {}), '(angulo)\n', (246, 254), False, 'import math\n'), ((276, 296), 'math.radians', 'math.radians', (['angulo'], {}), '(angulo)\n', (288, 296), False, 'import math\n')]
import re
from decimal import Decimal


def decode_mutez(value):
    return Decimal(value) / 10000


def decode_percent(value, decimals=2):
    return Decimal(value) / 10 ** decimals


def decode_split(value):
    return 1 - decode_percent(value, decimals=4)


def decode_hex(value):
    return value.decode()


def decode_info(info):
    data = info['data']
    return {
        'bakerName': decode_hex(data['bakerName']),
        'openForDelegation': data['openForDelegation'],
        'bakerOffchainRegistryUrl': decode_hex(data['bakerOffchainRegistryUrl']),
        'fee': str(decode_split(data['split'])),
        'bakerPaysFromAccounts': data['bakerPaysFromAccounts'],
        'minDelegation': str(decode_mutez(data['minDelegation'])),
        'subtractPayoutsLessThanMin': data['subtractPayoutsLessThanMin'],
        'payoutDelay': data['payoutDelay'],
        'payoutFrequency': data['payoutFrequency'],
        'minPayout': str(decode_mutez(data['minPayout'])),
        'bakerChargesTransactionFee': data['bakerChargesTransactionFee'],
        'paymentConfig': {
            'payForOwnBlocks': data['paymentConfigMask'] & 1 > 0,
            'payForStolenBlocks': data['paymentConfigMask'] & 2048 > 0,
            'compensateMissedBlocks': data['paymentConfigMask'] & 1024 == 0,
            'payForEndorsements': data['paymentConfigMask'] & 2 > 0,
            'compensateLowPriorityEndorsementLoss': data['paymentConfigMask'] & 8192 == 0,
            'compensateMissedEndorsements': data['paymentConfigMask'] & 4096 == 0,
            'payGainedFees': data['paymentConfigMask'] & 4 > 0,
            'payForAccusationGains': data['paymentConfigMask'] & 8 > 0,
            'subtractLostDepositsWhenAccused': data['paymentConfigMask'] & 16 > 0,
            'subtractLostRewardsWhenAccused': data['paymentConfigMask'] & 32 > 0,
            'subtractLostFeesWhenAccused': data['paymentConfigMask'] & 64 > 0,
            'payForRevelation': data['paymentConfigMask'] & 128 > 0,
            'subtractLostRewardsWhenMissRevelation': data['paymentConfigMask'] & 256 > 0,
            'subtractLostFeesWhenMissRevelation': data['paymentConfigMask'] & 512 > 0
        },
        'overDelegationThreshold': str(decode_percent(data['overDelegationThreshold'])),
        'subtractRewardsFromUninvitedDelegation': data['subtractRewardsFromUninvitedDelegation'],
        'reporterAccount': info['reporterAccount']
    }


def try_hex_encode(data):
    if re.match('^[0-9a-f]$', data) and len(data) % 2 == 0:
        return bytes.fromhex(data)
    else:
        return data.encode()


def encode_config_mask(data, default):
    if data.get('paymentConfigMask'):
        return int(data['paymentConfigMask'])
    if data.get('paymentConfig'):
        mask = 0
        config = data['paymentConfig']
        if config.get('payForOwnBlocks'):
            mask |= 1
        if config.get('payForStolenBlocks'):
            mask |= 2048
        if not config.get('compensateMissedBlocks'):
            mask |= 1024
        if config.get('payForEndorsements'):
            mask |= 2
        if not config.get('compensateLowPriorityEndorsementLoss'):
            mask |= 8192
        if not config.get('compensateMissedEndorsements'):
            mask |= 4096
        if config.get('payGainedFees'):
            mask |= 4
        if config.get('payForAccusationGains'):
            mask |= 8
        if config.get('subtractLostDepositsWhenAccused'):
            mask |= 16
        if config.get('subtractLostRewardsWhenAccused'):
            mask |= 32
        if config.get('subtractLostFeesWhenAccused'):
            mask |= 64
        if config.get('payForRevelation'):
            mask |= 128
        if config.get('subtractLostRewardsWhenMissRevelation'):
            mask |= 256
        if config.get('subtractLostFeesWhenMissRevelation'):
            mask |= 512
    return default


def encode_mutez(value):
    if isinstance(value, str):
        res = int(Decimal(value) * 10000)
    elif isinstance(value, int):
        res = value
    else:
        assert False, value
    assert res >= 0, 'Cannot be negative'
    return res


def encode_percent(value, decimals=2):
    factor = 10 ** decimals
    if isinstance(value, str):
        res = int(Decimal(value) * factor)
    elif isinstance(value, int):
        res = value
    else:
        assert False, value
    assert 0 <= res <= factor, f'Should be between 0 and {factor}'
    return res


def encode_split(data):
    if data.get('split'):
        res = int(data['split'])
    elif data.get('fee'):
        res = 10000 - encode_percent(data['fee'], decimals=4)
    else:
        res = 10000
    return res


def encode_info(info):
    return {
        'data': {'bakerName': try_hex_encode(info.get('bakerName', '')),
                 'openForDelegation': info.get('openForDelegation', True),
                 'bakerOffchainRegistryUrl': try_hex_encode(info.get('bakerOffchainRegistryUrl', '')),
                 'split': encode_split(info),
                 'bakerPaysFromAccounts': info.get('bakerPaysFromAccounts', []),
                 'minDelegation': encode_mutez(info.get('minDelegation', 0)),
                 'subtractPayoutsLessThanMin': info.get('subtractPayoutsLessThanMin', True),
                 'payoutDelay': info.get('payoutDelay', 0),
                 'payoutFrequency': info.get('payoutFrequency', 1),
                 'minPayout': encode_mutez(info.get('minPayout', 0)),
                 'bakerChargesTransactionFee': info.get('bakerChargesTransactionFee', False),
                 'paymentConfigMask': encode_config_mask(info, 16383),
                 'overDelegationThreshold': encode_percent(info.get('overDelegationThreshold', 100)),
                 'subtractRewardsFromUninvitedDelegation': info.get('subtractRewardsFromUninvitedDelegation', True)},
        'reporterAccount': info['reporterAccount']
    }


def decode_snapshot(snapshot: dict):
    return dict(map(lambda x: (x[0], decode_info(x[1])), snapshot.items()))
[ "re.match", "decimal.Decimal" ]
[((76, 90), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (83, 90), False, 'from decimal import Decimal\n'), ((151, 165), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (158, 165), False, 'from decimal import Decimal\n'), ((2445, 2473), 're.match', 're.match', (['"""^[0-9a-f]$"""', 'data'], {}), "('^[0-9a-f]$', data)\n", (2453, 2473), False, 'import re\n'), ((3947, 3961), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (3954, 3961), False, 'from decimal import Decimal\n'), ((4237, 4251), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (4244, 4251), False, 'from decimal import Decimal\n')]
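The paymentConfigMask handled by encode_config_mask and decode_info in the row above is a plain bit field. Below is a minimal, self-contained sketch of how those flags combine; the flag constants mirror values from the row's code, and the sample mask 16383 is the default that encode_info falls back to.

# Illustrative only: unpacking the payment-config bit mask used above.
PAY_FOR_OWN_BLOCKS = 1                 # bit set -> pay for own blocks
PAY_FOR_ENDORSEMENTS = 2               # bit set -> pay for endorsements
COMPENSATE_MISSED_BLOCKS_OFF = 1024    # bit set -> do NOT compensate missed blocks

mask = 16383  # default mask: all fourteen flags set (2**14 - 1)
assert mask & PAY_FOR_OWN_BLOCKS > 0
assert mask & PAY_FOR_ENDORSEMENTS > 0
assert mask & COMPENSATE_MISSED_BLOCKS_OFF > 0  # so decode_info reports compensateMissedBlocks=False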
from dataclasses import astuple, dataclass, fields from importlib import resources import os import sqlite3 from .exception import ArtifactNotFoundError, DuplicateArtifactError import logging LOG = logging.getLogger(__name__) DATABASE_NAME = 'chameleon' @dataclass class Artifact: id: str path: str deposition_repo: str ownership: str ARTIFACT_COLUMNS = [f.name for f in fields(Artifact)] class DB: IN_MEMORY = ':memory:' def __init__(self, database=None): if not database: raise ValueError('A database path is required') if database != DB.IN_MEMORY: try: os.makedirs(os.path.dirname(database), exist_ok=True) except OSError: LOG.exception(f'Failed to lazy-create DB path {database}') self.database = database self._conn = None def build_schema(self): with resources.open_text(__package__, 'db_schema.sql') as f: with self.connect() as conn: conn.executescript(f.read()) def reset(self): with self.connect() as conn: cur = conn.cursor() cur.execute('delete from artifacts') def list_artifacts(self): with self.connect() as conn: cur = conn.cursor() cur.execute(f'select {",".join(ARTIFACT_COLUMNS)} from artifacts') return [Artifact(*row) for row in cur.fetchall()] def insert_artifact(self, artifact: Artifact): with self.connect() as conn: cur = conn.cursor() cur.execute( (f'insert into artifacts ({",".join(ARTIFACT_COLUMNS)}) ' 'values (?, ?, ?, ?)'), astuple(artifact)) def update_artifact(self, artifact: Artifact): path = artifact.path with self.connect() as conn: cur = conn.cursor() cur.execute('select id from artifacts where path = ?', (path,)) found = cur.fetchall() if len(found) > 1: raise DuplicateArtifactError( 'Multiple artifacts already found at %s', path) elif found and found[0][0] is not None: raise DuplicateArtifactError( 'Would create duplicate artifact at %s: %s', path, found[0][0]) elif not found: raise ArtifactNotFoundError( 'Cannot find artifact at %s', path) updates = ','.join([f'{col}=?' for col in ARTIFACT_COLUMNS]) cur.execute(f'update artifacts set {updates} where path=?', astuple(artifact) + (path,)) def connect(self) -> sqlite3.Connection: if not self._conn: self._conn = sqlite3.connect(self.database) return self._conn
[ "importlib.resources.open_text", "os.path.dirname", "sqlite3.connect", "dataclasses.fields", "dataclasses.astuple", "logging.getLogger" ]
[((200, 227), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (217, 227), False, 'import logging\n'), ((392, 408), 'dataclasses.fields', 'fields', (['Artifact'], {}), '(Artifact)\n', (398, 408), False, 'from dataclasses import astuple, dataclass, fields\n'), ((903, 952), 'importlib.resources.open_text', 'resources.open_text', (['__package__', '"""db_schema.sql"""'], {}), "(__package__, 'db_schema.sql')\n", (922, 952), False, 'from importlib import resources\n'), ((2727, 2757), 'sqlite3.connect', 'sqlite3.connect', (['self.database'], {}), '(self.database)\n', (2742, 2757), False, 'import sqlite3\n'), ((1703, 1720), 'dataclasses.astuple', 'astuple', (['artifact'], {}), '(artifact)\n', (1710, 1720), False, 'from dataclasses import astuple, dataclass, fields\n'), ((656, 681), 'os.path.dirname', 'os.path.dirname', (['database'], {}), '(database)\n', (671, 681), False, 'import os\n'), ((2600, 2617), 'dataclasses.astuple', 'astuple', (['artifact'], {}), '(artifact)\n', (2607, 2617), False, 'from dataclasses import astuple, dataclass, fields\n')]
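A small sketch of the dataclass plumbing the DB class above relies on: fields() drives the ARTIFACT_COLUMNS list and astuple() supplies the insert parameters. The two-column Artifact here is a trimmed stand-in, not the row's real model.

from dataclasses import astuple, dataclass, fields

@dataclass
class Artifact:
    id: str
    path: str

print([f.name for f in fields(Artifact)])            # ['id', 'path']
print(astuple(Artifact(id='a1', path='/tmp/a1')))   # ('a1', '/tmp/a1')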
# License: Apache-2.0 from gators.encoders import WOEEncoder import pytest def test_init(): with pytest.raises(TypeError): WOEEncoder(dtype=str)
[ "pytest.raises", "gators.encoders.WOEEncoder" ]
[((103, 127), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (116, 127), False, 'import pytest\n'), ((137, 158), 'gators.encoders.WOEEncoder', 'WOEEncoder', ([], {'dtype': 'str'}), '(dtype=str)\n', (147, 158), False, 'from gators.encoders import WOEEncoder\n')]
from email.utils import formataddr, parseaddr import re emails_to_validate = int(input()) EMAIL_RE = re.compile( r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\.[a-zA-Z]{1,3}$' ) def validate_email(email_guess): return bool(EMAIL_RE.match(email_guess)) for i in range(emails_to_validate): real_name, email_address = parseaddr(input()) if not real_name and not email_address: continue if not validate_email(email_address): continue print(formataddr((real_name, email_address)))
[ "email.utils.formataddr", "re.compile" ]
[((104, 169), 're.compile', 're.compile', (['"""^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\\\\.[a-zA-Z]{1,3}$"""'], {}), "('^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\\\\.[a-zA-Z]{1,3}$')\n", (114, 169), False, 'import re\n'), ((477, 515), 'email.utils.formataddr', 'formataddr', (['(real_name, email_address)'], {}), '((real_name, email_address))\n', (487, 515), False, 'from email.utils import formataddr, parseaddr\n')]
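A short usage sketch of the validation pattern above (the sample addresses are made up): it requires a leading letter in the local part, a letters-only domain, and a one-to-three-letter TLD.

import re

EMAIL_RE = re.compile(r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\.[a-zA-Z]{1,3}$')

assert EMAIL_RE.match('lara@hackerrank.com')
assert not EMAIL_RE.match('1lara@hackerrank.com')    # local part must start with a letter
assert not EMAIL_RE.match('lara@hackerrank.comics')  # TLD limited to three letters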
import sys import pandas as pd from sqlalchemy import create_engine def load_data(messages_filepath, categories_filepath): """loads the specified message and category data Args: messages_filepath (string): The file path of the messages csv categories_filepath (string): The file path of the categories cv Returns: df (pandas dataframe): The combined messages and categories df """ messages = pd.read_csv(messages_filepath) categories = pd.read_csv(categories_filepath) return pd.merge(messages, categories, on='id') def clean_data(df): """Cleans the data: - drops duplicates - removes messages missing classes - cleans up the categories column Args: df (pandas dataframe): combined categories and messages df Returns: df (pandas dataframe): Cleaned dataframe with split categories """ # expand the categories column categories = df.categories.str.split(';', expand=True) row = categories[:1] # get the category names category_colnames = row.applymap(lambda s: s[:-2]).iloc[0, :].tolist() categories.columns = category_colnames # get only the last value in each value as an integer categories = categories.applymap(lambda s: int(s[-1])) # add the categories back to the original df df.drop('categories', axis=1, inplace=True) df = pd.concat([df, categories], axis=1) # clean up the final data df.drop_duplicates(subset='message', inplace=True) df.dropna(subset=category_colnames, inplace=True) df.related.replace(2, 0, inplace=True) return df def save_data(df, database_filename): """Saves the resulting data to a sqlite db Args: df (pandas dataframe): The cleaned dataframe database_filename (string): the file path to save the db Returns: None """ engine = create_engine('sqlite:///'+database_filename) df.to_sql('labeled_messages', engine, index=False, if_exists='replace') engine.dispose() def main(): if len(sys.argv) == 4: messages_filepath, categories_filepath, database_filepath = sys.argv[1:] print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}' .format(messages_filepath, categories_filepath)) df = load_data(messages_filepath, categories_filepath) print('Cleaning data...') df = clean_data(df) print('Saving data...\n DATABASE: {}'.format(database_filepath)) save_data(df, database_filepath) print('Cleaned data saved to database!') else: print('Please provide the filepaths of the messages and categories '\ 'datasets as the first and second argument respectively, as '\ 'well as the filepath of the database to save the cleaned data '\ 'to as the third argument. \n\nExample: python process_data.py '\ 'disaster_messages.csv disaster_categories.csv '\ 'DisasterResponse.db') if __name__ == '__main__': main()
[ "pandas.read_csv", "pandas.merge", "sqlalchemy.create_engine", "pandas.concat" ]
[((438, 468), 'pandas.read_csv', 'pd.read_csv', (['messages_filepath'], {}), '(messages_filepath)\n', (449, 468), True, 'import pandas as pd\n'), ((486, 518), 'pandas.read_csv', 'pd.read_csv', (['categories_filepath'], {}), '(categories_filepath)\n', (497, 518), True, 'import pandas as pd\n'), ((530, 569), 'pandas.merge', 'pd.merge', (['messages', 'categories'], {'on': '"""id"""'}), "(messages, categories, on='id')\n", (538, 569), True, 'import pandas as pd\n'), ((1390, 1425), 'pandas.concat', 'pd.concat', (['[df, categories]'], {'axis': '(1)'}), '([df, categories], axis=1)\n', (1399, 1425), True, 'import pandas as pd\n'), ((1886, 1933), 'sqlalchemy.create_engine', 'create_engine', (["('sqlite:///' + database_filename)"], {}), "('sqlite:///' + database_filename)\n", (1899, 1933), False, 'from sqlalchemy import create_engine\n')]
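The category expansion inside clean_data above is the least obvious step, so here is the same transformation on a two-row toy frame (the toy values are assumptions, not taken from the real disaster-response data).

import pandas as pd

toy = pd.DataFrame({'categories': ['related-1;request-0', 'related-0;request-1']})
categories = toy.categories.str.split(';', expand=True)
category_colnames = categories[:1].applymap(lambda s: s[:-2]).iloc[0, :].tolist()
categories.columns = category_colnames                  # ['related', 'request']
categories = categories.applymap(lambda s: int(s[-1]))  # keep only the trailing 0/1 flag
print(categories)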
from flask import Flask, jsonify from flask.templating import render_template from threading import Thread from data import * # config app = Flask(__name__) app.config["JSON_SORT_KEYS"] = False app.config["JSONIFY_PRETTYPRINT_REGULAR"] = True # home route @app.route("/") def home(): return render_template("index.html") # f"/{KEY}/data/nifty/index/all/sort/sortby" @app.route("/help") def help(): return render_template("help.html") # index route @app.route(f"/{KEY}/data/nifty/<string:index>/all/sort/<string:sortby>") def send_nifty_index_data(index, sortby): index = str(index).lower() sortby = str(sortby).lower() indices = { 'it': fetch_nifty_index_data("it", sortby), 'bank': fetch_nifty_index_data("bank", sortby), '50': fetch_nifty_index_data("50", sortby), 'auto': fetch_nifty_index_data("auto", sortby), 'pharma': fetch_nifty_index_data("pharma", sortby), 'fmcg': fetch_nifty_index_data("fmcg", sortby) } if index in indices: return jsonify(indices[index]) # all index route @app.route(f"/{KEY}/data/nifty/indices/all") def send_all_nifty_indices_data(): return jsonify(fetch_all_indices_data()) def run(): app.run(host='0.0.0.0', port=8080) def main(): t = Thread(target=run) t.start() if __name__ == "__main__": main()
[ "threading.Thread", "flask.jsonify", "flask.Flask", "flask.templating.render_template" ]
[((148, 163), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (153, 163), False, 'from flask import Flask, jsonify\n'), ((312, 341), 'flask.templating.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (327, 341), False, 'from flask.templating import render_template\n'), ((440, 468), 'flask.templating.render_template', 'render_template', (['"""help.html"""'], {}), "('help.html')\n", (455, 468), False, 'from flask.templating import render_template\n'), ((1335, 1353), 'threading.Thread', 'Thread', ([], {'target': 'run'}), '(target=run)\n', (1341, 1353), False, 'from threading import Thread\n'), ((1076, 1099), 'flask.jsonify', 'jsonify', (['indices[index]'], {}), '(indices[index])\n', (1083, 1099), False, 'from flask import Flask, jsonify\n')]
from pytube import YouTube YouTube('http://youtube.com/watch?v=9bZkp7q19f0').streams[0].download()
[ "pytube.YouTube" ]
[((27, 76), 'pytube.YouTube', 'YouTube', (['"""http://youtube.com/watch?v=9bZkp7q19f0"""'], {}), "('http://youtube.com/watch?v=9bZkp7q19f0')\n", (34, 76), False, 'from pytube import YouTube\n')]
from app import app app.run(host='0.0.0.0', port="5000")
[ "app.app.run" ]
[((21, 57), 'app.app.run', 'app.run', ([], {'host': '"""0.0.0.0"""', 'port': '"""5000"""'}), "(host='0.0.0.0', port='5000')\n", (28, 57), False, 'from app import app\n')]
from django.contrib.auth import get_user_model from .enums import LogReason, LogStatus from .models import Log class MandrillLogger(): REASON_TRANSLATOR = { 'hard-bounce': LogReason.HARD_BOUNCE, 'soft-bounce': LogReason.SOFT_BOUNCE, 'spam': LogReason.SPAM, 'unsub': LogReason.UNSUB, 'custom': LogReason.CUSTOM, 'invalid-sender': LogReason.INVALID_SENDER, 'invalid': LogReason.INVALID, 'test-mode-limit': LogReason.TEST_MODE_LIMIT, 'unsigned': LogReason.UNSIGNED, 'rule': LogReason.RULE, } # The sending status of the recipient - either "sent", "queued", "scheduled", "rejected", or "invalid" STATUS_TRANSLATOR = { 'sent': LogStatus.SENT, 'queued': LogStatus.QUEUED, 'scheduled': LogStatus.SCHEDULED, 'rejected': LogStatus.REJECTED, 'invalid': LogStatus.INVALID, } def __init__(self): pass def log_email(self, email): for recipient in email.to: _data = {} _data['template'] = email.template_name _data['email'] = recipient _data['user'] = self.get_user_from_email(recipient) try: mandrill_response = email.mandrill_response[0] _data['mandrill_id'] = mandrill_response['_id'] _data['meta_data'] = mandrill_response _data['status'] = self.get_status_enum(mandrill_response.get('status', None)) _data['reason'] = self.get_reason_enum(mandrill_response.get('reject_reason', None)) except Exception as e: pass self.save_log(_data) def save_log(self, _data): Log.objects.create(**_data) def get_user_from_email(self, email): user = get_user_model() try: return user.objects.get(email=email) except Exception as e: print(e) return None def get_reason_enum(self, reason): return self.translate_enum(self.REASON_TRANSLATOR, reason, LogReason.NA) def get_status_enum(self, status): return self.translate_enum(self.STATUS_TRANSLATOR, status, LogStatus.DEFAULT) def translate_enum(self, _dict, _str, _default=None): try: return _dict[_str] except Exception as e: return _default
[ "django.contrib.auth.get_user_model" ]
[((1814, 1830), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1828, 1830), False, 'from django.contrib.auth import get_user_model\n')]
from talon import Context ctx = Context() ctx.matches = r""" tag: user.vim_ultisnips mode: user.markdown mode: command and code.language: markdown """ # spoken name -> ultisnips snippet name ctx.lists["user.snippets"] = { # Sections and Paragraphs # "section": "sec", "sub section": "ssec", "sub sub section": "sssec", "paragraph": "par", "sub paragraph": "spar", # Text formatting # "italics": "*", "bold": "**", "bold italics": "***", "strike through": "~~", "comment": "/*", # Common stuff # "link": "link", "image": "img", "inline code": "ilc", "code block": "cbl", "shell block": "shellcbl", "reference link": "refl", "footnote": "fnt", "detail": "detail", }
[ "talon.Context" ]
[((33, 42), 'talon.Context', 'Context', ([], {}), '()\n', (40, 42), False, 'from talon import Context\n')]
from django.db import models from community.models import Community from root import settings """ Subscription object model """ class Subscription(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, blank=False, null=False, db_index=True) community = models.ForeignKey(Community, on_delete=models.PROTECT, blank=False, null=False, db_index=True) created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False) def __str__(self): return str(self.id) class Meta: verbose_name_plural = "subscriptions"
[ "django.db.models.ForeignKey", "django.db.models.DateTimeField" ]
[((177, 291), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.PROTECT', 'blank': '(False)', 'null': '(False)', 'db_index': '(True)'}), '(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, blank\n =False, null=False, db_index=True)\n', (194, 291), False, 'from django.db import models\n'), ((303, 402), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Community'], {'on_delete': 'models.PROTECT', 'blank': '(False)', 'null': '(False)', 'db_index': '(True)'}), '(Community, on_delete=models.PROTECT, blank=False, null=\n False, db_index=True)\n', (320, 402), False, 'from django.db import models\n'), ((415, 479), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'blank': '(False)', 'null': '(False)'}), '(auto_now_add=True, blank=False, null=False)\n', (435, 479), False, 'from django.db import models\n')]
from rest_framework import serializers from SIFUser.serializer import UserSerializer from .models import StudentVillage class StudentVillageSerializer(serializers.ModelSerializer): managers = UserSerializer(many=True) dormrooms = serializers.PrimaryKeyRelatedField(many=True, read_only=True) class Meta: model = StudentVillage fields = "__all__"
[ "rest_framework.serializers.PrimaryKeyRelatedField", "SIFUser.serializer.UserSerializer" ]
[((200, 225), 'SIFUser.serializer.UserSerializer', 'UserSerializer', ([], {'many': '(True)'}), '(many=True)\n', (214, 225), False, 'from SIFUser.serializer import UserSerializer\n'), ((242, 303), 'rest_framework.serializers.PrimaryKeyRelatedField', 'serializers.PrimaryKeyRelatedField', ([], {'many': '(True)', 'read_only': '(True)'}), '(many=True, read_only=True)\n', (276, 303), False, 'from rest_framework import serializers\n')]
import argparse import vulncat import logging import os loglevel='DEBUG' logpath=f'{os.getcwd()}/log' # create the log directory if it does not exist if os.path.exists(logpath) == False: os.mkdir(logpath) logging.basicConfig( level=loglevel, filename=f'{logpath}/app.log', format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%a, %d %b %Y %H:%M:%S' ) parser = argparse.ArgumentParser(description='Vulncat web parser cli') #parser.add_argument('-h', "--help", help='cli helper') parser.add_argument('-category', action='store_true', help='list all the categories of vulnerabilities') parser.add_argument('-kingdom', action='store_true', help='list all the kingdoms') parser.add_argument('-language', action='store_true', help='list all the languages') args = parser.parse_args() if args.category: vulncat.scrape_filters('category') if args.kingdom: vulncat.scrape_filters('kingdom') if args.language: vulncat.scrape_filters('codelang')
[ "os.mkdir", "argparse.ArgumentParser", "logging.basicConfig", "os.getcwd", "vulncat.scrape_filters", "os.path.exists" ]
[((208, 369), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'loglevel', 'filename': 'f"""{logpath}/app.log"""', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""', 'datefmt': '"""%a, %d %b %Y %H:%M:%S"""'}), "(level=loglevel, filename=f'{logpath}/app.log', format=\n '%(asctime)s - %(levelname)s - %(message)s', datefmt=\n '%a, %d %b %Y %H:%M:%S')\n", (227, 369), False, 'import logging\n'), ((395, 456), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Vulncat web parser cli"""'}), "(description='Vulncat web parser cli')\n", (418, 456), False, 'import argparse\n'), ((155, 178), 'os.path.exists', 'os.path.exists', (['logpath'], {}), '(logpath)\n', (169, 178), False, 'import os\n'), ((189, 206), 'os.mkdir', 'os.mkdir', (['logpath'], {}), '(logpath)\n', (197, 206), False, 'import os\n'), ((838, 872), 'vulncat.scrape_filters', 'vulncat.scrape_filters', (['"""category"""'], {}), "('category')\n", (860, 872), False, 'import vulncat\n'), ((895, 928), 'vulncat.scrape_filters', 'vulncat.scrape_filters', (['"""kingdom"""'], {}), "('kingdom')\n", (917, 928), False, 'import vulncat\n'), ((952, 986), 'vulncat.scrape_filters', 'vulncat.scrape_filters', (['"""codelang"""'], {}), "('codelang')\n", (974, 986), False, 'import vulncat\n'), ((85, 96), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (94, 96), False, 'import os\n')]
#!/usr/bin/env python3 import json import base64 import random from io import BytesIO import socket from binascii import unhexlify, hexlify # Trusting SeedSigner's embit library # https://github.com/SeedSigner/embit from embit import bip32 from embit import bip39 from embit import wordlists from embit import script from embit.networks import NETWORKS # Trusting qrcode library as offline qr code creation import qrcode # Trusting Flask as simple web interface from flask import Flask, render_template, request app = Flask(__name__) wordlist = wordlists.bip39.WORDLIST def is_online(): """ Check if we are online Thanks @KeithMukai for the suggestion! https://twitter.com/KeithMukai/status/1470571942000443392 """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect(('1.1.1.1', 53)) return True except OSError: return False def seed_qr_string(words): """ Return the string value of our SeedQR. """ return ''.join([str(wordlist.index(w)).zfill(4) for w in words.split()]) def seed_qr_base64(words): """ Return a base64 PNG image of our SeedQR. """ # create a qrcode of our seed_qr_string img = qrcode.make( seed_qr_string(words)) # generate a base64 encoding of our png image # https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/ im_file = BytesIO() img.save(im_file, format="PNG") im_b64 = base64.b64encode(im_file.getvalue()) return im_b64.decode() def get_seed_phrase(entropy): """ Generate random seedphrase """ words = bip39.mnemonic_from_bytes(entropy) return words @app.route("/") def home(): """ Main home page which generates random seed phrases """ if is_online(): return render_template('panic.html') params = {} # generate a random seed phrase params['entropy'] = random.randbytes(32) # seedQR our our entropy params['words'] = get_seed_phrase(params['entropy']) params['seed_qr_string'] = seed_qr_string(params['words']) params['seed_qr_base64'] = seed_qr_base64(params['words']) params['seed'] = bip39.mnemonic_to_seed(params['words']) params['derivation_path'] = "m/84'/0'/0'" version = bip32.detect_version(params['derivation_path'], default="xpub", network=NETWORKS['main']) root = bip32.HDKey.from_seed(params['seed'], NETWORKS['main']['xprv']) params['fingerprint'] = hexlify(root.child(0).fingerprint).decode() xpriv = root.derive(params['derivation_path']) xpub = xpriv.to_public() params['xpriv'] = xpriv params['xpub'] = xpub.to_string(version=version) return render_template('index.html', **params, wordlist=wordlist) if __name__ == "__main__": app.run(debug=True)
[ "io.BytesIO", "embit.bip32.detect_version", "embit.bip39.mnemonic_from_bytes", "socket.socket", "flask.Flask", "random.randbytes", "embit.bip39.mnemonic_to_seed", "flask.render_template", "embit.bip32.HDKey.from_seed" ]
[((524, 539), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (529, 539), False, 'from flask import Flask, render_template, request\n'), ((753, 802), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (766, 802), False, 'import socket\n'), ((1405, 1414), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1412, 1414), False, 'from io import BytesIO\n'), ((1621, 1655), 'embit.bip39.mnemonic_from_bytes', 'bip39.mnemonic_from_bytes', (['entropy'], {}), '(entropy)\n', (1646, 1655), False, 'from embit import bip39\n'), ((1918, 1938), 'random.randbytes', 'random.randbytes', (['(32)'], {}), '(32)\n', (1934, 1938), False, 'import random\n'), ((2174, 2213), 'embit.bip39.mnemonic_to_seed', 'bip39.mnemonic_to_seed', (["params['words']"], {}), "(params['words'])\n", (2196, 2213), False, 'from embit import bip39\n'), ((2276, 2370), 'embit.bip32.detect_version', 'bip32.detect_version', (["params['derivation_path']"], {'default': '"""xpub"""', 'network': "NETWORKS['main']"}), "(params['derivation_path'], default='xpub', network=\n NETWORKS['main'])\n", (2296, 2370), False, 'from embit import bip32\n'), ((2378, 2441), 'embit.bip32.HDKey.from_seed', 'bip32.HDKey.from_seed', (["params['seed']", "NETWORKS['main']['xprv']"], {}), "(params['seed'], NETWORKS['main']['xprv'])\n", (2399, 2441), False, 'from embit import bip32\n'), ((2692, 2750), 'flask.render_template', 'render_template', (['"""index.html"""'], {'wordlist': 'wordlist'}), "('index.html', **params, wordlist=wordlist)\n", (2707, 2750), False, 'from flask import Flask, render_template, request\n'), ((1810, 1839), 'flask.render_template', 'render_template', (['"""panic.html"""'], {}), "('panic.html')\n", (1825, 1839), False, 'from flask import Flask, render_template, request\n')]
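The SeedQR payload built by seed_qr_string above is simply each word's BIP-39 index, zero-padded to four digits and concatenated. A standalone sketch with a three-word stand-in wordlist (the real list from embit has 2048 entries):

wordlist = ['abandon', 'ability', 'able']   # stand-in for wordlists.bip39.WORDLIST
words = 'able abandon ability'

print(''.join(str(wordlist.index(w)).zfill(4) for w in words.split()))
# -> '000200000001'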
# -*- coding: utf-8 -*- # Generated by Django 1.11.8 on 2018-02-07 21:09 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Pending', fields=[ ('user', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('admin_contact_1', models.EmailField(blank=True, max_length=254, null=True)), ('admin_contact_2', models.EmailField(blank=True, max_length=254, null=True)), ('admin_token', models.CharField(editable=False, max_length=32, null=True)), ('email_verified', models.BooleanField(default=False, editable=False)), ], options={ 'verbose_name': 'Pending user', 'verbose_name_plural': 'Pending users', }, ), migrations.CreateModel( name='Recovery', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]
[ "django.db.models.OneToOneField", "django.db.migrations.swappable_dependency", "django.db.models.CharField", "django.db.models.ForeignKey", "django.db.models.BooleanField", "django.db.models.EmailField", "django.db.models.AutoField", "django.db.models.DateTimeField" ]
[((369, 426), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (400, 426), False, 'from django.db import migrations, models\n'), ((560, 710), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'editable': '(False)', 'on_delete': 'django.db.models.deletion.CASCADE', 'primary_key': '(True)', 'serialize': '(False)', 'to': 'settings.AUTH_USER_MODEL'}), '(editable=False, on_delete=django.db.models.deletion.\n CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)\n', (580, 710), False, 'from django.db import migrations, models\n'), ((739, 778), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (759, 778), False, 'from django.db import migrations, models\n'), ((807, 854), 'django.db.models.CharField', 'models.CharField', ([], {'editable': '(False)', 'max_length': '(32)'}), '(editable=False, max_length=32)\n', (823, 854), False, 'from django.db import migrations, models\n'), ((893, 949), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'max_length': '(254)', 'null': '(True)'}), '(blank=True, max_length=254, null=True)\n', (910, 949), False, 'from django.db import migrations, models\n'), ((988, 1044), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'max_length': '(254)', 'null': '(True)'}), '(blank=True, max_length=254, null=True)\n', (1005, 1044), False, 'from django.db import migrations, models\n'), ((1079, 1137), 'django.db.models.CharField', 'models.CharField', ([], {'editable': '(False)', 'max_length': '(32)', 'null': '(True)'}), '(editable=False, max_length=32, null=True)\n', (1095, 1137), False, 'from django.db import migrations, models\n'), ((1175, 1225), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'editable': '(False)'}), '(default=False, editable=False)\n', (1194, 1225), False, 'from django.db import migrations, models\n'), ((1500, 1593), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1516, 1593), False, 'from django.db import migrations, models\n'), ((1623, 1662), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1643, 1662), False, 'from django.db import migrations, models\n'), ((1691, 1738), 'django.db.models.CharField', 'models.CharField', ([], {'editable': '(False)', 'max_length': '(32)'}), '(editable=False, max_length=32)\n', (1707, 1738), False, 'from django.db import migrations, models\n'), ((1766, 1862), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1783, 1862), False, 'from django.db import migrations, models\n')]
#!/usr/bin/env python3 import os import sys import json in_fmt = '%d.in' out_fmt = '%d.out' def get_test(path, test_num, test_fmt): return os.path.join(path, 'tests', test_fmt % test_num) def testerize(path): src_prob = json.loads(open(os.path.join(path, 'problem.json'), 'r').read())['problem'] dst_prob = {} dst_prob['InputFile'] = 'stdin' if src_prob['input'] == '' else src_prob['input'] dst_prob['OutputFile'] = 'stdout' if src_prob['output'] == '' else src_prob['output'] dst_prob['TimeLimit'] = int(src_prob['timeLimit'] * 1000) dst_prob['MemoryLimit'] = int(src_prob['memoryLimit'] * 1024) dst_prob['StopAfterFirstFail'] = False if os.path.exists(os.path.join(path, 'checker.cpp')): dst_prob['Checker'] = { 'Type': 'TTextChecker', 'Value': { 'CheckerFileName': 'checker.exe', 'ParamsPolicy': 'secpInOutAns' } } else: dst_prob['Checker'] = { 'Type': 'TFileCompareChecker', 'Value': { 'StripSpaces': True } } dst_prob['Version'] = { 'Build': 129, 'Major': 1, 'Minor': 2, 'Release': 3, 'Tag': '' } dst_prob['TestList'] = [] test_id = 1 while os.path.exists(get_test(path, test_id, in_fmt)) or os.path.exists(get_test(path, test_id, out_fmt)): cur_test = { 'Cost': 1.0, } cur_test['InputFile'] = in_fmt % test_id cur_test['OutputFile'] = out_fmt % test_id dst_prob['TestList'] += [cur_test] test_id += 1 test_cost = 0 if test_id == 1 else 100.0 / (test_id - 1) for i in range(test_id - 1): dst_prob['TestList'][i]['Cost'] = test_cost open(os.path.join(path, 'tests', 'props.json'), 'w').write(json.dumps(dst_prob, indent=2)) def main(args): if len(args) <= 1: print('Usage: ./testerize.py <problem directory>') return 1 testerize(args[1]) return 0 if __name__ == '__main__': sys.exit(main(sys.argv))
[ "os.path.join", "json.dumps" ]
[((146, 194), 'os.path.join', 'os.path.join', (['path', '"""tests"""', '(test_fmt % test_num)'], {}), "(path, 'tests', test_fmt % test_num)\n", (158, 194), False, 'import os\n'), ((695, 728), 'os.path.join', 'os.path.join', (['path', '"""checker.cpp"""'], {}), "(path, 'checker.cpp')\n", (707, 728), False, 'import os\n'), ((1833, 1863), 'json.dumps', 'json.dumps', (['dst_prob'], {'indent': '(2)'}), '(dst_prob, indent=2)\n', (1843, 1863), False, 'import json\n'), ((1779, 1820), 'os.path.join', 'os.path.join', (['path', '"""tests"""', '"""props.json"""'], {}), "(path, 'tests', 'props.json')\n", (1791, 1820), False, 'import os\n'), ((248, 282), 'os.path.join', 'os.path.join', (['path', '"""problem.json"""'], {}), "(path, 'problem.json')\n", (260, 282), False, 'import os\n')]
#!/usr/bin/env python #-*- coding:utf-8 -*- """ File Name: setup.py Author: gadfy """ from setuptools import setup, find_packages #这个包没有的可以pip一下 with open("README.md", "r", encoding="utf-8") as f: long_description = f.read() setup( name = "samebirthdayrate", #这里是pip项目发布的名称 version = "1.0.0", #版本号,数值大的会优先被pip keywords = ("pip","samebirthdayrate"), description = "caculate same birthday rate", long_description = long_description, license = "MIT Licence", url = "https://narwelplists.herokuapp.com/", #项目相关文件地址,一般是github author = "gadfy", author_email = "<EMAIL>", packages = find_packages(), include_package_data = True, platforms = "any", install_requires = [] #这个项目需要的第三方库 )
[ "setuptools.find_packages" ]
[((647, 662), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (660, 662), False, 'from setuptools import setup, find_packages\n')]
# -*- coding: utf-8 -*- from __future__ import unicode_literals try: from unittest.mock import patch, Mock except ImportError: from mock import patch, Mock import unittest import logging import os import tempfile from contextlib import contextmanager import six from v8cffi.platform import platform from v8cffi.vm import VM from v8cffi import exceptions from v8cffi import context logging.disable(logging.CRITICAL) class StringTest(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_with(self): """ It should support with statement """ with context._String() as s: self.assertIsInstance(s, context._String) self.assertEqual( context.ffi.typeof('char **'), context.ffi.typeof(s.string_ptr)) self.assertEqual( context.ffi.typeof('size_t *'), context.ffi.typeof(s.len_ptr)) self.assertEqual(s.string_ptr[0], context.ffi.NULL) self.assertEqual(s.len_ptr[0], 0) def test_to_str(self): """ It should support str call """ with context._String() as s: string_ptr = s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(six.text_type(s), 'foo') s.string_ptr = string_ptr def test_to_bytes(self): """ It should return the string bytes """ with context._String() as s: string_ptr = s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(s.to_bytes(), b'foo') s.string_ptr = string_ptr def test_free(self): """ It should free the string """ with patch('v8cffi.context.lib', autospec=True) as r: s = context._String() s.__enter__() free = Mock() r.v8cffi_free = free s.__exit__() self.assertTrue(free.called) def test_assert_on_re_enter(self): """ It should fail to re enter """ s = context._String() with s as _: self.assertRaises(AssertionError, s.__enter__) def test_assert_on_re_exit(self): """ It should fail to re exit """ s = context._String() self.assertRaises(AssertionError, s.__exit__) with s as _: pass self.assertRaises(AssertionError, s.__exit__) def test_assert_on_re_create(self): """ It should allow to re create """ s = context._String() with s as _: self.assertIsNotNone(s.string_ptr) self.assertIsNone(s.string_ptr) with s as _: self.assertIsNotNone(s.string_ptr) @contextmanager def js_file(data): temp = tempfile.NamedTemporaryFile(delete=False) temp.write(data) temp.close() try: yield temp.name finally: os.remove(temp.name) class ContextTest(unittest.TestCase): def setUp(self): self.vm = VM(platform) self.vm.set_up() def tearDown(self): self.vm.tear_down() def test_keep_vm(self): """ It should keep a reference to the VM """ ctx = context.Context(self.vm) self.assertIsInstance(ctx._vm, VM) def test_with(self): """ It should support with statement """ with context.Context(self.vm) as ctx: self.assertIsInstance(ctx, context.Context) def test_set_up(self): """ It should call __enter__ """ ctx = context.Context(self.vm) with patch.object(ctx, '__enter__', autospec=True) as r: r.return_value = 'foo' self.assertEqual(ctx.set_up(), 'foo') r.assert_called_once_with() def test_tear_down(self): """ It should call __exit__ """ ctx = context.Context(self.vm) with patch.object(ctx, '__exit__', autospec=True) as r: ctx.tear_down() r.assert_called_once_with() def test_load_libs(self): """ It should run the script file content on V8 """ script = b'var foo = "foo";' with js_file(script) as path: with context.Context(self.vm) as ctx: with patch.object(ctx, 'run_script', autospec=True) as r: ctx.load_libs([path]) r.assert_called_once_with(script, identifier=path) def test_run_script(self): """ It should run the script on V8 
""" script_foo = b'var foo = "foo!";' script_bar = 'var bar = "bar!";' script_special = 'var txt = "áéíóú";' with context.Context(self.vm) as ctx: ctx.run_script(script_foo) ctx.run_script(script_bar) ctx.run_script(script_special) self.assertEqual("foo!", ctx.run_script(b'foo')) self.assertEqual("bar!", ctx.run_script('bar')) self.assertEqual("áéíóú", ctx.run_script('txt')) self.assertRaises(exceptions.V8JSError, ctx.run_script, 'baz') self.assertRaises(exceptions.V8JSError, ctx.run_script, 'function[]();') with context.Context(self.vm) as ctx: self.assertRaises(exceptions.V8JSError, ctx.run_script, 'foo') def test_builtin_libs(self): """ It should pre-load builtin libraries """ with context.Context(self.vm) as ctx: self.assertEqual("20", ctx.run_script('Math.max(10, 20);')) def test_run_script_trace_back(self): """ It should run the script on V8\ and get a useful traceback """ def get_exception_message(ctx, script): try: return ctx.run_script(script) except exceptions.V8JSError as ex: return six.text_type(ex) script_oops = ( 'function oops() {\n' ' thereMayBeErrors();\n' ' var my_var_2;\n' '}') script_oops2 = ( 'function oops2() {\n' ' thereMayBeMoreErrors();\n' ' var my_var_2;\n' '}') var_a = 'var a;' script_long = ( 'function oops3() {\n' + var_a * 100 + 'thereMayBeMoreErrors();' + var_a * 100 + '\n' '}') # todo: trim source line when too long with context.Context(self.vm) as ctx: ctx.run_script(script_oops, identifier='my_file_áéíóú.js') ctx.run_script(script_oops2, identifier='my_other_file.js') ctx.run_script(script_long) self.assertEqual( 'my_file_áéíóú.js:2\n' ' thereMayBeErrors();\n' ' ^\n' 'ReferenceError: thereMayBeErrors is not defined\n' ' at oops (my_file_áéíóú.js:2:3)\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops()')) self.assertEqual( 'my_other_file.js:2\n' ' thereMayBeMoreErrors();\n' ' ^\n' 'ReferenceError: thereMayBeMoreErrors is not defined\n' ' at oops2 (my_other_file.js:2:3)\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops2()')) self.assertEqual( '<anonymous>:2\n' ' ~Line too long to display.\n' 'ReferenceError: thereMayBeMoreErrors is not defined\n' ' at oops3 (<anonymous>:2:601)\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops3()')) self.assertEqual( '<anonymous>:1\n' ' nonExistentFunc();\n' ' ^\n' 'ReferenceError: nonExistentFunc is not defined\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'nonExistentFunc();')) self.assertEqual( '<anonymous>:1\n' ' function[]();\n' ' ^\n' 'SyntaxError: Unexpected token [', get_exception_message(ctx, 'function[]();')) # Has no .stack property self.assertEqual( '<anonymous>:2\n' ' throw "myException";\n' ' ^\n' 'myException', get_exception_message( ctx, '(function() {\n' ' throw "myException";\n' '})();'))
[ "tempfile.NamedTemporaryFile", "os.remove", "v8cffi.vm.VM", "mock.patch.object", "v8cffi.context.Context", "v8cffi.context._String", "mock.patch", "six.text_type", "logging.disable", "mock.Mock", "v8cffi.context.ffi.new", "v8cffi.context.ffi.typeof" ]
[((396, 429), 'logging.disable', 'logging.disable', (['logging.CRITICAL'], {}), '(logging.CRITICAL)\n', (411, 429), False, 'import logging\n'), ((2961, 3002), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (2988, 3002), False, 'import tempfile\n'), ((2227, 2244), 'v8cffi.context._String', 'context._String', ([], {}), '()\n', (2242, 2244), False, 'from v8cffi import context\n'), ((2435, 2452), 'v8cffi.context._String', 'context._String', ([], {}), '()\n', (2450, 2452), False, 'from v8cffi import context\n'), ((2716, 2733), 'v8cffi.context._String', 'context._String', ([], {}), '()\n', (2731, 2733), False, 'from v8cffi import context\n'), ((3096, 3116), 'os.remove', 'os.remove', (['temp.name'], {}), '(temp.name)\n', (3105, 3116), False, 'import os\n'), ((3197, 3209), 'v8cffi.vm.VM', 'VM', (['platform'], {}), '(platform)\n', (3199, 3209), False, 'from v8cffi.vm import VM\n'), ((3400, 3424), 'v8cffi.context.Context', 'context.Context', (['self.vm'], {}), '(self.vm)\n', (3415, 3424), False, 'from v8cffi import context\n'), ((3760, 3784), 'v8cffi.context.Context', 'context.Context', (['self.vm'], {}), '(self.vm)\n', (3775, 3784), False, 'from v8cffi import context\n'), ((4077, 4101), 'v8cffi.context.Context', 'context.Context', (['self.vm'], {}), '(self.vm)\n', (4092, 4101), False, 'from v8cffi import context\n'), ((646, 663), 'v8cffi.context._String', 'context._String', ([], {}), '()\n', (661, 663), False, 'from v8cffi import context\n'), ((1186, 1203), 'v8cffi.context._String', 'context._String', ([], {}), '()\n', (1201, 1203), False, 'from v8cffi import context\n'), ((1541, 1558), 'v8cffi.context._String', 'context._String', ([], {}), '()\n', (1556, 1558), False, 'from v8cffi import context\n'), ((1881, 1923), 'mock.patch', 'patch', (['"""v8cffi.context.lib"""'], {'autospec': '(True)'}), "('v8cffi.context.lib', autospec=True)\n", (1886, 1923), False, 'from mock import patch, Mock\n'), ((1946, 1963), 'v8cffi.context._String', 'context._String', ([], {}), '()\n', (1961, 1963), False, 'from v8cffi import context\n'), ((2010, 2016), 'mock.Mock', 'Mock', ([], {}), '()\n', (2014, 2016), False, 'from mock import patch, Mock\n'), ((3572, 3596), 'v8cffi.context.Context', 'context.Context', (['self.vm'], {}), '(self.vm)\n', (3587, 3596), False, 'from v8cffi import context\n'), ((3799, 3844), 'mock.patch.object', 'patch.object', (['ctx', '"""__enter__"""'], {'autospec': '(True)'}), "(ctx, '__enter__', autospec=True)\n", (3811, 3844), False, 'from mock import patch, Mock\n'), ((4116, 4160), 'mock.patch.object', 'patch.object', (['ctx', '"""__exit__"""'], {'autospec': '(True)'}), "(ctx, '__exit__', autospec=True)\n", (4128, 4160), False, 'from mock import patch, Mock\n'), ((4893, 4917), 'v8cffi.context.Context', 'context.Context', (['self.vm'], {}), '(self.vm)\n', (4908, 4917), False, 'from v8cffi import context\n'), ((5403, 5427), 'v8cffi.context.Context', 'context.Context', (['self.vm'], {}), '(self.vm)\n', (5418, 5427), False, 'from v8cffi import context\n'), ((5627, 5651), 'v8cffi.context.Context', 'context.Context', (['self.vm'], {}), '(self.vm)\n', (5642, 5651), False, 'from v8cffi import context\n'), ((6607, 6631), 'v8cffi.context.Context', 'context.Context', (['self.vm'], {}), '(self.vm)\n', (6622, 6631), False, 'from v8cffi import context\n'), ((770, 799), 'v8cffi.context.ffi.typeof', 'context.ffi.typeof', (['"""char **"""'], {}), "('char **')\n", (788, 799), False, 'from v8cffi import context\n'), ((817, 849), 
'v8cffi.context.ffi.typeof', 'context.ffi.typeof', (['s.string_ptr'], {}), '(s.string_ptr)\n', (835, 849), False, 'from v8cffi import context\n'), ((897, 927), 'v8cffi.context.ffi.typeof', 'context.ffi.typeof', (['"""size_t *"""'], {}), "('size_t *')\n", (915, 927), False, 'from v8cffi import context\n'), ((945, 974), 'v8cffi.context.ffi.typeof', 'context.ffi.typeof', (['s.len_ptr'], {}), '(s.len_ptr)\n', (963, 974), False, 'from v8cffi import context\n'), ((1276, 1309), 'v8cffi.context.ffi.new', 'context.ffi.new', (['"""char[]"""', "b'foo'"], {}), "('char[]', b'foo')\n", (1291, 1309), False, 'from v8cffi import context\n'), ((1369, 1385), 'six.text_type', 'six.text_type', (['s'], {}), '(s)\n', (1382, 1385), False, 'import six\n'), ((1631, 1664), 'v8cffi.context.ffi.new', 'context.ffi.new', (['"""char[]"""', "b'foo'"], {}), "('char[]', b'foo')\n", (1646, 1664), False, 'from v8cffi import context\n'), ((4435, 4459), 'v8cffi.context.Context', 'context.Context', (['self.vm'], {}), '(self.vm)\n', (4450, 4459), False, 'from v8cffi import context\n'), ((4489, 4535), 'mock.patch.object', 'patch.object', (['ctx', '"""run_script"""'], {'autospec': '(True)'}), "(ctx, 'run_script', autospec=True)\n", (4501, 4535), False, 'from mock import patch, Mock\n'), ((6055, 6072), 'six.text_type', 'six.text_type', (['ex'], {}), '(ex)\n', (6068, 6072), False, 'import six\n')]
#!/usr/bin/python3 #+-+-+-+-+-+-+-+-+-+-+-+ #|R|i|c|e|L|e|e|.|c|o|m| #+-+-+-+-+-+-+-+-+-+-+-+ # Copyright (c) 2021, <EMAIL> # All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # # Origin: p111 at https://hackspace.raspberrypi.org/books/micropython-pico import machine import utime sensor_temp = machine.ADC(machine.ADC.CORE_TEMP) conversion_factor = 3.3 / (65535) file = open("temps.txt", "w") while True: reading = sensor_temp.read_u16() * conversion_factor temperature = 27 - (reading - 0.706)/0.001721 file.write(str(temperature) + "\n") file.flush() utime.sleep(10)
[ "utime.sleep", "machine.ADC" ]
[((370, 404), 'machine.ADC', 'machine.ADC', (['machine.ADC.CORE_TEMP'], {}), '(machine.ADC.CORE_TEMP)\n', (381, 404), False, 'import machine\n'), ((650, 665), 'utime.sleep', 'utime.sleep', (['(10)'], {}), '(10)\n', (661, 665), False, 'import utime\n')]
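The conversion in the loop above is the usual RP2040 on-die temperature-sensor formula; here is a desktop-runnable sketch with an assumed raw ADC value instead of a real read_u16() call.

conversion_factor = 3.3 / 65535
raw = 14000                                      # assumed 16-bit ADC reading
reading = raw * conversion_factor                # back to volts
temperature = 27 - (reading - 0.706) / 0.001721
print(round(temperature, 1))                     # ~27.6 degrees C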
# Generated by Django 3.2 on 2021-04-18 06:36 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Meal', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=50, null=True)), ('category', models.CharField(blank=True, max_length=10, null=True)), ('instructions', models.CharField(blank=True, max_length=4000, null=True)), ('region', models.CharField(blank=True, max_length=20, null=True)), ('slug', models.SlugField(default='test')), ('image_url', models.CharField(blank=True, max_length=50, null=True)), ], ), ]
[ "django.db.models.BigAutoField", "django.db.models.CharField", "django.db.models.SlugField" ]
[((298, 394), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (317, 394), False, 'from django.db import migrations, models\n'), ((418, 472), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'null': '(True)'}), '(blank=True, max_length=50, null=True)\n', (434, 472), False, 'from django.db import migrations, models\n'), ((504, 558), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(10)', 'null': '(True)'}), '(blank=True, max_length=10, null=True)\n', (520, 558), False, 'from django.db import migrations, models\n'), ((594, 650), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(4000)', 'null': '(True)'}), '(blank=True, max_length=4000, null=True)\n', (610, 650), False, 'from django.db import migrations, models\n'), ((680, 734), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)', 'null': '(True)'}), '(blank=True, max_length=20, null=True)\n', (696, 734), False, 'from django.db import migrations, models\n'), ((762, 794), 'django.db.models.SlugField', 'models.SlugField', ([], {'default': '"""test"""'}), "(default='test')\n", (778, 794), False, 'from django.db import migrations, models\n'), ((827, 881), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)', 'null': '(True)'}), '(blank=True, max_length=50, null=True)\n', (843, 881), False, 'from django.db import migrations, models\n')]
# -*- coding: utf-8 -*- """ Created on Mon Dec 10 11:32:52 2018 @author: Eric """ import glob import pandas as pd files = glob.glob("./split_files/*.csv") for file_name in files: new_name = file_name.split(".csv")[0] + '.json' df = pd.read_csv(file_name, engine = 'python', encoding = 'utf-8') df.to_json(new_name)
[ "pandas.read_csv", "glob.glob" ]
[((133, 165), 'glob.glob', 'glob.glob', (['"""./split_files/*.csv"""'], {}), "('./split_files/*.csv')\n", (142, 165), False, 'import glob\n'), ((256, 313), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'engine': '"""python"""', 'encoding': '"""utf-8"""'}), "(file_name, engine='python', encoding='utf-8')\n", (267, 313), True, 'import pandas as pd\n')]
"""Test that types defined in shared libraries work correctly.""" import lldb import unittest2 from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * import lldbsuite.test.lldbutil as lldbutil class SharedLibTestCase(TestBase): mydir = TestBase.compute_mydir(__file__) def setUp(self): # Call super's setUp(). TestBase.setUp(self) # Find the line number to break inside main(). self.source = "shared.c" self.line = line_number(self.source, "// Set breakpoint 0 here.") self.shlib_names = ["foo"] def common_setup(self): # Run in synchronous mode self.dbg.SetAsync(False) self.runCmd("settings set symbols.load-on-demand true") # Create a target by the debugger. self.target = self.dbg.CreateTarget(self.getBuildArtifact("a.out")) self.assertTrue(self.target, VALID_TARGET) # Register our shared libraries for remote targets so they get # automatically uploaded self.environment = self.registerSharedLibrariesWithTarget( self.target, self.shlib_names ) ctx = self.platformContext self.shared_lib_name = ctx.shlib_prefix + "foo." + ctx.shlib_extension @skipIfWindows def test_source_line_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, "foo.c", 4, num_expected_locations=1, loc_exact=True ) # Now launch the process, and do not stop at entry point. process = self.target.LaunchSimple( None, self.environment, self.get_process_working_directory() ) self.assertTrue(process, PROCESS_IS_VALID) # The stop reason of the thread should be breakpoint. self.expect( "thread list", STOPPED_DUE_TO_BREAKPOINT, substrs=["stopped", "stop reason = breakpoint"], ) # The breakpoint should have a hit count of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) thread = process.GetSelectedThread() stack_frames = lldbutil.get_stack_frames(thread) self.assertGreater(len(stack_frames), 2) leaf_frame = stack_frames[0] self.assertEqual("foo.c", leaf_frame.GetLineEntry().GetFileSpec().GetFilename()) self.assertEqual(4, leaf_frame.GetLineEntry().GetLine()) parent_frame = stack_frames[1] self.assertEqual( "shared.c", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_symbolic_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_symbol( self, "foo", sym_exact=True, num_expected_locations=1 ) # Now launch the process, and do not stop at entry point. process = self.target.LaunchSimple( None, self.environment, self.get_process_working_directory() ) self.assertTrue(process, PROCESS_IS_VALID) # The stop reason of the thread should be breakpoint. self.expect( "thread list", STOPPED_DUE_TO_BREAKPOINT, substrs=["stopped", "stop reason = breakpoint"], ) # The breakpoint should have a hit count of 1. 
lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) thread = process.GetSelectedThread() stack_frames = lldbutil.get_stack_frames(thread) self.assertGreater(len(stack_frames), 2) leaf_frame = stack_frames[0] self.assertEqual("foo.c", leaf_frame.GetLineEntry().GetFileSpec().GetFilename()) self.assertEqual(4, leaf_frame.GetLineEntry().GetLine()) parent_frame = stack_frames[1] self.assertEqual( "shared.c", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_global_variable_hydration(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, self.source, self.line, num_expected_locations=1, loc_exact=True ) # Now launch the process, and do not stop at entry point. process = self.target.LaunchSimple( None, self.environment, self.get_process_working_directory() ) self.assertTrue(process, PROCESS_IS_VALID) # The stop reason of the thread should be breakpoint. self.expect( "thread list", STOPPED_DUE_TO_BREAKPOINT, substrs=["stopped", "stop reason = breakpoint"], ) # The breakpoint should have a hit count of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) self.expect( "target variable --shlib a.out", "Breakpoint in a.out should have hydrated the debug info", substrs=["global_shared = 897"], ) self.expect( "target variable --shlib " + self.shared_lib_name, "shared library should not have debug info by default", matching=False, substrs=["global_foo"], ) self.expect( "target variable global_foo --shlib " + self.shared_lib_name, "Match global_foo in symbol table should hydrate debug info", matching=True, substrs=["global_foo = 321"], )
[ "lldbsuite.test.lldbutil.run_break_set_by_symbol", "lldbsuite.test.lldbutil.run_break_set_by_file_and_line", "lldbsuite.test.lldbutil.check_breakpoint", "lldbsuite.test.lldbutil.get_stack_frames" ]
[((1372, 1475), 'lldbsuite.test.lldbutil.run_break_set_by_file_and_line', 'lldbutil.run_break_set_by_file_and_line', (['self', '"""foo.c"""', '(4)'], {'num_expected_locations': '(1)', 'loc_exact': '(True)'}), "(self, 'foo.c', 4,\n num_expected_locations=1, loc_exact=True)\n", (1411, 1475), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((2023, 2084), 'lldbsuite.test.lldbutil.check_breakpoint', 'lldbutil.check_breakpoint', (['self'], {'bpno': '(1)', 'expected_hit_count': '(1)'}), '(self, bpno=1, expected_hit_count=1)\n', (2048, 2084), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((2154, 2187), 'lldbsuite.test.lldbutil.get_stack_frames', 'lldbutil.get_stack_frames', (['thread'], {}), '(thread)\n', (2179, 2187), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((2770, 2861), 'lldbsuite.test.lldbutil.run_break_set_by_symbol', 'lldbutil.run_break_set_by_symbol', (['self', '"""foo"""'], {'sym_exact': '(True)', 'num_expected_locations': '(1)'}), "(self, 'foo', sym_exact=True,\n num_expected_locations=1)\n", (2802, 2861), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((3409, 3470), 'lldbsuite.test.lldbutil.check_breakpoint', 'lldbutil.check_breakpoint', (['self'], {'bpno': '(1)', 'expected_hit_count': '(1)'}), '(self, bpno=1, expected_hit_count=1)\n', (3434, 3470), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((3540, 3573), 'lldbsuite.test.lldbutil.get_stack_frames', 'lldbutil.get_stack_frames', (['thread'], {}), '(thread)\n', (3565, 3573), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((4162, 4277), 'lldbsuite.test.lldbutil.run_break_set_by_file_and_line', 'lldbutil.run_break_set_by_file_and_line', (['self', 'self.source', 'self.line'], {'num_expected_locations': '(1)', 'loc_exact': '(True)'}), '(self, self.source, self.line,\n num_expected_locations=1, loc_exact=True)\n', (4201, 4277), True, 'import lldbsuite.test.lldbutil as lldbutil\n'), ((4826, 4887), 'lldbsuite.test.lldbutil.check_breakpoint', 'lldbutil.check_breakpoint', (['self'], {'bpno': '(1)', 'expected_hit_count': '(1)'}), '(self, bpno=1, expected_hit_count=1)\n', (4851, 4887), True, 'import lldbsuite.test.lldbutil as lldbutil\n')]
import os import pathlib import platform import subprocess import unittest # Ensure we're running in the correct folder so we don't destroy anything important cwd = pathlib.Path(os.getcwd()) if cwd.name == "SenSchema": os.chdir("test") cwd = pathlib.Path(os.getcwd()) assert cwd.name == "test" assert cwd.parent.name == "SenSchema" current_os = platform.system() if not pathlib.Path("kaitaistruct.py").exists(): print("Creating link to kaitai struct compiler") if current_os == "Windows": os.link( "3rdparty/kaitai_struct_python_runtime/kaitaistruct.py", "kaitaistruct.py" ) else: os.symlink( "3rdparty/kaitai_struct_python_runtime/kaitaistruct.py", "kaitaistruct.py" ) print("Cleaning up...") for file in cwd.glob("cs3tbl/*.py"): if file.name == "__init__.py": continue print(f"Removing {file}") file.unlink() print("Generating parser code") os.chdir("cs3tbl") executable_file = ( "kaitai-struct-compiler.bat" if current_os == "Windows" else "kaitai-struct-compiler" ) proc = subprocess.run( [ executable_file, "-t", "python", "--python-package", ".", "-I", "../../schemas/", "../../schemas/cs3.ksy", ] ) if proc.returncode != 0: raise RuntimeError(f"kaitai-struct-compiler returned {proc.returncode}") os.chdir("..") print("Setup done.") print("Loading tests.") suite = unittest.TestSuite( unittest.TestLoader().discover(start_dir=path, pattern="test_*.py") for path in pathlib.Path("tbl").iterdir() ) print("Running tests.") unittest.TextTestRunner().run(suite)
[ "subprocess.run", "unittest.TextTestRunner", "os.getcwd", "pathlib.Path", "unittest.TestLoader", "os.link", "platform.system", "os.symlink", "os.chdir" ]
[((356, 373), 'platform.system', 'platform.system', ([], {}), '()\n', (371, 373), False, 'import platform\n'), ((945, 963), 'os.chdir', 'os.chdir', (['"""cs3tbl"""'], {}), "('cs3tbl')\n", (953, 963), False, 'import os\n'), ((1092, 1219), 'subprocess.run', 'subprocess.run', (["[executable_file, '-t', 'python', '--python-package', '.', '-I',\n '../../schemas/', '../../schemas/cs3.ksy']"], {}), "([executable_file, '-t', 'python', '--python-package', '.',\n '-I', '../../schemas/', '../../schemas/cs3.ksy'])\n", (1106, 1219), False, 'import subprocess\n'), ((1395, 1409), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (1403, 1409), False, 'import os\n'), ((179, 190), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (188, 190), False, 'import os\n'), ((224, 240), 'os.chdir', 'os.chdir', (['"""test"""'], {}), "('test')\n", (232, 240), False, 'import os\n'), ((264, 275), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (273, 275), False, 'import os\n'), ((517, 604), 'os.link', 'os.link', (['"""3rdparty/kaitai_struct_python_runtime/kaitaistruct.py"""', '"""kaitaistruct.py"""'], {}), "('3rdparty/kaitai_struct_python_runtime/kaitaistruct.py',\n 'kaitaistruct.py')\n", (524, 604), False, 'import os\n'), ((641, 731), 'os.symlink', 'os.symlink', (['"""3rdparty/kaitai_struct_python_runtime/kaitaistruct.py"""', '"""kaitaistruct.py"""'], {}), "('3rdparty/kaitai_struct_python_runtime/kaitaistruct.py',\n 'kaitaistruct.py')\n", (651, 731), False, 'import os\n'), ((1630, 1655), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (1653, 1655), False, 'import unittest\n'), ((382, 413), 'pathlib.Path', 'pathlib.Path', (['"""kaitaistruct.py"""'], {}), "('kaitaistruct.py')\n", (394, 413), False, 'import pathlib\n'), ((1489, 1510), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (1508, 1510), False, 'import unittest\n'), ((1573, 1592), 'pathlib.Path', 'pathlib.Path', (['"""tbl"""'], {}), "('tbl')\n", (1585, 1592), False, 'import pathlib\n')]
# Generated by Django 3.0.3 on 2020-02-29 14:20 import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("Dormroom", "0004_auto_20200229_1420"), ("SIFUser", "0005_merge_20200228_1005"), ] operations = [ migrations.AlterField( model_name="user", name="dormroom", field=models.ForeignKey( blank=True, help_text="Kollektivet personen bor i", null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="residents", to="Dormroom.Dormroom", ), ) ]
[ "django.db.models.ForeignKey" ]
[((419, 605), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""Kollektivet personen bor i"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""residents"""', 'to': '"""Dormroom.Dormroom"""'}), "(blank=True, help_text='Kollektivet personen bor i', null=\n True, on_delete=django.db.models.deletion.SET_NULL, related_name=\n 'residents', to='Dormroom.Dormroom')\n", (436, 605), False, 'from django.db import migrations, models\n')]
import uuid from datetime import datetime from typing import Optional, List import pydantic from sqlalchemy import Column, JSON from sqlmodel import Field, Relationship from api.db.models.base import BaseModel, BaseTable class SchemaDef(pydantic.BaseModel): id: Optional[str] = None name: Optional[str] = None version: Optional[str] = None attributes: Optional[List[str]] = [] class Governance(pydantic.BaseModel): schema_def: Optional[SchemaDef] = None cred_def_id: Optional[str] = None cred_def_tag: Optional[str] = None class SandboxBase(BaseModel): tag: Optional[str] = Field(nullable=True) governance: dict = Field(default={}, sa_column=Column(JSON)) governance_cas: dict = Field(default={}, sa_column=Column(JSON)) class Sandbox(SandboxBase, BaseTable, table=True): lobs: List["Lob"] = Relationship(back_populates="sandbox") # noqa: F821 students: List["Student"] = Relationship(back_populates="sandbox") # noqa: F821 applicants: List["Applicant"] = Relationship(back_populates="sandbox") # noqa: F821 class SandboxCreate(SandboxBase): tag: Optional[str] = None governance: Optional[Governance] = None governance_cas: Optional[Governance] = None class SandboxUpdate(SandboxBase): id: uuid.UUID tag: Optional[str] = None governance: Optional[Governance] = None governance_cas: Optional[Governance] = None class SandboxRead(SandboxBase): id: uuid.UUID created_at: datetime updated_at: datetime tag: Optional[str] = None governance: Optional[Governance] = None governance_cas: Optional[Governance] = None
[ "sqlmodel.Relationship", "sqlmodel.Field", "sqlalchemy.Column" ]
[((614, 634), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (619, 634), False, 'from sqlmodel import Field, Relationship\n'), ((846, 884), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""sandbox"""'}), "(back_populates='sandbox')\n", (858, 884), False, 'from sqlmodel import Field, Relationship\n'), ((931, 969), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""sandbox"""'}), "(back_populates='sandbox')\n", (943, 969), False, 'from sqlmodel import Field, Relationship\n'), ((1020, 1058), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""sandbox"""'}), "(back_populates='sandbox')\n", (1032, 1058), False, 'from sqlmodel import Field, Relationship\n'), ((686, 698), 'sqlalchemy.Column', 'Column', (['JSON'], {}), '(JSON)\n', (692, 698), False, 'from sqlalchemy import Column, JSON\n'), ((755, 767), 'sqlalchemy.Column', 'Column', (['JSON'], {}), '(JSON)\n', (761, 767), False, 'from sqlalchemy import Column, JSON\n')]
import random class MagicMissile: def __init__(self, spell_slot_lvl, spell_mode): try: self.lvl = int(spell_slot_lvl) except: raise TypeError("spell_slot_level should be an integer") if spell_mode == "roll_die" or spell_mode == "roll_dice": self.mode = spell_mode else: raise Exception("spell_mode should be 'roll_die', or 'roll_dice'") def _dart_num(self): if self.lvl == 0: print("You clearly have no magic ability,\ and are utterly weak") exit() elif self.lvl == 1: return 3 else: bonus = self.lvl - 1 return (3 + bonus) def _attack_damage(self): for x in range(1): return random.randint(1, 4) def _damage_roll_die(self): dart_num = self._dart_num() base_damage = self._attack_damage() damage_per_dart = (base_damage + 1) total_damage = damage_per_dart * dart_num return { "darts_fired": dart_num, "base_damage": base_damage, "damage_per_dart": damage_per_dart, "total_damage": total_damage } def _damage_roll_dice(self): dart_num = self._dart_num() base_damage_per_dart = {} total_damage_per_dart = {} for dart in range(dart_num): damage = self._attack_damage() base_damage_per_dart["dart_{}".format(dart + 1)]\ = (damage) total_damage_per_dart["dart_{}".format(dart + 1)]\ = (damage + 1) total_damage = sum(total_damage_per_dart.values()) return { "darts_fired": dart_num, "base_damage_by_dart": base_damage_per_dart, "total_damage_by_dart": total_damage_per_dart, "total_damage_all_darts": total_damage } def cast(self): if self.mode == "roll_die": return self._damage_roll_die() elif self.mode == "roll_dice": return self._damage_roll_dice()
[ "random.randint" ]
[((798, 818), 'random.randint', 'random.randint', (['(1)', '(4)'], {}), '(1, 4)\n', (812, 818), False, 'import random\n')]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import datetime import markupfield.fields from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='PaymentRequest', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100, blank=True)), ('description', markupfield.fields.MarkupField(rendered_field=True, blank=True)), ('description_markup_type', models.CharField(default=b'markdown', max_length=30, editable=False, blank=True, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('amount', models.FloatField()), ('_description_rendered', models.TextField(editable=False)), ('currency', models.CharField(default=b'USD', max_length=3, choices=[(b'USD', b'USD'), (b'EUR', b'EUR')])), ('settled', models.BooleanField(default=False)), ('created_on', models.DateTimeField(auto_now_add=True)), ('paid_on', models.DateTimeField(default=datetime.datetime.now, null=True, blank=True)), ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)), ], options={ }, bases=(models.Model,), ), ]
[ "django.db.models.TextField", "django.db.migrations.swappable_dependency", "django.db.models.CharField", "django.db.models.ForeignKey", "django.db.models.FloatField", "django.db.models.BooleanField", "django.db.models.AutoField", "django.db.models.DateTimeField" ]
[((252, 309), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (283, 309), False, 'from django.db import models, migrations\n'), ((448, 541), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (464, 541), False, 'from django.db import models, migrations\n'), ((566, 610), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)'}), '(max_length=100, blank=True)\n', (582, 610), False, 'from django.db import models, migrations\n'), ((755, 980), 'django.db.models.CharField', 'models.CharField', ([], {'default': "b'markdown'", 'max_length': '(30)', 'editable': '(False)', 'blank': '(True)', 'choices': "[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown',\n 'Markdown'), (b'restructuredtext', 'Restructured Text')]"}), "(default=b'markdown', max_length=30, editable=False, blank=\n True, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (\n b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])\n", (771, 980), False, 'from django.db import models, migrations\n'), ((1000, 1019), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (1017, 1019), False, 'from django.db import models, migrations\n'), ((1064, 1096), 'django.db.models.TextField', 'models.TextField', ([], {'editable': '(False)'}), '(editable=False)\n', (1080, 1096), False, 'from django.db import models, migrations\n'), ((1128, 1225), 'django.db.models.CharField', 'models.CharField', ([], {'default': "b'USD'", 'max_length': '(3)', 'choices': "[(b'USD', b'USD'), (b'EUR', b'EUR')]"}), "(default=b'USD', max_length=3, choices=[(b'USD', b'USD'), (\n b'EUR', b'EUR')])\n", (1144, 1225), False, 'from django.db import models, migrations\n'), ((1251, 1285), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1270, 1285), False, 'from django.db import models, migrations\n'), ((1319, 1358), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1339, 1358), False, 'from django.db import models, migrations\n'), ((1389, 1463), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'datetime.datetime.now', 'null': '(True)', 'blank': '(True)'}), '(default=datetime.datetime.now, null=True, blank=True)\n', (1409, 1463), False, 'from django.db import models, migrations\n'), ((1491, 1537), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': 'settings.AUTH_USER_MODEL'}), '(to=settings.AUTH_USER_MODEL)\n', (1508, 1537), False, 'from django.db import models, migrations\n')]
#!/usr/bin/env python import logging import tornado import tornado.web from tornado import httpserver from tornado import ioloop from tornado import websocket import os import sys import json import webbrowser import nbformat from queue import Queue from .execute import ThreadedExecutor from .cells import ExecutableNotebook STATIC_PATH = os.path.join(os.path.split(__file__)[0], 'client') ExecutableNotebook.STATIC_PATH = STATIC_PATH class Session(object): """ Global state of the server that doesn't belong to any single websocket connection but to the whole server session. """ def __init__(self): self._browser = None self._editor = None self.notebook = None self.buffers = {} def reset(self): self._browser = None self.editors = [] self.notebook = None self.buffers = {} @property def browser(self): return self._browser @browser.setter def browser(self, connection): if self._browser is not None: logging.info("WARNING: Only one browser connection expected") self._browser = connection @property def editor(self): return self._editor @editor.setter def editor(self, connection): if self._editor is not None: logging.info("WARNING: Only editor browser connection expected") self._editor = connection session = Session() class PeriodicOutputCallback(object): """ Sets up a periodic callback to push output to cells by polling from the queue pushed to by the ThreadedExecutor. """ def __init__(self, server, period=20): self.server = server self.notebook = None self.period = period def switch_notebook(self, notebook): self.notebook = notebook def start(self): self.callback = ioloop.PeriodicCallback(self.__call__, self.period) self.callback.start() def stop(self): self.callback.stop() def __call__(self): "Processes queue pushed to by ThreadedExecutor" try: val = self.server.queue.get_nowait() self.server.queue.task_done() result, status = val except: return if status == 'completion': position = self.notebook.completion_info['position'] relative_position = self.notebook.completion_info['relative_position'] # Adjusted for emacs point position start_delta = relative_position - result['cursor_start'] end_delta = relative_position - result['cursor_end'] result['cursor_start'] = position - start_delta result['cursor_end'] = position - end_delta session.editor.write_message(json.dumps({'cmd':'completion', 'data': result})) self.notebook.completion_info = None return if session.browser and (status == 'comm_open'): logging.info("REQUEST TO OPEN COMM FOR JS: %s" % result) self.notebook.message(session.browser, 'comm_open', result['content']) # e.g: # {'data': {}, 'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc', # 'target_name': 'ZOO', 'target_module': None} return elif session.browser and (status == 'comm_msg'): buffers = result['buffers'] metadata = result.get('metadata', {}) self.notebook.message(session.browser, 'comm_msg', # FIXME: redundant 'comm_msg' {'msg_type': 'comm_msg', 'metadata': metadata, 'content': result['content']}, buffers=buffers) return else: outnode, execution_count = result, status if session.browser: cell = self.notebook.find_cell(execution_count) if cell is None: return # There may be no cell if running a silent execution position = self.notebook.cell_position(cell) if execution_count is None: # For silent execution before *any* output return # What about silent execution after start? 
self.notebook.update_cell_outputs(session.browser, position, outnode) class WS(websocket.WebSocketHandler): def open(self): self.queue = Queue() self.output_callback = PeriodicOutputCallback(self) self.output_callback.start() logging.info("Connection opened") def toggle_notebook(self, name): notebook = session.buffers.get(name, None) if notebook is None: # Create notebook # Note that there are multiple connections and we want only one notebook! # (for now) notebook = ExecutableNotebook( (ThreadedExecutor, "threaded-kernel", self.queue), name=name, cells=list()) session.buffers[name] = notebook session.notebook = notebook self.output_callback.switch_notebook(notebook) def on_message(self, message): "Websocket on_message handler. Tracks connection type." try: payload = json.loads(message) except Exception as e: logging.info('JSON parse exception: %s' % str(e)) return if 'cmd' in payload: if payload['cmd'] in ['start_mirror']: # Verbose commands logging.info(u"Received %s command" % payload['cmd']) else: logging.info(u"Received message: {0:<.100}".format(message)) if payload.get('cmd') == 'reset_server': self.output_callback.stop() session.reset() return if payload.get('init', False) == 'editor': logging.info('Added editor client connection') session.editor = self return if payload.get('init', False) == 'browser': session.browser = self logging.info('Added browser client connection') if session.notebook and len(session.notebook.cells) > 0: # TODO: Needs updating logging.info("Restart with previously opened notebook") session.notebook.reload(self) # If you hit reload in the browser, the CSS needs to be re-sent session.notebook.update_theme(self, css=None) return # SOME COMMANDS (e.g mirroring) should happen even without a browser tab open! self.toggle_notebook(payload['name']) if payload.get('cmd', False) == 'reload_page': # Reload over the browser connection (currently assuming only one) if session.browser is not None: session.notebook.reload(session.browser) return editor_msg = session.notebook.dispatch(session.browser, payload) if (editor_msg is not None) and (session.editor is not None): session.editor.write_message(json.dumps(editor_msg)) def check_origin(self, origin): return True def on_close(self): logging.info("ON_CLOSE") if self is session.browser: session.browser = None self.output_callback.stop() def serve(ws_port=9999, html_port=8000, host='0.0.0.0'): import logging logging.basicConfig(level=logging.INFO) html_handler = (r'/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_PATH}) tornado.web.Application([html_handler]).listen(html_port) ws_server = httpserver.HTTPServer(tornado.web.Application([(r"/", WS)])) ws_server.listen(ws_port, host) logging.info("STARTED: Server started and listening") ioloop.IOLoop.instance().start()
[ "json.loads", "logging.basicConfig", "tornado.ioloop.IOLoop.instance", "tornado.ioloop.PeriodicCallback", "json.dumps", "logging.info", "os.path.split", "tornado.web.Application", "queue.Queue" ]
[((7401, 7440), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (7420, 7440), False, 'import logging\n'), ((7724, 7777), 'logging.info', 'logging.info', (['"""STARTED: Server started and listening"""'], {}), "('STARTED: Server started and listening')\n", (7736, 7777), False, 'import logging\n'), ((357, 380), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (370, 380), False, 'import os\n'), ((1858, 1909), 'tornado.ioloop.PeriodicCallback', 'ioloop.PeriodicCallback', (['self.__call__', 'self.period'], {}), '(self.__call__, self.period)\n', (1881, 1909), False, 'from tornado import ioloop\n'), ((4460, 4467), 'queue.Queue', 'Queue', ([], {}), '()\n', (4465, 4467), False, 'from queue import Queue\n'), ((4573, 4606), 'logging.info', 'logging.info', (['"""Connection opened"""'], {}), "('Connection opened')\n", (4585, 4606), False, 'import logging\n'), ((7187, 7211), 'logging.info', 'logging.info', (['"""ON_CLOSE"""'], {}), "('ON_CLOSE')\n", (7199, 7211), False, 'import logging\n'), ((7645, 7681), 'tornado.web.Application', 'tornado.web.Application', (["[('/', WS)]"], {}), "([('/', WS)])\n", (7668, 7681), False, 'import tornado\n'), ((1044, 1105), 'logging.info', 'logging.info', (['"""WARNING: Only one browser connection expected"""'], {}), "('WARNING: Only one browser connection expected')\n", (1056, 1105), False, 'import logging\n'), ((1309, 1373), 'logging.info', 'logging.info', (['"""WARNING: Only editor browser connection expected"""'], {}), "('WARNING: Only editor browser connection expected')\n", (1321, 1373), False, 'import logging\n'), ((3009, 3065), 'logging.info', 'logging.info', (["('REQUEST TO OPEN COMM FOR JS: %s' % result)"], {}), "('REQUEST TO OPEN COMM FOR JS: %s' % result)\n", (3021, 3065), False, 'import logging\n'), ((5280, 5299), 'json.loads', 'json.loads', (['message'], {}), '(message)\n', (5290, 5299), False, 'import json\n'), ((5878, 5924), 'logging.info', 'logging.info', (['"""Added editor client connection"""'], {}), "('Added editor client connection')\n", (5890, 5924), False, 'import logging\n'), ((6078, 6125), 'logging.info', 'logging.info', (['"""Added browser client connection"""'], {}), "('Added browser client connection')\n", (6090, 6125), False, 'import logging\n'), ((7549, 7588), 'tornado.web.Application', 'tornado.web.Application', (['[html_handler]'], {}), '([html_handler])\n', (7572, 7588), False, 'import tornado\n'), ((7782, 7806), 'tornado.ioloop.IOLoop.instance', 'ioloop.IOLoop.instance', ([], {}), '()\n', (7804, 7806), False, 'from tornado import ioloop\n'), ((2769, 2818), 'json.dumps', 'json.dumps', (["{'cmd': 'completion', 'data': result}"], {}), "({'cmd': 'completion', 'data': result})\n", (2779, 2818), False, 'import json\n'), ((5528, 5581), 'logging.info', 'logging.info', (["(u'Received %s command' % payload['cmd'])"], {}), "(u'Received %s command' % payload['cmd'])\n", (5540, 5581), False, 'import logging\n'), ((6234, 6289), 'logging.info', 'logging.info', (['"""Restart with previously opened notebook"""'], {}), "('Restart with previously opened notebook')\n", (6246, 6289), False, 'import logging\n'), ((7071, 7093), 'json.dumps', 'json.dumps', (['editor_msg'], {}), '(editor_msg)\n', (7081, 7093), False, 'import json\n')]
import torch
import torch.nn as nn
from torchvision.datasets import CIFAR10
from torch.optim import Adam
from torchvision.models import resnet50
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
import os
import time

# --------------------- Configuration ------------------------------
# Dataset root, project root and training-output directories (lab machine; adjust to your own setup)
root_dataset = '/home/team/xiaonan/Dataset/'
root_project = '/home/team/xiaonan/experients/'
root_data_save = '/home/team/xiaonan/data_save/'
# Dataset root, project root and training-output directories (local machine)
# self.root_dataset = '/home/xiaonan/Dataset/'
# self.root_project = '/home/xiaonan/experients/'
# self.root_data_save = '/home/xiaonan/data_save/'
# Dataset root, project root and training-output directories (server)
# root_dataset = 'Dataset/'
# root_project = ''
# root_data_save = 'data_save/'

# Model checkpoint directory and log directory
model_dir = root_data_save + 'checkpoints/'
log_dir = root_data_save + 'log/'
# Create the folders if they do not exist
if os.path.exists(root_data_save) is False:
    os.mkdir(root_data_save)
if os.path.exists(model_dir) is False:
    os.mkdir(model_dir)
if os.path.exists(log_dir) is False:
    os.mkdir(log_dir)

# CIFAR-10 dataset directory; model name; number of classes
cifar_10_dir = root_dataset + 'cifar-10/'
model_dir = model_dir + 'cifar10_resnet50_v1' + '.pth'
log_dir = log_dir + 'cifar10_resnet50_v1'
num_classes = 10
if os.path.exists(log_dir) is False:
    os.mkdir(log_dir)

# Check which device is available
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device:{}'.format(device))

# Hyperparameters
epochs = 200
batch_size = 32
learning_rate = 0.1
lr_step_size = 30
weight_decay = 1e-4
momentum = 0.9
# Mean and standard deviation values
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.255]

# ----------------------------- Load the dataset --------------------------------
# Preprocessing for the training and validation/test sets
train_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)),
                                            #transforms.RandomResizedCrop(224),
                                            transforms.RandomHorizontalFlip(),
                                            transforms.ColorJitter(brightness=0.4,
                                                                   saturation=0.4,
                                                                   hue=0.4,
                                                                   contrast=0.4),
                                            transforms.ToTensor(),
                                            transforms.Normalize(mean=mean, std=std)])
valid_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)),
                                            transforms.ToTensor(),
                                            transforms.Normalize(mean=mean, std=std)])

# Build the training and test sets
train_dataset = CIFAR10(root=cifar_10_dir, train=True, transform=train_data_preprocess)
test_dataset = CIFAR10(root=cifar_10_dir, train=False, transform=valid_data_preprocess)

# Data loaders for the training and test sets
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True, num_workers=4)

# ------------------------ Build the network, define the loss function and optimizer ------------------------
net = resnet50()
print(net)
# Replace the last layer of the network
fc_in_features = net.fc.in_features  # number of input features of the last layer
print(fc_in_features)
net.fc = nn.Linear(in_features=fc_in_features, out_features=num_classes)
print(net)

# Move the network to the GPU
net.to(device)
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = Adam(params=net.parameters(), weight_decay=weight_decay)

# ---------------------- Train the network ------------------------------------
print('Starting training....')

# Writer used to record the training logs
writer = SummaryWriter(log_dir)


# ------------------ Training and validation subroutines --------------------
# Training subroutine
def _train(train_loader, num_step):
    print(' training stage....')
    # Put the network in training mode; reset the gradient tensors
    net.train()
    optimizer.zero_grad()
    # Accuracy, loss, number of batches and total number of samples
    train_acc = 0.0
    train_loss = 0.0
    num_batch = 0
    num_samples = 0

    # Iterate over the training set
    for index, data in enumerate(train_loader, start=0):
        # Get the batch of training data and move it to the GPU
        images, labels = data
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass; apply softmax so the predictions form 0-1 probabilities; compute the loss
        outputs = net(images)
        outputs = F.softmax(outputs, dim=1)
        loss = criterion(outputs, labels)
        # Index of the highest-probability class for each prediction
        preds = torch.argmax(outputs, dim=1)

        # Batch accuracy: fraction of correctly predicted samples
        # Accumulate accuracy, loss and batch count
        acc = torch.sum(preds == labels).item()
        train_acc += acc
        train_loss += loss
        num_batch += 1
        num_samples += images.size(0)

        # Backpropagation (compute gradients); optimizer step (update parameters); reset gradients
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        # Periodically report loss and accuracy
        if (index + 1) % 30 == 0:
            # Print the loss and accuracy
            print(' batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\n'.
                  format(index, loss, acc / images.size(0)))

            # Log the per-batch loss and accuracy
            # writer.add_scalar('Train/Loss', scalar_value=loss, global_step=index)  # single tag
            writer.add_scalars(main_tag='Train(batch)',
                               tag_scalar_dict={'batch_loss': loss,
                                                'batch_accuracy': acc / images.size(0)},
                               global_step=num_step)

        # Update the global step
        num_step += 1

    # Compute the training accuracy and loss
    train_acc = train_acc / num_samples
    train_loss = train_loss / num_batch

    return train_acc, train_loss, num_step


# Validation subroutine
def _valid(valid_loader):
    print(' valid stage...')
    # Put the network in eval mode; accumulate accuracy and loss over all samples; count batches
    net.eval()
    valid_acc = 0.0
    valid_loss = 0.0
    num_batch = 0
    num_samples = 0

    # Evaluate on the test set
    with torch.no_grad():  # no gradients, to reduce memory usage
        for images, labels in valid_loader:
            # Move the test data to the GPU
            images, labels = images.to(device), labels.to(device)

            # Forward pass; apply softmax so the predictions form 0-1 probabilities
            outputs = net(images)
            outputs = F.softmax(outputs, dim=1)

            # Index of the highest-probability class; compute the loss
            pred = torch.argmax(outputs, dim=1)
            loss = criterion(outputs, labels)

            # Count how many predictions match the labels; accumulate the loss
            valid_acc += torch.sum((pred == labels)).item()
            valid_loss += loss
            num_batch += 1
            num_samples += images.size(0)

    # Compute the validation accuracy and loss
    valid_acc = valid_acc / num_samples
    valid_loss = valid_loss / num_batch

    return valid_acc, valid_loss


# ---------------------------- Epoch training --------------------------------
# Training start time, best validation accuracy (used to save the best model) and global step counter
start_time = time.time()
best_acc = 0.0
num_step = 0

# Run the training epochs
for epoch in range(epochs):
    # Record the epoch start time and print the epoch info
    epoch_start_time = time.time()
    print('Epoch {}/{}'.format(epoch, epochs - 1))
    print('-' * 20)

    # Train
    train_acc, train_loss, num_step = _train(train_loader, num_step)
    # Validate
    valid_acc, valid_loss = _valid(test_loader)

    # Print the average training/validation loss and accuracy for this epoch
    epoch_time = time.time() - epoch_start_time
    print(' epoch:{}/{}, time:{:.0f}m {:.0f}s'.
          format(epoch, epochs, epoch_time // 60, epoch_time % 60))
    print(' train_loss:{:.4f}, train_acc:{:.4f}\n valid_loss:{:.4f}, valid_acc:{:.4f}'.
          format(train_loss, train_acc, valid_loss, valid_acc))

    # Log the results
    writer.add_scalars(main_tag='Train(epoch)',
                       tag_scalar_dict={'train_loss': train_loss,
                                        'train_acc': train_acc,
                                        'valid_loss': valid_loss,
                                        'valid_acc': valid_acc},
                       global_step=epoch)

    # Keep the best model parameters
    if valid_acc > best_acc:
        # Update the best accuracy and save the best model parameters
        best_acc = valid_acc
        torch.save(net.state_dict(), model_dir)
        print(' epoch:{}, update model...'.format(epoch))
    print()

# Total training time and best accuracy
end_time = time.time() - start_time
print('Training complete in {:.0f}m {:.0f}s'.format(end_time // 60, end_time % 60))
print('Best val Acc: {:4f}'.format(best_acc))

# Close the writer
writer.close()
[ "os.mkdir", "torch.argmax", "torchvision.datasets.CIFAR10", "torchvision.transforms.Normalize", "torch.no_grad", "torch.utils.data.DataLoader", "os.path.exists", "torch.utils.tensorboard.SummaryWriter", "torch.nn.Linear", "torchvision.transforms.RandomHorizontalFlip", "torchvision.models.resnet50", "torch.cuda.is_available", "torch.sum", "torchvision.transforms.Resize", "torchvision.transforms.ColorJitter", "torch.nn.CrossEntropyLoss", "time.time", "torch.nn.functional.softmax", "torchvision.transforms.ToTensor" ]
[((2759, 2830), 'torchvision.datasets.CIFAR10', 'CIFAR10', ([], {'root': 'cifar_10_dir', 'train': '(True)', 'transform': 'train_data_preprocess'}), '(root=cifar_10_dir, train=True, transform=train_data_preprocess)\n', (2766, 2830), False, 'from torchvision.datasets import CIFAR10\n'), ((2846, 2918), 'torchvision.datasets.CIFAR10', 'CIFAR10', ([], {'root': 'cifar_10_dir', 'train': '(False)', 'transform': 'valid_data_preprocess'}), '(root=cifar_10_dir, train=False, transform=valid_data_preprocess)\n', (2853, 2918), False, 'from torchvision.datasets import CIFAR10\n'), ((2951, 3040), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(4)'}), '(dataset=train_dataset, batch_size=batch_size, shuffle=True,\n num_workers=4)\n', (2961, 3040), False, 'from torch.utils.data import DataLoader\n'), ((3103, 3191), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(4)'}), '(dataset=test_dataset, batch_size=batch_size, shuffle=True,\n num_workers=4)\n', (3113, 3191), False, 'from torch.utils.data import DataLoader\n'), ((3311, 3321), 'torchvision.models.resnet50', 'resnet50', ([], {}), '()\n', (3319, 3321), False, 'from torchvision.models import resnet50\n'), ((3427, 3490), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'fc_in_features', 'out_features': 'num_classes'}), '(in_features=fc_in_features, out_features=num_classes)\n', (3436, 3490), True, 'import torch.nn as nn\n'), ((3556, 3577), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3575, 3577), True, 'import torch.nn as nn\n'), ((3759, 3781), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['log_dir'], {}), '(log_dir)\n', (3772, 3781), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((6755, 6766), 'time.time', 'time.time', ([], {}), '()\n', (6764, 6766), False, 'import time\n'), ((971, 1001), 'os.path.exists', 'os.path.exists', (['root_data_save'], {}), '(root_data_save)\n', (985, 1001), False, 'import os\n'), ((1016, 1040), 'os.mkdir', 'os.mkdir', (['root_data_save'], {}), '(root_data_save)\n', (1024, 1040), False, 'import os\n'), ((1044, 1069), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (1058, 1069), False, 'import os\n'), ((1084, 1103), 'os.mkdir', 'os.mkdir', (['model_dir'], {}), '(model_dir)\n', (1092, 1103), False, 'import os\n'), ((1107, 1130), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (1121, 1130), False, 'import os\n'), ((1145, 1162), 'os.mkdir', 'os.mkdir', (['log_dir'], {}), '(log_dir)\n', (1153, 1162), False, 'import os\n'), ((1349, 1372), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (1363, 1372), False, 'import os\n'), ((1387, 1404), 'os.mkdir', 'os.mkdir', (['log_dir'], {}), '(log_dir)\n', (1395, 1404), False, 'import os\n'), ((6878, 6889), 'time.time', 'time.time', ([], {}), '()\n', (6887, 6889), False, 'import time\n'), ((7983, 7994), 'time.time', 'time.time', ([], {}), '()\n', (7992, 7994), False, 'import time\n'), ((1449, 1474), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1472, 1474), False, 'import torch\n'), ((1831, 1865), 'torchvision.transforms.Resize', 'transforms.Resize', ([], {'size': '(224, 224)'}), '(size=(224, 224))\n', (1848, 1865), True, 'import torchvision.transforms as transforms\n'), ((1991, 2024), 'torchvision.transforms.RandomHorizontalFlip', 
'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2022, 2024), True, 'import torchvision.transforms as transforms\n'), ((2070, 2147), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.4)', 'saturation': '(0.4)', 'hue': '(0.4)', 'contrast': '(0.4)'}), '(brightness=0.4, saturation=0.4, hue=0.4, contrast=0.4)\n', (2092, 2147), True, 'import torchvision.transforms as transforms\n'), ((2260, 2281), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2279, 2281), True, 'import torchvision.transforms as transforms\n'), ((2327, 2367), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (2347, 2367), True, 'import torchvision.transforms as transforms\n'), ((2479, 2513), 'torchvision.transforms.Resize', 'transforms.Resize', ([], {'size': '(224, 224)'}), '(size=(224, 224))\n', (2496, 2513), True, 'import torchvision.transforms as transforms\n'), ((2558, 2579), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2577, 2579), True, 'import torchvision.transforms as transforms\n'), ((2624, 2664), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (2644, 2664), True, 'import torchvision.transforms as transforms\n'), ((4396, 4421), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (4405, 4421), True, 'import torch.nn.functional as F\n'), ((4510, 4538), 'torch.argmax', 'torch.argmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (4522, 4538), False, 'import torch\n'), ((5871, 5886), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5884, 5886), False, 'import torch\n'), ((7143, 7154), 'time.time', 'time.time', ([], {}), '()\n', (7152, 7154), False, 'import time\n'), ((6144, 6169), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (6153, 6169), True, 'import torch.nn.functional as F\n'), ((6228, 6256), 'torch.argmax', 'torch.argmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (6240, 6256), False, 'import torch\n'), ((4617, 4643), 'torch.sum', 'torch.sum', (['(preds == labels)'], {}), '(preds == labels)\n', (4626, 4643), False, 'import torch\n'), ((6365, 6390), 'torch.sum', 'torch.sum', (['(pred == labels)'], {}), '(pred == labels)\n', (6374, 6390), False, 'import torch\n')]
import discord from discord import app_commands from discord.ext import commands class owner(commands.Cog): def __init__(self, bot: commands.Bot) -> None: self.bot = bot @app_commands.command(name='eval', description='evaluate') async def eval(self, interaction: discord.Interaction, code: str) -> None: if await interaction.client.is_owner(interaction.user): try: result = eval(code) await interaction.response.send_message(f'> {result}', ephemeral=True) except Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else: await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True) @app_commands.command(name='load', description='load or reload a cog') async def load(self, interaction: discord.Interaction, cog: str, reload: bool) -> None: if await interaction.client.is_owner(interaction.user): try: if reload: await interaction.response.send_message(f'Reloading `{cog}`...', ephemeral=True) await self.bot.reload_extension(cog) await interaction.edit_original_message(content=f'Reloaded `{cog}`!') else: await interaction.response.send_message(f'Loading `{cog}`...', ephemeral=True) await self.bot.load_extension(cog) await interaction.edit_original_message(content=f'Loaded `{cog}`!') except Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else: await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True) @app_commands.command(name='check_perms', description='check the permissions of the user') async def check_perms(self, interaction: discord.Interaction, permissions: str, user: discord.User) -> None: from src.perms import perms # check if author is bot owner if await perms(interaction, 'bot_owner'): # run perms result = await perms(interaction, permissions, user) # send result await interaction.response.send_message(f'{result}', ephemeral=True) else: await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True)
[ "discord.app_commands.command", "src.perms.perms" ]
[((190, 247), 'discord.app_commands.command', 'app_commands.command', ([], {'name': '"""eval"""', 'description': '"""evaluate"""'}), "(name='eval', description='evaluate')\n", (210, 247), False, 'from discord import app_commands\n'), ((769, 838), 'discord.app_commands.command', 'app_commands.command', ([], {'name': '"""load"""', 'description': '"""load or reload a cog"""'}), "(name='load', description='load or reload a cog')\n", (789, 838), False, 'from discord import app_commands\n'), ((1789, 1883), 'discord.app_commands.command', 'app_commands.command', ([], {'name': '"""check_perms"""', 'description': '"""check the permissions of the user"""'}), "(name='check_perms', description=\n 'check the permissions of the user')\n", (1809, 1883), False, 'from discord import app_commands\n'), ((2085, 2116), 'src.perms.perms', 'perms', (['interaction', '"""bot_owner"""'], {}), "(interaction, 'bot_owner')\n", (2090, 2116), False, 'from src.perms import perms\n'), ((2169, 2206), 'src.perms.perms', 'perms', (['interaction', 'permissions', 'user'], {}), '(interaction, permissions, user)\n', (2174, 2206), False, 'from src.perms import perms\n')]
from astropy import cosmology as cosmo import autofit as af from autoarray import preloads as pload from autoarray.exc import PixelizationException, InversionException, GridException from autoarray.inversion import pixelizations as pix, inversions as inv from autofit.exc import FitException from autogalaxy.analysis import result as res from autogalaxy.analysis import visualizer as vis from autogalaxy.fit import fit_imaging, fit_interferometer from autogalaxy.galaxy import galaxy as g from autogalaxy.plane import plane as pl class Analysis(af.Analysis): def __init__(self, hyper_result=None, cosmology=cosmo.Planck15): self.hyper_result = hyper_result self.cosmology = cosmology class AnalysisDataset(Analysis): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__(hyper_result=hyper_result, cosmology=cosmology) self.dataset = dataset if self.hyper_result is not None: if hyper_result.search is not None: hyper_result.search.paths = None self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_image_path_dict = None self.hyper_model_image = None self.settings_pixelization = settings_pixelization self.settings_inversion = settings_inversion self.preloads = preloads def set_hyper_dataset(self, result): self.hyper_galaxy_image_path_dict = result.hyper_galaxy_image_path_dict self.hyper_model_image = result.hyper_model_image def hyper_image_sky_for_instance(self, instance): if hasattr(instance, "hyper_image_sky"): return instance.hyper_image_sky def hyper_background_noise_for_instance(self, instance): if hasattr(instance, "hyper_background_noise"): return instance.hyper_background_noise def plane_for_instance(self, instance): return pl.Plane(galaxies=instance.galaxies) def associate_hyper_images(self, instance: af.ModelInstance) -> af.ModelInstance: """ Takes images from the last result, if there is one, and associates them with galaxies in this search where full-path galaxy names match. If the galaxy collection has a different name then an association is not made. e.g. galaxies.lens will match with: galaxies.lens but not with: galaxies.lens galaxies.source Parameters ---------- instance A model instance with 0 or more galaxies in its tree Returns ------- instance The input instance with images associated with galaxies where possible. 
""" if self.hyper_galaxy_image_path_dict is not None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_image_path_dict: galaxy.hyper_model_image = self.hyper_model_image galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[ galaxy_path ] return instance def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): paths.save_object("data", self.dataset.data) paths.save_object("noise_map", self.dataset.noise_map) paths.save_object("settings_dataset", self.dataset.settings) paths.save_object("settings_inversion", self.settings_inversion) paths.save_object("settings_pixelization", self.settings_pixelization) paths.save_object("cosmology", self.cosmology) if self.hyper_model_image is not None: paths.save_object("hyper_model_image", self.hyper_model_image) if self.hyper_galaxy_image_path_dict is not None: paths.save_object( "hyper_galaxy_image_path_dict", self.hyper_galaxy_image_path_dict ) class AnalysisImaging(AnalysisDataset): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) self.dataset = dataset @property def imaging(self): return self.dataset def log_likelihood_function(self, instance): """ Determine the fit of a lens galaxy and source galaxy to the imaging in this lens. Parameters ---------- instance A model instance with attributes Returns ------- fit : Fit A fractional value indicating how well this model fit and the model imaging itself """ self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) return fit.figure_of_merit except (PixelizationException, InversionException, GridException) as e: raise FitException from e def fit_imaging_for_plane( self, plane, hyper_image_sky, hyper_background_noise, use_hyper_scalings=True ): return fit_imaging.FitImaging( imaging=self.dataset, plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance, during_analysis): instance = self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_imaging(imaging=self.imaging) visualizer.visualize_fit_imaging(fit=fit, during_analysis=during_analysis) if fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images( 
hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=None, hyper_background_noise=None, use_hyper_scalings=False, ) visualizer.visualize_fit_imaging( fit=fit, during_analysis=during_analysis, subfolders="fit_no_hyper" ) def make_result( self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ): return res.ResultImaging( samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object("psf", self.dataset.psf_unormalized) paths.save_object("mask", self.dataset.mask) class AnalysisInterferometer(AnalysisDataset): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) if self.hyper_result is not None: self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_visibilities_path_dict = None self.hyper_model_visibilities = None def set_hyper_dataset(self, result): super().set_hyper_dataset(result=result) self.hyper_model_visibilities = result.hyper_model_visibilities self.hyper_galaxy_visibilities_path_dict = ( result.hyper_galaxy_visibilities_path_dict ) @property def interferometer(self): return self.dataset def log_likelihood_function(self, instance): """ Determine the fit of a lens galaxy and source galaxy to the interferometer in this lens. Parameters ---------- instance A model instance with attributes Returns ------- fit : Fit A fractional value indicating how well this model fit and the model interferometer itself """ self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) return fit.figure_of_merit except (PixelizationException, InversionException, GridException) as e: raise FitException from e def associate_hyper_visibilities( self, instance: af.ModelInstance ) -> af.ModelInstance: """ Takes visibilities from the last result, if there is one, and associates them with galaxies in this search where full-path galaxy names match. If the galaxy collection has a different name then an association is not made. e.g. galaxies.lens will match with: galaxies.lens but not with: galaxies.lens galaxies.source Parameters ---------- instance A model instance with 0 or more galaxies in its tree Returns ------- instance The input instance with visibilities associated with galaxies where possible. 
""" if self.hyper_galaxy_visibilities_path_dict is not None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_visibilities_path_dict: galaxy.hyper_model_visibilities = self.hyper_model_visibilities galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[ galaxy_path ] return instance def fit_interferometer_for_plane( self, plane, hyper_background_noise, use_hyper_scalings=True ): return fit_interferometer.FitInterferometer( interferometer=self.dataset, plane=plane, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance, during_analysis): self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_interferometer(interferometer=self.interferometer) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis ) if fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=None, use_hyper_scalings=False ) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis, subfolders="fit_no_hyper" ) def make_result( self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ): return res.ResultInterferometer( samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object("uv_wavelengths", self.dataset.uv_wavelengths) paths.save_object("real_space_mask", self.dataset.real_space_mask)
[ "autogalaxy.analysis.visualizer.Visualizer", "autogalaxy.plane.plane.Plane", "autoarray.inversion.pixelizations.SettingsPixelization", "autogalaxy.fit.fit_imaging.FitImaging", "autogalaxy.analysis.result.ResultInterferometer", "autogalaxy.analysis.result.ResultImaging", "autogalaxy.fit.fit_interferometer.FitInterferometer", "autoarray.inversion.inversions.SettingsInversion", "autoarray.preloads.Preloads" ]
[((908, 934), 'autoarray.inversion.pixelizations.SettingsPixelization', 'pix.SettingsPixelization', ([], {}), '()\n', (932, 934), True, 'from autoarray.inversion import pixelizations as pix, inversions as inv\n'), ((964, 987), 'autoarray.inversion.inversions.SettingsInversion', 'inv.SettingsInversion', ([], {}), '()\n', (985, 987), True, 'from autoarray.inversion import pixelizations as pix, inversions as inv\n'), ((1007, 1023), 'autoarray.preloads.Preloads', 'pload.Preloads', ([], {}), '()\n', (1021, 1023), True, 'from autoarray import preloads as pload\n'), ((2197, 2233), 'autogalaxy.plane.plane.Plane', 'pl.Plane', ([], {'galaxies': 'instance.galaxies'}), '(galaxies=instance.galaxies)\n', (2205, 2233), True, 'from autogalaxy.plane import plane as pl\n'), ((4505, 4531), 'autoarray.inversion.pixelizations.SettingsPixelization', 'pix.SettingsPixelization', ([], {}), '()\n', (4529, 4531), True, 'from autoarray.inversion import pixelizations as pix, inversions as inv\n'), ((4561, 4584), 'autoarray.inversion.inversions.SettingsInversion', 'inv.SettingsInversion', ([], {}), '()\n', (4582, 4584), True, 'from autoarray.inversion import pixelizations as pix, inversions as inv\n'), ((4604, 4620), 'autoarray.preloads.Preloads', 'pload.Preloads', ([], {}), '()\n', (4618, 4620), True, 'from autoarray import preloads as pload\n'), ((6297, 6581), 'autogalaxy.fit.fit_imaging.FitImaging', 'fit_imaging.FitImaging', ([], {'imaging': 'self.dataset', 'plane': 'plane', 'hyper_image_sky': 'hyper_image_sky', 'hyper_background_noise': 'hyper_background_noise', 'use_hyper_scalings': 'use_hyper_scalings', 'settings_pixelization': 'self.settings_pixelization', 'settings_inversion': 'self.settings_inversion'}), '(imaging=self.dataset, plane=plane, hyper_image_sky=\n hyper_image_sky, hyper_background_noise=hyper_background_noise,\n use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.\n settings_pixelization, settings_inversion=self.settings_inversion)\n', (6319, 6581), False, 'from autogalaxy.fit import fit_imaging, fit_interferometer\n'), ((7291, 7338), 'autogalaxy.analysis.visualizer.Visualizer', 'vis.Visualizer', ([], {'visualize_path': 'paths.image_path'}), '(visualize_path=paths.image_path)\n', (7305, 7338), True, 'from autogalaxy.analysis import visualizer as vis\n'), ((8424, 8501), 'autogalaxy.analysis.result.ResultImaging', 'res.ResultImaging', ([], {'samples': 'samples', 'model': 'model', 'analysis': 'self', 'search': 'search'}), '(samples=samples, model=model, analysis=self, search=search)\n', (8441, 8501), True, 'from autogalaxy.analysis import result as res\n'), ((8982, 9008), 'autoarray.inversion.pixelizations.SettingsPixelization', 'pix.SettingsPixelization', ([], {}), '()\n', (9006, 9008), True, 'from autoarray.inversion import pixelizations as pix, inversions as inv\n'), ((9038, 9061), 'autoarray.inversion.inversions.SettingsInversion', 'inv.SettingsInversion', ([], {}), '()\n', (9059, 9061), True, 'from autoarray.inversion import pixelizations as pix, inversions as inv\n'), ((9081, 9097), 'autoarray.preloads.Preloads', 'pload.Preloads', ([], {}), '()\n', (9095, 9097), True, 'from autoarray import preloads as pload\n'), ((12495, 12767), 'autogalaxy.fit.fit_interferometer.FitInterferometer', 'fit_interferometer.FitInterferometer', ([], {'interferometer': 'self.dataset', 'plane': 'plane', 'hyper_background_noise': 'hyper_background_noise', 'use_hyper_scalings': 'use_hyper_scalings', 'settings_pixelization': 'self.settings_pixelization', 'settings_inversion': 'self.settings_inversion'}), 
'(interferometer=self.dataset, plane=\n plane, hyper_background_noise=hyper_background_noise,\n use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.\n settings_pixelization, settings_inversion=self.settings_inversion)\n', (12531, 12767), False, 'from autogalaxy.fit import fit_imaging, fit_interferometer\n'), ((13320, 13367), 'autogalaxy.analysis.visualizer.Visualizer', 'vis.Visualizer', ([], {'visualize_path': 'paths.image_path'}), '(visualize_path=paths.image_path)\n', (13334, 13367), True, 'from autogalaxy.analysis import visualizer as vis\n'), ((14441, 14529), 'autogalaxy.analysis.result.ResultInterferometer', 'res.ResultInterferometer', ([], {'samples': 'samples', 'model': 'model', 'analysis': 'self', 'search': 'search'}), '(samples=samples, model=model, analysis=self,\n search=search)\n', (14465, 14529), True, 'from autogalaxy.analysis import result as res\n')]
from django.shortcuts import render from django.urls import reverse_lazy from customer.owner import * from .filters import * from .forms import * # class CustomerIndexView(LoginRequiredMixin, View): # login_url = '/login/' # redirect_field_name = 'redirect_to' class CustomerCreateView(OwnerCreateView): form_class = CustomerCreateForm paginate_by = 10 template_name = 'customer/create_customer.html' success_message = "%(name)s was created successfully." success_url = reverse_lazy("customer:customer_list") class CustomerListView(OwnerListView): filterset_class = CustomerFilter queryset = Customer.objects.filter(is_deleted=False) template_name = 'customer/customer_list.html' paginate_by = 10 class CustomerUpdateView(OwnerUpdateView): model = Customer template_name = 'customer/customer_edit.html' form_class = CustomerUpdateForm success_message = "%(name)s was updated successfully." success_url = reverse_lazy("customer:customer_list") class CustomerDeleteView(OwnerDeleteView): model = Customer template_name = 'customer/customer_delete.html' success_url = reverse_lazy('customer:customer_list') success_message = "Session %(name)s was removed successfully" class CustomerDetailsView(OwnerDetailView): model = Customer template_name = 'customer/customer_details.html' def CustomerLedgerView(request, pk): customer = Customer.objects.get(id=pk) ctx = {'customer': customer} return render(request, 'customer/customer_ledger.html', ctx)
[ "django.shortcuts.render", "django.urls.reverse_lazy" ]
[((502, 540), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""customer:customer_list"""'], {}), "('customer:customer_list')\n", (514, 540), False, 'from django.urls import reverse_lazy\n'), ((976, 1014), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""customer:customer_list"""'], {}), "('customer:customer_list')\n", (988, 1014), False, 'from django.urls import reverse_lazy\n'), ((1151, 1189), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""customer:customer_list"""'], {}), "('customer:customer_list')\n", (1163, 1189), False, 'from django.urls import reverse_lazy\n'), ((1502, 1555), 'django.shortcuts.render', 'render', (['request', '"""customer/customer_ledger.html"""', 'ctx'], {}), "(request, 'customer/customer_ledger.html', ctx)\n", (1508, 1555), False, 'from django.shortcuts import render\n')]
from selenium import webdriver
from selenium.webdriver.common.by import By
from pageObjects.footer import Footer
from utilities.customLogger import LogGen
from utilities.siteConfig import siteconfig
from selenium.common.exceptions import ElementNotInteractableException
import pandas as pd
import time


class Test_1:
    base_url = siteconfig.getsiteURl()
    base_xpath = siteconfig.getfooterXPATH()
    logger = LogGen.loggen()

    def test_footer_links(self, setup):
        self.logger.info("*************** footer links test started **************")
        driver = setup
        driver.get(self.base_url)
        driver.maximize_window()
        time.sleep(3)
        footers = Footer(driver)
        footer_xpath = footers.footer_links_xpath(self.base_xpath)
        time.sleep(3)
        df = {
            "S.no": [],
            "Link_name": [],
            "XPATH": [],
            "URL": [],
            "Directed URL": [],
            "Validation": []
        }
        result = []
        for n, i in enumerate(footer_xpath):
            footer = driver.find_element_by_xpath(i)
            footer.location_once_scrolled_into_view
            href = footer.get_attribute("href")
            text = footer.get_attribute("text")
            try:
                footer.click()
            except ElementNotInteractableException as e:
                self.logger.info("**************** {} ************".format(e))
                self.logger.info("************ {} footer failed *********".format(text))
                # The click failed, so the browser stayed on the current page; record that URL
                # here so the "finally" block below does not raise a NameError.
                current_url = driver.current_url
                df["Validation"].append("failed")
                result.append("failed")
            else:
                current_url = driver.current_url
                time.sleep(1)
                driver.back()
                df["Validation"].append("passed")
                self.logger.info("************ {} footer passed *********".format(text))
                result.append("passed")
            finally:
                df["S.no"].append(n + 1)
                df["Link_name"].append(text)
                df["URL"].append(href)
                df["Directed URL"].append(current_url)
                df["XPATH"].append(i)

        Data = pd.DataFrame(df, index=df["S.no"])
        output = pd.ExcelWriter(".//reports/footers_links_validation.xlsx")
        Data.to_excel(output)
        output.save()
        if "failed" not in result:
            assert True
        else:
            assert False
        driver.close()
[ "pandas.DataFrame", "utilities.siteConfig.siteconfig.getfooterXPATH", "utilities.customLogger.LogGen.loggen", "time.sleep", "pageObjects.footer.Footer", "utilities.siteConfig.siteconfig.getsiteURl", "pandas.ExcelWriter" ]
[((332, 355), 'utilities.siteConfig.siteconfig.getsiteURl', 'siteconfig.getsiteURl', ([], {}), '()\n', (353, 355), False, 'from utilities.siteConfig import siteconfig\n'), ((373, 400), 'utilities.siteConfig.siteconfig.getfooterXPATH', 'siteconfig.getfooterXPATH', ([], {}), '()\n', (398, 400), False, 'from utilities.siteConfig import siteconfig\n'), ((414, 429), 'utilities.customLogger.LogGen.loggen', 'LogGen.loggen', ([], {}), '()\n', (427, 429), False, 'from utilities.customLogger import LogGen\n'), ((654, 667), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (664, 667), False, 'import time\n'), ((686, 700), 'pageObjects.footer.Footer', 'Footer', (['driver'], {}), '(driver)\n', (692, 700), False, 'from pageObjects.footer import Footer\n'), ((776, 789), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (786, 789), False, 'import time\n'), ((2176, 2210), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {'index': "df['S.no']"}), "(df, index=df['S.no'])\n", (2188, 2210), True, 'import pandas as pd\n'), ((2228, 2286), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['""".//reports/footers_links_validation.xlsx"""'], {}), "('.//reports/footers_links_validation.xlsx')\n", (2242, 2286), True, 'import pandas as pd\n'), ((1690, 1703), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1700, 1703), False, 'import time\n')]
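The test above receives a pytest fixture named "setup" and treats its value as a live WebDriver. The fixture itself is not part of the snippet; a minimal conftest.py sketch that would satisfy it is given below, where the browser choice and the teardown are assumptions.

# Hypothetical conftest.py sketch; the fixture name "setup" comes from the test
# signature above, the Chrome choice and the teardown are assumptions.
import pytest
from selenium import webdriver


@pytest.fixture()
def setup():
    driver = webdriver.Chrome()  # assumes chromedriver is available on PATH
    yield driver
    driver.quit()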
import concurrent.futures import logging import logging.handlers import os import re import shutil import sys import time import urllib import htsget import psutil from tqdm import tqdm from pyega3.libs import utils DOWNLOAD_FILE_MEMORY_BUFFER_SIZE = 32 * 1024 class DataFile: DEFAULT_SLICE_SIZE = 100 * 1024 * 1024 temporary_files_should_be_deleted = False def __init__(self, data_client, file_id, display_file_name=None, file_name=None, size=None, unencrypted_checksum=None, status=None): self.data_client = data_client self.id = file_id self.temporary_files = set() self._display_file_name = display_file_name self._file_name = file_name self._file_size = size self._unencrypted_checksum = unencrypted_checksum self._file_status = status def load_metadata(self): res = self.data_client.get_json(f"/metadata/files/{self.id}") # If the user does not have access to the file then the server returns HTTP code 200 but the JSON payload has # all the fields empty if res['displayFileName'] is None or res['unencryptedChecksum'] is None: raise RuntimeError(f"Metadata for file id '{self.id}' could not be retrieved. " + "This is probably because your account does not have access to this file. " "You can check which datasets your account has access to at " "'https://ega-archive.org/my-datasets.php' after logging in.") self._display_file_name = res['displayFileName'] self._file_name = res['fileName'] self._file_size = res['fileSize'] self._unencrypted_checksum = res['unencryptedChecksum'] self._file_status = res['fileStatus'] @property def display_name(self): if self._display_file_name is None: self.load_metadata() return self._display_file_name @property def name(self): if self._file_name is None: self.load_metadata() return self._file_name @property def size(self): if self._file_size is None: self.load_metadata() return self._file_size @property def unencrypted_checksum(self): if self._unencrypted_checksum is None: self.load_metadata() return self._unencrypted_checksum @property def status(self): if self._file_status is None: self.load_metadata() return self._file_status @staticmethod def print_local_file_info(prefix_str, file, md5): logging.info(f"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, md5={md5})") def download_file(self, output_file, num_connections=1, max_slice_size=DEFAULT_SLICE_SIZE): """Download an individual file""" file_size = self.size check_sum = self.unencrypted_checksum options = {"destinationFormat": "plain"} file_size -= 16 # 16 bytes IV not necesary in plain mode if os.path.exists(output_file) and utils.md5(output_file, file_size) == check_sum: DataFile.print_local_file_info('Local file exists:', output_file, check_sum) return num_connections = max(num_connections, 1) num_connections = min(num_connections, 128) if file_size < 100 * 1024 * 1024: num_connections = 1 logging.info(f"Download starting [using {num_connections} connection(s), file size {file_size} and chunk " f"length {max_slice_size}]...") chunk_len = max_slice_size temporary_directory = os.path.join(os.path.dirname(output_file), ".tmp_download") os.makedirs(temporary_directory, exist_ok=True) with tqdm(total=int(file_size), unit='B', unit_scale=True) as pbar: params = [ (os.path.join(temporary_directory, self.id), chunk_start_pos, min(chunk_len, file_size - chunk_start_pos), options, pbar) for chunk_start_pos in range(0, file_size, chunk_len)] for file in os.listdir(temporary_directory): match = re.match(r"(.*)-from-(\d*)-len-(\d*).*", file) file_id = match.group(1) file_from = match.group(2) file_length 
= match.group(3) if file_id != self.id: continue if (file_from, file_length) in [(param[1], param[2]) for param in params]: continue logging.warning(f'Deleting the leftover {file} temporary file because the MAX_SLICE_SIZE parameter (' f'and thus the slice sizes) have been modified since the last run.') os.remove(os.path.join(temporary_directory, file)) results = [] with concurrent.futures.ThreadPoolExecutor(max_workers=num_connections) as executor: for part_file_name in executor.map(self.download_file_slice_, params): results.append(part_file_name) pbar.close() downloaded_file_total_size = sum(os.path.getsize(f) for f in results) if downloaded_file_total_size == file_size: utils.merge_bin_files_on_disk(output_file, results, downloaded_file_total_size) not_valid_server_md5 = len(str(check_sum or '')) != 32 logging.info("Calculating md5 (this operation can take a long time depending on the file size)") received_file_md5 = utils.md5(output_file, file_size) logging.info("Verifying file checksum") if received_file_md5 == check_sum or not_valid_server_md5: DataFile.print_local_file_info('Saved to : ', output_file, check_sum) if not_valid_server_md5: logging.info( f"WARNING: Unable to obtain valid MD5 from the server (received: {check_sum})." f" Can't validate download. Please contact EGA helpdesk on <EMAIL>") with open(utils.get_fname_md5(output_file), 'wb') as f: # save good md5 in aux file for future re-use f.write(received_file_md5.encode()) else: os.remove(output_file) raise Exception(f"Download process expected md5 value '{check_sum}' but got '{received_file_md5}'") def download_file_slice_(self, args): return self.download_file_slice(*args) def download_file_slice(self, file_name, start_pos, length, options=None, pbar=None): if start_pos < 0: raise ValueError("start : must be positive") if length <= 0: raise ValueError("length : must be positive") path = f"/files/{self.id}" if options is not None: path += '?' 
+ urllib.parse.urlencode(options) final_file_name = f'{file_name}-from-{str(start_pos)}-len-{str(length)}.slice' file_name = final_file_name + '.tmp' self.temporary_files.add(file_name) existing_size = os.stat(file_name).st_size if os.path.exists(file_name) else 0 if existing_size > length: os.remove(file_name) if pbar: pbar.update(existing_size) if existing_size == length: return file_name try: with self.data_client.get_stream(path, { 'Range': f'bytes={start_pos + existing_size}-{start_pos + length - 1}'}) as r: with open(file_name, 'ba') as file_out: for chunk in r.iter_content(DOWNLOAD_FILE_MEMORY_BUFFER_SIZE): file_out.write(chunk) if pbar: pbar.update(len(chunk)) total_received = os.path.getsize(file_name) if total_received != length: raise Exception(f"Slice error: received={total_received}, requested={length}, file='{file_name}'") except Exception: if os.path.exists(file_name): os.remove(file_name) raise os.rename(file_name, final_file_name) return final_file_name @staticmethod def is_genomic_range(genomic_range_args): if not genomic_range_args: return False return genomic_range_args[0] is not None or genomic_range_args[1] is not None def generate_output_filename(self, folder, genomic_range_args): file_name = self.display_name ext_to_remove = ".cip" if file_name.endswith(ext_to_remove): file_name = file_name[:-len(ext_to_remove)] name, ext = os.path.splitext(os.path.basename(file_name)) genomic_range = '' if DataFile.is_genomic_range(genomic_range_args): genomic_range = "_genomic_range_" + (genomic_range_args[0] or genomic_range_args[1]) genomic_range += '_' + (str(genomic_range_args[2]) or '0') genomic_range += '_' + (str(genomic_range_args[3]) or '') format_ext = '.' + (genomic_range_args[4] or '').strip().lower() if format_ext != ext and len(format_ext) > 1: ext += format_ext ret_val = os.path.join(folder, self.id, name + genomic_range + ext) logging.debug(f"Output file:'{ret_val}'") return ret_val @staticmethod def print_local_file_info_genomic_range(prefix_str, file, gr_args): logging.info( f"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, referenceName={gr_args[0]}," f" referenceMD5={gr_args[1]}, start={gr_args[2]}, end={gr_args[3]}, format={gr_args[4]})" ) def download_file_retry(self, num_connections, output_dir, genomic_range_args, max_retries, retry_wait, max_slice_size=DEFAULT_SLICE_SIZE): if self.name.endswith(".gpg"): logging.info( "GPG files are currently not supported." 
" Please email EGA Helpdesk at <EMAIL>") return logging.info(f"File Id: '{self.id}'({self.size} bytes).") output_file = self.generate_output_filename(output_dir, genomic_range_args) temporary_directory = os.path.join(os.path.dirname(output_file), ".tmp_download") if not os.path.exists(temporary_directory): os.makedirs(temporary_directory) hdd = psutil.disk_usage(os.getcwd()) logging.info(f"Total space : {hdd.total / (2 ** 30):.2f} GiB") logging.info(f"Used space : {hdd.used / (2 ** 30):.2f} GiB") logging.info(f"Free space : {hdd.free / (2 ** 30):.2f} GiB") # If file is bigger than free space, warning if hdd.free < self.size: logging.warning(f"The size of the file that you want to download is bigger than your free space in this " f"location") if DataFile.is_genomic_range(genomic_range_args): with open(output_file, 'wb') as output: htsget.get( f"{self.data_client.htsget_url}/files/{self.id}", output, reference_name=genomic_range_args[0], reference_md5=genomic_range_args[1], start=genomic_range_args[2], end=genomic_range_args[3], data_format=genomic_range_args[4], max_retries=sys.maxsize if max_retries < 0 else max_retries, retry_wait=retry_wait, bearer_token=self.data_client.auth_client.token) DataFile.print_local_file_info_genomic_range('Saved to : ', output_file, genomic_range_args) return done = False num_retries = 0 while not done: try: self.download_file(output_file, num_connections, max_slice_size) done = True except Exception as e: if e is ConnectionError: logging.info("Failed to connect to data service. Check that the necessary ports are open in your " "firewall. See the documentation for more information.") logging.exception(e) if num_retries == max_retries: if DataFile.temporary_files_should_be_deleted: self.delete_temporary_folder(temporary_directory) raise e time.sleep(retry_wait) num_retries += 1 logging.info(f"retry attempt {num_retries}") def delete_temporary_folder(self, temporary_directory): try: shutil.rmtree(temporary_directory) except FileNotFoundError as ex: logging.error(f'Could not delete the temporary folder: {ex}')
[ "os.remove", "htsget.get", "pyega3.libs.utils.md5", "shutil.rmtree", "os.path.join", "logging.error", "os.path.abspath", "logging.warning", "os.path.dirname", "os.path.exists", "pyega3.libs.utils.merge_bin_files_on_disk", "pyega3.libs.utils.get_fname_md5", "urllib.parse.urlencode", "os.path.basename", "os.stat", "os.path.getsize", "os.rename", "re.match", "time.sleep", "os.listdir", "logging.exception", "logging.debug", "os.makedirs", "os.getcwd", "logging.info" ]
[((3521, 3665), 'logging.info', 'logging.info', (['f"""Download starting [using {num_connections} connection(s), file size {file_size} and chunk length {max_slice_size}]..."""'], {}), "(\n f'Download starting [using {num_connections} connection(s), file size {file_size} and chunk length {max_slice_size}]...'\n )\n", (3533, 3665), False, 'import logging\n'), ((3816, 3863), 'os.makedirs', 'os.makedirs', (['temporary_directory'], {'exist_ok': '(True)'}), '(temporary_directory, exist_ok=True)\n', (3827, 3863), False, 'import os\n'), ((5520, 5626), 'logging.info', 'logging.info', (['"""Calculating md5 (this operation can take a long time depending on the file size)"""'], {}), "(\n 'Calculating md5 (this operation can take a long time depending on the file size)'\n )\n", (5532, 5626), False, 'import logging\n'), ((5646, 5679), 'pyega3.libs.utils.md5', 'utils.md5', (['output_file', 'file_size'], {}), '(output_file, file_size)\n', (5655, 5679), False, 'from pyega3.libs import utils\n'), ((5689, 5728), 'logging.info', 'logging.info', (['"""Verifying file checksum"""'], {}), "('Verifying file checksum')\n", (5701, 5728), False, 'import logging\n'), ((8247, 8284), 'os.rename', 'os.rename', (['file_name', 'final_file_name'], {}), '(file_name, final_file_name)\n', (8256, 8284), False, 'import os\n'), ((9346, 9403), 'os.path.join', 'os.path.join', (['folder', 'self.id', '(name + genomic_range + ext)'], {}), '(folder, self.id, name + genomic_range + ext)\n', (9358, 9403), False, 'import os\n'), ((9412, 9453), 'logging.debug', 'logging.debug', (['f"""Output file:\'{ret_val}\'"""'], {}), '(f"Output file:\'{ret_val}\'")\n', (9425, 9453), False, 'import logging\n'), ((10194, 10251), 'logging.info', 'logging.info', (['f"""File Id: \'{self.id}\'({self.size} bytes)."""'], {}), '(f"File Id: \'{self.id}\'({self.size} bytes).")\n', (10206, 10251), False, 'import logging\n'), ((10579, 10639), 'logging.info', 'logging.info', (['f"""Total space : {hdd.total / 2 ** 30:.2f} GiB"""'], {}), "(f'Total space : {hdd.total / 2 ** 30:.2f} GiB')\n", (10591, 10639), False, 'import logging\n'), ((10650, 10708), 'logging.info', 'logging.info', (['f"""Used space : {hdd.used / 2 ** 30:.2f} GiB"""'], {}), "(f'Used space : {hdd.used / 2 ** 30:.2f} GiB')\n", (10662, 10708), False, 'import logging\n'), ((10719, 10777), 'logging.info', 'logging.info', (['f"""Free space : {hdd.free / 2 ** 30:.2f} GiB"""'], {}), "(f'Free space : {hdd.free / 2 ** 30:.2f} GiB')\n", (10731, 10777), False, 'import logging\n'), ((3146, 3173), 'os.path.exists', 'os.path.exists', (['output_file'], {}), '(output_file)\n', (3160, 3173), False, 'import os\n'), ((3761, 3789), 'os.path.dirname', 'os.path.dirname', (['output_file'], {}), '(output_file)\n', (3776, 3789), False, 'import os\n'), ((4215, 4246), 'os.listdir', 'os.listdir', (['temporary_directory'], {}), '(temporary_directory)\n', (4225, 4246), False, 'import os\n'), ((6328, 6350), 'os.remove', 'os.remove', (['output_file'], {}), '(output_file)\n', (6337, 6350), False, 'import os\n'), ((7168, 7193), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (7182, 7193), False, 'import os\n'), ((7248, 7268), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (7257, 7268), False, 'import os\n'), ((7931, 7957), 'os.path.getsize', 'os.path.getsize', (['file_name'], {}), '(file_name)\n', (7946, 7957), False, 'import os\n'), ((8805, 8832), 'os.path.basename', 'os.path.basename', (['file_name'], {}), '(file_name)\n', (8821, 8832), False, 'import os\n'), ((10038, 10139), 'logging.info', 
'logging.info', (['"""GPG files are currently not supported. Please email EGA Helpdesk at <EMAIL>"""'], {}), "(\n 'GPG files are currently not supported. Please email EGA Helpdesk at <EMAIL>'\n )\n", (10050, 10139), False, 'import logging\n'), ((10381, 10409), 'os.path.dirname', 'os.path.dirname', (['output_file'], {}), '(output_file)\n', (10396, 10409), False, 'import os\n'), ((10443, 10478), 'os.path.exists', 'os.path.exists', (['temporary_directory'], {}), '(temporary_directory)\n', (10457, 10478), False, 'import os\n'), ((10492, 10524), 'os.makedirs', 'os.makedirs', (['temporary_directory'], {}), '(temporary_directory)\n', (10503, 10524), False, 'import os\n'), ((10558, 10569), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10567, 10569), False, 'import os\n'), ((10879, 11003), 'logging.warning', 'logging.warning', (['f"""The size of the file that you want to download is bigger than your free space in this location"""'], {}), "(\n f'The size of the file that you want to download is bigger than your free space in this location'\n )\n", (10894, 11003), False, 'import logging\n'), ((12760, 12794), 'shutil.rmtree', 'shutil.rmtree', (['temporary_directory'], {}), '(temporary_directory)\n', (12773, 12794), False, 'import shutil\n'), ((3178, 3211), 'pyega3.libs.utils.md5', 'utils.md5', (['output_file', 'file_size'], {}), '(output_file, file_size)\n', (3187, 3211), False, 'from pyega3.libs import utils\n'), ((4272, 4319), 're.match', 're.match', (['"""(.*)-from-(\\\\d*)-len-(\\\\d*).*"""', 'file'], {}), "('(.*)-from-(\\\\d*)-len-(\\\\d*).*', file)\n", (4280, 4319), False, 'import re\n'), ((4655, 4831), 'logging.warning', 'logging.warning', (['f"""Deleting the leftover {file} temporary file because the MAX_SLICE_SIZE parameter (and thus the slice sizes) have been modified since the last run."""'], {}), "(\n f'Deleting the leftover {file} temporary file because the MAX_SLICE_SIZE parameter (and thus the slice sizes) have been modified since the last run.'\n )\n", (4670, 4831), False, 'import logging\n'), ((5367, 5446), 'pyega3.libs.utils.merge_bin_files_on_disk', 'utils.merge_bin_files_on_disk', (['output_file', 'results', 'downloaded_file_total_size'], {}), '(output_file, results, downloaded_file_total_size)\n', (5396, 5446), False, 'from pyega3.libs import utils\n'), ((5932, 6099), 'logging.info', 'logging.info', (['f"""WARNING: Unable to obtain valid MD5 from the server (received: {check_sum}). Can\'t validate download. Please contact EGA helpdesk on <EMAIL>"""'], {}), '(\n f"WARNING: Unable to obtain valid MD5 from the server (received: {check_sum}). Can\'t validate download. 
Please contact EGA helpdesk on <EMAIL>"\n )\n', (5944, 6099), False, 'import logging\n'), ((6903, 6934), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['options'], {}), '(options)\n', (6925, 6934), False, 'import urllib\n'), ((7138, 7156), 'os.stat', 'os.stat', (['file_name'], {}), '(file_name)\n', (7145, 7156), False, 'import os\n'), ((8156, 8181), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (8170, 8181), False, 'import os\n'), ((11153, 11543), 'htsget.get', 'htsget.get', (['f"""{self.data_client.htsget_url}/files/{self.id}"""', 'output'], {'reference_name': 'genomic_range_args[0]', 'reference_md5': 'genomic_range_args[1]', 'start': 'genomic_range_args[2]', 'end': 'genomic_range_args[3]', 'data_format': 'genomic_range_args[4]', 'max_retries': '(sys.maxsize if max_retries < 0 else max_retries)', 'retry_wait': 'retry_wait', 'bearer_token': 'self.data_client.auth_client.token'}), "(f'{self.data_client.htsget_url}/files/{self.id}', output,\n reference_name=genomic_range_args[0], reference_md5=genomic_range_args[\n 1], start=genomic_range_args[2], end=genomic_range_args[3], data_format\n =genomic_range_args[4], max_retries=sys.maxsize if max_retries < 0 else\n max_retries, retry_wait=retry_wait, bearer_token=self.data_client.\n auth_client.token)\n", (11163, 11543), False, 'import htsget\n'), ((12847, 12908), 'logging.error', 'logging.error', (['f"""Could not delete the temporary folder: {ex}"""'], {}), "(f'Could not delete the temporary folder: {ex}')\n", (12860, 12908), False, 'import logging\n'), ((2734, 2755), 'os.path.abspath', 'os.path.abspath', (['file'], {}), '(file)\n', (2749, 2755), False, 'import os\n'), ((2759, 2780), 'os.path.getsize', 'os.path.getsize', (['file'], {}), '(file)\n', (2774, 2780), False, 'import os\n'), ((3981, 4023), 'os.path.join', 'os.path.join', (['temporary_directory', 'self.id'], {}), '(temporary_directory, self.id)\n', (3993, 4023), False, 'import os\n'), ((4884, 4923), 'os.path.join', 'os.path.join', (['temporary_directory', 'file'], {}), '(temporary_directory, file)\n', (4896, 4923), False, 'import os\n'), ((5258, 5276), 'os.path.getsize', 'os.path.getsize', (['f'], {}), '(f)\n', (5273, 5276), False, 'import os\n'), ((6157, 6189), 'pyega3.libs.utils.get_fname_md5', 'utils.get_fname_md5', (['output_file'], {}), '(output_file)\n', (6176, 6189), False, 'from pyega3.libs import utils\n'), ((8199, 8219), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (8208, 8219), False, 'import os\n'), ((9618, 9639), 'os.path.abspath', 'os.path.abspath', (['file'], {}), '(file)\n', (9633, 9639), False, 'import os\n'), ((9643, 9664), 'os.path.getsize', 'os.path.getsize', (['file'], {}), '(file)\n', (9658, 9664), False, 'import os\n'), ((12303, 12323), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (12320, 12323), False, 'import logging\n'), ((12557, 12579), 'time.sleep', 'time.sleep', (['retry_wait'], {}), '(retry_wait)\n', (12567, 12579), False, 'import time\n'), ((12629, 12673), 'logging.info', 'logging.info', (['f"""retry attempt {num_retries}"""'], {}), "(f'retry attempt {num_retries}')\n", (12641, 12673), False, 'import logging\n'), ((12098, 12260), 'logging.info', 'logging.info', (['"""Failed to connect to data service. Check that the necessary ports are open in your firewall. See the documentation for more information."""'], {}), "(\n 'Failed to connect to data service. Check that the necessary ports are open in your firewall. 
See the documentation for more information.'\n )\n", (12110, 12260), False, 'import logging\n')]
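In download_file above, the transfer is cut into fixed-size slices: start offsets come from range(0, file_size, chunk_len), each slice length is capped at the remaining bytes, and every finished part is named "<file_id>-from-<start>-len-<length>.slice" before the parts are merged. A standalone sketch of that partitioning arithmetic follows; the file id and the sizes are made-up example values.

# Standalone sketch of the slice plan used by DataFile.download_file above;
# the file id and the sizes are made-up example values.
def slice_plan(file_id, file_size, chunk_len):
    """Yield (slice_file_name, start, length) tuples covering file_size bytes."""
    for start in range(0, file_size, chunk_len):
        length = min(chunk_len, file_size - start)
        yield f"{file_id}-from-{start}-len-{length}.slice", start, length


if __name__ == "__main__":
    # e.g. a 250 MiB file downloaded in 100 MiB slices -> 3 slices
    for name, start, length in slice_plan("EGAF-example", 250 * 1024 * 1024, 100 * 1024 * 1024):
        print(name, start, length)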
#!/usr/local/bin/python3.4 # # run.cgi -- run steps of the GISTEMP algorithm # # <NAME>, 2009-12-08 # <NAME>, Revision 2016-01-06 """run.cgi [options] -- run steps of the GISTEMP algorithm. Options: --help Print this text. --steps=STEPS Specify which steps to run, as a comma-separated list of numbers from 0 to 5. For example, --steps=2,3,5 The steps are run in the order you specify. If this option is omitted, run all steps in order. """ # http://www.python.org/doc/2.4.4/lib/module-os.html import os # http://docs.python.org/release/2.4.4/lib/module-re.html import re # http://www.python.org/doc/2.4.4/lib/module-sys.html import sys try: rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if os.getcwd() != rootdir: print("The GISTEMP procedure must be run from the root " "directory of the project.\nPlease change directory " "to %s and try again." % rootdir) sys.exit() except: sys.exit() sys.path.append(os.getcwd()) from settings import * # Clear Climate Code import gio class Fatal(Exception): pass # Record the original standard output so we can log to it; in steps 2 and 5 # we'll be changing the value of sys.stdout before calling other modules that # use "print" to generate their output. logfile = sys.stdout def log(msg): print(msg, file=logfile) progress = open(PROGRESS_DIR + 'progress.txt', 'a') progress.write(msg + '\n\n') progress.flush() def mkdir(path): """mkdir(PATH): create the directory PATH, and all intermediate-level directories needed to contain it, unless it already exists.""" if not os.path.isdir(path): log("... creating directory %s" % path) os.makedirs(path) # Each of the run_stepN functions below takes a data object, its input, # and produces a data object, its output. Ordinarily the data objects # are iterators, either produced from the previous step, or an iterator # that feeds from a file. def run_step0(data): from steps import step0 if data is None: data = gio.step0_input() result = step0.step0(data) return gio.step0_output(result) def run_step1(data): from steps import step1 from extension import step1 as estep1 if data is None: data = gio.step1_input() pre = estep1.pre_step1(data) result = step1.step1(pre) post = estep1.post_step1(result) return gio.step1_output(post) def run_step2(data): from steps import step2 if data is None: data = gio.step2_input() result = step2.step2(data) return gio.step2_output(result) def run_step3(data): from steps import step3 if data is None: data = gio.step3_input() result = step3.step3(data) return gio.step3_output(result) def run_step3c(data): """An alternative to Step 3 that reads (copies) the output file created by the Sordinary Step 3. Effectively using the data produced by Step 3 without re-running it.""" if data: raise Fatal("Expect to run 3c first in pipeline.") return gio.step3c_input() def run_step4(data): from steps import step4 # Unlike earlier steps, Step 4 always gets input data, ocean # temperatures, from disk; data from earlier stages is land data and # is zipped up. data = gio.step4_input(data) result = step4.step4(data) return gio.step4_output(result) def run_step5(data): from steps import step5 # Step 5 takes a land mask as optional input, this is all handled in # the step5_input() function. data = gio.step5_input(data) result = step5.step5(data) return gio.step5_output(result) def parse_steps(steps): """Parse the -s, steps, option. 
Produces a list of strings.""" steps = steps.strip() if not steps: return [str(x) for x in range(6)] result = set() for part in steps.split(','): # Part can be integer number with an optional letter suffix... if re.match(r'^\d+[a-z]?$', part): result.add(part) else: # Or a range in the form '1-3'. try: l, r = part.split('-') result.update(str(s) for s in range(int(l), int(r) + 1)) except ValueError: # Expect to catch both # "ValueError: too many values to unpack" when the split # produces too many values ("1-3-"), and # "ValueError: invalid literal for int() with base 10: 'a'" # when int fails ("1,a") raise Fatal("Can't understand steps argument.") return list(sorted(result)) def parse_options(arglist): import optparse usage = "usage: %prog [options]" parser = optparse.OptionParser(usage) parser.add_option("-s", "--steps", action="store", metavar="S[,S]", default="", help="Select range of steps to run") parser.add_option('-p', '--parameter', action='append', help="Redefine parameter from parameters/*.py during run") parser.add_option("--no-work_files", "--suppress-work-files", action="store_false", default=True, dest="save_work", help="Do not save intermediate files in the work sub-directory") options, args = parser.parse_args(arglist) if len(args) != 0: parser.error("Unexpected arguments") options.steps = parse_steps(options.steps) return options, args def update_parameters(parm): """Take a parameter string from the command line and update the parameters module.""" if not parm: return import parameters for p in parm: try: key, value = p.split('=', 1) except ValueError: raise Fatal("Can't understand parameter option: %r" % p) if not hasattr(parameters, key): raise Fatal("Ignoring unknown parameter %r" % key) # Coerce value, a string, to the same type as the existing parameter # value. That works nicely for strings, ints, and floats... x = getattr(parameters, key) # ... but we need a hack for bool. if type(x) == bool: try: value = ['false', 'true'].index(value.lower()) except ValueError: raise Fatal("Boolean parameter %r must be True or False" % key) # Now value is 0 or 1 and the default case will correctly # coerce it. elif value[0] == '(' and value[-1] == ')': value = value[1:-1] value = [int(x) for x in value.split(',')] value = type(x)(value) setattr(parameters, key, value) # Download input files def dl_input_files(): import fetch fetcher = fetch.Fetcher() fetcher.fetch() def main(argv=None): import time import os if argv is None: argv = sys.argv options, args = parse_options(argv[1:]) update_parameters(options.parameter) step_list = list(options.steps) # overwrite progress popup if not os.path.exists(PROGRESS_DIR): os.makedirs(PROGRESS_DIR) progress = open(PROGRESS_DIR + "progress.txt", 'w') progress.write("Setting up parameters...\n\n") # Create all the temporary directories we're going to use. for d in ['log', 'result', 'work', "input"]: mkdir(TMP_DIR + '/' + d) # delete files in /tmp/input to re-download the input data files # otherwise the files in /tmp/input will be used. dl_input_files() step_fn = { '0': run_step0, '1': run_step1, '2': run_step2, '3': run_step3, '3c': run_step3c, '4': run_step4, '5': run_step5, } # Record start time now, and ending times for each step. start_time = time.time() cannot = [s for s in step_list if s not in step_fn] if cannot: raise Fatal("Can't run steps %s" % str(cannot)) # Create a message for stdout. 
if len(step_list) == 1: logit = "STEP %s" % step_list[0] else: assert len(step_list) >= 2 t = [str(s) for s in range(int(step_list[0]), int(step_list[-1]) + 1)] if step_list == t: logit = "STEPS %s to %s" % (step_list[0], step_list[-1]) else: logit = "STEPS %s" % ', '.join(step_list) log("====> %s ====" % logit) data = None for step in step_list: data = step_fn[step](data) # Consume the data in whatever the last step was, in order to # write its output, and hence suck data through the whole # pipeline. for _ in data: pass end_time = time.time() log("====> Timing Summary ====") log("Run took %.1f seconds" % (end_time - start_time)) return 0 if __name__ == '__main__': sys.exit(main())
[ "gio.step3_output", "extension.step1.post_step1", "optparse.OptionParser", "gio.step4_input", "gio.step0_output", "gio.step5_input", "gio.step0_input", "os.path.abspath", "steps.step4.step4", "fetch.Fetcher", "os.path.exists", "steps.step5.step5", "steps.step1.step1", "steps.step3.step3", "gio.step4_output", "re.match", "gio.step2_input", "steps.step0.step0", "steps.step2.step2", "gio.step1_input", "sys.exit", "gio.step3c_input", "os.makedirs", "gio.step1_output", "gio.step3_input", "os.getcwd", "os.path.isdir", "gio.step5_output", "time.time", "gio.step2_output", "extension.step1.pre_step1" ]
[((1061, 1072), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1070, 1072), False, 'import os\n'), ((2164, 2181), 'steps.step0.step0', 'step0.step0', (['data'], {}), '(data)\n', (2175, 2181), False, 'from steps import step0\n'), ((2193, 2217), 'gio.step0_output', 'gio.step0_output', (['result'], {}), '(result)\n', (2209, 2217), False, 'import gio\n'), ((2376, 2398), 'extension.step1.pre_step1', 'estep1.pre_step1', (['data'], {}), '(data)\n', (2392, 2398), True, 'from extension import step1 as estep1\n'), ((2412, 2428), 'steps.step1.step1', 'step1.step1', (['pre'], {}), '(pre)\n', (2423, 2428), False, 'from steps import step1\n'), ((2440, 2465), 'extension.step1.post_step1', 'estep1.post_step1', (['result'], {}), '(result)\n', (2457, 2465), True, 'from extension import step1 as estep1\n'), ((2477, 2499), 'gio.step1_output', 'gio.step1_output', (['post'], {}), '(post)\n', (2493, 2499), False, 'import gio\n'), ((2619, 2636), 'steps.step2.step2', 'step2.step2', (['data'], {}), '(data)\n', (2630, 2636), False, 'from steps import step2\n'), ((2648, 2672), 'gio.step2_output', 'gio.step2_output', (['result'], {}), '(result)\n', (2664, 2672), False, 'import gio\n'), ((2792, 2809), 'steps.step3.step3', 'step3.step3', (['data'], {}), '(data)\n', (2803, 2809), False, 'from steps import step3\n'), ((2821, 2845), 'gio.step3_output', 'gio.step3_output', (['result'], {}), '(result)\n', (2837, 2845), False, 'import gio\n'), ((3135, 3153), 'gio.step3c_input', 'gio.step3c_input', ([], {}), '()\n', (3151, 3153), False, 'import gio\n'), ((3374, 3395), 'gio.step4_input', 'gio.step4_input', (['data'], {}), '(data)\n', (3389, 3395), False, 'import gio\n'), ((3409, 3426), 'steps.step4.step4', 'step4.step4', (['data'], {}), '(data)\n', (3420, 3426), False, 'from steps import step4\n'), ((3438, 3462), 'gio.step4_output', 'gio.step4_output', (['result'], {}), '(result)\n', (3454, 3462), False, 'import gio\n'), ((3632, 3653), 'gio.step5_input', 'gio.step5_input', (['data'], {}), '(data)\n', (3647, 3653), False, 'import gio\n'), ((3667, 3684), 'steps.step5.step5', 'step5.step5', (['data'], {}), '(data)\n', (3678, 3684), False, 'from steps import step5\n'), ((3696, 3720), 'gio.step5_output', 'gio.step5_output', (['result'], {}), '(result)\n', (3712, 3720), False, 'import gio\n'), ((4799, 4827), 'optparse.OptionParser', 'optparse.OptionParser', (['usage'], {}), '(usage)\n', (4820, 4827), False, 'import optparse\n'), ((6785, 6800), 'fetch.Fetcher', 'fetch.Fetcher', ([], {}), '()\n', (6798, 6800), False, 'import fetch\n'), ((7820, 7831), 'time.time', 'time.time', ([], {}), '()\n', (7829, 7831), False, 'import time\n'), ((8658, 8669), 'time.time', 'time.time', ([], {}), '()\n', (8667, 8669), False, 'import time\n'), ((797, 808), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (806, 808), False, 'import os\n'), ((1010, 1020), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1018, 1020), False, 'import sys\n'), ((1033, 1043), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1041, 1043), False, 'import sys\n'), ((1709, 1728), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1722, 1728), False, 'import os\n'), ((1786, 1803), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1797, 1803), False, 'import os\n'), ((2133, 2150), 'gio.step0_input', 'gio.step0_input', ([], {}), '()\n', (2148, 2150), False, 'import gio\n'), ((2348, 2365), 'gio.step1_input', 'gio.step1_input', ([], {}), '()\n', (2363, 2365), False, 'import gio\n'), ((2588, 2605), 'gio.step2_input', 'gio.step2_input', ([], {}), '()\n', (2603, 2605), False, 'import 
gio\n'), ((2761, 2778), 'gio.step3_input', 'gio.step3_input', ([], {}), '()\n', (2776, 2778), False, 'import gio\n'), ((4036, 4066), 're.match', 're.match', (['"""^\\\\d+[a-z]?$"""', 'part'], {}), "('^\\\\d+[a-z]?$', part)\n", (4044, 4066), False, 'import re\n'), ((7086, 7114), 'os.path.exists', 'os.path.exists', (['PROGRESS_DIR'], {}), '(PROGRESS_DIR)\n', (7100, 7114), False, 'import os\n'), ((7124, 7149), 'os.makedirs', 'os.makedirs', (['PROGRESS_DIR'], {}), '(PROGRESS_DIR)\n', (7135, 7149), False, 'import os\n'), ((762, 787), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (777, 787), False, 'import os\n')]
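main() above wires the steps together lazily: each run_stepN returns an iterator built on top of the previous one, and nothing is computed until the final "for _ in data" loop drains the last iterator. A minimal self-contained sketch of that generator-pipeline pattern is shown below; the step names and payloads are made-up examples.

# Minimal sketch of the generator pipeline pattern used by main() above;
# the step names and payloads are made-up examples.
def step_a(data):
    for x in range(3):
        yield x


def step_b(data):
    for x in data:
        yield x * 10


step_fn = {'a': step_a, 'b': step_b}

data = None
for step in ['a', 'b']:
    data = step_fn[step](data)

for item in data:  # draining the last iterator drives the whole chain
    print(item)    # prints 0, 10, 20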
# -*- coding: utf-8 -*- # -------------------------------------------------------------------- # The MIT License (MIT) # # Copyright (c) 2016 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # -------------------------------------------------------------------- """Python language schemas utilities.""" from re import compile as re_compile from b3j0f.utils.version import OrderedDict from b3j0f.utils.path import lookup from ..base import Schema, DynamicValue from .factory import SchemaBuilder, build from ..elementary import ElementarySchema, ArraySchema, OneOfSchema, TypeSchema from ..utils import updatecontent, data2schema, datatype2schemacls, RefSchema from types import ( FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType, MemberDescriptorType ) from six import get_function_globals from inspect import getargspec, getsourcelines, isclass, isbuiltin from functools import wraps __all__ = [ 'PythonSchemaBuilder', 'FunctionSchema', 'buildschema', 'ParamSchema' ] class PythonSchemaBuilder(SchemaBuilder): """In charge of building python classes.""" __name__ = 'python' def build(self, _resource, **kwargs): if not isclass(_resource): raise TypeError( 'Wrong type {0}, \'type\' expected'.format(_resource) ) if issubclass(_resource, Schema): result = _resource else: result = datatype2schemacls(_datatype=_resource, _force=False) if result is None: resname = _resource.__name__ if 'name' not in kwargs: kwargs['name'] = resname for attrname in dir(_resource): if ( attrname and attrname[0] != '_' and attrname not in kwargs and not hasattr(Schema, attrname) ): attr = getattr(_resource, attrname) if not isinstance(attr, MemberDescriptorType): kwargs[attrname] = attr result = type(resname, (Schema,), kwargs) return result def getresource(self, schemacls): result = None if hasattr(schemacls, 'mro'): for mro in schemacls.mro(): if issubclass(mro, Schema): result = mro break return result def buildschema(_cls=None, **kwargs): """Class decorator used to build a schema from the decorate class. :param type _cls: class to decorate. :param kwargs: schema attributes to set. :rtype: type :return: schema class. 
""" if _cls is None: return lambda _cls: buildschema(_cls=_cls, **kwargs) result = build(_cls, **kwargs) return result class ParamTypeSchema(Schema): """In charge of embedding a parameter type which met a problem while generating a schema.""" type = TypeSchema() def _validate(self, data, *args, **kwargs): super(ParamTypeSchema, self)._validate(data=data, *args, **kwargs) if not isinstance(data, self.type): raise TypeError( 'Wrong type of {0}. {1} expected.'.format(data, self.type) ) @updatecontent class ParamSchema(RefSchema): """Function parameter schema.""" #: if true (default), update self ref when default is given. autotype = True mandatory = True #: if true (default), parameter value is mandatory. def _setvalue(self, schema, value, *args, **kwargs): super(ParamSchema, self)._setvalue(schema, value, *args, **kwargs) if schema.name == 'default': if self.autotype and self.ref is None: self.ref = None if value is None else data2schema(value) if value is not None: self.mandatory = False class FunctionSchema(ElementarySchema): """Function schema. Dedicated to describe functions, methods and lambda objects. """ _PDESC = r':param (?P<ptype1>[\w_,]+) (?P<pname1>\w+):' _PTYPE = r':type (?P<pname2>[\w_]+):(?P<ptype2>[^\n]+)' _RTYPE = r':rtype:(?P<rtype>[^\n]+)' _REC = re_compile('{0}|{1}|{2}'.format(_PDESC, _PTYPE, _RTYPE)) __data_types__ = [ FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType ] params = ArraySchema(itemtype=ParamSchema()) rtype = Schema() impl = '' impltype = '' safe = False varargs = False def _validate(self, data, owner, *args, **kwargs): ElementarySchema._validate(self, data=data, *args, **kwargs) if data != self.default or data is not self.default: errormsg = 'Error while validating {0} with {1}'.format(data, self) if data.__name__ != self.name: raise TypeError( '{0}. Wrong function name {1}. {2} expected.'.format( errormsg, data.__name__, self.name ) ) params, rtype, vargs, kwargs = self._getparams_rtype(function=data) var = self.varargs or vargs or kwargs if (not var) and len(params) != len(self.params): raise TypeError( '{0}. Wrong param length: {1}. {2} expected.'.format( errormsg, len(params), len(self.params) ) ) if self.rtype is not None and type(self.rtype) != type(rtype): raise TypeError( '{0}. Wrong rtype {1}. {2} expected.'.format( rtype, self.rtype ) ) for index, pkwargs in enumerate(params.values()): name = pkwargs['name'] default = pkwargs.get('default') param = self.params[index] if param.name != name: raise TypeError( '{0}. Wrong param {1} at {2}. {3} expected.'.format( errormsg, name, index, param.name ) ) val = param.default if isinstance(val, DynamicValue): val = val() if ( val is not None and default is not None and val != default ): raise TypeError( '{0}. Wrong val {1}/{2} at {3}. 
Expected {4}.'.format( errormsg, name, default, index, val ) ) def _setvalue(self, schema, value): if schema.name == 'default': self._setter(obj=self, value=value) def _setter(self, obj, value, *args, **kwargs): if hasattr(self, 'olddefault'): if self.olddefault is value: return self.olddefault = value ElementarySchema._setter(self, obj, value, *args, **kwargs) pkwargs, self.rtype, vargs, kwargs = self._getparams_rtype(value) self.vargs = vargs or kwargs params = [] selfparams = {} for selfparam in self.params: selfparams[selfparam.name] = selfparam index = 0 for index, pkwarg in enumerate(pkwargs.values()): name = pkwarg['name'] selfparam = None # old self param if name in selfparams: selfparam = selfparams[name] if selfparam is None: selfparam = ParamSchema(**pkwarg) else: for key in pkwarg: val = pkwarg[key] if val is not None: setattr(selfparam, key, val) params.append(selfparam) self.params = params self.impltype = 'python' try: self.impl = str(getsourcelines(value)) except TypeError: self.impl = '' @classmethod def _getparams_rtype(cls, function): """Get function params from input function and rtype. :return: OrderedDict, rtype, vargs and kwargs. :rtype: tuple """ try: args, vargs, kwargs, default = getargspec(function) except TypeError: args, vargs, kwargs, default = (), (), (), () indexlen = len(args) - (0 if default is None else len(default)) params = OrderedDict() for index, arg in enumerate(args): pkwargs = { 'name': arg, 'mandatory': True } # param kwargs if index >= indexlen: # has default value value = default[index - indexlen] pkwargs['default'] = value pkwargs['ref'] = None if value is None else data2schema(value) pkwargs['mandatory'] = False params[arg] = pkwargs rtype = None # parse docstring if function.__doc__ is not None and not isbuiltin(function): scope = get_function_globals(function) for match in cls._REC.findall(function.__doc__): if rtype is None: rrtype = match[4].strip() or None if rrtype: rtypes = rrtype.split(',') schemas = [] for rtype_ in rtypes: rtype_ = rtype_.strip() islist = False try: lkrtype = lookup(rtype_, scope=scope) except ImportError: islist = True try: if rtype_[-1] == 's': lkrtype = lookup( rtype_[:-1], scope=scope ) elif rtype_.startswith('list of '): lkrtype = lookup( rtype_[8:], scope=scope ) else: raise except ImportError: msg = 'rtype "{0}" ({1}) from {2} not found.' raise ImportError( msg.format(rtype_, rrtype, function) ) try: schemacls = datatype2schemacls(lkrtype) except TypeError: schemacls = ParamTypeSchema(type=lkrtype) rschema = schemacls() if islist: rschema = ArraySchema(itemtype=rschema) schemas.append(rschema) if len(rtypes) > 1: rtype = OneOfSchema(schemas=schemas, nullable=True) else: rtype = schemas[0] continue pname = (match[1] or match[2]).strip() if pname and pname in params: ptype = (match[0] or match[3]).strip() ptypes = ptype.split(',') schemas = [] for ptype in ptypes: ptype = ptype.strip() islist = False try: lkptype = lookup(ptype, scope=scope) except ImportError: islist = True try: if ptype[-1] == 's': lkptype = lookup(ptype[:-1], scope=scope) elif ptype.startswith('list of '): lkptype = lookup(ptype[8:], scope=scope) else: raise except ImportError: msg = 'Error on ptype "{0}" ({1}) from {2} not found.' 
raise ImportError( msg.format(pname, ptype, function) ) try: schemacls = datatype2schemacls(lkptype) except TypeError: schemacls = ParamTypeSchema(type=lkptype) pschema = schemacls() if islist: pschema = ArraySchema(itemtype=pschema) schemas.append(pschema) if len(ptypes) > 1: pschema = OneOfSchema(schemas=schemas, nullable=True) else: pschema = schemas[0] params[pname]['ref'] = pschema return params, rtype, vargs, kwargs def __call__(self, *args, **kwargs): return self.default(*args, **kwargs) def _getter(self, obj, *args, **kwargs): func = ElementarySchema._getter(self, obj, *args, **kwargs) @wraps(func) def result(*args, **kwargs): try: result = func(obj, *args, **kwargs) except TypeError: result = func(*args, **kwargs) return result result.source = func return result def funcschema(default=None, *args, **kwargs): """Decorator to use in order to transform a function into a schema.""" if default is None: return lambda default: funcschema(default=default, *args, **kwargs) return FunctionSchema(default=default, *args, **kwargs)
[ "inspect.getsourcelines", "b3j0f.utils.path.lookup", "inspect.isclass", "b3j0f.utils.version.OrderedDict", "inspect.getargspec", "functools.wraps", "six.get_function_globals", "inspect.isbuiltin" ]
[((9501, 9514), 'b3j0f.utils.version.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9512, 9514), False, 'from b3j0f.utils.version import OrderedDict\n'), ((14499, 14510), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (14504, 14510), False, 'from functools import wraps\n'), ((2220, 2238), 'inspect.isclass', 'isclass', (['_resource'], {}), '(_resource)\n', (2227, 2238), False, 'from inspect import getargspec, getsourcelines, isclass, isbuiltin\n'), ((9304, 9324), 'inspect.getargspec', 'getargspec', (['function'], {}), '(function)\n', (9314, 9324), False, 'from inspect import getargspec, getsourcelines, isclass, isbuiltin\n'), ((10124, 10154), 'six.get_function_globals', 'get_function_globals', (['function'], {}), '(function)\n', (10144, 10154), False, 'from six import get_function_globals\n'), ((8960, 8981), 'inspect.getsourcelines', 'getsourcelines', (['value'], {}), '(value)\n', (8974, 8981), False, 'from inspect import getargspec, getsourcelines, isclass, isbuiltin\n'), ((10082, 10101), 'inspect.isbuiltin', 'isbuiltin', (['function'], {}), '(function)\n', (10091, 10101), False, 'from inspect import getargspec, getsourcelines, isclass, isbuiltin\n'), ((12789, 12815), 'b3j0f.utils.path.lookup', 'lookup', (['ptype'], {'scope': 'scope'}), '(ptype, scope=scope)\n', (12795, 12815), False, 'from b3j0f.utils.path import lookup\n'), ((10645, 10672), 'b3j0f.utils.path.lookup', 'lookup', (['rtype_'], {'scope': 'scope'}), '(rtype_, scope=scope)\n', (10651, 10672), False, 'from b3j0f.utils.path import lookup\n'), ((13036, 13067), 'b3j0f.utils.path.lookup', 'lookup', (['ptype[:-1]'], {'scope': 'scope'}), '(ptype[:-1], scope=scope)\n', (13042, 13067), False, 'from b3j0f.utils.path import lookup\n'), ((10914, 10946), 'b3j0f.utils.path.lookup', 'lookup', (['rtype_[:-1]'], {'scope': 'scope'}), '(rtype_[:-1], scope=scope)\n', (10920, 10946), False, 'from b3j0f.utils.path import lookup\n'), ((13182, 13212), 'b3j0f.utils.path.lookup', 'lookup', (['ptype[8:]'], {'scope': 'scope'}), '(ptype[8:], scope=scope)\n', (13188, 13212), False, 'from b3j0f.utils.path import lookup\n'), ((11156, 11187), 'b3j0f.utils.path.lookup', 'lookup', (['rtype_[8:]'], {'scope': 'scope'}), '(rtype_[8:], scope=scope)\n', (11162, 11187), False, 'from b3j0f.utils.path import lookup\n')]
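FunctionSchema._getparams_rtype above recovers parameter and return types from reStructuredText-style docstrings with the _REC regular expression. The standalone sketch below reuses that exact expression on a made-up docstring to show the 5-tuples that findall produces (ptype1, pname1, pname2, ptype2, rtype).

# Standalone sketch of the docstring matching done by FunctionSchema above;
# the regular expression is copied from the class, the sample docstring is made up.
from re import compile as re_compile

_PDESC = r':param (?P<ptype1>[\w_,]+) (?P<pname1>\w+):'
_PTYPE = r':type (?P<pname2>[\w_]+):(?P<ptype2>[^\n]+)'
_RTYPE = r':rtype:(?P<rtype>[^\n]+)'
_REC = re_compile('{0}|{1}|{2}'.format(_PDESC, _PTYPE, _RTYPE))

doc = """Add two numbers.

:param int a: first operand.
:type b: int
:rtype: int
"""

for match in _REC.findall(doc):
    print(match)
# ('int', 'a', '', '', '')
# ('', '', 'b', ' int', '')
# ('', '', '', '', ' int')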
#!/usr/bin/env python # coding: utf-8 # Copyright (c) Qotto, 2019 import uuid import pytest from tonga.services.coordinator.partitioner.statefulset_partitioner import StatefulsetPartitioner from tonga.errors import OutsideInstanceNumber def test_statefulset_partitioner_with_str_uuid_key(): statefulset_partitioner = StatefulsetPartitioner(instance=1) for i in range(0, 100): assert statefulset_partitioner(uuid.uuid4().hex, [0, 1, 2, 3], [0, 1, 2, 3]) == 1 def test_statefulset_partitioner_bad_instance(): statefulset_partitioner = StatefulsetPartitioner(instance=100) with pytest.raises(OutsideInstanceNumber): statefulset_partitioner.__call__(uuid.uuid4().hex, [0, 1, 2, 3], [0, 1, 2, 3])
[ "pytest.raises", "uuid.uuid4", "tonga.services.coordinator.partitioner.statefulset_partitioner.StatefulsetPartitioner" ]
[((325, 359), 'tonga.services.coordinator.partitioner.statefulset_partitioner.StatefulsetPartitioner', 'StatefulsetPartitioner', ([], {'instance': '(1)'}), '(instance=1)\n', (347, 359), False, 'from tonga.services.coordinator.partitioner.statefulset_partitioner import StatefulsetPartitioner\n'), ((560, 596), 'tonga.services.coordinator.partitioner.statefulset_partitioner.StatefulsetPartitioner', 'StatefulsetPartitioner', ([], {'instance': '(100)'}), '(instance=100)\n', (582, 596), False, 'from tonga.services.coordinator.partitioner.statefulset_partitioner import StatefulsetPartitioner\n'), ((607, 643), 'pytest.raises', 'pytest.raises', (['OutsideInstanceNumber'], {}), '(OutsideInstanceNumber)\n', (620, 643), False, 'import pytest\n'), ((686, 698), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (696, 698), False, 'import uuid\n'), ((428, 440), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (438, 440), False, 'import uuid\n')]
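The two tests above pin down the partitioner's contract: an instance built with number n always returns partition n regardless of the key, and an instance number outside the available partitions raises OutsideInstanceNumber. The sketch below illustrates that contract only; it is not the tonga implementation, and the range check and error type are simplified assumptions.

# Illustration of the contract checked by the tests above; NOT the tonga code.
class SketchStatefulsetPartitioner:
    def __init__(self, instance):
        self._instance = instance

    def __call__(self, key, all_partitions, available_partitions):
        if self._instance not in all_partitions:
            # tonga raises OutsideInstanceNumber here
            raise IndexError('instance number is outside the partition range')
        return self._instance


partitioner = SketchStatefulsetPartitioner(instance=1)
assert partitioner(b'any-key', [0, 1, 2, 3], [0, 1, 2, 3]) == 1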
#!/usr/bin/python """ TempMAGE model architecture """ import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.layers import Conv2D,MaxPool2D,Flatten,Conv1D,MaxPooling1D,Dense,Dropout def set_metrics(): """ metrics used to evaluate the model's perfromance """ METRICS = [ keras.metrics.TruePositives(name='tp'), keras.metrics.FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'), keras.metrics.FalseNegatives(name='fn'), keras.metrics.BinaryAccuracy(name='accuracy'), keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='ROC_auc'), keras.metrics.AUC(name='PR_auc', curve = "PR"), keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy') ] return METRICS def tempoMAGE(metrics, output_bias= None): if output_bias is not None: output_bias = tf.keras.initializers.Constant(output_bias) seq_input = keras.Input(shape=(400,5,1), name='sequence_conv') x = keras.layers.Conv2D(filters=32, kernel_size=(10,5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(seq_input) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=64, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=128, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005) )(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) sequence_features = keras.layers.Flatten()(x) depth_input = keras.Input(shape=(400,1), name= 'depth') x = keras.layers.Conv1D(filters= 32, kernel_size=(5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(depth_input) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 64, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 128, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) depth_features = keras.layers.Flatten()(x) x = layers.concatenate([sequence_features, depth_features]) conv_dense = keras.layers.Dense(108, activation = 'relu')(x) expression_input = keras.Input(shape=(20,1), name= 'expression') expression_features = keras.layers.Flatten()(expression_input) weight_input = keras.Input(shape=(1,1), name= 'weight') weight_features = keras.layers.Flatten()(weight_input) x = layers.concatenate([expression_features, weight_features]) data_dense = keras.layers.Dense(20,activation = 'relu')(x) x = layers.concatenate([conv_dense, data_dense]) x = keras.layers.Dense(128, activation = 'relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.Dropout(0.3)(x) seq_pred = keras.layers.Dense(1, activation='sigmoid',bias_initializer= output_bias)(x) model = keras.Model(inputs=[seq_input,depth_input,expression_input, weight_input], outputs= seq_pred) model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(0.001), metrics= metrics) return model
[ "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Dense", "tensorflow.keras.metrics.FalseNegatives", "tensorflow.keras.layers.concatenate", "tensorflow.keras.metrics.BinaryAccuracy", "tensorflow.keras.layers.Flatten", "tensorflow.keras.regularizers.l2", "tensorflow.keras.Input", "tensorflow.keras.metrics.TrueNegatives", "tensorflow.keras.metrics.FalsePositives", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.metrics.Precision", "tensorflow.keras.metrics.AUC", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.MaxPooling1D", "tensorflow.keras.Model", "tensorflow.keras.initializers.Constant", "tensorflow.keras.metrics.TruePositives", "tensorflow.keras.metrics.Recall", "tensorflow.keras.losses.BinaryCrossentropy" ]
[((1054, 1106), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(400, 5, 1)', 'name': '"""sequence_conv"""'}), "(shape=(400, 5, 1), name='sequence_conv')\n", (1065, 1106), False, 'from tensorflow import keras\n'), ((1888, 1929), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(400, 1)', 'name': '"""depth"""'}), "(shape=(400, 1), name='depth')\n", (1899, 1929), False, 'from tensorflow import keras\n'), ((2692, 2747), 'tensorflow.keras.layers.concatenate', 'layers.concatenate', (['[sequence_features, depth_features]'], {}), '([sequence_features, depth_features])\n', (2710, 2747), False, 'from tensorflow.keras import layers\n'), ((2841, 2886), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(20, 1)', 'name': '"""expression"""'}), "(shape=(20, 1), name='expression')\n", (2852, 2886), False, 'from tensorflow import keras\n'), ((2978, 3018), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 1)', 'name': '"""weight"""'}), "(shape=(1, 1), name='weight')\n", (2989, 3018), False, 'from tensorflow import keras\n'), ((3091, 3149), 'tensorflow.keras.layers.concatenate', 'layers.concatenate', (['[expression_features, weight_features]'], {}), '([expression_features, weight_features])\n', (3109, 3149), False, 'from tensorflow.keras import layers\n'), ((3226, 3270), 'tensorflow.keras.layers.concatenate', 'layers.concatenate', (['[conv_dense, data_dense]'], {}), '([conv_dense, data_dense])\n', (3244, 3270), False, 'from tensorflow.keras import layers\n'), ((3548, 3646), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[seq_input, depth_input, expression_input, weight_input]', 'outputs': 'seq_pred'}), '(inputs=[seq_input, depth_input, expression_input, weight_input],\n outputs=seq_pred)\n', (3559, 3646), False, 'from tensorflow import keras\n'), ((346, 384), 'tensorflow.keras.metrics.TruePositives', 'keras.metrics.TruePositives', ([], {'name': '"""tp"""'}), "(name='tp')\n", (373, 384), False, 'from tensorflow import keras\n'), ((394, 433), 'tensorflow.keras.metrics.FalsePositives', 'keras.metrics.FalsePositives', ([], {'name': '"""fp"""'}), "(name='fp')\n", (422, 433), False, 'from tensorflow import keras\n'), ((443, 481), 'tensorflow.keras.metrics.TrueNegatives', 'keras.metrics.TrueNegatives', ([], {'name': '"""tn"""'}), "(name='tn')\n", (470, 481), False, 'from tensorflow import keras\n'), ((491, 530), 'tensorflow.keras.metrics.FalseNegatives', 'keras.metrics.FalseNegatives', ([], {'name': '"""fn"""'}), "(name='fn')\n", (519, 530), False, 'from tensorflow import keras\n'), ((541, 586), 'tensorflow.keras.metrics.BinaryAccuracy', 'keras.metrics.BinaryAccuracy', ([], {'name': '"""accuracy"""'}), "(name='accuracy')\n", (569, 586), False, 'from tensorflow import keras\n'), ((596, 637), 'tensorflow.keras.metrics.Precision', 'keras.metrics.Precision', ([], {'name': '"""precision"""'}), "(name='precision')\n", (619, 637), False, 'from tensorflow import keras\n'), ((647, 682), 'tensorflow.keras.metrics.Recall', 'keras.metrics.Recall', ([], {'name': '"""recall"""'}), "(name='recall')\n", (667, 682), False, 'from tensorflow import keras\n'), ((692, 725), 'tensorflow.keras.metrics.AUC', 'keras.metrics.AUC', ([], {'name': '"""ROC_auc"""'}), "(name='ROC_auc')\n", (709, 725), False, 'from tensorflow import keras\n'), ((735, 779), 'tensorflow.keras.metrics.AUC', 'keras.metrics.AUC', ([], {'name': '"""PR_auc"""', 'curve': '"""PR"""'}), "(name='PR_auc', curve='PR')\n", (752, 779), False, 'from tensorflow import keras\n'), ((791, 868), 
'tensorflow.keras.losses.BinaryCrossentropy', 'keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)', 'name': '"""binary_crossentropy"""'}), "(from_logits=True, name='binary_crossentropy')\n", (822, 868), False, 'from tensorflow import keras\n'), ((993, 1036), 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['output_bias'], {}), '(output_bias)\n', (1023, 1036), True, 'import tensorflow as tf\n'), ((1300, 1343), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 1)'}), '(pool_size=(2, 1))\n', (1325, 1343), False, 'from tensorflow import keras\n'), ((1533, 1576), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 1)'}), '(pool_size=(2, 1))\n', (1558, 1576), False, 'from tensorflow import keras\n'), ((1767, 1810), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 1)'}), '(pool_size=(2, 1))\n', (1792, 1810), False, 'from tensorflow import keras\n'), ((1838, 1860), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (1858, 1860), False, 'from tensorflow import keras\n'), ((2125, 2163), 'tensorflow.keras.layers.MaxPooling1D', 'keras.layers.MaxPooling1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (2150, 2163), False, 'from tensorflow import keras\n'), ((2355, 2393), 'tensorflow.keras.layers.MaxPooling1D', 'keras.layers.MaxPooling1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (2380, 2393), False, 'from tensorflow import keras\n'), ((2586, 2624), 'tensorflow.keras.layers.MaxPooling1D', 'keras.layers.MaxPooling1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (2611, 2624), False, 'from tensorflow import keras\n'), ((2652, 2674), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (2672, 2674), False, 'from tensorflow import keras\n'), ((2765, 2807), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(108)'], {'activation': '"""relu"""'}), "(108, activation='relu')\n", (2783, 2807), False, 'from tensorflow import keras\n'), ((2913, 2935), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (2933, 2935), False, 'from tensorflow import keras\n'), ((3041, 3063), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (3061, 3063), False, 'from tensorflow import keras\n'), ((3167, 3208), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(20)'], {'activation': '"""relu"""'}), "(20, activation='relu')\n", (3185, 3208), False, 'from tensorflow import keras\n'), ((3413, 3438), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.3)'], {}), '(0.3)\n', (3433, 3438), False, 'from tensorflow import keras\n'), ((3458, 3531), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""', 'bias_initializer': 'output_bias'}), "(1, activation='sigmoid', bias_initializer=output_bias)\n", (3476, 3531), False, 'from tensorflow import keras\n'), ((3716, 3744), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (3737, 3744), False, 'from tensorflow import keras\n'), ((1247, 1279), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.0001)'], {}), '(0.0001)\n', (1271, 1279), True, 'import tensorflow as tf\n'), ((1488, 1520), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.0005)'], {}), '(0.0005)\n', (1512, 1520), True, 'import tensorflow as tf\n'), ((1721, 1753), 'tensorflow.keras.regularizers.l2', 
'tf.keras.regularizers.l2', (['(0.0005)'], {}), '(0.0005)\n', (1745, 1753), True, 'import tensorflow as tf\n'), ((2070, 2102), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.0001)'], {}), '(0.0001)\n', (2094, 2102), True, 'import tensorflow as tf\n'), ((2310, 2342), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.0005)'], {}), '(0.0005)\n', (2334, 2342), True, 'import tensorflow as tf\n'), ((2541, 2573), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.0005)'], {}), '(0.0005)\n', (2565, 2573), True, 'import tensorflow as tf\n'), ((3368, 3400), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.0005)'], {}), '(0.0005)\n', (3392, 3400), True, 'import tensorflow as tf\n')]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 22/11/2018

@author: XXX
"""

import unittest
import os, shutil

from RecSysFramework.Recommender.NonPersonalized import TopPop, Random, GlobalEffects
from RecSysFramework.Recommender.KNN import UserKNNCF
from RecSysFramework.Recommender.KNN import ItemKNNCF
from RecSysFramework.Recommender.SLIM.BPR import SLIM as SLIM_BPR
from RecSysFramework.Recommender.SLIM.ElasticNet import SLIM as SLIM_RMSE
from RecSysFramework.Recommender.GraphBased.P3alphaRecommender import P3alpha
from RecSysFramework.Recommender.GraphBased.RP3betaRecommender import RP3beta
from RecSysFramework.Recommender.MatrixFactorization import BPRMF, FunkSVD, AsySVD
from RecSysFramework.Recommender.MatrixFactorization import PureSVD
from RecSysFramework.Recommender.MatrixFactorization import IALS
from RecSysFramework.Recommender.MatrixFactorization import NMF
from RecSysFramework.Evaluation.Evaluator import EvaluatorHoldout
from RecSysFramework.DataManager.Reader import Movielens1MReader
from RecSysFramework.DataManager.Splitter import Holdout
from RecSysFramework.Utils import EarlyStoppingModel


class RecommenderTestCase(unittest.TestCase):

    recommender_class = None

    def setUp(self):
        self.dataset = Movielens1MReader().load_data()
        self.splitter = Holdout(train_perc=0.8, test_perc=0.2, validation_perc=0.0)
        self.train, self.test = self.splitter.split(self.dataset)

    def common_test_recommender(self, recommender_class):
        temp_save_file_folder = self.dataset.get_complete_folder() + os.sep + "__temp__"
        os.makedirs(temp_save_file_folder, exist_ok=True)

        URM_train = self.train.get_URM()
        URM_test = self.test.get_URM()

        recommender_object = recommender_class(URM_train)
        if isinstance(recommender_object, EarlyStoppingModel):
            fit_params = {"epochs": 10}
        else:
            fit_params = {}
        recommender_object.fit(**fit_params)

        evaluator = EvaluatorHoldout([5], exclude_seen=True)
        metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test)

        recommender_object.save_model(temp_save_file_folder, file_name="temp_model")

        recommender_object = recommender_class(URM_train)
        recommender_object.load_model(temp_save_file_folder, file_name="temp_model")

        evaluator = EvaluatorHoldout([5], exclude_seen=True)
        metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test)

        shutil.rmtree(temp_save_file_folder, ignore_errors=True)


class RandomRecommenderTestCase(RecommenderTestCase):
    def test_recommender(self):
        self.common_test_recommender(Random)


class TopPopRecommenderTestCase(RecommenderTestCase):
    def test_recommender(self):
        self.common_test_recommender(TopPop)


class GlobalEffectsRecommenderTestCase(RecommenderTestCase):
    def test_recommender(self):
        self.common_test_recommender(GlobalEffects)


class UserKNNCFRecommenderTestCase(RecommenderTestCase):
    def test_recommender(self):
        self.common_test_recommender(UserKNNCF)


class ItemKNNCFRecommenderTestCase(RecommenderTestCase):
    def test_recommender(self):
        self.common_test_recommender(ItemKNNCF)


class P3alphaRecommenderTestCase(RecommenderTestCase):
    def test_recommender(self):
        self.common_test_recommender(P3alpha)


class RP3betaRecommenderTestCase(RecommenderTestCase):
    def test_recommender(self):
        self.common_test_recommender(RP3beta)


class SLIM_BPRRecommenderTestCase(RecommenderTestCase):
    def test_recommender(self):
        self.common_test_recommender(SLIM_BPR)


class SLIM_RMSERecommenderTestCase(RecommenderTestCase):
    def test_recommender(self):
        self.common_test_recommender(SLIM_RMSE)


class BPRMFRecommenderTestCase(RecommenderTestCase):
    def test_recommender(self):
        self.common_test_recommender(BPRMF)


class FunkSVDRecommenderTestCase(RecommenderTestCase):
    def test_recommender(self):
        self.common_test_recommender(FunkSVD)


class AsySVDRecommenderTestCase(RecommenderTestCase):
    def test_recommender(self):
        self.common_test_recommender(AsySVD)


class PureSVDRecommenderTestCase(RecommenderTestCase):
    def test_recommender(self):
        self.common_test_recommender(PureSVD)


class NMFRecommenderTestCase(RecommenderTestCase):
    def test_recommender(self):
        self.common_test_recommender(NMF)


class IALSRecommenderTestCase(RecommenderTestCase):
    def test_recommender(self):
        self.common_test_recommender(IALS)


if __name__ == '__main__':
    unittest.main()
[ "unittest.main", "os.makedirs", "RecSysFramework.DataManager.Splitter.Holdout", "RecSysFramework.Evaluation.Evaluator.EvaluatorHoldout", "RecSysFramework.DataManager.Reader.Movielens1MReader", "shutil.rmtree" ]
[((4648, 4663), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4661, 4663), False, 'import unittest\n'), ((1321, 1380), 'RecSysFramework.DataManager.Splitter.Holdout', 'Holdout', ([], {'train_perc': '(0.8)', 'test_perc': '(0.2)', 'validation_perc': '(0.0)'}), '(train_perc=0.8, test_perc=0.2, validation_perc=0.0)\n', (1328, 1380), False, 'from RecSysFramework.DataManager.Splitter import Holdout\n'), ((1605, 1654), 'os.makedirs', 'os.makedirs', (['temp_save_file_folder'], {'exist_ok': '(True)'}), '(temp_save_file_folder, exist_ok=True)\n', (1616, 1654), False, 'import os, shutil\n'), ((2008, 2048), 'RecSysFramework.Evaluation.Evaluator.EvaluatorHoldout', 'EvaluatorHoldout', (['[5]'], {'exclude_seen': '(True)'}), '([5], exclude_seen=True)\n', (2024, 2048), False, 'from RecSysFramework.Evaluation.Evaluator import EvaluatorHoldout\n'), ((2395, 2435), 'RecSysFramework.Evaluation.Evaluator.EvaluatorHoldout', 'EvaluatorHoldout', (['[5]'], {'exclude_seen': '(True)'}), '([5], exclude_seen=True)\n', (2411, 2435), False, 'from RecSysFramework.Evaluation.Evaluator import EvaluatorHoldout\n'), ((2540, 2596), 'shutil.rmtree', 'shutil.rmtree', (['temp_save_file_folder'], {'ignore_errors': '(True)'}), '(temp_save_file_folder, ignore_errors=True)\n', (2553, 2596), False, 'import os, shutil\n'), ((1265, 1284), 'RecSysFramework.DataManager.Reader.Movielens1MReader', 'Movielens1MReader', ([], {}), '()\n', (1282, 1284), False, 'from RecSysFramework.DataManager.Reader import Movielens1MReader\n')]
from django import setup

setup()

from scts.factory.build_app import build_app  # noqa

app = build_app()
[ "scts.factory.build_app.build_app", "django.setup" ]
[((26, 33), 'django.setup', 'setup', ([], {}), '()\n', (31, 33), False, 'from django import setup\n'), ((95, 106), 'scts.factory.build_app.build_app', 'build_app', ([], {}), '()\n', (104, 106), False, 'from scts.factory.build_app import build_app\n')]
import uvicorn

uvicorn.run("bc_website.app:app", host="localhost", port=5000, reload=True)
[ "uvicorn.run" ]
[((17, 92), 'uvicorn.run', 'uvicorn.run', (['"""bc_website.app:app"""'], {'host': '"""localhost"""', 'port': '(5000)', 'reload': '(True)'}), "('bc_website.app:app', host='localhost', port=5000, reload=True)\n", (28, 92), False, 'import uvicorn\n')]
import argparse

import list
import depth

VERSION = '1.0.2'


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', action='version', version=VERSION)

    subparsers = parser.add_subparsers()
    list.init(subparsers)
    depth.init(subparsers)

    args = parser.parse_args()
    args.func(args)


if __name__ == "__main__":
    main()
[ "list.init", "depth.init", "argparse.ArgumentParser" ]
[((87, 112), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (110, 112), False, 'import argparse\n'), ((232, 253), 'list.init', 'list.init', (['subparsers'], {}), '(subparsers)\n', (241, 253), False, 'import list\n'), ((258, 280), 'depth.init', 'depth.init', (['subparsers'], {}), '(subparsers)\n', (268, 280), False, 'import depth\n')]
#!/usr/local/bin/python3
"""
This program takes a filename as input and displays the count for each
word length, ignoring punctuation and non-alphanumeric characters.
"""
import re, sys

word_table = {}

with open(sys.argv[1]) as f:
    for line in f:
        for word in line.split():
            stripped = re.sub("[^a-zA-Z ]", "", word)
            length = len(stripped)
            if length in word_table.keys():
                word_table[length] += 1
            else:
                if length > 0:
                    word_table[length] = 1

print("Length Count")
for key in word_table:
    print("{0} {1}".format(key, word_table[key]))
[ "re.sub" ]
[((311, 341), 're.sub', 're.sub', (['"""[^a-zA-Z ]"""', '""""""', 'word'], {}), "('[^a-zA-Z ]', '', word)\n", (317, 341), False, 'import re, sys\n')]
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Dict, List

from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

__all__ = ['compute_metrics']


def accuracy(preds: List[int], labels: List[int]):
    return {"acc": (preds == labels).mean()}


def acc_and_f1(preds: List[int], labels: List[int]):
    accuracy = (preds == labels).mean()
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {"acc": accuracy, "f1": f1}


def mcc(preds: List[int], labels: List[int]):
    return {"mcc": matthews_corrcoef(labels, preds)}


def pearson_and_spearman(preds: List[int], labels: List[int]):
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {"pearson": pearson_corr, "spearmanr": spearman_corr, "pear+spear av": (pearson_corr + spearman_corr) / 2}


def compute_metrics(task_name: str, preds: List[int], labels: List[int]) -> Dict[str, float]:
    """
    Computes metrics for GLUE tasks
    Args:
        task_name: GLUE task name
        preds: model predictions
        labels: golden labels
    Returns:
        metrics
    """
    if len(preds) != len(labels):
        raise ValueError("Predictions and labels must have the same length")

    metric_fn = accuracy
    if task_name == 'cola':
        metric_fn = mcc
    elif task_name in ['mrpc', 'qqp']:
        metric_fn = acc_and_f1
    elif task_name == 'sts-b':
        metric_fn = pearson_and_spearman

    return metric_fn(preds, labels)
[ "sklearn.metrics.f1_score", "sklearn.metrics.matthews_corrcoef", "scipy.stats.pearsonr", "scipy.stats.spearmanr" ]
[((1049, 1086), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'labels', 'y_pred': 'preds'}), '(y_true=labels, y_pred=preds)\n', (1057, 1086), False, 'from sklearn.metrics import f1_score, matthews_corrcoef\n'), ((1193, 1225), 'sklearn.metrics.matthews_corrcoef', 'matthews_corrcoef', (['labels', 'preds'], {}), '(labels, preds)\n', (1210, 1225), False, 'from sklearn.metrics import f1_score, matthews_corrcoef\n'), ((1311, 1334), 'scipy.stats.pearsonr', 'pearsonr', (['preds', 'labels'], {}), '(preds, labels)\n', (1319, 1334), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((1358, 1382), 'scipy.stats.spearmanr', 'spearmanr', (['preds', 'labels'], {}), '(preds, labels)\n', (1367, 1382), False, 'from scipy.stats import pearsonr, spearmanr\n')]
""" There are two useful functions: 1. correlationCoef will tell you the coreelation coefficient of two patches of same size the greater this coefficient is, the similar this two patches are. 2. matchTemplate will automatically go through the whole input 'img' with a sliding window and implement correlationCoef function on every window comparing it to template. """ import cv2 import numpy as np from matplotlib import pyplot as plt def correlationCoef(g1,g2): """ Parameters: g1: graph one, grayscale(0-255) g2: graph two, grayscale(0-255) Return: Correlation coefficient(float). """ #1. make sure I read the correct patches if(g1.shape!=g2.shape): print('Invalid patch. Patch should be in same size') print('Size of graph 1:',(g1.shape)) print('Size of graph 2:',(g2.shape)) return 0 #2. Calculate Statistic Infomation std_g1=np.std(g1) std_g2=np.std(g2) array1=g1.ravel() array2=g2.ravel() cov_matrix=np.cov(array1,array2) cov=cov_matrix[1,0] #3. Calculate coefficient(float) coef=cov/(std_g1*std_g2) return coef def matchTemplate(img,template): """ Parameters: img: image, such as a cat, grayscale(0-255) template: your target, such as a cat's paw, grayscale(0-255) Return: a float image consisted of correlation coefficient of each pixel. """ win_w,win_h=template.shape[::-1] w,h=img.shape[::-1] result=np.zeros(img.shape) for row in range(h-win_h): for col in range(w-win_w): t_patch=img[row:row+win_h,col:col+win_w] result[row,col]=correlationCoef(template,t_patch) return result
[ "numpy.std", "numpy.cov", "numpy.zeros" ]
[((938, 948), 'numpy.std', 'np.std', (['g1'], {}), '(g1)\n', (944, 948), True, 'import numpy as np\n'), ((960, 970), 'numpy.std', 'np.std', (['g2'], {}), '(g2)\n', (966, 970), True, 'import numpy as np\n'), ((1030, 1052), 'numpy.cov', 'np.cov', (['array1', 'array2'], {}), '(array1, array2)\n', (1036, 1052), True, 'import numpy as np\n'), ((1510, 1529), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (1518, 1529), True, 'import numpy as np\n')]
'''Train CIFAR10 with PyTorch.'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms

import os
import argparse
from sklearn.metrics import classification_report
import pandas as pd
import optuna

from datasets import market1501
import metrics
import torchvision.models as models


def opt():
    study = optuna.create_study(direction='maximize')
    #study = optuna.create_study(direction='minimize')
    study.optimize(objective, n_trials=1000)

    print('Number of finished trials: ', len(study.trials))
    print('Best trial:')
    trial = study.best_trial
    print(' Value: ', trial.value)
    print(' Params: ')
    for key, value in trial.params.items():
        print(' {}: {}'.format(key, value))


def objective(trial):
    # Parse arguments.
    args = parse_args()

    # Set device.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Load dataset.
    if os.path.exists(args.anno_path) == False:
        market1501.make_train_anno(args.data_dir, args.anno_path)
    train_loader, class_names = market1501.load_train_data(args.anno_path, args.n_batch)

    # Set a model.
    # cf. https://qiita.com/perrying/items/857df46bb6cdc3047bd8
    n_feats = trial.suggest_categorical('n_feats', [256*1, 256*2])
    model = models.resnet50(pretrained=True)
    model.fc = nn.Linear(2048, n_feats)
    model = model.to(device)
    #print(model)

    # Set a metric
    """
    'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}.
    """
    norm = trial.suggest_int('norm', 0, 30)
    margin = trial.suggest_uniform('margin', 0.0, 1e-3)
    easy_margin = trial.suggest_categorical('easy_margin', [0, 1])
    metric = metrics.ArcMarginProduct(n_feats, len(class_names), s=norm, m=margin, easy_margin=easy_margin)
    metric.to(device)

    # Set loss function and optimization function.
    lr = trial.suggest_uniform('lr', 1e-3, 1e-1)
    weight_decay = trial.suggest_uniform('weight_decay', 1e-3, 1e-1)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD([{'params': model.parameters()}, {'params': metric.parameters()}], lr=lr, weight_decay=weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)

    # Train and test.
    for epoch in range(args.n_epoch):
        # Train and test a model.
        train_acc, train_loss = train(device, train_loader, args.n_batch, model, metric, criterion, optimizer, scheduler)
        #test_acc, test_loss = test(device, test_loader, model, metric, criterion)

        # Output score.
        #stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}, test acc: {:<8}, test loss: {:<8}'
        #print(stdout_temp.format(epoch+1, train_acc, train_loss, test_acc, test_loss))
        stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}'
        print(stdout_temp.format(epoch+1, train_acc, train_loss))

        # Save a model checkpoint.
        #model_ckpt_path = args.model_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch+1)
        #torch.save(model.state_dict(), model_ckpt_path)
        #print('Saved a model checkpoint at {}'.format(model_ckpt_path))
        #print('')

    return train_acc


def train(device, train_loader, n_batch, model, metric_fc, criterion, optimizer, scheduler):
    model.train()

    output_list = []
    target_list = []
    running_loss = 0.0
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        # Forward processing.
        inputs, targets = inputs.to(device), targets.to(device).long()
        features = model(inputs)
        outputs = metric_fc(features, targets)
        loss = criterion(outputs, targets)

        # Backward processing.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()

        # Set data to calculate score.
        output_list += [int(o.argmax()) for o in outputs]
        target_list += [int(t) for t in targets]
        running_loss += loss.item()

        # Calculate score at present.
        train_acc, train_loss = calc_score(output_list, target_list, running_loss, n_batch, batch_idx, train_loader)
        if (batch_idx % 100 == 0 and batch_idx != 0) or (batch_idx == len(train_loader)):
            stdout_temp = 'batch: {:>3}/{:<3}, train acc: {:<8}, train loss: {:<8}'
            print(stdout_temp.format(batch_idx, len(train_loader), train_acc, train_loss))

    # Calculate score.
    #train_acc, train_loss = calc_score(output_list, target_list, running_loss, train_loader)

    return train_acc, train_loss


def test(device, test_loader, model, metric_fc, criterion):
    model.eval()

    output_list = []
    target_list = []
    running_loss = 0.0
    for batch_idx, (inputs, targets) in enumerate(test_loader):
        # Forward processing.
        inputs, targets = inputs.to(device), targets.to(device)
        features = model(inputs)
        outputs = metric_fc(features, targets)
        loss = criterion(outputs, targets)

        # Set data to calculate score.
        output_list += [int(o.argmax()) for o in outputs]
        target_list += [int(t) for t in targets]
        running_loss += loss.item()

    test_acc, test_loss = calc_score(output_list, target_list, running_loss, test_loader)

    return test_acc, test_loss


def calc_score(output_list, target_list, running_loss, n_batch, batch_idx, data_loader):
    # Calculate accuracy.
    result = classification_report(output_list, target_list, output_dict=True)
    acc = round(result['weighted avg']['f1-score'], 6)
    #loss = round(running_loss / len(data_loader.dataset), 6)
    if n_batch * batch_idx < len(data_loader.dataset):
        loss = running_loss / (n_batch * (batch_idx+1))
    else:
        loss = running_loss / len(data_loader.dataset)

    return acc, loss


def parse_args():
    # Set arguments.
    arg_parser = argparse.ArgumentParser(description="Image Classification")

    arg_parser.add_argument('--dataset_name', default='Market1501')
    arg_parser.add_argument('--data_dir', default='D:/workspace/datasets/Market-1501-v15.09.15/')
    arg_parser.add_argument('--anno_dir', default='../data/annos/')
    arg_parser.add_argument('--anno_path', default='../data/annos/anno_market1501_train.csv')
    arg_parser.add_argument('--n_batch', default=32, type=int)
    arg_parser.add_argument("--model_name", type=str, default='ResNet50')
    arg_parser.add_argument("--model_ckpt_dir", type=str, default='../experiments/models/checkpoints/')
    arg_parser.add_argument("--model_ckpt_path_temp", type=str, default='../experiments/models/checkpoints/{}_{}_epoch={}.pth')
    arg_parser.add_argument('--n_epoch', default=12, type=int, help='The number of epoch')
    arg_parser.add_argument('--lr', default=0.001, type=float, help='Learning rate')
    arg_parser.add_argument('--n_feats', default=581, type=int, help='The number of base model output')
    arg_parser.add_argument('--easy_margin', default=1, type=int, help='0 is False, 1 is True')
    arg_parser.add_argument('--weight_decay', default=5e-4, type=float, help='')
    arg_parser.add_argument('--norm', default=1, type=int, help='ArcFace: norm of input feature')
    arg_parser.add_argument('--margin', default=0.0008, type=float, help='ArcFace: margin')
    arg_parser.add_argument('--step_size', default=10, type=int, help='Learning Rate: step size')
    arg_parser.add_argument('--gamma', default=0.1, type=float, help='Learning Rate: gamma')
    """
    {'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}.
    """
    args = arg_parser.parse_args()

    # Make directory.
    os.makedirs(args.anno_dir, exist_ok=True)
    os.makedirs(args.model_ckpt_dir, exist_ok=True)

    # Validate paths.
    assert os.path.exists(args.data_dir)
    assert os.path.exists(args.anno_dir)
    assert os.path.exists(args.model_ckpt_dir)

    return args


if __name__ == "__main__":
    opt()
    #main()
[ "torch.optim.lr_scheduler.StepLR", "datasets.market1501.load_train_data", "argparse.ArgumentParser", "os.makedirs", "torch.nn.CrossEntropyLoss", "os.path.exists", "sklearn.metrics.classification_report", "torchvision.models.resnet50", "torch.cuda.is_available", "torch.nn.Linear", "optuna.create_study", "datasets.market1501.make_train_anno" ]
[((446, 487), 'optuna.create_study', 'optuna.create_study', ([], {'direction': '"""maximize"""'}), "(direction='maximize')\n", (465, 487), False, 'import optuna\n'), ((1118, 1174), 'datasets.market1501.load_train_data', 'market1501.load_train_data', (['args.anno_path', 'args.n_batch'], {}), '(args.anno_path, args.n_batch)\n', (1144, 1174), False, 'from datasets import market1501\n'), ((1328, 1360), 'torchvision.models.resnet50', 'models.resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1343, 1360), True, 'import torchvision.models as models\n'), ((1373, 1397), 'torch.nn.Linear', 'nn.Linear', (['(2048)', 'n_feats'], {}), '(2048, n_feats)\n', (1382, 1397), True, 'import torch.nn as nn\n'), ((2069, 2090), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2088, 2090), True, 'import torch.nn as nn\n'), ((2245, 2330), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': 'args.step_size', 'gamma': 'args.gamma'}), '(optimizer, step_size=args.step_size, gamma=args.gamma\n )\n', (2270, 2330), True, 'import torch.optim as optim\n'), ((5227, 5292), 'sklearn.metrics.classification_report', 'classification_report', (['output_list', 'target_list'], {'output_dict': '(True)'}), '(output_list, target_list, output_dict=True)\n', (5248, 5292), False, 'from sklearn.metrics import classification_report\n'), ((5634, 5693), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Image Classification"""'}), "(description='Image Classification')\n", (5657, 5693), False, 'import argparse\n'), ((7394, 7435), 'os.makedirs', 'os.makedirs', (['args.anno_dir'], {'exist_ok': '(True)'}), '(args.anno_dir, exist_ok=True)\n', (7405, 7435), False, 'import os\n'), ((7437, 7484), 'os.makedirs', 'os.makedirs', (['args.model_ckpt_dir'], {'exist_ok': '(True)'}), '(args.model_ckpt_dir, exist_ok=True)\n', (7448, 7484), False, 'import os\n'), ((7513, 7542), 'os.path.exists', 'os.path.exists', (['args.data_dir'], {}), '(args.data_dir)\n', (7527, 7542), False, 'import os\n'), ((7551, 7580), 'os.path.exists', 'os.path.exists', (['args.anno_dir'], {}), '(args.anno_dir)\n', (7565, 7580), False, 'import os\n'), ((7589, 7624), 'os.path.exists', 'os.path.exists', (['args.model_ckpt_dir'], {}), '(args.model_ckpt_dir)\n', (7603, 7624), False, 'import os\n'), ((929, 954), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (952, 954), False, 'import torch\n'), ((988, 1018), 'os.path.exists', 'os.path.exists', (['args.anno_path'], {}), '(args.anno_path)\n', (1002, 1018), False, 'import os\n'), ((1031, 1088), 'datasets.market1501.make_train_anno', 'market1501.make_train_anno', (['args.data_dir', 'args.anno_path'], {}), '(args.data_dir, args.anno_path)\n', (1057, 1088), False, 'from datasets import market1501\n')]
import discord
from discord.ext import commands
import aiohttp
import requests


class Image(commands.Cog, name='Image'):
    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def cat(self, ctx):
        """Gives You Random Image Of Cat"""
        async with ctx.channel.typing():
            async with aiohttp.ClientSession() as cs:
                async with cs.get('http://aws.random.cat/meow') as r:
                    data = await r.json()
                    em = discord.Embed(
                        title='Cat',
                        timestamp=ctx.message.created_at,
                        color=self.bot.color)
                    em.set_image(url=data['file'])
                    em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}")
                    await ctx.send(embed=em)

    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def dog(self, ctx):
        """Gives You Random Image Of Dog"""
        async with ctx.channel.typing():
            async with aiohttp.ClientSession() as cs:
                async with cs.get('http://random.dog/woof.json') as r:
                    data = await r.json()
                    em = discord.Embed(
                        title='Dog',
                        timestamp=ctx.message.created_at,
                        color=self.bot.color)
                    em.set_image(url=data['url'])
                    em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}")
                    await ctx.send(embed=em)

    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def fox(self, ctx):
        """Gives You Random Image Of Fox"""
        async with ctx.channel.typing():
            async with aiohttp.ClientSession() as cs:
                async with cs.get('https://some-random-api.ml/img/fox') as r:
                    data = await r.json()
                    em = discord.Embed(
                        title='Fox',
                        timestamp=ctx.message.created_at,
                        color=self.bot.color)
                    em.set_image(url=data['link'])
                    em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}")
                    await ctx.send(embed=em)

    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def panda(self, ctx):
        """Gives You Random Image Of Panda"""
        async with ctx.channel.typing():
            async with aiohttp.ClientSession() as cs:
                async with cs.get('https://some-random-api.ml/img/panda') as r:
                    data = await r.json()
                    em = discord.Embed(
                        title='Panda',
                        timestamp=ctx.message.created_at,
                        color=self.bot.color)
                    em.set_image(url=data['link'])
                    em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}")
                    await ctx.send(embed=em)

    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def red_panda(self, ctx):
        """Gives You Random Image Of Red Panda"""
        async with ctx.channel.typing():
            async with aiohttp.ClientSession() as cs:
                async with cs.get('https://some-random-api.ml/img/red_panda') as r:
                    data = await r.json()
                    em = discord.Embed(
                        title='Red Panda',
                        timestamp=ctx.message.created_at,
                        color=self.bot.color)
                    em.set_image(url=data['link'])
                    em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}")
                    await ctx.send(embed=em)

    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def bird(self, ctx):
        """Gives You Random Image Of Bird"""
        async with ctx.channel.typing():
            async with aiohttp.ClientSession() as cs:
                async with cs.get('https://some-random-api.ml/img/birb') as r:
                    data = await r.json()
                    em = discord.Embed(
                        title='Bird',
                        timestamp=ctx.message.created_at,
                        color=self.bot.color)
                    em.set_image(url=data['link'])
                    em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}")
                    await ctx.send(embed=em)

    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def kola(self, ctx):
        """Gives You Random Image Of Kola"""
        async with ctx.channel.typing():
            async with aiohttp.ClientSession() as cs:
                async with cs.get('https://some-random-api.ml/img/koala') as r:
                    data = await r.json()
                    em = discord.Embed(
                        title='kola',
                        timestamp=ctx.message.created_at,
                        color=self.bot.color)
                    em.set_image(url=data['link'])
                    em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}")
                    await ctx.send(embed=em)

    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def pikachu(self, ctx):
        """Gives You Random Image Or GIF Of Pikachu"""
        async with ctx.channel.typing():
            async with aiohttp.ClientSession() as cs:
                async with cs.get('https://some-random-api.ml/img/pikachu') as r:
                    data = await r.json()
                    em = discord.Embed(
                        title='Pikachu',
                        timestamp=ctx.message.created_at,
                        color=self.bot.color)
                    em.set_image(url=data['link'])
                    em.set_footer(icon_url=ctx.author.avatar_url, text=f"Requested By {ctx.author.name}")
                    await ctx.send(embed=em)

    # @commands.command()
    # @commands.cooldown(1, 10, commands.BucketType.user)
    # async def yt(self,ctx,comment:str):
    #     """Comments On Youtube"""
    #     url = f"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}"
    #     em = discord.Embed(color = ctx.author.color)
    #     em.set_image(url=url)
    #     em.set_footer(text=f"Requested by {ctx.author}", icon_url=ctx.author.avatar_url)
    #     await ctx.send(embed=em)


def setup(bot):
    bot.add_cog(Image(bot))
[ "aiohttp.ClientSession", "discord.ext.commands.cooldown", "discord.ext.commands.command", "discord.Embed" ]
[((180, 198), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (196, 198), False, 'from discord.ext import commands\n'), ((204, 254), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(10)', 'commands.BucketType.user'], {}), '(1, 10, commands.BucketType.user)\n', (221, 254), False, 'from discord.ext import commands\n'), ((912, 930), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (928, 930), False, 'from discord.ext import commands\n'), ((936, 986), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(10)', 'commands.BucketType.user'], {}), '(1, 10, commands.BucketType.user)\n', (953, 986), False, 'from discord.ext import commands\n'), ((1644, 1662), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (1660, 1662), False, 'from discord.ext import commands\n'), ((1668, 1718), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(10)', 'commands.BucketType.user'], {}), '(1, 10, commands.BucketType.user)\n', (1685, 1718), False, 'from discord.ext import commands\n'), ((2384, 2402), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (2400, 2402), False, 'from discord.ext import commands\n'), ((2408, 2458), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(10)', 'commands.BucketType.user'], {}), '(1, 10, commands.BucketType.user)\n', (2425, 2458), False, 'from discord.ext import commands\n'), ((3132, 3150), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (3148, 3150), False, 'from discord.ext import commands\n'), ((3156, 3206), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(10)', 'commands.BucketType.user'], {}), '(1, 10, commands.BucketType.user)\n', (3173, 3206), False, 'from discord.ext import commands\n'), ((3896, 3914), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (3912, 3914), False, 'from discord.ext import commands\n'), ((3920, 3970), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(10)', 'commands.BucketType.user'], {}), '(1, 10, commands.BucketType.user)\n', (3937, 3970), False, 'from discord.ext import commands\n'), ((4640, 4658), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (4656, 4658), False, 'from discord.ext import commands\n'), ((4664, 4714), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(10)', 'commands.BucketType.user'], {}), '(1, 10, commands.BucketType.user)\n', (4681, 4714), False, 'from discord.ext import commands\n'), ((5385, 5403), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (5401, 5403), False, 'from discord.ext import commands\n'), ((5409, 5459), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(10)', 'commands.BucketType.user'], {}), '(1, 10, commands.BucketType.user)\n', (5426, 5459), False, 'from discord.ext import commands\n'), ((393, 416), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (414, 416), False, 'import aiohttp\n'), ((1125, 1148), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (1146, 1148), False, 'import aiohttp\n'), ((1857, 1880), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (1878, 1880), False, 'import aiohttp\n'), ((2601, 2624), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (2622, 2624), False, 'import aiohttp\n'), ((3357, 3380), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (3378, 3380), False, 'import aiohttp\n'), ((4111, 4134), 
'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (4132, 4134), False, 'import aiohttp\n'), ((4855, 4878), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (4876, 4878), False, 'import aiohttp\n'), ((5613, 5636), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (5634, 5636), False, 'import aiohttp\n'), ((562, 649), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Cat"""', 'timestamp': 'ctx.message.created_at', 'color': 'self.bot.color'}), "(title='Cat', timestamp=ctx.message.created_at, color=self.bot\n .color)\n", (575, 649), False, 'import discord\n'), ((1295, 1382), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Dog"""', 'timestamp': 'ctx.message.created_at', 'color': 'self.bot.color'}), "(title='Dog', timestamp=ctx.message.created_at, color=self.bot\n .color)\n", (1308, 1382), False, 'import discord\n'), ((2034, 2121), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Fox"""', 'timestamp': 'ctx.message.created_at', 'color': 'self.bot.color'}), "(title='Fox', timestamp=ctx.message.created_at, color=self.bot\n .color)\n", (2047, 2121), False, 'import discord\n'), ((2780, 2869), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Panda"""', 'timestamp': 'ctx.message.created_at', 'color': 'self.bot.color'}), "(title='Panda', timestamp=ctx.message.created_at, color=self.\n bot.color)\n", (2793, 2869), False, 'import discord\n'), ((3540, 3633), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Red Panda"""', 'timestamp': 'ctx.message.created_at', 'color': 'self.bot.color'}), "(title='Red Panda', timestamp=ctx.message.created_at, color=\n self.bot.color)\n", (3553, 3633), False, 'import discord\n'), ((4289, 4377), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Bird"""', 'timestamp': 'ctx.message.created_at', 'color': 'self.bot.color'}), "(title='Bird', timestamp=ctx.message.created_at, color=self.\n bot.color)\n", (4302, 4377), False, 'import discord\n'), ((5034, 5122), 'discord.Embed', 'discord.Embed', ([], {'title': '"""kola"""', 'timestamp': 'ctx.message.created_at', 'color': 'self.bot.color'}), "(title='kola', timestamp=ctx.message.created_at, color=self.\n bot.color)\n", (5047, 5122), False, 'import discord\n'), ((5794, 5885), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Pikachu"""', 'timestamp': 'ctx.message.created_at', 'color': 'self.bot.color'}), "(title='Pikachu', timestamp=ctx.message.created_at, color=self\n .bot.color)\n", (5807, 5885), False, 'import discord\n')]
import os

from oss_client.utils import content_md5


class FileObject(object):
    def __init__(self, name="", obj=None, hash_value=None, storage=None):
        if not (obj or hash_value):
            raise ValueError("obj and hash_value both are None")
        self.obj = obj
        self.name = name
        self.suffix = ""
        self.length = 0
        self.hash_value = hash_value
        self.storage = storage
        names = name.split(".")
        if len(names) > 1:
            self.suffix = names[-1]
        if not self.hash_value and self.obj:
            content = self.obj.read()
            self.length = len(content)
            self.hash_value = content_md5(content)
            self.obj.seek(0, os.SEEK_SET)

    def __str__(self):
        return self.hash_value

    def key(self):
        if self.suffix:
            return ".".join([self.hash_value, self.suffix])
        return self.hash_value

    def content(self, range=None):
        if self.obj:
            return self.obj.read()
        if self.storage:
            return self.storage.read(self.key(), range)
        raise Exception("can not find content")
[ "oss_client.utils.content_md5" ]
[((666, 686), 'oss_client.utils.content_md5', 'content_md5', (['content'], {}), '(content)\n', (677, 686), False, 'from oss_client.utils import content_md5\n')]
from matplotlib import pyplot as plt
from ibm_botocore.client import Config, ClientError
import rasterio
import random
import ibm_boto3


def plot_random_blocks(bucket, item, num):
    """
    Plot num random blocks from IBM COS item located at bucket
    """
    fig, axs = plt.subplots(num, figsize=(20,30))
    cos = ibm_boto3.resource("s3",
        config=Config(signature_version="oauth"),
        endpoint_url="https://s3.eu-de.cloud-object-storage.appdomain.cloud"
    )
    obj = cos.Object(bucket, item)
    with rasterio.open(obj.get()['Body']) as src:
        for j in range(0,num):
            ij, window = random.choice(list(src.block_windows()))
            arr = src.read(1, window=window)
            plt.subplot(1 + (num-1)/2, 2, j+1)
            plt.gca().set_title(item)
            plt.imshow(arr)
            plt.colorbar(shrink=0.5)
    plt.show()


def plot_results(bucket, results):
    """
    Plot an array of COS from IBM Cloud
    """
    size = len(results)
    fig, axs = plt.subplots(len(results), figsize=(20,30))
    cos = ibm_boto3.resource("s3",
        config=Config(signature_version="oauth"),
        endpoint_url="https://s3.eu-de.cloud-object-storage.appdomain.cloud"
    )
    i = 1
    for item in results:
        obj = cos.Object(bucket, item)
        with rasterio.open(obj.get()['Body']) as src:
            arr = src.read(1, out_shape=(src.height//10, src.width//10))
            plt.subplot(1 + (size-1)/2, 2, i)
            plt.gca().set_title(item)
            plt.imshow(arr)
            plt.colorbar(shrink=0.5)
            i += 1
    plt.show()


def tiff_overview(tiff_url):
    """
    Plot the a little version of the map (thumbnail)
    """
    with rasterio.open(tiff_url) as dataset:
        oviews = dataset.overviews(1)  # list of overviews from biggest to smallest
        oview = oviews[-1]  # let's look at the smallest thumbnail
        print('Decimation factor= {}'.format(oview))
        # NOTE this is using a 'decimated read' (http://rasterio.readthedocs.io/en/latest/topics/resampling.html)
        thumbnail = dataset.read(1, out_shape=(1, int(dataset.height // oview), int(dataset.width // oview)))

    print('array type: ', type(thumbnail))

    plt.figure(figsize=(5, 5))
    plt.imshow(thumbnail)
    plt.colorbar()
    plt.title('Overview - Band 4 {}'.format(thumbnail.shape))
    plt.xlabel('Column #')
    plt.ylabel('Row #')


def plot_map(image, title, x_label="", y_label=""):
    plt.figure(figsize=(10, 15))
    plt.imshow(image)
    plt.colorbar(shrink=0.5)
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
[ "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "rasterio.open", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.figure", "ibm_botocore.client.Config", "matplotlib.pyplot.gca", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.subplots" ]
[((279, 314), 'matplotlib.pyplot.subplots', 'plt.subplots', (['num'], {'figsize': '(20, 30)'}), '(num, figsize=(20, 30))\n', (291, 314), True, 'from matplotlib import pyplot as plt\n'), ((930, 940), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (938, 940), True, 'from matplotlib import pyplot as plt\n'), ((1730, 1740), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1738, 1740), True, 'from matplotlib import pyplot as plt\n'), ((2362, 2388), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (2372, 2388), True, 'from matplotlib import pyplot as plt\n'), ((2393, 2414), 'matplotlib.pyplot.imshow', 'plt.imshow', (['thumbnail'], {}), '(thumbnail)\n', (2403, 2414), True, 'from matplotlib import pyplot as plt\n'), ((2419, 2433), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2431, 2433), True, 'from matplotlib import pyplot as plt\n'), ((2500, 2522), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Column #"""'], {}), "('Column #')\n", (2510, 2522), True, 'from matplotlib import pyplot as plt\n'), ((2527, 2546), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Row #"""'], {}), "('Row #')\n", (2537, 2546), True, 'from matplotlib import pyplot as plt\n'), ((2605, 2633), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 15)'}), '(figsize=(10, 15))\n', (2615, 2633), True, 'from matplotlib import pyplot as plt\n'), ((2638, 2655), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (2648, 2655), True, 'from matplotlib import pyplot as plt\n'), ((2660, 2684), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'shrink': '(0.5)'}), '(shrink=0.5)\n', (2672, 2684), True, 'from matplotlib import pyplot as plt\n'), ((2689, 2705), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2698, 2705), True, 'from matplotlib import pyplot as plt\n'), ((2710, 2729), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (2720, 2729), True, 'from matplotlib import pyplot as plt\n'), ((2734, 2753), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (2744, 2753), True, 'from matplotlib import pyplot as plt\n'), ((1849, 1872), 'rasterio.open', 'rasterio.open', (['tiff_url'], {}), '(tiff_url)\n', (1862, 1872), False, 'import rasterio\n'), ((385, 418), 'ibm_botocore.client.Config', 'Config', ([], {'signature_version': '"""oauth"""'}), "(signature_version='oauth')\n", (391, 418), False, 'from ibm_botocore.client import Config, ClientError\n'), ((788, 828), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1 + (num - 1) / 2)', '(2)', '(j + 1)'], {}), '(1 + (num - 1) / 2, 2, j + 1)\n', (799, 828), True, 'from matplotlib import pyplot as plt\n'), ((873, 888), 'matplotlib.pyplot.imshow', 'plt.imshow', (['arr'], {}), '(arr)\n', (883, 888), True, 'from matplotlib import pyplot as plt\n'), ((901, 925), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'shrink': '(0.5)'}), '(shrink=0.5)\n', (913, 925), True, 'from matplotlib import pyplot as plt\n'), ((1188, 1221), 'ibm_botocore.client.Config', 'Config', ([], {'signature_version': '"""oauth"""'}), "(signature_version='oauth')\n", (1194, 1221), False, 'from ibm_botocore.client import Config, ClientError\n'), ((1565, 1602), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1 + (size - 1) / 2)', '(2)', 'i'], {}), '(1 + (size - 1) / 2, 2, i)\n', (1576, 1602), True, 'from matplotlib import pyplot as plt\n'), ((1649, 1664), 'matplotlib.pyplot.imshow', 'plt.imshow', (['arr'], {}), '(arr)\n', (1659, 1664), True, 'from 
matplotlib import pyplot as plt\n'), ((1677, 1701), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'shrink': '(0.5)'}), '(shrink=0.5)\n', (1689, 1701), True, 'from matplotlib import pyplot as plt\n'), ((835, 844), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (842, 844), True, 'from matplotlib import pyplot as plt\n'), ((1611, 1620), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1618, 1620), True, 'from matplotlib import pyplot as plt\n')]
import pytest
import numpy as np
from scipy.integrate import trapz

from newdust.graindist import *
from . import percent_diff

MD = 1.e-5  # g cm^-2
RHO = 3.0  # g c^-3

SDEFAULT = 'Powerlaw'
CDEFAULT = 'Silicate'

ALLOWED_SIZES = ['Grain','Powerlaw','ExpCutoff']
ALLOWED_COMPS = ['Drude','Silicate','Graphite']

# Test that the helper function runs on all types
@pytest.mark.parametrize('sstring', ALLOWED_SIZES)
def test_sstring(sstring):
    test = GrainDist(sstring, CDEFAULT)
    assert isinstance(test, GrainDist)

@pytest.mark.parametrize('cstring', ALLOWED_COMPS)
def test_cstring(cstring):
    test = GrainDist(SDEFAULT, cstring)
    assert isinstance(test, GrainDist)

# Test that the helper function does not run on weird strings
def test_catch_exception():
    ss, cc = 'foo', 'bar'
    with pytest.raises(AssertionError):
        GrainDist(ss, CDEFAULT)
        GrainDist(SDEFAULT, cc)

# Test the basic properties and functions of GrainDist
@pytest.mark.parametrize('sstring', ALLOWED_SIZES)
def test_GrainDist(sstring):
    test = GrainDist(sstring, CDEFAULT, md=MD)
    assert isinstance(test.a, np.ndarray)
    assert len(test.a) == len(test.ndens)
    assert len(test.a) == len(test.mdens)
    if isinstance(test.size, sizedist.Grain):
        mtot = test.mdens
    else:
        mtot = trapz(test.mdens, test.a)
    assert percent_diff(mtot, MD) <= 0.01

# Test that doubling the dust mass column doubles the total mass
MD2 = 2.0 * MD
def test_dmass():
    for ss in ALLOWED_SIZES:
        for cc in ALLOWED_COMPS:
            test1 = GrainDist(ss, cc, md=MD)
            test2 = GrainDist(ss, cc, md=MD2)
            if isinstance(test1.size, sizedist.Grain):
                mtot1, mtot2 = test1.mdens, test2.mdens
            else:
                mtot1 = trapz(test1.mdens, test1.a)
                mtot2 = trapz(test2.mdens, test2.a)
            assert percent_diff(mtot2, 2.0 * mtot1) <= 0.01

# Test that doubling the dust grain material density halves the total number
RHO2 = 2.0 * RHO
def test_ndens():
    for ss in ALLOWED_SIZES:
        for cc in ALLOWED_COMPS:
            test1 = GrainDist(ss, cc, md=MD, rho=RHO)
            test2 = GrainDist(ss, cc, md=MD, rho=RHO2)
            if isinstance(test1.size, sizedist.Grain):
                nd1, nd2 = test1.ndens, test2.ndens
            else:
                nd1 = trapz(test1.ndens, test1.a)
                nd2 = trapz(test2.ndens, test2.a)
            assert percent_diff(nd2, 0.5 * nd1) <= 0.01
[ "pytest.mark.parametrize", "pytest.raises", "scipy.integrate.trapz" ]
[((368, 417), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sstring"""', 'ALLOWED_SIZES'], {}), "('sstring', ALLOWED_SIZES)\n", (391, 417), False, 'import pytest\n'), ((526, 575), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cstring"""', 'ALLOWED_COMPS'], {}), "('cstring', ALLOWED_COMPS)\n", (549, 575), False, 'import pytest\n'), ((960, 1009), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sstring"""', 'ALLOWED_SIZES'], {}), "('sstring', ALLOWED_SIZES)\n", (983, 1009), False, 'import pytest\n'), ((808, 837), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (821, 837), False, 'import pytest\n'), ((1309, 1334), 'scipy.integrate.trapz', 'trapz', (['test.mdens', 'test.a'], {}), '(test.mdens, test.a)\n', (1314, 1334), False, 'from scipy.integrate import trapz\n'), ((1782, 1809), 'scipy.integrate.trapz', 'trapz', (['test1.mdens', 'test1.a'], {}), '(test1.mdens, test1.a)\n', (1787, 1809), False, 'from scipy.integrate import trapz\n'), ((1834, 1861), 'scipy.integrate.trapz', 'trapz', (['test2.mdens', 'test2.a'], {}), '(test2.mdens, test2.a)\n', (1839, 1861), False, 'from scipy.integrate import trapz\n'), ((2353, 2380), 'scipy.integrate.trapz', 'trapz', (['test1.ndens', 'test1.a'], {}), '(test1.ndens, test1.a)\n', (2358, 2380), False, 'from scipy.integrate import trapz\n'), ((2403, 2430), 'scipy.integrate.trapz', 'trapz', (['test2.ndens', 'test2.a'], {}), '(test2.ndens, test2.a)\n', (2408, 2430), False, 'from scipy.integrate import trapz\n')]
#imported the Stack.py
from Stack import Stack

def intToBinary(num: int) -> str :
    stack = Stack()
    while num > 0:
        remender = num % 2
        stack.push(remender)
        num = num // 2

    binary = ""
    while not stack.is_empty():
        binary += str(stack.pop())

    return binary

num = int(input("Enter a Number: "))

if num < 0:
    print("Enter a Positive Number")
    quit()

result = intToBinary(num)
print("Binary: ",result)
[ "Stack.Stack" ]
[((97, 104), 'Stack.Stack', 'Stack', ([], {}), '()\n', (102, 104), False, 'from Stack import Stack\n')]
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import mock

from djhelpers.adminhelpers import ActionDecorator
from djhelpers.modelhelpers import short_description


class ShortDescriptionDecoratorTest(unittest.TestCase):

    def test_decorator(self):
        # Arrange
        description = 'description'

        # Act
        @short_description(description)
        def tfunc(x):
            return x

        result = tfunc(mock.sentinel.func_test_parameter)

        # Assert
        self.assertEqual(result, mock.sentinel.func_test_parameter)
        self.assertEqual(tfunc.short_description, description)
        self.assertFalse(hasattr(tfunc, 'boolean'))

    def test_decorator_with_kwargs(self):
        # Arrange
        description = 'description'

        # Act
        @short_description(description, boolean=mock.sentinel.kwarg)
        def tfunc(x):
            return x

        result = tfunc(mock.sentinel.func_test_parameter)

        # Assert
        self.assertEqual(result, mock.sentinel.func_test_parameter)
        self.assertEqual(tfunc.short_description, description)
        self.assertEqual(tfunc.boolean, mock.sentinel.kwarg)


class ActionDecoratorTest(unittest.TestCase):

    def test_admin_action(self):
        # Arrange
        actions = ActionDecorator()

        # Act
        desc = 'test description'

        @actions.action(desc)
        def _t(x):
            return x

        # Assert
        self.assertIsInstance(actions, list)
        self.assertIn(_t, actions)
        self.assertEqual(len(actions), 1)
        self.assertEqual(_t.short_description, desc)


if __name__ == '__main__':
    suite = unittest.TestLoader().discover('.')
    unittest.TextTestRunner(verbosity=2).run(suite)
[ "unittest.TextTestRunner", "djhelpers.adminhelpers.ActionDecorator", "djhelpers.modelhelpers.short_description", "unittest.TestLoader" ]
[((872, 902), 'djhelpers.modelhelpers.short_description', 'short_description', (['description'], {}), '(description)\n', (889, 902), False, 'from djhelpers.modelhelpers import short_description\n'), ((1328, 1387), 'djhelpers.modelhelpers.short_description', 'short_description', (['description'], {'boolean': 'mock.sentinel.kwarg'}), '(description, boolean=mock.sentinel.kwarg)\n', (1345, 1387), False, 'from djhelpers.modelhelpers import short_description\n'), ((1820, 1837), 'djhelpers.adminhelpers.ActionDecorator', 'ActionDecorator', ([], {}), '()\n', (1835, 1837), False, 'from djhelpers.adminhelpers import ActionDecorator\n'), ((2189, 2210), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (2208, 2210), False, 'import unittest\n'), ((2229, 2265), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (2252, 2265), False, 'import unittest\n')]
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import requests

apiserver = ""


class ListView(QWidget):
    def __init__(self, parent=None):
        super(ListView, self).__init__(parent)
        self.setWindowTitle('Asphodel Downloader Test Client')
        self.resize(400, 100)
        self.initUI()

    def append(self):
        """
        채팅을 추가함
        """
        res = requests.put(f"http://192.168.127.12:7474/publicchat?content={self.appendChatInput.text()}").json()

    def get(self):
        """
        클라에 보관된 해시가 없으면 그냥 보내서 10개 받아오고,
        클라에 해시가 보관되어 있으면 그거 보내서 필요한 부분만 받아옴
        """
        if self.currentLastHashViewer.text() == '':
            res = requests.get("http://192.168.127.12:7474/publicchat").json()
            self.addItemList(res)
        else:
            res = requests.get(f"http://192.168.127.12:7474/publicchat?hash={self.currentLastHashViewer.text()}").json()
            self.addItemList(res)

    def addItemList(self, res):
        """
        [[해시, 텍스트]] 형식의 리스트를 리스트뷰에 넣어줌
        그리고 마지막 해시는 해시 뷰어에 넣어줌
        """
        print(res)
        for n, r in enumerate(res):
            print(n, r)
            self.chatting.addItem(str(r))
            if n + 1== len(res):
                self.currentLastHashViewer.setText(str(r[0]))

    def save(self):
        """
        API에 저장하라고 시킴
        """
        requests.get("http://192.168.127.12:7474/savedb")

    def initUI(self):
        self.mainLayout = QVBoxLayout()

        self.chatting = QListWidget()

        self.currentLastHashTitle = QLabel('currentLastHash : ')
        self.currentLastHashViewer = QLineEdit()
        self.hashes = QHBoxLayout()
        self.hashes.addWidget(self.currentLastHashTitle)
        self.hashes.addWidget(self.currentLastHashViewer)

        self.getChat = QPushButton('getChat')
        self.getChat.clicked.connect(self.get)

        self.appendChatInput = QLineEdit()
        self.appendChatInput.setPlaceholderText('Input text to send')
        self.appendChat = QPushButton('appendChat')
        self.appendChat.clicked.connect(self.append)

        self.saveBTN = QPushButton('SAVE')
        self.saveBTN.clicked.connect(self.save)

        self.mainLayout.addWidget(self.chatting)
        self.mainLayout.addWidget(self.getChat)
        self.mainLayout.addLayout(self.hashes)
        self.mainLayout.addWidget(self.appendChatInput)
        self.mainLayout.addWidget(self.appendChat)
        self.mainLayout.addWidget(self.saveBTN)

        self.setLayout(self.mainLayout)


if __name__ == '__main__':
    import sys

    app = QApplication(sys.argv)
    window = ListView()
    window.show()
    sys.exit(app.exec())
[ "requests.get" ]
[((1411, 1460), 'requests.get', 'requests.get', (['"""http://192.168.127.12:7474/savedb"""'], {}), "('http://192.168.127.12:7474/savedb')\n", (1423, 1460), False, 'import requests\n'), ((717, 770), 'requests.get', 'requests.get', (['"""http://192.168.127.12:7474/publicchat"""'], {}), "('http://192.168.127.12:7474/publicchat')\n", (729, 770), False, 'import requests\n')]
from django.contrib import admin from .models import Project,Profile,Review,Comment # Register your models here. class ReviewAdmin(admin.ModelAdmin): model = Review list_display = ('wine', 'rating', 'user_name', 'comment', 'pub_date') list_filter = ['pub_date', 'user_name'] search_fields = ['comment'] admin.site.register(Project) admin.site.register(Profile) admin.site.register(Review) admin.site.register(Comment)
[ "django.contrib.admin.site.register" ]
[((321, 349), 'django.contrib.admin.site.register', 'admin.site.register', (['Project'], {}), '(Project)\n', (340, 349), False, 'from django.contrib import admin\n'), ((350, 378), 'django.contrib.admin.site.register', 'admin.site.register', (['Profile'], {}), '(Profile)\n', (369, 378), False, 'from django.contrib import admin\n'), ((379, 406), 'django.contrib.admin.site.register', 'admin.site.register', (['Review'], {}), '(Review)\n', (398, 406), False, 'from django.contrib import admin\n'), ((407, 435), 'django.contrib.admin.site.register', 'admin.site.register', (['Comment'], {}), '(Comment)\n', (426, 435), False, 'from django.contrib import admin\n')]
from abc import ABC import chainer import chainer.functions as F import chainer.links as L class AbstractModel(ABC): def predict(self, blob): pass class DefaultModel: def __init__(self, n_units=100, n_out=6, colors=('red', 'blue', 'green', 'cyan', 'magenta', 'yellow')): self.n_units = n_units self.n_out = n_out self.model = self._net_model() self.colors = colors def _net_model(self): layer = chainer.Sequential(L.Linear(self.n_units), F.relu) model = layer.repeat(1) model.append(L.Linear(self.n_out)) return L.Classifier( self._net_model(), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy) def _normalize_data(self, image): return image.reshape(100, -1, 3) def predict(self, blob): image = self._normalize_data(blob) return self.model.predictor(image[None]).data
[ "chainer.links.Linear" ]
[((480, 502), 'chainer.links.Linear', 'L.Linear', (['self.n_units'], {}), '(self.n_units)\n', (488, 502), True, 'import chainer.links as L\n'), ((565, 585), 'chainer.links.Linear', 'L.Linear', (['self.n_out'], {}), '(self.n_out)\n', (573, 585), True, 'import chainer.links as L\n')]
from collections import Counter from notify_run import Notify import os import time import dropbox import json dropboxkey="" notify = Notify() notifyendpoint="" notify.endpoint=notifyendpoint notify.write_config() from flask import Flask,request maindictionary={} dbx = dropbox.Dropbox(dropboxkey) dbx.files_download_to_file("bannedpixel.txt","/bannedpixel.txt") dbx.files_download_to_file("logpixel.txt","/logpixel.txt") dbx.files_download_to_file("dictionary.txt","/dictionary.txt") app = Flask(__name__) @app.route("/pixel") def home(): maindictionary=json.load(open("dictionary.txt")) if (request.args.get("id")) in open("bannedpixel.txt").read(): pass elif open("logpixel.txt").read().count(request.args.get("id")) == 0: with open("logpixel.txt","a+") as f: f.writelines(request.args.get("id") +"\n") dbx = dropbox.Dropbox("") dbx.files_upload(open("logpixel.txt","rb").read(),"/logpixel.txt",mode=dropbox.files.WriteMode.overwrite) maindictionary[request.args.get("id")] = time.time() with open('dictionary.txt', 'w+') as file: file.write(json.dumps(t)) dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open("dictionary.txt","rb").read(),"/dictionary.txt",mode=dropbox.files.WriteMode.overwrite) elif open("logpixel.txt").read().count(request.args.get("id")) == 1: if time.time() - maindictionary[request.args.get("id")] > 20: notify.send('Your email too ' + request.args.get("email") + " with subject: " + request.args.get("subject") + " has been opened") text=open("logpixel.txt","a+").read() text=text.replace((request.args.get("id")),"") with open("logpixel.txt","w+") as f: f.write(text) with open("bannedpixel.txt","a+") as f: f.writelines((request.args.get("id")) + "\n") dbx = dropbox.Dropbox(dropboxkey) dbx.files_upload(open("logpixel.txt","rb").read(),"/logpixel.txt",mode=dropbox.files.WriteMode.overwrite) dbx.files_upload(open("bannedpixel.txt","rb").read(),"/bannedpixel.txt",mode=dropbox.files.WriteMode.overwrite) return " NONE TEST VIEW " if __name__ == "__main__": app.run(port= int(os.environ.get('PORT', 5000)),host="0.0.0.0")
[ "flask.request.args.get", "dropbox.Dropbox", "flask.Flask", "json.dumps", "time.time", "notify_run.Notify", "os.environ.get" ]
[((134, 142), 'notify_run.Notify', 'Notify', ([], {}), '()\n', (140, 142), False, 'from notify_run import Notify\n'), ((271, 298), 'dropbox.Dropbox', 'dropbox.Dropbox', (['dropboxkey'], {}), '(dropboxkey)\n', (286, 298), False, 'import dropbox\n'), ((493, 508), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (498, 508), False, 'from flask import Flask, request\n'), ((604, 626), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (620, 626), False, 'from flask import Flask, request\n'), ((867, 886), 'dropbox.Dropbox', 'dropbox.Dropbox', (['""""""'], {}), "('')\n", (882, 886), False, 'import dropbox\n'), ((1053, 1064), 'time.time', 'time.time', ([], {}), '()\n', (1062, 1064), False, 'import time\n'), ((1185, 1212), 'dropbox.Dropbox', 'dropbox.Dropbox', (['dropboxkey'], {}), '(dropboxkey)\n', (1200, 1212), False, 'import dropbox\n'), ((719, 741), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (735, 741), False, 'from flask import Flask, request\n'), ((1027, 1049), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (1043, 1049), False, 'from flask import Flask, request\n'), ((2320, 2348), 'os.environ.get', 'os.environ.get', (['"""PORT"""', '(5000)'], {}), "('PORT', 5000)\n", (2334, 2348), False, 'import os\n'), ((1143, 1156), 'json.dumps', 'json.dumps', (['t'], {}), '(t)\n', (1153, 1156), False, 'import json\n'), ((1374, 1396), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (1390, 1396), False, 'from flask import Flask, request\n'), ((1969, 1996), 'dropbox.Dropbox', 'dropbox.Dropbox', (['dropboxkey'], {}), '(dropboxkey)\n', (1984, 1996), False, 'import dropbox\n'), ((823, 845), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (839, 845), False, 'from flask import Flask, request\n'), ((1422, 1433), 'time.time', 'time.time', ([], {}), '()\n', (1431, 1433), False, 'import time\n'), ((1717, 1739), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (1733, 1739), False, 'from flask import Flask, request\n'), ((1451, 1473), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (1467, 1473), False, 'from flask import Flask, request\n'), ((1586, 1613), 'flask.request.args.get', 'request.args.get', (['"""subject"""'], {}), "('subject')\n", (1602, 1613), False, 'from flask import Flask, request\n'), ((1906, 1928), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (1922, 1928), False, 'from flask import Flask, request\n'), ((1538, 1563), 'flask.request.args.get', 'request.args.get', (['"""email"""'], {}), "('email')\n", (1554, 1563), False, 'from flask import Flask, request\n')]
import sys
from pathlib import Path

sys.path.append(str(Path(".").absolute().parent))

from sheet2dict import Worksheet
from io import BytesIO

ws = Worksheet()
ws.xlsx_to_dict(path="inventory.xlsx")
print(">>", ws.header)
print("ALL:", ws.sheet_items)
print("SANITIZED:", ws.sanitize_sheet_items)

path = "inventory.xlsx"
xlsx_file = open(path, "rb")
xlsx_file = BytesIO(xlsx_file.read())
ws = Worksheet()
ws.xlsx_to_dict(path=xlsx_file)
print(">>", ws.header)

ws = Worksheet()
path = "inventory.csv"
csv_file = open(path, "r", encoding="utf-8-sig")
ws.csv_to_dict(csv_file=csv_file, delimiter=";")
print("ALL:", ws.sheet_items)
print("SANITIZED:", ws.sanitize_sheet_items)
[ "pathlib.Path", "sheet2dict.Worksheet" ]
[((151, 162), 'sheet2dict.Worksheet', 'Worksheet', ([], {}), '()\n', (160, 162), False, 'from sheet2dict import Worksheet\n'), ((399, 410), 'sheet2dict.Worksheet', 'Worksheet', ([], {}), '()\n', (408, 410), False, 'from sheet2dict import Worksheet\n'), ((473, 484), 'sheet2dict.Worksheet', 'Worksheet', ([], {}), '()\n', (482, 484), False, 'from sheet2dict import Worksheet\n'), ((57, 66), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (61, 66), False, 'from pathlib import Path\n')]
import sys

from MyStrategy import MyStrategy
from RemoteProcessClient import RemoteProcessClient
from model.Move import Move
from time import sleep


class Runner:
    def __init__(self):
        sleep(4)
        if sys.argv.__len__() == 4:
            self.remote_process_client = RemoteProcessClient(sys.argv[1], int(sys.argv[2]))
            self.token = sys.argv[3]
        else:
            self.remote_process_client = RemoteProcessClient("localhost", 31001)
            self.token = "0<PASSWORD>"
        #next line enables my custom debugger window
        debuggerEnabled = True

    def run(self):
        try:
            self.remote_process_client.write_token(self.token)
            team_size = self.remote_process_client.read_team_size()
            self.remote_process_client.write_protocol_version()
            game = self.remote_process_client.read_game_context()

            strategies = []

            for strategy_index in xrange(team_size):
                strategies.append(MyStrategy())

            while True:
                player_context = self.remote_process_client.read_player_context()
                if player_context is None:
                    break

                player_trooper = player_context.trooper

                move = Move()
                strategies[player_trooper.teammate_index].move(player_trooper, player_context.world, game, move)
                self.remote_process_client.write_move(move)
        finally:
            self.remote_process_client.close()


Runner().run()
[ "MyStrategy.MyStrategy", "sys.argv.__len__", "RemoteProcessClient.RemoteProcessClient", "time.sleep", "model.Move.Move" ]
[((196, 204), 'time.sleep', 'sleep', (['(4)'], {}), '(4)\n', (201, 204), False, 'from time import sleep\n'), ((216, 234), 'sys.argv.__len__', 'sys.argv.__len__', ([], {}), '()\n', (232, 234), False, 'import sys\n'), ((425, 464), 'RemoteProcessClient.RemoteProcessClient', 'RemoteProcessClient', (['"""localhost"""', '(31001)'], {}), "('localhost', 31001)\n", (444, 464), False, 'from RemoteProcessClient import RemoteProcessClient\n'), ((1270, 1276), 'model.Move.Move', 'Move', ([], {}), '()\n', (1274, 1276), False, 'from model.Move import Move\n'), ((999, 1011), 'MyStrategy.MyStrategy', 'MyStrategy', ([], {}), '()\n', (1009, 1011), False, 'from MyStrategy import MyStrategy\n')]
'''
    Controllers/Bindings/table
    __________________________

    Class with designed inheritance for copy/paste methods.

    :copyright: (c) 2015 The Regents of the University of California.
    :license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''

# load modules/submodules
from PySide import QtCore

from xldlib.definitions import partial
from xldlib.onstart.main import APP
from xldlib.qt.objects import base, threads
from xldlib.qt import resources as qt

from . import copier, decorators, paster


# MUTEX
# -----

MUTEX = threads.ContextMutex(QtCore.QMutex.Recursive)


# SLOTS
# -----


def nullslot(result):
    pass


def copyslot(result):
    APP.clipboard().setText(result)


# OBJECTS
# -------


class TableBindings(base.BaseObject):
    '''Provides methods to bind to QKeyShortcuts for facilitated table use'''

    def __init__(self, table):
        super(TableBindings, self).__init__(table)

        self.table = table
        self.copier = copier.HandleCopy(self.table)
        self.paster = paster.HandlePaste(self.table)

        self.set_shortcuts()

    # PUBLIC FUNCTIONS

    @decorators.newspinner(nullslot)
    def delete(self):
        self._delete()

    def _delete(self, blank=""):
        '''
        Excel-like delete function. Deletes contents in all selected cells.
        delete() -> void

        (Row, Column) [Value] Selection:
            --> (1,2) ["AA"], (1,3) ["BB"], (2,2) ["CC"], (2,3) ["DD"]
            --> (1,2) [], (1,3) [], (2,2) [], (2,3) []
        '''
        selected_indexes = self.table.get_selected_indexes()
        items = (self.table.item(i.row, i.column) for i in selected_indexes)
        filtered = [i for i in items if i is not None]
        for item in filtered:
            item.setText(blank)

        self.table.model().delete(selected_indexes)
        if filtered and hasattr(self.table, "changed"):
            self.table.changed = True

    @decorators.newspinner(copyslot)
    def cut(self):
        '''Combines copy and delete operations for cut functionality'''

        result = self.copier.copy()
        self._delete()
        return result

    @decorators.newspinner(copyslot)
    def copy(self):
        return self.copier.copy()

    def paste(self):
        clipboard_text = self.app.clipboard().text()
        self._paste(clipboard_text)

    @decorators.newspinner(nullslot)
    def _paste(self, clipboard_text):
        self.paster.paste(clipboard_text)

    def select_all(self):
        '''Selects all item in the table'''

        with MUTEX:
            model = self.table.selectionModel()
            mode = self.table.selectionMode()
            self.table.setSelectionMode(qt.SELECTION_MODE['Extended'])

            # clear selection
            model.clearSelection()
            selection = model.selection()

            # reset the selection mode for all items
            for column in range(self.table.columnCount()):
                # only select visible items
                if not self.table.isColumnHidden(column):
                    self.table.selectColumn(column)
                    selection.merge(model.selection(), qt.SELECTION_MODEL['Select'])

            self.table.setSelectionMode(mode)

    def select_mode(self, mode=None):
        '''
        Changes the QTableSelectionMode between the list options.
            mode -- QtGui.QAbstractItemView.<atribute>
                ExtendedSelection
                SingleSelection
                MultiSelection

        select_mode(QtGui.QAbstractItemView.ExtendedSelection)
        '''
        mode = mode or qt.SELECTION_MODE['Extended']
        self.table.setSelectionMode(mode)
        self.table.blockSignals(mode != qt.SELECTION_MODE['Single'])

    # GETTERS

    def set_shortcuts(self):
        self.shortcuts = {
            'Ctrl+f': self.table.finder.show,
            'Ctrl+b': self.table.block,
            'Ctrl+c': self.copy,
            'Ctrl+x': self.cut,
            'Ctrl+v': self.paste,
            'Del': self.delete,
            'Ctrl+a': self.select_all
        }

        modes = {
            'Ctrl+Shift+s': 'Single',
            'Ctrl+Shift+m': 'Multi',
            'Ctrl+Shift+e': 'Extended'
        }
        for keysequence, mode in modes.items():
            fun = partial(self.select_mode, qt.SELECTION_MODE[mode])
            self.shortcuts[keysequence] = fun
[ "xldlib.qt.objects.threads.ContextMutex", "xldlib.onstart.main.APP.clipboard", "xldlib.definitions.partial" ]
[((550, 595), 'xldlib.qt.objects.threads.ContextMutex', 'threads.ContextMutex', (['QtCore.QMutex.Recursive'], {}), '(QtCore.QMutex.Recursive)\n', (570, 595), False, 'from xldlib.qt.objects import base, threads\n'), ((675, 690), 'xldlib.onstart.main.APP.clipboard', 'APP.clipboard', ([], {}), '()\n', (688, 690), False, 'from xldlib.onstart.main import APP\n'), ((4294, 4344), 'xldlib.definitions.partial', 'partial', (['self.select_mode', 'qt.SELECTION_MODE[mode]'], {}), '(self.select_mode, qt.SELECTION_MODE[mode])\n', (4301, 4344), False, 'from xldlib.definitions import partial\n')]
#Tocando um MP3
from pygame import mixer
mixer.init()
mixer.music.load('EX021.mp3') #Adicione o nome da musica
mixer.music.play()
[ "pygame.mixer.init", "pygame.mixer.music.play", "pygame.mixer.music.load" ]
[((41, 53), 'pygame.mixer.init', 'mixer.init', ([], {}), '()\n', (51, 53), False, 'from pygame import mixer\n'), ((54, 83), 'pygame.mixer.music.load', 'mixer.music.load', (['"""EX021.mp3"""'], {}), "('EX021.mp3')\n", (70, 83), False, 'from pygame import mixer\n'), ((111, 129), 'pygame.mixer.music.play', 'mixer.music.play', ([], {}), '()\n', (127, 129), False, 'from pygame import mixer\n')]
# Generated by Django 3.1.3 on 2020-12-01 14:32

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('tweets', '0003_auto_20201201_0211'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tweet',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.CreateModel(
            name='TweetLikes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.tweet')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='tweet',
            name='likes',
            field=models.ManyToManyField(blank=True, related_name='tweet_user', through='tweets.TweetLikes', to=settings.AUTH_USER_MODEL),
        ),
    ]
[ "django.db.migrations.swappable_dependency", "django.db.models.ManyToManyField", "django.db.models.ForeignKey", "django.db.models.AutoField", "django.db.models.DateTimeField" ]
[((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((468, 507), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (488, 507), False, 'from django.db import migrations, models\n'), ((1151, 1275), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""tweet_user"""', 'through': '"""tweets.TweetLikes"""', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, related_name='tweet_user', through=\n 'tweets.TweetLikes', to=settings.AUTH_USER_MODEL)\n", (1173, 1275), False, 'from django.db import migrations, models\n'), ((627, 720), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (643, 720), False, 'from django.db import migrations, models\n'), ((749, 788), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (769, 788), False, 'from django.db import migrations, models\n'), ((817, 903), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""tweets.tweet"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'tweets.tweet')\n", (834, 903), False, 'from django.db import migrations, models\n'), ((926, 1022), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (943, 1022), False, 'from django.db import migrations, models\n')]
#!/usr/bin/env python
from setuptools import setup
from os.path import dirname, abspath, join
setup(name='drunkardswalk',
      packages=['drunkardswalk'],
      )
[ "setuptools.setup" ]
[((95, 150), 'setuptools.setup', 'setup', ([], {'name': '"""drunkardswalk"""', 'packages': "['drunkardswalk']"}), "(name='drunkardswalk', packages=['drunkardswalk'])\n", (100, 150), False, 'from setuptools import setup\n')]
from docs_snippets.concepts.io_management.subselection import (
    execute_full,
    execute_subselection,
)


def test_execute_job():
    execute_full()


def test_execute_subselection():
    execute_subselection()
[ "docs_snippets.concepts.io_management.subselection.execute_subselection", "docs_snippets.concepts.io_management.subselection.execute_full" ]
[((140, 154), 'docs_snippets.concepts.io_management.subselection.execute_full', 'execute_full', ([], {}), '()\n', (152, 154), False, 'from docs_snippets.concepts.io_management.subselection import execute_full, execute_subselection\n'), ((194, 216), 'docs_snippets.concepts.io_management.subselection.execute_subselection', 'execute_subselection', ([], {}), '()\n', (214, 216), False, 'from docs_snippets.concepts.io_management.subselection import execute_full, execute_subselection\n')]
from datetime import datetime

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from requests import HTTPError
from rest_framework import status

from app.enquiries.common.client import APIClient
from app.enquiries.common.hawk import HawkAuth

CONSENT_SERVICE_PATH_PERSON = "/api/v1/person/"


def request(url, method, **kwargs):
    if not all([
        settings.CONSENT_SERVICE_HAWK_ID,
        settings.CONSENT_SERVICE_HAWK_KEY,
        settings.CONSENT_SERVICE_BASE_URL,
    ]):
        raise ImproperlyConfigured("CONSENT_SERVICE_* environment variables must be set")

    client = APIClient(
        api_url=settings.CONSENT_SERVICE_BASE_URL,
        auth=HawkAuth(
            api_id=settings.CONSENT_SERVICE_HAWK_ID,
            api_key=settings.CONSENT_SERVICE_HAWK_KEY,
            verify_response=settings.CONSENT_SERVICE_VERIFY_RESPONSE,
        ),
        default_timeout=(
            settings.CONSENT_SERVICE_CONNECT_TIMEOUT,
            settings.CONSENT_SERVICE_READ_TIMEOUT,
        ),
    )
    return client.request(path=url, method=method, **kwargs)


def check_consent(key):
    if not settings.FEATURE_FLAGS["ENFORCE_CONSENT_SERVICE"]:
        return None

    key = key.lower().replace(" ", "")
    url = f"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}{key}/"
    try:
        response = request(url=url, method="GET")
        return bool(len(response.json()["consents"]))
    except HTTPError as e:
        if e.response and e.response.status_code == status.HTTP_404_NOT_FOUND:
            return False
        return False


def set_consent(key, value=True):
    if not settings.FEATURE_FLAGS["ENFORCE_CONSENT_SERVICE"]:
        return None

    key = key.lower()
    key_type = "email" if "@" in key else "phone"
    data = {
        "consents": [f"{key_type}_marketing"] if value else [],
        key_type: key,
        "modified_at": datetime.now().isoformat(),
    }
    try:
        url = f"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}"
        request(url=url, method="POST", json=data)
        return True
    except Exception:
        return None
[ "django.core.exceptions.ImproperlyConfigured", "datetime.datetime.now", "app.enquiries.common.hawk.HawkAuth" ]
[((537, 612), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""CONSENT_SERVICE_* environment variables must be set"""'], {}), "('CONSENT_SERVICE_* environment variables must be set')\n", (557, 612), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((702, 862), 'app.enquiries.common.hawk.HawkAuth', 'HawkAuth', ([], {'api_id': 'settings.CONSENT_SERVICE_HAWK_ID', 'api_key': 'settings.CONSENT_SERVICE_HAWK_KEY', 'verify_response': 'settings.CONSENT_SERVICE_VERIFY_RESPONSE'}), '(api_id=settings.CONSENT_SERVICE_HAWK_ID, api_key=settings.\n CONSENT_SERVICE_HAWK_KEY, verify_response=settings.\n CONSENT_SERVICE_VERIFY_RESPONSE)\n', (710, 862), False, 'from app.enquiries.common.hawk import HawkAuth\n'), ((1918, 1932), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1930, 1932), False, 'from datetime import datetime\n')]
"""Miscellaneous ECG Batch utils.""" import functools import pint import numpy as np from sklearn.preprocessing import LabelBinarizer as LB UNIT_REGISTRY = pint.UnitRegistry() def get_units_conversion_factor(old_units, new_units): """Return a multiplicative factor to convert a measured quantity from old to new units. Parameters ---------- old_units : str Current units in SI format. new_units : str Target units in SI format. Returns ------- factor : float A factor to convert quantities between units. """ try: # pint exceptions are wrapped with ValueError exceptions because they don't implement __repr__ method factor = UNIT_REGISTRY(old_units).to(new_units).magnitude except Exception as error: raise ValueError(error.__class__.__name__ + ": " + str(error)) return factor def partialmethod(func, *frozen_args, **frozen_kwargs): """Wrap a method with partial application of given positional and keyword arguments. Parameters ---------- func : callable A method to wrap. frozen_args : misc Fixed positional arguments. frozen_kwargs : misc Fixed keyword arguments. Returns ------- method : callable Wrapped method. """ @functools.wraps(func) def method(self, *args, **kwargs): """Wrapped method.""" return func(self, *frozen_args, *args, **frozen_kwargs, **kwargs) return method class LabelBinarizer(LB): """Encode categorical features using a one-hot scheme. Unlike ``sklearn.preprocessing.LabelBinarizer``, each label will be encoded using ``n_classes`` numbers even for binary problems. """ # pylint: disable=invalid-name def transform(self, y): """Transform ``y`` using one-hot encoding. Parameters ---------- y : 1-D ndarray of shape ``[n_samples,]`` Class labels. Returns ------- Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. """ Y = super().transform(y) if len(self.classes_) == 1: Y = 1 - Y if len(self.classes_) == 2: Y = np.hstack((1 - Y, Y)) return Y def inverse_transform(self, Y, threshold=None): """Transform one-hot encoded labels back to class labels. Parameters ---------- Y : 2-D ndarray of shape ``[n_samples, n_classes]`` One-hot encoded labels. threshold : float, optional The threshold used in the binary and multi-label cases. If ``None``, it is assumed to be half way between ``neg_label`` and ``pos_label``. Returns ------- y : 1-D ndarray of shape ``[n_samples,]`` Class labels. """ if len(self.classes_) == 1: y = super().inverse_transform(1 - Y, threshold) elif len(self.classes_) == 2: y = super().inverse_transform(Y[:, 1], threshold) else: y = super().inverse_transform(Y, threshold) return y
[ "functools.wraps", "pint.UnitRegistry", "numpy.hstack" ]
[((160, 179), 'pint.UnitRegistry', 'pint.UnitRegistry', ([], {}), '()\n', (177, 179), False, 'import pint\n'), ((1308, 1329), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1323, 1329), False, 'import functools\n'), ((2239, 2260), 'numpy.hstack', 'np.hstack', (['(1 - Y, Y)'], {}), '((1 - Y, Y))\n', (2248, 2260), True, 'import numpy as np\n')]
import pytest

from models.parser import Parser


def test_new():
    pass #TODO
    #data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;'
    #parser = Parser(data)
    #assert parser.parsed_data==None

# --- Creation Tests ---

def test_create_combined_data():
    data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;'
    parser = Parser(data)
    assert parser.parsed_data==[
        [[0.123, -0.123, 5.0], [0, 0, 0]],
        [[0.456, -0.789, 0.111], [0, 0, 0]],
        [[-0.2120710948533322, 0.0011468544965549535, 0.9994625125426089], [7.109485333219216e-05, -0.00014685449655495343, 0.0005374874573911294]]]

def test_create_separated_data():
    data = '0.028,-0.072,5|0.129,-0.945,-5;0,-0.07,0.06|0.123,-0.947,5;0.2,-1,2|0.1,-0.9,3;'
    parser = Parser(data)
    assert parser.parsed_data==[[[0.028, -0.072, 5.0], [0.129, -0.945, -5.0]], [[0.0, -0.07, 0.06], [0.123, -0.947, 5.0]], [[0.2, -1.0, 2.0], [0.1, -0.9, 3.0]]]

def test_create_string_values_parses_to_0s():
    data = "1,2,foo;"
    parser = Parser(data)
    assert parser.parsed_data==[[[1.0, 2.0, 0.0], [0, 0, 0]]]

    data = "1,2,foo|4,bar,6;"
    parser = Parser(data)
    assert parser.parsed_data==[[[1.0, 2.0, 0.0], [4.0, 0.0, 6.0]]]

# --- Creation Failure Tests ---

#def test_create_none():
#    pass #TODO
#
#def test_create_empty():
#    pass #TODO
#
#def test_create_bad_input_too_many_values():
#    pass #TODO
#
#def test_create_bad_input_too_few_values():
#    pass #TODO
#
#def test_create_bad_input_delimiters():
#    pass #TODO
[ "models.parser.Parser" ]
[((345, 357), 'models.parser.Parser', 'Parser', (['data'], {}), '(data)\n', (351, 357), False, 'from models.parser import Parser\n'), ((764, 776), 'models.parser.Parser', 'Parser', (['data'], {}), '(data)\n', (770, 776), False, 'from models.parser import Parser\n'), ((1035, 1047), 'models.parser.Parser', 'Parser', (['data'], {}), '(data)\n', (1041, 1047), False, 'from models.parser import Parser\n'), ((1155, 1167), 'models.parser.Parser', 'Parser', (['data'], {}), '(data)\n', (1161, 1167), False, 'from models.parser import Parser\n')]