| code (string, length 22-1.05M) | apis (list, length 1-3.31k) | extract_api (string, length 75-3.25M) |
|---|---|---|
"""Unit tests for q0202.py."""
import unittest
from src.utils.linkedlist import LinkedList
from src.q0202 import kth_element_to_last
class TestKthElementToLast(unittest.TestCase):
"""Tests for kth element to last."""
def test_kth_element_to_last(self):
linked_list = LinkedList()
self.assertEqual(kth_element_to_last(None, 1), None)
self.assertEqual(kth_element_to_last(linked_list, 1), None)
linked_list.insert_at_head(3)
self.assertEqual(kth_element_to_last(linked_list, 0), None)
self.assertEqual(kth_element_to_last(linked_list, 1), 3)
self.assertEqual(kth_element_to_last(linked_list, 2), None)
linked_list.insert_at_head(2)
self.assertEqual(kth_element_to_last(linked_list, 0), None)
self.assertEqual(kth_element_to_last(linked_list, 1), 3)
self.assertEqual(kth_element_to_last(linked_list, 2), 2)
self.assertEqual(kth_element_to_last(linked_list, 3), None)
if __name__ == '__main__':
unittest.main()
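# A minimal sketch of the function under test -- for illustration only, since the real
# implementation lives in src/q0202.py and the node attribute names used here (head,
# next_node, value) are assumptions. Two pointers are kept k nodes apart; when the lead
# pointer runs off the end, the trailing pointer sits on the kth-to-last node:
#
#     def kth_element_to_last(linked_list, k):
#         if linked_list is None or k < 1:
#             return None
#         lead = trail = linked_list.head
#         for _ in range(k):
#             if lead is None:
#                 return None
#             lead = lead.next_node
#         while lead is not None:
#             lead = lead.next_node
#             trail = trail.next_node
#         return trail.value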
|
[
"unittest.main",
"src.q0202.kth_element_to_last",
"src.utils.linkedlist.LinkedList"
] |
[((1007, 1022), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1020, 1022), False, 'import unittest\n'), ((287, 299), 'src.utils.linkedlist.LinkedList', 'LinkedList', ([], {}), '()\n', (297, 299), False, 'from src.utils.linkedlist import LinkedList\n'), ((325, 353), 'src.q0202.kth_element_to_last', 'kth_element_to_last', (['None', '(1)'], {}), '(None, 1)\n', (344, 353), False, 'from src.q0202 import kth_element_to_last\n'), ((386, 421), 'src.q0202.kth_element_to_last', 'kth_element_to_last', (['linked_list', '(1)'], {}), '(linked_list, 1)\n', (405, 421), False, 'from src.q0202 import kth_element_to_last\n'), ((493, 528), 'src.q0202.kth_element_to_last', 'kth_element_to_last', (['linked_list', '(0)'], {}), '(linked_list, 0)\n', (512, 528), False, 'from src.q0202 import kth_element_to_last\n'), ((561, 596), 'src.q0202.kth_element_to_last', 'kth_element_to_last', (['linked_list', '(1)'], {}), '(linked_list, 1)\n', (580, 596), False, 'from src.q0202 import kth_element_to_last\n'), ((626, 661), 'src.q0202.kth_element_to_last', 'kth_element_to_last', (['linked_list', '(2)'], {}), '(linked_list, 2)\n', (645, 661), False, 'from src.q0202 import kth_element_to_last\n'), ((733, 768), 'src.q0202.kth_element_to_last', 'kth_element_to_last', (['linked_list', '(0)'], {}), '(linked_list, 0)\n', (752, 768), False, 'from src.q0202 import kth_element_to_last\n'), ((801, 836), 'src.q0202.kth_element_to_last', 'kth_element_to_last', (['linked_list', '(1)'], {}), '(linked_list, 1)\n', (820, 836), False, 'from src.q0202 import kth_element_to_last\n'), ((866, 901), 'src.q0202.kth_element_to_last', 'kth_element_to_last', (['linked_list', '(2)'], {}), '(linked_list, 2)\n', (885, 901), False, 'from src.q0202 import kth_element_to_last\n'), ((931, 966), 'src.q0202.kth_element_to_last', 'kth_element_to_last', (['linked_list', '(3)'], {}), '(linked_list, 3)\n', (950, 966), False, 'from src.q0202 import kth_element_to_last\n')]
|
"""
This file provides endpoints for everything URL shortener related
"""
from flask import Blueprint, request, make_response, redirect, json
import time
import validators
from rushapi.reusables.context import db_cursor
from rushapi.reusables.context import db_connection
from rushapi.reusables.rng import get_random_string
from rushapi.reusables.user_validation import get_user_context
url_shortener = Blueprint("url_shortener", __name__)
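# Example of wiring this blueprint into an application (an illustrative sketch; the
# project's actual app factory is not shown in this file):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(url_shortener)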
@url_shortener.route('/create_redirect', methods=['POST'])
@url_shortener.route('/create_redirect/<desired_id>', methods=['POST', 'PUT'])
def create_redirect(desired_id=None):
"""
This endpoint handles the POST data submitted by the client.
    It will process this information and create a shortened URL.
:return: A newly created shortened URL.
"""
if desired_id:
user_context = get_user_context()
if not (user_context and user_context.premium):
return json.dumps({
"error": "Creating a custom redirect requires a premium account. "
"If you already have one, put your token in the headers.",
}), 401
premium = 1
url_id = desired_id
author_id = user_context.id
else:
premium = 0
url_id = get_random_string(7)
author_id = None
    if request.method in ('POST', 'PUT'):  # the custom-id route also accepts PUT
url = request.form['url']
delete_after = request.form['delete_after']
if len(delete_after) > 0:
try:
delete_after = int(delete_after)
except ValueError:
delete_after = 0
else:
delete_after = int(time.time()) + 2.592e+6
if not len(url) < 250:
return json.dumps({
"error": "URL length must be below 250 characters.",
}), 403
if not validators.url(url):
return json.dumps({
"error": "URL is not valid",
}), 403
domain_blacklist = tuple(db_cursor.execute("SELECT domain FROM domain_blacklist"))
for blacklisted_domain in domain_blacklist:
if blacklisted_domain[0] in url:
return json.dumps({
"error": "This domain is blacklisted.",
}), 403
db_cursor.execute("INSERT INTO urls "
"(id, author_id, url, creation_timestamp, premium, visits, delete_after, last_visit) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
[url_id, author_id, url, int(time.time()), premium, 0, delete_after, int(time.time())])
db_connection.commit()
return json.dumps({
"shortened_url": f"https://{request.host}/u/{url_id}"
}), 200
@url_shortener.route('/u/<url_id>')
def redirect_url(url_id):
"""
This endpoint looks up the unique identifier of the URL and redirects the user there.
    Along the way it takes note of the time visited and increases the visit count.
:param url_id: URL's unique identifier
:return: A redirect to that URL
"""
post_url_lookup = tuple(db_cursor.execute("SELECT url, visits, delete_after FROM urls WHERE id = ?", [url_id]))
if not post_url_lookup:
return make_response(redirect("https://www.youtube.com/watch?v=dQw4w9WgXcQ"))
visits = int(post_url_lookup[0][1])
db_cursor.execute("UPDATE urls SET visits = ? AND last_visit = ? WHERE id = ?",
[(visits+1), int(time.time()), url_id])
db_connection.commit()
return make_response(redirect(post_url_lookup[0][0]))
@url_shortener.route('/my_urls')
def my_urls():
"""
:return: This endpoint returns all the URLs the user has created.
"""
user_context = get_user_context()
if not user_context:
return json.dumps({
"error": "This endpoint requires an account. "
"If you already have one, put your token in the headers.",
}), 401
urls = db_cursor.execute("SELECT id, author_id, url, creation_timestamp, premium, visits, delete_after, last_visit "
"FROM urls WHERE author_id = ?", [user_context.id])
buffer = []
for url in urls:
buffer.append({
"id": url[0],
"author_id": url[1],
"url": url[2],
"creation_timestamp": url[3],
"premium": url[4],
"visits": url[5],
"delete_after": url[6],
"last_visit": url[7],
})
return json.dumps(buffer)
|
[
"rushapi.reusables.context.db_cursor.execute",
"flask.Blueprint",
"flask.redirect",
"rushapi.reusables.rng.get_random_string",
"validators.url",
"time.time",
"flask.json.dumps",
"rushapi.reusables.context.db_connection.commit",
"rushapi.reusables.user_validation.get_user_context"
] |
[((407, 443), 'flask.Blueprint', 'Blueprint', (['"""url_shortener"""', '__name__'], {}), "('url_shortener', __name__)\n", (416, 443), False, 'from flask import Blueprint, request, make_response, redirect, json\n'), ((3527, 3549), 'rushapi.reusables.context.db_connection.commit', 'db_connection.commit', ([], {}), '()\n', (3547, 3549), False, 'from rushapi.reusables.context import db_connection\n'), ((3765, 3783), 'rushapi.reusables.user_validation.get_user_context', 'get_user_context', ([], {}), '()\n', (3781, 3783), False, 'from rushapi.reusables.user_validation import get_user_context\n'), ((4004, 4172), 'rushapi.reusables.context.db_cursor.execute', 'db_cursor.execute', (['"""SELECT id, author_id, url, creation_timestamp, premium, visits, delete_after, last_visit FROM urls WHERE author_id = ?"""', '[user_context.id]'], {}), "(\n 'SELECT id, author_id, url, creation_timestamp, premium, visits, delete_after, last_visit FROM urls WHERE author_id = ?'\n , [user_context.id])\n", (4021, 4172), False, 'from rushapi.reusables.context import db_cursor\n'), ((4538, 4556), 'flask.json.dumps', 'json.dumps', (['buffer'], {}), '(buffer)\n', (4548, 4556), False, 'from flask import Blueprint, request, make_response, redirect, json\n'), ((858, 876), 'rushapi.reusables.user_validation.get_user_context', 'get_user_context', ([], {}), '()\n', (874, 876), False, 'from rushapi.reusables.user_validation import get_user_context\n'), ((1299, 1319), 'rushapi.reusables.rng.get_random_string', 'get_random_string', (['(7)'], {}), '(7)\n', (1316, 1319), False, 'from rushapi.reusables.rng import get_random_string\n'), ((2638, 2660), 'rushapi.reusables.context.db_connection.commit', 'db_connection.commit', ([], {}), '()\n', (2658, 2660), False, 'from rushapi.reusables.context import db_connection\n'), ((3133, 3223), 'rushapi.reusables.context.db_cursor.execute', 'db_cursor.execute', (['"""SELECT url, visits, delete_after FROM urls WHERE id = ?"""', '[url_id]'], {}), "('SELECT url, visits, delete_after FROM urls WHERE id = ?',\n [url_id])\n", (3150, 3223), False, 'from rushapi.reusables.context import db_cursor\n'), ((3576, 3607), 'flask.redirect', 'redirect', (['post_url_lookup[0][0]'], {}), '(post_url_lookup[0][0])\n', (3584, 3607), False, 'from flask import Blueprint, request, make_response, redirect, json\n'), ((1868, 1887), 'validators.url', 'validators.url', (['url'], {}), '(url)\n', (1882, 1887), False, 'import validators\n'), ((2020, 2076), 'rushapi.reusables.context.db_cursor.execute', 'db_cursor.execute', (['"""SELECT domain FROM domain_blacklist"""'], {}), "('SELECT domain FROM domain_blacklist')\n", (2037, 2076), False, 'from rushapi.reusables.context import db_cursor\n'), ((2677, 2744), 'flask.json.dumps', 'json.dumps', (["{'shortened_url': f'https://{request.host}/u/{url_id}'}"], {}), "({'shortened_url': f'https://{request.host}/u/{url_id}'})\n", (2687, 2744), False, 'from flask import Blueprint, request, make_response, redirect, json\n'), ((3278, 3333), 'flask.redirect', 'redirect', (['"""https://www.youtube.com/watch?v=dQw4w9WgXcQ"""'], {}), "('https://www.youtube.com/watch?v=dQw4w9WgXcQ')\n", (3286, 3333), False, 'from flask import Blueprint, request, make_response, redirect, json\n'), ((3824, 3948), 'flask.json.dumps', 'json.dumps', (["{'error':\n 'This endpoint requires an account. If you already have one, put your token in the headers.'\n }"], {}), "({'error':\n 'This endpoint requires an account. 
If you already have one, put your token in the headers.'\n })\n", (3834, 3948), False, 'from flask import Blueprint, request, make_response, redirect, json\n'), ((952, 1096), 'flask.json.dumps', 'json.dumps', (["{'error':\n 'Creating a custom redirect requires a premium account. If you already have one, put your token in the headers.'\n }"], {}), "({'error':\n 'Creating a custom redirect requires a premium account. If you already have one, put your token in the headers.'\n })\n", (962, 1096), False, 'from flask import Blueprint, request, make_response, redirect, json\n'), ((1750, 1815), 'flask.json.dumps', 'json.dumps', (["{'error': 'URL length must be below 250 characters.'}"], {}), "({'error': 'URL length must be below 250 characters.'})\n", (1760, 1815), False, 'from flask import Blueprint, request, make_response, redirect, json\n'), ((1908, 1949), 'flask.json.dumps', 'json.dumps', (["{'error': 'URL is not valid'}"], {}), "({'error': 'URL is not valid'})\n", (1918, 1949), False, 'from flask import Blueprint, request, make_response, redirect, json\n'), ((3500, 3511), 'time.time', 'time.time', ([], {}), '()\n', (3509, 3511), False, 'import time\n'), ((1675, 1686), 'time.time', 'time.time', ([], {}), '()\n', (1684, 1686), False, 'import time\n'), ((2198, 2250), 'flask.json.dumps', 'json.dumps', (["{'error': 'This domain is blacklisted.'}"], {}), "({'error': 'This domain is blacklisted.'})\n", (2208, 2250), False, 'from flask import Blueprint, request, make_response, redirect, json\n'), ((2571, 2582), 'time.time', 'time.time', ([], {}), '()\n', (2580, 2582), False, 'import time\n'), ((2615, 2626), 'time.time', 'time.time', ([], {}), '()\n', (2624, 2626), False, 'import time\n')]
|
from nipype.interfaces.ants.base import ANTSCommandInputSpec, ANTSCommand
from nipype.interfaces.ants.segmentation import N4BiasFieldCorrectionOutputSpec
from nipype.interfaces.base import (File, traits, isdefined)
from nipype.utils.filemanip import split_filename
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu
import nipype.interfaces.fsl as fsl
import os
from nipype.workflows.dmri.fsl.artifacts import _xfm_jacobian, _checkrnum
from nipype.workflows.dmri.fsl.utils import b0_average, apply_all_corrections, insert_mat, \
rotate_bvecs, vsm2warp, extract_bval, recompose_xfm, recompose_dwi, _checkinitxfm, enhance
__author__ = '<NAME>'
__date__ = "2015-05-08"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
"""This module overwrites some parts of Nipype since they did not work correctly.
The idea is that at a low level two functions did not work correctly. To enable nipype to use the fixed versions of
these functions we have to copy the entire chain to make it work.
Also, the original implementation calculated the read out times from the EPI parameters. This implementation requires
you to predefine the read out times.
"""
def all_peb_pipeline(name='hmc_sdc_ecc',
epi_params={'read_out_times': None, 'enc_dir': 'y-'},
altepi_params={'read_out_times': None, 'enc_dir': 'y'}):
"""
Builds a pipeline including three artifact corrections: head-motion
correction (HMC), susceptibility-derived distortion correction (SDC),
and Eddy currents-derived distortion correction (ECC).
.. warning:: this workflow rotates the gradients table (*b*-vectors)
[Leemans09]_.
Examples
--------
>>> from nipype.workflows.dmri.fsl.artifacts import all_peb_pipeline
>>> allcorr = all_peb_pipeline()
>>> allcorr.inputs.inputnode.in_file = 'epi.nii'
>>> allcorr.inputs.inputnode.alt_file = 'epi_rev.nii'
>>> allcorr.inputs.inputnode.in_bval = 'diffusion.bval'
>>> allcorr.inputs.inputnode.in_bvec = 'diffusion.bvec'
>>> allcorr.run() # doctest: +SKIP
"""
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_bvec',
'in_bval', 'alt_file']), name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_file', 'out_mask',
'out_bvec']), name='outputnode')
avg_b0_0 = pe.Node(niu.Function(input_names=['in_dwi', 'in_bval'],
output_names=['out_file'], function=b0_average),
name='b0_avg_pre')
avg_b0_1 = pe.Node(niu.Function(input_names=['in_dwi', 'in_bval'],
output_names=['out_file'], function=b0_average),
name='b0_avg_post')
bet_dwi0 = pe.Node(fsl.BET(frac=0.3, mask=True, robust=True),
name='bet_dwi_pre')
bet_dwi1 = pe.Node(fsl.BET(frac=0.3, mask=True, robust=True),
name='bet_dwi_post')
hmc = hmc_pipeline()
sdc = sdc_peb(epi_params=epi_params, altepi_params=altepi_params)
ecc = ecc_pipeline()
unwarp = apply_all_corrections()
wf = pe.Workflow(name=name)
wf.connect([
(inputnode, hmc, [('in_file', 'inputnode.in_file'),
('in_bvec', 'inputnode.in_bvec'),
('in_bval', 'inputnode.in_bval')]),
(inputnode, avg_b0_0, [('in_file', 'in_dwi'),
('in_bval', 'in_bval')]),
(avg_b0_0, bet_dwi0, [('out_file', 'in_file')]),
(bet_dwi0, hmc, [('mask_file', 'inputnode.in_mask')]),
(hmc, sdc, [('outputnode.out_file', 'inputnode.in_file')]),
(bet_dwi0, sdc, [('mask_file', 'inputnode.in_mask')]),
(inputnode, sdc, [('in_bval', 'inputnode.in_bval'),
('alt_file', 'inputnode.alt_file')]),
(inputnode, ecc, [('in_file', 'inputnode.in_file'),
('in_bval', 'inputnode.in_bval')]),
(bet_dwi0, ecc, [('mask_file', 'inputnode.in_mask')]),
(hmc, ecc, [('outputnode.out_xfms', 'inputnode.in_xfms')]),
(ecc, avg_b0_1, [('outputnode.out_file', 'in_dwi')]),
(inputnode, avg_b0_1, [('in_bval', 'in_bval')]),
(avg_b0_1, bet_dwi1, [('out_file', 'in_file')]),
(inputnode, unwarp, [('in_file', 'inputnode.in_dwi')]),
(hmc, unwarp, [('outputnode.out_xfms', 'inputnode.in_hmc')]),
(ecc, unwarp, [('outputnode.out_xfms', 'inputnode.in_ecc')]),
(sdc, unwarp, [('outputnode.out_warp', 'inputnode.in_sdc')]),
(hmc, outputnode, [('outputnode.out_bvec', 'out_bvec')]),
(unwarp, outputnode, [('outputnode.out_file', 'out_file')]),
(bet_dwi1, outputnode, [('mask_file', 'out_mask')])
])
return wf
def hmc_pipeline(name='motion_correct'):
"""
HMC stands for head-motion correction.
Creates a pipeline that corrects for head motion artifacts in dMRI
sequences.
It takes a series of diffusion weighted images and rigidly co-registers
them to one reference image. Finally, the `b`-matrix is rotated accordingly
[Leemans09]_ making use of the rotation matrix obtained by FLIRT.
Search angles have been limited to 4 degrees, based on results in
[Yendiki13]_.
A list of rigid transformation matrices is provided, so that transforms
can be chained.
This is useful to correct for artifacts with only one interpolation process
(as previously discussed `here
<https://github.com/nipy/nipype/pull/530#issuecomment-14505042>`_),
and also to compute nuisance regressors as proposed by [Yendiki13]_.
.. warning:: This workflow rotates the `b`-vectors, so please be advised
that not all the dicom converters ensure the consistency between the
resulting nifti orientation and the gradients table (e.g. dcm2nii
checks it).
.. admonition:: References
.. [Leemans09] <NAME>, and <NAME>, `The B-matrix must be rotated
when correcting for subject motion in DTI data
<http://dx.doi.org/10.1002/mrm.21890>`_,
Magn Reson Med. 61(6):1336-49. 2009. doi: 10.1002/mrm.21890.
.. [Yendiki13] <NAME> et al., `Spurious group differences due to head
motion in a diffusion MRI study
<http://dx.doi.org/10.1016/j.neuroimage.2013.11.027>`_.
Neuroimage. 21(88C):79-90. 2013. doi: 10.1016/j.neuroimage.2013.11.027
Example
-------
>>> from nipype.workflows.dmri.fsl.artifacts import hmc_pipeline
>>> hmc = hmc_pipeline()
>>> hmc.inputs.inputnode.in_file = 'diffusion.nii'
>>> hmc.inputs.inputnode.in_bvec = 'diffusion.bvec'
>>> hmc.inputs.inputnode.in_bval = 'diffusion.bval'
>>> hmc.inputs.inputnode.in_mask = 'mask.nii'
>>> hmc.run() # doctest: +SKIP
Inputs::
inputnode.in_file - input dwi file
inputnode.in_mask - weights mask of reference image (a file with data \
range in [0.0, 1.0], indicating the weight of each voxel when computing the \
metric).
inputnode.in_bvec - gradients file (b-vectors)
inputnode.ref_num (optional, default=0) index of the b0 volume that \
should be taken as reference
Outputs::
outputnode.out_file - corrected dwi file
outputnode.out_bvec - rotated gradient vectors table
outputnode.out_xfms - list of transformation matrices
"""
from nipype.workflows.data import get_flirt_schedule
params = dict(dof=6, bgvalue=0, save_log=True, no_search=True,
# cost='mutualinfo', cost_func='mutualinfo', bins=64,
schedule=get_flirt_schedule('hmc'))
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'ref_num',
'in_bvec', 'in_bval', 'in_mask']), name='inputnode')
split = pe.Node(niu.Function(function=hmc_split,
input_names=['in_file', 'in_bval', 'ref_num'],
output_names=['out_ref', 'out_mov', 'out_bval', 'volid']),
name='SplitDWI')
flirt = dwi_flirt(flirt_param=params)
insmat = pe.Node(niu.Function(input_names=['inlist', 'volid'],
output_names=['out'], function=insert_mat),
name='InsertRefmat')
rot_bvec = pe.Node(niu.Function(input_names=['in_bvec', 'in_matrix'],
output_names=['out_file'], function=rotate_bvecs),
name='Rotate_Bvec')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_file',
'out_bvec', 'out_xfms']),
name='outputnode')
wf = pe.Workflow(name=name)
wf.connect([
(inputnode, split, [('in_file', 'in_file'),
('in_bval', 'in_bval'),
('ref_num', 'ref_num')]),
(inputnode, flirt, [('in_mask', 'inputnode.ref_mask')]),
(split, flirt, [('out_ref', 'inputnode.reference'),
('out_mov', 'inputnode.in_file'),
('out_bval', 'inputnode.in_bval')]),
(flirt, insmat, [('outputnode.out_xfms', 'inlist')]),
(split, insmat, [('volid', 'volid')]),
(inputnode, rot_bvec, [('in_bvec', 'in_bvec')]),
(insmat, rot_bvec, [('out', 'in_matrix')]),
(rot_bvec, outputnode, [('out_file', 'out_bvec')]),
(flirt, outputnode, [('outputnode.out_file', 'out_file')]),
(insmat, outputnode, [('out', 'out_xfms')])
])
return wf
def ecc_pipeline(name='eddy_correct'):
"""
ECC stands for Eddy currents correction.
Creates a pipeline that corrects for artifacts induced by Eddy currents in
dMRI sequences.
It takes a series of diffusion weighted images and linearly co-registers
them to one reference image (the average of all b0s in the dataset).
DWIs are also modulated by the determinant of the Jacobian as indicated by
[Jones10]_ and [Rohde04]_.
A list of rigid transformation matrices can be provided, sourcing from a
:func:`.hmc_pipeline` workflow, to initialize registrations in a *motion
free* framework.
A list of affine transformation matrices is available as output, so that
transforms can be chained (discussion
`here <https://github.com/nipy/nipype/pull/530#issuecomment-14505042>`_).
.. admonition:: References
.. [Jones10] Jones DK, `The signal intensity must be modulated by the
determinant of the Jacobian when correcting for eddy currents in
diffusion MRI
<http://cds.ismrm.org/protected/10MProceedings/files/1644_129.pdf>`_,
Proc. ISMRM 18th Annual Meeting, (2010).
.. [Rohde04] Rohde et al., `Comprehensive Approach for Correction of
Motion and Distortion in Diffusion-Weighted MRI
<http://stbb.nichd.nih.gov/pdf/com_app_cor_mri04.pdf>`_, MRM
51:103-114 (2004).
Example
-------
>>> from nipype.workflows.dmri.fsl.artifacts import ecc_pipeline
>>> ecc = ecc_pipeline()
>>> ecc.inputs.inputnode.in_file = 'diffusion.nii'
>>> ecc.inputs.inputnode.in_bval = 'diffusion.bval'
>>> ecc.inputs.inputnode.in_mask = 'mask.nii'
>>> ecc.run() # doctest: +SKIP
Inputs::
inputnode.in_file - input dwi file
inputnode.in_mask - weights mask of reference image (a file with data \
range in [0.0, 1.0], indicating the weight of each voxel when computing the \
metric).
inputnode.in_bval - b-values table
inputnode.in_xfms - list of matrices to initialize registration (from \
head-motion correction)
Outputs::
outputnode.out_file - corrected dwi file
outputnode.out_xfms - list of transformation matrices
"""
from nipype.workflows.data import get_flirt_schedule
params = dict(dof=12, no_search=True, interp='spline', bgvalue=0,
schedule=get_flirt_schedule('ecc'))
# cost='normmi', cost_func='normmi', bins=64,
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_bval',
'in_mask', 'in_xfms']), name='inputnode')
avg_b0 = pe.Node(niu.Function(input_names=['in_dwi', 'in_bval'],
output_names=['out_file'], function=b0_average),
name='b0_avg')
pick_dws = pe.Node(niu.Function(input_names=['in_dwi', 'in_bval', 'b'],
output_names=['out_file'], function=extract_bval),
name='ExtractDWI')
pick_dws.inputs.b = 'diff'
flirt = dwi_flirt(flirt_param=params, excl_nodiff=True)
mult = pe.MapNode(fsl.BinaryMaths(operation='mul'), name='ModulateDWIs',
iterfield=['in_file', 'operand_value'])
thres = pe.MapNode(fsl.Threshold(thresh=0.0), iterfield=['in_file'],
name='RemoveNegative')
split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs')
get_mat = pe.Node(niu.Function(input_names=['in_bval', 'in_xfms'],
output_names=['out_files'], function=recompose_xfm),
name='GatherMatrices')
merge = pe.Node(niu.Function(input_names=['in_dwi', 'in_bval', 'in_corrected'],
output_names=['out_file'], function=recompose_dwi), name='MergeDWIs')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_file', 'out_xfms']),
name='outputnode')
wf = pe.Workflow(name=name)
wf.connect([
(inputnode, avg_b0, [('in_file', 'in_dwi'),
('in_bval', 'in_bval')]),
(inputnode, pick_dws, [('in_file', 'in_dwi'),
('in_bval', 'in_bval')]),
(inputnode, merge, [('in_file', 'in_dwi'),
('in_bval', 'in_bval')]),
(inputnode, flirt, [('in_mask', 'inputnode.ref_mask'),
('in_xfms', 'inputnode.in_xfms'),
('in_bval', 'inputnode.in_bval')]),
(inputnode, get_mat, [('in_bval', 'in_bval')]),
(avg_b0, flirt, [('out_file', 'inputnode.reference')]),
(pick_dws, flirt, [('out_file', 'inputnode.in_file')]),
(flirt, get_mat, [('outputnode.out_xfms', 'in_xfms')]),
(flirt, mult, [(('outputnode.out_xfms', _xfm_jacobian),
'operand_value')]),
(flirt, split, [('outputnode.out_file', 'in_file')]),
(split, mult, [('out_files', 'in_file')]),
(mult, thres, [('out_file', 'in_file')]),
(thres, merge, [('out_file', 'in_corrected')]),
(get_mat, outputnode, [('out_files', 'out_xfms')]),
(merge, outputnode, [('out_file', 'out_file')])
])
return wf
def sdc_peb(name='peb_correction',
epi_params={'read_out_times': None, 'enc_dir': 'y-'},
altepi_params={'read_out_times': None, 'enc_dir': 'y'}):
"""
SDC stands for susceptibility distortion correction. PEB stands for
phase-encoding-based.
The phase-encoding-based (PEB) method implements SDC by acquiring
    diffusion images with two different encoding directions [Andersson2003]_.
The most typical case is acquiring with opposed phase-gradient blips
(e.g. *A>>>P* and *P>>>A*, or equivalently, *-y* and *y*)
as in [Chiou2000]_, but it is also possible to use orthogonal
configurations [Cordes2000]_ (e.g. *A>>>P* and *L>>>R*,
or equivalently *-y* and *x*).
This workflow uses the implementation of FSL
(`TOPUP <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/TOPUP>`_).
Example
-------
>>> from nipype.workflows.dmri.fsl.artifacts import sdc_peb
>>> peb = sdc_peb()
>>> peb.inputs.inputnode.in_file = 'epi.nii'
>>> peb.inputs.inputnode.alt_file = 'epi_rev.nii'
>>> peb.inputs.inputnode.in_bval = 'diffusion.bval'
>>> peb.inputs.inputnode.in_mask = 'mask.nii'
>>> peb.run() # doctest: +SKIP
.. admonition:: References
.. [Andersson2003] <NAME>L et al., `How to correct susceptibility
distortions in spin-echo echo-planar images: application to diffusion
tensor imaging <http://dx.doi.org/10.1016/S1053-8119(03)00336-7>`_.
Neuroimage. 2003 Oct;20(2):870-88. doi: 10.1016/S1053-8119(03)00336-7
.. [Cordes2000] <NAME> et al., Geometric distortion correction in EPI
using two images with orthogonal phase-encoding directions, in Proc.
ISMRM (8), p.1712, Denver, US, 2000.
.. [Chiou2000] <NAME>, and <NAME>, A simple method to correct
off-resonance related distortion in echo planar imaging, in Proc.
ISMRM (8), p.1712, Denver, US, 2000.
"""
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_bval',
'in_mask', 'alt_file', 'ref_num']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_file', 'out_vsm',
'out_warp']), name='outputnode')
b0_ref = pe.Node(fsl.ExtractROI(t_size=1), name='b0_ref')
b0_alt = pe.Node(fsl.ExtractROI(t_size=1), name='b0_alt')
b0_comb = pe.Node(niu.Merge(2), name='b0_list')
b0_merge = pe.Node(fsl.Merge(dimension='t'), name='b0_merged')
topup = pe.Node(fsl.TOPUP(), name='topup')
topup.inputs.encoding_direction = [epi_params['enc_dir'],
altepi_params['enc_dir']]
    readout = epi_params['read_out_times']
    topup.inputs.readout_times = [readout,
                                  altepi_params['read_out_times']]
unwarp = pe.Node(fsl.ApplyTOPUP(in_index=[1], method='jac'), name='unwarp')
# scaling = pe.Node(niu.Function(input_names=['in_file', 'enc_dir'],
# output_names=['factor'], function=_get_zoom),
# name='GetZoom')
# scaling.inputs.enc_dir = epi_params['enc_dir']
vsm2dfm = vsm2warp()
vsm2dfm.inputs.inputnode.enc_dir = epi_params['enc_dir']
vsm2dfm.inputs.inputnode.scaling = readout
wf = pe.Workflow(name=name)
wf.connect([
(inputnode, b0_ref, [('in_file', 'in_file'),
(('ref_num', _checkrnum), 't_min')]),
(inputnode, b0_alt, [('alt_file', 'in_file'),
(('ref_num', _checkrnum), 't_min')]),
(b0_ref, b0_comb, [('roi_file', 'in1')]),
(b0_alt, b0_comb, [('roi_file', 'in2')]),
(b0_comb, b0_merge, [('out', 'in_files')]),
(b0_merge, topup, [('merged_file', 'in_file')]),
(topup, unwarp, [('out_fieldcoef', 'in_topup_fieldcoef'),
('out_movpar', 'in_topup_movpar'),
('out_enc_file', 'encoding_file')]),
(inputnode, unwarp, [('in_file', 'in_files')]),
(unwarp, outputnode, [('out_corrected', 'out_file')]),
# (b0_ref, scaling, [('roi_file', 'in_file')]),
# (scaling, vsm2dfm, [('factor', 'inputnode.scaling')]),
(b0_ref, vsm2dfm, [('roi_file', 'inputnode.in_ref')]),
(topup, vsm2dfm, [('out_field', 'inputnode.in_vsm')]),
(topup, outputnode, [('out_field', 'out_vsm')]),
(vsm2dfm, outputnode, [('outputnode.out_warp', 'out_warp')])
])
return wf
def hmc_split(in_file, in_bval, ref_num=0, lowbval=25.0):
"""
Selects the reference and moving volumes from a dwi dataset
for the purpose of HMC.
"""
import numpy as np
import nibabel as nb
import os.path as op
from nipype.interfaces.base import isdefined
im = nb.load(in_file)
data = im.get_data()
hdr = im.get_header().copy()
bval = np.loadtxt(in_bval)
lowbs = np.where(bval <= lowbval)[0]
volid = lowbs[0]
if (isdefined(ref_num) and (ref_num < len(lowbs))):
volid = [ref_num]
# todo add next two lines in Nipype git
if len(volid) == 1:
volid = volid[0]
if volid == 0:
data = data[..., 1:]
bval = bval[1:]
elif volid == (data.shape[-1] - 1):
data = data[..., :-1]
bval = bval[:-1]
else:
data = np.concatenate((data[..., :volid], data[..., (volid + 1):]),
axis=3)
bval = np.hstack((bval[:volid], bval[(volid + 1):]))
out_ref = op.abspath('hmc_ref.nii.gz')
out_mov = op.abspath('hmc_mov.nii.gz')
out_bval = op.abspath('bval_split.txt')
refdata = data[..., volid]
hdr.set_data_shape(refdata.shape)
nb.Nifti1Image(refdata, im.get_affine(), hdr).to_filename(out_ref)
hdr.set_data_shape(data.shape)
nb.Nifti1Image(data, im.get_affine(), hdr).to_filename(out_mov)
np.savetxt(out_bval, bval)
return [out_ref, out_mov, out_bval, volid]
class N4BiasFieldCorrectionInputSpec(ANTSCommandInputSpec):
# todo dimensionality in Nipype git
dimension = traits.Enum(3, 2, argstr='--image-dimensionality %d',
usedefault=True,
desc='image dimension (2 or 3)')
input_image = File(argstr='--input-image %s', mandatory=True,
desc=('image to apply transformation to (generally a '
'coregistered functional)'))
mask_image = File(argstr='--mask-image %s')
weight_image = File(argstr='--weight-image %s')
output_image = traits.Str(argstr='--output %s',
desc='output file name', genfile=True,
hash_files=False)
bspline_fitting_distance = traits.Float(argstr="--bspline-fitting %s")
bspline_order = traits.Int(requires=['bspline_fitting_distance'])
shrink_factor = traits.Int(argstr="--shrink-factor %d")
n_iterations = traits.List(traits.Int(), argstr="--convergence %s",
requires=['convergence_threshold'])
convergence_threshold = traits.Float(requires=['n_iterations'])
save_bias = traits.Bool(False, mandatory=True, usedefault=True,
desc=('True if the estimated bias should be saved'
' to file.'), xor=['bias_image'])
bias_image = File(desc='Filename for the estimated bias.',
hash_files=False)
class N4BiasFieldCorrection(ANTSCommand):
"""N4 is a variant of the popular N3 (nonparameteric nonuniform normalization)
retrospective bias correction algorithm. Based on the assumption that the
corruption of the low frequency bias field can be modeled as a convolution of
the intensity histogram by a Gaussian, the basic algorithmic protocol is to
iterate between deconvolving the intensity histogram by a Gaussian, remapping
the intensities, and then spatially smoothing this result by a B-spline modeling
of the bias field itself. The modifications from and improvements obtained over
the original N3 algorithm are described in [Tustison2010]_.
.. [Tustison2010] <NAME> et al.,
N4ITK: Improved N3 Bias Correction, IEEE Transactions on Medical Imaging,
29(6):1310-1320, June 2010.
Examples
--------
>>> import copy
>>> from nipype.interfaces.ants import N4BiasFieldCorrection
>>> n4 = N4BiasFieldCorrection()
>>> n4.inputs.dimension = 3
>>> n4.inputs.input_image = 'structural.nii'
>>> n4.inputs.bspline_fitting_distance = 300
>>> n4.inputs.shrink_factor = 3
>>> n4.inputs.n_iterations = [50,50,30,20]
>>> n4.inputs.convergence_threshold = 1e-6
>>> n4.cmdline
'N4BiasFieldCorrection --bspline-fitting [ 300 ] \
--image-dimensionality 3 --input-image structural.nii \
--convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \
--shrink-factor 3'
>>> n4_2 = copy.deepcopy(n4)
>>> n4_2.inputs.bspline_order = 5
>>> n4_2.cmdline
'N4BiasFieldCorrection --bspline-fitting [ 300, 5 ] \
--image-dimensionality 3 --input-image structural.nii \
--convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \
--shrink-factor 3'
>>> n4_3 = N4BiasFieldCorrection()
>>> n4_3.inputs.input_image = 'structural.nii'
>>> n4_3.inputs.save_bias = True
>>> n4_3.cmdline
    'N4BiasFieldCorrection --image-dimensionality 3 --input-image structural.nii \
--output [ structural_corrected.nii, structural_bias.nii ]'
"""
_cmd = 'N4BiasFieldCorrection'
input_spec = N4BiasFieldCorrectionInputSpec
output_spec = N4BiasFieldCorrectionOutputSpec
def _gen_filename(self, name):
if name == 'output_image':
output = self.inputs.output_image
if not isdefined(output):
_, name, ext = split_filename(self.inputs.input_image)
output = name + '_corrected' + ext
return output
if name == 'bias_image':
output = self.inputs.bias_image
if not isdefined(output):
_, name, ext = split_filename(self.inputs.input_image)
output = name + '_bias' + ext
return output
return None
def _format_arg(self, name, trait_spec, value):
if ((name == 'output_image') and
(self.inputs.save_bias or isdefined(self.inputs.bias_image))):
bias_image = self._gen_filename('bias_image')
output = self._gen_filename('output_image')
newval = '[ %s, %s ]' % (output, bias_image)
return trait_spec.argstr % newval
if name == 'bspline_fitting_distance':
if isdefined(self.inputs.bspline_order):
newval = '[ %g, %d ]' % (value, self.inputs.bspline_order)
else:
newval = '[ %g ]' % value
return trait_spec.argstr % newval
if ((name == 'n_iterations') and
(isdefined(self.inputs.convergence_threshold))):
newval = '[ %s, %g ]' % ('x'.join([str(elt) for elt in value]),
self.inputs.convergence_threshold)
return trait_spec.argstr % newval
return super(N4BiasFieldCorrection,
self)._format_arg(name, trait_spec, value)
def _parse_inputs(self, skip=None):
if skip is None:
skip = []
skip += ['save_bias', 'bias_image']
return super(N4BiasFieldCorrection, self)._parse_inputs(skip=skip)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_image'] = os.path.abspath(self._gen_filename('output_image'))
if self.inputs.save_bias or isdefined(self.inputs.bias_image):
outputs['bias_image'] = os.path.abspath(self._gen_filename('bias_image'))
return outputs
# todo remove this if N4BiasFieldCorrection works again
def dwi_flirt(name='DWICoregistration', excl_nodiff=False,
flirt_param={}):
"""
Generates a workflow for linear registration of dwi volumes
"""
inputnode = pe.Node(niu.IdentityInterface(fields=['reference',
'in_file', 'ref_mask', 'in_xfms', 'in_bval']),
name='inputnode')
initmat = pe.Node(niu.Function(input_names=['in_bval', 'in_xfms',
'excl_nodiff'], output_names=['init_xfms'],
function=_checkinitxfm), name='InitXforms')
initmat.inputs.excl_nodiff = excl_nodiff
dilate = pe.Node(fsl.maths.MathsCommand(nan2zeros=True,
args='-kernel sphere 5 -dilM'), name='MskDilate')
split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs')
pick_ref = pe.Node(niu.Select(), name='Pick_b0')
n4 = pe.Node(N4BiasFieldCorrection(dimension=3), name='Bias')
enhb0 = pe.Node(niu.Function(input_names=['in_file', 'in_mask',
'clip_limit'], output_names=['out_file'],
function=enhance), name='B0Equalize')
enhb0.inputs.clip_limit = 0.015
enhdw = pe.MapNode(niu.Function(input_names=['in_file', 'in_mask'],
output_names=['out_file'], function=enhance),
name='DWEqualize', iterfield=['in_file'])
flirt = pe.MapNode(fsl.FLIRT(**flirt_param), name='CoRegistration',
iterfield=['in_file', 'in_matrix_file'])
thres = pe.MapNode(fsl.Threshold(thresh=0.0), iterfield=['in_file'],
name='RemoveNegative')
merge = pe.Node(fsl.Merge(dimension='t'), name='MergeDWIs')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_file',
'out_xfms']), name='outputnode')
wf = pe.Workflow(name=name)
wf.connect([
(inputnode, split, [('in_file', 'in_file')]),
(inputnode, dilate, [('ref_mask', 'in_file')]),
(inputnode, enhb0, [('ref_mask', 'in_mask')]),
(inputnode, initmat, [('in_xfms', 'in_xfms'),
('in_bval', 'in_bval')]),
(inputnode, n4, [('reference', 'input_image'),
('ref_mask', 'mask_image')]),
(dilate, flirt, [('out_file', 'ref_weight'),
('out_file', 'in_weight')]),
(n4, enhb0, [('output_image', 'in_file')]),
(split, enhdw, [('out_files', 'in_file')]),
(dilate, enhdw, [('out_file', 'in_mask')]),
(enhb0, flirt, [('out_file', 'reference')]),
(enhdw, flirt, [('out_file', 'in_file')]),
(initmat, flirt, [('init_xfms', 'in_matrix_file')]),
(flirt, thres, [('out_file', 'in_file')]),
(thres, merge, [('out_file', 'in_files')]),
(merge, outputnode, [('merged_file', 'out_file')]),
(flirt, outputnode, [('out_matrix_file', 'out_xfms')])
])
return wf
|
[
"nipype.interfaces.fsl.ApplyTOPUP",
"nipype.interfaces.base.traits.Float",
"nipype.interfaces.utility.IdentityInterface",
"nipype.interfaces.fsl.ExtractROI",
"nipype.interfaces.fsl.Split",
"nipype.interfaces.fsl.Merge",
"nipype.workflows.dmri.fsl.utils.apply_all_corrections",
"nipype.interfaces.base.isdefined",
"os.path.abspath",
"nipype.interfaces.fsl.Threshold",
"numpy.savetxt",
"nipype.interfaces.base.traits.Int",
"nipype.interfaces.base.traits.Bool",
"numpy.loadtxt",
"nipype.workflows.data.get_flirt_schedule",
"nipype.pipeline.engine.Workflow",
"nipype.utils.filemanip.split_filename",
"nipype.interfaces.fsl.TOPUP",
"nipype.interfaces.base.traits.Enum",
"nipype.interfaces.utility.Merge",
"numpy.hstack",
"nipype.interfaces.utility.Select",
"nipype.interfaces.fsl.FLIRT",
"nipype.interfaces.utility.Function",
"numpy.concatenate",
"nipype.interfaces.fsl.BET",
"nipype.interfaces.base.traits.Str",
"nipype.workflows.dmri.fsl.utils.vsm2warp",
"nibabel.load",
"nipype.interfaces.fsl.maths.MathsCommand",
"nipype.interfaces.base.File",
"numpy.where",
"nipype.interfaces.fsl.BinaryMaths"
] |
[((3100, 3123), 'nipype.workflows.dmri.fsl.utils.apply_all_corrections', 'apply_all_corrections', ([], {}), '()\n', (3121, 3123), False, 'from nipype.workflows.dmri.fsl.utils import b0_average, apply_all_corrections, insert_mat, rotate_bvecs, vsm2warp, extract_bval, recompose_xfm, recompose_dwi, _checkinitxfm, enhance\n'), ((3134, 3156), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', ([], {'name': 'name'}), '(name=name)\n', (3145, 3156), True, 'import nipype.pipeline.engine as pe\n'), ((8736, 8758), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', ([], {'name': 'name'}), '(name=name)\n', (8747, 8758), True, 'import nipype.pipeline.engine as pe\n'), ((13563, 13585), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', ([], {'name': 'name'}), '(name=name)\n', (13574, 13585), True, 'import nipype.pipeline.engine as pe\n'), ((18116, 18126), 'nipype.workflows.dmri.fsl.utils.vsm2warp', 'vsm2warp', ([], {}), '()\n', (18124, 18126), False, 'from nipype.workflows.dmri.fsl.utils import b0_average, apply_all_corrections, insert_mat, rotate_bvecs, vsm2warp, extract_bval, recompose_xfm, recompose_dwi, _checkinitxfm, enhance\n'), ((18245, 18267), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', ([], {'name': 'name'}), '(name=name)\n', (18256, 18267), True, 'import nipype.pipeline.engine as pe\n'), ((19860, 19876), 'nibabel.load', 'nb.load', (['in_file'], {}), '(in_file)\n', (19867, 19876), True, 'import nibabel as nb\n'), ((19946, 19965), 'numpy.loadtxt', 'np.loadtxt', (['in_bval'], {}), '(in_bval)\n', (19956, 19965), True, 'import numpy as np\n'), ((20574, 20602), 'os.path.abspath', 'op.abspath', (['"""hmc_ref.nii.gz"""'], {}), "('hmc_ref.nii.gz')\n", (20584, 20602), True, 'import os.path as op\n'), ((20617, 20645), 'os.path.abspath', 'op.abspath', (['"""hmc_mov.nii.gz"""'], {}), "('hmc_mov.nii.gz')\n", (20627, 20645), True, 'import os.path as op\n'), ((20661, 20689), 'os.path.abspath', 'op.abspath', (['"""bval_split.txt"""'], {}), "('bval_split.txt')\n", (20671, 20689), True, 'import os.path as op\n'), ((20939, 20965), 'numpy.savetxt', 'np.savetxt', (['out_bval', 'bval'], {}), '(out_bval, bval)\n', (20949, 20965), True, 'import numpy as np\n'), ((21131, 21239), 'nipype.interfaces.base.traits.Enum', 'traits.Enum', (['(3)', '(2)'], {'argstr': '"""--image-dimensionality %d"""', 'usedefault': '(True)', 'desc': '"""image dimension (2 or 3)"""'}), "(3, 2, argstr='--image-dimensionality %d', usedefault=True, desc\n ='image dimension (2 or 3)')\n", (21142, 21239), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((21309, 21440), 'nipype.interfaces.base.File', 'File', ([], {'argstr': '"""--input-image %s"""', 'mandatory': '(True)', 'desc': '"""image to apply transformation to (generally a coregistered functional)"""'}), "(argstr='--input-image %s', mandatory=True, desc=\n 'image to apply transformation to (generally a coregistered functional)')\n", (21313, 21440), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((21510, 21540), 'nipype.interfaces.base.File', 'File', ([], {'argstr': '"""--mask-image %s"""'}), "(argstr='--mask-image %s')\n", (21514, 21540), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((21560, 21592), 'nipype.interfaces.base.File', 'File', ([], {'argstr': '"""--weight-image %s"""'}), "(argstr='--weight-image %s')\n", (21564, 21592), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((21612, 21705), 'nipype.interfaces.base.traits.Str', 'traits.Str', ([], {'argstr': '"""--output %s"""', 'desc': 
'"""output file name"""', 'genfile': '(True)', 'hash_files': '(False)'}), "(argstr='--output %s', desc='output file name', genfile=True,\n hash_files=False)\n", (21622, 21705), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((21793, 21836), 'nipype.interfaces.base.traits.Float', 'traits.Float', ([], {'argstr': '"""--bspline-fitting %s"""'}), "(argstr='--bspline-fitting %s')\n", (21805, 21836), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((21857, 21906), 'nipype.interfaces.base.traits.Int', 'traits.Int', ([], {'requires': "['bspline_fitting_distance']"}), "(requires=['bspline_fitting_distance'])\n", (21867, 21906), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((21927, 21966), 'nipype.interfaces.base.traits.Int', 'traits.Int', ([], {'argstr': '"""--shrink-factor %d"""'}), "(argstr='--shrink-factor %d')\n", (21937, 21966), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((22134, 22173), 'nipype.interfaces.base.traits.Float', 'traits.Float', ([], {'requires': "['n_iterations']"}), "(requires=['n_iterations'])\n", (22146, 22173), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((22190, 22326), 'nipype.interfaces.base.traits.Bool', 'traits.Bool', (['(False)'], {'mandatory': '(True)', 'usedefault': '(True)', 'desc': '"""True if the estimated bias should be saved to file."""', 'xor': "['bias_image']"}), "(False, mandatory=True, usedefault=True, desc=\n 'True if the estimated bias should be saved to file.', xor=['bias_image'])\n", (22201, 22326), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((22406, 22469), 'nipype.interfaces.base.File', 'File', ([], {'desc': '"""Filename for the estimated bias."""', 'hash_files': '(False)'}), "(desc='Filename for the estimated bias.', hash_files=False)\n", (22410, 22469), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((28747, 28769), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', ([], {'name': 'name'}), '(name=name)\n', (28758, 28769), True, 'import nipype.pipeline.engine as pe\n'), ((2117, 2192), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['in_file', 'in_bvec', 'in_bval', 'alt_file']"}), "(fields=['in_file', 'in_bvec', 'in_bval', 'alt_file'])\n", (2138, 2192), True, 'import nipype.interfaces.utility as niu\n'), ((2262, 2328), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['out_file', 'out_mask', 'out_bvec']"}), "(fields=['out_file', 'out_mask', 'out_bvec'])\n", (2283, 2328), True, 'import nipype.interfaces.utility as niu\n'), ((2398, 2497), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_dwi', 'in_bval']", 'output_names': "['out_file']", 'function': 'b0_average'}), "(input_names=['in_dwi', 'in_bval'], output_names=['out_file'],\n function=b0_average)\n", (2410, 2497), True, 'import nipype.interfaces.utility as niu\n'), ((2583, 2682), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_dwi', 'in_bval']", 'output_names': "['out_file']", 'function': 'b0_average'}), "(input_names=['in_dwi', 'in_bval'], output_names=['out_file'],\n function=b0_average)\n", (2595, 2682), True, 'import nipype.interfaces.utility as niu\n'), ((2769, 2810), 'nipype.interfaces.fsl.BET', 'fsl.BET', ([], {'frac': '(0.3)', 'mask': '(True)', 'robust': '(True)'}), '(frac=0.3, mask=True, robust=True)\n', (2776, 2810), True, 'import nipype.interfaces.fsl as fsl\n'), 
((2878, 2919), 'nipype.interfaces.fsl.BET', 'fsl.BET', ([], {'frac': '(0.3)', 'mask': '(True)', 'robust': '(True)'}), '(frac=0.3, mask=True, robust=True)\n', (2885, 2919), True, 'import nipype.interfaces.fsl as fsl\n'), ((7792, 7881), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['in_file', 'ref_num', 'in_bvec', 'in_bval', 'in_mask']"}), "(fields=['in_file', 'ref_num', 'in_bvec', 'in_bval',\n 'in_mask'])\n", (7813, 7881), True, 'import nipype.interfaces.utility as niu\n'), ((7941, 8082), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'function': 'hmc_split', 'input_names': "['in_file', 'in_bval', 'ref_num']", 'output_names': "['out_ref', 'out_mov', 'out_bval', 'volid']"}), "(function=hmc_split, input_names=['in_file', 'in_bval',\n 'ref_num'], output_names=['out_ref', 'out_mov', 'out_bval', 'volid'])\n", (7953, 8082), True, 'import nipype.interfaces.utility as niu\n'), ((8220, 8312), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['inlist', 'volid']", 'output_names': "['out']", 'function': 'insert_mat'}), "(input_names=['inlist', 'volid'], output_names=['out'],\n function=insert_mat)\n", (8232, 8312), True, 'import nipype.interfaces.utility as niu\n'), ((8396, 8501), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_bvec', 'in_matrix']", 'output_names': "['out_file']", 'function': 'rotate_bvecs'}), "(input_names=['in_bvec', 'in_matrix'], output_names=['out_file'\n ], function=rotate_bvecs)\n", (8408, 8501), True, 'import nipype.interfaces.utility as niu\n'), ((8589, 8655), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['out_file', 'out_bvec', 'out_xfms']"}), "(fields=['out_file', 'out_bvec', 'out_xfms'])\n", (8610, 8655), True, 'import nipype.interfaces.utility as niu\n'), ((12161, 12235), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['in_file', 'in_bval', 'in_mask', 'in_xfms']"}), "(fields=['in_file', 'in_bval', 'in_mask', 'in_xfms'])\n", (12182, 12235), True, 'import nipype.interfaces.utility as niu\n'), ((12300, 12399), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_dwi', 'in_bval']", 'output_names': "['out_file']", 'function': 'b0_average'}), "(input_names=['in_dwi', 'in_bval'], output_names=['out_file'],\n function=b0_average)\n", (12312, 12399), True, 'import nipype.interfaces.utility as niu\n'), ((12477, 12584), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_dwi', 'in_bval', 'b']", 'output_names': "['out_file']", 'function': 'extract_bval'}), "(input_names=['in_dwi', 'in_bval', 'b'], output_names=[\n 'out_file'], function=extract_bval)\n", (12489, 12584), True, 'import nipype.interfaces.utility as niu\n'), ((12761, 12793), 'nipype.interfaces.fsl.BinaryMaths', 'fsl.BinaryMaths', ([], {'operation': '"""mul"""'}), "(operation='mul')\n", (12776, 12793), True, 'import nipype.interfaces.fsl as fsl\n'), ((12901, 12926), 'nipype.interfaces.fsl.Threshold', 'fsl.Threshold', ([], {'thresh': '(0.0)'}), '(thresh=0.0)\n', (12914, 12926), True, 'import nipype.interfaces.fsl as fsl\n'), ((13018, 13042), 'nipype.interfaces.fsl.Split', 'fsl.Split', ([], {'dimension': '"""t"""'}), "(dimension='t')\n", (13027, 13042), True, 'import nipype.interfaces.fsl as fsl\n'), ((13084, 13188), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_bval', 'in_xfms']", 'output_names': "['out_files']", 'function': 
'recompose_xfm'}), "(input_names=['in_bval', 'in_xfms'], output_names=['out_files'],\n function=recompose_xfm)\n", (13096, 13188), True, 'import nipype.interfaces.utility as niu\n'), ((13273, 13391), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_dwi', 'in_bval', 'in_corrected']", 'output_names': "['out_file']", 'function': 'recompose_dwi'}), "(input_names=['in_dwi', 'in_bval', 'in_corrected'],\n output_names=['out_file'], function=recompose_dwi)\n", (13285, 13391), True, 'import nipype.interfaces.utility as niu\n'), ((13453, 13507), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['out_file', 'out_xfms']"}), "(fields=['out_file', 'out_xfms'])\n", (13474, 13507), True, 'import nipype.interfaces.utility as niu\n'), ((16923, 17013), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['in_file', 'in_bval', 'in_mask', 'alt_file', 'ref_num']"}), "(fields=['in_file', 'in_bval', 'in_mask', 'alt_file',\n 'ref_num'])\n", (16944, 17013), True, 'import nipype.interfaces.utility as niu\n'), ((17102, 17167), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['out_file', 'out_vsm', 'out_warp']"}), "(fields=['out_file', 'out_vsm', 'out_warp'])\n", (17123, 17167), True, 'import nipype.interfaces.utility as niu\n'), ((17235, 17259), 'nipype.interfaces.fsl.ExtractROI', 'fsl.ExtractROI', ([], {'t_size': '(1)'}), '(t_size=1)\n', (17249, 17259), True, 'import nipype.interfaces.fsl as fsl\n'), ((17297, 17321), 'nipype.interfaces.fsl.ExtractROI', 'fsl.ExtractROI', ([], {'t_size': '(1)'}), '(t_size=1)\n', (17311, 17321), True, 'import nipype.interfaces.fsl as fsl\n'), ((17360, 17372), 'nipype.interfaces.utility.Merge', 'niu.Merge', (['(2)'], {}), '(2)\n', (17369, 17372), True, 'import nipype.interfaces.utility as niu\n'), ((17413, 17437), 'nipype.interfaces.fsl.Merge', 'fsl.Merge', ([], {'dimension': '"""t"""'}), "(dimension='t')\n", (17422, 17437), True, 'import nipype.interfaces.fsl as fsl\n'), ((17478, 17489), 'nipype.interfaces.fsl.TOPUP', 'fsl.TOPUP', ([], {}), '()\n', (17487, 17489), True, 'import nipype.interfaces.fsl as fsl\n'), ((17806, 17848), 'nipype.interfaces.fsl.ApplyTOPUP', 'fsl.ApplyTOPUP', ([], {'in_index': '[1]', 'method': '"""jac"""'}), "(in_index=[1], method='jac')\n", (17820, 17848), True, 'import nipype.interfaces.fsl as fsl\n'), ((19979, 20004), 'numpy.where', 'np.where', (['(bval <= lowbval)'], {}), '(bval <= lowbval)\n', (19987, 20004), True, 'import numpy as np\n'), ((20038, 20056), 'nipype.interfaces.base.isdefined', 'isdefined', (['ref_num'], {}), '(ref_num)\n', (20047, 20056), False, 'from nipype.interfaces.base import isdefined\n'), ((21998, 22010), 'nipype.interfaces.base.traits.Int', 'traits.Int', ([], {}), '()\n', (22008, 22010), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((27120, 27212), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['reference', 'in_file', 'ref_mask', 'in_xfms', 'in_bval']"}), "(fields=['reference', 'in_file', 'ref_mask', 'in_xfms',\n 'in_bval'])\n", (27141, 27212), True, 'import nipype.interfaces.utility as niu\n'), ((27299, 27418), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_bval', 'in_xfms', 'excl_nodiff']", 'output_names': "['init_xfms']", 'function': '_checkinitxfm'}), "(input_names=['in_bval', 'in_xfms', 'excl_nodiff'],\n output_names=['init_xfms'], function=_checkinitxfm)\n", (27311, 27418), True, 
'import nipype.interfaces.utility as niu\n'), ((27558, 27627), 'nipype.interfaces.fsl.maths.MathsCommand', 'fsl.maths.MathsCommand', ([], {'nan2zeros': '(True)', 'args': '"""-kernel sphere 5 -dilM"""'}), "(nan2zeros=True, args='-kernel sphere 5 -dilM')\n", (27580, 27627), True, 'import nipype.interfaces.fsl as fsl\n'), ((27688, 27712), 'nipype.interfaces.fsl.Split', 'fsl.Split', ([], {'dimension': '"""t"""'}), "(dimension='t')\n", (27697, 27712), True, 'import nipype.interfaces.fsl as fsl\n'), ((27755, 27767), 'nipype.interfaces.utility.Select', 'niu.Select', ([], {}), '()\n', (27765, 27767), True, 'import nipype.interfaces.utility as niu\n'), ((27871, 27983), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_file', 'in_mask', 'clip_limit']", 'output_names': "['out_file']", 'function': 'enhance'}), "(input_names=['in_file', 'in_mask', 'clip_limit'], output_names\n =['out_file'], function=enhance)\n", (27883, 27983), True, 'import nipype.interfaces.utility as niu\n'), ((28111, 28208), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_file', 'in_mask']", 'output_names': "['out_file']", 'function': 'enhance'}), "(input_names=['in_file', 'in_mask'], output_names=['out_file'],\n function=enhance)\n", (28123, 28208), True, 'import nipype.interfaces.utility as niu\n'), ((28317, 28341), 'nipype.interfaces.fsl.FLIRT', 'fsl.FLIRT', ([], {}), '(**flirt_param)\n', (28326, 28341), True, 'import nipype.interfaces.fsl as fsl\n'), ((28453, 28478), 'nipype.interfaces.fsl.Threshold', 'fsl.Threshold', ([], {'thresh': '(0.0)'}), '(thresh=0.0)\n', (28466, 28478), True, 'import nipype.interfaces.fsl as fsl\n'), ((28569, 28593), 'nipype.interfaces.fsl.Merge', 'fsl.Merge', ([], {'dimension': '"""t"""'}), "(dimension='t')\n", (28578, 28593), True, 'import nipype.interfaces.fsl as fsl\n'), ((28638, 28692), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['out_file', 'out_xfms']"}), "(fields=['out_file', 'out_xfms'])\n", (28659, 28692), True, 'import nipype.interfaces.utility as niu\n'), ((7740, 7765), 'nipype.workflows.data.get_flirt_schedule', 'get_flirt_schedule', (['"""hmc"""'], {}), "('hmc')\n", (7758, 7765), False, 'from nipype.workflows.data import get_flirt_schedule\n'), ((12059, 12084), 'nipype.workflows.data.get_flirt_schedule', 'get_flirt_schedule', (['"""ecc"""'], {}), "('ecc')\n", (12077, 12084), False, 'from nipype.workflows.data import get_flirt_schedule\n'), ((20399, 20465), 'numpy.concatenate', 'np.concatenate', (['(data[..., :volid], data[..., volid + 1:])'], {'axis': '(3)'}), '((data[..., :volid], data[..., volid + 1:]), axis=3)\n', (20413, 20465), True, 'import numpy as np\n'), ((20513, 20556), 'numpy.hstack', 'np.hstack', (['(bval[:volid], bval[volid + 1:])'], {}), '((bval[:volid], bval[volid + 1:]))\n', (20522, 20556), True, 'import numpy as np\n'), ((25700, 25736), 'nipype.interfaces.base.isdefined', 'isdefined', (['self.inputs.bspline_order'], {}), '(self.inputs.bspline_order)\n', (25709, 25736), False, 'from nipype.interfaces.base import isdefined\n'), ((25973, 26017), 'nipype.interfaces.base.isdefined', 'isdefined', (['self.inputs.convergence_threshold'], {}), '(self.inputs.convergence_threshold)\n', (25982, 26017), False, 'from nipype.interfaces.base import isdefined\n'), ((26724, 26757), 'nipype.interfaces.base.isdefined', 'isdefined', (['self.inputs.bias_image'], {}), '(self.inputs.bias_image)\n', (26733, 26757), False, 'from nipype.interfaces.base import isdefined\n'), ((24806, 24823), 
'nipype.interfaces.base.isdefined', 'isdefined', (['output'], {}), '(output)\n', (24815, 24823), False, 'from nipype.interfaces.base import isdefined\n'), ((24856, 24895), 'nipype.utils.filemanip.split_filename', 'split_filename', (['self.inputs.input_image'], {}), '(self.inputs.input_image)\n', (24870, 24895), False, 'from nipype.utils.filemanip import split_filename\n'), ((25070, 25087), 'nipype.interfaces.base.isdefined', 'isdefined', (['output'], {}), '(output)\n', (25079, 25087), False, 'from nipype.interfaces.base import isdefined\n'), ((25120, 25159), 'nipype.utils.filemanip.split_filename', 'split_filename', (['self.inputs.input_image'], {}), '(self.inputs.input_image)\n', (25134, 25159), False, 'from nipype.utils.filemanip import split_filename\n'), ((25383, 25416), 'nipype.interfaces.base.isdefined', 'isdefined', (['self.inputs.bias_image'], {}), '(self.inputs.bias_image)\n', (25392, 25416), False, 'from nipype.interfaces.base import isdefined\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 2 14:02:26 2016
MIT License
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from zeex.core.ctrls.bookmark import BookmarkManager
from zeex.core.compat import QtGui, QtCore
Qt = QtCore.Qt
class BookMarkModel(QtGui.QStandardItemModel):
"""
A QStandardItemModel representing the data
stored in a BookmarkManager.
"""
header = ['name', 'file_path']
def __init__(self, manager: BookmarkManager):
QtGui.QStandardItemModel.__init__(self)
self.manager = manager
self.header = self.header.copy()
def headerData(self, col, orientation, role):
if role != Qt.DisplayRole:
return None
if orientation == Qt.Horizontal:
return self.header[col]
elif orientation == Qt.Vertical:
return col
return None
    def rowCount(self, parent=QtCore.QModelIndex()):
        return len(self.manager.names)
    def columnCount(self, parent=QtCore.QModelIndex()):
        return len(self.header)
def data(self, index, role):
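        # resolve the bookmark for the requested row and return the requested column as text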
if not index.isValid():
return None
elif role not in (Qt.DisplayRole, Qt.EditRole):
return None
mark = self.manager.names[index.row()]
row = self.manager.bookmark(mark)
        name = self.header[index.column()]
return str(getattr(row, name))
    def flags(self, index):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsDragEnabled | Qt.ItemIsDropEnabled
|
[
"zeex.core.compat.QtGui.QStandardItemModel.__init__"
] |
[((1483, 1522), 'zeex.core.compat.QtGui.QStandardItemModel.__init__', 'QtGui.QStandardItemModel.__init__', (['self'], {}), '(self)\n', (1516, 1522), False, 'from zeex.core.compat import QtGui, QtCore\n')]
|
import os
import glob
import numpy as np
import pandas as pd
# geospatial libaries
from osgeo import gdal, osr
from xml.etree import ElementTree
def read_geo_info(fname):
""" This function takes as input the geotiff name and the path of the
folder that the images are stored, reads the geographic information of
the image
Parameters
----------
fname : string
path and file name of a geotiff image
Returns
-------
spatialRef : string
osr.SpatialReference in well known text
geoTransform : tuple, size=(8,1)
affine transformation coefficients, but also giving the image dimensions
targetprj : osgeo.osr.SpatialReference() object
coordinate reference system (CRS)
rows : integer
number of rows in the image, that is its height
cols : integer
        number of columns in the image, that is its width
bands : integer
number of bands in the image, that is its depth
See Also
--------
read_geo_image : basic function to import geographic imagery data
"""
assert len(glob.glob(fname)) != 0, ('file does not seem to be present')
img = gdal.Open(fname)
spatialRef = img.GetProjection()
geoTransform = img.GetGeoTransform()
targetprj = osr.SpatialReference(wkt=img.GetProjection())
rows = img.RasterYSize
cols = img.RasterXSize
bands = img.RasterCount
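    # append the number of rows and columns, so the tuple also conveys the image dimensions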
geoTransform += (rows, cols,)
return spatialRef, geoTransform, targetprj, rows, cols, bands
def read_geo_image(fname, boi=np.array([])):
""" This function takes as input the geotiff name and the path of the
folder that the images are stored, reads the image and returns the data as
an array
Parameters
----------
fname : string
geotiff file name and path.
boi : numpy.array, size=(k,1)
bands of interest, if a multispectral image is read, a selection can
be specified
Returns
-------
data : numpy.array, size=(m,n), ndim=2
data array of the band
spatialRef : string
osr.SpatialReference in well known text
geoTransform : tuple, size=(6,1)
affine transformation coefficients.
targetprj : osgeo.osr.SpatialReference() object
coordinate reference system (CRS)
See Also
--------
make_geo_im : basic function to write out geographic data
read_geo_info : basic function to get meta data of geographic imagery
Example
-------
>>> import os
>>> fpath = os.path.join(os.getcwd(), "data.jp2" )
>>> (I, spatialRef, geoTransform, targetPrj) = read_geo_image(fpath)
>>> I_ones = np.zeros(I.shape, dtype=bool)
>>> make_geo_im(I_ones, geoTransformM, spatialRefM, "ones.tif")
    """
assert len(glob.glob(fname)) != 0, ('file does not seem to be present')
img = gdal.Open(fname)
# imagery can consist of multiple bands
if len(boi) == 0:
for counter in range(img.RasterCount):
band = np.array(img.GetRasterBand(counter+1).ReadAsArray())
data = band if counter == 0 else np.dstack((data,
band[:,:,np.newaxis]))
else:
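        # read only the requested bands; GDAL band numbering is 1-based, hence band_id+1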
num_bands = img.RasterCount
assert (np.max(boi)+1)<=num_bands, 'bands of interest is out of range'
        for counter, band_id in enumerate(boi):
band = np.array(img.GetRasterBand(band_id+1).ReadAsArray())
data = band if counter==0 else np.dstack((data,
band[:, :, np.newaxis]))
spatialRef = img.GetProjection()
geoTransform = img.GetGeoTransform()
targetprj = osr.SpatialReference(wkt=img.GetProjection())
return data, spatialRef, geoTransform, targetprj
# output functions
def make_geo_im(I, R, crs, fName, meta_descr='project Eratosthenes',
no_dat=np.nan, sun_angles='az:360-zn:90', date_created='-0276-00-00'):
""" Create georeferenced tiff file (a GeoTIFF)
Parameters
----------
I : numpy.array, size=(m,n)
band image
R : list, size=(1,6)
GDAL georeference transform of an image
crs : string
coordinate reference string
    fName : string
filename for the image with extension
no_dat : datatype, integer
no data value
sun_angles : string
string giving meta data about the illumination angles
date_created : string
        string giving the acquisition date in YYYY-MM-DD
Example
-------
>>> import os
>>> fpath = os.path.join(os.getcwd(), "data.jp2")
>>> (I, spatialRef, geoTransform, targetPrj) = read_geo_image(fpath)
>>> I_ones = np.zeros(I.shape, dtype=bool)
    >>> make_geo_im(I_ones, geoTransformM, spatialRefM, "ones.tif")
"""
drv = gdal.GetDriverByName("GTiff") # export image
if I.ndim == 3:
bands=I.shape[2]
else:
bands = 1
# make it type dependent
if I.dtype == 'float64':
ds = drv.Create(fName,xsize=I.shape[1], ysize=I.shape[0],bands=bands,
eType=gdal.GDT_Float64)
elif I.dtype == 'float32':
ds = drv.Create(fName,xsize=I.shape[1], ysize=I.shape[0],bands=bands,
eType=gdal.GDT_Float32)
elif I.dtype == 'bool':
ds = drv.Create(fName, xsize=I.shape[1], ysize=I.shape[0], bands=bands,
eType=gdal.GDT_Byte)
else:
ds = drv.Create(fName, xsize=I.shape[1], ysize=I.shape[0], bands=bands,
eType=gdal.GDT_Int32)
# set metadata in datasource
ds.SetMetadata({'TIFFTAG_SOFTWARE':'dhdt v0.1',
'TIFFTAG_ARTIST':'bas altena and team Atlas',
'TIFFTAG_COPYRIGHT': 'contains modified Copernicus data',
'TIFFTAG_IMAGEDESCRIPTION': meta_descr,
'TIFFTAG_RESOLUTIONUNIT' : sun_angles,
'TIFFTAG_DATETIME': date_created})
# set georeferencing metadata
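    # the transform may carry the extra (rows, cols) entries appended by read_geo_info;
    # GDAL expects only the six affine coefficients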
if len(R)!=6: R = R[:6]
ds.SetGeoTransform(R)
if not isinstance(crs, str):
crs = crs.ExportToWkt()
ds.SetProjection(crs)
if I.ndim == 3:
for count in np.arange(1,I.shape[2]+1,1):
band = ds.GetRasterBand(int(count))
band.WriteArray(I[:,:,count-1],0,0)
if count==1:
band.SetNoDataValue(no_dat)
band = None
else:
ds.GetRasterBand(1).WriteArray(I)
ds.GetRasterBand(1).SetNoDataValue(no_dat)
ds = None
del ds
def make_multispectral_vrt(df, fpath=None, fname='multispec.vrt'):
""" virtual raster tile (VRT) is a description of datasets written in an XML
format, it eases the display of multi-spectral data or other means.
Parameters
----------
df : pandas.DataFrame
organization of the different spectral bands
fpath : string
path of the directory of interest
fname : string
file name of the virtual raster tile
"""
assert isinstance(df, pd.DataFrame), ('please provide a dataframe')
assert 'filepath' in df, ('please first run "get_S2_image_locations"'+
' to find the proper file locations')
if fpath is None:
fpath = os.path.commonpath(df.filepath.tolist())
ffull = os.path.join(fpath, fname)
vrt_options = gdal.BuildVRTOptions(resampleAlg=gdal.GRA_NearestNeighbour,
addAlpha=False,
separate=True,
srcNodata=0)
my_vrt = gdal.BuildVRT(ffull, [f+'.jp2' for f in df['filepath']],
options=vrt_options)
my_vrt = None
# modify the vrt-file to include band names
tree = ElementTree.parse(ffull)
root = tree.getroot()
for idx, band in enumerate(root.iter("VRTRasterBand")):
description = ElementTree.SubElement(band, "Description")
description.text = df.common_name[idx]
tree.write(ffull) # update the file on disk
return
|
[
"numpy.dstack",
"xml.etree.ElementTree.parse",
"os.path.join",
"numpy.max",
"numpy.array",
"numpy.arange",
"osgeo.gdal.BuildVRTOptions",
"xml.etree.ElementTree.SubElement",
"glob.glob",
"osgeo.gdal.Open",
"osgeo.gdal.GetDriverByName",
"osgeo.gdal.BuildVRT"
] |
[((1163, 1179), 'osgeo.gdal.Open', 'gdal.Open', (['fname'], {}), '(fname)\n', (1172, 1179), False, 'from osgeo import gdal, osr\n'), ((1538, 1550), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1546, 1550), True, 'import numpy as np\n'), ((2892, 2908), 'osgeo.gdal.Open', 'gdal.Open', (['fname'], {}), '(fname)\n', (2901, 2908), False, 'from osgeo import gdal, osr\n'), ((4848, 4877), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (4868, 4877), False, 'from osgeo import gdal, osr\n'), ((7354, 7380), 'os.path.join', 'os.path.join', (['fpath', 'fname'], {}), '(fpath, fname)\n', (7366, 7380), False, 'import os\n'), ((7399, 7506), 'osgeo.gdal.BuildVRTOptions', 'gdal.BuildVRTOptions', ([], {'resampleAlg': 'gdal.GRA_NearestNeighbour', 'addAlpha': '(False)', 'separate': '(True)', 'srcNodata': '(0)'}), '(resampleAlg=gdal.GRA_NearestNeighbour, addAlpha=False,\n separate=True, srcNodata=0)\n', (7419, 7506), False, 'from osgeo import gdal, osr\n'), ((7633, 7719), 'osgeo.gdal.BuildVRT', 'gdal.BuildVRT', (['ffull', "[(f + '.jp2') for f in df['filepath']]"], {'options': 'vrt_options'}), "(ffull, [(f + '.jp2') for f in df['filepath']], options=\n vrt_options)\n", (7646, 7719), False, 'from osgeo import gdal, osr\n'), ((7816, 7840), 'xml.etree.ElementTree.parse', 'ElementTree.parse', (['ffull'], {}), '(ffull)\n', (7833, 7840), False, 'from xml.etree import ElementTree\n'), ((6233, 6264), 'numpy.arange', 'np.arange', (['(1)', '(I.shape[2] + 1)', '(1)'], {}), '(1, I.shape[2] + 1, 1)\n', (6242, 6264), True, 'import numpy as np\n'), ((7949, 7992), 'xml.etree.ElementTree.SubElement', 'ElementTree.SubElement', (['band', '"""Description"""'], {}), "(band, 'Description')\n", (7971, 7992), False, 'from xml.etree import ElementTree\n'), ((1091, 1107), 'glob.glob', 'glob.glob', (['fname'], {}), '(fname)\n', (1100, 1107), False, 'import glob\n'), ((2820, 2836), 'glob.glob', 'glob.glob', (['fname'], {}), '(fname)\n', (2829, 2836), False, 'import glob\n'), ((3139, 3180), 'numpy.dstack', 'np.dstack', (['(data, band[:, :, np.newaxis])'], {}), '((data, band[:, :, np.newaxis]))\n', (3148, 3180), True, 'import numpy as np\n'), ((3297, 3308), 'numpy.max', 'np.max', (['boi'], {}), '(boi)\n', (3303, 3308), True, 'import numpy as np\n'), ((3523, 3564), 'numpy.dstack', 'np.dstack', (['(data, band[:, :, np.newaxis])'], {}), '((data, band[:, :, np.newaxis]))\n', (3532, 3564), True, 'import numpy as np\n')]
|
#
# import modules
#
from ahvl.helper import AhvlMsg, AhvlHelper
import subprocess
#
# helper/message
#
msg = AhvlMsg()
hlp = AhvlHelper()
#
# process
#
class Process(object):
def __init__(self, proc=None, cmd=[], failonstderr=True, shell=False):
# set process name and command
self.setprocess(proc) # set process name
self.setcmd(cmd) # set command
self.failonstderr = failonstderr # fail on stderr
self.stderr = None
self.stdout = None
self.stderrlines = []
self.stdoutlines = []
self.shell = shell
# function to remove sensitive information from commands
# by deleting the arguments from a copy of the list
def __get_safe_args(self):
# set list for unknown processes
sensitive = []
# check for which process the arguments need to be cleaned
if self.proc == "ssh-keygen":
sensitive = []#["-f", "-N", "-P"]
if self.proc == "openssl":
sensitive = ["pass:", "-passin", "-passout"]
if self.proc == "puttygen":
sensitive = ["--password-file"]
if self.proc == "gpg":
sensitive = ["--passphrase-file"]
# create a copy of the list to prevent iteration issues when removing items
safeargs = list(self.cmd)
for a in self.cmd:
if a.strip('"').strip("'").startswith(tuple(sensitive)):
safeargs.remove(a)
# return safe to print argument list
return safeargs
# useless lines removed
def __clean_stderr(self):
# remove empty lines
self.stderrlines = list(filter(None, self.stderrlines))
# function to fail on stderr messages
def __fail_on_stderr(self):
# clean output
self.__clean_stderr()
# check if stderr contains any lines
if len(self.stderrlines) > 0 and self.failonstderr:
msg.fail("the process generated an error:\n{}".format("\n".join(self.stderrlines)))
# set stderr and stdout
def __set_result(self):
# convert stdout and stderr to individual lines
self.stdoutlines = self.stdout.rstrip('\n').split("\n")
self.stderrlines = self.stderr.rstrip('\n').split("\n")
# set process to run; accepts known processes only
def setprocess(self, proc):
# sanity check
accepted = ["ssh-keygen", "openssl", "puttygen", "gpg"]
if not proc in accepted:
msg.fail("given process name [{}] is unknown".format(proc))
# set process and return
self.proc = proc
return self
# set command to run
def setcmd(self, cmd):
self.cmd = cmd
return self
# determine if process should fail if any stderr messages are generated
def setfailonstderr(self, fail):
self.failonstderr = fail
# return stdout messages
def getstdout(self):
return self.stdoutlines
# return stderr messages
def getstderr(self):
return self.stderrlines
# run the process
def run(self):
# output debug info
if self.shell == True:
msg.vvvv("about to run the following subprocess (shell): [{}]".format(self.proc))
msg.vvvv("[{}]".format(self.cmd))
else:
# remove sensitive arguments before printing debug info
printable = self.__get_safe_args()
msg.vvvv("about to run the following subprocess (sensitive information has been removed): [{}]".format(self.proc))
msg.vvvv("[{}]".format(subprocess.list2cmdline(printable)))
# spawn subprocess
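        # universal_newlines=True returns stdout/stderr as text so they can be split into lines later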
        sp = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                              shell=self.shell, universal_newlines=True)
        (self.stdout, self.stderr) = sp.communicate()
rc = sp.returncode # get the exit code
# check exit/return code
if rc != 0:
msg.fail("an error occurred for [{}]; the process exited with code [{}]\n".format(self.proc, rc) +
"the process provided the following output: [{}]".format(self.stderr))
# set result and fail on error
self.__set_result()
self.__fail_on_stderr()
# return the result
return self
|
[
"subprocess.Popen",
"ahvl.helper.AhvlMsg",
"subprocess.list2cmdline",
"ahvl.helper.AhvlHelper"
] |
[((111, 120), 'ahvl.helper.AhvlMsg', 'AhvlMsg', ([], {}), '()\n', (118, 120), False, 'from ahvl.helper import AhvlMsg, AhvlHelper\n'), ((127, 139), 'ahvl.helper.AhvlHelper', 'AhvlHelper', ([], {}), '()\n', (137, 139), False, 'from ahvl.helper import AhvlMsg, AhvlHelper\n'), ((3736, 3832), 'subprocess.Popen', 'subprocess.Popen', (['self.cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'shell': 'self.shell'}), '(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=self.shell)\n', (3752, 3832), False, 'import subprocess\n'), ((3646, 3680), 'subprocess.list2cmdline', 'subprocess.list2cmdline', (['printable'], {}), '(printable)\n', (3669, 3680), False, 'import subprocess\n')]
|
import json
import sys
from flask import (Blueprint, Markup, flash, g, jsonify, redirect,
render_template, request, session, url_for)
from flask_login import current_user, login_user, logout_user
from app.main.admin import (get_admin_control_by_id, get_admin_control_by_name,
get_admin_controls)
from app.main.database.tables import User
from app.main.forms import LoginForm, RegisterForm
from app.main.helpers import svg_contents
from app.main.roles import change_user_permission, set_user_permission
from app.main.users import (delete_user, get_all_users_with_permissions,
get_user, is_admin, register_user)
bp = Blueprint('main', __name__)
@bp.route('/', methods=['GET', 'POST'])
def index():
'''Login form to enter a room.'''
if current_user.is_authenticated:
return redirect(url_for('.chat'))
admin = is_admin(g.session, current_user)
form = LoginForm()
if form.validate_on_submit():
username = form.username.data
password = form.password.data
try:
user = get_user(g.session, username, password)
login_user(user)
session['username'] = username
session['name'] = f'{user.first_name} {user.last_name}'
session['room'] = form.room.data
return redirect(url_for('.chat'))
except Exception as err:
return render_template('index.html',
msg=str(err),
form=form,
admin=admin,
svg=Markup(svg_contents('./app/static/socks.svg')))
elif request.method == 'GET':
        form.username.data = session.get('username', '')
form.room.data = session.get('room', '')
return render_template('index.html', form=form, admin=admin, svg=Markup(svg_contents('./app/static/socks.svg')))
@bp.route('/about')
def about():
admin = is_admin(g.session, current_user)
return render_template('about.html',
svg=Markup(svg_contents('./app/static/socks.svg')),
admin=admin,
github=Markup(svg_contents('./app/static/github.svg')))
@bp.route('/admin', methods=['GET', 'PATCH', 'DELETE'])
def admin():
admin = is_admin(g.session, current_user)
if admin:
if request.method == 'GET':
users = get_all_users_with_permissions(g.session)
controls = get_admin_controls(g.session)
return render_template('admin.html',
svg=Markup(svg_contents(
'./app/static/socks.svg')),
trash=Markup(svg_contents(
'./app/static/trash.svg')),
admin=admin,
users=users,
controls=controls)
elif request.method == 'PATCH':
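            # a PATCH request either toggles an admin control by ID or flips a user's permission level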
if request.json.get('control'):
try:
control_id = request.json.get('control')
get_admin_control_by_id(g.session, control_id).switch()
g.session.commit()
                    return jsonify({'msg': f'Control ID: {control_id} successfully changed'}), 200
except:
return jsonify({'msg': 'Something went wrong changing the control'}), 500
elif request.json.get('user'):
try:
user_id = request.json.get('user')
change_user_permission(g.session, user_id)
g.session.commit()
return jsonify({'msg': f'User permissions changed for ID {user_id}'}), 200
except Exception as err:
return jsonify({'msg': str(err)}), 500
except:
return jsonify({'msg': 'Something went wrong changing the user permission'}), 500
else:
return jsonify({'msg': 'A known value was not supplied'}), 400
else: # request.method == 'DELETE':
if request.json.get('user'):
user_id = request.json.get('user')
delete_user(g.session, user_id)
g.session.commit()
return jsonify({'msg': f'User with ID {user_id} successfully deleted'}), 200
else:
return jsonify({'msg': 'A known value was not supplied'}), 400
else:
return 'Access denied', 401
@bp.route('/chat')
def chat():
'''Chat room. The user's name and room must be stored in the session.'''
admin = is_admin(g.session, current_user)
username = session.get('username', '')
name = session.get('name', '')
room = session.get('room', '')
if name == '' or room == '':
flash('You must be logged in to access the chatroom')
return redirect(url_for('.index'))
return render_template('chat.html',
name=name,
room=room,
admin=admin,
svg=Markup(svg_contents('./app/static/socks.svg')),
send_logo=Markup(svg_contents('./app/static/send.svg')))
@bp.route('/logout', methods=['GET'])
def logout():
admin = is_admin(g.session, current_user)
logout_user()
session.clear()
flash('You have been successfully logged out')
return redirect(url_for('.index'))
@bp.route('/register', methods=['GET', 'POST'])
def register():
admin = is_admin(g.session, current_user)
form = RegisterForm()
# Check if 'new_users' is turned on or off
if not get_admin_control_by_name(g.session, 'new_users').value:
return render_template('register.html',
form=form,
admin=admin,
msg='New user registration has been disabled at this time',
svg=Markup(svg_contents('./app/static/socks.svg')))
if request.method == 'GET':
return render_template('register.html', form=form, svg=Markup(svg_contents('./app/static/socks.svg')))
elif request.method == 'POST':
if form.validate_on_submit():
username = request.form.get('username')
password = request.form.get('password')
password_conf = request.form.get('password_conf')
first_name = request.form.get('first_name')
last_name = request.form.get('last_name')
if password != password_conf:
return render_template('register.html',
form=form,
admin=admin,
msg='Passwords did not match',
svg=Markup(svg_contents('./app/static/socks.svg')))
try:
new_user = register_user(
g.session, username, password, first_name, last_name)
try:
# add the new user to the database
g.session.add(new_user)
g.session.commit()
except:
g.session.rollback()
raise Exception('Error adding new user')
# Set user's role as 'user'
user_permission = set_user_permission(
g.session, 'user', new_user.id)
try:
# add the new user's related permission to the database
g.session.add(user_permission)
g.session.commit()
except:
g.session.rollback()
raise Exception('Error setting user permissions')
except Exception as err:
return render_template('register.html',
form=form,
admin=admin,
msg=str(err),
svg=Markup(svg_contents('./app/static/socks.svg')))
except:
return render_template('register.html',
form=form,
admin=admin,
msg=f'Unexpected error: {sys.exc_info()[0]}',
svg=Markup(svg_contents('./app/static/socks.svg')))
else:
return render_template('register.html',
form=form,
admin=admin,
msg='Not all required fields provided',
svg=Markup(svg_contents('./app/static/socks.svg')))
flash('Registration successful')
return redirect(url_for('.index'))
|
[
"flask.flash",
"app.main.users.register_user",
"app.main.users.get_user",
"app.main.users.get_all_users_with_permissions",
"flask.request.form.get",
"app.main.admin.get_admin_control_by_name",
"app.main.users.delete_user",
"app.main.users.is_admin",
"flask.url_for",
"flask.jsonify",
"sys.exc_info",
"app.main.helpers.svg_contents",
"flask.g.session.commit",
"flask.Blueprint",
"app.main.forms.LoginForm",
"flask_login.login_user",
"flask.g.session.add",
"flask_login.logout_user",
"app.main.admin.get_admin_controls",
"app.main.forms.RegisterForm",
"flask.g.session.rollback",
"flask.request.json.get",
"app.main.admin.get_admin_control_by_id",
"flask.session.get",
"app.main.roles.change_user_permission",
"app.main.roles.set_user_permission",
"flask.session.clear"
] |
[((693, 720), 'flask.Blueprint', 'Blueprint', (['"""main"""', '__name__'], {}), "('main', __name__)\n", (702, 720), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((906, 939), 'app.main.users.is_admin', 'is_admin', (['g.session', 'current_user'], {}), '(g.session, current_user)\n', (914, 939), False, 'from app.main.users import delete_user, get_all_users_with_permissions, get_user, is_admin, register_user\n'), ((951, 962), 'app.main.forms.LoginForm', 'LoginForm', ([], {}), '()\n', (960, 962), False, 'from app.main.forms import LoginForm, RegisterForm\n'), ((1988, 2021), 'app.main.users.is_admin', 'is_admin', (['g.session', 'current_user'], {}), '(g.session, current_user)\n', (1996, 2021), False, 'from app.main.users import delete_user, get_all_users_with_permissions, get_user, is_admin, register_user\n'), ((2348, 2381), 'app.main.users.is_admin', 'is_admin', (['g.session', 'current_user'], {}), '(g.session, current_user)\n', (2356, 2381), False, 'from app.main.users import delete_user, get_all_users_with_permissions, get_user, is_admin, register_user\n'), ((4717, 4750), 'app.main.users.is_admin', 'is_admin', (['g.session', 'current_user'], {}), '(g.session, current_user)\n', (4725, 4750), False, 'from app.main.users import delete_user, get_all_users_with_permissions, get_user, is_admin, register_user\n'), ((4766, 4793), 'flask.session.get', 'session.get', (['"""username"""', '""""""'], {}), "('username', '')\n", (4777, 4793), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((4805, 4828), 'flask.session.get', 'session.get', (['"""name"""', '""""""'], {}), "('name', '')\n", (4816, 4828), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((4840, 4863), 'flask.session.get', 'session.get', (['"""room"""', '""""""'], {}), "('room', '')\n", (4851, 4863), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((5387, 5420), 'app.main.users.is_admin', 'is_admin', (['g.session', 'current_user'], {}), '(g.session, current_user)\n', (5395, 5420), False, 'from app.main.users import delete_user, get_all_users_with_permissions, get_user, is_admin, register_user\n'), ((5425, 5438), 'flask_login.logout_user', 'logout_user', ([], {}), '()\n', (5436, 5438), False, 'from flask_login import current_user, login_user, logout_user\n'), ((5443, 5458), 'flask.session.clear', 'session.clear', ([], {}), '()\n', (5456, 5458), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((5463, 5509), 'flask.flash', 'flash', (['"""You have been successfully logged out"""'], {}), "('You have been successfully logged out')\n", (5468, 5509), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((5627, 5660), 'app.main.users.is_admin', 'is_admin', (['g.session', 'current_user'], {}), '(g.session, current_user)\n', (5635, 5660), False, 'from app.main.users import delete_user, get_all_users_with_permissions, get_user, is_admin, register_user\n'), ((5672, 5686), 'app.main.forms.RegisterForm', 'RegisterForm', ([], {}), '()\n', (5684, 5686), False, 'from app.main.forms import LoginForm, RegisterForm\n'), ((8873, 8905), 'flask.flash', 'flash', (['"""Registration successful"""'], {}), "('Registration successful')\n", 
(8878, 8905), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((4905, 4958), 'flask.flash', 'flash', (['"""You must be logged in to access the chatroom"""'], {}), "('You must be logged in to access the chatroom')\n", (4910, 4958), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((5530, 5547), 'flask.url_for', 'url_for', (['""".index"""'], {}), "('.index')\n", (5537, 5547), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((8926, 8943), 'flask.url_for', 'url_for', (['""".index"""'], {}), "('.index')\n", (8933, 8943), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((876, 892), 'flask.url_for', 'url_for', (['""".chat"""'], {}), "('.chat')\n", (883, 892), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((1105, 1144), 'app.main.users.get_user', 'get_user', (['g.session', 'username', 'password'], {}), '(g.session, username, password)\n', (1113, 1144), False, 'from app.main.users import delete_user, get_all_users_with_permissions, get_user, is_admin, register_user\n'), ((1157, 1173), 'flask_login.login_user', 'login_user', (['user'], {}), '(user)\n', (1167, 1173), False, 'from flask_login import current_user, login_user, logout_user\n'), ((1751, 1774), 'flask.session.get', 'session.get', (['"""name"""', '""""""'], {}), "('name', '')\n", (1762, 1774), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((1800, 1823), 'flask.session.get', 'session.get', (['"""room"""', '""""""'], {}), "('room', '')\n", (1811, 1823), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((2452, 2493), 'app.main.users.get_all_users_with_permissions', 'get_all_users_with_permissions', (['g.session'], {}), '(g.session)\n', (2482, 2493), False, 'from app.main.users import delete_user, get_all_users_with_permissions, get_user, is_admin, register_user\n'), ((2517, 2546), 'app.main.admin.get_admin_controls', 'get_admin_controls', (['g.session'], {}), '(g.session)\n', (2535, 2546), False, 'from app.main.admin import get_admin_control_by_id, get_admin_control_by_name, get_admin_controls\n'), ((4983, 5000), 'flask.url_for', 'url_for', (['""".index"""'], {}), "('.index')\n", (4990, 5000), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((5745, 5794), 'app.main.admin.get_admin_control_by_name', 'get_admin_control_by_name', (['g.session', '"""new_users"""'], {}), "(g.session, 'new_users')\n", (5770, 5794), False, 'from app.main.admin import get_admin_control_by_id, get_admin_control_by_name, get_admin_controls\n'), ((1358, 1374), 'flask.url_for', 'url_for', (['""".chat"""'], {}), "('.chat')\n", (1365, 1374), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((1900, 1938), 'app.main.helpers.svg_contents', 'svg_contents', (['"""./app/static/socks.svg"""'], {}), "('./app/static/socks.svg')\n", (1912, 1938), False, 'from app.main.helpers import svg_contents\n'), ((2101, 2139), 'app.main.helpers.svg_contents', 'svg_contents', (['"""./app/static/socks.svg"""'], {}), 
"('./app/static/socks.svg')\n", (2113, 2139), False, 'from app.main.helpers import svg_contents\n'), ((2223, 2262), 'app.main.helpers.svg_contents', 'svg_contents', (['"""./app/static/github.svg"""'], {}), "('./app/static/github.svg')\n", (2235, 2262), False, 'from app.main.helpers import svg_contents\n'), ((3057, 3084), 'flask.request.json.get', 'request.json.get', (['"""control"""'], {}), "('control')\n", (3073, 3084), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((4199, 4223), 'flask.request.json.get', 'request.json.get', (['"""user"""'], {}), "('user')\n", (4215, 4223), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((5196, 5234), 'app.main.helpers.svg_contents', 'svg_contents', (['"""./app/static/socks.svg"""'], {}), "('./app/static/socks.svg')\n", (5208, 5234), False, 'from app.main.helpers import svg_contents\n'), ((5281, 5318), 'app.main.helpers.svg_contents', 'svg_contents', (['"""./app/static/send.svg"""'], {}), "('./app/static/send.svg')\n", (5293, 5318), False, 'from app.main.helpers import svg_contents\n'), ((6349, 6377), 'flask.request.form.get', 'request.form.get', (['"""username"""'], {}), "('username')\n", (6365, 6377), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((6401, 6429), 'flask.request.form.get', 'request.form.get', (['"""password"""'], {}), "('password')\n", (6417, 6429), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((6458, 6491), 'flask.request.form.get', 'request.form.get', (['"""password_conf"""'], {}), "('password_conf')\n", (6474, 6491), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((6517, 6547), 'flask.request.form.get', 'request.form.get', (['"""first_name"""'], {}), "('first_name')\n", (6533, 6547), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((6572, 6601), 'flask.request.form.get', 'request.form.get', (['"""last_name"""'], {}), "('last_name')\n", (6588, 6601), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((3516, 3540), 'flask.request.json.get', 'request.json.get', (['"""user"""'], {}), "('user')\n", (3532, 3540), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((4251, 4275), 'flask.request.json.get', 'request.json.get', (['"""user"""'], {}), "('user')\n", (4267, 4275), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((4292, 4323), 'app.main.users.delete_user', 'delete_user', (['g.session', 'user_id'], {}), '(g.session, user_id)\n', (4303, 4323), False, 'from app.main.users import delete_user, get_all_users_with_permissions, get_user, is_admin, register_user\n'), ((4340, 4358), 'flask.g.session.commit', 'g.session.commit', ([], {}), '()\n', (4356, 4358), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((6069, 6107), 'app.main.helpers.svg_contents', 'svg_contents', (['"""./app/static/socks.svg"""'], {}), "('./app/static/socks.svg')\n", (6081, 6107), False, 'from app.main.helpers import svg_contents\n'), ((6212, 
6250), 'app.main.helpers.svg_contents', 'svg_contents', (['"""./app/static/socks.svg"""'], {}), "('./app/static/socks.svg')\n", (6224, 6250), False, 'from app.main.helpers import svg_contents\n'), ((7007, 7074), 'app.main.users.register_user', 'register_user', (['g.session', 'username', 'password', 'first_name', 'last_name'], {}), '(g.session, username, password, first_name, last_name)\n', (7020, 7074), False, 'from app.main.users import delete_user, get_all_users_with_permissions, get_user, is_admin, register_user\n'), ((7459, 7510), 'app.main.roles.set_user_permission', 'set_user_permission', (['g.session', '"""user"""', 'new_user.id'], {}), "(g.session, 'user', new_user.id)\n", (7478, 7510), False, 'from app.main.roles import change_user_permission, set_user_permission\n'), ((2642, 2680), 'app.main.helpers.svg_contents', 'svg_contents', (['"""./app/static/socks.svg"""'], {}), "('./app/static/socks.svg')\n", (2654, 2680), False, 'from app.main.helpers import svg_contents\n'), ((2771, 2809), 'app.main.helpers.svg_contents', 'svg_contents', (['"""./app/static/trash.svg"""'], {}), "('./app/static/trash.svg')\n", (2783, 2809), False, 'from app.main.helpers import svg_contents\n'), ((3140, 3167), 'flask.request.json.get', 'request.json.get', (['"""control"""'], {}), "('control')\n", (3156, 3167), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((3264, 3282), 'flask.g.session.commit', 'g.session.commit', ([], {}), '()\n', (3280, 3282), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((4382, 4446), 'flask.jsonify', 'jsonify', (["{'msg': f'User with ID {user_id} successfully deleted'}"], {}), "({'msg': f'User with ID {user_id} successfully deleted'})\n", (4389, 4446), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((4493, 4543), 'flask.jsonify', 'jsonify', (["{'msg': 'A known value was not supplied'}"], {}), "({'msg': 'A known value was not supplied'})\n", (4500, 4543), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((7192, 7215), 'flask.g.session.add', 'g.session.add', (['new_user'], {}), '(new_user)\n', (7205, 7215), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((7236, 7254), 'flask.g.session.commit', 'g.session.commit', ([], {}), '()\n', (7252, 7254), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((7649, 7679), 'flask.g.session.add', 'g.session.add', (['user_permission'], {}), '(user_permission)\n', (7662, 7679), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((7700, 7718), 'flask.g.session.commit', 'g.session.commit', ([], {}), '()\n', (7716, 7718), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((1647, 1685), 'app.main.helpers.svg_contents', 'svg_contents', (['"""./app/static/socks.svg"""'], {}), "('./app/static/socks.svg')\n", (1659, 1685), False, 'from app.main.helpers import svg_contents\n'), ((3310, 3375), 'flask.jsonify', 'jsonify', (["{'msg': f'Control ID: {control_id} successfull changed'}"], {}), "({'msg': f'Control ID: {control_id} successfull changed'})\n", (3317, 3375), False, 'from flask 
import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((3593, 3617), 'flask.request.json.get', 'request.json.get', (['"""user"""'], {}), "('user')\n", (3609, 3617), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((3638, 3680), 'app.main.roles.change_user_permission', 'change_user_permission', (['g.session', 'user_id'], {}), '(g.session, user_id)\n', (3660, 3680), False, 'from app.main.roles import change_user_permission, set_user_permission\n'), ((3701, 3719), 'flask.g.session.commit', 'g.session.commit', ([], {}), '()\n', (3717, 3719), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((4082, 4132), 'flask.jsonify', 'jsonify', (["{'msg': 'A known value was not supplied'}"], {}), "({'msg': 'A known value was not supplied'})\n", (4089, 4132), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((7299, 7319), 'flask.g.session.rollback', 'g.session.rollback', ([], {}), '()\n', (7317, 7319), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((7763, 7783), 'flask.g.session.rollback', 'g.session.rollback', ([], {}), '()\n', (7781, 7783), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((8828, 8866), 'app.main.helpers.svg_contents', 'svg_contents', (['"""./app/static/socks.svg"""'], {}), "('./app/static/socks.svg')\n", (8840, 8866), False, 'from app.main.helpers import svg_contents\n'), ((3188, 3234), 'app.main.admin.get_admin_control_by_id', 'get_admin_control_by_id', (['g.session', 'control_id'], {}), '(g.session, control_id)\n', (3211, 3234), False, 'from app.main.admin import get_admin_control_by_id, get_admin_control_by_name, get_admin_controls\n'), ((3432, 3493), 'flask.jsonify', 'jsonify', (["{'msg': 'Something went wrong changing the control'}"], {}), "({'msg': 'Something went wrong changing the control'})\n", (3439, 3493), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((3747, 3809), 'flask.jsonify', 'jsonify', (["{'msg': f'User permissions changed for ID {user_id}'}"], {}), "({'msg': f'User permissions changed for ID {user_id}'})\n", (3754, 3809), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((6922, 6960), 'app.main.helpers.svg_contents', 'svg_contents', (['"""./app/static/socks.svg"""'], {}), "('./app/static/socks.svg')\n", (6934, 6960), False, 'from app.main.helpers import svg_contents\n'), ((3966, 4035), 'flask.jsonify', 'jsonify', (["{'msg': 'Something went wrong changing the user permission'}"], {}), "({'msg': 'Something went wrong changing the user permission'})\n", (3973, 4035), False, 'from flask import Blueprint, Markup, flash, g, jsonify, redirect, render_template, request, session, url_for\n'), ((8152, 8190), 'app.main.helpers.svg_contents', 'svg_contents', (['"""./app/static/socks.svg"""'], {}), "('./app/static/socks.svg')\n", (8164, 8190), False, 'from app.main.helpers import svg_contents\n'), ((8506, 8544), 'app.main.helpers.svg_contents', 'svg_contents', (['"""./app/static/socks.svg"""'], {}), "('./app/static/socks.svg')\n", (8518, 8544), False, 'from app.main.helpers import svg_contents\n'), ((8435, 8449), 
'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8447, 8449), False, 'import sys\n')]
|
import os
from nlp_tools.preprocessing import Preprocessing
from nlp_tools.loaders import MdLoader
from nlp_tools.representations import MergedMatrixRepresentation
from nlp_tools.classifiers import ClassificationProcessor, NaiveBayseTfIdfClassifier
from nlp_tools.utils import get_random_message
from quelfilm.settings import *
def build_classifier():
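    # load the markdown training data, preprocess it, build the merged matrix
    # representation and train the TF-IDF Naive Bayes classifier; the returned
    # closure predicts an intent, score and canned response for new text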
loader = MdLoader(TRAINING_PATH)
processor = Preprocessing(loader)
repres = MergedMatrixRepresentation(processor.data)
classifier = ClassificationProcessor(NaiveBayseTfIdfClassifier(), repres.data)
classifier.train()
def predict(text: str):
message = repres.process_new_data(processor.process_sentence(text))
intent, score = classifier.predict(message)
response = get_random_message(processor.responses[intent])
return intent, score, response
return predict
class Classifier:
def __init__(self):
self.predict = build_classifier()
|
[
"nlp_tools.representations.MergedMatrixRepresentation",
"nlp_tools.preprocessing.Preprocessing",
"nlp_tools.classifiers.NaiveBayseTfIdfClassifier",
"nlp_tools.utils.get_random_message",
"nlp_tools.loaders.MdLoader"
] |
[((368, 391), 'nlp_tools.loaders.MdLoader', 'MdLoader', (['TRAINING_PATH'], {}), '(TRAINING_PATH)\n', (376, 391), False, 'from nlp_tools.loaders import MdLoader\n'), ((408, 429), 'nlp_tools.preprocessing.Preprocessing', 'Preprocessing', (['loader'], {}), '(loader)\n', (421, 429), False, 'from nlp_tools.preprocessing import Preprocessing\n'), ((443, 485), 'nlp_tools.representations.MergedMatrixRepresentation', 'MergedMatrixRepresentation', (['processor.data'], {}), '(processor.data)\n', (469, 485), False, 'from nlp_tools.representations import MergedMatrixRepresentation\n'), ((527, 554), 'nlp_tools.classifiers.NaiveBayseTfIdfClassifier', 'NaiveBayseTfIdfClassifier', ([], {}), '()\n', (552, 554), False, 'from nlp_tools.classifiers import ClassificationProcessor, NaiveBayseTfIdfClassifier\n'), ((768, 815), 'nlp_tools.utils.get_random_message', 'get_random_message', (['processor.responses[intent]'], {}), '(processor.responses[intent])\n', (786, 815), False, 'from nlp_tools.utils import get_random_message\n')]
|
from socket import *
import argparse
# Parameters
#TCP_IP = 'localhost'
#TCP_PORT = 12003
BUFFER_SIZE = 1024
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('server_host')
parser.add_argument('server_port')
parser.add_argument('filename')
args = parser.parse_args()
# Prepare a client socket
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((args.server_host, int(args.server_port)))
# Send message to GET HTML file
# Filename: HelloWorld.html
MESSAGE = f'GET {args.filename}'
MESSAGE = bytes(MESSAGE, 'utf-8')
clientSocket.send(MESSAGE)
# GET the full content from the HTML file
full_content = ''
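# keep reading until the server closes the connection (recv then returns an empty bytes object)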
while True:
data = clientSocket.recv(BUFFER_SIZE)
if not data:
break
data = data.decode('utf-8')
full_content += data
with open('files_from_server/HelloWorld.html', 'w') as f:
f.write(full_content)
print("received data:", full_content)
# Close Client
clientSocket.close()
print("\n\nClient close successfully!")
|
[
"argparse.ArgumentParser"
] |
[((133, 158), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (156, 158), False, 'import argparse\n')]
|
#!/usr/bin/python
import os
import datetime
import PyRSS2Gen
kInDir = "raw_post"
kTmplDir = "template"
kBlogDir = "site/blog"
kPostsDir = "site/blog/posts"
def main():
postlist = posts()
archive(postlist)
def posts():
postlist = []
# Create the output directory if it doesn't already exist
os.makedirs(kPostsDir, exist_ok=True)
postHeader = getTemplate("posthead.htm")
postFooter = getTemplate("postfoot.htm")
postTitle = getTemplate("postitle.htm")
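    # each raw post gets the shared header, a per-post title box and the footer wrapped around its contents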
for fInName in os.listdir(kInDir):
fInPath = os.path.join(kInDir, fInName)
fOutName = os.path.splitext(fInName)[0] + ".htm"
fOutPath = os.path.join(kPostsDir, fOutName)
fIn = open(fInPath, "r")
fOut = open(fOutPath, "w")
# emit post header
fOut.write(postHeader)
# parse & consume 1st input line -> title
title = fIn.readline()
# parse & consume 2nd input line -> date
date = fIn.readline()
# store (title, date, filename)
postlist.append((title, date, fOutName))
# emit post titlebox
fOut.write(postTitle % (len(title) + 4, title, date))
# write remaining lines
# wrapping with <pre></pre> unless input was a .htm file
if not fInName.endswith(".htm"):
fOut.write("<pre>\n")
while 1:
line = fIn.readline()
if not line:
break
fOut.write(line)
if not fInName.endswith(".htm"):
fOut.write("</pre>\n")
# emit post footer
fOut.write(postFooter)
fIn.close()
# close post htm file
fOut.close()
return postlist
def archive(postlist):
archiveHeader = getTemplate("archhead.htm")
archiveFooter = getTemplate("archfoot.htm")
archiveDiv = getTemplate("archdiv.htm")
redirectHtml = getTemplate("redirect.htm")
# sort the (title, date, filename) data structure by date
# (ASCIIbetical descending)
postlist.sort(key=lambda t: t[1], reverse=True)
# create redirect htm file
fRdOutPath = os.path.join(kBlogDir, "index.htm")
with open(fRdOutPath, "w") as f:
# emit filename of newest post
f.write(redirectHtml % postlist[0][2])
# create archive htm file
fOutPath = os.path.join(kPostsDir, "index.htm")
fOut = open(fOutPath, "w")
# create archive rss feed
rss = PyRSS2Gen.RSS2(
title = "VGA Blog",
link = "http://example.com/blog",
description = "",
lastBuildDate = datetime.datetime.now())
# emit archive header
fOut.write(archiveHeader)
# for each datum
for tup in postlist:
(title, date, filename) = tup
date = date.strip()
# emit div
s = archiveDiv % (date.strip(), filename, title.strip())
fOut.write(s)
# emit rss entry
rss.items.append(
PyRSS2Gen.RSSItem(
title = title,
link =
"https://example.com/blog/posts/%s" % filename,
description = "",
pubDate = datetime.datetime.strptime(date, "%Y-%m-%d")))
# emit archive footer
fOut.write(archiveFooter)
# close archive htm file
fOut.close()
# write rss feed
with open(os.path.join(kBlogDir, "rss.xml"), "w") as rssFile:
rss.write_xml(rssFile)
def getTemplate(name):
path = os.path.join(kTmplDir, name)
with open(path, "r") as f: contents = "".join(f.readlines())
return contents
if __name__ == "__main__":
main()
|
[
"os.makedirs",
"datetime.datetime.now",
"datetime.datetime.strptime",
"os.path.splitext",
"os.path.join",
"os.listdir"
] |
[((316, 353), 'os.makedirs', 'os.makedirs', (['kPostsDir'], {'exist_ok': '(True)'}), '(kPostsDir, exist_ok=True)\n', (327, 353), False, 'import os\n'), ((509, 527), 'os.listdir', 'os.listdir', (['kInDir'], {}), '(kInDir)\n', (519, 527), False, 'import os\n'), ((2095, 2130), 'os.path.join', 'os.path.join', (['kBlogDir', '"""index.htm"""'], {}), "(kBlogDir, 'index.htm')\n", (2107, 2130), False, 'import os\n'), ((2300, 2336), 'os.path.join', 'os.path.join', (['kPostsDir', '"""index.htm"""'], {}), "(kPostsDir, 'index.htm')\n", (2312, 2336), False, 'import os\n'), ((3413, 3441), 'os.path.join', 'os.path.join', (['kTmplDir', 'name'], {}), '(kTmplDir, name)\n', (3425, 3441), False, 'import os\n'), ((547, 576), 'os.path.join', 'os.path.join', (['kInDir', 'fInName'], {}), '(kInDir, fInName)\n', (559, 576), False, 'import os\n'), ((653, 686), 'os.path.join', 'os.path.join', (['kPostsDir', 'fOutName'], {}), '(kPostsDir, fOutName)\n', (665, 686), False, 'import os\n'), ((2545, 2568), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2566, 2568), False, 'import datetime\n'), ((3295, 3328), 'os.path.join', 'os.path.join', (['kBlogDir', '"""rss.xml"""'], {}), "(kBlogDir, 'rss.xml')\n", (3307, 3328), False, 'import os\n'), ((596, 621), 'os.path.splitext', 'os.path.splitext', (['fInName'], {}), '(fInName)\n', (612, 621), False, 'import os\n'), ((3110, 3154), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (3136, 3154), False, 'import datetime\n')]
|
import logging
import math
import re
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, Iterable, Iterator, List, Optional, Sequence, Set, Tuple
import cv2
import numpy as np
import scipy.spatial
from lib.image_processing import (
Point,
Rectangle,
_contains_start_graphic,
find_boxes_with_rgb,
find_in_image,
)
from lib.instrument_tex import Detectable, FontSize
from lib.parse_formula_tex import TexSymbol, TexToken
logger = logging.getLogger("texsymdetect")
PageNumber = int
MathMl = str
@dataclass(frozen=True)
class Id:
"""
To uniquely identify a symbol in the symbol search functionality (i.e., not confuse
two symbols with each other), one needs both the MathML for the symbol, and the
size it was rendered at.
"""
mathml: str
level: FontSize
@dataclass
class TokenInstance:
id_: Id
location: Rectangle
@dataclass
class SymbolInstance:
id_: Id
location: Rectangle
@dataclass
class TokenTemplate:
symbol: Id
images: List[np.array]
@dataclass
class Component:
symbol_id: Id
center: Point
" Position of center of component, relative to center of anchor component. "
@dataclass
class SymbolTemplate:
anchor: Id
" Leftmost member of the composite template. "
members: List[Component]
" All members of the composite template except for the anchor. "
def create_symbol_template(
symbol_image: np.array,
token_images: Dict[MathMl, Dict[FontSize, List[np.array]]],
token_mathmls: Iterable[str],
require_blank_border_around_tokens: bool = True,
) -> Optional[SymbolTemplate]:
# Unpack token images into a 1-D list.
token_image_list: List[np.array] = []
mathmls: List[MathMl] = []
font_sizes: List[FontSize] = []
for mathml, sizes in token_images.items():
if mathml not in token_mathmls:
continue
for font_size, images in sizes.items():
for image in images:
token_image_list.append(image)
font_sizes.append(font_size)
mathmls.append(mathml)
# Search in image for tokens.
rects = find_in_image(
token_image_list,
symbol_image,
require_blank_border=require_blank_border_around_tokens,
)
# Unroll tokens into a 1-D list.
rects_unrolled: List[Rectangle] = []
mathmls_unrolled: List[MathMl] = []
font_sizes_unrolled: List[FontSize] = []
for mathml, font_size, rect_list in zip(mathmls, font_sizes, rects):
for rect in rect_list:
rects_unrolled.append(rect)
mathmls_unrolled.append(mathml)
font_sizes_unrolled.append(font_size)
# Find positions of child symbols in the composite symbol image.
components: List[Component] = []
# Add tokens to the template left-to-right.
for (mathml, font_size, rect) in sorted(
zip(mathmls_unrolled, font_sizes_unrolled, rects_unrolled),
key=lambda t: t[2].left,
):
if mathml in token_mathmls:
center = Point(rect.left + rect.width / 2.0, rect.top + rect.height / 2.0)
component = Component(Id(mathml, font_size), center)
if component not in components:
components.append(component)
# Composite symbol needs at least one component.
if not components:
return None
# Select 'anchor' for the template as the leftmost component.
components.sort(key=lambda c: c.center.x)
anchor = components.pop(0)
# Normalize the positions of components relative to the anchor.
for component in components:
component.center.x -= anchor.center.x
component.center.y -= anchor.center.y
# assert (
# False
# ), "May want to filter out overlapping tokens... for instance, by blanking out the part of the image that matches."
return SymbolTemplate(anchor.symbol_id, components)
def extract_templates(
page_images: Dict[PageNumber, np.array], detectables: Sequence[Detectable],
) -> Tuple[Dict[Detectable, List[np.array]], Dict[Detectable, SymbolTemplate]]:
"""
Given images of pages from a paper that has been modified to include appearances of many tokens
and symbols (i.e., 'detectables'), extract templates for those tokens and symbols
that can be used to identify them in other documents.
Returns a collection of token templates (images), and symbol templates
(a flexible template format).
Note that both tokens and symbols must be passed in as detectables;
symbols cannot be found without first finding their component tokens. All
detectables should be provided in the order that they appear in the TeX,
which should include all tokens first, followed by all symbols.
"""
sorted_page_images = [page_images[pn] for pn in sorted(page_images.keys())]
def dequeue_page() -> Optional[np.array]:
" Remove image of the next page from the list of all pages in the document. "
if not sorted_page_images:
return None
image = sorted_page_images.pop(0)
return image
page_image = dequeue_page()
next_page_image = dequeue_page()
# Scan all pages until the marker is found that suggests that the original LaTeX
# document has ended, and the detectables (i.e., colorized tokens and symbols)
# are about to appear.
while True:
if not _contains_start_graphic(page_image):
page_image = next_page_image
next_page_image = dequeue_page()
continue
# Once the marker has been found, skip forward one more page so that
# symbols and tokens will be detected on the page after the marker.
page_image = next_page_image
next_page_image = dequeue_page()
break
# Templates are extracted for detecting both tokens and symbols. Templates
# for tokens are images of single letters or marks. Templates for symbols
# are groups of tokens and the expected (but somewhat flexible) spatial
# relationships between them.
token_images: Dict[Detectable, List[np.array]] = defaultdict(list)
token_images_lookup: Dict[MathMl, Dict[FontSize, List[np.array]]] = defaultdict(
dict
)
symbol_templates: Dict[Detectable, SymbolTemplate] = {}
for d in detectables:
# Find a bounding box around the token / symbol.
red, green, blue = d.color
rects = find_boxes_with_rgb(page_image, red, green, blue)
if next_page_image is not None:
if not rects:
rects = find_boxes_with_rgb(next_page_image, red, green, blue)
if not rects:
logger.warning("Could not find detectable %s.", d)
continue
page_image = next_page_image
next_page_image = dequeue_page()
else:
rects.extend(find_boxes_with_rgb(next_page_image, red, green, blue))
if len(rects) > 1:
logger.warning(
"Unexpectedly more than one instance of detectable %s. "
+ "There may have been a problem in the coloring code.",
d,
)
        if not rects:
            logger.warning("Could not find detectable %s.", d)
            continue
        box = rects[0]
logger.debug(f"Found symbol at {box}.")
# Extract a cropped, black-and-white image of the token or symbol.
cropped_bw = page_image[
box.top : box.top + box.height, box.left : box.left + box.width
]
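        # force every non-white pixel to black before converting the crop to grayscale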
cropped_bw[
np.where(
(cropped_bw[:, :, 0] != 255)
| (cropped_bw[:, :, 1] != 255)
| (cropped_bw[:, :, 2] != 255)
)
] = [0, 0, 0]
cropped_bw = cv2.cvtColor(cropped_bw, cv2.COLOR_BGR2GRAY)
# For simple symbols, extract images.
if isinstance(d.entity, TexToken):
# Only save a template if it has a different appearance from the other templates
# saved for a symbol. This is important as a bunch of templates for the symbol
# at the same size are created to try to make sure that templates are saved for
# every way that extra space might have been introduced between characters in the
# symbol when the PDF was rendered to an image.
already_saved = False
for img in token_images[d]:
if np.array_equal(img, cropped_bw):
already_saved = True
break
if not already_saved:
token_images[d].append(cropped_bw)
lookup_dict = token_images_lookup[d.entity.mathml]
if d.font_size not in lookup_dict:
lookup_dict[d.font_size] = []
lookup_dict[d.font_size].append(cropped_bw)
# Note that, if the caller of this function did their job in ordering the list of
# detectables, symbols will be processed only after all tokens have been processed.
if isinstance(d.entity, TexSymbol):
token_mathmls = [t.mathml for t in d.entity.tokens]
template = create_symbol_template(
cropped_bw, token_images_lookup, token_mathmls
)
if template:
symbol_templates[d] = template
return token_images, symbol_templates
class TokenIndex:
" Index of appearances of all tokens on a page. "
def __init__(self, tokens: Iterable[TokenInstance]) -> None:
self._tokens: List[TokenInstance] = list(tokens)
# Build a KD search tree over symbols to support faster spatial querying.
token_centers = [
(
t.location.left + t.location.width / 2.0,
t.location.top + t.location.height / 2.0,
)
            for t in self._tokens
        ]
        if not self._tokens:
token_centers = np.empty(shape=(0, 2))
self._tree = scipy.spatial.KDTree(token_centers)
def get_instances(self, id_: Id = None) -> List[TokenInstance]:
" Get all tokens with a specific key. "
if not id_:
return list(self._tokens)
return [t for t in self._tokens if t.id_ == id_]
def find(
self, id_: Id, center: Point, tolerance: Optional[Point] = None,
) -> List[TokenInstance]:
"""
Get all tokens near a specific point matching a specification for the token
(its key and level). Matching tokens are returned if:
* its center x falls within [center[0] - tolerance[0], center[0] + tolerance[0]]
* its center y falls within [center[1] - tolerance[1], center[1] + tolerance[1]]
"""
tolerance = tolerance or Point(1.0, 1.0)
# Initial query for candidate symbols is made using the KDTree 'query_ball_point' method,
        # as it will in many cases filter symbols by their position in two-dimensional
        # space faster than iterating over a list of all symbols.
radius = math.sqrt(tolerance.x * tolerance.x + tolerance.y * tolerance.y)
nearby_points = self._tree.query_ball_point(x=[center.x, center.y], r=radius)
matches = []
for token_i in nearby_points:
# Rule out symbols that are not the requested symbol.
token = self._tokens[token_i]
if token.id_ != id_:
continue
# Rule out symbols that are not within the tolerated distance of the query point.
token_center_x = token.location.left + token.location.width / 2.0
token_center_y = token.location.top + token.location.height / 2.0
if (
abs(token_center_x - center.x) > tolerance.x
or abs(token_center_y - center.y) > tolerance.y
):
continue
matches.append(token)
return matches
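# Illustrative usage sketch added for clarity; not part of the original module.
def _example_find_token(tokens: Iterable[TokenInstance]) -> List[TokenInstance]:
    """
    Look up instances of a (hypothetical) token near an expected position using
    TokenIndex. The MathML string, font size, and coordinates are made-up values.
    """
    index = TokenIndex(tokens)
    return index.find(
        id_=Id("<mi>x</mi>", 10.0),
        center=Point(120.0, 340.0),
        tolerance=Point(2.0, 2.0),
    )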
def detect_tokens(
page_images: Dict[PageNumber, np.array],
token_images: Dict[Detectable, List[np.array]],
require_blank_border: bool = True,
) -> Dict[PageNumber, TokenIndex]:
"""
Detect appearances of tokens in images of pages. If 'require_blank_border' is set,
filter the detected tokens to just those that are surrounded with whitespace. This
option is intended to help reduce the number of false positives. See the
implementation comments below for more details.
"""
tokens: Dict[PageNumber, TokenIndex] = {}
# Unpack token images into a 1-D list.
token_image_list = []
token_list = []
for (token, images) in token_images.items():
for image in images:
token_image_list.append(image)
token_list.append(token)
for page_no, page_image in sorted(page_images.items(), key=lambda t: t[0]):
logger.debug("Detecting tokens on page %d.", page_no)
page_image_gray = cv2.cvtColor(page_image, cv2.COLOR_BGR2GRAY)
rects = find_in_image(
token_image_list,
page_image_gray,
require_blank_border=require_blank_border,
)
token_instances: List[TokenInstance] = []
for (token, rect_list) in zip(token_list, rects):
for rect in rect_list:
token_instances.append(
TokenInstance(
id_=Id(token.entity.mathml, token.font_size), location=rect
)
)
tokens[page_no] = TokenIndex(token_instances)
return tokens
def detect_symbols(
token_instances: Dict[PageNumber, TokenIndex],
symbol_templates: Dict[Detectable, SymbolTemplate],
) -> Dict[PageNumber, List[SymbolInstance]]:
symbol_instances: Dict[PageNumber, List[SymbolInstance]] = defaultdict(list)
for page_no, token_index in token_instances.items():
logger.debug("Scanning page %d for symbols.", page_no)
for detectable, template in symbol_templates.items():
for rect in find_symbols(template, token_index):
instance = SymbolInstance(
Id(detectable.entity.mathml, detectable.font_size), rect
)
# Deduplicate symbols, in case two symbols are actually the same symbol (as
# may happen if two symbols had different TeX, but the same MathML).
if instance not in symbol_instances[page_no]:
symbol_instances[page_no].append(instance)
return symbol_instances
def find_symbols(template: SymbolTemplate, index: TokenIndex) -> Iterator[Rectangle]:
"""
Search for appearances of a symbol given an index of tokens.
"""
# Search for anchors---that is, leftmost glyphs in a symbol, relative
# to which all other tokens in a composite symbol will be searched.
anchor_candidates = index.get_instances(template.anchor)
# For each anchor found, attempt to fill out the rest of the composite symbol template.
for a in anchor_candidates:
template_incomplete = False
member_matches: List[TokenInstance] = []
anchor_center_x = a.location.left + a.location.width / 2.0
anchor_center_y = a.location.top + a.location.height / 2.0
# For each expected member of the composite symbol (i.e., all simple symbols the composite
# symbol should be made up of), search for appearances of the member at the expected
# location relative to the anchor.
for member in template.members:
expected_center = Point(
anchor_center_x + member.center.x, anchor_center_y + member.center.y
)
# Note that the tolerance for the position of a member symbol is higher the further away
# that member is from the anchor, as it is assumed that TeX might insert or remove space
# between members, which will accumulate the further away the member is from the anchor.
tolerance = Point(
math.ceil(abs(member.center.x) / 5.0) + 1,
math.ceil(abs(member.center.y) / 5.0) + 1,
)
member_candidates = index.find(
id_=member.symbol_id, center=expected_center, tolerance=tolerance,
)
# If multiple symbols could fill the member slot in the composite symbol, select the
# leftmost symbol that has not yet been used to fill a slot.
member_found = False
member_candidates.sort(key=lambda c: c.location.left)
for m in member_candidates:
if m not in member_matches:
member_matches.append(m)
member_found = True
break
# If any member slot of the template cannot be filled, a composite symbol cannot be
# created. Advance to the next potential anchor.
if not member_found:
template_incomplete = True
break
# Create an instance of the composite symbol if the template has been completed.
if not template_incomplete:
tokens = [a] + member_matches
left = min([t.location.left for t in tokens])
top = min([t.location.top for t in tokens])
right = max([t.location.left + t.location.width for t in tokens])
bottom = max([t.location.top + t.location.height for t in tokens])
yield Rectangle(left, top, right - left, bottom - top)
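# Illustrative sketch added for clarity; not part of the original module. It chains
# the two detection passes defined above; the token images and symbol templates are
# assumed to come from the template-extraction step earlier in this module.
def _example_detection_pipeline(
    page_images: Dict[PageNumber, np.array],
    token_images: Dict[Detectable, List[np.array]],
    symbol_templates: Dict[Detectable, SymbolTemplate],
) -> Dict[PageNumber, List[SymbolInstance]]:
    token_indexes = detect_tokens(page_images, token_images)
    return detect_symbols(token_indexes, symbol_templates)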
|
[
"lib.image_processing.Point",
"lib.image_processing._contains_start_graphic",
"numpy.array_equal",
"lib.image_processing.Rectangle",
"math.sqrt",
"cv2.cvtColor",
"numpy.empty",
"logging.getLogger",
"lib.image_processing.find_boxes_with_rgb",
"collections.defaultdict",
"numpy.where",
"lib.image_processing.find_in_image",
"dataclasses.dataclass"
] |
[((494, 527), 'logging.getLogger', 'logging.getLogger', (['"""texsymdetect"""'], {}), "('texsymdetect')\n", (511, 527), False, 'import logging\n'), ((562, 584), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (571, 584), False, 'from dataclasses import dataclass\n'), ((2173, 2280), 'lib.image_processing.find_in_image', 'find_in_image', (['token_image_list', 'symbol_image'], {'require_blank_border': 'require_blank_border_around_tokens'}), '(token_image_list, symbol_image, require_blank_border=\n require_blank_border_around_tokens)\n', (2186, 2280), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((6140, 6157), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6151, 6157), False, 'from collections import defaultdict\n'), ((6230, 6247), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (6241, 6247), False, 'from collections import defaultdict\n'), ((13804, 13821), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (13815, 13821), False, 'from collections import defaultdict\n'), ((6458, 6507), 'lib.image_processing.find_boxes_with_rgb', 'find_boxes_with_rgb', (['page_image', 'red', 'green', 'blue'], {}), '(page_image, red, green, blue)\n', (6477, 6507), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((7854, 7898), 'cv2.cvtColor', 'cv2.cvtColor', (['cropped_bw', 'cv2.COLOR_BGR2GRAY'], {}), '(cropped_bw, cv2.COLOR_BGR2GRAY)\n', (7866, 7898), False, 'import cv2\n'), ((11107, 11171), 'math.sqrt', 'math.sqrt', (['(tolerance.x * tolerance.x + tolerance.y * tolerance.y)'], {}), '(tolerance.x * tolerance.x + tolerance.y * tolerance.y)\n', (11116, 11171), False, 'import math\n'), ((12951, 12995), 'cv2.cvtColor', 'cv2.cvtColor', (['page_image', 'cv2.COLOR_BGR2GRAY'], {}), '(page_image, cv2.COLOR_BGR2GRAY)\n', (12963, 12995), False, 'import cv2\n'), ((13012, 13108), 'lib.image_processing.find_in_image', 'find_in_image', (['token_image_list', 'page_image_gray'], {'require_blank_border': 'require_blank_border'}), '(token_image_list, page_image_gray, require_blank_border=\n require_blank_border)\n', (13025, 13108), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((3075, 3140), 'lib.image_processing.Point', 'Point', (['(rect.left + rect.width / 2.0)', '(rect.top + rect.height / 2.0)'], {}), '(rect.left + rect.width / 2.0, rect.top + rect.height / 2.0)\n', (3080, 3140), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((5429, 5464), 'lib.image_processing._contains_start_graphic', '_contains_start_graphic', (['page_image'], {}), '(page_image)\n', (5452, 5464), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((7648, 7753), 'numpy.where', 'np.where', (['((cropped_bw[:, :, 0] != 255) | (cropped_bw[:, :, 1] != 255) | (cropped_bw[\n :, :, 2] != 255))'], {}), '((cropped_bw[:, :, 0] != 255) | (cropped_bw[:, :, 1] != 255) | (\n cropped_bw[:, :, 2] != 255))\n', (7656, 7753), True, 'import numpy as np\n'), ((9996, 10018), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 2)'}), '(shape=(0, 2))\n', (10004, 10018), True, 'import numpy as np\n'), ((10810, 10825), 'lib.image_processing.Point', 'Point', (['(1.0)', '(1.0)'], {}), '(1.0, 1.0)\n', (10815, 10825), 
False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((15562, 15637), 'lib.image_processing.Point', 'Point', (['(anchor_center_x + member.center.x)', '(anchor_center_y + member.center.y)'], {}), '(anchor_center_x + member.center.x, anchor_center_y + member.center.y)\n', (15567, 15637), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((6599, 6653), 'lib.image_processing.find_boxes_with_rgb', 'find_boxes_with_rgb', (['next_page_image', 'red', 'green', 'blue'], {}), '(next_page_image, red, green, blue)\n', (6618, 6653), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((8513, 8544), 'numpy.array_equal', 'np.array_equal', (['img', 'cropped_bw'], {}), '(img, cropped_bw)\n', (8527, 8544), True, 'import numpy as np\n'), ((17452, 17500), 'lib.image_processing.Rectangle', 'Rectangle', (['left', 'top', '(right - left)', '(bottom - top)'], {}), '(left, top, right - left, bottom - top)\n', (17461, 17500), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((6925, 6979), 'lib.image_processing.find_boxes_with_rgb', 'find_boxes_with_rgb', (['next_page_image', 'red', 'green', 'blue'], {}), '(next_page_image, red, green, blue)\n', (6944, 6979), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n')]
|
import re
def check_userID(ID):
''' Rule : UserID consists of [A-Z|a-z|0-9] '''
match = re.search(r'\w+', ID)
#print match.group(0),
if match:
return True
else:
return False
def check_jobTitleName(Job_Title):
    ''' Rule: Job title starts with [A-Z] followed by one or more [a-z]; optionally one space,
        [A-Z], and one or more [a-z]. '''
match = re.search(r'(^[A-Z][a-z]+)( [A-Z][a-z]+)?$', Job_Title)
if match:
'''print "Job Title", match.group(0),
print "First part of Job title:", match.group(1),
print "Second part of Job title:", match.group(2), '''
return True
else:
return False
def check_firstName(First_Name):
    ''' Rule: Starts with [A-Z] then one or more occurrences of [a-z]. '''
    match = re.search(r'^[A-Z][a-z]+$', First_Name)
if match:
#print match.group(0),
return True
else:
return False
def check_lastName(Last_Name):
    ''' Rule: Starts with [A-Z] then one or more occurrences of [a-z]. '''
    match = re.search(r'^[A-Z][a-z]+$', Last_Name)
if match:
#print match.group(0),
return True
else:
return False
def check_preferredFullName(Full_Name):
''' Rule: Combination of first and last names. '''
match = re.search(r'(^[A-Z][a-z]+) ([A-Z][a-z]+)$', Full_Name)
if match:
'''print "Full Name:", match.group(0),
print "First Name:", match.group(1),
print "Last Name:", match.group(2), '''
return True
else:
return False
def check_employeeCode(Emp_Code):
''' Rule: Starts with 'E' and followed by multiple occurrences of [0-9]. '''
match = re.search(r'^E\d+', Emp_Code)
if match:
#print match.group(0),
return True
else:
return False
def check_region(Working_Place):
''' Rule: Short form of states in US. '''
match = re.search(r'[A-Z]{2}', Working_Place)
if match:
#print match.group(0),
return True
else:
return False
def check_phoneNumber(Contact_Number):
''' Rule: Total 10 digits. First 3 digits for province code then followed by - and 7 digits. '''
match = re.search(r'\d{3}-\d{7}', Contact_Number)
if match:
#print match.group(0),
return True
else:
return False
def check_emailAddress(Email_Address):
''' Rule: <host name>@<provider name>.<DNS type> '''
match = re.search(r'(^\w+\.?\w+)@(\w+\.\w+$)', Email_Address)
if match:
'''print "Email Address:", match.group(0),
print "Host part:", match.group(1),
print "Domain part:", match.group(2), '''
return True
else:
return False
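if __name__ == '__main__':
    # Illustrative checks added for clarity; the sample values are made up to show
    # the format each rule expects.
    print(check_jobTitleName('Software Engineer'))      # True
    print(check_employeeCode('E1234'))                   # True
    print(check_phoneNumber('040-1234567'))              # True
    print(check_emailAddress('[email protected]'))  # True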
|
[
"re.search"
] |
[((98, 119), 're.search', 're.search', (['"""\\\\w+"""', 'ID'], {}), "('\\\\w+', ID)\n", (107, 119), False, 'import re\n'), ((420, 474), 're.search', 're.search', (['"""(^[A-Z][a-z]+)( [A-Z][a-z]+)?$"""', 'Job_Title'], {}), "('(^[A-Z][a-z]+)( [A-Z][a-z]+)?$', Job_Title)\n", (429, 474), False, 'import re\n'), ((826, 864), 're.search', 're.search', (['"""^[A-z][a-z]+$"""', 'First_Name'], {}), "('^[A-z][a-z]+$', First_Name)\n", (835, 864), False, 'import re\n'), ((1077, 1114), 're.search', 're.search', (['"""^[A-z][a-z]+$"""', 'Last_Name'], {}), "('^[A-z][a-z]+$', Last_Name)\n", (1086, 1114), False, 'import re\n'), ((1321, 1374), 're.search', 're.search', (['"""(^[A-Z][a-z]+) ([A-Z][a-z]+)$"""', 'Full_Name'], {}), "('(^[A-Z][a-z]+) ([A-Z][a-z]+)$', Full_Name)\n", (1330, 1374), False, 'import re\n'), ((1711, 1740), 're.search', 're.search', (['"""^E\\\\d+"""', 'Emp_Code'], {}), "('^E\\\\d+', Emp_Code)\n", (1720, 1740), False, 'import re\n'), ((1931, 1967), 're.search', 're.search', (['"""[A-Z]{2}"""', 'Working_Place'], {}), "('[A-Z]{2}', Working_Place)\n", (1940, 1967), False, 'import re\n'), ((2220, 2262), 're.search', 're.search', (['"""\\\\d{3}-\\\\d{7}"""', 'Contact_Number'], {}), "('\\\\d{3}-\\\\d{7}', Contact_Number)\n", (2229, 2262), False, 'import re\n'), ((2469, 2527), 're.search', 're.search', (['"""(^\\\\w+\\\\.?\\\\w+)@(\\\\w+\\\\.\\\\w+$)"""', 'Email_Address'], {}), "('(^\\\\w+\\\\.?\\\\w+)@(\\\\w+\\\\.\\\\w+$)', Email_Address)\n", (2478, 2527), False, 'import re\n')]
|
"""The implementation of a Hera workflow for Argo-based workflows"""
from typing import Dict, List, Optional, Tuple
from argo_workflows.models import (
IoArgoprojWorkflowV1alpha1DAGTemplate,
IoArgoprojWorkflowV1alpha1Template,
IoArgoprojWorkflowV1alpha1VolumeClaimGC,
IoArgoprojWorkflowV1alpha1Workflow,
IoArgoprojWorkflowV1alpha1WorkflowSpec,
IoArgoprojWorkflowV1alpha1WorkflowTemplateRef,
LocalObjectReference,
ObjectMeta,
)
from hera.affinity import Affinity
from hera.host_alias import HostAlias
from hera.security_context import WorkflowSecurityContext
from hera.task import Task
from hera.ttl_strategy import TTLStrategy
from hera.volume_claim_gc import VolumeClaimGCStrategy
from hera.workflow_editors import add_head, add_tail, add_task, add_tasks, on_exit
from hera.workflow_service import WorkflowService
class Workflow:
"""A workflow representation.
The workflow is used as a functional representation for a collection of tasks and
steps. The workflow context controls the overall behaviour of tasks, such as whether to notify completion, whether
    to execute retries, overall parallelism, etc. The workflow can be constructed and submitted to multiple Argo
endpoints as long as a token can be associated with the endpoint at the given domain.
Parameters
----------
name: str
The workflow name. Note that the workflow initiation will replace underscores with dashes.
service: Optional[WorkflowService] = None
A workflow service to use for submissions. See `hera.v1.workflow_service.WorkflowService`.
parallelism: int = 50
The number of parallel tasks to run in case a task group is executed for multiple tasks.
service_account_name: Optional[str] = None
The name of the service account to use in all workflow tasks.
labels: Optional[Dict[str, str]] = None
A Dict of labels to attach to the Workflow object metadata
annotations: Optional[Dict[str, str]] = None
A Dict of annotations to attach to the Workflow object metadata
namespace: Optional[str] = 'default'
The namespace to use for creating the Workflow. Defaults to "default"
security_context: Optional[WorkflowSecurityContext] = None
Define security settings for all containers in the workflow.
image_pull_secrets: Optional[List[str]] = None
A list of image pull secrets. This is used to authenticate with the private image registry of the images
used by tasks.
workflow_template_ref: Optional[str] = None
The name of the workflowTemplate reference. WorkflowTemplateRef is a reference to a WorkflowTemplate resource.
If you create a WorkflowTemplate resource either clusterWorkflowTemplate or not (clusterScope attribute bool)
you can reference it again and again when you create a new Workflow without specifying the same tasks and
dependencies. Official doc: https://argoproj.github.io/argo-workflows/fields/#workflowtemplateref
ttl_strategy: Optional[TTLStrategy] = None
The time to live strategy of the workflow.
volume_claim_gc_strategy: Optional[VolumeClaimGCStrategy] = None
Define how to delete volumes from completed Workflows.
host_aliases: Optional[List[HostAlias]] = None
Mappings between IP and hostnames.
node_selectors: Optional[Dict[str, str]] = None
A collection of key value pairs that denote node selectors. This is used for scheduling purposes. If the task
requires GPU resources, clients are encouraged to add a node selector for a node that can satisfy the
requested resources. In addition, clients are encouraged to specify a GPU toleration, depending on the platform
they submit the workflow to.
affinity: Optional[Affinity] = None
The task affinity. This dictates the scheduling protocol of the pods running the tasks of the workflow.
"""
def __init__(
self,
name: str,
service: Optional[WorkflowService] = None,
parallelism: int = 50,
service_account_name: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
annotations: Optional[Dict[str, str]] = None,
namespace: Optional[str] = None,
security_context: Optional[WorkflowSecurityContext] = None,
image_pull_secrets: Optional[List[str]] = None,
workflow_template_ref: Optional[str] = None,
ttl_strategy: Optional[TTLStrategy] = None,
volume_claim_gc_strategy: Optional[VolumeClaimGCStrategy] = None,
host_aliases: Optional[List[HostAlias]] = None,
node_selectors: Optional[Dict[str, str]] = None,
affinity: Optional[Affinity] = None,
):
self.name = f'{name.replace("_", "-")}' # RFC1123
self.namespace = namespace or 'default'
self.service = service or WorkflowService()
self.parallelism = parallelism
self.security_context = security_context
self.service_account_name = service_account_name
self.labels = labels
self.annotations = annotations
self.image_pull_secrets = image_pull_secrets
self.workflow_template_ref = workflow_template_ref
self.node_selector = node_selectors
self.ttl_strategy = ttl_strategy
self.affinity = affinity
self.dag_template = IoArgoprojWorkflowV1alpha1DAGTemplate(tasks=[])
self.exit_template = IoArgoprojWorkflowV1alpha1Template(
name='exit-template',
steps=[],
dag=IoArgoprojWorkflowV1alpha1DAGTemplate(tasks=[]),
parallelism=self.parallelism,
)
self.template = IoArgoprojWorkflowV1alpha1Template(
name=self.name,
steps=[],
dag=self.dag_template,
parallelism=self.parallelism,
)
if self.workflow_template_ref:
self.workflow_template = IoArgoprojWorkflowV1alpha1WorkflowTemplateRef(name=self.workflow_template_ref)
self.spec = IoArgoprojWorkflowV1alpha1WorkflowSpec(
workflow_template_ref=self.workflow_template,
entrypoint=self.workflow_template_ref,
volumes=[],
volume_claim_templates=[],
parallelism=self.parallelism,
)
else:
self.spec = IoArgoprojWorkflowV1alpha1WorkflowSpec(
templates=[self.template],
entrypoint=self.name,
volumes=[],
volume_claim_templates=[],
parallelism=self.parallelism,
)
if ttl_strategy:
setattr(self.spec, 'ttl_strategy', ttl_strategy.argo_ttl_strategy)
if volume_claim_gc_strategy:
setattr(
self.spec,
'volume_claim_gc',
IoArgoprojWorkflowV1alpha1VolumeClaimGC(strategy=volume_claim_gc_strategy.value),
)
if host_aliases:
setattr(self.spec, 'host_aliases', [h.argo_host_alias for h in host_aliases])
if self.security_context:
security_context = self.security_context.get_security_context()
setattr(self.spec, 'security_context', security_context)
if self.service_account_name:
setattr(self.template, 'service_account_name', self.service_account_name)
setattr(self.spec, 'service_account_name', self.service_account_name)
if self.image_pull_secrets:
secret_refs = [LocalObjectReference(name=name) for name in self.image_pull_secrets]
setattr(self.spec, 'image_pull_secrets', secret_refs)
if self.affinity:
setattr(self.exit_template, 'affinity', self.affinity.get_spec())
setattr(self.template, 'affinity', self.affinity.get_spec())
self.metadata = ObjectMeta(name=self.name)
if self.labels:
setattr(self.metadata, 'labels', self.labels)
if self.annotations:
setattr(self.metadata, 'annotations', self.annotations)
if self.node_selector:
setattr(self.dag_template, 'node_selector', self.node_selector)
setattr(self.template, 'node_selector', self.node_selector)
setattr(self.exit_template, 'node_selector', self.node_selector)
self.workflow = IoArgoprojWorkflowV1alpha1Workflow(metadata=self.metadata, spec=self.spec)
def add_task(self, t: Task) -> None:
add_task(self, t)
def add_tasks(self, *ts: Task) -> None:
add_tasks(self, *ts)
def add_head(self, t: Task, append: bool = True) -> None:
add_head(self, t, append=append)
def add_tail(self, t: Task, append: bool = True) -> None:
add_tail(self, t, append=append)
def create(self, namespace: Optional[str] = None) -> IoArgoprojWorkflowV1alpha1Workflow:
"""Creates the workflow"""
if namespace is None:
namespace = self.namespace
return self.service.create(self.workflow, namespace)
def on_exit(self, *t: Task) -> None:
on_exit(self, *t)
def delete(self, namespace: Optional[str] = None) -> Tuple[object, int, dict]:
"""Deletes the workflow"""
if namespace is None:
            namespace = self.namespace
return self.service.delete(self.name)
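# Illustrative sketch added for clarity; not part of the original module. It assumes
# an Argo endpoint reachable through a default-configured WorkflowService and a list
# of already-built hera Task objects.
def _example_submit(tasks: List[Task]) -> IoArgoprojWorkflowV1alpha1Workflow:
    w = Workflow('hello-hera', namespace='argo', labels={'owner': 'examples'})
    w.add_tasks(*tasks)
    return w.create()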
|
[
"argo_workflows.models.IoArgoprojWorkflowV1alpha1DAGTemplate",
"argo_workflows.models.IoArgoprojWorkflowV1alpha1VolumeClaimGC",
"hera.workflow_editors.on_exit",
"argo_workflows.models.IoArgoprojWorkflowV1alpha1WorkflowTemplateRef",
"argo_workflows.models.IoArgoprojWorkflowV1alpha1Workflow",
"argo_workflows.models.IoArgoprojWorkflowV1alpha1WorkflowSpec",
"hera.workflow_editors.add_task",
"hera.workflow_editors.add_tail",
"argo_workflows.models.IoArgoprojWorkflowV1alpha1Template",
"hera.workflow_editors.add_head",
"hera.workflow_service.WorkflowService",
"hera.workflow_editors.add_tasks",
"argo_workflows.models.LocalObjectReference",
"argo_workflows.models.ObjectMeta"
] |
[((5366, 5413), 'argo_workflows.models.IoArgoprojWorkflowV1alpha1DAGTemplate', 'IoArgoprojWorkflowV1alpha1DAGTemplate', ([], {'tasks': '[]'}), '(tasks=[])\n', (5403, 5413), False, 'from argo_workflows.models import IoArgoprojWorkflowV1alpha1DAGTemplate, IoArgoprojWorkflowV1alpha1Template, IoArgoprojWorkflowV1alpha1VolumeClaimGC, IoArgoprojWorkflowV1alpha1Workflow, IoArgoprojWorkflowV1alpha1WorkflowSpec, IoArgoprojWorkflowV1alpha1WorkflowTemplateRef, LocalObjectReference, ObjectMeta\n'), ((5677, 5795), 'argo_workflows.models.IoArgoprojWorkflowV1alpha1Template', 'IoArgoprojWorkflowV1alpha1Template', ([], {'name': 'self.name', 'steps': '[]', 'dag': 'self.dag_template', 'parallelism': 'self.parallelism'}), '(name=self.name, steps=[], dag=self.\n dag_template, parallelism=self.parallelism)\n', (5711, 5795), False, 'from argo_workflows.models import IoArgoprojWorkflowV1alpha1DAGTemplate, IoArgoprojWorkflowV1alpha1Template, IoArgoprojWorkflowV1alpha1VolumeClaimGC, IoArgoprojWorkflowV1alpha1Workflow, IoArgoprojWorkflowV1alpha1WorkflowSpec, IoArgoprojWorkflowV1alpha1WorkflowTemplateRef, LocalObjectReference, ObjectMeta\n'), ((7851, 7877), 'argo_workflows.models.ObjectMeta', 'ObjectMeta', ([], {'name': 'self.name'}), '(name=self.name)\n', (7861, 7877), False, 'from argo_workflows.models import IoArgoprojWorkflowV1alpha1DAGTemplate, IoArgoprojWorkflowV1alpha1Template, IoArgoprojWorkflowV1alpha1VolumeClaimGC, IoArgoprojWorkflowV1alpha1Workflow, IoArgoprojWorkflowV1alpha1WorkflowSpec, IoArgoprojWorkflowV1alpha1WorkflowTemplateRef, LocalObjectReference, ObjectMeta\n'), ((8339, 8413), 'argo_workflows.models.IoArgoprojWorkflowV1alpha1Workflow', 'IoArgoprojWorkflowV1alpha1Workflow', ([], {'metadata': 'self.metadata', 'spec': 'self.spec'}), '(metadata=self.metadata, spec=self.spec)\n', (8373, 8413), False, 'from argo_workflows.models import IoArgoprojWorkflowV1alpha1DAGTemplate, IoArgoprojWorkflowV1alpha1Template, IoArgoprojWorkflowV1alpha1VolumeClaimGC, IoArgoprojWorkflowV1alpha1Workflow, IoArgoprojWorkflowV1alpha1WorkflowSpec, IoArgoprojWorkflowV1alpha1WorkflowTemplateRef, LocalObjectReference, ObjectMeta\n'), ((8464, 8481), 'hera.workflow_editors.add_task', 'add_task', (['self', 't'], {}), '(self, t)\n', (8472, 8481), False, 'from hera.workflow_editors import add_head, add_tail, add_task, add_tasks, on_exit\n'), ((8535, 8555), 'hera.workflow_editors.add_tasks', 'add_tasks', (['self', '*ts'], {}), '(self, *ts)\n', (8544, 8555), False, 'from hera.workflow_editors import add_head, add_tail, add_task, add_tasks, on_exit\n'), ((8627, 8659), 'hera.workflow_editors.add_head', 'add_head', (['self', 't'], {'append': 'append'}), '(self, t, append=append)\n', (8635, 8659), False, 'from hera.workflow_editors import add_head, add_tail, add_task, add_tasks, on_exit\n'), ((8731, 8763), 'hera.workflow_editors.add_tail', 'add_tail', (['self', 't'], {'append': 'append'}), '(self, t, append=append)\n', (8739, 8763), False, 'from hera.workflow_editors import add_head, add_tail, add_task, add_tasks, on_exit\n'), ((9073, 9090), 'hera.workflow_editors.on_exit', 'on_exit', (['self', '*t'], {}), '(self, *t)\n', (9080, 9090), False, 'from hera.workflow_editors import add_head, add_tail, add_task, add_tasks, on_exit\n'), ((4876, 4893), 'hera.workflow_service.WorkflowService', 'WorkflowService', ([], {}), '()\n', (4891, 4893), False, 'from hera.workflow_service import WorkflowService\n'), ((5927, 6005), 'argo_workflows.models.IoArgoprojWorkflowV1alpha1WorkflowTemplateRef', 'IoArgoprojWorkflowV1alpha1WorkflowTemplateRef', ([], 
{'name': 'self.workflow_template_ref'}), '(name=self.workflow_template_ref)\n', (5972, 6005), False, 'from argo_workflows.models import IoArgoprojWorkflowV1alpha1DAGTemplate, IoArgoprojWorkflowV1alpha1Template, IoArgoprojWorkflowV1alpha1VolumeClaimGC, IoArgoprojWorkflowV1alpha1Workflow, IoArgoprojWorkflowV1alpha1WorkflowSpec, IoArgoprojWorkflowV1alpha1WorkflowTemplateRef, LocalObjectReference, ObjectMeta\n'), ((6030, 6231), 'argo_workflows.models.IoArgoprojWorkflowV1alpha1WorkflowSpec', 'IoArgoprojWorkflowV1alpha1WorkflowSpec', ([], {'workflow_template_ref': 'self.workflow_template', 'entrypoint': 'self.workflow_template_ref', 'volumes': '[]', 'volume_claim_templates': '[]', 'parallelism': 'self.parallelism'}), '(workflow_template_ref=self.\n workflow_template, entrypoint=self.workflow_template_ref, volumes=[],\n volume_claim_templates=[], parallelism=self.parallelism)\n', (6068, 6231), False, 'from argo_workflows.models import IoArgoprojWorkflowV1alpha1DAGTemplate, IoArgoprojWorkflowV1alpha1Template, IoArgoprojWorkflowV1alpha1VolumeClaimGC, IoArgoprojWorkflowV1alpha1Workflow, IoArgoprojWorkflowV1alpha1WorkflowSpec, IoArgoprojWorkflowV1alpha1WorkflowTemplateRef, LocalObjectReference, ObjectMeta\n'), ((6356, 6520), 'argo_workflows.models.IoArgoprojWorkflowV1alpha1WorkflowSpec', 'IoArgoprojWorkflowV1alpha1WorkflowSpec', ([], {'templates': '[self.template]', 'entrypoint': 'self.name', 'volumes': '[]', 'volume_claim_templates': '[]', 'parallelism': 'self.parallelism'}), '(templates=[self.template],\n entrypoint=self.name, volumes=[], volume_claim_templates=[],\n parallelism=self.parallelism)\n', (6394, 6520), False, 'from argo_workflows.models import IoArgoprojWorkflowV1alpha1DAGTemplate, IoArgoprojWorkflowV1alpha1Template, IoArgoprojWorkflowV1alpha1VolumeClaimGC, IoArgoprojWorkflowV1alpha1Workflow, IoArgoprojWorkflowV1alpha1WorkflowSpec, IoArgoprojWorkflowV1alpha1WorkflowTemplateRef, LocalObjectReference, ObjectMeta\n'), ((5551, 5598), 'argo_workflows.models.IoArgoprojWorkflowV1alpha1DAGTemplate', 'IoArgoprojWorkflowV1alpha1DAGTemplate', ([], {'tasks': '[]'}), '(tasks=[])\n', (5588, 5598), False, 'from argo_workflows.models import IoArgoprojWorkflowV1alpha1DAGTemplate, IoArgoprojWorkflowV1alpha1Template, IoArgoprojWorkflowV1alpha1VolumeClaimGC, IoArgoprojWorkflowV1alpha1Workflow, IoArgoprojWorkflowV1alpha1WorkflowSpec, IoArgoprojWorkflowV1alpha1WorkflowTemplateRef, LocalObjectReference, ObjectMeta\n'), ((6850, 6935), 'argo_workflows.models.IoArgoprojWorkflowV1alpha1VolumeClaimGC', 'IoArgoprojWorkflowV1alpha1VolumeClaimGC', ([], {'strategy': 'volume_claim_gc_strategy.value'}), '(strategy=volume_claim_gc_strategy.value\n )\n', (6889, 6935), False, 'from argo_workflows.models import IoArgoprojWorkflowV1alpha1DAGTemplate, IoArgoprojWorkflowV1alpha1Template, IoArgoprojWorkflowV1alpha1VolumeClaimGC, IoArgoprojWorkflowV1alpha1Workflow, IoArgoprojWorkflowV1alpha1WorkflowSpec, IoArgoprojWorkflowV1alpha1WorkflowTemplateRef, LocalObjectReference, ObjectMeta\n'), ((7513, 7544), 'argo_workflows.models.LocalObjectReference', 'LocalObjectReference', ([], {'name': 'name'}), '(name=name)\n', (7533, 7544), False, 'from argo_workflows.models import IoArgoprojWorkflowV1alpha1DAGTemplate, IoArgoprojWorkflowV1alpha1Template, IoArgoprojWorkflowV1alpha1VolumeClaimGC, IoArgoprojWorkflowV1alpha1Workflow, IoArgoprojWorkflowV1alpha1WorkflowSpec, IoArgoprojWorkflowV1alpha1WorkflowTemplateRef, LocalObjectReference, ObjectMeta\n')]
|
import collections
class Solution:
def isAnagram(self, s: str, t: str) -> bool:
if not s and not t:
return True
if not s or not t:
return False
if len(s) != len(t):
return False
s_hash = collections.defaultdict(int)
t_hash = collections.defaultdict(int)
        # Count character frequencies of both strings in a single pass.
        for i in range(len(s)):
s_hash[s[i]] += 1
t_hash[t[i]] += 1
if len(s_hash) != len(t_hash):
return False
for x in s_hash:
if s_hash[x] != t_hash[x]:
return False
return True
if __name__ == '__main__':
solution = Solution()
s = "anagram"
t = "nagaram"
result = solution.isAnagram(s, t)
print(result)
|
[
"collections.defaultdict"
] |
[((259, 287), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (282, 287), False, 'import collections\n'), ((305, 333), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (328, 333), False, 'import collections\n')]
|
# Copyright (c) 2013 Qubell Inc., http://qubell.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, Qubell.com"
__license__ = "Apache"
__version__ = "1.0.1"
__email__ = "<EMAIL>"
import logging as log
import requests
import simplejson as json
from qubellclient.private.organization import Organization
import qubellclient.tools as tools
class Application(Organization):
"""
    Base class for applications. It creates the application and the requested services and environment.
"""
rawResponse = None
def __parse(self, values):
ret = {}
for val in values:
ret[val['id']] = val['value']
return ret
def __init__(self, context, id=None, manifest=None, name=None):
self.context = context
self.name = name or "test-app-"+tools.rand()
self.manifest = manifest
# Create application
if not id:
newapp = self._create()
assert newapp
self.applicationId = newapp['id']
# Use existing app
else:
self.applicationId = id
self.context.applicationId = self.applicationId
def _create(self):
log.info("Creating application: %s" % self.name)
url = self.context.api+'/organizations/'+self.context.organizationId+'/applications.json'
resp = requests.post(url, files={'path': self.manifest.content}, data={'manifestSource': 'upload', 'name': self.name}, verify=False, cookies=self.context.cookies)
log.debug(resp.text)
if resp.status_code == 200:
return resp.json()
else:
return False
def delete(self):
log.info("Removing application: %s" % self.name)
url = self.context.api+'/organizations/'+self.context.organizationId+'/applications/'+self.applicationId+'.json'
resp = requests.delete(url, verify=False, cookies=self.context.cookies)
log.debug(resp.text)
if resp.status_code == 200:
return True
else:
return False
def clean(self):
instances = self.instances
import instance
if instances:
for ins in instances:
obj = instance.Instance(context=self.context, id=ins['id'])
st = obj.status
                if st not in ['Destroyed', 'Destroying', 'Launching', 'Executing']: # Tests could fail and we can get any state here
log.info("Destroying instance %s" % obj.name)
obj.delete()
assert obj.destroyed(timeout=10)
revisions = self.revisions
import revision
if revisions:
for rev in revisions:
obj = revision.Revision(context=self.context, id=rev['id'])
obj.delete()
return True
def json(self, key=None):
url = self.context.api+'/organizations/'+self.context.organizationId+'/applications/'+self.applicationId+'.json'
resp = requests.get(url, cookies=self.context.cookies, data="{}", verify=False)
log.debug(resp.text)
        self.rawResponse = resp
if resp.status_code == 200:
# return same way public api does
# if key and (key in ['instances', 'environments']):
# return self.__parse(resp.json()[key])
# else:
# return resp.json()[key]
return resp.json()
else:
return None
def __getattr__(self, key):
resp = self.json()
return resp[key] or False
def upload(self, manifest):
log.info("Uploading manifest")
url = self.context.api+'/organizations/'+self.context.organizationId+'/applications/'+self.applicationId+'/manifests.json'
resp = requests.post(url, files={'path': manifest.content}, data={'manifestSource': 'upload', 'name': self.name}, verify=False, cookies=self.context.cookies)
log.debug(resp.text)
self.rawResponse = resp
if resp.status_code == 200:
self.manifest = manifest
return resp.json()
else:
log.error('Cannot upload manifest: %s' % resp.content)
return False
def launch(self, **argv):
url = self.context.api+'/organizations/'+self.context.organizationId+'/applications/'+self.applicationId+'/launch.json'
headers = {'Content-Type': 'application/json'}
data = json.dumps(argv)
resp = requests.post(url, cookies=self.context.cookies, data=data, verify=False, headers=headers)
log.debug(resp.text)
self.rawResponse = resp
if resp.status_code == 200:
instance_id = resp.json()['id']
from qubellclient.private.instance import Instance
return Instance(context=self.context, id=instance_id)
else:
log.error('Unable to launch instance: %s' % resp.content)
return False
def revisionCreate(self, name, instance, parameters=[], version=None):
if not version:
version=self.getManifest()['version']
url = self.context.api+'/organizations/'+self.context.organizationId+'/applications/'+self.applicationId+'/revisions.json'
headers = {'Content-Type': 'application/json'}
payload = json.dumps({ 'name': name,
'parameters': parameters,
'submoduleRevisions': {},
'returnValues': [],
'applicationId': self.context.applicationId,
'applicationName': "api",
'version': version,
'instanceId': instance.instanceId})
resp = requests.post(url, cookies=self.context.cookies, data=payload, verify=False, headers=headers)
log.debug(resp.text)
        self.rawResponse = resp
        if resp.status_code == 200:
import revision
return revision.Revision(context=self.context, name=name, id=resp.json()['id'])
else:
return False
def getManifest(self):
url = self.context.api+'/organizations/'+self.context.organizationId+'/applications/'+self.applicationId+'/refreshManifest.json'
headers = {'Content-Type': 'application/json'}
payload = json.dumps({})
resp = requests.post(url, cookies=self.context.cookies, data=payload, verify=False, headers=headers)
log.debug(resp.text)
        self.rawResponse = resp
if resp.status_code == 200:
return resp.json()
else:
return False
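# Illustrative usage added for clarity; not part of the original module. 'context'
# and 'manifest' are assumed to come from the surrounding qubellclient helpers, and
# any launch() keyword arguments are passed through to the API as JSON.
#
#   app = Application(context, manifest=manifest, name='my-test-app')
#   instance = app.launch()   # returns a private Instance wrapper on success
#   app.clean()               # destroys leftover instances and revisions
#   app.delete()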
|
[
"logging.error",
"logging.debug",
"simplejson.dumps",
"logging.info",
"qubellclient.tools.rand",
"requests.delete",
"instance.Instance",
"revision.Revision",
"requests.get",
"requests.post",
"qubellclient.private.instance.Instance"
] |
[((1706, 1754), 'logging.info', 'log.info', (["('Creating application: %s' % self.name)"], {}), "('Creating application: %s' % self.name)\n", (1714, 1754), True, 'import logging as log\n'), ((1868, 2033), 'requests.post', 'requests.post', (['url'], {'files': "{'path': self.manifest.content}", 'data': "{'manifestSource': 'upload', 'name': self.name}", 'verify': '(False)', 'cookies': 'self.context.cookies'}), "(url, files={'path': self.manifest.content}, data={\n 'manifestSource': 'upload', 'name': self.name}, verify=False, cookies=\n self.context.cookies)\n", (1881, 2033), False, 'import requests\n'), ((2032, 2052), 'logging.debug', 'log.debug', (['resp.text'], {}), '(resp.text)\n', (2041, 2052), True, 'import logging as log\n'), ((2190, 2238), 'logging.info', 'log.info', (["('Removing application: %s' % self.name)"], {}), "('Removing application: %s' % self.name)\n", (2198, 2238), True, 'import logging as log\n'), ((2375, 2439), 'requests.delete', 'requests.delete', (['url'], {'verify': '(False)', 'cookies': 'self.context.cookies'}), '(url, verify=False, cookies=self.context.cookies)\n', (2390, 2439), False, 'import requests\n'), ((2448, 2468), 'logging.debug', 'log.debug', (['resp.text'], {}), '(resp.text)\n', (2457, 2468), True, 'import logging as log\n'), ((3508, 3580), 'requests.get', 'requests.get', (['url'], {'cookies': 'self.context.cookies', 'data': '"""{}"""', 'verify': '(False)'}), "(url, cookies=self.context.cookies, data='{}', verify=False)\n", (3520, 3580), False, 'import requests\n'), ((3589, 3609), 'logging.debug', 'log.debug', (['resp.text'], {}), '(resp.text)\n', (3598, 3609), True, 'import logging as log\n'), ((4107, 4137), 'logging.info', 'log.info', (['"""Uploading manifest"""'], {}), "('Uploading manifest')\n", (4115, 4137), True, 'import logging as log\n'), ((4284, 4438), 'requests.post', 'requests.post', (['url'], {'files': "{'path': manifest.content}", 'data': "{'manifestSource': 'upload', 'name': self.name}", 'verify': '(False)', 'cookies': 'self.context.cookies'}), "(url, files={'path': manifest.content}, data={'manifestSource':\n 'upload', 'name': self.name}, verify=False, cookies=self.context.cookies)\n", (4297, 4438), False, 'import requests\n'), ((4443, 4463), 'logging.debug', 'log.debug', (['resp.text'], {}), '(resp.text)\n', (4452, 4463), True, 'import logging as log\n'), ((4936, 4952), 'simplejson.dumps', 'json.dumps', (['argv'], {}), '(argv)\n', (4946, 4952), True, 'import simplejson as json\n'), ((4968, 5062), 'requests.post', 'requests.post', (['url'], {'cookies': 'self.context.cookies', 'data': 'data', 'verify': '(False)', 'headers': 'headers'}), '(url, cookies=self.context.cookies, data=data, verify=False,\n headers=headers)\n', (4981, 5062), False, 'import requests\n'), ((5067, 5087), 'logging.debug', 'log.debug', (['resp.text'], {}), '(resp.text)\n', (5076, 5087), True, 'import logging as log\n'), ((5792, 6030), 'simplejson.dumps', 'json.dumps', (["{'name': name, 'parameters': parameters, 'submoduleRevisions': {},\n 'returnValues': [], 'applicationId': self.context.applicationId,\n 'applicationName': 'api', 'version': version, 'instanceId': instance.\n instanceId}"], {}), "({'name': name, 'parameters': parameters, 'submoduleRevisions': {\n }, 'returnValues': [], 'applicationId': self.context.applicationId,\n 'applicationName': 'api', 'version': version, 'instanceId': instance.\n instanceId})\n", (5802, 6030), True, 'import simplejson as json\n'), ((6173, 6270), 'requests.post', 'requests.post', (['url'], {'cookies': 'self.context.cookies', 'data': 
'payload', 'verify': '(False)', 'headers': 'headers'}), '(url, cookies=self.context.cookies, data=payload, verify=False,\n headers=headers)\n', (6186, 6270), False, 'import requests\n'), ((6275, 6295), 'logging.debug', 'log.debug', (['resp.text'], {}), '(resp.text)\n', (6284, 6295), True, 'import logging as log\n'), ((6758, 6772), 'simplejson.dumps', 'json.dumps', (['{}'], {}), '({})\n', (6768, 6772), True, 'import simplejson as json\n'), ((6788, 6885), 'requests.post', 'requests.post', (['url'], {'cookies': 'self.context.cookies', 'data': 'payload', 'verify': '(False)', 'headers': 'headers'}), '(url, cookies=self.context.cookies, data=payload, verify=False,\n headers=headers)\n', (6801, 6885), False, 'import requests\n'), ((6890, 6910), 'logging.debug', 'log.debug', (['resp.text'], {}), '(resp.text)\n', (6899, 6910), True, 'import logging as log\n'), ((4627, 4681), 'logging.error', 'log.error', (["('Cannot upload manifest: %s' % resp.content)"], {}), "('Cannot upload manifest: %s' % resp.content)\n", (4636, 4681), True, 'import logging as log\n'), ((5282, 5328), 'qubellclient.private.instance.Instance', 'Instance', ([], {'context': 'self.context', 'id': 'instance_id'}), '(context=self.context, id=instance_id)\n', (5290, 5328), False, 'from qubellclient.private.instance import Instance\n'), ((5355, 5412), 'logging.error', 'log.error', (["('Unable to launch instance: %s' % resp.content)"], {}), "('Unable to launch instance: %s' % resp.content)\n", (5364, 5412), True, 'import logging as log\n'), ((1338, 1350), 'qubellclient.tools.rand', 'tools.rand', ([], {}), '()\n', (1348, 1350), True, 'import qubellclient.tools as tools\n'), ((2727, 2780), 'instance.Instance', 'instance.Instance', ([], {'context': 'self.context', 'id': "ins['id']"}), "(context=self.context, id=ins['id'])\n", (2744, 2780), False, 'import instance\n'), ((3237, 3290), 'revision.Revision', 'revision.Revision', ([], {'context': 'self.context', 'id': "rev['id']"}), "(context=self.context, id=rev['id'])\n", (3254, 3290), False, 'import revision\n'), ((2967, 3012), 'logging.info', 'log.info', (["('Destroying instance %s' % obj.name)"], {}), "('Destroying instance %s' % obj.name)\n", (2975, 3012), True, 'import logging as log\n')]
|
"""Configurations
Copyright (c) 2021 <NAME>
"""
# The MIT License (MIT)
#
# Copyright (c) 2021. <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import os
from config42 import ConfigManager
class Config:
@staticmethod
def _get_cur_dir_config_file_path(environment):
"""Top priority configuration file
:param environment:
:return:
"""
filename = '{}.yml'.format(environment)
return os.path.join(os.getcwd(), filename)
@staticmethod
def _get_user_dir_config_file_path(project_name, environment):
"""Second priority configuration file
:param environment:
:return:
"""
config_directory = '{}'.format(project_name)
filename = '{}/{}.yml'.format(config_directory, environment)
return os.path.join(os.path.expanduser('~'), filename)
@staticmethod
def _get_config_file_path(project_name, environment):
"""Third priority configuration file
:param environment:
:return:
"""
config_directory = '{}'.format(project_name)
filename = '{}/{}.yml'.format(config_directory, environment)
return os.path.join('/var/opt/sc', filename)
@staticmethod
def create(*, project_name, encoding='utf-8', environment=None, defaults=None):
if defaults is None:
defaults = {}
# load defaults from home directory
config_file = Config._get_config_file_path(project_name, "default")
found_config_file = False
if os.path.exists(config_file):
logging.getLogger(__name__).info("loading default configurations from %s", config_file)
config = ConfigManager(path=config_file, encoding=encoding, defaults=defaults)
found_config_file = True
# load environment configurations from environment variables
# fix prefix to be SC
prefix = "SC"
env_config = ConfigManager(prefix=prefix)
key_env = "environment"
if environment is None:
environment = env_config.get(key_env)
if environment is None:
# use production configuration if not specified environment
environment = "production"
logging.getLogger(__name__).info("did not specify environment, using %s", environment)
else:
logging.getLogger(__name__).info("using environment: %s", environment)
# load environment configurations from /var/opt/sc directory
env_config_file = Config._get_config_file_path(project_name, environment)
if os.path.exists(env_config_file):
logging.getLogger(__name__).info("loading environmental configurations from %s", env_config_file)
if not found_config_file:
config = ConfigManager(path=env_config_file, encoding=encoding, defaults=defaults)
found_config_file = True
else:
config.set_many(ConfigManager(path=env_config_file, encoding=encoding).as_dict())
# load environment configurations from user directory
user_config_file = Config._get_user_dir_config_file_path(project_name, environment)
if os.path.exists(user_config_file):
logging.getLogger(__name__).info("loading user directory configurations from %s", user_config_file)
if not found_config_file:
config = ConfigManager(path=user_config_file, encoding=encoding, defaults=defaults)
found_config_file = True
else:
config.set_many(ConfigManager(path=user_config_file, encoding=encoding).as_dict())
# load environment configurations from current directory
current_dir_config_file = Config._get_cur_dir_config_file_path(environment)
if os.path.exists(current_dir_config_file):
logging.getLogger(__name__).info("loading current directory configurations from %s", current_dir_config_file)
logging.getLogger(__name__).info(f"found_config_file: {found_config_file}")
if not found_config_file:
config = ConfigManager(path=current_dir_config_file, encoding=encoding, defaults=defaults)
found_config_file = True
else:
config.set_many(ConfigManager(path=current_dir_config_file, encoding=encoding).as_dict())
if not found_config_file:
config = ConfigManager(defaults=defaults)
config.set_many(env_config.as_dict())
config.set(key_env, environment)
return config
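if __name__ == '__main__':
    # Illustrative usage added for clarity; 'sample-project' and the defaults are
    # hypothetical. With no YAML file and no matching SC-prefixed environment
    # variable, the "production" environment and the supplied defaults are used.
    config = Config.create(project_name='sample-project', defaults={'app': {'debug': False}})
    print(config.get('app.debug'))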
|
[
"config42.ConfigManager",
"os.path.expanduser",
"os.getcwd",
"os.path.exists",
"os.path.join",
"logging.getLogger"
] |
[((2220, 2257), 'os.path.join', 'os.path.join', (['"""/var/opt/sc"""', 'filename'], {}), "('/var/opt/sc', filename)\n", (2232, 2257), False, 'import os\n'), ((2581, 2608), 'os.path.exists', 'os.path.exists', (['config_file'], {}), '(config_file)\n', (2595, 2608), False, 'import os\n'), ((2980, 3008), 'config42.ConfigManager', 'ConfigManager', ([], {'prefix': 'prefix'}), '(prefix=prefix)\n', (2993, 3008), False, 'from config42 import ConfigManager\n'), ((3642, 3673), 'os.path.exists', 'os.path.exists', (['env_config_file'], {}), '(env_config_file)\n', (3656, 3673), False, 'import os\n'), ((4245, 4277), 'os.path.exists', 'os.path.exists', (['user_config_file'], {}), '(user_config_file)\n', (4259, 4277), False, 'import os\n'), ((4848, 4887), 'os.path.exists', 'os.path.exists', (['current_dir_config_file'], {}), '(current_dir_config_file)\n', (4862, 4887), False, 'import os\n'), ((1505, 1516), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1514, 1516), False, 'import os\n'), ((1868, 1891), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1886, 1891), False, 'import os\n'), ((2731, 2800), 'config42.ConfigManager', 'ConfigManager', ([], {'path': 'config_file', 'encoding': 'encoding', 'defaults': 'defaults'}), '(path=config_file, encoding=encoding, defaults=defaults)\n', (2744, 2800), False, 'from config42 import ConfigManager\n'), ((5473, 5505), 'config42.ConfigManager', 'ConfigManager', ([], {'defaults': 'defaults'}), '(defaults=defaults)\n', (5486, 5505), False, 'from config42 import ConfigManager\n'), ((3848, 3921), 'config42.ConfigManager', 'ConfigManager', ([], {'path': 'env_config_file', 'encoding': 'encoding', 'defaults': 'defaults'}), '(path=env_config_file, encoding=encoding, defaults=defaults)\n', (3861, 3921), False, 'from config42 import ConfigManager\n'), ((4454, 4528), 'config42.ConfigManager', 'ConfigManager', ([], {'path': 'user_config_file', 'encoding': 'encoding', 'defaults': 'defaults'}), '(path=user_config_file, encoding=encoding, defaults=defaults)\n', (4467, 4528), False, 'from config42 import ConfigManager\n'), ((5162, 5248), 'config42.ConfigManager', 'ConfigManager', ([], {'path': 'current_dir_config_file', 'encoding': 'encoding', 'defaults': 'defaults'}), '(path=current_dir_config_file, encoding=encoding, defaults=\n defaults)\n', (5175, 5248), False, 'from config42 import ConfigManager\n'), ((2622, 2649), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2639, 2649), False, 'import logging\n'), ((3408, 3435), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3425, 3435), False, 'import logging\n'), ((3687, 3714), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3704, 3714), False, 'import logging\n'), ((4291, 4318), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4308, 4318), False, 'import logging\n'), ((4901, 4928), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4918, 4928), False, 'import logging\n'), ((5023, 5050), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (5040, 5050), False, 'import logging\n'), ((3295, 3322), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3312, 3322), False, 'import logging\n'), ((4013, 4067), 'config42.ConfigManager', 'ConfigManager', ([], {'path': 'env_config_file', 'encoding': 'encoding'}), '(path=env_config_file, encoding=encoding)\n', (4026, 4067), False, 'from config42 import ConfigManager\n'), ((4620, 
4675), 'config42.ConfigManager', 'ConfigManager', ([], {'path': 'user_config_file', 'encoding': 'encoding'}), '(path=user_config_file, encoding=encoding)\n', (4633, 4675), False, 'from config42 import ConfigManager\n'), ((5335, 5397), 'config42.ConfigManager', 'ConfigManager', ([], {'path': 'current_dir_config_file', 'encoding': 'encoding'}), '(path=current_dir_config_file, encoding=encoding)\n', (5348, 5397), False, 'from config42 import ConfigManager\n')]
|
import logging
from pathlib import Path
import hydra
from omegaconf import DictConfig
from omegaconf import OmegaConf
from src.evaluate import evaluate
from src.plot import plot_feature
from src.plot import plot_residual
from src.train import train
logger = logging.getLogger(__name__)
@hydra.main(config_path="../configs", config_name="default")
def main(cfg: DictConfig):
logger.info(OmegaConf.to_yaml(cfg=cfg))
path = Path(hydra.utils.get_original_cwd())
logger.info("Train model")
X_train, y_train, X_test, y_test, model = train(
current_path=path, data_config=cfg.data, model_config=cfg.model
)
logger.info("Evaluate model")
evaluate(
x_train=X_train,
y_train=y_train,
x_test=X_test,
y_test=y_test,
model=model,
)
logger.info("Plot features")
plot_feature(
model=model,
labels=X_train.columns,
image_config=cfg.visualization.image,
)
logger.info("Plot residual")
plot_residual(
x_test=X_test,
y_test=y_test,
model=model,
image_config=cfg.visualization.image,
)
if __name__ == "__main__":
main()
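# Illustrative invocation added for clarity; the script name and override keys are
# hypothetical, since the YAML files under ../configs are not shown here:
#
#   python train_pipeline.py model.alpha=0.5 data.test_size=0.3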
|
[
"omegaconf.OmegaConf.to_yaml",
"hydra.utils.get_original_cwd",
"src.train.train",
"src.plot.plot_residual",
"hydra.main",
"src.evaluate.evaluate",
"src.plot.plot_feature",
"logging.getLogger"
] |
[((261, 288), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (278, 288), False, 'import logging\n'), ((292, 351), 'hydra.main', 'hydra.main', ([], {'config_path': '"""../configs"""', 'config_name': '"""default"""'}), "(config_path='../configs', config_name='default')\n", (302, 351), False, 'import hydra\n'), ((550, 620), 'src.train.train', 'train', ([], {'current_path': 'path', 'data_config': 'cfg.data', 'model_config': 'cfg.model'}), '(current_path=path, data_config=cfg.data, model_config=cfg.model)\n', (555, 620), False, 'from src.train import train\n'), ((674, 763), 'src.evaluate.evaluate', 'evaluate', ([], {'x_train': 'X_train', 'y_train': 'y_train', 'x_test': 'X_test', 'y_test': 'y_test', 'model': 'model'}), '(x_train=X_train, y_train=y_train, x_test=X_test, y_test=y_test,\n model=model)\n', (682, 763), False, 'from src.evaluate import evaluate\n'), ((845, 937), 'src.plot.plot_feature', 'plot_feature', ([], {'model': 'model', 'labels': 'X_train.columns', 'image_config': 'cfg.visualization.image'}), '(model=model, labels=X_train.columns, image_config=cfg.\n visualization.image)\n', (857, 937), False, 'from src.plot import plot_feature\n'), ((1002, 1101), 'src.plot.plot_residual', 'plot_residual', ([], {'x_test': 'X_test', 'y_test': 'y_test', 'model': 'model', 'image_config': 'cfg.visualization.image'}), '(x_test=X_test, y_test=y_test, model=model, image_config=cfg.\n visualization.image)\n', (1015, 1101), False, 'from src.plot import plot_residual\n'), ((395, 421), 'omegaconf.OmegaConf.to_yaml', 'OmegaConf.to_yaml', ([], {'cfg': 'cfg'}), '(cfg=cfg)\n', (412, 421), False, 'from omegaconf import OmegaConf\n'), ((440, 470), 'hydra.utils.get_original_cwd', 'hydra.utils.get_original_cwd', ([], {}), '()\n', (468, 470), False, 'import hydra\n')]
|
import itertools
import operator
from functools import wraps, cached_property
from .._utils import safe_repr
from ..functions import fn
from ..typing import Iterator, Tuple, T, TYPE_CHECKING
if TYPE_CHECKING:
from .. import api as sk # noqa: F401
NOT_GIVEN = object()
_iter = iter
class Iter(Iterator[T]):
"""
Base sidekick iterator class.
This class extends classical Python iterators with a few extra operators.
Sidekick iterators accepts slicing, indexing, concatenation (with the + sign)
repetition (with the * sign) and pretty printing.
Operations that return new iterators (e.g., slicing, concatenation, etc)
consume the data stream. Operations that simply peek at data execute the
generator (and thus may produce side-effects), but cache values and do not
consume data stream.
"""
__slots__ = ("_iterator", "_size_hint")
_iterator: Iterator[T]
if TYPE_CHECKING:
from .. import seq as _mod
_mod = _mod
else:
@cached_property
def _mod(self):
from .. import seq
return seq
def __new__(cls, iterator: Iterator[T], size_hint: int = None):
if isinstance(iterator, Iter):
return iterator
new = object.__new__(cls)
new._iterator = _iter(iterator)
new._size_hint = size_hint
return new
def __next__(self, _next=next):
return _next(self._iterator)
def __iter__(self):
return self._iterator
def __repr__(self):
it = self._iterator
head = []
for _ in range(7):
try:
head.append(next(it))
except StopIteration:
display = map(safe_repr, head)
self._iterator = _iter(head)
self._size_hint = len(head)
break
except Exception as ex:
ex_name = type(ex).__name__
display = [*map(safe_repr, head), f"... ({ex_name})"]
self._iterator = yield_and_raise(head, ex)
self._size_hint = len(head)
break
else:
self._iterator = itertools.chain(_iter(head), it)
display = [*map(safe_repr, head[:-1]), "..."]
data = ", ".join(display)
return f"sk.iter([{data}])"
def __getitem__(self, item, _chain=itertools.chain):
if isinstance(item, int):
if item >= 0:
head = []
for i, x in enumerate(self._iterator):
head.append(x)
if i == item:
self._iterator = _chain(head, self._iterator)
return x
else:
self._iterator = _iter(head)
self._size_hint = len(head)
raise IndexError(item)
else:
raise IndexError("negative indexes are not supported")
elif isinstance(item, slice):
            a, b, c = item.start, item.stop, item.step
return Iter(itertools.islice(self._iterator, a, b, c))
elif callable(item):
return Iter(filter(item, self._iterator), self._size_hint)
elif isinstance(item, list):
if not item:
return []
if isinstance(item[0], bool):
self._iterator, data = itertools.tee(self._iterator, 2)
return [x for key, x in zip(item, data) if key]
elif isinstance(item[0], int):
self._iterator, data = itertools.tee(self._iterator, 2)
data = list(itertools.islice(data, max(item) + 1))
return [data[i] for i in item]
else:
raise TypeError("index must contain only integers or booleans")
else:
size = operator.length_hint(item, -1)
size = None if size == -1 else size
return Iter(compress_or_select(item, self._iterator), size)
def __add__(self, other, _chain=itertools.chain):
if hasattr(other, "__iter__"):
return Iter(_chain(self._iterator, other))
return NotImplemented
def __radd__(self, other, _chain=itertools.chain):
if hasattr(other, "__iter__"):
return Iter(_chain(other, self._iterator))
return NotImplemented
def __iadd__(self, other, _chain=itertools.chain):
        self._iterator = _chain(self._iterator, other)
        return self
def __mul__(self, other):
if isinstance(other, int):
if other < 0:
raise ValueError("cannot multiply by negative integers")
return Iter(cycle_n(self._iterator, other))
try:
data = _iter(other)
except TypeError:
return NotImplemented
        return Iter(itertools.product(self._iterator, data))
def __rmul__(self, other):
if isinstance(other, int):
return self.__mul__(other)
try:
data = _iter(other)
except TypeError:
return NotImplemented
        return Iter(itertools.product(data, self._iterator))
def __rmatmul__(self, func):
if callable(func):
return Iter(map(func, self._iterator), self._size_hint)
return NotImplemented
def __length_hint__(self):
if self._size_hint is None:
return operator.length_hint(self._iterator)
return self._size_hint
#
# Conversion to collections
#
def list(self) -> list:
"""
Convert iterator to list consuming iterator.
Infinite operators do not terminate.
"""
return list(self)
def tuple(self) -> tuple:
"""
Convert iterator to tuple consuming iterator.
Infinite operators do not terminate.
"""
return tuple(self)
def set(self) -> set:
"""
        Convert iterator to set consuming iterator.
Infinite operators do not terminate.
"""
return set(self)
def frozenset(self) -> frozenset:
"""
        Convert iterator to frozenset consuming iterator.
Infinite operators do not terminate.
"""
return frozenset(self)
def str(self) -> str:
"""
Convert iterator to string consuming iterator and concatenating
elements.
Infinite operators do not terminate.
"""
return "".join(self)
def bytes(self) -> str:
"""
Convert iterator to bytes consuming iterator and concatenating
elements.
Infinite operators do not terminate.
"""
return b"".join(self)
#
# API
#
def copy(self) -> "Iter":
"""
        Return a copy of iterator. Consuming the copy does not consume the
original iterator.
Internally, this method uses itertools.tee to perform the copy. If you
        know that the iterator will be consumed, it is faster and more memory
efficient to convert it to a list and produce multiple iterators.
"""
self._iterator, other = itertools.tee(self._iterator, 2)
return Iter(other, self._size_hint)
def tee(self, n=1) -> Tuple["Iter", ...]:
"""
Split iterator into n additional copies.
The copy method is simply an alias to iter.tee(1)[0]
"""
self._iterator, *rest = itertools.tee(self._iterator, n + 1)
n = self._size_hint
return tuple(Iter(it, n) for it in rest)
def peek(self, n: int) -> Tuple:
"""
Peek the first n elements without consuming the iterator.
"""
data = tuple(itertools.islice(self._iterator, n))
self._iterator = itertools.chain(data, self._iterator)
return data
#
# Wrapping the iterator API
#
def cycle_n(seq, n):
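    # Repeat the elements of `seq` n times; the first pass caches every element
    # so the underlying iterator is only consumed once.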
data = []
store = data.append
consumed = False
while n > 0:
if consumed:
yield from data
else:
for x in seq:
store(x)
yield x
if data:
consumed = True
else:
return
n -= 1
def compress(keys, seq):
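    # Yield the elements of `seq` whose corresponding entry in `keys` is truthy
    # (itertools.compress with the argument order flipped).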
for x, pred in zip(seq, keys):
if pred:
yield x
def select(keys, seq):
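    # Yield seq[i] for each index i in `keys`, buffering consumed elements so
    # indices may repeat or arrive out of order.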
data = []
for i in keys:
try:
yield data[i]
except IndexError:
data.extend(itertools.islice(seq, i - len(data) + 1))
yield data[i]
def compress_or_select(keys, seq):
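    # Dispatch on the first key: booleans select elements by mask (compress),
    # integers select them by position (select).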
keys = _iter(keys)
seq = _iter(seq)
try:
key = next(keys)
if key is True:
func = compress
yield next(seq)
elif key is False:
func = compress
next(seq)
elif isinstance(key, int):
func = select
keys = itertools.chain([key], keys)
else:
raise TypeError(f"invalid key: {key!r}")
except StopIteration:
return
yield from func(keys, seq)
@fn
def generator(func):
"""
Decorates generator function to return a sidekick iterator instead of a
regular Python generator.
Examples:
>>> @sk.generator
... def fibonacci():
... x = y = 1
... while True:
... yield x
... x, y = y, x + y
>>> fibonacci()
sk.iter([1, 1, 2, 3, 5, 8, ...])
"""
@fn
@wraps(func)
def gen(*args, **kwargs):
return Iter(func(*args, **kwargs))
return gen
def stop(x=None):
"""
Raise StopIteration with the given argument.
"""
raise StopIteration(x)
def yield_and_raise(data, exc):
"""
Return content from data and then raise exception afterwards.
"""
yield from data
raise exc
fn.generator = staticmethod(generator)
|
[
"operator.length_hint",
"functools.wraps",
"itertools.product",
"itertools.tee",
"itertools.islice",
"itertools.chain"
] |
[((9405, 9416), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (9410, 9416), False, 'from functools import wraps, cached_property\n'), ((7084, 7116), 'itertools.tee', 'itertools.tee', (['self._iterator', '(2)'], {}), '(self._iterator, 2)\n', (7097, 7116), False, 'import itertools\n'), ((7375, 7411), 'itertools.tee', 'itertools.tee', (['self._iterator', '(n + 1)'], {}), '(self._iterator, n + 1)\n', (7388, 7411), False, 'import itertools\n'), ((7701, 7738), 'itertools.chain', 'itertools.chain', (['data', 'self._iterator'], {}), '(data, self._iterator)\n', (7716, 7738), False, 'import itertools\n'), ((4803, 4844), 'itertools.product', 'itertools.product', (['[self._iterator, data]'], {}), '([self._iterator, data])\n', (4820, 4844), False, 'import itertools\n'), ((5077, 5118), 'itertools.product', 'itertools.product', (['[data, self._iterator]'], {}), '([data, self._iterator])\n', (5094, 5118), False, 'import itertools\n'), ((5366, 5402), 'operator.length_hint', 'operator.length_hint', (['self._iterator'], {}), '(self._iterator)\n', (5386, 5402), False, 'import operator\n'), ((7639, 7674), 'itertools.islice', 'itertools.islice', (['self._iterator', 'n'], {}), '(self._iterator, n)\n', (7655, 7674), False, 'import itertools\n'), ((3064, 3105), 'itertools.islice', 'itertools.islice', (['self._iterator', 'a', 'b', 'c'], {}), '(self._iterator, a, b, c)\n', (3080, 3105), False, 'import itertools\n'), ((8819, 8847), 'itertools.chain', 'itertools.chain', (['[key]', 'keys'], {}), '([key], keys)\n', (8834, 8847), False, 'import itertools\n'), ((3836, 3866), 'operator.length_hint', 'operator.length_hint', (['item', '(-1)'], {}), '(item, -1)\n', (3856, 3866), False, 'import operator\n'), ((3378, 3410), 'itertools.tee', 'itertools.tee', (['self._iterator', '(2)'], {}), '(self._iterator, 2)\n', (3391, 3410), False, 'import itertools\n'), ((3557, 3589), 'itertools.tee', 'itertools.tee', (['self._iterator', '(2)'], {}), '(self._iterator, 2)\n', (3570, 3589), False, 'import itertools\n')]
|
import requests
import webbrowser
from os.path import dirname, exists, join, realpath
from typing import List
from PySide6.QtCore import QTimer, Qt, Slot
from PySide6.QtGui import QIcon
from PySide6.QtWidgets import *
from win10toast import ToastNotifier
from pathlib import Path
from productiveware import encryption
from productiveware.client import base_url, check_cookie, get_headers
from productiveware.config import *
from productiveware.widgets.log import LogWidget
from productiveware.widgets.login import LoginWidget
todo_url = f'{base_url}/todo'
test_url = f'{base_url}/api/user'
icon_path = str(Path.cwd().joinpath("productiveware", "widgets", "res", "productiveware.ico"))
toaster = ToastNotifier()
class MainWidget(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle('productiveware')
self.setWindowIcon(
QIcon(join(dirname(realpath(__file__)), 'res/productiveware.png')))
widget = QWidget()
layout = QGridLayout()
# Backend stuff
self.status = QStatusBar()
self.status_refresh = QPushButton('Refresh Connection')
self.sent_no_encrypt_message = False
self.set_connected(self._check_connection())
# Profile specific elements
self.pw_profile = QPushButton('View Todo List')
self.pw_logout = QPushButton('Log out')
# Directory list elements
self.dir_list = QListWidget()
self.dir_add = QPushButton('Add Directory')
self.dir_browse = QPushButton('Browse Directory...')
self.dir_remove = QPushButton('Remove Directory')
for path in get_target_folders():
self.dir_list.addItem(QListWidgetItem(path))
self.old_list = self._get_list_items()
# Encryption/decryption elements
self.decrypt_select = QPushButton('Decrypt file...')
self.decrypt_log = QPushButton('View encryption log...')
self.timer = QTimer()
self.delay = 5000
self.timer.timeout.connect(self.on_timer_timeout)
# Save state elements
self.save_list = QPushButton('Save')
self.save_list.setEnabled(False)
# Directory list events
self.dir_list.itemDoubleClicked.connect(
self.on_dir_list_double_clicked)
self.dir_list.currentItemChanged.connect(self.on_dir_list_item_changed)
# Button events
self.pw_profile.clicked.connect(self.on_pw_profile_clicked)
self.pw_logout.clicked.connect(self.on_pw_logout_clicked)
self.dir_add.clicked.connect(self.on_dir_add_clicked)
self.dir_browse.clicked.connect(self.on_dir_browse_clicked)
self.dir_remove.clicked.connect(self.on_dir_remove_clicked)
self.decrypt_select.clicked.connect(self.on_decrypt_select_clicked)
self.status_refresh.clicked.connect(self.on_status_refresh_clicked)
self.save_list.clicked.connect(self.on_save_list_clicked)
self.decrypt_log.clicked.connect(self.on_decrypt_log_clicked)
layout.addWidget(self.pw_profile, 0, 0, Qt.AlignLeft)
# layout.addWidget(QLabel('Targeted files: '), 0, 1)
# layout.addWidget(QLabel('Encrypted files: '), 0, 2)
layout.addWidget(self.pw_logout, 0, 3, Qt.AlignRight)
layout.addWidget(self.dir_list, 1, 0, 5, 3)
layout.addWidget(self.dir_add, 1, 3)
layout.addWidget(self.dir_browse, 2, 3)
layout.addWidget(self.dir_remove, 3, 3)
layout.addWidget(QLabel('Decryptions earned: '),
4, 3, Qt.AlignBottom)
layout.addWidget(self.decrypt_select, 5, 3)
layout.addWidget(self.status_refresh, 6, 0, Qt.AlignLeft)
layout.addWidget(self.save_list, 6, 2, Qt.AlignRight)
layout.addWidget(self.decrypt_log, 6, 3)
widget.setLayout(layout)
self.setCentralWidget(widget)
self.setStatusBar(self.status)
# Children widgets
self.window_log = LogWidget()
self.window_login = LoginWidget(self)
if not check_cookie():
self.window_login.setFixedSize(300, 150)
self.window_login.show()
else:
self.timer.start(self.delay)
self.resize(800, 500)
self.show()
@Slot()
def on_timer_timeout(self):
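        # Poll the server for overdue todos; encrypt a random file for each
        # missed todo that is not yet marked as encrypted and notify the user.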
try:
response = requests.get(f"{base_url}/api/todos/overdue", headers=get_headers())
except requests.exceptions.ConnectionError:
return
if response.status_code == 200:
for todo in response.json()["todos"]:
if not todo["encrypted"]:
try:
path = encryption.encrypt_random_file()
except RuntimeError:
if not self.sent_no_encrypt_message:
toaster.show_toast("You missed a todo!", f"Since you missed the due date for your todo \"{todo['text']}\", we tried to encrypt one of your files. Lucky for you, we couldn't find anything to encrypt.", icon_path=icon_path, threaded=True)
self.sent_no_encrypt_message = True
else:
toaster.show_toast("You missed a todo!", f"Since you missed the due date for your todo \"{todo['text']}\", we encrypted this file: {path}", icon_path=icon_path, threaded=True)
requests.put(f"{base_url}/api/todos/encrypt", headers=get_headers(), json={"id": todo["_id"]})
self.timer.start(self.delay)
@Slot()
def on_pw_profile_clicked(self):
webbrowser.open(todo_url)
@Slot()
def on_pw_logout_clicked(self):
set_cookie(None)
self.hide()
self.window_login.show()
@Slot()
def on_dir_list_double_clicked(self, item: QListWidgetItem):
item.setFlags(item.flags() | Qt.ItemIsEditable)
@Slot()
def on_dir_list_item_changed(self, current: QListWidgetItem, prev: QListWidgetItem):
new_list = self._get_list_items()
if new_list != self.old_list:
self.save_list.setEnabled(True)
if prev is not None and prev.flags() & Qt.ItemIsEditable != 0:
prev.setFlags(prev.flags() ^ Qt.ItemIsEditable)
@Slot()
def on_dir_add_clicked(self):
self.dir_list.addItem(QListWidgetItem('Double click to edit...'))
@Slot()
def on_dir_browse_clicked(self):
browser = QFileDialog(self)
browser.setFileMode(QFileDialog.Directory)
if browser.exec():
self.dir_list.addItems(browser.selectedFiles())
self.save_list.setEnabled(True)
@Slot()
def on_dir_remove_clicked(self):
current = self.dir_list.currentItem()
if current is not None:
remove_target_folder(current.text())
self.dir_list.takeItem(self.dir_list.row(current))
@Slot()
def on_decrypt_select_clicked(self):
browser = QFileDialog(self, filter='*.pw_encrypt')
browser.setFileMode(QFileDialog.ExistingFiles)
if browser.exec():
for target in browser.selectedFiles():
encryption.decrypt_file(target)
@Slot()
def on_status_refresh_clicked(self):
if self._check_connection():
self.status_refresh.setEnabled(False)
@Slot()
def on_save_list_clicked(self):
items = self._get_list_items()
clear_target_folders()
for item in items:
if not exists(item):
warn = QMessageBox(QMessageBox.Warning, 'Invalid Path', f'The entry "{item}" is invalid.',
QMessageBox.Ok)
warn.show()
return warn.exec()
for item in items:
add_target_folder(item)
self.sent_no_encrypt_message = False
self.save_list.setEnabled(False)
@Slot()
def on_decrypt_log_clicked(self):
self.window_log.resize(700, 400)
self.window_log.show()
def set_connected(self, connected: bool):
if connected:
self.status.setStyleSheet('QStatusBar { color: green; }')
self.status.showMessage('Connected')
else:
self.status.setStyleSheet('QStatusBar { color: red; }')
self.status.showMessage('Disconnected')
def _get_list_items(self) -> List[str]:
items = []
for i in range(self.dir_list.count()):
items.append(self.dir_list.item(i).text())
return items
def _check_connection(self) -> bool:
try:
# Not the greatest solution but it works
requests.get(test_url)
self.set_connected(True)
self.status_refresh.setEnabled(False)
return True
except requests.exceptions.ConnectionError:
self.set_connected(False)
not_connected = QMessageBox(QMessageBox.Critical, 'Unable to Connect',
'The productiveware client was unable to connect to the server. ' +
'Please check your internet connection and click on "Refresh Connection".',
QMessageBox.Ok)
not_connected.show()
not_connected.exec()
return False
|
[
"productiveware.client.get_headers",
"webbrowser.open",
"productiveware.widgets.login.LoginWidget",
"pathlib.Path.cwd",
"os.path.realpath",
"PySide6.QtCore.Slot",
"os.path.exists",
"PySide6.QtCore.QTimer",
"productiveware.encryption.decrypt_file",
"requests.get",
"win10toast.ToastNotifier",
"productiveware.encryption.encrypt_random_file",
"productiveware.client.check_cookie",
"productiveware.widgets.log.LogWidget"
] |
[((698, 713), 'win10toast.ToastNotifier', 'ToastNotifier', ([], {}), '()\n', (711, 713), False, 'from win10toast import ToastNotifier\n'), ((4257, 4263), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (4261, 4263), False, 'from PySide6.QtCore import QTimer, Qt, Slot\n'), ((5512, 5518), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (5516, 5518), False, 'from PySide6.QtCore import QTimer, Qt, Slot\n'), ((5596, 5602), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (5600, 5602), False, 'from PySide6.QtCore import QTimer, Qt, Slot\n'), ((5723, 5729), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (5727, 5729), False, 'from PySide6.QtCore import QTimer, Qt, Slot\n'), ((5857, 5863), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (5861, 5863), False, 'from PySide6.QtCore import QTimer, Qt, Slot\n'), ((6216, 6222), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (6220, 6222), False, 'from PySide6.QtCore import QTimer, Qt, Slot\n'), ((6337, 6343), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (6341, 6343), False, 'from PySide6.QtCore import QTimer, Qt, Slot\n'), ((6606, 6612), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (6610, 6612), False, 'from PySide6.QtCore import QTimer, Qt, Slot\n'), ((6847, 6853), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (6851, 6853), False, 'from PySide6.QtCore import QTimer, Qt, Slot\n'), ((7150, 7156), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (7154, 7156), False, 'from PySide6.QtCore import QTimer, Qt, Slot\n'), ((7291, 7297), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (7295, 7297), False, 'from PySide6.QtCore import QTimer, Qt, Slot\n'), ((7842, 7848), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (7846, 7848), False, 'from PySide6.QtCore import QTimer, Qt, Slot\n'), ((1956, 1964), 'PySide6.QtCore.QTimer', 'QTimer', ([], {}), '()\n', (1962, 1964), False, 'from PySide6.QtCore import QTimer, Qt, Slot\n'), ((3957, 3968), 'productiveware.widgets.log.LogWidget', 'LogWidget', ([], {}), '()\n', (3966, 3968), False, 'from productiveware.widgets.log import LogWidget\n'), ((3997, 4014), 'productiveware.widgets.login.LoginWidget', 'LoginWidget', (['self'], {}), '(self)\n', (4008, 4014), False, 'from productiveware.widgets.login import LoginWidget\n'), ((5564, 5589), 'webbrowser.open', 'webbrowser.open', (['todo_url'], {}), '(todo_url)\n', (5579, 5589), False, 'import webbrowser\n'), ((609, 619), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (617, 619), False, 'from pathlib import Path\n'), ((4031, 4045), 'productiveware.client.check_cookie', 'check_cookie', ([], {}), '()\n', (4043, 4045), False, 'from productiveware.client import base_url, check_cookie, get_headers\n'), ((8590, 8612), 'requests.get', 'requests.get', (['test_url'], {}), '(test_url)\n', (8602, 8612), False, 'import requests\n'), ((7112, 7143), 'productiveware.encryption.decrypt_file', 'encryption.decrypt_file', (['target'], {}), '(target)\n', (7135, 7143), False, 'from productiveware import encryption\n'), ((7451, 7463), 'os.path.exists', 'exists', (['item'], {}), '(item)\n', (7457, 7463), False, 'from os.path import dirname, exists, join, realpath\n'), ((4386, 4399), 'productiveware.client.get_headers', 'get_headers', ([], {}), '()\n', (4397, 4399), False, 'from productiveware.client import base_url, check_cookie, get_headers\n'), ((904, 922), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (912, 922), False, 'from os.path import dirname, exists, join, realpath\n'), ((4660, 4692), 'productiveware.encryption.encrypt_random_file', 
'encryption.encrypt_random_file', ([], {}), '()\n', (4690, 4692), False, 'from productiveware import encryption\n'), ((5428, 5441), 'productiveware.client.get_headers', 'get_headers', ([], {}), '()\n', (5439, 5441), False, 'from productiveware.client import base_url, check_cookie, get_headers\n')]
|
# Generated by Django 3.1 on 2022-02-28 10:11
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MortalityFact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gender', models.IntegerField(choices=[(0, 'Male'), (1, 'Female')])),
('age_group', models.IntegerField(choices=[(0, '0 - 4'), (1, '5 - 9'), (2, '10 - 14'), (3, '15 - 19'), (4, '20 - 24'), (5, '25 - 29'), (6, '30 - 34'), (7, '35 - 39'), (8, '40 - 44'), (9, '45 - 49'), (10, '50 - 54'), (11, '55 - 59'), (12, '60 - 64'), (13, '65 - 69'), (14, '70 - 74'), (15, '75 - 79'), (16, '80 - 84'), (17, '85 - 89'), (18, '90 +')])),
('region', models.IntegerField(choices=[(0, 'Dolnośląskie'), (1, 'Kujawsko-pomorskie'), (2, 'Lubelskie'), (3, 'Lubuskie'), (4, 'Łódzkie'), (5, 'Małopolskie'), (6, 'Mazowieckie'), (7, 'Opolskie'), (8, 'Podkarpackie'), (9, 'Podlaskie'), (10, 'Pomorskie'), (11, 'Śląskie'), (12, 'Świętokrzyskie'), (13, 'Warmińsko-mazurskie'), (14, 'Wielkopolskie'), (15, 'Zachodniopomorskie')])),
('year', models.IntegerField()),
('week', models.IntegerField()),
('deceased_actuals', models.IntegerField()),
],
),
]
|
[
"django.db.models.IntegerField",
"django.db.models.AutoField"
] |
[((307, 400), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (323, 400), False, 'from django.db import migrations, models\n'), ((426, 483), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'Male'), (1, 'Female')]"}), "(choices=[(0, 'Male'), (1, 'Female')])\n", (445, 483), False, 'from django.db import migrations, models\n'), ((516, 867), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, '0 - 4'), (1, '5 - 9'), (2, '10 - 14'), (3, '15 - 19'), (4, '20 - 24'),\n (5, '25 - 29'), (6, '30 - 34'), (7, '35 - 39'), (8, '40 - 44'), (9,\n '45 - 49'), (10, '50 - 54'), (11, '55 - 59'), (12, '60 - 64'), (13,\n '65 - 69'), (14, '70 - 74'), (15, '75 - 79'), (16, '80 - 84'), (17,\n '85 - 89'), (18, '90 +')]"}), "(choices=[(0, '0 - 4'), (1, '5 - 9'), (2, '10 - 14'), (3,\n '15 - 19'), (4, '20 - 24'), (5, '25 - 29'), (6, '30 - 34'), (7,\n '35 - 39'), (8, '40 - 44'), (9, '45 - 49'), (10, '50 - 54'), (11,\n '55 - 59'), (12, '60 - 64'), (13, '65 - 69'), (14, '70 - 74'), (15,\n '75 - 79'), (16, '80 - 84'), (17, '85 - 89'), (18, '90 +')])\n", (535, 867), False, 'from django.db import migrations, models\n'), ((881, 1266), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'Dolnośląskie'), (1, 'Kujawsko-pomorskie'), (2, 'Lubelskie'), (3,\n 'Lubuskie'), (4, 'Łódzkie'), (5, 'Małopolskie'), (6, 'Mazowieckie'), (7,\n 'Opolskie'), (8, 'Podkarpackie'), (9, 'Podlaskie'), (10, 'Pomorskie'),\n (11, 'Śląskie'), (12, 'Świętokrzyskie'), (13, 'Warmińsko-mazurskie'), (\n 14, 'Wielkopolskie'), (15, 'Zachodniopomorskie')]"}), "(choices=[(0, 'Dolnośląskie'), (1, 'Kujawsko-pomorskie'),\n (2, 'Lubelskie'), (3, 'Lubuskie'), (4, 'Łódzkie'), (5, 'Małopolskie'),\n (6, 'Mazowieckie'), (7, 'Opolskie'), (8, 'Podkarpackie'), (9,\n 'Podlaskie'), (10, 'Pomorskie'), (11, 'Śląskie'), (12, 'Świętokrzyskie'\n ), (13, 'Warmińsko-mazurskie'), (14, 'Wielkopolskie'), (15,\n 'Zachodniopomorskie')])\n", (900, 1266), False, 'from django.db import migrations, models\n'), ((1273, 1294), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1292, 1294), False, 'from django.db import migrations, models\n'), ((1322, 1343), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1341, 1343), False, 'from django.db import migrations, models\n'), ((1383, 1404), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1402, 1404), False, 'from django.db import migrations, models\n')]
|
from jinja2 import Environment, PackageLoader
from threading import Timer
import os
from collections import OrderedDict
from IPython.display import IFrame
env = Environment(
loader=PackageLoader('pyhandsontable', 'templates')
)
def generate_html(data, **kwargs):
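    """Render the bundled Handsontable template for a list of rows (dicts or sequences)."""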
renderers = kwargs.pop('renderers', dict())
config = kwargs.pop('config', dict())
if isinstance(data[0], (dict, OrderedDict)):
headers = sum((list(d.keys()) for d in data), list())
headers = [h for i, h in enumerate(headers) if h not in headers[:i]]
config['colHeaders'] = list(headers)
else:
headers = range(len(data[0]))
columns = []
for header in headers:
columnData = {
'data': header,
'renderer': 'jsonRenderer'
}
if header in renderers.keys():
columnData['renderer'] = renderers.get(header)
columns.append(columnData)
template = env.get_template('sheet.html')
return template.render(data=data, columns=columns, config=config, **kwargs)
def view_table(data, width=1000, height=500,
filename='temp.handsontable.html', autodelete=True, **kwargs):
# A TemporaryFile does not work with Jupyter Notebook
try:
with open(filename, 'w') as f:
f.write(generate_html(data=data, width=width, height=height, **kwargs))
return IFrame(filename, width=width, height=height)
finally:
if autodelete:
Timer(5, os.unlink, args=[filename]).start()
|
[
"jinja2.PackageLoader",
"IPython.display.IFrame",
"threading.Timer"
] |
[((186, 230), 'jinja2.PackageLoader', 'PackageLoader', (['"""pyhandsontable"""', '"""templates"""'], {}), "('pyhandsontable', 'templates')\n", (199, 230), False, 'from jinja2 import Environment, PackageLoader\n'), ((1380, 1424), 'IPython.display.IFrame', 'IFrame', (['filename'], {'width': 'width', 'height': 'height'}), '(filename, width=width, height=height)\n', (1386, 1424), False, 'from IPython.display import IFrame\n'), ((1473, 1509), 'threading.Timer', 'Timer', (['(5)', 'os.unlink'], {'args': '[filename]'}), '(5, os.unlink, args=[filename])\n', (1478, 1509), False, 'from threading import Timer\n')]
|
import sys
import socket
try:
import riak
except ImportError:
print("Riak test requested, but riak library not installed. "
"Try 'pip install riak' and try again.")
sys.exit(1)
def riak_check(config):
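    """Return True if the Riak node described by config answers a protobuf ping."""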
host = config.get("host", "localhost")
port = int(config.get("port", 8087))
def_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(1.5)
rc = riak.RiakClient(nodes=[{"host": host, "pb_port": port}])
try:
res = rc.ping()
except:
return False
else:
return True
finally:
rc.close()
socket.setdefaulttimeout(def_timeout)
|
[
"riak.RiakClient",
"socket.setdefaulttimeout",
"socket.getdefaulttimeout",
"sys.exit"
] |
[((326, 352), 'socket.getdefaulttimeout', 'socket.getdefaulttimeout', ([], {}), '()\n', (350, 352), False, 'import socket\n'), ((357, 386), 'socket.setdefaulttimeout', 'socket.setdefaulttimeout', (['(1.5)'], {}), '(1.5)\n', (381, 386), False, 'import socket\n'), ((396, 452), 'riak.RiakClient', 'riak.RiakClient', ([], {'nodes': "[{'host': host, 'pb_port': port}]"}), "(nodes=[{'host': host, 'pb_port': port}])\n", (411, 452), False, 'import riak\n'), ((187, 198), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (195, 198), False, 'import sys\n'), ((589, 626), 'socket.setdefaulttimeout', 'socket.setdefaulttimeout', (['def_timeout'], {}), '(def_timeout)\n', (613, 626), False, 'import socket\n')]
|
from django.shortcuts import render
from django.views.generic import CreateView
from django.core.urlresolvers import reverse_lazy
from .models import User
from .forms import UserAdminCreationForm
class RegisterView(CreateView):
model = User
template_name = 'accounts/register.html'
form_class = UserAdminCreationForm
success_url = reverse_lazy('index')
register = RegisterView.as_view()
|
[
"django.core.urlresolvers.reverse_lazy"
] |
[((338, 359), 'django.core.urlresolvers.reverse_lazy', 'reverse_lazy', (['"""index"""'], {}), "('index')\n", (350, 359), False, 'from django.core.urlresolvers import reverse_lazy\n')]
|
import pytest
from pytorch_lightning import Trainer
from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel
from pyannote.audio.tasks import (
OverlappedSpeechDetection,
Segmentation,
VoiceActivityDetection,
)
from pyannote.database import FileFinder, get_protocol
@pytest.fixture()
def protocol():
return get_protocol(
"Debug.SpeakerDiarization.Debug", preprocessors={"audio": FileFinder()}
)
def test_train_segmentation(protocol):
segmentation = Segmentation(protocol)
model = SimpleSegmentationModel(task=segmentation)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_train_voice_activity_detection(protocol):
voice_activity_detection = VoiceActivityDetection(protocol)
model = SimpleSegmentationModel(task=voice_activity_detection)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_train_overlapped_speech_detection(protocol):
overlapped_speech_detection = OverlappedSpeechDetection(protocol)
model = SimpleSegmentationModel(task=overlapped_speech_detection)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_finetune_with_task_that_does_not_need_setup_for_specs(protocol):
voice_activity_detection = VoiceActivityDetection(protocol)
model = SimpleSegmentationModel(task=voice_activity_detection)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
voice_activity_detection = VoiceActivityDetection(protocol)
model.task = voice_activity_detection
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_finetune_with_task_that_needs_setup_for_specs(protocol):
segmentation = Segmentation(protocol)
model = SimpleSegmentationModel(task=segmentation)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
segmentation = Segmentation(protocol)
model.task = segmentation
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_transfer_with_task_that_does_not_need_setup_for_specs(protocol):
segmentation = Segmentation(protocol)
model = SimpleSegmentationModel(task=segmentation)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
voice_activity_detection = VoiceActivityDetection(protocol)
model.task = voice_activity_detection
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_transfer_with_task_that_needs_setup_for_specs(protocol):
voice_activity_detection = VoiceActivityDetection(protocol)
model = SimpleSegmentationModel(task=voice_activity_detection)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
segmentation = Segmentation(protocol)
model.task = segmentation
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_finetune_freeze_with_task_that_needs_setup_for_specs(protocol):
segmentation = Segmentation(protocol)
model = SimpleSegmentationModel(task=segmentation)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
segmentation = Segmentation(protocol)
model.task = segmentation
model.freeze_up_to("mfcc")
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_finetune_freeze_with_task_that_does_not_need_setup_for_specs(protocol):
vad = VoiceActivityDetection(protocol)
model = SimpleSegmentationModel(task=vad)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
vad = VoiceActivityDetection(protocol)
model.task = vad
model.freeze_up_to("mfcc")
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_transfer_freeze_with_task_that_does_not_need_setup_for_specs(protocol):
segmentation = Segmentation(protocol)
model = SimpleSegmentationModel(task=segmentation)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
voice_activity_detection = VoiceActivityDetection(protocol)
model.task = voice_activity_detection
model.freeze_up_to("mfcc")
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_transfer_freeze_with_task_that_needs_setup_for_specs(protocol):
voice_activity_detection = VoiceActivityDetection(protocol)
model = SimpleSegmentationModel(task=voice_activity_detection)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
segmentation = Segmentation(protocol)
model.task = segmentation
model.freeze_up_to("mfcc")
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
|
[
"pytorch_lightning.Trainer",
"pytest.fixture",
"pyannote.audio.models.segmentation.debug.SimpleSegmentationModel",
"pyannote.audio.tasks.VoiceActivityDetection",
"pyannote.database.FileFinder",
"pyannote.audio.tasks.OverlappedSpeechDetection",
"pyannote.audio.tasks.Segmentation"
] |
[((302, 318), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (316, 318), False, 'import pytest\n'), ((506, 528), 'pyannote.audio.tasks.Segmentation', 'Segmentation', (['protocol'], {}), '(protocol)\n', (518, 528), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((541, 583), 'pyannote.audio.models.segmentation.debug.SimpleSegmentationModel', 'SimpleSegmentationModel', ([], {'task': 'segmentation'}), '(task=segmentation)\n', (564, 583), False, 'from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel\n'), ((598, 624), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (605, 624), False, 'from pytorch_lightning import Trainer\n'), ((732, 764), 'pyannote.audio.tasks.VoiceActivityDetection', 'VoiceActivityDetection', (['protocol'], {}), '(protocol)\n', (754, 764), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((777, 831), 'pyannote.audio.models.segmentation.debug.SimpleSegmentationModel', 'SimpleSegmentationModel', ([], {'task': 'voice_activity_detection'}), '(task=voice_activity_detection)\n', (800, 831), False, 'from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel\n'), ((846, 872), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (853, 872), False, 'from pytorch_lightning import Trainer\n'), ((986, 1021), 'pyannote.audio.tasks.OverlappedSpeechDetection', 'OverlappedSpeechDetection', (['protocol'], {}), '(protocol)\n', (1011, 1021), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((1034, 1091), 'pyannote.audio.models.segmentation.debug.SimpleSegmentationModel', 'SimpleSegmentationModel', ([], {'task': 'overlapped_speech_detection'}), '(task=overlapped_speech_detection)\n', (1057, 1091), False, 'from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel\n'), ((1106, 1132), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (1113, 1132), False, 'from pytorch_lightning import Trainer\n'), ((1263, 1295), 'pyannote.audio.tasks.VoiceActivityDetection', 'VoiceActivityDetection', (['protocol'], {}), '(protocol)\n', (1285, 1295), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((1308, 1362), 'pyannote.audio.models.segmentation.debug.SimpleSegmentationModel', 'SimpleSegmentationModel', ([], {'task': 'voice_activity_detection'}), '(task=voice_activity_detection)\n', (1331, 1362), False, 'from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel\n'), ((1377, 1403), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (1384, 1403), False, 'from pytorch_lightning import Trainer\n'), ((1459, 1491), 'pyannote.audio.tasks.VoiceActivityDetection', 'VoiceActivityDetection', (['protocol'], {}), '(protocol)\n', (1481, 1491), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((1548, 1574), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (1555, 1574), False, 'from pytorch_lightning import Trainer\n'), ((1685, 1707), 'pyannote.audio.tasks.Segmentation', 'Segmentation', (['protocol'], {}), '(protocol)\n', (1697, 1707), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, 
Segmentation, VoiceActivityDetection\n'), ((1720, 1762), 'pyannote.audio.models.segmentation.debug.SimpleSegmentationModel', 'SimpleSegmentationModel', ([], {'task': 'segmentation'}), '(task=segmentation)\n', (1743, 1762), False, 'from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel\n'), ((1777, 1803), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (1784, 1803), False, 'from pytorch_lightning import Trainer\n'), ((1847, 1869), 'pyannote.audio.tasks.Segmentation', 'Segmentation', (['protocol'], {}), '(protocol)\n', (1859, 1869), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((1914, 1940), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (1921, 1940), False, 'from pytorch_lightning import Trainer\n'), ((2060, 2082), 'pyannote.audio.tasks.Segmentation', 'Segmentation', (['protocol'], {}), '(protocol)\n', (2072, 2082), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((2095, 2137), 'pyannote.audio.models.segmentation.debug.SimpleSegmentationModel', 'SimpleSegmentationModel', ([], {'task': 'segmentation'}), '(task=segmentation)\n', (2118, 2137), False, 'from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel\n'), ((2152, 2178), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (2159, 2178), False, 'from pytorch_lightning import Trainer\n'), ((2234, 2266), 'pyannote.audio.tasks.VoiceActivityDetection', 'VoiceActivityDetection', (['protocol'], {}), '(protocol)\n', (2256, 2266), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((2323, 2349), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (2330, 2349), False, 'from pytorch_lightning import Trainer\n'), ((2473, 2505), 'pyannote.audio.tasks.VoiceActivityDetection', 'VoiceActivityDetection', (['protocol'], {}), '(protocol)\n', (2495, 2505), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((2518, 2572), 'pyannote.audio.models.segmentation.debug.SimpleSegmentationModel', 'SimpleSegmentationModel', ([], {'task': 'voice_activity_detection'}), '(task=voice_activity_detection)\n', (2541, 2572), False, 'from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel\n'), ((2587, 2613), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (2594, 2613), False, 'from pytorch_lightning import Trainer\n'), ((2657, 2679), 'pyannote.audio.tasks.Segmentation', 'Segmentation', (['protocol'], {}), '(protocol)\n', (2669, 2679), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((2724, 2750), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (2731, 2750), False, 'from pytorch_lightning import Trainer\n'), ((2869, 2891), 'pyannote.audio.tasks.Segmentation', 'Segmentation', (['protocol'], {}), '(protocol)\n', (2881, 2891), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((2904, 2946), 'pyannote.audio.models.segmentation.debug.SimpleSegmentationModel', 'SimpleSegmentationModel', ([], {'task': 'segmentation'}), '(task=segmentation)\n', (2927, 2946), False, 'from 
pyannote.audio.models.segmentation.debug import SimpleSegmentationModel\n'), ((2961, 2987), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (2968, 2987), False, 'from pytorch_lightning import Trainer\n'), ((3031, 3053), 'pyannote.audio.tasks.Segmentation', 'Segmentation', (['protocol'], {}), '(protocol)\n', (3043, 3053), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((3129, 3155), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (3136, 3155), False, 'from pytorch_lightning import Trainer\n'), ((3273, 3305), 'pyannote.audio.tasks.VoiceActivityDetection', 'VoiceActivityDetection', (['protocol'], {}), '(protocol)\n', (3295, 3305), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((3318, 3351), 'pyannote.audio.models.segmentation.debug.SimpleSegmentationModel', 'SimpleSegmentationModel', ([], {'task': 'vad'}), '(task=vad)\n', (3341, 3351), False, 'from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel\n'), ((3366, 3392), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (3373, 3392), False, 'from pytorch_lightning import Trainer\n'), ((3427, 3459), 'pyannote.audio.tasks.VoiceActivityDetection', 'VoiceActivityDetection', (['protocol'], {}), '(protocol)\n', (3449, 3459), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((3526, 3552), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (3533, 3552), False, 'from pytorch_lightning import Trainer\n'), ((3679, 3701), 'pyannote.audio.tasks.Segmentation', 'Segmentation', (['protocol'], {}), '(protocol)\n', (3691, 3701), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((3714, 3756), 'pyannote.audio.models.segmentation.debug.SimpleSegmentationModel', 'SimpleSegmentationModel', ([], {'task': 'segmentation'}), '(task=segmentation)\n', (3737, 3756), False, 'from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel\n'), ((3771, 3797), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (3778, 3797), False, 'from pytorch_lightning import Trainer\n'), ((3853, 3885), 'pyannote.audio.tasks.VoiceActivityDetection', 'VoiceActivityDetection', (['protocol'], {}), '(protocol)\n', (3875, 3885), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((3973, 3999), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (3980, 3999), False, 'from pytorch_lightning import Trainer\n'), ((4130, 4162), 'pyannote.audio.tasks.VoiceActivityDetection', 'VoiceActivityDetection', (['protocol'], {}), '(protocol)\n', (4152, 4162), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((4175, 4229), 'pyannote.audio.models.segmentation.debug.SimpleSegmentationModel', 'SimpleSegmentationModel', ([], {'task': 'voice_activity_detection'}), '(task=voice_activity_detection)\n', (4198, 4229), False, 'from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel\n'), ((4244, 4270), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (4251, 4270), False, 
'from pytorch_lightning import Trainer\n'), ((4314, 4336), 'pyannote.audio.tasks.Segmentation', 'Segmentation', (['protocol'], {}), '(protocol)\n', (4326, 4336), False, 'from pyannote.audio.tasks import OverlappedSpeechDetection, Segmentation, VoiceActivityDetection\n'), ((4412, 4438), 'pytorch_lightning.Trainer', 'Trainer', ([], {'fast_dev_run': '(True)'}), '(fast_dev_run=True)\n', (4419, 4438), False, 'from pytorch_lightning import Trainer\n'), ((426, 438), 'pyannote.database.FileFinder', 'FileFinder', ([], {}), '()\n', (436, 438), False, 'from pyannote.database import FileFinder, get_protocol\n')]
|
import quadratic_provers as q
data = q.eval_across_field([1, 2, 3, 4], 11)
qproof = q.mk_quadratic_proof(data, 4, 11)
assert q.check_quadratic_proof(data, qproof, 4, 5, 11)
data2 = q.eval_across_field(range(36), 97)
cproof = q.mk_column_proof(data2, 36, 97)
assert q.check_column_proof(data2, cproof, 36, 10, 97)
|
[
"quadratic_provers.mk_column_proof",
"quadratic_provers.mk_quadratic_proof",
"quadratic_provers.check_quadratic_proof",
"quadratic_provers.check_column_proof",
"quadratic_provers.eval_across_field"
] |
[((38, 75), 'quadratic_provers.eval_across_field', 'q.eval_across_field', (['[1, 2, 3, 4]', '(11)'], {}), '([1, 2, 3, 4], 11)\n', (57, 75), True, 'import quadratic_provers as q\n'), ((85, 118), 'quadratic_provers.mk_quadratic_proof', 'q.mk_quadratic_proof', (['data', '(4)', '(11)'], {}), '(data, 4, 11)\n', (105, 118), True, 'import quadratic_provers as q\n'), ((126, 173), 'quadratic_provers.check_quadratic_proof', 'q.check_quadratic_proof', (['data', 'qproof', '(4)', '(5)', '(11)'], {}), '(data, qproof, 4, 5, 11)\n', (149, 173), True, 'import quadratic_provers as q\n'), ((226, 258), 'quadratic_provers.mk_column_proof', 'q.mk_column_proof', (['data2', '(36)', '(97)'], {}), '(data2, 36, 97)\n', (243, 258), True, 'import quadratic_provers as q\n'), ((266, 313), 'quadratic_provers.check_column_proof', 'q.check_column_proof', (['data2', 'cproof', '(36)', '(10)', '(97)'], {}), '(data2, cproof, 36, 10, 97)\n', (286, 313), True, 'import quadratic_provers as q\n')]
|
import logging
from typing import List
import numpy as np
import tensorflow as tf
try:
import tensorflow_probability as tfp
distributions = tfp.distributions
except:
distributions = tf.distributions
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import check_X_y
from sklearn.utils.validation import check_array, check_is_fitted
from spn.algorithms.LearningWrappers import learn_classifier, learn_parametric
from spn.algorithms.MPE import mpe
from spn.gpu.TensorFlow import optimize_tf
from spn.structure.Base import Context, get_nodes_by_type
from spn.structure.leaves.parametric.Parametric import Categorical, Gaussian, Parametric
logger = logging.getLogger(__name__)
class SPNClassifier(BaseEstimator, ClassifierMixin):
"""
:class:`SPNClassifier` wraps the SPN structure learning, tensorflow weight optimization and MPE procedures into a single
    class that follows the sklearn estimator interface. Therefore, :class:`SPNClassifier` is usable in the sklearn framework as an
    estimator in :meth:`sklearn.model_selection.cross_val_score`, :meth:`sklearn.model_selection.GridSearchCV` and more.
"""
def __init__(
self,
parametric_types: List[Parametric] = None,
n_jobs=-1,
tf_optimize_weights=False,
tf_n_epochs=100,
tf_batch_size: int = None,
tf_optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001),
tf_pre_optimization_hook=None,
tf_post_optimization_hook=None,
):
"""
Create an :class:`SPNClassifier`.
Parameters:
parametric_types : List
Parametric types of leaf nodes. If None, all are assumed to be Gaussian
n_jobs : int
Number of parallel jobs for learning the SPN structure
tf_optimize_weights : bool
Optimize weights in tensorflow
tf_n_epochs : int
Number of tensorflow optimization epochs
tf_batch_size : int
Batch size for tensorflow optimization
tf_optimizer
Tensorflow optimizer to use for optimization
tf_pre_optimization_hook
Hook that takes an SPN and returns an SPN before the optimization step
tf_post_optimization_hook
Hook that takes an SPN and returns an SPN after the optimization step
"""
self.n_jobs = n_jobs
self.tf_optimize_weights = tf_optimize_weights
self.tf_n_epochs = tf_n_epochs
self.tf_optimizer = tf_optimizer
self.tf_batch_size = tf_batch_size
self.parametric_types = parametric_types
self.tf_pre_optimization_hook = tf_pre_optimization_hook
self.tf_post_optimization_hook = tf_post_optimization_hook
def fit(self, X, y):
"""
Fit the :class:`SPNClassifier` object.
Parameters
----------
X : np.ndarray
Training variables
y : np.ndarray
Training labels
Returns
-------
SPNClassifier
Fitted classifier
"""
# Check that X and y have correct shape
X, y = check_X_y(X, y, multi_output=True)
# Merge X and y
train_data = np.c_[X, y].astype(np.float32)
        # If no parametric types were given: assume that all leaves are Gaussian
if self.parametric_types is None:
parametric_types = [Gaussian] * X.shape[1] + [Categorical]
else:
parametric_types = self.parametric_types
# Learn classifier
self._spn = learn_classifier(
train_data,
ds_context=Context(parametric_types=parametric_types).add_domains(train_data),
spn_learn_wrapper=learn_parametric,
label_idx=X.shape[1],
cpus=self.n_jobs,
)
# If pre optimization hook has been defined, run now
if self.tf_pre_optimization_hook:
self._spn = self.tf_pre_optimization_hook(self._spn)
# If optimization flag is set: optimize weights in tf
if self.tf_optimize_weights:
self._spn, self.loss = optimize_tf(
spn=self._spn,
data=train_data,
optimizer=self.tf_optimizer,
batch_size=self.tf_batch_size,
epochs=self.tf_n_epochs,
return_loss=True,
)
# If post optimization hook has been defined, run now
if self.tf_post_optimization_hook:
self._spn = self.tf_post_optimization_hook(self._spn)
self.X_ = X
self.y_ = y
# Return the classifier
return self
def predict(self, X):
"""
Make a prediction of the given data.
Parameters
----------
X : np.ndarray
Test data
Returns
-------
np.ndarray
Label predictions for the given test data
"""
# Check is fit had been called
check_is_fitted(self, ["X_", "y_"])
# Input validation
X = check_array(X)
# Classify
n_test = X.shape[0]
y_empty = np.full((n_test, 1), fill_value=np.nan)
data = np.c_[X, y_empty]
data_filled = mpe(self._spn, data)
y_pred = data_filled[:, -1]
return y_pred
def get_params(self, deep=True):
"""Method to make SPNClassifier usable in sklearn procedures such as cross_val_score etc."""
return {
"parametric_types": self.parametric_types,
"n_jobs": self.n_jobs,
"tf_optimize_weights": self.tf_optimize_weights,
"tf_n_epochs": self.tf_n_epochs,
"tf_batch_size": self.tf_batch_size,
"tf_optimizer": self.tf_optimizer,
"tf_pre_optimization_hook": self.tf_pre_optimization_hook,
"tf_post_optimization_hook": self.tf_post_optimization_hook,
}
def set_params(self, **parameters):
"""Method to make SPNClassifier usable in sklearn procedures such as cross_val_score etc."""
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
def classification_categorical_to_tf_graph(
node, data_placeholder=None, log_space=True, variable_dict=None, dtype=np.float32
):
"""
    Fix the categorical-to-tf-graph conversion for classification problems.
    For a binary class label, there will be two categorical leaf nodes in the SPN: one that one-hot encodes the first
    class as [0, 1] and one that encodes the second class as [1, 0].
    Since tf optimizes the log likelihood, these one-hot encoded probabilities will be projected into logspace
which results in log([1,0])=[0, -inf] and therefore NaNs in further computations.
Therefore, this custom method adds a small epsilon, such that the zero probability value in the one-hot vector will
not degrade to negative infinity.
"""
with tf.compat.v1.variable_scope("%s_%s" % (node.__class__.__name__, node.id)):
p = np.array(node.p, dtype=dtype)
# Epsilon to make sure there are no zero values
eps = 1e-20
p += eps
# Renormalize such that the sum over all probabilities is one
p /= np.sum(p)
assert np.all(p > 0), "Probabilities in the class leaf nodes have to be greater than zero but were %s" % p
softmaxInverse = np.log(p / np.max(p)).astype(dtype)
probs = tf.nn.softmax(tf.constant(softmaxInverse))
variable_dict[node] = probs
if log_space:
return distributions.Categorical(probs=probs).log_prob(data_placeholder[:, node.scope[0]])
return distributions.Categorical(probs=probs).prob(data_placeholder[:, node.scope[0]])
|
[
"numpy.full",
"spn.gpu.TensorFlow.optimize_tf",
"numpy.sum",
"tensorflow.compat.v1.variable_scope",
"sklearn.utils.check_X_y",
"numpy.all",
"tensorflow.constant",
"sklearn.utils.validation.check_is_fitted",
"tensorflow.compat.v1.train.AdamOptimizer",
"numpy.max",
"numpy.array",
"spn.structure.Base.Context",
"spn.algorithms.MPE.mpe",
"logging.getLogger",
"sklearn.utils.validation.check_array"
] |
[((685, 712), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (702, 712), False, 'import logging\n'), ((1378, 1431), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (1410, 1431), True, 'import tensorflow as tf\n'), ((3133, 3167), 'sklearn.utils.check_X_y', 'check_X_y', (['X', 'y'], {'multi_output': '(True)'}), '(X, y, multi_output=True)\n', (3142, 3167), False, 'from sklearn.utils import check_X_y\n'), ((4971, 5006), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', "['X_', 'y_']"], {}), "(self, ['X_', 'y_'])\n", (4986, 5006), False, 'from sklearn.utils.validation import check_array, check_is_fitted\n'), ((5047, 5061), 'sklearn.utils.validation.check_array', 'check_array', (['X'], {}), '(X)\n', (5058, 5061), False, 'from sklearn.utils.validation import check_array, check_is_fitted\n'), ((5128, 5167), 'numpy.full', 'np.full', (['(n_test, 1)'], {'fill_value': 'np.nan'}), '((n_test, 1), fill_value=np.nan)\n', (5135, 5167), True, 'import numpy as np\n'), ((5223, 5243), 'spn.algorithms.MPE.mpe', 'mpe', (['self._spn', 'data'], {}), '(self._spn, data)\n', (5226, 5243), False, 'from spn.algorithms.MPE import mpe\n'), ((6941, 7014), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (["('%s_%s' % (node.__class__.__name__, node.id))"], {}), "('%s_%s' % (node.__class__.__name__, node.id))\n", (6968, 7014), True, 'import tensorflow as tf\n'), ((7028, 7057), 'numpy.array', 'np.array', (['node.p'], {'dtype': 'dtype'}), '(node.p, dtype=dtype)\n', (7036, 7057), True, 'import numpy as np\n'), ((7236, 7245), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (7242, 7245), True, 'import numpy as np\n'), ((7262, 7275), 'numpy.all', 'np.all', (['(p > 0)'], {}), '(p > 0)\n', (7268, 7275), True, 'import numpy as np\n'), ((4114, 4264), 'spn.gpu.TensorFlow.optimize_tf', 'optimize_tf', ([], {'spn': 'self._spn', 'data': 'train_data', 'optimizer': 'self.tf_optimizer', 'batch_size': 'self.tf_batch_size', 'epochs': 'self.tf_n_epochs', 'return_loss': '(True)'}), '(spn=self._spn, data=train_data, optimizer=self.tf_optimizer,\n batch_size=self.tf_batch_size, epochs=self.tf_n_epochs, return_loss=True)\n', (4125, 4264), False, 'from spn.gpu.TensorFlow import optimize_tf\n'), ((7454, 7481), 'tensorflow.constant', 'tf.constant', (['softmaxInverse'], {}), '(softmaxInverse)\n', (7465, 7481), True, 'import tensorflow as tf\n'), ((3620, 3662), 'spn.structure.Base.Context', 'Context', ([], {'parametric_types': 'parametric_types'}), '(parametric_types=parametric_types)\n', (3627, 3662), False, 'from spn.structure.Base import Context, get_nodes_by_type\n'), ((7399, 7408), 'numpy.max', 'np.max', (['p'], {}), '(p)\n', (7405, 7408), True, 'import numpy as np\n')]
|
import pygame, random, time
from machine import Machine
import utilities
from equipment import *
from character import Fighter
class Section:
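    # One tile of the production line; may hold a Machine and keeps links to
    # its neighboring sections.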
def __init__(self, pos, prodLine):
#self.image = img
self.prodLine = prodLine
self.tilePos = pos
self.machine = None
self.neighbors = []
class ProductionLine:
def __init__(self, factory, inGate):
print("production line init")
self.fighters = []
self.inGate = inGate
self.outGates = [(0,0), (0,9)]
self.factory = factory
self.debugLayer = pygame.Surface(self.factory.surface.get_rect().size)
self.debugLayer.set_colorkey((255, 0, 255))
self.debugLayer.fill((255, 0, 255))
self.stats = {
"step": 0
}
self.line = {
utilities.tilePosId(self.inGate): Section(self.inGate, self),
}
for s in self.factory.getTilesByLayer("prodLine"):
newSection = Section(s, self)
self.line[utilities.tilePosId(s)] = newSection
# add connections
for section in self.line:
pos = self.line[section].tilePos
#print("section in", pos)
for n in self.neighboringSections(pos):
n.neighbors.append(self.line[section])
self.line[section].neighbors.append(n)
pygame.draw.line(
self.debugLayer,
[242, 132, 45],
utilities.tilePosToScreenPos(48, pos),
utilities.tilePosToScreenPos(48, n.tilePos),
5
)
# add machines to random sections (not on the sides)
for s in self.line:
if self.line[s].tilePos[0] not in [0, 9] and self.line[s].tilePos[1] not in [0, 9]:
if random.randint(0, 100) < 20:
self.line[s].machine = Machine(self.line[s])
def availableDirections(self, fromPos):
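        # Return the sections reachable from the section keyed by fromPos.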
destSections = []
if not fromPos in self.line:
return(destSections)
destSections += self.line[fromPos].neighbors
#print("destinations from", fromPos, len(destSections))
return(destSections)
def neighboringSections(self, pos):
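        # Return the existing line sections orthogonally adjacent to pos.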
neighbors = []
posString = utilities.tilePosId(pos)
if posString in self.line:
for x, y in [[-1, 0], [1, 0], [0, -1], [0, 1]]:
testKey = utilities.tilePosId((pos[0] + x, pos[1] + y))
if testKey in self.line:
n = self.line[testKey]
neighbors.append(n)
#print(" is connected to", len(neighbors))
return(neighbors)
def addFighter(self, newFighter):
tilePos = utilities.screenPosToTilePos(48, newFighter.rect.center)
newFighter.prodLineLastSections = [tilePos]
posString = utilities.tilePosId(tilePos)
newFighter.state = posString
#print(self.stats["step"], "add fighter to factory tile", tilePos)
self.fighters.append(newFighter)
def fightersAt(self, pos):
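        # Return the fighters currently standing on the given tile position.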
posString = utilities.tilePosId(pos)
occupiers = []
for f in self.fighters:
if utilities.screenPosToTilePos(48, f.rect.center) == pos:
occupiers.append(f)
return(occupiers)
def lineAdvance(self):
# move fighters
fightersToGrinder = []
for fighter in self.fighters:
if fighter.state == "IN_MACHINE":
continue
if self.stats["step"] - fighter.timeStamps["move"] < 10 + random.randint(0, 10):
continue
if fighter.prodLineLastSections[-1] in self.outGates:
fightersToGrinder.append(fighter)
for sect in self.availableDirections(fighter.state):
if not sect.tilePos in fighter.prodLineLastSections:
if len(self.fightersAt(sect.tilePos)) == 0:
fighter.state = utilities.tilePosId(sect.tilePos)
fighter.rect.center = utilities.tilePosToScreenPos(48, sect.tilePos)
fighter.timeStamps["move"] = self.stats["step"]
fighter.prodLineLastSections.append(sect.tilePos)
break
  # fightersToGrinder is a fresh local list each call; removing from it while
  # iterating would skip fighters, so only remove from self.fighters
  for f in fightersToGrinder:
   self.fighters.remove(f)
   f.kill()
x, y = utilities.tilePosToScreenPos(48, f.prodLineLastSections[-1])
x = self.factory.grinder.surface.get_width() - 12
y -= 24
self.factory.grinder.fighters.append(Fighter(
world=self.factory.grinder,
team=self.factory.team,
spawnPos=[x, y],
speed=1,
selectedEquipment=[Skin(), Fist()]
))
# step all machines
for s in self.line:
if self.line[s].machine:
self.line[s].machine.step()
def step(self):
self.stats["step"] += 1
self.lineAdvance()
|
[
"random.randint",
"utilities.screenPosToTilePos",
"machine.Machine",
"utilities.tilePosId",
"utilities.tilePosToScreenPos"
] |
[((1886, 1910), 'utilities.tilePosId', 'utilities.tilePosId', (['pos'], {}), '(pos)\n', (1905, 1910), False, 'import utilities\n'), ((2249, 2305), 'utilities.screenPosToTilePos', 'utilities.screenPosToTilePos', (['(48)', 'newFighter.rect.center'], {}), '(48, newFighter.rect.center)\n', (2277, 2305), False, 'import utilities\n'), ((2366, 2394), 'utilities.tilePosId', 'utilities.tilePosId', (['tilePos'], {}), '(tilePos)\n', (2385, 2394), False, 'import utilities\n'), ((2574, 2598), 'utilities.tilePosId', 'utilities.tilePosId', (['pos'], {}), '(pos)\n', (2593, 2598), False, 'import utilities\n'), ((699, 731), 'utilities.tilePosId', 'utilities.tilePosId', (['self.inGate'], {}), '(self.inGate)\n', (718, 731), False, 'import utilities\n'), ((3620, 3680), 'utilities.tilePosToScreenPos', 'utilities.tilePosToScreenPos', (['(48)', 'f.prodLineLastSections[-1]'], {}), '(48, f.prodLineLastSections[-1])\n', (3648, 3680), False, 'import utilities\n'), ((865, 887), 'utilities.tilePosId', 'utilities.tilePosId', (['s'], {}), '(s)\n', (884, 887), False, 'import utilities\n'), ((2005, 2050), 'utilities.tilePosId', 'utilities.tilePosId', (['(pos[0] + x, pos[1] + y)'], {}), '((pos[0] + x, pos[1] + y))\n', (2024, 2050), False, 'import utilities\n'), ((2648, 2695), 'utilities.screenPosToTilePos', 'utilities.screenPosToTilePos', (['(48)', 'f.rect.center'], {}), '(48, f.rect.center)\n', (2676, 2695), False, 'import utilities\n'), ((1216, 1253), 'utilities.tilePosToScreenPos', 'utilities.tilePosToScreenPos', (['(48)', 'pos'], {}), '(48, pos)\n', (1244, 1253), False, 'import utilities\n'), ((1260, 1303), 'utilities.tilePosToScreenPos', 'utilities.tilePosToScreenPos', (['(48)', 'n.tilePos'], {}), '(48, n.tilePos)\n', (1288, 1303), False, 'import utilities\n'), ((1490, 1512), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (1504, 1512), False, 'import pygame, random, time\n'), ((1547, 1568), 'machine.Machine', 'Machine', (['self.line[s]'], {}), '(self.line[s])\n', (1554, 1568), False, 'from machine import Machine\n'), ((2961, 2982), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (2975, 2982), False, 'import pygame, random, time\n'), ((3278, 3311), 'utilities.tilePosId', 'utilities.tilePosId', (['sect.tilePos'], {}), '(sect.tilePos)\n', (3297, 3311), False, 'import utilities\n'), ((3340, 3386), 'utilities.tilePosToScreenPos', 'utilities.tilePosToScreenPos', (['(48)', 'sect.tilePos'], {}), '(48, sect.tilePos)\n', (3368, 3386), False, 'import utilities\n')]
|
import tkinter as tk
from utils.fonts import _getFont
from re import search
Y_OFFSET = 220
PANEL_HEIGHT = 127
PANEL_WIDTH = 140
class ButtonGroup:
def __init__(self, root, label, id, position, buttons, callback=None):
self.id = id
self.root = root
self.x, self.y = position
self.label = label
self.buttons = buttons
self.callback = callback
self._loadPanel()
def _sendPacket(self, tag):
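    # extract the panel number (1 or 2) from the canvas tag and send the mapped command over the game connection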
packet = self.root.udpPacket
panel = int(search(r'([12])L?$', tag)[1])
payload = packet.CommandPacket()
payload.type = packet.COMMAND
payload.command = self.id
payload.panel = self.buttons[panel - 1].upper()
self.root.gameConnection.send(payload)
def toggleButton(self, tag, flag):
if flag:
self.root.itemconfig(tag, image=self.root.btn_on)
else:
self.root.itemconfig(tag, image=self.root.btn_off)
self._sendPacket(tag)
def _loadPanel(self):
btn_off = tk.PhotoImage(file='assets/controls/TextButtonOff.png')
btn_on = tk.PhotoImage(file='assets/controls/TextButtonOn.png')
if not hasattr(self.root, 'btn_off'):
self.root.btn_off = btn_off
self.root.btn_on = btn_on
self.root.addPanel(width=2, height=2, gridPos=(self.x, self.y))
self.root.create_text(145 + PANEL_WIDTH * self.x, Y_OFFSET + 50 + PANEL_HEIGHT * self.y, text=self.label, fill='black', font=_getFont('body'))
self.root.create_image(145 + PANEL_WIDTH * self.x, Y_OFFSET + 105 + PANEL_HEIGHT * self.y, image=self.root.btn_off, tags='{}1'.format(self.id))
self.root.create_image(145 + PANEL_WIDTH * self.x, Y_OFFSET + 180 + PANEL_HEIGHT * self.y, image=self.root.btn_off, tags='{}2'.format(self.id))
self.root.create_text(145 + PANEL_WIDTH * self.x, Y_OFFSET + 105 + PANEL_HEIGHT * self.y, text=self.buttons[0], fill='white', font=_getFont('heading-2s'), tags='{}1L'.format(self.id))
self.root.create_text(145 + PANEL_WIDTH * self.x, Y_OFFSET + 180 + PANEL_HEIGHT * self.y, text=self.buttons[1], fill='white', font=_getFont('heading-2s'), tags='{}2L'.format(self.id))
self.root.tag_bind('{}1'.format(self.id), '<Button-1>', lambda _: self.toggleButton('{}1'.format(self.id), True))
self.root.tag_bind('{}1'.format(self.id), '<ButtonRelease-1>', lambda _: self.toggleButton('{}1'.format(self.id), False))
self.root.tag_bind('{}2'.format(self.id), '<Button-1>', lambda _: self.toggleButton('{}2'.format(self.id), True))
self.root.tag_bind('{}2'.format(self.id), '<ButtonRelease-1>', lambda _: self.toggleButton('{}2'.format(self.id), False))
self.root.tag_bind('{}1L'.format(self.id), '<Button-1>', lambda _: self.toggleButton('{}1'.format(self.id), True))
self.root.tag_bind('{}1L'.format(self.id), '<ButtonRelease-1>', lambda _: self.toggleButton('{}1'.format(self.id), False))
self.root.tag_bind('{}2L'.format(self.id), '<Button-1>', lambda _: self.toggleButton('{}2'.format(self.id), True))
self.root.tag_bind('{}2L'.format(self.id), '<ButtonRelease-1>', lambda _: self.toggleButton('{}2'.format(self.id), False))
|
[
"utils.fonts._getFont",
"tkinter.PhotoImage",
"re.search"
] |
[((942, 997), 'tkinter.PhotoImage', 'tk.PhotoImage', ([], {'file': '"""assets/controls/TextButtonOff.png"""'}), "(file='assets/controls/TextButtonOff.png')\n", (955, 997), True, 'import tkinter as tk\n'), ((1011, 1065), 'tkinter.PhotoImage', 'tk.PhotoImage', ([], {'file': '"""assets/controls/TextButtonOn.png"""'}), "(file='assets/controls/TextButtonOn.png')\n", (1024, 1065), True, 'import tkinter as tk\n'), ((473, 497), 're.search', 'search', (['"""([12])L?$"""', 'tag'], {}), "('([12])L?$', tag)\n", (479, 497), False, 'from re import search\n'), ((1373, 1389), 'utils.fonts._getFont', '_getFont', (['"""body"""'], {}), "('body')\n", (1381, 1389), False, 'from utils.fonts import _getFont\n'), ((1822, 1844), 'utils.fonts._getFont', '_getFont', (['"""heading-2s"""'], {}), "('heading-2s')\n", (1830, 1844), False, 'from utils.fonts import _getFont\n'), ((2010, 2032), 'utils.fonts._getFont', '_getFont', (['"""heading-2s"""'], {}), "('heading-2s')\n", (2018, 2032), False, 'from utils.fonts import _getFont\n')]
|
# Copyright 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from base64 import b64decode
from time import time
from flask import current_app, session, request, redirect, make_response, Blueprint
from auth.drivers.oidc import _validate_basic_auth, _validate_token_auth
from auth.utils.redis_client import RedisClient
bp = Blueprint("root", __name__)
def handle_auth(auth_header: str):
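    # validate Basic or Bearer credentials; successful tokens are cached in Redis so repeat requests skip re-validation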
redis_client = RedisClient()
if redis_client.check_auth_token(auth_header=auth_header):
return make_response("OK", 200)
try:
auth_key, auth_value = auth_header.strip().split(" ")
except ValueError:
return make_response("KO", 401)
else:
if auth_key.lower() == "basic":
username, password = b64decode(auth_value.strip()).decode().split(":", 1)
if _validate_basic_auth(username, password):
redis_client.set_auth_token(auth_header=auth_header)
return make_response("OK", 200)
elif auth_key.lower() == "bearer":
if _validate_token_auth(auth_value):
redis_client.set_auth_token(auth_header=auth_header)
return make_response("OK", 200)
return make_response("KO", 401)
@bp.route("/auth")
def auth():
if "X-Forwarded-Uri" in request.headers and request.headers["X-Forwarded-Uri"].startswith("/static"):
return make_response("OK")
# Check if need to login
target = request.args.get("target")
scope = request.args.get("scope")
for header in ("X-Forwarded-Proto", "X-Forwarded-Host", "X-Forwarded-Port", "X-Forwarded-Uri"):
if header in request.headers:
session[header] = request.headers[header]
if "Authorization" in request.headers:
return handle_auth(auth_header=request.headers.get("Authorization", ""))
if not session.get("auth_attributes") or session["auth_attributes"]["exp"] < int(time()):
return redirect(current_app.config["auth"]["login_handler"], 302)
if not session.get("auth", False) and not current_app.config["global"]["disable_auth"]:
# Redirect to login
return redirect(current_app.config["auth"].get("auth_redirect",
f"{request.base_url}{request.script_root}/login"))
if target is None:
target = "raw"
# Map auth response
response = make_response("OK")
try:
mapper = importlib.import_module(f"auth.mappers.{target}")
response = mapper.auth(scope, response)
except (ImportError, AttributeError, TypeError):
from traceback import format_exc
current_app.logger.error(f"Failed to map auth data {format_exc()}")
return response
@bp.route("/token")
def token():
return redirect(current_app.config["auth"]["token_handler"], 302)
@bp.route("/login")
def login():
return redirect(current_app.config["auth"]["login_handler"], 302)
@bp.route("/logout")
def logout():
to = request.args.get("to")
return redirect(current_app.config["auth"]["logout_handler"] + (f"?to={to}" if to is not None else ""))
|
[
"flask.Blueprint",
"flask.redirect",
"flask.request.args.get",
"importlib.import_module",
"auth.drivers.oidc._validate_basic_auth",
"auth.drivers.oidc._validate_token_auth",
"flask.request.headers.get",
"flask.session.get",
"time.time",
"auth.utils.redis_client.RedisClient",
"traceback.format_exc",
"flask.make_response"
] |
[((865, 892), 'flask.Blueprint', 'Blueprint', (['"""root"""', '__name__'], {}), "('root', __name__)\n", (874, 892), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((949, 962), 'auth.utils.redis_client.RedisClient', 'RedisClient', ([], {}), '()\n', (960, 962), False, 'from auth.utils.redis_client import RedisClient\n'), ((1731, 1755), 'flask.make_response', 'make_response', (['"""KO"""', '(401)'], {}), "('KO', 401)\n", (1744, 1755), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((1972, 1998), 'flask.request.args.get', 'request.args.get', (['"""target"""'], {}), "('target')\n", (1988, 1998), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((2011, 2036), 'flask.request.args.get', 'request.args.get', (['"""scope"""'], {}), "('scope')\n", (2027, 2036), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((2904, 2923), 'flask.make_response', 'make_response', (['"""OK"""'], {}), "('OK')\n", (2917, 2923), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((3284, 3342), 'flask.redirect', 'redirect', (["current_app.config['auth']['token_handler']", '(302)'], {}), "(current_app.config['auth']['token_handler'], 302)\n", (3292, 3342), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((3389, 3447), 'flask.redirect', 'redirect', (["current_app.config['auth']['login_handler']", '(302)'], {}), "(current_app.config['auth']['login_handler'], 302)\n", (3397, 3447), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((3494, 3516), 'flask.request.args.get', 'request.args.get', (['"""to"""'], {}), "('to')\n", (3510, 3516), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((3528, 3629), 'flask.redirect', 'redirect', (["(current_app.config['auth']['logout_handler'] + (f'?to={to}' if to is not\n None else ''))"], {}), "(current_app.config['auth']['logout_handler'] + (f'?to={to}' if to\n is not None else ''))\n", (3536, 3629), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((1041, 1065), 'flask.make_response', 'make_response', (['"""OK"""', '(200)'], {}), "('OK', 200)\n", (1054, 1065), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((1910, 1929), 'flask.make_response', 'make_response', (['"""OK"""'], {}), "('OK')\n", (1923, 1929), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((2462, 2520), 'flask.redirect', 'redirect', (["current_app.config['auth']['login_handler']", '(302)'], {}), "(current_app.config['auth']['login_handler'], 302)\n", (2470, 2520), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((2950, 2999), 'importlib.import_module', 'importlib.import_module', (['f"""auth.mappers.{target}"""'], {}), "(f'auth.mappers.{target}')\n", (2973, 2999), False, 'import importlib\n'), ((1175, 1199), 'flask.make_response', 'make_response', (['"""KO"""', '(401)'], {}), "('KO', 401)\n", (1188, 1199), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((1351, 1391), 'auth.drivers.oidc._validate_basic_auth', '_validate_basic_auth', (['username', 'password'], {}), '(username, password)\n', (1371, 1391), 
False, 'from auth.drivers.oidc import _validate_basic_auth, _validate_token_auth\n'), ((2364, 2394), 'flask.session.get', 'session.get', (['"""auth_attributes"""'], {}), "('auth_attributes')\n", (2375, 2394), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((2532, 2558), 'flask.session.get', 'session.get', (['"""auth"""', '(False)'], {}), "('auth', False)\n", (2543, 2558), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((1485, 1509), 'flask.make_response', 'make_response', (['"""OK"""', '(200)'], {}), "('OK', 200)\n", (1498, 1509), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((1568, 1600), 'auth.drivers.oidc._validate_token_auth', '_validate_token_auth', (['auth_value'], {}), '(auth_value)\n', (1588, 1600), False, 'from auth.drivers.oidc import _validate_basic_auth, _validate_token_auth\n'), ((2311, 2351), 'flask.request.headers.get', 'request.headers.get', (['"""Authorization"""', '""""""'], {}), "('Authorization', '')\n", (2330, 2351), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((2438, 2444), 'time.time', 'time', ([], {}), '()\n', (2442, 2444), False, 'from time import time\n'), ((1694, 1718), 'flask.make_response', 'make_response', (['"""OK"""', '(200)'], {}), "('OK', 200)\n", (1707, 1718), False, 'from flask import current_app, session, request, redirect, make_response, Blueprint\n'), ((3202, 3214), 'traceback.format_exc', 'format_exc', ([], {}), '()\n', (3212, 3214), False, 'from traceback import format_exc\n')]
|
""" render_rgb.py renders obj file to rgb image
Aviable function:
- clear_mash: delete all the mesh in the secene
- scene_setting_init: set scene configurations
- node_setting_init: set node configurations
- render: render rgb image for one obj file and one viewpoint
- render_obj_by_vp_lists: wrapper function for render() render
one obj file by multiple viewpoints
- render_objs_by_one_vp: wrapper function for render() render
multiple obj file by one viewpoint
- init_all: a wrapper function, initialize all configurations
= set_image_path: reset defualt image output folder
author baiyu
"""
import sys
import os
import pickle
import numpy as np
import bpy
from mathutils import Matrix
import argparse
abs_path = os.path.abspath(__file__)
sys.path.append(os.path.dirname(abs_path))
from render_helper import *
from settings import *
from data_config import camera_setting_path, total_view_nums
def clear_mesh():
""" clear all meshes in the secene
"""
for block in bpy.data.meshes:
if block.users == 0:
bpy.data.meshes.remove(block)
for block in bpy.data.materials:
if block.users == 0:
bpy.data.materials.remove(block)
for block in bpy.data.textures:
if block.users == 0:
bpy.data.textures.remove(block)
for block in bpy.data.images:
if block.users == 0:
bpy.data.images.remove(block)
bpy.ops.object.select_all(action='DESELECT')
for obj in bpy.data.objects:
if obj.type == 'MESH' or obj.type == 'EMPTY':
obj.select = True
bpy.ops.object.delete()
def scene_setting_init(use_gpu):
"""initialize blender setting configurations
"""
sce = bpy.context.scene.name
bpy.data.scenes[sce].render.engine = g_engine_type
bpy.data.scenes[sce].cycles.film_transparent = g_use_film_transparent
#output
bpy.data.scenes[sce].render.image_settings.color_mode = g_color_mode
bpy.data.scenes[sce].render.image_settings.color_depth = g_color_depth
bpy.data.scenes[sce].render.image_settings.file_format = g_file_format
bpy.data.scenes[sce].render.use_file_extension = g_use_file_extension
#dimensions
bpy.data.scenes[sce].render.resolution_x = g_resolution_x
bpy.data.scenes[sce].render.resolution_y = g_resolution_y
bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage
if use_gpu:
bpy.data.scenes[sce].render.engine = 'CYCLES' #only cycles engine can use gpu
bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral
bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral
cycles_prefs = bpy.context.user_preferences.addons['cycles'].preferences
for device in cycles_prefs.devices:
if device.type == 'CUDA':
device.use = True
bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
bpy.types.CyclesRenderSettings.device = 'GPU'
bpy.data.scenes[sce].cycles.device = 'GPU'
def node_setting_init():
"""node settings for render rgb images
mainly for compositing the background images
"""
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
for node in tree.nodes:
tree.nodes.remove(node)
image_node = tree.nodes.new('CompositorNodeImage')
scale_node = tree.nodes.new('CompositorNodeScale')
alpha_over_node = tree.nodes.new('CompositorNodeAlphaOver')
render_layer_node = tree.nodes.new('CompositorNodeRLayers')
img_file_output_node = tree.nodes.new('CompositorNodeOutputFile')
depth_file_output_node = tree.nodes.new("CompositorNodeOutputFile")
scale_node.space = g_scale_space
img_file_output_node.format.color_mode = g_rgb_color_mode
img_file_output_node.format.color_depth = g_rgb_color_depth
img_file_output_node.format.file_format = g_rgb_file_format
img_file_output_node.base_path = g_syn_data_folder
depth_file_output_node.format.color_mode = g_depth_color_mode
depth_file_output_node.format.color_depth = g_depth_color_depth
depth_file_output_node.format.file_format = g_depth_file_format
depth_file_output_node.base_path = g_syn_data_folder
links.new(image_node.outputs[0], scale_node.inputs[0])
links.new(scale_node.outputs[0], alpha_over_node.inputs[1])
links.new(render_layer_node.outputs[0], alpha_over_node.inputs[2])
links.new(alpha_over_node.outputs[0], img_file_output_node.inputs[0])
links.new(render_layer_node.outputs['Depth'], depth_file_output_node.inputs[0])
def render(viewpoint, viewpoint_id, rendering_dir):
"""render rgb image and depth maps
    render an object's rgb image from a given camera viewpoint,
    choosing a random image as the background; only one image is
    rendered at a time.
Args:
viewpoint: a vp parameter(contains azimuth,elevation,tilt angles and distance)
viewpoint_id: the index of viewpoint
rendering_dir: path to store camera info
"""
vp = viewpoint
cam_location = camera_location(vp.azimuth, vp.elevation, vp.distance)
cam_rot = camera_rot_XYZEuler(vp.azimuth, vp.elevation, vp.tilt)
cam_obj = bpy.data.objects['Camera']
cam_obj.location[0] = cam_location[0]
cam_obj.location[1] = cam_location[1]
cam_obj.location[2] = cam_location[2]
cam_obj.rotation_euler[0] = cam_rot[0]
cam_obj.rotation_euler[1] = cam_rot[1]
cam_obj.rotation_euler[2] = cam_rot[2]
if g_background_image_path == 'TRANSPARENT':
bpy.context.scene.render.alpha_mode = g_background_image_path
else:
background_images = os.listdir(g_background_image_path)
image_name = random.choice(background_images)
image_path = os.path.join(g_background_image_path, image_name)
image_node = bpy.context.scene.node_tree.nodes[0]
image_node.image = bpy.data.images.load(image_path)
img_file_output_node = bpy.context.scene.node_tree.nodes[4]
img_file_output_node.file_slots[0].path = 'color_###.png' # blender placeholder #
depth_file_output_node = bpy.context.scene.node_tree.nodes[5]
depth_file_output_node.file_slots[0].path = 'depth_###.exr' # blender placeholder #
#start rendering
bpy.context.scene.frame_set(viewpoint_id + 1)
bpy.ops.render.render(write_still=True)
# write camera info
cam_K_file = os.path.join(cam_K_path, 'cam_K.txt')
if (not os.path.isfile(cam_K_file)) or (len(os.listdir(cam_RT_path))<total_view_nums):
K, RT = get_3x4_P_matrix_from_blender(cam_obj)
np.savetxt(cam_K_file, K)
np.savetxt(os.path.join(cam_RT_path, 'cam_RT_{0:03d}.txt'.format(viewpoint_id + 1)), RT)
print('Camera parameters written.')
def render_obj_by_vp_lists(rendering_dir, viewpoints):
""" render one obj file by a given viewpoint list
a wrapper function for render()
Args:
        rendering_dir: a string indicating the rendering path of the model.
viewpoints: an iterable object of vp parameter(contains azimuth,elevation,tilt angles and distance)
"""
    if isinstance(viewpoints, tuple):
        # a single viewpoint namedtuple was passed; wrap it so iteration is per-viewpoint
        viewpoints = [viewpoints]
    try:
        vp_lists = iter(viewpoints)
    except TypeError:
        print("viewpoints is not an iterable object")
        return
for vp_id, vp in enumerate(vp_lists):
set_image_path(rendering_dir)
set_depth_path(rendering_dir)
render(vp, vp_id, rendering_dir)
def render_objs_by_one_vp(obj_pathes, viewpoint):
""" render multiple obj files by a given viewpoint
Args:
        obj_pathes: an iterable object containing multiple
                obj file paths
        viewpoint: a namedtuple object containing azimuth,
                elevation, tilt angles and distance
    """
    if isinstance(obj_pathes, str):
        # a single path was passed; wrap it so the loop below still works
        obj_pathes = [obj_pathes]
    try:
        obj_lists = iter(obj_pathes)
    except TypeError:
        print("obj_pathes is not an iterable object")
        return
for obj_path in obj_lists:
rendering_dir = os.path.join(output_folder, obj_path.split('/')[4])
if not os.path.exists(rendering_dir):
os.makedirs(rendering_dir)
clear_mesh()
bpy.ops.import_scene.obj(filepath=obj_path)
set_image_path(rendering_dir)
set_depth_path(rendering_dir)
render(viewpoint, 1, rendering_dir)
def camera_setting_init():
""" camera settings for renderer
"""
bpy.data.objects['Camera'].rotation_mode = g_rotation_mode
def light_setting_init():
""" light settings for renderer
"""
# Make light just directional, disable shadows.
world = bpy.data.worlds['World']
world.use_nodes = True
# changing these values does affect the render.
bg = world.node_tree.nodes['Background']
bg.inputs[1].default_value = 10.0
def init_all():
"""init everything we need for rendering
an image
"""
scene_setting_init(g_gpu_render_enable)
camera_setting_init()
node_setting_init()
light_setting_init()
def set_image_path(new_path):
""" set image output path to new_path
Args:
new rendered image output path
"""
file_output_node = bpy.context.scene.node_tree.nodes[4]
file_output_node.base_path = new_path
def set_depth_path(new_path):
""" set image output path to new_path
Args:
new rendered depth output path
"""
file_output_node = bpy.context.scene.node_tree.nodes[5]
file_output_node.base_path = new_path
#---------------------------------------------------------------
# 3x4 P matrix from Blender camera
#---------------------------------------------------------------
# BKE_camera_sensor_size
def get_sensor_size(sensor_fit, sensor_x, sensor_y):
if sensor_fit == 'VERTICAL':
return sensor_y
return sensor_x
# BKE_camera_sensor_fit
def get_sensor_fit(sensor_fit, size_x, size_y):
if sensor_fit == 'AUTO':
if size_x >= size_y:
return 'HORIZONTAL'
else:
return 'VERTICAL'
return sensor_fit
# Build intrinsic camera parameters from Blender camera data
#
# See notes on this in
# blender.stackexchange.com/questions/15102/what-is-blenders-camera-projection-matrix-model
# as well as
# https://blender.stackexchange.com/a/120063/3581
def get_calibration_matrix_K_from_blender(camd):
if camd.type != 'PERSP':
raise ValueError('Non-perspective cameras not supported')
scene = bpy.context.scene
f_in_mm = camd.lens
scale = scene.render.resolution_percentage / 100
resolution_x_in_px = scale * scene.render.resolution_x
resolution_y_in_px = scale * scene.render.resolution_y
sensor_size_in_mm = get_sensor_size(camd.sensor_fit, camd.sensor_width, camd.sensor_height)
sensor_fit = get_sensor_fit(
camd.sensor_fit,
scene.render.pixel_aspect_x * resolution_x_in_px,
scene.render.pixel_aspect_y * resolution_y_in_px
)
pixel_aspect_ratio = scene.render.pixel_aspect_y / scene.render.pixel_aspect_x
if sensor_fit == 'HORIZONTAL':
view_fac_in_px = resolution_x_in_px
else:
view_fac_in_px = pixel_aspect_ratio * resolution_y_in_px
pixel_size_mm_per_px = sensor_size_in_mm / f_in_mm / view_fac_in_px
s_u = 1 / pixel_size_mm_per_px
s_v = 1 / pixel_size_mm_per_px / pixel_aspect_ratio
# Parameters of intrinsic calibration matrix K
u_0 = resolution_x_in_px / 2 - camd.shift_x * view_fac_in_px
v_0 = resolution_y_in_px / 2 + camd.shift_y * view_fac_in_px / pixel_aspect_ratio
skew = 0 # only use rectangular pixels
K = Matrix(
((s_u, skew, u_0),
( 0, s_v, v_0),
( 0, 0, 1)))
return K
# Returns camera rotation and translation matrices from Blender.
#
# There are 3 coordinate systems involved:
# 1. The World coordinates: "world"
# - right-handed
# 2. The Blender camera coordinates: "bcam"
# - x is horizontal
# - y is up
# - right-handed: negative z look-at direction
# 3. The desired computer vision camera coordinates: "cv"
# - x is horizontal
# - y is down (to align to the actual pixel coordinates
# used in digital images)
# - right-handed: positive z look-at direction
def get_3x4_RT_matrix_from_blender(cam):
# bcam stands for blender camera
R_blender2shapenet = Matrix(
((1, 0, 0),
(0, 0, -1),
(0, 1, 0)))
R_bcam2cv = Matrix(
((1, 0, 0),
(0, -1, 0),
(0, 0, -1)))
# Transpose since the rotation is object rotation,
# and we want coordinate rotation
# R_world2bcam = cam.rotation_euler.to_matrix().transposed()
# T_world2bcam = -1*R_world2bcam * location
#
# Use matrix_world instead to account for all constraints
location, rotation = cam.matrix_world.decompose()[0:2]
R_world2bcam = rotation.to_matrix().transposed()
# Convert camera location to translation vector used in coordinate changes
# T_world2bcam = -1*R_world2bcam*cam.location
# Use location from matrix_world to account for constraints:
T_world2bcam = -1*R_world2bcam * location
# Build the coordinate transform matrix from world to computer vision camera
R_world2cv = R_bcam2cv*R_world2bcam*R_blender2shapenet
T_world2cv = R_bcam2cv*T_world2bcam
# put into 3x4 matrix
RT = Matrix((
R_world2cv[0][:] + (T_world2cv[0],),
R_world2cv[1][:] + (T_world2cv[1],),
R_world2cv[2][:] + (T_world2cv[2],)
))
return RT
def get_3x4_P_matrix_from_blender(cam):
K = get_calibration_matrix_K_from_blender(cam.data)
RT = get_3x4_RT_matrix_from_blender(cam)
return K, RT
### YOU CAN WRITE YOUR OWN IMPLEMENTATION TO GENERATE DATA
def parse_args():
argv = sys.argv
if "--" not in argv:
argv = [] # as if no args are passed
else:
argv = argv[argv.index("--") + 1:] # get all args after "--"
parser = argparse.ArgumentParser(description='Blender renderer.')
parser.add_argument("dict", type=str,
help="model-view file for rendering.")
args = parser.parse_args(argv)
return args
if __name__ == '__main__':
args = parse_args()
init_all()
result_list = pickle.load(open(args.dict, 'rb'))
cam_K_path = os.path.join(camera_setting_path, 'cam_K')
cam_RT_path = os.path.join(camera_setting_path, 'cam_RT')
if not os.path.exists(cam_K_path):
os.makedirs(cam_K_path)
if not os.path.exists(cam_RT_path):
os.makedirs(cam_RT_path)
for model in result_list:
cat = model.path.split('/')[3]
output_folder = os.path.join(g_syn_data_folder, cat)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
rendering_dir = os.path.join(output_folder, model.path.split('/')[4])
if not os.path.exists(rendering_dir):
os.makedirs(rendering_dir)
if len(os.listdir(rendering_dir)) == 40:
print('Rendering has been done with this model.')
continue
clear_mesh()
bpy.ops.import_scene.obj(filepath=model.path)
render_obj_by_vp_lists(rendering_dir, model.vps)
|
[
"argparse.ArgumentParser",
"bpy.context.scene.frame_set",
"os.path.isfile",
"bpy.data.textures.remove",
"bpy.ops.import_scene.obj",
"os.path.join",
"bpy.ops.object.select_all",
"os.path.abspath",
"os.path.dirname",
"numpy.savetxt",
"os.path.exists",
"bpy.data.meshes.remove",
"bpy.ops.object.delete",
"bpy.data.materials.remove",
"bpy.ops.render.render",
"bpy.data.images.remove",
"os.listdir",
"os.makedirs",
"bpy.data.images.load",
"mathutils.Matrix"
] |
[((770, 795), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (785, 795), False, 'import os\n'), ((812, 837), 'os.path.dirname', 'os.path.dirname', (['abs_path'], {}), '(abs_path)\n', (827, 837), False, 'import os\n'), ((1457, 1501), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', ([], {'action': '"""DESELECT"""'}), "(action='DESELECT')\n", (1482, 1501), False, 'import bpy\n'), ((1623, 1646), 'bpy.ops.object.delete', 'bpy.ops.object.delete', ([], {}), '()\n', (1644, 1646), False, 'import bpy\n'), ((6285, 6330), 'bpy.context.scene.frame_set', 'bpy.context.scene.frame_set', (['(viewpoint_id + 1)'], {}), '(viewpoint_id + 1)\n', (6312, 6330), False, 'import bpy\n'), ((6335, 6374), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {'write_still': '(True)'}), '(write_still=True)\n', (6356, 6374), False, 'import bpy\n'), ((6417, 6454), 'os.path.join', 'os.path.join', (['cam_K_path', '"""cam_K.txt"""'], {}), "(cam_K_path, 'cam_K.txt')\n", (6429, 6454), False, 'import os\n'), ((11610, 11662), 'mathutils.Matrix', 'Matrix', (['((s_u, skew, u_0), (0, s_v, v_0), (0, 0, 1))'], {}), '(((s_u, skew, u_0), (0, s_v, v_0), (0, 0, 1)))\n', (11616, 11662), False, 'from mathutils import Matrix\n'), ((12369, 12411), 'mathutils.Matrix', 'Matrix', (['((1, 0, 0), (0, 0, -1), (0, 1, 0))'], {}), '(((1, 0, 0), (0, 0, -1), (0, 1, 0)))\n', (12375, 12411), False, 'from mathutils import Matrix\n'), ((12456, 12499), 'mathutils.Matrix', 'Matrix', (['((1, 0, 0), (0, -1, 0), (0, 0, -1))'], {}), '(((1, 0, 0), (0, -1, 0), (0, 0, -1)))\n', (12462, 12499), False, 'from mathutils import Matrix\n'), ((13371, 13495), 'mathutils.Matrix', 'Matrix', (['(R_world2cv[0][:] + (T_world2cv[0],), R_world2cv[1][:] + (T_world2cv[1],), \n R_world2cv[2][:] + (T_world2cv[2],))'], {}), '((R_world2cv[0][:] + (T_world2cv[0],), R_world2cv[1][:] + (T_world2cv\n [1],), R_world2cv[2][:] + (T_world2cv[2],)))\n', (13377, 13495), False, 'from mathutils import Matrix\n'), ((13961, 14017), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Blender renderer."""'}), "(description='Blender renderer.')\n", (13984, 14017), False, 'import argparse\n'), ((14315, 14357), 'os.path.join', 'os.path.join', (['camera_setting_path', '"""cam_K"""'], {}), "(camera_setting_path, 'cam_K')\n", (14327, 14357), False, 'import os\n'), ((14376, 14419), 'os.path.join', 'os.path.join', (['camera_setting_path', '"""cam_RT"""'], {}), "(camera_setting_path, 'cam_RT')\n", (14388, 14419), False, 'import os\n'), ((5674, 5709), 'os.listdir', 'os.listdir', (['g_background_image_path'], {}), '(g_background_image_path)\n', (5684, 5709), False, 'import os\n'), ((5785, 5834), 'os.path.join', 'os.path.join', (['g_background_image_path', 'image_name'], {}), '(g_background_image_path, image_name)\n', (5797, 5834), False, 'import os\n'), ((5920, 5952), 'bpy.data.images.load', 'bpy.data.images.load', (['image_path'], {}), '(image_path)\n', (5940, 5952), False, 'import bpy\n'), ((6609, 6634), 'numpy.savetxt', 'np.savetxt', (['cam_K_file', 'K'], {}), '(cam_K_file, K)\n', (6619, 6634), True, 'import numpy as np\n'), ((8227, 8270), 'bpy.ops.import_scene.obj', 'bpy.ops.import_scene.obj', ([], {'filepath': 'obj_path'}), '(filepath=obj_path)\n', (8251, 8270), False, 'import bpy\n'), ((14431, 14457), 'os.path.exists', 'os.path.exists', (['cam_K_path'], {}), '(cam_K_path)\n', (14445, 14457), False, 'import os\n'), ((14467, 14490), 'os.makedirs', 'os.makedirs', (['cam_K_path'], {}), '(cam_K_path)\n', (14478, 14490), False, 'import os\n'), 
((14502, 14529), 'os.path.exists', 'os.path.exists', (['cam_RT_path'], {}), '(cam_RT_path)\n', (14516, 14529), False, 'import os\n'), ((14539, 14563), 'os.makedirs', 'os.makedirs', (['cam_RT_path'], {}), '(cam_RT_path)\n', (14550, 14563), False, 'import os\n'), ((14658, 14694), 'os.path.join', 'os.path.join', (['g_syn_data_folder', 'cat'], {}), '(g_syn_data_folder, cat)\n', (14670, 14694), False, 'import os\n'), ((15105, 15150), 'bpy.ops.import_scene.obj', 'bpy.ops.import_scene.obj', ([], {'filepath': 'model.path'}), '(filepath=model.path)\n', (15129, 15150), False, 'import bpy\n'), ((1094, 1123), 'bpy.data.meshes.remove', 'bpy.data.meshes.remove', (['block'], {}), '(block)\n', (1116, 1123), False, 'import bpy\n'), ((1203, 1235), 'bpy.data.materials.remove', 'bpy.data.materials.remove', (['block'], {}), '(block)\n', (1228, 1235), False, 'import bpy\n'), ((1314, 1345), 'bpy.data.textures.remove', 'bpy.data.textures.remove', (['block'], {}), '(block)\n', (1338, 1345), False, 'import bpy\n'), ((1422, 1451), 'bpy.data.images.remove', 'bpy.data.images.remove', (['block'], {}), '(block)\n', (1444, 1451), False, 'import bpy\n'), ((6467, 6493), 'os.path.isfile', 'os.path.isfile', (['cam_K_file'], {}), '(cam_K_file)\n', (6481, 6493), False, 'import os\n'), ((8127, 8156), 'os.path.exists', 'os.path.exists', (['rendering_dir'], {}), '(rendering_dir)\n', (8141, 8156), False, 'import os\n'), ((8170, 8196), 'os.makedirs', 'os.makedirs', (['rendering_dir'], {}), '(rendering_dir)\n', (8181, 8196), False, 'import os\n'), ((14710, 14739), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (14724, 14739), False, 'import os\n'), ((14753, 14779), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (14764, 14779), False, 'import os\n'), ((14874, 14903), 'os.path.exists', 'os.path.exists', (['rendering_dir'], {}), '(rendering_dir)\n', (14888, 14903), False, 'import os\n'), ((14917, 14943), 'os.makedirs', 'os.makedirs', (['rendering_dir'], {}), '(rendering_dir)\n', (14928, 14943), False, 'import os\n'), ((6503, 6526), 'os.listdir', 'os.listdir', (['cam_RT_path'], {}), '(cam_RT_path)\n', (6513, 6526), False, 'import os\n'), ((14959, 14984), 'os.listdir', 'os.listdir', (['rendering_dir'], {}), '(rendering_dir)\n', (14969, 14984), False, 'import os\n')]
|
'''read ENVI/raw binary format: dimensions from the header, data from the .bin file,
then segment the image using flood-fill segmentation'''
import os
import sys
import pickle
import numpy as np
from flood import flood
import matplotlib.pyplot as plt
from dist import normalize, to_list, centroid
def read_hdr(hdr): # read the image dimensions
cols, rows, bands = 0, 0, 0
for line in open(hdr).readlines():
chunks = line.strip().split('=')
try: # pull off two chunks delimited by '='
f, g = [x.strip() for x in chunks[0:2]]
if f == 'samples':
cols = g
if f == 'lines':
rows = g
if f == 'bands':
bands = g
except:
pass
return [int(x) for x in [cols, rows, bands]] # string to int
def read_float(fn): # read the raw binary file
return np.fromfile(fn, dtype=np.float32) / 255. # put data in range [0, 1]
'''pixel @ (row, col) = (i, j):
npx = nrow * ncol # number of pixels in image
red value: dat[ i * ncol + j]
grn value: dat[ npx + i * ncol + j]
blu value: dat[2 * npx + i * ncol + j]'''
def plot(dat, rows, cols, bands, file_name): # plot "raw binary" image
dat = dat.reshape((bands, rows * cols))
rgb = np.zeros((rows, cols, bands))
for i in range(bands):
rgb[:, :, i] = dat[i, :].reshape((rows, cols))
plt.imshow(rgb)
plt.show() # might uncomment this to zoom in to determine line numbers
plt.savefig(file_name)
plt.close()
class image:
def __init__(self, fn=None):
if fn:
self.fn = fn
self.load()
def load(self):
self.cols, self.rows, self.bands = read_hdr(self.fn[:-4] + '.hdr')
self.dat, self.npx = read_float(self.fn), self.rows * self.cols
plot(self.dat, self.rows, self.cols, self.bands, self.fn[:-4] + '.png')
def png(self):
if type(self.dat) == list:
self.dat = np.array(self.dat)
plot(self.dat, self.rows, self.cols, self.bands, self.fn + '.png')
def gather_points(self): # list points for each label
self.points = [[] for i in range(self.next_label)]
for i in range(self.rows):
for j in range(self.cols):
ix = i * self.cols + j # linear index
if self.labels[ix] > 0: # skip background
label = self.labels[ix] # label this point
self.points[label] += [[i, j]]
c = {} # count the number of pixels per segment
for point in self.points:
n = len(point)
c[n] = (c[n] + 1) if (n in c) else 1
counts = [[k, c[k]] for k in c] # sort the counts
counts.sort()
ffn = self.fn + '_seg_count.png'
if not os.path.exists(ffn):
print('+w ' + ffn)
plt.figure(figsize=(8, 8))
fig = plt.barh([str(x[0]) for x in counts],
[str(x[1]) for x in counts])
plt.title("Pixel-count vs. number of segments w that count " +
"(total segments: " + str(len(self.points)) + ")")
plt.xlabel("Number of segments with a given pixel count")
plt.ylabel("Pixel-count for a segment (total pixel counts = " +
str(len(counts)) + ")")
plt.tight_layout()
plt.savefig(ffn)
plt.close()
def segment(self, flood_lines=None, use_normalize=False):
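        # flood-fill the image into labelled segments, then write per-segment plots, centroids and pickles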
print('segment ' + self.fn)
self.name = self.fn[:-4]
a = os.system('mkdir -p ' + self.name)
self.rgb = [[self.dat[i], # format data into list of rgb tuples
self.dat[self.npx + i],
self.dat[2 * self.npx + i]] for i in range(0, self.npx)]
c = {} # count rgb values
for x in self.rgb:
x = str(x)
c[x] = c[x] + 1 if x in c else 1
ffn = self.fn + '_rgb_count.png'
if not os.path.exists(ffn):
plt.figure()
plt.bar(c.keys(), np.log(list(c.values())) / np.log(10.))
plt.title("Log of count of color values")
print('+w ' + ffn)
plt.savefig(ffn)
plt.close()
counts = [[c[k], k] for k in c]
counts.sort()
self.max_color = counts[-1][1] # assume most-prevalent col is bg
if sys.getrecursionlimit() < self.npx: # increase recursion limit
sys.setrecursionlimit(self.npx)
# labels for segmentation
self.labels = [0 for i in range(self.npx)] # 0 == unlabelled!
self.next_label = 1
r_i = flood_lines if flood_lines else range(self.rows)
for i in r_i:
for j in range(self.cols):
flood(self, i, j)
self.gather_points() # list (i,j) points by segment
fn = None
is_truth = (self.name == 'truth') # is this truth data?
truth = None
if is_truth:
truth = [x for x in open('truth_chars.txt').read()]
for pi in range(len(self.points)): # plot image rep. of each truth
point = self.points[pi]
if pi > 0: # 0 is bg / unlabelled
try:
ns = truth[pi - 1] if is_truth else str(pi)
fn = self.name + os.path.sep + ns + '.png'
if not os.path.exists(fn):
plt.figure()
plt.scatter([x[1] for x in point],
[-x[0] for x in point])
plt.title(ns)
print('+w ' + fn)
if use_normalize:
plt.xlim([-.5, self.cols - .5])
plt.ylim([-(self.rows - .5), .5])
plt.xlabel('col ix')
plt.ylabel('-row ix')
plt.savefig(fn)
plt.close()
fn = self.name + os.path.sep + ns + '.centroid'
if not os.path.exists(fn):
print(' +w ' + fn)
xL, yL = to_list(point)
cX, cY = centroid(xL, yL)
open(fn, 'wb').write((str(cX) + ' ' +
str(cY)).encode())
# nb run cleanup.py before changing truth inputs
fn = self.name + os.path.sep + ns + '.p'
if not os.path.exists(fn):
print(' +w ' + fn)
pickle.dump(point, open(fn, 'wb'))
except:
pass # don't plot / save the background
if __name__ == "__main__": # example image data to demonstrate floodfill
args = sys.argv
if len(args) < 2:
dat = [0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0,
0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0]
a = image()
a.dat, a.rows, a.cols, a.bands = dat, 4, 4, 3
a.npx = a.rows * a.cols
a.fn = '4x4.bin'
a.png()
a.segment(use_normalize=False)
else:
        # image() only takes a filename; the row indices are flood seed lines for segment()
        a = image('truth.bin')
        a.segment(flood_lines=[745, 838, 932])
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"sys.setrecursionlimit",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.close",
"os.path.exists",
"dist.to_list",
"dist.centroid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"os.system",
"matplotlib.pyplot.ylabel",
"sys.getrecursionlimit",
"matplotlib.pyplot.xlim",
"numpy.log",
"numpy.fromfile",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"numpy.array",
"flood.flood",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((1278, 1307), 'numpy.zeros', 'np.zeros', (['(rows, cols, bands)'], {}), '((rows, cols, bands))\n', (1286, 1307), True, 'import numpy as np\n'), ((1394, 1409), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rgb'], {}), '(rgb)\n', (1404, 1409), True, 'import matplotlib.pyplot as plt\n'), ((1414, 1424), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1422, 1424), True, 'import matplotlib.pyplot as plt\n'), ((1490, 1512), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (1501, 1512), True, 'import matplotlib.pyplot as plt\n'), ((1517, 1528), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1526, 1528), True, 'import matplotlib.pyplot as plt\n'), ((881, 914), 'numpy.fromfile', 'np.fromfile', (['fn'], {'dtype': 'np.float32'}), '(fn, dtype=np.float32)\n', (892, 914), True, 'import numpy as np\n'), ((3562, 3596), 'os.system', 'os.system', (["('mkdir -p ' + self.name)"], {}), "('mkdir -p ' + self.name)\n", (3571, 3596), False, 'import os\n'), ((1967, 1985), 'numpy.array', 'np.array', (['self.dat'], {}), '(self.dat)\n', (1975, 1985), True, 'import numpy as np\n'), ((2790, 2809), 'os.path.exists', 'os.path.exists', (['ffn'], {}), '(ffn)\n', (2804, 2809), False, 'import os\n'), ((2854, 2880), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (2864, 2880), True, 'import matplotlib.pyplot as plt\n'), ((3153, 3210), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of segments with a given pixel count"""'], {}), "('Number of segments with a given pixel count')\n", (3163, 3210), True, 'import matplotlib.pyplot as plt\n'), ((3346, 3364), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3362, 3364), True, 'import matplotlib.pyplot as plt\n'), ((3377, 3393), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ffn'], {}), '(ffn)\n', (3388, 3393), True, 'import matplotlib.pyplot as plt\n'), ((3406, 3417), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3415, 3417), True, 'import matplotlib.pyplot as plt\n'), ((3981, 4000), 'os.path.exists', 'os.path.exists', (['ffn'], {}), '(ffn)\n', (3995, 4000), False, 'import os\n'), ((4014, 4026), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4024, 4026), True, 'import matplotlib.pyplot as plt\n'), ((4109, 4150), 'matplotlib.pyplot.title', 'plt.title', (['"""Log of count of color values"""'], {}), "('Log of count of color values')\n", (4118, 4150), True, 'import matplotlib.pyplot as plt\n'), ((4194, 4210), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ffn'], {}), '(ffn)\n', (4205, 4210), True, 'import matplotlib.pyplot as plt\n'), ((4223, 4234), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4232, 4234), True, 'import matplotlib.pyplot as plt\n'), ((4384, 4407), 'sys.getrecursionlimit', 'sys.getrecursionlimit', ([], {}), '()\n', (4405, 4407), False, 'import sys\n'), ((4460, 4491), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['self.npx'], {}), '(self.npx)\n', (4481, 4491), False, 'import sys\n'), ((4767, 4784), 'flood.flood', 'flood', (['self', 'i', 'j'], {}), '(self, i, j)\n', (4772, 4784), False, 'from flood import flood\n'), ((4084, 4096), 'numpy.log', 'np.log', (['(10.0)'], {}), '(10.0)\n', (4090, 4096), True, 'import numpy as np\n'), ((5373, 5391), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (5387, 5391), False, 'import os\n'), ((5417, 5429), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5427, 5429), True, 'import matplotlib.pyplot as plt\n'), ((5454, 5514), 
'matplotlib.pyplot.scatter', 'plt.scatter', (['[x[1] for x in point]', '[(-x[0]) for x in point]'], {}), '([x[1] for x in point], [(-x[0]) for x in point])\n', (5465, 5514), True, 'import matplotlib.pyplot as plt\n'), ((5573, 5586), 'matplotlib.pyplot.title', 'plt.title', (['ns'], {}), '(ns)\n', (5582, 5586), True, 'import matplotlib.pyplot as plt\n'), ((5817, 5837), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""col ix"""'], {}), "('col ix')\n", (5827, 5837), True, 'import matplotlib.pyplot as plt\n'), ((5862, 5883), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""-row ix"""'], {}), "('-row ix')\n", (5872, 5883), True, 'import matplotlib.pyplot as plt\n'), ((5908, 5923), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fn'], {}), '(fn)\n', (5919, 5923), True, 'import matplotlib.pyplot as plt\n'), ((5948, 5959), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5957, 5959), True, 'import matplotlib.pyplot as plt\n'), ((6056, 6074), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (6070, 6074), False, 'import os\n'), ((6153, 6167), 'dist.to_list', 'to_list', (['point'], {}), '(point)\n', (6160, 6167), False, 'from dist import normalize, to_list, centroid\n'), ((6201, 6217), 'dist.centroid', 'centroid', (['xL', 'yL'], {}), '(xL, yL)\n', (6209, 6217), False, 'from dist import normalize, to_list, centroid\n'), ((6503, 6521), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (6517, 6521), False, 'import os\n'), ((5699, 5732), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.5, self.cols - 0.5]'], {}), '([-0.5, self.cols - 0.5])\n', (5707, 5732), True, 'import matplotlib.pyplot as plt\n'), ((5759, 5794), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-(self.rows - 0.5), 0.5]'], {}), '([-(self.rows - 0.5), 0.5])\n', (5767, 5794), True, 'import matplotlib.pyplot as plt\n')]
|
##################################################################
# Utils.py
# Client side utilities
##################################################################
import platform
class Utils(object):
def __init__(self, parent):
self.parent = parent
self.ae = parent.ae
self.cfg = parent.cfg
def get_size_uint32(self):
"""
Let's do things right.
        Calculate the size of an unsigned 32-bit integer
for this architecture. Or 4 :)
"""
try:
import ctypes
return ctypes.sizeof(ctypes.c_uint32)
except:
self.ae.m_warn("WARNING: Could not find ctypes. Assuming uint32 is 4 bytes :(")
return 4
def hit_bin(self, n):
"""
Given a hit number, return the corresponding bin
Hit bins: {1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+}
"""
        # right-shift checks give the documented ranges (the original left-shifts could never be zero)
        if n < 4:
            return n
        elif n >> 3 == 0:   # 4-7
            return 4
        elif n >> 4 == 0:   # 8-15
            return 5
        elif n >> 5 == 0:   # 16-31
            return 6
        elif 32 <= n <= 127:
            return 7
        else:
            return 8
def get_platform_info(self):
"""
Information regarding the computer
where the fuzzer is running
"""
try:
node_properties = {
'node_name' : platform.node(),
'os_release': platform.release(),
'os_version': platform.version(),
'machine' : platform.machine(),
'processor' : platform.processor()
}
except:
self.ae.m_alert('[x] Error getting platform information')
return None
return node_properties
|
[
"platform.processor",
"platform.node",
"ctypes.sizeof",
"platform.version",
"platform.release",
"platform.machine"
] |
[((559, 589), 'ctypes.sizeof', 'ctypes.sizeof', (['ctypes.c_uint32'], {}), '(ctypes.c_uint32)\n', (572, 589), False, 'import ctypes\n'), ((1410, 1425), 'platform.node', 'platform.node', ([], {}), '()\n', (1423, 1425), False, 'import platform\n'), ((1457, 1475), 'platform.release', 'platform.release', ([], {}), '()\n', (1473, 1475), False, 'import platform\n'), ((1507, 1525), 'platform.version', 'platform.version', ([], {}), '()\n', (1523, 1525), False, 'import platform\n'), ((1557, 1575), 'platform.machine', 'platform.machine', ([], {}), '()\n', (1573, 1575), False, 'import platform\n'), ((1607, 1627), 'platform.processor', 'platform.processor', ([], {}), '()\n', (1625, 1627), False, 'import platform\n')]
|
from django.db.models import ForeignKey
from django.urls import path
from django.utils.functional import cached_property
from django.utils.translation import gettext as _
from wagtail.admin.forms.models import register_form_field_override
from wagtail.admin.views.generic import chooser as chooser_views
from wagtail.admin.widgets.chooser import BaseChooser
from .base import ViewSet
class ChooserViewSet(ViewSet):
"""
A viewset that creates a chooser modal interface for choosing model instances.
"""
icon = "snippet" #: The icon to use in the header of the chooser modal, and on the chooser widget
choose_one_text = _(
"Choose"
) #: Label for the 'choose' button in the chooser widget when choosing an initial item
page_title = None #: Title text for the chooser modal (defaults to the same as ``choose_one_text``)`
choose_another_text = _(
"Choose another"
) #: Label for the 'choose' button in the chooser widget, when an item has already been chosen
edit_item_text = _("Edit") #: Label for the 'edit' button in the chooser widget
#: The view class to use for the overall chooser modal; must be a subclass of ``wagtail.admin.views.generic.chooser.ChooseView``.
choose_view_class = chooser_views.ChooseView
#: The view class used to render just the results panel within the chooser modal; must be a subclass of ``wagtail.admin.views.generic.chooser.ChooseResultsView``.
choose_results_view_class = chooser_views.ChooseResultsView
#: The view class used after an item has been chosen; must be a subclass of ``wagtail.admin.views.generic.chooser.ChosenView``.
chosen_view_class = chooser_views.ChosenView
#: The base Widget class that the chooser widget will be derived from.
base_widget_class = BaseChooser
#: Defaults to True; if False, the chooser widget will not automatically be registered for use in admin forms.
register_widget = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.page_title is None:
self.page_title = self.choose_one_text
@property
def choose_view(self):
return self.choose_view_class.as_view(
model=self.model,
chosen_url_name=self.get_url_name("chosen"),
results_url_name=self.get_url_name("choose_results"),
icon=self.icon,
page_title=self.page_title,
)
@property
def choose_results_view(self):
return self.choose_results_view_class.as_view(
model=self.model,
chosen_url_name=self.get_url_name("chosen"),
results_url_name=self.get_url_name("choose_results"),
)
@property
def chosen_view(self):
return self.chosen_view_class.as_view(
model=self.model,
)
@cached_property
def widget_class(self):
"""
Returns the form widget class for this chooser.
"""
return type(
"%sChooserWidget" % self.model.__name__,
(self.base_widget_class,),
{
"model": self.model,
"choose_one_text": self.choose_one_text,
"choose_another_text": self.choose_another_text,
"link_to_chosen_text": self.edit_item_text,
"chooser_modal_url_name": self.get_url_name("choose"),
"icon": self.icon,
},
)
def get_urlpatterns(self):
return super().get_urlpatterns() + [
path("", self.choose_view, name="choose"),
path("results/", self.choose_results_view, name="choose_results"),
path("chosen/<str:pk>/", self.chosen_view, name="chosen"),
]
def on_register(self):
if self.register_widget:
register_form_field_override(
ForeignKey, to=self.model, override={"widget": self.widget_class}
)
|
[
"django.urls.path",
"wagtail.admin.forms.models.register_form_field_override",
"django.utils.translation.gettext"
] |
[((644, 655), 'django.utils.translation.gettext', '_', (['"""Choose"""'], {}), "('Choose')\n", (645, 655), True, 'from django.utils.translation import gettext as _\n'), ((888, 907), 'django.utils.translation.gettext', '_', (['"""Choose another"""'], {}), "('Choose another')\n", (889, 907), True, 'from django.utils.translation import gettext as _\n'), ((1037, 1046), 'django.utils.translation.gettext', '_', (['"""Edit"""'], {}), "('Edit')\n", (1038, 1046), True, 'from django.utils.translation import gettext as _\n'), ((3814, 3913), 'wagtail.admin.forms.models.register_form_field_override', 'register_form_field_override', (['ForeignKey'], {'to': 'self.model', 'override': "{'widget': self.widget_class}"}), "(ForeignKey, to=self.model, override={'widget':\n self.widget_class})\n", (3842, 3913), False, 'from wagtail.admin.forms.models import register_form_field_override\n'), ((3538, 3579), 'django.urls.path', 'path', (['""""""', 'self.choose_view'], {'name': '"""choose"""'}), "('', self.choose_view, name='choose')\n", (3542, 3579), False, 'from django.urls import path\n'), ((3593, 3658), 'django.urls.path', 'path', (['"""results/"""', 'self.choose_results_view'], {'name': '"""choose_results"""'}), "('results/', self.choose_results_view, name='choose_results')\n", (3597, 3658), False, 'from django.urls import path\n'), ((3672, 3729), 'django.urls.path', 'path', (['"""chosen/<str:pk>/"""', 'self.chosen_view'], {'name': '"""chosen"""'}), "('chosen/<str:pk>/', self.chosen_view, name='chosen')\n", (3676, 3729), False, 'from django.urls import path\n')]
|
import pandas as pd
import zipfile
import os
def get_media_data(**cfg):
    '''retrieve media dataset'''
for url in cfg['URLs']:
print(url)
infile = os.path.join(cfg['outpath'],url.split('/')[-1])
os.system(cfg['wget_fmt']%(url,infile))
print(infile)
with zipfile.ZipFile(infile, 'r') as zip_ref:
            # extract into the output directory (extracting onto the zip's own file path would fail)
            zip_ref.extractall(cfg['outpath'])
def process_media_data(**cfg):
'''get COVID-19 related news out of news dataset'''
result = []
    # chunk size is assumed to come from the config; fall back to 100k rows per chunk
    chunksize = cfg.get('chunksize', 100000)
    for chunk in pd.read_csv(cfg['infile'], usecols=range(2, 12),
                             parse_dates=['date'], chunksize=chunksize):
result.append(chunk[chunk.year==2020])
all2020 = pd.concat(result)
COVID = all2020[
(all2020.title.str.contains('virus'))|
(all2020.title.str.contains('COVID'))|
(all2020.title.str.contains('stay-at-home'))|
(all2020.title.str.contains('COVID-19'))|
# (all2020.title.str.contains('toll'))|
(all2020.title.str.contains('coronavirus'))
].reset_index(drop=True)
COVID.to_csv(cfg['outfile'])
|
[
"zipfile.ZipFile",
"os.system",
"pandas.concat"
] |
[((680, 697), 'pandas.concat', 'pd.concat', (['result'], {}), '(result)\n', (689, 697), True, 'import pandas as pd\n'), ((225, 267), 'os.system', 'os.system', (["(cfg['wget_fmt'] % (url, infile))"], {}), "(cfg['wget_fmt'] % (url, infile))\n", (234, 267), False, 'import os\n'), ((297, 325), 'zipfile.ZipFile', 'zipfile.ZipFile', (['infile', '"""r"""'], {}), "(infile, 'r')\n", (312, 325), False, 'import zipfile\n')]
|
import asyncio
from decouple import config
from motor.motor_asyncio import AsyncIOMotorClient
cs = config("MONGODB_CS")
client = AsyncIOMotorClient(cs)
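# reuse the already-running asyncio loop so the client can be shared inside async frameworks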
client.get_io_loop = asyncio.get_running_loop
buchi = client.get_database('buchi')
|
[
"decouple.config",
"motor.motor_asyncio.AsyncIOMotorClient"
] |
[((100, 120), 'decouple.config', 'config', (['"""MONGODB_CS"""'], {}), "('MONGODB_CS')\n", (106, 120), False, 'from decouple import config\n'), ((130, 152), 'motor.motor_asyncio.AsyncIOMotorClient', 'AsyncIOMotorClient', (['cs'], {}), '(cs)\n', (148, 152), False, 'from motor.motor_asyncio import AsyncIOMotorClient\n')]
|
# Standard library
import argparse
import os
# Third party
import openai
# Consts
PROMPT = """
Given a cooking ingredient and quantity, return only the ingredient name
2 cups flour
Flour
Cinnamon ~1 tablespoon
Cinnamon
About one tsp salt
Salt
1.5-2 cups grated raw zucchini
Raw zucchini
1c walnuts (optional)
Walnuts
%s
"""
def parse(ingredient_description):
try:
openai.api_key = os.environ["OPENAI_API_KEY"]
response = openai.Completion.create(
engine="davinci",
prompt=PROMPT % (ingredient_description),
temperature=0,
max_tokens=64,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["\n"]
)
return response.choices[0].text
except:
return ingredient_description
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse ingredients using OpenAI")
parser.add_argument("-i", "--ingredient", help="Ingredient description to parse")
args = parser.parse_args()
print(parse(args.ingredient))
|
[
"argparse.ArgumentParser",
"openai.Completion.create"
] |
[((862, 931), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse ingredients using OpenAI"""'}), "(description='Parse ingredients using OpenAI')\n", (885, 931), False, 'import argparse\n'), ((445, 628), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""davinci"""', 'prompt': '(PROMPT % ingredient_description)', 'temperature': '(0)', 'max_tokens': '(64)', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)', 'stop': "['\\n']"}), "(engine='davinci', prompt=PROMPT %\n ingredient_description, temperature=0, max_tokens=64, top_p=1,\n frequency_penalty=0, presence_penalty=0, stop=['\\n'])\n", (469, 628), False, 'import openai\n')]
|
from .serializers import ReviewSerializer, ShopSerializer, UserReviewSerializer
from rest_framework import viewsets
from rest_framework.pagination import LimitOffsetPagination
from .models import ReviewModel, ShopModel
from rest_framework.generics import ListAPIView
from rest_framework import filters
from django.db.models import Count, Avg
from rest_framework import permissions
from .permissions import IsOwnerOrReadOnly
from django.contrib.auth import get_user_model
UserModel = get_user_model()
class ReviewViewSet(viewsets.ModelViewSet):
"""
ViewSet for Reviews
"""
queryset = ReviewModel.objects.all()
serializer_class = ReviewSerializer
pagination_class = LimitOffsetPagination
permission_classes = [permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class ShopView(ListAPIView):
queryset = ShopModel.objects.annotate(review_count=Count('reviews'), avg_rating=Avg('reviews__rating'))
serializer_class = ShopSerializer
filter_backends = [filters.OrderingFilter, filters.SearchFilter]
ordering_fields = ['review_count', 'avg_rating']
ordering = ['avg_rating']
search_fields = ['domain']
class UserReviewView(ListAPIView):
queryset = UserModel.objects.all()
serializer_class = UserReviewSerializer
|
[
"django.db.models.Count",
"django.db.models.Avg",
"django.contrib.auth.get_user_model"
] |
[((485, 501), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (499, 501), False, 'from django.contrib.auth import get_user_model\n'), ((975, 991), 'django.db.models.Count', 'Count', (['"""reviews"""'], {}), "('reviews')\n", (980, 991), False, 'from django.db.models import Count, Avg\n'), ((1004, 1026), 'django.db.models.Avg', 'Avg', (['"""reviews__rating"""'], {}), "('reviews__rating')\n", (1007, 1026), False, 'from django.db.models import Count, Avg\n')]
|
# stdlib
import subprocess
from typing import List
# local
from lib.io import read_value_from_file
from lib.log import log, log_pretty
# =============================================================================
#
# private utility functions
#
# =============================================================================
# =============================================================================
# _parse_packer_machine_readable_output_line
# =============================================================================
def _parse_packer_machine_readable_output_line(output_line: str) -> dict:
# machine readable format
# from https://www.packer.io/docs/commands/index.html
parsed_line = None
if output_line:
message_item: dict = {
"timestamp": None,
"target": None,
"type": None,
"data": [],
}
# split each line on commas
line_tokens: list = output_line.split(",")
for i, line_token in enumerate(line_tokens):
# assign payload fields based on token number
if i == 0:
message_item["timestamp"] = line_token
elif i == 1:
message_item["target"] = line_token
elif i == 2:
message_item["type"] = line_token
elif i > 2:
# strip trailing newline from data
message_item["data"].append(line_token.rstrip("\n"))
parsed_line = message_item
return parsed_line
# =============================================================================
# _format_packer_machine_readable_output_line
# =============================================================================
def _format_packer_machine_readable_output_line(
timestamp: str, target: str, output_type: str, data: str, subtype=None
) -> str:
# most messages won't have a target which means it's global
if not target:
target = "global"
# consistent padding for the 'version' types
if output_type.startswith("version"):
output_type = f"{output_type:16}"
# replace the packer comma
data = data.replace("%!(PACKER_COMMA)", ",")
if subtype:
return f"{timestamp} | {target} | {output_type} | {subtype:8} | {data}"
return f"{timestamp} | {target} | {output_type} | {data}"
# =============================================================================
# _print_parsed_packer_machine_readable_output_line
# =============================================================================
def _print_parsed_packer_machine_readable_output_line(parsed_line: dict) -> None:
if parsed_line:
if len(parsed_line["data"]) > 0:
subtype = None
# check for subtype
if parsed_line["data"][0] in ["say", "error", "message"]:
# pop found subtype from the parsed line
subtype = parsed_line["data"].pop(0)
for item in parsed_line["data"]:
# split on \\n
item_lines = item.split("\\n")
for item_line in item_lines:
log(
_format_packer_machine_readable_output_line(
parsed_line["timestamp"],
parsed_line["target"],
parsed_line["type"],
item_line,
subtype=subtype,
)
)
# =============================================================================
# _parse_packer_parsed_output_for_build_manifest
# =============================================================================
def _parse_packer_parsed_output_for_build_manifest(parsed_output: List[dict]) -> dict:
manifest = {"artifacts": {}}
# create collection of targets
targets = {}
for parsed_item in parsed_output:
if parsed_item["target"]:
target_name = parsed_item["target"]
if target_name not in targets:
targets[target_name] = []
del parsed_item["target"]
targets[target_name].append(parsed_item)
# iterate on targets
for target_key, target_value in targets.items():
# split into artifacts
target_artifacts = {}
for target_item in target_value:
if target_item["type"] == "artifact":
# first index of data will be the artifact number
artifact_number = target_item["data"][0]
# second index of data will be the artifact key
artifact_key = target_item["data"][1]
# skip adding the 'end' key
if artifact_key == "end":
continue
# third index of data will be the artifact value, if present
if len(target_item["data"]) > 2:
artifact_value = target_item["data"][2]
else:
artifact_value = None
# create the target artifact dict, if missing
if artifact_number not in target_artifacts:
target_artifacts[artifact_number] = {}
# assign the artifact key and value
target_artifacts[artifact_number][artifact_key] = artifact_value
manifest["artifacts"][target_key] = target_artifacts
return manifest
# =============================================================================
#
# private exe functions
#
# =============================================================================
# =============================================================================
# _packer
# =============================================================================
def _packer(*args: str, working_dir=None) -> List[dict]:
# runs packer bin with forced machine readable output
process_args = ["packer", "-machine-readable", *args]
parsed_lines = []
# use Popen so we can read lines as they come
with subprocess.Popen(
process_args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, # redirect stderr to stdout
bufsize=1,
universal_newlines=True,
stdin=None,
cwd=working_dir,
) as pipe:
for line in pipe.stdout or "":
if "fmt" in args:
# determine log level
log_level = "warning" if "fmt" in args else "info"
# directly log the output
log(f"global | ui | {log_level} | {line.rstrip()}")
else:
# parse the machine readable output as it arrives
parsed_line = _parse_packer_machine_readable_output_line(line)
parsed_lines.append(parsed_line)
_print_parsed_packer_machine_readable_output_line(parsed_line)
if pipe.returncode != 0:
# args are masked to prevent credentials leaking
raise subprocess.CalledProcessError(pipe.returncode, ["packer"])
return parsed_lines
# =============================================================================
#
# public packer functions
#
# =============================================================================
# =============================================================================
# version
# =============================================================================
def version() -> None:
# execute version command
_packer("version")
# =============================================================================
# init
# =============================================================================
def init(working_dir_path: str, template_file_path: str) -> None:
# execute init command
_packer("init", template_file_path, working_dir=working_dir_path)
# =============================================================================
# format
# =============================================================================
def format_packer_cmd(working_dir_path: str, template_file_path: str) -> None:
# execute format command
_packer("fmt", "-check", "-diff", template_file_path, working_dir=working_dir_path)
# =============================================================================
# validate
# =============================================================================
def validate(
working_dir_path: str,
template_file_path: str,
var_file_paths: List[str] = None,
template_vars: dict = None,
vars_from_files: dict = None,
only: List[str] = None,
excepts: List[str] = None,
syntax_only: bool = False,
debug: bool = False,
) -> None:
packer_command_args = []
# add any specified var file paths
if var_file_paths:
for var_file_path in var_file_paths:
packer_command_args.append(f"-var-file={var_file_path}")
# add any specified vars
if template_vars:
for var_name, var_value in template_vars.items():
packer_command_args.append(f"-var={var_name}={var_value}")
# add any vars from files
if vars_from_files:
for var_name, file_path in vars_from_files.items():
var_value = read_value_from_file(file_path, working_dir=working_dir_path)
packer_command_args.append(f"-var={var_name}={var_value}")
# only build specified sources
if only:
packer_command_args.append(f"-only={','.join(only)}")
# build all sources except those specified
elif excepts:
packer_command_args.append(f"-except={','.join(excepts)}")
# optionally check only syntax
if syntax_only:
packer_command_args.append("-syntax-only")
# dump args on debug
if debug:
log("validate args:")
log_pretty(packer_command_args)
# execute validate command
_packer(
"validate",
*packer_command_args,
template_file_path,
working_dir=working_dir_path,
)
# =============================================================================
# build
# =============================================================================
def build(
working_dir_path: str,
template_file_path: str,
var_file_paths: List[str] = None,
template_vars: dict = None,
vars_from_files: dict = None,
only: List[str] = None,
excepts: List[str] = None,
debug: bool = False,
force: bool = False,
) -> dict:
packer_command_args = []
# add any specified var file paths
if var_file_paths:
for var_file_path in var_file_paths:
packer_command_args.append(f"-var-file={var_file_path}")
# add any specified vars
if template_vars:
for var_name, var_value in template_vars.items():
packer_command_args.append(f"-var={var_name}={var_value}")
# add any vars from files
if vars_from_files:
for var_name, file_path in vars_from_files.items():
var_value = read_value_from_file(file_path, working_dir=working_dir_path)
packer_command_args.append(f"-var={var_name}={var_value}")
# only build specified sources
if only:
packer_command_args.append(f"-only={','.join(only)}")
# build all sources except those specified
elif excepts:
packer_command_args.append(f"-except={','.join(excepts)}")
# add force if requested
if force:
packer_command_args.append("-force")
# dump args on debug
if debug:
log("build args:")
log_pretty(packer_command_args)
# execute build command
packer_command_result = _packer(
"build", *packer_command_args, template_file_path, working_dir=working_dir_path
)
# get build manifest from output
packer_build_manifest = _parse_packer_parsed_output_for_build_manifest(
packer_command_result
)
# return the manifest
return packer_build_manifest
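# Hedged usage sketch (not part of the original module): one possible way to compose
# the public functions above; the working directory, template path, and variables are
# placeholders, not values taken from the source.
if __name__ == "__main__":
    version()
    init("/tmp/packer-build", "template.pkr.hcl")
    validate(
        "/tmp/packer-build",
        "template.pkr.hcl",
        template_vars={"region": "us-east-1"},
        debug=True,
    )
    manifest = build(
        "/tmp/packer-build",
        "template.pkr.hcl",
        template_vars={"region": "us-east-1"},
    )
    log_pretty(manifest)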
|
[
"subprocess.Popen",
"lib.log.log_pretty",
"subprocess.CalledProcessError",
"lib.io.read_value_from_file",
"lib.log.log"
] |
[((6011, 6161), 'subprocess.Popen', 'subprocess.Popen', (['process_args'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT', 'bufsize': '(1)', 'universal_newlines': '(True)', 'stdin': 'None', 'cwd': 'working_dir'}), '(process_args, stdout=subprocess.PIPE, stderr=subprocess.\n STDOUT, bufsize=1, universal_newlines=True, stdin=None, cwd=working_dir)\n', (6027, 6161), False, 'import subprocess\n'), ((6933, 6991), 'subprocess.CalledProcessError', 'subprocess.CalledProcessError', (['pipe.returncode', "['packer']"], {}), "(pipe.returncode, ['packer'])\n", (6962, 6991), False, 'import subprocess\n'), ((9678, 9699), 'lib.log.log', 'log', (['"""validate args:"""'], {}), "('validate args:')\n", (9681, 9699), False, 'from lib.log import log, log_pretty\n'), ((9708, 9739), 'lib.log.log_pretty', 'log_pretty', (['packer_command_args'], {}), '(packer_command_args)\n', (9718, 9739), False, 'from lib.log import log, log_pretty\n'), ((11400, 11418), 'lib.log.log', 'log', (['"""build args:"""'], {}), "('build args:')\n", (11403, 11418), False, 'from lib.log import log, log_pretty\n'), ((11427, 11458), 'lib.log.log_pretty', 'log_pretty', (['packer_command_args'], {}), '(packer_command_args)\n', (11437, 11458), False, 'from lib.log import log, log_pretty\n'), ((9150, 9211), 'lib.io.read_value_from_file', 'read_value_from_file', (['file_path'], {'working_dir': 'working_dir_path'}), '(file_path, working_dir=working_dir_path)\n', (9170, 9211), False, 'from lib.io import read_value_from_file\n'), ((10890, 10951), 'lib.io.read_value_from_file', 'read_value_from_file', (['file_path'], {'working_dir': 'working_dir_path'}), '(file_path, working_dir=working_dir_path)\n', (10910, 10951), False, 'from lib.io import read_value_from_file\n')]
|
from typing import Optional
from fastapi import APIRouter, Depends
import httpx
from app.models.models import User
from app.schema import User_Pydantic
router = APIRouter()
@router.get("/")
async def homepage():
    # use httpx instead of requests for asynchronous calls
async with httpx.AsyncClient() as client:
res = await client.get('https://www.baidu.com')
return {"data": res.status_code}
@router.get("/test/items/{item_id}")
async def read_item(item_id: int, q: Optional[str] = None):
return {"item_id": item_id, "q": q}
@router.get('/test/users')
async def get_users():
    # create a user
# user = User()
# user.name = 'test2'
# user.phone = '123'
# user.set_password('<PASSWORD>')
# await user.save()
# return 1
    # building the QuerySet does not hit the database yet
users = User.all()
    # User_Pydantic is the serialization model; users is the QuerySet to serialize
data = await User_Pydantic.from_queryset(users)
return data
|
[
"app.models.models.User.all",
"httpx.AsyncClient",
"app.schema.User_Pydantic.from_queryset",
"fastapi.APIRouter"
] |
[((164, 175), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (173, 175), False, 'from fastapi import APIRouter, Depends\n'), ((770, 780), 'app.models.models.User.all', 'User.all', ([], {}), '()\n', (778, 780), False, 'from app.models.models import User\n'), ((260, 279), 'httpx.AsyncClient', 'httpx.AsyncClient', ([], {}), '()\n', (277, 279), False, 'import httpx\n'), ((845, 879), 'app.schema.User_Pydantic.from_queryset', 'User_Pydantic.from_queryset', (['users'], {}), '(users)\n', (872, 879), False, 'from app.schema import User_Pydantic\n')]
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Get a PEM-format certificate chain for a given version."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.kms import exceptions as kms_exceptions
from googlecloudsdk.command_lib.kms import flags
from googlecloudsdk.core import log
from googlecloudsdk.core.util import files
DETAILED_HELP = {
'EXAMPLES':
"""\
The following command saves the Cavium certificate chain for
CryptoKey ``frodo'' Version 2 to ``/tmp/my/cavium.pem'':
$ {command} 2 --key=frodo --keyring=fellowship --location=us-east1 --certificate-chain-type=cavium --output-file=/tmp/my/cavium.pem
""",
}
def _GetCertificateChainPem(chains, chain_type):
"""Returns the specified certificate chain(s) from a CertChains object.
Args:
chains: a KeyOperationAttestation.CertChains object.
chain_type: a string specifying the chain(s) to retrieve.
Returns:
A string containing the PEM-encoded certificate chain(s).
Raises:
exceptions.InvalidArgumentException if chain_type is not a valid chain type.
"""
if chain_type == 'cavium':
return ''.join(chains.caviumCerts)
elif chain_type == 'google-card':
return ''.join(chains.googleCardCerts)
elif chain_type == 'google-partition':
return ''.join(chains.googlePartitionCerts)
elif chain_type == 'all':
return ''.join(chains.caviumCerts + chains.googlePartitionCerts +
chains.googleCardCerts)
raise exceptions.InvalidArgumentException(
'{} is not a valid chain type.'.format(chain_type))
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
base.ReleaseTrack.GA)
class GetCertificateChain(base.DescribeCommand):
r"""Get a certificate chain for a given version.
Returns the PEM-format certificate chain for the specified key version.
The optional flag `output-file` indicates the path to store the PEM. If not
specified, the PEM will be printed to stdout.
"""
detailed_help = DETAILED_HELP
@staticmethod
def Args(parser):
flags.AddKeyVersionResourceArgument(
parser, 'from which to get the certificate chain')
flags.AddCertificateChainFlag(parser)
flags.AddOutputFileFlag(parser, 'to store PEM')
def Run(self, args):
client = cloudkms_base.GetClientInstance()
messages = cloudkms_base.GetMessagesModule()
version_ref = flags.ParseCryptoKeyVersionName(args)
if not version_ref.Name():
raise exceptions.InvalidArgumentException(
'version', 'version id must be non-empty.')
versions = client.projects_locations_keyRings_cryptoKeys_cryptoKeyVersions
version = versions.Get(
messages
.CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetRequest(
name=version_ref.RelativeName()))
if (version.protectionLevel !=
messages.CryptoKeyVersion.ProtectionLevelValueValuesEnum.HSM):
raise kms_exceptions.ArgumentError(
'Certificate chains are only available for HSM key versions.')
if (version.state ==
messages.CryptoKeyVersion.StateValueValuesEnum.PENDING_GENERATION):
raise kms_exceptions.ArgumentError(
'Certificate chains are unavailable until the version is generated.')
try:
log.WriteToFileOrStdout(
args.output_file if args.output_file else '-',
_GetCertificateChainPem(version.attestation.certChains,
args.certificate_chain_type),
overwrite=True,
binary=False)
except files.Error as e:
raise exceptions.BadFileException(e)
|
[
"googlecloudsdk.command_lib.kms.flags.AddKeyVersionResourceArgument",
"googlecloudsdk.api_lib.cloudkms.base.GetMessagesModule",
"googlecloudsdk.command_lib.kms.flags.AddOutputFileFlag",
"googlecloudsdk.calliope.exceptions.BadFileException",
"googlecloudsdk.command_lib.kms.exceptions.ArgumentError",
"googlecloudsdk.calliope.base.ReleaseTracks",
"googlecloudsdk.calliope.exceptions.InvalidArgumentException",
"googlecloudsdk.command_lib.kms.flags.ParseCryptoKeyVersionName",
"googlecloudsdk.api_lib.cloudkms.base.GetClientInstance",
"googlecloudsdk.command_lib.kms.flags.AddCertificateChainFlag"
] |
[((2397, 2491), 'googlecloudsdk.calliope.base.ReleaseTracks', 'base.ReleaseTracks', (['base.ReleaseTrack.ALPHA', 'base.ReleaseTrack.BETA', 'base.ReleaseTrack.GA'], {}), '(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA, base.\n ReleaseTrack.GA)\n', (2415, 2491), False, 'from googlecloudsdk.calliope import base\n'), ((2888, 2978), 'googlecloudsdk.command_lib.kms.flags.AddKeyVersionResourceArgument', 'flags.AddKeyVersionResourceArgument', (['parser', '"""from which to get the certificate chain"""'], {}), "(parser,\n 'from which to get the certificate chain')\n", (2923, 2978), False, 'from googlecloudsdk.command_lib.kms import flags\n'), ((2988, 3025), 'googlecloudsdk.command_lib.kms.flags.AddCertificateChainFlag', 'flags.AddCertificateChainFlag', (['parser'], {}), '(parser)\n', (3017, 3025), False, 'from googlecloudsdk.command_lib.kms import flags\n'), ((3030, 3077), 'googlecloudsdk.command_lib.kms.flags.AddOutputFileFlag', 'flags.AddOutputFileFlag', (['parser', '"""to store PEM"""'], {}), "(parser, 'to store PEM')\n", (3053, 3077), False, 'from googlecloudsdk.command_lib.kms import flags\n'), ((3115, 3148), 'googlecloudsdk.api_lib.cloudkms.base.GetClientInstance', 'cloudkms_base.GetClientInstance', ([], {}), '()\n', (3146, 3148), True, 'from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base\n'), ((3164, 3197), 'googlecloudsdk.api_lib.cloudkms.base.GetMessagesModule', 'cloudkms_base.GetMessagesModule', ([], {}), '()\n', (3195, 3197), True, 'from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base\n'), ((3216, 3253), 'googlecloudsdk.command_lib.kms.flags.ParseCryptoKeyVersionName', 'flags.ParseCryptoKeyVersionName', (['args'], {}), '(args)\n', (3247, 3253), False, 'from googlecloudsdk.command_lib.kms import flags\n'), ((3297, 3376), 'googlecloudsdk.calliope.exceptions.InvalidArgumentException', 'exceptions.InvalidArgumentException', (['"""version"""', '"""version id must be non-empty."""'], {}), "('version', 'version id must be non-empty.')\n", (3332, 3376), False, 'from googlecloudsdk.calliope import exceptions\n'), ((3757, 3853), 'googlecloudsdk.command_lib.kms.exceptions.ArgumentError', 'kms_exceptions.ArgumentError', (['"""Certificate chains are only available for HSM key versions."""'], {}), "(\n 'Certificate chains are only available for HSM key versions.')\n", (3785, 3853), True, 'from googlecloudsdk.command_lib.kms import exceptions as kms_exceptions\n'), ((3973, 4076), 'googlecloudsdk.command_lib.kms.exceptions.ArgumentError', 'kms_exceptions.ArgumentError', (['"""Certificate chains are unavailable until the version is generated."""'], {}), "(\n 'Certificate chains are unavailable until the version is generated.')\n", (4001, 4076), True, 'from googlecloudsdk.command_lib.kms import exceptions as kms_exceptions\n'), ((4401, 4431), 'googlecloudsdk.calliope.exceptions.BadFileException', 'exceptions.BadFileException', (['e'], {}), '(e)\n', (4428, 4431), False, 'from googlecloudsdk.calliope import exceptions\n')]
|
#import libraries
import face_recognition
import numpy as np
from PIL import Image, ImageDraw
import matplotlib.image as mpimg
from IPython.display import display
import cv2
import os, re
import pyrebase
import time
cv2.VideoCapture(0).isOpened()
from dronekit import connect, VehicleMode, LocationGlobalRelative
import firebase_admin
from firebase_admin import credentials
from google.cloud import firestore
'''
To add: your libraries
'''
# Firestore and storage config:
firebaseConfig = {
"apiKey": "<KEY>",
"authDomain": "delivreapp-5221e.firebaseapp.com",
"projectId": "delivreapp-5221e",
"databaseURL": "https://del-ivre-default-rtdb.europe-west1.firebasedatabase.app",
"storageBucket": "delivreapp-5221e.appspot.com",
"messagingSenderId": "661920641786",
"appId": "1:661920641786:web:dca2c085b5ff60f1b18f43",
"measurementId": "G-CLR5PFH3G4"
};
nDrone=5 # number of the associated drone
firebase=pyrebase.initialize_app(firebaseConfig)
storage=firebase.storage()
try:
firebase_admin.get_app()
#print('firebase intialized.')
except ValueError as e:
#print('firebase not initialized. But now initialize.')
cred = credentials.Certificate("serviceAccountKey.json")
firebase_admin.initialize_app(cred)
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="serviceAccountKey.json"
db = firestore.Client()
### retrieve GPS coordinates
print("Initialisation terminée, début de la boucle")
while True:
nbImg=[];
go=True
while True:
try:
doc=db.collection('Users').where("Drone","==",nDrone).get()[0]
break
except:
continue
nom=doc.get("Nom")
prenom=doc.get("Prenom")
coord=doc.get("GPS")
image=nom+"#"+prenom+".jpg"
print(nom,end=' ')
print(prenom,end="\n")
print(doc.get("Commande"),end="\n")
print(coord)
    # download the image
storage.child(image).download(image)
img1 = Image.open(image)
img1.save("img1.jpg","JPEG")
try:
time.sleep(0.001)
img1 = Image.open(image)
img1.save("img1.jpg","JPEG")
time.sleep(0.001)
img2=img1.rotate(90)
img2.show()
time.sleep(0.001)
img2.save("img2.jpg","JPEG")
img3=img2.rotate(90)
time.sleep(0.001)
img3.save("img3.jpg","JPEG")
img4=img3.rotate(90)
time.sleep(0.001)
img4.save("img4.jpg","JPEG")
#os.remove(image)
print("image enregistrée")
except:
print("probleme dans le téléchargement de l'image")
    ### face recognition initialization variables
known_face_encodings = []
known_face_names = []
face_locations = []
face_encodings = []
process_this_frame = True
    # image stored in the database
for i in range(1,5):
try:
            new_image=face_recognition.load_image_file("img"+ str(i) + ".jpg")  # use the loop index, not the nbImg list
            new_face_encoding = face_recognition.face_encodings(new_image)[0]
            known_face_encodings.append(new_face_encoding)
            known_face_names.append(prenom + " " + nom)
            nbImg.append(i)  # remember which rotated images were usable
print("photo" , str(i) , " dans reconaissance faciale")
except:
os.remove("img"+ str(i) + ".jpg")
print(i)
print("photo ", str(i) , "non prise en compte")
Reco=True
    # face recognition algorithm
print("lancement algorithme de reconnaissance faciale")
while Reco:
# Grab a single frame of video
ret, frame = cv2.VideoCapture(0).read()
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
# If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = known_face_names[first_match_index]
if name==prenom+" "+nom:
print(name, end=' ')
print("a bien été reconnu, on procède donc a l'attérissage.")
Reco=False
process_this_frame = not process_this_frame
    # delete the images from local storage
print("image supprimé de la mémoire du rpi")
for i in range(len(nbImg)):
os.remove("img"+ str(nbImg[i]) + ".jpg")
try:
id=str(int(doc.get("Id")))
db.collection('Users').document(id).delete()
print("la commande a été supprimée")
storage.delete_blob(image)
storage.delete()
except:
print("la commande était déja supprimée")
|
[
"cv2.resize",
"firebase_admin.credentials.Certificate",
"face_recognition.compare_faces",
"face_recognition.face_encodings",
"google.cloud.firestore.Client",
"pyrebase.initialize_app",
"PIL.Image.open",
"cv2.VideoCapture",
"time.sleep",
"firebase_admin.initialize_app",
"face_recognition.face_locations",
"firebase_admin.get_app"
] |
[((923, 962), 'pyrebase.initialize_app', 'pyrebase.initialize_app', (['firebaseConfig'], {}), '(firebaseConfig)\n', (946, 962), False, 'import pyrebase\n'), ((1322, 1340), 'google.cloud.firestore.Client', 'firestore.Client', ([], {}), '()\n', (1338, 1340), False, 'from google.cloud import firestore\n'), ((1001, 1025), 'firebase_admin.get_app', 'firebase_admin.get_app', ([], {}), '()\n', (1023, 1025), False, 'import firebase_admin\n'), ((1919, 1936), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (1929, 1936), False, 'from PIL import Image, ImageDraw\n'), ((216, 235), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (232, 235), False, 'import cv2\n'), ((1156, 1205), 'firebase_admin.credentials.Certificate', 'credentials.Certificate', (['"""serviceAccountKey.json"""'], {}), "('serviceAccountKey.json')\n", (1179, 1205), False, 'from firebase_admin import credentials\n'), ((1210, 1245), 'firebase_admin.initialize_app', 'firebase_admin.initialize_app', (['cred'], {}), '(cred)\n', (1239, 1245), False, 'import firebase_admin\n'), ((1987, 2004), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (1997, 2004), False, 'import time\n'), ((2020, 2037), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (2030, 2037), False, 'from PIL import Image, ImageDraw\n'), ((2083, 2100), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (2093, 2100), False, 'import time\n'), ((2158, 2175), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (2168, 2175), False, 'import time\n'), ((2250, 2267), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (2260, 2267), False, 'import time\n'), ((2342, 2359), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (2352, 2359), False, 'import time\n'), ((3627, 3670), 'cv2.resize', 'cv2.resize', (['frame', '(0, 0)'], {'fx': '(0.25)', 'fy': '(0.25)'}), '(frame, (0, 0), fx=0.25, fy=0.25)\n', (3637, 3670), False, 'import cv2\n'), ((4034, 4082), 'face_recognition.face_locations', 'face_recognition.face_locations', (['rgb_small_frame'], {}), '(rgb_small_frame)\n', (4065, 4082), False, 'import face_recognition\n'), ((4112, 4176), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['rgb_small_frame', 'face_locations'], {}), '(rgb_small_frame, face_locations)\n', (4143, 4176), False, 'import face_recognition\n'), ((2918, 2960), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['new_image'], {}), '(new_image)\n', (2949, 2960), False, 'import face_recognition\n'), ((3494, 3513), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (3510, 3513), False, 'import cv2\n'), ((4320, 4387), 'face_recognition.compare_faces', 'face_recognition.compare_faces', (['known_face_encodings', 'face_encoding'], {}), '(known_face_encodings, face_encoding)\n', (4350, 4387), False, 'import face_recognition\n')]
|
import re, os, json
root = os.path.dirname(os.path.dirname(__file__))
source_folder = os.path.join(root, 'src', 'HoneybeeSchema', 'Model')
#NamedReferenceable
class_files = [x for x in os.listdir(source_folder) if (x.endswith("Abridged.cs") and not x.endswith("SetAbridged.cs") and not x.endswith("PropertiesAbridged.cs") and not x.endswith("ScheduleRuleAbridged.cs") ) ]
abridged_file = os.path.join(root, 'src', 'HoneybeeSchema', 'BaseClasses', 'NamedReferenceable.cs')
with open(abridged_file, "wt", encoding='utf-8') as abridgeFile:
data = []
data.append('namespace HoneybeeSchema\n')
data.append('{\n')
for f in class_files:
type_name = f
data.append('public partial class %s: INamed {}\n' % f.replace('.cs',''))
data.append('public partial class ConstructionSetAbridged: INamed{}\n')
data.append('}')
abridgeFile.writelines(data)
abridgeFile.close()
#EnergyWindowMaterial
class_files = [x for x in os.listdir(source_folder) if (x.startswith("EnergyWindowMaterial")) ]
abridged_file = os.path.join(root, 'src', 'HoneybeeSchema', 'BaseClasses', 'EnergyWindowMaterial.cs')
with open(abridged_file, "wt", encoding='utf-8') as abridgeFile:
data = []
data.append('namespace HoneybeeSchema\n')
data.append('{\n')
for f in class_files:
type_name = f
data.append('public partial class %s: IEnergyWindowMaterial {}\n' % f.replace('.cs',''))
data.append('}')
abridgeFile.writelines(data)
abridgeFile.close()
#EnergyMaterial
class_files = [x for x in os.listdir(source_folder) if (x.startswith("EnergyMaterial")) ]
abridged_file = os.path.join(root, 'src', 'HoneybeeSchema', 'BaseClasses', 'EnergyMaterial.cs')
with open(abridged_file, "wt", encoding='utf-8') as abridgeFile:
data = []
data.append('namespace HoneybeeSchema\n')
data.append('{\n')
for f in class_files:
type_name = f
data.append('public partial class %s: IEnergyMaterial {}\n' % f.replace('.cs',''))
data.append('}')
abridgeFile.writelines(data)
abridgeFile.close()
|
[
"os.path.dirname",
"os.path.join",
"os.listdir"
] |
[((88, 140), 'os.path.join', 'os.path.join', (['root', '"""src"""', '"""HoneybeeSchema"""', '"""Model"""'], {}), "(root, 'src', 'HoneybeeSchema', 'Model')\n", (100, 140), False, 'import re, os, json\n'), ((391, 478), 'os.path.join', 'os.path.join', (['root', '"""src"""', '"""HoneybeeSchema"""', '"""BaseClasses"""', '"""NamedReferenceable.cs"""'], {}), "(root, 'src', 'HoneybeeSchema', 'BaseClasses',\n 'NamedReferenceable.cs')\n", (403, 478), False, 'import re, os, json\n'), ((1043, 1132), 'os.path.join', 'os.path.join', (['root', '"""src"""', '"""HoneybeeSchema"""', '"""BaseClasses"""', '"""EnergyWindowMaterial.cs"""'], {}), "(root, 'src', 'HoneybeeSchema', 'BaseClasses',\n 'EnergyWindowMaterial.cs')\n", (1055, 1132), False, 'import re, os, json\n'), ((1624, 1703), 'os.path.join', 'os.path.join', (['root', '"""src"""', '"""HoneybeeSchema"""', '"""BaseClasses"""', '"""EnergyMaterial.cs"""'], {}), "(root, 'src', 'HoneybeeSchema', 'BaseClasses', 'EnergyMaterial.cs')\n", (1636, 1703), False, 'import re, os, json\n'), ((45, 70), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (60, 70), False, 'import re, os, json\n'), ((188, 213), 'os.listdir', 'os.listdir', (['source_folder'], {}), '(source_folder)\n', (198, 213), False, 'import re, os, json\n'), ((957, 982), 'os.listdir', 'os.listdir', (['source_folder'], {}), '(source_folder)\n', (967, 982), False, 'import re, os, json\n'), ((1544, 1569), 'os.listdir', 'os.listdir', (['source_folder'], {}), '(source_folder)\n', (1554, 1569), False, 'import re, os, json\n')]
|
import sys
import os
from settings import beaver_broker_ip, beaver_broker_port, autotestdir, beaver_datanode_file, gflagsfile, config_path, log_dir, index_forsearch, pb_forsearch
import psutil
import time
import numpy as np
import requests
#MEM_MAX = psutil.virtual_memory().total
MEM_MAX = 0.8*32*1024*1024*1024 # memory size of tikv node, not current PC
#------------------knob controller------------------
# disable_auto_compactions
def set_disable_auto_compactions(ip, port, val):
cmd="./tikv-ctl --host "+ip+":"+port+" modify-tikv-config -m kvdb -n default.disable_auto_compactions -v "+str(val)
res=os.popen(cmd).read() # will return "success"
return(res)
knob_set=\
{
"--max_concurrency_tasks_per_search":
{
"changebyyml": True,
"set_func": None,
"minval": 0, # if type==int, indicate min possible value
"maxval": 0, # if type==int, indicate max possible value
"enumval": [4, 6, 8], # if type==enum, list all valid values
"type": "enum", # int / enum
"default": 0 # default value
},
"--max_per_search_ram":
{
"changebyyml": True,
"set_func": None,
"minval": 0, # if type==int, indicate min possible value
"maxval": 0, # if type==int, indicate max possible value
"enumval": [198], # if type==enum, list all valid values
"type": "enum", # int / enum
"default": 0 # default value
},
"--max_per_sub_search_ram":
{
"changebyyml": True,
"set_func": None,
"minval": 0, # if type==int, indicate min possible value
"maxval": 0, # if type==int, indicate max possible value
"enumval": [99], # if type==enum, list all valid values
"type": "enum", # int / enum
"default": 0 # default value
},
"--block_ids_per_batch":
{
"changebyyml": True,
"set_func": None,
"minval": 0, # if type==int, indicate min possible value
"maxval": 0, # if type==int, indicate max possible value
"enumval": [16, 18, 20], # if type==enum, list all valid values
"type": "enum", # int / enum
"default": 0 # default value
},
"--lease_timeout":
{
"changebyyml": True,
"set_func": None,
"minval": 0, # if type==int, indicate min possible value
"maxval": 0, # if type==int, indicate max possible value
"enumval": [4, 8, 16, 32, 64], # if type==enum, list all valid values
"type": "enum", # int / enum
"default": 0 # default value
},
"--enable_query_cache":
{
"changebyyml": True,
"set_func": None,
"minval": 0, # if type==int, indicate min possible value
"maxval": 0, # if type==int, indicate max possible value
"enumval": ['false', 'true'], # if type==enum, list all valid values
"type": "bool", # int / enum
"default": 0 # default value
},
}
#------------------metric controller------------------
def read_write_throughput(ip, port):
return(0) # DEPRECATED FUNCTION: throughput is instant and could be read from go-ycsb. No need to read in this function
def read_write_latency(ip, port):
return(0) # DEPRECATED FUNCTION: latency is instant and could be read from go-ycsb. No need to read in this function
def read_get_throughput(ip, port):
return(0) # DEPRECATED FUNCTION: throughput is instant and could be read from go-ycsb. No need to read in this function
def read_get_latency(ip, port):
return(0) # DEPRECATED FUNCTION: latency is instant and could be read from go-ycsb. No need to read in this function
def read_scan_throughput(ip, port):
return(0) # DEPRECATED FUNCTION: throughput is instant and could be read from go-ycsb. No need to read in this function
def read_scan_latency(ip, port):
return(0) # DEPRECATED FUNCTION: latency is instant and could be read from go-ycsb. No need to read in this function
def read_store_size(ip, port):
return(0)
def read_compaction_cpu(ip, port):
cmd="ps -aux|grep beaver_datanode|grep -v 'grep'|grep -v '/bin/sh'|awk -F' *' '{print $3}'"
res=os.popen(cmd).read()
if len(res) == 0:
return 0
else:
return(res)
def read_compaction_mem(ip, port):
cmd="ps -aux|grep beaver_datanode|grep -v 'grep'|grep -v '/bin/sh'|awk -F' *' '{print $4}'"
res=os.popen(cmd).read()
if len(res) == 0:
return 0
else:
return(res)
def read_search_latency(ip, port):
url = "http://"+ip+":"+port+"/_search?index="+index_forsearch+"&sid=test&rpc_timeout=60"
data = pb_forsearch
testnum = 20
num = 100
restime = []
# costime = []
for i in range(num + testnum):
start_api = beaverrequest(url, data)
if i >= testnum:
# restime.append(start_api[1])
restime.append(start_api[0]["timecost"])
sortedRestime = sorted(restime)
newrestime = sortedRestime[:-10]
return sum(newrestime) / len(newrestime)
def beaverrequest(url, data):
r = requests.post(url, data=data)
return [r.json(), r.elapsed.total_seconds(), r.status_code]
metric_set=\
{"write_throughput":
{
"read_func": read_write_throughput,
"lessisbetter": 0, # whether less value of this metric is better(1: yes)
"calc": "ins", #incremental
},
"write_latency":
{
"read_func": read_write_latency,
"lessisbetter": 1, # whether less value of this metric is better(1: yes)
"calc": "ins", #instant
},
"get_throughput":
{
"read_func": read_get_throughput,
"lessisbetter": 0, # whether less value of this metric is better(1: yes)
"calc": "ins", #incremental
},
"get_latency":
{
"read_func": read_get_latency,
"lessisbetter": 1, # whether less value of this metric is better(1: yes)
"calc": "ins", #instant
},
"scan_throughput":
{
"read_func": read_scan_throughput,
"lessisbetter": 0, # whether less value of this metric is better(1: yes)
"calc": "ins", #incremental
},
"scan_latency":
{
"read_func": read_scan_latency,
"lessisbetter": 1, # whether less value of this metric is better(1: yes)
"calc": "ins", #instant
},
"store_size":
{
"read_func": read_store_size,
"lessisbetter": 1, # whether less value of this metric is better(1: yes)
"calc": "ins", #instant
},
"compaction_cpu":
{
"read_func": read_compaction_cpu,
"lessisbetter": 1, # whether less value of this metric is better(1: yes)
"calc": "ins", #incremental
},
"compaction_mem":
{
"read_func": read_compaction_mem,
"lessisbetter": 1, # whether less value of this metric is better(1: yes)
"calc": "ins", #incremental
},
"search_latency":
{
"read_func": read_search_latency,
"lessisbetter": 1, # whether less value of this metric is better(1: yes)
"calc": "ins", #incremental
},
}
#------------------workload controller------------------
def run_workload(wl_type):
return(None)
def load_workload(wl_type):
return(None)
#------------------common functions------------------
def set_tikvyml(knob_sessname, knob_val):
ymldir=os.path.join(autotestdir,"conf","beaver_test.gflags_new")
tmpdir=os.path.join(autotestdir,"conf","beaver_test.gflags")
if not os.path.exists(os.path.dirname(tmpdir)):
os.makedirs(os.path.dirname(tmpdir))
os.popen("cp "+gflagsfile+" "+tmpdir).read()
with open(tmpdir, 'r') as read_file, open(ymldir, 'w') as write_file:
dic={}
for line in read_file:
value = line.strip().split("=")
if len(value) > 1:
dic[value[0]] = value[1]
if(knob_set[knob_sessname]['type']=='enum'):
idx=knob_val
knob_val=knob_set[knob_sessname]['enumval'][idx]
if(knob_set[knob_sessname]['type']=='bool'):
if(knob_val==0):
knob_val='false'
else:
knob_val='true'
if(knob_sessname=='--max_shard_size'):
knob_val=str(knob_val)+"g"
if(knob_sessname=='--max_per_search_ram' or knob_sessname=='--max_per_sub_search_ram'):
knob_val=str(knob_val)+"m"
if(knob_sessname in dic):
dic[knob_sessname] = knob_val
else:
return('failed')
print("set_beaver_datanode_gflags:: ",knob_sessname, knob_val)
for kkk in dic:
write_file.write(kkk+"="+str(dic[kkk])+'\n')
# os.popen("rm "+tmpdir+" && "+"mv "+ymldir+" "+tmpdir)
os.remove(tmpdir)
os.rename(ymldir, tmpdir)
time.sleep(0.5)
return('success')
# if(knob_name=='block-size'):
# knob_val=str(knob_val)+"KB"
# if(knob_name=='write-buffer-size' or knob_name=='max-bytes-for-level-base' or knob_name=='target-file-size-base'):
# knob_val=str(knob_val)+"MB"
# if(knob_name in tmpcontent[knob_sess[0]][knob_sess[1]]): # TODO: only support 2 level of knob_sess currently
# tmpcontent[knob_sess[0]][knob_sess[1]][knob_name]=knob_val
# else:
# return('failed')
# print("set_tikvyml:: ",knob_sessname, knob_sess, knob_name, knob_val)
# ymlf=open(ymldir, 'w')
# yaml.dump(tmpcontent, ymlf, Dumper=yaml.RoundTripDumper)
# os.popen("rm "+tmpdir+" && "+"mv "+ymldir+" "+tmpdir)
# time.sleep(0.5)
# return('success')
def set_knob(knob_name, knob_val):
changebyyml=knob_set[knob_name]["changebyyml"]
if(changebyyml):
res=set_tikvyml(knob_name, knob_val)
else:
func=knob_set[knob_name]["set_func"]
res=func(beaver_broker_ip, beaver_broker_port, knob_val)
return res
def read_knob(knob_name, knob_cache):
res=knob_cache[knob_name]
return res
def read_metric(metric_name, rres=None):
if(rres!=None):
rl=rres.split('\n')
rl.reverse()
if(metric_name=="write_latency"):
i=0
while((not rl[i].startswith('UPDATE ')) and (not rl[i].startswith('INSERT '))):
i+=1
dat=rl[i][rl[i].find("Avg(us):") + 9:].split(",")[0]
dat=int(dat)
return(dat)
elif(metric_name=="get_latency"):
i=0
while(not rl[i].startswith('READ ')):
i+=1
dat=rl[i][rl[i].find("Avg(us):") + 9:].split(",")[0]
dat=int(dat)
return(dat)
elif(metric_name=="scan_latency"):
i=0
while(not rl[i].startswith('SCAN ')):
i+=1
dat=rl[i][rl[i].find("Avg(us):") + 9:].split(",")[0]
dat=int(dat)
return(dat)
elif(metric_name=="write_throughput"):
i=0
while((not rl[i].startswith('UPDATE ')) and (not rl[i].startswith('INSERT '))):
i+=1
dat=rl[i][rl[i].find("OPS:") + 5:].split(",")[0]
dat=float(dat)
return(dat)
elif(metric_name=="get_throughput"):
i=0
while(not rl[i].startswith('READ ')):
i+=1
dat=rl[i][rl[i].find("OPS:") + 5:].split(",")[0]
dat=float(dat)
return(dat)
elif(metric_name=="scan_throughput"):
i=0
while(not rl[i].startswith('SCAN ')):
i+=1
dat=rl[i][rl[i].find("OPS:") + 5:].split(",")[0]
dat=float(dat)
return(dat)
func=metric_set[metric_name]["read_func"]
res=func(beaver_broker_ip, beaver_broker_port)
return res
def init_knobs():
# if there are knobs whose range is related to PC memory size, initialize them here
pass
def calc_metric(metric_after, metric_before, metric_list):
num_metrics = len(metric_list)
new_metric = np.zeros([1, num_metrics])
for i, x in enumerate(metric_list):
if(metric_set[x]["calc"]=="inc"):
new_metric[0][i]=metric_after[0][i]-metric_before[0][i]
elif(metric_set[x]["calc"]=="ins"):
new_metric[0][i]=metric_after[0][i]
return(new_metric)
# def restart_db():
# #cmd="cd /home/tidb/tidb-ansible/ && ansible-playbook unsafe_cleanup_data.yml"
# dircmd="cd "+ autotestdir + " && "
# clrcmd="ansible-playbook unsafe_cleanup_data.yml"
# depcmd="ansible-playbook deploy.yml"
# runcmd="ansible-playbook start.yml"
# ntpcmd="ansible-playbook -i hosts.ini deploy_ntp.yml -u tidb -b" #need sleep 10s after ntpcmd
# print("-------------------------------------------------------")
# clrres = os.popen(dircmd+clrcmd).read()
# if("Congrats! All goes well" in clrres):
# print("unsafe_cleanup_data finished, res == "+clrres.split('\n')[-2])
# else:
# print(clrres)
# print("unsafe_cleanup_data failed")
# exit()
# print("-------------------------------------------------------")
# ntpres = os.popen(dircmd + ntpcmd).read()
# time.sleep(10)
# if ("Congrats! All goes well" in ntpres):
# print("set ntp finished, res == " + ntpres.split('\n')[-2])
# else:
# print(ntpres)
# print("set ntp failed")
# exit()
# print("-------------------------------------------------------")
# depres = os.popen(dircmd + depcmd).read()
# if ("Congrats! All goes well" in depres):
# print("deploy finished, res == "+depres.split('\n')[-2])
# else:
# print(depres)
# print("deploy failed")
# exit()
# print("-------------------------------------------------------")
# runres = os.popen(dircmd + runcmd).read()
# if ("Congrats! All goes well" in runres):
# print("start finished, res == "+runres.split('\n')[-2])
# else:
# print(runres)
# print("start failed")
# exit()
# print("-------------------------------------------------------")
def restart_beaver_datanode():
dircmd="cd "+ autotestdir + " && "
stopcmd="ps -ef|grep beaver_datanode|grep -v 'grep'|awk -F' *' '{print $2}'|xargs kill"
querycmd="ps -ef|grep beaver_datanode|grep -v 'grep'|awk -F' *' '{print $2}'"
beaver_conf=os.path.join(autotestdir,"conf","beaver_datanode.gflags")
test_conf=os.path.join(autotestdir,"conf","beaver_test.gflags")
startcmd=beaver_datanode_file+" --flagfile="+beaver_conf+" --config_path="+config_path+" --log_dir="+log_dir+" > /dev/null 2>&1"
print("-----------------------------stop beaver datanode--------------------------")
stopres = os.popen(stopcmd).read()
if len(os.popen(querycmd).read()) != 0:
for i in range(5):
time.sleep(2)
psres = os.popen(querycmd).read()
if len(psres) == 0 :
print("Beaver has been closed successfully!")
break
else:
print("Waiting beaver to close, pid is %s" % psres)
if i == 4:
print("Beaver close failed!")
exit()
else:
print("Beaver closed successfully!")
print("-----------------------------replace config file--------------------------")
if os.path.exists(beaver_conf):
os.remove(beaver_conf)
replaceres = os.popen("cp "+test_conf+" "+beaver_conf).read()
if len(replaceres) == 0:
print("replace config file finished!")
else:
print(replaceres)
print("replace config file failed!")
exit()
print("-----------------------------start beaver datanode--------------------------")
startres = os.popen(startcmd)
beaver_url = "http://"+beaver_broker_ip+":"+beaver_broker_port+"/_search?index="+index_forsearch+"&sid=test&rpc_timeout=60"
for i in range(20):
time.sleep(10)
curlres = requests.post(beaver_url, data=pb_forsearch).json()
if "result" in curlres and curlres['result'] == False:
print("Waiting beaver datanode to be available...")
else:
print("Beaver datanode is available!")
break
if i == 19:
print(curlres)
print("Beaver start failed!")
exit()
print("---------------------------------------------------------------------------")
|
[
"os.remove",
"os.rename",
"os.popen",
"numpy.zeros",
"os.path.exists",
"os.path.dirname",
"time.sleep",
"requests.post",
"os.path.join"
] |
[((6072, 6101), 'requests.post', 'requests.post', (['url'], {'data': 'data'}), '(url, data=data)\n', (6085, 6101), False, 'import requests\n'), ((8870, 8929), 'os.path.join', 'os.path.join', (['autotestdir', '"""conf"""', '"""beaver_test.gflags_new"""'], {}), "(autotestdir, 'conf', 'beaver_test.gflags_new')\n", (8882, 8929), False, 'import os\n'), ((8939, 8994), 'os.path.join', 'os.path.join', (['autotestdir', '"""conf"""', '"""beaver_test.gflags"""'], {}), "(autotestdir, 'conf', 'beaver_test.gflags')\n", (8951, 8994), False, 'import os\n'), ((10239, 10256), 'os.remove', 'os.remove', (['tmpdir'], {}), '(tmpdir)\n', (10248, 10256), False, 'import os\n'), ((10261, 10286), 'os.rename', 'os.rename', (['ymldir', 'tmpdir'], {}), '(ymldir, tmpdir)\n', (10270, 10286), False, 'import os\n'), ((10291, 10306), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (10301, 10306), False, 'import time\n'), ((13447, 13473), 'numpy.zeros', 'np.zeros', (['[1, num_metrics]'], {}), '([1, num_metrics])\n', (13455, 13473), True, 'import numpy as np\n'), ((15787, 15846), 'os.path.join', 'os.path.join', (['autotestdir', '"""conf"""', '"""beaver_datanode.gflags"""'], {}), "(autotestdir, 'conf', 'beaver_datanode.gflags')\n", (15799, 15846), False, 'import os\n'), ((15859, 15914), 'os.path.join', 'os.path.join', (['autotestdir', '"""conf"""', '"""beaver_test.gflags"""'], {}), "(autotestdir, 'conf', 'beaver_test.gflags')\n", (15871, 15914), False, 'import os\n'), ((16762, 16789), 'os.path.exists', 'os.path.exists', (['beaver_conf'], {}), '(beaver_conf)\n', (16776, 16789), False, 'import os\n'), ((17165, 17183), 'os.popen', 'os.popen', (['startcmd'], {}), '(startcmd)\n', (17173, 17183), False, 'import os\n'), ((16799, 16821), 'os.remove', 'os.remove', (['beaver_conf'], {}), '(beaver_conf)\n', (16808, 16821), False, 'import os\n'), ((17344, 17358), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (17354, 17358), False, 'import time\n'), ((633, 646), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (641, 646), False, 'import os\n'), ((5174, 5187), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (5182, 5187), False, 'import os\n'), ((5404, 5417), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (5412, 5417), False, 'import os\n'), ((9019, 9042), 'os.path.dirname', 'os.path.dirname', (['tmpdir'], {}), '(tmpdir)\n', (9034, 9042), False, 'import os\n'), ((9065, 9088), 'os.path.dirname', 'os.path.dirname', (['tmpdir'], {}), '(tmpdir)\n', (9080, 9088), False, 'import os\n'), ((16149, 16166), 'os.popen', 'os.popen', (['stopcmd'], {}), '(stopcmd)\n', (16157, 16166), False, 'import os\n'), ((16257, 16270), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (16267, 16270), False, 'import time\n'), ((16839, 16886), 'os.popen', 'os.popen', (["('cp ' + test_conf + ' ' + beaver_conf)"], {}), "('cp ' + test_conf + ' ' + beaver_conf)\n", (16847, 16886), False, 'import os\n'), ((9098, 9141), 'os.popen', 'os.popen', (["('cp ' + gflagsfile + ' ' + tmpdir)"], {}), "('cp ' + gflagsfile + ' ' + tmpdir)\n", (9106, 9141), False, 'import os\n'), ((17377, 17421), 'requests.post', 'requests.post', (['beaver_url'], {'data': 'pb_forsearch'}), '(beaver_url, data=pb_forsearch)\n', (17390, 17421), False, 'import requests\n'), ((16185, 16203), 'os.popen', 'os.popen', (['querycmd'], {}), '(querycmd)\n', (16193, 16203), False, 'import os\n'), ((16291, 16309), 'os.popen', 'os.popen', (['querycmd'], {}), '(querycmd)\n', (16299, 16309), False, 'import os\n')]
|
import os
from musicscore.musictree.treescoretimewise import TreeScoreTimewise
from musurgia.unittest import TestCase
from musurgia.fractaltree.fractalmusic import FractalMusic
path = str(os.path.abspath(__file__).split('.')[0])
class Test(TestCase):
def setUp(self) -> None:
self.score = TreeScoreTimewise()
fm = FractalMusic(tempo=60, quarter_duration=10, reading_direction='vertical')
fm.add_layer()
fm.add_layer()
self.fm = fm.get_children()[1]
self.deep_copied = self.fm.__deepcopy__()
def test(self, exp=None, act=None):
if not exp:
exp = self.fm
if not act:
act = self.deep_copied
self.assertEqual(exp.value, act.value)
self.assertEqual(exp.proportions, act.proportions)
self.assertEqual(exp.value, act.value)
self.assertEqual(exp.proportions, act.proportions)
self.assertEqual(exp.tree_permutation_order, act.tree_permutation_order)
self.assertEqual(exp.fractal_order, act.fractal_order)
self.assertEqual(exp.reading_direction, act.reading_direction)
self.assertEqual(exp._name, act._name)
self.assertEqual(exp.tree_directions, act.tree_directions)
self.assertEqual(exp.tempo, act.tempo)
self.assertEqual(exp.midi_value, act.midi_value)
def test_1(self):
self.assertIsNone(self.deep_copied.up)
def test_2(self):
self.assertNotEqual(self.deep_copied.name, self.fm.name)
def test_3(self):
for leaf in self.fm.traverse_leaves():
leaf.chord.add_words(leaf.fractal_order)
copied = self.fm.__deepcopy__()
copied.get_simple_format().to_stream_voice().add_to_score(self.score)
xml_path = path + '_test_3.xml'
self.score.write(xml_path)
expected_path = path + '_test_3_expected.xml'
expected_score = TreeScoreTimewise()
self.fm.get_simple_format().to_stream_voice().add_to_score(expected_score)
expected_score.write(expected_path)
self.assertCompareFiles(xml_path, expected_path)
def test_deep_copied_child_midi_values(self):
fm = FractalMusic(proportions=[1, 2, 3, 4], tree_permutation_order=[3, 1, 4, 2], quarter_duration=20,
tempo=70)
fm.midi_generator.midi_range = [36, 60]
fm.add_layer()
selected_node = fm.get_children()[0]
copied_node = selected_node.__deepcopy__()
copied_node.add_layer()
actual = [node.midi_value for node in copied_node.get_children()]
selected_node.add_layer()
expected = [node.midi_value for node in selected_node.get_children()]
self.assertEqual(expected, actual)
|
[
"musicscore.musictree.treescoretimewise.TreeScoreTimewise",
"os.path.abspath",
"musurgia.fractaltree.fractalmusic.FractalMusic"
] |
[((306, 325), 'musicscore.musictree.treescoretimewise.TreeScoreTimewise', 'TreeScoreTimewise', ([], {}), '()\n', (323, 325), False, 'from musicscore.musictree.treescoretimewise import TreeScoreTimewise\n'), ((339, 412), 'musurgia.fractaltree.fractalmusic.FractalMusic', 'FractalMusic', ([], {'tempo': '(60)', 'quarter_duration': '(10)', 'reading_direction': '"""vertical"""'}), "(tempo=60, quarter_duration=10, reading_direction='vertical')\n", (351, 412), False, 'from musurgia.fractaltree.fractalmusic import FractalMusic\n'), ((1892, 1911), 'musicscore.musictree.treescoretimewise.TreeScoreTimewise', 'TreeScoreTimewise', ([], {}), '()\n', (1909, 1911), False, 'from musicscore.musictree.treescoretimewise import TreeScoreTimewise\n'), ((2160, 2270), 'musurgia.fractaltree.fractalmusic.FractalMusic', 'FractalMusic', ([], {'proportions': '[1, 2, 3, 4]', 'tree_permutation_order': '[3, 1, 4, 2]', 'quarter_duration': '(20)', 'tempo': '(70)'}), '(proportions=[1, 2, 3, 4], tree_permutation_order=[3, 1, 4, 2],\n quarter_duration=20, tempo=70)\n', (2172, 2270), False, 'from musurgia.fractaltree.fractalmusic import FractalMusic\n'), ((191, 216), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (206, 216), False, 'import os\n')]
|
import functools
import collections.abc
from django.template.loader import render_to_string
from pagination.settings import MARGIN_PAGES_DISPLAYED, PAGE_RANGE_DISPLAYED
class PageRepresentation(int):
def __new__(cls, x, querystring):
obj = int.__new__(cls, x)
obj.querystring = querystring
return obj
def add_page_querystring(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs)
if isinstance(result, int):
querystring = self._other_page_querystring(result)
return PageRepresentation(result, querystring)
        elif isinstance(result, collections.abc.Iterable):  # collections.Iterable was removed in Python 3.10
new_result = []
for number in result:
if isinstance(number, int):
querystring = self._other_page_querystring(number)
new_result.append(PageRepresentation(number, querystring))
else:
new_result.append(number)
return new_result
return result
return wrapper
class Page(object):
template = 'pagination.html'
def __init__(self, object_list, number, paginator):
self.object_list = object_list
self.paginator = paginator
if paginator.request:
self.base_queryset = self.paginator.request.GET.copy()
self.number = PageRepresentation(number, self._other_page_querystring(number))
def __repr__(self):
return '<Page %s of %s>' % (self.number, self.paginator.num_pages)
def has_next(self):
return self.number < self.paginator.num_pages
def has_previous(self):
return self.number > 1
def has_other_pages(self):
return self.has_previous() or self.has_next()
@add_page_querystring
def next_page_number(self):
return self.number + 1
@add_page_querystring
def previous_page_number(self):
return self.number - 1
def start_index(self):
"""
Returns the 1-based index of the first object on this page,
relative to total objects in the paginator.
"""
# Special case, return zero if no items.
if self.paginator.count == 0:
return 0
return (self.paginator.per_page * (self.number - 1)) + 1
def end_index(self):
"""
Returns the 1-based index of the last object on this page,
relative to total objects found (hits).
"""
# Special case for the last page because there can be orphans.
if self.number == self.paginator.num_pages:
return self.paginator.count
return self.number * self.paginator.per_page
@add_page_querystring
def pages(self):
if self.paginator.num_pages <= PAGE_RANGE_DISPLAYED:
return range(1, self.paginator.num_pages + 1)
result = []
left_side = PAGE_RANGE_DISPLAYED / 2
right_side = PAGE_RANGE_DISPLAYED - left_side
if self.number > self.paginator.num_pages - PAGE_RANGE_DISPLAYED / 2:
right_side = self.paginator.num_pages - self.number
left_side = PAGE_RANGE_DISPLAYED - right_side
elif self.number < PAGE_RANGE_DISPLAYED / 2:
left_side = self.number
right_side = PAGE_RANGE_DISPLAYED - left_side
for page in range(1, self.paginator.num_pages + 1):
if page <= MARGIN_PAGES_DISPLAYED:
result.append(page)
continue
if page > self.paginator.num_pages - MARGIN_PAGES_DISPLAYED:
result.append(page)
continue
if (page >= self.number - left_side) and (page <= self.number + right_side):
result.append(page)
continue
if result[-1]:
result.append(None)
return result
def _other_page_querystring(self, page_number):
"""
Returns a query string for the given page, preserving any
GET parameters present.
"""
if self.paginator.request:
self.base_queryset['page'] = page_number
return self.base_queryset.urlencode()
# raise Warning("You must supply Paginator() with the request object for a proper querystring.")
return 'page=%s' % page_number
def render(self):
return render_to_string(self.template, {
'current_page': self,
'page_obj': self, # Issue 9 https://github.com/jamespacileo/django-pure-pagination/issues/9
# Use same naming conventions as Django
})
|
[
"django.template.loader.render_to_string",
"functools.wraps"
] |
[((369, 390), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (384, 390), False, 'import functools\n'), ((4361, 4434), 'django.template.loader.render_to_string', 'render_to_string', (['self.template', "{'current_page': self, 'page_obj': self}"], {}), "(self.template, {'current_page': self, 'page_obj': self})\n", (4377, 4434), False, 'from django.template.loader import render_to_string\n')]
|
import sys
input = bin(int(sys.stdin.readline().strip(), base=16))[2:]
# pad with leading zeros so the bit string length is a multiple of 4
input = input.zfill(-(-len(input)//4)*4)
def decode(msg):
if msg == '' or int(msg) == 0:
return 0
version = int(msg[0:3], 2)
type_id = int(msg[3:6], 2)
if type_id == 4:
last_group = False
cursor = 6
while not last_group:
if msg[cursor] == '0':
last_group = True
cursor += 5
return version + decode(msg[cursor:])
length_type_id = msg[6]
if length_type_id == '0':
total_sub_packets_len = int(msg[7:22], 2)
return version + decode(msg[22:22+total_sub_packets_len]) + decode(msg[22+total_sub_packets_len:])
return version + decode(msg[18:])
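# Worked example (illustrative; not part of the original solution): the sample
# transmission 'D2FE28' expands to the bit string '110100101111111000101000'.
# decode() reads version=0b110=6 and type_id=0b100=4 (a literal-value packet),
# skips the three 5-bit literal groups, then recurses on the trailing '000'
# padding, which contributes 0 -- so the version sum of this packet is 6.
#   bits = bin(int('D2FE28', base=16))[2:].zfill(24)
#   decode(bits)  # -> 6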
# result
# sample1: 16
# smaple2: 12
# sample3: 23
# sample4: 31
# puzzle: 971
print(decode(input))
|
[
"sys.stdin.readline"
] |
[((28, 48), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (46, 48), False, 'import sys\n')]
|
from arm_prosthesis.external_communication.models.dto.entity_dto import EntityDto
from arm_prosthesis.external_communication.models.dto.gesture_dto import GestureDto
from gestures_pb2 import SaveGesture
class SaveGestureDto(EntityDto):
def __init__(self):
self._time_sync = 0
self._gesture_dto = None
@property
def time_sync(self) -> int:
return self._time_sync
@property
def gesture_dto(self) -> GestureDto:
return self._gesture_dto
def serialize(self) -> bytes:
raise NotImplementedError
def deserialize(self, byte_array: bytes):
save_gesture_protobuf = SaveGesture()
save_gesture_protobuf.ParseFromString(byte_array)
self._time_sync = save_gesture_protobuf.time_sync
self._gesture_dto = GestureDto()
self._gesture_dto.create_from_protobuf_gesture(save_gesture_protobuf.gesture)
|
[
"arm_prosthesis.external_communication.models.dto.gesture_dto.GestureDto",
"gestures_pb2.SaveGesture"
] |
[((662, 675), 'gestures_pb2.SaveGesture', 'SaveGesture', ([], {}), '()\n', (673, 675), False, 'from gestures_pb2 import SaveGesture\n'), ((825, 837), 'arm_prosthesis.external_communication.models.dto.gesture_dto.GestureDto', 'GestureDto', ([], {}), '()\n', (835, 837), False, 'from arm_prosthesis.external_communication.models.dto.gesture_dto import GestureDto\n')]
|
from process.process import Process
class Load(Process):
def __init__(self, settings=None):
Process.__init__(self, settings=settings)
# import_file_name is full local file name or url to source
#self.import_file_list=import_file_list
#self.dataframe=None
#self.dictionary={}
#self.list={}
#print('Load')
'''
def get_dictionary(self):
return self.dictionary
def get_dataframe(self):
return self.dataframe
def get_list(self):
return self.list
'''
|
[
"process.process.Process.__init__"
] |
[((104, 145), 'process.process.Process.__init__', 'Process.__init__', (['self'], {'settings': 'settings'}), '(self, settings=settings)\n', (120, 145), False, 'from process.process import Process\n')]
|
import json
from typing import List, Mapping, Optional, Sequence, Tuple, Union, cast
import pulumi_aws as aws
import pulumi_docker as docker
from infra.cache import Cache
from infra.config import (
DEPLOYMENT_NAME,
REAL_DEPLOYMENT,
SERVICE_LOG_RETENTION_DAYS,
configured_version_for,
)
from infra.ec2 import Ec2Port
from infra.emitter import EventEmitter
from infra.metric_forwarder import MetricForwarder
from infra.network import Network
from infra.policies import ECR_TOKEN_POLICY, attach_policy
from infra.repository import Repository, registry_credentials
from infra.service_queue import ServiceQueue
import pulumi
class GraplDockerBuild(docker.DockerBuild):
def __init__(
self,
dockerfile: str,
target: str,
context: Optional[str] = None,
args: Optional[Mapping[str, pulumi.Input[str]]] = None,
env: Optional[Mapping[str, str]] = None,
):
super().__init__(
context=context,
dockerfile=dockerfile,
env={**(env or {}), "DOCKER_BUILDKIT": 1},
args={**(args or {}), "RUST_BUILD": "debug"},
target=target,
# Quiet the Docker builds at `pulumi up` time
# ...except it doesn't work with `buildx` yet
# https://github.com/docker/buildx/issues/401
# extra_options=("--quiet",),
)
class FargateTaskRole(aws.iam.Role):
def __init__(
self, name: str, opts: Optional[pulumi.ResourceOptions] = None
) -> None:
super().__init__(
f"{name}-task-role",
description=f"Fargate task role for {name}",
assume_role_policy=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Principal": {"Service": "ecs-tasks.amazonaws.com"},
}
],
}
),
opts=opts,
)
class FargateExecutionRole(aws.iam.Role):
def __init__(
self, name: str, opts: Optional[pulumi.ResourceOptions] = None
) -> None:
super().__init__(
f"{name}-execution-role",
description=f"Fargate execution role for {name}",
assume_role_policy=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Principal": {"Service": "ecs-tasks.amazonaws.com"},
}
],
}
),
opts=opts,
)
class _AWSFargateService(pulumi.ComponentResource):
def __init__(
self,
name: str,
cluster: aws.ecs.Cluster,
queue: ServiceQueue,
input_emitter: EventEmitter,
output_emitter: EventEmitter,
network: Network,
image: pulumi.Output[str],
env: Mapping[str, Union[str, pulumi.Output[str]]],
forwarder: MetricForwarder,
entrypoint: Optional[List[str]] = None,
command: Optional[List[str]] = None,
opts: Optional[pulumi.ResourceOptions] = None,
) -> None:
"""
:param command: supply an override to the CMD defined in the Dockerfile.
"""
super().__init__("grapl:AWSFargateService", name, None, opts)
self.task_role = FargateTaskRole(name, opts=pulumi.ResourceOptions(parent=self))
########################################################################
# TODO: CDK code has us consuming from all queues, but that's
# likely excessive. The default service probably just needs to
# consume from the main queue; similarly for the retry service
# and retry queue
#
# We should probably bundle this concept up into a single
# policy (one for the "default" case and one for the "retry"
# case), and then put this into the ServiceQueue object. Then,
# anything that needs to behave as a "default service" can
# just attach the appropriate policy; similarly for things
# that behave like "retry services".
#
# That would likely allow us to unify the Fargate- and
# Lambda-based services, too.
queue.grant_main_queue_consumption_to(self.task_role)
queue.grant_retry_queue_consumption_to(self.task_role)
queue.grant_dead_letter_queue_consumption_to(self.task_role)
########################################################################
########################################################################
# TODO: As above, we don't need everything to be able to send
# to all our queues.
#
# If we take the approach advocated above with a single policy
# laying out the behavior we want, then these attachments can
# go away, since they will have been subsumed into the ones
# above.
queue.grant_main_queue_send_to(self.task_role)
queue.grant_retry_queue_send_to(self.task_role)
queue.grant_dead_letter_queue_send_to(self.task_role)
########################################################################
input_emitter.grant_read_to(self.task_role)
output_emitter.grant_write_to(self.task_role)
self.execution_role = FargateExecutionRole(
name, opts=pulumi.ResourceOptions(parent=self)
)
# Incorporating the stack name into this log group name;
# otherwise we'll end up dumping logs from different stacks
# together.
#
# TODO: Consider a helper function for generating log group
# names that adheres to this convention for all our services
# (though this will be less of an issue once we migrate to
# Kafka)
self.log_group = aws.cloudwatch.LogGroup(
f"{name}-log-group",
name=f"/grapl/{DEPLOYMENT_NAME}/{name}",
retention_in_days=SERVICE_LOG_RETENTION_DAYS,
opts=pulumi.ResourceOptions(parent=self),
)
aws.iam.RolePolicy(
f"{name}-write-log-events",
role=self.execution_role.name,
policy=self.log_group.arn.apply(
lambda arn: json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["logs:CreateLogStream", "logs:PutLogEvents"],
"Resource": f"{arn}:*",
}
],
}
)
),
opts=pulumi.ResourceOptions(parent=self.execution_role),
)
# This is only needed if we're actually pulling from ECR,
# which we don't do in production (because we're pulling from
# Cloudsmith). The only time we use ECR is when we build a
# Docker container locally, and that'll only happen for
# individual developer sandbox deployments.
# TODO: This feels hacky; consider other ways to model this.
if not REAL_DEPLOYMENT:
attach_policy(ECR_TOKEN_POLICY, self.execution_role)
forwarder.subscribe_to_log_group(name, self.log_group)
self.task = aws.ecs.TaskDefinition( # type: ignore[call-overload]
f"{name}-task",
family=f"{DEPLOYMENT_NAME}-{name}-task",
container_definitions=pulumi.Output.all(
queue_url=queue.main_queue_url,
retry_url=queue.retry_queue_url,
dead_letter_url=queue.dead_letter_queue_url,
log_group=self.log_group.name,
bucket=output_emitter.bucket.bucket,
image=image,
env=env,
).apply(
lambda inputs: json.dumps(
[
{
# NOTE: it seems that *all* our containers
# are named this. Perhaps due to CDK's
# QueueProcessingFargateService abstraction?
"name": "QueueProcessingContainer",
"image": inputs["image"],
"environment": _environment_from_map(
{
"QUEUE_URL": inputs["queue_url"],
"SOURCE_QUEUE_URL": inputs["queue_url"],
"DEST_BUCKET_NAME": inputs["bucket"],
"DEPLOYMENT_NAME": DEPLOYMENT_NAME,
"DEAD_LETTER_QUEUE_URL": inputs["dead_letter_url"],
"RETRY_QUEUE_URL": inputs["retry_url"],
**inputs["env"],
}
),
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-stream-prefix": "logs",
"awslogs-region": aws.get_region().name,
"awslogs-group": inputs["log_group"],
},
},
**({"entryPoint": entrypoint} if entrypoint else {}),
**({"command": command} if command else {}),
},
]
)
),
requires_compatibilities=["FARGATE"],
cpu=256,
memory=512,
network_mode="awsvpc", # only option for Fargate
task_role_arn=self.task_role.arn,
execution_role_arn=self.execution_role.arn,
opts=pulumi.ResourceOptions(
parent=self,
),
)
self.security_group = aws.ec2.SecurityGroup(
f"{name}-security-group",
vpc_id=network.vpc.id,
opts=pulumi.ResourceOptions(parent=self),
)
self.service = aws.ecs.Service(
f"{name}-service",
cluster=cluster.arn,
network_configuration=aws.ecs.ServiceNetworkConfigurationArgs(
assign_public_ip=False,
subnets=[net.id for net in network.private_subnets],
security_groups=[self.security_group.id],
),
launch_type="FARGATE",
desired_count=1, # TODO: Set this to 1 or 0 depending on default vs. retry
deployment_minimum_healthy_percent=50,
task_definition=self.task.arn,
opts=pulumi.ResourceOptions(
parent=self,
),
)
self.register_outputs({})
class FargateService(pulumi.ComponentResource):
def __init__(
self,
name: str,
input_emitter: EventEmitter,
output_emitter: EventEmitter,
network: Network,
image: docker.DockerBuild,
env: Mapping[str, Union[str, pulumi.Output[str]]],
forwarder: MetricForwarder,
entrypoint: Optional[List[str]] = None,
command: Optional[List[str]] = None,
retry_image: Optional[docker.DockerBuild] = None,
retry_entrypoint: Optional[List[str]] = None,
retry_command: Optional[List[str]] = None,
opts: Optional[pulumi.ResourceOptions] = None,
) -> None:
super().__init__("grapl:FargateService", name, None, opts)
self.queue = ServiceQueue(name, opts=pulumi.ResourceOptions(parent=self))
self.queue.subscribe_to_emitter(input_emitter)
self.ecs_cluster = aws.ecs.Cluster(
f"{name}-cluster",
opts=pulumi.ResourceOptions(parent=self),
)
# We're not calling this image, e.g., "foo-default" to account
# for the (common) case that the corresponding retry service
# uses the same image.
(repository, image_name) = self._repository_and_image(name, image)
self.default_service = _AWSFargateService(
f"{name}-default",
cluster=self.ecs_cluster,
queue=self.queue,
input_emitter=input_emitter,
output_emitter=output_emitter,
network=network,
image=image_name,
entrypoint=entrypoint,
command=command,
env=env,
forwarder=forwarder,
opts=pulumi.ResourceOptions(parent=self),
)
if repository:
repository.grant_access_to(self.default_service.execution_role)
# If a separate retry image was provided, create a separate
# repository for it; otherwise, reuse the existing repository
# and image.
retry_name = f"{name}-retry"
(retry_repository, retry_image_name) = (
self._repository_and_image(retry_name, retry_image)
if retry_image
else (repository, image_name)
)
self.retry_service = _AWSFargateService(
retry_name,
cluster=self.ecs_cluster,
queue=self.queue,
input_emitter=input_emitter,
output_emitter=output_emitter,
network=network,
image=retry_image_name,
entrypoint=retry_entrypoint or entrypoint,
command=retry_command or command,
env=env,
forwarder=forwarder,
opts=pulumi.ResourceOptions(parent=self),
)
if retry_repository:
retry_repository.grant_access_to(self.retry_service.execution_role)
self.services = (self.default_service, self.retry_service)
self._setup_default_ports()
self.register_outputs({})
def _setup_default_ports(self) -> None:
"""
Can be overridden by subclasses. Most services are fine having an outbound 443.
Has a cognate in service.py.
"""
for svc in self.services:
Ec2Port("tcp", 443).allow_outbound_any_ip(svc.security_group)
def allow_egress_to_cache(self, cache: Cache) -> None:
"""
Allow both the default and retry services to connect to the `cache`.
"""
for svc in self.services:
cache.allow_egress_to_cache_for(svc._name, svc.security_group)
def _repository_and_image(
self, name: str, build: docker.DockerBuild
) -> Tuple[Optional[Repository], pulumi.Output[str]]:
version = configured_version_for(name)
if version:
image_name = f"docker.cloudsmith.io/grapl/raw/{name}:{version}"
pulumi.info(f"Version found for {name}: {version} ({image_name})")
# It's a bit of a bummer to need this cast :/
return (None, cast(pulumi.Output[str], image_name))
else:
# create ECR, build image, push to ECR, return output
pulumi.info(
f"Version NOT found for {name}; performing local container image build"
)
repository = Repository(name, opts=pulumi.ResourceOptions(parent=self))
image = docker.Image(
name,
image_name=repository.registry_qualified_name,
build=build,
registry=registry_credentials(),
opts=pulumi.ResourceOptions(parent=self),
)
# The built image name will have a checksum appended to it,
# thus eliminating the need to use tags.
return (repository, image.image_name)
def _environment_from_map(env: Mapping[str, str]) -> Sequence[Mapping[str, str]]:
"""
Generate a list of environment variable dictionaries for an ECS task container definition from a standard dictionary.
"""
return [{"name": k, "value": v} for (k, v) in env.items()]
|
[
"pulumi.info",
"infra.policies.attach_policy",
"typing.cast",
"pulumi.ResourceOptions",
"pulumi_aws.ecs.ServiceNetworkConfigurationArgs",
"infra.config.configured_version_for",
"json.dumps",
"infra.ec2.Ec2Port",
"pulumi.Output.all",
"infra.repository.registry_credentials",
"pulumi_aws.get_region"
] |
[((14859, 14887), 'infra.config.configured_version_for', 'configured_version_for', (['name'], {}), '(name)\n', (14881, 14887), False, 'from infra.config import DEPLOYMENT_NAME, REAL_DEPLOYMENT, SERVICE_LOG_RETENTION_DAYS, configured_version_for\n'), ((7471, 7523), 'infra.policies.attach_policy', 'attach_policy', (['ECR_TOKEN_POLICY', 'self.execution_role'], {}), '(ECR_TOKEN_POLICY, self.execution_role)\n', (7484, 7523), False, 'from infra.policies import ECR_TOKEN_POLICY, attach_policy\n'), ((14996, 15062), 'pulumi.info', 'pulumi.info', (['f"""Version found for {name}: {version} ({image_name})"""'], {}), "(f'Version found for {name}: {version} ({image_name})')\n", (15007, 15062), False, 'import pulumi\n'), ((15277, 15366), 'pulumi.info', 'pulumi.info', (['f"""Version NOT found for {name}; performing local container image build"""'], {}), "(\n f'Version NOT found for {name}; performing local container image build')\n", (15288, 15366), False, 'import pulumi\n'), ((1667, 1829), 'json.dumps', 'json.dumps', (["{'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow', 'Action':\n 'sts:AssumeRole', 'Principal': {'Service': 'ecs-tasks.amazonaws.com'}}]}"], {}), "({'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow',\n 'Action': 'sts:AssumeRole', 'Principal': {'Service':\n 'ecs-tasks.amazonaws.com'}}]})\n", (1677, 1829), False, 'import json\n'), ((2407, 2569), 'json.dumps', 'json.dumps', (["{'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow', 'Action':\n 'sts:AssumeRole', 'Principal': {'Service': 'ecs-tasks.amazonaws.com'}}]}"], {}), "({'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow',\n 'Action': 'sts:AssumeRole', 'Principal': {'Service':\n 'ecs-tasks.amazonaws.com'}}]})\n", (2417, 2569), False, 'import json\n'), ((3633, 3668), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (3655, 3668), False, 'import pulumi\n'), ((5625, 5660), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (5647, 5660), False, 'import pulumi\n'), ((6267, 6302), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (6289, 6302), False, 'import pulumi\n'), ((6976, 7026), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'parent': 'self.execution_role'}), '(parent=self.execution_role)\n', (6998, 7026), False, 'import pulumi\n'), ((10176, 10211), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (10198, 10211), False, 'import pulumi\n'), ((10398, 10433), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (10420, 10433), False, 'import pulumi\n'), ((10584, 10752), 'pulumi_aws.ecs.ServiceNetworkConfigurationArgs', 'aws.ecs.ServiceNetworkConfigurationArgs', ([], {'assign_public_ip': '(False)', 'subnets': '[net.id for net in network.private_subnets]', 'security_groups': '[self.security_group.id]'}), '(assign_public_ip=False, subnets=[\n net.id for net in network.private_subnets], security_groups=[self.\n security_group.id])\n', (10623, 10752), True, 'import pulumi_aws as aws\n'), ((11041, 11076), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (11063, 11076), False, 'import pulumi\n'), ((11925, 11960), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (11947, 11960), False, 'import pulumi\n'), ((12110, 12145), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'parent': 
'self'}), '(parent=self)\n', (12132, 12145), False, 'import pulumi\n'), ((12833, 12868), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (12855, 12868), False, 'import pulumi\n'), ((13831, 13866), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (13853, 13866), False, 'import pulumi\n'), ((15147, 15183), 'typing.cast', 'cast', (['pulumi.Output[str]', 'image_name'], {}), '(pulumi.Output[str], image_name)\n', (15151, 15183), False, 'from typing import List, Mapping, Optional, Sequence, Tuple, Union, cast\n'), ((14367, 14386), 'infra.ec2.Ec2Port', 'Ec2Port', (['"""tcp"""', '(443)'], {}), "('tcp', 443)\n", (14374, 14386), False, 'from infra.ec2 import Ec2Port\n'), ((15440, 15475), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (15462, 15475), False, 'import pulumi\n'), ((15651, 15673), 'infra.repository.registry_credentials', 'registry_credentials', ([], {}), '()\n', (15671, 15673), False, 'from infra.repository import Repository, registry_credentials\n'), ((15696, 15731), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'parent': 'self'}), '(parent=self)\n', (15718, 15731), False, 'import pulumi\n'), ((6499, 6661), 'json.dumps', 'json.dumps', (["{'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow', 'Action': [\n 'logs:CreateLogStream', 'logs:PutLogEvents'], 'Resource': f'{arn}:*'}]}"], {}), "({'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow',\n 'Action': ['logs:CreateLogStream', 'logs:PutLogEvents'], 'Resource':\n f'{arn}:*'}]})\n", (6509, 6661), False, 'import json\n'), ((7779, 8010), 'pulumi.Output.all', 'pulumi.Output.all', ([], {'queue_url': 'queue.main_queue_url', 'retry_url': 'queue.retry_queue_url', 'dead_letter_url': 'queue.dead_letter_queue_url', 'log_group': 'self.log_group.name', 'bucket': 'output_emitter.bucket.bucket', 'image': 'image', 'env': 'env'}), '(queue_url=queue.main_queue_url, retry_url=queue.\n retry_queue_url, dead_letter_url=queue.dead_letter_queue_url, log_group\n =self.log_group.name, bucket=output_emitter.bucket.bucket, image=image,\n env=env)\n', (7796, 8010), False, 'import pulumi\n'), ((9500, 9516), 'pulumi_aws.get_region', 'aws.get_region', ([], {}), '()\n', (9514, 9516), True, 'import pulumi_aws as aws\n')]
|
import random
import string
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/')
def root():
return jsonify(result=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(2 ** 10)))
if __name__ == '__main__':
app.run(debug=False, host='0.0.0.0')
|
[
"flask.Flask",
"random.choice"
] |
[((69, 84), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (74, 84), False, 'from flask import Flask, jsonify\n'), ((149, 202), 'random.choice', 'random.choice', (['(string.ascii_uppercase + string.digits)'], {}), '(string.ascii_uppercase + string.digits)\n', (162, 202), False, 'import random\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from . import MultiheadAttention
class MultiBranch(nn.Module):
def __init__(self, branches, embed_dim_list):
super().__init__()
self.branches = nn.ModuleList(branches)
self.embed_dim_list = embed_dim_list
def forward(self, query, key, value, key_padding_mask=None, incremental_state=None, need_weights=True, static_kv=False, attn_mask=None):
tgt_len, bsz, embed_size = query.size()
assert sum(self.embed_dim_list) == embed_size
out = []
attn = None
start = 0
for idx, embed_dim in enumerate(self.embed_dim_list):
branch = self.branches[idx]
branch_type = type(branch)
q = query[...,start:start+embed_dim]
if key is not None:
assert value is not None
k, v = key[..., start:start+embed_dim], value[..., start:start+embed_dim]
start += embed_dim
if branch_type == MultiheadAttention:
x, attn = branch(q, k, v, key_padding_mask, incremental_state, need_weights, static_kv, attn_mask)
else:
mask = key_padding_mask
if mask is not None:
q = q.masked_fill(mask.transpose(0, 1).unsqueeze(2), 0)
x = branch(q.contiguous(), incremental_state=incremental_state)
out.append(x)
out = torch.cat(out, dim=-1)
return out, attn
|
[
"torch.cat",
"torch.nn.ModuleList"
] |
[((253, 276), 'torch.nn.ModuleList', 'nn.ModuleList', (['branches'], {}), '(branches)\n', (266, 276), True, 'import torch.nn as nn\n'), ((1465, 1487), 'torch.cat', 'torch.cat', (['out'], {'dim': '(-1)'}), '(out, dim=-1)\n', (1474, 1487), False, 'import torch\n')]
|
import fnmatch
from . import jobs
from . import config
log = config.log
#
# {
# At least one match from this array must succeed, or array must be empty
# "any": [
# ["file.type", "dicom" ] # Match the file's type
# ["file.name", "*.dcm" ] # Match a shell glob for the file name
# ["file.measurements", "diffusion" ] # Match any of the file's measurements
# ["container.measurement", "diffusion" ] # Match the container's primary measurment
# ["container.has-type", "bvec" ] # Match the container having any file (including this one) with this type
# ]
#
# All matches from array must succeed, or array must be empty
# "all": [
# ]
#
# Algorithm to run if both sets of rules match
# "alg": "dcm2nii"
# }
#
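# Illustrative example (not part of the original module): a rule that queues the
# 'dcm2nii' algorithm mentioned above for any DICOM file whose name ends in .dcm
# could be written as
#
#     {
#         "alg": "dcm2nii",
#         "any": [],
#         "all": [
#             ["file.type", "dicom"],
#             ["file.name", "*.dcm"]
#         ]
#     }
#
# eval_rule() below returns True for such a rule only when every entry in "all"
# matches and either "any" is empty or at least one "any" entry matches.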
MATCH_TYPES = [
'file.type',
'file.name',
'file.measurements',
'container.measurement',
'container.has-type'
]
# TODO: replace with default rules, which get persisted, maintained, upgraded, and reasoned intelligently
HARDCODED_RULES = [
{
'alg': 'dicom_mr_classifier',
'all': [
['file.type', 'dicom']
]
},
{
'alg': 'dcm_convert',
'all': [
['file.type', 'dicom']
]
},
{
'alg': 'qa-report-fmri',
'all': [
['file.type', 'nifti']
]
}
]
def _log_file_key_error(file_, container, error):
log.warning('file ' + file_.get('name', '?') + ' in container ' + str(container.get('_id', '?')) + ' ' + error)
def eval_match(match_type, match_param, file_, container):
"""
    Given a match entry, return whether the match succeeded.
"""
# Match the file's type
if match_type == 'file.type':
try:
return file_['type'] == match_param
except KeyError:
_log_file_key_error(file_, container, 'has no type key')
return False
# Match a shell glob for the file name
elif match_type == 'file.name':
return fnmatch.fnmatch(file_['name'], match_param)
# Match any of the file's measurements
elif match_type == 'file.measurements':
try:
return match_param in file_['measurements']
except KeyError:
_log_file_key_error(file_, container, 'has no measurements key')
return False
    # Match the container's primary measurement
elif match_type == 'container.measurement':
return container['measurement'] == match_param
# Match the container having any file (including this one) with this type
elif match_type == 'container.has-type':
for c_file in container['files']:
if match_param in c_file['measurements']:
return True
return False
raise Exception('Unimplemented match type ' + match_type)
def eval_rule(rule, file_, container):
"""
Decide if a rule should spawn a job.
"""
# Are there matches in the 'any' set?
must_match = len(rule.get('any', [])) > 0
has_match = False
for match in rule.get('any', []):
if eval_match(match[0], match[1], file_, container):
has_match = True
break
# If there were matches in the 'any' array and none of them succeeded
if must_match and not has_match:
return False
# Are there matches in the 'all' set?
for match in rule.get('all', []):
if not eval_match(match[0], match[1], file_, container):
return False
return True
def create_jobs(db, container, container_type, file_):
"""
Check all rules that apply to this file, and enqueue the jobs that should be run.
Returns the algorithm names that were queued.
"""
job_list = []
# Get configured rules for this project
rules = get_rules_for_container(db, container)
# Add hardcoded rules that cannot be removed or changed
for hardcoded_rule in HARDCODED_RULES:
rules.append(hardcoded_rule)
for rule in rules:
if eval_rule(rule, file_, container):
alg_name = rule['alg']
input = jobs.create_fileinput_from_reference(container, container_type, file_)
jobs.queue_job(db, alg_name, input)
job_list.append(alg_name)
return job_list
# TODO: consider moving to a module that has a variety of hierarchy-management helper functions
def get_rules_for_container(db, container):
"""
Recursively walk the hierarchy until the project object is found.
"""
if 'session' in container:
session = db.sessions.find_one({'_id': container['session']})
return get_rules_for_container(db, session)
elif 'project' in container:
project = db.projects.find_one({'_id': container['project']})
return get_rules_for_container(db, project)
else:
# Assume container is a project, or a collection (which currently cannot have a rules property)
return container.get('rules', [])
|
[
"fnmatch.fnmatch"
] |
[((2022, 2065), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (["file_['name']", 'match_param'], {}), "(file_['name'], match_param)\n", (2037, 2065), False, 'import fnmatch\n')]
|
'''
Script used to pull voting information from benty-fields.com.
Must be logged in to access benty-fields.
NOTES:
1) benty-fields mostly organizes paper suggestions based upon voting
history and chosen preferences (machine learning involved), so these
voting totals can be considered as a control sample (in a way?).
2) Additionally, can only see the total votes for the "most popular"; the
total votes per paper is not information available through the search.
--> (so smaller sample than VoxCharta votes)
3) "last year" not an option
THOUGHT: how new is benty-fields?
'''
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from datetime import datetime
import threading, time, getpass, sys, subprocess
import pandas as pd
import numpy as np
from datetime import datetime as dt
from datetime import timedelta
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# amount of time to wait
timeit = 2 # seconds
# ------------------------ #
# -- creating dataframe -- #
# ------------------------ #
df_dtypes = {'id':str,'total_votes':int}
# will be creating them inside the outermost for loop
# ------------------------ #
logmein = True # option to log into benty-fields account
# opening browser & going to benty-fields
if logmein == True:
# pulling information to access arXiv account
username = '<EMAIL>'
password = '<PASSWORD>!'
#username = input('\nBenty-Fields username: ')
assert len(username) > 0, "Need to provide a username"
#password = input('Benty-Fields password: ')
assert len(password) > 0, "Need to provide account password"
driver = webdriver.Firefox()
driver.get("https://www.benty-fields.com/login")
assert "Login" in driver.title
# finding log in cells
usern = driver.find_element_by_id("email")
passw = driver.find_element_by_id("password")
usern.clear()
passw.clear()
# adding log in info
usern.send_keys(username)
passw.send_keys(password)
# locating "Log In" button
buttons = driver.find_element_by_class_name("modal-footer")
login = buttons.find_element_by_xpath("//button[@type='submit' and contains(.,'Login')]")
login.click()
else:
driver = webdriver.Firefox()
driver.get("https://www.benty-fields.com/most_popular")
# going to Most Popular page
driver.get("https://www.benty-fields.com/most_popular")
frequencies = ['last week','last month','last 6 months']
freq_days = [7,30,180]
for freq in frequencies: # running through frequencies
print(f'''\n---------------------------------------
Looking at Most Popular: {freq}
---------------------------------------\n''')
df = pd.DataFrame({'id':[],'total_votes':[]}) # dataframe for the frequency
idx = frequencies.index(freq)
if freq != 'last week': # have to change frequency
period = driver.find_element_by_xpath("//button[@data-toggle='dropdown']")
period.click() # this works
# THIS WORKS!!! Can I tell you how long this took me to figure out.......
loc = f"//ul[@class='dropdown-menu inner']/li[contains(.,'{freq}')]"
last_month = driver.find_element_by_xpath(loc)
last_month.click() # this works
time.sleep(5) # let it load
# ... just realized I could have just used the URL
# most_popular/1?period=180 # where period == number of days
# -- most popular votes -- #
# ------------------------ #
i = 0
    for page in range(1,50): # start with page 1, go through page 49
# going page by page for at least 20 pages
items = driver.find_elements_by_class_name("paper")
# running through the posts to pull out arXiv ID
for item in items:
print(f"{i}) ",end=' ')
arxiv_id = item.get_attribute('id') # "paper####.#####v#"
arxiv_id = arxiv_id.lstrip('paper')
arxiv_id = arxiv_id.rsplit('v')[0]
print(arxiv_id,end='\t')
# total votes
votes = item.find_element_by_tag_name("h3").text
votes = votes.rsplit('Votes ')[1].rsplit(')')[0] # pulling out just vote count
votes = int(votes) # just because
print(f"{votes} votes")
# adding value to dataframe
filler_df = pd.DataFrame({'id':[arxiv_id],'total_votes':[votes]})
df = df.append(filler_df,ignore_index=True)
i += 1
# going to the next page using the link (instead of clicking the buttons)
next_page = f"https://www.benty-fields.com/most_popular/{page+1}?period={freq_days[idx]}"
driver.get(next_page)
# saving dataframe
freq_dash = freq.replace(' ','-')
df_dtypes = {'id':str,'total_votes':int}
sub_df = pd.read_csv(f'votes_benty-fields/benty-fields_voting-{freq_dash}.txt',sep='\t',dtype=df_dtypes) # reading in to add
df = df.astype(df_dtypes) # to make sure column dtypes don't change
# appending on data
final_df = sub_df.append(df,ignore_index=True)
# checking for duplicates
ids = set(final_df.id.values) # creates 'set' of unique values
if len(ids) != len(final_df): # SO checking for duplicates added in to table
print(f'\nLength of sub_df: \t\t\t\t\t{len(sub_df)}')
print(f'Length of df: \t\t\t\t\t\t{len(df)}')
print(f'Length of combined df: \t\t\t\t\t{len(final_df)}')
final_df.drop_duplicates(inplace=True,subset='id',keep='last') # want most up-to-date #'s
print(f'Length of final_df after dropping id duplicates: \t{len(final_df)}')
else:
print(f'\nNo duplicates, check passed.')
final_df.to_csv(f'votes_benty-fields/benty-fields_voting-{freq_dash}.txt',sep='\t',index=False)
print(f"\nData saved to 'benty-fields_voting-{freq_dash}.txt'",end='\n\n')
# Wait a moment before closing the browser
time.sleep(timeit)
driver.close()
|
[
"pandas.DataFrame",
"pandas.read_csv",
"selenium.webdriver.Firefox",
"time.sleep"
] |
[((5469, 5487), 'time.sleep', 'time.sleep', (['timeit'], {}), '(timeit)\n', (5479, 5487), False, 'import threading, time, getpass, sys, subprocess\n'), ((1610, 1629), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (1627, 1629), False, 'from selenium import webdriver\n'), ((2147, 2166), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (2164, 2166), False, 'from selenium import webdriver\n'), ((2586, 2629), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [], 'total_votes': []}"], {}), "({'id': [], 'total_votes': []})\n", (2598, 2629), True, 'import pandas as pd\n'), ((4407, 4509), 'pandas.read_csv', 'pd.read_csv', (['f"""votes_benty-fields/benty-fields_voting-{freq_dash}.txt"""'], {'sep': '"""\t"""', 'dtype': 'df_dtypes'}), "(f'votes_benty-fields/benty-fields_voting-{freq_dash}.txt', sep=\n '\\t', dtype=df_dtypes)\n", (4418, 4509), True, 'import pandas as pd\n'), ((3081, 3094), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3091, 3094), False, 'import threading, time, getpass, sys, subprocess\n'), ((3994, 4050), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [arxiv_id], 'total_votes': [votes]}"], {}), "({'id': [arxiv_id], 'total_votes': [votes]})\n", (4006, 4050), True, 'import pandas as pd\n')]
|
import pygame
# local import
from base.base import GUIBase
from solver.solver import Solver
class Board(GUIBase):
"""Screen Board
    :param board: Sudoku board represented as a two dimensional array
:type board: list
:param size: screen dimensions (pixels) (width, height)
:type size: tuple
:param screen: pygame screen
:type screen: pygame.Surface
"""
def __init__(self, size: tuple, board: list, screen: pygame.Surface):
super().__init__((size[1], size[1], size[0] - size[1]), screen)
self.__board = board
self.__solver = Solver(self)
# create squares list
self.__squares = [
[
Square(
self.__board[c][r],
(r, c),
(self.size[0], self.size[2]),
self.screen,
True if self.__board[c][r] == 0 else False,
)
for r in range(9)
]
for c in range(9)
]
self.__selected = None
self.__wrong = None
@property
def wrong(self):
"""wrong property (getter)"""
return self.__wrong
@property
def squares(self) -> list:
"""squares property (getter)"""
return self.__squares
def update_squares(self):
"""squares property (updatter)"""
# iterate over all squares
for r in range(9):
for c in range(9):
# update values
self.__squares[r][c].value = self.__board[r][c]
self.__squares[r][c].pencil = 0
@property
def board(self) -> list:
"""board property (getter)"""
return self.__board
@board.setter
def board(self, board: list):
"""board property (setter) & update squares
        :param board: Sudoku board represented as a two dimensional array
:type board: list
"""
# set new board
self.__board = board
# reinit squares
self.__squares = [
[
Square(
self.__board[c][r],
(r, c),
(self.size[0], self.size[2]),
self.screen,
True if self.__board[c][r] == 0 else False,
)
for r in range(9)
]
for c in range(9)
]
@property
def selected(self) -> tuple:
"""selected property (getter)"""
return self.__selected
@selected.setter
def selected(self, pos: tuple):
"""selected property (setter) & refresh squares
:param pos: selected square position (row, column)
:type pos: tuple
"""
if not self.__wrong:
# clear previous selection
if self.__selected != None:
self.__squares[self.__selected[0]][self.__selected[1]].selected = False
if pos:
# select new square
self.__selected = pos
self.__squares[self.__selected[0]][self.__selected[1]].selected = True
else:
# set selected to None if pos out of board
self.__selected = None
@property
def get_pencil(self) -> int:
"""selected square pencil (getter)"""
# get selected square
r, c = self.__selected
return self.__squares[r][c].pencil
def set_pencil(self, value: int):
"""set pencil value
:param value: pencil value
:type value: int
"""
# get selected square
r, c = self.__selected
if self.__squares[r][c].value == 0:
self.__squares[r][c].pencil = value
@property
def get_value(self) -> int:
"""selected square value (getter)"""
# get selected square
r, c = self.__selected
return self.__squares[r][c].value
def set_value(self) -> str:
"""set square value
:returns: board state ('s' -> success, 'w' -> wrong, 'c' -> unsolvable board)
:rtype: str
"""
# get selected square
r, c = self.__selected
if self.get_value == 0:
            # check for a non-zero pencil value
pencil = self.get_pencil
if pencil != 0:
# check the number match Sudoku rules
w = self.__solver.exists(self.__board, pencil, (r, c))
if w:
# change squares state to wrong (red color)
self.__squares[r][c].wrong = True
self.__squares[w[0]][w[1]].wrong = True
self.__squares[r][c].value = pencil
self.__board[r][c] = pencil
self.__wrong = w
return "w"
else:
# change set square value and return true
self.__squares[r][c].value = pencil
self.__board[r][c] = pencil
# copy board
# init copy as two dimensional array with 9 rows
copy = [[] for r in range(9)]
# iterate over all rows
for r in range(9):
# iterate over all columns
for c in range(9):
# append the num
copy[r].append(self.__board[r][c])
# check if the board unsolvable
if not self.__solver.solve(copy):
return "c"
return "s"
@property
def clear(self):
"""clear selected square value"""
# get selected square
r, c = self.__selected
# clear square value and pencil
self.__squares[r][c].value = 0
self.__squares[r][c].pencil = 0
self.__board[r][c] = 0
# change wrong state
if self.__wrong:
self.__squares[r][c].wrong = False
self.__squares[self.__wrong[0]][self.__wrong[1]].wrong = False
self.__wrong = None
@property
def isfinished(self):
"""return true if there's no more empty squares else false
:returns: true if there's no more empty squares else false
:rtype: bool
"""
return not self.__solver.nextpos(self.board)
def set_sq_value(self, value: int, pos: tuple):
"""change square value by position
:param value: new square value
:type value: int
:param pos: square position
:type pos: tuple
"""
self.__squares[pos[0]][pos[1]].value = value
def draw(self):
"""Draw the board on the screen"""
# Draw squares
# iterate over all rows
for r in range(9):
# iterate over all columns
for c in range(9):
# draw square value
self.__squares[c][r].draw()
# Draw grid
# set space between squares
space = self.size[0] // 9
        # draw 10 horizontal and 10 vertical grid lines
for r in range(10):
            # set line width (bold at the border of each 3x3 box)
w = 4 if r % 3 == 0 and r != 0 else 1
# draw horizontal line (screen, (color), (start_pos), (end_pos), width)
pygame.draw.line(
self.screen,
(72, 234, 54),
(self.size[2], r * space),
(self.size[0] + self.size[2], r * space),
w,
)
# draw vertical line (screen, (color), (start_pos), (end_pos), width)
pygame.draw.line(
self.screen,
(72, 234, 54),
(r * space + self.size[2], 0),
(r * space + self.size[2], self.size[1]),
w,
)
class Square(GUIBase):
"""Board squeares
:param value: square display number
:type value: int
:param pos: square position (row, column)
:type pos: tuple
:param width: screen width and left gap (width, gap)
:type width: tuple
:param screen: pygame screen
:type screen: pygame.Surface
    :param changeable: whether the square's value can be changed
:type changeable: bool
"""
def __init__(
self,
value: int,
pos: tuple,
widthpos: tuple,
screen: pygame.Surface,
changeable: bool,
):
super().__init__(0, screen)
self.__value = value
self.__pos = pos
self.__widthpos = widthpos
self.__pencil = 0
self.__selected = False
self.__changeable = changeable
self.__wrong = False
@property
def changeable(self):
"""changeable property (getter)"""
return self.__changeable
@property
def selected(self) -> tuple:
"""selected property (getter)"""
return self.__selected
@selected.setter
def selected(self, v: bool):
"""selected property (setter)
:param v: selected value
:type v: bool
"""
self.__selected = v
@property
def value(self) -> int:
"""value property (getter)"""
return self.__value
@value.setter
def value(self, value: int):
"""value property (setter)
:param value: square value
:type value: int
"""
if self.__changeable:
self.__value = value
@property
def pencil(self) -> int:
"""pencil property (getter)"""
return self.__pencil
@pencil.setter
def pencil(self, value: int):
"""pencil property (setter)
:param value: pencil square value
:type value: int
"""
if self.__changeable:
self.__pencil = value
@property
def wrong(self):
"""wrong property (getter)"""
return self.__wrong
@wrong.setter
def wrong(self, w: bool):
"""wrong property (setter)
:param w: wrong value
:type w: bool
"""
self.__wrong = w
def draw(self):
"""Draw square value"""
# set space between squares
space = self.__widthpos[0] // 9
# set actuall square position on the screen
r, c = self.__pos[0] * space + self.__widthpos[1], self.__pos[1] * space
# fill unchangeable square background
if not self.__changeable:
sqsize = self.__widthpos[0] // 9
            # draw a filled rectangle as the square's background
pygame.draw.rect(self.screen, (10, 30, 0), ((r, c), (sqsize, sqsize)))
# check for none 0's squares
if self.__value != 0:
font = pygame.font.Font("../assets/Rubik-font/Rubik-Regular.ttf", 38)
# set color
rgb = (72, 234, 54) if not self.__wrong else (234, 72, 54)
            # create surface object
v = font.render(str(self.__value), 1, rgb)
            # draw it on the screen
self.screen.blit(
v,
(
int(r + ((space / 2) - (v.get_width() / 2))),
int(c + ((space / 2) - (v.get_height() / 2))),
),
)
elif self.__pencil != 0:
font = pygame.font.Font("../assets/Rubik-font/Rubik-Regular.ttf", 30)
            # create surface object
v = font.render(str(self.__pencil), 1, (2, 164, 0))
            # draw it on the screen
self.screen.blit(
v,
(
int(r + ((space / 2) - (v.get_width() / 2)) - 20),
int(c + ((space / 2) - (v.get_height() / 2)) - 15),
),
)
# draw bold outline around selected square
if self.__selected:
# draw rectangle (frame)
pygame.draw.rect(self.screen, (52, 214, 34), ((r, c), (space, space)), 3)
|
[
"pygame.draw.rect",
"pygame.draw.line",
"solver.solver.Solver",
"pygame.font.Font"
] |
[((583, 595), 'solver.solver.Solver', 'Solver', (['self'], {}), '(self)\n', (589, 595), False, 'from solver.solver import Solver\n'), ((7266, 7387), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', '(72, 234, 54)', '(self.size[2], r * space)', '(self.size[0] + self.size[2], r * space)', 'w'], {}), '(self.screen, (72, 234, 54), (self.size[2], r * space), (\n self.size[0] + self.size[2], r * space), w)\n', (7282, 7387), False, 'import pygame\n'), ((7572, 7696), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', '(72, 234, 54)', '(r * space + self.size[2], 0)', '(r * space + self.size[2], self.size[1])', 'w'], {}), '(self.screen, (72, 234, 54), (r * space + self.size[2], 0),\n (r * space + self.size[2], self.size[1]), w)\n', (7588, 7696), False, 'import pygame\n'), ((10413, 10483), 'pygame.draw.rect', 'pygame.draw.rect', (['self.screen', '(10, 30, 0)', '((r, c), (sqsize, sqsize))'], {}), '(self.screen, (10, 30, 0), ((r, c), (sqsize, sqsize)))\n', (10429, 10483), False, 'import pygame\n'), ((10570, 10632), 'pygame.font.Font', 'pygame.font.Font', (['"""../assets/Rubik-font/Rubik-Regular.ttf"""', '(38)'], {}), "('../assets/Rubik-font/Rubik-Regular.ttf', 38)\n", (10586, 10632), False, 'import pygame\n'), ((11708, 11781), 'pygame.draw.rect', 'pygame.draw.rect', (['self.screen', '(52, 214, 34)', '((r, c), (space, space))', '(3)'], {}), '(self.screen, (52, 214, 34), ((r, c), (space, space)), 3)\n', (11724, 11781), False, 'import pygame\n'), ((11139, 11201), 'pygame.font.Font', 'pygame.font.Font', (['"""../assets/Rubik-font/Rubik-Regular.ttf"""', '(30)'], {}), "('../assets/Rubik-font/Rubik-Regular.ttf', 30)\n", (11155, 11201), False, 'import pygame\n')]
|
"""
Example headers
{
"X-Screenly-hostname": "srly-jmar75ko6xp651j",
"X-Screenly-screen-name": "dizzy cherry",
"X-Screenly-location-name": "Cape Town",
"X-Screenly-hardware": "x86",
"X-Screenly-version": "v2",
"X-Screenly-lat": "-33.925278",
"X-Screenly-lng": "18.423889",
"X-Screenly-tags": "srly-jmar75ko6xp651j,custom-label"
}"""
from os import environ
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route("/")
def render_metadata_headers():
return render_template(
"metadata_headers.html",
headers=request.headers,
apiKey=environ.get("GOOGLE_MAPS_API_KEY"),
)
if __name__ == "__main__":
app.run(host="0.0.0.0")
|
[
"os.environ.get",
"flask.Flask"
] |
[((448, 463), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (453, 463), False, 'from flask import Flask, render_template, request\n'), ((622, 656), 'os.environ.get', 'environ.get', (['"""GOOGLE_MAPS_API_KEY"""'], {}), "('GOOGLE_MAPS_API_KEY')\n", (633, 656), False, 'from os import environ\n')]
|
from django.core import mail
from django.test import TestCase, TransactionTestCase
from django.contrib.auth.models import User
from news.models import News
class NewsTest(TestCase):
def test_feed(self):
response = self.client.get('/feeds/news/')
self.assertEqual(response.status_code, 200)
def test_sitemap(self):
response = self.client.get('/sitemap-news.xml')
self.assertEqual(response.status_code, 200)
def test_news_sitemap(self):
response = self.client.get('/news-sitemap.xml')
self.assertEqual(response.status_code, 200)
def test_newsitem(self):
response = self.client.get('/news/404', follow=True)
self.assertEqual(response.status_code, 404)
class NewsCrud(TransactionTestCase):
def setUp(self):
password = '<PASSWORD>'
self.user = User.objects.create_superuser('admin',
'<EMAIL>',
password)
self.client.post('/login/', {
'username': self.user.username,
'password': password
})
def tearDown(self):
News.objects.all().delete()
self.user.delete()
def create(self, title='Bash broken', content='Broken in [testing]', announce=False):
data = {
'title': title,
'content': content,
}
if announce:
data['send_announce'] = 'on'
return self.client.post('/news/add/', data, follow=True)
def testCreateItem(self):
title = 'Bash broken'
response = self.create(title)
self.assertEqual(response.status_code, 200)
news = News.objects.first()
self.assertEqual(news.author, self.user)
self.assertEqual(news.title, title)
def testView(self):
self.create()
news = News.objects.first()
response = self.client.get(news.get_absolute_url())
self.assertEqual(response.status_code, 200)
def testRedirectId(self):
self.create()
news = News.objects.first()
response = self.client.get('/news/{}'.format(news.id), follow=True)
self.assertEqual(response.status_code, 200)
def testSendAnnounce(self):
title = 'New glibc'
self.create(title, announce=True)
self.assertEqual(len(mail.outbox), 1)
self.assertIn(title, mail.outbox[0].subject)
def testPreview(self):
response = self.client.post('/news/preview/', {'data': '**body**'}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual('<p><strong>body</strong></p>', response.content.decode())
|
[
"django.contrib.auth.models.User.objects.create_superuser",
"news.models.News.objects.first",
"news.models.News.objects.all"
] |
[((879, 938), 'django.contrib.auth.models.User.objects.create_superuser', 'User.objects.create_superuser', (['"""admin"""', '"""<EMAIL>"""', 'password'], {}), "('admin', '<EMAIL>', password)\n", (908, 938), False, 'from django.contrib.auth.models import User\n'), ((1799, 1819), 'news.models.News.objects.first', 'News.objects.first', ([], {}), '()\n', (1817, 1819), False, 'from news.models import News\n'), ((1981, 2001), 'news.models.News.objects.first', 'News.objects.first', ([], {}), '()\n', (1999, 2001), False, 'from news.models import News\n'), ((2190, 2210), 'news.models.News.objects.first', 'News.objects.first', ([], {}), '()\n', (2208, 2210), False, 'from news.models import News\n'), ((1255, 1273), 'news.models.News.objects.all', 'News.objects.all', ([], {}), '()\n', (1271, 1273), False, 'from news.models import News\n')]
|
import time
import bisect
import numpy as np
import pandas as pd
import networkx as nx
import scipy
import scipy.optimize
import scipy as sp
import os
import matplotlib.pyplot as plt
import random
from bayes_opt import BayesianOptimization
from bayes_opt.util import UtilityFunction, Colours
import asyncio
import threading
import json
import tornado.ioloop
import tornado.httpserver
from tornado.web import RequestHandler
import requests
from lib.priorityqueue import PriorityQueue
from lib.dynamics import DiseaseModel
from lib.mobilitysim import MobilitySimulator
from bayes_opt import BayesianOptimization
from lib.parallel import *
SIMPLIFIED_OPT = True
def format_opt_to_sim(opt_params, n_betas):
'''
Convert bayes_opt parameter format into our format
'''
if SIMPLIFIED_OPT:
return {
'betas' : [opt_params['beta'] for _ in range(n_betas)],
'alpha': opt_params['alpha'],
'mu': opt_params['mu']
}
else:
sim_params = {
'betas' : [None for _ in range(n_betas)],
'alpha': None,
'mu': None
}
for k, v, in opt_params.items():
if 'betas' in k:
sim_params['betas'][int(k[5:])] = v
else:
sim_params[k] = v
return sim_params
def format_sim_to_opt(sim_params):
'''
Convert our format into bayes opt format
'''
if SIMPLIFIED_OPT:
return {
'beta' : sim_params['betas'][0],
'alpha': sim_params['alpha'],
            'mu': sim_params['mu']
}
else:
opt_params = {'betas' + str(i) : p for i, p in enumerate(sim_params['betas'])}
opt_params.update({
'alpha': sim_params['alpha'],
'mu': sim_params['mu']
})
return opt_params
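# Illustrative round trip (values are made up; assumes SIMPLIFIED_OPT = True as
# set above):
#   format_opt_to_sim({'beta': 0.5, 'alpha': 0.1, 'mu': 0.2}, n_betas=3)
#     -> {'betas': [0.5, 0.5, 0.5], 'alpha': 0.1, 'mu': 0.2}
#   format_sim_to_opt({'betas': [0.5, 0.5, 0.5], 'alpha': 0.1, 'mu': 0.2})
#     -> {'beta': 0.5, 'alpha': 0.1, 'mu': 0.2}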
def convert_timings_to_daily(timings, time_horizon):
'''
Converts batch of size N of timings of M individuals in a time horizon
of `time_horizon` in hours into daily aggregate cases
Argument:
timings : np.array of shape (N, M)
    Returns:
        daily counts :   np.array of shape (N, T / 24)
'''
if len(timings.shape) == 1:
timings = np.expand_dims(timings, axis=0)
arr = np.array([
np.sum((timings >= t * 24) &
(timings < (t + 1) * 24), axis=1)
for t in range(0, int(time_horizon // 24))]).T
return arr
def convert_timings_to_cumulative_daily(timings, time_horizon):
'''
Converts batch of size N of timings of M individuals in a time horizon
of `time_horizon` in hours into daily cumulative aggregate cases
Argument:
timings : np.array of shape (N, M)
    Returns:
        cumulative daily counts :   np.array of shape (N, T / 24)
'''
if len(timings.shape) == 1:
timings = np.expand_dims(timings, axis=0)
cumulative = np.array([
np.sum((timings < (t + 1) * 24), axis=1)
for t in range(0, int(time_horizon // 24))]).T
return cumulative
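# Illustrative example (not part of the original module): with confirmation
# timings of 5h, 30h and 49h for three individuals and a 72h horizon,
#   convert_timings_to_daily(np.array([5., 30., 49.]), 72)            -> [[1, 1, 1]]
#   convert_timings_to_cumulative_daily(np.array([5., 30., 49.]), 72) -> [[1, 2, 3]]
# i.e. one new case on each of the three days and three cumulative cases by day 3.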
def loss_daily(predicted_confirmed_times, targets_daily, time_horizon, power=2.0):
'''
Daily loss:
        mean of the absolute difference raised to `power` (squared error by default) between the average predicted cumulative daily cases and the true daily targets
'''
# predicted_confirmed_daily = convert_timings_to_daily(predicted_confirmed_times, time_horizon)
predicted_confirmed_daily = convert_timings_to_cumulative_daily(predicted_confirmed_times, time_horizon)
ave_predicted_confirmed_daily = predicted_confirmed_daily.mean(axis=0)
loss = np.power(np.abs(ave_predicted_confirmed_daily - targets_daily), power).mean()
return loss
def multimodal_loss_daily(preds, weights, targets, time_horizon, power=2.0):
'''
Multimodal Daily loss:
Same as loss_daily but considering several weighted metrics (e.g. positive, recovered, deceased)
'''
loss = 0
for w, pred, target in zip(weights, preds, targets):
# pred = convert_timings_to_daily(pred, time_horizon)
pred = convert_timings_to_cumulative_daily(pred, time_horizon)
ave_pred = pred.mean(axis=0)
loss += w * np.power(np.abs(ave_pred - target), power).mean()
return loss
def make_loss_function(mob_settings, distributions, targets, time_horizon, param_bounds,
initial_seeds, testing_params, random_repeats, num_site_types,
cpu_count, measure_list, loss, num_people, site_loc, home_loc, c, extra_params=None):
'''
Returns function executable by optimizer with desired loss
'''
with open(f'logger_{c}.txt', 'w+') as logfile:
logfile.write(f'Log run: seed = {c}\n\n')
def f(opt_params):
# convert bayes_opt parameter format into our format
sim_params = format_opt_to_sim(opt_params, n_betas=num_site_types)
# launch in parallel
summary = launch_parallel_simulations(
mob_settings=mob_settings,
distributions=distributions,
random_repeats=random_repeats,
cpu_count=cpu_count,
params=sim_params,
initial_seeds=initial_seeds,
testing_params=testing_params,
measure_list=measure_list,
max_time=time_horizon,
num_people=num_people,
site_loc=site_loc,
home_loc=home_loc,
verbose=False)
if loss == 'loss_daily':
return summary.state_started_at['posi']
elif loss == 'multimodal_loss_daily':
return (summary.state_started_at['posi'], summary.state_started_at['resi'], summary.state_started_at['dead'])
else:
raise ValueError('Unknown loss function')
if loss == 'loss_daily':
def loss_function(**kwargv):
predicted_confirmed_times = f(kwargv)
l = loss_daily(
predicted_confirmed_times=predicted_confirmed_times,
targets_daily=targets,
time_horizon=time_horizon,
power=2.0)
ave_pred = convert_timings_to_cumulative_daily(
predicted_confirmed_times, time_horizon).mean(axis=0)
loginfo = f'{-l} ' + str(kwargv) + '\n'
with open(f'logger_{c}.txt', 'a') as logfile:
logfile.write(loginfo)
# bayes_opt maximizes
return - l
return loss_function
elif loss == 'multimodal_loss_daily':
# here `extra_params` are weights
if extra_params:
weights = extra_params['weights']
else:
weights = np.ones(len(targets))
def loss_function(**kwargv):
preds = f(kwargv)
l = multimodal_loss_daily(
preds=preds, weights=weights, targets=targets,
time_horizon=time_horizon, power=2.0)
# bayes_opt maximizes
return - l
return loss_function
else:
raise ValueError('Unknown loss function')
|
[
"numpy.abs",
"numpy.sum",
"numpy.expand_dims"
] |
[((2226, 2257), 'numpy.expand_dims', 'np.expand_dims', (['timings'], {'axis': '(0)'}), '(timings, axis=0)\n', (2240, 2257), True, 'import numpy as np\n'), ((2841, 2872), 'numpy.expand_dims', 'np.expand_dims', (['timings'], {'axis': '(0)'}), '(timings, axis=0)\n', (2855, 2872), True, 'import numpy as np\n'), ((2288, 2350), 'numpy.sum', 'np.sum', (['((timings >= t * 24) & (timings < (t + 1) * 24))'], {'axis': '(1)'}), '((timings >= t * 24) & (timings < (t + 1) * 24), axis=1)\n', (2294, 2350), True, 'import numpy as np\n'), ((2910, 2948), 'numpy.sum', 'np.sum', (['(timings < (t + 1) * 24)'], {'axis': '(1)'}), '(timings < (t + 1) * 24, axis=1)\n', (2916, 2948), True, 'import numpy as np\n'), ((3538, 3591), 'numpy.abs', 'np.abs', (['(ave_predicted_confirmed_daily - targets_daily)'], {}), '(ave_predicted_confirmed_daily - targets_daily)\n', (3544, 3591), True, 'import numpy as np\n'), ((4120, 4145), 'numpy.abs', 'np.abs', (['(ave_pred - target)'], {}), '(ave_pred - target)\n', (4126, 4145), True, 'import numpy as np\n')]
|
#!/bin/env python
# coding=utf8
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
import base64
import requests
import json
import urllib
import time
import random
import datetime
import hashlib
# Get the Content-Type field from the response headers
def urlOpenGetHeaders(url):
req = urllib.request.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36')
page = urllib.request.urlopen(req)
html = page.getheader('Content-Type')
return html
# Get the html source of a url
def urlOpen(url):
req = urllib.request.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36')
if False:
proxies = ['192.168.127.12:8123', '172.16.31.10:8123', '192.168.3.11:8118']
proxy = random.choice(proxies)
proxy_support = urllib.request.ProxyHandler({'http':proxy})
opener = urllib.request.build_opener(proxy_support)
opener.addheaders = [('User-Agent','Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36')]
urllib.request.install_opener(opener)
page = urllib.request.urlopen(url)
else:
page = urllib.request.urlopen(req)
html = page.read()
return html
# Download to the local folder based on the title, extension and url
def download(title,post,url):
filename = title + "." +post
with open(filename, 'wb') as f:
music = urlOpen(url)
f.write(music)
def getPostStr(pKey, song_id):
rsaKey = RSA.importKey(pKey)
cipher = Cipher_pkcs1_v1_5.new(rsaKey)
encry = cipher.encrypt(song_id)
return base64.b64encode(encry)
# Get the actual url of the song
def getSongRealUrl(songidVal,timeVal,md5Val):
url = 'http://www.aekun.com/api/getMusicbyid/'
r = requests.post(url, {
'songid': songidVal,
't':timeVal,
'sign':md5Val
})
return r.content
# Write the required data to a local file
def writeStrToFile(writeStr):
print(writeStr)
with open("downurl.txt","a",encoding="UTF-8") as f:
f.write(writeStr)
f.write("\n")
# Get the id of the latest recommended song
def getMaxSongs():
url = "http://www.aekun.com/new/"
html = urlOpen(url).decode('utf-8')
a = html.find('<tr musicid=') + 13
b = html.find('"',a)
result = int(html[a:b])
return result
# Get the largest song id obtained so far
def getNowSongId(songIdInt):
f = open("downurl.txt","r",encoding="UTF-8")
lines = f.readlines() #读取全部内容
for line in lines:
if line.find('|')!=-1:
line = line.split("|")
line = int(line[0])
if line > songIdInt:
songIdInt = line
return songIdInt
# Main routine for downloading songs
def downloadMusicMain():
    # load the public key (pKey)
f = open('public.pem')
pKey = f.read()
f.close()
songIdInt = 3509719
songIdInt = getNowSongId(songIdInt)
songIdInt = songIdInt + 1
maxSong = getMaxSongs()
print("start from:%s,end with:%s"%(songIdInt,maxSong))
# 3505251 |10 |2015084685 |▌▌Chillout ▌▌Losing Ground Michael FK & Groundfold -----3505251.mp3
while(False):
if songIdInt > maxSong:
break
time.sleep(10)
try:
urlOpen("http://www.aekun.com/song/" + str(songIdInt))
except ConnectionResetError:
print("Error occur")
songId = str(songIdInt).encode('utf-8')
print(songId)
songidVal = getPostStr(pKey, songId)
songidVal = songidVal.decode('utf-8')
t = time.time()
t = int(round(t * 1000))
timeVal = getPostStr(pKey,str(t).encode('utf-8'))
timeVal = timeVal.decode('utf-8')
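        # sign the request: md5 of "<songid>|<timestamp>", RSA-encrypted like the other fields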
m2 = hashlib.md5()
src = str(songIdInt) + "|" + str(t)
m2.update(src.encode("utf8"))
t = m2.hexdigest()
md5Val = getPostStr(pKey,str(t).encode('utf-8'))
md5Val = md5Val.decode('utf-8')
try:
print(songidVal)
print(timeVal)
print(md5Val)
ret = getSongRealUrl(songidVal,timeVal,md5Val)
except (ConnectionError , ConnectionResetError):
print("ConnectionError")
time.sleep(3)
continue
ret = ret.decode('utf-8')
#ret = '{"state":"success","message":"ok","action":null,"data":{"url":"http://us.aekun.com/upload/75AAB77BC2D16123F9F2E8B6C68FCB8E.mp3","song_name":"就算遇到挫折、受到嘲笑,也要勇敢的向前跑!","coll":0,"singername":"小哥","singerpic":"https://m4.aekun.com/user_l_5973822_20170513135220.png"}}'
print(ret)
ret = json.loads(ret)
print(ret)
status = ret['state']
if status != 'success':
print(status)
break
downUrl = ret['data']
if isinstance(downUrl,str):
if downUrl.strip() == '':
html = urlOpen("http://www.aekun.com/song/" + str(songIdInt)).decode('utf-8')
songIdInt = songIdInt + 1
continue
elif isinstance(downUrl,dict):
pass
else:
continue
downUrl = ret['data']['url']
if downUrl is None:
continue
if downUrl.strip() == "":
continue
post = downUrl[-3:]
post = post.lower()
if post != 'mp3' and post != 'm4a':
tmp = urlOpenGetHeaders(downUrl)
if tmp.find('mp3') != -1:
post = 'mp3'
songName = ret['data']['song_name']
writeStr = "%-10s|%-50s|%-5s|%s"%(songIdInt,songName,post,downUrl)
writeStrToFile(writeStr)
songIdInt = songIdInt + 1
now = datetime.datetime.now()
now = now.strftime('%Y-%m-%d %H:%M:%S')
writeStrToFile(str(now) + '\t\t\t' + str(maxSong))
if __name__ == '__main__':
downloadMusicMain()
|
[
"urllib.request.ProxyHandler",
"hashlib.md5",
"urllib.request.Request",
"json.loads",
"urllib.request.urlopen",
"random.choice",
"urllib.request.build_opener",
"time.sleep",
"urllib.request.install_opener",
"time.time",
"base64.b64encode",
"Crypto.PublicKey.RSA.importKey",
"requests.post",
"datetime.datetime.now",
"Crypto.Cipher.PKCS1_v1_5.new"
] |
[((300, 327), 'urllib.request.Request', 'urllib.request.Request', (['url'], {}), '(url)\n', (322, 327), False, 'import urllib\n'), ((496, 523), 'urllib.request.urlopen', 'urllib.request.urlopen', (['req'], {}), '(req)\n', (518, 523), False, 'import urllib\n'), ((622, 649), 'urllib.request.Request', 'urllib.request.Request', (['url'], {}), '(url)\n', (644, 649), False, 'import urllib\n'), ((1650, 1669), 'Crypto.PublicKey.RSA.importKey', 'RSA.importKey', (['pKey'], {}), '(pKey)\n', (1663, 1669), False, 'from Crypto.PublicKey import RSA\n'), ((1683, 1712), 'Crypto.Cipher.PKCS1_v1_5.new', 'Cipher_pkcs1_v1_5.new', (['rsaKey'], {}), '(rsaKey)\n', (1704, 1712), True, 'from Crypto.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5\n'), ((1760, 1783), 'base64.b64encode', 'base64.b64encode', (['encry'], {}), '(encry)\n', (1776, 1783), False, 'import base64\n'), ((1904, 1975), 'requests.post', 'requests.post', (['url', "{'songid': songidVal, 't': timeVal, 'sign': md5Val}"], {}), "(url, {'songid': songidVal, 't': timeVal, 'sign': md5Val})\n", (1917, 1975), False, 'import requests\n'), ((5659, 5682), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5680, 5682), False, 'import datetime\n'), ((922, 944), 'random.choice', 'random.choice', (['proxies'], {}), '(proxies)\n', (935, 944), False, 'import random\n'), ((969, 1013), 'urllib.request.ProxyHandler', 'urllib.request.ProxyHandler', (["{'http': proxy}"], {}), "({'http': proxy})\n", (996, 1013), False, 'import urllib\n'), ((1030, 1072), 'urllib.request.build_opener', 'urllib.request.build_opener', (['proxy_support'], {}), '(proxy_support)\n', (1057, 1072), False, 'import urllib\n'), ((1249, 1286), 'urllib.request.install_opener', 'urllib.request.install_opener', (['opener'], {}), '(opener)\n', (1278, 1286), False, 'import urllib\n'), ((1302, 1329), 'urllib.request.urlopen', 'urllib.request.urlopen', (['url'], {}), '(url)\n', (1324, 1329), False, 'import urllib\n'), ((1355, 1382), 'urllib.request.urlopen', 'urllib.request.urlopen', (['req'], {}), '(req)\n', (1377, 1382), False, 'import urllib\n'), ((3242, 3256), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (3252, 3256), False, 'import time\n'), ((3581, 3592), 'time.time', 'time.time', ([], {}), '()\n', (3590, 3592), False, 'import time\n'), ((3740, 3753), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (3751, 3753), False, 'import hashlib\n'), ((4611, 4626), 'json.loads', 'json.loads', (['ret'], {}), '(ret)\n', (4621, 4626), False, 'import json\n'), ((4229, 4242), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (4239, 4242), False, 'import time\n')]
|
import os
import numpy as np
import pytest
from ananse.network import Network
from .test_02_utils import write_file
@pytest.fixture
def binding_fname():
return "tests/example_data/binding2.tsv"
@pytest.fixture
def network():
genome = "tests/data/genome.fa"
if not os.path.exists(genome):
write_file(genome, [">chr1", "N"])
return Network(genome=genome, gene_bed="ananse/db/hg38.genes.bed")
def test_unique_enhancer(network, binding_fname):
regions = network.unique_enhancers(binding_fname)
regions = regions.as_df()
assert regions.shape[0] == 6
assert sorted(list(regions["Chromosome"].unique())) == ["chr1", "chr10", "chr17"]
assert sorted(list(regions["Start"].unique())) == [7677184, 7687827]
def test_distance_weight(network):
dw = network.distance_weight(
include_promoter=True,
promoter_region=20,
full_weight_region=50,
maximum_distance=100,
alpha=5,
)
assert list(dw.columns) == ["weight", "dist"]
dw = dw.set_index("dist")
assert dw.loc[0, "weight"] == 1
assert dw.loc[25, "weight"] == 1
assert dw.loc[50, "weight"] == 1
assert dw.loc[51, "weight"] < 1
assert np.isclose(dw.loc[100, "weight"], 0, atol=1e-4)
assert dw.shape[0] == 101
dw = network.distance_weight(
include_promoter=False,
promoter_region=20,
full_weight_region=50,
maximum_distance=100,
alpha=5,
)
assert list(dw.columns) == ["weight", "dist"]
dw = dw.set_index("dist")
assert dw.loc[0, "weight"] == 0
assert dw.loc[20, "weight"] == 0
assert dw.loc[21, "weight"] == 1
assert dw.shape[0] == 101
|
[
"numpy.isclose",
"os.path.exists",
"ananse.network.Network"
] |
[((361, 420), 'ananse.network.Network', 'Network', ([], {'genome': 'genome', 'gene_bed': '"""ananse/db/hg38.genes.bed"""'}), "(genome=genome, gene_bed='ananse/db/hg38.genes.bed')\n", (368, 420), False, 'from ananse.network import Network\n'), ((1202, 1251), 'numpy.isclose', 'np.isclose', (["dw.loc[100, 'weight']", '(0)'], {'atol': '(0.0001)'}), "(dw.loc[100, 'weight'], 0, atol=0.0001)\n", (1212, 1251), True, 'import numpy as np\n'), ((282, 304), 'os.path.exists', 'os.path.exists', (['genome'], {}), '(genome)\n', (296, 304), False, 'import os\n')]
|
import setuptools
from configparser import ConfigParser
from pkg_resources import parse_version
from sys import platform
assert parse_version(setuptools.__version__) >= parse_version('36.2')
config = ConfigParser(delimiters=['='])
config.read('settings.ini')
cfg = config['DEFAULT']
license_options = {
'apache2': (
'Apache Software License 2.0',
'OSI Approved :: Apache Software License'
),
'MIT': (
'MIT License',
'OSI Approved :: MIT License'
)
}
status_options = {
'1': 'Planning',
'2': 'Pre-Alpha',
'3': 'Alpha',
'4': 'Beta',
'5': 'Production/Stable',
'6': 'Mature',
'7': 'Inactive'
}
maximum_python3_available = 8
with open("requirements.txt") as requirements_file:
requirements = []
for line in requirements_file:
line = line.strip()
requirements.append(line)
setuptools.setup(
name=cfg["lib_name"],
license=license_options[cfg["license"]][0],
classifiers=[
f'Development Status :: {cfg["status"]} - {status_options[cfg["status"]]}',
f'Intended Audience :: {cfg["audience"]}',
f'License :: {license_options[cfg["license"]][1]}',
f'Natural Language :: {cfg["language"]}',
] + [
f'Programming Language :: Python :: 3.{i}' for i in range(
int(cfg["min_python"].split(".")[1]),
maximum_python3_available + 1
)
],
version=cfg["version"],
description=cfg["description"],
keywords=cfg["keywords"],
author=cfg["author"],
author_email=cfg["author_email"],
url=cfg["url"],
packages=setuptools.find_packages(),
# TODO: Modifying this should allow to remove the MAINFEST.in
include_package_data=True,
install_requires=requirements,
python_requires=f'>={cfg["min_python"]},<{cfg["max_python"]}',
zip_safe=False,
)
|
[
"pkg_resources.parse_version",
"configparser.ConfigParser",
"setuptools.find_packages"
] |
[((201, 231), 'configparser.ConfigParser', 'ConfigParser', ([], {'delimiters': "['=']"}), "(delimiters=['='])\n", (213, 231), False, 'from configparser import ConfigParser\n'), ((128, 165), 'pkg_resources.parse_version', 'parse_version', (['setuptools.__version__'], {}), '(setuptools.__version__)\n', (141, 165), False, 'from pkg_resources import parse_version\n'), ((169, 190), 'pkg_resources.parse_version', 'parse_version', (['"""36.2"""'], {}), "('36.2')\n", (182, 190), False, 'from pkg_resources import parse_version\n'), ((1603, 1629), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (1627, 1629), False, 'import setuptools\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-06-28 20:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('providers', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, verbose_name='Nombre')),
('img', models.ImageField(upload_to='uploads/category/', verbose_name='Imagen')),
],
options={
'ordering': ['name'],
'verbose_name': 'Categoria',
'verbose_name_plural': 'Categorias',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, verbose_name='Nombre del producto ')),
('description', models.TextField(blank=True, null=True, verbose_name='Descripci\xf3n del producto ')),
('img', models.ImageField(upload_to='uploads/products/', verbose_name='Imagen del producto ')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.Category', verbose_name='Categoria')),
('provider', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='providers.Provider', verbose_name='Marca o Retail')),
],
options={
'ordering': ['name'],
'verbose_name': 'Producto',
'verbose_name_plural': 'Productos',
},
),
]
|
[
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.ImageField"
] |
[((440, 533), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (456, 533), False, 'from django.db import migrations, models\n'), ((557, 612), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Nombre"""'}), "(max_length=200, verbose_name='Nombre')\n", (573, 612), False, 'from django.db import migrations, models\n'), ((639, 710), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""uploads/category/"""', 'verbose_name': '"""Imagen"""'}), "(upload_to='uploads/category/', verbose_name='Imagen')\n", (656, 710), False, 'from django.db import migrations, models\n'), ((1016, 1109), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1032, 1109), False, 'from django.db import migrations, models\n'), ((1133, 1202), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Nombre del producto """'}), "(max_length=200, verbose_name='Nombre del producto ')\n", (1149, 1202), False, 'from django.db import migrations, models\n'), ((1237, 1323), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Descripción del producto """'}), "(blank=True, null=True, verbose_name=\n 'Descripción del producto ')\n", (1253, 1323), False, 'from django.db import migrations, models\n'), ((1348, 1438), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""uploads/products/"""', 'verbose_name': '"""Imagen del producto """'}), "(upload_to='uploads/products/', verbose_name=\n 'Imagen del producto ')\n", (1365, 1438), False, 'from django.db import migrations, models\n'), ((1465, 1582), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""products.Category"""', 'verbose_name': '"""Categoria"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'products.Category', verbose_name='Categoria')\n", (1482, 1582), False, 'from django.db import migrations, models\n'), ((1609, 1732), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""providers.Provider"""', 'verbose_name': '"""Marca o Retail"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'providers.Provider', verbose_name='Marca o Retail')\n", (1626, 1732), False, 'from django.db import migrations, models\n')]
|
import matplotlib.pyplot as plt
from matplotlib.text import Text
class DragHandler(object):
# NOTE: DOES NOT HANDLE TEXT WITH ARBITRARY TRANSFORMS!!!!
"""
A simple class to handle Drag n Drop.
This is a simple example, which works for Text objects only
"""
def __init__(self, figure=None):
""" Create a new drag handler and connect it to the figure's event system.
If the figure handler is not given, the current figure is used instead
"""
if figure is None:
figure = plt.gcf()
        # simple attribute to store the dragged text object
self.dragged = None
# Connect events and callbacks
figure.canvas.mpl_connect("pick_event", self.on_pick_event)
figure.canvas.mpl_connect("button_release_event", self.on_release_event)
def on_pick_event(self, event):
" Store which text object was picked and were the pick event occurs."
if isinstance(event.artist, Text):
self.dragged = event.artist
self.pick_pos = (event.mouseevent.xdata, event.mouseevent.ydata)
return True
def on_release_event(self, event):
" Update text position and redraw"
if self.dragged is not None:
old_pos = self.dragged.get_position()
new_pos = (old_pos[0] + event.xdata - self.pick_pos[0],
old_pos[1] + event.ydata - self.pick_pos[1])
self.dragged.set_position(new_pos)
self.dragged = None
plt.draw()
return True
|
[
"matplotlib.pyplot.draw",
"matplotlib.pyplot.gcf"
] |
[((541, 550), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (548, 550), True, 'import matplotlib.pyplot as plt\n'), ((1521, 1531), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1529, 1531), True, 'import matplotlib.pyplot as plt\n')]
|
import yaml
import argparse
from attrdict import AttrDict
from matplotlib import pyplot as plt
import torch
from torch.autograd import Variable
from models.generator import Generator
def test(params):
G = Generator(params.network.generator)
if params.restore.G:
G.load_state_dict(torch.load(params.restore.G))
gen_input = \
Variable(torch.FloatTensor(
1, params.network.generator.z_size,
1, 1
).normal_(0, 1))
torch_cat = G(gen_input)
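    # map the generator output from [-1, 1] to [0, 1] and reorder CHW -> HWC for plotting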
np_cat = torch_cat.data.numpy()[0] / 2.0 + 0.5
np_cat = np_cat.transpose((1, 2, 0))
fig = plt.gcf()
fig.canvas.set_window_title('Random cat')
plt.imshow(np_cat)
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='GAN testing script'
)
parser.add_argument('--conf', '-c', required=True,
help='a path to the configuration file')
args = parser.parse_args()
with open(args.conf, 'r') as stream:
try:
args = yaml.load(stream)
except yaml.YAMLError as exc:
print(exc)
test(AttrDict(args))
|
[
"yaml.load",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.imshow",
"torch.load",
"torch.FloatTensor",
"models.generator.Generator",
"matplotlib.pyplot.gcf",
"attrdict.AttrDict"
] |
[((214, 249), 'models.generator.Generator', 'Generator', (['params.network.generator'], {}), '(params.network.generator)\n', (223, 249), False, 'from models.generator import Generator\n'), ((610, 619), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (617, 619), True, 'from matplotlib import pyplot as plt\n'), ((671, 689), 'matplotlib.pyplot.imshow', 'plt.imshow', (['np_cat'], {}), '(np_cat)\n', (681, 689), True, 'from matplotlib import pyplot as plt\n'), ((694, 704), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (702, 704), True, 'from matplotlib import pyplot as plt\n'), ((747, 804), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""GAN testing script"""'}), "(description='GAN testing script')\n", (770, 804), False, 'import argparse\n'), ((1133, 1147), 'attrdict.AttrDict', 'AttrDict', (['args'], {}), '(args)\n', (1141, 1147), False, 'from attrdict import AttrDict\n'), ((302, 330), 'torch.load', 'torch.load', (['params.restore.G'], {}), '(params.restore.G)\n', (312, 330), False, 'import torch\n'), ((1044, 1061), 'yaml.load', 'yaml.load', (['stream'], {}), '(stream)\n', (1053, 1061), False, 'import yaml\n'), ((368, 427), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', 'params.network.generator.z_size', '(1)', '(1)'], {}), '(1, params.network.generator.z_size, 1, 1)\n', (385, 427), False, 'import torch\n')]
|
"""mcpython - a minecraft clone written in python licenced under MIT-licence
authors: uuk, xkcdjerry
original game by forgleman licenced under MIT-licence
minecraft by Mojang
blocks based on 1.14.4.jar of minecraft, downloaded on 20th of July, 2019"""
from world.gen.layer.Layer import Layer, LayerConfig
import globals as G
import random
import opensimplex
import world.Chunk
@G.worldgenerationhandler
class DefaultLandMassLayer(Layer):
noise1 = opensimplex.OpenSimplex(seed=random.randint(-10000, 10000))
noise2 = opensimplex.OpenSimplex(seed=random.randint(-10000, 10000))
noise3 = opensimplex.OpenSimplex(seed=random.randint(-10000, 10000))
@staticmethod
def normalize_config(config: LayerConfig):
if not hasattr(config, "masses"):
config.masses = ["land"]
# todo: add underwaterbiomes
if not hasattr(config, "size"):
config.size = 1
@staticmethod
def get_name() -> str:
return "landmass_default"
@staticmethod
def add_generate_functions_to_chunk(config: LayerConfig, chunk):
chunk.chunkgenerationtasks.append([DefaultLandMassLayer.generate_landmass, [chunk, config], {}])
@staticmethod
def generate_landmass(chunk, config):
cx, cz = chunk.position
landmap = chunk.get_value("landmassmap")
factor = 10**config.size
for x in range(cx*16, cx*16+16):
for z in range(cz*16, cz*16+16):
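                # average the three OpenSimplex noise fields (each mapped to [0, 1]) and round to an index into config.masses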
v = sum([DefaultLandMassLayer.noise1.noise2d(x/factor, z/factor) * 0.5 + 0.5,
DefaultLandMassLayer.noise2.noise2d(x/factor, z/factor) * 0.5 + 0.5,
DefaultLandMassLayer.noise3.noise2d(x/factor, z/factor) * 0.5 + 0.5]) / 3
v *= len(config.masses)
v = round(v)
if v == len(config.masses):
v = 0
landmap[(x, z)] = config.masses[v]
"""
if v < 0:
chunk.add_add_block_gen_task((x, 5, z), "minecraft:stone")
else:
chunk.add_add_block_gen_task((x, 5, z), "minecraft:dirt")
"""
authcode = world.Chunk.Chunk.add_default_attribute("landmassmap", DefaultLandMassLayer, {})
|
[
"random.randint"
] |
[((485, 514), 'random.randint', 'random.randint', (['(-10000)', '(10000)'], {}), '(-10000, 10000)\n', (499, 514), False, 'import random\n'), ((558, 587), 'random.randint', 'random.randint', (['(-10000)', '(10000)'], {}), '(-10000, 10000)\n', (572, 587), False, 'import random\n'), ((631, 660), 'random.randint', 'random.randint', (['(-10000)', '(10000)'], {}), '(-10000, 10000)\n', (645, 660), False, 'import random\n')]
|
from functools import partial
from keras.optimizers import SGD
from fire import Fire
from src.dataset import KaggleDataset, PseudoDataset, ExtraDataset, DataCollection
from src.model import get_model, get_callbacks
from src.aug import augment
from src.utils import logger
def fit_once(model, model_name, loss, train, val, stage, n_fold, start_epoch, initial=False):
logger.info(f'Stage {stage} started: loss {loss}, fold {n_fold}')
steps_per_epoch = 500
validation_steps = 100
model.compile(optimizer=SGD(lr=0.01 if initial else 0.001, clipvalue=4, momentum=.9, nesterov=True),
loss=loss,
metrics=['accuracy'])
history = model.fit_generator(train,
epochs=500,
steps_per_epoch=steps_per_epoch,
validation_data=val,
workers=8,
max_queue_size=32,
use_multiprocessing=False,
validation_steps=validation_steps,
callbacks=get_callbacks(model_name, loss, stage, n_fold),
initial_epoch=start_epoch,
)
return model, max(history.epoch)
def fit_model(model_name, batch_size=16, n_fold=1, shape=384):
n_classes = 10
aug = partial(augment, expected_shape=shape)
n_fold = int(n_fold)
batch_size = int(batch_size)
model, preprocess = get_model(model_name, shape, n_classes=n_classes)
def make_config(**kwargs):
d = {'n_fold': int(n_fold),
'transform': preprocess,
'batch_size': batch_size,
'train': True,
'size': shape,
'aug': aug,
'center_crop_size': 0}
d.update(kwargs)
return d
kaggle_train = KaggleDataset(**make_config())
kaggle_val = KaggleDataset(**make_config(train=False))
pseudo_train = PseudoDataset(**make_config())
pseudo_val = PseudoDataset(**make_config(train=False))
extra_train = ExtraDataset(**make_config())
extra_val = ExtraDataset(**make_config(train=False))
frozen_epochs = 1
steps_per_epoch = 500
validation_steps = 50
loss = 'categorical_crossentropy'
model.compile(optimizer='adam', loss=loss, metrics=['accuracy'])
model.fit_generator(DataCollection(kaggle_train, extra_train, pseudo_train),
epochs=frozen_epochs,
steps_per_epoch=steps_per_epoch,
validation_data=DataCollection(kaggle_val, extra_val, pseudo_val),
workers=8,
validation_steps=validation_steps,
use_multiprocessing=False,
max_queue_size=50,
)
for layer in model.layers:
layer.trainable = True
epoch = frozen_epochs
for stage, (train, val) in enumerate(((DataCollection(kaggle_train, extra_train, pseudo_train),
DataCollection(kaggle_val, extra_val, pseudo_val)),
(DataCollection(kaggle_train, pseudo_train),
DataCollection(kaggle_val, pseudo_val)),
(DataCollection(pseudo_train), DataCollection(pseudo_val)),
)):
model, epoch = fit_once(model=model,
model_name=model_name,
loss='categorical_crossentropy',
train=train,
val=val,
start_epoch=epoch,
stage=stage,
n_fold=n_fold,
initial=True if stage > 0 else False
)
if __name__ == '__main__':
Fire(fit_model)
|
[
"src.utils.logger.info",
"functools.partial",
"fire.Fire",
"keras.optimizers.SGD",
"src.model.get_callbacks",
"src.dataset.DataCollection",
"src.model.get_model"
] |
[((374, 439), 'src.utils.logger.info', 'logger.info', (['f"""Stage {stage} started: loss {loss}, fold {n_fold}"""'], {}), "(f'Stage {stage} started: loss {loss}, fold {n_fold}')\n", (385, 439), False, 'from src.utils import logger\n'), ((1425, 1463), 'functools.partial', 'partial', (['augment'], {'expected_shape': 'shape'}), '(augment, expected_shape=shape)\n', (1432, 1463), False, 'from functools import partial\n'), ((1547, 1596), 'src.model.get_model', 'get_model', (['model_name', 'shape'], {'n_classes': 'n_classes'}), '(model_name, shape, n_classes=n_classes)\n', (1556, 1596), False, 'from src.model import get_model, get_callbacks\n'), ((4032, 4047), 'fire.Fire', 'Fire', (['fit_model'], {}), '(fit_model)\n', (4036, 4047), False, 'from fire import Fire\n'), ((2433, 2488), 'src.dataset.DataCollection', 'DataCollection', (['kaggle_train', 'extra_train', 'pseudo_train'], {}), '(kaggle_train, extra_train, pseudo_train)\n', (2447, 2488), False, 'from src.dataset import KaggleDataset, PseudoDataset, ExtraDataset, DataCollection\n'), ((522, 598), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01 if initial else 0.001)', 'clipvalue': '(4)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01 if initial else 0.001, clipvalue=4, momentum=0.9, nesterov=True)\n', (525, 598), False, 'from keras.optimizers import SGD\n'), ((1149, 1195), 'src.model.get_callbacks', 'get_callbacks', (['model_name', 'loss', 'stage', 'n_fold'], {}), '(model_name, loss, stage, n_fold)\n', (1162, 1195), False, 'from src.model import get_model, get_callbacks\n'), ((2633, 2682), 'src.dataset.DataCollection', 'DataCollection', (['kaggle_val', 'extra_val', 'pseudo_val'], {}), '(kaggle_val, extra_val, pseudo_val)\n', (2647, 2682), False, 'from src.dataset import KaggleDataset, PseudoDataset, ExtraDataset, DataCollection\n'), ((3031, 3086), 'src.dataset.DataCollection', 'DataCollection', (['kaggle_train', 'extra_train', 'pseudo_train'], {}), '(kaggle_train, extra_train, pseudo_train)\n', (3045, 3086), False, 'from src.dataset import KaggleDataset, PseudoDataset, ExtraDataset, DataCollection\n'), ((3131, 3180), 'src.dataset.DataCollection', 'DataCollection', (['kaggle_val', 'extra_val', 'pseudo_val'], {}), '(kaggle_val, extra_val, pseudo_val)\n', (3145, 3180), False, 'from src.dataset import KaggleDataset, PseudoDataset, ExtraDataset, DataCollection\n'), ((3226, 3268), 'src.dataset.DataCollection', 'DataCollection', (['kaggle_train', 'pseudo_train'], {}), '(kaggle_train, pseudo_train)\n', (3240, 3268), False, 'from src.dataset import KaggleDataset, PseudoDataset, ExtraDataset, DataCollection\n'), ((3313, 3351), 'src.dataset.DataCollection', 'DataCollection', (['kaggle_val', 'pseudo_val'], {}), '(kaggle_val, pseudo_val)\n', (3327, 3351), False, 'from src.dataset import KaggleDataset, PseudoDataset, ExtraDataset, DataCollection\n'), ((3397, 3425), 'src.dataset.DataCollection', 'DataCollection', (['pseudo_train'], {}), '(pseudo_train)\n', (3411, 3425), False, 'from src.dataset import KaggleDataset, PseudoDataset, ExtraDataset, DataCollection\n'), ((3427, 3453), 'src.dataset.DataCollection', 'DataCollection', (['pseudo_val'], {}), '(pseudo_val)\n', (3441, 3453), False, 'from src.dataset import KaggleDataset, PseudoDataset, ExtraDataset, DataCollection\n')]
|
##############################################################################
#
# Copyright (c) 2004, 2005 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Size adapters for testing
$Id: test_size.py 61072 2005-10-31 17:43:51Z philikon $
"""
import os, sys
if __name__ == '__main__':
execfile(os.path.join(sys.path[0], 'framework.py'))
from zope.interface import implements
from zope.app.size.interfaces import ISized
def test_emailmessage():
"""
Test searching
Set up:
>>> from zope.app.testing.placelesssetup import setUp, tearDown
>>> setUp()
>>> import Products.Five
>>> import Products.XWFMailingListManager
>>> from Products.GSSearch import queries
>>> from Products.Five import zcml
>>> from Products.ZSQLAlchemy.ZSQLAlchemy import manage_addZSQLAlchemy
>>> zcml.load_config('meta.zcml', Products.Five)
>>> zcml.load_config('permissions.zcml', Products.Five)
>>> zcml.load_config('configure.zcml', Products.XWFMailingListManager)
>>> alchemy_adaptor = manage_addZSQLAlchemy(app, 'zalchemy')
>>> alchemy_adaptor.manage_changeProperties( hostname='localhost',
... port=5433,
... username='onlinegroups',
... password='',
... dbtype='postgres',
... database='onlinegroups.net')
>>> mq = queries.MessageQuery( {}, alchemy_adaptor )
>>> from zope.component import createObject
Clean up:
>>> tearDown()
"""
def test_suite():
from Testing.ZopeTestCase import ZopeDocTestSuite
return ZopeDocTestSuite()
if __name__ == '__main__':
framework()
|
[
"Testing.ZopeTestCase.ZopeDocTestSuite",
"os.path.join"
] |
[((2279, 2297), 'Testing.ZopeTestCase.ZopeDocTestSuite', 'ZopeDocTestSuite', ([], {}), '()\n', (2295, 2297), False, 'from Testing.ZopeTestCase import ZopeDocTestSuite\n'), ((788, 829), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""framework.py"""'], {}), "(sys.path[0], 'framework.py')\n", (800, 829), False, 'import os, sys\n')]
|
from paver.easy import *
import os
DLLS = ['h5py_hdf5.dll', 'h5py_hdf5_hl.dll', 'szip.dll', 'zlib.dll']
@task
def release_unix():
sh('python setup.py clean')
sh('python setup.py configure --reset --hdf5-version=1.8.4')
sh('python setup.py build -f')
sh('python setup.py test')
sh('python setup.py sdist')
print("Unix release done. Distribution tar file is in dist/")
@task
def release_windows():
for pyver in (27, 34):
exe = r'C:\Python%d\Python.exe' % pyver
hdf5 = r'c:\hdf5\Python%d' % pyver
sh('%s setup.py clean' % exe)
sh('%s setup.py configure --reset --hdf5-version=1.8.13 --hdf5=%s' % (exe, hdf5))
for dll in DLLS:
sh('copy c:\\hdf5\\Python%d\\bin\\%s h5py /Y' % (pyver, dll))
sh('%s setup.py build -f' % exe)
sh('%s setup.py test' % exe)
sh('%s setup.py bdist_wininst' % exe)
print ("Windows exe release done. Distribution files are in dist/")
for dll in DLLS:
os.unlink('h5py\\%s' % dll)
@task
@consume_args
def git_summary(options):
sh('git log --no-merges --pretty=oneline --abbrev-commit %s..HEAD'%options.args[0])
sh('git shortlog -s -n %s..HEAD'%options.args[0])
|
[
"os.unlink"
] |
[((995, 1022), 'os.unlink', 'os.unlink', (["('h5py\\\\%s' % dll)"], {}), "('h5py\\\\%s' % dll)\n", (1004, 1022), False, 'import os\n')]
|
"""Miscellaneous utility functions."""
from functools import reduce
from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
import spacy
import re
import cv2
import time
from keras_bert.tokenizer import Tokenizer
from keras_bert.loader import load_trained_model_from_checkpoint, load_vocabulary
from keras_bert import extract_embeddings
import os
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
def letterbox_image(image, size):
'''resize image with unchanged aspect ratio using padding'''
iw, ih = image.size
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128,128,128))
new_image.paste(image, ((w-nw)//2, (h-nh)//2))
return new_image
def rand(a=0, b=1):
return np.random.rand()*(b-a) + a
def get_bert_input(text,vocabs,max_len=512):
tokenizer = Tokenizer(vocabs, cased=False)
    # Tokenizer.encode already pads/truncates both outputs to max_len
    token, segment = tokenizer.encode(text, max_len=max_len)
return [token,segment]
def seq_to_list(s):
'''
note: 2018.10.3
    used to process sentences
'''
t_str = s.lower()
for i in [r'\?', r'\!', r'\'', r'\"', r'\$', r'\:', r'\@', r'\(', r'\)', r'\,', r'\.', r'\;', r'\n']:
t_str = re.sub(i, '', t_str)
for i in [r'\-', r'\/']:
t_str = re.sub(i, ' ', t_str)
q_list = re.sub(r'\?', '', t_str.lower()).split(' ')
q_list = list(filter(lambda x: len(x) > 0, q_list))
return q_list
def qlist_to_vec(max_length, q_list,embed):
'''
note: 2018.10.3
    used to process sentences
'''
glove_matrix = []
glove_dict = {}
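    # cache word embeddings so repeated words are only looked up once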
q_len = len(q_list)
if q_len > max_length:
q_len = max_length
for i in range(max_length):
if i < q_len:
w=q_list[i]
if w not in glove_dict:
glove_dict[w]=embed(u'%s'%w).vector
glove_matrix.append(glove_dict[w])
else:
glove_matrix.append(np.zeros(300,dtype=float))
return np.array(glove_matrix)
def get_random_data(annotation_line, input_shape,embed,config, train_mode=True, max_boxes=1):
'''random preprocessing for real-time data augmentation'''
SEG_DIR=config['seg_gt_path']
line = annotation_line.split()
h, w = input_shape
stop=len(line)
for i in range(1,len(line)):
if (line[i]=='~'):
stop=i
break
# print(line[1:stop])
box_ = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:stop]])
box=np.zeros([1,5])
seg_id=box_[0][-1]
box[0]=box_[0][:-1]
seg_map=np.load(os.path.join(SEG_DIR,str(seg_id)+'.npy'))
seg_map_ori=np.array(seg_map).astype(np.float32)
seg_map=Image.fromarray(seg_map_ori)
# print(np.shape(box))
# print(box)
#####################################
#sentence process maxlength set to 20 and random choose one for train
sentences=[]
sent_stop=stop+1
for i in range(stop+1,len(line)):
if line[i]=='~':
sentences.append(line[sent_stop:i])
sent_stop=i+1
sentences.append(line[sent_stop:len(line)])
choose_index=np.random.choice(len(sentences))
sentence=sentences[choose_index]
# print(qlist)
if config['use_bert']:
vocabs = load_vocabulary(config['bert_path']+'/vocab.txt')
word_vec=get_bert_input(sentence,vocabs,512)
else:
word_vec=qlist_to_vec(config['word_len'], sentence,embed)
# print(word_vec)
# print(np.shape(word_vec))
#######################################
image = Image.open(os.path.join(config['image_path'],line[0]))
iw, ih = image.size
scale = min(w / iw, h / ih)
nw = int(iw * scale)
nh = int(ih * scale)
dx = (w - nw) // 2
dy = (h - nh) // 2
ori_image = image
image = image.resize((nw, nh), Image.BICUBIC)
new_image = Image.new('RGB', (w, h), (128, 128, 128))
new_image.paste(image, (dx, dy))
image_data = np.array(new_image) / 255.
seg_map = seg_map.resize((nw, nh))
new_map = Image.new('L', (w, h), (0))
new_map.paste(seg_map, (dx, dy))
seg_map_data = np.array(new_map)
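    # downsample the segmentation mask to the configured output stride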
seg_map_data = cv2.resize(seg_map_data, (
seg_map_data.shape[0] // config['seg_out_stride'], seg_map_data.shape[0] // config['seg_out_stride']),interpolation=cv2.INTER_NEAREST)
seg_map_data = np.reshape(seg_map_data, [np.shape(seg_map_data)[0], np.shape(seg_map_data)[1], 1])
# print(new_image.size)
# correct boxes
box_data = np.zeros((max_boxes, 5))
if len(box) > 0:
if len(box) > max_boxes: box = box[:max_boxes]
box[:, [0, 2]] = box[:, [0, 2]] * scale + dx
box[:, [1, 3]] = box[:, [1, 3]] * scale + dy
box_data[:len(box)] = box
box_data = box_data[:, 0:4] #delete classfy
if not train_mode:
word_vec=[qlist_to_vec(config['word_len'], sent,embed) for sent in sentences]
return image_data, box_data,word_vec,ori_image,sentences,np.expand_dims(seg_map_ori ,-1)
return image_data, box_data,word_vec,seg_map_data
def lr_step_decay(lr_start=0.001, steps=[30, 40]):
def get_lr(epoch):
decay_rate = len(steps)
for i, e in enumerate(steps):
if epoch < e:
decay_rate = i
break
lr = lr_start / (10 ** (decay_rate))
return lr
return get_lr
#powre decay
def lr_power_decay(lr_start=2.5e-4,lr_power=0.9, warm_up_lr=0.,step_all=45*1414,warm_up_step=1000):
# step_per_epoch=3286
def warm_up(base_lr, lr, cur_step, end_step):
return base_lr + (lr - base_lr) * cur_step / end_step
def get_learningrate(epoch):
if epoch<warm_up_step:
lr = warm_up(warm_up_lr, lr_start, epoch, warm_up_step)
else:
lr = lr_start * ((1 - float(epoch-warm_up_step) / (step_all-warm_up_step)) ** lr_power)
return lr
# print("learning rate is", lr)
return get_learningrate
|
[
"PIL.Image.new",
"os.path.join",
"numpy.random.rand",
"numpy.zeros",
"numpy.expand_dims",
"numpy.shape",
"numpy.array",
"keras_bert.loader.load_vocabulary",
"re.sub",
"PIL.Image.fromarray",
"keras_bert.tokenizer.Tokenizer",
"cv2.resize"
] |
[((1069, 1108), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'size', '(128, 128, 128)'], {}), "('RGB', size, (128, 128, 128))\n", (1078, 1108), False, 'from PIL import Image\n'), ((1302, 1332), 'keras_bert.tokenizer.Tokenizer', 'Tokenizer', (['vocabs'], {'cased': '(False)'}), '(vocabs, cased=False)\n', (1311, 1332), False, 'from keras_bert.tokenizer import Tokenizer\n'), ((2576, 2598), 'numpy.array', 'np.array', (['glove_matrix'], {}), '(glove_matrix)\n', (2584, 2598), True, 'import numpy as np\n'), ((3085, 3101), 'numpy.zeros', 'np.zeros', (['[1, 5]'], {}), '([1, 5])\n', (3093, 3101), True, 'import numpy as np\n'), ((3275, 3303), 'PIL.Image.fromarray', 'Image.fromarray', (['seg_map_ori'], {}), '(seg_map_ori)\n', (3290, 3303), False, 'from PIL import Image\n'), ((4426, 4467), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(128, 128, 128)'], {}), "('RGB', (w, h), (128, 128, 128))\n", (4435, 4467), False, 'from PIL import Image\n'), ((4603, 4628), 'PIL.Image.new', 'Image.new', (['"""L"""', '(w, h)', '(0)'], {}), "('L', (w, h), 0)\n", (4612, 4628), False, 'from PIL import Image\n'), ((4687, 4704), 'numpy.array', 'np.array', (['new_map'], {}), '(new_map)\n', (4695, 4704), True, 'import numpy as np\n'), ((4724, 4894), 'cv2.resize', 'cv2.resize', (['seg_map_data', "(seg_map_data.shape[0] // config['seg_out_stride'], seg_map_data.shape[0] //\n config['seg_out_stride'])"], {'interpolation': 'cv2.INTER_NEAREST'}), "(seg_map_data, (seg_map_data.shape[0] // config['seg_out_stride'],\n seg_map_data.shape[0] // config['seg_out_stride']), interpolation=cv2.\n INTER_NEAREST)\n", (4734, 4894), False, 'import cv2\n'), ((5061, 5085), 'numpy.zeros', 'np.zeros', (['(max_boxes, 5)'], {}), '((max_boxes, 5))\n', (5069, 5085), True, 'import numpy as np\n'), ((1829, 1849), 're.sub', 're.sub', (['i', '""""""', 't_str'], {}), "(i, '', t_str)\n", (1835, 1849), False, 'import re\n'), ((1895, 1916), 're.sub', 're.sub', (['i', '""" """', 't_str'], {}), "(i, ' ', t_str)\n", (1901, 1916), False, 'import re\n'), ((3839, 3890), 'keras_bert.loader.load_vocabulary', 'load_vocabulary', (["(config['bert_path'] + '/vocab.txt')"], {}), "(config['bert_path'] + '/vocab.txt')\n", (3854, 3890), False, 'from keras_bert.loader import load_trained_model_from_checkpoint, load_vocabulary\n'), ((4139, 4182), 'os.path.join', 'os.path.join', (["config['image_path']", 'line[0]'], {}), "(config['image_path'], line[0])\n", (4151, 4182), False, 'import os\n'), ((4522, 4541), 'numpy.array', 'np.array', (['new_image'], {}), '(new_image)\n', (4530, 4541), True, 'import numpy as np\n'), ((1211, 1227), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1225, 1227), True, 'import numpy as np\n'), ((3226, 3243), 'numpy.array', 'np.array', (['seg_map'], {}), '(seg_map)\n', (3234, 3243), True, 'import numpy as np\n'), ((5526, 5557), 'numpy.expand_dims', 'np.expand_dims', (['seg_map_ori', '(-1)'], {}), '(seg_map_ori, -1)\n', (5540, 5557), True, 'import numpy as np\n'), ((2538, 2564), 'numpy.zeros', 'np.zeros', (['(300)'], {'dtype': 'float'}), '(300, dtype=float)\n', (2546, 2564), True, 'import numpy as np\n'), ((4935, 4957), 'numpy.shape', 'np.shape', (['seg_map_data'], {}), '(seg_map_data)\n', (4943, 4957), True, 'import numpy as np\n'), ((4962, 4984), 'numpy.shape', 'np.shape', (['seg_map_data'], {}), '(seg_map_data)\n', (4970, 4984), True, 'import numpy as np\n')]
|
#!env/bin/python3
import datetime
import sched
import pandas as pd
import plotly.graph_objs as go
import plotly.plotly as py
import requests
CSV_FILE = 'OholPlayersByServer.csv'
def process_current_player_counts():
data = fetch()
write(data, CSV_FILE)
draw(CSV_FILE)
def fetch():
timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
response = requests.get('http://onehouronelife.com/reflector/server.php?action=report')
response.raise_for_status()
raw = response.content
player_counts = [parse_player_count(line) for line in parse_server_lines(raw)]
return [timestamp] + player_counts
def parse_server_lines(raw):
return [line for line in str(raw).split('<br><br>') if line.startswith('|--> server')]
def parse_player_count(server_line):
return '' if server_line.endswith('OFFLINE') else server_line.split()[-3]
def write(data, filename):
data_line = ';'.join(data)
with open(filename, "a") as file:
file.write(data_line + '\n')
print(data_line)
def periodic(scheduler, interval, action):
scheduler.enter(interval, 1, periodic, (scheduler, interval, action))
action()
def draw(filename):
fig = dict(
data=arrange_plot_data(filename),
layout=dict(
title='OHOL Players by Server',
xaxis=dict(
rangeslider=dict(visible=True),
type='date'
)
))
upload_plot(fig)
def upload_plot(figure):
try:
py.plot(figure, filename=figure['layout']['title'], auto_open=False)
except Exception as e:
print('ERROR creating plot:\n{0}'.format(e))
def arrange_plot_data(filename):
servers = ['server%s' % (n + 1) for n in range(15)]
df = pd.read_csv(filename, sep=';', names=['timestamp'] + servers)
df['sum'] = df.apply(calculate_sum, axis=1)
data = [plot_column(name, df) for name in servers + ['sum']]
return data
def calculate_sum(row):
return sum(row[1:])
def plot_column(name, df):
return go.Scatter(x=df.timestamp, y=df[name], name=name)
if __name__ == '__main__':
s = sched.scheduler()
periodic(s, 5 * 60, process_current_player_counts)
s.run()
|
[
"pandas.read_csv",
"plotly.graph_objs.Scatter",
"sched.scheduler",
"datetime.datetime.utcnow",
"requests.get",
"plotly.plotly.plot"
] |
[((391, 467), 'requests.get', 'requests.get', (['"""http://onehouronelife.com/reflector/server.php?action=report"""'], {}), "('http://onehouronelife.com/reflector/server.php?action=report')\n", (403, 467), False, 'import requests\n'), ((1766, 1827), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '""";"""', 'names': "(['timestamp'] + servers)"}), "(filename, sep=';', names=['timestamp'] + servers)\n", (1777, 1827), True, 'import pandas as pd\n'), ((2050, 2099), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'df.timestamp', 'y': 'df[name]', 'name': 'name'}), '(x=df.timestamp, y=df[name], name=name)\n', (2060, 2099), True, 'import plotly.graph_objs as go\n'), ((2137, 2154), 'sched.scheduler', 'sched.scheduler', ([], {}), '()\n', (2152, 2154), False, 'import sched\n'), ((1516, 1584), 'plotly.plotly.plot', 'py.plot', (['figure'], {'filename': "figure['layout']['title']", 'auto_open': '(False)'}), "(figure, filename=figure['layout']['title'], auto_open=False)\n", (1523, 1584), True, 'import plotly.plotly as py\n'), ((313, 339), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (337, 339), False, 'import datetime\n')]
|
from os import getenv
from os.path import join, dirname
from dotenv import load_dotenv
# Create .env file path.
dotenv_path = join(dirname(__file__), ".env")
# Load file from the path.
load_dotenv(dotenv_path)
BOT_TOKEN = getenv('BOT_TOKEN', "")
CHAT_NAME = getenv('CHAT_NAME', "")
INSIDE_CHANNEL = getenv('INSIDE_CHANNEL', "")
|
[
"dotenv.load_dotenv",
"os.path.dirname",
"os.getenv"
] |
[((187, 211), 'dotenv.load_dotenv', 'load_dotenv', (['dotenv_path'], {}), '(dotenv_path)\n', (198, 211), False, 'from dotenv import load_dotenv\n'), ((225, 248), 'os.getenv', 'getenv', (['"""BOT_TOKEN"""', '""""""'], {}), "('BOT_TOKEN', '')\n", (231, 248), False, 'from os import getenv\n'), ((261, 284), 'os.getenv', 'getenv', (['"""CHAT_NAME"""', '""""""'], {}), "('CHAT_NAME', '')\n", (267, 284), False, 'from os import getenv\n'), ((302, 330), 'os.getenv', 'getenv', (['"""INSIDE_CHANNEL"""', '""""""'], {}), "('INSIDE_CHANNEL', '')\n", (308, 330), False, 'from os import getenv\n'), ((132, 149), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (139, 149), False, 'from os.path import join, dirname\n')]
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
#Import the data
data = pd.read_csv("TSLA.csv")
print('Raw data from Yahoo Finance : ')
print(data.head())
#Remove date and Adj Close columns
data = data.drop('Date',axis=1)
data = data.drop('Adj Close',axis = 1)
print('\n\nData after removing Date and Adj Close : ')
print(data.head())
#Split into train and test data
data_X = data.loc[:,data.columns != 'Close' ]
data_Y = data['Close']
train_X, test_X, train_y,test_y = train_test_split(data_X,data_Y,test_size=0.25)
print('\n\nTraining Set')
print(train_X.head())
print(train_y.head())
#Creating the Regressor
regressor = LinearRegression()
regressor.fit(train_X,train_y)
#Make Predictions and Evaluate them
predict_y = regressor.predict(test_X)
print('Prediction Score : ' , regressor.score(test_X,test_y))
error = mean_squared_error(test_y,predict_y)
print('Mean Squared Error : ',error)
#Plot the predicted and the expected values
fig = plt.figure()
ax = plt.axes()
ax.grid()
ax.set(xlabel='Close ($)',ylabel='Open ($)',
title='Tesla Stock Prediction using Linear Regression')
ax.plot(test_X['Open'],test_y)
ax.plot(test_X['Open'],predict_y)
fig.savefig('LRPlot.png')
plt.show()
|
[
"matplotlib.pyplot.show",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.axes",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.figure",
"sklearn.metrics.mean_squared_error"
] |
[((275, 298), 'pandas.read_csv', 'pd.read_csv', (['"""TSLA.csv"""'], {}), "('TSLA.csv')\n", (286, 298), True, 'import pandas as pd\n'), ((677, 725), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data_X', 'data_Y'], {'test_size': '(0.25)'}), '(data_X, data_Y, test_size=0.25)\n', (693, 725), False, 'from sklearn.model_selection import train_test_split\n'), ((831, 849), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (847, 849), False, 'from sklearn.linear_model import LinearRegression\n'), ((1027, 1064), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['test_y', 'predict_y'], {}), '(test_y, predict_y)\n', (1045, 1064), False, 'from sklearn.metrics import mean_squared_error\n'), ((1153, 1165), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1163, 1165), True, 'import matplotlib.pyplot as plt\n'), ((1171, 1181), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (1179, 1181), True, 'import matplotlib.pyplot as plt\n'), ((1385, 1395), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1393, 1395), True, 'import matplotlib.pyplot as plt\n')]
|
"""Utilities to scan all Python files in a directory and
aggregate the names of all the imported packages
"""
import argparse
import ast
import os
from collections import Counter
from typing import Dict, Iterable, List, Optional, Tuple
from iscan.std_lib import separate_third_party_from_std_lib
class ImportScanner(ast.NodeVisitor):
"""Scanner to look for imported packages."""
def __init__(self) -> None:
self.imports = [] # type: ignore
def visit_Import(self, node: ast.Import) -> None:
"""Extract imports of the form `import foo`.
>>> import_statement = 'import os.path.join as jn, datetime.datetime as dt'
>>> ast.dump(ast.parse(import_statement))
"Module(body=[
Import(names=[alias(name='os.path.join', asname='jn'),
alias(name='datetime.datetime', asname='dt')])
])"
"""
for alias in node.names:
self.imports.append(alias.name)
self.generic_visit(node)
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
"""Extract imports of the form `from foo import bar`.
Relative imports such as `from ..utils import foo` will be ignored.
>>> import_statement = 'from os.path import join as jn, split'
>>> ast.dump(ast.parse(import_statement))
"Module(body=[
ImportFrom(module='os.path',
names=[alias(name='join', asname='jn'),
alias(name='split', asname=None)],
level=0)
])"
"""
# Ignore relative imports, for which node.level > 0
# E.g., `from ..utils import foo` has a node.level of 2
if node.level == 0:
self.imports.append(node.module)
self.generic_visit(node)
def get_imports(self) -> List[str]:
return sorted(self.imports)
def convert_source_to_tree(fpath: str) -> ast.Module:
"""Convert source code into abstract syntax tree.
Args:
fpath: Path to the Python file of interest
Returns:
AST representation of the source code
"""
with open(fpath, 'r') as f:
tree = ast.parse(f.read())
return tree
def scan_directory(dir_to_scan: str, dir_to_exclude: Optional[str] = None) -> List[str]:
"""Extract packages imported across all Python files in a directory.
Args:
dir_to_scan: Path to the directory of interest
dir_to_exclude: Path to the directory to be excluded during scanning
Returns:
Imported packages; might contain duplicates
"""
all_imports = []
for root_dir, _, fnames in os.walk(top=dir_to_scan):
# Skip excluded directory
if dir_to_exclude is not None:
if os.path.abspath(dir_to_exclude) in os.path.abspath(root_dir):
continue
for fname in fnames:
# Skip non-Python files
if not fname.endswith('.py'):
continue
# Convert source code into tree
fpath = os.path.join(root_dir, fname)
tree = convert_source_to_tree(fpath)
# Extract imports for current file
scanner = ImportScanner()
scanner.visit(tree)
all_imports.extend(scanner.get_imports())
return all_imports
def get_base_name(full_name: str) -> str:
"""Extract the base name of a package.
Args:
full_name: Full name of the package of interest, e.g., pandas.testing
Returns:
Base name of the provided package, e.g., pandas
"""
return full_name.split('.')[0]
def sort_counter(counter: Counter, alphabetical: bool) -> Dict[str, int]:
"""Sort counter according to custom logic.
Args:
counter: Imported packages and their corresponding count
alphabetical: Whether to sort counter alphabetically
Returns:
Sorted counter
"""
def custom_order(tup):
# Sort first by count (descending), and then by name
return -tup[1], tup[0]
sort_key = None if alphabetical else custom_order
return dict(sorted(counter.items(), key=sort_key))
def show_result(third_party: Dict[str, int], std_lib: Dict[str, int], ignore_std_lib: bool) -> None:
"""Print the result of running iscan.
Args:
third_party: Imported third-party packages and count
std_lib: Imported standard library modules and count
ignore_std_lib: Whether to omit standard library modules in the output
"""
result = '''
--------------------------
Third-party packages
--------------------------
NAME COUNT
'''
for name, count in third_party.items():
result += f'{name:<20} {count:>5}\n'
if not ignore_std_lib:
result += '''
--------------------------
Standard library modules
--------------------------
NAME COUNT
'''
for name, count in std_lib.items():
result += f'{name:<20} {count:>5}\n'
print(result)
def run(dir_to_scan: str, dir_to_exclude: Optional[str] = None) -> Tuple[Counter, Counter]:
"""Run iscan for a given set of parameters.
Args:
dir_to_scan: Path to the directory of interest
dir_to_exclude: Path to the directory to be excluded during scanning
Returns:
Imported third-party packages and count
Imported standard library modules and count
"""
full_packages = scan_directory(dir_to_scan, dir_to_exclude)
base_packages = map(get_base_name, full_packages)
third_party, std_lib = separate_third_party_from_std_lib(base_packages)
return Counter(third_party), Counter(std_lib)
def cli() -> argparse.Namespace:
"""Command line interface."""
parser = argparse.ArgumentParser(
allow_abbrev=False,
description='Aggregate third-party packages and standard library modules imported across all Python files in a given directory.' # noqa: E501
)
parser.add_argument(
'DIR_TO_SCAN',
help='target directory to scan'
)
parser.add_argument(
'-x',
default=None,
dest='DIR_TO_EXCLUDE',
help='directory to exclude during scanning'
)
parser.add_argument(
'--ignore-std-lib',
dest='IGNORE_STD_LIB',
action='store_const',
const=True,
default=False,
help='whether to leave standard library modules out of the report'
)
parser.add_argument(
'--alphabetical',
dest='ALPHABETICAL',
action='store_const',
const=True,
default=False,
help='whether to sort the report alphabetically'
)
return parser.parse_args()
def main() -> None:
args = cli()
third_party, std_lib = run(args.DIR_TO_SCAN, args.DIR_TO_EXCLUDE)
third_party = sort_counter(third_party, args.ALPHABETICAL) # type: ignore
std_lib = sort_counter(std_lib, args.ALPHABETICAL) # type: ignore
show_result(third_party, std_lib, args.IGNORE_STD_LIB)
|
[
"os.path.abspath",
"argparse.ArgumentParser",
"os.walk",
"iscan.std_lib.separate_third_party_from_std_lib",
"collections.Counter",
"os.path.join"
] |
[((2635, 2659), 'os.walk', 'os.walk', ([], {'top': 'dir_to_scan'}), '(top=dir_to_scan)\n', (2642, 2659), False, 'import os\n'), ((5537, 5585), 'iscan.std_lib.separate_third_party_from_std_lib', 'separate_third_party_from_std_lib', (['base_packages'], {}), '(base_packages)\n', (5570, 5585), False, 'from iscan.std_lib import separate_third_party_from_std_lib\n'), ((5718, 5901), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'allow_abbrev': '(False)', 'description': '"""Aggregate third-party packages and standard library modules imported across all Python files in a given directory."""'}), "(allow_abbrev=False, description=\n 'Aggregate third-party packages and standard library modules imported across all Python files in a given directory.'\n )\n", (5741, 5901), False, 'import argparse\n'), ((5597, 5617), 'collections.Counter', 'Counter', (['third_party'], {}), '(third_party)\n', (5604, 5617), False, 'from collections import Counter\n'), ((5619, 5635), 'collections.Counter', 'Counter', (['std_lib'], {}), '(std_lib)\n', (5626, 5635), False, 'from collections import Counter\n'), ((3034, 3063), 'os.path.join', 'os.path.join', (['root_dir', 'fname'], {}), '(root_dir, fname)\n', (3046, 3063), False, 'import os\n'), ((2749, 2780), 'os.path.abspath', 'os.path.abspath', (['dir_to_exclude'], {}), '(dir_to_exclude)\n', (2764, 2780), False, 'import os\n'), ((2784, 2809), 'os.path.abspath', 'os.path.abspath', (['root_dir'], {}), '(root_dir)\n', (2799, 2809), False, 'import os\n')]
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import json
URL = "https://awspolicygen.s3.amazonaws.com/js/policies.js"
def main():
raw_data = requests.get(URL).text
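    # policies.js is a JavaScript assignment rather than pure JSON; strip everything up to the first '=' before parsing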
data = json.loads(raw_data[raw_data.find('=') + 1:])
perms = {}
for _, svc in data['serviceMap'].items():
perms[svc['StringPrefix']] = svc['Actions']
sorted_perms = {}
for k in sorted(perms):
sorted_perms[k] = sorted(perms[k])
with open('iam-permissions.json', 'w') as fh:
json.dump(sorted_perms, fp=fh, indent=2)
if __name__ == '__main__':
main()
|
[
"json.dump",
"requests.get"
] |
[((736, 753), 'requests.get', 'requests.get', (['URL'], {}), '(URL)\n', (748, 753), False, 'import requests\n'), ((1083, 1123), 'json.dump', 'json.dump', (['sorted_perms'], {'fp': 'fh', 'indent': '(2)'}), '(sorted_perms, fp=fh, indent=2)\n', (1092, 1123), False, 'import json\n')]
|
#!/usr/bin/env python2
'''
This submodule lets the user download the data files necessary for running the GOMAP pipeline from CyVerse.
Currently the files are stored in Gokul's personal directory, so the download has to be initiated by Gokul's own CyVerse account with icommands.
'''
import os, re, logging, json, sys, argparse, jsonmerge, gzip, shutil
from pprint import pprint
from code.utils.basic_utils import check_output_and_run
import tarfile
cyverse_path="i:/iplant/home/shared/dillpicl/gomap/GOMAP-data/"
from code.utils.logging_utils import setlogging
def setup(config):
setlogging(config,"setup")
"""
setup(config)
    This function downloads the **GOMAP-data.tar.gz** directory from CyVerse and extracts the content to the **data** directory. The steps run by this function are given below
    1. Download the GOMAP data directory from CyVerse with irsync
    2. Extract the gzip-compressed files listed in data/compress_files.txt
    3. Extract the tar archives listed in data/tar_files.txt
Parameters
----------
config : dict
The config dict generated in the gomap.py script.
"""
outdir="data/"
cmd = ["irsync","-rsv",cyverse_path,outdir]
logging.info("Downloading file from Cyverse using irsync")
#The irsync will checksum the files on both ends and dtermine if the download is necessary and will only download if necessary
# might take time to check if the files needs to be downloaded
print(os.getcwd())
print(" ".join(cmd))
check_output_and_run("outfile",cmd)
with open("data/compress_files.txt","r") as comp_files:
counter=0
for infile in comp_files.readlines():
counter=counter+1
outfile = outdir+infile.strip()
gzfile = outdir+infile.strip()+".gz"
if os.path.exists(gzfile):
if os.path.exists(outfile):
print( gzfile + " already extracted")
else:
print("Extracting " + gzfile)
with gzip.open(gzfile,"rb") as in_f:
with open(outfile,"wb") as out_f:
shutil.copyfileobj(in_f,out_f)
os.remove(gzfile)
else:
print(gzfile + " doesn't exist")
with open("data/tar_files.txt","r") as comp_files:
for infile in comp_files.readlines():
infile=infile.strip()
outfile = outdir+infile.strip()
tar_f = outdir+infile.strip()+".tar.gz"
base_dir=os.path.basename(outfile)
if os.path.exists(tar_f):
if os.path.exists(outfile):
print(tar_f + " already extracted")
else:
print("Extracting " + tar_f)
with tarfile.open(tar_f) as tar:
tar.extractall("data/")
os.remove(tar_f)
else:
print(tar_f + " doesn't exist")
|
[
"os.remove",
"gzip.open",
"os.path.basename",
"os.getcwd",
"code.utils.basic_utils.check_output_and_run",
"os.path.exists",
"logging.info",
"code.utils.logging_utils.setlogging",
"tarfile.open",
"shutil.copyfileobj"
] |
[((585, 612), 'code.utils.logging_utils.setlogging', 'setlogging', (['config', '"""setup"""'], {}), "(config, 'setup')\n", (595, 612), False, 'from code.utils.logging_utils import setlogging\n'), ((1055, 1113), 'logging.info', 'logging.info', (['"""Downloading file from Cyverse using irsync"""'], {}), "('Downloading file from Cyverse using irsync')\n", (1067, 1113), False, 'import os, re, logging, json, sys, argparse, jsonmerge, gzip, shutil\n'), ((1364, 1400), 'code.utils.basic_utils.check_output_and_run', 'check_output_and_run', (['"""outfile"""', 'cmd'], {}), "('outfile', cmd)\n", (1384, 1400), False, 'from code.utils.basic_utils import check_output_and_run\n'), ((1322, 1333), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1331, 1333), False, 'import os, re, logging, json, sys, argparse, jsonmerge, gzip, shutil\n'), ((1663, 1685), 'os.path.exists', 'os.path.exists', (['gzfile'], {}), '(gzfile)\n', (1677, 1685), False, 'import os, re, logging, json, sys, argparse, jsonmerge, gzip, shutil\n'), ((2394, 2419), 'os.path.basename', 'os.path.basename', (['outfile'], {}), '(outfile)\n', (2410, 2419), False, 'import os, re, logging, json, sys, argparse, jsonmerge, gzip, shutil\n'), ((2435, 2456), 'os.path.exists', 'os.path.exists', (['tar_f'], {}), '(tar_f)\n', (2449, 2456), False, 'import os, re, logging, json, sys, argparse, jsonmerge, gzip, shutil\n'), ((1706, 1729), 'os.path.exists', 'os.path.exists', (['outfile'], {}), '(outfile)\n', (1720, 1729), False, 'import os, re, logging, json, sys, argparse, jsonmerge, gzip, shutil\n'), ((2477, 2500), 'os.path.exists', 'os.path.exists', (['outfile'], {}), '(outfile)\n', (2491, 2500), False, 'import os, re, logging, json, sys, argparse, jsonmerge, gzip, shutil\n'), ((2056, 2073), 'os.remove', 'os.remove', (['gzfile'], {}), '(gzfile)\n', (2065, 2073), False, 'import os, re, logging, json, sys, argparse, jsonmerge, gzip, shutil\n'), ((2751, 2767), 'os.remove', 'os.remove', (['tar_f'], {}), '(tar_f)\n', (2760, 2767), False, 'import os, re, logging, json, sys, argparse, jsonmerge, gzip, shutil\n'), ((1887, 1910), 'gzip.open', 'gzip.open', (['gzfile', '"""rb"""'], {}), "(gzfile, 'rb')\n", (1896, 1910), False, 'import os, re, logging, json, sys, argparse, jsonmerge, gzip, shutil\n'), ((2655, 2674), 'tarfile.open', 'tarfile.open', (['tar_f'], {}), '(tar_f)\n', (2667, 2674), False, 'import tarfile\n'), ((2005, 2036), 'shutil.copyfileobj', 'shutil.copyfileobj', (['in_f', 'out_f'], {}), '(in_f, out_f)\n', (2023, 2036), False, 'import os, re, logging, json, sys, argparse, jsonmerge, gzip, shutil\n')]
|
#!/usr/bin/env python
import argparse
from functools import partial
from pathlib import Path
from requests_futures.sessions import FuturesSession
import pandas as pd
import numpy as np
# see https://stackoverflow.com/a/50039149
import resource
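# raise the open-file-descriptor limit so thousands of concurrent requests do not fail with "Too many open files"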
resource.setrlimit(resource.RLIMIT_NOFILE, (110000, 110000))
__version__ = '0.3'
HEADERS = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'
}
def get_html(response, verbose=False):
try:
result = response.result()
if verbose:
print('Response from {} has status code {}.'.format(result.url, result.status_code))
assert result.status_code // 100 == 2
return result.content.decode()
except:
if verbose:
print('Error occured for {}'.format(response))
return None
def get_htmls(urls, max_workers=8, verbose=False, timeout=60):
session = FuturesSession(max_workers=max_workers)
if verbose:
n = len(urls)
print('Submitting {} jobs...'.format(n))
responses = [session.get(url, headers=HEADERS, timeout=timeout) for url in urls]
if verbose:
print('Executing {} jobs...'.format(n))
# if verbose, run a for loop to show progress explicitly
if verbose:
result = []
for i, response in enumerate(responses):
print('{} done, {} to go...'.format(i, n - i))
result.append(get_html(response, verbose=verbose))
return result
else:
return [get_html(response, verbose=verbose) for response in responses]
def get_htmls_archive(urls, max_workers=8, verbose=False, timeout=60):
urls = ['https://web.archive.org/web/' + url for url in urls]
return get_htmls(urls, max_workers=max_workers, verbose=verbose, timeout=timeout)
def main(path, output, verbose, worker, timeout):
df = pd.read_hdf(path)
# if output already existed, updates:
if Path(output).is_file():
df_old = pd.read_hdf(output)
# merging dfs
df_merged = df.merge(df_old[['html']], how='outer', left_index=True, right_index=True)
df = df_merged
# merging might have changed the orders
df.sort_values('time_added', inplace=True)
na_idx = df.html.isna()
n = np.count_nonzero(na_idx)
print('{} out of {} urls are new, fetching...'.format(n, df.shape[0]))
# fetch html
n_workers = worker if worker else n
df.loc[na_idx, 'html'] = get_htmls(df[na_idx].index, max_workers=n_workers, verbose=verbose, timeout=timeout)
else:
n = df.shape[0]
print('{} urls to fetch...'.format(n))
n_workers = worker if worker else n
df['html'] = get_htmls(df.index, max_workers=n_workers, verbose=verbose, timeout=timeout)
# no response
df['archive'] = df.html.isna()
n = np.count_nonzero(df.archive)
print('{} out of {} urls cannot be fetched, try fetching from archive.org...'.format(n, df.shape[0]))
n_workers = worker if worker else n
df.loc[df.archive, 'html'] = get_htmls_archive(df[df.archive].index, max_workers=n_workers, verbose=verbose, timeout=timeout)
df.to_hdf(
output,
'df',
format='table',
complevel=9,
)
def cli():
parser = argparse.ArgumentParser(description="Save url content in HDF5.")
parser.add_argument('input', help='Input urls in HDF5.')
parser.add_argument('-o', '--output', help='Output HDF5. Update file if exists.')
parser.add_argument('-p', '--worker', type=int,
help='No. of workers used. If not specified, use as many as needed.')
parser.add_argument('-t', '--timeout', type=float, default=60.,
help='Timeout specified for requests. Default: 60.')
parser.add_argument('-v', '--version', action='version',
version='%(prog)s {}'.format(__version__))
parser.add_argument('-V', '--verbose', action='store_true',
help='verbose to stdout.')
args = parser.parse_args()
main(args.input, args.output, args.verbose, args.worker, args.timeout)
if __name__ == "__main__":
cli()
|
[
"numpy.count_nonzero",
"pandas.read_hdf",
"argparse.ArgumentParser",
"resource.setrlimit",
"requests_futures.sessions.FuturesSession",
"pathlib.Path"
] |
[((247, 307), 'resource.setrlimit', 'resource.setrlimit', (['resource.RLIMIT_NOFILE', '(110000, 110000)'], {}), '(resource.RLIMIT_NOFILE, (110000, 110000))\n', (265, 307), False, 'import resource\n'), ((1187, 1226), 'requests_futures.sessions.FuturesSession', 'FuturesSession', ([], {'max_workers': 'max_workers'}), '(max_workers=max_workers)\n', (1201, 1226), False, 'from requests_futures.sessions import FuturesSession\n'), ((2128, 2145), 'pandas.read_hdf', 'pd.read_hdf', (['path'], {}), '(path)\n', (2139, 2145), True, 'import pandas as pd\n'), ((3116, 3144), 'numpy.count_nonzero', 'np.count_nonzero', (['df.archive'], {}), '(df.archive)\n', (3132, 3144), True, 'import numpy as np\n'), ((3544, 3608), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Save url content in HDF5."""'}), "(description='Save url content in HDF5.')\n", (3567, 3608), False, 'import argparse\n'), ((2237, 2256), 'pandas.read_hdf', 'pd.read_hdf', (['output'], {}), '(output)\n', (2248, 2256), True, 'import pandas as pd\n'), ((2543, 2567), 'numpy.count_nonzero', 'np.count_nonzero', (['na_idx'], {}), '(na_idx)\n', (2559, 2567), True, 'import numpy as np\n'), ((2196, 2208), 'pathlib.Path', 'Path', (['output'], {}), '(output)\n', (2200, 2208), False, 'from pathlib import Path\n')]
|
import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
# Note: The original ordering of chapters and verses was extremely complex.
# As a result, chapters are the bold headers and subsections are each p tag.
# Case 1: Sections split by numbers (Roman or not) followed by a period, or bracketed. Subsections split by <p> tags
def parsecase1(ptags, c, colltitle, title, author, date, URL):
# ptags contains all <p> tags. c is the cursor object.
chapter = '-1'
verse = 0
for p in ptags:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
'internal_navigation']: # these are not part of the main t
continue
except:
pass
passage = ''
text = p.get_text().strip()
# Skip empty paragraphs. and skip the last part with the collection link.
if len(text) <= 0 or text.startswith('Asconius\n'):
continue
chapterb = p.find('b')
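        # a paragraph whose full text is its bold content is a chapter heading: record it and reset the verse counter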
if chapterb is not None and text[0].isalpha():
test = chapterb.find(text = True)
if text == test:
chapter = text
verse = 0
continue
passage = text
verse+=1
if passage.startswith('Asconius'):
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, passage.strip(), URL, 'prose'))
def main():
collURL = 'http://www.thelatinlibrary.com/asconius.html'
collOpen = urllib.request.urlopen(collURL)
collSOUP = BeautifulSoup(collOpen, 'html5lib')
author = collSOUP.title.string.strip()
colltitle = 'QUINTUS ASCONIUS PEDIANUS'
date = 'c. 9 B.C. - c. A.D. 76'
textsURL = [collURL]
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author='Asconius'")
for url in textsURL:
openurl = urllib.request.urlopen(url)
textsoup = BeautifulSoup(openurl, 'html5lib')
try:
title = textsoup.title.string.split(':')[1].strip()
except:
title = textsoup.title.string.strip()
getp = textsoup.find_all('p')
parsecase1(getp, c, colltitle, title, author, date, url)
logger.info("Program runs successfully.")
if __name__ == '__main__':
main()
|
[
"bs4.BeautifulSoup",
"phyllo.phyllo_logger.logger.info",
"sqlite3.connect",
"urllib.request.urlopen"
] |
[((1840, 1871), 'urllib.request.urlopen', 'urllib.request.urlopen', (['collURL'], {}), '(collURL)\n', (1862, 1871), False, 'import urllib\n'), ((1888, 1923), 'bs4.BeautifulSoup', 'BeautifulSoup', (['collOpen', '"""html5lib"""'], {}), "(collOpen, 'html5lib')\n", (1901, 1923), False, 'from bs4 import BeautifulSoup\n'), ((2883, 2924), 'phyllo.phyllo_logger.logger.info', 'logger.info', (['"""Program runs successfully."""'], {}), "('Program runs successfully.')\n", (2894, 2924), False, 'from phyllo.phyllo_logger import logger\n'), ((2090, 2117), 'sqlite3.connect', 'sqlite3.connect', (['"""texts.db"""'], {}), "('texts.db')\n", (2105, 2117), False, 'import sqlite3\n'), ((2513, 2540), 'urllib.request.urlopen', 'urllib.request.urlopen', (['url'], {}), '(url)\n', (2535, 2540), False, 'import urllib\n'), ((2565, 2599), 'bs4.BeautifulSoup', 'BeautifulSoup', (['openurl', '"""html5lib"""'], {}), "(openurl, 'html5lib')\n", (2578, 2599), False, 'from bs4 import BeautifulSoup\n')]
|
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib import auth
def login(request):
context = {}
if request.method == "POST":
user = auth.authenticate(username=request.POST["email"], password=request.POST["password"])
if user is not None:
auth.login(request, user)
return redirect("home")
else:
context["error"] = "Email or Password incorrect."
return render(request, "accounts/login.html", context)
def signup(request):
context = {}
if request.method == "POST":
# Check if the password and the confrom are the same
if request.POST["password1"] == request.POST["password2"]:
# Check if email is alrady been used
try:
user = User.objects.get(email=request.POST["email"])
context["error"] = "This email is already registred."
except User.DoesNotExist:
user = User.objects.create_user(request.POST["email"], email=request.POST["email"], password=request.POST["<PASSWORD>"])
auth.login(request, user)
return redirect("home")
else:
context["error"] = "Passwords must match."
return render(request, "accounts/signup.html", context)
def logout(request):
auth.logout(request)
return redirect("login")
|
[
"django.contrib.auth.models.User.objects.get",
"django.shortcuts.redirect",
"django.contrib.auth.models.User.objects.create_user",
"django.contrib.auth.logout",
"django.contrib.auth.authenticate",
"django.shortcuts.render",
"django.contrib.auth.login"
] |
[((432, 479), 'django.shortcuts.render', 'render', (['request', '"""accounts/login.html"""', 'context'], {}), "(request, 'accounts/login.html', context)\n", (438, 479), False, 'from django.shortcuts import render, redirect\n'), ((1103, 1151), 'django.shortcuts.render', 'render', (['request', '"""accounts/signup.html"""', 'context'], {}), "(request, 'accounts/signup.html', context)\n", (1109, 1151), False, 'from django.shortcuts import render, redirect\n'), ((1176, 1196), 'django.contrib.auth.logout', 'auth.logout', (['request'], {}), '(request)\n', (1187, 1196), False, 'from django.contrib import auth\n'), ((1205, 1222), 'django.shortcuts.redirect', 'redirect', (['"""login"""'], {}), "('login')\n", (1213, 1222), False, 'from django.shortcuts import render, redirect\n'), ((198, 287), 'django.contrib.auth.authenticate', 'auth.authenticate', ([], {'username': "request.POST['email']", 'password': "request.POST['password']"}), "(username=request.POST['email'], password=request.POST[\n 'password'])\n", (215, 287), False, 'from django.contrib import auth\n'), ((309, 334), 'django.contrib.auth.login', 'auth.login', (['request', 'user'], {}), '(request, user)\n', (319, 334), False, 'from django.contrib import auth\n'), ((345, 361), 'django.shortcuts.redirect', 'redirect', (['"""home"""'], {}), "('home')\n", (353, 361), False, 'from django.shortcuts import render, redirect\n'), ((723, 768), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'email': "request.POST['email']"}), "(email=request.POST['email'])\n", (739, 768), False, 'from django.contrib.auth.models import User\n'), ((867, 984), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', (["request.POST['email']"], {'email': "request.POST['email']", 'password': "request.POST['<PASSWORD>']"}), "(request.POST['email'], email=request.POST['email'],\n password=request.POST['<PASSWORD>'])\n", (891, 984), False, 'from django.contrib.auth.models import User\n'), ((985, 1010), 'django.contrib.auth.login', 'auth.login', (['request', 'user'], {}), '(request, user)\n', (995, 1010), False, 'from django.contrib import auth\n'), ((1022, 1038), 'django.shortcuts.redirect', 'redirect', (['"""home"""'], {}), "('home')\n", (1030, 1038), False, 'from django.shortcuts import render, redirect\n')]
|
import collections
import pandas as pd
big_list = [[{'автопродление': 1},
{'аккаунт': 1},
{'акция': 2},
{'безумный': 1},
{'бесплатно': 1},
{'бесплатнои': 1},
{'бесплатныи': 1},
{'бесплатный': 1},
{'бесценок': 1},
{'билет': 2},
{'бритва': 1},
{'бритвеныи': 1},
{'важный': 2},
{'вводить': 1},
{'деиствует': 1},
{'забудь': 1},
{'заполнять': 1},
{'заходить': 1},
{'заявка': 1},
{'идти': 1},
{'канал': 1},
{'карта': 1},
{'кино': 2},
{'кинопоиск': 1},
{'ленись': 1},
{'наидете': 1},
{'неделя': 1},
{'новыи': 1},
{'отключить': 1},
{'пара': 1},
{'первый': 1},
{'переходить': 1},
{'подписка': 2},
{'подписываися': 1},
{'покупка': 2},
{'покупке': 1},
{'получать': 1},
{'получение': 1},
{'почту': 1},
{'премиум': 1},
{'привязывать': 1},
{'прийти': 1},
{'промо': 1},
{'промокоду': 1},
{'регистрировать': 1},
{'регистрируемся': 1},
{'саит': 1},
{'сеичас': 1},
{'скидка': 2},
{'совершенно': 1},
{'станок': 1},
{'телеграм': 1},
{'экономить': 1}],
[{'неделя': 1},
{'получать': 1},
{'саит': 1},
{'скидка': 6},
{'автоматически': 1},
{'антивирус': 1},
{'антивирусы': 1},
{'бит': 1},
{'возможность': 1},
{'временной': 1},
{'выбрать': 1},
{'даваите': 1},
{'деиствительно': 1},
{'деиствия': 1},
{'деиствовать': 1},
{'дополнительнои': 1},
{'дополнительный': 1},
{'других': 1},
{'другое': 1},
{'ждать': 1},
{'запись': 1},
{'запустить': 1},
{'защитный': 1},
{'использовать': 1},
{'ключ': 2},
{'код': 3},
{'компьютер': 1},
{'мочь': 1},
{'наиболее': 1},
{'новость': 1},
{'обеспечение': 4},
{'обновить': 1},
{'ограничить': 2},
{'отличный': 1},
{'парк': 1},
{'планировать': 1},
{'полугодовой': 1},
{'получить': 1},
{'популярный': 1},
{'посмотреть': 1},
{'предложение': 1},
{'применение': 1},
{'программный': 4},
{'продукт': 2},
{'распродажа': 2},
{'саите': 1},
{'скидкои': 1},
{'следующии': 1},
{'следующий': 1},
{'снижение': 1},
{'специальный': 1},
{'срок': 1},
{'супер': 2},
{'течение': 1},
{'упустить': 1},
{'устроиств': 1},
{'устроиства': 1},
{'учётный': 1},
{'хотеть': 1},
{'цена': 9}],
[{'наидете': 1},
{'неделя': 1},
{'первый': 2},
{'скидка': 4},
{'деиствительно': 2},
{'других': 1},
{'предложение': 2},
{'распродажа': 2},
{'снижение': 1},
{'цена': 5},
{'instagram': 1},
{'twitter': 1},
{'большинство': 1},
{'бренд': 1},
{'верить': 1},
{'вернее': 1},
{'вид': 1},
{'видео': 2},
{'витрина': 1},
{'витринный': 1},
{'выгодный': 1},
{'гарантию': 1},
{'делать': 1},
{'день': 1},
{'диктофон': 1},
{'другои': 1},
{'жж': 1},
{'закрываться': 2},
{'интересный': 1},
{'каждыи': 1},
{'количество': 1},
{'кстати': 1},
{'купить': 1},
{'логотип': 1},
{'магазин': 2},
{'маркет': 1},
{'медиамаркт': 1},
{'наидется': 1},
{'наидутся': 1},
{'например': 1},
{'находиться': 1},
{'небольшой': 3},
{'недавно': 1},
{'низкий': 2},
{'обещать': 2},
{'обман': 1},
{'общий': 1},
{'остаться': 2},
{'осуществлять': 1},
{'пестреть': 1},
{'писать': 1},
{'повыбирать': 1},
{'позиция': 1},
{'понадобиться': 1},
{'посетителеи': 1},
{'правда': 1},
{'правильно': 1},
{'продавать': 1},
{'производитель': 1},
{'размер': 1},
{'распродажный': 1},
{'рекламировать': 1},
{'связь': 1},
{'сервис': 1},
{'скореи': 1},
{'случай': 4},
{'случиться': 1},
{'сменить': 1},
{'смотреть': 1},
{'событие': 1},
{'сообщение': 1},
{'сообщить': 1},
{'соцсеть': 2},
{'сравниваите': 1},
{'сравнивать': 1},
{'старт': 1},
{'существенно': 1},
{'товар': 2},
{'трансляция': 2},
{'тщательно': 1},
{'увеличивать': 1},
{'уменьшаться': 1},
{'уникальныи': 1},
{'финальный': 1},
{'ходовой': 1},
{'центр': 1},
{'экземпляр': 1}],
[{'покупка': 1},
{'выбрать': 1},
{'продукт': 1},
{'саите': 2},
{'магазин': 1},
{'сервис': 1},
{'товар': 3},
{'уникальныи': 1},
{'брать': 2},
{'выбор': 1},
{'выкуп': 1},
{'груз': 1},
{'днеи': 1},
{'забота': 2},
{'заказ': 2},
{'заниматься': 1},
{'интернет': 3},
{'каталог': 2},
{'категория': 1},
{'мелко': 1},
{'мск': 1},
{'набор': 2},
{'нужный': 1},
{'объединение': 1},
{'оставить': 1},
{'остальные': 1},
{'откроить': 1},
{'оформление': 1},
{'параметр': 1},
{'перепаковке': 1},
{'подарочныи': 1},
{'подарочный': 1},
{'поддержка': 1},
{'полностью': 1},
{'полныи': 1},
{'посылка': 1},
{'праздничный': 1},
{'разный': 1},
{'сделать': 1},
{'служба': 1},
{'соблюдение': 1},
{'собрать': 1},
{'ссылка': 1},
{'таможенный': 1},
{'телефон': 1},
{'требовании': 1},
{'удобныи': 1},
{'указание': 1},
{'шопинг': 1}],
[{'канал': 1},
{'мочь': 1},
{'цена': 1},
{'видео': 1},
{'смотреть': 1},
{'товар': 4},
{'ссылка': 1},
{'безусловно': 1},
{'большои': 1},
{'боцманскии': 1},
{'вариант': 1},
{'внутренний': 1},
{'военнои': 1},
{'возможный': 1},
{'входить': 1},
{'глаз': 1},
{'дерево': 1},
{'довольно': 1},
{'доступный': 1},
{'друг': 1},
{'жми': 1},
{'защёлка': 1},
{'иметь': 2},
{'инструмент': 1},
{'карман': 1},
{'классный': 1},
{'кольцо': 1},
{'комплект': 1},
{'которои': 1},
{'крепление': 1},
{'крутой': 2},
{'лезвие': 1},
{'марлина': 1},
{'металического': 1},
{'металом': 1},
{'модификациеи': 1},
{'молния': 1},
{'морской': 1},
{'мужик': 1},
{'мужчик': 1},
{'наидет': 1},
{'наити': 1},
{'найти': 1},
{'накладка': 1},
{'наличие': 1},
{'настоящий': 1},
{'начать': 1},
{'нежелательный': 1},
{'необходимый': 1},
{'нержавеики': 1},
{'нож': 2},
{'основнои': 1},
{'основный': 1},
{'особенность': 1},
{'отличительнои': 1},
{'палированным': 1},
{'пластик': 1},
{'поддеть': 1},
{'популярнои': 1},
{'потаиным': 1},
{'поэтому': 1},
{'правило': 1},
{'представлять': 1},
{'преимущество': 1},
{'привет': 1},
{'простота': 1},
{'работа': 1},
{'ремень': 6},
{'ремня': 1},
{'рукоятка': 1},
{'самое': 1},
{'связке': 1},
{'складный': 1},
{'слишком': 1},
{'смочь': 1},
{'собои': 1},
{'сокровенный': 1},
{'статья': 1},
{'страховочный': 1},
{'таиника': 1},
{'таиником': 1},
{'такои': 1},
{'твёрдый': 1},
{'тканевыи': 1},
{'толстыи': 1},
{'топчик': 1},
{'увидеть': 1},
{'узел': 1},
{'часть': 1},
{'шип': 1},
{'являться': 2}],
[{'канал': 1},
{'покупка': 1},
{'сеичас': 1},
{'скидка': 5},
{'других': 1},
{'супер': 1},
{'товар': 3},
{'нужный': 1},
{'подарочныи': 1},
{'подарочный': 1},
{'разный': 1},
{'ремень': 1},
{'барсучий': 1},
{'благородный': 1},
{'больший': 1},
{'бритьё': 1},
{'быстрый': 1},
{'восторженный': 1},
{'вставка': 1},
{'выделка': 1},
{'выполнить': 1},
{'высокий': 1},
{'год': 1},
{'двоиными': 1},
{'длина': 1},
{'добавить': 1},
{'документ': 1},
{'доставка': 1},
{'древесина': 1},
{'дужки': 1},
{'зажимами': 1},
{'защитои': 1},
{'зеркальный': 1},
{'изготовить': 1},
{'исполнение': 1},
{'качество': 1},
{'кисть': 2},
{'клапанах': 1},
{'ключеи': 1},
{'кожа': 1},
{'кожаный': 2},
{'комфортный': 1},
{'коричневыи': 1},
{'коробка': 1},
{'кошелёк': 1},
{'красивый': 1},
{'красота': 1},
{'крем': 1},
{'круглый': 1},
{'лаик': 1},
{'линза': 1},
{'лицо': 1},
{'материал': 2},
{'мелочеи': 1},
{'металлическии': 1},
{'металлический': 2},
{'мех': 1},
{'моделеи': 1},
{'модель': 1},
{'модный': 1},
{'молниях': 1},
{'мужской': 1},
{'мужчина': 2},
{'накладками': 1},
{'нанесение': 2},
{'наплечныи': 1},
{'наслаждение': 1},
{'натуральный': 1},
{'нежный': 1},
{'новинка': 1},
{'ноутбук': 1},
{'оправа': 1},
{'отделение': 2},
{'отзыв': 2},
{'отзывы': 1},
{'отличнои': 1},
{'очень': 2},
{'очки': 1},
{'пена': 2},
{'плохой': 1},
{'подписываитесь': 1},
{'подтяжка': 1},
{'покупателеи': 1},
{'покупатель': 1},
{'полный': 1},
{'помазок': 1},
{'понравиться': 1},
{'портфель': 1},
{'превращаться': 1},
{'прекрасныи': 1},
{'прекрасный': 1},
{'признателен': 1},
{'продавец': 1},
{'пружинои': 1},
{'рекомендовать': 2},
{'ретро': 1},
{'решение': 1},
{'ручка': 2},
{'сантиметр': 2},
{'сдержанный': 1},
{'сегодня': 1},
{'спандекс': 1},
{'сплава': 1},
{'стекло': 1},
{'стиль': 1},
{'стильный': 1},
{'сумка': 1},
{'темно': 1},
{'тысяча': 1},
{'удобный': 2},
{'удобство': 1},
{'удовольствие': 1},
{'ультрафиолет': 1},
{'упаковать': 2},
{'фотохромный': 1},
{'футляр': 1},
{'хороший': 1},
{'худой': 1},
{'цвет': 1},
{'цветовой': 1},
{'цинк': 1},
{'черныи': 1},
{'ширина': 1},
{'эластичныи': 1}],
[{'покупка': 4},
{'даваите': 1},
{'использовать': 1},
{'посмотреть': 2},
{'цена': 2},
{'интересный': 1},
{'магазин': 2},
{'товар': 5},
{'набор': 2},
{'разный': 1},
{'самое': 1},
{'складный': 1},
{'статья': 1},
{'качество': 1},
{'кожа': 1},
{'коробка': 1},
{'крем': 1},
{'новинка': 7},
{'подписываитесь': 1},
{'цвет': 4},
{'автомобилист': 1},
{'апрель': 4},
{'аромат': 1},
{'ассортимент': 2},
{'банныи': 1},
{'бельё': 1},
{'блокноты': 1},
{'вакуумный': 1},
{'весёлый': 1},
{'волос': 1},
{'гель': 1},
{'гигиена': 1},
{'горшки': 1},
{'губка': 1},
{'дача': 1},
{'двухъярусная': 1},
{'детеи': 1},
{'детский': 2},
{'дизаинами': 1},
{'дизаины': 1},
{'дом': 2},
{'душе': 1},
{'желать': 1},
{'забываите': 1},
{'завезти': 1},
{'завершить': 1},
{'зеркало': 1},
{'зонт': 1},
{'иванов': 1},
{'игрушка': 4},
{'идея': 1},
{'канцелярия': 1},
{'кинетический': 1},
{'клавиатура': 1},
{'компас': 1},
{'конец': 2},
{'конструктор': 1},
{'копилка': 1},
{'корзина': 1},
{'коробочка': 1},
{'косметика': 2},
{'крышкои': 1},
{'лаванда': 1},
{'лаики': 1},
{'летний': 1},
{'магнитик': 1},
{'март': 6},
{'мочалка': 1},
{'мытьё': 1},
{'надувной': 1},
{'наносить': 1},
{'начало': 1},
{'новинками': 1},
{'новый': 1},
{'обзор': 9},
{'отдел': 1},
{'отделе': 1},
{'отдых': 1},
{'отсек': 1},
{'пакет': 1},
{'песок': 1},
{'песочница': 1},
{'подарок': 1},
{'подготовить': 1},
{'подробныи': 1},
{'полезный': 1},
{'полка': 1},
{'полотенце': 2},
{'полочка': 1},
{'постельный': 1},
{'посуда': 3},
{'появиться': 3},
{'предполагать': 1},
{'представить': 2},
{'приятный': 1},
{'проводной': 1},
{'проидемся': 1},
{'производство': 1},
{'пропустить': 1},
{'просмотр': 1},
{'простынь': 1},
{'прямо': 1},
{'пятёрочка': 3},
{'ремешок': 1},
{'роза': 1},
{'рублеи': 14},
{'светодиодныи': 1},
{'сказать': 1},
{'см': 2},
{'снова': 2},
{'сожаление': 1},
{'состав': 1},
{'спасибо': 1},
{'ставить': 1},
{'страничка': 1},
{'сушка': 1},
{'творчество': 1},
{'тело': 1},
{'трость': 1},
{'удачный': 1},
{'указать': 2},
{'уход': 2},
{'хранение': 2},
{'цветок': 1},
{'цифровой': 1},
{'читаите': 1},
{'щётка': 1}],
[{'покупка': 3},
{'деиствительно': 1},
{'дополнительнои': 1},
{'получить': 1},
{'цена': 4},
{'выгодный': 3},
{'купить': 4},
{'магазин': 5},
{'продавать': 1},
{'товар': 2},
{'заказ': 1},
{'интернет': 2},
{'комплект': 2},
{'смочь': 2},
{'покупатель': 1},
{'желать': 1},
{'приятный': 1},
{'рублеи': 2},
{'база': 1},
{'батарейка': 1},
{'быстро': 1},
{'вагин': 6},
{'вагины': 1},
{'вибрациеи': 5},
{'внимание': 1},
{'волосик': 1},
{'вставляться': 1},
{'выгоднои': 1},
{'выносной': 1},
{'джанин': 8},
{'известнои': 1},
{'интим': 1},
{'качественныи': 1},
{'лицензионныи': 1},
{'лобке': 1},
{'любрикант': 1},
{'максимально': 1},
{'название': 1},
{'недорого': 1},
{'описание': 1},
{'особый': 1},
{'отверстие': 1},
{'оформить': 1},
{'пальчиковый': 1},
{'положить': 1},
{'порнозвезды': 1},
{'пульт': 1},
{'работать': 1},
{'светлый': 1},
{'секс': 2},
{'слепок': 1},
{'совершение': 1},
{'стимуляция': 1},
{'тип': 1},
{'уважаемые': 1},
{'яицо': 1}],
[{'планировать': 1},
{'цена': 2},
{'продавать': 4},
{'экземпляр': 1},
{'модель': 1},
{'очень': 3},
{'рублеи': 1},
{'спасибо': 1},
{'акрил': 1},
{'бахроме': 1},
{'белыи': 1},
{'буклированные': 1},
{'вещь': 1},
{'длинныи': 2},
{'достаточно': 1},
{'единственный': 1},
{'изменю': 1},
{'метр': 1},
{'моеи': 1},
{'мягкий': 1},
{'наматываться': 1},
{'нежныи': 1},
{'неузнаваемость': 1},
{'нитка': 2},
{'огромный': 1},
{'оксана': 1},
{'повтор': 1},
{'повторю': 1},
{'пушистый': 1},
{'радуга': 1},
{'руб': 3},
{'сиреневыи': 1},
{'тонкии': 1},
{'фиолетовый': 1},
{'черно': 1},
{'шарф': 2},
{'шею': 1}],
[{'срок': 1},
{'цена': 1},
{'другои': 1},
{'днеи': 1},
{'заказ': 1},
{'оформление': 1},
{'работа': 1},
{'длина': 1},
{'модель': 1},
{'цвет': 3},
{'рублеи': 1},
{'см': 1},
{'нитка': 1},
{'шарф': 1},
{'белый': 1},
{'выполню': 1},
{'двустороннии': 1},
{'двухслоиныи': 1},
{'красный': 1},
{'крючок': 1},
{'молот': 1},
{'надпись': 1},
{'однои': 1},
{'подарить': 1},
{'пряжи': 1},
{'связать': 1},
{'серп': 1},
{'сторона': 1},
{'шерстянои': 1},
{'шерстяной': 1}],
[{'других': 1},
{'хотеть': 2},
{'цена': 2},
{'купить': 2},
{'размер': 1},
{'товар': 4},
{'брать': 1},
{'полностью': 1},
{'сделать': 1},
{'мех': 1},
{'приятный': 1},
{'рублеи': 1},
{'состав': 1},
{'руб': 1},
{'ангора': 1},
{'вопрос': 1},
{'гольф': 1},
{'дело': 1},
{'засунуть': 1},
{'знать': 1},
{'китае': 1},
{'место': 1},
{'меховой': 1},
{'новогодний': 1},
{'носок': 1},
{'ощупь': 1},
{'полиамид': 1},
{'полиэстер': 2},
{'рассчитать': 1},
{'рука': 1},
{'самом': 1},
{'светофор': 4},
{'тёплый': 1},
{'успеть': 1},
{'эластан': 1}]]
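# flatten the per-document lists of single-key dicts into one dict; a word seen in several documents keeps the count assigned last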
flat_list = [item for sublist in big_list for item in sublist]
result = {}
for i in flat_list:
result.update(i)
counter = collections.Counter(result).most_common()
print(counter)
dframe = pd.DataFrame(counter, columns=["Word", "Count"])
dframe.to_csv('a12_freq_done.csv')
|
[
"pandas.DataFrame",
"collections.Counter"
] |
[((14011, 14059), 'pandas.DataFrame', 'pd.DataFrame', (['counter'], {'columns': "['Word', 'Count']"}), "(counter, columns=['Word', 'Count'])\n", (14023, 14059), True, 'import pandas as pd\n'), ((13945, 13972), 'collections.Counter', 'collections.Counter', (['result'], {}), '(result)\n', (13964, 13972), False, 'import collections\n')]
|
import uuid
from typing import List, Optional
from .utils import logging
logger = logging.get_logger(__name__)
class Conversation:
"""
Utility class containing a conversation and its history. This class is meant to be used as an input to the
    :class:`~transformers.ConversationalPipeline`. The conversation contains a number of utility functions to manage the
addition of new user input and generated model responses. A conversation needs to contain an unprocessed user input
before being passed to the :class:`~transformers.ConversationalPipeline`. This user input is either created when
the class is instantiated, or by calling :obj:`conversational_pipeline.append_response("input")` after a
conversation turn.
Arguments:
text (:obj:`str`, `optional`):
The initial user input to start the conversation. If not provided, a user input needs to be provided
manually using the :meth:`~transformers.Conversation.add_user_input` method before the conversation can
begin.
conversation_id (:obj:`uuid.UUID`, `optional`):
Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the
conversation.
past_user_inputs (:obj:`List[str]`, `optional`):
Eventual past history of the conversation of the user. You don't need to pass it manually if you use the
pipeline interactively but if you want to recreate history you need to set both :obj:`past_user_inputs` and
:obj:`generated_responses` with equal length lists of strings
generated_responses (:obj:`List[str]`, `optional`):
Eventual past history of the conversation of the model. You don't need to pass it manually if you use the
pipeline interactively but if you want to recreate history you need to set both :obj:`past_user_inputs` and
:obj:`generated_responses` with equal length lists of strings
Usage::
conversation = Conversation("Going to the movies tonight - any suggestions?")
# Steps usually performed by the model when generating a response:
# 1. Mark the user input as processed (moved to the history)
conversation.mark_processed()
        # 2. Append a model response
conversation.append_response("The Big lebowski.")
conversation.add_user_input("Is it good?")
"""
def __init__(
self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
):
if not conversation_id:
conversation_id = uuid.uuid4()
if past_user_inputs is None:
past_user_inputs = []
if generated_responses is None:
generated_responses = []
self.uuid: uuid.UUID = conversation_id
self.past_user_inputs: List[str] = past_user_inputs
self.generated_responses: List[str] = generated_responses
self.new_user_input: Optional[str] = text
def __eq__(self, other):
if not isinstance(other, Conversation):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def add_user_input(self, text: str, overwrite: bool = False):
"""
Add a user input to the conversation for the next round. This populates the internal :obj:`new_user_input`
field.
Args:
text (:obj:`str`): The user input for the next conversation round.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not existing and unprocessed user input should be overwritten when this function is called.
"""
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".'
)
self.new_user_input = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
)
else:
self.new_user_input = text
def mark_processed(self):
"""
Mark the conversation as processed (moves the content of :obj:`new_user_input` to :obj:`past_user_inputs`) and
empties the :obj:`new_user_input` field.
"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
self.new_user_input = None
def append_response(self, response: str):
"""
Append a response to the list of generated responses.
Args:
response (:obj:`str`): The model generated response.
"""
self.generated_responses.append(response)
def iter_texts(self):
"""
Iterates over all blobs of the conversation.
Returns: Iterator of (is_user, text_chunk) in chronological order of the conversation. ``is_user`` is a
:obj:`bool`, ``text_chunks`` is a :obj:`str`.
"""
for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__(self):
"""
Generates a string representation of the conversation.
Return:
:obj:`str`:
Example: Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114 user >> Going to the movies tonight - any
suggestions? bot >> The Big Lebowski
"""
output = f"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
name = "user" if is_user else "bot"
output += f"{name} >> {text} \n"
return output
|
[
"uuid.uuid4"
] |
[((2622, 2634), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2632, 2634), False, 'import uuid\n')]
|
import argparse
from datetime import datetime
from Common.functions import add_data, get_filtered_stb, get_data
parser = argparse.ArgumentParser(description='Data-stream import and searching. Expected input data-stream line\n' +
'of the form: STB|TITLE|PROVIDER|DATE|REVENUE|TIME\n')
parser.add_argument('-i', dest='filename', help='import FILENAME to import data to datastore')
parser.add_argument('-s', dest='select',
help='SELECT from comma separated list of columns (STB,TITLE,PROVIDER,DATE,REV,TIME)')
parser.add_argument('-f', dest='filter',
help='FILTER from one column=value pair. CASE SENSITIVE. ex -f date=2017-04-21')
parser.add_argument('-o', dest='order',
help='ORDER from comma separated list of columns (STB,TITLE,PROVIDER,DATE,REV,TIME)')
args = parser.parse_args()
"""
If importing data:
Import data stream from argument filename. Expected format:
STB|TITLE|PROVIDER|DATE|REVENUE|TIME\n
"""
if args.filename:
count = 0
with open(args.filename, 'r') as file:
for line in file:
try:
box_id, title, provider, date, revenue, time = line.rstrip('\r\n').split('|')
time = datetime.strptime(time, '%H:%M')
date = datetime.strptime(date, '%Y-%m-%d')
data = {
'stb': box_id,
'date': date.strftime('%Y-%m-%d'),
'title': title,
'provider': provider,
'rev': "{0:.2f}".format(float(revenue)),
'time': time.strftime('%H:%M')
}
add_data(data)
count += 1
except ValueError as e:
print("Mal-formatted line. Skipping.")
print("Imported {} records.".format(count))
# Else, retrieving data. Data retrieval from SELECT, FILTER, and ORDER arguments
else:
# Error checking retrieval arguments
columns = {'stb', 'title', 'provider', 'date', 'rev', 'time'}
selection = args.select.lower().split(',') if args.select else None
if not selection or not set(selection) < columns:
print("Invalid SELECT argument(s). See --help for help.")
exit(1)
order = args.order.lower().split(',') if args.order else None
if order and not set(order) < columns and not set(order) < set(selection):
print("Invalid ORDER arguments(s). See --help for help.")
exit(1)
filter_by = ()
if args.filter:
key, value = tuple(args.filter.split('='))
if key not in columns:
print("Invalid FILTER argument(s). See --help for help.")
exit(1)
if key == 'rev':
try:
value = "{0:.2f}".format(float(value))
except ValueError:
print("Invalid number for rev filter.")
exit(1)
filter_by = (key, value)
# Retrieve set of matching STB id numbers based on the filter
matching_stb = get_filtered_stb(filter_by)
# If there are any matching STB id numbers, get actual data, order, and print SELECT results.
if matching_stb:
results = get_data(matching_stb, selection, filter_by, order)
# Print results in order of SELECT
for entry in results:
print(','.join([entry[key] for key in selection]))
|
[
"Common.functions.get_data",
"argparse.ArgumentParser",
"Common.functions.add_data",
"datetime.datetime.strptime",
"Common.functions.get_filtered_stb"
] |
[((122, 296), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '(\'Data-stream import and searching. Expected input data-stream line\\n\' +\n """of the form: STB|TITLE|PROVIDER|DATE|REVENUE|TIME\n""")'}), '(description=\n """Data-stream import and searching. Expected input data-stream line\n""" +\n \'of the form: STB|TITLE|PROVIDER|DATE|REVENUE|TIME\\n\')\n', (145, 296), False, 'import argparse\n'), ((3023, 3050), 'Common.functions.get_filtered_stb', 'get_filtered_stb', (['filter_by'], {}), '(filter_by)\n', (3039, 3050), False, 'from Common.functions import add_data, get_filtered_stb, get_data\n'), ((3189, 3240), 'Common.functions.get_data', 'get_data', (['matching_stb', 'selection', 'filter_by', 'order'], {}), '(matching_stb, selection, filter_by, order)\n', (3197, 3240), False, 'from Common.functions import add_data, get_filtered_stb, get_data\n'), ((1241, 1273), 'datetime.datetime.strptime', 'datetime.strptime', (['time', '"""%H:%M"""'], {}), "(time, '%H:%M')\n", (1258, 1273), False, 'from datetime import datetime\n'), ((1297, 1332), 'datetime.datetime.strptime', 'datetime.strptime', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (1314, 1332), False, 'from datetime import datetime\n'), ((1672, 1686), 'Common.functions.add_data', 'add_data', (['data'], {}), '(data)\n', (1680, 1686), False, 'from Common.functions import add_data, get_filtered_stb, get_data\n')]
|
import os, fnmatch, sys, time
import dill as pickle
import scipy.interpolate as interp
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import bead_util as bu
import calib_util as cu
import configuration as config
import time
dirname = '/data/old_trap/20201202/power/init'
files, _ = bu.find_all_fnames(dirname, sort_time=True)
fb_set = []
power = []
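# one point per data file: the mean of the third position-feedback channel against the mean measured power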
for filname in files:
df = bu.DataFile()
df.load(filname)
fb_set.append(np.mean(df.pos_fb[2]))
power.append(np.abs(np.mean(df.power)))
plt.plot(fb_set, power)
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"bead_util.find_all_fnames",
"numpy.mean",
"bead_util.DataFile"
] |
[((325, 368), 'bead_util.find_all_fnames', 'bu.find_all_fnames', (['dirname'], {'sort_time': '(True)'}), '(dirname, sort_time=True)\n', (343, 368), True, 'import bead_util as bu\n'), ((549, 572), 'matplotlib.pyplot.plot', 'plt.plot', (['fb_set', 'power'], {}), '(fb_set, power)\n', (557, 572), True, 'import matplotlib.pyplot as plt\n'), ((573, 583), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (581, 583), True, 'import matplotlib.pyplot as plt\n'), ((426, 439), 'bead_util.DataFile', 'bu.DataFile', ([], {}), '()\n', (437, 439), True, 'import bead_util as bu\n'), ((480, 501), 'numpy.mean', 'np.mean', (['df.pos_fb[2]'], {}), '(df.pos_fb[2])\n', (487, 501), True, 'import numpy as np\n'), ((527, 544), 'numpy.mean', 'np.mean', (['df.power'], {}), '(df.power)\n', (534, 544), True, 'import numpy as np\n')]
|
"""Carletproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from Carletapp import views
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('signup1/', csrf_exempt(views.SignUp1.as_view())),
path('signup2/', csrf_exempt(views.SignUp2.as_view())),
path('login/', csrf_exempt(views.Login.as_view())),
path('uservalidation/', csrf_exempt(views.UserRegistrationValidation.as_view())),
path('userregister/', csrf_exempt(views.UserRegistration.as_view())),
path('forgotpassword/', csrf_exempt(views.ForgetPassword.as_view())),
path('changepassword/', csrf_exempt(views.ChangePassword.as_view())),
path('checkverification/', csrf_exempt(views.CheckVerification.as_view())),
path('checkregistration/', csrf_exempt(views.CheckRegistration.as_view())),
path('searchvehicle/', csrf_exempt(views.SearchVehicle.as_view())),
path('registervehicle/', csrf_exempt(views.VehicleRegistration.as_view())),
path('licensevalidation/', csrf_exempt(views.VehicleDetailValidation.as_view())),
path('requestvehicle/', csrf_exempt(views.RequestVehicle.as_view())),
path('approverequest/', csrf_exempt(views.ApproveRequest.as_view())),
path('ratevehicle/', csrf_exempt(views.RaterReviewVehicle.as_view())),
path('raterenter/', csrf_exempt(views.RateReviewRenter.as_view())),
path('sentrentrequest/', csrf_exempt(views.SentRentRequest.as_view())),
path('rcvrentrequest/', csrf_exempt(views.RecvRentRequest.as_view())),
path('generatereceipt/', csrf_exempt(views.GenerateReceipt.as_view())),
path('uploadreceipt/', csrf_exempt(views.UploadReceipt.as_view())),
path('getprofileinfo/', csrf_exempt(views.GetProfileInfo.as_view())),
path('payment/', csrf_exempt(views.Payment.as_view())),
path('accountsetting/<str:pk>/', csrf_exempt(views.ProfileAccountSetting.as_view())),
path('uservehicle/<str:pk>/', csrf_exempt(views.UserVehicleList.as_view())),
path('vehiclesetting/<str:pk>/', csrf_exempt(views.VehicleSetting.as_view())),
path('triphistory/<str:pk>/', csrf_exempt(views.TripHistory.as_view())),
path('profilepic/<str:pk>/', csrf_exempt(views.RetreiveProfilePicture.as_view())),
path('vehiclepictures/<str:pk>/', csrf_exempt(views.DisplayVehiclePictures.as_view())),
path('redeemamount/<str:pk>/', csrf_exempt(views.RedeemAmount.as_view())),
path('removefromrent/<str:pk>/', csrf_exempt(views.RemoveVehicleForRent.as_view())),
path('updateprofilepic/<str:pk>/', csrf_exempt(views.UpdateProfilePicture.as_view())),
path('addfav/', csrf_exempt(views.AddFavorite.as_view())),
path('removefav/<str:pk>/', csrf_exempt(views.RemoveFavorite.as_view())),
path('displayfav/<str:pk>/', csrf_exempt(views.FavoriteList.as_view())),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"Carletapp.views.SignUp1.as_view",
"Carletapp.views.FavoriteList.as_view",
"Carletapp.views.ChangePassword.as_view",
"Carletapp.views.RequestVehicle.as_view",
"Carletapp.views.UserRegistrationValidation.as_view",
"Carletapp.views.SearchVehicle.as_view",
"Carletapp.views.ProfileAccountSetting.as_view",
"Carletapp.views.Payment.as_view",
"Carletapp.views.Login.as_view",
"Carletapp.views.SentRentRequest.as_view",
"django.urls.path",
"Carletapp.views.RemoveVehicleForRent.as_view",
"Carletapp.views.VehicleDetailValidation.as_view",
"Carletapp.views.SignUp2.as_view",
"Carletapp.views.VehicleRegistration.as_view",
"Carletapp.views.GetProfileInfo.as_view",
"Carletapp.views.CheckVerification.as_view",
"Carletapp.views.DisplayVehiclePictures.as_view",
"Carletapp.views.CheckRegistration.as_view",
"Carletapp.views.GenerateReceipt.as_view",
"Carletapp.views.RemoveFavorite.as_view",
"Carletapp.views.TripHistory.as_view",
"Carletapp.views.RedeemAmount.as_view",
"Carletapp.views.UpdateProfilePicture.as_view",
"Carletapp.views.UploadReceipt.as_view",
"Carletapp.views.AddFavorite.as_view",
"django.conf.urls.static.static",
"Carletapp.views.UserVehicleList.as_view",
"Carletapp.views.RetreiveProfilePicture.as_view",
"Carletapp.views.UserRegistration.as_view",
"Carletapp.views.RaterReviewVehicle.as_view",
"Carletapp.views.VehicleSetting.as_view",
"Carletapp.views.RecvRentRequest.as_view",
"Carletapp.views.ForgetPassword.as_view",
"Carletapp.views.RateReviewRenter.as_view",
"Carletapp.views.ApproveRequest.as_view"
] |
[((3578, 3639), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (3584, 3639), False, 'from django.conf.urls.static import static\n'), ((3513, 3576), 'django.conf.urls.static.static', 'static', (['settings.STATIC_URL'], {'document_root': 'settings.STATIC_ROOT'}), '(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n', (3519, 3576), False, 'from django.conf.urls.static import static\n'), ((877, 908), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (881, 908), False, 'from django.urls import path\n'), ((943, 966), 'Carletapp.views.SignUp1.as_view', 'views.SignUp1.as_view', ([], {}), '()\n', (964, 966), False, 'from Carletapp import views\n'), ((1003, 1026), 'Carletapp.views.SignUp2.as_view', 'views.SignUp2.as_view', ([], {}), '()\n', (1024, 1026), False, 'from Carletapp import views\n'), ((1061, 1082), 'Carletapp.views.Login.as_view', 'views.Login.as_view', ([], {}), '()\n', (1080, 1082), False, 'from Carletapp import views\n'), ((1126, 1168), 'Carletapp.views.UserRegistrationValidation.as_view', 'views.UserRegistrationValidation.as_view', ([], {}), '()\n', (1166, 1168), False, 'from Carletapp import views\n'), ((1210, 1242), 'Carletapp.views.UserRegistration.as_view', 'views.UserRegistration.as_view', ([], {}), '()\n', (1240, 1242), False, 'from Carletapp import views\n'), ((1286, 1316), 'Carletapp.views.ForgetPassword.as_view', 'views.ForgetPassword.as_view', ([], {}), '()\n', (1314, 1316), False, 'from Carletapp import views\n'), ((1360, 1390), 'Carletapp.views.ChangePassword.as_view', 'views.ChangePassword.as_view', ([], {}), '()\n', (1388, 1390), False, 'from Carletapp import views\n'), ((1437, 1470), 'Carletapp.views.CheckVerification.as_view', 'views.CheckVerification.as_view', ([], {}), '()\n', (1468, 1470), False, 'from Carletapp import views\n'), ((1517, 1550), 'Carletapp.views.CheckRegistration.as_view', 'views.CheckRegistration.as_view', ([], {}), '()\n', (1548, 1550), False, 'from Carletapp import views\n'), ((1593, 1622), 'Carletapp.views.SearchVehicle.as_view', 'views.SearchVehicle.as_view', ([], {}), '()\n', (1620, 1622), False, 'from Carletapp import views\n'), ((1667, 1702), 'Carletapp.views.VehicleRegistration.as_view', 'views.VehicleRegistration.as_view', ([], {}), '()\n', (1700, 1702), False, 'from Carletapp import views\n'), ((1749, 1788), 'Carletapp.views.VehicleDetailValidation.as_view', 'views.VehicleDetailValidation.as_view', ([], {}), '()\n', (1786, 1788), False, 'from Carletapp import views\n'), ((1832, 1862), 'Carletapp.views.RequestVehicle.as_view', 'views.RequestVehicle.as_view', ([], {}), '()\n', (1860, 1862), False, 'from Carletapp import views\n'), ((1906, 1936), 'Carletapp.views.ApproveRequest.as_view', 'views.ApproveRequest.as_view', ([], {}), '()\n', (1934, 1936), False, 'from Carletapp import views\n'), ((1977, 2011), 'Carletapp.views.RaterReviewVehicle.as_view', 'views.RaterReviewVehicle.as_view', ([], {}), '()\n', (2009, 2011), False, 'from Carletapp import views\n'), ((2051, 2083), 'Carletapp.views.RateReviewRenter.as_view', 'views.RateReviewRenter.as_view', ([], {}), '()\n', (2081, 2083), False, 'from Carletapp import views\n'), ((2128, 2159), 'Carletapp.views.SentRentRequest.as_view', 'views.SentRentRequest.as_view', ([], {}), '()\n', (2157, 2159), False, 'from Carletapp import views\n'), ((2203, 2234), 'Carletapp.views.RecvRentRequest.as_view', 'views.RecvRentRequest.as_view', ([], {}), '()\n', (2232, 2234), False, 'from Carletapp import views\n'), ((2279, 2310), 'Carletapp.views.GenerateReceipt.as_view', 'views.GenerateReceipt.as_view', ([], {}), '()\n', (2308, 2310), False, 'from Carletapp import views\n'), ((2353, 2382), 'Carletapp.views.UploadReceipt.as_view', 'views.UploadReceipt.as_view', ([], {}), '()\n', (2380, 2382), False, 'from Carletapp import views\n'), ((2426, 2456), 'Carletapp.views.GetProfileInfo.as_view', 'views.GetProfileInfo.as_view', ([], {}), '()\n', (2454, 2456), False, 'from Carletapp import views\n'), ((2493, 2516), 'Carletapp.views.Payment.as_view', 'views.Payment.as_view', ([], {}), '()\n', (2514, 2516), False, 'from Carletapp import views\n'), ((2569, 2606), 'Carletapp.views.ProfileAccountSetting.as_view', 'views.ProfileAccountSetting.as_view', ([], {}), '()\n', (2604, 2606), False, 'from Carletapp import views\n'), ((2656, 2687), 'Carletapp.views.UserVehicleList.as_view', 'views.UserVehicleList.as_view', ([], {}), '()\n', (2685, 2687), False, 'from Carletapp import views\n'), ((2740, 2770), 'Carletapp.views.VehicleSetting.as_view', 'views.VehicleSetting.as_view', ([], {}), '()\n', (2768, 2770), False, 'from Carletapp import views\n'), ((2820, 2847), 'Carletapp.views.TripHistory.as_view', 'views.TripHistory.as_view', ([], {}), '()\n', (2845, 2847), False, 'from Carletapp import views\n'), ((2896, 2934), 'Carletapp.views.RetreiveProfilePicture.as_view', 'views.RetreiveProfilePicture.as_view', ([], {}), '()\n', (2932, 2934), False, 'from Carletapp import views\n'), ((2988, 3026), 'Carletapp.views.DisplayVehiclePictures.as_view', 'views.DisplayVehiclePictures.as_view', ([], {}), '()\n', (3024, 3026), False, 'from Carletapp import views\n'), ((3077, 3105), 'Carletapp.views.RedeemAmount.as_view', 'views.RedeemAmount.as_view', ([], {}), '()\n', (3103, 3105), False, 'from Carletapp import views\n'), ((3158, 3194), 'Carletapp.views.RemoveVehicleForRent.as_view', 'views.RemoveVehicleForRent.as_view', ([], {}), '()\n', (3192, 3194), False, 'from Carletapp import views\n'), ((3249, 3285), 'Carletapp.views.UpdateProfilePicture.as_view', 'views.UpdateProfilePicture.as_view', ([], {}), '()\n', (3283, 3285), False, 'from Carletapp import views\n'), ((3321, 3348), 'Carletapp.views.AddFavorite.as_view', 'views.AddFavorite.as_view', ([], {}), '()\n', (3346, 3348), False, 'from Carletapp import views\n'), ((3396, 3426), 'Carletapp.views.RemoveFavorite.as_view', 'views.RemoveFavorite.as_view', ([], {}), '()\n', (3424, 3426), False, 'from Carletapp import views\n'), ((3475, 3503), 'Carletapp.views.FavoriteList.as_view', 'views.FavoriteList.as_view', ([], {}), '()\n', (3501, 3503), False, 'from Carletapp import views\n')]
|
# Generated by Django 3.2 on 2021-12-02 01:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0007_alter_user_email'),
]
operations = [
migrations.AlterField(
model_name='notice',
name='enabled',
field=models.BooleanField(default=False, help_text='Display on the Galaxy Australia landing page.'),
),
]
|
[
"django.db.models.BooleanField"
] |
[((330, 428), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Display on the Galaxy Australia landing page."""'}), "(default=False, help_text=\n 'Display on the Galaxy Australia landing page.')\n", (349, 428), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python
# Read secrets from .env file
import requests
import csv
import json
import os
from dotenv import load_dotenv
load_dotenv()
BOARD_ID = os.getenv("TRELLO_BOARD_ID")
TRELLO_API_KEY = os.getenv('TRELLO_API_KEY')
TRELLO_TOKEN = os.getenv('TRELLO_TOKEN')
output_filename = f'output-{BOARD_ID}.csv'
keep_fetching = True
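# fetch up to 1000 board actions per request; older actions are paged through below using the "before" parameter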
BASE_URL = "https://api.trello.com/1/boards/{board_id}/actions/?key={api_key}&token={token}&limit=1000".format(
board_id=BOARD_ID,
api_key=TRELLO_API_KEY,
    token=TRELLO_TOKEN)
url = BASE_URL
with open(output_filename, mode='w') as csv_file:
# , quoting=csv.QUOTE_MINIMAL)
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"')
# headers
csv_writer.writerow(['timestamp', 'type', 'card_id', 'card_name', 'card_shortLink',
'listAfter_id', 'listAfter_name', 'listBefore_id', 'listBefore_name', 'text',
'member_fullName', 'member_username'])
while(keep_fetching):
print(url)
print("fetching...")
response = requests.get(url)
print("done.")
# json_data = json.load(json_file)
# for action in json_data.get('actions'):
for action in response.json():
row = []
data = action.get('data')
card = data.get('card', {})
# type
row.append(action.get('date'))
row.append(action.get('type'))
# data.card.id
# data.card.name
# data.card.shortLink
row.append(card.get('id', ''))
row.append(card.get('name', ''))
row.append(card.get('shortLink', ''))
listAfter = data.get('listAfter', {})
# data.listAfter.id
# data.listAfter.name
row.append(listAfter.get('id', ''))
row.append(listAfter.get('name', ''))
listBefore = data.get('listBefore', {})
# data.listBefore.id
# data.listBefore.name
row.append(listBefore.get('id', ''))
row.append(listBefore.get('name', ''))
# data.text
row.append(data.get('text', ''))
memberCreator = action.get('memberCreator', {})
# memberCreator.fullName
# memberCreator.username
row.append(memberCreator.get('fullName', ''))
row.append(memberCreator.get('username', ''))
# Write to the CSV file
csv_writer.writerow(row)
# if we got data, then keep going
keep_fetching = len(response.json()) > 0
if (keep_fetching):
# last_action
oldest_action = response.json()[-1]
print(oldest_action.get('date'))
url = "{base_url}&before={oldest_id}".format(
base_url=BASE_URL, oldest_id=oldest_action.get('id'))
else:
print("No records")
print("----------------------")
|
[
"dotenv.load_dotenv",
"csv.writer",
"os.getenv",
"requests.get"
] |
[((133, 146), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (144, 146), False, 'from dotenv import load_dotenv\n'), ((159, 187), 'os.getenv', 'os.getenv', (['"""TRELLO_BOARD_ID"""'], {}), "('TRELLO_BOARD_ID')\n", (168, 187), False, 'import os\n'), ((205, 232), 'os.getenv', 'os.getenv', (['"""TRELLO_API_KEY"""'], {}), "('TRELLO_API_KEY')\n", (214, 232), False, 'import os\n'), ((248, 273), 'os.getenv', 'os.getenv', (['"""TRELLO_TOKEN"""'], {}), "('TRELLO_TOKEN')\n", (257, 273), False, 'import os\n'), ((653, 703), 'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(csv_file, delimiter=\',\', quotechar=\'"\')\n', (663, 703), False, 'import csv\n'), ((1067, 1084), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1079, 1084), False, 'import requests\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import sys
import threading
from dataclasses import asdict
from pprint import pformat
from typing import Dict, List, Optional, Type
import torchx.specs as specs
from pyre_extensions import none_throws
from torchx.cli.cmd_base import SubCommand
from torchx.cli.cmd_log import get_logs
from torchx.runner import Runner, config, get_runner
from torchx.schedulers import get_default_scheduler_name, get_scheduler_factories
from torchx.specs import CfgVal
from torchx.specs.finder import (
ComponentNotFoundException,
ComponentValidationException,
_Component,
get_builtin_source,
get_components,
)
from torchx.util.types import to_dict
logger: logging.Logger = logging.getLogger(__name__)
def _convert_to_option_type(
value: str, option_type: Type[specs.CfgVal]
) -> specs.CfgVal:
if option_type == bool:
return value.lower() == "true"
elif option_type == List[str]:
return value.split(";")
else:
# pyre-ignore[19]
return option_type(value)
def _parse_run_config(arg: str, scheduler_opts: specs.runopts) -> Dict[str, CfgVal]:
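    # Parse a "key1=value1,key2=value2" scheduler-args string into a typed cfg dict,
    # validating every key against the scheduler's declared run options.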
conf: Dict[str, CfgVal] = {}
if not arg:
return conf
for key, value in to_dict(arg).items():
option = scheduler_opts.get(key)
if option is None:
raise ValueError(f"Unknown {key}, run `torchx runopts` for more info")
option_type = option.opt_type
typed_value = _convert_to_option_type(value, option_type)
conf[key] = typed_value
return conf
class CmdBuiltins(SubCommand):
def add_arguments(self, subparser: argparse.ArgumentParser) -> None:
subparser.add_argument(
"--print",
type=str,
help="prints the builtin's component def to stdout",
)
def _builtins(self) -> Dict[str, _Component]:
return get_components()
def run(self, args: argparse.Namespace) -> None:
builtin_name = args.print
if not builtin_name:
builtin_components = self._builtins()
num_builtins = len(builtin_components)
print(f"Found {num_builtins} builtin components:")
for i, component in enumerate(builtin_components.values()):
print(f" {i + 1:2d}. {component.name}")
else:
print(get_builtin_source(builtin_name))
class CmdRun(SubCommand):
def __init__(self) -> None:
self._subparser: Optional[argparse.ArgumentParser] = None
def add_arguments(self, subparser: argparse.ArgumentParser) -> None:
scheduler_names = get_scheduler_factories().keys()
self._subparser = subparser
subparser.add_argument(
"-s",
"--scheduler",
type=str,
help=f"Name of the scheduler to use. One of: [{','.join(scheduler_names)}]",
default=get_default_scheduler_name(),
)
subparser.add_argument(
"-cfg",
"--scheduler_args",
type=str,
help="Arguments to pass to the scheduler (Ex:`cluster=foo,user=bar`)."
" For a list of scheduler run options run: `torchx runopts`"
"",
)
subparser.add_argument(
"--dryrun",
action="store_true",
default=False,
help="Does not actually submit the app,"
" just prints the scheduler request",
)
subparser.add_argument(
"--wait",
action="store_true",
default=False,
help="Wait for the app to finish before exiting.",
)
subparser.add_argument(
"--log",
action="store_true",
default=False,
help="Stream logs while waiting for app to finish.",
)
subparser.add_argument(
"conf_args",
nargs=argparse.REMAINDER,
)
def _run(self, runner: Runner, args: argparse.Namespace) -> None:
if args.scheduler == "local":
logger.warning(
"`local` scheduler is deprecated and will be"
" removed in the near future,"
" please use other variants of the local scheduler"
" (e.g. `local_cwd`)"
)
run_opts = get_runner().run_opts()
scheduler_opts = run_opts[args.scheduler]
cfg = _parse_run_config(args.scheduler_args, scheduler_opts)
config.apply(scheduler=args.scheduler, cfg=cfg)
if len(args.conf_args) < 1:
none_throws(self._subparser).error(
"the following arguments are required: conf_file, conf_args"
)
# Python argparse would remove `--` if it was the first argument. This
# does not work well for torchx, since torchx.specs.api uses another argparser to
# parse component arguments.
conf_file, conf_args = args.conf_args[0], args.conf_args[1:]
try:
if args.dryrun:
dryrun_info = runner.dryrun_component(
conf_file, conf_args, args.scheduler, cfg
)
logger.info(
"\n=== APPLICATION ===\n"
f"{pformat(asdict(dryrun_info._app), indent=2, width=80)}"
)
logger.info("\n=== SCHEDULER REQUEST ===\n" f"{dryrun_info}")
else:
app_handle = runner.run_component(
conf_file,
conf_args,
args.scheduler,
cfg,
)
# DO NOT delete this line. It is used by slurm tests to retrieve the app id
print(app_handle)
if args.scheduler.startswith("local"):
self._wait_and_exit(runner, app_handle, log=True)
else:
logger.info(f"Launched app: {app_handle}")
status = runner.status(app_handle)
logger.info(status)
logger.info(f"Job URL: {none_throws(status).ui_url}")
if args.wait:
self._wait_and_exit(runner, app_handle, log=args.log)
except (ComponentValidationException, ComponentNotFoundException) as e:
error_msg = f"\nFailed to run component `{conf_file}` got errors: \n {e}"
logger.error(error_msg)
sys.exit(1)
except specs.InvalidRunConfigException as e:
error_msg = (
f"Scheduler arg is incorrect or missing required option: `{e.cfg_key}`\n"
f"Run `torchx runopts` to check configuration for `{args.scheduler}` scheduler\n"
f"Use `-cfg` to specify run cfg as `key1=value1,key2=value2` pair\n"
"of setup `.torchxconfig` file, see: https://pytorch.org/torchx/main/experimental/runner.config.html"
)
logger.error(error_msg)
sys.exit(1)
def run(self, args: argparse.Namespace) -> None:
os.environ["TORCHX_CONTEXT_NAME"] = os.getenv("TORCHX_CONTEXT_NAME", "cli_run")
with get_runner() as runner:
self._run(runner, args)
def _wait_and_exit(self, runner: Runner, app_handle: str, log: bool) -> None:
logger.info("Waiting for the app to finish...")
log_thread = self._start_log_thread(runner, app_handle) if log else None
status = runner.wait(app_handle, wait_interval=1)
if not status:
raise RuntimeError(f"unknown status, wait returned {status}")
logger.info(f"Job finished: {status.state}")
if log_thread:
log_thread.join()
if status.state != specs.AppState.SUCCEEDED:
logger.error(status)
sys.exit(1)
else:
logger.debug(status)
def _start_log_thread(self, runner: Runner, app_handle: str) -> threading.Thread:
thread = threading.Thread(
target=get_logs,
kwargs={
"file": sys.stderr,
"runner": runner,
"identifier": app_handle,
"regex": None,
"should_tail": True,
},
)
thread.daemon = True
thread.start()
return thread
|
[
"threading.Thread",
"torchx.runner.get_runner",
"torchx.schedulers.get_scheduler_factories",
"torchx.schedulers.get_default_scheduler_name",
"torchx.specs.finder.get_builtin_source",
"torchx.util.types.to_dict",
"torchx.specs.finder.get_components",
"torchx.runner.config.apply",
"sys.exit",
"pyre_extensions.none_throws",
"dataclasses.asdict",
"os.getenv",
"logging.getLogger"
] |
[((927, 954), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (944, 954), False, 'import logging\n'), ((2085, 2101), 'torchx.specs.finder.get_components', 'get_components', ([], {}), '()\n', (2099, 2101), False, 'from torchx.specs.finder import ComponentNotFoundException, ComponentValidationException, _Component, get_builtin_source, get_components\n'), ((4654, 4701), 'torchx.runner.config.apply', 'config.apply', ([], {'scheduler': 'args.scheduler', 'cfg': 'cfg'}), '(scheduler=args.scheduler, cfg=cfg)\n', (4666, 4701), False, 'from torchx.runner import Runner, config, get_runner\n'), ((7279, 7322), 'os.getenv', 'os.getenv', (['"""TORCHX_CONTEXT_NAME"""', '"""cli_run"""'], {}), "('TORCHX_CONTEXT_NAME', 'cli_run')\n", (7288, 7322), False, 'import os\n'), ((8143, 8289), 'threading.Thread', 'threading.Thread', ([], {'target': 'get_logs', 'kwargs': "{'file': sys.stderr, 'runner': runner, 'identifier': app_handle, 'regex':\n None, 'should_tail': True}"}), "(target=get_logs, kwargs={'file': sys.stderr, 'runner':\n runner, 'identifier': app_handle, 'regex': None, 'should_tail': True})\n", (8159, 8289), False, 'import threading\n'), ((1436, 1448), 'torchx.util.types.to_dict', 'to_dict', (['arg'], {}), '(arg)\n', (1443, 1448), False, 'from torchx.util.types import to_dict\n'), ((7336, 7348), 'torchx.runner.get_runner', 'get_runner', ([], {}), '()\n', (7346, 7348), False, 'from torchx.runner import Runner, config, get_runner\n'), ((7980, 7991), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7988, 7991), False, 'import sys\n'), ((2543, 2575), 'torchx.specs.finder.get_builtin_source', 'get_builtin_source', (['builtin_name'], {}), '(builtin_name)\n', (2561, 2575), False, 'from torchx.specs.finder import ComponentNotFoundException, ComponentValidationException, _Component, get_builtin_source, get_components\n'), ((2803, 2828), 'torchx.schedulers.get_scheduler_factories', 'get_scheduler_factories', ([], {}), '()\n', (2826, 2828), False, 'from torchx.schedulers import get_default_scheduler_name, get_scheduler_factories\n'), ((3080, 3108), 'torchx.schedulers.get_default_scheduler_name', 'get_default_scheduler_name', ([], {}), '()\n', (3106, 3108), False, 'from torchx.schedulers import get_default_scheduler_name, get_scheduler_factories\n'), ((4503, 4515), 'torchx.runner.get_runner', 'get_runner', ([], {}), '()\n', (4513, 4515), False, 'from torchx.runner import Runner, config, get_runner\n'), ((6625, 6636), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6633, 6636), False, 'import sys\n'), ((7169, 7180), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7177, 7180), False, 'import sys\n'), ((4751, 4779), 'pyre_extensions.none_throws', 'none_throws', (['self._subparser'], {}), '(self._subparser)\n', (4762, 4779), False, 'from pyre_extensions import none_throws\n'), ((5436, 5460), 'dataclasses.asdict', 'asdict', (['dryrun_info._app'], {}), '(dryrun_info._app)\n', (5442, 5460), False, 'from dataclasses import asdict\n'), ((6267, 6286), 'pyre_extensions.none_throws', 'none_throws', (['status'], {}), '(status)\n', (6278, 6286), False, 'from pyre_extensions import none_throws\n')]
|
# Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import investpy as py
plt.style.use('fivethirtyeight')
# Fetch the data
bonds = py.get_bonds_overview(country='brazil')
#print(bonds)
print('')
# Filter by name and closing price
bonds2 = py.get_bonds_overview(country='brazil')[['name', 'last_close']]
#print(bonds2)
# Visualization:
plt.figure(figsize=(12, 6));
plt.title('Curva de Juros - Brazilians bonds');
plt.errorbar(bonds2.index, bonds2.last_close, marker='o', label='Curva de juros', color='blue', linewidth=1);
#plt.xlabel('Nome');
plt.ylabel('Valores de fechamento');
plt.xticks(bonds2.index, bonds2.name);
plt.legend()
plt.show();
'''#Outra forma:
pesq_fundos = py.funds.search_funds(by='name', value='Cdi')
print(pesq_fundos.head(10))
#Escolhendo o fundo
fundo = pesq_fundos['name'][1]
print(fundo)
#Buscando os dados
data = py.get_fund_historical_data(fund=fundo, country='brazil', from_date='01/01/2020', to_date='30/11/2021')['Close']
print(data.head())
retorno = data.pct_change().iloc[1:]
retorno_acum = (1 + retorno).cumprod()
#Visualização
plt.figure(figsize=(12, 6));
plt.title('Curva de Juros - Brazilians bonds');
plt.errorbar(retorno_acum.index, retorno_acum, label='Curva de juros', color='blue', linewidth=1)
plt.show()'''
|
[
"matplotlib.pyplot.title",
"investpy.get_bonds_overview",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.errorbar"
] |
[((109, 141), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (122, 141), True, 'import matplotlib.pyplot as plt\n'), ((168, 207), 'investpy.get_bonds_overview', 'py.get_bonds_overview', ([], {'country': '"""brazil"""'}), "(country='brazil')\n", (189, 207), True, 'import investpy as py\n'), ((381, 408), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (391, 408), True, 'import matplotlib.pyplot as plt\n'), ((410, 456), 'matplotlib.pyplot.title', 'plt.title', (['"""Curva de Juros - Brazilians bonds"""'], {}), "('Curva de Juros - Brazilians bonds')\n", (419, 456), True, 'import matplotlib.pyplot as plt\n'), ((458, 571), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['bonds2.index', 'bonds2.last_close'], {'marker': '"""o"""', 'label': '"""Curva de juros"""', 'color': '"""blue"""', 'linewidth': '(1)'}), "(bonds2.index, bonds2.last_close, marker='o', label=\n 'Curva de juros', color='blue', linewidth=1)\n", (470, 571), True, 'import matplotlib.pyplot as plt\n'), ((589, 624), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Valores de fechamento"""'], {}), "('Valores de fechamento')\n", (599, 624), True, 'import matplotlib.pyplot as plt\n'), ((626, 663), 'matplotlib.pyplot.xticks', 'plt.xticks', (['bonds2.index', 'bonds2.name'], {}), '(bonds2.index, bonds2.name)\n', (636, 663), True, 'import matplotlib.pyplot as plt\n'), ((665, 677), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (675, 677), True, 'import matplotlib.pyplot as plt\n'), ((678, 688), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (686, 688), True, 'import matplotlib.pyplot as plt\n'), ((285, 324), 'investpy.get_bonds_overview', 'py.get_bonds_overview', ([], {'country': '"""brazil"""'}), "(country='brazil')\n", (306, 324), True, 'import investpy as py\n')]
|
#!/usr/bin/env python3
"""This engine enables to customize the stream joining very flexible by importing only few lines of code that
define customized functionality. This framework ensures exactly-once time-series processing that are based on joins
using the local stream buffering algorithm with Apache Kafka.
Import constants and 'ingest_fct()' and 'on_join()' to customize the processing.
A join rate of around 15000 time-series joins per second is reached with a exactly-once semantic for
the consume-join-produce procedures using Apache Kafka.
Don't forget to start the demo producers in in advance in order to produce records into the Kafka topic.
"""
import os
import sys
import socket
import time
import json
from datetime import datetime
import pytz
from confluent_kafka import Producer, Consumer, TopicPartition
try:
from .LocalStreamBuffer.local_stream_buffer import Record, StreamBuffer, record_from_dict
except (ModuleNotFoundError, ImportError):
# noinspection PyUnresolvedReferences
from LocalStreamBuffer.local_stream_buffer import Record, StreamBuffer, record_from_dict
def delivery_report(err, msg):
"""Delivery callback for Kafka Produce. Called once for each message produced to indicate delivery result.
Triggered by poll() or flush(). """
if err is not None:
print('Message delivery failed: {}'.format(err))
else:
if VERBOSE:
# get the sent message using msg.value()
print(f"Message '{msg.key().decode('utf-8')}' \tdelivered to topic '{msg.topic()}' [{msg.partition()}].")
# define customized function for join
def join_fct(record_left, record_right):
try:
# create a record dictionary from both join partners
record_dict = on_join(record_left, record_right)
if record_dict is not None:
# adapt two time fields of the record
record_dict["processingTime"] = time.time()
if USE_ISO_TIMESTAMPS:
record_dict["phenomenonTime"] = to_iso_time(record_dict.get("phenomenonTime"))
record_dict["processingTime"] = to_iso_time(record_dict.get("processingTime"))
# produce a Kafka message, the delivery report callback, the key must be thing + quantity
kafka_producer.produce(f"{TARGET_SYSTEM}.ext", json.dumps(record_dict).encode('utf-8'),
key=f"{record_dict.get('thing')}.{record_dict.get('quantity')}".encode('utf-8'),
callback=delivery_report)
except Exception as ex: # this block catches possible errors in custom code
print(f"WARNING, Exception while joining streams: {ex}")
print(f"left record: {record_left}")
print(f"right record: {record_right}")
raise ex
def commit_transaction(verbose=False, commit_time=time.time()):
# Send the consumer's position to transaction to commit them along with the transaction, committing both
# input and outputs in the same transaction is what provides EOS.
kafka_producer.send_offsets_to_transaction(
kafka_consumer.position(kafka_consumer.assignment()),
kafka_consumer.consumer_group_metadata())
# Commit the transaction
kafka_producer.commit_transaction()
# Begin new transaction
kafka_producer.begin_transaction()
    # Commit the offsets of the latest records that became obsolete, so that the same records are always consumed and joined.
latest_records = []
if stream_buffer.last_removed_left:
latest_records.append(stream_buffer.last_removed_left.data.get("record"))
if stream_buffer.last_removed_right:
latest_records.append(stream_buffer.last_removed_right.data.get("record"))
# Commit message’s offset + 1
kafka_consumer.commit(offsets=[TopicPartition(topic=rec.get("topic"),
partition=rec.get("partition"),
offset=rec.get("offset") + 1) # commit the next (n+1) offset
for rec in latest_records])
if verbose:
print(f"Committed to latest offsets at {commit_time:.6f}.")
def to_iso_time(timestamp):
"""Receives an arbitrary timestamp in UTC format (most likely in unix timestamp) and returns it as ISO-format.
:param timestamp: arbitrary timestamp
:return: timestamp in ISO 8601 and UTC timezone
"""
if isinstance(timestamp, (int, float)):
return datetime.utcfromtimestamp(timestamp).replace(tzinfo=pytz.UTC).isoformat()
if timestamp is None:
return datetime.utcnow().replace(tzinfo=pytz.UTC).isoformat()
return timestamp
if __name__ == "__main__":
# Import the original, or if used in Docker the overwritten custom functions
try:
from .customization.custom_fct import *
except (ModuleNotFoundError, ImportError):
# noinspection PyUnresolvedReferences
from customization.custom_fct import *
if "--use-env-config" in sys.argv:
print(f"Load environment variables: {os.environ}")
try:
STREAM_NAME = os.environ["STREAM_NAME"]
SOURCE_SYSTEMS = os.environ["SOURCE_SYSTEM"]
TARGET_SYSTEM = os.environ["TARGET_SYSTEM"]
GOST_SERVER = os.environ["GOST_SERVER"]
KAFKA_BOOTSTRAP_SERVERS = os.environ["KAFKA_BOOTSTRAP_SERVERS"]
FILTER_LOGIC = os.environ["FILTER_LOGIC"]
# Execute the customization passed as filter logic to load necessary constants and function.
exec(FILTER_LOGIC)
_ = TIME_DELTA # Check if it worked
except Exception as e:
print("Could not load config.")
raise e
print(f"Starting the stream join with the following configurations: "
f"\n\tKAFKA_BOOTSTRAP_SERVERS: '{KAFKA_BOOTSTRAP_SERVERS}'"
f"\n\tSTREAM_NAME: '{STREAM_NAME}'"
f"\n\tSOURCE_SYSTEMS: '{SOURCE_SYSTEMS}'"
f"\n\tTARGET_SYSTEM: '{TARGET_SYSTEM}'"
f"\n\tTIME_DELTA: '{TIME_DELTA}'"
f"\n\tADDITIONAL_ATTRIBUTES: '{ADDITIONAL_ATTRIBUTES}'")
# Create a kafka producer and consumer instance and subscribe to the topics
kafka_consumer = Consumer({
'bootstrap.servers': KAFKA_BOOTSTRAP_SERVERS,
'group.id': f"TS-joiner_{socket.gethostname()}_1",
'auto.offset.reset': 'earliest',
'enable.auto.commit': False,
'enable.auto.offset.store': False
})
kafka_topics_in = [f"{sys}.int" for sys in SOURCE_SYSTEMS.split(",")]
kafka_consumer.subscribe(kafka_topics_in)
# kafka_consumer.assign([TopicPartition(topic, 0) for topic in kafka_topics_in]) # manually assign to an offset
# Create a Kafka producer
kafka_producer = Producer({'bootstrap.servers': KAFKA_BOOTSTRAP_SERVERS,
"transactional.id": f'ms-stream-app_{SOURCE_SYSTEMS}_{STREAM_NAME}'})
# Initialize producer transaction.
kafka_producer.init_transactions()
# Start producer transaction.
kafka_producer.begin_transaction()
print("Create a StreamBuffer instance.")
stream_buffer = StreamBuffer(instant_emit=True, buffer_results=False,
verbose=VERBOSE, join_function=join_fct)
start_time = last_transaction_time = time.time()
n_none_polls = 0
started = False
try:
print("Start the Stream Processing.")
while True:
# Here, a small timeout can be used, as the commit is done manually and based on TRANSACTION_TIME
msgs = kafka_consumer.consume(num_messages=MAX_BATCH_SIZE, timeout=0.2)
# iterate over each message that was consumed
for msg in msgs:
record_json = json.loads(msg.value().decode('utf-8'))
if VERBOSE:
print(f"Received new record: {record_json}")
# create a Record from the json
additional_attributes = {att: record_json.get(att.strip()) for att in ADDITIONAL_ATTRIBUTES.split(",")
if att != ""}
record = Record(
thing=record_json.get("thing"),
quantity=record_json.get("quantity"),
timestamp=record_json.get("phenomenonTime"),
result=record_json.get("result"),
topic=msg.topic(), partition=msg.partition(), offset=msg.offset(),
**additional_attributes)
ingest_fct(record, stream_buffer)
# commit the transaction every TRANSACTION_TIME
cur_time = time.time()
if cur_time >= last_transaction_time + TRANSACTION_TIME:
last_transaction_time = cur_time
commit_transaction(verbose=VERBOSE, commit_time=last_transaction_time)
except KeyboardInterrupt:
print("Gracefully stopping")
finally:
stop_time = time.time()
# commit processed message offsets to the transaction
kafka_producer.send_offsets_to_transaction(
kafka_consumer.position(kafka_consumer.assignment()),
kafka_consumer.consumer_group_metadata())
# commit transaction
kafka_producer.commit_transaction()
# Leave group and commit offsets
kafka_consumer.close()
print(f"\nRecords in |{TARGET_SYSTEM}| = {stream_buffer.get_join_counter()}, "
f"|left buffer| = {stream_buffer.get_left_counter()}, "
f"|right buffer| = {stream_buffer.get_right_counter()}.")
if start_time != stop_time:
print(f"Joined time-series {stop_time - start_time:.6f} s long, "
f"that are {stream_buffer.get_join_counter() / (stop_time - start_time):.2f} joins per second.")
|
[
"json.dumps",
"time.time",
"socket.gethostname",
"datetime.datetime.utcfromtimestamp",
"datetime.datetime.utcnow",
"confluent_kafka.Producer",
"LocalStreamBuffer.local_stream_buffer.StreamBuffer"
] |
[((2844, 2855), 'time.time', 'time.time', ([], {}), '()\n', (2853, 2855), False, 'import time\n'), ((6762, 6891), 'confluent_kafka.Producer', 'Producer', (["{'bootstrap.servers': KAFKA_BOOTSTRAP_SERVERS, 'transactional.id':\n f'ms-stream-app_{SOURCE_SYSTEMS}_{STREAM_NAME}'}"], {}), "({'bootstrap.servers': KAFKA_BOOTSTRAP_SERVERS, 'transactional.id':\n f'ms-stream-app_{SOURCE_SYSTEMS}_{STREAM_NAME}'})\n", (6770, 6891), False, 'from confluent_kafka import Producer, Consumer, TopicPartition\n'), ((7136, 7234), 'LocalStreamBuffer.local_stream_buffer.StreamBuffer', 'StreamBuffer', ([], {'instant_emit': '(True)', 'buffer_results': '(False)', 'verbose': 'VERBOSE', 'join_function': 'join_fct'}), '(instant_emit=True, buffer_results=False, verbose=VERBOSE,\n join_function=join_fct)\n', (7148, 7234), False, 'from LocalStreamBuffer.local_stream_buffer import Record, StreamBuffer, record_from_dict\n'), ((7306, 7317), 'time.time', 'time.time', ([], {}), '()\n', (7315, 7317), False, 'import time\n'), ((8949, 8960), 'time.time', 'time.time', ([], {}), '()\n', (8958, 8960), False, 'import time\n'), ((1919, 1930), 'time.time', 'time.time', ([], {}), '()\n', (1928, 1930), False, 'import time\n'), ((8631, 8642), 'time.time', 'time.time', ([], {}), '()\n', (8640, 8642), False, 'import time\n'), ((6320, 6340), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (6338, 6340), False, 'import socket\n'), ((2318, 2341), 'json.dumps', 'json.dumps', (['record_dict'], {}), '(record_dict)\n', (2328, 2341), False, 'import json\n'), ((4479, 4515), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['timestamp'], {}), '(timestamp)\n', (4504, 4515), False, 'from datetime import datetime\n'), ((4594, 4611), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4609, 4611), False, 'from datetime import datetime\n')]
|
import datetime
from enum import Enum
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import (
DeleteRowsEvent,
UpdateRowsEvent,
WriteRowsEvent,
TableMapEvent
)
from pymysqlreplication.event import (
BeginLoadQueryEvent,
ExecuteLoadQueryEvent,
QueryEvent,
RotateEvent,
HeartbeatLogEvent
)
from lib.utils import Utils
class EventType(Enum):
LOG_STATE = 1
INSERT = 2
UPDATE = 3
DELETE = 4
TABLE = 5
OTHER = 6
class BinLogStreamReaderWrapper(object):
''' Wrapper class for the python-mysql-replication library '''
def __init__(self, mysql_settings,server_id=1,blocking=False, resume_stream=True, log_file=None, log_pos=None, slave_heartbeat=None):
self.__stream = BinLogStreamReader(
connection_settings = mysql_settings,
server_id = server_id,
blocking = blocking,
resume_stream = resume_stream,
only_events = [DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent, TableMapEvent, BeginLoadQueryEvent, ExecuteLoadQueryEvent, QueryEvent], # RotateEvent, QueryEvent, HeartbeatLogEvent
log_file=log_file,
log_pos=log_pos,
slave_heartbeat=slave_heartbeat
)
def close(self):
self.__stream.close()
def fetch_event(self):
return self.__parse_event(self.__stream.fetchone())
def __iter__ (self):
return iter(self.fetch_event, None)
def __parse_event(self, binlogevent):
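        # Flatten the pymysqlreplication event into a plain dict: common metadata first,
        # then either log coordinates (query/rotate/heartbeat events) or schema, table
        # and row values (table map and row events).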
event = {
'event_type': self.__get_event_type(binlogevent),
'pymysqlreplication_event_type': type(binlogevent).__name__,
'timestamp': binlogevent.timestamp,
'log_pos': binlogevent.packet.log_pos,
'log_file': self.__stream.log_file
}
if self.__is_query_event(binlogevent):
event['log_pos'] = binlogevent.packet.log_pos
event['log_file'] = self.__stream.log_file
elif self.__is_rotate_event(binlogevent):
event['log_pos'] = binlogevent.position
event['log_file'] = binlogevent.next_binlog
elif self.__is_row_event(binlogevent) or self.__is_table_event(binlogevent):
if binlogevent.schema != 'auth': # For security
event['schema'] = binlogevent.schema
event['table'] = binlogevent.table
if self.__is_row_event(binlogevent):
for row in binlogevent.rows:
event['primary_key'] = binlogevent.primary_key
                        event['before_values'] = self.__get_before_values(binlogevent, row)
                        event['after_values'] = self.__get_after_values(binlogevent, row)
elif self.__is_heartbeat_event(binlogevent):
event['log_file'] = binlogevent.ident
return event
def __get_event_type(self, binlogevent):
event_type = None
if self.__is_heartbeat_event(binlogevent) or self.__is_rotate_event(binlogevent) or self.__is_heartbeat_event(binlogevent):
event_type = EventType.LOG_STATE
elif self.__is_delete_event(binlogevent):
event_type = EventType.DELETE
elif self.__is_update_event(binlogevent):
event_type = EventType.UPDATE
elif self.__is_insert_event(binlogevent):
event_type = EventType.INSERT
elif self.__is_table_event(binlogevent):
event_type = EventType.TABLE
else:
event_type = EventType.OTHER
return event_type
def __get_before_values(self, binlogevent, row):
before_values = None
if isinstance(binlogevent, UpdateRowsEvent):
before_values = row['before_values']
elif isinstance(binlogevent, DeleteRowsEvent):
before_values = row['values']
return before_values
def __get_after_values(self, binlogevent, row):
after_values = None
if isinstance(binlogevent, WriteRowsEvent):
after_values = row['values']
elif isinstance(binlogevent, UpdateRowsEvent):
after_values = row['after_values']
return after_values
def __is_row_event(self, binlogevent):
return self.__is_insert_event(binlogevent) or self.__is_update_event(binlogevent) or self.__is_delete_event(binlogevent)
def __is_delete_event(self, binlogevent):
return isinstance(binlogevent, DeleteRowsEvent)
def __is_update_event(self, binlogevent):
return isinstance(binlogevent, UpdateRowsEvent)
def __is_insert_event(self, binlogevent):
return isinstance(binlogevent, WriteRowsEvent)
def __is_table_event(self, binlogevent):
return isinstance(binlogevent, (TableMapEvent))
def __is_query_event(self, binlogevent):
return isinstance(binlogevent, (QueryEvent))
def __is_begin_query_event(self, binlogevent):
return isinstance(binlogevent, (BeginLoadQueryEvent))
def __is_load_query_event(self, binlogevent):
return isinstance(binlogevent, (ExecuteLoadQueryEvent))
def __is_rotate_event(self, binlogevent):
return isinstance(binlogevent, (RotateEvent))
def __is_heartbeat_event(self, binlogevent):
return isinstance(binlogevent, (HeartbeatLogEvent))
|
[
"pymysqlreplication.BinLogStreamReader"
] |
[((777, 1122), 'pymysqlreplication.BinLogStreamReader', 'BinLogStreamReader', ([], {'connection_settings': 'mysql_settings', 'server_id': 'server_id', 'blocking': 'blocking', 'resume_stream': 'resume_stream', 'only_events': '[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent, TableMapEvent,\n BeginLoadQueryEvent, ExecuteLoadQueryEvent, QueryEvent]', 'log_file': 'log_file', 'log_pos': 'log_pos', 'slave_heartbeat': 'slave_heartbeat'}), '(connection_settings=mysql_settings, server_id=server_id,\n blocking=blocking, resume_stream=resume_stream, only_events=[\n DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent, TableMapEvent,\n BeginLoadQueryEvent, ExecuteLoadQueryEvent, QueryEvent], log_file=\n log_file, log_pos=log_pos, slave_heartbeat=slave_heartbeat)\n', (795, 1122), False, 'from pymysqlreplication import BinLogStreamReader\n')]
|
import argparse
import os
import sys
from collections import namedtuple
from pathlib import Path
import toml
from ._build import build_journal
from ._create import create_journal
from ._data_classes import JournalConfiguration, WaldenConfiguration
from ._delete import delete_journal
from ._edit import edit_journal
from ._errors import WaldenException
from ._list import list_journals
from ._utils import print_error
# for initializing commands that need journal name
ARGUMENTS = [
("create", "create a new journal"),
("today", "edit today's entry"),
("delete", "delete specified journal"),
("build", "compile the specified journal"),
("view", "open the specified journal (OS dependent)"),
]
# for initializing flags
FLAGS = [
("list", "list all journals managed by walden"),
]
ARGUMENT_MAPPING = {
"build": build_journal,
"create": create_journal,
"delete": delete_journal,
"today": edit_journal,
#"view": view_journal
}
FLAG_MAPPING = {"list": list_journals}
def _parse_args() -> argparse.Namespace:
"""Create the arg parser from ARGUMENTS and return the parsed arguments"""
parser = argparse.ArgumentParser(description="edit and manage your walden journals")
ex_group = parser.add_mutually_exclusive_group(required=True)
for cmd, help_txt in ARGUMENTS:
ex_group.add_argument(
f"-{cmd[0]}",
f"--{cmd}",
type=str,
nargs=1,
help=help_txt,
metavar="JOURNAL_NAME",
)
for flag, help_txt in FLAGS:
ex_group.add_argument(
f"-{flag[0]}",
f"--{flag}",
action="store_true",
help=help_txt,
)
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
return parser.parse_args()
def _create_walden_config(config_file_path: Path):
"""Write default configuration file at specified path"""
config = {
"walden": {
"config_path": str(config_file_path),
"default_journal_path": str(Path.home() / "journals"),
}
}
config_file_path.write_text(toml.dumps(config))
def _validate_config(config: dict):
"""ensure that required fields are in config"""
if not config.get("walden", {}).get("config_path"):
raise WaldenException("Missing 'config_path' in walden configuration")
if not config["walden"].get("default_journal_path"):
raise WaldenException("Missing 'default_journal_path' in walden configuration")
def _parse_walden_config(config: dict) -> WaldenConfiguration:
"""Parse raw configuration into a dataclass for easier access"""
config_path, default_journal_path = Path(config["config_path"]), Path(
config["default_journal_path"]
)
journal_info = {}
for journal_name, journal_path in config.items():
if journal_name == "config_path" or journal_name == "default_journal_path":
continue
journal_info[journal_name] = JournalConfiguration(
name=journal_name, path=Path(journal_path)
)
return WaldenConfiguration(
config_path=config_path,
default_journal_path=default_journal_path,
journals=journal_info,
)
def _get_config() -> WaldenConfiguration:
"""Create configuration if it doesn't exist and return an object representing the config"""
config_dir = Path.home() / ".config" / "walden"
config_dir.mkdir(parents=True, exist_ok=True)
# config file is stored as a toml
config_file_path = config_dir / "walden.conf"
if not config_file_path.exists():
_create_walden_config(config_file_path)
config = toml.load(config_file_path)
_validate_config(config)
return _parse_walden_config(config["walden"])
def main():
"""Parse arguments, fetch config, and route command to appropriate function"""
try:
args = _parse_args()
config = _get_config()
cmd, value = next(
(cmd, value) for cmd, value in vars(args).items() if value != None
)
# check if command is a flag
if value == True:
sys.exit(FLAG_MAPPING[cmd](config))
if cmd in ["build", "delete", "view", "today"]:
# verify journal exists and is accessible
journal_name = value[0]
journal_info = config.journals.get(journal_name)
if not journal_info:
raise WaldenException(
f"'{journal_name}' not found! Please create a journal before attempting to access it."
)
journal_path = journal_info.path
if not journal_path.exists():
raise WaldenException(
f"Expected to find '{journal_name}' at {journal_path}, but found nothing!"
)
sys.exit(ARGUMENT_MAPPING[cmd](value, config))
except WaldenException as we:
print_error(we)
sys.exit(1)
except Exception as e:
raise e
sys.exit(1)
|
[
"toml.dumps",
"argparse.ArgumentParser",
"pathlib.Path.home",
"pathlib.Path",
"toml.load",
"sys.exit"
] |
[((1147, 1222), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""edit and manage your walden journals"""'}), "(description='edit and manage your walden journals')\n", (1170, 1222), False, 'import argparse\n'), ((3684, 3711), 'toml.load', 'toml.load', (['config_file_path'], {}), '(config_file_path)\n', (3693, 3711), False, 'import toml\n'), ((1784, 1795), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1792, 1795), False, 'import sys\n'), ((2144, 2162), 'toml.dumps', 'toml.dumps', (['config'], {}), '(config)\n', (2154, 2162), False, 'import toml\n'), ((2711, 2738), 'pathlib.Path', 'Path', (["config['config_path']"], {}), "(config['config_path'])\n", (2715, 2738), False, 'from pathlib import Path\n'), ((2740, 2776), 'pathlib.Path', 'Path', (["config['default_journal_path']"], {}), "(config['default_journal_path'])\n", (2744, 2776), False, 'from pathlib import Path\n'), ((3409, 3420), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (3418, 3420), False, 'from pathlib import Path\n'), ((4959, 4970), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4967, 4970), False, 'import sys\n'), ((5023, 5034), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5031, 5034), False, 'import sys\n'), ((3068, 3086), 'pathlib.Path', 'Path', (['journal_path'], {}), '(journal_path)\n', (3072, 3086), False, 'from pathlib import Path\n'), ((2068, 2079), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (2077, 2079), False, 'from pathlib import Path\n')]
|
from dataclasses import dataclass
from decimal import Decimal
from common.config import (
BTCProxyConfig,
GRPCServerConfig,
IPFSConfig,
SQLAlchemyConfig,
W3Config,
)
@dataclass(frozen=True)
class WebauthnConfig:
rp_name: str
rp_id: str
origin: str
@dataclass(frozen=True)
class AuditorConfig:
sqlalchemy_config: SQLAlchemyConfig
grpc_server_config: GRPCServerConfig
btc_proxy_config: BTCProxyConfig
webauthn_config: WebauthnConfig
w3_config: W3Config
audit_folder: str
ipfs_config: IPFSConfig
audit_smart_contract_address: str
acceptable_exchange_rate_epsilon: Decimal
|
[
"dataclasses.dataclass"
] |
[((190, 212), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (199, 212), False, 'from dataclasses import dataclass\n'), ((286, 308), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (295, 308), False, 'from dataclasses import dataclass\n')]
|
from django.urls import path
from chats import consumers
websocket_urlpatterns = [
path('ws/v1/chat_rooms/<int:id>/', consumers.ChatConsumer),
]
|
[
"django.urls.path"
] |
[((89, 147), 'django.urls.path', 'path', (['"""ws/v1/chat_rooms/<int:id>/"""', 'consumers.ChatConsumer'], {}), "('ws/v1/chat_rooms/<int:id>/', consumers.ChatConsumer)\n", (93, 147), False, 'from django.urls import path\n')]
|
import numpy as np
from scipy.linalg import expm
from pymanopt.manifolds.manifold import EuclideanEmbeddedSubmanifold
from pymanopt.tools.multi import multiprod, multisym, multitransp
class Stiefel(EuclideanEmbeddedSubmanifold):
"""
Factory class for the Stiefel manifold. Instantiation requires the
dimensions n, p to be specified. Optional argument k allows the user to
optimize over the product of k Stiefels.
Elements are represented as n x p matrices (if k == 1), and as k x n x p
matrices if k > 1 (Note that this is different to manopt!).
"""
def __init__(self, n, p, k=1):
self._n = n
self._p = p
self._k = k
# Check that n is greater than or equal to p
if n < p or p < 1:
raise ValueError("Need n >= p >= 1. Values supplied were n = %d "
"and p = %d." % (n, p))
if k < 1:
raise ValueError("Need k >= 1. Value supplied was k = %d." % k)
if k == 1:
name = "Stiefel manifold St(%d, %d)" % (n, p)
elif k >= 2:
name = "Product Stiefel manifold St(%d, %d)^%d" % (n, p, k)
dimension = int(k * (n * p - p * (p + 1) / 2))
super().__init__(name, dimension)
@property
def typicaldist(self):
return np.sqrt(self._p * self._k)
def inner(self, X, G, H):
# Inner product (Riemannian metric) on the tangent space
# For the stiefel this is the Frobenius inner product.
return np.tensordot(G, H, axes=G.ndim)
def dist(self, X, Y):
raise NotImplementedError(
"The manifold '{:s}' currently provides no implementation of "
"the 'dist' method".format(self._get_class_name()))
def proj(self, X, U):
return U - multiprod(X, multisym(multiprod(multitransp(X), U)))
# TODO(nkoep): Implement the weingarten map instead.
def ehess2rhess(self, X, egrad, ehess, H):
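        # Convert a Euclidean gradient/Hessian pair into a Riemannian Hessian:
        # correct with the symmetric part of X^T egrad, then project onto the tangent space.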
XtG = multiprod(multitransp(X), egrad)
symXtG = multisym(XtG)
HsymXtG = multiprod(H, symXtG)
return self.proj(X, ehess - HsymXtG)
# Retract to the Stiefel using the qr decomposition of X + G.
def retr(self, X, G):
if self._k == 1:
# Calculate 'thin' qr decomposition of X + G
q, r = np.linalg.qr(X + G)
# Unflip any flipped signs
XNew = np.dot(q, np.diag(np.sign(np.sign(np.diag(r)) + 0.5)))
else:
XNew = X + G
for i in range(self._k):
q, r = np.linalg.qr(XNew[i])
XNew[i] = np.dot(
q, np.diag(np.sign(np.sign(np.diag(r)) + 0.5)))
return XNew
def norm(self, X, G):
# Norm on the tangent space of the Stiefel is simply the Euclidean
# norm.
return np.linalg.norm(G)
# Generate random Stiefel point using qr of random normally distributed
# matrix.
def rand(self):
if self._k == 1:
X = np.random.randn(self._n, self._p)
q, r = np.linalg.qr(X)
return q
X = np.zeros((self._k, self._n, self._p))
for i in range(self._k):
X[i], r = np.linalg.qr(np.random.randn(self._n, self._p))
return X
def randvec(self, X):
U = np.random.randn(*np.shape(X))
U = self.proj(X, U)
U = U / np.linalg.norm(U)
return U
def transp(self, x1, x2, d):
return self.proj(x2, d)
def exp(self, X, U):
# TODO: Simplify these expressions.
if self._k == 1:
W = expm(np.bmat([[X.T.dot(U), -U.T.dot(U)],
[np.eye(self._p), X.T.dot(U)]]))
Z = np.bmat([[expm(-X.T.dot(U))], [np.zeros((self._p, self._p))]])
Y = np.bmat([X, U]).dot(W).dot(Z)
else:
Y = np.zeros(np.shape(X))
for i in range(self._k):
W = expm(np.bmat([[X[i].T.dot(U[i]), -U[i].T.dot(U[i])],
[np.eye(self._p), X[i].T.dot(U[i])]]))
Z = np.bmat([[expm(-X[i].T.dot(U[i]))],
[np.zeros((self._p, self._p))]])
Y[i] = np.bmat([X[i], U[i]]).dot(W).dot(Z)
return Y
def zerovec(self, X):
if self._k == 1:
return np.zeros((self._n, self._p))
return np.zeros((self._k, self._n, self._p))
|
[
"numpy.eye",
"numpy.random.randn",
"numpy.tensordot",
"numpy.linalg.qr",
"numpy.zeros",
"pymanopt.tools.multi.multiprod",
"numpy.shape",
"numpy.bmat",
"pymanopt.tools.multi.multitransp",
"numpy.linalg.norm",
"pymanopt.tools.multi.multisym",
"numpy.diag",
"numpy.sqrt"
] |
[((1309, 1335), 'numpy.sqrt', 'np.sqrt', (['(self._p * self._k)'], {}), '(self._p * self._k)\n', (1316, 1335), True, 'import numpy as np\n'), ((1510, 1541), 'numpy.tensordot', 'np.tensordot', (['G', 'H'], {'axes': 'G.ndim'}), '(G, H, axes=G.ndim)\n', (1522, 1541), True, 'import numpy as np\n'), ((2011, 2024), 'pymanopt.tools.multi.multisym', 'multisym', (['XtG'], {}), '(XtG)\n', (2019, 2024), False, 'from pymanopt.tools.multi import multiprod, multisym, multitransp\n'), ((2043, 2063), 'pymanopt.tools.multi.multiprod', 'multiprod', (['H', 'symXtG'], {}), '(H, symXtG)\n', (2052, 2063), False, 'from pymanopt.tools.multi import multiprod, multisym, multitransp\n'), ((2812, 2829), 'numpy.linalg.norm', 'np.linalg.norm', (['G'], {}), '(G)\n', (2826, 2829), True, 'import numpy as np\n'), ((3085, 3122), 'numpy.zeros', 'np.zeros', (['(self._k, self._n, self._p)'], {}), '((self._k, self._n, self._p))\n', (3093, 3122), True, 'import numpy as np\n'), ((4341, 4378), 'numpy.zeros', 'np.zeros', (['(self._k, self._n, self._p)'], {}), '((self._k, self._n, self._p))\n', (4349, 4378), True, 'import numpy as np\n'), ((1971, 1985), 'pymanopt.tools.multi.multitransp', 'multitransp', (['X'], {}), '(X)\n', (1982, 1985), False, 'from pymanopt.tools.multi import multiprod, multisym, multitransp\n'), ((2303, 2322), 'numpy.linalg.qr', 'np.linalg.qr', (['(X + G)'], {}), '(X + G)\n', (2315, 2322), True, 'import numpy as np\n'), ((2982, 3015), 'numpy.random.randn', 'np.random.randn', (['self._n', 'self._p'], {}), '(self._n, self._p)\n', (2997, 3015), True, 'import numpy as np\n'), ((3035, 3050), 'numpy.linalg.qr', 'np.linalg.qr', (['X'], {}), '(X)\n', (3047, 3050), True, 'import numpy as np\n'), ((3356, 3373), 'numpy.linalg.norm', 'np.linalg.norm', (['U'], {}), '(U)\n', (3370, 3373), True, 'import numpy as np\n'), ((4297, 4325), 'numpy.zeros', 'np.zeros', (['(self._n, self._p)'], {}), '((self._n, self._p))\n', (4305, 4325), True, 'import numpy as np\n'), ((2535, 2556), 'numpy.linalg.qr', 'np.linalg.qr', (['XNew[i]'], {}), '(XNew[i])\n', (2547, 2556), True, 'import numpy as np\n'), ((3191, 3224), 'numpy.random.randn', 'np.random.randn', (['self._n', 'self._p'], {}), '(self._n, self._p)\n', (3206, 3224), True, 'import numpy as np\n'), ((3299, 3310), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (3307, 3310), True, 'import numpy as np\n'), ((3836, 3847), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (3844, 3847), True, 'import numpy as np\n'), ((1821, 1835), 'pymanopt.tools.multi.multitransp', 'multitransp', (['X'], {}), '(X)\n', (1832, 1835), False, 'from pymanopt.tools.multi import multiprod, multisym, multitransp\n'), ((3719, 3747), 'numpy.zeros', 'np.zeros', (['(self._p, self._p)'], {}), '((self._p, self._p))\n', (3727, 3747), True, 'import numpy as np\n'), ((3640, 3655), 'numpy.eye', 'np.eye', (['self._p'], {}), '(self._p)\n', (3646, 3655), True, 'import numpy as np\n'), ((3767, 3782), 'numpy.bmat', 'np.bmat', (['[X, U]'], {}), '([X, U])\n', (3774, 3782), True, 'import numpy as np\n'), ((4118, 4146), 'numpy.zeros', 'np.zeros', (['(self._p, self._p)'], {}), '((self._p, self._p))\n', (4126, 4146), True, 'import numpy as np\n'), ((2415, 2425), 'numpy.diag', 'np.diag', (['r'], {}), '(r)\n', (2422, 2425), True, 'import numpy as np\n'), ((3994, 4009), 'numpy.eye', 'np.eye', (['self._p'], {}), '(self._p)\n', (4000, 4009), True, 'import numpy as np\n'), ((4173, 4194), 'numpy.bmat', 'np.bmat', (['[X[i], U[i]]'], {}), '([X[i], U[i]])\n', (4180, 4194), True, 'import numpy as np\n'), ((2638, 2648), 'numpy.diag', 'np.diag', 
(['r'], {}), '(r)\n', (2645, 2648), True, 'import numpy as np\n')]
|
from mock import patch, Mock
from tests.helpers.testbase import TestBase
from cerami.datatype import String
from cerami.datatype.translator import BaseDatatypeTranslator
class TestBaseDatatypeTranslator(TestBase):
def setUp(self):
self.dt = String()
self.translator = BaseDatatypeTranslator(self.dt)
def test_to_dynamodb_none(self):
"""it returns the NULL object when value is None"""
assert self.translator.to_dynamodb(None) == {'NULL': True}
def test_to_dynamodb(self):
"""it returns a dict
with the key the condition_type
and the value the result of resolve()
"""
with patch('cerami.datatype.translator.BaseDatatypeTranslator.format_for_dynamodb') as resolve:
resolve.return_value = "mocked"
res = self.translator.to_dynamodb('test')
assert res == {"S": "mocked"}
def test_to_cerami_null(self):
"""it returns None when mapped_dict is NULL"""
assert self.translator.to_cerami({'NULL': True}) == None
def test_to_cerami_calls_format_for_cerami(self):
"""calls format_for_cerami when the value is not NULL"""
self.translator.format_for_cerami = Mock()
self.translator.to_cerami({'S': 'test'})
self.translator.format_for_cerami.assert_called_with('test')
def test_format_for_cerami(self):
"""returns the value"""
assert self.translator.format_for_cerami('test') == 'test'
|
[
"mock.Mock",
"mock.patch",
"cerami.datatype.translator.BaseDatatypeTranslator",
"cerami.datatype.String"
] |
[((254, 262), 'cerami.datatype.String', 'String', ([], {}), '()\n', (260, 262), False, 'from cerami.datatype import String\n'), ((289, 320), 'cerami.datatype.translator.BaseDatatypeTranslator', 'BaseDatatypeTranslator', (['self.dt'], {}), '(self.dt)\n', (311, 320), False, 'from cerami.datatype.translator import BaseDatatypeTranslator\n'), ((1210, 1216), 'mock.Mock', 'Mock', ([], {}), '()\n', (1214, 1216), False, 'from mock import patch, Mock\n'), ((659, 737), 'mock.patch', 'patch', (['"""cerami.datatype.translator.BaseDatatypeTranslator.format_for_dynamodb"""'], {}), "('cerami.datatype.translator.BaseDatatypeTranslator.format_for_dynamodb')\n", (664, 737), False, 'from mock import patch, Mock\n')]
|
from json import dumps
from .base import Base
class Show(Base):
"""Show the databases and the tables using this command.
Usage:
puchkidb show [options]
options:
dbs: show all the databases in the server
tables: show all the tables inside a database
Example:
puchkidb show dbs
"""
def run(self):
print('Hello, world!')
print('You supplied the following options:', dumps(self.options, indent=2, sort_keys=True))
|
[
"json.dumps"
] |
[((444, 489), 'json.dumps', 'dumps', (['self.options'], {'indent': '(2)', 'sort_keys': '(True)'}), '(self.options, indent=2, sort_keys=True)\n', (449, 489), False, 'from json import dumps\n')]
|
import logging
from fuzzywuzzy import process
from telegram import (
Update,
InlineQuery,
InlineQueryResultArticle,
InlineKeyboardButton,
InlineKeyboardMarkup,
)
from telegram.ext import CallbackContext
from typing import List, Optional
from pythonidbot.constants import HINTS
from pythonidbot.utils.helpers import article, build_menu
class Hints:
def __init__(self, hints: Optional[List[dict]] = None):
self.logger = logging.getLogger(__name__)
self.score_cutoff = 60
self.hints = hints or list(HINTS.values())
self.hints_q_dict = dict(enumerate([hint["help"] for hint in self.hints]))
self.hashtag_q_dict = dict(enumerate([hint["key"] for hint in self.hints]))
@property
def hints_article(self):
return [self.article(hint) for hint in self.hints]
def __call__(
self,
query: str,
limit: int = 10,
score_cutoff: int = 60,
) -> List[InlineQueryResultArticle]:
return self.find(query, limit, score_cutoff)
def find(
self,
query: str,
limit: int = 10,
score_cutoff: int = 60,
) -> List[InlineQueryResultArticle]:
self.logger.debug(f"Mencari hint dengan keyword `{query}`")
best_hints = process.extractBests(
query=query,
choices=self.hints_q_dict,
score_cutoff=score_cutoff,
limit=limit,
)
self.logger.debug(f"Ditemukan {len(best_hints)} kemungkinan hint")
return [self.hints_article[z] for (x, y, z) in best_hints] if best_hints else []
def article(
self,
data: dict,
query: Optional[str] = None,
markup: Optional[InlineKeyboardMarkup] = None,
) -> InlineQueryResultArticle:
message: str = data["message"]
if "{query}" in message:
query = query or data["default"]
message = message.replace("{query}", query)
if "buttons" in data:
markup = self.make_button(data["buttons"])
return article(
title=data["help"],
description=f'{data["key"]}: {message}',
message_text=message,
reply_markup=markup,
)
def make_button(self, buttons: List[dict]) -> InlineKeyboardMarkup:
keyboards: List[InlineKeyboardButton] = list()
for data in buttons:
keyboards.append(InlineKeyboardButton(**data))
menu = build_menu(keyboards, 1)
return InlineKeyboardMarkup(menu)
def hashtag_handler(self, update: Update, context: CallbackContext):
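        # Resolve an inline "#hashtag query": fuzzy-match the hashtag against the known
        # hint keys and answer the inline query with the matching hint rendered for `query`.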
if not update.inline_query:
return
query = update.inline_query.query
query = query.lstrip("#")
hashtag = query.split(" ")[0]
query = query.lstrip(hashtag)
if len(hashtag) < 3:
return
result = process.extractOne(
hashtag,
choices=self.hashtag_q_dict,
score_cutoff=self.score_cutoff,
)
if not result:
return
value, score, index = result
results = [self.article(self.hints[index], query=query)]
return update.inline_query.answer(results)
|
[
"pythonidbot.utils.helpers.article",
"fuzzywuzzy.process.extractOne",
"telegram.InlineKeyboardButton",
"fuzzywuzzy.process.extractBests",
"pythonidbot.utils.helpers.build_menu",
"telegram.InlineKeyboardMarkup",
"pythonidbot.constants.HINTS.values",
"logging.getLogger"
] |
[((453, 480), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (470, 480), False, 'import logging\n'), ((1273, 1378), 'fuzzywuzzy.process.extractBests', 'process.extractBests', ([], {'query': 'query', 'choices': 'self.hints_q_dict', 'score_cutoff': 'score_cutoff', 'limit': 'limit'}), '(query=query, choices=self.hints_q_dict, score_cutoff=\n score_cutoff, limit=limit)\n', (1293, 1378), False, 'from fuzzywuzzy import process\n'), ((2049, 2164), 'pythonidbot.utils.helpers.article', 'article', ([], {'title': "data['help']", 'description': 'f"""{data[\'key\']}: {message}"""', 'message_text': 'message', 'reply_markup': 'markup'}), '(title=data[\'help\'], description=f"{data[\'key\']}: {message}",\n message_text=message, reply_markup=markup)\n', (2056, 2164), False, 'from pythonidbot.utils.helpers import article, build_menu\n'), ((2451, 2475), 'pythonidbot.utils.helpers.build_menu', 'build_menu', (['keyboards', '(1)'], {}), '(keyboards, 1)\n', (2461, 2475), False, 'from pythonidbot.utils.helpers import article, build_menu\n'), ((2491, 2517), 'telegram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['menu'], {}), '(menu)\n', (2511, 2517), False, 'from telegram import Update, InlineQuery, InlineQueryResultArticle, InlineKeyboardButton, InlineKeyboardMarkup\n'), ((2864, 2957), 'fuzzywuzzy.process.extractOne', 'process.extractOne', (['hashtag'], {'choices': 'self.hashtag_q_dict', 'score_cutoff': 'self.score_cutoff'}), '(hashtag, choices=self.hashtag_q_dict, score_cutoff=self.\n score_cutoff)\n', (2882, 2957), False, 'from fuzzywuzzy import process\n'), ((547, 561), 'pythonidbot.constants.HINTS.values', 'HINTS.values', ([], {}), '()\n', (559, 561), False, 'from pythonidbot.constants import HINTS\n'), ((2406, 2434), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', ([], {}), '(**data)\n', (2426, 2434), False, 'from telegram import Update, InlineQuery, InlineQueryResultArticle, InlineKeyboardButton, InlineKeyboardMarkup\n')]
|
# Compare Algorithms
import pandas
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import label_binarize
import os
import sys
config_path = "utilities/"
sys.path.append(os.path.abspath(config_path))
from MyAPI import MyAPI
api = MyAPI()
X, Y = api.get_dataset(0, start_index=0,end_index=20000, nr=20000)
# prepare models
models = []
models.append(('KNN', KNeighborsClassifier()))
models.append(('Decision Tree', DecisionTreeClassifier()))
models.append(('Gaussian', GaussianNB()))
models.append(('SVM', SVC()))
classes=list(set(Y))
# prepare configuration for cross validation test harness
seed = 7
# evaluate each model in turn
results = []
names = []
scoring = 'accuracy'
for name, model in models:
kfold = model_selection.KFold(n_splits=10, random_state=seed)
cv_results = model_selection.cross_val_score(model, X, Y, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# boxplot algorithm comparison
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.savefig('compare.png')
|
[
"os.path.abspath",
"sklearn.naive_bayes.GaussianNB",
"matplotlib.pyplot.boxplot",
"sklearn.model_selection.cross_val_score",
"sklearn.model_selection.KFold",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.figure",
"matplotlib.use",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.svm.SVC",
"MyAPI.MyAPI",
"matplotlib.pyplot.savefig"
] |
[((60, 74), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (67, 74), True, 'import matplotlib as mpl\n'), ((656, 663), 'MyAPI.MyAPI', 'MyAPI', ([], {}), '()\n', (661, 663), False, 'from MyAPI import MyAPI\n'), ((1464, 1476), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1474, 1476), True, 'import matplotlib.pyplot as plt\n'), ((1540, 1560), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['results'], {}), '(results)\n', (1551, 1560), True, 'import matplotlib.pyplot as plt\n'), ((1587, 1613), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""compare.png"""'], {}), "('compare.png')\n", (1598, 1613), True, 'import matplotlib.pyplot as plt\n'), ((595, 623), 'os.path.abspath', 'os.path.abspath', (['config_path'], {}), '(config_path)\n', (610, 623), False, 'import os\n'), ((1145, 1198), 'sklearn.model_selection.KFold', 'model_selection.KFold', ([], {'n_splits': '(10)', 'random_state': 'seed'}), '(n_splits=10, random_state=seed)\n', (1166, 1198), False, 'from sklearn import model_selection\n'), ((1216, 1287), 'sklearn.model_selection.cross_val_score', 'model_selection.cross_val_score', (['model', 'X', 'Y'], {'cv': 'kfold', 'scoring': 'scoring'}), '(model, X, Y, cv=kfold, scoring=scoring)\n', (1247, 1287), False, 'from sklearn import model_selection\n'), ((783, 805), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (803, 805), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((840, 864), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (862, 864), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((894, 906), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (904, 906), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((931, 936), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (934, 936), False, 'from sklearn.svm import SVC\n')]
|
import os
import subprocess as subp
import sys
class Bench:
moreisbetter = False
def run(interp):
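        # Run the jsnes shell benchmark under the given JS interpreter and report the
        # averages of the 'warmup:' and 'result:' lines it prints.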
os.chdir(os.path.join(sys.path[0], 'vendor', 'jsnes', 'test'))
res = subp.run([interp, 'shell-bench.js'], stdout=subp.PIPE, universal_newlines=True)
if res.returncode != 0:
return "Error when trying to run jsnes with interpreter " + interp + "!"
warmup, result = 0., 0.
nwarmup, nresult = 0., 0.
for line in res.stdout.split('\n'):
res = line.split(':')
if len(res) != 2:
continue
if res[0] == 'warmup':
warmup += float(res[1])
nwarmup += 1
elif res[0] == 'result':
result += float(res[1])
nresult += 1
return {
'warmup': warmup / nwarmup,
'~~~~ Result': result / nresult,
}
|
[
"subprocess.run",
"os.path.join"
] |
[((193, 272), 'subprocess.run', 'subp.run', (["[interp, 'shell-bench.js']"], {'stdout': 'subp.PIPE', 'universal_newlines': '(True)'}), "([interp, 'shell-bench.js'], stdout=subp.PIPE, universal_newlines=True)\n", (201, 272), True, 'import subprocess as subp\n'), ((125, 177), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""vendor"""', '"""jsnes"""', '"""test"""'], {}), "(sys.path[0], 'vendor', 'jsnes', 'test')\n", (137, 177), False, 'import os\n')]
|
###############################################################
import os
import cv2
import math
import numpy as np
from scipy.ndimage import interpolation as inter
from scipy.ndimage import rotate
###############################################################
C_percentage = 0
ACCEPTED_EXTENSIONS = (".jpeg", ".jpg", ".png", ".tif", ".tiff", ".bmp", ".dib", ".jpe", ".jp2", ".webp", ".pbm", ".pgm", ".ppm", ".sr", ".ras")
###############################################################
def euclidian_distance(first, second):
return math.sqrt(sum([pow(max(x, y) - min(x, y), 2) for x, y in zip(first, second)]))
def color_difference(first, second, precision = 100):
return euclidian_distance(first, second) > precision
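# Projection-profile skew score: rotate the binarised page and measure how sharply adjacent row sums differ.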
def precision(arr, angle):
hit = np.sum(inter.rotate(arr, angle, reshape = False, order = 0), axis = 1)
prec = np.sum((hit[1:]-hit[:-1])**2)
return prec
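# Rotate an image about its centre, filling the exposed borders with white.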
def rotateImage(image, angle):
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(255,255,255))
return result
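# Deskew and auto-crop every accepted image in abs_folder_in, saving each result as <n>.jpg in abs_folder_out;
# the module-level C_percentage tracks overall progress.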
def crop(abs_folder_in, abs_folder_out, debug):
global C_percentage
images_list = [i for i in os.listdir(abs_folder_in) if i.endswith(ACCEPTED_EXTENSIONS) and i[:2] != "ad"]
if debug: print("\n".join(images_list))
images_list = sorted(images_list, key = lambda i: int(i[:-4]))
for c, image_path in enumerate(images_list, 1):
original_image = cv2.imread(os.path.join(abs_folder_in, image_path), 0)
sheet = cv2.resize(original_image, (0, 0), fx = 0.125, fy = 0.125)
ret, sheet = cv2.threshold(sheet, 127, 255, cv2.THRESH_BINARY)
wd, ht = sheet.shape
pix = np.array(sheet, np.uint8)
bin_img = 1 - (pix / 255.0)
limit, delta = 10, 1
angles = np.arange(-limit, limit+delta, delta)
scores = [precision(bin_img, angle) for angle in angles]
best = angles[scores.index(max(scores))]
original_image = rotateImage(cv2.imread(os.path.join(abs_folder_in, image_path)), best)
w, h, z = original_image.shape
K = 500 / max(w, h)
resized_image = cv2.resize(original_image, (0, 0), fx = K, fy = K)
w, h, z = resized_image.shape
mx, my = int(w / 2), int(h / 2)
startx = 0
starty = 0
endx = w
endy = h
for i in range(1, w):
if color_difference(resized_image[i, my], resized_image[i - 1, my]):
startx = i
break
for i in range(w - 2, 0, -1):
if color_difference(resized_image[i, my], resized_image[i + 1, my]):
endx = i
break
for i in range(1, h):
if color_difference(resized_image[mx, i], resized_image[mx, i - 1]):
starty = i
break
for i in range(h - 2, 0, -1):
if color_difference(resized_image[mx, i], resized_image[mx, i + 1]):
endy = i
break
if endx <= startx:
endx = w
if endy <= starty:
endy = h
startx, starty, endx, endy = int(startx * (1 / K)), int(starty * (1 / K)), int(endx * (1 / K)), int(endy * (1 / K))
jump = int(1 / K * 10)
if debug:
print("Angle : ", best)
print("K : ", K, " jump : ", jump)
print("(", startx, ", ", starty, ") -> (", endx, ", ", endy, ")")
print("Saving...")
if (endx-jump) - (startx+jump) < (w*K)/3 or (endy-jump) - (starty+jump) < (h*K)/3:
cv2.imwrite(os.path.join(abs_folder_out, str(c) + ".jpg"), original_image)
else:
cv2.imwrite(os.path.join(abs_folder_out, str(c) + ".jpg"), original_image[startx + jump : endx - jump, starty + jump : endy - jump])
if debug: print("Done ", c, " of ", len(images_list))
C_percentage += 1 / len(images_list)
C_percentage = 0
def get_percentage():
global C_percentage
return C_percentage
###############################################################
|
[
"os.listdir",
"numpy.sum",
"cv2.threshold",
"scipy.ndimage.interpolation.rotate",
"cv2.warpAffine",
"numpy.array",
"numpy.arange",
"os.path.join",
"cv2.getRotationMatrix2D",
"cv2.resize"
] |
[((847, 880), 'numpy.sum', 'np.sum', (['((hit[1:] - hit[:-1]) ** 2)'], {}), '((hit[1:] - hit[:-1]) ** 2)\n', (853, 880), True, 'import numpy as np\n'), ((997, 1046), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['image_center', 'angle', '(1.0)'], {}), '(image_center, angle, 1.0)\n', (1020, 1046), False, 'import cv2\n'), ((1060, 1199), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'rot_mat', 'image.shape[1::-1]'], {'flags': 'cv2.INTER_LINEAR', 'borderMode': 'cv2.BORDER_CONSTANT', 'borderValue': '(255, 255, 255)'}), '(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR,\n borderMode=cv2.BORDER_CONSTANT, borderValue=(255, 255, 255))\n', (1074, 1199), False, 'import cv2\n'), ((772, 820), 'scipy.ndimage.interpolation.rotate', 'inter.rotate', (['arr', 'angle'], {'reshape': '(False)', 'order': '(0)'}), '(arr, angle, reshape=False, order=0)\n', (784, 820), True, 'from scipy.ndimage import interpolation as inter\n'), ((1653, 1707), 'cv2.resize', 'cv2.resize', (['original_image', '(0, 0)'], {'fx': '(0.125)', 'fy': '(0.125)'}), '(original_image, (0, 0), fx=0.125, fy=0.125)\n', (1663, 1707), False, 'import cv2\n'), ((1733, 1782), 'cv2.threshold', 'cv2.threshold', (['sheet', '(127)', '(255)', 'cv2.THRESH_BINARY'], {}), '(sheet, 127, 255, cv2.THRESH_BINARY)\n', (1746, 1782), False, 'import cv2\n'), ((1826, 1851), 'numpy.array', 'np.array', (['sheet', 'np.uint8'], {}), '(sheet, np.uint8)\n', (1834, 1851), True, 'import numpy as np\n'), ((1934, 1973), 'numpy.arange', 'np.arange', (['(-limit)', '(limit + delta)', 'delta'], {}), '(-limit, limit + delta, delta)\n', (1943, 1973), True, 'import numpy as np\n'), ((2273, 2319), 'cv2.resize', 'cv2.resize', (['original_image', '(0, 0)'], {'fx': 'K', 'fy': 'K'}), '(original_image, (0, 0), fx=K, fy=K)\n', (2283, 2319), False, 'import cv2\n'), ((949, 977), 'numpy.array', 'np.array', (['image.shape[1::-1]'], {}), '(image.shape[1::-1])\n', (957, 977), True, 'import numpy as np\n'), ((1314, 1339), 'os.listdir', 'os.listdir', (['abs_folder_in'], {}), '(abs_folder_in)\n', (1324, 1339), False, 'import os\n'), ((1593, 1632), 'os.path.join', 'os.path.join', (['abs_folder_in', 'image_path'], {}), '(abs_folder_in, image_path)\n', (1605, 1632), False, 'import os\n'), ((2134, 2173), 'os.path.join', 'os.path.join', (['abs_folder_in', 'image_path'], {}), '(abs_folder_in, image_path)\n', (2146, 2173), False, 'import os\n')]
|
import turtle
captain = turtle.Turtle()
print("\n--------------------------")
print("Ad ve Soyad : <NAME>\nOkul No : 203305028")
print("----------------------------")
kenar = float(input("Kenar Uzunluklarini Gir : "))
yariCap = float(input("Dairenin Yaricapini Gir : "))
x = 0
y = 0
count = 0
kenar += yariCap
captain.shape("circle")
for i in range(4):
captain.up()
captain.goto(x,y)
captain.down()
captain.circle(yariCap)
count += 1
if(count == 1):
x = kenar + yariCap
elif(count == 2):
x = 0
y = (-kenar) - yariCap
elif(count == 3):
x = kenar + yariCap
captain.up()
captain.goto(0,yariCap)
captain.down()
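# Trace the connecting square, stamping a dot at each corner.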
for i in range(4):
captain.shape("arrow")
captain.dot()
captain.forward(x)
captain.right(90)
|
[
"turtle.Turtle"
] |
[((25, 40), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (38, 40), False, 'import turtle\n')]
|
from logging import error
import numpy as np
class Plane:
def __init__(self, origin: np.array, vector1: np.array, vector2: np.array) -> None:
self.origin = origin
self.vector1 = vector1
self.vector2 = vector2
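    # Solve for where the ray point + t*direction meets the plane and return the (x, y)
    # coefficients of that intersection in the plane's (vector1, vector2) basis.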
def getBarycentricCoordinates(self, point: np.array, direction: np.array):
a = np.array([self.vector1, self.vector2, -direction]).T
b = point - self.origin
sol = np.linalg.solve(a, b)
return np.array([sol[0], sol[1]])
def convertBarycentricCoordinates(self, x, y):
return self.origin + x * self.vector1 + y * self.vector2
class Sphere:
"""https://math.stackexchange.com/questions/268064/move-a-point-up-and-down-along-a-sphere"""
def __init__(self, radius) -> None:
self.radius = radius
self.current_pos = [90, -90]
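    # Advance the stored (theta, phi) angles by (x, y) degrees and return the corresponding Cartesian point on the sphere.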
def rotate(self, x, y):
self.current_pos[0] = (self.current_pos[0] + x) % 360
self.current_pos[1] = (self.current_pos[1] + y) % 360
theta, phi = np.deg2rad(self.current_pos[0]), np.deg2rad(self.current_pos[1])
return np.array(
[
self.radius * np.sin(theta) * np.cos(phi),
self.radius * np.sin(theta) * np.sin(phi),
self.radius * np.cos(theta),
]
)
class RotationMatrix3D:
def __init__(self) -> None:
pass
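    # Rotate object_to_rotate about the chosen axis (0 = x, 1 = y, 2 = z) by `angle` radians.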
def __call__(
self, object_to_rotate: np.ndarray, axis: int, angle: float
) -> np.ndarray:
if axis == 0:
rotation_matrix = np.array(
[
[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)],
]
)
elif axis == 1:
rotation_matrix = np.array(
[
[np.cos(angle), 0, np.sin(angle)],
[0, 1, 0],
[-np.sin(angle), 0, np.cos(angle)],
]
)
elif axis == 2:
rotation_matrix = np.array(
[
[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1],
]
)
else:
raise error("Invalid argument for axis, options are 0, 1, 2")
return np.matmul(rotation_matrix, object_to_rotate)
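# Return the quadrant (1-4) containing (x, y); the origin yields -1 and points on an axis fall into quadrant 1 or 3.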
def getQuadrant(x: float, y: float):
if x == 0 and y == 0:
return -1
if x >= 0 and y >= 0:
return 1
elif x > 0 and y < 0:
return 2
elif x <= 0 and y <= 0:
return 3
elif x < 0 and y > 0:
return 4
|
[
"logging.error",
"numpy.deg2rad",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.matmul",
"numpy.linalg.solve"
] |
[((430, 451), 'numpy.linalg.solve', 'np.linalg.solve', (['a', 'b'], {}), '(a, b)\n', (445, 451), True, 'import numpy as np\n'), ((468, 494), 'numpy.array', 'np.array', (['[sol[0], sol[1]]'], {}), '([sol[0], sol[1]])\n', (476, 494), True, 'import numpy as np\n'), ((2348, 2392), 'numpy.matmul', 'np.matmul', (['rotation_matrix', 'object_to_rotate'], {}), '(rotation_matrix, object_to_rotate)\n', (2357, 2392), True, 'import numpy as np\n'), ((331, 381), 'numpy.array', 'np.array', (['[self.vector1, self.vector2, -direction]'], {}), '([self.vector1, self.vector2, -direction])\n', (339, 381), True, 'import numpy as np\n'), ((1008, 1039), 'numpy.deg2rad', 'np.deg2rad', (['self.current_pos[0]'], {}), '(self.current_pos[0])\n', (1018, 1039), True, 'import numpy as np\n'), ((1041, 1072), 'numpy.deg2rad', 'np.deg2rad', (['self.current_pos[1]'], {}), '(self.current_pos[1])\n', (1051, 1072), True, 'import numpy as np\n'), ((1158, 1169), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1164, 1169), True, 'import numpy as np\n'), ((1217, 1228), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1223, 1228), True, 'import numpy as np\n'), ((1260, 1273), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1266, 1273), True, 'import numpy as np\n'), ((2276, 2331), 'logging.error', 'error', (['"""Invalid argument for axis, options are 0, 1, 2"""'], {}), "('Invalid argument for axis, options are 0, 1, 2')\n", (2281, 2331), False, 'from logging import error\n'), ((1142, 1155), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1148, 1155), True, 'import numpy as np\n'), ((1201, 1214), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1207, 1214), True, 'import numpy as np\n'), ((1613, 1626), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1619, 1626), True, 'import numpy as np\n'), ((1669, 1682), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1675, 1682), True, 'import numpy as np\n'), ((1684, 1697), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1690, 1697), True, 'import numpy as np\n'), ((1629, 1642), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1635, 1642), True, 'import numpy as np\n'), ((1835, 1848), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1841, 1848), True, 'import numpy as np\n'), ((1853, 1866), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1859, 1866), True, 'import numpy as np\n'), ((1940, 1953), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1946, 1953), True, 'import numpy as np\n'), ((1922, 1935), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1928, 1935), True, 'import numpy as np\n'), ((2091, 2104), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (2097, 2104), True, 'import numpy as np\n'), ((2147, 2160), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (2153, 2160), True, 'import numpy as np\n'), ((2162, 2175), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (2168, 2175), True, 'import numpy as np\n'), ((2107, 2120), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (2113, 2120), True, 'import numpy as np\n')]
|