"""Neurally implanted schematic for moving around on the ship.
It is illegal for prisoners in transit to activate such an
implant. Failure to comply carries a minimum sentence of
six months.
Many parts of the ship are derelict and inaccessible.
"""
from pyntnclick.i18n import _
from pyntnclick.state import Scene, Thing, Result
from pyntnclick.scenewidgets import (
InteractRectUnion, InteractUnion, InteractText, InteractNoImage)
from gamelib.scenes.game_constants import PLAYER_ID
from gamelib.scenes.game_widgets import make_jim_dialog, make_sentence_dialog
class Map(Scene):
FOLDER = "map"
BACKGROUND = 'map.png'
INITIAL_DATA = {
'implant': True,
}
def setup(self):
self.add_thing(ToCryo())
self.add_thing(ToBridge())
self.add_thing(ToMess())
self.add_thing(ToEngine())
self.add_thing(ToMachine())
self.add_thing(ToCrew())
self.add_thing(InaccessibleArea())
self.add_thing(HydroponicsArea())
def enter(self):
if self.get_data('implant'):
self.set_data('implant', False)
ai1 = make_jim_dialog(
_("Under the terms of the emergency conscription "
"act, I have downloaded the ship's schematics to your "
"neural implant to help you navigate around the ship."),
self.game)
if ai1:
self.state.increase_sentence(3)
return ai1, make_jim_dialog(
_("Prisoner %s, you are a "
"class 1 felon. Obtaining access to the ship's "
"schematics constitutes a level 2 offence and carries a "
"minimal penalty of an additional 3 years on your "
"sentence.") % PLAYER_ID,
self.game), make_sentence_dialog(PLAYER_ID, self.game)
class DoorThing(Thing):
# name of destination
DEST = None
def interact(self, _item):
"""Go to destination."""
if self.DEST in self.game.scenes:
self.game.change_scene(self.DEST)
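# The concrete doors below mostly just set the class-level NAME, DEST,
# INTERACTS and INITIAL attributes; DoorThing.interact() performs the scene
# change (ToEngine additionally checks for a helmet first).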
class ToCryo(DoorThing):
"Way to cryo room."
NAME = "map.tocryo"
DEST = "cryo"
INTERACTS = {
'door': InteractUnion((
InteractNoImage(515, 158, 56, 68),
InteractText(
361, 512, 245, 33, _("Prisoner cryo chambers"),
'white', 20, 'Monospace.ttf'),
))
}
INITIAL = 'door'
class ToBridge(DoorThing):
"Way to bridge room."
NAME = "map.tobridge"
DEST = "bridge"
INTERACTS = {
'door': InteractUnion((
InteractNoImage(36, 260, 60, 83),
InteractText(
26, 170, 71, 33, _("Bridge"), 'white', 20,
'Monospace.ttf'),
))
}
INITIAL = 'door'
class ToMess(DoorThing):
"Way to cryo room."
NAME = "map.tomess"
DEST = "mess"
INTERACTS = {
'door': InteractUnion((
InteractNoImage(395, 262, 64, 80),
InteractText(
341, 430, 110, 33, _("Mess hall"), 'white', 20,
'Monospace.ttf'),
))
}
INITIAL = 'door'
class ToEngine(DoorThing):
"Way to engine room."
NAME = "map.toengine"
DEST = "engine"
INTERACTS = {
'door': InteractUnion((
InteractNoImage(691, 279, 76, 54),
InteractText(
662, 496, 128, 33, _("Engine room"), 'white', 20,
'Monospace.ttf'),
))
}
INITIAL = 'door'
def interact(self, item):
if not self.game.is_in_inventory('helmet:'):
return Result(
_('The airlock refuses to open. The automated'
' voice says: "Hull breach beyond this door. Personnel'
' must be equipped for vacuum before entry."'))
else:
return super(ToEngine, self).interact(item)
class ToMachine(DoorThing):
"Way to machine room."
NAME = "map.tomachine"
DEST = "machine"
INTERACTS = {
'door': InteractUnion((
InteractNoImage(608, 156, 57, 72),
InteractText(
578, 83, 140, 33, _("Machine room"), 'white', 20,
'Monospace.ttf'),
))
}
INITIAL = 'door'
class ToCrew(DoorThing):
"Way to crew quarters."
NAME = "map.tocrew_quarters"
DEST = "crew_quarters"
INTERACTS = {
'door': InteractUnion((
InteractNoImage(210, 321, 37, 64),
InteractText(
69, 460, 160, 33, _("Crew quarters"), 'white', 20,
'Monospace.ttf'),
))
}
INITIAL = 'door'
class InaccessibleArea(Thing):
NAME = 'map.inaccessible'
INTERACTS = {
'areas': InteractRectUnion((
(207, 227, 39, 63),
(256, 225, 35, 64),
(259, 322, 34, 64),
(514, 380, 58, 66),
(607, 377, 60, 70),
))
}
INITIAL = 'areas'
def interact(self, _item):
return Result(_("You look in the door, but just see empty space: "
"that room appears to have been obliterated by "
"meteors."))
class HydroponicsArea(Thing):
NAME = 'map.hydroponics'
INTERACTS = {
'areas': InteractUnion((
InteractNoImage(314, 263, 73, 81),
InteractText(
313, 132, 140, 33, _("Hydroponics"), 'white', 20,
'Monospace.ttf'),
))
}
INITIAL = 'areas'
def interact(self, _item):
return Result(_("Peering in through the window, you see that the "
"entire chamber is overgrown with giant broccoli. "
"It would take you years to cut a path through that."))
SCENES = [Map]
# -*- coding: utf-8 -*-
# This file is part of Knitlib. It is based on AYAB.
#
# Knitlib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Knitlib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Knitlib. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2015 <NAME>, <NAME>
import pytest
import unittest
import os
from knitlib.plugins.ayab_plugin.ayab_image import ayabImage
from PIL import Image
class TestImage(unittest.TestCase):
def setUp(self):
self.script_dir = os.path.dirname(os.path.abspath(__file__))
self.filename_text = u"mushroom.png"
self.conf = {}
self.conf["filename"] = self.filename_text
self.pil_image = Image.open(os.path.join(self.script_dir, self.conf["filename"]))
self.ayab_image = ayabImage(self.pil_image, 2)
def test_knitStartNeedle(self):
assert self.ayab_image.knitStartNeedle() == 0
def test_knitStopNeedle(self):
assert self.ayab_image.knitStopNeedle() == 199
def test_imgPosition(self):
assert self.ayab_image.imgPosition() == 'center'
def test_startLine(self):
assert self.ayab_image.startLine() == 0
def test_numColors(self):
assert self.ayab_image.numColors() == 2
def test_setStartLine(self):
self.startLine = 0
self.ayab_image.setStartLine(self.startLine)
assert self.ayab_image.startLine() == 0
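# A minimal entry point for running this module directly (the assertions above
# assume the mushroom.png test image sitting next to this file):
if __name__ == '__main__':
    unittest.main()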
import requests
import json
from moneymour import environments
from moneymour.crypto_utils import Signature
API_BASE_URL = 'https://api.moneymour.com'
API_SANDBOX_BASE_URL = 'https://api.sandbox.moneymour.com'
API_STAGE_BASE_URL = 'https://api.stage.moneymour.com'
API_DEVELOPMENT_BASE_URL = 'http://localhost:3000'
ENDPOINT_MERCHANT_REQUEST = '/merchant-request'
class ApiClient:
def __init__(self, merchant_id, merchant_secret, environment=environments.ENVIRONMENT_SANDBOX):
environments.validate_environment(environment)
self.merchant_id = merchant_id
self.merchant_secret = merchant_secret
self.environment = environment
def request(self, private_key, body):
"""
Request a loan.
:param private_key: Your personal private key
:param body: The body to be sent in the POST request
:return: JSON decoded object
"""
# Add identification fields to the request
body['merchantId'] = self.merchant_id
body['secret'] = self.merchant_secret
expires_at = Signature.generate_expires_at_header_value()
signature = Signature.build(private_key, expires_at, body)
headers = {
'Content-Type': 'application/json',
'Expires-at': expires_at,
'Signature': signature.decode("utf-8")
}
body = json.dumps(body, separators=(',', ':'))
# Perform the request
r = requests.post(ApiClient.get_api_base_url(self.environment) + ENDPOINT_MERCHANT_REQUEST, headers=headers,
data=body)
return json.loads(r.text)
@staticmethod
def get_api_base_url(environment):
environments.validate_environment(environment)
if environment == environments.ENVIRONMENT_PRODUCTION:
return API_BASE_URL
elif environment == environments.ENVIRONMENT_SANDBOX:
return API_SANDBOX_BASE_URL
elif environment == environments.ENVIRONMENT_STAGE:
return API_STAGE_BASE_URL
else:
return API_DEVELOPMENT_BASE_URL
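# A hedged usage sketch for the client defined above; all values are
# illustrative placeholders, and the request body fields expected by the
# Moneymour API are not documented in this module:
#
#   client = ApiClient('my-merchant-id', 'my-merchant-secret',
#                      environment=environments.ENVIRONMENT_SANDBOX)
#   response = client.request(private_key, {'amount': 1000})
#   print(response)
#
# where private_key is your personal private key in whatever format
# Signature.build() expects.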
import os
import logging
logging.basicConfig(level=logging.INFO)
import numpy as np
import matplotlib.pyplot as plt
from stompy.grid import paver
from stompy.spatial.linestring_utils import upsample_linearring,resample_linearring
from stompy.grid import paver
from stompy.spatial import field,constrained_delaunay,wkb2shp
##
from stompy.grid import exact_delaunay
from stompy.grid import live_dt
from stompy.grid import paver
from importlib import reload  # reload() is not a builtin in Python 3
reload(exact_delaunay)
reload(live_dt)
reload(paver)
##
def test_basic():
# Define a polygon
boundary=np.array([[0,0],[1000,0],[1000,1000],[0,1000]])
island =np.array([[200,200],[600,200],[200,600]])
rings=[boundary,island]
# And the scale:
scale=field.ConstantField(50)
p=paver.Paving(rings=rings,density=scale)
p.pave_all()
##
def test_basic_apollo():
# Define a polygon
boundary=np.array([[0,0],[1000,0],[1000,1000],[0,1000]])
island =np.array([[200,200],[600,200],[200,600]])
rings=[boundary,island]
# And the scale:
scale=field.PyApolloniusField()
scale.insert([50,50],20)
p=paver.Paving(rings=rings,density=scale)
p.pave_all()
return p
##
# A circle - r = 100, C=628, n_points = 628
def test_circle():
r = 100
thetas = np.linspace(0,2*np.pi,200)[:-1]
circle = np.zeros((len(thetas),2),np.float64)
circle[:,0] = r*np.cos(thetas)
circle[:,1] = r*np.sin(thetas)
class CircleDensityField(field.Field):
# horizontally varying, from 5 to 20
def value(self,X):
X = np.array(X)
return 5 + 15 * (X[...,0] + 100) / 200.0
density = CircleDensityField()
p=paver.Paving(circle,density,label='circle')
p.pave_all()
def test_long_channel():
l = 2000
w = 50
long_channel = np.array([[0,0],
[l,0],
[l,w],
[0,w]], np.float64 )
density = field.ConstantField( 19.245 )
p=paver.Paving(long_channel,density)
p.pave_all()
def test_long_channel_rigid():
l = 2000
w = 50
long_channel = np.array([[0,0],
[l,0],
[l,w],
[0,w]], np.float64 )
density = field.ConstantField( 19.245 )
p=paver.Paving(long_channel,density,initial_node_status=paver.Paving.RIGID)
p.pave_all()
def test_narrow_channel():
l = 1000
w = 50
long_channel = np.array([[0,0],
[l,0.375*w],
[l,0.625*w],
[0,w]], np.float64 )
density = field.ConstantField( w/np.sin(60*np.pi/180.) / 4 )
p=paver.Paving(long_channel,density)
p.pave_all()
def test_small_island():
l = 100
square = np.array([[0,0],
[l,0],
[l,l],
[0,l]], np.float64 )
r=10
theta = np.linspace(0,2*np.pi,30)
circle = r/np.sqrt(2) * np.swapaxes( np.array([np.cos(theta), np.sin(theta)]), 0,1)
island1 = circle + np.array([45,45])
island2 = circle + np.array([65,65])
island3 = circle + np.array([20,80])
rings = [square,island1,island2,island3]
density = field.ConstantField( 10 )
p=paver.Paving(rings,density)
p.pave_all()
def test_tight_peanut():
r = 100
thetas = np.linspace(0,2*np.pi,300)
peanut = np.zeros( (len(thetas),2), np.float64)
x = r*np.cos(thetas)
y = r*np.sin(thetas) * (0.9/10000 * x*x + 0.05)
peanut[:,0] = x
peanut[:,1] = y
density = field.ConstantField( 6.0 )
p=paver.Paving(peanut,density,label='tight_peanut')
p.pave_all()
def test_tight_with_island():
# build a peanut first:
r = 100
thetas = np.linspace(0,2*np.pi,250)
peanut = np.zeros( (len(thetas),2), np.float64)
x = r*np.cos(thetas)
y = r*np.sin(thetas) * (0.9/10000 * x*x + 0.05)
peanut[:,0] = x
peanut[:,1] = y
# put two holes into it
thetas = np.linspace(0,2*np.pi,30)
hole1 = np.zeros( (len(thetas),2), np.float64)
hole1[:,0] = 10*np.cos(thetas) - 75
hole1[:,1] = 10*np.sin(thetas)
hole2 = np.zeros( (len(thetas),2), np.float64)
hole2[:,0] = 20*np.cos(thetas) + 75
hole2[:,1] = 20*np.sin(thetas)
rings = [peanut,hole1,hole2]
density = field.ConstantField( 6.0 )
p=paver.Paving(rings,density,label='tight_with_island')
p.pave_all()
def test_peninsula():
r = 100
thetas = np.linspace(0,2*np.pi,1000)
pen = np.zeros( (len(thetas),2), np.float64)
pen[:,0] = r*(0.2+ np.abs(np.sin(2*thetas))**0.2)*np.cos(thetas)
pen[:,1] = r*(0.2+ np.abs(np.sin(2*thetas))**0.2)*np.sin(thetas)
density = field.ConstantField( 10.0 )
pen2 = upsample_linearring(pen,density)
p=paver.Paving(pen2,density,label='peninsula')
p.pave_all()
def test_peanut():
# like a figure 8, or a peanut
r = 100
thetas = np.linspace(0,2*np.pi,1000)
peanut = np.zeros( (len(thetas),2), np.float64)
peanut[:,0] = r*(0.5+0.3*np.cos(2*thetas))*np.cos(thetas)
peanut[:,1] = r*(0.5+0.3*np.cos(2*thetas))*np.sin(thetas)
min_pnt = peanut.min(axis=0)
max_pnt = peanut.max(axis=0)
d_data = np.array([ [min_pnt[0],min_pnt[1], 1.5],
[min_pnt[0],max_pnt[1], 1.5],
[max_pnt[0],min_pnt[1], 8],
[max_pnt[0],max_pnt[1], 8]])
density = field.XYZField(X=d_data[:,:2],F=d_data[:,2])
p=paver.Paving(peanut,density)
p.pave_all()
def test_cul_de_sac():
r=5
theta = np.linspace(-np.pi/2,np.pi/2,20)
cap = r * np.swapaxes( np.array([np.cos(theta), np.sin(theta)]), 0,1)
box = np.array([ [-3*r,r],
[-4*r,-r] ])
ring = np.concatenate((box,cap))
density = field.ConstantField(2*r/(np.sqrt(3)/2))
p=paver.Paving(ring,density,label='cul_de_sac')
p.pave_all()
def test_bow():
x = np.linspace(-100,100,50)
# with /1000 it seems to do okay
# with /500 it still looks okay
y = x**2 / 250.0
bow = np.swapaxes( np.concatenate( (x[None,:],y[None,:]) ), 0,1)
height = np.array([0,20])
ring = np.concatenate( (bow+height,bow[::-1]-height) )
density = field.ConstantField(2)
p=paver.Paving(ring,density,label='bow')
p.pave_all()
def test_ngon(nsides=7):
# hexagon works ok, though a bit of perturbation
# septagon starts to show expansion issues, but never pronounced
# octagon - works fine.
theta = np.linspace(0,2*np.pi,nsides+1)[:-1]
r=100
x = r*np.cos(theta)
y = r*np.sin(theta)
poly = np.swapaxes( np.concatenate( (x[None,:],y[None,:]) ), 0,1)
density = field.ConstantField(6)
p=paver.Paving(poly,density,label='ngon%02d'%nsides)
p.pave_all()
def test_expansion():
# 40: too close to a 120deg angle - always bisect on centerline
# 30: rows alternate with wall and bisect seams
# 35: starts to diverge, but recovers.
# 37: too close to 120.
d = 36
pnts = np.array([[0.,0.],
[100,-d],
[200,0],
[200,100],
[100,100+d],
[0,100]])
density = field.ConstantField(6)
p=paver.Paving([pnts],density,label='expansion')
p.pave_all()
def test_embedded_channel():
# trying out degenerate internal lines - the trick may be mostly in
# how to specify them.
# make a large rectangle, with a sinuous channel in the middle
L = 500.0
W = 300.0
rect = np.array([[0,0],
[L,0],
[L,W],
[0,W]])
x = np.linspace(0.1*L,0.9*L,50)
y = W/2 + 0.1*W*np.cos(4*np.pi*x/L)
shore = np.swapaxes( np.concatenate( (x[None,:],y[None,:]) ), 0,1)
density = field.ConstantField(10)
# this will probably get moved into Paver itself.
# Note closed_ring=0 !
shore = resample_linearring(shore,density,closed_ring=0)
south_shore = shore - np.array([0,0.1*W])
north_shore = shore + np.array([0,0.1*W])
p=paver.Paving([rect],density,degenerates=[north_shore,south_shore])
p.pave_all()
# dumbarton...
def test_dumbarton():
shp=os.path.join( os.path.dirname(__file__), 'data','dumbarton.shp')
features=wkb2shp.shp2geom(shp)
geom = features['geom'][0]
dumbarton = np.array(geom.exterior)
density = field.ConstantField(250.0)
p=paver.Paving(dumbarton, density,label='dumbarton')
p.pave_all()
# #def log_spiral_channel():
# t = linspace(1.0,12*pi,200)
# a = 1 ; b = 0.1
# x = a*exp(b*t)*cos(t)
# y = a*exp(b*t)*sin(t)
# # each 2*pi, the radius gets bigger by exp(2pi*b)
# x2 = a*exp(b*t-b*pi)*cos(t)
# y2 = a*exp(b*t-b*pi)*sin(t)
# cla(); plot(x,y,'b',x2,y2,'r')
##
# This is going to require a fair bit of porting --
# hmm - maybe better just to have a sinusoid channel, then perturb it
# and put some islands in there. having a wide range of scales looks
# nice but isn't going to be a great test.
def gen_sine_sine():
t = np.linspace(1.0,12*np.pi,400)
x1 = 100*t
y1 = 200*np.sin(t)
# each 2*pi, the radius gets bigger by exp(2pi*b)
x2 = x1
y2 = y1+50
# now perturb both sides, but keep amplitude < 20
y1 = y1 + 20*np.sin(10*t)
y2 = y2 + 10*np.cos(5*t)
x = np.concatenate( (x1,x2[::-1]) )
y = np.concatenate( (y1,y2[::-1]) )
shore = np.swapaxes( np.concatenate( (x[None,:],y[None,:]) ), 0,1)
rings = [shore]
# and make some islands:
north_island_shore = 0.4*y1 + 0.6*y2
south_island_shore = 0.6*y1 + 0.4*y2
Nislands = 20
# islands same length as space between islands, so divide
# island shorelines into 2*Nislands blocks
for i in range(Nislands):
i_start = int( (2*i+0.5)*len(t)/(2*Nislands) )
i_stop = int( (2*i+1.5)*len(t)/(2*Nislands) )
north_y = north_island_shore[i_start:i_stop]
south_y = south_island_shore[i_start:i_stop]
north_x = x1[i_start:i_stop]
south_x = x2[i_start:i_stop]
x = np.concatenate( (north_x,south_x[::-1]) )
y = np.concatenate( (north_y,south_y[::-1]) )
island = np.swapaxes( np.concatenate( (x[None,:],y[None,:]) ), 0,1)
rings.append(island)
density = field.ConstantField(25.0)
min_density = field.ConstantField(2.0)
p = paver.Paving(rings,density=density,min_density=min_density)
print("Smoothing to nominal 1.0m")
# mostly just to make sure that long segments are
# sampled well relative to the local feature scale.
p.smooth()
print("Adjusting other densities to local feature size")
p.telescope_rate=1.1
p.adjust_density_by_apollonius()
return p
def test_sine_sine():
p=gen_sine_sine()
p.pave_all()
if 0:
# debugging the issue with sine_sine()
# fails deep inside here, step 512
# lots of crap coming from this one, too.
# at some point, dt_incident_constraints reports only 1 constraint,
# but it should have two, which happens because of a bad slide.
# Tricky to guard against -
# Several avenues to fix this:
# 1. Make the resample_neighbors code (which I'm pretty sure is the culprit)
# more cautious and willing to accept a local maximum in distance instead
# of shoving a node far away. This is a nice but incomplete solution.
# 2. The resample code, which I think is responsible for adding the new node
# that screwed it all up, should check for self-intersections
# this is probably the appropriate thing to do.
# test_sine_sine()
p=gen_sine_sine()
p.pave_all(n_steps=512)
##
p.verbose=3
p.pave_all(n_steps=513)
##
zoom=plt.axis()
plt.figure(1).clf()
p.plot()
p.plot_boundary()
plt.axis('equal')
plt.axis(zoom)
##
# Step 510 really takes the end off an island
# yep.
p.pave_all(n_steps=512)
##
# node is 3626
# to_remove: an edge with nodes 5374, 3626
# pnt2edges: [3626, 5915]
# part of the problem is that there is some sliding around
# at the beginning of step 512 that really wreaks havoc on
# what was already a dicey node.
p.plot_nodes([3626,5374])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>
from collections import defaultdict
import os
import re
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
from pandas import DataFrame
import scipy.stats
import seaborn as sns
import lda_metrics
N_PROPS_LIST = ['None', 0.001, 0.01, 0.1]
N_FREQS_LIST = [1, 2, 4, 8]
N_TOPICS_LIST = [5, 10, 20, 40, 80, 160, 320]
sns.set(style='whitegrid', context='poster')
to_print_name = {
'reusl-train': 'REUSL 25k',
'reusl-short': 'REUSL 2.5k',
'nyt-train': 'NYT 25k',
'nyt-short': 'NYT 2.5k',
}
def validate_fname(
fname,
extension,
file_prefix=None,
process=None,
n_topics=None,
n_props_list=N_PROPS_LIST,
n_freqs_list=N_FREQS_LIST,
n_topics_list=N_TOPICS_LIST):
if not fname.startswith(file_prefix + '-'):
return None
is_seq_file = (extension == 'txt')
is_exact_duplicate = (len(fname.split('-')) == 6 - int(is_seq_file))
if is_exact_duplicate:
if is_seq_file:
fname_regex = r'[a-z\-]+(?P<proc_id>\d+)-(?P<prop>[\d.]+|None)-(?P<freq>\d+).' + extension
else:
fname_regex = r'[a-z\-]+(?P<proc_id>\d+)-(?P<prop>[\d.]+|None)-(?P<freq>\d+)-(?P<topic_ct>\d+).' + extension
else:
if is_seq_file:
fname_regex = r'[a-z\-]+(?P<proc_id>\d+)-(?P<prop>[\d.]+|None).' + extension
else:
fname_regex = r'[a-z\-]+(?P<proc_id>\d+)-(?P<prop>[\d.]+|None)-(?P<topic_ct>\d+).' + extension
match_obj = re.match(fname_regex, fname)
if match_obj is None:
return None
ret_dict = {}
proc_id = int(match_obj.group('proc_id'))
if process is not None and proc_id != process:
return None
else:
ret_dict['proc_id'] = proc_id
prop = match_obj.group('prop')
if prop != 'None':
prop = float(prop)
if prop not in n_props_list:
return None
else:
ret_dict['prop'] = prop
if not is_seq_file:
topic_ct = int(match_obj.group('topic_ct'))
if not (n_topics is None) and topic_ct != n_topics:
return None
elif not (topic_ct in n_topics_list):
return None
else:
ret_dict['topic_ct'] = topic_ct
if is_exact_duplicate:
freq = int(match_obj.group('freq'))
if freq not in n_freqs_list:
return None
else:
ret_dict['freq'] = freq
return ret_dict
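# Hedged examples of the filename shapes validate_fname() accepts (names are
# illustrative, not taken from any corpus):
#   validate_fname('reusl-train-3-0.01-2.txt', 'txt', file_prefix='reusl-train')
#       -> {'proc_id': 3, 'prop': 0.01, 'freq': 2}   # exact-duplicate sequence file
#   validate_fname('reusl-train-3-0.01.txt', 'txt', file_prefix='reusl-train')
#       -> {'proc_id': 3, 'prop': 0.01}              # non-duplicate sequence file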
def make_entity_from_fields(n_topics, val, label, fields):
return {
'proportion': fields['prop'],
'c': fields.get('freq', 0),
        'k': n_topics,
'process_id': fields['proc_id'],
'value': val,
'label': label
}
def print_significances(entities):
val_collection = defaultdict(list)
for entity in entities:
key = "{} {} {} {}".format(
entity['label'],
entity['proportion'],
entity['k'],
entity['c'])
val_collection[key].append(entity['value'])
for key in sorted(val_collection.keys()):
print(key, np.mean(val_collection[key]), 1.96*scipy.stats.sem(val_collection[key]))
def plot_cmap_from_entity_list(entities, save_file, vmax=1.0, value_name="value"):
plt.figure(figsize=(25, 15))
if not entities:
raise ValueError("No entities in list")
dataf = DataFrame([e for e in entities])
g = sns.FacetGrid(
dataf,
col='k',
row='label')
cbar_ax = g.fig.add_axes([.92, .3, .02, .4])
g.map_dataframe(facet_heatmap, cbar_ax=cbar_ax, vmax=vmax)
g.set_titles(col_template="{col_name} topics", row_template="{row_name}")
g.fig.subplots_adjust(right=.9)
plt.savefig(save_file)
def plot_pplot_from_entity_list(entities, save_file, value_name="value"):
plt.figure(figsize=(25, 15))
if not entities:
raise ValueError("No entities in list")
dataf = DataFrame([e for e in entities])
g = sns.factorplot(
x='c',
y='value',
hue='proportion',
col='k',
row='label',
capsize=.2,
markers='.',
scale=0.5,
data=dataf)
g.set_titles(col_template="{col_name} topics", row_template="{row_name}")
g.set_axis_labels("# copies", value_name)
plt.savefig(save_file)
def print_data_table(entities):
dataf = DataFrame([e for e in entities])
data = dataf.pivot_table(index='proportion', columns='c', values='value')
print(data)
def facet_heatmap(data, color, vmax=1.0, **kws):
data = data.pivot_table(index='proportion', columns='c', values='value')
sns.heatmap(data, cmap='Blues', annot=True, fmt=".2f", vmin=0, vmax=vmax, **kws)
from flask import Flask
from flask import request, jsonify
import numpy as np
import torch
from flask_cors import CORS, cross_origin
import socket
import argparse
import random
import json
import re
from tokenize_code import tokenize_code
from serverHelpers import notebook_to_frontend
from gensim.models.doc2vec import Doc2Vec
from model import BertModel, Generator
from RetrievalDB_doc2vec import RetrievalDB_doc2vec, inferenceRNN_doc2vec
from RetrievalDB_CodeBERT import RetrievalDB_CodeBERT, inferenceRNN_CodeBERT
# Get the path to the data
PATH_TO_SLICED_SCRIPTS = '../../yizhi/EDA/kaggle-dataset/sliced-notebooks-full-new'
PATH_TO_NOTEBOOKS = '../../yizhi/EDA/kaggle-dataset/notebooks-full'
PATH_TO_CODEBERT_MODELS = '../../yizhi/EDA/EDA-prediction/'
# retrievalDB_doc2vec = RetrievalDB_doc2vec()
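# NOTE: the doc2vec retrieval DB above is left unconstructed; the
# /predict_next_doc2vec route further down references retrievalDB_doc2vec and
# will raise a NameError unless that line is re-enabled.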
retrievalDB_CodeBERT = RetrievalDB_CodeBERT(PATH_TO_CODEBERT_MODELS)
app = Flask(__name__)
CORS(app)
def randomSublists(someList):
resultList = [] #result container
index = 0 #start at the start of the list
length = len(someList) #and cache the length for performance on large lists
while (index < length):
randomNumber = np.random.randint(1, length-index+1) #get a number between 1 and the remaining choices
resultList.append(someList[index:index+randomNumber]) #append a list starting at index with randomNumber length to it
index = index + randomNumber #increment index by amount of list used
return resultList #return the list of randomized sublists
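# For example (output is random), randomSublists(['a', 'b', 'c', 'd', 'e'])
# might return [['a', 'b'], ['c'], ['d', 'e']]: the sublists always partition
# the input list in order.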
def create_app():
@app.route("/", methods=["GET"])
def index():
return "SmartEDA API Server"
@app.route("/generate_answer", methods=["GET","POST"])
def generate_answer():
#nl_input = request.form['input']
files_to_read = ['2.ipynb', '11111.ipynb', '8570777.ipynb', '9582250.ipynb', '10269993.ipynb']
store = []
for file_name in files_to_read:
file = open("examples/" + file_name)
line = file.read()
file.close()
store.append(line)
json_parsed = []
for file_content in store:
json_parsed.append(json.loads(file_content))
all_ops = []
all_op_type = []
all_if_demon = []
for notebook in json_parsed:
cells = notebook['cells']
operations = []
one_op_type = []
one_if_demon = []
for a_cell in cells:
# a code cell
if a_cell['cell_type'] == 'code':
for a_line in a_cell['source']:
# a line of code
replaced_line = a_line.replace('"', '@').replace("'", '@')
                        if not replaced_line.endswith('\n'):
operations.append(replaced_line + '\n')
else:
operations.append(replaced_line)
one_op_type.append(np.random.randint(4) + 1)
one_if_demon.append(np.random.randint(2))
all_ops.append(operations)
all_op_type.append(one_op_type)
all_if_demon.append(one_if_demon)
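        # For each run of consecutive 1s in a notebook's demonstration mask, pick
        # one random alphabetic token from the corresponding source lines as that
        # run's keyword ('random_stuff' when no token can be extracted).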
all_keywords = []
for j in range(len(all_if_demon)):
one_notebook = all_if_demon[j]
a_keyword = []
length = len(one_notebook)
i = 0
while i < length:
if one_notebook[i] == 0:
i += 1
# skip
else:
start = i
end = start
while i < length:
if one_notebook[i] == 1:
# no worries, just check if it is the end
if i == length - 1:
# 1 all the way to the end.
end = i
else:
# 0, time to stop
i = i - 1
end = i
break
i = i + 1
try:
a_keyword.append(random.choice(re.sub("[^a-zA-Z]+", " ", ' '.join(all_ops[j][start:end+1])).split()))
except:
a_keyword.append('random_stuff')
i += 1
all_keywords.append(a_keyword)
response = jsonify(all_operation_types=all_op_type,
all_operations=all_ops,
all_if_demonstrated=all_if_demon,
all_kwds=all_keywords)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route("/predict_next", methods=["POST"])
def predict_next():
if request.method == "POST":
print("Inferring next sequence")
# Axios request body is {notebook: stringified json}
# So we need to access the notebook field and parse it with json.loads
notebookSrc = json.loads(request.get_json()['notebook'])
print("notebooksrc json is", notebookSrc)
print("Notebook is", notebookSrc.keys())
# Do inference
topNotebooks = inferenceRNN_CodeBERT(notebookSrc, retrievalDB_CodeBERT, PATH_TO_CODEBERT_MODELS)
notebook_filepaths = []
# Parse the returned results
for (name, seqNum ) in topNotebooks:
# Name format is "competition\filename_seqNum"
competition = name.split('\\')[0]
filename_and_idx = name.split('\\')[1]
filename = filename_and_idx.split('_')[0]
idx = filename_and_idx.split('_')[1]
filepath = PATH_TO_NOTEBOOKS + '/' + competition + '/' + filename + '.ipynb'
notebook_filepaths.append(filepath)
data_to_frontend = notebook_to_frontend(notebook_filepaths)
response_formatted = jsonify(all_operation_types=data_to_frontend[0],
all_operations=data_to_frontend[1],
all_if_demonstrated=data_to_frontend[2],
all_kwds=data_to_frontend[3])
# Prevent CORS error
response_formatted.headers.add('Access-Control-Allow-Origin', '*')
return response_formatted
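    # Example client call (editor's sketch; localhost:5000 matches the argparse
    # defaults below and the file name is a hypothetical one). The request body
    # must carry the notebook as *stringified* JSON, because the handler does
    # json.loads(request.get_json()['notebook']):
    #
    #   import json, requests
    #   with open('example.ipynb') as f:
    #       nb = json.load(f)
    #   resp = requests.post('http://localhost:5000/predict_next',
    #                        json={'notebook': json.dumps(nb)})
    #   print(resp.json()['all_operations'])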
# POST /predict_next_doc2vec
@app.route("/predict_next_doc2vec", methods=["POST"])
def predict_next_doc2vec():
if request.method == "POST":
print("Inferring next sequence")
# Axios request body is {notebook: stringified json}
# So we need to access the notebook field and parse it with json.loads
notebookSrc = json.loads(request.get_json()['notebook'])
print("notebooksrc json is", notebookSrc)
print("Notebook is", notebookSrc.keys())
# Do inference
topNotebooks = inferenceRNN_doc2vec(notebookSrc, retrievalDB_doc2vec)
notebook_filepaths = []
# Parse the returned results
for (name, seqNum ) in topNotebooks:
# Name format is "competition\filename_seqNum"
competition = name.split('\\')[0]
filename_and_idx = name.split('\\')[1]
filename = filename_and_idx.split('_')[0]
idx = filename_and_idx.split('_')[1]
filepath = PATH_TO_NOTEBOOKS + '/' + competition + '/' + filename + '.ipynb'
notebook_filepaths.append(filepath)
print("notebooks filepaths is", notebook_filepaths)
response = jsonify(topNotebooks)
data_to_frontend = notebook_to_frontend(notebook_filepaths)
response_formatted = jsonify(all_operation_types=data_to_frontend[0],
all_operations=data_to_frontend[1],
all_if_demonstrated=data_to_frontend[2],
all_kwds=data_to_frontend[3])
# Prevent CORS error
response_formatted.headers.add('Access-Control-Allow-Origin', '*')
return response_formatted
@app.route("/search_by_nl", methods=["POST"])
def search_by_nl():
if request.method == "POST":
return jsonify(hello="world search by nl")
return app
def main(args):
hostname = socket.gethostname()
local_ip = socket.gethostbyname(hostname)
print("hostname is", hostname)
print("local ip is", local_ip)
app = create_app()
app.run(host=args.host, debug=True, port=args.port)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--beam_size", default=10, type=int, help="beam size for beam search"
)
parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available")
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--port", type=int, default=5000)
args = parser.parse_args()
args.device_name = "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
args.device = torch.device(args.device_name)
args.beam_size = (args.beam_size if torch.cuda.is_available() and not args.no_cuda else 1)
main(args)
|
[
"argparse.ArgumentParser",
"json.loads",
"flask_cors.CORS",
"flask.Flask",
"RetrievalDB_doc2vec.inferenceRNN_doc2vec",
"socket.gethostbyname",
"RetrievalDB_CodeBERT.RetrievalDB_CodeBERT",
"socket.gethostname",
"numpy.random.randint",
"flask.jsonify",
"RetrievalDB_CodeBERT.inferenceRNN_CodeBERT",
"torch.cuda.is_available",
"torch.device",
"serverHelpers.notebook_to_frontend",
"flask.request.get_json"
] |
[((835, 880), 'RetrievalDB_CodeBERT.RetrievalDB_CodeBERT', 'RetrievalDB_CodeBERT', (['PATH_TO_CODEBERT_MODELS'], {}), '(PATH_TO_CODEBERT_MODELS)\n', (855, 880), False, 'from RetrievalDB_CodeBERT import RetrievalDB_CodeBERT, inferenceRNN_CodeBERT\n'), ((887, 902), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (892, 902), False, 'from flask import Flask\n'), ((903, 912), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (907, 912), False, 'from flask_cors import CORS, cross_origin\n'), ((8501, 8521), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (8519, 8521), False, 'import socket\n'), ((8537, 8567), 'socket.gethostbyname', 'socket.gethostbyname', (['hostname'], {}), '(hostname)\n', (8557, 8567), False, 'import socket\n'), ((8758, 8797), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (8781, 8797), False, 'import argparse\n'), ((9270, 9300), 'torch.device', 'torch.device', (['args.device_name'], {}), '(args.device_name)\n', (9282, 9300), False, 'import torch\n'), ((1160, 1200), 'numpy.random.randint', 'np.random.randint', (['(1)', '(length - index + 1)'], {}), '(1, length - index + 1)\n', (1177, 1200), True, 'import numpy as np\n'), ((4434, 4559), 'flask.jsonify', 'jsonify', ([], {'all_operation_types': 'all_op_type', 'all_operations': 'all_ops', 'all_if_demonstrated': 'all_if_demon', 'all_kwds': 'all_keywords'}), '(all_operation_types=all_op_type, all_operations=all_ops,\n all_if_demonstrated=all_if_demon, all_kwds=all_keywords)\n', (4441, 4559), False, 'from flask import request, jsonify\n'), ((5243, 5328), 'RetrievalDB_CodeBERT.inferenceRNN_CodeBERT', 'inferenceRNN_CodeBERT', (['notebookSrc', 'retrievalDB_CodeBERT', 'PATH_TO_CODEBERT_MODELS'], {}), '(notebookSrc, retrievalDB_CodeBERT,\n PATH_TO_CODEBERT_MODELS)\n', (5264, 5328), False, 'from RetrievalDB_CodeBERT import RetrievalDB_CodeBERT, inferenceRNN_CodeBERT\n'), ((5916, 5956), 'serverHelpers.notebook_to_frontend', 'notebook_to_frontend', (['notebook_filepaths'], {}), '(notebook_filepaths)\n', (5936, 5956), False, 'from serverHelpers import notebook_to_frontend\n'), ((5991, 6156), 'flask.jsonify', 'jsonify', ([], {'all_operation_types': 'data_to_frontend[0]', 'all_operations': 'data_to_frontend[1]', 'all_if_demonstrated': 'data_to_frontend[2]', 'all_kwds': 'data_to_frontend[3]'}), '(all_operation_types=data_to_frontend[0], all_operations=\n data_to_frontend[1], all_if_demonstrated=data_to_frontend[2], all_kwds=\n data_to_frontend[3])\n', (5998, 6156), False, 'from flask import request, jsonify\n'), ((7017, 7071), 'RetrievalDB_doc2vec.inferenceRNN_doc2vec', 'inferenceRNN_doc2vec', (['notebookSrc', 'retrievalDB_doc2vec'], {}), '(notebookSrc, retrievalDB_doc2vec)\n', (7037, 7071), False, 'from RetrievalDB_doc2vec import RetrievalDB_doc2vec, inferenceRNN_doc2vec\n'), ((7719, 7740), 'flask.jsonify', 'jsonify', (['topNotebooks'], {}), '(topNotebooks)\n', (7726, 7740), False, 'from flask import request, jsonify\n'), ((7773, 7813), 'serverHelpers.notebook_to_frontend', 'notebook_to_frontend', (['notebook_filepaths'], {}), '(notebook_filepaths)\n', (7793, 7813), False, 'from serverHelpers import notebook_to_frontend\n'), ((7848, 8013), 'flask.jsonify', 'jsonify', ([], {'all_operation_types': 'data_to_frontend[0]', 'all_operations': 'data_to_frontend[1]', 'all_if_demonstrated': 'data_to_frontend[2]', 'all_kwds': 'data_to_frontend[3]'}), '(all_operation_types=data_to_frontend[0], all_operations=\n data_to_frontend[1], 
all_if_demonstrated=data_to_frontend[2], all_kwds=\n data_to_frontend[3])\n', (7855, 8013), False, 'from flask import request, jsonify\n'), ((8418, 8453), 'flask.jsonify', 'jsonify', ([], {'hello': '"""world search by nl"""'}), "(hello='world search by nl')\n", (8425, 8453), False, 'from flask import request, jsonify\n'), ((9194, 9219), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9217, 9219), False, 'import torch\n'), ((9341, 9366), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9364, 9366), False, 'import torch\n'), ((2147, 2171), 'json.loads', 'json.loads', (['file_content'], {}), '(file_content)\n', (2157, 2171), False, 'import json\n'), ((5048, 5066), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (5064, 5066), False, 'from flask import request, jsonify\n'), ((6822, 6840), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (6838, 6840), False, 'from flask import request, jsonify\n'), ((3025, 3045), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (3042, 3045), True, 'import numpy as np\n'), ((2955, 2975), 'numpy.random.randint', 'np.random.randint', (['(4)'], {}), '(4)\n', (2972, 2975), True, 'import numpy as np\n')]
|
from flask import Flask
from .views import views
def create_app() -> Flask:
app = Flask(
__name__,
static_url_path='',
static_folder='../app/build',
)
app.register_blueprint(views)
return app
|
[
"flask.Flask"
] |
[((90, 155), 'flask.Flask', 'Flask', (['__name__'], {'static_url_path': '""""""', 'static_folder': '"""../app/build"""'}), "(__name__, static_url_path='', static_folder='../app/build')\n", (95, 155), False, 'from flask import Flask\n')]
|
# Generated by Django 3.1.7 on 2021-05-06 05:22
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('gyoithon', '0005_auto_20210506_1358'),
]
operations = [
migrations.AlterField(
model_name='domain',
name='registration_date',
field=models.DateTimeField(default=datetime.datetime(2021, 5, 6, 5, 22, 59, 129066, tzinfo=utc)),
),
migrations.AlterField(
model_name='organization',
name='registration_date',
field=models.DateTimeField(default=datetime.datetime(2021, 5, 6, 5, 22, 59, 128595, tzinfo=utc)),
),
migrations.AlterField(
model_name='organization',
name='status',
field=models.CharField(default='N/A', max_length=10, verbose_name='Status'),
),
migrations.AlterField(
model_name='subdomain',
name='registration_date',
field=models.DateTimeField(default=datetime.datetime(2021, 5, 6, 5, 22, 59, 129596, tzinfo=utc)),
),
]
|
[
"django.db.models.CharField",
"datetime.datetime"
] |
[((849, 918), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""N/A"""', 'max_length': '(10)', 'verbose_name': '"""Status"""'}), "(default='N/A', max_length=10, verbose_name='Status')\n", (865, 918), False, 'from django.db import migrations, models\n'), ((431, 491), 'datetime.datetime', 'datetime.datetime', (['(2021)', '(5)', '(6)', '(5)', '(22)', '(59)', '(129066)'], {'tzinfo': 'utc'}), '(2021, 5, 6, 5, 22, 59, 129066, tzinfo=utc)\n', (448, 491), False, 'import datetime\n'), ((660, 720), 'datetime.datetime', 'datetime.datetime', (['(2021)', '(5)', '(6)', '(5)', '(22)', '(59)', '(128595)'], {'tzinfo': 'utc'}), '(2021, 5, 6, 5, 22, 59, 128595, tzinfo=utc)\n', (677, 720), False, 'import datetime\n'), ((1083, 1143), 'datetime.datetime', 'datetime.datetime', (['(2021)', '(5)', '(6)', '(5)', '(22)', '(59)', '(129596)'], {'tzinfo': 'utc'}), '(2021, 5, 6, 5, 22, 59, 129596, tzinfo=utc)\n', (1100, 1143), False, 'import datetime\n')]
|
"""Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from .codegen import Codegen
from .xr import Go as XRGo
from .xe import Go as XEGo
from .nx import Go as NXGo
os_lang_map = {
'XR': {
'Go': XRGo
},
'XE': {
'Go': XEGo
},
'NX': {
'Go': NXGo
}
}
def get_codegen_class(os_name, language):
if os_name not in os_lang_map.keys():
logging.error('%s is not a supported OS!', os_name)
return None
if language not in os_lang_map[os_name].keys():
logging.error('%s is not a supported language for %s!', language, os_name)
return None
return os_lang_map[os_name][language]
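# Example (editor's note): get_codegen_class('XR', 'Go') returns the XRGo class,
# while an unsupported pair such as ('XR', 'Python') logs an error and returns None.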
def start_codegen(os_list, language_list, config):
if os_list:
logging.info('Preparing codegen for %s.', ', '.join(os_list))
if not set(os_list).issubset(set(os_lang_map.keys())):
logging.error('OS list contains invalid entries!')
return
else:
logging.info('Preparing codegen for all supported OSes.')
os_list = os_lang_map.keys()
if not language_list:
logging.info('All supported languages will be generated.')
for _os in os_list:
languages = language_list if language_list else os_lang_map[_os].keys()
for language in languages:
gen_target = get_codegen_class(_os, language)
if gen_target:
logging.info('Starting %s generation for %s.', language, _os)
gen_target(config).generate()
|
[
"logging.info",
"logging.error"
] |
[((913, 964), 'logging.error', 'logging.error', (['"""%s is not a supported OS!"""', 'os_name'], {}), "('%s is not a supported OS!', os_name)\n", (926, 964), False, 'import logging\n'), ((1045, 1119), 'logging.error', 'logging.error', (['"""%s is not a supported language for %s!"""', 'language', 'os_name'], {}), "('%s is not a supported language for %s!', language, os_name)\n", (1058, 1119), False, 'import logging\n'), ((1483, 1540), 'logging.info', 'logging.info', (['"""Preparing codegen for all supported OSes."""'], {}), "('Preparing codegen for all supported OSes.')\n", (1495, 1540), False, 'import logging\n'), ((1612, 1670), 'logging.info', 'logging.info', (['"""All supported languages will be generated."""'], {}), "('All supported languages will be generated.')\n", (1624, 1670), False, 'import logging\n'), ((1395, 1445), 'logging.error', 'logging.error', (['"""OS list contains invalid entries!"""'], {}), "('OS list contains invalid entries!')\n", (1408, 1445), False, 'import logging\n'), ((1911, 1972), 'logging.info', 'logging.info', (['"""Starting %s generation for %s."""', 'language', '_os'], {}), "('Starting %s generation for %s.', language, _os)\n", (1923, 1972), False, 'import logging\n')]
|
# https://www.visualcrossing.com/weather/weather-data-services?pln=plan_GqkYVnzyiNg93X#/timeline
# https://www.visualcrossing.com/weather-api
import requests
import json
# Convert degrees to compass direction
def deg_to_compass(num):
val = int((num / 22.5) + 0.5)
arr = [
"N",
"NNE",
"NE",
"ENE",
"E",
"ESE",
"SE",
"SSE",
"S",
"SSW",
"SW",
"WSW",
"W",
"WNW",
"NW",
"NNW",
]
return arr[(val % 16)]
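# Worked examples (editor's note): deg_to_compass(90) -> "E", since
# int(90 / 22.5 + 0.5) = 4 and arr[4] == "E"; deg_to_compass(353) -> "N",
# since int(353 / 22.5 + 0.5) = 16 and 16 % 16 wraps back to index 0.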
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
class ApiVisualCrossing:
def __init__(self):
self.data = None
def refresh(self, location="", api_key="", data_units="metric"):
url = (
f"https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/"
f"{location}/today"
f"?unitGroup={data_units}"
f"&key={api_key}"
"&include=fcst%2Ccurrent"
)
result = requests.get(url)
if result.status_code == 200:
self.data = result.json()
else:
self.data = None
def get_timestamp(self):
return self.get_element(("currentConditions", "datetime"), "N/A")
def get_resolved_address(self):
ret_val = "N/A"
if self.data is not None:
ret_val = self.data["resolvedAddress"]
return ret_val
def get_temperature(self):
return self.get_element(("currentConditions", "temp"), "N/A")
def get_feels_like_temperature(self):
return self.get_element(("currentConditions", "feelslike"), "N/A")
def get_low_temperature(self):
return self.get_element(("days", "tempmin"), "N/A")
def get_high_temperature(self):
return self.get_element(("days", "tempmax"), "N/A")
def get_wind_speed(self):
return self.get_element(("currentConditions", "windspeed"), "N/A")
def get_wind_gust(self):
return self.get_element(("currentConditions", "windgust"), "0")
def get_wind_direction(self):
ret_val = self.get_element(("currentConditions", "winddir"), "N/A")
if is_number(ret_val):
ret_val = deg_to_compass(ret_val)
return ret_val
def get_precip(self):
return self.get_element(("currentConditions", "precip"), "0")
def get_precip_prob(self):
return self.get_element(("currentConditions", "precipprob"), "0")
def get_element(self, keys, default="", round_val=True):
ret_val = default
if self.data is not None:
ret_val = self.data[keys[0]]
if isinstance(ret_val, list):
ret_val = ret_val[0][keys[1]]
else:
ret_val = ret_val[keys[1]]
if ret_val:
                if round_val and is_number(ret_val):
ret_val = round(float(ret_val))
else:
ret_val = default
return ret_val
if __name__ == "__main__":
api = ApiVisualCrossing()
api.refresh("32.52402,-97.29605", "")
print(json.dumps(api.data, indent=4))
# print('Address: ', api.get_resolved_address())
# print('Time: ', api.get_timestamp())
# print('Temperature: ', api.get_temperature())
# print('Feels Like: ', api.get_feels_like_temperature())
# print('Low Temperature: ', api.get_low_temperature())
# print('High Temperature: ', api.get_high_temperature())
# print('Wind Speed: ', api.get_wind_speed())
# print('Wind Gust: ', api.get_wind_gust())
# print('Wind Direction From: ', api.get_wind_direction())
# print('Precipitation: ', api.get_precip())
# print('Precipitation Probability: ', api.get_precip_prob())
|
[
"requests.get",
"json.dumps"
] |
[((1082, 1099), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1094, 1099), False, 'import requests\n'), ((3159, 3189), 'json.dumps', 'json.dumps', (['api.data'], {'indent': '(4)'}), '(api.data, indent=4)\n', (3169, 3189), False, 'import json\n')]
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\sickness\sickness_commands.py
# Compiled at: 2017-08-17 00:31:58
# Size of source mod 2**32: 2877 bytes
from server_commands.argument_helpers import OptionalSimInfoParam, TunableInstanceParam, get_optional_target
import services, sims4
@sims4.commands.Command('sickness.make_sick', command_type=(sims4.commands.CommandType.Automation))
def make_sick(opt_target: OptionalSimInfoParam=None, _connection=None):
target = get_optional_target(opt_target, _connection, target_type=OptionalSimInfoParam)
if target is None:
return False
services.get_sickness_service().make_sick(target)
@sims4.commands.Command('sickness.add', command_type=(sims4.commands.CommandType.Automation))
def add_sickness(sickness_type: TunableInstanceParam(sims4.resources.Types.SICKNESS), opt_target: OptionalSimInfoParam=None, _connection=None):
target = get_optional_target(opt_target, _connection, target_type=OptionalSimInfoParam)
if target is None:
return False
services.get_sickness_service().make_sick(target, sickness=sickness_type)
@sims4.commands.Command('sickness.remove', command_type=(sims4.commands.CommandType.Automation))
def remove_sickness(opt_target: OptionalSimInfoParam=None, _connection=None):
target = get_optional_target(opt_target, _connection, target_type=OptionalSimInfoParam)
if target is None:
return False
services.get_sickness_service().remove_sickness(target)
@sims4.commands.Command('sickness.distribute_sicknesses')
def distribute_sicknesses(_connection=None):
services.get_sickness_service().trigger_sickness_distribution()
@sims4.commands.Command('sickness.update_diagnosis')
def update_diagnosis(opt_target: OptionalSimInfoParam=None, _connection=None):
target = get_optional_target(opt_target, _connection, target_type=OptionalSimInfoParam)
    if target is None or not target.has_sickness_tracking():
        return False
    target.current_sickness.update_diagnosis(target)
@sims4.commands.Command('sickness.clear_diagnosis')
def clear_diagnosis(opt_target: OptionalSimInfoParam=None, _connection=None):
target = get_optional_target(opt_target, _connection, target_type=OptionalSimInfoParam)
    if target is None or not target.has_sickness_tracking():
        return False
    target.sickness_tracker.clear_diagnosis_data()
|
[
"sims4.commands.Command",
"server_commands.argument_helpers.TunableInstanceParam",
"services.get_sickness_service",
"server_commands.argument_helpers.get_optional_target"
] |
[((458, 559), 'sims4.commands.Command', 'sims4.commands.Command', (['"""sickness.make_sick"""'], {'command_type': 'sims4.commands.CommandType.Automation'}), "('sickness.make_sick', command_type=sims4.commands.\n CommandType.Automation)\n", (480, 559), False, 'import services, sims4\n'), ((822, 917), 'sims4.commands.Command', 'sims4.commands.Command', (['"""sickness.add"""'], {'command_type': 'sims4.commands.CommandType.Automation'}), "('sickness.add', command_type=sims4.commands.\n CommandType.Automation)\n", (844, 917), False, 'import services, sims4\n'), ((1276, 1374), 'sims4.commands.Command', 'sims4.commands.Command', (['"""sickness.remove"""'], {'command_type': 'sims4.commands.CommandType.Automation'}), "('sickness.remove', command_type=sims4.commands.\n CommandType.Automation)\n", (1298, 1374), False, 'import services, sims4\n'), ((1649, 1705), 'sims4.commands.Command', 'sims4.commands.Command', (['"""sickness.distribute_sicknesses"""'], {}), "('sickness.distribute_sicknesses')\n", (1671, 1705), False, 'import services, sims4\n'), ((1822, 1873), 'sims4.commands.Command', 'sims4.commands.Command', (['"""sickness.update_diagnosis"""'], {}), "('sickness.update_diagnosis')\n", (1844, 1873), False, 'import services, sims4\n'), ((2170, 2220), 'sims4.commands.Command', 'sims4.commands.Command', (['"""sickness.clear_diagnosis"""'], {}), "('sickness.clear_diagnosis')\n", (2192, 2220), False, 'import services, sims4\n'), ((642, 720), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_target', '_connection'], {'target_type': 'OptionalSimInfoParam'}), '(opt_target, _connection, target_type=OptionalSimInfoParam)\n', (661, 720), False, 'from server_commands.argument_helpers import OptionalSimInfoParam, TunableInstanceParam, get_optional_target\n'), ((1072, 1150), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_target', '_connection'], {'target_type': 'OptionalSimInfoParam'}), '(opt_target, _connection, target_type=OptionalSimInfoParam)\n', (1091, 1150), False, 'from server_commands.argument_helpers import OptionalSimInfoParam, TunableInstanceParam, get_optional_target\n'), ((1463, 1541), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_target', '_connection'], {'target_type': 'OptionalSimInfoParam'}), '(opt_target, _connection, target_type=OptionalSimInfoParam)\n', (1482, 1541), False, 'from server_commands.argument_helpers import OptionalSimInfoParam, TunableInstanceParam, get_optional_target\n'), ((1966, 2044), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_target', '_connection'], {'target_type': 'OptionalSimInfoParam'}), '(opt_target, _connection, target_type=OptionalSimInfoParam)\n', (1985, 2044), False, 'from server_commands.argument_helpers import OptionalSimInfoParam, TunableInstanceParam, get_optional_target\n'), ((2312, 2390), 'server_commands.argument_helpers.get_optional_target', 'get_optional_target', (['opt_target', '_connection'], {'target_type': 'OptionalSimInfoParam'}), '(opt_target, _connection, target_type=OptionalSimInfoParam)\n', (2331, 2390), False, 'from server_commands.argument_helpers import OptionalSimInfoParam, TunableInstanceParam, get_optional_target\n'), ((947, 999), 'server_commands.argument_helpers.TunableInstanceParam', 'TunableInstanceParam', (['sims4.resources.Types.SICKNESS'], {}), '(sims4.resources.Types.SICKNESS)\n', (967, 999), False, 'from server_commands.argument_helpers import OptionalSimInfoParam, 
TunableInstanceParam, get_optional_target\n'), ((769, 800), 'services.get_sickness_service', 'services.get_sickness_service', ([], {}), '()\n', (798, 800), False, 'import services, sims4\n'), ((1199, 1230), 'services.get_sickness_service', 'services.get_sickness_service', ([], {}), '()\n', (1228, 1230), False, 'import services, sims4\n'), ((1590, 1621), 'services.get_sickness_service', 'services.get_sickness_service', ([], {}), '()\n', (1619, 1621), False, 'import services, sims4\n'), ((1755, 1786), 'services.get_sickness_service', 'services.get_sickness_service', ([], {}), '()\n', (1784, 1786), False, 'import services, sims4\n')]
|
# Copyright 2020-2021 Dolthub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pymysql.cursors
class TestMySQL(unittest.TestCase):
def test_connect(self):
connection = pymysql.connect(host='127.0.0.1',
user='root',
password='',
db='',
cursorclass=pymysql.cursors.DictCursor)
try:
with connection.cursor() as cursor:
sql = "SELECT name, email FROM mytable ORDER BY name, email"
cursor.execute(sql)
rows = cursor.fetchall()
expected = [
{"name": "<NAME>", "email": "<EMAIL>"},
{"name": "<NAME>", "email": "<EMAIL>"},
{"name": "<NAME>", "email": "<EMAIL>"},
{"name": "<NAME>", "email": "<EMAIL>"}
]
self.assertEqual(expected, rows)
finally:
connection.close()
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main"
] |
[((2163, 2178), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2176, 2178), False, 'import unittest\n')]
|
from selenium import webdriver
from time import sleep
import json
import Client
t = Client.get_servers_raw()
if t == None:
print('Failed to get the list of servers')
raise SystemExit
if len(t) == 0:
print('The list of servers is empty')
raise SystemExit
print(f'Added {len(t)} servers to the queue')
with open('res\\GetMembers.js', 'r') as f:
uscan = f.read()
users = set()
total_expected = 0
total = 0
driver = webdriver.Edge('res\\msedgedriver.exe')
driver.get('https://discord.com/login')
print('Login to continue')
while not driver.current_url == 'https://discord.com/channels/@me':
sleep(1)
print('Login successful!')
for srv in t:
print(f'Processing [{srv["id"]}] {srv["name"]}')
count = Client.get_member_count(srv['id'])
print(f'Expected member count:', count)
total_expected += count
driver.get('https://discord.com/channels/' + srv['id'])
wait = True
while wait:
sleep(0.5)
wait = False
try:
driver.find_element_by_xpath('//div[@aria-label="Members"]')
except:
wait = True
sleep(0.5)
driver.execute_script(uscan)
done = False
while not done:
done = driver.execute_script('return done;')
sleep(1)
tmp = json.loads(driver.execute_script('return JSON.stringify(users);'))
total += len(tmp)
users = users.union(tmp)
print(f'Discovered {len(tmp)} members ~{len(tmp)*100//count}%.\n')
driver.close()
with open('Users.json', 'w') as f:
json.dump(list(users), f)
print(f'Exported {total} users as Users.json')
print(f'Final discovery rate: ~{total*100//total_expected}%')
|
[
"Client.get_member_count",
"selenium.webdriver.Edge",
"Client.get_servers_raw",
"time.sleep"
] |
[((85, 109), 'Client.get_servers_raw', 'Client.get_servers_raw', ([], {}), '()\n', (107, 109), False, 'import Client\n'), ((434, 473), 'selenium.webdriver.Edge', 'webdriver.Edge', (['"""res\\\\msedgedriver.exe"""'], {}), "('res\\\\msedgedriver.exe')\n", (448, 473), False, 'from selenium import webdriver\n'), ((614, 622), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (619, 622), False, 'from time import sleep\n'), ((731, 765), 'Client.get_member_count', 'Client.get_member_count', (["srv['id']"], {}), "(srv['id'])\n", (754, 765), False, 'import Client\n'), ((1102, 1112), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (1107, 1112), False, 'from time import sleep\n'), ((940, 950), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (945, 950), False, 'from time import sleep\n'), ((1245, 1253), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1250, 1253), False, 'from time import sleep\n')]
|
# Copyright 2011 Terena. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY TERENA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL TERENA OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Terena.
from django.conf.urls import patterns, url
from peer.entity.feeds import EntitiesFeed, ChangesFeed
urlpatterns = patterns(
'peer.entity.views',
# Global views
url(r'^$', 'entities_list', name='entities_list'),
url(r'^rss$', EntitiesFeed(), name='entities_feed'),
url(r'^add$', 'entity_add', name='entity_add'),
# Search view
url(r'^search$', 'search.search_entities', name='search_entities'),
# Group Views
url(r'^group/add$',
'group.entity_group_add', name='entity_group_add'),
url(r'^group/(?P<entity_group_id>\d+)$',
'group.entity_group_view', name='entity_group_view'),
url(r'^group/(?P<entity_group_id>\d+)/edit$',
'group.entity_group_edit', name='entity_group_edit'),
url(r'^group/(?P<entity_group_id>\d+)/remove$',
'group.entity_group_remove', name='entity_group_remove'),
# Entity basic views
url(r'^(?P<entity_id>\d+)$', 'entity_view',
name='entity_view'),
url(r'^(?P<entity_id>\d+)/remove/$', 'entity_remove',
name='entity_remove'),
url(r'^(?P<domain_name>\w+)/add$', 'entity_add_with_domain',
name='entity_add_with_domain'),
# Metadata views
url(r'^(?P<entity_id>\d+)/edit_metadata/$',
'metadata.edit_metadata', name='edit_metadata'),
url(r'^(?P<entity_id>\d+)/text_edit_metadata/$',
'metadata.text_edit_metadata', name='text_edit_metadata'),
url(r'^(?P<entity_id>\d+)/file_edit_metadata/$',
'metadata.file_edit_metadata', name='file_edit_metadata'),
url(r'^(?P<entity_id>\d+)/remote_edit_metadata/$',
'metadata.remote_edit_metadata', name='remote_edit_metadata'),
# Team views
url(r'^(?P<entity_id>\d+)/sharing/$',
'teams.sharing', name='sharing'),
url(r'^(?P<entity_id>\d+)/list_delegates/$',
'teams.list_delegates', name='list_delegates'),
url(r'^(?P<entity_id>\d+)/make_owner/$',
'teams.make_owner', name='make_owner'),
url(r'^(?P<entity_id>\d+)/remove_delegate/(?P<user_id>\d+)$',
'teams.remove_delegate', name='remove_delegate'),
url(r'^(?P<entity_id>\d+)/add_delegate/(?P<username>.+)$',
'teams.add_delegate', name='add_delegate'),
# Metarefresh views
url(r'^(?P<entity_id>\d+)/edit_metarefresh/$',
'metadata_utils.metarefresh_edit', name='metarefresh_edit'),
# Monitor endpoint views
url(r'^(?P<entity_id>\d+)/monitoring_prefs/$',
'metadata_utils.monitoring_prefs', name='monitoring_prefs'),
# Metadata revision views
url(r'^(?P<entity_id>\d+)/get_diff/(?P<r1>\w+)/(?P<r2>\w+)$',
'revisions.get_diff', name='get_diff'),
url(r'^(?P<entity_id>\d+)/get_revision/(?P<rev>\w+)$',
'revisions.get_revision', name='get_revision'),
url(r'^(?P<entity_id>\d+)/latest_metadata/$',
'revisions.get_latest_metadata', name='get_latest_metadata'),
# CSS with highlight colors
url(r'^pygments.css$', 'revisions.get_pygments_css',
name='get_pygments_css'),
# Entity feed
url(r'^(?P<entity_id>\d+)/rss$', ChangesFeed(), name='changes_feed'),
)
|
[
"peer.entity.feeds.ChangesFeed",
"peer.entity.feeds.EntitiesFeed",
"django.conf.urls.url"
] |
[((1684, 1732), 'django.conf.urls.url', 'url', (['"""^$"""', '"""entities_list"""'], {'name': '"""entities_list"""'}), "('^$', 'entities_list', name='entities_list')\n", (1687, 1732), False, 'from django.conf.urls import patterns, url\n'), ((1796, 1841), 'django.conf.urls.url', 'url', (['"""^add$"""', '"""entity_add"""'], {'name': '"""entity_add"""'}), "('^add$', 'entity_add', name='entity_add')\n", (1799, 1841), False, 'from django.conf.urls import patterns, url\n'), ((1867, 1932), 'django.conf.urls.url', 'url', (['"""^search$"""', '"""search.search_entities"""'], {'name': '"""search_entities"""'}), "('^search$', 'search.search_entities', name='search_entities')\n", (1870, 1932), False, 'from django.conf.urls import patterns, url\n'), ((1958, 2027), 'django.conf.urls.url', 'url', (['"""^group/add$"""', '"""group.entity_group_add"""'], {'name': '"""entity_group_add"""'}), "('^group/add$', 'group.entity_group_add', name='entity_group_add')\n", (1961, 2027), False, 'from django.conf.urls import patterns, url\n'), ((2042, 2140), 'django.conf.urls.url', 'url', (['"""^group/(?P<entity_group_id>\\\\d+)$"""', '"""group.entity_group_view"""'], {'name': '"""entity_group_view"""'}), "('^group/(?P<entity_group_id>\\\\d+)$', 'group.entity_group_view', name=\n 'entity_group_view')\n", (2045, 2140), False, 'from django.conf.urls import patterns, url\n'), ((2149, 2251), 'django.conf.urls.url', 'url', (['"""^group/(?P<entity_group_id>\\\\d+)/edit$"""', '"""group.entity_group_edit"""'], {'name': '"""entity_group_edit"""'}), "('^group/(?P<entity_group_id>\\\\d+)/edit$', 'group.entity_group_edit',\n name='entity_group_edit')\n", (2152, 2251), False, 'from django.conf.urls import patterns, url\n'), ((2261, 2369), 'django.conf.urls.url', 'url', (['"""^group/(?P<entity_group_id>\\\\d+)/remove$"""', '"""group.entity_group_remove"""'], {'name': '"""entity_group_remove"""'}), "('^group/(?P<entity_group_id>\\\\d+)/remove$', 'group.entity_group_remove',\n name='entity_group_remove')\n", (2264, 2369), False, 'from django.conf.urls import patterns, url\n'), ((2405, 2468), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)$"""', '"""entity_view"""'], {'name': '"""entity_view"""'}), "('^(?P<entity_id>\\\\d+)$', 'entity_view', name='entity_view')\n", (2408, 2468), False, 'from django.conf.urls import patterns, url\n'), ((2482, 2557), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)/remove/$"""', '"""entity_remove"""'], {'name': '"""entity_remove"""'}), "('^(?P<entity_id>\\\\d+)/remove/$', 'entity_remove', name='entity_remove')\n", (2485, 2557), False, 'from django.conf.urls import patterns, url\n'), ((2571, 2667), 'django.conf.urls.url', 'url', (['"""^(?P<domain_name>\\\\w+)/add$"""', '"""entity_add_with_domain"""'], {'name': '"""entity_add_with_domain"""'}), "('^(?P<domain_name>\\\\w+)/add$', 'entity_add_with_domain', name=\n 'entity_add_with_domain')\n", (2574, 2667), False, 'from django.conf.urls import patterns, url\n'), ((2698, 2794), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)/edit_metadata/$"""', '"""metadata.edit_metadata"""'], {'name': '"""edit_metadata"""'}), "('^(?P<entity_id>\\\\d+)/edit_metadata/$', 'metadata.edit_metadata', name=\n 'edit_metadata')\n", (2701, 2794), False, 'from django.conf.urls import patterns, url\n'), ((2803, 2913), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)/text_edit_metadata/$"""', '"""metadata.text_edit_metadata"""'], {'name': '"""text_edit_metadata"""'}), "('^(?P<entity_id>\\\\d+)/text_edit_metadata/$',\n 
'metadata.text_edit_metadata', name='text_edit_metadata')\n", (2806, 2913), False, 'from django.conf.urls import patterns, url\n'), ((2923, 3033), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)/file_edit_metadata/$"""', '"""metadata.file_edit_metadata"""'], {'name': '"""file_edit_metadata"""'}), "('^(?P<entity_id>\\\\d+)/file_edit_metadata/$',\n 'metadata.file_edit_metadata', name='file_edit_metadata')\n", (2926, 3033), False, 'from django.conf.urls import patterns, url\n'), ((3043, 3159), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)/remote_edit_metadata/$"""', '"""metadata.remote_edit_metadata"""'], {'name': '"""remote_edit_metadata"""'}), "('^(?P<entity_id>\\\\d+)/remote_edit_metadata/$',\n 'metadata.remote_edit_metadata', name='remote_edit_metadata')\n", (3046, 3159), False, 'from django.conf.urls import patterns, url\n'), ((3187, 3257), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)/sharing/$"""', '"""teams.sharing"""'], {'name': '"""sharing"""'}), "('^(?P<entity_id>\\\\d+)/sharing/$', 'teams.sharing', name='sharing')\n", (3190, 3257), False, 'from django.conf.urls import patterns, url\n'), ((3271, 3367), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)/list_delegates/$"""', '"""teams.list_delegates"""'], {'name': '"""list_delegates"""'}), "('^(?P<entity_id>\\\\d+)/list_delegates/$', 'teams.list_delegates', name=\n 'list_delegates')\n", (3274, 3367), False, 'from django.conf.urls import patterns, url\n'), ((3376, 3455), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)/make_owner/$"""', '"""teams.make_owner"""'], {'name': '"""make_owner"""'}), "('^(?P<entity_id>\\\\d+)/make_owner/$', 'teams.make_owner', name='make_owner')\n", (3379, 3455), False, 'from django.conf.urls import patterns, url\n'), ((3469, 3584), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)/remove_delegate/(?P<user_id>\\\\d+)$"""', '"""teams.remove_delegate"""'], {'name': '"""remove_delegate"""'}), "('^(?P<entity_id>\\\\d+)/remove_delegate/(?P<user_id>\\\\d+)$',\n 'teams.remove_delegate', name='remove_delegate')\n", (3472, 3584), False, 'from django.conf.urls import patterns, url\n'), ((3593, 3698), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)/add_delegate/(?P<username>.+)$"""', '"""teams.add_delegate"""'], {'name': '"""add_delegate"""'}), "('^(?P<entity_id>\\\\d+)/add_delegate/(?P<username>.+)$',\n 'teams.add_delegate', name='add_delegate')\n", (3596, 3698), False, 'from django.conf.urls import patterns, url\n'), ((3734, 3844), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)/edit_metarefresh/$"""', '"""metadata_utils.metarefresh_edit"""'], {'name': '"""metarefresh_edit"""'}), "('^(?P<entity_id>\\\\d+)/edit_metarefresh/$',\n 'metadata_utils.metarefresh_edit', name='metarefresh_edit')\n", (3737, 3844), False, 'from django.conf.urls import patterns, url\n'), ((3884, 3994), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)/monitoring_prefs/$"""', '"""metadata_utils.monitoring_prefs"""'], {'name': '"""monitoring_prefs"""'}), "('^(?P<entity_id>\\\\d+)/monitoring_prefs/$',\n 'metadata_utils.monitoring_prefs', name='monitoring_prefs')\n", (3887, 3994), False, 'from django.conf.urls import patterns, url\n'), ((4035, 4141), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)/get_diff/(?P<r1>\\\\w+)/(?P<r2>\\\\w+)$"""', '"""revisions.get_diff"""'], {'name': '"""get_diff"""'}), "('^(?P<entity_id>\\\\d+)/get_diff/(?P<r1>\\\\w+)/(?P<r2>\\\\w+)$',\n 'revisions.get_diff', name='get_diff')\n", (4038, 4141), False, 
'from django.conf.urls import patterns, url\n'), ((4149, 4255), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)/get_revision/(?P<rev>\\\\w+)$"""', '"""revisions.get_revision"""'], {'name': '"""get_revision"""'}), "('^(?P<entity_id>\\\\d+)/get_revision/(?P<rev>\\\\w+)$',\n 'revisions.get_revision', name='get_revision')\n", (4152, 4255), False, 'from django.conf.urls import patterns, url\n'), ((4264, 4374), 'django.conf.urls.url', 'url', (['"""^(?P<entity_id>\\\\d+)/latest_metadata/$"""', '"""revisions.get_latest_metadata"""'], {'name': '"""get_latest_metadata"""'}), "('^(?P<entity_id>\\\\d+)/latest_metadata/$',\n 'revisions.get_latest_metadata', name='get_latest_metadata')\n", (4267, 4374), False, 'from django.conf.urls import patterns, url\n'), ((4417, 4493), 'django.conf.urls.url', 'url', (['"""^pygments.css$"""', '"""revisions.get_pygments_css"""'], {'name': '"""get_pygments_css"""'}), "('^pygments.css$', 'revisions.get_pygments_css', name='get_pygments_css')\n", (4420, 4493), False, 'from django.conf.urls import patterns, url\n'), ((1753, 1767), 'peer.entity.feeds.EntitiesFeed', 'EntitiesFeed', ([], {}), '()\n', (1765, 1767), False, 'from peer.entity.feeds import EntitiesFeed, ChangesFeed\n'), ((4560, 4573), 'peer.entity.feeds.ChangesFeed', 'ChangesFeed', ([], {}), '()\n', (4571, 4573), False, 'from peer.entity.feeds import EntitiesFeed, ChangesFeed\n')]
|
import json
# From Marseille
class CDCPArgumentationDoc:
def __init__(self, file_root, merge_consecutive_spans=True):
self.doc_id = int(file_root[-5:])
self._ann_path = file_root + ".ann.json"
with open(file_root + ".txt") as f:
self.raw_text = f.read()
# annotation is always loaded
try:
with open(self._ann_path, encoding="utf8") as f:
ann = json.load(f)
self.url = {int(key): val for key, val in ann['url'].items()}
self.prop_labels = ann['prop_labels']
self.prop_offsets = [(int(a), int(b))
for a, b in ann['prop_offsets']]
self.reasons = [((int(a), int(b)), int(c), 'reason')
for (a, b), c in ann['reasons']]
self.evidences = [((int(a), int(b)), int(c), 'evidence')
for (a, b), c in ann['evidences']]
self.links = self.reasons + self.evidences
except FileNotFoundError:
raise FileNotFoundError("Annotation json not found at {}"
.format(self._ann_path))
if merge_consecutive_spans:
merge_spans(self)
self.links = _transitive(self.links)
link_dict = {a: [] for (a, b, l_type) in self.links}
for (a, b, l_type) in self.links:
link_dict[a] += [{'link': b, 'type': l_type}]
self.links_dict = {a: {'link': b, 'type': l_type} for (a, b, l_type) in self.links}
self.links_lists = {'locate': [(src, trg) for (src, trg, l_type) in self.links],
'link': [l_type for (src, trg, l_type) in self.links]}
self.reasons = [(a, b) for (a, b, l_type) in self.links if l_type == 'reason']
self.evidences = [(a, b) for (a, b, l_type) in self.links if l_type == 'evidence']
# From Marseille
def merge_spans(doc, include_nonarg=True):
"""Normalization needed for CDCP data because of multi-prop spans"""
# flatten multi-prop src spans like (3, 6) into new propositions
# as long as they never overlap with other links. This inevitably will
# drop some data but it's a very small number.
# function fails if called twice because
# precondition: doc.links = [((i, j), k)...]
# postcondition: doc.links = [(i, k)...]
new_links = []
new_props = {}
new_prop_offsets = {}
dropped = 0
for (start, end), trg, l_type in doc.links:
if start == end:
new_props[start] = (start, end)
new_prop_offsets[start] = doc.prop_offsets[start]
new_props[trg] = (trg, trg)
new_prop_offsets[trg] = doc.prop_offsets[trg]
new_links.append((start, trg, l_type))
elif start < end:
# multi-prop span. Check for problems:
problems = []
for (other_start, other_end), other_trg, other_l_type in doc.links:
if start == other_start and end == other_end:
continue
# another link coming out of a subset of our span
if start <= other_start <= other_end <= end:
problems.append(((other_start, other_end), other_trg))
# another link coming into a subset of our span
if start <= other_trg <= end:
problems.append(((other_start, other_end), other_trg))
if not len(problems):
if start in new_props:
assert (start, end) == new_props[start]
new_props[start] = (start, end)
new_prop_offsets[start] = (doc.prop_offsets[start][0],
doc.prop_offsets[end][1])
new_props[trg] = (trg, trg)
new_prop_offsets[trg] = doc.prop_offsets[trg]
new_links.append((start, trg, l_type))
else:
# Since we drop the possibly NEW span, there is no need
# to remove any negative links.
dropped += 1
if include_nonarg:
used_props = set(k for a, b in new_props.values()
for k in range(a, b + 1))
for k in range(len(doc.prop_offsets)):
if k not in used_props:
new_props[k] = (k, k)
new_prop_offsets[k] = doc.prop_offsets[k]
mapping = {key: k for k, key in enumerate(sorted(new_props))}
props = [val for _, val in sorted(new_props.items())]
doc.prop_offsets = [val for _, val in sorted(new_prop_offsets.items())]
doc.links = [(mapping[src], mapping[trg], l_type) for src, trg, l_type in new_links]
doc.prop_labels = [merge_prop_labels(doc.prop_labels[a:1 + b])
for a, b in props]
return doc
# From Marseille
def merge_prop_labels(labels):
"""After joining multiple propositions, we need to decide the new type.
Rules:
1. if the span is a single prop, keep the label
2. if the span props have the same type, use that type
3. Else, rules from Jon: policy>value>testimony>reference>fact
"""
if len(labels) == 1:
return labels[0]
labels = set(labels)
if len(labels) == 1:
return next(iter(labels))
if 'policy' in labels:
return 'policy'
elif 'value' in labels:
return 'value'
elif 'testimony' in labels:
return 'testimony'
elif 'reference' in labels:
return 'reference'
elif 'fact' in labels:
return 'fact'
else:
raise ValueError("weird labels: {}".format(" ".join(labels)))
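# Quick illustration of the precedence rules above (editor's note):
#   merge_prop_labels(['fact', 'value'])           -> 'value'
#   merge_prop_labels(['testimony', 'testimony'])  -> 'testimony'
#   merge_prop_labels(['policy', 'fact', 'value']) -> 'policy'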
# From Marseille
def _transitive(links):
"""perform transitive closure of links.
For input [(1, 2), (2, 3)] the output is [(1, 2), (2, 3), (1, 3)]
"""
links = set(links)
while True:
new_links = [(src_a, trg_b, l_type_a)
for src_a, trg_a, l_type_a in links
for src_b, trg_b, l_type_b in links
if trg_a == src_b
and l_type_a == l_type_b
and (src_a, trg_b, l_type_a) not in links]
if new_links:
links.update(new_links)
else:
break
return links
|
[
"json.load"
] |
[((434, 446), 'json.load', 'json.load', (['f'], {}), '(f)\n', (443, 446), False, 'import json\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
# @author : <NAME>
# @Email : <EMAIL>
# @Project : Python_Files
# @File : utils.py
# @Software: PyCharm
# @Time : 2021/5/20 7:42 PM
"""
import os
import struct
import sys
import time
import traceback
from datetime import datetime
from pathlib import Path
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
pd.set_option("display.max_columns", None)
# Similarly, we can set the maximum number of rows to display
pd.set_option("display.max_rows", None)
# function: byte2int
def byte2int(data, mode="u16"):
dbyte = bytearray(data)
darray = []
i = 0
while i < len(dbyte):
if "u8" == mode:
darray.append(dbyte[i])
i = i + 1
elif "u16" == mode:
darray.append(dbyte[i] | dbyte[i + 1] << 8)
i = i + 2
return darray
# end: byte2int
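# Example (editor's note): byte2int(b"\x01\x02\x03\x04", mode="u16") -> [513, 1027],
# since each pair is read little-endian: 0x01 | 0x02 << 8 = 513 and 0x03 | 0x04 << 8 = 1027.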
# function: byte2float
def byte2float(data, mode="float"):
darray = []
i = 0
if "float" == mode:
while i < len(data):
fx = struct.unpack("f", data[i : i + 4])
darray.append(fx)
i = i + 4
elif "double" == mode:
while i < len(data):
dx = struct.unpack("d", data[i : i + 8])
darray.append(dx)
i = i + 8
return darray
# end: byte2float
def read_bytefile(path, folder, file, mode="u8"):
fname = path + folder + file
f = open(fname, "rb")
dtmp = f.read()
global rslt
if "u8" == mode:
rslt = byte2int(dtmp, mode="u8")
if "u16" == mode:
rslt = byte2int(dtmp, mode="u16")
if "float" == mode:
rslt = byte2float(dtmp, mode="float")
if "double" == mode:
rslt = byte2float(dtmp, mode="double")
return rslt
# Write one row of data into the sheet
def insertOne(value, sheet):
sheet.append(value)
def read_raw(src_dir, fname):
bcg, gain = [], []
fname = src_dir + fname
f = open(fname, "rb")
dtmp = f.read()
dbyte = bytearray(dtmp)
i = 0
while i < len(dbyte):
bcg.append(dbyte[i] | dbyte[i + 1] << 8)
gain.append(dbyte[i + 2])
i = i + 3
return bcg, gain
def read_wgt(src_dir, fname):
wgt = []
fname = src_dir + fname
f = open(fname, "rb")
dtmp = f.read()
dbyte = bytearray(dtmp)
i = 0
while i < len(dbyte):
wgt.append(dbyte[i + 1] | dbyte[i] << 8)
i = i + 2
return wgt
def time2stamp(cmnttime):  # convert a time string to a Unix timestamp
    # parse into a time struct
timeArray = time.strptime(cmnttime, "%Y-%m-%d %H:%M:%S")
    # convert the struct to a timestamp
timeStamp = int(time.mktime(timeArray))
return timeStamp
def stamp2time(timeStamp):
timeArray = time.localtime(timeStamp)
otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
return otherStyleTime
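# Round-trip example (editor's note; the exact integer depends on the local timezone):
#   ts = time2stamp("2021-05-20 19:42:00")   # e.g. 1621510920 on a UTC+8 machine
#   stamp2time(ts)                           # -> "2021-05-20 19:42:00"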
def day2stamp(cmnttime):  # convert a date string to a Unix timestamp
    # parse into a time struct
timeArray = time.strptime(cmnttime, "%Y-%m-%d")
    # convert the struct to a timestamp
timeStamp = int(time.mktime(timeArray))
return timeStamp
def stamp2day(timeStamp):
timeArray = time.localtime(timeStamp)
otherStyleTime = time.strftime("%Y-%m-%d", timeArray)
return otherStyleTime
def hour2stamp(cmnttime):  # convert a date-hour string to a Unix timestamp
    # parse into a time struct
timeArray = time.strptime(cmnttime, "%Y-%m-%d %H:%M")
    # convert the struct to a timestamp
timeStamp = int(time.mktime(timeArray))
return timeStamp
def stamp2hour(timeStamp):
timeArray = time.localtime(timeStamp)
otherStyleTime = time.strftime("%Y-%m-%d %H:%M", timeArray)
return otherStyleTime
def time2datetime(tranTime, pList):
tdelta, startstamp = 60, int(time2stamp(tranTime))
t = [datetime.fromtimestamp(startstamp + t * tdelta) for t in range(len(pList))]
return t
def time_formattime(pList):
famTime = [datetime.fromisoformat(t) for t in pList]
return famTime
def quest_time_extract(num_spl, quest_outbed, slp_awTim):
num_slp0 = num_spl[0]
num_slp2 = num_spl[:2]
aslp_day = stamp2day(day2stamp(slp_awTim) - 86400)
awak_day = slp_awTim
if len(num_spl) == 6:
outbed_stamp = "0" + num_spl[0] + ":" + num_spl[1:3] + ":00"
if int(num_slp0) >= 19 and int(num_slp0) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp0) >= 0 and int(num_slp0) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 4:
outbed_stamp = num_spl[:2] + ":" + num_spl[2:] + ":00"
if int(num_slp2) >= 19 and int(num_slp2) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp2) >= 0 and int(num_slp2) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 3:
outbed_stamp = "0" + num_spl[0] + ":" + num_spl[1:] + ":00"
if int(num_slp0) >= 19 and int(num_slp0) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp0) >= 0 and int(num_slp0) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 2:
outbed_stamp = "0" + num_spl[0] + ":" + "00" + ":00"
if int(num_slp0) >= 19 and int(num_slp0) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp0) >= 0 and int(num_slp0) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 1:
outbed_stamp = "0" + num_spl + ":" + "00" + ":00"
if int(num_spl) >= 19 and int(num_spl) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_spl) >= 0 and int(num_spl) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
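# Worked example of the branching above (editor's note, values are hypothetical):
# with num_spl = "2230" (length 4) and slp_awTim = "2021-05-20", the leading "22"
# falls in the 19-23 evening range, so "2021-05-19 22:30:00" is appended to
# quest_outbed (the evening of the day before the wake-up date).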
def diff_acl(slpList, psgList):
fslp_diff = int(abs(time2stamp(str(psgList)) - time2stamp(str(slpList))) / 60)
return fslp_diff
def num_pop(num1: list, num2: list):
if len(num1) > len(num2):
lenDiff = len(num1) - len(num2)
for i in range(lenDiff):
num1.pop()
elif len(num2) > len(num1):
lenDiff = len(num2) - len(num1)
for i in range(lenDiff):
num2.pop()
def num3_pop(num1: list, num2: list, num3: list):
num2 = [str(i) for i in range(len(num2))]
num3 = [str(i) for i in range(len(num3))]
maxLen = max(len(num1), len(num2), len(num3))
minLen = min(len(num1), len(num2), len(num3))
plen = maxLen - minLen
new_num1, new_num2, new_num3 = 0, 0, 0
for i in range(maxLen):
if len(num1) == maxLen:
new_num1 = num1[:-plen]
elif len(num2) == maxLen:
new_num2 = num2[:-plen]
elif len(num3) == maxLen:
new_num3 = num3[:-plen]
return new_num1, new_num2, new_num3
def len_compare(pr_list: list, rr_list: list):
if len(pr_list) > len(rr_list):
return len(rr_list)
elif len(pr_list) < len(rr_list):
return len(pr_list)
def path_concat(sub_dir, pathName):
_path = str(sub_dir.joinpath(pathName)) + "/"
return _path
def is_empty_file_3(file_path: str):
assert isinstance(file_path, str), f"file_path参数类型不是字符串类型: {type(file_path)}"
p = Path(file_path)
assert p.is_file(), f"file_path不是一个文件: {file_path}"
return p.stat().st_size == 0
def dir_empty(dir_path):
try:
next(os.scandir(dir_path))
return False
except StopIteration:
return True
def select_num(df1, df2):
# num_requried = 0
hr_lower_limit = df1["hr"].map(lambda x: x != 0)
hr_upper_limit = df1["hr"].map(lambda x: x != 255)
br_lower_limit = df1["br"].map(lambda x: x != 0)
br_upper_limit = df1["br"].map(lambda x: x != 255)
pr_lower_limit = df2["pr"].map(lambda x: x != 0)
pr_upper_limit = df2["pr"].map(lambda x: x != 255)
rr_lower_limit = df2["rr"].map(lambda x: x != 0)
rr_upper_limit = df2["rr"].map(lambda x: x != 255)
df1 = df1[
(hr_lower_limit & hr_upper_limit & br_lower_limit & br_upper_limit)
& (pr_lower_limit & pr_upper_limit & rr_lower_limit & rr_upper_limit)
]
df2 = df2[
(hr_lower_limit & hr_upper_limit & br_lower_limit & br_upper_limit)
& (pr_lower_limit & pr_upper_limit & rr_lower_limit & rr_upper_limit)
]
    df1 = df1.reset_index(drop=True)  # reset the index
    df2 = df2.reset_index(drop=True)  # reset the index
return df1, df2
def minute_mean(df, cname, stime):
    # compute the per-minute SLP heart rate / respiration rate
hr_min_list = []
slp_time_min_list = []
    df_min = int(len(df[cname]) / 60)  # total number of minutes of data
for i in range(df_min):
hr_min_len = (i + 1) * 60
num = 0
temp = 0
slp_time_min = stime + hr_min_len
for j in df[cname][hr_min_len - 60 : hr_min_len]:
if j != 0 and j != 255:
num += 1
temp += j
if num > 0:
res = int(temp / num)
hr_min_list.append(res)
if num == 0:
hr_min_list.append(0)
slp_time_min_list.append(slp_time_min)
# rslt = {'time':slp_time_min_list,'hr':hr_min_list,'br':br_min_list}
# df_clean = pd.DataFrame(data=rslt)
return slp_time_min_list, hr_min_list
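# Behaviour note (editor's addition): minute_mean averages each 60-sample block,
# treating 0 and 255 as invalid readings; a minute with no valid samples is
# reported as 0, and the matching per-minute timestamps are returned alongside.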
def file_exist(my_file):
txt_list = []
if Path(my_file).is_file() is False:
Path(my_file).touch()
return txt_list
def Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv):
PR = PR[PR.map(lambda x: x > 0)]
HR = HR[HR.map(lambda x: x > 0)]
    PR = PR.reset_index(drop=True)  # reset the index
    HR = HR.reset_index(drop=True)  # reset the index
diff_hr = PR - HR
diff_hr_cnt = 0
try:
diff_hr_pre = abs(diff_hr) / PR
diff_hr_pre = diff_hr_pre.dropna()
diff_hr_pre = diff_hr_pre * 100
for i, val in enumerate(diff_hr):
if i <= len(PR):
if abs(val) <= PR[i] * 0.1 or abs(val) <= 5:
diff_hr_cnt += 1
hr_mean = round(np.mean(abs(diff_hr)), 2)
hr_std = round(np.std(abs(diff_hr), ddof=1), 2)
if len(diff_hr_pre) == 0:
print(traceback.print_exc())
else:
acc_hr = diff_hr_cnt / len(diff_hr_pre)
txt_content = (
fcsv
+ " 心率准确性[%d / %d]: %.2f %%"
% (
diff_hr_cnt,
len(diff_hr_pre),
round(acc_hr * 100, 2),
)
+ " 心率误差:",
str(hr_mean) + "±" + str(hr_std),
)
f = open(src_txt + "accuracy.txt", "a")
f.write((str(txt_content) + "\r"))
return acc_hr
except Exception as exc:
print(exc)
print(traceback.print_exc())
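# Worked example of the acceptance rule above (editor's note): with PR = 70 and
# HR = 64 the absolute error is 6 bpm, above the 5 bpm floor but within 10% of
# PR (7 bpm), so the sample counts as accurate; PR = 70 vs HR = 60 (10 bpm) does not.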
def Respiration_rate_accuracy_calculat(RR, br, src_txt, fcsv):
RR = RR[RR.map(lambda x: x > 0)]
br = br[br.map(lambda x: x > 0)]
    RR = RR.reset_index(drop=True)  # reset the index
    br = br.reset_index(drop=True)  # reset the index
try:
        # compute respiration-rate accuracy
diff_br_pre = abs(RR - br)
diff_br_pre = diff_br_pre.dropna()
diff_br_cnt = 0
for i in diff_br_pre:
if i <= 2:
diff_br_cnt += 1
br_mean = round(np.mean(abs(diff_br_pre)), 2)
br_std = round(np.std(abs(diff_br_pre), ddof=1), 2)
if len(diff_br_pre) == 0:
print(traceback.print_exc())
else:
acc_br = diff_br_cnt / len(diff_br_pre)
txt_content = (
fcsv
+ " 呼吸率准确性[%d / %d]: %.2f %%"
% (
diff_br_cnt,
len(diff_br_pre),
round(acc_br * 100, 2),
)
+ " 呼吸率误差:",
str(br_mean) + "±" + str(br_std),
)
f = open(src_txt + "accuracy.txt", "a")
f.write((str(txt_content) + "\r"))
return acc_br
except Exception as exc:
print(exc)
print(traceback.print_exc())
def draw_PR_save(PR, slp_hr, time_offset, img_dir, fcsv, acc_flag):
    # plotting
mpl.rcParams["font.sans-serif"] = ["SimHei"]
mpl.rcParams["axes.unicode_minus"] = False
    # configure the x-axis date display: format and tick interval
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y%m/%d %H:%M:%S"))
plt.gca().xaxis.set_major_locator(mdates.MinuteLocator(interval=15))
if len(PR) > len(time_offset):
PR = PR[:-1]
ax1 = plt.subplot(412)
plt.plot(time_offset, PR, "r-", label="PSG")
plt.plot(time_offset, slp_hr, "b-", label="智能枕头")
plt.title("心率对比(bpm)", fontsize=9)
plt.legend(loc="upper right")
plt.setp(ax1.get_xticklabels(), visible=False, fontsize=9)
# plt.xlim(time_offset[0], time_offset[-1])
plt.ylim(40, 100)
    f = plt.gcf()  # get the current figure
if acc_flag == 1:
f.savefig(img_dir + "err_img/" + fcsv + ".png", bbox_inches="tight")
elif acc_flag == 0:
f.savefig(img_dir + "nor_img/" + fcsv + ".png", bbox_inches="tight")
    f.clear()  # release memory
def draw_PR_RR_save(PR, RR, slp_hr, slp_br, time_offset, img_dir, fcsv, acc_flag):
    # plotting
mpl.rcParams["font.sans-serif"] = ["SimHei"]
mpl.rcParams["axes.unicode_minus"] = False
# fig.suptitle(fname)
# 配置横坐标日期显示#格式#间隔
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y%m/%d %H:%M:%S"))
plt.gca().xaxis.set_major_locator(mdates.MinuteLocator(interval=15))
if len(PR) > len(time_offset):
PR = PR[:-1]
if len(RR) > len(time_offset):
RR = RR[:-1]
print(len(time_offset), len(PR))
print(time_offset)
ax1 = plt.subplot(412)
plt.plot(time_offset, PR, "r-", label="PSG")
plt.plot(time_offset, slp_hr, "b-", label="智能枕头")
plt.title("心率对比(bpm)", fontsize=9)
plt.legend(loc="upper right")
plt.setp(ax1.get_xticklabels(), visible=False, fontsize=9)
# plt.xlim(time_offset[0], time_offset[-1])
plt.ylim(40, 100)
ax2 = plt.subplot(413, sharex=ax1)
plt.plot(time_offset, RR, "r-", label="PSG")
plt.plot(time_offset, slp_br, "b-", label="智能枕头")
plt.title("呼吸率对比(rpm)", fontsize=9)
plt.legend(loc="upper right")
plt.setp(ax2.get_xticklabels(), visible=True, fontsize=9)
plt.xticks()
# plt.xlim(time_offset[0], time_offset[-1])
plt.ylim(5, 35)
    f = plt.gcf()  # get the current figure
if acc_flag == 1:
f.savefig(img_dir + "err_img/" + fcsv + ".png", bbox_inches="tight")
elif acc_flag == 0:
f.savefig(img_dir + "nor_img/" + fcsv + ".png", bbox_inches="tight")
# f.figlegend()
    f.clear()  # clear the figure to release memory
def slp_hr_br_transfrom(cat_dir, save_dir, flag):
    # convert batched SLP simulation output (hr_sec/ and br_sec/ byte files) into csv files
flist = os.listdir(cat_dir + "hr_sec/")
for fcsv in flist[:]:
fname = fcsv.split(".")[0]
hr_list = read_bytefile(cat_dir, "hr_sec/", fcsv, mode="u8")
br_list = read_bytefile(cat_dir, "br_sec/", fcsv, mode="u8")
startstamp = int(fcsv.split("_")[-1].split(".")[0])
time_list = [startstamp + t for t in range(len(hr_list))]
if flag == 0:
rslt = {"time": time_list, "heart_rate": hr_list}
df = pd.DataFrame(data=rslt)
df.to_csv(
(save_dir + fname + ".csv"), index=False, header=["time", "heart_rate"]
)
elif flag == 1:
rslt = {"time": time_list, "breath_rate": br_list}
df = pd.DataFrame(data=rslt)
df.to_csv(
(save_dir + fname + ".csv"), index=False, header=["time", "breath_rate"]
)
elif flag == 2:
rslt = {"time": time_list, "heart_rate": hr_list, "breath_rate": br_list}
df = pd.DataFrame(data=rslt)
df.to_csv(
(save_dir + fname + ".csv"),
index=False,
header=["time", "heart_rate", "breath_rate"],
)
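# Hedged usage sketch (hypothetical paths): flag 0 exports heart rate only, flag 1
# exports breath rate only, flag 2 exports both columns per file.
#   slp_hr_br_transfrom("./slp_raw/", "./slp_csv/", flag=2)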
def psg_slp_heart_cal(src_slp, src_psg, src_txt, src_img):
"""心率准确性脚本计算"""
slp_flist = os.listdir(src_slp)
psg_flist = os.listdir(src_psg)
txt_list = []
my_file = src_txt + "setime.txt"
for i, fcsv in enumerate(slp_flist):
simg_name = fcsv.split(".")[0]
data_slp = pd.read_csv(src_slp + fcsv)
print(fcsv, psg_flist[i])
data_psg = pd.read_csv(src_psg + psg_flist[i])
data_slp.columns = ["time", "hr"]
data_psg.columns = ["time", "pr"]
time_set = [
data_slp["time"].tolist()[0],
time2stamp(data_psg["time"].tolist()[0]),
data_slp["time"].tolist()[-1],
time2stamp(data_psg["time"].tolist()[-1]),
]
start_time = time_set[0] - time_set[1]
end_time = time_set[2] - time_set[3]
if start_time < 0:
file_start = time_set[1]
else:
file_start = time_set[0]
if end_time < 0:
file_end = time_set[2]
else:
file_end = time_set[3]
data_psg["timestamp"] = time2stamp(data_psg["time"])
print(
"开始区间:", file_start, "结束区间:", file_end, "公共区间长度:", (file_end - file_start)
)
slp_sind = data_slp[data_slp["time"] == file_start].index.tolist()[0]
slp_eind = data_slp[data_slp["time"] == file_end].index.tolist()[0]
slp_clist = data_slp[slp_sind : slp_eind + 1]
psg_sind = data_psg[data_psg["timestamp"] == file_start].index.tolist()[0]
psg_eind = data_psg[data_psg["timestamp"] == file_end].index.tolist()[0]
psg_clist = data_psg[psg_sind : psg_eind + 1]
hr_time, hr_list = minute_mean(slp_clist, "hr", file_start)
pr_time, pr_list = minute_mean(psg_clist, "pr", file_start)
rslt_slp = {"time": hr_time, "hr": hr_list}
clean_slp = pd.DataFrame(data=rslt_slp)
rslt_psg = {"time": pr_time, "pr": pr_list}
clean_psg = pd.DataFrame(data=rslt_psg)
time = clean_slp["time"]
HR = clean_slp["hr"]
PR = clean_psg["pr"]
acc_hr = Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv)
time_offset = [datetime.fromtimestamp(i) for i in time]
        # prepare the raw SLP heart-rate series for plotting
slp_hr = pd.Series(list(HR), index=time_offset)
if len(time_offset) > 0:
acc_flag = 0
            if acc_hr is not None and acc_hr < 0.9:
acc_flag = 1
draw_PR_save(PR, slp_hr, time_offset, src_img, simg_name, acc_flag)
else:
draw_PR_save(PR, slp_hr, time_offset, src_img, simg_name, acc_flag)
if Path(my_file).is_file() is False:
Path(my_file).touch()
if Path(my_file).exists():
size = os.path.getsize(my_file)
if size > 100:
os.remove(my_file)
Path(my_file).touch()
elif size == 0:
time_diff = file_end - file_start
txt_content = (
fcsv
+ " 起始时间:"
+ str(file_start)
+ " 结束时间:"
+ str(file_end)
+ " 时间长度:"
+ str(time_diff)
)
txt_list.append(txt_content)
for i, val in enumerate(txt_list):
f = open(my_file, "a")
f.write((str(val) + "\r"))
f.close()
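# Hedged usage sketch (hypothetical paths): aligns the overlapping time window of
# each SLP csv with its PSG csv, computes per-minute means, appends results to
# accuracy.txt / setime.txt and saves comparison plots under err_img/ or nor_img/.
#   psg_slp_heart_cal("./slp_csv/", "./psg_csv/", "./txt/", "./img/")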
def psg_slp_heart_breath_cal(src_slp, src_psg, src_txt, src_img, flag):
"""心率、呼吸率准确性计算脚本"""
if flag == 0:
slp_flist = os.listdir(src_slp)
psg_flist = os.listdir(src_psg)
slp_idList = [i.split(".")[0].split("_")[0] for i in slp_flist]
psg_idList = [i.split(".")[0].split("_")[0] for i in psg_flist]
txt_list = []
my_file = src_txt + "setime.txt"
for i, fcsv in enumerate(slp_flist):
# print(slp_idList[i],psg_idList[i])
j = psg_idList.index(slp_idList[i])
simg_name = fcsv.split(".")[0]
data_slp = pd.read_csv(src_slp + fcsv)
data_psg = pd.read_csv(src_psg + psg_flist[j])
data_slp.columns = ["time", "hr", "br"]
data_psg.columns = ["time", "pr", "rr"]
time_set = [
data_slp["time"].tolist()[0],
time2stamp(data_psg["time"].tolist()[0]),
data_slp["time"].tolist()[-1],
time2stamp(data_psg["time"].tolist()[-1]),
]
start_time = time_set[0] - time_set[1]
end_time = time_set[2] - time_set[3]
if start_time < 0:
file_start = time_set[1]
else:
file_start = time_set[0]
if end_time < 0:
file_end = time_set[2]
else:
file_end = time_set[3]
data_psg["timestamp"] = time2stamp(data_psg["time"])
print(
"开始区间:",
file_start,
"结束区间:",
file_end,
"公共区间长度:",
(file_end - file_start),
)
slp_sind = data_slp[data_slp["time"] == file_start].index.tolist()[0]
slp_eind = data_slp[data_slp["time"] == file_end].index.tolist()[0]
slp_clist = data_slp[slp_sind : slp_eind + 1]
psg_sind = data_psg[data_psg["timestamp"] == file_start].index.tolist()[0]
psg_eind = data_psg[data_psg["timestamp"] == file_end].index.tolist()[0]
psg_clist = data_psg[psg_sind : psg_eind + 1]
hr_time, hr_list = minute_mean(slp_clist, "hr", file_start)
br_time, br_list = minute_mean(slp_clist, "br", file_start)
pr_time, pr_list = minute_mean(psg_clist, "pr", file_start)
rr_time, rr_list = minute_mean(psg_clist, "rr", file_start)
rslt_slp = {"time": hr_time, "hr": hr_list, "br": br_list}
clean_slp = pd.DataFrame(data=rslt_slp)
rslt_psg = {"time": pr_time, "pr": pr_list, "rr": rr_list}
clean_psg = pd.DataFrame(data=rslt_psg)
time = clean_slp["time"]
HR = clean_slp["hr"]
PR = clean_psg["pr"]
BR = clean_slp["br"]
RR = clean_psg["rr"]
acc_hr = Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv)
acc_br = Respiration_rate_accuracy_calculat(RR, BR, src_txt, fcsv)
time_offset = [datetime.fromtimestamp(i) for i in time]
            # prepare the raw SLP heart-rate / respiration series for plotting
slp_hr = pd.Series(list(HR), index=time_offset)
slp_br = pd.Series(list(BR), index=time_offset)
if len(time_offset) > 0:
acc_flag = 0
if acc_hr is not None and acc_br is not None:
if acc_hr < 0.9 or acc_br < 0.9:
acc_flag = 1
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
else:
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
if Path(my_file).is_file() is False:
Path(my_file).touch()
if Path(my_file).exists():
size = os.path.getsize(my_file)
if size > 100:
os.remove(my_file)
Path(my_file).touch()
elif size == 0:
time_diff = file_end - file_start
txt_content = (
fcsv
+ " 起始时间:"
+ str(file_start)
+ " 结束时间:"
+ str(file_end)
+ " 时间长度:"
+ str(time_diff)
)
txt_list.append(txt_content)
for i, val in enumerate(txt_list):
f = open(my_file, "a")
f.write((str(val) + "\r"))
f.close()
elif flag == 1:
slp_flist = os.listdir(src_slp)
psg_flist = os.listdir(src_psg)
slp_idList = [i.split(".")[0].split("_")[0] for i in slp_flist]
psg_idList = [i.split(".")[0].split("_")[0].lstrip("0") for i in psg_flist]
txt_list = []
my_file = src_txt + "setime.txt"
for i, fcsv in enumerate(slp_flist):
j = psg_idList.index(slp_idList[i])
simg_name = fcsv.split(".")[0]
data_slp = pd.read_csv(src_slp + fcsv)
data_psg = pd.read_csv(src_psg + psg_flist[j])
data_slp.columns = ["time", "hr", "br"]
data_psg.columns = ["time", "pr", "rr"]
time_set = [
data_slp["time"].tolist()[0],
hour2stamp(data_psg["time"].tolist()[0]),
data_slp["time"].tolist()[-1],
hour2stamp(data_psg["time"].tolist()[-1]),
]
start_time = time_set[0] - time_set[1]
end_time = time_set[2] - time_set[3]
if start_time < 0:
file_start = time_set[1]
else:
file_start = time_set[0]
if end_time < 0:
file_end = time_set[2]
else:
file_end = time_set[3]
print(time_set[1], time_set[0])
# data_psg["timestamp"] = data_psg["time"].apply(lambda x: hour2stamp(x))
data_psg["timestamp"] = hour2stamp(data_psg["time"])
print(
"开始区间:",
file_start,
"结束区间:",
file_end,
"公共区间长度:",
(file_end - file_start),
)
slp_sind = data_slp[data_slp["time"] == file_start].index.tolist()[0]
slp_eind = data_slp[data_slp["time"] == file_end].index.tolist()[0]
slp_clist = data_slp[slp_sind : slp_eind + 1]
psg_sind = data_psg[data_psg["timestamp"] == file_start].index.tolist()[0]
psg_eind = data_psg[data_psg["timestamp"] == file_end].index.tolist()[0]
psg_clist = data_psg[psg_sind : psg_eind + 1]
hr_time, hr_list = minute_mean(slp_clist, "hr", file_start)
br_time, br_list = minute_mean(slp_clist, "br", file_start)
pr_time, pr_list = minute_mean(psg_clist, "pr", file_start)
rr_time, rr_list = minute_mean(psg_clist, "rr", file_start)
rslt_slp = {"time": hr_time, "hr": hr_list, "br": br_list}
clean_slp = pd.DataFrame(data=rslt_slp)
rslt_psg = {"time": pr_time, "pr": pr_list, "rr": rr_list}
clean_psg = pd.DataFrame(data=rslt_psg)
time = clean_slp["time"]
HR = clean_slp["hr"]
PR = clean_psg["pr"]
BR = clean_slp["br"]
RR = clean_psg["rr"]
acc_hr = Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv)
acc_br = Respiration_rate_accuracy_calculat(RR, BR, src_txt, fcsv)
time_offset = [datetime.fromtimestamp(i) for i in time]
            # prepare the raw SLP heart-rate / respiration series for plotting
slp_hr = pd.Series(list(HR), index=time_offset)
slp_br = pd.Series(list(BR), index=time_offset)
if len(time_offset) > 0:
acc_flag = 0
                if acc_hr is not None and acc_br is not None and (acc_hr < 0.9 or acc_br < 0.9):
acc_flag = 1
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
else:
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
if Path(my_file).is_file() is False:
Path(my_file).touch()
if Path(my_file).exists():
size = os.path.getsize(my_file)
if size > 100:
os.remove(my_file)
Path(my_file).touch()
elif size == 0:
time_diff = file_end - file_start
txt_content = (
fcsv
+ " 起始时间:"
+ str(file_start)
+ " 结束时间:"
+ str(file_end)
+ " 时间长度:"
+ str(time_diff)
)
txt_list.append(txt_content)
for i, val in enumerate(txt_list):
f = open(my_file, "a")
f.write((str(val) + "\r"))
f.close()
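# Hedged usage sketch (hypothetical paths): flag 0 matches SLP and PSG files by id
# and converts PSG timestamps with time2stamp; flag 1 strips leading zeros from the
# PSG ids and converts timestamps with hour2stamp instead.
#   psg_slp_heart_breath_cal("./slp_csv/", "./psg_csv/", "./txt/", "./img/", flag=0)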
def psg_rr_transfrom(cat_dir, save_dir):
    # convert batched PSG simulation output (br_sec/ byte files) into csv files
flist = os.listdir(cat_dir + "br_sec/")
for fcsv in flist[:]:
fname = fcsv.split(".")[0]
br_list = read_bytefile(cat_dir, "br_sec/", fcsv, mode="u8")
startstamp = int(fcsv.split("_")[-1].split(".")[0])
time_list = [startstamp + t for t in range(len(br_list))]
rslt = {"time": time_list, "breath_rate": br_list}
df = pd.DataFrame(data=rslt)
df.to_csv(
(save_dir + fname + ".csv"), index=False, header=["time", "breath_rate"]
)
def read_summary(path, folder, file):
fname = path + folder + file
f = open(fname, "rb")
dtmp = f.read()
dtmp = bytearray(dtmp)
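    # Each field below is assembled as a little-endian unsigned 16-bit value,
    # low_byte | high_byte << 8; an equivalent (hypothetical) decoding per field
    # would be struct.unpack_from("<H", dtmp, byte_offset)[0].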
    mean_hrate = dtmp[0] | dtmp[1] << 8  # average heart rate
    mean_brate = dtmp[2] | dtmp[3] << 8  # average respiration rate
    fallasleeptime = dtmp[4] | dtmp[5] << 8  # sleep-onset time
    wakeuptime = dtmp[6] | dtmp[7] << 8  # wake-up time
    offbed_cnt = dtmp[8] | dtmp[9] << 8  # number of bed exits
    turnover_cnt = dtmp[10] | dtmp[11] << 8  # number of turnovers
    bodymove_cnt = dtmp[12] | dtmp[13] << 8  # number of body movements
    heartstop_cnt = dtmp[14] | dtmp[15] << 8  # number of heartbeat pauses
    respstop_cnt = dtmp[16] | dtmp[17] << 8  # number of apnea events
    deepsleep_per = dtmp[18] | dtmp[19] << 8  # deep-sleep percentage
    remsleep_per = dtmp[20] | dtmp[21] << 8  # mid-sleep (REM) percentage
    lightsleep_per = dtmp[22] | dtmp[23] << 8  # light-sleep percentage
    wakesleep_per = dtmp[24] | dtmp[25] << 8  # awake percentage
    wakesleep_time = dtmp[26] | dtmp[27] << 8  # awake duration
    lightsleep_time = dtmp[28] | dtmp[29] << 8  # light-sleep duration
    remsleep_time = dtmp[30] | dtmp[31] << 8  # mid-sleep (REM) duration
    deepsleep_time = dtmp[32] | dtmp[33] << 8  # deep-sleep duration
    wake_off_cnt = dtmp[34] | dtmp[35] << 8  # number of awakenings (incl. bed exits)
    hrate_max = dtmp[36] | dtmp[37] << 8  # maximum heart rate
    brate_max = dtmp[38] | dtmp[39] << 8  # maximum respiration rate
    hrate_min = dtmp[40] | dtmp[41] << 8  # minimum heart rate
    brate_min = dtmp[42] | dtmp[43] << 8  # minimum respiration rate
    hrate_high_time = dtmp[44] | dtmp[45] << 8  # tachycardia duration
    hrate_low_time = dtmp[46] | dtmp[47] << 8  # bradycardia duration
    brate_high_time = dtmp[48] | dtmp[49] << 8  # rapid-breathing duration
    brate_low_time = dtmp[50] | dtmp[51] << 8  # slow-breathing duration
    allsleep_time = dtmp[52] | dtmp[53] << 8  # total sleep duration
    body_move = dtmp[54] | dtmp[55] << 8  # restlessness deduction
    off_bed = dtmp[56] | dtmp[57] << 8  # bed-exit deduction
    wake_cnt = dtmp[58] | dtmp[59] << 8  # easily-awakened deduction
    start_time = dtmp[60] | dtmp[61] << 8  # went-to-bed-too-late deduction
    fall_asleep = dtmp[62] | dtmp[63] << 8  # difficulty-falling-asleep deduction
    perc_deep = dtmp[64] | dtmp[65] << 8  # insufficient-deep-sleep deduction
    sleep_long = dtmp[66] | dtmp[67] << 8  # slept-too-long deduction
    sleep_less = dtmp[68] | dtmp[69] << 8  # slept-too-little deduction
    breath_stop = dtmp[70] | dtmp[71] << 8  # apnea deduction
    heart_stop = dtmp[72] | dtmp[73] << 8  # heartbeat-pause deduction
    hrate_low = dtmp[74] | dtmp[75] << 8  # bradycardia deduction
    hrate_high = dtmp[76] | dtmp[77] << 8  # tachycardia deduction
    brate_low = dtmp[78] | dtmp[79] << 8  # slow-breathing deduction
    brate_high = dtmp[80] | dtmp[81] << 8  # rapid-breathing deduction
    benign_sleep = dtmp[82] | dtmp[83] << 8  # benign-sleep-distribution deduction
    offset = dtmp[84] | dtmp[85] << 8
    data_len = dtmp[86] | dtmp[87] << 8
    start_stamp = dtmp[88] | dtmp[89] << 8 | dtmp[90] << 16 | dtmp[91] << 24
print(start_stamp, start_stamp + fallasleeptime * 60)
diff = (
body_move
+ off_bed
+ wake_cnt
+ start_time
+ fall_asleep
+ perc_deep
+ sleep_long
+ sleep_less
+ breath_stop
+ heart_stop
+ hrate_low
+ hrate_high
+ brate_low
+ brate_high
+ benign_sleep
)
score = 100 - diff
rslt = {"offset": offset, "len": data_len, "start_time": start_stamp}
print("-----睡眠报告-----")
print(">>> 睡眠比例")
print(
"睡眠时长:%d H %d min (入睡:%d, 清醒:%d)"
% (allsleep_time / 60, allsleep_time % 60, fallasleeptime, wakeuptime)
)
print(
"深睡时长:%d H %d min (%d%%) | 中睡时长:%d H %d min (%d%%) "
"| 浅睡时长:%d H %d min (%d%%) | 清醒时长:%d H %d min (%d%%)"
% (
deepsleep_time / 60,
deepsleep_time % 60,
deepsleep_per,
remsleep_time / 60,
remsleep_time % 60,
remsleep_per,
lightsleep_time / 60,
lightsleep_time % 60,
lightsleep_per,
wakesleep_time / 60,
wakesleep_time % 60,
wakesleep_per,
)
)
print(">>> 呼吸心率")
print("平均呼吸:%d bpm (min: %d, max: %d)" % (mean_brate, brate_min, brate_max))
print("呼吸暂停:%d 次" % respstop_cnt)
print(
"呼吸过速:%d H %d min | 呼吸过缓:%d H %d min "
% (
brate_high_time / 60,
brate_high_time % 60,
brate_low_time / 60,
brate_low_time % 60,
)
)
print("平均心率:%d bpm (min: %d, max: %d)" % (mean_hrate, hrate_min, hrate_max))
print(
"心率过速:%d H %d min | 心率过缓:%d H %d min "
% (
hrate_high_time / 60,
hrate_high_time % 60,
hrate_low_time / 60,
hrate_low_time % 60,
)
)
print("心跳暂停:%d 次" % heartstop_cnt)
print(">>> 体动翻身")
print(
"体动次数:%d | 翻身次数:%d | 离床次数:%d | 清醒次数:%d "
% (bodymove_cnt, turnover_cnt, offbed_cnt, wake_off_cnt)
)
print(">>> 睡眠分数")
print("整晚睡眠得分:", score)
print("躁动不安扣分:", body_move)
print("离床过多扣分:", off_bed)
print("睡觉易醒扣分:", wake_cnt)
print("睡觉太晚扣分:", start_time)
print("难于入睡扣分:", fall_asleep)
print("深睡不足扣分:", perc_deep)
print("睡眠过长扣分:", sleep_long)
print("睡眠过短扣分:", sleep_less)
print("呼吸暂停扣分:", breath_stop)
print("心跳暂停扣分:", heart_stop)
print("心跳过缓扣分:", hrate_low)
print("心跳过速扣分:", hrate_high)
print("呼吸过缓扣分:", brate_low)
print("呼吸过速扣分:", brate_high)
print("良性睡眠扣分:", benign_sleep)
print("----------------")
return rslt
|
[
"matplotlib.pyplot.title",
"os.remove",
"pandas.read_csv",
"time.strftime",
"pathlib.Path",
"time.mktime",
"matplotlib.pyplot.gca",
"pandas.set_option",
"pandas.DataFrame",
"traceback.print_exc",
"datetime.datetime.fromisoformat",
"warnings.simplefilter",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.xticks",
"time.localtime",
"matplotlib.pyplot.ylim",
"os.path.getsize",
"matplotlib.pyplot.legend",
"struct.unpack",
"datetime.datetime.fromtimestamp",
"matplotlib.pyplot.gcf",
"os.listdir",
"os.scandir",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"matplotlib.dates.MinuteLocator",
"time.strptime"
] |
[((515, 557), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (528, 557), True, 'import pandas as pd\n'), ((577, 616), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (590, 616), True, 'import pandas as pd\n'), ((482, 513), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (503, 513), False, 'import warnings\n'), ((2576, 2620), 'time.strptime', 'time.strptime', (['cmnttime', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(cmnttime, '%Y-%m-%d %H:%M:%S')\n", (2589, 2620), False, 'import time\n'), ((2743, 2768), 'time.localtime', 'time.localtime', (['timeStamp'], {}), '(timeStamp)\n', (2757, 2768), False, 'import time\n'), ((2790, 2835), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""', 'timeArray'], {}), "('%Y-%m-%d %H:%M:%S', timeArray)\n", (2803, 2835), False, 'import time\n'), ((2928, 2963), 'time.strptime', 'time.strptime', (['cmnttime', '"""%Y-%m-%d"""'], {}), "(cmnttime, '%Y-%m-%d')\n", (2941, 2963), False, 'import time\n'), ((3085, 3110), 'time.localtime', 'time.localtime', (['timeStamp'], {}), '(timeStamp)\n', (3099, 3110), False, 'import time\n'), ((3132, 3168), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d"""', 'timeArray'], {}), "('%Y-%m-%d', timeArray)\n", (3145, 3168), False, 'import time\n'), ((3262, 3303), 'time.strptime', 'time.strptime', (['cmnttime', '"""%Y-%m-%d %H:%M"""'], {}), "(cmnttime, '%Y-%m-%d %H:%M')\n", (3275, 3303), False, 'import time\n'), ((3426, 3451), 'time.localtime', 'time.localtime', (['timeStamp'], {}), '(timeStamp)\n', (3440, 3451), False, 'import time\n'), ((3473, 3515), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M"""', 'timeArray'], {}), "('%Y-%m-%d %H:%M', timeArray)\n", (3486, 3515), False, 'import time\n'), ((7525, 7540), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (7529, 7540), False, 'from pathlib import Path\n'), ((12681, 12697), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(412)'], {}), '(412)\n', (12692, 12697), True, 'import matplotlib.pyplot as plt\n'), ((12702, 12746), 'matplotlib.pyplot.plot', 'plt.plot', (['time_offset', 'PR', '"""r-"""'], {'label': '"""PSG"""'}), "(time_offset, PR, 'r-', label='PSG')\n", (12710, 12746), True, 'import matplotlib.pyplot as plt\n'), ((12751, 12800), 'matplotlib.pyplot.plot', 'plt.plot', (['time_offset', 'slp_hr', '"""b-"""'], {'label': '"""智能枕头"""'}), "(time_offset, slp_hr, 'b-', label='智能枕头')\n", (12759, 12800), True, 'import matplotlib.pyplot as plt\n'), ((12805, 12839), 'matplotlib.pyplot.title', 'plt.title', (['"""心率对比(bpm)"""'], {'fontsize': '(9)'}), "('心率对比(bpm)', fontsize=9)\n", (12814, 12839), True, 'import matplotlib.pyplot as plt\n'), ((12844, 12873), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (12854, 12873), True, 'import matplotlib.pyplot as plt\n'), ((12989, 13006), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(40)', '(100)'], {}), '(40, 100)\n', (12997, 13006), True, 'import matplotlib.pyplot as plt\n'), ((13016, 13025), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (13023, 13025), True, 'import matplotlib.pyplot as plt\n'), ((13837, 13853), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(412)'], {}), '(412)\n', (13848, 13853), True, 'import matplotlib.pyplot as plt\n'), ((13858, 13902), 'matplotlib.pyplot.plot', 'plt.plot', (['time_offset', 'PR', '"""r-"""'], {'label': '"""PSG"""'}), "(time_offset, PR, 'r-', label='PSG')\n", 
(13866, 13902), True, 'import matplotlib.pyplot as plt\n'), ((13907, 13956), 'matplotlib.pyplot.plot', 'plt.plot', (['time_offset', 'slp_hr', '"""b-"""'], {'label': '"""智能枕头"""'}), "(time_offset, slp_hr, 'b-', label='智能枕头')\n", (13915, 13956), True, 'import matplotlib.pyplot as plt\n'), ((13961, 13995), 'matplotlib.pyplot.title', 'plt.title', (['"""心率对比(bpm)"""'], {'fontsize': '(9)'}), "('心率对比(bpm)', fontsize=9)\n", (13970, 13995), True, 'import matplotlib.pyplot as plt\n'), ((14000, 14029), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (14010, 14029), True, 'import matplotlib.pyplot as plt\n'), ((14145, 14162), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(40)', '(100)'], {}), '(40, 100)\n', (14153, 14162), True, 'import matplotlib.pyplot as plt\n'), ((14174, 14202), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(413)'], {'sharex': 'ax1'}), '(413, sharex=ax1)\n', (14185, 14202), True, 'import matplotlib.pyplot as plt\n'), ((14207, 14251), 'matplotlib.pyplot.plot', 'plt.plot', (['time_offset', 'RR', '"""r-"""'], {'label': '"""PSG"""'}), "(time_offset, RR, 'r-', label='PSG')\n", (14215, 14251), True, 'import matplotlib.pyplot as plt\n'), ((14256, 14305), 'matplotlib.pyplot.plot', 'plt.plot', (['time_offset', 'slp_br', '"""b-"""'], {'label': '"""智能枕头"""'}), "(time_offset, slp_br, 'b-', label='智能枕头')\n", (14264, 14305), True, 'import matplotlib.pyplot as plt\n'), ((14310, 14345), 'matplotlib.pyplot.title', 'plt.title', (['"""呼吸率对比(rpm)"""'], {'fontsize': '(9)'}), "('呼吸率对比(rpm)', fontsize=9)\n", (14319, 14345), True, 'import matplotlib.pyplot as plt\n'), ((14350, 14379), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (14360, 14379), True, 'import matplotlib.pyplot as plt\n'), ((14446, 14458), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (14456, 14458), True, 'import matplotlib.pyplot as plt\n'), ((14511, 14526), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(5)', '(35)'], {}), '(5, 35)\n', (14519, 14526), True, 'import matplotlib.pyplot as plt\n'), ((14536, 14545), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (14543, 14545), True, 'import matplotlib.pyplot as plt\n'), ((14887, 14918), 'os.listdir', 'os.listdir', (["(cat_dir + 'hr_sec/')"], {}), "(cat_dir + 'hr_sec/')\n", (14897, 14918), False, 'import os\n'), ((16172, 16191), 'os.listdir', 'os.listdir', (['src_slp'], {}), '(src_slp)\n', (16182, 16191), False, 'import os\n'), ((16208, 16227), 'os.listdir', 'os.listdir', (['src_psg'], {}), '(src_psg)\n', (16218, 16227), False, 'import os\n'), ((29807, 29838), 'os.listdir', 'os.listdir', (["(cat_dir + 'br_sec/')"], {}), "(cat_dir + 'br_sec/')\n", (29817, 29838), False, 'import os\n'), ((2653, 2675), 'time.mktime', 'time.mktime', (['timeArray'], {}), '(timeArray)\n', (2664, 2675), False, 'import time\n'), ((2996, 3018), 'time.mktime', 'time.mktime', (['timeArray'], {}), '(timeArray)\n', (3007, 3018), False, 'import time\n'), ((3336, 3358), 'time.mktime', 'time.mktime', (['timeArray'], {}), '(timeArray)\n', (3347, 3358), False, 'import time\n'), ((3644, 3691), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(startstamp + t * tdelta)'], {}), '(startstamp + t * tdelta)\n', (3666, 3691), False, 'from datetime import datetime\n'), ((3778, 3803), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['t'], {}), '(t)\n', (3800, 3803), False, 'from datetime import datetime\n'), ((12498, 12538), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', 
(['"""%Y%m/%d %H:%M:%S"""'], {}), "('%Y%m/%d %H:%M:%S')\n", (12518, 12538), True, 'import matplotlib.dates as mdates\n'), ((12578, 12611), 'matplotlib.dates.MinuteLocator', 'mdates.MinuteLocator', ([], {'interval': '(15)'}), '(interval=15)\n', (12598, 12611), True, 'import matplotlib.dates as mdates\n'), ((13536, 13576), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y%m/%d %H:%M:%S"""'], {}), "('%Y%m/%d %H:%M:%S')\n", (13556, 13576), True, 'import matplotlib.dates as mdates\n'), ((13616, 13649), 'matplotlib.dates.MinuteLocator', 'mdates.MinuteLocator', ([], {'interval': '(15)'}), '(interval=15)\n', (13636, 13649), True, 'import matplotlib.dates as mdates\n'), ((16385, 16412), 'pandas.read_csv', 'pd.read_csv', (['(src_slp + fcsv)'], {}), '(src_slp + fcsv)\n', (16396, 16412), True, 'import pandas as pd\n'), ((16466, 16501), 'pandas.read_csv', 'pd.read_csv', (['(src_psg + psg_flist[i])'], {}), '(src_psg + psg_flist[i])\n', (16477, 16501), True, 'import pandas as pd\n'), ((17943, 17970), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'rslt_slp'}), '(data=rslt_slp)\n', (17955, 17970), True, 'import pandas as pd\n'), ((18044, 18071), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'rslt_psg'}), '(data=rslt_psg)\n', (18056, 18071), True, 'import pandas as pd\n'), ((19682, 19701), 'os.listdir', 'os.listdir', (['src_slp'], {}), '(src_slp)\n', (19692, 19701), False, 'import os\n'), ((19722, 19741), 'os.listdir', 'os.listdir', (['src_psg'], {}), '(src_psg)\n', (19732, 19741), False, 'import os\n'), ((30169, 30192), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'rslt'}), '(data=rslt)\n', (30181, 30192), True, 'import pandas as pd\n'), ((1137, 1170), 'struct.unpack', 'struct.unpack', (['"""f"""', 'data[i:i + 4]'], {}), "('f', data[i:i + 4])\n", (1150, 1170), False, 'import struct\n'), ((7680, 7700), 'os.scandir', 'os.scandir', (['dir_path'], {}), '(dir_path)\n', (7690, 7700), False, 'import os\n'), ((15347, 15370), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'rslt'}), '(data=rslt)\n', (15359, 15370), True, 'import pandas as pd\n'), ((18258, 18283), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['i'], {}), '(i)\n', (18280, 18283), False, 'from datetime import datetime\n'), ((20159, 20186), 'pandas.read_csv', 'pd.read_csv', (['(src_slp + fcsv)'], {}), '(src_slp + fcsv)\n', (20170, 20186), True, 'import pandas as pd\n'), ((20210, 20245), 'pandas.read_csv', 'pd.read_csv', (['(src_psg + psg_flist[j])'], {}), '(src_psg + psg_flist[j])\n', (20221, 20245), True, 'import pandas as pd\n'), ((22068, 22095), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'rslt_slp'}), '(data=rslt_slp)\n', (22080, 22095), True, 'import pandas as pd\n'), ((22192, 22219), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'rslt_psg'}), '(data=rslt_psg)\n', (22204, 22219), True, 'import pandas as pd\n'), ((24794, 24813), 'os.listdir', 'os.listdir', (['src_slp'], {}), '(src_slp)\n', (24804, 24813), False, 'import os\n'), ((24834, 24853), 'os.listdir', 'os.listdir', (['src_psg'], {}), '(src_psg)\n', (24844, 24853), False, 'import os\n'), ((1298, 1331), 'struct.unpack', 'struct.unpack', (['"""d"""', 'data[i:i + 8]'], {}), "('d', data[i:i + 8])\n", (1311, 1331), False, 'import struct\n'), ((9558, 9571), 'pathlib.Path', 'Path', (['my_file'], {}), '(my_file)\n', (9562, 9571), False, 'from pathlib import Path\n'), ((9600, 9613), 'pathlib.Path', 'Path', (['my_file'], {}), '(my_file)\n', (9604, 9613), False, 'from pathlib import Path\n'), ((10370, 10391), 'traceback.print_exc', 
'traceback.print_exc', ([], {}), '()\n', (10389, 10391), False, 'import traceback\n'), ((10986, 11007), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (11005, 11007), False, 'import traceback\n'), ((11620, 11641), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (11639, 11641), False, 'import traceback\n'), ((12238, 12259), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (12257, 12259), False, 'import traceback\n'), ((12462, 12471), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12469, 12471), True, 'import matplotlib.pyplot as plt\n'), ((12544, 12553), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12551, 12553), True, 'import matplotlib.pyplot as plt\n'), ((13500, 13509), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13507, 13509), True, 'import matplotlib.pyplot as plt\n'), ((13582, 13591), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13589, 13591), True, 'import matplotlib.pyplot as plt\n'), ((15601, 15624), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'rslt'}), '(data=rslt)\n', (15613, 15624), True, 'import pandas as pd\n'), ((18834, 18858), 'os.path.getsize', 'os.path.getsize', (['my_file'], {}), '(my_file)\n', (18849, 18858), False, 'import os\n'), ((22571, 22596), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['i'], {}), '(i)\n', (22593, 22596), False, 'from datetime import datetime\n'), ((25234, 25261), 'pandas.read_csv', 'pd.read_csv', (['(src_slp + fcsv)'], {}), '(src_slp + fcsv)\n', (25245, 25261), True, 'import pandas as pd\n'), ((25285, 25320), 'pandas.read_csv', 'pd.read_csv', (['(src_psg + psg_flist[j])'], {}), '(src_psg + psg_flist[j])\n', (25296, 25320), True, 'import pandas as pd\n'), ((27274, 27301), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'rslt_slp'}), '(data=rslt_slp)\n', (27286, 27301), True, 'import pandas as pd\n'), ((27398, 27425), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'rslt_psg'}), '(data=rslt_psg)\n', (27410, 27425), True, 'import pandas as pd\n'), ((15878, 15901), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'rslt'}), '(data=rslt)\n', (15890, 15901), True, 'import pandas as pd\n'), ((18787, 18800), 'pathlib.Path', 'Path', (['my_file'], {}), '(my_file)\n', (18791, 18800), False, 'from pathlib import Path\n'), ((18910, 18928), 'os.remove', 'os.remove', (['my_file'], {}), '(my_file)\n', (18919, 18928), False, 'import os\n'), ((27777, 27802), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['i'], {}), '(i)\n', (27799, 27802), False, 'from datetime import datetime\n'), ((18699, 18712), 'pathlib.Path', 'Path', (['my_file'], {}), '(my_file)\n', (18703, 18712), False, 'from pathlib import Path\n'), ((18749, 18762), 'pathlib.Path', 'Path', (['my_file'], {}), '(my_file)\n', (18753, 18762), False, 'from pathlib import Path\n'), ((23906, 23930), 'os.path.getsize', 'os.path.getsize', (['my_file'], {}), '(my_file)\n', (23921, 23930), False, 'import os\n'), ((28941, 28965), 'os.path.getsize', 'os.path.getsize', (['my_file'], {}), '(my_file)\n', (28956, 28965), False, 'import os\n'), ((18949, 18962), 'pathlib.Path', 'Path', (['my_file'], {}), '(my_file)\n', (18953, 18962), False, 'from pathlib import Path\n'), ((23851, 23864), 'pathlib.Path', 'Path', (['my_file'], {}), '(my_file)\n', (23855, 23864), False, 'from pathlib import Path\n'), ((23998, 24016), 'os.remove', 'os.remove', (['my_file'], {}), '(my_file)\n', (24007, 24016), False, 'import os\n'), ((28890, 28903), 'pathlib.Path', 'Path', (['my_file'], {}), '(my_file)\n', (28894, 
28903), False, 'from pathlib import Path\n'), ((29025, 29043), 'os.remove', 'os.remove', (['my_file'], {}), '(my_file)\n', (29034, 29043), False, 'import os\n'), ((23747, 23760), 'pathlib.Path', 'Path', (['my_file'], {}), '(my_file)\n', (23751, 23760), False, 'from pathlib import Path\n'), ((23805, 23818), 'pathlib.Path', 'Path', (['my_file'], {}), '(my_file)\n', (23809, 23818), False, 'from pathlib import Path\n'), ((28794, 28807), 'pathlib.Path', 'Path', (['my_file'], {}), '(my_file)\n', (28798, 28807), False, 'from pathlib import Path\n'), ((28848, 28861), 'pathlib.Path', 'Path', (['my_file'], {}), '(my_file)\n', (28852, 28861), False, 'from pathlib import Path\n'), ((24045, 24058), 'pathlib.Path', 'Path', (['my_file'], {}), '(my_file)\n', (24049, 24058), False, 'from pathlib import Path\n'), ((29068, 29081), 'pathlib.Path', 'Path', (['my_file'], {}), '(my_file)\n', (29072, 29081), False, 'from pathlib import Path\n')]
|
# -*- coding: utf-8 -*-
import unittest
from gilded_rose import Item, GildedRose
class GildedRoseTest(unittest.TestCase):
def test_concert_under_5(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 2, 30)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(33, items[0].quality)
def test_concert_under_10(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 8, 30)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(32, items[0].quality)
def test_concert(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 12, 30)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(31, items[0].quality)
def test_concert_expired(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 0, 24)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(0, items[0].quality)
def test_concert_max_10(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 9, 50)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(50, items[0].quality)
def test_concert_max_5(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 4, 50)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(50, items[0].quality)
def test_concert_max(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 13, 50)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(50, items[0].quality)
def test_vest(self):
items = [Item("+5 Dexterity Vest", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(19, items[0].quality)
def test_vest_expired(self):
items = [Item("+5 Dexterity Vest", 0, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(18, items[0].quality)
    def test_vest_min(self):
items = [Item("+5 Dexterity Vest", 5, 0)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(0, items[0].quality)
def test_mongoose(self):
items = [Item("Elixir of the Mongoose", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(19, items[0].quality)
def test_mongoose_expired(self):
items = [Item("Elixir of the Mongoose", 0, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(18, items[0].quality)
def test_mongoose_min(self):
items = [Item("Elixir of the Mongoose", 5, 0)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(0, items[0].quality)
def test_sulfuras(self):
items = [Item("Sulfuras, Hand of Ragnaros", 10, 80)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(80, items[0].quality)
def test_sulfuras_expired(self):
items = [Item("Sulfuras, Hand of Ragnaros", 0, 80)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(80, items[0].quality)
def test_brie(self):
items = [Item("Aged Brie", 10, 0)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(1, items[0].quality)
def test_brie_expired(self):
items = [Item("Aged Brie", 0, 0)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(2, items[0].quality)
def test_brie_max(self):
items = [Item("Aged Brie", 12, 49)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(50, items[0].quality)
def test_conjured(self):
items = [Item("Conjured Mana Cake", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(18, items[0].quality)
def test_conjured_expired(self):
items = [Item("Conjured Mana Cake", 0, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(16, items[0].quality)
def test_conjured_min(self):
items = [Item("Conjured Mana Cake", 5, 0)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(0, items[0].quality)
def test_concert_sell_in(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(9, items[0].sell_in)
def test_vest_sell_in(self):
items = [Item("+5 Dexterity Vest", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(9, items[0].sell_in)
def test_mongoose_sell_in(self):
items = [Item("Elixir of the Mongoose", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(9, items[0].sell_in)
def test_sulfuras_sell_in(self):
items = [Item("Sulfuras, Hand of Ragnaros", 0, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(0, items[0].sell_in)
def test_brie_sell_in(self):
items = [Item("Aged Brie", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(9, items[0].sell_in)
def test_conjured_sell_in(self):
items = [Item("Conjured Mana Cake", 10, 20)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEquals(9, items[0].sell_in)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"gilded_rose.GildedRose",
"gilded_rose.Item"
] |
[((6033, 6048), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6046, 6048), False, 'import unittest\n'), ((258, 275), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (268, 275), False, 'from gilded_rose import Item, GildedRose\n'), ((496, 513), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (506, 513), False, 'from gilded_rose import Item, GildedRose\n'), ((726, 743), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (736, 743), False, 'from gilded_rose import Item, GildedRose\n'), ((963, 980), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (973, 980), False, 'from gilded_rose import Item, GildedRose\n'), ((1198, 1215), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (1208, 1215), False, 'from gilded_rose import Item, GildedRose\n'), ((1433, 1450), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (1443, 1450), False, 'from gilded_rose import Item, GildedRose\n'), ((1667, 1684), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (1677, 1684), False, 'from gilded_rose import Item, GildedRose\n'), ((1870, 1887), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (1880, 1887), False, 'from gilded_rose import Item, GildedRose\n'), ((2080, 2097), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (2090, 2097), False, 'from gilded_rose import Item, GildedRose\n'), ((2286, 2303), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (2296, 2303), False, 'from gilded_rose import Item, GildedRose\n'), ((2497, 2514), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (2507, 2514), False, 'from gilded_rose import Item, GildedRose\n'), ((2716, 2733), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (2726, 2733), False, 'from gilded_rose import Item, GildedRose\n'), ((2930, 2947), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (2940, 2947), False, 'from gilded_rose import Item, GildedRose\n'), ((3145, 3162), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (3155, 3162), False, 'from gilded_rose import Item, GildedRose\n'), ((3368, 3385), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (3378, 3385), False, 'from gilded_rose import Item, GildedRose\n'), ((3562, 3579), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (3572, 3579), False, 'from gilded_rose import Item, GildedRose\n'), ((3762, 3779), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (3772, 3779), False, 'from gilded_rose import Item, GildedRose\n'), ((3960, 3977), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (3970, 3977), False, 'from gilded_rose import Item, GildedRose\n'), ((4168, 4185), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (4178, 4185), False, 'from gilded_rose import Item, GildedRose\n'), ((4383, 4400), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (4393, 4400), False, 'from gilded_rose import Item, GildedRose\n'), ((4593, 4610), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (4603, 4610), False, 'from gilded_rose import Item, GildedRose\n'), ((4830, 4847), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (4840, 4847), False, 'from gilded_rose import Item, GildedRose\n'), ((5040, 5057), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (5050, 
5057), False, 'from gilded_rose import Item, GildedRose\n'), ((5259, 5276), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (5269, 5276), False, 'from gilded_rose import Item, GildedRose\n'), ((5481, 5498), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (5491, 5498), False, 'from gilded_rose import Item, GildedRose\n'), ((5683, 5700), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (5693, 5700), False, 'from gilded_rose import Item, GildedRose\n'), ((5898, 5915), 'gilded_rose.GildedRose', 'GildedRose', (['items'], {}), '(items)\n', (5908, 5915), False, 'from gilded_rose import Item, GildedRose\n'), ((178, 234), 'gilded_rose.Item', 'Item', (['"""Backstage passes to a TAFKAL80ETC concert"""', '(2)', '(30)'], {}), "('Backstage passes to a TAFKAL80ETC concert', 2, 30)\n", (182, 234), False, 'from gilded_rose import Item, GildedRose\n'), ((416, 472), 'gilded_rose.Item', 'Item', (['"""Backstage passes to a TAFKAL80ETC concert"""', '(8)', '(30)'], {}), "('Backstage passes to a TAFKAL80ETC concert', 8, 30)\n", (420, 472), False, 'from gilded_rose import Item, GildedRose\n'), ((645, 702), 'gilded_rose.Item', 'Item', (['"""Backstage passes to a TAFKAL80ETC concert"""', '(12)', '(30)'], {}), "('Backstage passes to a TAFKAL80ETC concert', 12, 30)\n", (649, 702), False, 'from gilded_rose import Item, GildedRose\n'), ((883, 939), 'gilded_rose.Item', 'Item', (['"""Backstage passes to a TAFKAL80ETC concert"""', '(0)', '(24)'], {}), "('Backstage passes to a TAFKAL80ETC concert', 0, 24)\n", (887, 939), False, 'from gilded_rose import Item, GildedRose\n'), ((1118, 1174), 'gilded_rose.Item', 'Item', (['"""Backstage passes to a TAFKAL80ETC concert"""', '(9)', '(50)'], {}), "('Backstage passes to a TAFKAL80ETC concert', 9, 50)\n", (1122, 1174), False, 'from gilded_rose import Item, GildedRose\n'), ((1353, 1409), 'gilded_rose.Item', 'Item', (['"""Backstage passes to a TAFKAL80ETC concert"""', '(4)', '(50)'], {}), "('Backstage passes to a TAFKAL80ETC concert', 4, 50)\n", (1357, 1409), False, 'from gilded_rose import Item, GildedRose\n'), ((1586, 1643), 'gilded_rose.Item', 'Item', (['"""Backstage passes to a TAFKAL80ETC concert"""', '(13)', '(50)'], {}), "('Backstage passes to a TAFKAL80ETC concert', 13, 50)\n", (1590, 1643), False, 'from gilded_rose import Item, GildedRose\n'), ((1813, 1846), 'gilded_rose.Item', 'Item', (['"""+5 Dexterity Vest"""', '(10)', '(20)'], {}), "('+5 Dexterity Vest', 10, 20)\n", (1817, 1846), False, 'from gilded_rose import Item, GildedRose\n'), ((2024, 2056), 'gilded_rose.Item', 'Item', (['"""+5 Dexterity Vest"""', '(0)', '(20)'], {}), "('+5 Dexterity Vest', 0, 20)\n", (2028, 2056), False, 'from gilded_rose import Item, GildedRose\n'), ((2231, 2262), 'gilded_rose.Item', 'Item', (['"""+5 Dexterity Vest"""', '(5)', '(0)'], {}), "('+5 Dexterity Vest', 5, 0)\n", (2235, 2262), False, 'from gilded_rose import Item, GildedRose\n'), ((2435, 2473), 'gilded_rose.Item', 'Item', (['"""Elixir of the Mongoose"""', '(10)', '(20)'], {}), "('Elixir of the Mongoose', 10, 20)\n", (2439, 2473), False, 'from gilded_rose import Item, GildedRose\n'), ((2655, 2692), 'gilded_rose.Item', 'Item', (['"""Elixir of the Mongoose"""', '(0)', '(20)'], {}), "('Elixir of the Mongoose', 0, 20)\n", (2659, 2692), False, 'from gilded_rose import Item, GildedRose\n'), ((2870, 2906), 'gilded_rose.Item', 'Item', (['"""Elixir of the Mongoose"""', '(5)', '(0)'], {}), "('Elixir of the Mongoose', 5, 0)\n", (2874, 2906), False, 'from gilded_rose import Item, 
GildedRose\n'), ((3079, 3121), 'gilded_rose.Item', 'Item', (['"""Sulfuras, Hand of Ragnaros"""', '(10)', '(80)'], {}), "('Sulfuras, Hand of Ragnaros', 10, 80)\n", (3083, 3121), False, 'from gilded_rose import Item, GildedRose\n'), ((3303, 3344), 'gilded_rose.Item', 'Item', (['"""Sulfuras, Hand of Ragnaros"""', '(0)', '(80)'], {}), "('Sulfuras, Hand of Ragnaros', 0, 80)\n", (3307, 3344), False, 'from gilded_rose import Item, GildedRose\n'), ((3514, 3538), 'gilded_rose.Item', 'Item', (['"""Aged Brie"""', '(10)', '(0)'], {}), "('Aged Brie', 10, 0)\n", (3518, 3538), False, 'from gilded_rose import Item, GildedRose\n'), ((3715, 3738), 'gilded_rose.Item', 'Item', (['"""Aged Brie"""', '(0)', '(0)'], {}), "('Aged Brie', 0, 0)\n", (3719, 3738), False, 'from gilded_rose import Item, GildedRose\n'), ((3911, 3936), 'gilded_rose.Item', 'Item', (['"""Aged Brie"""', '(12)', '(49)'], {}), "('Aged Brie', 12, 49)\n", (3915, 3936), False, 'from gilded_rose import Item, GildedRose\n'), ((4110, 4144), 'gilded_rose.Item', 'Item', (['"""Conjured Mana Cake"""', '(10)', '(20)'], {}), "('Conjured Mana Cake', 10, 20)\n", (4114, 4144), False, 'from gilded_rose import Item, GildedRose\n'), ((4326, 4359), 'gilded_rose.Item', 'Item', (['"""Conjured Mana Cake"""', '(0)', '(20)'], {}), "('Conjured Mana Cake', 0, 20)\n", (4330, 4359), False, 'from gilded_rose import Item, GildedRose\n'), ((4537, 4569), 'gilded_rose.Item', 'Item', (['"""Conjured Mana Cake"""', '(5)', '(0)'], {}), "('Conjured Mana Cake', 5, 0)\n", (4541, 4569), False, 'from gilded_rose import Item, GildedRose\n'), ((4749, 4806), 'gilded_rose.Item', 'Item', (['"""Backstage passes to a TAFKAL80ETC concert"""', '(10)', '(20)'], {}), "('Backstage passes to a TAFKAL80ETC concert', 10, 20)\n", (4753, 4806), False, 'from gilded_rose import Item, GildedRose\n'), ((4983, 5016), 'gilded_rose.Item', 'Item', (['"""+5 Dexterity Vest"""', '(10)', '(20)'], {}), "('+5 Dexterity Vest', 10, 20)\n", (4987, 5016), False, 'from gilded_rose import Item, GildedRose\n'), ((5197, 5235), 'gilded_rose.Item', 'Item', (['"""Elixir of the Mongoose"""', '(10)', '(20)'], {}), "('Elixir of the Mongoose', 10, 20)\n", (5201, 5235), False, 'from gilded_rose import Item, GildedRose\n'), ((5416, 5457), 'gilded_rose.Item', 'Item', (['"""Sulfuras, Hand of Ragnaros"""', '(0)', '(20)'], {}), "('Sulfuras, Hand of Ragnaros', 0, 20)\n", (5420, 5457), False, 'from gilded_rose import Item, GildedRose\n'), ((5634, 5659), 'gilded_rose.Item', 'Item', (['"""Aged Brie"""', '(10)', '(20)'], {}), "('Aged Brie', 10, 20)\n", (5638, 5659), False, 'from gilded_rose import Item, GildedRose\n'), ((5840, 5874), 'gilded_rose.Item', 'Item', (['"""Conjured Mana Cake"""', '(10)', '(20)'], {}), "('Conjured Mana Cake', 10, 20)\n", (5844, 5874), False, 'from gilded_rose import Item, GildedRose\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 23 13:45:44 2019
@author: thomassullivan
"""
import docx
from docx.enum.dml import MSO_THEME_COLOR_INDEX
from objects import Article
def add_hyperlink(paragraph, text, url):
# This gets access to the document.xml.rels file and gets a new relation id value
#print(paragraph)
#print(text)
#print(url)
try:
part = paragraph.part
r_id = part.relate_to(url, docx.opc.constants.RELATIONSHIP_TYPE.HYPERLINK, is_external=True)
# Create the w:hyperlink tag and add needed values
hyperlink = docx.oxml.shared.OxmlElement('w:hyperlink')
hyperlink.set(docx.oxml.shared.qn('r:id'), r_id, )
# Create a w:r element and a new w:rPr element
new_run = docx.oxml.shared.OxmlElement('w:r')
rPr = docx.oxml.shared.OxmlElement('w:rPr')
# Join all the xml elements together add add the required text to the w:r element
new_run.append(rPr)
new_run.text = text
hyperlink.append(new_run)
# Create a new Run object and add the hyperlink into it
        r = paragraph.add_run()
        r._r.append(hyperlink)
# A workaround for the lack of a hyperlink style (doesn't go purple after using the link)
# Delete this if using a template that has the hyperlink style in it
r.font.color.theme_color = MSO_THEME_COLOR_INDEX.HYPERLINK
r.font.underline = True
return hyperlink
except Exception as e:
print(e)
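# Hedged usage sketch (hypothetical document and URL, assuming python-docx is
# installed):
#   doc = docx.Document()
#   p = doc.add_paragraph('See the full report at ')
#   add_hyperlink(paragraph=p, text='example.com', url='https://example.com')
#   doc.save('demo.docx')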
def add_article(document, article):
#print(article)
try:
new_paragraph = document.add_paragraph('') #add blank paragraph that we append the text to
add_hyperlink(paragraph=new_paragraph, text=article.name, url=article.link)
#print(Article.get_date_formatted(article))
new_paragraph.add_run(' ({0}) '.format(Article.get_date_formatted(article))) #blank space between the link and the description
new_paragraph.add_run(article.description)
except Exception as e:
print(e)
def add_section(document, section):
section_name = document.add_paragraph(section.section_name)
section.categories.sort(key=lambda x: x.name, reverse=True)
section.categories.reverse()
for category in section.categories:
add_category(document, category)
def add_category(document, category):
category_name = document.add_paragraph(category.category_name)
#category.articles = category.articles.sort()
category.articles.sort(key=lambda x: x.name, reverse=True)
category.articles.reverse()
for article in category.articles:
#print(article)
add_article(document, article)
def create_roundup2(document, roundup_title, categories):
title = document.add_paragraph(roundup_title)
for category in categories:
add_category(document, category)
def complete_roundup2(filename, roundup_title, sections):
new_document = docx.Document()
create_roundup2(new_document, roundup_title, sections)
new_document.save('{0}.docx'.format(filename))
def create_roundup_docx(document, roundup_title, categories):
title = document.add_paragraph(roundup_title)
for category in categories:
add_category(document, category)
def create_complete_roundup(filename, roundup_title, categories):
new_document = docx.Document()
create_roundup_docx(new_document, roundup_title, categories)
new_document.save('{0}.docx'.format(filename))
if __name__ == '__main__':
print('roundup_docx2 loaded')
|
[
"objects.Article.get_date_formatted",
"docx.oxml.shared.qn",
"docx.Document",
"docx.oxml.shared.OxmlElement"
] |
[((2974, 2989), 'docx.Document', 'docx.Document', ([], {}), '()\n', (2987, 2989), False, 'import docx\n'), ((3391, 3406), 'docx.Document', 'docx.Document', ([], {}), '()\n', (3404, 3406), False, 'import docx\n'), ((611, 654), 'docx.oxml.shared.OxmlElement', 'docx.oxml.shared.OxmlElement', (['"""w:hyperlink"""'], {}), "('w:hyperlink')\n", (639, 654), False, 'import docx\n'), ((792, 827), 'docx.oxml.shared.OxmlElement', 'docx.oxml.shared.OxmlElement', (['"""w:r"""'], {}), "('w:r')\n", (820, 827), False, 'import docx\n'), ((842, 879), 'docx.oxml.shared.OxmlElement', 'docx.oxml.shared.OxmlElement', (['"""w:rPr"""'], {}), "('w:rPr')\n", (870, 879), False, 'import docx\n'), ((677, 704), 'docx.oxml.shared.qn', 'docx.oxml.shared.qn', (['"""r:id"""'], {}), "('r:id')\n", (696, 704), False, 'import docx\n'), ((1885, 1920), 'objects.Article.get_date_formatted', 'Article.get_date_formatted', (['article'], {}), '(article)\n', (1911, 1920), False, 'from objects import Article\n')]
|
import base64
import json
import socket
from typing import Optional, Union
from platypush.plugins import Plugin, action
class TcpPlugin(Plugin):
"""
Plugin for raw TCP communications.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._sockets = {}
def _connect(self, host: str, port: int, timeout: Optional[float] = None) -> socket.socket:
sd = self._sockets.get((host, port))
if sd:
return sd
sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if timeout:
sd.settimeout(timeout)
sd.connect((host, port))
self._sockets[(host, port)] = sd
return sd
@action
def connect(self, host: str, port: int, timeout: Optional[float] = None):
"""
Open a TCP connection.
:param host: Host IP/name.
:param port: TCP port.
:param timeout: Connection timeout in seconds (default: None).
"""
self._connect(host, port, timeout)
@action
def close(self, host: str, port: int):
"""
Close an active TCP connection.
:param host: Host IP/name.
:param port: TCP port.
"""
sd = self._sockets.get((host, port))
if not sd:
self.logger.warning('Not connected to ({}, {})'.format(host, port))
return
sd.close()
@action
def send(self, data: Union[bytes, str], host: str, port: int, binary: bool = False,
timeout: Optional[float] = None, recv_response: bool = False, **recv_opts):
"""
Send data over a TCP connection. If the connection isn't active it will be created.
:param data: Data to be sent, as bytes or string.
:param host: Host IP/name.
:param port: TCP port.
:param binary: If set to True and ``data`` is a string then will be treated as base64-encoded binary input.
:param timeout: Connection timeout in seconds (default: None).
:param recv_response: If True then the action will wait for a response from the server before closing the
connection. Note that ``recv_opts`` must be specified in this case - at least ``length``.
"""
if isinstance(data, list) or isinstance(data, dict):
data = json.dumps(data)
if isinstance(data, str):
data = data.encode()
if binary:
data = base64.decodebytes(data)
sd = self._connect(host, port, timeout)
try:
sd.send(data)
if recv_response:
recv_opts.update({
'host': host,
'port': port,
'timeout': timeout,
'binary': binary,
})
return self.recv(**recv_opts)
finally:
self.close(host, port)
@action
def recv(self, length: int, host: str, port: int, binary: bool = False, timeout: Optional[float] = None) -> str:
"""
Receive data from a TCP connection. If the connection isn't active it will be created.
:param length: Maximum number of bytes to be received.
:param host: Host IP/name.
:param port: TCP port.
:param binary: If set to True then the output will be base64-encoded, otherwise decoded as string.
:param timeout: Connection timeout in seconds (default: None).
"""
sd = self._connect(host, port, timeout)
try:
data = sd.recv(length)
if binary:
data = base64.encodebytes(data).decode()
else:
data = data.decode()
return data
finally:
self.close(host, port)
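# Hedged usage sketch (hypothetical host/port): within Platypush the plugin is
# normally invoked through the `tcp.send` / `tcp.recv` actions, but a rough direct
# call would look like:
#   plugin = TcpPlugin()
#   plugin.send(data='PING\n', host='127.0.0.1', port=7000,
#               recv_response=True, length=64)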
# vim:sw=4:ts=4:et:
|
[
"base64.encodebytes",
"socket.socket",
"base64.decodebytes",
"json.dumps"
] |
[((494, 543), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (507, 543), False, 'import socket\n'), ((2302, 2318), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2312, 2318), False, 'import json\n'), ((2432, 2456), 'base64.decodebytes', 'base64.decodebytes', (['data'], {}), '(data)\n', (2450, 2456), False, 'import base64\n'), ((3575, 3599), 'base64.encodebytes', 'base64.encodebytes', (['data'], {}), '(data)\n', (3593, 3599), False, 'import base64\n')]
|
"""Week2 Test Cases Traveling Salesman Problem"""
import math
from src.course4.week2.tsp import generate_complete_euclidean_distanace_graph, traveling_salesman_problem
def test_tsp1():
points = [(1, 1), (4, 1), (1, 4), (4, 4)]
result = traveling_salesman_problem(points)
assert result == 12
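    # Explanatory note (not from the original suite): the four points are the
    # corners of a 3 x 3 square, so the optimal tour is its perimeter, 4 * 3 = 12.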
def test_tsp2():
points = [(0, 0), (0, 3), (3, 3)]
result = traveling_salesman_problem(points)
assert math.floor(result) == 10
def test_tsp3():
points = [(0, 0), (4, 3), (4, 0), (0, 3)]
result = traveling_salesman_problem(points)
assert result == 14
def test_tsp4():
points = [
(1.000, 1.00),
(1.125, 1.00),
(1.250, 1.00),
(1.500, 1.00),
(1.750, 1.00),
(2.000, 1.00),
(1.000, 2.00),
(1.125, 2.00),
(1.250, 2.00),
(1.500, 2.00),
(1.750, 2.00),
(2.000, 2.00)
]
result = traveling_salesman_problem(points)
assert result == 4
def test_tsp5():
points = [
(0.549963E-07, 0.985808E-08),
(-28.8733, -0.797739E-07),
(-79.2916, -21.4033),
(-14.6577, -43.3896),
(-64.7473, 21.8982),
(-29.0585, -43.2167),
(-72.0785, 0.181581),
(-36.0366, -21.6135),
(-50.4808, 7.37447),
(-50.5859, -21.5882),
(-0.135819, -28.7293),
(-65.0866, -36.0625),
(-21.4983, 7.31942),
(-57.5687, -43.2506),
(-43.0700, 14.5548)
]
result = traveling_salesman_problem(points)
assert math.floor(result) == 284
def test_tsp6():
points = [(0, 2.05), (3.414213562373095, 3.4642135623730947),
(0.5857864376269049, 0.6357864376269047),
(0.5857864376269049, 3.4642135623730947),
(2, 0),
(4.05, 2.05),
(2, 4.10),
(3.414213562373095, 0.6357864376269047)]
result = traveling_salesman_problem(points)
assert math.floor(result) == 12
|
[
"math.floor",
"src.course4.week2.tsp.traveling_salesman_problem"
] |
[((248, 282), 'src.course4.week2.tsp.traveling_salesman_problem', 'traveling_salesman_problem', (['points'], {}), '(points)\n', (274, 282), False, 'from src.course4.week2.tsp import generate_complete_euclidean_distanace_graph, traveling_salesman_problem\n'), ((377, 411), 'src.course4.week2.tsp.traveling_salesman_problem', 'traveling_salesman_problem', (['points'], {}), '(points)\n', (403, 411), False, 'from src.course4.week2.tsp import generate_complete_euclidean_distanace_graph, traveling_salesman_problem\n'), ((526, 560), 'src.course4.week2.tsp.traveling_salesman_problem', 'traveling_salesman_problem', (['points'], {}), '(points)\n', (552, 560), False, 'from src.course4.week2.tsp import generate_complete_euclidean_distanace_graph, traveling_salesman_problem\n'), ((913, 947), 'src.course4.week2.tsp.traveling_salesman_problem', 'traveling_salesman_problem', (['points'], {}), '(points)\n', (939, 947), False, 'from src.course4.week2.tsp import generate_complete_euclidean_distanace_graph, traveling_salesman_problem\n'), ((1535, 1569), 'src.course4.week2.tsp.traveling_salesman_problem', 'traveling_salesman_problem', (['points'], {}), '(points)\n', (1561, 1569), False, 'from src.course4.week2.tsp import generate_complete_euclidean_distanace_graph, traveling_salesman_problem\n'), ((1947, 1981), 'src.course4.week2.tsp.traveling_salesman_problem', 'traveling_salesman_problem', (['points'], {}), '(points)\n', (1973, 1981), False, 'from src.course4.week2.tsp import generate_complete_euclidean_distanace_graph, traveling_salesman_problem\n'), ((423, 441), 'math.floor', 'math.floor', (['result'], {}), '(result)\n', (433, 441), False, 'import math\n'), ((1581, 1599), 'math.floor', 'math.floor', (['result'], {}), '(result)\n', (1591, 1599), False, 'import math\n'), ((1993, 2011), 'math.floor', 'math.floor', (['result'], {}), '(result)\n', (2003, 2011), False, 'import math\n')]
|
#!/usr/bin/env python
import platform
from EPPs.common import SendMailEPP
class DataReleaseTrigger(SendMailEPP):
"""Notifies the bioinformatics team to release data for a project."""
def _run(self):
if len(self.projects) > 1:
raise ValueError('More than one project present in step. Only one project per step permitted')
data_download_contacts = []
# There are up to 5 contacts entered in the step.
for count in range(1, 6):
udf_name1 = 'Data Download Contact Username %s' % count
udf_name2 = 'Is Contact %s A New or Existing User?' % count
if self.process.udf.get(udf_name1):
data_download_contacts.append(
'%s (%s)' % (self.process.udf.get(udf_name1), self.process.udf.get(udf_name2))
)
msg = '''Hi Bioinformatics,
Please release the data for {sample_count} sample(s) from project {project} shown at the link below:
{link}
The data contacts are:
{data_download_contacts}
Kind regards,
ClarityX'''
msg = msg.format(
link='https://' + platform.node() + '/clarity/work-details/' + self.step_id[3:],
sample_count=len(self.samples),
project=self.projects[0].name,
data_download_contacts='\n'.join(data_download_contacts)
)
subject = ', '.join(p.name for p in self.projects) + ': Please release data'
# Send email to list of persons specified in the default section of config
self.send_mail(subject, msg, config_name='projects-bioinformatics')
if __name__ == '__main__':
DataReleaseTrigger().run()
|
[
"platform.node"
] |
[((1114, 1129), 'platform.node', 'platform.node', ([], {}), '()\n', (1127, 1129), False, 'import platform\n')]
|
#!/usr/bin/env python
#
# Replication repair
# Copyright (C) 2015 <NAME>
#
# Licensed under the MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#=================================
# Replication repair
# by <NAME>
# License: MIT
# Creation date: 2015-11-16
#=================================
#
from _infos import __version__
# Include the lib folder in the python import path (so that packaged modules can be easily called, such as gooey which always call its submodules via gooey parent module)
import sys, os
thispathname = os.path.dirname(__file__)
sys.path.append(os.path.join(thispathname, 'lib'))
# Import necessary libraries
import rfigc # optional
import shutil
from lib.aux_funcs import recwalk, path2unix, fullpath, is_dir_or_file, is_dir, is_file, create_dir_if_not_exist
import lib.argparse as argparse
import datetime, time
import lib.tqdm as tqdm
import itertools
import math
#import operator # to get the max out of a dict
import csv # to process the database file from rfigc.py
import shlex # for string parsing as argv argument to main(), unnecessary otherwise
from lib.tee import Tee # Redirect print output to the terminal as well as in a log file
#import pprint # Unnecessary, used only for debugging purposes
#***********************************
# AUXILIARY FUNCTIONS
#***********************************
def relpath_posix(recwalk_result, pardir, fromwinpath=False):
''' Helper function to convert all paths to relative posix like paths (to ease comparison) '''
return recwalk_result[0], path2unix(os.path.join(os.path.relpath(recwalk_result[0], pardir),recwalk_result[1]), nojoin=True, fromwinpath=fromwinpath)
#def checkAllEqual(lst):
# return not lst or [lst[0]]*len(lst) == lst
def sort_dict_of_paths(d):
""" Sort a dict containing paths parts (ie, paths divided in parts and stored as a list). Top paths will be given precedence over deeper paths. """
# Find the path that is the deepest, and count the number of parts
max_rec = max(len(x) if x else 0 for x in d.values())
# Pad other paths with empty parts to fill in, so that all paths will have the same number of parts (necessary to compare correctly, else deeper paths may get precedence over top ones, since the folder name will be compared to filenames!)
for key in d.keys():
if d[key]:
d[key] = ['']*(max_rec-len(d[key])) + d[key]
return sorted(d.items(), key=lambda x: x[1])
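# Example (added for illustration, not part of the original script): with
# d = {0: ['a', 'f.txt'], 1: ['f.txt']}, the shorter path is left-padded to
# ['', 'f.txt'], so the top-level file sorts before the file inside folder 'a'
# instead of being compared alphabetically against the folder name.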
def sort_group(d, return_only_first=False):
''' Sort a dictionary of relative paths and cluster equal paths together at the same time '''
# First, sort the paths in order (this must be a couple: (parent_dir, filename), so that there's no ambiguity because else a file at root will be considered as being after a folder/file since the ordering is done alphabetically without any notion of tree structure).
d_sort = sort_dict_of_paths(d)
# Pop the first item in the ordered list
base_elt = (-1, None)
while (base_elt[1] is None and d_sort):
base_elt = d_sort.pop(0)
if base_elt[1] is None:
return None
# Init by creating the first group and pushing the first ordered filepath into the first group
lst = [[base_elt]]
if d_sort:
# For each subsequent filepath
for elt in d_sort:
# If the filepath is not empty (generator died)
if elt[1] is not None:
# If the filepath is the same to the latest grouped filepath, we add it to the same group
if elt[1] == base_elt[1]:
lst[-1].append(elt)
# Else the filepath is different: we create a new group, add the filepath to this group, and replace the latest grouped filepath
else:
if return_only_first: break # break here if we only need the first group
lst.append([elt])
base_elt = elt # replace the latest grouped filepath
return lst
def majority_vote_byte_scan(relfilepath, fileslist, outpath, blocksize=65535, default_char_null=False):
'''Takes a list of files in string format representing the same data, and disambiguate by majority vote: for position in string, if the character is not the same accross all entries, we keep the major one. If none, it will be replaced by a null byte (because we can't know if any of the entries are correct about this character).
relfilepath is the filename or the relative file path relative to the parent directory (ie, this is the relative path so that we can compare the files from several directories).'''
# The idea of replication combined with ECC was a bit inspired by this paper: <NAME>, <NAME>, and <NAME>. "Combining Erasure-Code and Replication Redundancy Schemes for Increased Storage and Repair Efficiency in P2P Storage Systems.", 2013, Technion, Computer Science Department, Technical Report CS-2013-03
# But it is a very well known concept in redundancy engineering, usually called triple-modular redundancy (which is here extended to n-modular since we can supply any number of files we want, not just three).
# Preference in case of ambiguity is always given to the file of the first folder.
fileshandles = []
for filepath in fileslist:
if filepath:
# Already a file handle? Just store it in the fileshandles list
if hasattr(filepath, 'read'):
fileshandles.append(filepath)
# Else it's a string filepath, open the file
else:
fileshandles.append(open(filepath, 'rb'))
# Create and open output (merged) file, except if we were already given a file handle
if hasattr(outpath, 'write'):
outfile = outpath
else:
outpathfull = os.path.join(outpath, relfilepath)
pardir = os.path.dirname(outpathfull)
if not os.path.exists(pardir):
os.makedirs(pardir)
outfile = open(outpathfull, 'wb')
# Cannot vote if there's not at least 3 files!
# In this case, just copy the file from the first folder, verbatim
if len(fileshandles) < 3:
# If there's at least one input file, then copy it verbatim to the output folder
if fileshandles:
create_dir_if_not_exist(os.path.dirname(outpathfull))
buf = 1
while (buf):
buf = fileshandles[0].read()
outfile.write(buf)
outfile.flush()
return (1, "Error with file %s: only %i copies available, cannot vote (need at least 3)! Copied the first file from the first folder, verbatim." % (relfilepath, len(fileshandles)))
errors = []
    entries = [1]*len(fileshandles) # init with a dummy non-empty value so that the while loop below runs at least once
while (entries.count('') < len(fileshandles)):
final_entry = []
# Read a block from all input files into memory
for i in xrange(len(fileshandles)):
entries[i] = fileshandles[i].read(blocksize)
# End of file for all files, we exit
if entries.count('') == len(fileshandles):
break
# Else if there's only one file, just copy the file's content over
elif len(entries) == 1:
final_entry = entries[0]
# Else, do the majority vote
else:
# Walk along each column (imagine the strings being rows in a matrix, then we pick one column at each iteration = all characters at position i of each string), so that we can compare these characters easily
for i in xrange(max(len(entry) for entry in entries)):
hist = {} # kind of histogram, we just memorize how many times a character is presented at the position i in each string TODO: use collections.Counter instead of dict()?
# Extract the character at position i of each string and compute the histogram at the same time (number of time this character appear among all strings at this position i)
for entry in entries:
# Check if we are not beyond the current entry's length
if i < len(entry): # TODO: check this line, this should allow the vote to continue even if some files are shorter than others
# Extract the character and use it to contribute to the histogram
# TODO: add warning message when one file is not of the same size as the others
key = str(ord(entry[i])) # convert to the ascii value to avoid any funky problem with encoding in dict keys
hist[key] = hist.get(key, 0) + 1 # increment histogram for this value. If it does not exists, use 0. (essentially equivalent to hist[key] += 1 but with exception management if key did not already exists)
# If there's only one character (it's the same accross all strings at position i), then it's an exact match, we just save the character and we can skip to the next iteration
if len(hist) == 1:
final_entry.append(chr(int(hist.iterkeys().next())))
continue
# Else, the character is different among different entries, we will pick the major one (mode)
elif len(hist) > 1:
# Sort the dict by value (and reverse because we want the most frequent first)
skeys = sorted(hist, key=hist.get, reverse=True)
# Ambiguity! If each entries present a different character (thus the major has only an occurrence of 1), then it's too ambiguous and we just set a null byte to signal that
if hist[skeys[0]] == 1:
if default_char_null:
if default_char_null is True:
final_entry.append("\x00")
else:
final_entry.append(default_char_null)
else:
# Use the entry of the first file that is still open
first_char = ''
for entry in entries:
# Found the first file that has a character at this position: store it and break loop
if i < len(entry):
first_char = entry[i]
break
# Use this character in spite of ambiguity
final_entry.append(first_char)
errors.append(outfile.tell() + i) # Print an error indicating the characters that failed
# Else if there is a tie (at least two characters appear with the same frequency), then we just pick one of them
elif hist[skeys[0]] == hist[skeys[1]]:
final_entry.append(chr(int(skeys[0]))) # TODO: find a way to account for both characters. Maybe return two different strings that will both have to be tested? (eg: maybe one has a tampered hash, both will be tested and if one correction pass the hash then it's ok we found the correct one)
# Else we have a clear major character that appear in more entries than any other character, then we keep this one
else:
final_entry.append(chr(int(skeys[0]))) # alternative one-liner: max(hist.iteritems(), key=operator.itemgetter(1))[0]
continue
# Concatenate to a string (this is faster than using a string from the start and concatenating at each iteration because Python strings are immutable so Python has to copy over the whole string, it's in O(n^2)
final_entry = ''.join(final_entry)
# Commit to output file
outfile.write(final_entry)
outfile.flush()
# Errors signaling
if errors:
error_msg = "Unrecoverable corruptions (because of ambiguity) in file %s on characters: %s." % (relfilepath, [hex(int(x)) for x in errors]) # Signal to user that this file has unrecoverable corruptions (he may try to fix the bits manually or with his own script)
return (1, error_msg) # return an error
# Close all input files
for fh in fileshandles:
fh.close()
# Close output file
if outfile != outpath: # close only if we were not given a file handle in the first place
outfile.flush()
outfile.close()
return (0, None)
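# --- Illustrative sketch (added for clarity, not part of the original script) ---
# The core idea implemented by majority_vote_byte_scan() above, reduced to a
# minimal standalone example on three in-memory copies of the same string: for
# each position, keep the character that most copies agree on, and fall back to
# a null byte when every copy disagrees. The helper name _demo_majority_vote is
# hypothetical and exists only for illustration.
def _demo_majority_vote(copies):
    from collections import Counter
    merged = []
    for column in zip(*copies):  # walk the copies character-column by character-column
        char, count = Counter(column).most_common(1)[0]
        merged.append(char if count > 1 else "\x00")  # full ambiguity -> null byte
    return ''.join(merged)
# _demo_majority_vote(["hello", "hxllo", "helxo"]) == "hello"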
def synchronize_files(inputpaths, outpath, database=None, tqdm_bar=None, report_file=None, ptee=None, verbose=False):
''' Main function to synchronize files contents by majority vote
The main job of this function is to walk through the input folders and align the files, so that we can compare every files across every folders, one by one.
The whole trick here is to align files, so that we don't need to memorize all the files in memory and we compare all equivalent files together: to do that, we ensure that we walk through the input directories in alphabetical order, and we pick the relative filepath at the top of the alphabetical order, this ensures the alignment of files between different folders, without memorizing the whole trees structures.
'''
# (Generator) Files Synchronization Algorithm:
# Needs a function stable_dir_walking, which will walk through directories recursively but in always the same order on all platforms (same order for files but also for folders), whatever order it is, as long as it is stable.
# Until there's no file in any of the input folders to be processed:
# - curfiles <- load first file for each folder by using stable_dir_walking on each input folder.
# - curfiles_grouped <- group curfiles_ordered:
# * curfiles_ordered <- order curfiles alphabetically (need to separate the relative parent directory and the filename, to account for both without ambiguity)
# * curfiles_grouped <- empty list
# * curfiles_grouped[0] = add first element in curfiles_ordered
# * last_group = 0
# * for every subsequent element nextelt in curfiles_ordered:
# . if nextelt == curfiles_grouped[last_group][0]: add nextelt into curfiles_grouped[last_group] (the latest group in curfiles_grouped)
# . else: create a new group in curfiles_grouped (last_group += 1) and add nextelt into curfiles_grouped[last_group]
# At this stage, curfiles_grouped[0] should contain a group of files with the same relative filepath from different input folders, and since we used stable_dir_walking, we are guaranteed that this file is the next to be processed in alphabetical order.
# - Majority vote byte-by-byte for each of curfiles_grouped[0], and output winning byte to the output file.
# - Update files list alignment: we will now ditch files in curfiles_grouped[0] from curfiles, and replace by the next files respectively from each respective folder. Since we processed in alphabetical (or whatever) order, the next loaded files will match the files in other curfiles_grouped groups that we could not process before.
# At this point (after the loop), all input files have been processed in order, without maintaining the whole files list in memory, just one file per input folder.
# Init files walking generator for each inputpaths
recgen = [recwalk(path, sorting=True) for path in inputpaths]
curfiles = {}
recgen_exhausted = {}
recgen_exhausted_count = 0
nbpaths = len(inputpaths)
retcode = 0
if not ptee: ptee = sys.stdout
# Open report file and write header
if report_file is not None:
rfile = open(report_file, 'wb')
r_writer = csv.writer(rfile, delimiter='|', lineterminator='\n', quotechar='"')
r_header = ["filepath"] + ["dir%i" % (i+1) for i in xrange(nbpaths)] + ["hash-correct", "error_code", "errors"]
r_length = len(r_header)
r_writer.writerow(r_header)
# Initialization: load the first batch of files, one for each folder
for i in xrange(len(recgen)):
recgen_exhausted[i] = False
try:
if curfiles.get(i, None) is None:
curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1]
except StopIteration:
recgen_exhausted[i] = True
recgen_exhausted_count += 1
# Files lists alignment loop
while recgen_exhausted_count < nbpaths:
errcode = 0
errmsg = None
# Init a new report's row
if report_file: r_row = ["-"] * r_length
# -- Group equivalent relative filepaths together
#print curfiles # debug
curfiles_grouped = sort_group(curfiles, True)
# -- Extract first group of equivalent filepaths (this allows us to process with the same alphabetical order on all platforms)
# Note that the remaining files in other groups will be processed later, because their alphabetical order is higher to the first group, this means that the first group is to be processed now
to_process = curfiles_grouped[0]
#print to_process # debug
# -- Byte-by-byte majority vote on the first group of files
# Need the relative filepath also (note that there's only one since it's a group of equivalent relative filepaths, only the absolute path is different between files of a same group)
relfilepath = path2unix(os.path.join(*to_process[0][1]))
if report_file: r_row[0] = relfilepath
if verbose: ptee.write("- Processing file %s." % relfilepath)
# Generate output path
outpathfull = os.path.join(outpath, relfilepath)
create_dir_if_not_exist(os.path.dirname(outpathfull))
# Initialize the list of absolute filepaths
fileslist = []
for elt in to_process:
i = elt[0]
fileslist.append(os.path.join(inputpaths[i], os.path.join(*elt[1])))
if report_file: r_row[i+1] = 'X' # put an X in the report file below each folder that contains this file
# If there's only one file, just copy it over
if len(to_process) == 1:
shutil.copyfile(fileslist[0], outpathfull)
id = to_process[0][0]
if report_file: r_row[id+1] = 'O'
# Else, merge by majority vote
else:
# Before-merge check using rfigc database, if provided
# If one of the files in the input folders is already correct, just copy it over
correct_file = None
if database:
for id, filepath in enumerate(fileslist):
if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (filepath, database)) == 0:
correct_file = filepath
correct_id = to_process[id][0]
break
# If one correct file was found, copy it over
if correct_file:
create_dir_if_not_exist(os.path.dirname(outpathfull))
shutil.copyfile(correct_file, outpathfull)
if report_file:
r_row[correct_id+1] = "O"
r_row[-3] = "OK"
# Else, we need to do the majority vote merge
else:
# Do the majority vote merge
errcode, errmsg = majority_vote_byte_scan(relfilepath, fileslist, outpath)
# After-merge/move check using rfigc database, if provided
if database:
if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (outpathfull, database)) == 1:
errcode = 1
r_row[-3] = "KO"
if not errmsg: errmsg = ''
errmsg += " File could not be totally repaired according to rfigc database."
else:
if report_file:
r_row[-3] = "OK"
if errmsg: errmsg += " But merged file is correct according to rfigc database."
# Display errors if any
if errcode:
if report_file:
r_row[-2] = "KO"
r_row[-1] = errmsg
ptee.write(errmsg)
retcode = 1
else:
if report_file: r_row[-2] = "OK"
# Save current report's row
if report_file:
r_writer.writerow(r_row)
# -- Update files lists alignment (ie, retrieve new files but while trying to keep the alignment)
for elt in to_process: # for files of the first group (the ones we processed)
i = elt[0]
# Walk their respective folders and load up the next file
try:
if not recgen_exhausted.get(i, False):
curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1]
# If there's no file left in this folder, mark this input folder as exhausted and continue with the others
except StopIteration:
curfiles[i] = None
recgen_exhausted[i] = True
recgen_exhausted_count += 1
if tqdm_bar: tqdm_bar.update()
if tqdm_bar: tqdm_bar.close()
# Closing report file
if report_file:
# Write list of directories and legend
rfile.write("\n=> Input directories:")
for id, ipath in enumerate(inputpaths):
rfile.write("\n\t- dir%i = %s" % ((id+1), ipath))
rfile.write("\n=> Output directory: %s" % outpath)
rfile.write("\n=> Legend: X=existing/selected for majority vote, O=only used this file, - = not existing, OK = check correct, KO = check incorrect (file was not recovered)\n")
# Close the report file handle
rfile.close()
return retcode
#***********************************
# GUI AUX FUNCTIONS
#***********************************
# Try to import Gooey for GUI display, but manage exception so that we replace the Gooey decorator by a dummy function that will just return the main function as-is, thus keeping the compatibility with command-line usage
try: # pragma: no cover
import lib.gooey as gooey
except ImportError as exc:
# Define a dummy replacement function for Gooey to stay compatible with command-line usage
class gooey(object): # pragma: no cover
def Gooey(func):
return func
# If --gui was specified, then there's a problem
if len(sys.argv) > 1 and sys.argv[1] == '--gui': # pragma: no cover
print('ERROR: --gui specified but an error happened with lib/gooey, cannot load the GUI (however you can still use this script in commandline). Check that lib/gooey exists and that you have wxpython installed. Here is the error: ')
raise(exc)
def conditional_decorator(flag, dec): # pragma: no cover
def decorate(fn):
if flag:
return dec(fn)
else:
return fn
return decorate
def check_gui_arg(): # pragma: no cover
'''Check that the --gui argument was passed, and if true, we remove the --gui option and replace by --gui_launched so that Gooey does not loop infinitely'''
if len(sys.argv) > 1 and sys.argv[1] == '--gui':
        # DEPRECATED since Gooey automatically supplies a --ignore-gooey argument when calling back the script for processing
#sys.argv[1] = '--gui_launched' # CRITICAL: need to remove/replace the --gui argument, else it will stay in memory and when Gooey will call the script again, it will be stuck in an infinite loop calling back and forth between this script and Gooey. Thus, we need to remove this argument, but we also need to be aware that Gooey was called so that we can call gooey.GooeyParser() instead of argparse.ArgumentParser() (for better fields management like checkboxes for boolean arguments). To solve both issues, we replace the argument --gui by another internal argument --gui_launched.
return True
else:
return False
def AutoGooey(fn): # pragma: no cover
'''Automatically show a Gooey GUI if --gui is passed as the first argument, else it will just run the function as normal'''
if check_gui_arg():
return gooey.Gooey(fn)
else:
return fn
#***********************************
# MAIN
#***********************************
#@conditional_decorator(check_gui_arg(), gooey.Gooey) # alternative to AutoGooey which also correctly works
@AutoGooey
def main(argv=None):
if argv is None: # if argv is empty, fetch from the commandline
argv = sys.argv[1:]
elif isinstance(argv, basestring): # else if argv is supplied but it's a simple string, we need to parse it to a list of arguments before handing to argparse or any other argument parser
argv = shlex.split(argv) # Parse string just like argv using shlex
#==== COMMANDLINE PARSER ====
#== Commandline description
desc = '''Replication Repair
Description: Given a set of directories (or files), try to repair your files by scanning each byte, cast a majority vote among all copies, and then output the winning byte. This process is usually called triple-modular redundancy (but here it should be called n-modular redundancy since you can use as many copies as you have).
It is recommended for long term storage to store several copies of your files on different storage mediums. Everything's fine until all your copies are partially corrupted. In this case, this script can help you, by taking advantage of your multiple copies, without requiring a pregenerated ecc file. Just specify the path to every copies, and the script will try to recover them.
Replication can repair exactly r-2 errors using majority vote (you need at least 2 blocks for majority vote to work), where r is the number of replications: if r=3, you get a redundancy rate of 1/3, if r=4, rate is 2/4, etc.
This script can also take advantage of a database generated by rfigc.py to make sure that the recovered files are correct, or to select files that are already correct.
Note: in case the end result is not what you expected, you can try a different order of input directories: in case of ambiguity, the first input folder has precedence over subsequent folders.
Note2: in case some files with the same names are of different length, the merging will continue until the longest file is exhausted.
Note3: last modification date is not (yet) accounted for.
'''
ep = '''Use --gui as the first argument to use with a GUI (via Gooey).
'''
#== Commandline arguments
#-- Constructing the parser
# Use GooeyParser if we want the GUI because it will provide better widgets
if len(argv) > 0 and (argv[0] == '--gui' and not '--ignore-gooey' in argv): # pragma: no cover
# Initialize the Gooey parser
main_parser = gooey.GooeyParser(add_help=True, description=desc, epilog=ep, formatter_class=argparse.RawTextHelpFormatter)
# Define Gooey widget types explicitly (because type auto-detection doesn't work quite well)
widget_dir = {"widget": "DirChooser"}
widget_filesave = {"widget": "FileSaver"}
widget_file = {"widget": "FileChooser"}
widget_text = {"widget": "TextField"}
widget_multidir = {"widget": "MultiDirChooser"}
else: # Else in command-line usage, use the standard argparse
# Delete the special argument to avoid unrecognized argument error in argparse
if '--ignore-gooey' in argv[0]: argv.remove('--ignore-gooey') # this argument is automatically fed by Gooey when the user clicks on Start
# Initialize the normal argparse parser
main_parser = argparse.ArgumentParser(add_help=True, description=desc, epilog=ep, formatter_class=argparse.RawTextHelpFormatter)
        # Define dummy dict to keep compatible with command-line usage
widget_dir = {}
widget_filesave = {}
widget_file = {}
widget_text = {}
widget_multidir = {}
# Required arguments
main_parser.add_argument('-i', '--input', metavar='"/path/to/copy1/" "/path/to/copy2/" "etc."', type=is_dir_or_file, nargs='+', required=True,
help='Specify the paths to every copies you have (minimum 3 copies, else it won\'t work!). Can be folders or files (if you want to repair only one file). Order matters: in case of ambiguity, the first folder where the file exists will be chosen.', **widget_multidir)
main_parser.add_argument('-o', '--output', metavar='/ouput/folder/', nargs=1, required=True,
help='Where the recovered files will be stored.', **widget_dir)
# Optional general arguments
main_parser.add_argument('-d', '--database', metavar='database.csv', type=is_file, required=False,
help='Path to a previously generated rfigc.py database. If provided, this will be used to check that the repaired files are correct (and also to find already correct files in copies).', **widget_file)
main_parser.add_argument('-r', '--report', metavar='/some/folder/report.csv', type=str, required=False,
help='Save all results of the repair process in a report file, with detailed descriptions of ambiguous repairs (ie, when majority vote came to a draw).', **widget_filesave)
main_parser.add_argument('-l', '--log', metavar='/some/folder/filename.log', type=str, nargs=1, required=False,
help='Path to the log file. (Output will be piped to both the stdout and the log file)', **widget_filesave)
main_parser.add_argument('-f', '--force', action='store_true', required=False, default=False,
help='Force overwriting the output folder even if it already exists.')
main_parser.add_argument('-v', '--verbose', action='store_true', required=False, default=False,
help='Verbose mode (show more output).')
main_parser.add_argument('--silent', action='store_true', required=False, default=False,
help='No console output (but if --log specified, the log will still be saved in the specified file).')
#== Parsing the arguments
args = main_parser.parse_args(argv) # Storing all arguments to args
#-- Set variables from arguments
inputpaths = [fullpath(x) for x in args.input] # path to the files to repair (ie, paths to all the different copies the user has)
outputpath = fullpath(args.output[0])
force = args.force
verbose = args.verbose
silent = args.silent
if len(inputpaths) < 3:
raise Exception('Need at least 3 copies to do a replication repair/majority vote!')
#if os.path.isfile(inputpath): # if inputpath is a single file (instead of a folder), then define the rootfolderpath as the parent directory (for correct relative path generation, else it will also truncate the filename!)
#rootfolderpath = os.path.dirname(inputpath)
report_file = None
if args.report: report_file = os.path.basename(fullpath(args.report))
database = None
if args.database: database = args.database
# -- Checking arguments
if os.path.exists(outputpath) and not force:
raise NameError('Specified output path %s already exists! Use --force if you want to overwrite.' % outputpath)
if database and not os.path.isfile(database):
raise NameError('Specified rfigc database file %s does not exist!' % database)
# -- Configure the log file if enabled (ptee.write() will write to both stdout/console and to the log file)
if args.log:
ptee = Tee(args.log[0], 'a', nostdout=silent)
#sys.stdout = Tee(args.log[0], 'a')
sys.stderr = Tee(args.log[0], 'a', nostdout=silent)
else:
ptee = Tee(nostdout=silent)
# == PROCESSING BRANCHING == #
# == Precomputation of ecc file size
# Precomputing is important so that the user can know what size to expect before starting (and how much time it will take...).
filescount = 0
sizetotal = 0
sizeheaders = 0
visitedfiles = {}
ptee.write("Precomputing list of files and predicted statistics...")
prebar = tqdm.tqdm(file=ptee, disable=silent)
for inputpath in inputpaths:
for (dirpath, filename) in recwalk(inputpath):
# Get full absolute filepath
filepath = os.path.join(dirpath, filename)
relfilepath = path2unix(os.path.relpath(filepath, inputpath)) # File relative path from the root (we truncate the rootfolderpath so that we can easily check the files later even if the absolute path is different)
# Only increase the files count if we didn't see this file before
if not visitedfiles.get(relfilepath, None):
# Counting the total number of files we will process (so that we can show a progress bar with ETA)
filescount = filescount + 1
# Add the file to the list of already visited files
visitedfiles[relfilepath] = True
# Get the current file's size
size = os.stat(filepath).st_size
# Compute total size of all files
sizetotal = sizetotal + size
prebar.update()
prebar.close()
ptee.write("Precomputing done.")
# == Majority vote repair
# For each folder, align the files lists and then majority vote over each byte to repair
ptee.write("====================================")
ptee.write("Replication repair, started on %s" % datetime.datetime.now().isoformat())
ptee.write("====================================")
# Prepare progress bar if necessary
if silent:
tqdm_bar = None
else:
tqdm_bar = tqdm.tqdm(total=filescount, file=ptee, leave=True, unit="files")
# Call the main function to synchronize files using majority vote
errcode = synchronize_files(inputpaths, outputpath, database=database, tqdm_bar=tqdm_bar, report_file=report_file, ptee=ptee, verbose=verbose)
#ptee.write("All done! Stats:\n- Total files processed: %i\n- Total files corrupted: %i\n- Total files repaired completely: %i\n- Total files repaired partially: %i\n- Total files corrupted but not repaired at all: %i\n- Total files skipped: %i" % (files_count, files_corrupted, files_repaired_completely, files_repaired_partially, files_corrupted - (files_repaired_partially + files_repaired_completely), files_skipped) )
if tqdm_bar: tqdm_bar.close()
ptee.write("All done!")
if report_file: ptee.write("Saved replication repair results in report file: %s" % report_file)
del ptee
return errcode
# Calling main function if the script is directly called (not imported as a library in another program)
if __name__ == "__main__": # pragma: no cover
sys.exit(main())
|
[
"os.path.isfile",
"os.path.join",
"lib.gooey.Gooey",
"os.path.dirname",
"os.path.exists",
"shlex.split",
"shutil.copyfile",
"datetime.datetime.now",
"csv.writer",
"os.stat",
"lib.tqdm.tqdm",
"lib.argparse.ArgumentParser",
"lib.aux_funcs.recwalk",
"os.makedirs",
"lib.aux_funcs.fullpath",
"lib.gooey.GooeyParser",
"rfigc.main",
"os.path.relpath",
"lib.tee.Tee"
] |
[((1622, 1647), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1637, 1647), False, 'import sys, os\n'), ((1664, 1697), 'os.path.join', 'os.path.join', (['thispathname', '"""lib"""'], {}), "(thispathname, 'lib')\n", (1676, 1697), False, 'import sys, os\n'), ((31271, 31295), 'lib.aux_funcs.fullpath', 'fullpath', (['args.output[0]'], {}), '(args.output[0])\n', (31279, 31295), False, 'from lib.aux_funcs import recwalk, path2unix, fullpath, is_dir_or_file, is_dir, is_file, create_dir_if_not_exist\n'), ((32981, 33017), 'lib.tqdm.tqdm', 'tqdm.tqdm', ([], {'file': 'ptee', 'disable': 'silent'}), '(file=ptee, disable=silent)\n', (32990, 33017), True, 'import lib.tqdm as tqdm\n'), ((6835, 6869), 'os.path.join', 'os.path.join', (['outpath', 'relfilepath'], {}), '(outpath, relfilepath)\n', (6847, 6869), False, 'import sys, os\n'), ((6887, 6915), 'os.path.dirname', 'os.path.dirname', (['outpathfull'], {}), '(outpathfull)\n', (6902, 6915), False, 'import sys, os\n'), ((16385, 16412), 'lib.aux_funcs.recwalk', 'recwalk', (['path'], {'sorting': '(True)'}), '(path, sorting=True)\n', (16392, 16412), False, 'from lib.aux_funcs import recwalk, path2unix, fullpath, is_dir_or_file, is_dir, is_file, create_dir_if_not_exist\n'), ((16726, 16794), 'csv.writer', 'csv.writer', (['rfile'], {'delimiter': '"""|"""', 'lineterminator': '"""\n"""', 'quotechar': '"""\\""""'}), '(rfile, delimiter=\'|\', lineterminator=\'\\n\', quotechar=\'"\')\n', (16736, 16794), False, 'import csv\n'), ((18629, 18663), 'os.path.join', 'os.path.join', (['outpath', 'relfilepath'], {}), '(outpath, relfilepath)\n', (18641, 18663), False, 'import sys, os\n'), ((25062, 25077), 'lib.gooey.Gooey', 'gooey.Gooey', (['fn'], {}), '(fn)\n', (25073, 25077), True, 'import lib.gooey as gooey\n'), ((27693, 27805), 'lib.gooey.GooeyParser', 'gooey.GooeyParser', ([], {'add_help': '(True)', 'description': 'desc', 'epilog': 'ep', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(add_help=True, description=desc, epilog=ep,\n formatter_class=argparse.RawTextHelpFormatter)\n', (27710, 27805), True, 'import lib.gooey as gooey\n'), ((28518, 28636), 'lib.argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(True)', 'description': 'desc', 'epilog': 'ep', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(add_help=True, description=desc, epilog=ep,\n formatter_class=argparse.RawTextHelpFormatter)\n', (28541, 28636), True, 'import lib.argparse as argparse\n'), ((31138, 31149), 'lib.aux_funcs.fullpath', 'fullpath', (['x'], {}), '(x)\n', (31146, 31149), False, 'from lib.aux_funcs import recwalk, path2unix, fullpath, is_dir_or_file, is_dir, is_file, create_dir_if_not_exist\n'), ((31973, 31999), 'os.path.exists', 'os.path.exists', (['outputpath'], {}), '(outputpath)\n', (31987, 31999), False, 'import sys, os\n'), ((32417, 32455), 'lib.tee.Tee', 'Tee', (['args.log[0]', '"""a"""'], {'nostdout': 'silent'}), "(args.log[0], 'a', nostdout=silent)\n", (32420, 32455), False, 'from lib.tee import Tee\n'), ((32521, 32559), 'lib.tee.Tee', 'Tee', (['args.log[0]', '"""a"""'], {'nostdout': 'silent'}), "(args.log[0], 'a', nostdout=silent)\n", (32524, 32559), False, 'from lib.tee import Tee\n'), ((32585, 32605), 'lib.tee.Tee', 'Tee', ([], {'nostdout': 'silent'}), '(nostdout=silent)\n', (32588, 32605), False, 'from lib.tee import Tee\n'), ((33086, 33104), 'lib.aux_funcs.recwalk', 'recwalk', (['inputpath'], {}), '(inputpath)\n', (33093, 33104), False, 'from lib.aux_funcs import recwalk, path2unix, fullpath, is_dir_or_file, is_dir, is_file, create_dir_if_not_exist\n'), ((34545, 34609), 'lib.tqdm.tqdm', 'tqdm.tqdm', ([], {'total': 'filescount', 'file': 'ptee', 'leave': '(True)', 'unit': '"""files"""'}), "(total=filescount, file=ptee, leave=True, unit='files')\n", (34554, 34609), True, 'import lib.tqdm as tqdm\n'), ((6931, 6953), 'os.path.exists', 'os.path.exists', (['pardir'], {}), '(pardir)\n', (6945, 6953), False, 'import sys, os\n'), ((6967, 6986), 'os.makedirs', 'os.makedirs', (['pardir'], {}), '(pardir)\n', (6978, 6986), False, 'import sys, os\n'), ((18426, 18457), 'os.path.join', 'os.path.join', (['*to_process[0][1]'], {}), '(*to_process[0][1])\n', (18438, 18457), False, 'import sys, os\n'), ((18696, 18724), 'os.path.dirname', 'os.path.dirname', (['outpathfull'], {}), '(outpathfull)\n', (18711, 18724), False, 'import sys, os\n'), ((19152, 19194), 'shutil.copyfile', 'shutil.copyfile', (['fileslist[0]', 'outpathfull'], {}), '(fileslist[0], outpathfull)\n', (19167, 19194), False, 'import shutil\n'), ((25655, 25672), 'shlex.split', 'shlex.split', (['argv'], {}), '(argv)\n', (25666, 25672), False, 'import shlex\n'), ((31847, 31868), 'lib.aux_funcs.fullpath', 'fullpath', (['args.report'], {}), '(args.report)\n', (31855, 31868), False, 'from lib.aux_funcs import recwalk, path2unix, fullpath, is_dir_or_file, is_dir, is_file, create_dir_if_not_exist\n'), ((32159, 32183), 'os.path.isfile', 'os.path.isfile', (['database'], {}), '(database)\n', (32173, 32183), False, 'import sys, os\n'), ((33170, 33201), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (33182, 33201), False, 'import sys, os\n'), ((2645, 2687), 'os.path.relpath', 'os.path.relpath', (['recwalk_result[0]', 'pardir'], {}), '(recwalk_result[0], pardir)\n', (2660, 2687), False, 'import sys, os\n'), ((7332, 7360), 'os.path.dirname', 'os.path.dirname', (['outpathfull'], {}), '(outpathfull)\n', (7347, 7360), False, 'import sys, os\n'), ((20008, 20050), 'shutil.copyfile', 'shutil.copyfile', (['correct_file', 'outpathfull'], {}), '(correct_file, outpathfull)\n', (20023, 20050), False, 'import shutil\n'), ((20482, 20549), 'rfigc.main', 'rfigc.main', (['(\'-i "%s" -d "%s" -m --silent\' % (outpathfull, database))'], {}), '(\'-i "%s" -d "%s" -m --silent\' % (outpathfull, database))\n', (20492, 20549), False, 'import rfigc\n'), ((33238, 33274), 'os.path.relpath', 'os.path.relpath', (['filepath', 'inputpath'], {}), '(filepath, inputpath)\n', (33253, 33274), False, 'import sys, os\n'), ((18912, 18933), 'os.path.join', 'os.path.join', (['*elt[1]'], {}), '(*elt[1])\n', (18924, 18933), False, 'import sys, os\n'), ((19962, 19990), 'os.path.dirname', 'os.path.dirname', (['outpathfull'], {}), '(outpathfull)\n', (19977, 19990), False, 'import sys, os\n'), ((33907, 33924), 'os.stat', 'os.stat', (['filepath'], {}), '(filepath)\n', (33914, 33924), False, 'import sys, os\n'), ((34344, 34367), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (34365, 34367), False, 'import datetime, time\n'), ((19626, 19690), 'rfigc.main', 'rfigc.main', (['(\'-i "%s" -d "%s" -m --silent\' % (filepath, database))'], {}), '(\'-i "%s" -d "%s" -m --silent\' % (filepath, database))\n', (19636, 19690), False, 'import rfigc\n')]
|
# -*- coding: utf-8 -*-
from app import app as application
if __name__ == '__main__':
application.run(host="0.0.0.0", debug=True)
|
[
"app.app.run"
] |
[((93, 136), 'app.app.run', 'application.run', ([], {'host': '"""0.0.0.0"""', 'debug': '(True)'}), "(host='0.0.0.0', debug=True)\n", (108, 136), True, 'from app import app as application\n')]
|
import pandas as pd
import psycopg2 as pg2
import yaml
import io
import ohio.ext.pandas
from sqlalchemy import create_engine
def open_db_connection(secrets_file="secrets.yaml", verbose=True):
"""
Opens connection to psql db
:return:
connection object
"""
try:
with open(secrets_file, 'r') as f:
# loads contents of secrets.yaml into a python dictionary
secret_config = yaml.safe_load(f.read())
db_params = secret_config['db']
except FileNotFoundError:
print("Cannot establish connection to database. Please provide db_params in secrets.yaml file.")
exit(1)
conn = pg2.connect(
host=db_params['host'],
port=db_params['port'],
dbname=db_params['dbname'],
user=db_params['user'],
password=db_params['password']
)
if verbose:
print(f"Connection opened to database {db_params['dbname']}")
return conn
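# Expected layout of secrets.yaml (illustrative example added for clarity; the
# key names come from the code above, the values are hypothetical and
# site-specific):
# db:
#   host: localhost
#   port: 5432
#   dbname: bills_db
#   user: bills1
#   password: <password>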
connection = open_db_connection()
def write_df_in_table(conn, df, schema_name, table_name):
"""write pandas dataframe in table
Args:
conn: a pSQL databse connection object
df: a pandas dataframe to write to the database
schema_name: name of the schema for the table
table_name: name of the table
"""
# write df to memory buffer
SEP = "~"
buffer = io.StringIO()
df.to_csv(buffer, index_label='id', header=False, sep=SEP)
buffer.seek(0)
type_mapping = {'int64': 'integer', 'float64': 'double precision', 'object': 'varchar'}
cur = conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {schema_name}.{table_name};")
cur.execute(f"CREATE TABLE {schema_name}.{table_name} (id integer PRIMARY KEY);")
# cur.execute(f"GRANT ALL PRIVILEGES ON {schema_name}.{table_name} TO bills1;")
cur.execute(f"ALTER TABLE {schema_name}.{table_name} OWNER TO bills1;")
# create table column
for col_name, col_type in zip(df.columns, df.dtypes):
print(col_name)
col_type = type_mapping[str(col_type)]
cur.execute(f"ALTER table {schema_name}.{table_name} ADD COLUMN {col_name} {col_type};")
# hard-coded for now, may be made dynamic later
# TODO: need to figure out how to change NULL values to date as well
#if col_name == "introduced_date":
# cur.execute(f"""ALTER table {schema_name}.{table_name} ALTER COLUMN {col_name}
# TYPE date using to_date({col_name}, 'YYYY-MM-DD');""")
# copy data from buffer to table
cur.copy_from(buffer, f'{schema_name}.{table_name}', sep=SEP)
conn.commit()
cur.close()
# If you need to recreate the SQL tables for whatever reason
object = pd.read_pickle(r'/data/groups/bills1/mlpolicylab_fall20_bills1/bid_groups.pkl')
white_df = pd.DataFrame(object['white'], columns=['bill_id'])
write_df_in_table(conn=connection, df=white_df, schema_name="sketch", table_name="reference_bills_w")
"""
black_df = pd.DataFrame(object['black'], columns=['bill_id'])
asian_df = pd.DataFrame(object['asian'], columns=['bill_id'])
write_df_in_table(conn=connection, df= black_df, schema_name="sketch", table_name="protected_bills_b")
write_df_in_table(conn=connection, df= asian_df, schema_name="sketch", table_name="protected_bills_a")
"""
|
[
"pandas.read_pickle",
"io.StringIO",
"pandas.DataFrame",
"psycopg2.connect"
] |
[((2709, 2787), 'pandas.read_pickle', 'pd.read_pickle', (['"""/data/groups/bills1/mlpolicylab_fall20_bills1/bid_groups.pkl"""'], {}), "('/data/groups/bills1/mlpolicylab_fall20_bills1/bid_groups.pkl')\n", (2723, 2787), True, 'import pandas as pd\n'), ((2800, 2850), 'pandas.DataFrame', 'pd.DataFrame', (["object['white']"], {'columns': "['bill_id']"}), "(object['white'], columns=['bill_id'])\n", (2812, 2850), True, 'import pandas as pd\n'), ((667, 820), 'psycopg2.connect', 'pg2.connect', ([], {'host': "db_params['host']", 'port': "db_params['port']", 'dbname': "db_params['dbname']", 'user': "db_params['user']", 'password': "db_params['password']"}), "(host=db_params['host'], port=db_params['port'], dbname=\n db_params['dbname'], user=db_params['user'], password=db_params['password']\n )\n", (678, 820), True, 'import psycopg2 as pg2\n'), ((1365, 1378), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1376, 1378), False, 'import io\n')]
|
from fastai.text import *
import fire
BOS = 'xbos' # beginning-of-sentence tag
FLD = 'xfld' # data field tag
BOS_LABEL = '_bos_'
PAD = '_pad_'
re1 = re.compile(r' +')
def read_file(filepath):
assert os.path.exists(filepath)
sentences = []
labels = []
with open(filepath, encoding='utf-8') as f:
sentence = [BOS]
sentence_labels = [BOS_LABEL]
for line in f:
if line == '\n':
sentences.append(sentence)
labels.append(sentence_labels)
sentence = [BOS] # use xbos as the start of sentence token
sentence_labels = [BOS_LABEL]
else:
sentence.append(line.split()[0].lower())
# label is generally in the last column
sentence_labels.append(line.split()[-1])
        if len(sentence) > 1: # only append the trailing sentence if the file did not end on an empty line (otherwise a spurious [BOS]-only sentence would be added)
sentences.append(sentence)
labels.append(sentence_labels)
return sentences, labels
def create_toks(prefix, max_vocab=30000, min_freq=1):
PATH = f'data/nlp_seq/{prefix}/'
names = {}
if prefix == 'ner':
names['train'] = 'train.txt'
names['val'] = 'valid.txt'
names['test'] = 'test.txt'
else:
raise ValueError(f'Filenames for {prefix} have to be added first.')
paths = {}
for split in ['train', 'val', 'test']:
paths[split] = f'{PATH}{names[split]}'
print(f'prefix {prefix} max_vocab {max_vocab} min_freq {min_freq}')
os.makedirs(f'{PATH}tmp', exist_ok=True)
trn_tok, trn_labels = read_file(paths['train'])
val_tok, val_labels = read_file(paths['val'])
test_tok, test_labels = read_file(paths['test'])
for trn_t, trn_l in zip(trn_tok[:5], trn_labels[:5]):
print('Sentence:', trn_t, 'labels:', trn_l)
print(f'# of train: {len(trn_tok)}, # of val: {len(val_tok)},'
f'# of test: {len(test_tok)}')
freq = Counter(p for o in trn_tok for p in o)
print(freq.most_common(25))
itos = [o for o, c in freq.most_common(max_vocab) if c > min_freq]
itos.insert(0, PAD)
itos.insert(0, '_unk_')
stoi = collections.defaultdict(lambda: 0,
{v: k for k, v in enumerate(itos)})
print(len(itos))
trn_ids = np.array([[stoi[o] for o in p] for p in trn_tok])
val_ids = np.array([[stoi[o] for o in p] for p in val_tok])
test_ids = np.array([[stoi[o] for o in p] for p in test_tok])
# map the labels to ids
freq = Counter(p for o in trn_labels for p in o)
print(freq)
itol = [l for l, c in freq.most_common()]
itol.insert(1, PAD) # insert padding label at index 1
print(itol)
ltoi = {l: i for i, l in enumerate(itol)}
trn_lbl_ids = np.array([[ltoi[o] for o in p] for p in trn_labels])
val_lbl_ids = np.array([[ltoi[o] for o in p] for p in val_labels])
test_lbl_ids = np.array([[ltoi[o] for o in p] for p in test_labels])
ids_joined = np.array([[stoi[o] for o in p] for p in trn_tok + val_tok + test_tok])
val_ids_joined = ids_joined[int(len(ids_joined)*0.9):]
ids_joined = ids_joined[:int(len(ids_joined)*0.9)]
np.save(f'{PATH}tmp/trn_ids.npy', trn_ids)
np.save(f'{PATH}tmp/val_ids.npy', val_ids)
np.save(f'{PATH}tmp/test_ids.npy', test_ids)
np.save(f'{PATH}tmp/lbl_trn.npy', trn_lbl_ids)
np.save(f'{PATH}tmp/lbl_val.npy', val_lbl_ids)
np.save(f'{PATH}tmp/lbl_test.npy', test_lbl_ids)
pickle.dump(itos, open(f'{PATH}tmp/itos.pkl', 'wb'))
pickle.dump(itol, open(f'{PATH}tmp/itol.pkl', 'wb'))
np.save(f'{PATH}tmp/trn_lm_ids.npy', ids_joined)
np.save(f'{PATH}tmp/val_lm_ids.npy', val_ids_joined)
if __name__ == '__main__': fire.Fire(create_toks)
|
[
"fire.Fire"
] |
[((3710, 3732), 'fire.Fire', 'fire.Fire', (['create_toks'], {}), '(create_toks)\n', (3719, 3732), False, 'import fire\n')]
|
from django.db import models
from django.urls import reverse
# Create your models here.
class Product(models.Model):
title = models.CharField(max_length=120)
description = models.TextField(null=True)
price = models.DecimalField(decimal_places=2, max_digits=100)
summary = models.TextField(blank=True)
featured = models.BooleanField(default=False)
def get_absolute_url(self):
return reverse("products:product_detail", kwargs={"id_lookup": self.id})
|
[
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.urls.reverse",
"django.db.models.DecimalField"
] |
[((136, 168), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(120)'}), '(max_length=120)\n', (152, 168), False, 'from django.db import models\n'), ((187, 214), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (203, 214), False, 'from django.db import models\n'), ((233, 286), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(100)'}), '(decimal_places=2, max_digits=100)\n', (252, 286), False, 'from django.db import models\n'), ((305, 333), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (321, 333), False, 'from django.db import models\n'), ((352, 386), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (371, 386), False, 'from django.db import models\n'), ((435, 500), 'django.urls.reverse', 'reverse', (['"""products:product_detail"""'], {'kwargs': "{'id_lookup': self.id}"}), "('products:product_detail', kwargs={'id_lookup': self.id})\n", (442, 500), False, 'from django.urls import reverse\n')]
|
import pysmurf
#S = pysmurf.SmurfControl(make_logfile=False,setup=False,epics_root='test_epics',cfg_file='/usr/local/controls/Applications/smurf/pysmurf/pysmurf/cfg_files/experiment_fp28_smurfsrv04.cfg')
import numpy as np
import time
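# Note (added for clarity): this snippet assumes a SmurfControl instance `S`
# already exists in the interactive session (see the commented-out constructor
# above); run standalone, it would raise a NameError.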
Vrange=np.linspace(0,0.195/6.,100)+S.get_tes_bias_bipolar(3)
Vrange=[Vrange,Vrange[::-1]]
Vrange=np.array(Vrange).flatten()
while True:
for Vtes in Vrange:
S.set_tes_bias_bipolar(7,Vtes)
time.sleep(0.005)
|
[
"numpy.array",
"numpy.linspace",
"time.sleep"
] |
[((246, 278), 'numpy.linspace', 'np.linspace', (['(0)', '(0.195 / 6.0)', '(100)'], {}), '(0, 0.195 / 6.0, 100)\n', (257, 278), True, 'import numpy as np\n'), ((336, 352), 'numpy.array', 'np.array', (['Vrange'], {}), '(Vrange)\n', (344, 352), True, 'import numpy as np\n'), ((447, 464), 'time.sleep', 'time.sleep', (['(0.005)'], {}), '(0.005)\n', (457, 464), False, 'import time\n')]
|
import base64
import pathlib
from typing import List, Tuple
STATIC_DIR = pathlib.Path(__file__).parent / "static"
def _get_img_uri(img_file: str) -> str:
img_path = STATIC_DIR / img_file
with img_path.open(mode="rb") as f:
img_data = f.read()
encoded = base64.b64encode(img_data).decode("ascii")
return f"data:image/png;base64,{encoded}"
def _to_colourscale(colour_sequence: List[str]) -> List[Tuple[float, str]]:
length = len(colour_sequence)
return [(i / (length - 1), colour) for i, colour in enumerate(colour_sequence)]
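# Example (added for illustration):
# _to_colourscale(["red", "green", "blue"]) == [(0.0, "red"), (0.5, "green"), (1.0, "blue")]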
|
[
"pathlib.Path",
"base64.b64encode"
] |
[((75, 97), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (87, 97), False, 'import pathlib\n'), ((277, 303), 'base64.b64encode', 'base64.b64encode', (['img_data'], {}), '(img_data)\n', (293, 303), False, 'import base64\n')]
|
# Generated by Django 3.0.7 on 2020-06-11 11:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0018_auto_20200609_1810'),
]
operations = [
migrations.AddField(
model_name='beneficiaire',
name='date_creation',
field=models.DateTimeField(auto_now_add=True, null=True),
),
]
|
[
"django.db.models.DateTimeField"
] |
[((344, 394), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (364, 394), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python3
import sys
import json
import re
from pathlib import Path
from typing import List, Optional, Any, Dict
from bs4 import BeautifulSoup, Tag
class Hit:
search: str
title: str
anchor: str
content: str
start: int
def __init__(self,
search: str,
page_url: str,
page_title: str,
anchor_url: str,
anchor_title: str,
content: str,
start: int) -> None:
self.search = search
self.page_url = page_url
self.page_title = page_title
self.anchor_url = anchor_url
self.anchor_title = anchor_title
self.content = content
self.start = start
def __str__(self) -> str:
return f"Hit(search={self.search} anchor_url={self.anchor_url} anchor_title={self.anchor_title}"
def build_file_index(f: Path) -> List[Hit]:
"""Construct an index from html file."""
soup = BeautifulSoup(f.read_text(), "html.parser")
try:
title = soup.title.text
except AttributeError:
sys.stderr.write(f"Error in path {f}\n")
raise
relative_path = f.relative_to("hurl.dev/_site/")
url: str
if str(relative_path) == "index.html":
url = "/"
else:
url = f"/{relative_path}"
    # Build a textual representation of the page
    # by aggregating every tag that contains "significant" text
all_hits: List[Hit] = []
all_tags: List[Tag] = []
root = soup.find("div", class_=re.compile("indexed"))
if not root:
sys.stderr.write(f"No indexed content in path {f}\n")
return []
all_tags.extend(root.find_all("p"))
all_tags.extend(root.find_all("ul"))
all_tags.extend(root.find_all("h2"))
all_tags.extend(root.find_all("h3"))
all_tags.extend(root.find_all("h4"))
for tag in all_tags:
hits = build_tag_index(url=url, title=title, soup=soup, tag=tag)
all_hits.extend(hits)
return all_hits
non_significant_words = [
"all",
"also",
"and",
"any",
"are",
"both",
"but",
"can",
"doc",
"does",
"etc",
"for",
"from",
"has",
"have",
"into",
"one",
"only",
"let",
"may",
"say",
"see",
"set",
"the",
"this",
"than",
"that",
"use",
"yet",
"you",
"very",
"when",
"will",
"with",
]
def build_tag_index(url: str, title: str, soup: BeautifulSoup, tag: Tag) -> List[Hit]:
"""Build serach hit from a p tag."""
anchor_tag = find_anchor(tag)
anchor_url: str
anchor_title: str
if anchor_tag:
anchor_id = anchor_tag["id"]
anchor_url = f"{url}#{anchor_id}"
anchor_title = anchor_tag.text
else:
anchor_url = url
anchor_title = title
# Iterate over each word and construct indices
text = tag.text
text = text.replace(" \n", " ")
text = text.replace("\n", " ")
span = 120
hits: List[Hit] = []
for res in re.finditer(r"\w+", text):
match = res[0]
if len(match) < 3 or match.lower() in non_significant_words:
continue
#if len(match) == 4:
# sys.stderr.write(f"-> {match}\n")
start = res.start()
end = res.end()
if start < span:
content_before = text[:start]
else:
content_before = "..." + text[start-span:start]
if (len(text) - end) < span:
content_after = text[end:]
else:
content_after = text[end:end+span] + "..."
content = content_before + match + content_after
hit = Hit(
search=match.lower(),
page_url=url,
page_title=title,
anchor_url=anchor_url,
anchor_title=anchor_title,
content=content,
start=len(content_before)
)
hits.append(hit)
return hits
def find_anchor(tag: Optional[Any]) -> Optional[Tag]:
if isinstance(tag, Tag) and tag.get("id"):
return tag
else:
if tag.previous_sibling:
return find_anchor(tag.previous_sibling)
elif tag.parent:
return find_anchor(tag.parent)
else:
return None
def split(word: str, start: int):
return [word[:end] for end in range(start, len(word)+1)]
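# Note (added for clarity): split() returns every prefix of `word` that is at
# least `start` characters long, e.g. split("assert", 3) yields
# ["ass", "asse", "asser", "assert"], so a query of three or more letters
# resolves to the same hits as the full word.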
def serialize_hits(hits: List[Hit]) -> str:
hits_refs: Dict[str, List[int]] = {}
    # For each hit, map every prefix (length >= 3) of its search term to the hit's index
for i in range(0, len(hits)):
h = hits[i]
words = split(h.search, 3)
for w in words:
hr = hits_refs.get(w)
if hr:
hr.append(i)
else:
hr = [i]
hits_refs[w] = hr
d = {"hits": hits_refs, "refs": hits}
return json.dumps(d, default=lambda o: o.__dict__, sort_keys=True)
def main():
sys.stderr.write("Building search index...\n")
site = Path("hurl.dev/_site")
files = list(site.glob("**/*.html"))
all_hits: List[Hit] = []
for f in files:
hits = build_file_index(f)
all_hits.extend(hits)
index = serialize_hits(all_hits)
print(index)
if __name__ == "__main__":
main()
|
[
"re.finditer",
"json.dumps",
"pathlib.Path",
"sys.stderr.write",
"re.compile"
] |
[((3047, 3072), 're.finditer', 're.finditer', (['"""\\\\w+"""', 'text'], {}), "('\\\\w+', text)\n", (3058, 3072), False, 'import re\n'), ((4840, 4899), 'json.dumps', 'json.dumps', (['d'], {'default': '(lambda o: o.__dict__)', 'sort_keys': '(True)'}), '(d, default=lambda o: o.__dict__, sort_keys=True)\n', (4850, 4899), False, 'import json\n'), ((4918, 4964), 'sys.stderr.write', 'sys.stderr.write', (['"""Building search index...\n"""'], {}), "('Building search index...\\n')\n", (4934, 4964), False, 'import sys\n'), ((4977, 4999), 'pathlib.Path', 'Path', (['"""hurl.dev/_site"""'], {}), "('hurl.dev/_site')\n", (4981, 4999), False, 'from pathlib import Path\n'), ((1599, 1652), 'sys.stderr.write', 'sys.stderr.write', (['f"""No indexed content in path {f}\n"""'], {}), "(f'No indexed content in path {f}\\n')\n", (1615, 1652), False, 'import sys\n'), ((1106, 1146), 'sys.stderr.write', 'sys.stderr.write', (['f"""Error in path {f}\n"""'], {}), "(f'Error in path {f}\\n')\n", (1122, 1146), False, 'import sys\n'), ((1551, 1572), 're.compile', 're.compile', (['"""indexed"""'], {}), "('indexed')\n", (1561, 1572), False, 'import re\n')]
|
# Reference Book: Python Data Science Handbook (page:(70-77))
# Date(13 April, 2019) Day-3, Time = 3:25 PM
# This section covers the use of Boolean masks to examine and manipulate values
# within NumPy arrays.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn; seaborn.set() #set plot style
# use Pandas to extract rainfall inches as a NumPy array
rainfall = pd.read_csv('/media/nahid/New Volume/GitHub/Numpy/Seattle2014.csv')['PRCP'].values
# print(rainfall)
inches = rainfall / 254 #1/10mm -> inches
print(inches.shape) #(365,)
# fig = plt.figure()
# plt.hist(inches, 40)
# print(plt.show())
# fig.savefig('rainfallHistogram.png')
'''
This histogram gives us a general idea of what the data looks like: despite its
reputation, the vast majority of days in Seattle saw near zero measured rainfall
in 2014. But this doesn’t do a good job of conveying some information we’d like
to see: for example, how many rainy days were there in the year? What is the
average precipitation on those rainy days? How many days were there with more
than half an inch of rain?

Digging into the data

One approach to this would be to answer these questions by hand: loop through
the data, incrementing a counter each time we see values in some desired range.
For reasons discussed throughout this chapter, such an approach is very
inefficient, both from the standpoint of time writing code and time computing
the result. We saw in “Computation on NumPy Arrays: Universal Functions” on
page 50 that NumPy’s ufuncs can be used in place of loops to do fast
element-wise arithmetic operations on arrays; in the same way, we can use other
ufuncs to do element-wise comparisons over arrays, and we can then manipulate
the results to answer the questions we have. We’ll leave the data aside for
right now, and discuss some general tools in NumPy to use masking to quickly
answer these types of questions.
'''
a = np.array([1,2,3,4,5])
print(a < 3) # [ True True False False False]
# check this apply all of others relational operator
# like
print(a != 3) # [ True True False True True]
# It is also possible to do an element-by-element comparison of two arrays, and to
# include compound expressions:
print((2 * a) == (a ** 2)) # [False True False False False]
'''
As in the case of arithmetic operators, the comparison operators are implemented
as ufuncs in NumPy; for example, when you write x < 3, internally NumPy uses
np.less(x, 3). A summary of the comparison operators and their equivalent ufuncs
is shown here:
Operator Equivalent ufunc
== np.equal
!= np.not_equal
< np.less
<= np.less_equal
> np.greater
>= np.greater_equal
'''
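# Added illustration: the operators above are thin wrappers around these ufuncs,
# so both spellings below print the same boolean arrays.
print(np.less(a, 3))   # same as a < 3  -> [ True True False False False]
print(np.equal(a, 3))  # same as a == 3 -> [False False True False False]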
# Just as in the case of arithmetic ufuncs, these will work on arrays of any size and
# shape. Here is a two-dimensional example:
rng = np.random.RandomState(0)
x = rng.randint(10, size=(3,4))
print(x)
print(x < 5)
'''
[[5 0 3 3]
[7 9 3 5]
[2 4 7 6]]
[[False True True True]
[False False True False]
[ True True False False]]
'''
print(np.count_nonzero(x < 6)) # 8
print(np.sum(x < 6)) # 8
# how many values less than 6 in each row?
print(np.sum(x < 6, axis=1)) # [4 2 2]
# If we’re interested in quickly checking whether any or all the values are true, we can
# use(you guessed it) np.any() or np.all():
print(np.any(x < 8)) #True
print(np.any(x < 0)) #False
print(np.all(x < 10)) #True
print(np.all(x == 6)) # False
# np.all() and np.any() can be used along particular axes as well. For example:
print(np.all(x < 8, axis=1)) # [ True False True]
# Here all the elements in the first and third rows are less than 8, while this is not the
# case for the second row.
#Boolean operators
print(np.sum((inches > .5) & (inches < 1))) # 29
#So we see that there are 29 days with rainfall between 0.5 and 1.0 inches.
#Using the equivalence of A AND B and NOT (A OR B)
print(np.sum(~((inches <= 0.5) | (inches >= 1)))) # 29
'''
The following table summarizes the bitwise Boolean operators and their equivalent
ufuncs:
Operator Equivalent ufunc
& np.bitwise_and
| np.bitwise_or
^ np.bitwise_xor
~ np.bitwise_not
'''
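# Added illustration: as with the comparison operators, & simply calls the ufunc
# np.bitwise_and, so this reproduces the count of 29 computed above.
print(np.sum(np.bitwise_and(inches > 0.5, inches < 1)))  # 29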
print('Number of days without rain :',np.sum(inches == 0))
print('Number of days with rain :',np.sum(inches != 0))
print('Days with more than .5 inches :',np.sum(inches > 0.5))
'''
Number of days without rain : 215
Number of days with rain : 150
Days with more than .5 inches : 37
'''
print(x[x < 5]) # [0 3 3 3 2 4]
# construct a mask of all rainy days
rainy = (inches > 0)
# construct a mask of all summer days (June 21st is the 172nd day)
summer = (np.arange(365) - 172 < 90) & (np.arange(365) - 172 > 0)
print("Median precip on rainy days in 2014 (inches):", np.median(inches[rainy]))
print("Median precip on summer days in 2014 (inches): ",
np.median(inches[summer]))
print("Maximum precip on summer days in 2014 (inches): ",
np.max(inches[summer]))
print("Median precip on non-summer rainy days (inches):",
np.median(inches[rainy & ~summer]))
#Using the Keywords and/or Versus the Operators &/|
print(bool(42), bool(0)) #True False
print(bool(42 and 0)) #False
print(bool(42 or 0)) #True
print(bin(42)) # 0b101010
print(bin(59)) # 0b111011
print(bin(42 | 59)) # 0b111011
a = np.array([1, 0, 1, 0, 1, 0], dtype=bool)
b = np.array([1, 1, 1, 0, 1, 1], dtype=bool)
print(a | b) # [ True True True False True True]
#ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
# print( a or b)
|
[
"numpy.count_nonzero",
"numpy.sum",
"numpy.median",
"pandas.read_csv",
"numpy.random.RandomState",
"numpy.any",
"numpy.max",
"numpy.array",
"numpy.arange",
"seaborn.set",
"numpy.all"
] |
[((302, 315), 'seaborn.set', 'seaborn.set', ([], {}), '()\n', (313, 315), False, 'import seaborn\n'), ((1941, 1966), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (1949, 1966), True, 'import numpy as np\n'), ((2867, 2891), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (2888, 2891), True, 'import numpy as np\n'), ((5349, 5389), 'numpy.array', 'np.array', (['[1, 0, 1, 0, 1, 0]'], {'dtype': 'bool'}), '([1, 0, 1, 0, 1, 0], dtype=bool)\n', (5357, 5389), True, 'import numpy as np\n'), ((5394, 5434), 'numpy.array', 'np.array', (['[1, 1, 1, 0, 1, 1]'], {'dtype': 'bool'}), '([1, 1, 1, 0, 1, 1], dtype=bool)\n', (5402, 5434), True, 'import numpy as np\n'), ((3076, 3099), 'numpy.count_nonzero', 'np.count_nonzero', (['(x < 6)'], {}), '(x < 6)\n', (3092, 3099), True, 'import numpy as np\n'), ((3111, 3124), 'numpy.sum', 'np.sum', (['(x < 6)'], {}), '(x < 6)\n', (3117, 3124), True, 'import numpy as np\n'), ((3180, 3201), 'numpy.sum', 'np.sum', (['(x < 6)'], {'axis': '(1)'}), '(x < 6, axis=1)\n', (3186, 3201), True, 'import numpy as np\n'), ((3355, 3368), 'numpy.any', 'np.any', (['(x < 8)'], {}), '(x < 8)\n', (3361, 3368), True, 'import numpy as np\n'), ((3383, 3396), 'numpy.any', 'np.any', (['(x < 0)'], {}), '(x < 0)\n', (3389, 3396), True, 'import numpy as np\n'), ((3412, 3426), 'numpy.all', 'np.all', (['(x < 10)'], {}), '(x < 10)\n', (3418, 3426), True, 'import numpy as np\n'), ((3440, 3454), 'numpy.all', 'np.all', (['(x == 6)'], {}), '(x == 6)\n', (3446, 3454), True, 'import numpy as np\n'), ((3551, 3572), 'numpy.all', 'np.all', (['(x < 8)'], {'axis': '(1)'}), '(x < 8, axis=1)\n', (3557, 3572), True, 'import numpy as np\n'), ((3742, 3779), 'numpy.sum', 'np.sum', (['((inches > 0.5) & (inches < 1))'], {}), '((inches > 0.5) & (inches < 1))\n', (3748, 3779), True, 'import numpy as np\n'), ((3920, 3962), 'numpy.sum', 'np.sum', (['(~((inches <= 0.5) | (inches >= 1)))'], {}), '(~((inches <= 0.5) | (inches >= 1)))\n', (3926, 3962), True, 'import numpy as np\n'), ((4244, 4263), 'numpy.sum', 'np.sum', (['(inches == 0)'], {}), '(inches == 0)\n', (4250, 4263), True, 'import numpy as np\n'), ((4310, 4329), 'numpy.sum', 'np.sum', (['(inches != 0)'], {}), '(inches != 0)\n', (4316, 4329), True, 'import numpy as np\n'), ((4376, 4396), 'numpy.sum', 'np.sum', (['(inches > 0.5)'], {}), '(inches > 0.5)\n', (4382, 4396), True, 'import numpy as np\n'), ((4808, 4832), 'numpy.median', 'np.median', (['inches[rainy]'], {}), '(inches[rainy])\n', (4817, 4832), True, 'import numpy as np\n'), ((4897, 4922), 'numpy.median', 'np.median', (['inches[summer]'], {}), '(inches[summer])\n', (4906, 4922), True, 'import numpy as np\n'), ((4988, 5010), 'numpy.max', 'np.max', (['inches[summer]'], {}), '(inches[summer])\n', (4994, 5010), True, 'import numpy as np\n'), ((5076, 5110), 'numpy.median', 'np.median', (['inches[rainy & ~summer]'], {}), '(inches[rainy & ~summer])\n', (5085, 5110), True, 'import numpy as np\n'), ((401, 468), 'pandas.read_csv', 'pd.read_csv', (['"""/media/nahid/New Volume/GitHub/Numpy/Seattle2014.csv"""'], {}), "('/media/nahid/New Volume/GitHub/Numpy/Seattle2014.csv')\n", (412, 468), True, 'import pandas as pd\n'), ((4697, 4711), 'numpy.arange', 'np.arange', (['(365)'], {}), '(365)\n', (4706, 4711), True, 'import numpy as np\n'), ((4727, 4741), 'numpy.arange', 'np.arange', (['(365)'], {}), '(365)\n', (4736, 4741), True, 'import numpy as np\n')]
|
import sqlite3
import datetime
import yaml
class SQLButler:
"""
SQLButler handles data addition and extraction from the database. There is a csv
    database version that is designed to be completely compatible and interchangeable,
but SQL is likely to be faster in the long run.
"""
def __init__(self, database_name):
self.headers = {
'time': 'datetime',
'city': 'integer',
'name': 'text',
'sky_id': 'integer',
'sky': 'text',
'sky_desc': 'text',
'temp': 'float',
'humidity': 'integer',
'wind': 'float',
'cover': 'integer',
'rain': 'float',
'snow': 'float',
}
# Load config file and set some parameters
self.master_config = 'etc/weatherman.yml'
with open(self.master_config) as ycf:
self.config = yaml.load(ycf, Loader=yaml.FullLoader)
if not isinstance(database_name, str):
raise TypeError('The provided database name is not a string')
self.database_name = database_name + '.sql'
def create_database(self):
"""
SQL needs to connect to the database any time it tries to do something.
I created the create function to either connect or create the database
if it does not already exist.
"""
self.conn = sqlite3.connect(self.database_name)
self.c = self.conn.cursor()
try:
self.c.execute("""CREATE TABLE weather (
time datetime,
city integer,
name text,
sky_id integer,
sky text,
sky_desc text,
temp float,
humidity integer,
wind float,
cover integer,
rain float,
snow float
)""")
except sqlite3.OperationalError:
pass
return self.c
def format_for_insert(self, data):
"""
Takes a dict and formats the proper data insert for SQL
"""
insert_data = []
try:
insert_data.append(data['time'].strftime(self.config['datetime_str']))
except:
insert_data.append('')
try:
if data['city'] is None:
raise ValueError
insert_data.append(data['city'])
except:
insert_data.append(0)
try:
if data['name'] is None:
raise ValueError
insert_data.append(data['name'])
except:
insert_data.append('')
try:
if data['sky_id'] is None:
raise ValueError
insert_data.append(data['sky_id'])
except:
insert_data.append(0)
try:
if data['sky'] is None:
raise ValueError
insert_data.append(data['sky'])
except:
insert_data.append('')
try:
if data['sky_desc'] is None:
raise ValueError
insert_data.append(data['sky_desc'])
except:
insert_data.append('')
try:
if data['temp'] is None:
raise ValueError
insert_data.append(data['temp'])
except:
insert_data.append(0)
try:
if data['humidity'] is None:
raise ValueError
insert_data.append(data['humidity'])
except:
insert_data.append(0)
try:
if data['wind'] is None:
raise ValueError
insert_data.append(data['wind'])
except:
insert_data.append(0)
try:
if data['cover'] is None:
raise ValueError
insert_data.append(data['cover'])
except:
insert_data.append(0)
try:
if data['rain'] is None:
raise ValueError
insert_data.append(data['rain'])
except:
insert_data.append(0)
try:
if data['snow'] is None:
raise ValueError
insert_data.append(data['snow'])
except:
insert_data.append(0)
return insert_data
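    # Added sketch (not part of the original class): a loop-based equivalent of
    # format_for_insert, kept separate so the original behaviour is untouched.
    def format_for_insert_compact(self, data):
        """
        Build the same insert row as format_for_insert, using one loop over
        (column, default) pairs instead of a try/except block per column.
        """
        defaults = {'time': '', 'city': 0, 'name': '', 'sky_id': 0, 'sky': '',
                    'sky_desc': '', 'temp': 0, 'humidity': 0, 'wind': 0,
                    'cover': 0, 'rain': 0, 'snow': 0}
        insert_data = []
        for column, default in defaults.items():
            value = data.get(column)
            if column == 'time':
                # Timestamps are stored as formatted strings; fall back to ''.
                try:
                    value = value.strftime(self.config['datetime_str'])
                except Exception:
                    value = default
            insert_data.append(default if value is None else value)
        return insert_data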
def add_data(self, data):
"""
        Set up a single record and insert it into the weather table.
        Safety checks are not built out yet, but are planned in case the data
        is changed in the main class before being passed on here.
"""
insert = self.format_for_insert(data)
sql = f"""INSERT INTO weather({','.join(self.headers.keys())})
VALUES(?,?,?,?,?,?,?,?,?,?,?,?)"""
self.c.execute(sql, insert)
def commit_table(self):
"""
        Commit the current transaction so that inserted rows are actually persisted to the database file.
"""
self.conn.commit()
def multi_add(self, data_list):
"""
        Convenience wrapper: connect to (or create) the database, add every
        record in the given list, and commit once at the end.
"""
self.c = self.create_database()
for data in data_list:
self.add_data(data)
self.commit_table()
def tuple_to_dict(self, tpl):
"""
When getting data out of the database it comes back in a list of tuples.
I wrote this to convert the tuple of data to a dict.
"""
line = list(tpl)
try:
line[0] = datetime.datetime.strptime(line[0], self.config['datetime_str'])
except ValueError:
# HERE purge the bad data eventually
line[0] = datetime.datetime.strptime(line[0], self.config['datetime_utc_str'])
dct = {k: v for k, v in zip(self.headers.keys(), line)}
return dct
def list_tuple_to_list_dict(self, lstt):
"""
        Convert a list of tuples (as returned by fetchall) into a list of dicts.
"""
lstd = []
for line_t in lstt:
lstd.append(self.tuple_to_dict(line_t))
return lstd
def query_database(self, parameters):
"""
Based on the parameters, grab data from the database and filter it.
"""
dump = []
refined = []
self.c = self.create_database()
self.c.execute("""SELECT * FROM weather""")
data = self.c.fetchall()
dump = self.list_tuple_to_list_dict(data)
for entry in dump:
if parameters['start_time'] is not None:
if entry['time'] < parameters['start_time']:
continue
if parameters['end_time'] is not None:
if entry['time'] > parameters['end_time']:
continue
if parameters['exact_list'] is not None:
if entry['sky_id'] not in parameters['exact_list']:
continue
refined.append(entry)
return refined
def get_all_data(self):
"""
This gets all data
"""
dump = []
self.c = self.create_database()
self.c.execute("""SELECT * FROM weather""")
data = self.c.fetchall()
dump = self.list_tuple_to_list_dict(data)
return dump
# def get_bad_data(self):
# """
# This gets all data that is not clear... more or less. See a better explanation of why
# 200 and 799 are important in the main module.
# """
# dump = []
# self.c = self.create_database()
# self.c.execute("""SELECT * FROM weather WHERE
# sky_id BETWEEN 200 AND 799
# """)
# data = self.c.fetchall()
# dump = self.list_tuple_to_list_dict(data)
# return dump
def get_first_and_last(self):
"""
        Return the first and last entries, e.g. to get the timestamps that bound the data.
"""
dump = []
self.c = self.create_database()
data = list(self.c.execute("""SELECT * FROM weather""").fetchall())
dump.append(self.tuple_to_dict(data[0]))
dump.append(self.tuple_to_dict(data[-1]))
print(dump)
return dump
|
[
"datetime.datetime.strptime",
"yaml.load",
"sqlite3.connect"
] |
[((1402, 1437), 'sqlite3.connect', 'sqlite3.connect', (['self.database_name'], {}), '(self.database_name)\n', (1417, 1437), False, 'import sqlite3\n'), ((915, 953), 'yaml.load', 'yaml.load', (['ycf'], {'Loader': 'yaml.FullLoader'}), '(ycf, Loader=yaml.FullLoader)\n', (924, 953), False, 'import yaml\n'), ((5530, 5594), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['line[0]', "self.config['datetime_str']"], {}), "(line[0], self.config['datetime_str'])\n", (5556, 5594), False, 'import datetime\n'), ((5693, 5761), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['line[0]', "self.config['datetime_utc_str']"], {}), "(line[0], self.config['datetime_utc_str'])\n", (5719, 5761), False, 'import datetime\n')]
|
# TRIGGERS.PY
import logging
import re
import sys
import time
defaultDefault = object()
class TriggerFactory:
def __init__(self, tack):
self.tack = tack
self.kinds = { "timer" : TimerTrigger,
"process" : ProcessTrigger,
"globus" : GlobusTrigger,
"reader" : ReaderTrigger
}
def new(self, **kwargs):
try:
t = kwargs["kind"]
except:
logging.critical("Given trigger with no kind!")
sys.exit(1)
if not t in self.kinds:
logging.critical("No such kind: " + t)
sys.exit(1)
T = self.kinds[t]
result = T(self.tack, kwargs)
self.tack.add(result)
return result
class Trigger:
def __init__(self, tack, args, kind="SUPER"):
self.constructor(tack, args, kind)
def constructor(self, tack, args, kind):
self.tack = tack
self.id = self.tack.make_id()
self.kind = kind
self.name = self.key(args, "name")
logging.info("New Trigger: %s" % str(self))
def __str__(self):
return "%s <%i>" % (self.name, self.id)
# d: a dictionary ; k: the key ; default: optional default value
def key(self, d, k, default=defaultDefault):
try:
result = d[k]
except KeyError:
if default is defaultDefault:
logging.critical("Given trigger kind=%s with no %s!" %
(self.kind, k))
sys.exit(1)
else:
return default
return result
def info(self, message):
logging.info("%s: %s" % (str(self), message))
def debug(self, message):
logging.debug("%s: %s" % (str(self), message))
''' Returns True if something happened, else False'''
def poll(self):
logging.info("Default poll(): %s" % str(self))
''' Internal use only'''
def request_shutdown(self):
self.tack.request_shutdown(self)
''' Tells this Trigger to shutdown'''
def shutdown(self):
logging.info("Default shutdown(): %s" % str(self))
class TimerTrigger(Trigger):
def __init__(self, tack, args):
self.constructor(tack, args, kind="timer")
self.interval = self.key(args, "interval", 0)
logging.info("New TimerTrigger \"%s\" (%0.3fs)" % \
(self.name, self.interval))
self.last_poll = time.time()
self.handler = self.key(args, "handler")
def poll(self):
self.debug("poll()")
t = time.time()
if t - self.last_poll > self.interval:
self.debug("Calling handler")
self.handler(self, t)
            self.last_poll = t
return True
return False
import threading
from Queue import Queue, Empty
class ProcessTrigger(Trigger):
def __init__(self, tack, args):
self.constructor(tack, args, kind="process")
self.command = args["command"]
logging.info("New ProcessTrigger \"%s\" <%i> (%s)" %
(self.name, self.id, self.command))
self.handler = self.key(args, "handler")
self.q_down = Queue()
self.q_up = Queue()
threading.Thread(target=self.run).start()
def poll(self):
self.debug("poll()")
try:
returncode = self.q_up.get_nowait()
except Empty:
return False
self.debug("returncode: " + str(returncode))
self.handler(self, returncode)
self.tack.remove(self)
return True
def run(self):
self.debug("process thread for <%i>: %s" %
(self.id, self.command))
import subprocess
tokens = self.command.split()
# cp = subprocess.call(tokens)
process = subprocess.Popen(tokens)
self.debug("pid is %i for: %s" % (process.pid, self.command))
while True:
p = process.poll()
if not p is None:
break
try:
message = self.q_down.get(timeout=1)
except Empty:
continue
assert(message == "TERMINATE")
self.info("terminating pid: %i: %s" %
(process.pid, self.command))
try:
process.terminate()
except OSError:
self.info("process <%i> already exited.")
process.poll()
break
self.debug("run(): done")
self.q_up.put(process.returncode)
def shutdown(self):
self.q_down.put("TERMINATE")
message = self.q_up.get()
self.debug("returncode: " + str(message))
class GlobusTrigger(Trigger):
def __init__(self, tack, args):
self.constructor(tack, args, kind="globus")
self.user = self.key(args, "user")
self.token = self.key(args, "token")
self.task = self.key(args, "task")
logging.info("New GlobusTrigger \"%s\" <%i> (%s)" %
(self.name, self.id, self.task))
self.handler = self.key(args, "handler")
self.q = Queue()
threading.Thread(target=self.run).start()
def poll(self):
self.debug("poll()")
try:
status = self.q.get_nowait()
except Empty:
return False
self.debug("status: " + status)
self.handler(self, status)
self.tack.remove(self)
return True
def run(self):
self.debug("thread for <%i>: %s" % (self.id, self.task))
from globusonline.transfer.api_client \
import TransferAPIClient
token = self.get_token()
api = TransferAPIClient(self.user, goauth=token)
while True:
code, reason, data = api.task(self.task, fields="status")
status = data["status"]
print(status)
if status in ("SUCCEEDED", "FAILED"):
break
self.debug("Globus: done " + status)
self.q.put(status)
def get_token(self):
if self.token == "ENV":
import os
v = os.getenv("TOKEN")
if v == None:
print("Globus token environment variable TOKEN is unset!")
sys.exit(1)
else:
result = v
else:
result = self.token
return result
class ReaderTrigger(Trigger):
def __init__(self, tack, args):
self.constructor(tack, args, kind="reader")
self.filename = self.key(args, "filename")
self.eof = self.key(args, "eof")
print ("eof: " + self.eof)
self.pattern = self.key(args, "pattern", default=None)
self.eof_obj = object()
if self.pattern:
self.pc = re.compile(self.pattern)
logging.info("New ReaderTrigger \"%s\" <%i> (%s)" %
(self.name, self.id, self.filename))
self.handler = self.key(args, "handler")
self.q_up = Queue()
self.q_down = Queue()
threading.Thread(target=self.run).start()
def poll(self):
self.debug("poll()")
try:
line = self.q_up.get_nowait()
except Empty:
return False
if (not line is self.eof_obj):
self.debug("line: " + line)
self.handler(self, line)
else:
self.debug("found EOF: " + self.eof)
self.tack.remove(self)
return True
def run(self):
self.debug("thread for %s" % self.filename)
with open(self.filename, "r") as f:
delay_max = 1.0
delay_min = 0.0
delay = delay_min
while True:
line = f.readline().strip()
if len(line) > 0:
delay = delay_min
if line == self.eof:
print("found eof")
break
if (not self.pattern) or self.pc.match(line):
self.q_up.put(line)
else:
delay = delay_incr(delay, delay_max)
try:
message = self.q_down.get(timeout=delay)
assert(message == "TERMINATE")
return
except Empty:
pass
self.debug("Reader: done: " + self.filename)
self.q_up.put(self.eof_obj)
def shutdown(self):
print("putting terminate")
self.q_down.put("TERMINATE")
def delay_incr(delay_now, delay_max):
if delay_now < 1.0:
result = delay_now + 0.1
else:
result = delay_now + 1.0
if result > delay_max:
result = delay_max
return result
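# Added usage sketch (not part of the original module): a minimal stand-in for
# the "tack" object (only the methods used above) wired to a timer trigger.
if __name__ == "__main__":
    class DemoTack(object):
        def __init__(self):
            self.next_id = 0
            self.triggers = []
        def make_id(self):
            self.next_id += 1
            return self.next_id
        def add(self, trigger):
            self.triggers.append(trigger)
        def remove(self, trigger):
            self.triggers.remove(trigger)
        def request_shutdown(self, trigger):
            pass
    logging.basicConfig(level=logging.INFO)
    demo_tack = DemoTack()
    factory = TriggerFactory(demo_tack)
    factory.new(kind="timer", name="heartbeat", interval=0.5,
                handler=lambda trigger, t: trigger.info("tick"))
    # Poll a few times; the handler fires whenever the interval has elapsed.
    for _ in range(4):
        for trigger in list(demo_tack.triggers):
            trigger.poll()
        time.sleep(0.3)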
|
[
"threading.Thread",
"subprocess.Popen",
"Queue.Queue",
"logging.critical",
"time.time",
"logging.info",
"globusonline.transfer.api_client.TransferAPIClient",
"os.getenv",
"sys.exit"
] |
[((2351, 2426), 'logging.info', 'logging.info', (['(\'New TimerTrigger "%s" (%0.3fs)\' % (self.name, self.interval))'], {}), '(\'New TimerTrigger "%s" (%0.3fs)\' % (self.name, self.interval))\n', (2363, 2426), False, 'import logging\n'), ((2477, 2488), 'time.time', 'time.time', ([], {}), '()\n', (2486, 2488), False, 'import time\n'), ((2600, 2611), 'time.time', 'time.time', ([], {}), '()\n', (2609, 2611), False, 'import time\n'), ((3023, 3113), 'logging.info', 'logging.info', (['(\'New ProcessTrigger "%s" <%i> (%s)\' % (self.name, self.id, self.command))'], {}), '(\'New ProcessTrigger "%s" <%i> (%s)\' % (self.name, self.id,\n self.command))\n', (3035, 3113), False, 'import logging\n'), ((3204, 3211), 'Queue.Queue', 'Queue', ([], {}), '()\n', (3209, 3211), False, 'from Queue import Queue, Empty\n'), ((3234, 3241), 'Queue.Queue', 'Queue', ([], {}), '()\n', (3239, 3241), False, 'from Queue import Queue, Empty\n'), ((3829, 3853), 'subprocess.Popen', 'subprocess.Popen', (['tokens'], {}), '(tokens)\n', (3845, 3853), False, 'import subprocess\n'), ((4958, 5045), 'logging.info', 'logging.info', (['(\'New GlobusTrigger "%s" <%i> (%s)\' % (self.name, self.id, self.task))'], {}), '(\'New GlobusTrigger "%s" <%i> (%s)\' % (self.name, self.id, self\n .task))\n', (4970, 5045), False, 'import logging\n'), ((5130, 5137), 'Queue.Queue', 'Queue', ([], {}), '()\n', (5135, 5137), False, 'from Queue import Queue, Empty\n'), ((5682, 5724), 'globusonline.transfer.api_client.TransferAPIClient', 'TransferAPIClient', (['self.user'], {'goauth': 'token'}), '(self.user, goauth=token)\n', (5699, 5724), False, 'from globusonline.transfer.api_client import TransferAPIClient\n'), ((6810, 6901), 'logging.info', 'logging.info', (['(\'New ReaderTrigger "%s" <%i> (%s)\' % (self.name, self.id, self.filename))'], {}), '(\'New ReaderTrigger "%s" <%i> (%s)\' % (self.name, self.id, self\n .filename))\n', (6822, 6901), False, 'import logging\n'), ((6991, 6998), 'Queue.Queue', 'Queue', ([], {}), '()\n', (6996, 6998), False, 'from Queue import Queue, Empty\n'), ((7021, 7028), 'Queue.Queue', 'Queue', ([], {}), '()\n', (7026, 7028), False, 'from Queue import Queue, Empty\n'), ((601, 639), 'logging.critical', 'logging.critical', (["('No such kind: ' + t)"], {}), "('No such kind: ' + t)\n", (617, 639), False, 'import logging\n'), ((652, 663), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (660, 663), False, 'import sys\n'), ((6119, 6137), 'os.getenv', 'os.getenv', (['"""TOKEN"""'], {}), "('TOKEN')\n", (6128, 6137), False, 'import os\n'), ((484, 531), 'logging.critical', 'logging.critical', (['"""Given trigger with no kind!"""'], {}), "('Given trigger with no kind!')\n", (500, 531), False, 'import logging\n'), ((544, 555), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (552, 555), False, 'import sys\n'), ((3250, 3283), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.run'}), '(target=self.run)\n', (3266, 3283), False, 'import threading\n'), ((5146, 5179), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.run'}), '(target=self.run)\n', (5162, 5179), False, 'import threading\n'), ((6255, 6266), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6263, 6266), False, 'import sys\n'), ((7037, 7070), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.run'}), '(target=self.run)\n', (7053, 7070), False, 'import threading\n'), ((1435, 1505), 'logging.critical', 'logging.critical', (["('Given trigger kind=%s with no %s!' % (self.kind, k))"], {}), "('Given trigger kind=%s with no %s!' 
% (self.kind, k))\n", (1451, 1505), False, 'import logging\n'), ((1555, 1566), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1563, 1566), False, 'import sys\n')]
|
# t-SNE is a great technique for visual exploration of high dimensional datasets
# It cannot handle non-numeric features, so those are dropped before fitting
# Import modules (seaborn and matplotlib are needed for the plot below; the
# DataFrame `df` is assumed to already be loaded)
from sklearn.manifold import TSNE
import seaborn as sns
import matplotlib.pyplot as plt
# Non-numerical columns in the dataset
non_numeric = ['Branch', 'Gender', 'Component']
# Drop the non-numerical columns from df
df_numeric = df.drop(non_numeric, axis=1)
# Create a t-SNE model with learning rate 50
m = TSNE(learning_rate=50)
# Fit and transform the t-SNE model on the numeric dataset
tsne_features = m.fit_transform(df_numeric)
print(tsne_features.shape)
# Perform visualizations of the reduced dataset
# Color the points by Gender
sns.scatterplot(x="x", y="y", hue='Gender', data=df)
# Show the plot
plt.show()
|
[
"sklearn.manifold.TSNE"
] |
[((398, 420), 'sklearn.manifold.TSNE', 'TSNE', ([], {'learning_rate': '(50)'}), '(learning_rate=50)\n', (402, 420), False, 'from sklearn.manifold import TSNE\n')]
|
"""set default to string
Revision ID: 392e1a7038a5
Revises: <PASSWORD>
Create Date: 2014-11-17 17:41:47.983000
"""
# revision identifiers, used by Alembic.
revision = '392e1a7038a5'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
# default ignored
op.drop_column('task_entity', 'feature')
op.add_column('task_entity', sa.Column('feature', sa.String, server_default=""))
def downgrade():
# default ignored
op.drop_column('task_entity', 'feature')
op.add_column('task_entity', sa.Column('feature', sa.String, server_default=''))
|
[
"alembic.op.drop_column",
"sqlalchemy.Column"
] |
[((306, 346), 'alembic.op.drop_column', 'op.drop_column', (['"""task_entity"""', '"""feature"""'], {}), "('task_entity', 'feature')\n", (320, 346), False, 'from alembic import op\n'), ((485, 525), 'alembic.op.drop_column', 'op.drop_column', (['"""task_entity"""', '"""feature"""'], {}), "('task_entity', 'feature')\n", (499, 525), False, 'from alembic import op\n'), ((380, 430), 'sqlalchemy.Column', 'sa.Column', (['"""feature"""', 'sa.String'], {'server_default': '""""""'}), "('feature', sa.String, server_default='')\n", (389, 430), True, 'import sqlalchemy as sa\n'), ((559, 609), 'sqlalchemy.Column', 'sa.Column', (['"""feature"""', 'sa.String'], {'server_default': '""""""'}), "('feature', sa.String, server_default='')\n", (568, 609), True, 'import sqlalchemy as sa\n')]
|
#
import datetime
class DclUtil(object):
@staticmethod
def datetime_format():
return datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S")
|
[
"datetime.datetime.now"
] |
[((108, 131), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (129, 131), False, 'import datetime\n')]
|
import os
import json
from utils import Colors, clamp
from .controllers import Keyboard
from .managers.sprite_font import SpriteFontManager
from pygame.locals import K_UP, K_w, K_DOWN, K_s, K_LEFT, K_a, K_RIGHT, K_d, K_SPACE, K_e, K_RETURN, K_ESCAPE
def read_float(options, key, default):
try:
option = options[key]
return option if isinstance(option, float) else default
except KeyError:
return default
def read_int(options, key, default):
try:
option = options[key]
return option if isinstance(option, int) else default
except KeyError:
return default
def read_string(options, key, default):
try:
option = options[key]
        return option if isinstance(option, str) else default
except KeyError:
return default
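# Added note (illustration): with options = {"master_volume": "loud"}, read_float
# returns the supplied default because the stored value is not a float; missing
# keys fall back to the default the same way. Note that a JSON integer such as 1
# is also rejected by read_float, since isinstance(1, float) is False.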
class Configuration(object):
lang = 'english.json'
master_volume = 1
sound_volume = 1
music_volume = 1
level = 1
@classmethod
def load(cls):
# Load options
if os.path.isfile('datafiles/options.json'):
with open('datafiles/options.json', 'r', encoding='utf-8') as file:
options = json.load(file)
# Language options
cls.lang = read_string(options, 'lang', cls.lang)
if not os.path.isfile(os.path.join('datafiles', 'lang', cls.lang)):
cls.lang = 'english.json'
# Volume options
cls.master_volume = read_float(options, 'master_volume', cls.master_volume)
cls.master_volume = clamp(cls.master_volume, 0, 1)
cls.sound_volume = read_float(options, 'sound_volume', cls.sound_volume)
cls.sound_volume = clamp(cls.sound_volume, 0, 1)
cls.music_volume = read_float(options, 'music_volume', cls.music_volume)
cls.music_volume = clamp(cls.music_volume, 0, 1)
# Init sprite font
SpriteFontManager.load_font('spr_font.png', '0123456789')
# Init keyboard
Keyboard.add('up', (K_UP, K_w))
Keyboard.add('down', (K_DOWN, K_s))
Keyboard.add('left', (K_LEFT, K_a))
Keyboard.add('right', (K_RIGHT, K_d))
Keyboard.add('interact', (K_e, K_RETURN, K_SPACE))
Keyboard.add('any', (K_LEFT, K_a, K_RIGHT, K_d, K_UP, K_w))
Keyboard.add('key_left', (K_LEFT, K_a))
Keyboard.add('key_right', (K_RIGHT, K_d))
Keyboard.add('key_shoot', (K_UP, K_w))
Keyboard.add('key1_left', K_a)
Keyboard.add('key1_right', K_d)
Keyboard.add('key1_shoot', K_w)
Keyboard.add('key2_left', K_RIGHT)
Keyboard.add('key2_right', K_LEFT)
Keyboard.add('key2_shoot', K_UP)
Keyboard.add('pause', K_ESCAPE)
@classmethod
def save(cls):
options = {}
# Language options
options['lang'] = cls.lang
# Volume options
options['master_volume'] = cls.master_volume
options['sound_volume'] = cls.sound_volume
options['music_volume'] = cls.music_volume
if not os.path.exists('datafiles'):
os.mkdir('datafiles')
with open('datafiles/options.json', 'w', encoding='utf-8') as file:
json.dump(options, file)
@classmethod
def state(cls):
if cls.level == 1:
return {
'color': Colors.ENEMY,
'shoot_delay': (50, 60),
'balls': [
(125, 175, 10)
],
'max_score': 3,
'max_balls': 6,
'music': 'bgm_stage2.ogg'
}
elif cls.level == 2:
return {
'color': Colors.KYRGOS,
'shoot_delay': (40, 60),
'balls': [
(125, 175, 10)
],
'max_score': 3,
'max_balls': 6,
'music': 'bgm_stage3.ogg'
}
elif cls.level == 3:
return {
'color': Colors.QUADROPA,
'shoot_delay': (30, 60),
'balls': [
(125, 175, 10)
],
'max_score': 3,
'max_balls': 6,
'music': 'bgm_stage3.ogg'
}
elif cls.level == 4:
return {
'color': Colors.LANDIUS,
'shoot_delay': (30, 40),
'balls': [
(125, 175, 10),
(65, 175, 10),
(185, 175, 10)
],
'max_score': 3,
'max_balls': 10,
'music': 'bgm_stage6.ogg'
}
elif cls.level == 5:
return {
'color': Colors.ALOHA,
'shoot_delay': (20, 40),
'balls': [
(125, 175, 10),
(65, 205, 10),
(185, 145, 10),
(65, 115, 10),
(185, 235, 10)
],
'max_score': 5,
'max_balls': 10,
'music': 'bgm_stage4.ogg'
}
elif cls.level == 6:
return {
'color': Colors.KEETESH,
'shoot_delay': (10, 30),
'balls': [
(125, 175, 10),
(65, 235, 10),
(185, 115, 10),
(65, 145, 10),
(185, 205, 10)
],
'max_score': 5,
'max_balls': 10,
'music': 'bgm_stage4.ogg'
}
elif cls.level == 7:
return {
'color': Colors.KROTIK,
'shoot_delay': (10, 20),
'balls': [
(125, 175, 10),
(65, 205, 10),
(185, 145, 10),
(65, 115, 10),
(185, 235, 10)
],
'max_score': 5,
'max_balls': 10,
'music': 'bgm_stage5.ogg'
}
|
[
"json.dump",
"os.mkdir",
"json.load",
"os.path.exists",
"os.path.isfile",
"utils.clamp",
"os.path.join"
] |
[((1023, 1063), 'os.path.isfile', 'os.path.isfile', (['"""datafiles/options.json"""'], {}), "('datafiles/options.json')\n", (1037, 1063), False, 'import os\n'), ((3093, 3120), 'os.path.exists', 'os.path.exists', (['"""datafiles"""'], {}), "('datafiles')\n", (3107, 3120), False, 'import os\n'), ((3134, 3155), 'os.mkdir', 'os.mkdir', (['"""datafiles"""'], {}), "('datafiles')\n", (3142, 3155), False, 'import os\n'), ((3245, 3269), 'json.dump', 'json.dump', (['options', 'file'], {}), '(options, file)\n', (3254, 3269), False, 'import json\n'), ((1171, 1186), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1180, 1186), False, 'import json\n'), ((1581, 1611), 'utils.clamp', 'clamp', (['cls.master_volume', '(0)', '(1)'], {}), '(cls.master_volume, 0, 1)\n', (1586, 1611), False, 'from utils import Colors, clamp\n'), ((1736, 1765), 'utils.clamp', 'clamp', (['cls.sound_volume', '(0)', '(1)'], {}), '(cls.sound_volume, 0, 1)\n', (1741, 1765), False, 'from utils import Colors, clamp\n'), ((1890, 1919), 'utils.clamp', 'clamp', (['cls.music_volume', '(0)', '(1)'], {}), '(cls.music_volume, 0, 1)\n', (1895, 1919), False, 'from utils import Colors, clamp\n'), ((1327, 1370), 'os.path.join', 'os.path.join', (['"""datafiles"""', '"""lang"""', 'cls.lang'], {}), "('datafiles', 'lang', cls.lang)\n", (1339, 1370), False, 'import os\n')]
|
"""
Deck of cards.
"""
import itertools
import json
import random
SUITS = (
'Hearts',
'Diamonds',
'Clubs',
'Spades'
)
RANKS = [str(x) for x in range(2, 11)] + ['Jack', 'Queen', 'King', 'Ace']
class Card(object):
"""
A single card in a classic card deck.
"""
def __init__(self, suit=None, rank=None):
self.suit = suit
self.rank = rank
def to_json(self):
"""
Return a string containing a JSON representation of a card.
"""
return '{{"suit":"{}", "rank":"{}"}}'.format(self.suit, self.rank)
def from_json(self, json_str):
"""
Loads a card from a valid JSON representation.
"""
card = json.loads(json_str)
self.suit = card['suit']
self.rank = card['rank']
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.suit == other.suit and self.rank == other.rank
return False
def __unicode__(self):
return "{} of {}".format(self.rank, self.suit)
def __repr__(self):
return self.__unicode__()
class CardList(object):
"""
An ordered list of an arbitrary number of Card objects.
"""
def __init__(self, card_list=None):
"""
"""
if card_list is None:
self.reset_to_standard()
else:
self.cards = card_list
def reset_to_standard(self):
"""
Form a full 52 card deck, ordered.
"""
self.cards = [Card(s, r) for s, r in list(itertools.product(SUITS, RANKS))]
def shuffle(self):
"""
Shuffle the remaining cards in the deck.
"""
for idx, card in enumerate(self.cards):
new_idx = random.randrange(0, len(self.cards))
tmp_card = self.cards[new_idx]
self.cards[new_idx] = self.cards[idx]
self.cards[idx] = tmp_card
def give(self, num_cards):
"""
Give out the specified number of cards, removing them from the deck.
Cards are given from the top of the deck.
"""
if num_cards < 0:
return None
given_cards = self.cards[:num_cards]
self.cards = self.cards[num_cards:]
return given_cards
def take(self, cards):
"""
Takes a list of Card objects into this hand. Appends them to the end of the card list.
"""
if isinstance(cards, list):
for card in cards:
if isinstance(card, Card):
self.cards.append(card)
@property
def count(self):
"""
Returns the number of cards in the list.
"""
return len(self.cards)
def to_json(self):
"""
Return a string containing a JSON representation of a card list.
"""
return '[{}]'.format(','.join([card.to_json() for card in self.cards]))
def from_json(self, json_str):
"""
Loads a card list from a valid JSON representation.
WARNING: Erases all existing cards in the list!
"""
card_list = json.loads(json_str)
self.cards = []
for card in card_list:
self.cards.append(Card(card['suit'], card['rank']))
def __eq__(self, other):
# if isinstance(other, self.__class__):
if other.count == self.count:
for idx in range(0, other.count):
if self.cards[idx] != other.cards[idx]:
return False
return True
return False
def __unicode__(self):
return ':'.join(['{}{}'.format(c.suit[0], c.rank[0]) for c in self.cards])
def __repr__(self):
return self.__unicode__()
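# Added usage sketch (not part of the original module): deal a five-card hand
# from a shuffled deck and round-trip it through the JSON helpers.
if __name__ == "__main__":
    deck = CardList()
    deck.shuffle()
    hand = CardList(deck.give(5))
    print(hand, "remaining:", deck.count)
    restored = CardList([])
    restored.from_json(hand.to_json())
    assert restored == hand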
|
[
"json.loads",
"itertools.product"
] |
[((706, 726), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (716, 726), False, 'import json\n'), ((3093, 3113), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (3103, 3113), False, 'import json\n'), ((1536, 1567), 'itertools.product', 'itertools.product', (['SUITS', 'RANKS'], {}), '(SUITS, RANKS)\n', (1553, 1567), False, 'import itertools\n')]
|
from matplotlib import cm
# (B, G, R)
mouse_colors = [
(83, 255, 18), # green
(0, 139, 232), # orange
(255, 136, 0), # blue
(0, 196, 255), # yellow
(0, 25, 255) # red
]
outline_colormap = cm.get_cmap('plasma', 100)
|
[
"matplotlib.cm.get_cmap"
] |
[((215, 241), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""plasma"""', '(100)'], {}), "('plasma', 100)\n", (226, 241), False, 'from matplotlib import cm\n')]
|
from django.db import models
import os
from django.conf import settings
from django.core.exceptions import ValidationError
# Create your models here.
# Define user directory path
def file_size(value):
limit = 524000
if value.size > limit:
raise ValidationError('File too large. Size should not exceed 500KiB.')
def user_directory_path(instance, filename):
return os.path.join("files", filename)
class File(models.Model):
file = models.FileField(upload_to='files', null=True, validators=[file_size])
def abspath_file(self):
root = settings.MEDIA_ROOT
path = os.path.dirname(self.file.name)
file = os.path.basename(self.file.name)
return os.path.join(root, path, file)
|
[
"django.db.models.FileField",
"django.core.exceptions.ValidationError",
"os.path.basename",
"os.path.dirname",
"os.path.join"
] |
[((388, 419), 'os.path.join', 'os.path.join', (['"""files"""', 'filename'], {}), "('files', filename)\n", (400, 419), False, 'import os\n'), ((459, 529), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""files"""', 'null': '(True)', 'validators': '[file_size]'}), "(upload_to='files', null=True, validators=[file_size])\n", (475, 529), False, 'from django.db import models\n'), ((264, 329), 'django.core.exceptions.ValidationError', 'ValidationError', (['"""File too large. Size should not exceed 500KiB."""'], {}), "('File too large. Size should not exceed 500KiB.')\n", (279, 329), False, 'from django.core.exceptions import ValidationError\n'), ((609, 640), 'os.path.dirname', 'os.path.dirname', (['self.file.name'], {}), '(self.file.name)\n', (624, 640), False, 'import os\n'), ((656, 688), 'os.path.basename', 'os.path.basename', (['self.file.name'], {}), '(self.file.name)\n', (672, 688), False, 'import os\n'), ((704, 734), 'os.path.join', 'os.path.join', (['root', 'path', 'file'], {}), '(root, path, file)\n', (716, 734), False, 'import os\n')]
|
"""
app.utils.mail
==============
Setup app's mailer.
"""
import typing as t
from threading import Thread
from flask import Flask, current_app
from flask_mail import Message
from app.extensions import mail
def _send_async_email(app: Flask, msg: Message) -> None:
with app.app_context():
mail.send(msg)
def send_email(
attachments: t.Optional[t.Iterable[t.Dict[str, str]]] = None,
sync: bool = False,
**kwargs: t.Any,
) -> None:
"""Send a threaded email.
Without threading the app will wait until the email has been sent
before continuing.
In order to access the application context for this function a
protected ``werkzeug`` attribute has to be accessed.
From https://blog.miguelgrinberg.com/post/
``the-flask-mega-tutorial-part-xv-a-better-application-structure``:
Using current_app directly in the send_async_email() function
that runs as a background thread would not have worked, because
current_app is a context-aware variable that is tied to the
thread that is handling the client request. In a different
thread, current_app would not have a value assigned.
Passing current_app directly as an argument to the thread object
would not have worked either, because current_app is really a
proxy object that is dynamically mapped to the application
instance. So passing the proxy object would be the same as using
current_app directly in the thread.
What I needed to do is access the real application instance that
is stored inside the proxy object, and pass that as the app
argument. The current_app._get_current_object() expression
extracts the actual application instance from inside the proxy
object, so that is what I passed to the thread as an argument.
Note: Keyword args (dict) to pass to ``attachments``:
See ``flask_mail.Message.attach``.
* filename: filename of attachment
* content_type: file mimetype
* data: the raw file data
:param attachments: Iterable of kwargs to construct attachment.
:param sync: Don't thread if True: Defaults to False.
:param kwargs: Keyword args to pass to ``Message``:
See ``flask_mail.Message``.
"""
# noinspection PyProtectedMember
# pylint: disable=protected-access
app = current_app._get_current_object() # type: ignore
subject_prefix = app.config["MAIL_SUBJECT_PREFIX"]
subject = kwargs.get("subject", "")
kwargs["subject"] = f"{subject_prefix}{subject}"
kwargs["sender"] = kwargs.get("sender", app.config["DEFAULT_MAIL_SENDER"])
message = Message(**kwargs)
if attachments:
for attachment in attachments:
message.attach(**attachment)
if sync:
mail.send(message)
else:
thread = Thread(target=_send_async_email, args=(app, message))
thread.start()
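# Added usage sketch (not part of the original module): inside an application
# or request context, a threaded message with one attachment could be sent as
# shown below (recipient address and file contents are illustrative only).
#
#     send_email(
#         subject="Weekly report",
#         recipients=["user@example.com"],
#         body="Report attached.",
#         attachments=[{
#             "filename": "report.csv",
#             "content_type": "text/csv",
#             "data": b"a,b\n1,2\n",
#         }],
#     )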
|
[
"threading.Thread",
"flask.current_app._get_current_object",
"app.extensions.mail.send",
"flask_mail.Message"
] |
[((2390, 2423), 'flask.current_app._get_current_object', 'current_app._get_current_object', ([], {}), '()\n', (2421, 2423), False, 'from flask import Flask, current_app\n'), ((2681, 2698), 'flask_mail.Message', 'Message', ([], {}), '(**kwargs)\n', (2688, 2698), False, 'from flask_mail import Message\n'), ((304, 318), 'app.extensions.mail.send', 'mail.send', (['msg'], {}), '(msg)\n', (313, 318), False, 'from app.extensions import mail\n'), ((2821, 2839), 'app.extensions.mail.send', 'mail.send', (['message'], {}), '(message)\n', (2830, 2839), False, 'from app.extensions import mail\n'), ((2867, 2920), 'threading.Thread', 'Thread', ([], {'target': '_send_async_email', 'args': '(app, message)'}), '(target=_send_async_email, args=(app, message))\n', (2873, 2920), False, 'from threading import Thread\n')]
|
from .shared.utils import *
import circus.shared.algorithms as algo
from .shared import plot
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=FutureWarning)
import h5py
from circus.shared.probes import get_nodes_and_edges
from .shared.files import get_dead_times
from circus.shared.messages import print_and_log, init_logging
from circus.shared.utils import get_parallel_hdf5_flag
def main(params, nb_cpu, nb_gpu, use_gpu):
numpy.random.seed(520)
parallel_hdf5 = get_parallel_hdf5_flag(params)
#params = detect_memory(params)
logger = init_logging(params.logfile)
logger = logging.getLogger('circus.clustering')
#################################################################
data_file = params.data_file
data_file.open()
N_e = params.getint('data', 'N_e')
N_total = params.nb_channels
N_t = params.getint('detection', 'N_t')
dist_peaks = params.getint('detection', 'dist_peaks')
template_shift = params.getint('detection', 'template_shift')
file_out_suff = params.get('data', 'file_out_suff')
sign_peaks = params.get('detection', 'peaks')
alignment = params.getboolean('detection', 'alignment')
smoothing = params.getboolean('detection', 'smoothing')
isolation = params.getboolean('detection', 'isolation')
over_factor = float(params.getint('detection', 'oversampling_factor'))
matched_filter = params.getboolean('detection', 'matched-filter')
spike_thresh = params.getfloat('detection', 'spike_thresh')
smoothing_factor = params.getfloat('detection', 'smoothing_factor')
if params.getboolean('data', 'global_tmp'):
tmp_path_loc = os.path.join(os.path.abspath(params.get('data', 'file_out_suff')), 'tmp')
else:
tmp_path_loc = tempfile.gettempdir()
plot_path = os.path.join(params.get('data', 'file_out_suff'), 'plots')
do_temporal_whitening = params.getboolean('whitening', 'temporal')
do_spatial_whitening = params.getboolean('whitening', 'spatial')
safety_time = params.getint('clustering', 'safety_time')
safety_space = params.getboolean('clustering', 'safety_space')
comp_templates = params.getboolean('clustering', 'compress')
dispersion = params.get('clustering', 'dispersion').replace('(', '').replace(')', '').split(',')
dispersion = list(map(float, dispersion))
nodes, edges = get_nodes_and_edges(params)
chunk_size = params.getint('data', 'chunk_size')
max_elts_elec = params.getint('clustering', 'max_elts')
if sign_peaks == 'both':
max_elts_elec *= 2
nb_elts = int(params.getfloat('clustering', 'nb_elts')*N_e*max_elts_elec)
nb_repeats = params.getint('clustering', 'nb_repeats')
nclus_min = params.getfloat('clustering', 'nclus_min')
make_plots = params.get('clustering', 'make_plots')
sim_same_elec = params.getfloat('clustering', 'sim_same_elec')
noise_thr = params.getfloat('clustering', 'noise_thr')
remove_mixture = params.getboolean('clustering', 'remove_mixture')
extraction = params.get('clustering', 'extraction')
smart_search = params.getboolean('clustering', 'smart_search')
n_abs_min = params.getint('clustering', 'n_abs_min')
sensitivity = params.getfloat('clustering', 'sensitivity')
hdf5_compress = params.getboolean('data', 'hdf5_compress')
blosc_compress = params.getboolean('data', 'blosc_compress')
test_clusters = params.getboolean('clustering', 'test_clusters')
tmp_limits = params.get('fitting', 'amp_limits').replace('(', '').replace(')', '').split(',')
amp_limits = list(map(float, tmp_limits))
elt_count = 0
m_ratio = nclus_min
sub_output_dim = params.getint('clustering', 'sub_dim')
inv_nodes = numpy.zeros(N_total, dtype=numpy.int32)
inv_nodes[nodes] = numpy.argsort(nodes)
to_write = ['clusters_', 'times_', 'data_', 'peaks_']
ignore_dead_times = params.getboolean('triggers', 'ignore_times')
jitter_range = params.getint('detection', 'jitter_range')
template_shift_2 = template_shift + jitter_range
nb_ss_bins = 50
use_hanning = params.getboolean('detection', 'hanning')
#################################################################
if sign_peaks == 'negative':
search_peaks = ['neg']
elif sign_peaks == 'positive':
search_peaks = ['pos']
elif sign_peaks == 'both':
search_peaks = ['neg', 'pos']
smart_searches = {}
for p in search_peaks:
smart_searches[p] = numpy.ones(N_e, dtype=numpy.float32)*int(smart_search)
basis = {}
if use_hanning:
hanning_filter = numpy.hanning(N_t)
if sign_peaks in ['negative', 'both']:
basis['proj_neg'], basis['rec_neg'] = io.load_data(params, 'basis')
if sign_peaks in ['positive', 'both']:
basis['proj_pos'], basis['rec_pos'] = io.load_data(params, 'basis-pos')
thresholds = io.load_data(params, 'thresholds')
mads = io.load_data(params, 'mads')
if do_spatial_whitening:
spatial_whitening = io.load_data(params, 'spatial_whitening')
if do_temporal_whitening:
temporal_whitening = io.load_data(params, 'temporal_whitening')
if matched_filter:
if sign_peaks in ['negative', 'both']:
waveform_neg = io.load_data(params, 'waveform')
waveform_neg /= (numpy.abs(numpy.sum(waveform_neg))* len(waveform_neg))
matched_tresholds_neg = io.load_data(params, 'matched-thresholds')
if sign_peaks in ['positive', 'both']:
waveform_pos = io.load_data(params, 'waveform-pos')
waveform_pos /= (numpy.abs(numpy.sum(waveform_pos))* len(waveform_pos))
matched_tresholds_pos = io.load_data(params, 'matched-thresholds-pos')
if ignore_dead_times:
all_dead_times = get_dead_times(params)
result = {}
if use_gpu:
import cudamat as cmt
## Need to properly handle multi GPU per MPI nodes?
if nb_gpu > nb_cpu:
gpu_id = int(comm.rank//nb_cpu)
else:
gpu_id = 0
cmt.cuda_set_device(gpu_id)
cmt.init()
cmt.cuda_sync_threads()
if test_clusters:
injected_spikes = io.load_data(params, 'injected_spikes')
if comm.rank == 0:
if not os.path.exists(tmp_path_loc):
os.makedirs(tmp_path_loc)
if alignment:
cdata = numpy.linspace(-jitter_range, jitter_range, int(over_factor*2*jitter_range))
xdata = numpy.arange(-template_shift_2, template_shift_2 + 1)
xoff = len(cdata)/2.
if isolation:
yoff = numpy.array(list(range(0, N_t//4)) + list(range(3*N_t//4, N_t)))
comm.Barrier()
if use_gpu and do_spatial_whitening:
spatial_whitening = cmt.CUDAMatrix(spatial_whitening, copy_on_host=False)
elec_positions = {}
for i in range(N_e):
result['loc_times_' + str(i)] = numpy.zeros(0, dtype=numpy.uint32)
result['all_times_' + str(i)] = numpy.zeros(0, dtype=numpy.uint32)
result['times_' + str(i)] = numpy.zeros(0, dtype=numpy.uint32)
result['clusters_' + str(i)] = numpy.zeros(0, dtype=numpy.int32)
result['peaks_' + str(i)] = numpy.zeros(0, dtype=numpy.uint32)
for p in search_peaks:
result['pca_%s_' %p + str(i)] = None
indices = numpy.take(inv_nodes, edges[nodes[i]])
elec_positions[i] = numpy.where(indices == i)[0]
max_elts_elec //= comm.size
nb_elts //= comm.size
few_elts = False
nb_chunks, _ = data_file.analyze(chunk_size)
if nb_chunks < comm.size:
res = io.data_stats(params, show=False)
chunk_size = int(res*params.rate//comm.size)
if comm.rank == 0:
print_and_log(["Too much cores, automatically resizing the data chunks"], 'debug', logger)
nb_chunks, last_chunk_len = data_file.analyze(chunk_size)
if smart_search is False:
gpass = 1
else:
gpass = 0
## We will perform several passes to enhance the quality of the clustering
while gpass < (nb_repeats + 1):
comm.Barrier()
if gpass == 1:
sdata = all_gather_array(smart_searches[search_peaks[0]][comm.rank::comm.size], comm, 0)
if comm.rank == 0:
if gpass == 0:
print_and_log(["Searching random spikes to sample amplitudes..."], 'default', logger)
elif gpass == 1:
if not numpy.all(sdata > 0):
lines = ["Smart Search disabled on %d electrodes" %(numpy.sum(sdata == 0))]
print_and_log(lines, 'debug', logger)
if numpy.any(sdata > 0):
print_and_log(["Smart Search of good spikes for the clustering (%d/%d)..." %(gpass, nb_repeats)], 'default', logger)
else:
print_and_log(["Searching random spikes for the clustering (%d/%d) (no smart search)" %(gpass, nb_repeats)], 'default', logger)
else:
print_and_log(["Searching random spikes to refine the clustering (%d/%d)..." %(gpass, nb_repeats)], 'default', logger)
for i in range(N_e):
if gpass == 0:
for p in search_peaks:
result['tmp_%s_' %p + str(i)] = numpy.zeros(0, dtype=numpy.float32)
result['nb_chunks_%s_' %p + str(i)] = 1
else:
n_neighb = len(edges[nodes[i]])
for p in search_peaks:
result['tmp_%s_' %p + str(i)] = numpy.zeros((0, basis['proj_%s' %p].shape[1] * n_neighb), dtype=numpy.float32)
# If not the first pass, we sync all the detected times among nodes and give all nodes the w/pca
result['all_times_' + str(i)] = numpy.concatenate((result['all_times_' + str(i)], all_gather_array(result['loc_times_' + str(i)], comm, dtype='uint32', compress=blosc_compress)))
result['loc_times_' + str(i)] = numpy.zeros(0, dtype=numpy.uint32)
if gpass == 1:
for p in search_peaks:
result['pca_%s_' %p + str(i)] = comm.bcast(result['pca_%s_' %p + str(i)], root=numpy.mod(i, comm.size))
result['data_%s_' %p + str(i)] = numpy.zeros((0, basis['proj_%s' %p].shape[1] * n_neighb), dtype=numpy.float32)
result['data_' + str(i)] = numpy.zeros((0, sub_output_dim), dtype=numpy.float32)
# I guess this is more relevant, to take signals from all over the recordings
numpy.random.seed(gpass)
all_chunks = numpy.random.permutation(numpy.arange(nb_chunks, dtype=numpy.int64))
rejected = 0
elt_count = 0
        ## This is not easy to read, but during the smart search pass, we need to loop over all chunks, and every node should
## search spikes for a subset of electrodes, to avoid too many communications.
if gpass <= 1:
nb_elecs = numpy.sum(comm.rank == numpy.mod(numpy.arange(N_e), comm.size))
loop_max_elts_elec = params.getint('clustering', 'max_elts')
if sign_peaks == 'both':
loop_max_elts_elec *= 2
loop_nb_elts = numpy.int64(params.getfloat('clustering', 'nb_elts') * nb_elecs * loop_max_elts_elec)
to_explore = range(nb_chunks)
else:
loop_max_elts_elec = max_elts_elec
loop_nb_elts = nb_elts
to_explore = range(comm.rank, nb_chunks, comm.size)
if comm.rank == 0:
to_explore = get_tqdm_progressbar(to_explore)
comm.Barrier()
## Random selection of spikes
for gcount, gidx in enumerate(to_explore):
gidx = all_chunks[gidx]
if (elt_count < loop_nb_elts):
#print "Node", comm.rank, "is analyzing chunk", gidx, "/", nb_chunks, " ..."
local_chunk, t_offset = data_file.get_data(gidx, chunk_size, nodes=nodes)
local_shape = len(local_chunk)
if do_spatial_whitening:
if use_gpu:
local_chunk = cmt.CUDAMatrix(local_chunk, copy_on_host=False)
local_chunk = local_chunk.dot(spatial_whitening).asarray()
else:
local_chunk = numpy.dot(local_chunk, spatial_whitening)
if do_temporal_whitening:
local_chunk = scipy.ndimage.filters.convolve1d(local_chunk, temporal_whitening, axis=0, mode='constant')
#print "Extracting the peaks..."
all_peaktimes = numpy.zeros(0, dtype=numpy.uint32)
all_extremas = numpy.zeros(0, dtype=numpy.uint32)
if matched_filter:
if sign_peaks in ['positive', 'both']:
filter_chunk = scipy.ndimage.filters.convolve1d(local_chunk, waveform_pos, axis=0, mode='constant')
for i in range(N_e):
peaktimes = algo.detect_peaks(filter_chunk[:, i], matched_tresholds_pos[i], mpd=dist_peaks)
all_peaktimes = numpy.concatenate((all_peaktimes, peaktimes))
all_extremas = numpy.concatenate((all_extremas, i*numpy.ones(len(peaktimes), dtype=numpy.uint32)))
if sign_peaks in ['negative', 'both']:
filter_chunk = scipy.ndimage.filters.convolve1d(local_chunk, waveform_neg, axis=0, mode='constant')
for i in range(N_e):
peaktimes = algo.detect_peaks(filter_chunk[:, i], matched_tresholds_neg[i], mpd=dist_peaks)
all_peaktimes = numpy.concatenate((all_peaktimes, peaktimes))
all_extremas = numpy.concatenate((all_extremas, i*numpy.ones(len(peaktimes), dtype=numpy.uint32)))
else:
for i in range(N_e):
if sign_peaks == 'negative':
peaktimes = algo.detect_peaks(local_chunk[:, i], thresholds[i], valley=True, mpd=1) #mpd=dist_peaks
elif sign_peaks == 'positive':
peaktimes = algo.detect_peaks(local_chunk[:, i], thresholds[i], valley=False, mpd=20)
elif sign_peaks == 'both':
#------------------------------------MEG-SPIKES--------------------------------------------------#
if i == 144:
plot_ts = False
else: plot_ts = False
peaktimes = algo.detect_peaks(local_chunk[:, i], thresholds[i], valley=False, mpd=dist_peaks, show=plot_ts, chunk_number=gidx) #numpy.abs(local_chunk[:, i])
#------------------------------------MEG-SPIKES--------------------------------------------------#
all_peaktimes = numpy.concatenate((all_peaktimes, peaktimes))
all_extremas = numpy.concatenate((all_extremas, i*numpy.ones(len(peaktimes), dtype=numpy.uint32)))
#print "Removing the useless borders..."
if alignment:
local_borders = (template_shift_2, local_shape - template_shift_2)
else:
local_borders = (template_shift, local_shape - template_shift)
idx = (all_peaktimes >= local_borders[0]) & (all_peaktimes < local_borders[1])
all_peaktimes = numpy.compress(idx, all_peaktimes)
all_extremas = numpy.compress(idx, all_extremas)
local_peaktimes = numpy.unique(all_peaktimes)
local_offset = t_offset
if ignore_dead_times:
indices = numpy.searchsorted(all_dead_times, [t_offset, t_offset + local_shape])
if indices[0] != indices[1]:
local_peaktimes = numpy.array(list(set(local_peaktimes + t_offset).difference(all_dead_times[indices[0]:indices[1]])), dtype=numpy.uint32) - t_offset
local_peaktimes = numpy.sort(local_peaktimes)
if len(local_peaktimes) > 0:
diff_times = local_peaktimes[-1]-local_peaktimes[0]
all_times = numpy.zeros((N_e, diff_times+1), dtype=numpy.bool)
min_times = numpy.maximum(local_peaktimes - local_peaktimes[0] - safety_time, 0)
max_times = numpy.minimum(local_peaktimes - local_peaktimes[0] + safety_time + 1, diff_times)
n_times = len(local_peaktimes)
argmax_peak = numpy.random.permutation(numpy.arange(n_times))
all_idx = numpy.take(local_peaktimes, argmax_peak)
if gpass > 1:
for elec in range(N_e):
subset = result['all_times_' + str(elec)] - local_offset
peaks = numpy.compress((subset >= 0) & (subset < (local_shape)), subset)
inter = numpy.in1d(local_peaktimes, peaks)
indices = numpy.take(inv_nodes, edges[nodes[elec]])
remove = numpy.where(inter == True)[0]
for t in remove:
if safety_space:
all_times[indices, min_times[t]:max_times[t]] = True
else:
all_times[elec, min_times[t]:max_times[t]] = True
#print "Selection of the peaks with spatio-temporal masks..."
for midx, peak in zip(argmax_peak, all_idx):
if elt_count == loop_nb_elts:
break
if sign_peaks == 'negative':
elec = numpy.argmin(local_chunk[peak])
negative_peak = True
loc_peak = 'neg'
elif sign_peaks == 'positive':
elec = numpy.argmax(local_chunk[peak])
negative_peak = False
loc_peak = 'pos'
elif sign_peaks == 'both':
if N_e == 1:
if local_chunk[peak] < 0:
negative_peak = True
loc_peak = 'neg'
elif local_chunk[peak] > 0:
negative_peak = False
loc_peak = 'pos'
elec = 0
else:
if numpy.abs(numpy.max(local_chunk[peak])) > numpy.abs(numpy.min(local_chunk[peak])):
elec = numpy.argmax(local_chunk[peak])
negative_peak = False
loc_peak = 'pos'
else:
elec = numpy.argmin(local_chunk[peak])
negative_peak = True
loc_peak = 'neg'
if ((gpass > 1) or (numpy.mod(elec, comm.size) == comm.rank)):
indices = numpy.take(inv_nodes, edges[nodes[elec]])
if safety_space:
myslice = all_times[indices, min_times[midx]:max_times[midx]]
else:
myslice = all_times[elec, min_times[midx]:max_times[midx]]
is_local_extrema = elec in all_extremas[all_peaktimes == peak]
if is_local_extrema and not myslice.any():
to_accept = False
if gpass == 1:
to_update = result['data_%s_' %loc_peak + str(elec)]
else:
to_update = result['tmp_%s_' %loc_peak + str(elec)]
if len(to_update) < loop_max_elts_elec:
if alignment:
idx = elec_positions[elec]
zdata = numpy.take(local_chunk[peak - template_shift_2:peak + template_shift_2 + 1], indices, axis=1)
ydata = numpy.arange(len(indices))
if len(ydata) == 1:
#if False:
# smoothing_factor = smoothing_factor*xdata.size*mads[elec]**2
# f = scipy.interpolate.UnivariateSpline(xdata, zdata, s=smoothing_factor, k=3)
#else:
f = scipy.interpolate.UnivariateSpline(xdata, zdata, k=3, s=0)
if negative_peak:
rmin = (numpy.argmin(f(cdata)) - xoff)/over_factor
else:
rmin = (numpy.argmax(f(cdata)) - xoff)/over_factor
ddata = numpy.linspace(rmin - template_shift, rmin + template_shift, N_t)
sub_mat = f(ddata).astype(numpy.float32).reshape(N_t, 1)
else:
#if False:
# smoothing_factor = smoothing_factor*zdata.size*numpy.median(mads[indices])**2
# f = scipy.interpolate.RectBivariateSpline(xdata, ydata, zdata, s=smoothing_factor, kx=3, ky=1)
#else:
f = scipy.interpolate.RectBivariateSpline(xdata, ydata, zdata, kx=3, ky=1, s=0)
if negative_peak:
rmin = (numpy.argmin(f(cdata, idx)[:, 0]) - xoff)/over_factor
else:
rmin = (numpy.argmax(f(cdata, idx)[:, 0]) - xoff)/over_factor
ddata = numpy.linspace(rmin - template_shift, rmin + template_shift, N_t)
sub_mat = f(ddata, ydata).astype(numpy.float32)
else:
sub_mat = numpy.take(local_chunk[peak - template_shift:peak + template_shift+1], indices, axis=1)
if use_hanning:
sub_mat = (sub_mat.T*hanning_filter).T
if isolation:
is_isolated = numpy.all(numpy.max(numpy.abs(sub_mat[yoff]), 0) <= thresholds[indices])
to_accept = False
else:
is_isolated = True
if is_isolated:
if gpass == 0:
to_accept = True
idx = elec_positions[elec]
ext_amp = sub_mat[template_shift, idx]
result['tmp_%s_' %loc_peak + str(elec)] = numpy.concatenate((result['tmp_%s_' %loc_peak + str(elec)], ext_amp))
elif gpass == 1:
if smart_searches[loc_peak][elec] > 0:
idx = elec_positions[elec]
ext_amp = sub_mat[template_shift, idx]
idx = numpy.searchsorted(result['bounds_%s_' %loc_peak + str(elec)], ext_amp, 'right') - 1
to_keep = result['hist_%s_' %loc_peak + str(elec)][idx] < numpy.random.rand()
if to_keep:
to_accept = True
else:
rejected += 1
else:
to_accept = True
if to_accept:
sub_mat = numpy.dot(basis['rec_%s' %loc_peak], sub_mat)
nx, ny = sub_mat.shape
sub_mat = sub_mat.reshape((1, nx * ny))
result['data_%s_' %loc_peak + str(elec)] = numpy.vstack((result['data_%s_' %loc_peak + str(elec)], sub_mat))
else:
sub_mat = numpy.dot(basis['rec_%s' %loc_peak], sub_mat)
nx, ny = sub_mat.shape
sub_mat = sub_mat.reshape((1, nx * ny))
to_accept = True
result['tmp_%s_' %loc_peak + str(elec)] = numpy.vstack((result['tmp_%s_' %loc_peak + str(elec)], sub_mat))
if to_accept:
elt_count += 1
if gpass >= 1:
to_add = numpy.array([peak + local_offset], dtype=numpy.uint32)
result['loc_times_' + str(elec)] = numpy.concatenate((result['loc_times_' + str(elec)], to_add))
if gpass == 1:
result['peaks_' + str(elec)] = numpy.concatenate((result['peaks_' + str(elec)], [int(negative_peak)]))
if safety_space:
all_times[indices, min_times[midx]:max_times[midx]] = True
else:
all_times[elec, min_times[midx]:max_times[midx]] = True
if gpass == 0:
for i in range(comm.rank, N_e, comm.size):
for p in search_peaks:
if len(result['tmp_%s_' %p + str(i)]) < loop_max_elts_elec:
result['nb_chunks_%s_' %p + str(i)] += 1
comm.Barrier()
sys.stderr.flush()
print_and_log(['Node %d has collected %d spikes and rejected %d spikes' % (comm.rank, elt_count, rejected)], 'debug', logger)
gdata = all_gather_array(numpy.array([elt_count], dtype=numpy.float32), comm, 0)
gdata2 = gather_array(numpy.array([rejected], dtype=numpy.float32), comm, 0)
nb_elements = numpy.int64(numpy.sum(gdata))
nb_rejected = numpy.int64(numpy.sum(gdata2))
nb_total = numpy.int64(nb_elts*comm.size)
if ((smart_search and (gpass == 0)) or (not smart_search and (gpass == 1))) and nb_elements == 0:
if comm.rank == 0:
print_and_log(['No waveforms found! Are the data properly loaded??'], 'error', logger)
sys.exit(0)
if nb_elements == 0:
gpass = nb_repeats
if comm.rank == 0:
if gpass != 1:
if isolation:
print_and_log(["We found %d isolated spikes over %d requested" %(nb_elements, nb_total)], 'default', logger)
else:
print_and_log(["We found %d spikes over %d requested" %(nb_elements, nb_total)], 'default', logger)
if nb_elements == 0:
print_and_log(["No more isolated spikes in the recording, stop searching"], 'info', logger)
else:
if isolation:
print_and_log(["We found %d isolated spikes over %d requested (%d rejected)" %(nb_elements, nb_total, nb_rejected)], 'default', logger)
else:
print_and_log(["We found %d spikes over %d requested (%d rejected)" %(nb_elements, nb_total, nb_rejected)], 'default', logger)
if nb_elements < 0.2*nb_total:
few_elts = True
#CLUSTERING: once we have been through enough chunks (we don't need all of them), we run a clustering for each electrode.
#print "Clustering the data..."
local_nb_clusters = 0
local_hits = 0
local_mergings = 0
cluster_results = {}
for p in search_peaks:
cluster_results[p] = {}
if gpass > 1:
for ielec in range(N_e):
for p in search_peaks:
result['tmp_%s_' %p + str(ielec)] = gather_array(result['tmp_%s_' %p + str(ielec)], comm, numpy.mod(ielec, comm.size), 1, compress=blosc_compress)
elif gpass == 1:
for ielec in range(comm.rank, N_e, comm.size):
result['times_' + str(ielec)] = numpy.copy(result['loc_times_' + str(ielec)])
if comm.rank == 0:
if gpass == 0:
print_and_log(["Estimating amplitudes distributions..."], 'default', logger)
elif gpass == 1:
print_and_log(["Computing density estimations..."], 'default', logger)
else:
print_and_log(["Refining density estimations..."], 'default', logger)
if not os.path.exists(plot_path):
os.makedirs(plot_path)
if gpass == 1:
dist_file = tempfile.NamedTemporaryFile()
tmp_file = os.path.join(tmp_path_loc, os.path.basename(dist_file.name)) + '.hdf5'
dist_file.close()
result['dist_file'] = tmp_file
tmp_h5py = h5py.File(result['dist_file'], 'w', libver='earliest')
print_and_log(["Node %d will use temp file %s" %(comm.rank, tmp_file)], 'debug', logger)
elif gpass > 1:
tmp_h5py = h5py.File(result['dist_file'], 'r', libver='earliest')
to_explore = list(range(comm.rank, N_e, comm.size))
sys.stderr.flush()
if (comm.rank == 0) and gpass == nb_repeats:
print_and_log(["Running density-based clustering..."], 'default', logger)
to_explore = get_tqdm_progressbar(to_explore)
for ielec in to_explore:
for p in search_peaks:
cluster_results[p][ielec] = {}
if gpass == 0:
if len(result['tmp_%s_' %p + str(ielec)]) > 1:
# Need to estimate the number of spikes
ratio = nb_chunks / float(result['nb_chunks_%s_' %p + str(ielec)])
ampmin, ampmax = numpy.min(result['tmp_%s_' %p + str(ielec)]), numpy.max(result['tmp_%s_' %p + str(ielec)])
if p == 'pos':
if matched_filter:
bound = matched_tresholds_pos[ielec]
else:
bound = thresholds[ielec]
if bound < ampmax:
bins = [-numpy.inf] + numpy.linspace(bound, ampmax, nb_ss_bins - 1).tolist() + [numpy.inf]
else:
bins = [-numpy.inf] + numpy.linspace(bound, bound*5, nb_ss_bins - 1).tolist() + [numpy.inf]
elif p == 'neg':
if matched_filter:
bound = -matched_tresholds_neg[ielec]
else:
bound = -thresholds[ielec]
if ampmin < bound:
bins = [-numpy.inf] + numpy.linspace(ampmin, bound, nb_ss_bins - 1).tolist() + [numpy.inf]
else:
bins = [-numpy.inf] + numpy.linspace(5*bound, bound, nb_ss_bins - 1).tolist() + [numpy.inf]
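                    # Smart search: build an amplitude histogram for this electrode and turn
                    # it into per-bin rejection probabilities, so that over-represented
                    # amplitudes are subsampled during the next collection pass.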
a, b = numpy.histogram(result['tmp_%s_' %p + str(ielec)], bins)
nb_spikes = numpy.sum(a)
a = a/float(nb_spikes)
z = a[a > 0]
c = 1./numpy.min(z)
d = (1./(c*a))
d = numpy.minimum(1, d)
d /= numpy.sum(d)
twist = numpy.sum(a*d)
factor = twist*c
rejection_curve = numpy.minimum(0.95, factor*a)
if ratio > 1:
target_max = 1 - (1 - rejection_curve.max())/ratio
rejection_curve *= target_max/(rejection_curve.max())
result['hist_%s_'%p + str(ielec) ] = rejection_curve
result['bounds_%s_' %p + str(ielec)] = b
# if make_plots not in ['None', '']:
# save = [plot_path, '%s_%d.%s' %(p, ielec, make_plots)]
# plot.view_rejection(a, b[1:], result['hist_%s_'%p + str(ielec)], save=save)
else:
smart_searches[p][ielec] = 0
if smart_searches[p][ielec] > 0:
                    print_and_log(['Smart search is activated on channel %d' % ielec], 'debug', logger)
elif gpass == 1:
if len(result['data_%s_' %p + str(ielec)]) >= 1:
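                    # gpass == 1: fit a per-electrode PCA on the collected waveforms and
                    # project them onto sub_output_dim components before density estimation.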
if result['pca_%s_' %p + str(ielec)] is None:
pca = PCA(sub_output_dim)
pca.fit(result['data_%s_' %p + str(ielec)])
result['pca_%s_' %p + str(ielec)] = pca.components_.T.astype(numpy.float32)
print_and_log(["The percentage of variance explained by local PCA on electrode %d is %s"
%(ielec, numpy.sum(pca.explained_variance_ratio_))], 'debug', logger)
if result['pca_%s_' %p + str(ielec)].shape[1] < sub_output_dim:
zeros = numpy.zeros((result['pca_%s_' %p + str(ielec)].shape[0], sub_output_dim - result['pca_%s_' %p + str(ielec)].shape[1]))
result['pca_%s_' %p + str(ielec)] = numpy.hstack((result['pca_%s_' %p + str(ielec)], zeros))
result['sub_%s_' %p + str(ielec)] = numpy.dot(result['data_%s_' %p + str(ielec)], result['pca_%s_' %p + str(ielec)])
rho, dist, sdist = algo.compute_rho(result['sub_%s_' %p + str(ielec)], mratio=m_ratio)
result['rho_%s_' %p + str(ielec)] = rho
result['sdist_%s_' %p + str(ielec)] = sdist
if hdf5_compress:
tmp_h5py.create_dataset('dist_%s_' %p + str(ielec), data=dist.distances, chunks=True, compression='gzip')
else:
tmp_h5py.create_dataset('dist_%s_' %p + str(ielec), data=dist.distances, chunks=True)
del dist, rho
else:
if result['pca_%s_' %p + str(ielec)] is None:
n_neighb = len(edges[nodes[ielec]])
dimension = basis['proj_%s' %p].shape[1] * n_neighb
result['pca_%s_' %p + str(ielec)] = numpy.zeros((dimension, sub_output_dim), dtype=numpy.float32)
result['pca_%s_' %p + str(ielec)][numpy.arange(sub_output_dim), numpy.arange(sub_output_dim)] = 1
result['rho_%s_' %p + str(ielec)] = numpy.zeros((0), dtype=numpy.float32)
result['sub_%s_' %p + str(ielec)] = numpy.zeros((0, sub_output_dim), dtype=numpy.float32)
result['sdist_%s_' %p + str(ielec)] = numpy.zeros((0), dtype=numpy.float32)
else:
if len(result['tmp_%s_' %p + str(ielec)]) > 1:
data = numpy.dot(result['tmp_%s_' %p + str(ielec)], result['pca_%s_' %p + str(ielec)])
rho, sdist = algo.compute_rho(result['sub_%s_' %p + str(ielec)], update=(data, result['sdist_%s_' %p + str(ielec)]), mratio=m_ratio)
result['rho_%s_' %p + str(ielec)] = rho
result['sdist_%s_' %p + str(ielec)] = sdist
del rho
if gpass == nb_repeats:
result.pop('tmp_%s_' %p + str(ielec))
n_data = len(result['data_%s_' %p + str(ielec)])
n_min = numpy.maximum(n_abs_min, int(nclus_min*n_data))
if p == 'pos':
flag = 'positive'
elif p == 'neg':
flag = 'negative'
if (n_data > 1):
dist = tmp_h5py.get('dist_%s_' %p + str(ielec))[:]
result['rho_%s_' %p + str(ielec)] = -result['rho_%s_' %p + str(ielec)] + result['rho_%s_' %p + str(ielec)].max()
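                        # Density-based clustering on the projected waveforms, followed by a
                        # merging step for similar clusters; groups smaller than n_min are
                        # discarded (label -1).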
cluster_results[p][ielec]['groups'], r, d, c = algo.clustering_by_density(result['rho_%s_' %p + str(ielec)], dist,
n_min=n_min, alpha=sensitivity)
# Now we perform a merging step, for clusters that look too similar
data = result['sub_%s_' %p + str(ielec)]
cluster_results[p][ielec]['groups'], merged = algo.merging(cluster_results[p][ielec]['groups'],
sim_same_elec,
data)
idx_clusters, counts = numpy.unique(cluster_results[p][ielec]['groups'], return_counts=True)
for label, cluster_size in zip(idx_clusters, counts):
if cluster_size < n_min:
tmp = cluster_results[p][ielec]['groups'] == label
cluster_results[p][ielec]['groups'][tmp] = -1
if make_plots not in ['None', '']:
save = [plot_path, '%s_%d.%s' %(p, ielec, make_plots)]
injected = None
if test_clusters:
                                injected = numpy.zeros(len(result['data_%s_' %p + str(ielec)]), dtype=numpy.bool_)
key = 'spikes_' + str(ielec)
thresh = 2
if key in injected_spikes:
for icount, spike in enumerate(result['loc_times_' + str(ielec)]):
idx = numpy.where(numpy.abs(spike - injected_spikes['spikes_' + str(ielec)]) < thresh)[0]
if len(idx) > 0:
if icount < (len(injected) - 1):
injected[icount] = True
data = numpy.dot(result['data_%s_' %p + str(ielec)], result['pca_%s_' %p + str(ielec)])
plot.view_clusters(data, r, d, c,
cluster_results[p][ielec]['groups'], injected=injected,
save=save, alpha=sensitivity)
keys = ['loc_times_' + str(ielec), 'all_times_' + str(ielec), 'rho_%s_' %p + str(ielec)]
for key in keys:
if key in result:
result.pop(key)
mask = numpy.where(cluster_results[p][ielec]['groups'] > -1)[0]
cluster_results[p][ielec]['n_clus'] = len(numpy.unique(cluster_results[p][ielec]['groups'][mask]))
n_clusters = []
result['clusters_%s_' %p + str(ielec)] = cluster_results[p][ielec]['groups']
for i in numpy.unique(cluster_results[p][ielec]['groups'][mask]):
n_clusters += [numpy.sum(cluster_results[p][ielec]['groups'][mask] == i)]
line = ["Node %d: %d-%d %s templates on channel %d from %d spikes: %s" %(comm.rank, merged[0], merged[1], flag, ielec, n_data, str(n_clusters))]
print_and_log(line, 'debug', logger)
local_mergings += merged[1]
del dist, r, d, c
else:
cluster_results[p][ielec]['groups'] = numpy.zeros(0, dtype=numpy.int32)
cluster_results[p][ielec]['n_clus'] = 0
result['clusters_%s_' %p + str(ielec)] = numpy.zeros(0, dtype=numpy.int32)
line = ["Node %d: not enough %s spikes on channel %d" %(comm.rank, flag, ielec)]
print_and_log(line, 'debug', logger)
local_nb_clusters += cluster_results[p][ielec]['n_clus']
if gpass >= 1:
tmp_h5py.close()
gpass += 1
sys.stderr.flush()
try:
os.remove(result['dist_file'])
except Exception:
pass
comm.Barrier()
gdata = gather_array(numpy.array([local_hits], dtype=numpy.float32), comm, 0)
gdata2 = gather_array(numpy.array([local_mergings], dtype=numpy.float32), comm, 0)
gdata3 = gather_array(numpy.array([local_nb_clusters], dtype=numpy.float32), comm, 0)
mean_channels = 0
if comm.rank == 0:
total_hits = int(numpy.sum(gdata))
total_mergings = int(numpy.sum(gdata2))
total_nb_clusters = int(numpy.sum(gdata3))
lines = ["Number of clusters found : %d" %total_nb_clusters,
"Number of local merges : %d" %total_mergings]
if few_elts:
lines += ["Not enough spikes gathered: -put safety_space=False?"]
if numpy.any(sdata > 0):
lines += [" -remove smart_search?"]
if isolation:
lines += [" -remove isolation mode?"]
print_and_log(lines, 'info', logger)
print_and_log(["Estimating the templates with the %s procedure ..." %extraction], 'default', logger)
if extraction in ['median-raw', 'median-pca', 'mean-raw', 'mean-pca']:
total_nb_clusters = int(comm.bcast(numpy.array([int(numpy.sum(gdata3))], dtype=numpy.int32), root=0)[0])
offsets = numpy.zeros(comm.size, dtype=numpy.int32)
for i in range(comm.size-1):
offsets[i+1] = comm.bcast(numpy.array([local_nb_clusters], dtype=numpy.int32), root=i)
node_pad = numpy.sum(offsets[:comm.rank+1])
if parallel_hdf5:
hfile = h5py.File(file_out_suff + '.templates.hdf5', 'w', driver='mpio', comm=comm, libver='earliest')
norms = hfile.create_dataset('norms', shape=(2*total_nb_clusters, ), dtype=numpy.float32, chunks=True)
electrodes = hfile.create_dataset('electrodes', shape=(total_nb_clusters, ), dtype=numpy.int32, chunks=True)
amps_lims = hfile.create_dataset('limits', shape=(total_nb_clusters, 2), dtype=numpy.float32, chunks=True)
g_count = node_pad
g_offset = total_nb_clusters
else:
hfile = h5py.File(file_out_suff + '.templates-%d.hdf5' %comm.rank, 'w', libver='earliest')
electrodes = hfile.create_dataset('electrodes', shape=(local_nb_clusters, ), dtype=numpy.int32, chunks=True)
norms = hfile.create_dataset('norms', shape=(2*local_nb_clusters, ), dtype=numpy.float32, chunks=True)
amps_lims = hfile.create_dataset('limits', shape=(local_nb_clusters, 2), dtype=numpy.float32, chunks=True)
g_count = 0
g_offset = local_nb_clusters
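    # Templates are accumulated as sparse triplets: temp_x holds flattened
    # (electrode, time) indices, temp_y the template index, temp_data the values.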
temp_x = numpy.zeros(0, dtype=numpy.uint32)
temp_y = numpy.zeros(0, dtype=numpy.uint32)
temp_data = numpy.zeros(0, dtype=numpy.float32)
shifted_templates = numpy.zeros(0, dtype=numpy.int32)
comm.Barrier()
cfile = h5py.File(file_out_suff + '.clusters-%d.hdf5' %comm.rank, 'w', libver='earliest')
count_templates = node_pad
data_file.close()
to_explore = range(comm.rank, N_e, comm.size)
if (comm.rank == 0):
to_explore = get_tqdm_progressbar(to_explore)
for ielec in to_explore:
n_neighb = len(edges[nodes[ielec]])
indices = inv_nodes[edges[nodes[ielec]]]
for p in search_peaks:
#print "Dealing with cluster", ielec
n_data = len(result['data_%s_' %p + str(ielec)])
data = result['data_%s_' %p + str(ielec)].reshape(n_data, basis['proj_%s' %p].shape[1], n_neighb)
loc_pad = count_templates
myamps = []
mask = numpy.where(cluster_results[p][ielec]['groups'] > -1)[0]
if p == 'pos':
myslice2 = numpy.where(result['peaks_' + str(ielec)] == 0)[0]
elif p == 'neg':
myslice2 = numpy.where(result['peaks_' + str(ielec)] == 1)[0]
loc_times = numpy.take(result['times_' + str(ielec)], myslice2)
loc_clusters = numpy.take(cluster_results[p][ielec]['groups'], mask)
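                # For every cluster found on this electrode, estimate a template (first
                # component) plus a second component from the residuals, using the chosen
                # extraction method (median/mean, in PCA or raw space).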
for group in numpy.unique(loc_clusters):
electrodes[g_count] = ielec
myslice = numpy.where(cluster_results[p][ielec]['groups'] == group)[0]
if extraction == 'median-pca':
sub_data = numpy.take(data, myslice, axis=0)
first_component = numpy.median(sub_data, axis=0)
tmp_templates = numpy.dot(first_component.T, basis['rec_%s' %p])
elif extraction == 'mean-pca':
sub_data = numpy.take(data, myslice, axis=0)
first_component = numpy.mean(sub_data, axis=0)
tmp_templates = numpy.dot(first_component.T, basis['rec_%s' %p])
elif extraction == 'median-raw':
labels_i = numpy.random.permutation(myslice)[:min(len(myslice), 250)]
times_i = numpy.take(loc_times, labels_i)
sub_data = io.get_stas(params, times_i, labels_i, ielec, neighs=indices, nodes=nodes, pos=p)
first_component = numpy.median(sub_data, 0)
tmp_templates = first_component
elif extraction == 'mean-raw':
labels_i = numpy.random.permutation(myslice)[:min(len(myslice), 250)]
times_i = numpy.take(loc_times, labels_i)
sub_data = io.get_stas(params, times_i, labels_i, ielec, neighs=indices, nodes=nodes, pos=p) #io.get_stas(sub_data, times_i, labels_i, ielec, neighs=indices, nodes=nodes, pos=p)
first_component = numpy.mean(sub_data, 0)
tmp_templates = first_component
if p == 'neg':
tmpidx = divmod(tmp_templates.argmin(), tmp_templates.shape[1])
elif p == 'pos':
tmpidx = divmod(tmp_templates.argmax(), tmp_templates.shape[1])
shift = template_shift - tmpidx[1]
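                    # Re-center the template so its extremum sits at template_shift; templates
                    # whose peak is too far off-center are flagged and removed later on.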
                    if numpy.abs(shift) > template_shift / 4:
shifted_templates = numpy.concatenate((shifted_templates, numpy.array([count_templates], dtype='int32')))
myamps += [[0, 10]]
else:
templates = numpy.zeros((N_e, N_t), dtype=numpy.float32)
if shift > 0:
templates[indices, shift:] = tmp_templates[:, :-shift]
elif shift < 0:
templates[indices, :shift] = tmp_templates[:, -shift:]
else:
templates[indices, :] = tmp_templates
mean_channels += len(indices)
if comp_templates:
to_delete = []
for i in indices:
if (numpy.abs(templates[i, :]).max() < 0.5*(thresholds[i]/spike_thresh)):
templates[i, :] = 0
to_delete += [i]
mean_channels -= len(to_delete)
templates = templates.ravel()
dx = templates.nonzero()[0].astype(numpy.uint32)
temp_x = numpy.concatenate((temp_x, dx))
temp_y = numpy.concatenate((temp_y, count_templates*numpy.ones(len(dx), dtype=numpy.uint32)))
temp_data = numpy.concatenate((temp_data, templates[dx]))
norms[g_count] = numpy.sqrt(numpy.sum(templates.ravel()**2)/(N_e*N_t))
x, y, z = sub_data.shape
sub_data_flat = sub_data.reshape(x, y*z)
first_flat = first_component.reshape(y*z, 1)
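                        # Per-spike amplitudes are obtained by projecting each waveform onto the
                        # first component; [amp_min, amp_max] is derived from their median and
                        # dispersion, bounded by the physical detection limit.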
amplitudes = numpy.dot(sub_data_flat, first_flat)
amplitudes /= numpy.sum(first_flat**2)
variation = numpy.median(numpy.abs(amplitudes - numpy.median(amplitudes)))
physical_limit = noise_thr*(-thresholds[indices[tmpidx[0]]])/tmp_templates.min()
amp_min = min(0.8, max(physical_limit, numpy.median(amplitudes) - dispersion[0]*variation))
amp_max = max(1.2, numpy.median(amplitudes) + dispersion[1]*variation)
amps_lims[g_count] = [amp_min, amp_max]
myamps += [[amp_min, amp_max]]
for i in range(x):
sub_data_flat[i, :] -= amplitudes[i]*first_flat[:, 0]
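                        # The second component is the leading principal component of the
                        # residuals left after subtracting each spike's fitted first component.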
if len(sub_data_flat) > 1:
pca = PCA(1)
pca.fit(sub_data_flat)
second_component = pca.components_.T.astype(numpy.float32).reshape(y, z)
else:
second_component = sub_data_flat.reshape(y, z)/numpy.sum(sub_data_flat**2)
if extraction in ['median-pca', 'mean-pca']:
tmp_templates = numpy.dot(second_component.T, basis['rec_%s' %p])
elif extraction in ['median-raw', 'mean-raw']:
tmp_templates = second_component
offset = total_nb_clusters + count_templates
sub_templates = numpy.zeros((N_e, N_t), dtype=numpy.float32)
if shift > 0:
sub_templates[indices, shift:] = tmp_templates[:, :-shift]
elif shift < 0:
sub_templates[indices, :shift] = tmp_templates[:, -shift:]
else:
sub_templates[indices, :] = tmp_templates
if comp_templates:
for i in to_delete:
sub_templates[i, :] = 0
sub_templates = sub_templates.ravel()
dx = sub_templates.nonzero()[0].astype(numpy.uint32)
temp_x = numpy.concatenate((temp_x, dx))
temp_y = numpy.concatenate((temp_y, offset*numpy.ones(len(dx), dtype=numpy.uint32)))
temp_data = numpy.concatenate((temp_data, sub_templates[dx]))
norms[g_count + g_offset] = numpy.sqrt(numpy.sum(sub_templates.ravel()**2)/(N_e*N_t))
count_templates += 1
g_count += 1
if make_plots not in ['None', '']:
if n_data > 1:
save = [plot_path, '%s_%d.%s' %(p, ielec, make_plots)]
idx = numpy.where(indices == ielec)[0][0]
sub_data = numpy.take(data, idx, axis=2)
nb_temp = cluster_results[p][ielec]['n_clus']
vidx = numpy.where((temp_y >= loc_pad) & (temp_y < loc_pad+nb_temp))[0]
sub_tmp = scipy.sparse.csr_matrix((temp_data[vidx], (temp_x[vidx], temp_y[vidx]-loc_pad)), shape=(N_e*N_t, nb_temp))
sub_tmp = sub_tmp.toarray().reshape(N_e, N_t, nb_temp)
sub_tmp = sub_tmp[ielec, :, :]
plot.view_waveforms_clusters(numpy.dot(sub_data, basis['rec_%s' %p]), cluster_results[p][ielec]['groups'],
thresholds[ielec], sub_tmp,
numpy.array(myamps), save=save)
data = numpy.dot(result['data_%s_' %p + str(ielec)], result['pca_%s_' %p + str(ielec)])
result['data_' + str(ielec)] = numpy.concatenate((result['data_' + str(ielec)], data))
if len(result['clusters_' + str(ielec)]) > 0:
max_offset = numpy.int32(numpy.max(result['clusters_' + str(ielec)]) + 1)
else:
max_offset = numpy.int32(0)
mask = result['clusters_%s_' %p + str(ielec)] > -1
result['clusters_%s_' %p + str(ielec)][mask] += max_offset
result['clusters_' + str(ielec)] = numpy.concatenate((result['clusters_' + str(ielec)], result['clusters_%s_' %p + str(ielec)]))
del data
all_indices = numpy.zeros(0, dtype=numpy.uint32)
for p in search_peaks:
if p == 'pos':
target = 0
elif p == 'neg':
target = 1
all_indices = numpy.concatenate((all_indices, numpy.where(result['peaks_' + str(ielec)] == target)[0]))
result['times_' + str(ielec)] = result['times_' + str(ielec)][all_indices]
result['peaks_' + str(ielec)] = result['peaks_' + str(ielec)][all_indices]
io.write_datasets(cfile, to_write, result, ielec, compression=hdf5_compress)
#At the end we should have a templates variable to store.
cfile.close()
del result, amps_lims
sys.stderr.flush()
comm.Barrier()
if local_nb_clusters > 0:
mean_channels /= local_nb_clusters
gdata4 = gather_array(numpy.array([mean_channels], dtype=numpy.float32), comm)
shifted_templates = all_gather_array(shifted_templates, comm, 0, dtype='int32')
if comm.rank == 0:
idx = numpy.where(gdata4 != 0)[0]
mean_channels = numpy.mean(gdata4[idx])
if mean_channels < 3 and params.getfloat('clustering', 'cc_merge') != 1:
print_and_log(["Templates on few channels only, cc_merge should be 1"], 'info', logger)
#We need to gather the sparse arrays
temp_x = gather_array(temp_x, comm, dtype='uint32', compress=blosc_compress)
temp_y = gather_array(temp_y, comm, dtype='uint32', compress=blosc_compress)
temp_data = gather_array(temp_data, comm, compress=blosc_compress)
if parallel_hdf5:
if comm.rank == 0:
rs = [h5py.File(file_out_suff + '.clusters-%d.hdf5' %i, 'r', libver='earliest') for i in range(comm.size)]
cfile = h5py.File(file_out_suff + '.clusters.hdf5', 'w', libver='earliest')
io.write_datasets(cfile, ['electrodes'], {'electrodes' : electrodes[:]})
for i in range(comm.size):
for j in range(i, N_e, comm.size):
io.write_datasets(cfile, to_write, rs[i], j, compression=hdf5_compress)
rs[i].close()
os.remove(file_out_suff + '.clusters-%d.hdf5' %i)
cfile.close()
hfile.close()
else:
hfile.close()
comm.Barrier()
if comm.rank == 0:
ts = [h5py.File(file_out_suff + '.templates-%d.hdf5' %i, 'r', libver='earliest') for i in range(comm.size)]
rs = [h5py.File(file_out_suff + '.clusters-%d.hdf5' %i, 'r', libver='earliest') for i in range(comm.size)]
result = {}
hfile = h5py.File(file_out_suff + '.templates.hdf5', 'w', libver='earliest')
cfile = h5py.File(file_out_suff + '.clusters.hdf5', 'w', libver='earliest')
electrodes = hfile.create_dataset('electrodes', shape=(total_nb_clusters, ), dtype=numpy.int32, chunks=True)
norms = hfile.create_dataset('norms', shape=(2*total_nb_clusters, ), dtype=numpy.float32, chunks=True)
amplitudes = hfile.create_dataset('limits', shape=(total_nb_clusters, 2), dtype=numpy.float32, chunks=True)
count = 0
for i in range(comm.size):
loc_norms = ts[i].get('norms')
middle = len(loc_norms)//2
norms[count:count+middle] = loc_norms[:middle]
norms[total_nb_clusters+count:total_nb_clusters+count+middle] = loc_norms[middle:]
electrodes[count:count+middle] = ts[i].get('electrodes')
amplitudes[count:count+middle] = ts[i].get('limits')
count += middle
for j in range(i, N_e, comm.size):
io.write_datasets(cfile, to_write, rs[i], j, compression=hdf5_compress)
ts[i].close()
rs[i].close()
os.remove(file_out_suff + '.templates-%d.hdf5' %i)
os.remove(file_out_suff + '.clusters-%d.hdf5' %i)
hfile.flush() # we need to flush otherwise electrodes[:] refers to zeros and not the real values
io.write_datasets(cfile, ['electrodes'], {'electrodes' : electrodes[:]})
hfile.close()
cfile.close()
if comm.rank == 0:
hfile = h5py.File(file_out_suff + '.templates.hdf5', 'r+', libver='earliest')
if hdf5_compress:
hfile.create_dataset('temp_x', data=temp_x, compression='gzip')
hfile.create_dataset('temp_y', data=temp_y, compression='gzip')
hfile.create_dataset('temp_data', data=temp_data, compression='gzip')
else:
hfile.create_dataset('temp_x', data=temp_x)
hfile.create_dataset('temp_y', data=temp_y)
hfile.create_dataset('temp_data', data=temp_data)
hfile.create_dataset('temp_shape', data=numpy.array([N_e, N_t, 2*total_nb_clusters], dtype=numpy.int32))
hfile.close()
del temp_x, temp_y, temp_data
import gc
gc.collect()
comm.Barrier()
if len(shifted_templates) > 0:
if comm.rank == 0:
print_and_log(["Removing %d strongly shifted templates..." %len(shifted_templates)], 'default', logger)
if comm.rank == 0:
result = io.load_data(params, 'clusters')
else:
result = []
algo.slice_templates(params, to_remove=shifted_templates)
algo.slice_clusters(params, to_remove=shifted_templates, result=result)
del result
comm.Barrier()
total_nb_clusters = int(io.load_data(params, 'nb_templates') // 2)
if total_nb_clusters > 0:
if comm.rank == 0:
print_and_log(["Merging similar templates..."], 'default', logger)
merged1 = algo.merging_cc(params, nb_cpu=nb_cpu, nb_gpu=nb_gpu, use_gpu=use_gpu)
comm.Barrier()
if remove_mixture:
if comm.rank == 0:
print_and_log(["Removing mixtures of templates..."], 'default', logger)
merged2 = algo.delete_mixtures(params, nb_cpu=nb_cpu, nb_gpu=nb_gpu, use_gpu=use_gpu)
else:
merged2 = [0, 0]
else:
merged1 = [0, 0]
merged2 = [0, 0]
if comm.rank == 0:
print_and_log(["Number of global merges : %d" %merged1[1],
"Number of mixtures removed : %d" %merged2[1]], 'info', logger)
comm.Barrier()
io.get_overlaps(params, erase=True, nb_cpu=nb_cpu, nb_gpu=nb_gpu, use_gpu=use_gpu)
#!/usr/bin/env python3
# replicate.py
# This master script gives me a way to record the settings I used for
# various aspects of the article "The Life Cycles of Genres," and
# (I hope) allows other scholars to reproduce the same tests.
# Generally, I've defined a separate function for each part of the
# article that needs replication, and generally, they're listed
# in article order.
# Running this script gives you a menu and allows you to choose
# a test to replicate. Or you could just use this code as a guide
# in order to build your own script that calls logisticpredict with
# settings of your own devising.
# I have to admit that those "settings" are bloody complicated.
# This is a consequence of trying to build a single
# script that can do a whole bunch of different things.
# The "paths" are somewhat straightforward. They tell the script where
# to look for metadata, data files, and a vocabulary of features to
# use in classification (if this last isn't found, it will be created).
#
# The "exclusions" can focus the model on a particular segment of the timeline.
# The fuss about nonegatives is less important than it seems; in most
# circumstances logisticpredict will automatically exclude tags in the
# positive set from the negative contrast set. The only situation where
# you need to explicitly exclude particular tags is elucidated in the
# function ghastly_stew below. In that case, it's achieved by putting
# a special key in excludeif which excludes tags from the *negative* set
# (whereas excludeif would ordinarily exclude from the positives).
#
# The testconditions variable is very important for one key move made
# in the article: extrapolating from a model to another set of volumes.
# This is achieved by defining a set of volumes that are only ever allowed
# to be in the test set; they never appear in the training set.
#
# Unfortunately, the logic I've used is confusing.
# When I provide a pair of dates in testconditions, this actually sets
# the range of dates within which volumes *are* allowed in the training
# set. On the other hand, when I provide a tag in testconditions, this
# defines a tag that *is not* allowed into the training set (unless volumes
# bearing that tag are also qualified as a member of the positive set
# by some other positive tag). This is all done by the function
# get_donttrainset in metafilter.py. Sorry the logic is a bit
# convoluted.
#
# Those are, I think, the gnarliest aspects of this code.
import logisticpredict, comparemodels
import datetime, sys
def ghastly_stew():
## PATHS.
sourcefolder = '../newdata/'
extension = '.fic.tsv'
metadatapath = '../meta/finalmeta.csv'
vocabpath = '../lexicon/new10k.csv'
modelname = 'ghastlystew'
outputpath = '../results/' + modelname + str(datetime.date.today()) + '.csv'
# We can simply exclude volumes from consideration on the basis on any
# metadata category we want, using the dictionaries defined below.
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = 1700
excludeabove['firstpub'] = 2020
allstewgenres = {'cozy', 'hardboiled', 'det100', 'chimyst', 'locdetective', 'lockandkey', 'crime', 'locdetmyst', 'blcrime', 'anatscifi', 'locscifi', 'chiscifi', 'femscifi', 'stangothic', 'pbgothic', 'lochorror', 'chihorror', 'locghost'}
# We have to explicitly exclude genres because the category "stew" in the
# positive category wouldn't otherwise automatically exclude the constituent
# tags that were used to create it.
# I would just have put all those tags in the positive tag list, but then you'd lose
# the ability to explicitly balance equal numbers of crime, gothic,
# and science fiction, plus sensation novels. You'd get a list dominated by
# the crime categories, which are better-represented in the dataset.
excludeif['negatives'] = allstewgenres
sizecap = 250
# CLASSIFY CONDITIONS
# We ask the user for a list of categories to be included in the positive
# set, as well as a list for the negative set. Default for the negative set
# is to include all the "random"ly selected categories. Note that random volumes
# can also be tagged with various specific genre tags; they are included in the
# negative set only if they lack tags from the positive set.
positive_tags = ['stew']
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
paths = (sourcefolder, extension, metadatapath, outputpath, vocabpath)
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
def make_paths(modelname):
'''
Makes a pathtuple using a model name and a default set of
paths to feature-vocab and metadata files.
'''
sourcefolder = '../newdata/'
extension = '.fic.tsv'
metadatapath = '../meta/finalmeta.csv'
vocabpath = '../lexicon/new10k.csv'
# These words will be used as features
outputpath = '../results/' + modelname + str(datetime.date.today()) + '.csv'
return (sourcefolder, extension, metadatapath, outputpath, vocabpath)
def make_exclusions(startdate, enddate, sizecap, negatives):
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = startdate
excludeabove['firstpub'] = enddate
if negatives != 'nonegatives':
excludeif['negatives'] = set(negatives)
# This is a way to exclude certain tags from the negative contrast set.
return (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
def model_taglist(positive_tags, modelname):
print('We are modeling these positive tags:')
for tag in positive_tags:
print(tag)
sizecap = 1000
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath, vocabpath = paths
exclusions = make_exclusions(0, 2000, sizecap, 'nonegatives')
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
return allvolumes
def model_taglist_within_dates(positive_tags, modelname, mindate, maxdate):
print('We are modeling these positive tags:')
for tag in positive_tags:
print(tag)
sizecap = 1000
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath, vocabpath = paths
exclusions = make_exclusions(mindate, maxdate, sizecap, 'nonegatives')
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
return allvolumes
def project_detective_beyond_date(dividedate):
print('First we create a model of detective fiction only after ' + str(dividedate))
sizecap = 300
modelname = 'detectivejustpost' + str(dividedate)
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath1, vocabpath = paths
exclusions = make_exclusions(dividedate, 2000, sizecap, 'nonegatives')
positive_tags = ['locdetective', 'locdetmyst', 'chimyst', 'det100']
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
print()
print('Then we create a model of detective fiction blindly predicting after ' + str(dividedate))
modelname = 'detectivepredictpost' + str(dividedate)
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath2, vocabpath = paths
exclusions = make_exclusions(0, 2001, sizecap, 'nonegatives')
testconditions = {'1700', str(dividedate)}
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the second dataset at 0.5, accuracy is: ', str(rawaccuracy))
print()
# Now we compare the predictions made by these two models, comparing only
# the volumes that are in both models but excluded from the training process
# in the second model.
comparemodels.compare_untrained(outputpath1, outputpath2)
def project_tag_to_another(tagtoproject, tagtarget):
print('First we create a model of ' + tagtarget)
sizecap = 400
modelname = tagtarget + 'byitself'
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath1, vocabpath = paths
exclusions = make_exclusions(0, 2000, sizecap, tagtoproject)
# Note that we exclude tagtoproject from the negative contrast set, so the
# contrast sets for the two models will be identical.
positive_tags = [tagtarget]
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
print()
print('Then we create a model of ' + tagtoproject + ' and use it to predict ' + tagtarget)
modelname = tagtoproject + 'predicts' + tagtarget
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath2, vocabpath = paths
exclusions = make_exclusions(0, 2001, sizecap, 'nonegatives')
positive_tags = [tagtarget, tagtoproject]
testconditions = {tagtarget}
# That's the line that actually excludes tagtarget from training.
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the second dataset at 0.5, accuracy is: ', str(rawaccuracy))
print()
# Now we compare the predictions made by these two models, comparing only
# the volumes that are in both models but excluded from the training process
# in the second model.
comparemodels.compare_untrained(outputpath1, outputpath2)
def project_tags(tagstoproject, tagtargets):
targetstring = ','.join(tagtargets)
projectstring = ','.join(tagstoproject)
print('First we create a model of ' + targetstring)
sizecap = 400
modelname = targetstring + 'byitself'
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath1, vocabpath = paths
exclusions = make_exclusions(0, 2000, sizecap, tagstoproject)
# Note that we exclude tagstoproject from the negative contrast set, so the
# contrast sets for the two models will be identical.
positive_tags = tagtargets
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
print()
print('Then we create a model of ' + projectstring + ' and use it to predict ' + targetstring)
modelname = projectstring + 'predicts' + targetstring
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath2, vocabpath = paths
exclusions = make_exclusions(0, 2000, sizecap, 'nonegatives')
positive_tags = list(tagtargets)
positive_tags.extend(tagstoproject)
testconditions = set(tagtargets)
    # That's the line that actually excludes the target tags from training.
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the second dataset at 0.5, accuracy is: ', str(rawaccuracy))
print()
# Now we compare the predictions made by these two models, comparing only
# the volumes that are in both models but excluded from the training process
# in the second model.
comparemodels.compare_untrained(outputpath1, outputpath2)
def the_red_and_the_black():
sizecap = 140
modelname = 'blackandthered'
paths = make_paths(modelname)
exclusions = make_exclusions(1700, 2001, sizecap, 'nonegatives')
positive_tags = ['teamred']
negative_tags = ['teamblack']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
accuracies = []
for i in range(40):
modelname = 'redandtheblack' + str(i)
paths = make_paths(modelname)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print(rawaccuracy)
accuracies.append(rawaccuracy)
with open('finalaccuracies.csv', mode = 'w', encoding = 'utf-8') as f:
for accuracy in accuracies:
f.write(str(accuracy) + '\n')
def replicate_stew():
sizecap = 140
modelname = 'replicatestew'
paths = make_paths(modelname)
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = 1700
excludeabove['firstpub'] = 2020
allstewgenres = {'cozy', 'hardboiled', 'det100', 'chimyst', 'locdetective', 'lockandkey', 'crime', 'locdetmyst', 'blcrime', 'anatscifi', 'locscifi', 'chiscifi', 'femscifi', 'stangothic', 'pbgothic', 'lochorror', 'chihorror', 'locghost'}
# We have to explicitly exclude genres because the category "stew" in the
# positive category wouldn't otherwise automatically exclude the constituent
# tags that were used to create it.
# I would just have put all those tags in the positive tag list, but then you'd lose
# the ability to explicitly balance equal numbers of crime, gothic,
# and science fiction, plus sensation novels. You'd get a list dominated by
# the crime categories, which are better-represented in the dataset.
excludeif['negatives'] = allstewgenres
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
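    # The tuple just built above presumably mirrors what make_exclusions()
    # returns elsewhere in this script: (excludeif, excludeifnot, excludebelow,
    # excludeabove, sizecap).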
positive_tags = ['stew']
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
accuracies = []
for i in range(20):
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print(rawaccuracy)
accuracies.append(rawaccuracy)
with open('stewaccuracies.csv', mode = 'a', encoding = 'utf-8') as f:
for accuracy in accuracies:
f.write(str(accuracy) + '\n')
def replicate_detective():
sizecap = 140
modelname = 'replicatedet'
paths = make_paths(modelname)
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = 1700
excludeabove['firstpub'] = 2020
    # Unlike replicate_stew above, no extra genre exclusions are needed here.
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
    positive_tags = ['locdetective', 'locdetmyst', 'chimyst', 'det100']
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
accuracies = []
for i in range(20):
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print(rawaccuracy)
accuracies.append(rawaccuracy)
with open('detaccuracies.csv', mode = 'a', encoding = 'utf-8') as f:
for accuracy in accuracies:
f.write(str(accuracy) + '\n')
def calibrate_detective():
'''
Tests accuracy of classification for detective fiction at different sample
sizes.
'''
modelname = 'calibratedet'
paths = make_paths(modelname)
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = 1700
excludeabove['firstpub'] = 2020
    positive_tags = ['locdetective', 'locdetmyst', 'chimyst', 'det100']
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
sizes = [5,6,7,8,9,11,13,15,17,18,21,27,29,32,34,36,40,45,50,55,60,65,70,75,80,85,90,100]
# with open('../results/collateddetectiveaccuracies.tsv', mode = 'a', encoding = 'utf-8') as f:
# f.write('sizecap\tavgsize\trawaccuracy\n')
accuracies = []
for sizecap in sizes:
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
trainsizes = []
for vol in allvolumes:
trainsizes.append(vol[11])
            # vol[11] holds this volume's training-set size; this is
            # unfortunately dependent on the exact way logisticpredict
            # formats its output
avgsize = sum(trainsizes) / len(trainsizes)
print(sizecap, avgsize, rawaccuracy)
with open('../final/collateddetaccuracies.tsv', mode = 'a', encoding = 'utf-8') as f:
f.write(str(sizecap) + '\t' + str(avgsize) + '\t' + str(rawaccuracy) + '\n')
return None
def calibrate_stew():
'''
Tests accuracy of classification for ghastly stew at different sample
sizes.
'''
modelname = 'calibratestew'
paths = make_paths(modelname)
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = 1700
excludeabove['firstpub'] = 2020
allstewgenres = {'cozy', 'hardboiled', 'det100', 'chimyst', 'locdetective', 'lockandkey', 'crime', 'locdetmyst', 'blcrime', 'anatscifi', 'locscifi', 'chiscifi', 'femscifi', 'stangothic', 'pbgothic', 'lochorror', 'chihorror', 'locghost'}
# We have to explicitly exclude genres because the category "stew" in the
# positive category wouldn't otherwise automatically exclude the constituent
# tags that were used to create it.
# I would just have put all those tags in the positive tag list, but then you'd lose
# the ability to explicitly balance equal numbers of crime, gothic,
# and science fiction, plus sensation novels. You'd get a list dominated by
# the crime categories, which are better-represented in the dataset.
excludeif['negatives'] = allstewgenres
positive_tags = ['stew']
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
sizes = [5,6,7,8,9,11,13,15,17,18,21,27,29,32,34,36,40,45,50,55,60,65,70,75,80,85,90,100]
# with open('../results/collatedstewaccuracies.tsv', mode = 'a', encoding = 'utf-8') as f:
# f.write('sizecap\tavgsize\trawaccuracy\n')
accuracies = []
for sizecap in sizes:
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
trainsizes = []
for vol in allvolumes:
trainsizes.append(vol[11])
            # vol[11] holds this volume's training-set size; this is
            # unfortunately dependent on the exact way logisticpredict
            # formats its output
avgsize = sum(trainsizes) / len(trainsizes)
print(sizecap, avgsize, rawaccuracy)
with open('../final/collatedstewaccuracies.tsv', mode = 'a', encoding = 'utf-8') as f:
f.write(str(sizecap) + '\t' + str(avgsize) + '\t' + str(rawaccuracy) + '\n')
return None
def project_gothic_beyond_date(dividedate):
print('First we create a model of gothic fiction only after ' + str(dividedate))
sizecap = 300
modelname = 'gothicjustpost' + str(dividedate)
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath1, vocabpath = paths
exclusions = make_exclusions(dividedate, 2000, sizecap, 'nonegatives')
positive_tags = ['lochorror', 'pbgothic', 'locghost', 'stangothic', 'chihorror']
negative_tags = ['random', 'chirandom']
testconditions = set()
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
print()
print('Then we create a model of gothic fiction blindly predicting after ' + str(dividedate))
modelname = 'gothicpredictpost' + str(dividedate)
paths = make_paths(modelname)
sourcefolder, extension, metadatapath, outputpath2, vocabpath = paths
exclusions = make_exclusions(0, 2001, sizecap, 'nonegatives')
    # As before, restrict training to volumes first published between 1700 and
    # dividedate; everything later is only predicted.
    testconditions = {'1700', str(dividedate)}
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = logisticpredict.create_model(paths, exclusions, classifyconditions)
print('If we divide the second dataset at 0.5, accuracy is: ', str(rawaccuracy))
print()
# Now we compare the predictions made by these two models, comparing only
# the volumes that are in both models but excluded from the training process
# in the second model.
comparemodels.compare_untrained(outputpath1, outputpath2)
if __name__ == '__main__':
# args = sys.argv
print('Your options include: ')
print(' 1) Model Indiana detective fiction by itself.')
print(' 2) Model LOC detective-esque categories by themselves.')
print(' 3) Model LOC and Indiana together.')
print(' 4) Extrapolate a model of LoC detective fiction to the Indiana exhibition.')
print(' 5) Extrapolate a model of detective fiction beyond a particular date.')
print(' 6) Extrapolate a model of one arbitrary genre tag to another.')
print(' 7) Extrapolate a model of gothic fiction beyond a particular date.')
print(' 8) Extrapolate a model of several tags to several others.')
print(' 9) Run detective prediction at many different sizes.')
print(' 10) Run ghastly stew prediction at many different sizes.')
print(' 11) Try to use detective fiction to predict scifi (fails).')
print(' 12) Model an arbitrary tag against random control set.')
print(' 13) Model all early gothic 1760-1840.')
print(' 14) Model all gothic.')
print(' 15) Model all SF.')
userchoice = int(input('\nyour choice: '))
if userchoice == 1:
tagstomodel = ['det100']
modelname = 'IndianaDetective'
allvolumes = model_taglist(tagstomodel, modelname)
print('Results are in allvolumes.')
elif userchoice == 2:
tagstomodel = ['locdetmyst', 'locdetective', 'chimyst']
modelname = 'LOCdetective'
allvolumes = model_taglist(tagstomodel, modelname)
print('Results are in allvolumes.')
elif userchoice == 3:
tagstomodel = ['det100', 'locdetmyst', 'locdetective', 'chimyst']
modelname = 'AllDetective'
allvolumes = model_taglist(tagstomodel, modelname)
print('Results are in allvolumes.')
elif userchoice == 4:
tagtoproject = ['locdetmyst', 'locdetective', 'chimyst']
tagtarget = ['det100']
project_tags(tagtoproject, tagtarget)
elif userchoice == 5:
dividedate = int(input('date beyond which to project: '))
project_detective_beyond_date(dividedate)
elif userchoice == 6:
tagtoproject = input('tag to project from: ')
tagtarget = input('tag to project onto: ')
project_tag_to_another(tagtoproject, tagtarget)
elif userchoice == 7:
dividedate = int(input('date beyond which to project: '))
project_gothic_beyond_date(dividedate)
elif userchoice == 8:
tagstoproject = input('comma-separated list of tags to model and project from: ')
tagstoproject = [x.strip() for x in tagstoproject.split(',')]
        tagtargets = input('comma-separated list of tags to project onto: ')
tagtargets = [x.strip() for x in tagtargets.split(',')]
project_tags(tagstoproject, tagtargets)
elif userchoice == 9:
calibrate_detective()
elif userchoice == 10:
calibrate_stew()
elif userchoice == 11:
projectfrom = 'chimyst'
projectonto = 'chiscifi'
project_tag_to_another(projectfrom, projectonto)
elif userchoice == 12:
tagtomodel = input('tag to model (must be in metadata)? ')
tagstomodel = [tagtomodel]
allvolumes = model_taglist(tagstomodel, tagtomodel)
elif userchoice == 13:
tagstomodel = ['stangothic', 'pbgothic', 'lochorror', 'locghost']
allvolumes = model_taglist_within_dates(tagstomodel, 'EarlyGothic', 1760, 1840)
elif userchoice == 14:
tagstomodel = ['stangothic', 'pbgothic', 'lochorror', 'locghost', 'chihorror']
modelname = 'AllGothic'
allvolumes = model_taglist(tagstomodel, modelname)
print('Results are in allvolumes.')
elif userchoice == 15:
tagstomodel = ['locscifi', 'femscifi', 'anatscifi', 'chiscifi']
modelname = 'AllSF'
allvolumes = model_taglist(tagstomodel, modelname)
print('Results are in allvolumes.')
print('Done.')
|
[
"comparemodels.compare_untrained",
"datetime.date.today",
"logisticpredict.create_model"
] |
[((4909, 4976), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (4937, 4976), False, 'import logisticpredict, comparemodels\n'), ((6707, 6774), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (6735, 6774), False, 'import logisticpredict, comparemodels\n'), ((7592, 7659), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (7620, 7659), False, 'import logisticpredict, comparemodels\n'), ((8565, 8632), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (8593, 8632), False, 'import logisticpredict, comparemodels\n'), ((9290, 9357), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (9318, 9357), False, 'import logisticpredict, comparemodels\n'), ((9648, 9705), 'comparemodels.compare_untrained', 'comparemodels.compare_untrained', (['outputpath1', 'outputpath2'], {}), '(outputpath1, outputpath2)\n', (9679, 9705), False, 'import logisticpredict, comparemodels\n'), ((10529, 10596), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (10557, 10596), False, 'import logisticpredict, comparemodels\n'), ((11347, 11414), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (11375, 11414), False, 'import logisticpredict, comparemodels\n'), ((11705, 11762), 'comparemodels.compare_untrained', 'comparemodels.compare_untrained', (['outputpath1', 'outputpath2'], {}), '(outputpath1, outputpath2)\n', (11736, 11762), False, 'import logisticpredict, comparemodels\n'), ((12670, 12737), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (12698, 12737), False, 'import logisticpredict, comparemodels\n'), ((13531, 13598), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (13559, 13598), False, 'import logisticpredict, comparemodels\n'), ((13889, 13946), 'comparemodels.compare_untrained', 'comparemodels.compare_untrained', (['outputpath1', 'outputpath2'], {}), '(outputpath1, outputpath2)\n', (13920, 13946), False, 'import logisticpredict, comparemodels\n'), ((23424, 23491), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (23452, 23491), False, 'import logisticpredict, comparemodels\n'), ((24143, 24210), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (24171, 24210), False, 'import logisticpredict, comparemodels\n'), ((24501, 24558), 'comparemodels.compare_untrained', 'comparemodels.compare_untrained', (['outputpath1', 'outputpath2'], {}), '(outputpath1, outputpath2)\n', (24532, 24558), False, 'import 
logisticpredict, comparemodels\n'), ((14603, 14670), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (14631, 14670), False, 'import logisticpredict, comparemodels\n'), ((16467, 16534), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (16495, 16534), False, 'import logisticpredict, comparemodels\n'), ((18107, 18174), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (18135, 18174), False, 'import logisticpredict, comparemodels\n'), ((19577, 19644), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (19605, 19644), False, 'import logisticpredict, comparemodels\n'), ((22065, 22132), 'logisticpredict.create_model', 'logisticpredict.create_model', (['paths', 'exclusions', 'classifyconditions'], {}), '(paths, exclusions, classifyconditions)\n', (22093, 22132), False, 'import logisticpredict, comparemodels\n'), ((2799, 2820), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2818, 2820), False, 'import datetime, sys\n'), ((5469, 5490), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (5488, 5490), False, 'import datetime, sys\n')]
|
from django.db import models
from django.utils.translation import ugettext as _
import reversion
from tally_ho.libs.models.base_model import BaseModel
from tally_ho.apps.tally.models.user_profile import UserProfile
class QuarantineCheck(BaseModel):
class Meta:
app_label = 'tally'
user = models.ForeignKey(UserProfile, null=True, on_delete=models.PROTECT)
name = models.CharField(max_length=256, unique=True)
method = models.CharField(max_length=256, unique=True)
value = models.FloatField(default=0)
percentage = models.FloatField(default=0)
active = models.BooleanField(default=False)
description = models.TextField(null=True, blank=True)
def local_name(self):
return _(self.name)
reversion.register(QuarantineCheck)
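# Illustrative usage sketch (field values below are hypothetical, not taken
# from the project), e.g. from a shell or data migration:
#     QuarantineCheck.objects.create(
#         name='Ballots exceed registrants',
#         method='pass_registrants_trigger',
#         value=0, percentage=10, active=True)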
|
[
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.FloatField",
"django.db.models.BooleanField",
"django.utils.translation.ugettext",
"reversion.register"
] |
[((743, 778), 'reversion.register', 'reversion.register', (['QuarantineCheck'], {}), '(QuarantineCheck)\n', (761, 778), False, 'import reversion\n'), ((308, 375), 'django.db.models.ForeignKey', 'models.ForeignKey', (['UserProfile'], {'null': '(True)', 'on_delete': 'models.PROTECT'}), '(UserProfile, null=True, on_delete=models.PROTECT)\n', (325, 375), False, 'from django.db import models\n'), ((388, 433), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'unique': '(True)'}), '(max_length=256, unique=True)\n', (404, 433), False, 'from django.db import models\n'), ((447, 492), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'unique': '(True)'}), '(max_length=256, unique=True)\n', (463, 492), False, 'from django.db import models\n'), ((505, 533), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)'}), '(default=0)\n', (522, 533), False, 'from django.db import models\n'), ((551, 579), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)'}), '(default=0)\n', (568, 579), False, 'from django.db import models\n'), ((593, 627), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (612, 627), False, 'from django.db import models\n'), ((646, 685), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (662, 685), False, 'from django.db import models\n'), ((728, 740), 'django.utils.translation.ugettext', '_', (['self.name'], {}), '(self.name)\n', (729, 740), True, 'from django.utils.translation import ugettext as _\n')]
|
import logging
import importlib
import multiprocessing
from alvi.tests.resources.base import Resource
logger = logging.getLogger(__name__)
class LocalPythonClient(Resource):
def __init__(self):
logger.info("setting up clients")
self._clients = []
for scene in self.scenes:
module_name, class_name = scene.rsplit(".", 1)
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
process = multiprocessing.Process(target=class_.start)
process.start()
self._clients.append(process)
def destroy(self):
logger.info("terminating clients")
for client in self._clients:
client.terminate()
@property
def scenes(self):
        # TODO: following scenes could be autodiscovered (see the sketch below)
PREFIX = 'alvi.tests.resources.client.local_python_client.scenes.'
return (
PREFIX + 'graph.create_node.GraphCreateNode',
PREFIX + 'graph.update_node.GraphUpdateNode',
PREFIX + 'graph.remove_node.GraphRemoveNode',
PREFIX + 'graph.add_multi_marker.GraphAddMultiMarker',
PREFIX + 'graph.marker.GraphMarker',
PREFIX + 'array.create_node.ArrayCreateNode',
PREFIX + 'array.update_node.ArrayUpdateNode',
PREFIX + 'tree.create_node.TreeCreateNode',
PREFIX + 'tree.append_and_insert.TreeAppendAndInsert',
PREFIX + 'tree.marker.TreeMarker',
PREFIX + 'tree.multi_marker.TreeMultiMarker',
)
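# Hypothetical sketch of the autodiscovery mentioned in the TODO above; the
# package path and the `start`-method heuristic are assumptions, not part of
# the original module.
def _discover_scenes(
        package_name='alvi.tests.resources.client.local_python_client.scenes'):
    import inspect
    import pkgutil
    package = importlib.import_module(package_name)
    discovered = []
    for _finder, module_name, _ispkg in pkgutil.walk_packages(
            package.__path__, package_name + '.'):
        module = importlib.import_module(module_name)
        for name, obj in inspect.getmembers(module, inspect.isclass):
            if obj.__module__ == module_name and hasattr(obj, 'start'):
                discovered.append(module_name + '.' + name)
    return tuple(sorted(discovered))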
|
[
"multiprocessing.Process",
"importlib.import_module",
"logging.getLogger"
] |
[((112, 139), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (129, 139), False, 'import logging\n'), ((384, 420), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (407, 420), False, 'import importlib\n'), ((492, 536), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'class_.start'}), '(target=class_.start)\n', (515, 536), False, 'import multiprocessing\n')]
|
import pandas as pd
import glob
import csv
files = [
"a100-results.csv",
"clx-1S-results.csv",
"clx-results.csv",
"gen9-results.csv",
"mi100-results.csv",
# "rome-results-aocc.csv",
"rome-results-cce.csv"]
csv_frames = []
for f in files:
csv_frames.append(pd.read_csv(f, skipinitialspace=True))
df = pd.concat(csv_frames, axis=0, ignore_index=True)
df.loc[df['model'] == 'kokkos-sycl',['model']] = 'kokkos'
df.set_index(["kernel", "model", "arch", "compiler"], inplace=True)
df.sort_index(inplace=True)
avg = df.groupby(level=["kernel", "model", "arch", "compiler"]).mean()
peaks = pd.read_csv("peaks.csv", skipinitialspace=True)
peaks = pd.Series(peaks.bandwidth.values, index=peaks.arch).to_dict()
# peakmap maps an arch to (socket count, key into peaks.csv); arches missing
# here (e.g. a100, mi100) fall back to a multiplier of 1 and their own name.
peakmap = {'rome': (2, 'EPYC 7742'),
           'clx_1S': (1, 'Xeon 6230'),
           'clx': (2, 'Xeon 6230'),
           'gen9': (1, 'Core 6770HQ')
           }
arches = avg.index.unique(level='arch')
for arch in arches:
try:
mul, key = peakmap[arch]
except KeyError:
mul, key = 1, arch
avg.loc[(slice(None), slice(None), arch), 'bandwidth'] /= (mul*peaks[key])
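# After this loop, 'bandwidth' holds the fraction of peak memory bandwidth per
# (kernel, model, arch, compiler); it is scaled by 100 further down, so the
# per-kernel CSVs report percent of peak (or 'X' where no result exists).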
app_name_map = {
"openmp": "OpenMP",
"kokkos-sycl" : "Kokkos (SYCL)",
"omp-target": "OpenMP (target)",
"onedpl": "oneDPL",
"raja": "Raja",
"kokkos": "Kokkos",
"sycl": "SYCL",
}
app_order = ['openmp', 'kokkos', 'raja', 'sycl', 'onedpl']
subapp_map = {
'openmp' : 'openmp',
'omp-target' : 'openmp',
'kokkos' : 'kokkos',
'kokkos-sycl' : 'kokkos',
'raja' : 'raja',
'sycl' : 'sycl',
'onedpl' : 'onedpl',
}
platform_name_map = {
'clx' : "2 x Intel® Xeon® Gold 6230",
'clx_1S' : "1 x Intel® Xeon® Gold 6230",
'a100' : "NVIDIA A100",
'mi100' : "AMD MI100",
'rome' : '2 x AMD EPYC 7742',
'rome_cce' : '2 x AMD EPYC 7742',
'rome_aocc' : '2 x AMD EPYC 7742',
'gen9' : 'Intel® Iris® Pro 580'
}
for kernel in avg.index.unique(level='kernel'):
with open(f"{kernel}.csv", "w") as fp:
ocsv = csv.writer(fp)
kslice = avg.loc[kernel]
kslice.index.remove_unused_levels()
models = kslice.index.unique(level='model')
ocsv.writerow(["Device"] + list([app_name_map[x] for x in models]))
for arch in arches:
res = [platform_name_map[arch]]
for m in models:
try:
v = avg.loc[(kernel, m, arch),'bandwidth'][0]*100
except KeyError:
v = 'X'
res.append(v)
ocsv.writerow(res)
|
[
"pandas.read_csv",
"pandas.Series",
"pandas.concat",
"csv.writer"
] |
[((335, 383), 'pandas.concat', 'pd.concat', (['csv_frames'], {'axis': '(0)', 'ignore_index': '(True)'}), '(csv_frames, axis=0, ignore_index=True)\n', (344, 383), True, 'import pandas as pd\n'), ((623, 670), 'pandas.read_csv', 'pd.read_csv', (['"""peaks.csv"""'], {'skipinitialspace': '(True)'}), "('peaks.csv', skipinitialspace=True)\n", (634, 670), True, 'import pandas as pd\n'), ((289, 326), 'pandas.read_csv', 'pd.read_csv', (['f'], {'skipinitialspace': '(True)'}), '(f, skipinitialspace=True)\n', (300, 326), True, 'import pandas as pd\n'), ((678, 729), 'pandas.Series', 'pd.Series', (['peaks.bandwidth.values'], {'index': 'peaks.arch'}), '(peaks.bandwidth.values, index=peaks.arch)\n', (687, 729), True, 'import pandas as pd\n'), ((2112, 2126), 'csv.writer', 'csv.writer', (['fp'], {}), '(fp)\n', (2122, 2126), False, 'import csv\n')]
|
# -*- python -*-
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import gdb_test
class CompleteTest(gdb_test.GdbTest):
def test_complete(self):
# Test that continue causes the debugged program to run to completion.
self.gdb.ResumeCommand('continue')
def tearDown(self):
# Test program should run to completion and return a special value.
# Intentionally bypass superclass's tearDown as it assumes gdb exits first.
self.AssertSelLdrExits(expected_returncode=123)
self.gdb.Quit()
self.gdb.Wait()
if __name__ == '__main__':
gdb_test.Main()
|
[
"gdb_test.Main"
] |
[((685, 700), 'gdb_test.Main', 'gdb_test.Main', ([], {}), '()\n', (698, 700), False, 'import gdb_test\n')]
|
#--------------------------------------------
#-- Libraries
#--------------------------------------------
# system imports
import os
import sys
# add parent path to python paths
parentdir = os.path.join(os.path.dirname(__file__), '../')
sys.path.insert(0,parentdir)
# imports
import cv2
import datasets
from nfeReader import barcode, qrcode, ocr
#--------------------------------------------
#-- Testing
#--------------------------------------------
def showImage(imageArray, imageName="Without name"):
cv2.imshow('Image - %s' % imageName,imageArray)
cv2.waitKey(0)
cv2.destroyAllWindows()
def checkAllImages(imagesPath, decoder, display=False):
for image in imagesPath:
# decoding
decoding, imgArray = decoder(image)
for decodedItem in decoding:
print("-[INFO] Found {} code: {}".format(decodedItem['type'], decodedItem['data']))
# display
if display:
showImage(imgArray, image)
def checkOCRFromImages(imagesPath, decoder, display=False):
for image in imagesPath:
# decoding
decoding = decoder(image)
print("-[OCR] Found '{}':".format(image))
print(decoding)
# display
if display:
imgArray = cv2.imread(image,0)
showImage(imgArray, image)
#--------------------------------------------
#-- Main
#--------------------------------------------
if __name__ == '__main__':
# barcode
print('\n-- Testing: barcode')
checkAllImages(datasets.barcodeImages, barcode.decode, display=True)
# QR code
print('\n-- Testing: qrcode')
checkAllImages(datasets.qrcodeImages, qrcode.decode, display=True)
# OCR
print('\n-- Testing: OCR - Optical Character Recognition')
checkOCRFromImages(datasets.ocrImages, ocr.decode, display=True)
|
[
"cv2.waitKey",
"os.path.dirname",
"cv2.imshow",
"sys.path.insert",
"cv2.imread",
"cv2.destroyAllWindows"
] |
[((238, 267), 'sys.path.insert', 'sys.path.insert', (['(0)', 'parentdir'], {}), '(0, parentdir)\n', (253, 267), False, 'import sys\n'), ((204, 229), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (219, 229), False, 'import os\n'), ((511, 559), 'cv2.imshow', 'cv2.imshow', (["('Image - %s' % imageName)", 'imageArray'], {}), "('Image - %s' % imageName, imageArray)\n", (521, 559), False, 'import cv2\n'), ((563, 577), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (574, 577), False, 'import cv2\n'), ((582, 605), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (603, 605), False, 'import cv2\n'), ((1245, 1265), 'cv2.imread', 'cv2.imread', (['image', '(0)'], {}), '(image, 0)\n', (1255, 1265), False, 'import cv2\n')]
|
"""
Constants for Idasen Desk Controller Integration
"""
import logging
LOGGER = logging.getLogger(__package__)
DOMAIN = 'idasen-desk-controller'
PLATFORMS = ["cover", "sensor", "switch"]
MIN_HEIGHT = 620  # desk height limits in millimetres
MAX_HEIGHT = 1270  # 6500
HEIGHT_TOLERANCE = 2.0
ADAPTER_NAME = 'hci0'
SCAN_TIMEOUT = 5
CONNECTION_TIMEOUT = 20
MOVEMENT_TIMEOUT = 30
|
[
"logging.getLogger"
] |
[((83, 113), 'logging.getLogger', 'logging.getLogger', (['__package__'], {}), '(__package__)\n', (100, 113), False, 'import logging\n')]
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for glazier.lib.logs."""
import os
import zipfile
from absl.testing import absltest
from glazier.lib import constants
from glazier.lib import file_util
from glazier.lib import logs
import mock
from pyfakefs.fake_filesystem_unittest import Patcher
TEST_ID = '1A19SEL90000R90DZN7A-1234567'
class LoggingTest(absltest.TestCase):
def testCollect(self):
with Patcher() as patcher:
files = [
os.path.join(constants.SYS_LOGS_PATH, 'log1.log'),
os.path.join(constants.SYS_LOGS_PATH, 'log2.log'),
]
patcher.fs.create_dir(constants.SYS_LOGS_PATH)
patcher.fs.create_file(files[0], contents='log1 content')
patcher.fs.create_file(files[1], contents='log2 content')
logs.Collect(r'C:\glazier.zip')
with zipfile.ZipFile(r'C:\glazier.zip', 'r') as out:
with out.open(files[1].lstrip('/')) as f2:
self.assertEqual(f2.read(), b'log2 content')
def testCollectIOErr(self):
with Patcher() as patcher:
patcher.fs.create_dir(constants.SYS_LOGS_PATH)
with self.assertRaises(logs.LogError):
logs.Collect(constants.SYS_LOGS_PATH)
@mock.patch.object(zipfile.ZipFile, 'write', autospec=True)
def testCollectValueErr(self, wr):
wr.side_effect = ValueError('ZIP does not support timestamps before 1980')
with Patcher() as patcher:
patcher.fs.create_dir(constants.SYS_LOGS_PATH)
patcher.fs.create_file(os.path.join(constants.SYS_LOGS_PATH, 'log1.log'))
with self.assertRaises(logs.LogError):
logs.Collect(r'C:\glazier.zip')
@mock.patch.object(logs.winpe, 'check_winpe', autospec=True)
def testGetLogsPath(self, wpe):
# WinPE
wpe.return_value = True
self.assertEqual(logs.GetLogsPath(), logs.constants.WINPE_LOGS_PATH)
# Host
wpe.return_value = False
self.assertEqual(logs.GetLogsPath(), logs.constants.SYS_LOGS_PATH)
@mock.patch.object(file_util, 'CreateDirectories')
@mock.patch.object(logs.buildinfo.BuildInfo, 'ImageID', autospec=True)
@mock.patch.object(logs.winpe, 'check_winpe', autospec=True)
@mock.patch.object(logs.logging, 'FileHandler')
def testSetup(self, fh, wpe, ii, create_dir):
ii.return_value = TEST_ID
wpe.return_value = False
logs.Setup()
create_dir.assert_called_with(r'%s\glazier.log' %
logs.constants.SYS_LOGS_PATH)
fh.assert_called_with(r'%s\glazier.log' % logs.constants.SYS_LOGS_PATH)
@mock.patch.object(file_util, 'CreateDirectories')
@mock.patch.object(logs.buildinfo.BuildInfo, 'ImageID', autospec=True)
@mock.patch.object(logs.winpe, 'check_winpe', autospec=True)
@mock.patch.object(logs.logging, 'FileHandler')
def testSetupError(self, fh, wpe, ii, create_dir):
ii.return_value = TEST_ID
wpe.return_value = False
fh.side_effect = IOError
with self.assertRaises(logs.LogError):
logs.Setup()
self.assertTrue(create_dir.called)
if __name__ == '__main__':
absltest.main()
|
[
"mock.patch.object",
"absl.testing.absltest.main",
"pyfakefs.fake_filesystem_unittest.Patcher",
"zipfile.ZipFile",
"glazier.lib.logs.Setup",
"glazier.lib.logs.Collect",
"glazier.lib.logs.GetLogsPath",
"os.path.join"
] |
[((1733, 1791), 'mock.patch.object', 'mock.patch.object', (['zipfile.ZipFile', '"""write"""'], {'autospec': '(True)'}), "(zipfile.ZipFile, 'write', autospec=True)\n", (1750, 1791), False, 'import mock\n'), ((2161, 2220), 'mock.patch.object', 'mock.patch.object', (['logs.winpe', '"""check_winpe"""'], {'autospec': '(True)'}), "(logs.winpe, 'check_winpe', autospec=True)\n", (2178, 2220), False, 'import mock\n'), ((2484, 2533), 'mock.patch.object', 'mock.patch.object', (['file_util', '"""CreateDirectories"""'], {}), "(file_util, 'CreateDirectories')\n", (2501, 2533), False, 'import mock\n'), ((2537, 2606), 'mock.patch.object', 'mock.patch.object', (['logs.buildinfo.BuildInfo', '"""ImageID"""'], {'autospec': '(True)'}), "(logs.buildinfo.BuildInfo, 'ImageID', autospec=True)\n", (2554, 2606), False, 'import mock\n'), ((2610, 2669), 'mock.patch.object', 'mock.patch.object', (['logs.winpe', '"""check_winpe"""'], {'autospec': '(True)'}), "(logs.winpe, 'check_winpe', autospec=True)\n", (2627, 2669), False, 'import mock\n'), ((2673, 2719), 'mock.patch.object', 'mock.patch.object', (['logs.logging', '"""FileHandler"""'], {}), "(logs.logging, 'FileHandler')\n", (2690, 2719), False, 'import mock\n'), ((3042, 3091), 'mock.patch.object', 'mock.patch.object', (['file_util', '"""CreateDirectories"""'], {}), "(file_util, 'CreateDirectories')\n", (3059, 3091), False, 'import mock\n'), ((3095, 3164), 'mock.patch.object', 'mock.patch.object', (['logs.buildinfo.BuildInfo', '"""ImageID"""'], {'autospec': '(True)'}), "(logs.buildinfo.BuildInfo, 'ImageID', autospec=True)\n", (3112, 3164), False, 'import mock\n'), ((3168, 3227), 'mock.patch.object', 'mock.patch.object', (['logs.winpe', '"""check_winpe"""'], {'autospec': '(True)'}), "(logs.winpe, 'check_winpe', autospec=True)\n", (3185, 3227), False, 'import mock\n'), ((3231, 3277), 'mock.patch.object', 'mock.patch.object', (['logs.logging', '"""FileHandler"""'], {}), "(logs.logging, 'FileHandler')\n", (3248, 3277), False, 'import mock\n'), ((3551, 3566), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (3564, 3566), False, 'from absl.testing import absltest\n'), ((2831, 2843), 'glazier.lib.logs.Setup', 'logs.Setup', ([], {}), '()\n', (2841, 2843), False, 'from glazier.lib import logs\n'), ((971, 980), 'pyfakefs.fake_filesystem_unittest.Patcher', 'Patcher', ([], {}), '()\n', (978, 980), False, 'from pyfakefs.fake_filesystem_unittest import Patcher\n'), ((1326, 1357), 'glazier.lib.logs.Collect', 'logs.Collect', (['"""C:\\\\glazier.zip"""'], {}), "('C:\\\\glazier.zip')\n", (1338, 1357), False, 'from glazier.lib import logs\n'), ((1563, 1572), 'pyfakefs.fake_filesystem_unittest.Patcher', 'Patcher', ([], {}), '()\n', (1570, 1572), False, 'from pyfakefs.fake_filesystem_unittest import Patcher\n'), ((1917, 1926), 'pyfakefs.fake_filesystem_unittest.Patcher', 'Patcher', ([], {}), '()\n', (1924, 1926), False, 'from pyfakefs.fake_filesystem_unittest import Patcher\n'), ((2316, 2334), 'glazier.lib.logs.GetLogsPath', 'logs.GetLogsPath', ([], {}), '()\n', (2332, 2334), False, 'from glazier.lib import logs\n'), ((2430, 2448), 'glazier.lib.logs.GetLogsPath', 'logs.GetLogsPath', ([], {}), '()\n', (2446, 2448), False, 'from glazier.lib import logs\n'), ((3468, 3480), 'glazier.lib.logs.Setup', 'logs.Setup', ([], {}), '()\n', (3478, 3480), False, 'from glazier.lib import logs\n'), ((1019, 1068), 'os.path.join', 'os.path.join', (['constants.SYS_LOGS_PATH', '"""log1.log"""'], {}), "(constants.SYS_LOGS_PATH, 'log1.log')\n", (1031, 1068), False, 'import os\n'), ((1080, 
1129), 'os.path.join', 'os.path.join', (['constants.SYS_LOGS_PATH', '"""log2.log"""'], {}), "(constants.SYS_LOGS_PATH, 'log2.log')\n", (1092, 1129), False, 'import os\n'), ((1369, 1408), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""C:\\\\glazier.zip"""', '"""r"""'], {}), "('C:\\\\glazier.zip', 'r')\n", (1384, 1408), False, 'import zipfile\n'), ((1691, 1728), 'glazier.lib.logs.Collect', 'logs.Collect', (['constants.SYS_LOGS_PATH'], {}), '(constants.SYS_LOGS_PATH)\n', (1703, 1728), False, 'from glazier.lib import logs\n'), ((2021, 2070), 'os.path.join', 'os.path.join', (['constants.SYS_LOGS_PATH', '"""log1.log"""'], {}), "(constants.SYS_LOGS_PATH, 'log1.log')\n", (2033, 2070), False, 'import os\n'), ((2125, 2156), 'glazier.lib.logs.Collect', 'logs.Collect', (['"""C:\\\\glazier.zip"""'], {}), "('C:\\\\glazier.zip')\n", (2137, 2156), False, 'from glazier.lib import logs\n')]
|
import paho.mqtt.client as mqtt
import json
import numpy as np
import pandas as pd
import cv2 as cv
from New.TrainModel import test_model, predict_location, linear_train_model, logistic_train_model, svm_train_model, \
rf_train_model
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
client.subscribe("IoT_Project")
def on_message(client, userdata, message):
msg = str(message.payload.decode("utf-8"))
print("Message: ", msg)
data = get_data(msg)
pred = predict_location(classifier, data)
print("Current Location: ", pred)
map_location(pred)
def get_data(msg):
data = {"f0:ec:af:cf:6c:e1": -150, "c9:a6:4d:9b:c0:8c": -150, "c2:b6:6e:70:fa:f7": -150,
"d9:5f:f5:4f:10:89": -150, "c4:52:32:5c:31:e7": -150, "e9:3c:4a:34:13:fb": -150,
"ed:61:e4:e8:22:30": -150, "ea:01:26:75:a4:c3": -150, "d0:4e:10:2e:cb:84": -150,
"e4:e0:0a:ae:fd:e2": -150, "fa:35:76:56:6f:e3": -150, "d5:b7:dc:69:ca:ae": -150,
"ca:81:7a:d7:55:49": -150, "e7:2b:ea:2f:95:c5": -150, "d4:32:fc:b5:f0:b5": -150}
all_beacons = list(data.keys())
msg_json = json.loads(msg)
beacons = list(msg_json.keys())
for x in beacons:
data[x] = msg_json[x]
data_list = []
for y in all_beacons:
data_list.append(data[y])
return data_list
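def _example_get_data():
    # Hypothetical usage sketch (never called): get_data turns a beacon JSON
    # payload into the fixed-order, 15-element RSSI vector above, padding every
    # beacon missing from the message with -150.
    sample_msg = '{"f0:ec:af:cf:6c:e1": -60, "c9:a6:4d:9b:c0:8c": -72}'
    vector = get_data(sample_msg)
    assert len(vector) == 15
    assert vector[0] == -60 and vector[2] == -150
    return vector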
def map_location(prediction):
    # use a name that does not shadow the built-in map()
    map_img = cv.imread("map.jpeg")
locations = [(275, 215), (75, 240), (135, 300), (208, 270), (355, 270), (420, 390), (320, 335), (535, 215),
(520, 275), (410, 260), (430, 215), (580, 180), (200, 230), (440, 360), (250, 255), (395, 290),
(320, 240), (360, 340), (380, 390), (250, 320), (410, 330), (480, 190), (460, 260)]
    cv.circle(map_img, locations[prediction - 1], 10, (0, 0, 255), thickness=5)
    cv.imshow("Location", map_img)
cv.waitKey()
cv.destroyAllWindows()
# Train the model
classifier = rf_train_model()
test_model(classifier)
# Subscribe to topic
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("mqtt.eclipse.org", 1883, 60)
client.loop_forever()
|
[
"New.TrainModel.rf_train_model",
"cv2.circle",
"json.loads",
"cv2.waitKey",
"cv2.destroyAllWindows",
"New.TrainModel.predict_location",
"cv2.imread",
"paho.mqtt.client.Client",
"cv2.imshow",
"New.TrainModel.test_model"
] |
[((2002, 2018), 'New.TrainModel.rf_train_model', 'rf_train_model', ([], {}), '()\n', (2016, 2018), False, 'from New.TrainModel import test_model, predict_location, linear_train_model, logistic_train_model, svm_train_model, rf_train_model\n'), ((2020, 2042), 'New.TrainModel.test_model', 'test_model', (['classifier'], {}), '(classifier)\n', (2030, 2042), False, 'from New.TrainModel import test_model, predict_location, linear_train_model, logistic_train_model, svm_train_model, rf_train_model\n'), ((2077, 2090), 'paho.mqtt.client.Client', 'mqtt.Client', ([], {}), '()\n', (2088, 2090), True, 'import paho.mqtt.client as mqtt\n'), ((545, 579), 'New.TrainModel.predict_location', 'predict_location', (['classifier', 'data'], {}), '(classifier, data)\n', (561, 579), False, 'from New.TrainModel import test_model, predict_location, linear_train_model, logistic_train_model, svm_train_model, rf_train_model\n'), ((1200, 1215), 'json.loads', 'json.loads', (['msg'], {}), '(msg)\n', (1210, 1215), False, 'import json\n'), ((1461, 1482), 'cv2.imread', 'cv.imread', (['"""map.jpeg"""'], {}), "('map.jpeg')\n", (1470, 1482), True, 'import cv2 as cv\n'), ((1817, 1888), 'cv2.circle', 'cv.circle', (['map', 'locations[prediction - 1]', '(10)', '(0, 0, 255)'], {'thickness': '(5)'}), '(map, locations[prediction - 1], 10, (0, 0, 255), thickness=5)\n', (1826, 1888), True, 'import cv2 as cv\n'), ((1892, 1918), 'cv2.imshow', 'cv.imshow', (['"""Location"""', 'map'], {}), "('Location', map)\n", (1901, 1918), True, 'import cv2 as cv\n'), ((1924, 1936), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (1934, 1936), True, 'import cv2 as cv\n'), ((1942, 1964), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1962, 1964), True, 'import cv2 as cv\n')]
|
import main, os
class LogLevel:
def __init__(self, levelName, directory):
self.levelName = levelName
self.unzippedFilePath = os.path.join(directory, levelName + ".log")
self.zippedFiles = []
self.totalZippedSize = 0
# add each zipped file of the logLevel to the list 'zippedFiles'
for fileName, filePath in main.listLogArchives(levelName, directory):
if ".gz" in fileName:
self.addZippedFile(filePath)
def addZippedFile(self, filePath):
self.zippedFiles.append({
"path": filePath,
"size": main.size(filePath),
"index": main.index(filePath)
})
self.totalZippedSize += main.size(filePath) # add the size to the total size of zipped logs
self.zippedFiles.sort(key=lambda x: x["index"]) # sort list by index
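# Illustrative example (file names are hypothetical): for a directory holding
# error.log, error.log.1.gz and error.log.2.gz, LogLevel("error", "/var/log/app")
# records the unzipped path and collects the two archives, kept sorted by index.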
|
[
"main.index",
"main.size",
"main.listLogArchives",
"os.path.join"
] |
[((147, 190), 'os.path.join', 'os.path.join', (['directory', "(levelName + '.log')"], {}), "(directory, levelName + '.log')\n", (159, 190), False, 'import main, os\n'), ((362, 404), 'main.listLogArchives', 'main.listLogArchives', (['levelName', 'directory'], {}), '(levelName, directory)\n', (382, 404), False, 'import main, os\n'), ((715, 734), 'main.size', 'main.size', (['filePath'], {}), '(filePath)\n', (724, 734), False, 'import main, os\n'), ((609, 628), 'main.size', 'main.size', (['filePath'], {}), '(filePath)\n', (618, 628), False, 'import main, os\n'), ((651, 671), 'main.index', 'main.index', (['filePath'], {}), '(filePath)\n', (661, 671), False, 'import main, os\n')]
|
from telegram.constants import MAX_FILESIZE_DOWNLOAD
from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler
from pdf_bot.consts import (
BACK,
BEAUTIFY,
BY_PERCENT,
BY_SIZE,
CANCEL,
COMPRESS,
COMPRESSED,
CROP,
DECRYPT,
ENCRYPT,
EXTRACT_IMAGE,
EXTRACT_TEXT,
IMAGES,
OCR,
PDF_INFO,
PREVIEW,
RENAME,
ROTATE,
SCALE,
SPLIT,
TEXT_FILE,
TEXT_FILTER,
TEXT_MESSAGE,
TO_DIMENSIONS,
TO_IMAGES,
TO_PDF,
WAIT_CROP_OFFSET,
WAIT_CROP_PERCENT,
WAIT_CROP_TYPE,
WAIT_DECRYPT_PW,
WAIT_DOC_TASK,
WAIT_ENCRYPT_PW,
WAIT_EXTRACT_IMAGE_TYPE,
WAIT_FILE_NAME,
WAIT_IMAGE_TASK,
WAIT_ROTATE_DEGREE,
WAIT_SCALE_DIMENSION,
WAIT_SCALE_PERCENT,
WAIT_SCALE_TYPE,
WAIT_SPLIT_RANGE,
WAIT_TEXT_TYPE,
WAIT_TO_IMAGE_TYPE,
)
from pdf_bot.files.compress import compress_pdf
from pdf_bot.files.crop import (
ask_crop_type,
ask_crop_value,
check_crop_percent,
check_crop_size,
)
from pdf_bot.files.crypto import (
ask_decrypt_pw,
ask_encrypt_pw,
decrypt_pdf,
encrypt_pdf,
)
from pdf_bot.files.document import ask_doc_task
from pdf_bot.files.image import (
ask_image_results_type,
ask_image_task,
get_pdf_images,
get_pdf_preview,
pdf_to_images,
process_image_task,
)
from pdf_bot.files.ocr import add_ocr_to_pdf
from pdf_bot.files.rename import ask_pdf_new_name, rename_pdf
from pdf_bot.files.rotate import ask_rotate_degree, check_rotate_degree
from pdf_bot.files.scale import (
ask_scale_type,
ask_scale_value,
check_scale_dimension,
check_scale_percent,
)
from pdf_bot.files.split import ask_split_range, split_pdf
from pdf_bot.files.text import ask_text_type, get_pdf_text
from pdf_bot.language import set_lang
from pdf_bot.utils import cancel
def file_cov_handler():
conv_handler = ConversationHandler(
entry_points=[
MessageHandler(Filters.document, check_doc),
MessageHandler(Filters.photo, check_image),
],
states={
WAIT_DOC_TASK: [MessageHandler(TEXT_FILTER, check_doc_task)],
WAIT_IMAGE_TASK: [MessageHandler(TEXT_FILTER, check_image_task)],
WAIT_CROP_TYPE: [MessageHandler(TEXT_FILTER, check_crop_task)],
WAIT_CROP_PERCENT: [MessageHandler(TEXT_FILTER, check_crop_percent)],
WAIT_CROP_OFFSET: [MessageHandler(TEXT_FILTER, check_crop_size)],
WAIT_DECRYPT_PW: [MessageHandler(TEXT_FILTER, decrypt_pdf)],
WAIT_ENCRYPT_PW: [MessageHandler(TEXT_FILTER, encrypt_pdf)],
WAIT_FILE_NAME: [MessageHandler(TEXT_FILTER, rename_pdf)],
WAIT_ROTATE_DEGREE: [MessageHandler(TEXT_FILTER, check_rotate_degree)],
WAIT_SPLIT_RANGE: [MessageHandler(TEXT_FILTER, split_pdf)],
WAIT_TEXT_TYPE: [MessageHandler(TEXT_FILTER, check_text_task)],
WAIT_SCALE_TYPE: [MessageHandler(TEXT_FILTER, check_scale_task)],
WAIT_SCALE_PERCENT: [MessageHandler(TEXT_FILTER, check_scale_percent)],
WAIT_SCALE_DIMENSION: [MessageHandler(TEXT_FILTER, check_scale_dimension)],
WAIT_EXTRACT_IMAGE_TYPE: [
MessageHandler(TEXT_FILTER, check_get_images_task)
],
WAIT_TO_IMAGE_TYPE: [MessageHandler(TEXT_FILTER, check_to_images_task)],
},
fallbacks=[CommandHandler("cancel", cancel)],
allow_reentry=True,
)
return conv_handler
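# Hypothetical wiring note: the bot's setup code (not part of this module)
# would typically register this handler on the dispatcher, e.g.
#     dispatcher.add_handler(file_cov_handler())
# for python-telegram-bot v13-style Updater/Dispatcher code matching the
# Filters usage above.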
def check_doc(update, context):
doc = update.effective_message.document
if doc.mime_type.startswith("image"):
return ask_image_task(update, context, doc)
if not doc.mime_type.endswith("pdf"):
return ConversationHandler.END
if doc.file_size >= MAX_FILESIZE_DOWNLOAD:
_ = set_lang(update, context)
update.effective_message.reply_text(
"{desc_1}\n\n{desc_2}".format(
desc_1=_("Your file is too big for me to download and process"),
desc_2=_(
"Note that this is a Telegram Bot limitation and there's "
"nothing I can do unless Telegram changes this limit"
),
),
)
return ConversationHandler.END
context.user_data[PDF_INFO] = doc.file_id, doc.file_name
return ask_doc_task(update, context)
def check_image(update, context):
return ask_image_task(update, context, update.effective_message.photo[-1])
def check_doc_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text == _(CROP):
return ask_crop_type(update, context)
if text == _(DECRYPT):
return ask_decrypt_pw(update, context)
if text == _(ENCRYPT):
return ask_encrypt_pw(update, context)
if text in [_(EXTRACT_IMAGE), _(TO_IMAGES)]:
return ask_image_results_type(update, context)
if text == _(PREVIEW):
return get_pdf_preview(update, context)
if text == _(RENAME):
return ask_pdf_new_name(update, context)
if text == _(ROTATE):
return ask_rotate_degree(update, context)
if text in [_(SCALE)]:
return ask_scale_type(update, context)
if text == _(SPLIT):
return ask_split_range(update, context)
if text == _(EXTRACT_TEXT):
return ask_text_type(update, context)
if text == OCR:
return add_ocr_to_pdf(update, context)
if text == COMPRESS:
return compress_pdf(update, context)
if text == _(CANCEL):
return cancel(update, context)
return WAIT_DOC_TASK
def check_image_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text in [_(BEAUTIFY), _(TO_PDF)]:
return process_image_task(update, context)
if text == _(CANCEL):
return cancel(update, context)
return WAIT_IMAGE_TASK
def check_crop_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text in [_(BY_PERCENT), _(BY_SIZE)]:
return ask_crop_value(update, context)
if text == _(BACK):
return ask_doc_task(update, context)
return WAIT_CROP_TYPE
def check_scale_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text in [_(BY_PERCENT), _(TO_DIMENSIONS)]:
return ask_scale_value(update, context)
if text == _(BACK):
return ask_doc_task(update, context)
return WAIT_SCALE_TYPE
def check_text_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text == _(TEXT_MESSAGE):
return get_pdf_text(update, context, is_file=False)
if text == _(TEXT_FILE):
return get_pdf_text(update, context, is_file=True)
if text == _(BACK):
return ask_doc_task(update, context)
return WAIT_TEXT_TYPE
def check_get_images_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text in [_(IMAGES), _(COMPRESSED)]:
return get_pdf_images(update, context)
if text == _(BACK):
return ask_doc_task(update, context)
return WAIT_EXTRACT_IMAGE_TYPE
def check_to_images_task(update, context):
_ = set_lang(update, context)
text = update.effective_message.text
if text in [_(IMAGES), _(COMPRESSED)]:
return pdf_to_images(update, context)
if text == _(BACK):
return ask_doc_task(update, context)
return WAIT_TO_IMAGE_TYPE
|
[
"pdf_bot.files.rotate.ask_rotate_degree",
"pdf_bot.files.crop.ask_crop_type",
"pdf_bot.utils.cancel",
"pdf_bot.files.text.ask_text_type",
"pdf_bot.files.image.get_pdf_images",
"pdf_bot.files.ocr.add_ocr_to_pdf",
"pdf_bot.files.text.get_pdf_text",
"pdf_bot.language.set_lang",
"pdf_bot.files.document.ask_doc_task",
"pdf_bot.files.image.ask_image_task",
"pdf_bot.files.image.ask_image_results_type",
"telegram.ext.CommandHandler",
"pdf_bot.files.compress.compress_pdf",
"pdf_bot.files.crypto.ask_decrypt_pw",
"pdf_bot.files.crypto.ask_encrypt_pw",
"pdf_bot.files.image.process_image_task",
"pdf_bot.files.scale.ask_scale_value",
"pdf_bot.files.rename.ask_pdf_new_name",
"telegram.ext.MessageHandler",
"pdf_bot.files.split.ask_split_range",
"pdf_bot.files.crop.ask_crop_value",
"pdf_bot.files.image.get_pdf_preview",
"pdf_bot.files.scale.ask_scale_type",
"pdf_bot.files.image.pdf_to_images"
] |
[((4367, 4396), 'pdf_bot.files.document.ask_doc_task', 'ask_doc_task', (['update', 'context'], {}), '(update, context)\n', (4379, 4396), False, 'from pdf_bot.files.document import ask_doc_task\n'), ((4444, 4511), 'pdf_bot.files.image.ask_image_task', 'ask_image_task', (['update', 'context', 'update.effective_message.photo[-1]'], {}), '(update, context, update.effective_message.photo[-1])\n', (4458, 4511), False, 'from pdf_bot.files.image import ask_image_results_type, ask_image_task, get_pdf_images, get_pdf_preview, pdf_to_images, process_image_task\n'), ((4559, 4584), 'pdf_bot.language.set_lang', 'set_lang', (['update', 'context'], {}), '(update, context)\n', (4567, 4584), False, 'from pdf_bot.language import set_lang\n'), ((5677, 5702), 'pdf_bot.language.set_lang', 'set_lang', (['update', 'context'], {}), '(update, context)\n', (5685, 5702), False, 'from pdf_bot.language import set_lang\n'), ((5978, 6003), 'pdf_bot.language.set_lang', 'set_lang', (['update', 'context'], {}), '(update, context)\n', (5986, 6003), False, 'from pdf_bot.language import set_lang\n'), ((6282, 6307), 'pdf_bot.language.set_lang', 'set_lang', (['update', 'context'], {}), '(update, context)\n', (6290, 6307), False, 'from pdf_bot.language import set_lang\n'), ((6593, 6618), 'pdf_bot.language.set_lang', 'set_lang', (['update', 'context'], {}), '(update, context)\n', (6601, 6618), False, 'from pdf_bot.language import set_lang\n'), ((6991, 7016), 'pdf_bot.language.set_lang', 'set_lang', (['update', 'context'], {}), '(update, context)\n', (6999, 7016), False, 'from pdf_bot.language import set_lang\n'), ((7307, 7332), 'pdf_bot.language.set_lang', 'set_lang', (['update', 'context'], {}), '(update, context)\n', (7315, 7332), False, 'from pdf_bot.language import set_lang\n'), ((3659, 3695), 'pdf_bot.files.image.ask_image_task', 'ask_image_task', (['update', 'context', 'doc'], {}), '(update, context, doc)\n', (3673, 3695), False, 'from pdf_bot.files.image import ask_image_results_type, ask_image_task, get_pdf_images, get_pdf_preview, pdf_to_images, process_image_task\n'), ((3836, 3861), 'pdf_bot.language.set_lang', 'set_lang', (['update', 'context'], {}), '(update, context)\n', (3844, 3861), False, 'from pdf_bot.language import set_lang\n'), ((4666, 4696), 'pdf_bot.files.crop.ask_crop_type', 'ask_crop_type', (['update', 'context'], {}), '(update, context)\n', (4679, 4696), False, 'from pdf_bot.files.crop import ask_crop_type, ask_crop_value, check_crop_percent, check_crop_size\n'), ((4739, 4770), 'pdf_bot.files.crypto.ask_decrypt_pw', 'ask_decrypt_pw', (['update', 'context'], {}), '(update, context)\n', (4753, 4770), False, 'from pdf_bot.files.crypto import ask_decrypt_pw, ask_encrypt_pw, decrypt_pdf, encrypt_pdf\n'), ((4813, 4844), 'pdf_bot.files.crypto.ask_encrypt_pw', 'ask_encrypt_pw', (['update', 'context'], {}), '(update, context)\n', (4827, 4844), False, 'from pdf_bot.files.crypto import ask_decrypt_pw, ask_encrypt_pw, decrypt_pdf, encrypt_pdf\n'), ((4909, 4948), 'pdf_bot.files.image.ask_image_results_type', 'ask_image_results_type', (['update', 'context'], {}), '(update, context)\n', (4931, 4948), False, 'from pdf_bot.files.image import ask_image_results_type, ask_image_task, get_pdf_images, get_pdf_preview, pdf_to_images, process_image_task\n'), ((4991, 5023), 'pdf_bot.files.image.get_pdf_preview', 'get_pdf_preview', (['update', 'context'], {}), '(update, context)\n', (5006, 5023), False, 'from pdf_bot.files.image import ask_image_results_type, ask_image_task, get_pdf_images, get_pdf_preview, pdf_to_images, 
process_image_task\n'), ((5065, 5098), 'pdf_bot.files.rename.ask_pdf_new_name', 'ask_pdf_new_name', (['update', 'context'], {}), '(update, context)\n', (5081, 5098), False, 'from pdf_bot.files.rename import ask_pdf_new_name, rename_pdf\n'), ((5140, 5174), 'pdf_bot.files.rotate.ask_rotate_degree', 'ask_rotate_degree', (['update', 'context'], {}), '(update, context)\n', (5157, 5174), False, 'from pdf_bot.files.rotate import ask_rotate_degree, check_rotate_degree\n'), ((5217, 5248), 'pdf_bot.files.scale.ask_scale_type', 'ask_scale_type', (['update', 'context'], {}), '(update, context)\n', (5231, 5248), False, 'from pdf_bot.files.scale import ask_scale_type, ask_scale_value, check_scale_dimension, check_scale_percent\n'), ((5289, 5321), 'pdf_bot.files.split.ask_split_range', 'ask_split_range', (['update', 'context'], {}), '(update, context)\n', (5304, 5321), False, 'from pdf_bot.files.split import ask_split_range, split_pdf\n'), ((5369, 5399), 'pdf_bot.files.text.ask_text_type', 'ask_text_type', (['update', 'context'], {}), '(update, context)\n', (5382, 5399), False, 'from pdf_bot.files.text import ask_text_type, get_pdf_text\n'), ((5435, 5466), 'pdf_bot.files.ocr.add_ocr_to_pdf', 'add_ocr_to_pdf', (['update', 'context'], {}), '(update, context)\n', (5449, 5466), False, 'from pdf_bot.files.ocr import add_ocr_to_pdf\n'), ((5507, 5536), 'pdf_bot.files.compress.compress_pdf', 'compress_pdf', (['update', 'context'], {}), '(update, context)\n', (5519, 5536), False, 'from pdf_bot.files.compress import compress_pdf\n'), ((5578, 5601), 'pdf_bot.utils.cancel', 'cancel', (['update', 'context'], {}), '(update, context)\n', (5584, 5601), False, 'from pdf_bot.utils import cancel\n'), ((5801, 5836), 'pdf_bot.files.image.process_image_task', 'process_image_task', (['update', 'context'], {}), '(update, context)\n', (5819, 5836), False, 'from pdf_bot.files.image import ask_image_results_type, ask_image_task, get_pdf_images, get_pdf_preview, pdf_to_images, process_image_task\n'), ((5878, 5901), 'pdf_bot.utils.cancel', 'cancel', (['update', 'context'], {}), '(update, context)\n', (5884, 5901), False, 'from pdf_bot.utils import cancel\n'), ((6105, 6136), 'pdf_bot.files.crop.ask_crop_value', 'ask_crop_value', (['update', 'context'], {}), '(update, context)\n', (6119, 6136), False, 'from pdf_bot.files.crop import ask_crop_type, ask_crop_value, check_crop_percent, check_crop_size\n'), ((6176, 6205), 'pdf_bot.files.document.ask_doc_task', 'ask_doc_task', (['update', 'context'], {}), '(update, context)\n', (6188, 6205), False, 'from pdf_bot.files.document import ask_doc_task\n'), ((6415, 6447), 'pdf_bot.files.scale.ask_scale_value', 'ask_scale_value', (['update', 'context'], {}), '(update, context)\n', (6430, 6447), False, 'from pdf_bot.files.scale import ask_scale_type, ask_scale_value, check_scale_dimension, check_scale_percent\n'), ((6487, 6516), 'pdf_bot.files.document.ask_doc_task', 'ask_doc_task', (['update', 'context'], {}), '(update, context)\n', (6499, 6516), False, 'from pdf_bot.files.document import ask_doc_task\n'), ((6708, 6752), 'pdf_bot.files.text.get_pdf_text', 'get_pdf_text', (['update', 'context'], {'is_file': '(False)'}), '(update, context, is_file=False)\n', (6720, 6752), False, 'from pdf_bot.files.text import ask_text_type, get_pdf_text\n'), ((6797, 6840), 'pdf_bot.files.text.get_pdf_text', 'get_pdf_text', (['update', 'context'], {'is_file': '(True)'}), '(update, context, is_file=True)\n', (6809, 6840), False, 'from pdf_bot.files.text import ask_text_type, get_pdf_text\n'), ((6880, 6909), 
'pdf_bot.files.document.ask_doc_task', 'ask_doc_task', (['update', 'context'], {}), '(update, context)\n', (6892, 6909), False, 'from pdf_bot.files.document import ask_doc_task\n'), ((7117, 7148), 'pdf_bot.files.image.get_pdf_images', 'get_pdf_images', (['update', 'context'], {}), '(update, context)\n', (7131, 7148), False, 'from pdf_bot.files.image import ask_image_results_type, ask_image_task, get_pdf_images, get_pdf_preview, pdf_to_images, process_image_task\n'), ((7188, 7217), 'pdf_bot.files.document.ask_doc_task', 'ask_doc_task', (['update', 'context'], {}), '(update, context)\n', (7200, 7217), False, 'from pdf_bot.files.document import ask_doc_task\n'), ((7433, 7463), 'pdf_bot.files.image.pdf_to_images', 'pdf_to_images', (['update', 'context'], {}), '(update, context)\n', (7446, 7463), False, 'from pdf_bot.files.image import ask_image_results_type, ask_image_task, get_pdf_images, get_pdf_preview, pdf_to_images, process_image_task\n'), ((7503, 7532), 'pdf_bot.files.document.ask_doc_task', 'ask_doc_task', (['update', 'context'], {}), '(update, context)\n', (7515, 7532), False, 'from pdf_bot.files.document import ask_doc_task\n'), ((1978, 2021), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.document', 'check_doc'], {}), '(Filters.document, check_doc)\n', (1992, 2021), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((2035, 2077), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.photo', 'check_image'], {}), '(Filters.photo, check_image)\n', (2049, 2077), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((3430, 3462), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""cancel"""', 'cancel'], {}), "('cancel', cancel)\n", (3444, 3462), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((2135, 2178), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'check_doc_task'], {}), '(TEXT_FILTER, check_doc_task)\n', (2149, 2178), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((2211, 2256), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'check_image_task'], {}), '(TEXT_FILTER, check_image_task)\n', (2225, 2256), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((2288, 2332), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'check_crop_task'], {}), '(TEXT_FILTER, check_crop_task)\n', (2302, 2332), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((2367, 2414), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'check_crop_percent'], {}), '(TEXT_FILTER, check_crop_percent)\n', (2381, 2414), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((2448, 2492), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'check_crop_size'], {}), '(TEXT_FILTER, check_crop_size)\n', (2462, 2492), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((2525, 2565), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'decrypt_pdf'], {}), '(TEXT_FILTER, decrypt_pdf)\n', (2539, 2565), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((2598, 2638), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'encrypt_pdf'], {}), '(TEXT_FILTER, encrypt_pdf)\n', 
(2612, 2638), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((2670, 2709), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'rename_pdf'], {}), '(TEXT_FILTER, rename_pdf)\n', (2684, 2709), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((2745, 2793), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'check_rotate_degree'], {}), '(TEXT_FILTER, check_rotate_degree)\n', (2759, 2793), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((2827, 2865), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'split_pdf'], {}), '(TEXT_FILTER, split_pdf)\n', (2841, 2865), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((2897, 2941), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'check_text_task'], {}), '(TEXT_FILTER, check_text_task)\n', (2911, 2941), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((2974, 3019), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'check_scale_task'], {}), '(TEXT_FILTER, check_scale_task)\n', (2988, 3019), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((3055, 3103), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'check_scale_percent'], {}), '(TEXT_FILTER, check_scale_percent)\n', (3069, 3103), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((3141, 3191), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'check_scale_dimension'], {}), '(TEXT_FILTER, check_scale_dimension)\n', (3155, 3191), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((3249, 3299), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'check_get_images_task'], {}), '(TEXT_FILTER, check_get_images_task)\n', (3263, 3299), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n'), ((3348, 3397), 'telegram.ext.MessageHandler', 'MessageHandler', (['TEXT_FILTER', 'check_to_images_task'], {}), '(TEXT_FILTER, check_to_images_task)\n', (3362, 3397), False, 'from telegram.ext import CommandHandler, ConversationHandler, Filters, MessageHandler\n')]
|
#!/usr/bin/env python
"""
A script for converting the README.md to a quick-start guide for inclusion
in the documentation
"""
from m2r import convert
def build_quick_start():
replace = {'# MPAS-Analysis': '# Quick Start Guide\n',
'[![Build Status]': '',
'[![Documentation Status]': '',
'':
'\n'}
skip = [('## conda-forge', '## Installation')]
outContent = ''
skipMode = False
with open('../README.md', 'r') as inFile:
for line in inFile.readlines():
for skipStart, skipEnd in skip:
if not skipMode and skipStart in line:
skipMode = True
if skipMode and skipEnd in line:
skipMode = False
if not skipMode:
for replaceString in replace:
if replaceString in line:
line = replace[replaceString]
break
outContent = outContent + line
outContent = convert(outContent)
with open('quick_start.rst', 'w') as outFile:
outFile.write('.. _quick_start:\n\n')
outFile.write(outContent)
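# Usage sketch (added for illustration, not part of the original script): one
# way to trigger the conversion when the module is run directly; how the docs
# build actually invokes build_quick_start() is an assumption here.
if __name__ == '__main__':
    build_quick_start()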
|
[
"m2r.convert"
] |
[((1145, 1164), 'm2r.convert', 'convert', (['outContent'], {}), '(outContent)\n', (1152, 1164), False, 'from m2r import convert\n')]
|
# #########################################################################
# Copyright (c) 2020, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2020. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
from orangewidget import gui
from oasys.widgets import gui as oasysgui
from orangecontrib.wavepy2.util.gui.ow_wavepy_widget import WavePyWidget
from orangecontrib.wavepy2.util.wavepy_objects import OasysWavePyData
class WavePyProcessWidget(WavePyWidget):
CONTROL_AREA_HEIGTH = 900
CONTROL_AREA_WIDTH = 1600
MAX_WIDTH_NO_MAIN = CONTROL_AREA_WIDTH + 10
MAX_HEIGHT = CONTROL_AREA_HEIGTH + 10
inputs = [("WavePy Data", OasysWavePyData, "set_input"),]
outputs = [{"name": "WavePy Data",
"type": OasysWavePyData,
"doc": "WavePy Data",
"id": "WavePy_Data"}]
must_clean_layout = True
show_results_when_ready = True
def __init__(self, show_general_option_box=True, show_automatic_box=True, show_results_when_ready_box=True):
super(WavePyProcessWidget, self).__init__(show_general_option_box=show_general_option_box, show_automatic_box=show_automatic_box)
self.setFixedWidth(self.MAX_WIDTH_NO_MAIN)
self.setFixedHeight(self.MAX_HEIGHT)
if show_results_when_ready_box : gui.checkBox(self._general_options_box, self, 'show_results_when_ready', 'Show results when ready')
else: self.show_results_when_ready = False
gui.rubber(self.controlArea)
def set_input(self, data):
        if data is not None:
data = data.duplicate()
self._initialization_parameters = data.get_initialization_parameters()
self._calculation_parameters = data.get_calculation_parameters()
self._process_manager = data.get_process_manager()
if self.is_automatic_run: self._execute()
def _execute(self):
self.progressBarInit()
if self.must_clean_layout: self._clear_wavepy_layout()
self.progressBarSet(10)
output_calculation_parameters = self._get_output_parameters()
self.progressBarSet(90)
self.controlArea.setFixedWidth(self.CONTROL_AREA_WIDTH)
self.controlArea.setFixedHeight(self.CONTROL_AREA_HEIGTH)
gui.rubber(self.controlArea)
output = OasysWavePyData()
output.set_process_manager(self._process_manager)
output.set_initialization_parameters(self._initialization_parameters)
output.set_calculation_parameters(output_calculation_parameters)
self.progressBarSet(100)
self.progressBarFinished()
self.send("WavePy Data", output)
if self.show_results_when_ready: self.show()
def _get_output_parameters(self):
raise NotImplementedError()
from orangecontrib.wavepy2.util.gui.ow_wavepy_widget import clear_layout
from wavepy2.util.plot.plot_tools import DefaultContextWidget
class WavePyProcessWidgetWithOptions(WavePyProcessWidget):
def __init__(self, show_general_option_box=True, show_automatic_box=True, show_results_when_ready_box=True):
super(WavePyProcessWidgetWithOptions, self).__init__(show_general_option_box=show_general_option_box, show_automatic_box=show_automatic_box, show_results_when_ready_box=show_results_when_ready_box)
self._options_area = oasysgui.widgetBox(self._wavepy_widget_area, "Options", addSpace=False, orientation="vertical",
width=self._get_option_area_width())
self._lateral_wavepy_widget_area = oasysgui.widgetBox(self._wavepy_widget_area, "", addSpace=False, orientation="vertical",
width=self.CONTROL_AREA_WIDTH - self._get_option_area_width())
def _get_option_area_width(self):
return 200
def _clear_wavepy_layout(self):
clear_layout(self._lateral_wavepy_widget_area.layout())
def _get_default_context(self):
return DefaultContextWidget(self._lateral_wavepy_widget_area)
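# Illustrative sketch (not part of the original module): concrete widgets are
# expected to subclass WavePyProcessWidget (or the ...WithOptions variant) and
# implement _get_output_parameters(), returning the calculation parameters
# produced by the process manager. The process-manager method name below is a
# made-up placeholder, shown only to indicate the expected shape.
#
# class MyProcessWidget(WavePyProcessWidgetWithOptions):
#     name = "My Process"
#
#     def _get_output_parameters(self):
#         return self._process_manager.run_some_step(  # hypothetical method
#             self._initialization_parameters, self._calculation_parameters)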
|
[
"orangecontrib.wavepy2.util.wavepy_objects.OasysWavePyData",
"wavepy2.util.plot.plot_tools.DefaultContextWidget",
"orangewidget.gui.checkBox",
"orangewidget.gui.rubber"
] |
[((4596, 4624), 'orangewidget.gui.rubber', 'gui.rubber', (['self.controlArea'], {}), '(self.controlArea)\n', (4606, 4624), False, 'from orangewidget import gui\n'), ((5411, 5439), 'orangewidget.gui.rubber', 'gui.rubber', (['self.controlArea'], {}), '(self.controlArea)\n', (5421, 5439), False, 'from orangewidget import gui\n'), ((5458, 5475), 'orangecontrib.wavepy2.util.wavepy_objects.OasysWavePyData', 'OasysWavePyData', ([], {}), '()\n', (5473, 5475), False, 'from orangecontrib.wavepy2.util.wavepy_objects import OasysWavePyData\n'), ((7148, 7202), 'wavepy2.util.plot.plot_tools.DefaultContextWidget', 'DefaultContextWidget', (['self._lateral_wavepy_widget_area'], {}), '(self._lateral_wavepy_widget_area)\n', (7168, 7202), False, 'from wavepy2.util.plot.plot_tools import DefaultContextWidget\n'), ((4436, 4539), 'orangewidget.gui.checkBox', 'gui.checkBox', (['self._general_options_box', 'self', '"""show_results_when_ready"""', '"""Show results when ready"""'], {}), "(self._general_options_box, self, 'show_results_when_ready',\n 'Show results when ready')\n", (4448, 4539), False, 'from orangewidget import gui\n')]
|
#!/usr/bin/env python
"""
Setup for distribution package.
"""
from setuptools import setup
setup(name='dist_pdf',
version='1.0',
description='Distribution of data',
packages=['dist_pdf'],
      zip_safe=False)
|
[
"setuptools.setup"
] |
[((92, 208), 'setuptools.setup', 'setup', ([], {'name': '"""dist_pdf"""', 'version': '"""1.0"""', 'description': '"""Distribution of data"""', 'packages': "['dist_pdf']", 'zip_safe': '(False)'}), "(name='dist_pdf', version='1.0', description='Distribution of data',\n packages=['dist_pdf'], zip_safe=False)\n", (97, 208), False, 'from setuptools import setup\n')]
|
from numpy.testing._private.utils import assert_allclose
from sysidentpy.polynomial_basis import PolynomialNarmax
from sysidentpy.utils.generate_data import get_miso_data, get_siso_data
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from numpy.testing import assert_raises
from sysidentpy.polynomial_basis import SimulatePolynomialNarmax
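# Note added for clarity (not in the original test module): the integer
# regressor codes used below appear to combine variable and lag -- e.g.
# 1001 ~ y(k-1), 2001 ~ x1(k-1), 2002 ~ x1(k-2), 1004 ~ y(k-4) -- as suggested
# by the inline comments and by test_get_lag_from_regressor_code.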
def test_get_index_from_regressor_code():
s = SimulatePolynomialNarmax()
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
regressor_space = np.array(
[
[0, 0],
[1001, 0],
[2001, 0],
[2002, 0],
[1001, 1001],
[2001, 1001],
[2002, 1001],
[2001, 2001],
[2002, 2001],
[2002, 2002],
]
)
index = s._get_index_from_regressor_code(
regressor_code=regressor_space, model_code=model
)
assert (index == np.array([1, 3, 5])).all()
def test_list_output_regressor():
s = SimulatePolynomialNarmax()
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
y_code = s._list_output_regressor_code(model)
assert (y_code == np.array([1001, 1001])).all()
def test_list_input_regressor():
s = SimulatePolynomialNarmax()
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
x_code = s._list_input_regressor_code(model)
assert (x_code == np.array([2001, 2002])).all()
def test_get_lag_from_regressor_code():
s = SimulatePolynomialNarmax()
list_regressor1 = np.array([2001, 2002])
list_regressor2 = np.array([1004, 1002])
max_lag1 = s._get_lag_from_regressor_code(list_regressor1)
max_lag2 = s._get_lag_from_regressor_code(list_regressor2)
assert max_lag1 == 2
assert max_lag2 == 4
def test_simulate():
x_train, x_valid, y_train, y_valid = get_siso_data(
n=1000, colored_noise=False, sigma=0.001, train_percentage=90
)
s = SimulatePolynomialNarmax()
# the model must be a numpy array
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
# theta must be a numpy array of shape (n, 1) where n is the number of regressors
theta = np.array([[0.2, 0.9, 0.1]]).T
yhat, results = s.simulate(
X_test=x_valid, y_test=y_valid, model_code=model, theta=theta, plot=False
)
assert yhat.shape == (100, 1)
assert len(results) == 3
def test_simulate_theta():
x_train, x_valid, y_train, y_valid = get_siso_data(
n=1000, colored_noise=False, sigma=0.001, train_percentage=90
)
s = SimulatePolynomialNarmax(estimate_parameter=True)
# the model must be a numpy array
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
yhat, results = s.simulate(
X_train=x_train,
y_train=y_train,
X_test=x_valid,
y_test=y_valid,
model_code=model,
plot=False,
)
theta = np.array([[0.2, 0.9, 0.1]]).T
assert_almost_equal(s.theta, theta, decimal=1)
def test_estimate_parameter():
assert_raises(TypeError, SimulatePolynomialNarmax, estimmate_parameter=1)
|
[
"numpy.testing.assert_raises",
"numpy.testing.assert_almost_equal",
"sysidentpy.polynomial_basis.SimulatePolynomialNarmax",
"numpy.array",
"sysidentpy.utils.generate_data.get_siso_data"
] |
[((428, 454), 'sysidentpy.polynomial_basis.SimulatePolynomialNarmax', 'SimulatePolynomialNarmax', ([], {}), '()\n', (452, 454), False, 'from sysidentpy.polynomial_basis import SimulatePolynomialNarmax\n'), ((467, 513), 'numpy.array', 'np.array', (['[[1001, 0], [2001, 1001], [2002, 0]]'], {}), '([[1001, 0], [2001, 1001], [2002, 0]])\n', (475, 513), True, 'import numpy as np\n'), ((636, 776), 'numpy.array', 'np.array', (['[[0, 0], [1001, 0], [2001, 0], [2002, 0], [1001, 1001], [2001, 1001], [2002,\n 1001], [2001, 2001], [2002, 2001], [2002, 2002]]'], {}), '([[0, 0], [1001, 0], [2001, 0], [2002, 0], [1001, 1001], [2001, \n 1001], [2002, 1001], [2001, 2001], [2002, 2001], [2002, 2002]])\n', (644, 776), True, 'import numpy as np\n'), ((1119, 1145), 'sysidentpy.polynomial_basis.SimulatePolynomialNarmax', 'SimulatePolynomialNarmax', ([], {}), '()\n', (1143, 1145), False, 'from sysidentpy.polynomial_basis import SimulatePolynomialNarmax\n'), ((1158, 1204), 'numpy.array', 'np.array', (['[[1001, 0], [2001, 1001], [2002, 0]]'], {}), '([[1001, 0], [2001, 1001], [2002, 0]])\n', (1166, 1204), True, 'import numpy as np\n'), ((1450, 1476), 'sysidentpy.polynomial_basis.SimulatePolynomialNarmax', 'SimulatePolynomialNarmax', ([], {}), '()\n', (1474, 1476), False, 'from sysidentpy.polynomial_basis import SimulatePolynomialNarmax\n'), ((1489, 1535), 'numpy.array', 'np.array', (['[[1001, 0], [2001, 1001], [2002, 0]]'], {}), '([[1001, 0], [2001, 1001], [2002, 0]])\n', (1497, 1535), True, 'import numpy as np\n'), ((1787, 1813), 'sysidentpy.polynomial_basis.SimulatePolynomialNarmax', 'SimulatePolynomialNarmax', ([], {}), '()\n', (1811, 1813), False, 'from sysidentpy.polynomial_basis import SimulatePolynomialNarmax\n'), ((1836, 1858), 'numpy.array', 'np.array', (['[2001, 2002]'], {}), '([2001, 2002])\n', (1844, 1858), True, 'import numpy as np\n'), ((1881, 1903), 'numpy.array', 'np.array', (['[1004, 1002]'], {}), '([1004, 1002])\n', (1889, 1903), True, 'import numpy as np\n'), ((2145, 2221), 'sysidentpy.utils.generate_data.get_siso_data', 'get_siso_data', ([], {'n': '(1000)', 'colored_noise': '(False)', 'sigma': '(0.001)', 'train_percentage': '(90)'}), '(n=1000, colored_noise=False, sigma=0.001, train_percentage=90)\n', (2158, 2221), False, 'from sysidentpy.utils.generate_data import get_miso_data, get_siso_data\n'), ((2245, 2271), 'sysidentpy.polynomial_basis.SimulatePolynomialNarmax', 'SimulatePolynomialNarmax', ([], {}), '()\n', (2269, 2271), False, 'from sysidentpy.polynomial_basis import SimulatePolynomialNarmax\n'), ((2323, 2369), 'numpy.array', 'np.array', (['[[1001, 0], [2001, 1001], [2002, 0]]'], {}), '([[1001, 0], [2001, 1001], [2002, 0]])\n', (2331, 2369), True, 'import numpy as np\n'), ((2851, 2927), 'sysidentpy.utils.generate_data.get_siso_data', 'get_siso_data', ([], {'n': '(1000)', 'colored_noise': '(False)', 'sigma': '(0.001)', 'train_percentage': '(90)'}), '(n=1000, colored_noise=False, sigma=0.001, train_percentage=90)\n', (2864, 2927), False, 'from sysidentpy.utils.generate_data import get_miso_data, get_siso_data\n'), ((2951, 3000), 'sysidentpy.polynomial_basis.SimulatePolynomialNarmax', 'SimulatePolynomialNarmax', ([], {'estimate_parameter': '(True)'}), '(estimate_parameter=True)\n', (2975, 3000), False, 'from sysidentpy.polynomial_basis import SimulatePolynomialNarmax\n'), ((3052, 3098), 'numpy.array', 'np.array', (['[[1001, 0], [2001, 1001], [2002, 0]]'], {}), '([[1001, 0], [2001, 1001], [2002, 0]])\n', (3060, 3098), True, 'import numpy as np\n'), ((3427, 3473), 
'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['s.theta', 'theta'], {'decimal': '(1)'}), '(s.theta, theta, decimal=1)\n', (3446, 3473), False, 'from numpy.testing import assert_almost_equal, assert_array_equal\n'), ((3511, 3584), 'numpy.testing.assert_raises', 'assert_raises', (['TypeError', 'SimulatePolynomialNarmax'], {'estimmate_parameter': '(1)'}), '(TypeError, SimulatePolynomialNarmax, estimmate_parameter=1)\n', (3524, 3584), False, 'from numpy.testing import assert_raises\n'), ((2567, 2594), 'numpy.array', 'np.array', (['[[0.2, 0.9, 0.1]]'], {}), '([[0.2, 0.9, 0.1]])\n', (2575, 2594), True, 'import numpy as np\n'), ((3393, 3420), 'numpy.array', 'np.array', (['[[0.2, 0.9, 0.1]]'], {}), '([[0.2, 0.9, 0.1]])\n', (3401, 3420), True, 'import numpy as np\n'), ((1048, 1067), 'numpy.array', 'np.array', (['[1, 3, 5]'], {}), '([1, 3, 5])\n', (1056, 1067), True, 'import numpy as np\n'), ((1377, 1399), 'numpy.array', 'np.array', (['[1001, 1001]'], {}), '([1001, 1001])\n', (1385, 1399), True, 'import numpy as np\n'), ((1707, 1729), 'numpy.array', 'np.array', (['[2001, 2002]'], {}), '([2001, 2002])\n', (1715, 1729), True, 'import numpy as np\n')]
|
# Generated by Django 2.0.3 on 2018-05-14 04:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('chat', '0004_auto_20180319_1220'),
]
operations = [
migrations.AddField(
model_name='chat',
name='product_seller',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='product_seller', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='chat',
name='product_wanting_user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='product_wanting_user', to=settings.AUTH_USER_MODEL),
),
]
|
[
"django.db.models.ForeignKey",
"django.db.migrations.swappable_dependency"
] |
[((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((470, 608), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""product_seller"""', 'to': 'settings.AUTH_USER_MODEL'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n related_name='product_seller', to=settings.AUTH_USER_MODEL)\n", (487, 608), False, 'from django.db import migrations, models\n'), ((736, 880), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""product_wanting_user"""', 'to': 'settings.AUTH_USER_MODEL'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n related_name='product_wanting_user', to=settings.AUTH_USER_MODEL)\n", (753, 880), False, 'from django.db import migrations, models\n')]
|
"""
Unit and regression test for the tau_screened_coulomb method.
"""
from ThermoElectric import tau_screened_coulomb
import numpy as np
from pytest import approx
def test_tau_screened_coulomb():
energy = np.array([[0.1]])
e_eff_mass = np.array([[0.23 * 9.109e-31]])
dielectric = 11.7
imp = np.array([[1e23]])
screen_len = np.array([[1e-7]])
expected_tau = 1.8e-10
calculated_tau = tau_screened_coulomb(energy=energy, mass_c=e_eff_mass,
                                          n_imp=imp, dielectric=dielectric, screen_len=screen_len)
assert approx(expected_tau, abs=1e-11) == calculated_tau
|
[
"pytest.approx",
"numpy.array",
"ThermoElectric.tau_screened_coulomb"
] |
[((213, 230), 'numpy.array', 'np.array', (['[[0.1]]'], {}), '([[0.1]])\n', (221, 230), True, 'import numpy as np\n'), ((248, 278), 'numpy.array', 'np.array', (['[[0.23 * 9.109e-31]]'], {}), '([[0.23 * 9.109e-31]])\n', (256, 278), True, 'import numpy as np\n'), ((311, 330), 'numpy.array', 'np.array', (['[[1e+23]]'], {}), '([[1e+23]])\n', (319, 330), True, 'import numpy as np\n'), ((347, 366), 'numpy.array', 'np.array', (['[[1e-07]]'], {}), '([[1e-07]])\n', (355, 366), True, 'import numpy as np\n'), ((415, 530), 'ThermoElectric.tau_screened_coulomb', 'tau_screened_coulomb', ([], {'energy': 'energy', 'mass_c': 'e_eff_mass', 'n_imp': 'imp', 'dielectric': 'dielectric', 'screen_len': 'screen_len'}), '(energy=energy, mass_c=e_eff_mass, n_imp=imp,\n dielectric=dielectric, screen_len=screen_len)\n', (435, 530), False, 'from ThermoElectric import tau_screened_coulomb\n'), ((583, 614), 'pytest.approx', 'approx', (['expected_tau'], {'abs': '(1e-11)'}), '(expected_tau, abs=1e-11)\n', (589, 614), False, 'from pytest import approx\n')]
|
import word2vec
from config import *
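# Pipeline overview (comments added for clarity): build a phrase-joined corpus
# from the raw input, train word vectors on the phrase file, then cluster the
# vocabulary of the raw corpus. The filename_* variables are expected to come
# from config via the star-import above.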
word2vec.word2phrase(filename_start, filename_phrases, verbose=True)
word2vec.word2vec(filename_phrases, filename_bin, size=100, verbose=True)
word2vec.word2clusters(filename_start, filename_clusters, 100, verbose=True)
|
[
"word2vec.word2vec",
"word2vec.word2clusters",
"word2vec.word2phrase"
] |
[((37, 105), 'word2vec.word2phrase', 'word2vec.word2phrase', (['filename_start', 'filename_phrases'], {'verbose': '(True)'}), '(filename_start, filename_phrases, verbose=True)\n', (57, 105), False, 'import word2vec\n'), ((106, 179), 'word2vec.word2vec', 'word2vec.word2vec', (['filename_phrases', 'filename_bin'], {'size': '(100)', 'verbose': '(True)'}), '(filename_phrases, filename_bin, size=100, verbose=True)\n', (123, 179), False, 'import word2vec\n'), ((180, 256), 'word2vec.word2clusters', 'word2vec.word2clusters', (['filename_start', 'filename_clusters', '(100)'], {'verbose': '(True)'}), '(filename_start, filename_clusters, 100, verbose=True)\n', (202, 256), False, 'import word2vec\n')]
|
import json
import pytz
import datetime
from cwm_worker_operator import metrics_updater
from cwm_worker_operator import common
from .mocks.metrics import MockMetricsUpdaterMetrics
def iterate_redis_pools(dc):
for pool in ['ingress', 'internal', 'metrics']:
with getattr(dc, 'get_{}_redis'.format(pool))() as r:
yield r
def delete_all_redis_pools_keys(dc):
for r in iterate_redis_pools(dc):
for key in r.keys("*"):
r.delete(key)
def test_update_agg_metrics():
agg_metrics = {}
now = datetime.datetime(2020, 11, 5, 3, 0).astimezone(pytz.UTC)
metrics_updater.update_agg_metrics(agg_metrics, now, {}, limit=2)
assert agg_metrics == {
'lu': now.strftime("%Y%m%d%H%M%S"),
'm': [{'t': now.strftime("%Y%m%d%H%M%S")}]
}
for i in range(5):
now = now + datetime.timedelta(minutes=1)
metrics_updater.update_agg_metrics(agg_metrics, now, {}, limit=2)
assert agg_metrics == {
'lu': now.strftime("%Y%m%d%H%M%S"),
'm': [{'t': (now - datetime.timedelta(minutes=1)).strftime("%Y%m%d%H%M%S")},
{'t': now.strftime("%Y%m%d%H%M%S")}]
}
def test_update_release_metrics(domains_config, deployments_manager):
worker_id = 'worker1'
namespace_name = common.get_namespace_name_from_worker_id(worker_id)
aggregated_metrics_key = 'worker:aggregated-metrics:{}'.format(worker_id)
minio_metrics_base_key = 'deploymentid:minio-metrics:{}:'.format(namespace_name)
metrics_updater_metrics = MockMetricsUpdaterMetrics()
deployments_manager.prometheus_metrics[namespace_name] = {}
deployments_manager.kube_metrics[namespace_name] = {
'ram_limit_bytes': 0,
'ram_requests_bytes': 0
}
now = datetime.datetime(2020, 1, 5, 4, 3, 2).astimezone(pytz.UTC)
delete_all_redis_pools_keys(domains_config)
domains_config._set_mock_volume_config(worker_id)
# no aggregated metrics, no current metrics - aggregated metrics are updated with empty metrics for current minute
metrics_updater.update_release_metrics(domains_config, deployments_manager, metrics_updater_metrics, namespace_name, now=now, update_interval_seconds=59)
assert json.loads(domains_config.keys.worker_aggregated_metrics.get(worker_id)) == {
'lu': now.strftime("%Y%m%d%H%M%S"),
'm': [{'t': now.strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0}]
}
# fast forward 61 seconds, another empty current metric is recorded in aggregated metrics
now = now + datetime.timedelta(seconds=61)
metrics_updater.update_release_metrics(domains_config, deployments_manager, metrics_updater_metrics, namespace_name, now=now, update_interval_seconds=59)
assert json.loads(domains_config.keys.worker_aggregated_metrics.get(worker_id)) == {
'lu': now.strftime("%Y%m%d%H%M%S"),
'm': [{'t': (now-datetime.timedelta(seconds=61)).strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0},
{'t': now.strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0}]
}
# clear all keys and set some current metrics (cpu and ram) - they are added to aggregated metrics
with domains_config.get_internal_redis() as r:
[r.delete(key) for key in [aggregated_metrics_key]]
with domains_config.get_metrics_redis() as r:
[r.delete(key) for key in r.keys(minio_metrics_base_key + '*')]
r.set(minio_metrics_base_key+'cpu', '500')
r.set(minio_metrics_base_key+'ram', '700.5')
now = now + datetime.timedelta(seconds=61)
metrics_updater.update_release_metrics(domains_config, deployments_manager, metrics_updater_metrics, namespace_name, now=now, update_interval_seconds=59)
assert json.loads(domains_config.keys.worker_aggregated_metrics.get(worker_id)) == {
'lu': now.strftime("%Y%m%d%H%M%S"),
'm': [{'t': now.strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0, 'cpu': 500, 'ram': 700.5}]
}
# set different current metrics and fast-forward 61 seconds - they are appended to the aggregated metrics
# in this case we also set the cpu and ram in different buckets which are also summed as all metrics for each bucket are summed
# we also add some prometheus metrics this time
deployments_manager.prometheus_metrics[namespace_name] = {
'cpu_seconds': '1234',
'ram_bytes': '5678'
}
with domains_config.get_metrics_redis() as r:
r.set(minio_metrics_base_key + 'cpu', '600')
r.set(minio_metrics_base_key + 'ram', '800.5')
now = now + datetime.timedelta(seconds=61)
metrics_updater.update_release_metrics(domains_config, deployments_manager, metrics_updater_metrics, namespace_name, now=now, update_interval_seconds=59)
assert json.loads(domains_config.keys.worker_aggregated_metrics.get(worker_id)) == {
'lu': now.strftime("%Y%m%d%H%M%S"),
'm': [
{'t': (now-datetime.timedelta(seconds=61)).strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0, 'cpu': 500.0, 'ram': 700.5},
{
't': now.strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0, 'cpu': 600.0, 'ram': 800.5,
'cpu_seconds': '1234', 'ram_bytes': '5678'
}
]
}
# fast forward 50 seconds (less than 1 minute), aggregated metrics are not updated
now = now + datetime.timedelta(seconds=50)
metrics_updater.update_release_metrics(domains_config, deployments_manager, metrics_updater_metrics, namespace_name, now=now, update_interval_seconds=59)
assert json.loads(domains_config.keys.worker_aggregated_metrics.get(worker_id)) == {
'lu': (now - datetime.timedelta(seconds=50)).strftime("%Y%m%d%H%M%S"),
'm': [
{'t': (now - datetime.timedelta(seconds=50+61)).strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0, 'cpu': 500.0, 'ram': 700.5},
{
't': (now - datetime.timedelta(seconds=50)).strftime("%Y%m%d%H%M%S"), 'disk_usage_bytes': 0, 'ram_limit_bytes': 0, 'ram_requests_bytes': 0, 'cpu': 600.0, 'ram': 800.5,
'cpu_seconds': '1234', 'ram_bytes': '5678'
}
]
}
|
[
"cwm_worker_operator.metrics_updater.update_release_metrics",
"datetime.datetime",
"datetime.timedelta",
"cwm_worker_operator.metrics_updater.update_agg_metrics",
"cwm_worker_operator.common.get_namespace_name_from_worker_id"
] |
[((608, 673), 'cwm_worker_operator.metrics_updater.update_agg_metrics', 'metrics_updater.update_agg_metrics', (['agg_metrics', 'now', '{}'], {'limit': '(2)'}), '(agg_metrics, now, {}, limit=2)\n', (642, 673), False, 'from cwm_worker_operator import metrics_updater\n'), ((1303, 1354), 'cwm_worker_operator.common.get_namespace_name_from_worker_id', 'common.get_namespace_name_from_worker_id', (['worker_id'], {}), '(worker_id)\n', (1343, 1354), False, 'from cwm_worker_operator import common\n'), ((2061, 2222), 'cwm_worker_operator.metrics_updater.update_release_metrics', 'metrics_updater.update_release_metrics', (['domains_config', 'deployments_manager', 'metrics_updater_metrics', 'namespace_name'], {'now': 'now', 'update_interval_seconds': '(59)'}), '(domains_config, deployments_manager,\n metrics_updater_metrics, namespace_name, now=now,\n update_interval_seconds=59)\n', (2099, 2222), False, 'from cwm_worker_operator import metrics_updater\n'), ((2621, 2782), 'cwm_worker_operator.metrics_updater.update_release_metrics', 'metrics_updater.update_release_metrics', (['domains_config', 'deployments_manager', 'metrics_updater_metrics', 'namespace_name'], {'now': 'now', 'update_interval_seconds': '(59)'}), '(domains_config, deployments_manager,\n metrics_updater_metrics, namespace_name, now=now,\n update_interval_seconds=59)\n', (2659, 2782), False, 'from cwm_worker_operator import metrics_updater\n'), ((3681, 3842), 'cwm_worker_operator.metrics_updater.update_release_metrics', 'metrics_updater.update_release_metrics', (['domains_config', 'deployments_manager', 'metrics_updater_metrics', 'namespace_name'], {'now': 'now', 'update_interval_seconds': '(59)'}), '(domains_config, deployments_manager,\n metrics_updater_metrics, namespace_name, now=now,\n update_interval_seconds=59)\n', (3719, 3842), False, 'from cwm_worker_operator import metrics_updater\n'), ((4753, 4914), 'cwm_worker_operator.metrics_updater.update_release_metrics', 'metrics_updater.update_release_metrics', (['domains_config', 'deployments_manager', 'metrics_updater_metrics', 'namespace_name'], {'now': 'now', 'update_interval_seconds': '(59)'}), '(domains_config, deployments_manager,\n metrics_updater_metrics, namespace_name, now=now,\n update_interval_seconds=59)\n', (4791, 4914), False, 'from cwm_worker_operator import metrics_updater\n'), ((5626, 5787), 'cwm_worker_operator.metrics_updater.update_release_metrics', 'metrics_updater.update_release_metrics', (['domains_config', 'deployments_manager', 'metrics_updater_metrics', 'namespace_name'], {'now': 'now', 'update_interval_seconds': '(59)'}), '(domains_config, deployments_manager,\n metrics_updater_metrics, namespace_name, now=now,\n update_interval_seconds=59)\n', (5664, 5787), False, 'from cwm_worker_operator import metrics_updater\n'), ((884, 949), 'cwm_worker_operator.metrics_updater.update_agg_metrics', 'metrics_updater.update_agg_metrics', (['agg_metrics', 'now', '{}'], {'limit': '(2)'}), '(agg_metrics, now, {}, limit=2)\n', (918, 949), False, 'from cwm_worker_operator import metrics_updater\n'), ((2586, 2616), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(61)'}), '(seconds=61)\n', (2604, 2616), False, 'import datetime\n'), ((3646, 3676), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(61)'}), '(seconds=61)\n', (3664, 3676), False, 'import datetime\n'), ((4718, 4748), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(61)'}), '(seconds=61)\n', (4736, 4748), False, 'import datetime\n'), ((5591, 5621), 'datetime.timedelta', 
'datetime.timedelta', ([], {'seconds': '(50)'}), '(seconds=50)\n', (5609, 5621), False, 'import datetime\n'), ((546, 582), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(11)', '(5)', '(3)', '(0)'], {}), '(2020, 11, 5, 3, 0)\n', (563, 582), False, 'import datetime\n'), ((846, 875), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (864, 875), False, 'import datetime\n'), ((1775, 1813), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(5)', '(4)', '(3)', '(2)'], {}), '(2020, 1, 5, 4, 3, 2)\n', (1792, 1813), False, 'import datetime\n'), ((5890, 5920), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(50)'}), '(seconds=50)\n', (5908, 5920), False, 'import datetime\n'), ((2933, 2963), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(61)'}), '(seconds=61)\n', (2951, 2963), False, 'import datetime\n'), ((5078, 5108), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(61)'}), '(seconds=61)\n', (5096, 5108), False, 'import datetime\n'), ((5988, 6023), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(50 + 61)'}), '(seconds=50 + 61)\n', (6006, 6023), False, 'import datetime\n'), ((6190, 6220), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(50)'}), '(seconds=50)\n', (6208, 6220), False, 'import datetime\n'), ((1061, 1090), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (1079, 1090), False, 'import datetime\n')]
|
from rest_framework import mixins
from rest_framework.viewsets import ReadOnlyModelViewSet, GenericViewSet
from rules.contrib.rest_framework import AutoPermissionViewSetMixin
from .serializers import NewsSerializer
from myapp.models import DrfNews
class NewsReadOnlyModelViewSet(AutoPermissionViewSetMixin, ReadOnlyModelViewSet):
queryset = DrfNews.objects.all()
serializer_class = NewsSerializer
class NewsRetrieveModelViewSet(AutoPermissionViewSetMixin,
mixins.RetrieveModelMixin,
GenericViewSet):
queryset = DrfNews.objects.all()
serializer_class = NewsSerializer
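# Illustrative wiring (not part of the original module): these viewsets would
# typically be exposed through a DRF router; the URL prefixes and basenames
# below are assumptions.
#
# from rest_framework.routers import DefaultRouter
# router = DefaultRouter()
# router.register(r'news', NewsReadOnlyModelViewSet, basename='drf-news')
# router.register(r'news-retrieve', NewsRetrieveModelViewSet,
#                 basename='drf-news-retrieve')
# urlpatterns = router.urls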
|
[
"myapp.models.DrfNews.objects.all"
] |
[((348, 369), 'myapp.models.DrfNews.objects.all', 'DrfNews.objects.all', ([], {}), '()\n', (367, 369), False, 'from myapp.models import DrfNews\n'), ((590, 611), 'myapp.models.DrfNews.objects.all', 'DrfNews.objects.all', ([], {}), '()\n', (609, 611), False, 'from myapp.models import DrfNews\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches
def ecg_fixpeaks(rpeaks, sampling_rate=1000, iterative=True, show=False):
"""Correct R-peaks location based on their interval (RRi).
Identify erroneous inter-beat-intervals. Lipponen & Tarvainen (2019).
Parameters
----------
rpeaks : dict
The samples at which the R-peak occur. Dict returned by
`ecg_findpeaks()`.
sampling_rate : int
The sampling frequency of the signal that contains the peaks (in Hz,
i.e., samples/second).
iterative : bool
Whether or not to apply the artifact correction repeatedly (results
in superior artifact correction).
show : bool
Whether or not to visualize artifacts and artifact thresholds.
Returns
-------
artifacts : dict
A dictionary containing the indices of artifacts, accessible with the
keys "ectopic", "missed", "extra", and "longshort".
See Also
--------
ecg_clean, ecg_findpeaks, ecg_peaks, ecg_rate, ecg_process, ecg_plot
Examples
--------
>>> import neurokit2 as nk
>>> import matplotlib.pyplot as plt
>>> ecg = nk.ecg_simulate(duration=240, noise=0.1, heart_rate=70,
>>> random_state=41)
>>> rpeaks_uncorrected = nk.ecg_findpeaks(ecg)
>>> artifacts, rpeaks_corrected = nk.ecg_fixpeaks(rpeaks_uncorrected,
>>> iterative=True,
>>> show=True)
    >>> rate_corrected = nk.ecg_rate(rpeaks_corrected,
    >>>                             desired_length=len(ecg))
    >>> rate_uncorrected = nk.ecg_rate(rpeaks_uncorrected, desired_length=len(ecg))
>>>
>>> fig, ax = plt.subplots()
>>> ax.plot(rate_uncorrected, label="heart rate without artifact correction")
>>> ax.plot(rate_corrected, label="heart rate with artifact correction")
>>> ax.legend(loc="upper right")
References
----------
- <NAME>., & <NAME>. (2019). A robust algorithm for heart
rate variability time series artefact correction using novel beat
classification. Journal of medical engineering & technology, 43(3),
173-181. 10.1080/03091902.2019.1640306
"""
# Format input.
rpeaks = rpeaks["ECG_R_Peaks"]
# Get corrected peaks and normal-to-normal intervals.
artifacts, subspaces = _find_artifacts(rpeaks, sampling_rate=sampling_rate)
peaks_clean = _correct_artifacts(artifacts, rpeaks)
if iterative:
# Iteratively apply the artifact correction until the number of artifact
# reaches an equilibrium (i.e., the number of artifacts does not change
# anymore from one iteration to the next).
n_artifacts_previous = np.inf
n_artifacts_current = sum([len(i) for i in artifacts.values()])
previous_diff = 0
while n_artifacts_current - n_artifacts_previous != previous_diff:
previous_diff = n_artifacts_previous - n_artifacts_current
artifacts, subspaces = _find_artifacts(peaks_clean,
sampling_rate=sampling_rate)
peaks_clean = _correct_artifacts(artifacts, peaks_clean)
n_artifacts_previous = n_artifacts_current
n_artifacts_current = sum([len(i) for i in artifacts.values()])
if show:
_plot_artifacts_lipponen2019(artifacts, subspaces)
return artifacts, {"ECG_R_Peaks": peaks_clean}
# =============================================================================
# Lipponen & Tarvainen (2019).
# =============================================================================
def _find_artifacts(rpeaks, c1=0.13, c2=0.17, alpha=5.2, window_width=91,
medfilt_order=11, sampling_rate=1000):
peaks = np.ravel(rpeaks)
# Compute period series (make sure it has same numer of elements as peaks);
# peaks are in samples, convert to seconds.
rr = np.ediff1d(peaks, to_begin=0) / sampling_rate
# For subsequent analysis it is important that the first element has
# a value in a realistic range (e.g., for median filtering).
rr[0] = np.mean(rr[1:])
# Artifact identification #################################################
###########################################################################
# Compute dRRs: time series of differences of consecutive periods (dRRs).
drrs = np.ediff1d(rr, to_begin=0)
drrs[0] = np.mean(drrs[1:])
# Normalize by threshold.
th1 = _compute_threshold(drrs, alpha, window_width)
drrs /= th1
# Cast dRRs to subspace s12.
# Pad drrs with one element.
padding = 2
drrs_pad = np.pad(drrs, padding, "reflect")
s12 = np.zeros(drrs.size)
for d in np.arange(padding, padding + drrs.size):
if drrs_pad[d] > 0:
s12[d - padding] = np.max([drrs_pad[d - 1], drrs_pad[d + 1]])
elif drrs_pad[d] < 0:
s12[d - padding] = np.min([drrs_pad[d - 1], drrs_pad[d + 1]])
# Cast dRRs to subspace s22.
s22 = np.zeros(drrs.size)
for d in np.arange(padding, padding + drrs.size):
if drrs_pad[d] >= 0:
s22[d - padding] = np.min([drrs_pad[d + 1], drrs_pad[d + 2]])
elif drrs_pad[d] < 0:
s22[d - padding] = np.max([drrs_pad[d + 1], drrs_pad[d + 2]])
# Compute mRRs: time series of deviation of RRs from median.
df = pd.DataFrame({'signal': rr})
medrr = df.rolling(medfilt_order, center=True,
min_periods=1).median().signal.to_numpy()
mrrs = rr - medrr
mrrs[mrrs < 0] = mrrs[mrrs < 0] * 2
# Normalize by threshold.
th2 = _compute_threshold(mrrs, alpha, window_width)
mrrs /= th2
# Artifact classification #################################################
###########################################################################
# Artifact classes.
extra_idcs = []
missed_idcs = []
ectopic_idcs = []
longshort_idcs = []
i = 0
while i < rr.size - 2: # The flow control is implemented based on Figure 1
if np.abs(drrs[i]) <= 1: # Figure 1
i += 1
continue
eq1 = np.logical_and(drrs[i] > 1, s12[i] < (-c1 * drrs[i] - c2)) # Figure 2a
eq2 = np.logical_and(drrs[i] < -1, s12[i] > (-c1 * drrs[i] + c2)) # Figure 2a
if np.any([eq1, eq2]):
# If any of the two equations is true.
ectopic_idcs.append(i)
i += 1
continue
# If none of the two equations is true.
if ~np.any([np.abs(drrs[i]) > 1, np.abs(mrrs[i]) > 3]): # Figure 1
i += 1
continue
longshort_candidates = [i]
# Check if the following beat also needs to be evaluated.
if np.abs(drrs[i + 1]) < np.abs(drrs[i + 2]):
longshort_candidates.append(i + 1)
for j in longshort_candidates:
# Long beat.
eq3 = np.logical_and(drrs[j] > 1, s22[j] < -1) # Figure 2b
# Long or short.
eq4 = np.abs(mrrs[j]) > 3 # Figure 1
# Short beat.
eq5 = np.logical_and(drrs[j] < -1, s22[j] > 1) # Figure 2b
if ~np.any([eq3, eq4, eq5]):
# If none of the three equations is true: normal beat.
i += 1
continue
# If any of the three equations is true: check for missing or extra
# peaks.
# Missing.
eq6 = np.abs(rr[j] / 2 - medrr[j]) < th2[j] # Figure 1
# Extra.
eq7 = np.abs(rr[j] + rr[j + 1] - medrr[j]) < th2[j] # Figure 1
# Check if extra.
if np.all([eq5, eq7]):
extra_idcs.append(j)
i += 1
continue
# Check if missing.
if np.all([eq3, eq6]):
missed_idcs.append(j)
i += 1
continue
# If neither classified as extra or missing, classify as "long or
# short".
longshort_idcs.append(j)
i += 1
# Prepare output
artifacts = {"ectopic": ectopic_idcs, "missed": missed_idcs,
"extra": extra_idcs, "longshort": longshort_idcs}
subspaces = {"rr": rr, "drrs": drrs, "mrrs": mrrs, "s12": s12, "s22": s22,
"c1": c1, "c2": c2}
return artifacts, subspaces
def _compute_threshold(signal, alpha, window_width):
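    # Comment added for clarity: the threshold is alpha times half the
    # interquartile range of |signal|, computed over a centered rolling window
    # of window_width samples.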
df = pd.DataFrame({'signal': np.abs(signal)})
q1 = df.rolling(window_width, center=True,
min_periods=1).quantile(.25).signal.to_numpy()
q3 = df.rolling(window_width, center=True,
min_periods=1).quantile(.75).signal.to_numpy()
th = alpha * ((q3 - q1) / 2)
return th
def _correct_artifacts(artifacts, peaks):
# Artifact correction
#####################
# The integrity of indices must be maintained if peaks are inserted or
# deleted: for each deleted beat, decrease indices following that beat in
# all other index lists by 1. Likewise, for each added beat, increment the
# indices following that beat in all other lists by 1.
extra_idcs = artifacts["extra"]
missed_idcs = artifacts["missed"]
ectopic_idcs = artifacts["ectopic"]
longshort_idcs = artifacts["longshort"]
# Delete extra peaks.
if extra_idcs:
peaks = _correct_extra(extra_idcs, peaks)
# Update remaining indices.
missed_idcs = _update_indices(extra_idcs, missed_idcs, -1)
ectopic_idcs = _update_indices(extra_idcs, ectopic_idcs, -1)
longshort_idcs = _update_indices(extra_idcs, longshort_idcs, -1)
# Add missing peaks.
if missed_idcs:
peaks = _correct_missed(missed_idcs, peaks)
# Update remaining indices.
ectopic_idcs = _update_indices(missed_idcs, ectopic_idcs, 1)
longshort_idcs = _update_indices(missed_idcs, longshort_idcs, 1)
if ectopic_idcs:
peaks = _correct_misaligned(ectopic_idcs, peaks)
if longshort_idcs:
peaks = _correct_misaligned(longshort_idcs, peaks)
return peaks
def _correct_extra(extra_idcs, peaks):
corrected_peaks = peaks.copy()
corrected_peaks = np.delete(corrected_peaks, extra_idcs)
return corrected_peaks
def _correct_missed(missed_idcs, peaks):
corrected_peaks = peaks.copy()
missed_idcs = np.array(missed_idcs)
# Calculate the position(s) of new beat(s). Make sure to not generate
# negative indices. prev_peaks and next_peaks must have the same
# number of elements.
valid_idcs = np.logical_and(missed_idcs > 1,
missed_idcs < len(corrected_peaks))
missed_idcs = missed_idcs[valid_idcs]
prev_peaks = corrected_peaks[[i - 1 for i in missed_idcs]]
next_peaks = corrected_peaks[missed_idcs]
added_peaks = prev_peaks + (next_peaks - prev_peaks) / 2
# Add the new peaks before the missed indices (see numpy docs).
corrected_peaks = np.insert(corrected_peaks, missed_idcs, added_peaks)
return corrected_peaks
def _correct_misaligned(misaligned_idcs, peaks):
corrected_peaks = peaks.copy()
misaligned_idcs = np.array(misaligned_idcs)
# Make sure to not generate negative indices, or indices that exceed
# the total number of peaks. prev_peaks and next_peaks must have the
# same number of elements.
valid_idcs = np.logical_and(misaligned_idcs > 1,
misaligned_idcs < len(corrected_peaks))
misaligned_idcs = misaligned_idcs[valid_idcs]
prev_peaks = corrected_peaks[[i - 1 for i in misaligned_idcs]]
next_peaks = corrected_peaks[[i + 1 for i in misaligned_idcs]]
half_ibi = (next_peaks - prev_peaks) / 2
peaks_interp = prev_peaks + half_ibi
# Shift the R-peaks from the old to the new position.
corrected_peaks = np.delete(corrected_peaks, misaligned_idcs)
corrected_peaks = np.concatenate((corrected_peaks,
peaks_interp)).astype(int)
corrected_peaks.sort(kind="mergesort")
return corrected_peaks
def _update_indices(source_idcs, update_idcs, update):
"""
For every element s in source_idcs, change every element u in update_idcs
according to update, if u is larger than s.
"""
if not update_idcs:
return update_idcs
for s in source_idcs:
update_idcs = [u + update if u > s else u for u in update_idcs]
return update_idcs
def _plot_artifacts_lipponen2019(artifacts, info):
"""
"""
# Extract parameters
longshort_idcs = artifacts["longshort"]
ectopic_idcs = artifacts["ectopic"]
extra_idcs = artifacts["extra"]
missed_idcs = artifacts["missed"]
rr = info["rr"]
drrs = info["drrs"]
mrrs = info["mrrs"]
s12 = info["s12"]
s22 = info["s22"]
c1 = info["c1"]
c2 = info["c2"]
# Visualize artifact type indices.
# Set grids
gs = matplotlib.gridspec.GridSpec(ncols=4, nrows=3,
width_ratios=[1, 2, 2, 2])
fig = plt.figure(constrained_layout=False)
ax0 = fig.add_subplot(gs[0, :-2])
ax1 = fig.add_subplot(gs[1, :-2])
ax2 = fig.add_subplot(gs[2, :-2])
ax3 = fig.add_subplot(gs[:, -1])
ax4 = fig.add_subplot(gs[:, -2])
ax0.set_title("Artifact types", fontweight="bold")
ax0.plot(rr, label="heart period")
ax0.scatter(longshort_idcs, rr[longshort_idcs], marker='x', c='m',
s=100, zorder=3, label="long/short")
ax0.scatter(ectopic_idcs, rr[ectopic_idcs], marker='x', c='g', s=100,
zorder=3, label="ectopic")
ax0.scatter(extra_idcs, rr[extra_idcs], marker='x', c='y', s=100,
zorder=3, label="false positive")
ax0.scatter(missed_idcs, rr[missed_idcs], marker='x', c='r', s=100,
zorder=3, label="false negative")
ax0.legend(loc="upper right")
# Visualize first threshold.
ax1.set_title("Consecutive-difference criterion", fontweight="bold")
ax1.plot(np.abs(drrs), label="difference consecutive heart periods")
ax1.axhline(1, c='r', label="artifact threshold")
ax1.legend(loc="upper right")
# Visualize second threshold.
ax2.set_title("Difference-from-median criterion", fontweight="bold")
ax2.plot(np.abs(mrrs), label="difference from median over 11 periods")
ax2.axhline(3, c="r", label="artifact threshold")
ax2.legend(loc="upper right")
# Visualize subspaces.
ax4.set_title("Subspace 1", fontweight="bold")
ax4.set_xlabel("S11")
ax4.set_ylabel("S12")
ax4.scatter(drrs, s12, marker="x", label="heart periods")
verts0 = [(min(drrs), max(s12)),
(min(drrs), -c1 * min(drrs) + c2),
(-1, -c1 * -1 + c2),
(-1, max(s12))]
poly0 = matplotlib.patches.Polygon(verts0, alpha=0.3, facecolor="r",
edgecolor=None, label="ectopic periods")
ax4.add_patch(poly0)
verts1 = [(1, -c1 * 1 - c2),
(1, min(s12)),
(max(drrs), min(s12)),
(max(drrs), -c1 * max(drrs) - c2)]
poly1 = matplotlib.patches.Polygon(verts1, alpha=0.3, facecolor="r",
edgecolor=None)
ax4.add_patch(poly1)
ax4.legend(loc="upper right")
ax3.set_title("Subspace 2", fontweight="bold")
ax3.set_xlabel("S21")
ax3.set_ylabel("S22")
ax3.scatter(drrs, s22, marker="x", label="heart periods")
verts2 = [(min(drrs), max(s22)),
(min(drrs), 1),
(-1, 1),
(-1, max(s22))]
poly2 = matplotlib.patches.Polygon(verts2, alpha=0.3, facecolor="r",
edgecolor=None, label="short periods")
ax3.add_patch(poly2)
verts3 = [(1, -1),
(1, min(s22)),
(max(drrs), min(s22)),
(max(drrs), -1)]
poly3 = matplotlib.patches.Polygon(verts3, alpha=0.3, facecolor="y",
edgecolor=None, label="long periods")
ax3.add_patch(poly3)
ax3.legend(loc="upper right")
|
[
"numpy.pad",
"pandas.DataFrame",
"numpy.abs",
"numpy.concatenate",
"numpy.logical_and",
"numpy.ravel",
"numpy.zeros",
"numpy.insert",
"numpy.any",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"numpy.array",
"numpy.max",
"numpy.min",
"numpy.delete",
"numpy.all",
"numpy.ediff1d"
] |
[((3895, 3911), 'numpy.ravel', 'np.ravel', (['rpeaks'], {}), '(rpeaks)\n', (3903, 3911), True, 'import numpy as np\n'), ((4246, 4261), 'numpy.mean', 'np.mean', (['rr[1:]'], {}), '(rr[1:])\n', (4253, 4261), True, 'import numpy as np\n'), ((4513, 4539), 'numpy.ediff1d', 'np.ediff1d', (['rr'], {'to_begin': '(0)'}), '(rr, to_begin=0)\n', (4523, 4539), True, 'import numpy as np\n'), ((4554, 4571), 'numpy.mean', 'np.mean', (['drrs[1:]'], {}), '(drrs[1:])\n', (4561, 4571), True, 'import numpy as np\n'), ((4772, 4804), 'numpy.pad', 'np.pad', (['drrs', 'padding', '"""reflect"""'], {}), "(drrs, padding, 'reflect')\n", (4778, 4804), True, 'import numpy as np\n'), ((4816, 4835), 'numpy.zeros', 'np.zeros', (['drrs.size'], {}), '(drrs.size)\n', (4824, 4835), True, 'import numpy as np\n'), ((4849, 4888), 'numpy.arange', 'np.arange', (['padding', '(padding + drrs.size)'], {}), '(padding, padding + drrs.size)\n', (4858, 4888), True, 'import numpy as np\n'), ((5141, 5160), 'numpy.zeros', 'np.zeros', (['drrs.size'], {}), '(drrs.size)\n', (5149, 5160), True, 'import numpy as np\n'), ((5174, 5213), 'numpy.arange', 'np.arange', (['padding', '(padding + drrs.size)'], {}), '(padding, padding + drrs.size)\n', (5183, 5213), True, 'import numpy as np\n'), ((5498, 5526), 'pandas.DataFrame', 'pd.DataFrame', (["{'signal': rr}"], {}), "({'signal': rr})\n", (5510, 5526), True, 'import pandas as pd\n'), ((10327, 10365), 'numpy.delete', 'np.delete', (['corrected_peaks', 'extra_idcs'], {}), '(corrected_peaks, extra_idcs)\n', (10336, 10365), True, 'import numpy as np\n'), ((10491, 10512), 'numpy.array', 'np.array', (['missed_idcs'], {}), '(missed_idcs)\n', (10499, 10512), True, 'import numpy as np\n'), ((11101, 11153), 'numpy.insert', 'np.insert', (['corrected_peaks', 'missed_idcs', 'added_peaks'], {}), '(corrected_peaks, missed_idcs, added_peaks)\n', (11110, 11153), True, 'import numpy as np\n'), ((11291, 11316), 'numpy.array', 'np.array', (['misaligned_idcs'], {}), '(misaligned_idcs)\n', (11299, 11316), True, 'import numpy as np\n'), ((11969, 12012), 'numpy.delete', 'np.delete', (['corrected_peaks', 'misaligned_idcs'], {}), '(corrected_peaks, misaligned_idcs)\n', (11978, 12012), True, 'import numpy as np\n'), ((13170, 13206), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(False)'}), '(constrained_layout=False)\n', (13180, 13206), True, 'import matplotlib.pyplot as plt\n'), ((4050, 4079), 'numpy.ediff1d', 'np.ediff1d', (['peaks'], {'to_begin': '(0)'}), '(peaks, to_begin=0)\n', (4060, 4079), True, 'import numpy as np\n'), ((6275, 6331), 'numpy.logical_and', 'np.logical_and', (['(drrs[i] > 1)', '(s12[i] < -c1 * drrs[i] - c2)'], {}), '(drrs[i] > 1, s12[i] < -c1 * drrs[i] - c2)\n', (6289, 6331), True, 'import numpy as np\n'), ((6363, 6420), 'numpy.logical_and', 'np.logical_and', (['(drrs[i] < -1)', '(s12[i] > -c1 * drrs[i] + c2)'], {}), '(drrs[i] < -1, s12[i] > -c1 * drrs[i] + c2)\n', (6377, 6420), True, 'import numpy as np\n'), ((6450, 6468), 'numpy.any', 'np.any', (['[eq1, eq2]'], {}), '([eq1, eq2])\n', (6456, 6468), True, 'import numpy as np\n'), ((14127, 14139), 'numpy.abs', 'np.abs', (['drrs'], {}), '(drrs)\n', (14133, 14139), True, 'import numpy as np\n'), ((14396, 14408), 'numpy.abs', 'np.abs', (['mrrs'], {}), '(mrrs)\n', (14402, 14408), True, 'import numpy as np\n'), ((4950, 4992), 'numpy.max', 'np.max', (['[drrs_pad[d - 1], drrs_pad[d + 1]]'], {}), '([drrs_pad[d - 1], drrs_pad[d + 1]])\n', (4956, 4992), True, 'import numpy as np\n'), ((5276, 5318), 'numpy.min', 'np.min', (['[drrs_pad[d + 1], 
drrs_pad[d + 2]]'], {}), '([drrs_pad[d + 1], drrs_pad[d + 2]])\n', (5282, 5318), True, 'import numpy as np\n'), ((6185, 6200), 'numpy.abs', 'np.abs', (['drrs[i]'], {}), '(drrs[i])\n', (6191, 6200), True, 'import numpy as np\n'), ((6874, 6893), 'numpy.abs', 'np.abs', (['drrs[i + 1]'], {}), '(drrs[i + 1])\n', (6880, 6893), True, 'import numpy as np\n'), ((6896, 6915), 'numpy.abs', 'np.abs', (['drrs[i + 2]'], {}), '(drrs[i + 2])\n', (6902, 6915), True, 'import numpy as np\n'), ((7047, 7087), 'numpy.logical_and', 'np.logical_and', (['(drrs[j] > 1)', '(s22[j] < -1)'], {}), '(drrs[j] > 1, s22[j] < -1)\n', (7061, 7087), True, 'import numpy as np\n'), ((7228, 7268), 'numpy.logical_and', 'np.logical_and', (['(drrs[j] < -1)', '(s22[j] > 1)'], {}), '(drrs[j] < -1, s22[j] > 1)\n', (7242, 7268), True, 'import numpy as np\n'), ((7785, 7803), 'numpy.all', 'np.all', (['[eq5, eq7]'], {}), '([eq5, eq7])\n', (7791, 7803), True, 'import numpy as np\n'), ((7937, 7955), 'numpy.all', 'np.all', (['[eq3, eq6]'], {}), '([eq3, eq6])\n', (7943, 7955), True, 'import numpy as np\n'), ((8592, 8606), 'numpy.abs', 'np.abs', (['signal'], {}), '(signal)\n', (8598, 8606), True, 'import numpy as np\n'), ((12035, 12082), 'numpy.concatenate', 'np.concatenate', (['(corrected_peaks, peaks_interp)'], {}), '((corrected_peaks, peaks_interp))\n', (12049, 12082), True, 'import numpy as np\n'), ((5054, 5096), 'numpy.min', 'np.min', (['[drrs_pad[d - 1], drrs_pad[d + 1]]'], {}), '([drrs_pad[d - 1], drrs_pad[d + 1]])\n', (5060, 5096), True, 'import numpy as np\n'), ((5380, 5422), 'numpy.max', 'np.max', (['[drrs_pad[d + 1], drrs_pad[d + 2]]'], {}), '([drrs_pad[d + 1], drrs_pad[d + 2]])\n', (5386, 5422), True, 'import numpy as np\n'), ((7150, 7165), 'numpy.abs', 'np.abs', (['mrrs[j]'], {}), '(mrrs[j])\n', (7156, 7165), True, 'import numpy as np\n'), ((7301, 7324), 'numpy.any', 'np.any', (['[eq3, eq4, eq5]'], {}), '([eq3, eq4, eq5])\n', (7307, 7324), True, 'import numpy as np\n'), ((7588, 7616), 'numpy.abs', 'np.abs', (['(rr[j] / 2 - medrr[j])'], {}), '(rr[j] / 2 - medrr[j])\n', (7594, 7616), True, 'import numpy as np\n'), ((7679, 7715), 'numpy.abs', 'np.abs', (['(rr[j] + rr[j + 1] - medrr[j])'], {}), '(rr[j] + rr[j + 1] - medrr[j])\n', (7685, 7715), True, 'import numpy as np\n'), ((6664, 6679), 'numpy.abs', 'np.abs', (['drrs[i]'], {}), '(drrs[i])\n', (6670, 6679), True, 'import numpy as np\n'), ((6685, 6700), 'numpy.abs', 'np.abs', (['mrrs[i]'], {}), '(mrrs[i])\n', (6691, 6700), True, 'import numpy as np\n')]
|
import json
from lib.Component import Component
class BlockerChecker:
def __init__(self, file: str):
self.__blocked = {}
self.__populate_blocked_list(file)
def __populate_blocked_list(self, file: str):
with open(file, 'r') as inimage:
blocked_list = json.load(inimage)
for blocked in blocked_list:
component = Component(blocked['purl'])
self.__blocked[component.get_purl()] = component
def check(self, components: list) -> list:
return [x for x in components if x.get_purl() in self.__blocked]
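# Usage sketch (hypothetical file name and purl; assumes Component exposes get_purl(),
# as used above):
#   checker = BlockerChecker("blocked.json")
#   flagged = checker.check([Component("pkg:pypi/example@1.0.0")])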
|
[
"json.load",
"lib.Component.Component"
] |
[((299, 317), 'json.load', 'json.load', (['inimage'], {}), '(inimage)\n', (308, 317), False, 'import json\n'), ((387, 413), 'lib.Component.Component', 'Component', (["blocked['purl']"], {}), "(blocked['purl'])\n", (396, 413), False, 'from lib.Component import Component\n')]
|
"""Functional tests of CLI."""
from offline_docs.cli import clean, python
def test_python():
"""Run 'python' command of CLI."""
python()
def test_clean():
"""Run 'clean' command of CLI."""
clean()
|
[
"offline_docs.cli.python",
"offline_docs.cli.clean"
] |
[((140, 148), 'offline_docs.cli.python', 'python', ([], {}), '()\n', (146, 148), False, 'from offline_docs.cli import clean, python\n'), ((211, 218), 'offline_docs.cli.clean', 'clean', ([], {}), '()\n', (216, 218), False, 'from offline_docs.cli import clean, python\n')]
|
from django.contrib.auth.models import User
from django.db import models
class Category(models.Model):
"""
Categories data model per user/blog
"""
owner = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=150)
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
class Meta:
verbose_name_plural = 'Categories'
def __str__(self):
"""
        Defines how the object is displayed in the admin panel
:return: category name
"""
return '{0}'.format(self.name)
|
[
"django.db.models.ForeignKey",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] |
[((175, 224), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (192, 224), False, 'from django.db import models\n'), ((236, 268), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (252, 268), False, 'from django.db import models\n'), ((286, 325), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (306, 325), False, 'from django.db import models\n'), ((343, 378), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (363, 378), False, 'from django.db import models\n')]
|
import socket
def sendmsg(msgFromClient):
bytesToSend = str.encode(msgFromClient)
serverAddressPort = ("127.0.0.1", 20001)
bufferSize = 5120
UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
UDPClientSocket.sendto(bytesToSend, serverAddressPort)
msgFromServer = UDPClientSocket.recvfrom(bufferSize)[0]
return msgFromServer.decode("utf-8")
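# Usage sketch (assumes a UDP server is already listening and replying on 127.0.0.1:20001):
#   print(sendmsg("hello"))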
|
[
"socket.socket"
] |
[((199, 259), 'socket.socket', 'socket.socket', ([], {'family': 'socket.AF_INET', 'type': 'socket.SOCK_DGRAM'}), '(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n', (212, 259), False, 'import socket\n')]
|
from xml.etree.ElementTree import Element, ElementTree, tostring
def create_author(autor_id, language, author_type):
    author = Element('author', id=autor_id, lang=language, type=author_type)
    return author
def write_authors():
    author1 = create_author("1", "en", "0")
    author2 = create_author("2", "en", "0")
    with open('out.xml', 'a') as f:
        f.write(tostring(author2).decode("utf-8") + "\n")
        f.write(tostring(author1).decode("utf-8") + "\n")
write_authors()
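# Each run appends two self-closing <author> elements to out.xml, e.g. roughly
# '<author id="2" lang="en" type="0" />' followed by the id="1" element.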
|
[
"xml.etree.ElementTree.Element",
"xml.etree.ElementTree.tostring"
] |
[((126, 189), 'xml.etree.ElementTree.Element', 'Element', (['"""author"""'], {'id': 'autor_id', 'lang': 'language', 'type': 'author_type'}), "('author', id=autor_id, lang=language, type=author_type)\n", (133, 189), False, 'from xml.etree.ElementTree import Element, ElementTree, tostring\n'), ((373, 390), 'xml.etree.ElementTree.tostring', 'tostring', (['author2'], {}), '(author2)\n', (381, 390), False, 'from xml.etree.ElementTree import Element, ElementTree, tostring\n'), ((431, 448), 'xml.etree.ElementTree.tostring', 'tostring', (['author1'], {}), '(author1)\n', (439, 448), False, 'from xml.etree.ElementTree import Element, ElementTree, tostring\n')]
|
import os
import boto3
import click
import json
import signal
import sys
import yaml
from botocore.exceptions import ClientError
from datetime import datetime
from typing import List
from warrant import Cognito
from superai import __version__
from superai.client import Client
from superai.config import get_config_dir, list_env_configs, set_env_config, settings
from superai.exceptions import SuperAIAuthorizationError
from superai.log import logger
from superai.utils import load_api_key, remove_aws_credentials, save_api_key, save_aws_credentials, save_cognito_user
from superai.utils.pip_config import pip_configure
from superai.meta_ai.dockerizer import build_image, push_image
from superai.meta_ai.dockerizer.sagemaker_endpoint import (
upload_model_to_s3,
invoke_sagemaker_endpoint,
create_endpoint,
invoke_local,
)
BASE_FOLDER = get_config_dir()
COGNITO_USERPOOL_ID = settings.get("cognito", {}).get("userpool_id")
COGNITO_CLIENT_ID = settings.get("cognito", {}).get("client_id")
COGNITO_REGION = settings.get("cognito", {}).get("region")
log = logger.get_logger(__name__)
def _signal_handler(s, f):
sys.exit(1)
@click.group()
def cli():
pass
@cli.command()
@click.option("--verbose/--no-verbose", "-vvv", help="Verbose output", default=False)
def info(verbose):
"""Print CLI Configuration"""
click.echo("=================")
click.echo("Super.AI CLI Info:")
click.echo("=================")
load_api_key()
click.echo(f"VERSION: {__version__}")
click.echo(f"ENVIRONMENT: {settings.current_env}")
click.echo(f"USER: {settings.get('user',{}).get('username')}")
if verbose:
click.echo(yaml.dump(settings.as_dict(env=settings.current_env), default_flow_style=False))
@cli.group()
@click.pass_context
def env(ctx):
"""
super.AI Config operations
"""
pass
@env.command(name="list")
@click.pass_context
def env_list(ctx):
"""
:param ctx:
:return:
"""
list_env_configs(printInConsole=True)
@env.command(name="set")
@click.option("--api-key", help="Your super.AI API KEY", required=False)
@click.option("--environment", "-e", help="Set environment", required=False)
@click.pass_context
def env_set(ctx, api_key, environment):
"""
Set configuration
"""
if environment:
set_env_config(name=environment)
if api_key:
save_api_key(api_key)
@cli.group()
@click.pass_context
def client(ctx):
"""
super.AI API operations
"""
api_key = ""
try:
api_key = load_api_key()
except Exception as e:
pass
if len(api_key) == 0:
print("User needs to login or set api key")
exit()
ctx.obj = {}
ctx.obj["client"] = Client(api_key=api_key)
@client.command(name="create_jobs")
@click.option("--app_id", "-a", help="Application id", required=True)
@click.option("--callback_url", "-c", help="Callback URL for post when jobs finish")
@click.option("--inputs", "-i", help="Json list with inputs")
@click.option("--inputs_file", "-if", help="URL pointing to JSON file")
@click.pass_context
def create_jobs(ctx, app_id: str, callback_url: str, inputs: str, inputs_file: str):
"""
Submit jobs
"""
client = ctx.obj["client"]
print("Submitting jobs")
json_inputs = None
if inputs is not None:
try:
json_inputs = json.loads(inputs)
except:
print("Couldn't read json inputs")
exit()
print(client.create_jobs(app_id, callback_url, json_inputs, inputs_file))
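# Example invocation (assuming the installed console script is named `superai`; the app id
# and the shape of the inputs payload are hypothetical):
#   superai client create_jobs -a <app_id> -i '[{"url": "https://example.com/1.png"}]'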
@client.command(name="fetch_job")
@click.option("--job_id", "-j", help="Job id", required=True)
@click.pass_context
def fetch_job(ctx, job_id: str):
"""
Get Job given job id
"""
client = ctx.obj["client"]
print(f"Fetching job {job_id}")
print(client.fetch_job(job_id))
@client.command(name="fetch_batches_job")
@click.option("--app_id", "-a", help="App id", required=True)
@click.pass_context
def fetch_batches_job(ctx, app_id: str):
"""
Get not processed Batches given app id
"""
client = ctx.obj["client"]
print(f"Fetching batches {app_id}")
print(client.fetch_batches_job(app_id))
@client.command(name="fetch_batch_job")
@click.option("--app_id", "-a", help="App id", required=True)
@click.option("--batch_id", "-b", help="Batch id", required=True)
@click.pass_context
def fetch_batch_job(ctx, app_id: str, batch_id: str):
"""
Get Batch given app id and batch id
"""
client = ctx.obj["client"]
print(f"Fetching batch {app_id} {batch_id}")
print(client.fetch_batch_job(app_id, batch_id))
@client.command(name="get_job_response")
@click.option("--job_id", "-j", help="Job id", required=True)
@click.pass_context
def get_job_response(ctx, job_id: str):
"""
Get Job response given job id
"""
client = ctx.obj["client"]
print(f"Getting job response {job_id}")
print(client.get_job_response(job_id))
@client.command(name="cancel_job")
@click.option("--job_id", "-j", help="Job id", required=True)
@click.pass_context
def cancel_job(ctx, job_id: str):
"""
Cancel a job given job id. Only for jobs in SCHEDULED, IN_PROGRESS or SUSPENDED state.
"""
client = ctx.obj["client"]
print(f"Cancelling job {job_id}")
print(client.cancel_job(job_id))
@client.command(name="list_jobs")
@click.option("--app_id", "-a", help="Application id", required=True)
@click.option("--page", "-p", help="Page number", type=int)
@click.option("--size", "-s", help="Size of page", type=int)
@click.option("--sort_by", "-sort", help="Job field to sort by", type=str, default="id", show_default=True)
@click.option(
"--order_by",
"-order",
help="Sort direction (asc or desc)",
type=click.Choice(["asc", "desc"]),
default="asc",
show_default=True,
)
@click.option(
"--created_start_date",
"-c0",
help="Created start date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--created_end_date",
"-c1",
help="Created end date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--completed_start_date",
"-e0",
help="Completed start date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--completed_end_date",
"-e1",
help="Completed end date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--status_in",
"-s_in",
help="Status of jobs",
multiple=True,
type=click.Choice(["SCHEDULED", "IN_PROGRESS", "FAILED", "SUSPENDED", "CANCELED", "EXPIRED", "COMPLETED"]),
)
@click.pass_context
def list_jobs(
ctx,
app_id: str,
page: int,
size: int,
sort_by: str,
order_by: str,
created_start_date: datetime,
created_end_date: datetime,
completed_start_date: datetime,
completed_end_date: datetime,
status_in: List[str] = None,
):
"""
Get a paginated list of jobs (without response) given an application id
"""
client = ctx.obj["client"]
print(f"Fetching jobs per application {app_id}")
if len(status_in) == 0:
status_in = None
print(
client.list_jobs(
app_id,
page,
size,
sort_by,
order_by,
created_start_date,
created_end_date,
completed_start_date,
completed_end_date,
status_in,
)
)
@client.command(name="download_jobs")
@click.option("--app_id", "-a", help="Application id", required=True)
@click.option(
"--created_start_date",
"-c0",
help="Created start date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--created_end_date",
"-c1",
help="Created end date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--completed_start_date",
"-e0",
help="Completed start date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--completed_end_date",
"-e1",
help="Completed end date",
type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]),
)
@click.option(
"--status_in",
"-s_in",
help="Status of jobs",
multiple=True,
type=click.Choice(["SCHEDULED", "IN_PROGRESS", "FAILED", "SUSPENDED", "CANCELED", "EXPIRED", "COMPLETED"]),
)
@click.pass_context
def download_jobs(
ctx,
app_id: str,
created_start_date: datetime,
created_end_date: datetime,
completed_start_date: datetime,
completed_end_date: datetime,
status_in: List[str] = None,
):
"""
Trigger processing of job responses that is sent to customer email once is finished.
"""
client = ctx.obj["client"]
print(f"Triggering job responses processing per application {app_id}")
if len(status_in) == 0:
status_in = None
print(
client.download_jobs(
app_id, created_start_date, created_end_date, completed_start_date, completed_end_date, status_in
)
)
@client.command(name="create_ground_truth")
@click.option("--app_id", "-a", help="Application id", required=True)
@click.option("--input_json", "-i", help="Input json of ground truth", required=True)
@click.option("--label", "-l", help="Label (or output) json of ground truth", required=True)
@click.option("--tag", "-t", help="Tag ground truth data")
@click.option("--metadata", "-m", help="Metadata json")
@click.pass_context
def create_ground_truth(
ctx, app_id: str, input_json: str = None, label: str = None, tag: str = None, metadata: str = None
):
"""
Submit fresh ground truth data
"""
client = ctx.obj["client"]
print("Submitting fresh ground truth data")
input_dict = None
metadata_dict = None
label_dict = None
if input_json is not None:
try:
input_dict = json.loads(input_json)
except:
print("Couldn't load input json of ground truth")
exit()
if metadata is not None:
try:
metadata_dict = json.loads(metadata)
except:
print("Couldn't load metadata json of ground truth")
exit()
if label is not None:
try:
label_dict = json.loads(label)
except:
print("Couldn't load label json of ground truth")
exit()
print(client.create_ground_truth(app_id, input_dict, label_dict, tag, metadata_dict))
@client.command(name="update_ground_truth")
@click.option("--ground_truth_data_id", "-g", help="Ground truth data id", required=True)
@click.option("--input_json", "-i", help="Input json of ground truth")
@click.option("--label", "-l", help="Label (or output) json of ground truth")
@click.option("--tag", "-t", help="Tag ground truth data")
@click.option("--metadata", "-m", help="Metadata json")
@click.pass_context
def update_ground_truth(
ctx, ground_truth_data_id: str, input_json: str = None, label: str = None, tag: str = None, metadata: str = None
):
"""
Update (patch) ground truth data
"""
client = ctx.obj["client"]
print(f"Updating ground truth data {ground_truth_data_id}")
input_dict = None
metadata_dict = None
label_dict = None
if input_json is not None:
try:
input_dict = json.loads(input_json)
except:
print("Couldn't load input json of ground truth")
exit()
if metadata is not None:
try:
metadata_dict = json.loads(metadata)
except:
print("Couldn't load metadata json of ground truth")
exit()
if label is not None:
try:
label_dict = json.loads(label)
except:
print("Couldn't load label json of ground truth")
exit()
print(client.update_ground_truth(ground_truth_data_id, input_dict, label_dict, tag, metadata_dict))
@client.command(name="list_ground_truth_data")
@click.option("--app_id", "-a", help="Application id", required=True)
@click.option("--page", "-p", help="Page number", type=int)
@click.option("--size", "-s", help="Size of page", type=int)
@click.pass_context
def list_ground_truth_data(ctx, app_id: str, page: int, size: int):
"""
List all ground truth data for an application
"""
client = ctx.obj["client"]
print(f"Fetching ground truth data per application {app_id}")
print(client.list_ground_truth_data(app_id, page, size))
@client.command(name="get_ground_truth_data")
@click.option("--ground_truth_data_id", "-g", help="Ground truth data id", required=True)
@click.pass_context
def get_ground_truth_data(ctx, ground_truth_data_id: str):
"""
Fetch single ground truth data object
"""
client = ctx.obj["client"]
print(f"Fetching ground truth data {ground_truth_data_id}")
print(client.get_ground_truth_data(ground_truth_data_id))
@client.command(name="delete_ground_truth_data")
@click.option("--ground_truth_data_id", "-g", help="Ground truth data id", required=True)
@click.pass_context
def delete_ground_truth_data(ctx, ground_truth_data_id: str):
"""
Mark ground truth data as deleted
"""
client = ctx.obj["client"]
print(f"Deleting ground truth data {ground_truth_data_id}")
print(client.delete_ground_truth_data(ground_truth_data_id))
@client.command(name="create_ground_truth_from_job")
@click.option("--app_id", "-a", help="Application id", required=True)
@click.option("-job_id", "-j", help="Job id", required=True)
@click.pass_context
def create_ground_truth_from_job(ctx, app_id: str, job_id: str):
client = ctx.obj["client"]
print(f"Converting job {job_id} to ground truth data")
print(client.create_ground_truth_from_job(app_id, job_id))
@cli.command()
@click.option("--api-key", help="Your super.AI API KEY", required=True)
def config(api_key):
"""
Set api key.
"""
save_api_key(api_key)
@cli.command()
@click.option("--username", "-u", help="super.AI Username", required=True)
@click.option("--password", "-p", prompt=True, hide_input=True)
@click.option("--show-pip/--no-show-pip", "-pip", default=False, help="Shows how to set pip configuration manually")
def login(username, password, show_pip):
"""
Use username and password to get super.AI api key.
"""
user = Cognito(
access_key="AKIAIOSFODNN7EXAMPLE",
secret_key="<KEY>",
user_pool_id=COGNITO_USERPOOL_ID,
client_id=COGNITO_CLIENT_ID,
user_pool_region=COGNITO_REGION,
username=username,
)
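    # Authenticate against Cognito, then exchange the resulting tokens for the account's
    # super.AI API keys and, where authorized, AWS credentials used to configure pip.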
try:
user.authenticate(password)
except ClientError as e:
if (
e.response["Error"]["Code"] == "UserNotFoundException"
or e.response["Error"]["Code"] == "NotAuthorizedException"
):
print("Incorrect username or password")
return
else:
print(f"Unexpected error: {e}")
return
client = Client(auth_token=user.access_token, id_token=user.id_token)
api_keys = client.get_apikeys()
if len(api_keys) > 0:
save_api_key(api_keys[0], username=username)
save_cognito_user(user)
print(f"Api key {api_keys[0]} was set")
else:
print(f"User {username} doesn't have any api keys")
try:
aws_credentials = client.get_awskeys()
if aws_credentials:
save_aws_credentials(aws_credentials)
pip_configure(show_pip=show_pip)
except SuperAIAuthorizationError as authorization_error:
logger.debug(f"ERROR Authorization: {str(authorization_error)}")
remove_aws_credentials()
except Exception as exception:
logger.debug(f"ERROR: {str(exception)}")
remove_aws_credentials()
@cli.command()
def logout():
"""
Remove stored api key
"""
save_api_key("")
print("Stored api key was removed")
@cli.group()
def ai():
"""Build and push your model docker images"""
pass
@ai.group()
def docker():
"""Docker specific commands"""
pass
@docker.command(name="build", help="Build a docker image for a sagemaker model.")
@click.option("--image-name", "-i", required=True, help="Name of the image to be built")
@click.option(
"--entry-point",
"-e",
required=True,
help="Path to file which will serve as entrypoint to the sagemaker model. Generally this is a method which calls "
"the predict method",
)
@click.option("--dockerfile", "-d", help="Path to Dockerfile. Default: Dockerfile", default="Dockerfile")
@click.option(
"--command", "-c", help="Command to run after the entrypoint in the image. Default: serve", default="serve"
)
@click.option("--worker-count", "-w", help="Number of workers to run. Default: 1", default=1)
@click.option(
"--entry-point-method",
"-em",
help="Method to be called inside the entry point. Make sure this method accepts the input data and context. "
"Default: handle",
default="handle",
)
@click.option(
"--use-shell", "-u", help="Use shell to run the build process, which is more verbose. Used by default", default=True
)
def build_docker_image(image_name, entry_point, dockerfile, command, worker_count, entry_point_method, use_shell):
build_image(
image_name=image_name,
entry_point=entry_point,
dockerfile=dockerfile,
command=command,
worker_count=worker_count,
entry_point_method=entry_point_method,
use_shell=use_shell,
)
@docker.command(name="push", help="Push the docker image built by `superai model docker-build` to ECR. ")
@click.option(
"--image-name", "-i", required=True, help="Name of the image to be pushed. You can get this from `docker image ls`"
)
@click.option("--region", "-r", help="AWS region. Default: us-east-1", default="us-east-1")
def push_docker_image(image_name, region):
push_image(image_name=image_name, region=region)
@docker.command(
"run-local",
help="Run a docker container built by `superai model docker-build` locally. "
"We assume here that the ports 8080 & 8081 are available",
)
@click.option("--image-name", "-i", required=True, help="Name of the image to be run")
@click.option(
"--model-path",
"-m",
required=True,
help="Path to the folder containing weights file to be used for getting inference",
)
@click.option(
"--gpu",
"-g",
default=False,
help="Run docker with GPUs enabled. Make sure this is a GPU container with cuda enabled, "
"and nvidia-container-runtime installed",
)
def docker_run_local(image_name, model_path, gpu):
options = [f"-v {os.path.abspath(model_path)}:/opt/ml/model/", "-p 80:8080", "-p 8081:8081 "]
if gpu:
options.append("--rm --gpus all")
options = " ".join(options)
command = f"docker run {options} {image_name}"
logger.info(f"Running command: {command}")
os.system(command)
@docker.command(
"invoke-local",
help="Invoke the locally deployed container. The API description of the local container can be found at "
"http://localhost/api-description",
)
@click.option(
"--mime",
"-mm",
default="application/json",
help="MIME type of the payload. `application/json` will be sent to the invocation directly. For other MIME types, "
"you can pass the path to file with --body. If its a valid path, it will be loaded and sent to the request. "
"Default: `application/json`",
)
@click.option(
"--body", "-b", required=True, help="Body of payload to be sent to the invocation. Can be a path to a file as well."
)
def docker_invoke_local(mime, body):
invoke_local(mime, body)
def main():
signal.signal(signal.SIGINT, _signal_handler)
sys.exit(cli())
if __name__ == "__main__":
main()
|
[
"superai.meta_ai.dockerizer.push_image",
"click.option",
"click.echo",
"superai.utils.load_api_key",
"warrant.Cognito",
"superai.meta_ai.dockerizer.build_image",
"superai.config.set_env_config",
"superai.utils.save_api_key",
"superai.config.settings.as_dict",
"os.path.abspath",
"json.loads",
"superai.client.Client",
"superai.log.logger.info",
"click.Choice",
"click.DateTime",
"click.group",
"superai.utils.save_aws_credentials",
"superai.utils.pip_config.pip_configure",
"superai.config.get_config_dir",
"superai.meta_ai.dockerizer.sagemaker_endpoint.invoke_local",
"os.system",
"superai.log.logger.get_logger",
"signal.signal",
"sys.exit",
"superai.config.list_env_configs",
"superai.utils.save_cognito_user",
"superai.config.settings.get",
"superai.utils.remove_aws_credentials"
] |
[((856, 872), 'superai.config.get_config_dir', 'get_config_dir', ([], {}), '()\n', (870, 872), False, 'from superai.config import get_config_dir, list_env_configs, set_env_config, settings\n'), ((1073, 1100), 'superai.log.logger.get_logger', 'logger.get_logger', (['__name__'], {}), '(__name__)\n', (1090, 1100), False, 'from superai.log import logger\n'), ((1149, 1162), 'click.group', 'click.group', ([], {}), '()\n', (1160, 1162), False, 'import click\n'), ((1201, 1289), 'click.option', 'click.option', (['"""--verbose/--no-verbose"""', '"""-vvv"""'], {'help': '"""Verbose output"""', 'default': '(False)'}), "('--verbose/--no-verbose', '-vvv', help='Verbose output',\n default=False)\n", (1213, 1289), False, 'import click\n'), ((2035, 2106), 'click.option', 'click.option', (['"""--api-key"""'], {'help': '"""Your super.AI API KEY"""', 'required': '(False)'}), "('--api-key', help='Your super.AI API KEY', required=False)\n", (2047, 2106), False, 'import click\n'), ((2108, 2183), 'click.option', 'click.option', (['"""--environment"""', '"""-e"""'], {'help': '"""Set environment"""', 'required': '(False)'}), "('--environment', '-e', help='Set environment', required=False)\n", (2120, 2183), False, 'import click\n'), ((2781, 2849), 'click.option', 'click.option', (['"""--app_id"""', '"""-a"""'], {'help': '"""Application id"""', 'required': '(True)'}), "('--app_id', '-a', help='Application id', required=True)\n", (2793, 2849), False, 'import click\n'), ((2851, 2939), 'click.option', 'click.option', (['"""--callback_url"""', '"""-c"""'], {'help': '"""Callback URL for post when jobs finish"""'}), "('--callback_url', '-c', help=\n 'Callback URL for post when jobs finish')\n", (2863, 2939), False, 'import click\n'), ((2936, 2996), 'click.option', 'click.option', (['"""--inputs"""', '"""-i"""'], {'help': '"""Json list with inputs"""'}), "('--inputs', '-i', help='Json list with inputs')\n", (2948, 2996), False, 'import click\n'), ((2998, 3068), 'click.option', 'click.option', (['"""--inputs_file"""', '"""-if"""'], {'help': '"""URL pointing to JSON file"""'}), "('--inputs_file', '-if', help='URL pointing to JSON file')\n", (3010, 3068), False, 'import click\n'), ((3571, 3631), 'click.option', 'click.option', (['"""--job_id"""', '"""-j"""'], {'help': '"""Job id"""', 'required': '(True)'}), "('--job_id', '-j', help='Job id', required=True)\n", (3583, 3631), False, 'import click\n'), ((3874, 3934), 'click.option', 'click.option', (['"""--app_id"""', '"""-a"""'], {'help': '"""App id"""', 'required': '(True)'}), "('--app_id', '-a', help='App id', required=True)\n", (3886, 3934), False, 'import click\n'), ((4213, 4273), 'click.option', 'click.option', (['"""--app_id"""', '"""-a"""'], {'help': '"""App id"""', 'required': '(True)'}), "('--app_id', '-a', help='App id', required=True)\n", (4225, 4273), False, 'import click\n'), ((4275, 4339), 'click.option', 'click.option', (['"""--batch_id"""', '"""-b"""'], {'help': '"""Batch id"""', 'required': '(True)'}), "('--batch_id', '-b', help='Batch id', required=True)\n", (4287, 4339), False, 'import click\n'), ((4646, 4706), 'click.option', 'click.option', (['"""--job_id"""', '"""-j"""'], {'help': '"""Job id"""', 'required': '(True)'}), "('--job_id', '-j', help='Job id', required=True)\n", (4658, 4706), False, 'import click\n'), ((4973, 5033), 'click.option', 'click.option', (['"""--job_id"""', '"""-j"""'], {'help': '"""Job id"""', 'required': '(True)'}), "('--job_id', '-j', help='Job id', required=True)\n", (4985, 5033), False, 'import click\n'), ((5338, 5406), 
'click.option', 'click.option', (['"""--app_id"""', '"""-a"""'], {'help': '"""Application id"""', 'required': '(True)'}), "('--app_id', '-a', help='Application id', required=True)\n", (5350, 5406), False, 'import click\n'), ((5408, 5466), 'click.option', 'click.option', (['"""--page"""', '"""-p"""'], {'help': '"""Page number"""', 'type': 'int'}), "('--page', '-p', help='Page number', type=int)\n", (5420, 5466), False, 'import click\n'), ((5468, 5527), 'click.option', 'click.option', (['"""--size"""', '"""-s"""'], {'help': '"""Size of page"""', 'type': 'int'}), "('--size', '-s', help='Size of page', type=int)\n", (5480, 5527), False, 'import click\n'), ((5529, 5639), 'click.option', 'click.option', (['"""--sort_by"""', '"""-sort"""'], {'help': '"""Job field to sort by"""', 'type': 'str', 'default': '"""id"""', 'show_default': '(True)'}), "('--sort_by', '-sort', help='Job field to sort by', type=str,\n default='id', show_default=True)\n", (5541, 5639), False, 'import click\n'), ((7512, 7580), 'click.option', 'click.option', (['"""--app_id"""', '"""-a"""'], {'help': '"""Application id"""', 'required': '(True)'}), "('--app_id', '-a', help='Application id', required=True)\n", (7524, 7580), False, 'import click\n'), ((9127, 9195), 'click.option', 'click.option', (['"""--app_id"""', '"""-a"""'], {'help': '"""Application id"""', 'required': '(True)'}), "('--app_id', '-a', help='Application id', required=True)\n", (9139, 9195), False, 'import click\n'), ((9197, 9285), 'click.option', 'click.option', (['"""--input_json"""', '"""-i"""'], {'help': '"""Input json of ground truth"""', 'required': '(True)'}), "('--input_json', '-i', help='Input json of ground truth',\n required=True)\n", (9209, 9285), False, 'import click\n'), ((9283, 9378), 'click.option', 'click.option', (['"""--label"""', '"""-l"""'], {'help': '"""Label (or output) json of ground truth"""', 'required': '(True)'}), "('--label', '-l', help='Label (or output) json of ground truth',\n required=True)\n", (9295, 9378), False, 'import click\n'), ((9376, 9433), 'click.option', 'click.option', (['"""--tag"""', '"""-t"""'], {'help': '"""Tag ground truth data"""'}), "('--tag', '-t', help='Tag ground truth data')\n", (9388, 9433), False, 'import click\n'), ((9435, 9489), 'click.option', 'click.option', (['"""--metadata"""', '"""-m"""'], {'help': '"""Metadata json"""'}), "('--metadata', '-m', help='Metadata json')\n", (9447, 9489), False, 'import click\n'), ((10536, 10628), 'click.option', 'click.option', (['"""--ground_truth_data_id"""', '"""-g"""'], {'help': '"""Ground truth data id"""', 'required': '(True)'}), "('--ground_truth_data_id', '-g', help='Ground truth data id',\n required=True)\n", (10548, 10628), False, 'import click\n'), ((10626, 10695), 'click.option', 'click.option', (['"""--input_json"""', '"""-i"""'], {'help': '"""Input json of ground truth"""'}), "('--input_json', '-i', help='Input json of ground truth')\n", (10638, 10695), False, 'import click\n'), ((10697, 10773), 'click.option', 'click.option', (['"""--label"""', '"""-l"""'], {'help': '"""Label (or output) json of ground truth"""'}), "('--label', '-l', help='Label (or output) json of ground truth')\n", (10709, 10773), False, 'import click\n'), ((10775, 10832), 'click.option', 'click.option', (['"""--tag"""', '"""-t"""'], {'help': '"""Tag ground truth data"""'}), "('--tag', '-t', help='Tag ground truth data')\n", (10787, 10832), False, 'import click\n'), ((10834, 10888), 'click.option', 'click.option', (['"""--metadata"""', '"""-m"""'], {'help': '"""Metadata json"""'}), 
"('--metadata', '-m', help='Metadata json')\n", (10846, 10888), False, 'import click\n'), ((11984, 12052), 'click.option', 'click.option', (['"""--app_id"""', '"""-a"""'], {'help': '"""Application id"""', 'required': '(True)'}), "('--app_id', '-a', help='Application id', required=True)\n", (11996, 12052), False, 'import click\n'), ((12054, 12112), 'click.option', 'click.option', (['"""--page"""', '"""-p"""'], {'help': '"""Page number"""', 'type': 'int'}), "('--page', '-p', help='Page number', type=int)\n", (12066, 12112), False, 'import click\n'), ((12114, 12173), 'click.option', 'click.option', (['"""--size"""', '"""-s"""'], {'help': '"""Size of page"""', 'type': 'int'}), "('--size', '-s', help='Size of page', type=int)\n", (12126, 12173), False, 'import click\n'), ((12535, 12627), 'click.option', 'click.option', (['"""--ground_truth_data_id"""', '"""-g"""'], {'help': '"""Ground truth data id"""', 'required': '(True)'}), "('--ground_truth_data_id', '-g', help='Ground truth data id',\n required=True)\n", (12547, 12627), False, 'import click\n'), ((12970, 13062), 'click.option', 'click.option', (['"""--ground_truth_data_id"""', '"""-g"""'], {'help': '"""Ground truth data id"""', 'required': '(True)'}), "('--ground_truth_data_id', '-g', help='Ground truth data id',\n required=True)\n", (12982, 13062), False, 'import click\n'), ((13411, 13479), 'click.option', 'click.option', (['"""--app_id"""', '"""-a"""'], {'help': '"""Application id"""', 'required': '(True)'}), "('--app_id', '-a', help='Application id', required=True)\n", (13423, 13479), False, 'import click\n'), ((13481, 13540), 'click.option', 'click.option', (['"""-job_id"""', '"""-j"""'], {'help': '"""Job id"""', 'required': '(True)'}), "('-job_id', '-j', help='Job id', required=True)\n", (13493, 13540), False, 'import click\n'), ((13797, 13867), 'click.option', 'click.option', (['"""--api-key"""'], {'help': '"""Your super.AI API KEY"""', 'required': '(True)'}), "('--api-key', help='Your super.AI API KEY', required=True)\n", (13809, 13867), False, 'import click\n'), ((13966, 14039), 'click.option', 'click.option', (['"""--username"""', '"""-u"""'], {'help': '"""super.AI Username"""', 'required': '(True)'}), "('--username', '-u', help='super.AI Username', required=True)\n", (13978, 14039), False, 'import click\n'), ((14041, 14103), 'click.option', 'click.option', (['"""--password"""', '"""-p"""'], {'prompt': '(True)', 'hide_input': '(True)'}), "('--password', '-p', prompt=True, hide_input=True)\n", (14053, 14103), False, 'import click\n'), ((14105, 14225), 'click.option', 'click.option', (['"""--show-pip/--no-show-pip"""', '"""-pip"""'], {'default': '(False)', 'help': '"""Shows how to set pip configuration manually"""'}), "('--show-pip/--no-show-pip', '-pip', default=False, help=\n 'Shows how to set pip configuration manually')\n", (14117, 14225), False, 'import click\n'), ((16140, 16232), 'click.option', 'click.option', (['"""--image-name"""', '"""-i"""'], {'required': '(True)', 'help': '"""Name of the image to be built"""'}), "('--image-name', '-i', required=True, help=\n 'Name of the image to be built')\n", (16152, 16232), False, 'import click\n'), ((16229, 16423), 'click.option', 'click.option', (['"""--entry-point"""', '"""-e"""'], {'required': '(True)', 'help': '"""Path to file which will serve as entrypoint to the sagemaker model. Generally this is a method which calls the predict method"""'}), "('--entry-point', '-e', required=True, help=\n 'Path to file which will serve as entrypoint to the sagemaker model. 
Generally this is a method which calls the predict method'\n )\n", (16241, 16423), False, 'import click\n'), ((16441, 16550), 'click.option', 'click.option', (['"""--dockerfile"""', '"""-d"""'], {'help': '"""Path to Dockerfile. Default: Dockerfile"""', 'default': '"""Dockerfile"""'}), "('--dockerfile', '-d', help=\n 'Path to Dockerfile. Default: Dockerfile', default='Dockerfile')\n", (16453, 16550), False, 'import click\n'), ((16547, 16677), 'click.option', 'click.option', (['"""--command"""', '"""-c"""'], {'help': '"""Command to run after the entrypoint in the image. Default: serve"""', 'default': '"""serve"""'}), "('--command', '-c', help=\n 'Command to run after the entrypoint in the image. Default: serve',\n default='serve')\n", (16559, 16677), False, 'import click\n'), ((16676, 16773), 'click.option', 'click.option', (['"""--worker-count"""', '"""-w"""'], {'help': '"""Number of workers to run. Default: 1"""', 'default': '(1)'}), "('--worker-count', '-w', help=\n 'Number of workers to run. Default: 1', default=1)\n", (16688, 16773), False, 'import click\n'), ((16770, 16967), 'click.option', 'click.option', (['"""--entry-point-method"""', '"""-em"""'], {'help': '"""Method to be called inside the entry point. Make sure this method accepts the input data and context. Default: handle"""', 'default': '"""handle"""'}), "('--entry-point-method', '-em', help=\n 'Method to be called inside the entry point. Make sure this method accepts the input data and context. Default: handle'\n , default='handle')\n", (16782, 16967), False, 'import click\n'), ((16985, 17125), 'click.option', 'click.option', (['"""--use-shell"""', '"""-u"""'], {'help': '"""Use shell to run the build process, which is more verbose. Used by default"""', 'default': '(True)'}), "('--use-shell', '-u', help=\n 'Use shell to run the build process, which is more verbose. Used by default'\n , default=True)\n", (16997, 17125), False, 'import click\n'), ((17600, 17734), 'click.option', 'click.option', (['"""--image-name"""', '"""-i"""'], {'required': '(True)', 'help': '"""Name of the image to be pushed. You can get this from `docker image ls`"""'}), "('--image-name', '-i', required=True, help=\n 'Name of the image to be pushed. You can get this from `docker image ls`')\n", (17612, 17734), False, 'import click\n'), ((17737, 17832), 'click.option', 'click.option', (['"""--region"""', '"""-r"""'], {'help': '"""AWS region. Default: us-east-1"""', 'default': '"""us-east-1"""'}), "('--region', '-r', help='AWS region. Default: us-east-1',\n default='us-east-1')\n", (17749, 17832), False, 'import click\n'), ((18109, 18199), 'click.option', 'click.option', (['"""--image-name"""', '"""-i"""'], {'required': '(True)', 'help': '"""Name of the image to be run"""'}), "('--image-name', '-i', required=True, help=\n 'Name of the image to be run')\n", (18121, 18199), False, 'import click\n'), ((18196, 18339), 'click.option', 'click.option', (['"""--model-path"""', '"""-m"""'], {'required': '(True)', 'help': '"""Path to the folder containing weights file to be used for getting inference"""'}), "('--model-path', '-m', required=True, help=\n 'Path to the folder containing weights file to be used for getting inference'\n )\n", (18208, 18339), False, 'import click\n'), ((18350, 18532), 'click.option', 'click.option', (['"""--gpu"""', '"""-g"""'], {'default': '(False)', 'help': '"""Run docker with GPUs enabled. 
Make sure this is a GPU container with cuda enabled, and nvidia-container-runtime installed"""'}), "('--gpu', '-g', default=False, help=\n 'Run docker with GPUs enabled. Make sure this is a GPU container with cuda enabled, and nvidia-container-runtime installed'\n )\n", (18362, 18532), False, 'import click\n'), ((19097, 19415), 'click.option', 'click.option', (['"""--mime"""', '"""-mm"""'], {'default': '"""application/json"""', 'help': '"""MIME type of the payload. `application/json` will be sent to the invocation directly. For other MIME types, you can pass the path to file with --body. If its a valid path, it will be loaded and sent to the request. Default: `application/json`"""'}), "('--mime', '-mm', default='application/json', help=\n 'MIME type of the payload. `application/json` will be sent to the invocation directly. For other MIME types, you can pass the path to file with --body. If its a valid path, it will be loaded and sent to the request. Default: `application/json`'\n )\n", (19109, 19415), False, 'import click\n'), ((19440, 19580), 'click.option', 'click.option', (['"""--body"""', '"""-b"""'], {'required': '(True)', 'help': '"""Body of payload to be sent to the invocation. Can be a path to a file as well."""'}), "('--body', '-b', required=True, help=\n 'Body of payload to be sent to the invocation. Can be a path to a file as well.'\n )\n", (19452, 19580), False, 'import click\n'), ((1134, 1145), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1142, 1145), False, 'import sys\n'), ((1343, 1374), 'click.echo', 'click.echo', (['"""================="""'], {}), "('=================')\n", (1353, 1374), False, 'import click\n'), ((1379, 1411), 'click.echo', 'click.echo', (['"""Super.AI CLI Info:"""'], {}), "('Super.AI CLI Info:')\n", (1389, 1411), False, 'import click\n'), ((1416, 1447), 'click.echo', 'click.echo', (['"""================="""'], {}), "('=================')\n", (1426, 1447), False, 'import click\n'), ((1452, 1466), 'superai.utils.load_api_key', 'load_api_key', ([], {}), '()\n', (1464, 1466), False, 'from superai.utils import load_api_key, remove_aws_credentials, save_api_key, save_aws_credentials, save_cognito_user\n'), ((1471, 1508), 'click.echo', 'click.echo', (['f"""VERSION: {__version__}"""'], {}), "(f'VERSION: {__version__}')\n", (1481, 1508), False, 'import click\n'), ((1513, 1563), 'click.echo', 'click.echo', (['f"""ENVIRONMENT: {settings.current_env}"""'], {}), "(f'ENVIRONMENT: {settings.current_env}')\n", (1523, 1563), False, 'import click\n'), ((1969, 2006), 'superai.config.list_env_configs', 'list_env_configs', ([], {'printInConsole': '(True)'}), '(printInConsole=True)\n', (1985, 2006), False, 'from superai.config import get_config_dir, list_env_configs, set_env_config, settings\n'), ((2718, 2741), 'superai.client.Client', 'Client', ([], {'api_key': 'api_key'}), '(api_key=api_key)\n', (2724, 2741), False, 'from superai.client import Client\n'), ((13926, 13947), 'superai.utils.save_api_key', 'save_api_key', (['api_key'], {}), '(api_key)\n', (13938, 13947), False, 'from superai.utils import load_api_key, remove_aws_credentials, save_api_key, save_aws_credentials, save_cognito_user\n'), ((14344, 14531), 'warrant.Cognito', 'Cognito', ([], {'access_key': '"""AKIAIOSFODNN7EXAMPLE"""', 'secret_key': '"""<KEY>"""', 'user_pool_id': 'COGNITO_USERPOOL_ID', 'client_id': 'COGNITO_CLIENT_ID', 'user_pool_region': 'COGNITO_REGION', 'username': 'username'}), "(access_key='AKIAIOSFODNN7EXAMPLE', secret_key='<KEY>', user_pool_id\n =COGNITO_USERPOOL_ID, 
client_id=COGNITO_CLIENT_ID, user_pool_region=\n COGNITO_REGION, username=username)\n", (14351, 14531), False, 'from warrant import Cognito\n'), ((14975, 15035), 'superai.client.Client', 'Client', ([], {'auth_token': 'user.access_token', 'id_token': 'user.id_token'}), '(auth_token=user.access_token, id_token=user.id_token)\n', (14981, 15035), False, 'from superai.client import Client\n'), ((15842, 15858), 'superai.utils.save_api_key', 'save_api_key', (['""""""'], {}), "('')\n", (15854, 15858), False, 'from superai.utils import load_api_key, remove_aws_credentials, save_api_key, save_aws_credentials, save_cognito_user\n'), ((17241, 17436), 'superai.meta_ai.dockerizer.build_image', 'build_image', ([], {'image_name': 'image_name', 'entry_point': 'entry_point', 'dockerfile': 'dockerfile', 'command': 'command', 'worker_count': 'worker_count', 'entry_point_method': 'entry_point_method', 'use_shell': 'use_shell'}), '(image_name=image_name, entry_point=entry_point, dockerfile=\n dockerfile, command=command, worker_count=worker_count,\n entry_point_method=entry_point_method, use_shell=use_shell)\n', (17252, 17436), False, 'from superai.meta_ai.dockerizer import build_image, push_image\n'), ((17876, 17924), 'superai.meta_ai.dockerizer.push_image', 'push_image', ([], {'image_name': 'image_name', 'region': 'region'}), '(image_name=image_name, region=region)\n', (17886, 17924), False, 'from superai.meta_ai.dockerizer import build_image, push_image\n'), ((18839, 18881), 'superai.log.logger.info', 'logger.info', (['f"""Running command: {command}"""'], {}), "(f'Running command: {command}')\n", (18850, 18881), False, 'from superai.log import logger\n'), ((18886, 18904), 'os.system', 'os.system', (['command'], {}), '(command)\n', (18895, 18904), False, 'import os\n'), ((19618, 19642), 'superai.meta_ai.dockerizer.sagemaker_endpoint.invoke_local', 'invoke_local', (['mime', 'body'], {}), '(mime, body)\n', (19630, 19642), False, 'from superai.meta_ai.dockerizer.sagemaker_endpoint import upload_model_to_s3, invoke_sagemaker_endpoint, create_endpoint, invoke_local\n'), ((19661, 19706), 'signal.signal', 'signal.signal', (['signal.SIGINT', '_signal_handler'], {}), '(signal.SIGINT, _signal_handler)\n', (19674, 19706), False, 'import signal\n'), ((895, 922), 'superai.config.settings.get', 'settings.get', (['"""cognito"""', '{}'], {}), "('cognito', {})\n", (907, 922), False, 'from superai.config import get_config_dir, list_env_configs, set_env_config, settings\n'), ((962, 989), 'superai.config.settings.get', 'settings.get', (['"""cognito"""', '{}'], {}), "('cognito', {})\n", (974, 989), False, 'from superai.config import get_config_dir, list_env_configs, set_env_config, settings\n'), ((1024, 1051), 'superai.config.settings.get', 'settings.get', (['"""cognito"""', '{}'], {}), "('cognito', {})\n", (1036, 1051), False, 'from superai.config import get_config_dir, list_env_configs, set_env_config, settings\n'), ((2310, 2342), 'superai.config.set_env_config', 'set_env_config', ([], {'name': 'environment'}), '(name=environment)\n', (2324, 2342), False, 'from superai.config import get_config_dir, list_env_configs, set_env_config, settings\n'), ((2367, 2388), 'superai.utils.save_api_key', 'save_api_key', (['api_key'], {}), '(api_key)\n', (2379, 2388), False, 'from superai.utils import load_api_key, remove_aws_credentials, save_api_key, save_aws_credentials, save_cognito_user\n'), ((2529, 2543), 'superai.utils.load_api_key', 'load_api_key', ([], {}), '()\n', (2541, 2543), False, 'from superai.utils import load_api_key, 
remove_aws_credentials, save_api_key, save_aws_credentials, save_cognito_user\n'), ((5733, 5762), 'click.Choice', 'click.Choice', (["['asc', 'desc']"], {}), "(['asc', 'desc'])\n", (5745, 5762), False, 'import click\n'), ((5902, 5960), 'click.DateTime', 'click.DateTime', ([], {'formats': "['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d']"}), "(formats=['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d'])\n", (5916, 5960), False, 'import click\n'), ((6054, 6112), 'click.DateTime', 'click.DateTime', ([], {'formats': "['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d']"}), "(formats=['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d'])\n", (6068, 6112), False, 'import click\n'), ((6214, 6272), 'click.DateTime', 'click.DateTime', ([], {'formats': "['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d']"}), "(formats=['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d'])\n", (6228, 6272), False, 'import click\n'), ((6370, 6428), 'click.DateTime', 'click.DateTime', ([], {'formats': "['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d']"}), "(formats=['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d'])\n", (6384, 6428), False, 'import click\n'), ((6534, 6639), 'click.Choice', 'click.Choice', (["['SCHEDULED', 'IN_PROGRESS', 'FAILED', 'SUSPENDED', 'CANCELED', 'EXPIRED',\n 'COMPLETED']"], {}), "(['SCHEDULED', 'IN_PROGRESS', 'FAILED', 'SUSPENDED', 'CANCELED',\n 'EXPIRED', 'COMPLETED'])\n", (6546, 6639), False, 'import click\n'), ((7675, 7733), 'click.DateTime', 'click.DateTime', ([], {'formats': "['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d']"}), "(formats=['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d'])\n", (7689, 7733), False, 'import click\n'), ((7827, 7885), 'click.DateTime', 'click.DateTime', ([], {'formats': "['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d']"}), "(formats=['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d'])\n", (7841, 7885), False, 'import click\n'), ((7987, 8045), 'click.DateTime', 'click.DateTime', ([], {'formats': "['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d']"}), "(formats=['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d'])\n", (8001, 8045), False, 'import click\n'), ((8143, 8201), 'click.DateTime', 'click.DateTime', ([], {'formats': "['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d']"}), "(formats=['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d'])\n", (8157, 8201), False, 'import click\n'), ((8307, 8412), 'click.Choice', 'click.Choice', (["['SCHEDULED', 'IN_PROGRESS', 'FAILED', 'SUSPENDED', 'CANCELED', 'EXPIRED',\n 'COMPLETED']"], {}), "(['SCHEDULED', 'IN_PROGRESS', 'FAILED', 'SUSPENDED', 'CANCELED',\n 'EXPIRED', 'COMPLETED'])\n", (8319, 8412), False, 'import click\n'), ((15106, 15150), 'superai.utils.save_api_key', 'save_api_key', (['api_keys[0]'], {'username': 'username'}), '(api_keys[0], username=username)\n', (15118, 15150), False, 'from superai.utils import load_api_key, remove_aws_credentials, save_api_key, save_aws_credentials, save_cognito_user\n'), ((15159, 15182), 'superai.utils.save_cognito_user', 'save_cognito_user', (['user'], {}), '(user)\n', (15176, 15182), False, 'from superai.utils import load_api_key, remove_aws_credentials, save_api_key, save_aws_credentials, save_cognito_user\n'), ((3355, 3373), 'json.loads', 'json.loads', (['inputs'], {}), '(inputs)\n', (3365, 3373), False, 'import json\n'), ((9909, 9931), 'json.loads', 'json.loads', (['input_json'], {}), '(input_json)\n', (9919, 9931), False, 'import json\n'), ((10099, 10119), 'json.loads', 'json.loads', (['metadata'], {}), '(metadata)\n', (10109, 10119), False, 'import json\n'), ((10284, 10301), 'json.loads', 'json.loads', (['label'], {}), '(label)\n', (10294, 10301), False, 'import json\n'), ((11340, 11362), 'json.loads', 'json.loads', (['input_json'], {}), '(input_json)\n', (11350, 11362), False, 'import json\n'), ((11530, 11550), 'json.loads', 
'json.loads', (['metadata'], {}), '(metadata)\n', (11540, 11550), False, 'import json\n'), ((11715, 11732), 'json.loads', 'json.loads', (['label'], {}), '(label)\n', (11725, 11732), False, 'import json\n'), ((15398, 15435), 'superai.utils.save_aws_credentials', 'save_aws_credentials', (['aws_credentials'], {}), '(aws_credentials)\n', (15418, 15435), False, 'from superai.utils import load_api_key, remove_aws_credentials, save_api_key, save_aws_credentials, save_cognito_user\n'), ((15448, 15480), 'superai.utils.pip_config.pip_configure', 'pip_configure', ([], {'show_pip': 'show_pip'}), '(show_pip=show_pip)\n', (15461, 15480), False, 'from superai.utils.pip_config import pip_configure\n'), ((15623, 15647), 'superai.utils.remove_aws_credentials', 'remove_aws_credentials', ([], {}), '()\n', (15645, 15647), False, 'from superai.utils import load_api_key, remove_aws_credentials, save_api_key, save_aws_credentials, save_cognito_user\n'), ((15740, 15764), 'superai.utils.remove_aws_credentials', 'remove_aws_credentials', ([], {}), '()\n', (15762, 15764), False, 'from superai.utils import load_api_key, remove_aws_credentials, save_api_key, save_aws_credentials, save_cognito_user\n'), ((1676, 1718), 'superai.config.settings.as_dict', 'settings.as_dict', ([], {'env': 'settings.current_env'}), '(env=settings.current_env)\n', (1692, 1718), False, 'from superai.config import get_config_dir, list_env_configs, set_env_config, settings\n'), ((18621, 18648), 'os.path.abspath', 'os.path.abspath', (['model_path'], {}), '(model_path)\n', (18636, 18648), False, 'import os\n'), ((1588, 1612), 'superai.config.settings.get', 'settings.get', (['"""user"""', '{}'], {}), "('user', {})\n", (1600, 1612), False, 'from superai.config import get_config_dir, list_env_configs, set_env_config, settings\n')]
|
import json
from abc import ABCMeta, abstractmethod
class Feed(metaclass=ABCMeta):
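    """Abstract base class for a keyword-driven collection of stories.
    Concrete subclasses implement _getStories(); stored stories are reachable via
    __getitem__/__len__ and can be serialised with toJson().
    """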
def __init__(self, keyword):
        self._stories = self._getStories(keyword)
        # Bind the iterator only after the stories are fetched; binding it to the
        # initial empty list would leave the iterator pointing at stale data.
        self.__iter__ = self._stories.__iter__
try:
self.sources = [self._stories[0].source]
except IndexError:
# TODO: No results
raise
self._len = len(self._stories)
@abstractmethod
def _getStories(self, keyword):
pass
def __repr__(self):
sources = ', '.join(source for source in self.sources)
length = self._len
rep = "Sources: {0}\nLength: {1}".format(sources, length)
return rep
def __len__(self):
return self._len
def __getitem__(self, index):
return self._stories[index]
def __setitem__(self, index, value):
self._stories[index] = value
def append(self, story):
self._stories.append(story)
self._len += 1
if story.source not in self.sources:
self.sources.append(story.source)
def extend(self, feed):
self._stories.extend(feed)
        self._len = len(self._stories)
for source in feed.sources:
if source not in self.sources:
self.sources.append(source)
def sortByTime(self, reverse):
self._stories.sort(key=lambda x: x.published, reverse=reverse)
def sortByPopularity(self):
pass
def toJson(self):
res = []
for x in self._stories:
res.append(x.__dict__)
return json.dumps(res)
|
[
"json.dumps"
] |
[((1616, 1631), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (1626, 1631), False, 'import json\n')]
|
from allauth.account.adapter import DefaultAccountAdapter
from django.conf import settings
from django.utils import timezone
from django.shortcuts import resolve_url
class AccountAdapter(DefaultAccountAdapter):
def get_login_redirect_url(self, request):
"""
If the user has never logged in before we need them to
create their club. However, last_login will be set before
this is called, so we check if now() - last_login is suitably
short to indicate a first-time login.
"""
threshold = 90
assert request.user.is_authenticated()
        if (timezone.now() - request.user.last_login).total_seconds() < threshold:
url = '/clubs/add/'
else:
url = settings.LOGIN_REDIRECT_URL
return resolve_url(url)
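    # Example of the intended flow: on a brand-new account, last_login is set just before
    # this hook runs, so the delta is a few seconds and the user is redirected to /clubs/add/.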
|
[
"django.utils.timezone.now",
"django.shortcuts.resolve_url"
] |
[((781, 797), 'django.shortcuts.resolve_url', 'resolve_url', (['url'], {}), '(url)\n', (792, 797), False, 'from django.shortcuts import resolve_url\n'), ((611, 625), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (623, 625), False, 'from django.utils import timezone\n')]
|
from django.db import models
# Create your models here.
class AddressFile(models.Model):
excel_file = models.FileField(upload_to='excel/')
|
[
"django.db.models.FileField"
] |
[((107, 143), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""excel/"""'}), "(upload_to='excel/')\n", (123, 143), False, 'from django.db import models\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import sys
import math
import random
import operator
def euclidean(x, x_p):
return ((x[0] - x_p[0]) ** 2 + (x[1] - x_p[1]) ** 2) ** 0.5
def greatest_euclidean(data, centers):
maxi = {}
for x in centers:
for x_p in data:
euc = euclidean(x, x_p)
if x_p not in maxi:
maxi[x_p] = 0
maxi[x_p] += euc
return max(maxi.items(), key=operator.itemgetter(1))[0]
# Uses a greedy approach: select a data point at random and assign it as the first center,
# then repeatedly promote the data point furthest (by summed Euclidean distance) from all
# existing centers to be the next center, until every region has a center.
# See the usage sketch after gen_centers below.
def gen_centers(M, data):
centers = []
N = len(data)
rand = random.randint(0, N - 1)
centers.append(data.pop(rand))
center = (0, 0)
classifiers = []
for i in range(M - 1):
center = greatest_euclidean(data, centers)
data.remove(center)
centers.append(center)
for x in data:
num = voronoi(x, centers)
classifiers.append(num)
return centers, classifiers
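# Usage sketch (hypothetical toy data, not part of the original script); note that
# gen_centers mutates the list passed to it, since chosen centers are popped/removed:
#   toy = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)]
#   centers, labels = gen_centers(2, toy)  # 2 centers; the 2 remaining points get labels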
# Determine the Voronoi region for a data point: classify it by assigning it to the
# closest center by Euclidean distance.
def voronoi(x, centers):
order = []
for i in range(len(centers)):
datapoint = centers[i]
# Euclidean to x
order.append((euclidean(x, datapoint), i))
order.sort()
g = order[0][1]
return g
# Generates 10,000 random datapoints with x and y values between 0 and 1
def generate_data():
data = []
for x1_ in range(100):
for x2_ in range(100):
x1 = np.random.uniform(0, 1)
x2 = np.random.uniform(0, 1)
data.append((x1, x2))
return data
def plot(M):
data = generate_data()
    centers, classifiers = gen_centers(M, data)
    unique = set(classifiers)
    print(unique)
    plt.scatter(*zip(*data), c=classifiers, cmap='rainbow')
plt.scatter(*zip(*centers), c='black')
plt.title('Greedy with {} Regions'.format(M))
plt.xlabel('x1', color='#1C2833')
plt.ylabel('x2', color='#1C2833')
plt.grid()
plt.show()
if __name__ == "__main__":
# 10 Clusters for users
regions = 10
plot(regions)
# Assumption: Users will be datapoints, users will create a voronoi region and counselors
# will be assigned to their closest associated region.
# Just using greedy. May add in branch and bound.
|
[
"numpy.random.uniform",
"matplotlib.pyplot.show",
"random.randint",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"operator.itemgetter"
] |
[((862, 886), 'random.randint', 'random.randint', (['(0)', '(N - 1)'], {}), '(0, N - 1)\n', (876, 886), False, 'import random\n'), ((2213, 2246), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x1"""'], {'color': '"""#1C2833"""'}), "('x1', color='#1C2833')\n", (2223, 2246), True, 'import matplotlib.pyplot as plt\n'), ((2251, 2284), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x2"""'], {'color': '"""#1C2833"""'}), "('x2', color='#1C2833')\n", (2261, 2284), True, 'import matplotlib.pyplot as plt\n'), ((2289, 2299), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2297, 2299), True, 'import matplotlib.pyplot as plt\n'), ((2304, 2314), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2312, 2314), True, 'import matplotlib.pyplot as plt\n'), ((1808, 1831), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1825, 1831), True, 'import numpy as np\n'), ((1849, 1872), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1866, 1872), True, 'import numpy as np\n'), ((453, 475), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (472, 475), False, 'import operator\n')]
|
import sys
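# Appears to solve the classic broken-remote problem: starting from channel 100, reach
# `num` either with +/- presses alone (abs(100 - num)) or by typing some number i whose
# digits all avoid the broken buttons in `btn` (len(str(i)) digit presses) and then
# pressing +/- a further abs(i - num) times; the minimum press count is written to stdout.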
num = int(sys.stdin.readline().strip())
n = int(sys.stdin.readline().strip())
if n:
btn = sys.stdin.readline().strip().split()
else:
btn = []
result = abs(100 - num)
for i in range(1_000_000):
for j in str(i):
if j in btn:
break
else:
result = min(result, len(str(i)) + abs(i - num))
sys.stdout.write(str(result))
|
[
"sys.stdin.readline"
] |
[((22, 42), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (40, 42), False, 'import sys\n'), ((60, 80), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (78, 80), False, 'import sys\n'), ((106, 126), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (124, 126), False, 'import sys\n')]
|
import speech_recognition
import pyttsx3
class Receptor:
def listen(self):
microphone = speech_recognition.Recognizer()
phrase = ''
with speech_recognition.Microphone() as source:
microphone.adjust_for_ambient_noise(source)
audio = microphone.listen(source)
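        # User-facing strings are Portuguese: "Ouvindo..." = "Listening...",
        # "Não entendi" = "I didn't understand"; recognition uses language='pt-BR'.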
            try:
                print("Ouvindo... ")  # "Listening..."
                phrase = microphone.recognize_google(audio, language='pt-BR')
            except speech_recognition.UnknownValueError:
                self._speak("Não entendi")  # "I didn't understand"
if (phrase):
return phrase
def _speak(self, phrase):
speaker = pyttsx3.init()
voices = speaker.getProperty('voices')
speaker.setProperty('voice', voices[53].id)
rate = speaker.getProperty('rate')
speaker.setProperty('rate', rate-80)
speaker.say(phrase)
speaker.runAndWait()
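# A minimal usage sketch (assumes a working microphone and that the PyAudio
# backend required by speech_recognition.Microphone is installed):
#
#     receptor = Receptor()
#     print(receptor.listen())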
|
[
"pyttsx3.init",
"speech_recognition.Recognizer",
"speech_recognition.Microphone"
] |
[((101, 132), 'speech_recognition.Recognizer', 'speech_recognition.Recognizer', ([], {}), '()\n', (130, 132), False, 'import speech_recognition\n'), ((636, 650), 'pyttsx3.init', 'pyttsx3.init', ([], {}), '()\n', (648, 650), False, 'import pyttsx3\n'), ((167, 198), 'speech_recognition.Microphone', 'speech_recognition.Microphone', ([], {}), '()\n', (196, 198), False, 'import speech_recognition\n')]
|
# coding: utf-8
"""
Uptrends API v4
This document describes Uptrends API version 4. This Swagger environment also lets you execute API methods directly. Please note that this is not a sandbox environment: these API methods operate directly on your actual Uptrends account. For more information, please visit https://www.uptrends.com/api. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import uptrends
from uptrends.api.alert_definition_api import AlertDefinitionApi # noqa: E501
from uptrends.rest import ApiException
class TestAlertDefinitionApi(unittest.TestCase):
"""AlertDefinitionApi unit test stubs"""
def setUp(self):
self.api = uptrends.api.alert_definition_api.AlertDefinitionApi() # noqa: E501
def tearDown(self):
pass
def test_alert_definition_add_monitor_group_to_alert_definition(self):
"""Test case for alert_definition_add_monitor_group_to_alert_definition
Adds a monitor group to the specified alert definition. # noqa: E501
"""
pass
def test_alert_definition_add_monitor_to_alert_definition(self):
"""Test case for alert_definition_add_monitor_to_alert_definition
Adds a monitor to the specified alert definition. # noqa: E501
"""
pass
def test_alert_definition_add_operator_group_to_escalation_level(self):
"""Test case for alert_definition_add_operator_group_to_escalation_level
Adds an operator group to the specified escalation level. # noqa: E501
"""
pass
def test_alert_definition_add_operator_to_escalation_level(self):
"""Test case for alert_definition_add_operator_to_escalation_level
Adds an operator to the specified escalation level. # noqa: E501
"""
pass
def test_alert_definition_create_alert_definition(self):
"""Test case for alert_definition_create_alert_definition
Creates a new alert definition. # noqa: E501
"""
pass
def test_alert_definition_delete_alert_definition(self):
"""Test case for alert_definition_delete_alert_definition
Deletes an existing alert definition. # noqa: E501
"""
pass
def test_alert_definition_get_all_alert_definitions(self):
"""Test case for alert_definition_get_all_alert_definitions
Gets a list of all alert definitions. # noqa: E501
"""
pass
def test_alert_definition_get_all_members(self):
"""Test case for alert_definition_get_all_members
Gets a list of all monitor and monitor group guids of the specified alert definition. # noqa: E501
"""
pass
def test_alert_definition_get_escalation_level(self):
"""Test case for alert_definition_get_escalation_level
Gets the escalation level information of the specified alert definition. # noqa: E501
"""
pass
def test_alert_definition_get_escalation_level_integration(self):
"""Test case for alert_definition_get_escalation_level_integration
Gets the integrations for the specified escalation level. # noqa: E501
"""
pass
def test_alert_definition_get_escalation_level_operator(self):
"""Test case for alert_definition_get_escalation_level_operator
Gets the operator and operator group guids for the specified escalation level. # noqa: E501
"""
pass
def test_alert_definition_get_specified_alert_definitions(self):
"""Test case for alert_definition_get_specified_alert_definitions
Gets the specified alert definition. # noqa: E501
"""
pass
def test_alert_definition_patch_alert_definition(self):
"""Test case for alert_definition_patch_alert_definition
Partially updates the definition of the specified alert definition. # noqa: E501
"""
pass
def test_alert_definition_put_alert_definition(self):
"""Test case for alert_definition_put_alert_definition
Updates the definition of the specified alert definition. # noqa: E501
"""
pass
def test_alert_definition_remove_monitor_from_alert_definition(self):
"""Test case for alert_definition_remove_monitor_from_alert_definition
Removes a monitor for the specified alert definition. # noqa: E501
"""
pass
def test_alert_definition_remove_monitor_group_from_alert_definition(self):
"""Test case for alert_definition_remove_monitor_group_from_alert_definition
Removes a monitor group for the specified alert definition. # noqa: E501
"""
pass
def test_alert_definition_remove_operator_from_escalation_level(self):
"""Test case for alert_definition_remove_operator_from_escalation_level
Removes an operator for the specified escalation level. # noqa: E501
"""
pass
def test_alert_definition_remove_operator_group_from_escalation_level(self):
"""Test case for alert_definition_remove_operator_group_from_escalation_level
Removes an operator group for the specified escalation level. # noqa: E501
"""
pass
def test_alert_definition_update_integration_for_escalation_with_patch(self):
"""Test case for alert_definition_update_integration_for_escalation_with_patch
Partially updates an integration to the specified escalation level. # noqa: E501
"""
pass
def test_alert_definition_update_integration_for_escalation_with_put(self):
"""Test case for alert_definition_update_integration_for_escalation_with_put
Updates an integration for the specified escalation level. # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"uptrends.api.alert_definition_api.AlertDefinitionApi"
] |
[((5931, 5946), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5944, 5946), False, 'import unittest\n'), ((805, 859), 'uptrends.api.alert_definition_api.AlertDefinitionApi', 'uptrends.api.alert_definition_api.AlertDefinitionApi', ([], {}), '()\n', (857, 859), False, 'import uptrends\n')]
|
from importlib import import_module
import logging
import time
import json
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
import django.db.backends.mysql.base
WRAPPED_BACKEND = import_module('django.db.backends.mysql.base')
LOGGER = logging.getLogger('django_mt')
def lower_dict(d):
new_dict = dict((k.lower(), v) for k, v in d.items())
return new_dict
class DatabaseWrapper(WRAPPED_BACKEND.DatabaseWrapper):
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.db_info = None
# self.default_db_info = None
self.default_db_info = settings.DATABASES['default']
def _cursor(self):
cursor = super(DatabaseWrapper, self)._cursor()
# create a new db object here and set it to connection and db_name
conn_params = None
if self.db_info:
LOGGER.info('--- using %s db connection ---', self.db_info.name)
# now init the connection using data from db_info and set it to cursor
conn_params = {
'ENGINE': self.db_info.engine,
'NAME': self.db_info.name,
'USER': self.db_info.user,
'PASSWORD': self.db_info.password,
'HOST': self.db_info.host,
'PORT': self.db_info.port,
# 'OPTIONS': json.loads(self.db_info.options)
'OPTIONS': {},
'AUTOCOMMIT': False
}
self.settings_dict = conn_params
updated_conn_params = self.get_connection_params()
connection = self.get_new_connection(updated_conn_params)
# self.connection = connection
return connection.cursor()
else:
LOGGER.info('--- using default db connection ---')
return cursor
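# Note: `db_info` is only read in this class and never assigned here; the original
# project presumably sets it on the active connection elsewhere (for example from
# request middleware) before a cursor is requested. A purely hypothetical sketch:
#
#     from django.db import connections
#     connections['default'].db_info = tenant_db_info  # object with engine/name/user/... attributes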
|
[
"logging.getLogger",
"importlib.import_module"
] |
[((219, 265), 'importlib.import_module', 'import_module', (['"""django.db.backends.mysql.base"""'], {}), "('django.db.backends.mysql.base')\n", (232, 265), False, 'from importlib import import_module\n'), ((276, 306), 'logging.getLogger', 'logging.getLogger', (['"""django_mt"""'], {}), "('django_mt')\n", (293, 306), False, 'import logging\n')]
|
import unittest
from day3 import Router, Path, Traveler
class TestRouter(unittest.TestCase):
def test_houses_visited_single_visitor_2(self):
input = '>'
expected = 2
path = Path()
Router.route(input, Traveler(path))
self.assertEqual(expected, path.houses_visited())
def test_houses_visited_single_visitor_4(self):
input = '^>v<'
expected = 4
path = Path()
Router.route(input, Traveler(path))
self.assertEqual(expected, path.houses_visited())
def test_houses_visited_long_single_visitor_2(self):
input = '^v^v^v^v^v'
expected = 2
path = Path()
Router.route(input, Traveler(path))
self.assertEqual(expected, path.houses_visited())
def test_houses_visited_multiple_visitors_2(self):
input = '^v'
expected = 3
path = Path()
Router.route(input, Traveler(path), Traveler(path))
self.assertEqual(expected, path.houses_visited())
def test_houses_visited_multiple_visitors_4(self):
input = '^>v<'
expected = 3
path = Path()
Router.route(input, Traveler(path), Traveler(path))
self.assertEqual(expected, path.houses_visited())
def test_houses_visited_long_multiple_visitors_2(self):
input = '^v^v^v^v^v'
expected = 11
path = Path()
Router.route(input, Traveler(path), Traveler(path))
self.assertEqual(expected, path.houses_visited())
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"day3.Traveler",
"day3.Path"
] |
[((1527, 1542), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1540, 1542), False, 'import unittest\n'), ((203, 209), 'day3.Path', 'Path', ([], {}), '()\n', (207, 209), False, 'from day3 import Router, Path, Traveler\n'), ((424, 430), 'day3.Path', 'Path', ([], {}), '()\n', (428, 430), False, 'from day3 import Router, Path, Traveler\n'), ((656, 662), 'day3.Path', 'Path', ([], {}), '()\n', (660, 662), False, 'from day3 import Router, Path, Traveler\n'), ((878, 884), 'day3.Path', 'Path', ([], {}), '()\n', (882, 884), False, 'from day3 import Router, Path, Traveler\n'), ((1118, 1124), 'day3.Path', 'Path', ([], {}), '()\n', (1122, 1124), False, 'from day3 import Router, Path, Traveler\n'), ((1370, 1376), 'day3.Path', 'Path', ([], {}), '()\n', (1374, 1376), False, 'from day3 import Router, Path, Traveler\n'), ((238, 252), 'day3.Traveler', 'Traveler', (['path'], {}), '(path)\n', (246, 252), False, 'from day3 import Router, Path, Traveler\n'), ((459, 473), 'day3.Traveler', 'Traveler', (['path'], {}), '(path)\n', (467, 473), False, 'from day3 import Router, Path, Traveler\n'), ((691, 705), 'day3.Traveler', 'Traveler', (['path'], {}), '(path)\n', (699, 705), False, 'from day3 import Router, Path, Traveler\n'), ((913, 927), 'day3.Traveler', 'Traveler', (['path'], {}), '(path)\n', (921, 927), False, 'from day3 import Router, Path, Traveler\n'), ((929, 943), 'day3.Traveler', 'Traveler', (['path'], {}), '(path)\n', (937, 943), False, 'from day3 import Router, Path, Traveler\n'), ((1153, 1167), 'day3.Traveler', 'Traveler', (['path'], {}), '(path)\n', (1161, 1167), False, 'from day3 import Router, Path, Traveler\n'), ((1169, 1183), 'day3.Traveler', 'Traveler', (['path'], {}), '(path)\n', (1177, 1183), False, 'from day3 import Router, Path, Traveler\n'), ((1405, 1419), 'day3.Traveler', 'Traveler', (['path'], {}), '(path)\n', (1413, 1419), False, 'from day3 import Router, Path, Traveler\n'), ((1421, 1435), 'day3.Traveler', 'Traveler', (['path'], {}), '(path)\n', (1429, 1435), False, 'from day3 import Router, Path, Traveler\n')]
|
# @Time : 2016/9/1 19:09
# @Author : lixintong
import unittest
import time
from uitester.case_manager.database import DBCommandLineHelper, Tag, Case, DB
class TestDataBase(unittest.TestCase):
def setUp(self):
self.db_helper = DBCommandLineHelper()
def test_operate_tag_data(self):
ms_str = str(time.time())
tag_name = "test_tag_name_" + ms_str
tag_description = "test_tag_name_" + ms_str
        tag = self.db_helper.insert_tag(tag_name, tag_description)  # insert a tag
        dst_tag = self.db_helper.query_tag_by_id(tag.id)  # look up the tag by tag.id
self.assertTrue(tag == dst_tag)
tag_list = self.db_helper.fuzzy_query_tag_by_name(tag.name)
self.assertTrue(tag in tag_list)
dst_tag = self.db_helper.query_tag_by_name(tag.name)
self.assertTrue(tag == dst_tag)
        tag_list = self.db_helper.query_tag_all()  # query all tags
        self.assertTrue(type(tag_list[0]) is Tag)
        self.db_helper.delete_tag(tag.id)  # delete the tag
        dst_tag = self.db_helper.query_tag_by_id(tag.id)  # look up the tag by tag.id again
self.assertTrue(dst_tag is None)
def test_operate_case_data(self):
ms_str = str(time.time())
tag_name = "test_tag_name_" + ms_str
tag_description = "test_tag_name_" + ms_str
        tag = self.db_helper.insert_tag(tag_name, tag_description)  # insert a tag
tags = [tag]
case_name = case_content = "test_case_name_" + ms_str
        case = self.db_helper.insert_case_with_tags(case_name, case_content, tags)  # insert a case
dst_case = self.db_helper.query_case_by_id(case.id)
self.assertTrue(case == dst_case)
dst_case = self.db_helper.query_case_by_name(True, case.name)
self.assertTrue(case == dst_case)
dst_case_list = self.db_helper.query_case_by_name(False, case.name)
self.assertTrue(case in dst_case_list)
case_list = self.db_helper.query_case_by_tag_names([tag.name])
self.assertTrue(type(case_list[0]) is Case)
# tag_name = "test_tag_name_" + str(time.time())
        # modify the case:
case = self.db_helper.query_case_by_id(case.id)
case_id = case.id
case_name = 'test_case_name_' + str(time.time())
case.name = case_name
case.content = 'test_case_name_' + str(time.time())
tags = self.db_helper.query_tag_all()
case.tags = tags
self.db_helper.update_case()
case = self.db_helper.query_case_by_id(case.id)
self.assertTrue(case.name == case_name)
tag_name = "test_tag_name_" + str(time.time())
case = self.db_helper.insert_case_with_tagnames(case.name, case.content, [tag.name], [tag_name])
self.assertTrue(type(case) is Case and case.id)
result = self.db_helper.get_table_data_by_cases_id(str(case.id))
self.assertTrue(result['case'] and result['tag'] and result['case_tag'])
self.db_helper.delete_case(case.id)
dst_case = self.db_helper.query_case_by_id(case.id)
self.assertTrue(dst_case is None)
def test_delete_tag_by_name(self):
tag_name = 'test_123'
tag = self.db_helper.insert_tag(tag_name, 'test tag')
case = self.db_helper.insert_case_with_tags('test_123', 'test case', [tag])
self.db_helper.delete_tag_by_name(tag_name)
self.assertTrue(self.db_helper.query_case_by_id(case.id) is not None)
self.assertTrue(self.db_helper.query_tag_by_name(tag_name) is None)
|
[
"time.time",
"uitester.case_manager.database.DBCommandLineHelper"
] |
[((246, 267), 'uitester.case_manager.database.DBCommandLineHelper', 'DBCommandLineHelper', ([], {}), '()\n', (265, 267), False, 'from uitester.case_manager.database import DBCommandLineHelper, Tag, Case, DB\n'), ((327, 338), 'time.time', 'time.time', ([], {}), '()\n', (336, 338), False, 'import time\n'), ((1179, 1190), 'time.time', 'time.time', ([], {}), '()\n', (1188, 1190), False, 'import time\n'), ((2206, 2217), 'time.time', 'time.time', ([], {}), '()\n', (2215, 2217), False, 'import time\n'), ((2296, 2307), 'time.time', 'time.time', ([], {}), '()\n', (2305, 2307), False, 'import time\n'), ((2564, 2575), 'time.time', 'time.time', ([], {}), '()\n', (2573, 2575), False, 'import time\n')]
|
"""
Checks that Pylint does not complain about django lazy proxy
when using gettext_lazy
"""
from django.utils.translation import gettext_lazy
gettext_lazy('{something}').format(something='lala')
|
[
"django.utils.translation.gettext_lazy"
] |
[((144, 171), 'django.utils.translation.gettext_lazy', 'gettext_lazy', (['"""{something}"""'], {}), "('{something}')\n", (156, 171), False, 'from django.utils.translation import gettext_lazy\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from dataIO import js, textfile
from crawl_trulia import trulia_urlencoder, trulia_htmlparser
from crawl_trulia.packages.crawlib.spider import spider
PATH = "test.html"
address = "22 Yew Rd"
city = "Baltimore"
zipcode = "21221"
# url = urlencoder.by_address_and_zipcode(address, zipcode)
url = trulia_urlencoder.by_address_city_and_zipcode(address, city, zipcode)
if not os.path.exists(PATH):
html = spider.get_html(url, encoding="utf-8")
textfile.write(html, PATH)
html = textfile.read(PATH)
data = trulia_htmlparser.get_house_detail(html)
js.pprint(data)
|
[
"crawl_trulia.trulia_urlencoder.by_address_city_and_zipcode",
"dataIO.textfile.read",
"os.path.exists",
"crawl_trulia.trulia_htmlparser.get_house_detail",
"crawl_trulia.packages.crawlib.spider.spider.get_html",
"dataIO.textfile.write",
"dataIO.js.pprint"
] |
[((354, 423), 'crawl_trulia.trulia_urlencoder.by_address_city_and_zipcode', 'trulia_urlencoder.by_address_city_and_zipcode', (['address', 'city', 'zipcode'], {}), '(address, city, zipcode)\n', (399, 423), False, 'from crawl_trulia import trulia_urlencoder, trulia_htmlparser\n'), ((543, 562), 'dataIO.textfile.read', 'textfile.read', (['PATH'], {}), '(PATH)\n', (556, 562), False, 'from dataIO import js, textfile\n'), ((570, 610), 'crawl_trulia.trulia_htmlparser.get_house_detail', 'trulia_htmlparser.get_house_detail', (['html'], {}), '(html)\n', (604, 610), False, 'from crawl_trulia import trulia_urlencoder, trulia_htmlparser\n'), ((611, 626), 'dataIO.js.pprint', 'js.pprint', (['data'], {}), '(data)\n', (620, 626), False, 'from dataIO import js, textfile\n'), ((432, 452), 'os.path.exists', 'os.path.exists', (['PATH'], {}), '(PATH)\n', (446, 452), False, 'import os\n'), ((465, 503), 'crawl_trulia.packages.crawlib.spider.spider.get_html', 'spider.get_html', (['url'], {'encoding': '"""utf-8"""'}), "(url, encoding='utf-8')\n", (480, 503), False, 'from crawl_trulia.packages.crawlib.spider import spider\n'), ((508, 534), 'dataIO.textfile.write', 'textfile.write', (['html', 'PATH'], {}), '(html, PATH)\n', (522, 534), False, 'from dataIO import js, textfile\n')]
|
import os
from setuptools import Extension, setup
try:
from Cython.Build import cythonize
except ImportError:
cythonize = None
# Only build options belong on Extension; the package metadata is passed to
# setup() below (Extension does not accept keywords like version or classifiers).
extensions = [
    Extension(
        name="pvrtc_decoder",
        sources=[
            "pvrtc_decoder.pyx",
            'src/PVRTDecompress.cpp',
        ],
        language="c++",
        include_dirs=[
            "src"
        ],
    )
]
if cythonize:
    extensions = cythonize(extensions)
setup(
    name="pvrtc_decoder",
    version="1.0.2",
    author="K0lb3",
    author_email="",
    description="A PVRTC decoder for PIL",
    long_description=open('README.md', 'rt', encoding='utf8').read(),
    long_description_content_type="text/markdown",
    url="https://github.com/K0lb3/pvrtc_decoder",
    download_url="https://github.com/K0lb3/pvrtc_decoder/tarball/master",
    keywords=['PVRTC', 'PVRT', 'decoder', "PIL", "Pillow", "texture"],
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Multimedia :: Graphics",
    ],
    install_requires=[
        "cython"
    ],
    ext_modules=extensions,
)
|
[
"Cython.Build.cythonize",
"setuptools.setup"
] |
[((1195, 1224), 'setuptools.setup', 'setup', ([], {'ext_modules': 'extensions'}), '(ext_modules=extensions)\n', (1200, 1224), False, 'from setuptools import Extension, setup\n'), ((1172, 1193), 'Cython.Build.cythonize', 'cythonize', (['extensions'], {}), '(extensions)\n', (1181, 1193), False, 'from Cython.Build import cythonize\n')]
|
# func.py
import numpy as np
from numba import njit, jit, prange
#------------------------ Distance Functions -----------------------#
def corr_dist(A):
return 1 - np.corrcoef(A)
def abs_diff(A):
target_matrix = np.zeros((len(A), len(A)))
mat_dim = target_matrix.shape[0]
for r in range(mat_dim):
for c in range(r, mat_dim):
target_matrix[r,c] = np.absolute(np.subtract(A[r], A[c]))
target_matrix[c,r] = target_matrix[r,c]
return target_matrix
def cond_diff(A):
target_matrix = np.ones((len(A), len(A)), dtype = bool)
mat_dim = target_matrix.shape[0]
for r in range(mat_dim):
for c in range(r, mat_dim):
target_matrix[r,c] = (A[r] == A[c])
target_matrix[c,r] = target_matrix[r,c]
return target_matrix
def len_diff(A):
target_matrix = np.ones((len(A), len(A)), dtype = int)
mat_dim = target_matrix.shape[0]
for r in range(mat_dim):
for c in range(r, mat_dim):
target_matrix[r,c] = np.absolute(np.subtract(len(A[r]), len(A[c])))
target_matrix[c,r] = target_matrix[r,c]
return target_matrix
def levenshtein_dist(A):
target_matrix = np.ones((len(A), len(A)), dtype = int)
mat_dim = target_matrix.shape[0]
for r in range(mat_dim):
for c in range(r, mat_dim):
target_matrix[r,c] = levenshtein(A[r], A[c])
target_matrix[c,r] = target_matrix[r,c]
return target_matrix
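# `squared_dist` and `matrix_iteration` are required by weighted_euclidian below
# but are not defined anywhere in this file; minimal reconstructions are given
# here, assuming they follow the same pairwise, symmetric-matrix pattern as
# abs_diff and cond_diff above.
def squared_dist(a, b):
    return (a - b) ** 2
def matrix_iteration(arr, target_matrix, dist_fun):
    mat_dim = target_matrix.shape[0]
    for r in range(mat_dim):
        for c in range(r, mat_dim):
            target_matrix[r, c] = dist_fun(arr[r], arr[c])
            target_matrix[c, r] = target_matrix[r, c]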
def weighted_euclidian(A, weights):
matrices = []
for arr in A:
mat = np.zeros((len(arr), len(arr)))
matrix_iteration(arr, mat, squared_dist)
matrices.append(mat)
weighted_dist = np.zeros((len(arr), len(arr)))
for ind in range(len(weights)):
weighted_dist = weighted_dist + weights[ind] * matrices[ind]
return np.sqrt(weighted_dist)
#------------------------ Transform Functions -----------------------#
def corrcoef_z_transform(A):
A = np.subtract(1, A)
results = np.empty(len(A), dtype = A.dtype)
quick_z_transform(A, results)
return results
def invert_corrcoef(A):
return np.subtract(1, A)
def z_transform(A):
results = np.empty(len(A), dtype = A.dtype)
quick_z_transform(A, results)
return results
@njit(parallel = True)
def quick_z_transform(A, results):
for i in prange(len(A)):
results[i] = np.log((1+A[i])/(1-A[i]))/2
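# (quick_z_transform is the element-wise Fisher z-transform, arctanh(A[i]),
# parallelised over the array with numba's prange.)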
#------------------------ Other Functions -----------------------#
def levenshtein(seq1, seq2):
size_x = len(seq1) + 1
size_y = len(seq2) + 1
matrix = np.zeros((size_x, size_y))
for x in range(size_x):
matrix [x, 0] = x
for y in range(size_y):
matrix [0, y] = y
for x in range(1, size_x):
for y in range(1, size_y):
if seq1[x-1] == seq2[y-1]:
matrix [x,y] = min(
matrix[x-1, y] + 1,
matrix[x-1, y-1],
matrix[x, y-1] + 1
)
else:
matrix [x,y] = min(
matrix[x-1,y] + 1,
matrix[x-1,y-1] + 1,
matrix[x,y-1] + 1
)
return (matrix[size_x - 1, size_y - 1])
|
[
"numpy.subtract",
"numpy.log",
"numpy.corrcoef",
"numba.njit",
"numpy.zeros",
"numpy.sqrt"
] |
[((2052, 2071), 'numba.njit', 'njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (2056, 2071), False, 'from numba import njit, jit, prange\n'), ((1644, 1666), 'numpy.sqrt', 'np.sqrt', (['weighted_dist'], {}), '(weighted_dist)\n', (1651, 1666), True, 'import numpy as np\n'), ((1776, 1793), 'numpy.subtract', 'np.subtract', (['(1)', 'A'], {}), '(1, A)\n', (1787, 1793), True, 'import numpy as np\n'), ((1919, 1936), 'numpy.subtract', 'np.subtract', (['(1)', 'A'], {}), '(1, A)\n', (1930, 1936), True, 'import numpy as np\n'), ((2346, 2372), 'numpy.zeros', 'np.zeros', (['(size_x, size_y)'], {}), '((size_x, size_y))\n', (2354, 2372), True, 'import numpy as np\n'), ((170, 184), 'numpy.corrcoef', 'np.corrcoef', (['A'], {}), '(A)\n', (181, 184), True, 'import numpy as np\n'), ((2150, 2181), 'numpy.log', 'np.log', (['((1 + A[i]) / (1 - A[i]))'], {}), '((1 + A[i]) / (1 - A[i]))\n', (2156, 2181), True, 'import numpy as np\n'), ((373, 396), 'numpy.subtract', 'np.subtract', (['A[r]', 'A[c]'], {}), '(A[r], A[c])\n', (384, 396), True, 'import numpy as np\n')]
|
from django.urls import path, include
from django.views.generic import TemplateView
from rest_framework.schemas import get_schema_view
from rest_framework.routers import DefaultRouter
from . import views
from scraping.xe import PropertyType, Xe
router = DefaultRouter()
router.register(r'load_config', views.LoadConfigView)
router.register(r'data_loads', views.DataLoadView)
urlpatterns = [
path("", include(router.urls)),
]
|
[
"rest_framework.routers.DefaultRouter",
"django.urls.include"
] |
[((256, 271), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (269, 271), False, 'from rest_framework.routers import DefaultRouter\n'), ((408, 428), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (415, 428), False, 'from django.urls import path, include\n')]
|
import json
import os
import argparse
import re
from comment_getter import CommentGetter
from analyzer import Analyzer
from settings_loader import SettingsLoader
DEFAULT_SETTINGS_JSON_PATH = "default_settings.json"
YOUTUBE_VIDEO_ID_PATTERN = r"\?v=([^&]+)"
def get_timed_link(video_id, sec):
return f"https://www.youtube.com/watch?v={video_id}&t={sec}s"
def get_video_id(text):
m = re.search(YOUTUBE_VIDEO_ID_PATTERN, text)
if m is not None:
return m.group(1)
else:
return text
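# For illustration (hypothetical video ID):
#   get_video_id("https://www.youtube.com/watch?v=abc123&feature=share") -> "abc123"
#   get_timed_link("abc123", 90) -> "https://www.youtube.com/watch?v=abc123&t=90s"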
if __name__ == "__main__":
parser = argparse.ArgumentParser()
    parser.add_argument('video_id', help="YouTube video ID or video URL")
    parser.add_argument('--settings', nargs='*', help="Settings files; when several are given they are loaded in that order.")
    parser.add_argument('--force_download', action='store_true', help="Re-download even if the comment data already exists")
    parser.add_argument('--gen_default_settings', action='store_true', help="Write out the default settings file")
    parser.add_argument('--debug', action='store_true', help="Show debug messages")
args = parser.parse_args()
target_video_id = get_video_id(args.video_id)
settings = SettingsLoader().get_init_settings()
to_loads = []
if os.path.exists(DEFAULT_SETTINGS_JSON_PATH):
to_loads.append(DEFAULT_SETTINGS_JSON_PATH)
if args.settings is not None and len(args.settings) > 0:
to_loads.extend(args.settings)
settings = SettingsLoader().load(to_loads)
if args.force_download:
settings['force_download'] = args.force_download
if args.debug:
settings['debug'] = args.debug
if args.gen_default_settings:
settings = SettingsLoader().get_init_settings()
with open(DEFAULT_SETTINGS_JSON_PATH, mode='w', encoding='utf-8') as fh:
json.dump(settings, fh, indent=4, ensure_ascii=False)
if target_video_id == "" or target_video_id is None:
print("video_idは必須です!")
exit(0)
comment_data_path = os.path.join(settings['comment_data_directory'], f"comment_data-{target_video_id}.json")
comment_data = {}
if os.path.exists(comment_data_path) and not settings['force_download']:
with open(comment_data_path, mode='r', encoding='utf-8') as fh:
comment_data = json.load(fh)
print(f"Load Comment Data File: {comment_data_path}")
else:
print(f"Start download comment data. id={target_video_id}")
comment_data = CommentGetter(settings).get_comment_data(target_video_id)
with open(comment_data_path, mode='w', encoding="utf-8") as fh:
json.dump(comment_data, fh, indent=4, ensure_ascii=False)
print("Finish download.")
analyzer = Analyzer(settings)
before_secs = settings['link_before_secs']
print("### Total ###")
i = 1
for dt, score in analyzer.analyze(comment_data):
print(f"{i}. {dt} - {score}\n {get_timed_link(target_video_id, dt.seconds-before_secs)}")
i += 1
|
[
"json.dump",
"json.load",
"argparse.ArgumentParser",
"os.path.exists",
"comment_getter.CommentGetter",
"analyzer.Analyzer",
"settings_loader.SettingsLoader",
"re.search",
"os.path.join"
] |
[((396, 437), 're.search', 're.search', (['YOUTUBE_VIDEO_ID_PATTERN', 'text'], {}), '(YOUTUBE_VIDEO_ID_PATTERN, text)\n', (405, 437), False, 'import re\n'), ((558, 583), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (581, 583), False, 'import argparse\n'), ((1153, 1195), 'os.path.exists', 'os.path.exists', (['DEFAULT_SETTINGS_JSON_PATH'], {}), '(DEFAULT_SETTINGS_JSON_PATH)\n', (1167, 1195), False, 'import os\n'), ((1911, 2003), 'os.path.join', 'os.path.join', (["settings['comment_data_directory']", 'f"""comment_data-{target_video_id}.json"""'], {}), "(settings['comment_data_directory'],\n f'comment_data-{target_video_id}.json')\n", (1923, 2003), False, 'import os\n'), ((2625, 2643), 'analyzer.Analyzer', 'Analyzer', (['settings'], {}), '(settings)\n', (2633, 2643), False, 'from analyzer import Analyzer\n'), ((2029, 2062), 'os.path.exists', 'os.path.exists', (['comment_data_path'], {}), '(comment_data_path)\n', (2043, 2062), False, 'import os\n'), ((1091, 1107), 'settings_loader.SettingsLoader', 'SettingsLoader', ([], {}), '()\n', (1105, 1107), False, 'from settings_loader import SettingsLoader\n'), ((1364, 1380), 'settings_loader.SettingsLoader', 'SettingsLoader', ([], {}), '()\n', (1378, 1380), False, 'from settings_loader import SettingsLoader\n'), ((1727, 1780), 'json.dump', 'json.dump', (['settings', 'fh'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(settings, fh, indent=4, ensure_ascii=False)\n', (1736, 1780), False, 'import json\n'), ((2198, 2211), 'json.load', 'json.load', (['fh'], {}), '(fh)\n', (2207, 2211), False, 'import json\n'), ((2517, 2574), 'json.dump', 'json.dump', (['comment_data', 'fh'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(comment_data, fh, indent=4, ensure_ascii=False)\n', (2526, 2574), False, 'import json\n'), ((1597, 1613), 'settings_loader.SettingsLoader', 'SettingsLoader', ([], {}), '()\n', (1611, 1613), False, 'from settings_loader import SettingsLoader\n'), ((2375, 2398), 'comment_getter.CommentGetter', 'CommentGetter', (['settings'], {}), '(settings)\n', (2388, 2398), False, 'from comment_getter import CommentGetter\n')]
|
from . import generator
import os
MD5 = generator.HASH_MD5
SHA1 = generator.HASH_SHA1
SHA224 = generator.HASH_SHA224
SHA256 = generator.HASH_SHA256
SHA384 = generator.HASH_SHA384
SHA512 = generator.HASH_SHA512
class Avatar:
DEFAULT_OUTPUT_PATH = os.path.join(os.getcwd(), "output/")
DEFAULT_FILENAME = "pagan"
DEFAULT_EXTENSION = "png"
ALLOWED_EXTENSIONS = ["bmp", "gif", "png", "tiff"]
DEFAULT_HASHFUN = generator.HASH_MD5
def __init__(self, inpt, hashfun=DEFAULT_HASHFUN):
"""Initialize the avatar and creates the image."""
self.img = self.__create_image(inpt, hashfun)
def __create_image(self, inpt, hashfun):
"""Creates the avatar based on the input and
the chosen hash function."""
if hashfun not in generator.HASHES.keys():
print(
"Unknown or unsupported hash function. Using default: %s"
% self.DEFAULT_HASHFUN
)
algo = self.DEFAULT_HASHFUN
else:
algo = hashfun
return generator.generate(inpt, algo)
def show(self):
"""Shows a preview of the avatar in an external
image viewer."""
self.img.show()
def change(self, inpt, hashfun=DEFAULT_HASHFUN):
"""Change the avatar by providing a new input.
Uses the standard hash function if no one is given."""
self.img = self.__create_image(inpt, hashfun)
def save(
self,
path=DEFAULT_OUTPUT_PATH,
filename=DEFAULT_FILENAME,
extension=DEFAULT_EXTENSION,
):
"""Saves a avatar under the given output path to
a given filename. The file ending ".png" is appended
automatically. If the path does not exist, it will be
created. When no parameters are omitted, a default path
and/or filename will be used."""
if extension not in self.ALLOWED_EXTENSIONS:
raise Exception(
'Extension "%s" is not supported. Supported extensions are: %s'
% (extension, ", ".join(self.ALLOWED_EXTENSIONS))
)
if not os.path.exists(path):
os.makedirs(path)
if extension.startswith("."):
extension = extension[1:]
if filename[-len(extension) :] == extension:
filename = filename[: -len(extension) - 1]
filepath = "%s%s.%s" % (path, filename, extension)
filepath = os.path.join(path, "%s.%s" % (filename, extension))
self.img.save(filepath, extension.upper())
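# A minimal usage sketch (the exact import path depends on how the package that
# contains this module is installed):
#
#     avatar = Avatar("some input string", hashfun=SHA256)
#     avatar.show()
#     avatar.save(filename="example", extension="png")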
|
[
"os.getcwd",
"os.path.join",
"os.makedirs",
"os.path.exists"
] |
[((266, 277), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (275, 277), False, 'import os\n'), ((2426, 2477), 'os.path.join', 'os.path.join', (['path', "('%s.%s' % (filename, extension))"], {}), "(path, '%s.%s' % (filename, extension))\n", (2438, 2477), False, 'import os\n'), ((2112, 2132), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2126, 2132), False, 'import os\n'), ((2146, 2163), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2157, 2163), False, 'import os\n')]
|
from app.main import bp
from app import db
from flask import render_template, url_for, flash, redirect, request, current_app
from flask_login import login_required, fresh_login_required, current_user
from app.main.forms import UpdateProfileForm, StatusForm, MessageForm
from app.models import Users, Status, Messages, Blogs
from datetime import datetime
@bp.route('/', methods=['GET', 'POST'])
@bp.route('/index', methods=['GET', 'POST'])
def index():
form = StatusForm()
# if request.method == 'POST' and form.submit():
if form.validate_on_submit():
status = Status(body=form.status.data, author=current_user)
db.session.add(status)
db.session.commit()
flash('Your status has been updated!')
return redirect(url_for('main.index'))
# verbose version of following line of operating code
# if current_user.is_authenticated:
# user_status = Status.query.filter_by(user_id=current_user.id).order_by(Status.timestamp.desc())
# else:
# user_status = Status.query.order_by(Status.timestamp.desc())
if current_user.is_authenticated:
post_page = request.args.get('post_page', 1, type=int)
shown_posts = current_user.related_posts().paginate(
post_page, current_app.config['POSTS_PER_PAGE'], False, max_per_page=10)
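        # Fall back to the global feed when the user's personalised feed is empty
        # (for example, a user who follows no one and has no posts yet).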
if not shown_posts.items:
shown_posts = Status.query.order_by(Status.timestamp.desc()).paginate(
post_page, current_app.config['POSTS_PER_PAGE'], False, max_per_page=10)
post_next_url = url_for('main.index', post_page=shown_posts.next_num) if shown_posts.has_next else None
post_prev_url = url_for('main.index', post_page=shown_posts.prev_num) if shown_posts.has_prev else None
else:
post_page = request.args.get('post_page', 1, type=int)
shown_posts = Status.query.order_by(Status.timestamp.desc()).paginate(
post_page, current_app.config['POSTS_PER_PAGE'], False, max_per_page=10)
post_next_url = url_for('main.index', post_page=shown_posts.next_num) if shown_posts.has_next else None
post_prev_url = url_for('main.index', post_page=shown_posts.prev_num) if shown_posts.has_prev else None
blog_page = request.args.get('blog_page', 1, type=int)
blogs = Blogs.query.order_by(Blogs.timestamp.desc()).paginate(
blog_page, current_app.config['POSTS_PER_PAGE'], False, max_per_page=10)
blog_next_url = url_for('main.index', blog_page=blogs.next_num) if blogs.has_next else None
blog_prev_url = url_for('main.index', blog_page=blogs.prev_num) if blogs.has_prev else None
return render_template('main/index.html', title='Welcome to the Blog!', form=form,
shown_posts=shown_posts.items, post_next_url=post_next_url, post_prev_url=post_prev_url,
blogs=blogs.items, blog_next_url=blog_next_url, blog_prev_url=blog_prev_url)
@bp.route('/user/<username>')
@login_required
def profile(username):
user = Users.query.filter_by(username=username).first_or_404()
status_page = request.args.get('status_page', 1, type=int)
statuses = user.status.order_by(Status.timestamp.desc()).paginate(
status_page, current_app.config["POSTS_PER_PAGE"], False)
status_next_url = url_for('main.profile', username=username,
status_page=statuses.next_num) if statuses.has_next else None
status_prev_url = url_for('main.profile', username=username,
status_page=statuses.prev_num) if statuses.has_prev else None
blog_page = request.args.get('blog_page', 1, type=int)
blogs = Blogs.query.filter_by(user_id=user.id).paginate(
blog_page, current_app.config['POSTS_PER_PAGE'], False)
blog_next_url = url_for('main.profile', username=username,
blog_page=blogs.next_num) if blogs.has_next else None
    blog_prev_url = url_for('main.profile', username=username,
                            blog_page=blogs.prev_num) if blogs.has_prev else None
return render_template('main/profile.html', title='Profile', user=user,
statuses=statuses.items, status_next_url=status_next_url,
status_prev_url=status_prev_url,
blogs=blogs.items, blog_next_url=blog_next_url, blog_prev_url=blog_prev_url)
@bp.route('/user/<username>/update', methods=['GET', 'POST'])
@fresh_login_required
def update_profile(username):
user = Users.query.filter_by(username=username).first()
if current_user != user:
flash('This is not your profile!')
return redirect(url_for('main.index'))
form = UpdateProfileForm(obj=user, original_username=current_user.username)
if form.validate_on_submit():
form.populate_obj(user)
db.session.commit()
flash('Your profile has been updated!')
return redirect(url_for('main.profile', username=current_user.username))
return render_template('main/update_profile.html', title='Update your Profile', form=form)
@bp.route('/follow/<username>')
@login_required
def follow(username):
user = Users.query.filter_by(username=username).first()
if user is None:
flash(f'User {username} was not found.')
return redirect(url_for('main.index'))
if current_user == user:
flash('You cannot follow yourself!')
return redirect(url_for('main.index'))
current_user.follow(user)
db.session.commit()
flash(f'You are now following {username}!')
return redirect(url_for('main.profile', username=username))
@bp.route('/unfollow/<username>')
@login_required
def unfollow(username):
user = Users.query.filter_by(username=username).first()
if user is None:
flash(f'User {username} was not found.')
return redirect(url_for('main.index'))
if current_user == user:
flash('You cannot unfollow yourself!')
return redirect(url_for('main.index'))
current_user.unfollow(user)
db.session.commit()
flash(f'You are no longer following {username}.')
return redirect(url_for('main.profile', username=username))
@bp.route('/friend-request/<username>')
@login_required
def friend_request(username):
user = Users.query.filter_by(username=username).first()
if user is None:
flash(f'User {username} could not be found.')
return redirect(url_for('main.index'))
if user == current_user:
flash('You cannot send yourself a friend request!')
return redirect(url_for('main.index'))
user.add_friend_request(current_user)
db.session.commit()
flash(f'You have sent a friend request to {username}.')
return redirect(url_for('main.profile', username=username))
@bp.route('/requests/<username>', methods=['GET', 'POST'])
@login_required
def pending_requests(username):
if request.method == 'POST':
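        # The form submits either an 'accept' or a 'deny' field whose value is the
        # id of the user whose friend request is being handled.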
user = Users.query.filter_by(id=request.form.get('accept')).first() if request.form.get('accept') \
else Users.query.filter_by(id=request.form.get('deny')).first()
if user is not None and user in current_user.pending_friend_requests:
if request.form.get('accept'):
flash(f'On your way to accepting friend request from {user.username}!')
current_user.add_friend(user)
current_user.pending_friend_requests.remove(user)
db.session.commit()
return redirect(url_for('main.index'))
elif request.form.get('deny'):
flash(f'You are readying to deny a friend request from {user.username}.')
current_user.pending_friend_requests.remove(user)
db.session.commit()
return redirect(url_for('main.index'))
user = Users.query.filter_by(username=username).first()
if user is None:
flash(f'Could not find user {username}.')
return redirect(url_for('main.index'))
if user != current_user:
flash('This is not your page!')
return redirect(url_for('main.index'))
pending_friend_requests = user.pending_friend_requests.all()
return render_template('main/pending_requests.html', title='View Your Pending Requests',
user=user, requests=pending_friend_requests)
@bp.route('/unfriend/<username>')
@login_required
def unfriend(username):
user = Users.query.filter_by(username=username).first()
if user is None:
flash(f'User {username} could not be found.')
return redirect(url_for('main.index'))
if user == current_user:
flash('You cannot unfriend yourself!')
return redirect(url_for('main.index'))
current_user.unfriend(user)
db.session.commit()
flash(f'You are no longer friends with {username}.')
return redirect(url_for('main.profile', username=username))
@bp.route('/send-message/<user>', methods=['GET', 'POST'])
@login_required
def send_message(user):
user = Users.query.filter_by(username=user).first_or_404()
form = MessageForm()
if form.validate_on_submit():
message = Messages(
author=current_user,
recipient=user,
body=form.message.data)
db.session.add(message)
db.session.commit()
flash('Your message was sent.')
return redirect(url_for('main.profile', username=user.username))
return render_template('main/send_message.html', title='Send a Message',
form=form, user=user)
@bp.route('/messages')
@login_required
def messages():
current_user.message_last_read_time = datetime.utcnow()
db.session.commit()
page = request.args.get('page', 1, type=int)
messages = current_user.messages_received.order_by(
Messages.timestamp.desc()).paginate(
page, current_app.config['POSTS_PER_PAGE'], False)
next_url = url_for('main.messages', page=messages.next_num) \
if messages.has_next else None
prev_url = url_for('main.messages', page=messages.prev_num) \
if messages.has_prev else None
return render_template('main/view_messages.html', messages=messages.items,
next_url=next_url, prev_url=prev_url)
@bp.route('/<user>/user-popup')
@login_required
def user_popup(user):
user = Users.query.filter_by(username=user).first_or_404()
return render_template('user_popup.html', user=user)
|
[
"flask.flash",
"app.main.bp.route",
"flask_login.current_user.follow",
"flask.request.form.get",
"app.models.Status.timestamp.desc",
"datetime.datetime.utcnow",
"flask.url_for",
"flask_login.current_user.add_friend",
"app.main.forms.StatusForm",
"app.main.forms.UpdateProfileForm",
"app.models.Messages",
"flask_login.current_user.unfriend",
"flask.request.args.get",
"app.models.Messages.timestamp.desc",
"app.models.Blogs.timestamp.desc",
"app.models.Users.query.filter_by",
"app.db.session.commit",
"flask.render_template",
"app.main.forms.MessageForm",
"app.models.Status",
"flask_login.current_user.unfollow",
"flask_login.current_user.pending_friend_requests.remove",
"flask_login.current_user.related_posts",
"app.models.Blogs.query.filter_by",
"app.db.session.add"
] |
[((357, 395), 'app.main.bp.route', 'bp.route', (['"""/"""'], {'methods': "['GET', 'POST']"}), "('/', methods=['GET', 'POST'])\n", (365, 395), False, 'from app.main import bp\n'), ((397, 440), 'app.main.bp.route', 'bp.route', (['"""/index"""'], {'methods': "['GET', 'POST']"}), "('/index', methods=['GET', 'POST'])\n", (405, 440), False, 'from app.main import bp\n'), ((2921, 2949), 'app.main.bp.route', 'bp.route', (['"""/user/<username>"""'], {}), "('/user/<username>')\n", (2929, 2949), False, 'from app.main import bp\n'), ((4372, 4432), 'app.main.bp.route', 'bp.route', (['"""/user/<username>/update"""'], {'methods': "['GET', 'POST']"}), "('/user/<username>/update', methods=['GET', 'POST'])\n", (4380, 4432), False, 'from app.main import bp\n'), ((5065, 5095), 'app.main.bp.route', 'bp.route', (['"""/follow/<username>"""'], {}), "('/follow/<username>')\n", (5073, 5095), False, 'from app.main import bp\n'), ((5601, 5633), 'app.main.bp.route', 'bp.route', (['"""/unfollow/<username>"""'], {}), "('/unfollow/<username>')\n", (5609, 5633), False, 'from app.main import bp\n'), ((6151, 6189), 'app.main.bp.route', 'bp.route', (['"""/friend-request/<username>"""'], {}), "('/friend-request/<username>')\n", (6159, 6189), False, 'from app.main import bp\n'), ((6747, 6804), 'app.main.bp.route', 'bp.route', (['"""/requests/<username>"""'], {'methods': "['GET', 'POST']"}), "('/requests/<username>', methods=['GET', 'POST'])\n", (6755, 6804), False, 'from app.main import bp\n'), ((8299, 8331), 'app.main.bp.route', 'bp.route', (['"""/unfriend/<username>"""'], {}), "('/unfriend/<username>')\n", (8307, 8331), False, 'from app.main import bp\n'), ((8857, 8914), 'app.main.bp.route', 'bp.route', (['"""/send-message/<user>"""'], {'methods': "['GET', 'POST']"}), "('/send-message/<user>', methods=['GET', 'POST'])\n", (8865, 8914), False, 'from app.main import bp\n'), ((9504, 9525), 'app.main.bp.route', 'bp.route', (['"""/messages"""'], {}), "('/messages')\n", (9512, 9525), False, 'from app.main import bp\n'), ((10208, 10238), 'app.main.bp.route', 'bp.route', (['"""/<user>/user-popup"""'], {}), "('/<user>/user-popup')\n", (10216, 10238), False, 'from app.main import bp\n'), ((465, 477), 'app.main.forms.StatusForm', 'StatusForm', ([], {}), '()\n', (475, 477), False, 'from app.main.forms import UpdateProfileForm, StatusForm, MessageForm\n'), ((2228, 2270), 'flask.request.args.get', 'request.args.get', (['"""blog_page"""', '(1)'], {'type': 'int'}), "('blog_page', 1, type=int)\n", (2244, 2270), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((2622, 2876), 'flask.render_template', 'render_template', (['"""main/index.html"""'], {'title': '"""Welcome to the Blog!"""', 'form': 'form', 'shown_posts': 'shown_posts.items', 'post_next_url': 'post_next_url', 'post_prev_url': 'post_prev_url', 'blogs': 'blogs.items', 'blog_next_url': 'blog_next_url', 'blog_prev_url': 'blog_prev_url'}), "('main/index.html', title='Welcome to the Blog!', form=form,\n shown_posts=shown_posts.items, post_next_url=post_next_url,\n post_prev_url=post_prev_url, blogs=blogs.items, blog_next_url=\n blog_next_url, blog_prev_url=blog_prev_url)\n", (2637, 2876), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((3074, 3118), 'flask.request.args.get', 'request.args.get', (['"""status_page"""', '(1)'], {'type': 'int'}), "('status_page', 1, type=int)\n", (3090, 3118), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((3586, 3628), 
'flask.request.args.get', 'request.args.get', (['"""blog_page"""', '(1)'], {'type': 'int'}), "('blog_page', 1, type=int)\n", (3602, 3628), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((4055, 4301), 'flask.render_template', 'render_template', (['"""main/profile.html"""'], {'title': '"""Profile"""', 'user': 'user', 'statuses': 'statuses.items', 'status_next_url': 'status_next_url', 'status_prev_url': 'status_prev_url', 'blogs': 'blogs.items', 'blog_next_url': 'blog_next_url', 'blog_prev_url': 'blog_prev_url'}), "('main/profile.html', title='Profile', user=user, statuses=\n statuses.items, status_next_url=status_next_url, status_prev_url=\n status_prev_url, blogs=blogs.items, blog_next_url=blog_next_url,\n blog_prev_url=blog_prev_url)\n", (4070, 4301), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((4675, 4743), 'app.main.forms.UpdateProfileForm', 'UpdateProfileForm', ([], {'obj': 'user', 'original_username': 'current_user.username'}), '(obj=user, original_username=current_user.username)\n', (4692, 4743), False, 'from app.main.forms import UpdateProfileForm, StatusForm, MessageForm\n'), ((4978, 5065), 'flask.render_template', 'render_template', (['"""main/update_profile.html"""'], {'title': '"""Update your Profile"""', 'form': 'form'}), "('main/update_profile.html', title='Update your Profile',\n form=form)\n", (4993, 5065), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((5436, 5461), 'flask_login.current_user.follow', 'current_user.follow', (['user'], {}), '(user)\n', (5455, 5461), False, 'from flask_login import login_required, fresh_login_required, current_user\n'), ((5466, 5485), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (5483, 5485), False, 'from app import db\n'), ((5490, 5533), 'flask.flash', 'flash', (['f"""You are now following {username}!"""'], {}), "(f'You are now following {username}!')\n", (5495, 5533), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((5978, 6005), 'flask_login.current_user.unfollow', 'current_user.unfollow', (['user'], {}), '(user)\n', (5999, 6005), False, 'from flask_login import login_required, fresh_login_required, current_user\n'), ((6010, 6029), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (6027, 6029), False, 'from app import db\n'), ((6034, 6083), 'flask.flash', 'flash', (['f"""You are no longer following {username}."""'], {}), "(f'You are no longer following {username}.')\n", (6039, 6083), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((6600, 6619), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (6617, 6619), False, 'from app import db\n'), ((6624, 6679), 'flask.flash', 'flash', (['f"""You have sent a friend request to {username}."""'], {}), "(f'You have sent a friend request to {username}.')\n", (6629, 6679), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((8142, 8273), 'flask.render_template', 'render_template', (['"""main/pending_requests.html"""'], {'title': '"""View Your Pending Requests"""', 'user': 'user', 'requests': 'pending_friend_requests'}), "('main/pending_requests.html', title=\n 'View Your Pending Requests', user=user, requests=pending_friend_requests)\n", (8157, 8273), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((8681, 8708), 
'flask_login.current_user.unfriend', 'current_user.unfriend', (['user'], {}), '(user)\n', (8702, 8708), False, 'from flask_login import login_required, fresh_login_required, current_user\n'), ((8713, 8732), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (8730, 8732), False, 'from app import db\n'), ((8737, 8789), 'flask.flash', 'flash', (['f"""You are no longer friends with {username}."""'], {}), "(f'You are no longer friends with {username}.')\n", (8742, 8789), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((9029, 9042), 'app.main.forms.MessageForm', 'MessageForm', ([], {}), '()\n', (9040, 9042), False, 'from app.main.forms import UpdateProfileForm, StatusForm, MessageForm\n'), ((9386, 9477), 'flask.render_template', 'render_template', (['"""main/send_message.html"""'], {'title': '"""Send a Message"""', 'form': 'form', 'user': 'user'}), "('main/send_message.html', title='Send a Message', form=form,\n user=user)\n", (9401, 9477), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((9600, 9617), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (9615, 9617), False, 'from datetime import datetime\n'), ((9622, 9641), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (9639, 9641), False, 'from app import db\n'), ((9653, 9690), 'flask.request.args.get', 'request.args.get', (['"""page"""', '(1)'], {'type': 'int'}), "('page', 1, type=int)\n", (9669, 9690), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((10072, 10181), 'flask.render_template', 'render_template', (['"""main/view_messages.html"""'], {'messages': 'messages.items', 'next_url': 'next_url', 'prev_url': 'prev_url'}), "('main/view_messages.html', messages=messages.items,\n next_url=next_url, prev_url=prev_url)\n", (10087, 10181), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((10351, 10396), 'flask.render_template', 'render_template', (['"""user_popup.html"""'], {'user': 'user'}), "('user_popup.html', user=user)\n", (10366, 10396), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((582, 632), 'app.models.Status', 'Status', ([], {'body': 'form.status.data', 'author': 'current_user'}), '(body=form.status.data, author=current_user)\n', (588, 632), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((641, 663), 'app.db.session.add', 'db.session.add', (['status'], {}), '(status)\n', (655, 663), False, 'from app import db\n'), ((672, 691), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (689, 691), False, 'from app import db\n'), ((700, 738), 'flask.flash', 'flash', (['"""Your status has been updated!"""'], {}), "('Your status has been updated!')\n", (705, 738), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((1132, 1174), 'flask.request.args.get', 'request.args.get', (['"""post_page"""', '(1)'], {'type': 'int'}), "('post_page', 1, type=int)\n", (1148, 1174), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((1781, 1823), 'flask.request.args.get', 'request.args.get', (['"""post_page"""', '(1)'], {'type': 'int'}), "('post_page', 1, type=int)\n", (1797, 1823), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((2439, 2486), 'flask.url_for', 'url_for', (['"""main.index"""'], {'blog_page': 
'blogs.next_num'}), "('main.index', blog_page=blogs.next_num)\n", (2446, 2486), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((2535, 2582), 'flask.url_for', 'url_for', (['"""main.index"""'], {'blog_page': 'blogs.prev_num'}), "('main.index', blog_page=blogs.prev_num)\n", (2542, 2582), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((3278, 3351), 'flask.url_for', 'url_for', (['"""main.profile"""'], {'username': 'username', 'status_page': 'statuses.next_num'}), "('main.profile', username=username, status_page=statuses.next_num)\n", (3285, 3351), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((3435, 3508), 'flask.url_for', 'url_for', (['"""main.profile"""'], {'username': 'username', 'status_page': 'statuses.prev_num'}), "('main.profile', username=username, status_page=statuses.prev_num)\n", (3442, 3508), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((3774, 3842), 'flask.url_for', 'url_for', (['"""main.profile"""'], {'username': 'username', 'blog_page': 'blogs.next_num'}), "('main.profile', username=username, blog_page=blogs.next_num)\n", (3781, 3842), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((3919, 3987), 'flask.url_for', 'url_for', (['"""main.profile"""'], {'username': 'username', 'blog_page': 'blogs.next_num'}), "('main.profile', username=username, blog_page=blogs.next_num)\n", (3926, 3987), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((4582, 4616), 'flask.flash', 'flash', (['"""This is not your profile!"""'], {}), "('This is not your profile!')\n", (4587, 4616), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((4818, 4837), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4835, 4837), False, 'from app import db\n'), ((4846, 4885), 'flask.flash', 'flash', (['"""Your profile has been updated!"""'], {}), "('Your profile has been updated!')\n", (4851, 4885), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((5223, 5263), 'flask.flash', 'flash', (['f"""User {username} was not found."""'], {}), "(f'User {username} was not found.')\n", (5228, 5263), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((5348, 5384), 'flask.flash', 'flash', (['"""You cannot follow yourself!"""'], {}), "('You cannot follow yourself!')\n", (5353, 5384), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((5554, 5596), 'flask.url_for', 'url_for', (['"""main.profile"""'], {'username': 'username'}), "('main.profile', username=username)\n", (5561, 5596), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((5763, 5803), 'flask.flash', 'flash', (['f"""User {username} was not found."""'], {}), "(f'User {username} was not found.')\n", (5768, 5803), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((5888, 5926), 'flask.flash', 'flash', (['"""You cannot unfollow yourself!"""'], {}), "('You cannot unfollow yourself!')\n", (5893, 5926), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((6104, 6146), 'flask.url_for', 'url_for', (['"""main.profile"""'], {'username': 'username'}), "('main.profile', 
username=username)\n", (6111, 6146), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((6325, 6370), 'flask.flash', 'flash', (['f"""User {username} could not be found."""'], {}), "(f'User {username} could not be found.')\n", (6330, 6370), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((6455, 6506), 'flask.flash', 'flash', (['"""You cannot send yourself a friend request!"""'], {}), "('You cannot send yourself a friend request!')\n", (6460, 6506), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((6700, 6742), 'flask.url_for', 'url_for', (['"""main.profile"""'], {'username': 'username'}), "('main.profile', username=username)\n", (6707, 6742), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((7861, 7902), 'flask.flash', 'flash', (['f"""Could not find user {username}."""'], {}), "(f'Could not find user {username}.')\n", (7866, 7902), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((7987, 8018), 'flask.flash', 'flash', (['"""This is not your page!"""'], {}), "('This is not your page!')\n", (7992, 8018), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((8461, 8506), 'flask.flash', 'flash', (['f"""User {username} could not be found."""'], {}), "(f'User {username} could not be found.')\n", (8466, 8506), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((8591, 8629), 'flask.flash', 'flash', (['"""You cannot unfriend yourself!"""'], {}), "('You cannot unfriend yourself!')\n", (8596, 8629), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((8810, 8852), 'flask.url_for', 'url_for', (['"""main.profile"""'], {'username': 'username'}), "('main.profile', username=username)\n", (8817, 8852), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((9095, 9164), 'app.models.Messages', 'Messages', ([], {'author': 'current_user', 'recipient': 'user', 'body': 'form.message.data'}), '(author=current_user, recipient=user, body=form.message.data)\n', (9103, 9164), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((9210, 9233), 'app.db.session.add', 'db.session.add', (['message'], {}), '(message)\n', (9224, 9233), False, 'from app import db\n'), ((9242, 9261), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (9259, 9261), False, 'from app import db\n'), ((9270, 9301), 'flask.flash', 'flash', (['"""Your message was sent."""'], {}), "('Your message was sent.')\n", (9275, 9301), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((9866, 9914), 'flask.url_for', 'url_for', (['"""main.messages"""'], {'page': 'messages.next_num'}), "('main.messages', page=messages.next_num)\n", (9873, 9914), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((9971, 10019), 'flask.url_for', 'url_for', (['"""main.messages"""'], {'page': 'messages.prev_num'}), "('main.messages', page=messages.prev_num)\n", (9978, 10019), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((763, 784), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (770, 784), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((1551, 
1604), 'flask.url_for', 'url_for', (['"""main.index"""'], {'post_page': 'shown_posts.next_num'}), "('main.index', post_page=shown_posts.next_num)\n", (1558, 1604), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((1663, 1716), 'flask.url_for', 'url_for', (['"""main.index"""'], {'post_page': 'shown_posts.prev_num'}), "('main.index', post_page=shown_posts.prev_num)\n", (1670, 1716), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((2012, 2065), 'flask.url_for', 'url_for', (['"""main.index"""'], {'post_page': 'shown_posts.next_num'}), "('main.index', post_page=shown_posts.next_num)\n", (2019, 2065), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((2124, 2177), 'flask.url_for', 'url_for', (['"""main.index"""'], {'post_page': 'shown_posts.prev_num'}), "('main.index', post_page=shown_posts.prev_num)\n", (2131, 2177), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((3000, 3040), 'app.models.Users.query.filter_by', 'Users.query.filter_by', ([], {'username': 'username'}), '(username=username)\n', (3021, 3040), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((3641, 3679), 'app.models.Blogs.query.filter_by', 'Blogs.query.filter_by', ([], {'user_id': 'user.id'}), '(user_id=user.id)\n', (3662, 3679), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((4496, 4536), 'app.models.Users.query.filter_by', 'Users.query.filter_by', ([], {'username': 'username'}), '(username=username)\n', (4517, 4536), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((4641, 4662), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (4648, 4662), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((4910, 4965), 'flask.url_for', 'url_for', (['"""main.profile"""'], {'username': 'current_user.username'}), "('main.profile', username=current_user.username)\n", (4917, 4965), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((5145, 5185), 'app.models.Users.query.filter_by', 'Users.query.filter_by', ([], {'username': 'username'}), '(username=username)\n', (5166, 5185), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((5288, 5309), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (5295, 5309), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((5409, 5430), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (5416, 5430), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((5685, 5725), 'app.models.Users.query.filter_by', 'Users.query.filter_by', ([], {'username': 'username'}), '(username=username)\n', (5706, 5725), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((5828, 5849), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (5835, 5849), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((5951, 5972), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (5958, 5972), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((6247, 6287), 'app.models.Users.query.filter_by', 'Users.query.filter_by', ([], {'username': 'username'}), '(username=username)\n', (6268, 6287), False, 
'from app.models import Users, Status, Messages, Blogs\n'), ((6395, 6416), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (6402, 6416), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((6531, 6552), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (6538, 6552), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((6965, 6991), 'flask.request.form.get', 'request.form.get', (['"""accept"""'], {}), "('accept')\n", (6981, 6991), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((7163, 7189), 'flask.request.form.get', 'request.form.get', (['"""accept"""'], {}), "('accept')\n", (7179, 7189), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((7783, 7823), 'app.models.Users.query.filter_by', 'Users.query.filter_by', ([], {'username': 'username'}), '(username=username)\n', (7804, 7823), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((7927, 7948), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (7934, 7948), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((8043, 8064), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (8050, 8064), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((8383, 8423), 'app.models.Users.query.filter_by', 'Users.query.filter_by', ([], {'username': 'username'}), '(username=username)\n', (8404, 8423), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((8531, 8552), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (8538, 8552), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((8654, 8675), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (8661, 8675), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((8966, 9002), 'app.models.Users.query.filter_by', 'Users.query.filter_by', ([], {'username': 'user'}), '(username=user)\n', (8987, 9002), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((9326, 9373), 'flask.url_for', 'url_for', (['"""main.profile"""'], {'username': 'user.username'}), "('main.profile', username=user.username)\n", (9333, 9373), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((10288, 10324), 'app.models.Users.query.filter_by', 'Users.query.filter_by', ([], {'username': 'user'}), '(username=user)\n', (10309, 10324), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((1197, 1225), 'flask_login.current_user.related_posts', 'current_user.related_posts', ([], {}), '()\n', (1223, 1225), False, 'from flask_login import login_required, fresh_login_required, current_user\n'), ((2304, 2326), 'app.models.Blogs.timestamp.desc', 'Blogs.timestamp.desc', ([], {}), '()\n', (2324, 2326), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((3155, 3178), 'app.models.Status.timestamp.desc', 'Status.timestamp.desc', ([], {}), '()\n', (3176, 3178), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((7207, 7278), 'flask.flash', 'flash', (['f"""On your way to accepting friend request from {user.username}!"""'], {}), "(f'On your way to accepting friend request from {user.username}!')\n", (7212, 7278), False, 'from 
flask import render_template, url_for, flash, redirect, request, current_app\n'), ((7295, 7324), 'flask_login.current_user.add_friend', 'current_user.add_friend', (['user'], {}), '(user)\n', (7318, 7324), False, 'from flask_login import login_required, fresh_login_required, current_user\n'), ((7341, 7390), 'flask_login.current_user.pending_friend_requests.remove', 'current_user.pending_friend_requests.remove', (['user'], {}), '(user)\n', (7384, 7390), False, 'from flask_login import login_required, fresh_login_required, current_user\n'), ((7407, 7426), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (7424, 7426), False, 'from app import db\n'), ((7499, 7523), 'flask.request.form.get', 'request.form.get', (['"""deny"""'], {}), "('deny')\n", (7515, 7523), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((9755, 9780), 'app.models.Messages.timestamp.desc', 'Messages.timestamp.desc', ([], {}), '()\n', (9778, 9780), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((1868, 1891), 'app.models.Status.timestamp.desc', 'Status.timestamp.desc', ([], {}), '()\n', (1889, 1891), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((7459, 7480), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (7466, 7480), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((7541, 7614), 'flask.flash', 'flash', (['f"""You are readying to deny a friend request from {user.username}."""'], {}), "(f'You are readying to deny a friend request from {user.username}.')\n", (7546, 7614), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((7631, 7680), 'flask_login.current_user.pending_friend_requests.remove', 'current_user.pending_friend_requests.remove', (['user'], {}), '(user)\n', (7674, 7680), False, 'from flask_login import login_required, fresh_login_required, current_user\n'), ((7697, 7716), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (7714, 7716), False, 'from app import db\n'), ((1403, 1426), 'app.models.Status.timestamp.desc', 'Status.timestamp.desc', ([], {}), '()\n', (1424, 1426), False, 'from app.models import Users, Status, Messages, Blogs\n'), ((7749, 7770), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (7756, 7770), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((6926, 6952), 'flask.request.form.get', 'request.form.get', (['"""accept"""'], {}), "('accept')\n", (6942, 6952), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n'), ((7036, 7060), 'flask.request.form.get', 'request.form.get', (['"""deny"""'], {}), "('deny')\n", (7052, 7060), False, 'from flask import render_template, url_for, flash, redirect, request, current_app\n')]
|
import os
import hashlib
import base64
from binascii import hexlify
import datetime
import getpass
import select
import socket
import sys
import time
import traceback
import json
import logging
import paramiko
import subprocess
import config
from wificlient import get_active_clients
logger = logging.getLogger('eonbackup')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
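# Return the SHA-256 hex digest of a local file, read in 4 KiB chunks.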
def calculate_file_hash(filename):
sha256_hash = hashlib.sha256()
with open(filename,"rb") as f:
for byte_block in iter(lambda: f.read(4096),b""):
sha256_hash.update(byte_block)
return (sha256_hash.hexdigest())
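# Download every (hash, filename) pair into the session directory, verifying the
# SHA-256 hash of each temporary ".tdl" file before renaming it into place.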
def download_files(sftp, session_name, file_defs):
directory = os.path.join(config.root_dir, session_name)
if not os.path.exists(directory):
os.makedirs(directory)
failed = False
for fd in file_defs:
h = fd[0]
fn = fd[1]
if os.path.exists(fn):
logger.info("File {} was downloaded already".format(fn))
continue
fn_d = fn+".tdl"
logger.info("Downloading: " + str(fn) + " " + h)
sftp.get(fn, fn_d)
h2 = calculate_file_hash(fn_d)
if h2 == h:
os.rename(fn_d, fn)
logger.info("Download of {} complete".format(fn))
elif os.path.exists(fn_d):
os.remove(fn_d)
failed = True
if not failed:
status_file = get_session_status_file_path(session_name)
with open(status_file, "w") as fs:
fs.write(json.dumps(file_defs))
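# Run sha256sum on the remote host over an SSH channel and combine the result
# with the file's SFTP stat metadata.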
def get_file_stat(t, sftp, fn):
command = "sha256sum " + fn
logger.info(command)
session = t.open_channel(kind='session')
session.exec_command(command)
while not session.recv_ready():
pass
    # decode the bytes received from the channel and materialise the filter so it can be indexed
    sha_result = list(filter(None, session.recv(512).decode().strip().split(' ')))
stat = sftp.stat(sha_result[1])
fd = {
"name": sha_result[1],
"sha256hash": sha_result[0],
"atime": stat.st_atime,
"mtime": stat.st_mtime,
"size": stat.st_size
}
return fd , sha_result
def get_session_status_file_path(session_name):
return os.path.join(config.status_dir, session_name)
def session_sync_complete(session_name):
directory = os.path.join(config.root_dir, session_name)
if not os.path.exists(directory):
return False
status_file = get_session_status_file_path(session_name)
if os.path.exists(status_file):
return True
return False
def init():
if not os.path.exists(config.root_dir):
os.makedirs(config.root_dir)
if not os.path.exists(config.status_dir):
os.makedirs(config.status_dir)
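# Open a TCP connection to the host and authenticate a paramiko Transport with
# the given key; returns None if the connection or authentication fails.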
def connect(hostname, port, key):
t = None
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((hostname, port))
t = paramiko.Transport(sock)
t.start_client()
t.auth_publickey(config.username, key)
except Exception as e:
logger.info("Connection failed: " + str(e))
return None
return t
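# List a remote session directory and return its (hash, name) pairs, or None if
# any file is younger than age seconds (the session may still be written to).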
def load_files(t, sftp, session_dir, age):
start_time = int((datetime.datetime.utcnow() - datetime.datetime(1970,1,1)).total_seconds())
files = sftp.listdir(session_dir)
sha_results = []
for f in files:
fn = os.path.join(session_dir, f)
fd, sha_result = get_file_stat(t, sftp, fn)
file_age = start_time - fd["atime"]
if file_age < age:
return None
sha_results.append(sha_result)
return sha_results
def process_session(t, sftp, session):
    if session_sync_complete(session):
logger.info("Ignoring complete session {}".format(session))
return
session_dir = os.path.join(config.root_dir, session)
sha_results = load_files(t, sftp, session_dir, 2 * 60)
if sha_results:
download_files(sftp, session, sha_results)
else:
logger.info("Ignoring recent session {}".format(session))
def process_host(t):
sftp = paramiko.SFTPClient.from_transport(t)
dirlist = sftp.listdir(config.root_dir)
for d in dirlist:
if not disk_ok():
return
process_session(t, sftp, d)
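# Return True while usage of the /data/ partition stays below config.disk_full_percent.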
def disk_ok():
df = subprocess.Popen(["df", "/data/"], stdout=subprocess.PIPE)
    output = df.communicate()[0].decode()  # decode bytes so the df output can be split as text
device, size, used, available, percent, mountpoint = \
output.split("\n")[1].split()
u = int(used)
a = int(available)
s = int(size)
logger.info("Disk usage {}/{} {}".format(u, s,percent))
if s == 0:
return False
return (u * 100 / s) < config.disk_full_percent
def main():
init()
if not disk_ok():
logger.error("Disk full. Stopping.")
return
key = paramiko.RSAKey.from_private_key_file(config.key_path)
hosts = get_active_clients()
for host in hosts:
logger.info("Trying host {}".format(host))
hostname = host["ip"]
t = connect(hostname, config.port, key)
if not t:
continue
process_host(t)
if __name__ == "__main__":
main()
|
[
"subprocess.Popen",
"os.remove",
"os.makedirs",
"paramiko.RSAKey.from_private_key_file",
"logging.StreamHandler",
"os.path.exists",
"socket.socket",
"os.rename",
"paramiko.Transport",
"logging.Formatter",
"hashlib.sha256",
"json.dumps",
"datetime.datetime.utcnow",
"datetime.datetime",
"wificlient.get_active_clients",
"os.path.join",
"paramiko.SFTPClient.from_transport",
"logging.getLogger"
] |
[((296, 326), 'logging.getLogger', 'logging.getLogger', (['"""eonbackup"""'], {}), "('eonbackup')\n", (313, 326), False, 'import logging\n'), ((363, 386), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (384, 386), False, 'import logging\n'), ((399, 472), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (416, 472), False, 'import logging\n'), ((576, 592), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (590, 592), False, 'import hashlib\n'), ((835, 878), 'os.path.join', 'os.path.join', (['config.root_dir', 'session_name'], {}), '(config.root_dir, session_name)\n', (847, 878), False, 'import os\n'), ((2373, 2418), 'os.path.join', 'os.path.join', (['config.status_dir', 'session_name'], {}), '(config.status_dir, session_name)\n', (2385, 2418), False, 'import os\n'), ((2476, 2519), 'os.path.join', 'os.path.join', (['config.root_dir', 'session_name'], {}), '(config.root_dir, session_name)\n', (2488, 2519), False, 'import os\n'), ((2652, 2679), 'os.path.exists', 'os.path.exists', (['status_file'], {}), '(status_file)\n', (2666, 2679), False, 'import os\n'), ((3937, 3975), 'os.path.join', 'os.path.join', (['config.root_dir', 'session'], {}), '(config.root_dir, session)\n', (3949, 3975), False, 'import os\n'), ((4216, 4253), 'paramiko.SFTPClient.from_transport', 'paramiko.SFTPClient.from_transport', (['t'], {}), '(t)\n', (4250, 4253), False, 'import paramiko\n'), ((4427, 4485), 'subprocess.Popen', 'subprocess.Popen', (["['df', '/data/']"], {'stdout': 'subprocess.PIPE'}), "(['df', '/data/'], stdout=subprocess.PIPE)\n", (4443, 4485), False, 'import subprocess\n'), ((4947, 5001), 'paramiko.RSAKey.from_private_key_file', 'paramiko.RSAKey.from_private_key_file', (['config.key_path'], {}), '(config.key_path)\n', (4984, 5001), False, 'import paramiko\n'), ((5014, 5034), 'wificlient.get_active_clients', 'get_active_clients', ([], {}), '()\n', (5032, 5034), False, 'from wificlient import get_active_clients\n'), ((891, 916), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (905, 916), False, 'import os\n'), ((926, 948), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (937, 948), False, 'import os\n'), ((1041, 1059), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (1055, 1059), False, 'import os\n'), ((2531, 2556), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (2545, 2556), False, 'import os\n'), ((2743, 2774), 'os.path.exists', 'os.path.exists', (['config.root_dir'], {}), '(config.root_dir)\n', (2757, 2774), False, 'import os\n'), ((2784, 2812), 'os.makedirs', 'os.makedirs', (['config.root_dir'], {}), '(config.root_dir)\n', (2795, 2812), False, 'import os\n'), ((2825, 2858), 'os.path.exists', 'os.path.exists', (['config.status_dir'], {}), '(config.status_dir)\n', (2839, 2858), False, 'import os\n'), ((2868, 2898), 'os.makedirs', 'os.makedirs', (['config.status_dir'], {}), '(config.status_dir)\n', (2879, 2898), False, 'import os\n'), ((2971, 3020), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (2984, 3020), False, 'import socket\n'), ((3072, 3096), 'paramiko.Transport', 'paramiko.Transport', (['sock'], {}), '(sock)\n', (3090, 3096), False, 'import paramiko\n'), ((3517, 3545), 'os.path.join', 'os.path.join', (['session_dir', 'f'], {}), '(session_dir, f)\n', (3529, 3545), False, 'import os\n'), ((1332, 
1351), 'os.rename', 'os.rename', (['fn_d', 'fn'], {}), '(fn_d, fn)\n', (1341, 1351), False, 'import os\n'), ((1427, 1447), 'os.path.exists', 'os.path.exists', (['fn_d'], {}), '(fn_d)\n', (1441, 1447), False, 'import os\n'), ((1461, 1476), 'os.remove', 'os.remove', (['fn_d'], {}), '(fn_d)\n', (1470, 1476), False, 'import os\n'), ((1652, 1673), 'json.dumps', 'json.dumps', (['file_defs'], {}), '(file_defs)\n', (1662, 1673), False, 'import json\n'), ((3348, 3374), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3372, 3374), False, 'import datetime\n'), ((3377, 3406), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (3394, 3406), False, 'import datetime\n')]
|
import warnings
import biorbd_casadi as biorbd
import numpy as np
from scipy import interpolate
from bioptim import (
OdeSolver,
Node,
OptimalControlProgram,
ConstraintFcn,
DynamicsFcn,
ObjectiveFcn,
QAndQDotBounds,
QAndQDotAndQDDotBounds,
ConstraintList,
ObjectiveList,
DynamicsList,
Bounds,
BoundsList,
InitialGuessList,
ControlType,
Solver,
InitialGuess,
InterpolationType,
PhaseTransitionList,
PhaseTransitionFcn,
RigidBodyDynamics,
)
from ..initial_guess.humanoid_initial_pose import set_initial_pose
class HumanoidOcp:
def __init__(
self,
biorbd_model_path: str = None,
n_shooting: int = 10,
phase_time: float = 0.3,
n_threads: int = 8,
control_type: ControlType = ControlType.CONSTANT,
ode_solver: OdeSolver = OdeSolver.COLLOCATION(),
rigidbody_dynamics: RigidBodyDynamics = RigidBodyDynamics.ODE,
step_length: float = 0.8,
right_foot_location: np.array = np.zeros(3),
use_sx: bool = False,
):
self.biorbd_model_path = biorbd_model_path
self.n_shooting = n_shooting
self.phase_time = phase_time
self.n_threads = n_threads
self.control_type = control_type
self.ode_solver = ode_solver
self.rigidbody_dynamics = rigidbody_dynamics
if biorbd_model_path is not None:
self.biorbd_model = biorbd.Model(biorbd_model_path)
self.n_shooting = n_shooting
self.phase_time = phase_time
self._set_head()
self._set_knee()
self._set_shoulder()
self.n_q = self.biorbd_model.nbQ()
self.n_qdot = self.biorbd_model.nbQdot()
self.n_qddot = self.biorbd_model.nbQddot()
self.n_qdddot = self.n_qddot
self.n_tau = self.biorbd_model.nbGeneralizedTorque()
self.tau_min, self.tau_init, self.tau_max = -500, 0, 500
self.qddot_min, self.qddot_init, self.qddot_max = -1000, 0, 1000
self.qdddot_min, self.qdddot_init, self.qdddot_max = -10000, 0, 10000
self.right_foot_location = right_foot_location
self.step_length = step_length
self.initial_left_foot_location = right_foot_location - np.array([0, step_length / 2, 0])
self.final_left_foot_location = right_foot_location + np.array([0, step_length / 2, 0])
self.dynamics = DynamicsList()
self.constraints = ConstraintList()
self.objective_functions = ObjectiveList()
self.phase_transitions = PhaseTransitionList()
self.x_bounds = BoundsList()
self.u_bounds = BoundsList()
self.initial_states = []
self.x_init = InitialGuessList()
self.u_init = InitialGuessList()
self.control_type = control_type
self.control_nodes = Node.ALL if self.control_type == ControlType.LINEAR_CONTINUOUS else Node.ALL_SHOOTING
self._set_dynamics()
self._set_constraints()
self._set_objective_functions()
self._set_phase_transition()
self._set_boundary_conditions()
self._set_initial_guesses()
self.ocp = OptimalControlProgram(
self.biorbd_model,
self.dynamics,
self.n_shooting,
self.phase_time,
x_init=self.x_init,
x_bounds=self.x_bounds,
u_init=self.u_init,
u_bounds=self.u_bounds,
objective_functions=self.objective_functions,
constraints=self.constraints,
n_threads=n_threads,
control_type=self.control_type,
ode_solver=ode_solver,
use_sx=use_sx,
)
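    # The _set_head/_set_knee/_set_shoulder helpers detect optional segments by
    # name so that bounds and objectives can adapt to the loaded model.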
def _set_head(self):
self.has_head = False
for i in range(self.biorbd_model.nbSegment()):
seg = self.biorbd_model.segment(i)
if seg.name().to_string() == "Head":
self.has_head = True
break
def _set_knee(self):
self.has_knee = False
for i in range(self.biorbd_model.nbSegment()):
seg = self.biorbd_model.segment(i)
if seg.name().to_string() == "RShank":
self.has_knee = True
break
def _set_shoulder(self):
self.has_shoulder = False
for i in range(self.biorbd_model.nbSegment()):
seg = self.biorbd_model.segment(i)
if seg.name().to_string() == "RArm":
self.has_shoulder = True
break
def _set_dynamics(self):
# warnings.warn("not implemented under this version of bioptim")
self.dynamics.add(
DynamicsFcn.TORQUE_DRIVEN, rigidbody_dynamics=self.rigidbody_dynamics, with_contact=True, phase=0
)
# self.dynamics.add(DynamicsFcn.TORQUE_DRIVEN, with_contact=True, phase=0)
def _set_objective_functions(self):
# --- Objective function --- #
self.objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="tau", phase=0)
idx_stability = [0, 1, 2]
if self.has_head:
idx_stability.append(3)
# torso stability
self.objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_QDDOT, phase=0, index=idx_stability, weight=0.01)
# head stability
if self.has_head:
self.objective_functions.add(
ObjectiveFcn.Lagrange.MINIMIZE_QDDOT, derivative=True, phase=0, index=3, weight=0.01
)
self.objective_functions.add(
ObjectiveFcn.Lagrange.MINIMIZE_STATE, key="qdot", phase=0, index=3, weight=0.01
)
# keep velocity CoM around 1.5 m/s
self.objective_functions.add(
ObjectiveFcn.Mayer.MINIMIZE_COM_VELOCITY, index=1, target=1.5, node=Node.START, weight=1000
)
self.objective_functions.add(
ObjectiveFcn.Mayer.MINIMIZE_COM_VELOCITY, index=1, target=1.5, node=Node.END, weight=1000
)
# instead of phase transition along z
self.objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_COM_VELOCITY, index=2, weight=0.1)
if (
self.rigidbody_dynamics == RigidBodyDynamics.DAE_INVERSE_DYNAMICS_JERK
or self.rigidbody_dynamics == RigidBodyDynamics.DAE_FORWARD_DYNAMICS_JERK
):
self.objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, phase=0, key="qdddot", weight=1e-4)
def _set_constraints(self):
# --- Constraints --- #
# Contact force in Z are positive
self.constraints.add(
ConstraintFcn.TRACK_CONTACT_FORCES, min_bound=0, max_bound=np.inf, node=Node.ALL, contact_index=1, phase=0
) # FP0 > 0 en Z
# contact node at zero position and zero speed
# node = Node.ALL if self.implicit_dynamics else Node.START
node = Node.START
self.constraints.add(
ConstraintFcn.TRACK_MARKERS, node=node, target=self.right_foot_location, marker_index="RFoot", phase=0
)
self.constraints.add(ConstraintFcn.TRACK_MARKERS_VELOCITY, node=node, marker_index="RFoot", phase=0)
# node = Node.END
# self.constraints.add(
# ConstraintFcn.TRACK_MARKERS, node=node, target=self.right_foot_location, marker_index="RFoot", phase=0
# )
# self.constraints.add(ConstraintFcn.TRACK_MARKERS_VELOCITY, node=node, marker_index="RFoot", phase=0)
# first and last step constraints
self.constraints.add(
ConstraintFcn.TRACK_MARKERS,
target=self.initial_left_foot_location,
node=Node.START,
marker_index="LFoot",
phase=0,
)
self.constraints.add(
ConstraintFcn.TRACK_MARKERS,
target=self.final_left_foot_location,
node=Node.END,
marker_index="LFoot",
phase=0,
)
# Ensure lift of foot
if self.has_knee:
self.constraints.add(
ConstraintFcn.TRACK_MARKERS,
index=2,
min_bound=0.05,
max_bound=np.inf,
node=Node.MID,
marker_index="LFoot",
phase=0,
)
def _set_phase_transition(self):
idx = [0, 1, 2]
        if self.has_head:
            idx.append(3)  # append in place; list.append returns None, so its result must not be reassigned
self.phase_transitions.add(PhaseTransitionFcn.CYCLIC, index=idx, weight=1000)
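    # State bounds on q/qdot (and qddot for the jerk formulations) plus control
    # bounds; the three root coordinates are left unactuated.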
def _set_boundary_conditions(self):
self.x_bounds = BoundsList()
self.x_bounds.add(
bounds=QAndQDotAndQDDotBounds(self.biorbd_model)
if self.rigidbody_dynamics == RigidBodyDynamics.DAE_INVERSE_DYNAMICS_JERK
or self.rigidbody_dynamics == RigidBodyDynamics.DAE_FORWARD_DYNAMICS_JERK
else QAndQDotBounds(self.biorbd_model)
)
nq = self.n_q
        self.x_bounds[0].max[2, :] = 0  # torso bent forward
if self.has_head:
self.x_bounds[0][nq + 3, 0] = 0 # head velocity zero at the beginning
self.x_bounds[0][nq + 3, -1] = 0 # head velocity zero at the end
if self.has_knee:
self.x_bounds[0].min[nq - 2 : nq, 0] = -np.pi / 8 # driving knees
# Supervised shoulders
if self.has_shoulder:
i = 1 if self.has_head else 0
self.x_bounds[0][5 + i, 0] = -np.pi / 6
self.x_bounds[0][6 + i, 0] = np.pi / 6
self.x_bounds[0][5 + i, -1] = np.pi / 6
self.x_bounds[0][6 + i, -1] = -np.pi / 6
self.x_bounds[0][5 + i + nq, 0] = 0
self.x_bounds[0][5 + i + nq, -1] = 0
self.x_bounds[0][6 + i + nq, 0] = 0
self.x_bounds[0][6 + i + nq, -1] = 0
# Unsupervised arms not working trying another time with cyclic constraints
# x_bounds[0].max[5, 0] = -1e-5 # position is negative at start
# x_bounds[0].min[6, 0] = 1e-5 # position is positive at start
#
# x_bounds[0].min[5, -1] = 1e-5 # position is positive at the end
# x_bounds[0].max[6, -1] = -1e-5 # position is negative at the end
#
# x_bounds[0][n_q + 5, [0, -1]] = 0 # velocity of shoulders zero at begining and end
# x_bounds[0][n_q + 6, [0, -1]] = 0 # velocity of shoulders zero at begining and end
# x_bounds[0].max[n_q + 6, 1] = -1e-5 # velocity of left shoulder negative
# x_bounds[0].min[n_q + 6, 1] = -5 # velocity of left shoulder negative
# x_bounds[0].min[n_q + 5, 1] = 1e-5 # velocity of right shoulder positive
# x_bounds[0].max[n_q + 5, 1] = 5 # velocity of right shoulder positive
if self.rigidbody_dynamics == RigidBodyDynamics.DAE_INVERSE_DYNAMICS:
self.u_bounds.add(
[self.tau_min] * self.n_tau
+ [self.qddot_min] * self.n_qddot
+ [self.qddot_min] * self.biorbd_model.nbContacts(),
[self.tau_max] * self.n_tau
+ [self.qddot_max] * self.n_qddot
+ [self.qddot_max] * self.biorbd_model.nbContacts(),
)
elif self.rigidbody_dynamics == RigidBodyDynamics.DAE_FORWARD_DYNAMICS:
self.u_bounds.add(
[self.tau_min] * self.n_tau + [self.qddot_min] * self.n_qddot,
[self.tau_max] * self.n_tau + [self.qddot_max] * self.n_qddot,
)
elif self.rigidbody_dynamics == RigidBodyDynamics.DAE_INVERSE_DYNAMICS_JERK:
self.u_bounds.add(
[self.tau_min] * self.n_tau
+ [self.qdddot_min] * self.n_qddot
+ [self.qddot_min] * self.biorbd_model.nbContacts(),
[self.tau_max] * self.n_tau
+ [self.qdddot_max] * self.n_qddot
+ [self.qddot_max] * self.biorbd_model.nbContacts(),
)
elif self.rigidbody_dynamics == RigidBodyDynamics.DAE_FORWARD_DYNAMICS_JERK:
self.u_bounds.add(
[self.tau_min] * self.n_tau + [self.qdddot_min] * self.n_qddot,
[self.tau_max] * self.n_tau + [self.qdddot_max] * self.n_qddot,
)
else:
self.u_bounds.add([self.tau_min] * self.n_tau, [self.tau_max] * self.n_tau)
# root is not actuated
self.u_bounds[0][:3, :] = 0
def _set_initial_guesses(self):
"""
Set initial guess for the optimization problem.
"""
# --- Initial guess --- #
q0 = [0] * self.n_q
# Torso over the floor and bent
q0[1] = 0.8
q0[2] = -np.pi / 6
self.q0i = set_initial_pose(
self.biorbd_model_path, np.array(q0), self.right_foot_location, self.initial_left_foot_location
)
self.q0end = set_initial_pose(
self.biorbd_model_path, np.array(q0), self.right_foot_location, self.final_left_foot_location
)
qdot0 = [0] * self.n_qdot
X0i = []
X0i.extend(self.q0i)
X0i.extend(qdot0)
X0end = []
X0end.extend(self.q0end)
X0end.extend(qdot0)
if (
self.rigidbody_dynamics == RigidBodyDynamics.DAE_INVERSE_DYNAMICS_JERK
or self.rigidbody_dynamics == RigidBodyDynamics.DAE_FORWARD_DYNAMICS_JERK
):
X0i.extend([0] * self.n_qddot)
X0end.extend([0] * self.n_qddot)
# X0i.extend([0] * self.n_qddot + [0] * self.biorbd_model.nbContacts())
# X0end.extend([0] * self.n_qddot + [0] * self.biorbd_model.nbContacts())
x = np.linspace(0, self.phase_time, 2)
y = np.array([X0i, X0end]).T
f = interpolate.interp1d(x, y)
x_new = np.linspace(0, self.phase_time, self.n_shooting + 1)
X0 = f(x_new) # use interpolation function returned by `interp1d`
self._set_initial_states(X0)
self._set_initial_controls()
def _set_initial_states(self, X0: np.array = None):
if X0 is None:
self.x_init.add([0] * (self.n_q + self.n_q))
else:
if X0.shape[1] != self.n_shooting + 1:
X0 = self._interpolate_initial_states(X0)
if not self.ode_solver.is_direct_shooting:
n = self.ode_solver.polynomial_degree
X0 = np.repeat(X0, n + 1, axis=1)
X0 = X0[:, :-n]
self.x_init.add(X0, interpolation=InterpolationType.EACH_FRAME)
def _set_initial_controls(self, U0: np.array = None):
if U0 is None:
if self.rigidbody_dynamics == RigidBodyDynamics.DAE_INVERSE_DYNAMICS:
self.u_init.add(
[self.tau_init] * self.n_tau
+ [self.qddot_init] * self.n_qddot
+ [5] * self.biorbd_model.nbContacts()
)
elif self.rigidbody_dynamics == RigidBodyDynamics.DAE_INVERSE_DYNAMICS_JERK:
self.u_init.add(
[self.tau_init] * self.n_tau
+ [self.qdddot_init] * self.n_qdddot
+ [5] * self.biorbd_model.nbContacts()
)
elif self.rigidbody_dynamics == RigidBodyDynamics.DAE_FORWARD_DYNAMICS_JERK:
self.u_init.add([self.tau_init] * self.n_tau + [self.qdddot_init] * self.n_qdddot)
elif self.rigidbody_dynamics == RigidBodyDynamics.DAE_FORWARD_DYNAMICS:
self.u_init.add([self.tau_init] * self.n_tau + [self.qddot_init] * self.n_qddot)
else:
self.u_init.add([self.tau_init] * self.n_tau)
else:
if U0.shape[1] != self.n_shooting:
U0 = self._interpolate_initial_controls(U0)
self.u_init.add(U0, interpolation=InterpolationType.EACH_FRAME)
def _interpolate_initial_states(self, X0: np.array):
print("interpolating initial states to match the number of shooting nodes")
x = np.linspace(0, self.phase_time, X0.shape[1])
y = X0
f = interpolate.interp1d(x, y)
x_new = np.linspace(0, self.phase_time, self.n_shooting + 1)
y_new = f(x_new) # use interpolation function returned by `interp1d`
return y_new
def _interpolate_initial_controls(self, U0: np.array):
print("interpolating initial controls to match the number of shooting nodes")
x = np.linspace(0, self.phase_time, U0.shape[1])
y = U0
f = interpolate.interp1d(x, y)
x_new = np.linspace(0, self.phase_time, self.n_shooting)
y_new = f(x_new) # use interpolation function returned by `interp1d`
return y_new
|
[
"bioptim.BoundsList",
"bioptim.OdeSolver.COLLOCATION",
"bioptim.ObjectiveList",
"bioptim.PhaseTransitionList",
"bioptim.InitialGuessList",
"biorbd_casadi.Model",
"numpy.zeros",
"bioptim.QAndQDotBounds",
"numpy.array",
"bioptim.OptimalControlProgram",
"numpy.linspace",
"bioptim.ConstraintList",
"scipy.interpolate.interp1d",
"bioptim.QAndQDotAndQDDotBounds",
"bioptim.DynamicsList",
"numpy.repeat"
] |
[((864, 887), 'bioptim.OdeSolver.COLLOCATION', 'OdeSolver.COLLOCATION', ([], {}), '()\n', (885, 887), False, 'from bioptim import OdeSolver, Node, OptimalControlProgram, ConstraintFcn, DynamicsFcn, ObjectiveFcn, QAndQDotBounds, QAndQDotAndQDDotBounds, ConstraintList, ObjectiveList, DynamicsList, Bounds, BoundsList, InitialGuessList, ControlType, Solver, InitialGuess, InterpolationType, PhaseTransitionList, PhaseTransitionFcn, RigidBodyDynamics\n'), ((1034, 1045), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1042, 1045), True, 'import numpy as np\n'), ((8682, 8694), 'bioptim.BoundsList', 'BoundsList', ([], {}), '()\n', (8692, 8694), False, 'from bioptim import OdeSolver, Node, OptimalControlProgram, ConstraintFcn, DynamicsFcn, ObjectiveFcn, QAndQDotBounds, QAndQDotAndQDDotBounds, ConstraintList, ObjectiveList, DynamicsList, Bounds, BoundsList, InitialGuessList, ControlType, Solver, InitialGuess, InterpolationType, PhaseTransitionList, PhaseTransitionFcn, RigidBodyDynamics\n'), ((13751, 13785), 'numpy.linspace', 'np.linspace', (['(0)', 'self.phase_time', '(2)'], {}), '(0, self.phase_time, 2)\n', (13762, 13785), True, 'import numpy as np\n'), ((13835, 13861), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'y'], {}), '(x, y)\n', (13855, 13861), False, 'from scipy import interpolate\n'), ((13878, 13930), 'numpy.linspace', 'np.linspace', (['(0)', 'self.phase_time', '(self.n_shooting + 1)'], {}), '(0, self.phase_time, self.n_shooting + 1)\n', (13889, 13930), True, 'import numpy as np\n'), ((16093, 16137), 'numpy.linspace', 'np.linspace', (['(0)', 'self.phase_time', 'X0.shape[1]'], {}), '(0, self.phase_time, X0.shape[1])\n', (16104, 16137), True, 'import numpy as np\n'), ((16165, 16191), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'y'], {}), '(x, y)\n', (16185, 16191), False, 'from scipy import interpolate\n'), ((16208, 16260), 'numpy.linspace', 'np.linspace', (['(0)', 'self.phase_time', '(self.n_shooting + 1)'], {}), '(0, self.phase_time, self.n_shooting + 1)\n', (16219, 16260), True, 'import numpy as np\n'), ((16518, 16562), 'numpy.linspace', 'np.linspace', (['(0)', 'self.phase_time', 'U0.shape[1]'], {}), '(0, self.phase_time, U0.shape[1])\n', (16529, 16562), True, 'import numpy as np\n'), ((16590, 16616), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'y'], {}), '(x, y)\n', (16610, 16616), False, 'from scipy import interpolate\n'), ((16633, 16681), 'numpy.linspace', 'np.linspace', (['(0)', 'self.phase_time', 'self.n_shooting'], {}), '(0, self.phase_time, self.n_shooting)\n', (16644, 16681), True, 'import numpy as np\n'), ((1450, 1481), 'biorbd_casadi.Model', 'biorbd.Model', (['biorbd_model_path'], {}), '(biorbd_model_path)\n', (1462, 1481), True, 'import biorbd_casadi as biorbd\n'), ((2481, 2495), 'bioptim.DynamicsList', 'DynamicsList', ([], {}), '()\n', (2493, 2495), False, 'from bioptim import OdeSolver, Node, OptimalControlProgram, ConstraintFcn, DynamicsFcn, ObjectiveFcn, QAndQDotBounds, QAndQDotAndQDDotBounds, ConstraintList, ObjectiveList, DynamicsList, Bounds, BoundsList, InitialGuessList, ControlType, Solver, InitialGuess, InterpolationType, PhaseTransitionList, PhaseTransitionFcn, RigidBodyDynamics\n'), ((2527, 2543), 'bioptim.ConstraintList', 'ConstraintList', ([], {}), '()\n', (2541, 2543), False, 'from bioptim import OdeSolver, Node, OptimalControlProgram, ConstraintFcn, DynamicsFcn, ObjectiveFcn, QAndQDotBounds, QAndQDotAndQDDotBounds, ConstraintList, ObjectiveList, DynamicsList, Bounds, BoundsList, InitialGuessList, ControlType, 
Solver, InitialGuess, InterpolationType, PhaseTransitionList, PhaseTransitionFcn, RigidBodyDynamics\n'), ((2583, 2598), 'bioptim.ObjectiveList', 'ObjectiveList', ([], {}), '()\n', (2596, 2598), False, 'from bioptim import OdeSolver, Node, OptimalControlProgram, ConstraintFcn, DynamicsFcn, ObjectiveFcn, QAndQDotBounds, QAndQDotAndQDDotBounds, ConstraintList, ObjectiveList, DynamicsList, Bounds, BoundsList, InitialGuessList, ControlType, Solver, InitialGuess, InterpolationType, PhaseTransitionList, PhaseTransitionFcn, RigidBodyDynamics\n'), ((2636, 2657), 'bioptim.PhaseTransitionList', 'PhaseTransitionList', ([], {}), '()\n', (2655, 2657), False, 'from bioptim import OdeSolver, Node, OptimalControlProgram, ConstraintFcn, DynamicsFcn, ObjectiveFcn, QAndQDotBounds, QAndQDotAndQDDotBounds, ConstraintList, ObjectiveList, DynamicsList, Bounds, BoundsList, InitialGuessList, ControlType, Solver, InitialGuess, InterpolationType, PhaseTransitionList, PhaseTransitionFcn, RigidBodyDynamics\n'), ((2686, 2698), 'bioptim.BoundsList', 'BoundsList', ([], {}), '()\n', (2696, 2698), False, 'from bioptim import OdeSolver, Node, OptimalControlProgram, ConstraintFcn, DynamicsFcn, ObjectiveFcn, QAndQDotBounds, QAndQDotAndQDDotBounds, ConstraintList, ObjectiveList, DynamicsList, Bounds, BoundsList, InitialGuessList, ControlType, Solver, InitialGuess, InterpolationType, PhaseTransitionList, PhaseTransitionFcn, RigidBodyDynamics\n'), ((2727, 2739), 'bioptim.BoundsList', 'BoundsList', ([], {}), '()\n', (2737, 2739), False, 'from bioptim import OdeSolver, Node, OptimalControlProgram, ConstraintFcn, DynamicsFcn, ObjectiveFcn, QAndQDotBounds, QAndQDotAndQDDotBounds, ConstraintList, ObjectiveList, DynamicsList, Bounds, BoundsList, InitialGuessList, ControlType, Solver, InitialGuess, InterpolationType, PhaseTransitionList, PhaseTransitionFcn, RigidBodyDynamics\n'), ((2803, 2821), 'bioptim.InitialGuessList', 'InitialGuessList', ([], {}), '()\n', (2819, 2821), False, 'from bioptim import OdeSolver, Node, OptimalControlProgram, ConstraintFcn, DynamicsFcn, ObjectiveFcn, QAndQDotBounds, QAndQDotAndQDDotBounds, ConstraintList, ObjectiveList, DynamicsList, Bounds, BoundsList, InitialGuessList, ControlType, Solver, InitialGuess, InterpolationType, PhaseTransitionList, PhaseTransitionFcn, RigidBodyDynamics\n'), ((2848, 2866), 'bioptim.InitialGuessList', 'InitialGuessList', ([], {}), '()\n', (2864, 2866), False, 'from bioptim import OdeSolver, Node, OptimalControlProgram, ConstraintFcn, DynamicsFcn, ObjectiveFcn, QAndQDotBounds, QAndQDotAndQDDotBounds, ConstraintList, ObjectiveList, DynamicsList, Bounds, BoundsList, InitialGuessList, ControlType, Solver, InitialGuess, InterpolationType, PhaseTransitionList, PhaseTransitionFcn, RigidBodyDynamics\n'), ((3296, 3658), 'bioptim.OptimalControlProgram', 'OptimalControlProgram', (['self.biorbd_model', 'self.dynamics', 'self.n_shooting', 'self.phase_time'], {'x_init': 'self.x_init', 'x_bounds': 'self.x_bounds', 'u_init': 'self.u_init', 'u_bounds': 'self.u_bounds', 'objective_functions': 'self.objective_functions', 'constraints': 'self.constraints', 'n_threads': 'n_threads', 'control_type': 'self.control_type', 'ode_solver': 'ode_solver', 'use_sx': 'use_sx'}), '(self.biorbd_model, self.dynamics, self.n_shooting,\n self.phase_time, x_init=self.x_init, x_bounds=self.x_bounds, u_init=\n self.u_init, u_bounds=self.u_bounds, objective_functions=self.\n objective_functions, constraints=self.constraints, n_threads=n_threads,\n control_type=self.control_type, ode_solver=ode_solver, 
use_sx=use_sx)\n', (3317, 3658), False, 'from bioptim import OdeSolver, Node, OptimalControlProgram, ConstraintFcn, DynamicsFcn, ObjectiveFcn, QAndQDotBounds, QAndQDotAndQDDotBounds, ConstraintList, ObjectiveList, DynamicsList, Bounds, BoundsList, InitialGuessList, ControlType, Solver, InitialGuess, InterpolationType, PhaseTransitionList, PhaseTransitionFcn, RigidBodyDynamics\n'), ((12863, 12875), 'numpy.array', 'np.array', (['q0'], {}), '(q0)\n', (12871, 12875), True, 'import numpy as np\n'), ((13020, 13032), 'numpy.array', 'np.array', (['q0'], {}), '(q0)\n', (13028, 13032), True, 'import numpy as np\n'), ((13798, 13820), 'numpy.array', 'np.array', (['[X0i, X0end]'], {}), '([X0i, X0end])\n', (13806, 13820), True, 'import numpy as np\n'), ((2318, 2351), 'numpy.array', 'np.array', (['[0, step_length / 2, 0]'], {}), '([0, step_length / 2, 0])\n', (2326, 2351), True, 'import numpy as np\n'), ((2418, 2451), 'numpy.array', 'np.array', (['[0, step_length / 2, 0]'], {}), '([0, step_length / 2, 0])\n', (2426, 2451), True, 'import numpy as np\n'), ((14472, 14500), 'numpy.repeat', 'np.repeat', (['X0', '(n + 1)'], {'axis': '(1)'}), '(X0, n + 1, axis=1)\n', (14481, 14500), True, 'import numpy as np\n'), ((8741, 8782), 'bioptim.QAndQDotAndQDDotBounds', 'QAndQDotAndQDDotBounds', (['self.biorbd_model'], {}), '(self.biorbd_model)\n', (8763, 8782), False, 'from bioptim import OdeSolver, Node, OptimalControlProgram, ConstraintFcn, DynamicsFcn, ObjectiveFcn, QAndQDotBounds, QAndQDotAndQDDotBounds, ConstraintList, ObjectiveList, DynamicsList, Bounds, BoundsList, InitialGuessList, ControlType, Solver, InitialGuess, InterpolationType, PhaseTransitionList, PhaseTransitionFcn, RigidBodyDynamics\n'), ((8972, 9005), 'bioptim.QAndQDotBounds', 'QAndQDotBounds', (['self.biorbd_model'], {}), '(self.biorbd_model)\n', (8986, 9005), False, 'from bioptim import OdeSolver, Node, OptimalControlProgram, ConstraintFcn, DynamicsFcn, ObjectiveFcn, QAndQDotBounds, QAndQDotAndQDDotBounds, ConstraintList, ObjectiveList, DynamicsList, Bounds, BoundsList, InitialGuessList, ControlType, Solver, InitialGuess, InterpolationType, PhaseTransitionList, PhaseTransitionFcn, RigidBodyDynamics\n')]
|
# Requires openpyxl, pandas and BeautifulSoup
# (install them with pip)
import requests
import re
import pandas as pd
from bs4 import BeautifulSoup
region = ['ganjingzi', 'shahekou', 'zhongshan', 'xigang', 'gaoxinyuanqu']
# this list holds the pinyin names of the districts to crawl
regnam = ['甘井子', '沙河口', '中山', '西岗', '高新园']  # this list holds the Chinese names of the districts
page = 5
reTryTime = 5
price = []     # total price of each listing
uprice = []    # price per square metre
house = []     # name of the housing estate
room = []      # floor plan / layout
area = []      # floor area
direct = []    # orientation
decorate = []  # decoration level
elevator = []  # whether the building has an elevator
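# Yield one listing-page URL per (district, page number) combination.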
def generate_allurl(page):
url = 'http://dl.lianjia.com/ershoufang/{}/pg{}/'
    # change the url above to crawl a different city
for url_region in range(len(region)):
print("\n开始爬取地区:"+ regnam[url_region] + "\n")
for url_next in range(1,int(page) + 1):
print("正在爬取第"+ str(url_next) + "页")
yield url.format(region[url_region], url_next)
def get_allurl(generate_allurl):
gotData = False
reTry = 0
while reTry < reTryTime and not gotData:
try:
reTry += 1
get_url = requests.get(generate_allurl, timeout=1)
if get_url.status_code == 200:
re_set = re.compile('<li.*?class="clear">.*?<a.*?class="img.*?".*?href="(.*?)"')
re_get = re.findall(re_set,get_url.text)
gotData = True
return re_get
except:
pass
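# Fetch one listing page and append its price, layout, area, orientation,
# decoration and elevator fields to the module-level lists (with retries).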
def open_url(re_get):
gotData = False
reTry = 0
while reTry < reTryTime and not gotData:
try:
reTry += 1
res = requests.get(re_get, timeout=1)
if res.status_code == 200:
soup = BeautifulSoup(res.text,'lxml')
price.append(soup.select('.total')[0].text + '万')
uprice.append(soup.select('.unitPriceValue')[0].text)
house.append(soup.select('.communityName > a')[0].text)
room.append(soup.find("div", class_="room").find("div", class_="mainInfo").text)
area.append(soup.find("div", class_="area").find("div", class_="mainInfo").text)
direct.append(soup.find("div", class_="type").find("div", class_="mainInfo").text)
decorate.append(soup.find("div", class_="introContent").find_all("li")[8].text[4:])
elevator.append(soup.find("div", class_="introContent").find_all("li")[11].text[4:])
gotData = True
except:
pass
def toTxt():
print("\n开始保存txt文件……\n")
for regnum in range(len(region)):
print("录入" + regnam[regnum] + "数据")
        with open(regnam[regnum] + '.txt', 'w', encoding='utf-8') as f:  # create and open a txt file
            for i in range(len(price)):  # loop over every collected listing
                f.write(str(price[i]) + ' | ' + str(uprice[i]) + ' | ' + str(house[i]) + ' | ' + str(room[i]) + ' | ' + str(area[i]) + ' | ' + str(direct[i]) + ' | ' + str(decorate[i]) + ' | ' + str(elevator[i]) + '\n')  # write one listing per line
print('已保存为 ' + regnam[regnum] + '.txt ')
def toXls():
print("\n开始将所有地区数据保存为xls文件……\n")
df = pd.DataFrame({
"总价": price,
"每平米均价": uprice,
"房屋名称": house,
"格局": room,
"面积": area,
"朝向": direct,
"装修": decorate,
"电梯": elevator
})
df.to_excel('大连链家二手房.xlsx',sheet_name='大连链家二手房')
print("已保存为 大连链家二手房.xlsx")
def main():
page = input('输入各地区生成页数:')
print()
for i in generate_allurl(page):
print(i)
url_tmp = get_allurl(i)
        for j in url_tmp or []:  # get_allurl returns None when every retry fails
            open_url(j)
toTxt()
print()
toXls()
print("完成")
if __name__ == '__main__':
main()
# def get_allurl(generate_allurl):
# get_url = requests.get(generate_allurl,)
# if get_url.status_code == 200:
# re_set = re.compile('<li.*?class="clear">.*?<a.*?class="img.*?".*?href="(.*?)"')
# re_get = re.findall(re_set,get_url.text)
# return re_get
# def open_url(re_get):
# res = requests.get(re_get, timeout=0.1)
# if res.status_code == 200:
# soup = BeautifulSoup(res.text,'lxml')
# price.append(soup.select('.total')[0].text + '万')
# uprice.append(soup.select('.unitPriceValue')[0].text)
# house.append(soup.select('.communityName > a')[0].text)
# room.append(soup.find("div", class_="room").find("div", class_="mainInfo").text)
# area.append(soup.find("div", class_="area").find("div", class_="mainInfo").text)
# direct.append(soup.find("div", class_="type").find("div", class_="mainInfo").text)
# decorate.append(soup.find("div", class_="introContent").find_all("li")[8].text[4:])
# elevator.append(soup.find("div", class_="introContent").find_all("li")[11].text[4:])
|
[
"pandas.DataFrame",
"re.findall",
"requests.get",
"bs4.BeautifulSoup",
"re.compile"
] |
[((3022, 3155), 'pandas.DataFrame', 'pd.DataFrame', (["{'总价': price, '每平米均价': uprice, '房屋名称': house, '格局': room, '面积': area, '朝向':\n direct, '装修': decorate, '电梯': elevator}"], {}), "({'总价': price, '每平米均价': uprice, '房屋名称': house, '格局': room, '面积':\n area, '朝向': direct, '装修': decorate, '电梯': elevator})\n", (3034, 3155), True, 'import pandas as pd\n'), ((987, 1027), 'requests.get', 'requests.get', (['generate_allurl'], {'timeout': '(1)'}), '(generate_allurl, timeout=1)\n', (999, 1027), False, 'import requests\n'), ((1500, 1531), 'requests.get', 'requests.get', (['re_get'], {'timeout': '(1)'}), '(re_get, timeout=1)\n', (1512, 1531), False, 'import requests\n'), ((1098, 1169), 're.compile', 're.compile', (['"""<li.*?class="clear">.*?<a.*?class="img.*?".*?href="(.*?)\\""""'], {}), '(\'<li.*?class="clear">.*?<a.*?class="img.*?".*?href="(.*?)"\')\n', (1108, 1169), False, 'import re\n'), ((1196, 1228), 're.findall', 're.findall', (['re_set', 'get_url.text'], {}), '(re_set, get_url.text)\n', (1206, 1228), False, 'import re\n'), ((1596, 1627), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.text', '"""lxml"""'], {}), "(res.text, 'lxml')\n", (1609, 1627), False, 'from bs4 import BeautifulSoup\n')]
|
from GetAllDanmuInfo_V2 import GetAllDanmuInfo
from GetClearCommandInstruction import GetClearCommandInstruction
from GetVideoTitle import GetVideoTitle
from ExportAllDanmu import ExportAllDanmu
from CheckLoginSituation import CheckLoginSituation
import base64
import Sqlite3_Bilibili
import sys
import os
headers = {
'cookie': "",
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36',
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Referer': 'https://www.bilibili.com'
}
def isnum(n):
try:
float(int(n))
return True
except:
return False
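# Strip the site prefix and query string from a video URL so only the AV/BV id remains.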
def FromUrlGetAidOrBvid(video_url):
base_url_list = [
"https://www.bilibili.com/video/"
]
if "http" in video_url:
for i in range(len(base_url_list)):
if base_url_list[i] in video_url:
return str(video_url).replace(base_url_list[i],"").split("?",1)[0].replace("/","")
return video_url
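# Interactive menu: checks the saved SESSDATA, then collects and/or exports all
# historical danmu for a chosen video.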
def Meum():
clear_comand_instruction = GetClearCommandInstruction()
Index_Server = Sqlite3_Bilibili.Bilibili_Danmu_Index_Server("root", "root")
if os.path.exists(".config") == False:
while True:
print("检测到您第一次使用本程序,请输入您的SESSDATA便于接下来的操作")
print("若不清楚自己的SESSDATA的话,请查看README中的教程链接解决该问题")
print("格式为:SESSDATA=您获取的SESSDATA")
user_input = input(">>")
if "SESSDATA=" in user_input.upper():
with open(".config", "w", encoding="utf-8") as f:
f.write(base64.b64encode(user_input.encode("utf-8")).decode())
break
else:
print("请输入正确格式的SESSDATA!")
os.system(clear_comand_instruction)
else:
with open(".config","r",encoding="utf-8") as f:
temp_sessdata = f.read()
temp_sessdata = base64.b64decode(temp_sessdata).decode()
headers["cookie"] = temp_sessdata
while CheckLoginSituation(headers) == 1:
os.system(clear_comand_instruction)
print("SESSDATA已过期,请重新输入您的SESSDATA")
print("若不清楚自己的SESSDATA的话,请查看README中的教程链接解决该问题")
print("格式为:SESSDATA=您获取的SESSDATA")
user_input = input(">>")
if "SESSDATA=" in user_input.upper():
with open(".config", "w", encoding="utf-8") as f:
f.write(base64.b64encode(user_input.encode("utf-8")).decode())
headers["cookie"] = temp_sessdata
else:
print("请输入正确格式的SESSDATA!")
os.system(clear_comand_instruction)
while True:
os.system(clear_comand_instruction)
while CheckLoginSituation(headers) == 1:
if os.path.exists(".config") == True:
print("SESSDATA已过期,请重新输入您的SESSDATA")
print("若不清楚自己的SESSDATA的话,请查看README中的教程链接解决该问题")
print("格式为:SESSDATA=您获取的SESSDATA")
user_input = input(">>")
if "SESSDATA=" in user_input.upper():
with open(".config", "w", encoding="utf-8") as f:
f.write(base64.b64encode(user_input.encode("utf-8")).decode())
headers["cookie"] = temp_sessdata
else:
print("请输入正确格式的SESSDATA!")
os.system(clear_comand_instruction)
else:
print("警告!!!未登录!!!无法获取历史弹幕!!!")
print("请查看文档进行登录!!")
input("按下任意键退出...")
sys.exit(0)
if os.path.exists("sqlite3") == False:
os.makedirs("sqlite3")
if os.path.exists("Export") == False:
os.makedirs("Export")
print("Bilibili(B站)全弹幕获取程序")
print("作者:菠萝小西瓜(DNLINYJ)")
print("Github:https://github.com/DNLINYJ")
print("注意:仅供个人学习交流使用,切勿用于非法用途!")
print("---------------------------------------------------------")
print("1) 收集指定视频全部历史弹幕(数据量较大时所用时间较久)")
print("2) 导出数据库内指定视频全部历史弹幕")
print("3) 收集并导出指定视频全部历史弹幕(数据量较大时所用时间较久,谨慎使用)")
print("4) 退出")
user_input = str(input(">>"))
if user_input == "1":
os.system(clear_comand_instruction)
print("请输入B站视频的AV号/BV号,或者输入B站视频地址(仅支持单P视频/多P视频中的单P下载)")
user_input = str(input(">>"))
user_input = FromUrlGetAidOrBvid(user_input)
result = GetAllDanmuInfo(user_input, headers)
if result == 0:
print(f"获取视频:{GetVideoTitle(user_input, headers)} 的所有历史弹幕成功.")
input("按下任意键继续...")
elif result == 2:
input("按下任意键继续...")
else:
print(f"获取视频:{GetVideoTitle(user_input, headers)} 的所有历史弹幕失败.")
input("按下任意键继续...")
elif user_input == "2":
os.system(clear_comand_instruction)
Video_Info_List = Index_Server.GetAllVideoDatabaseName()
if Video_Info_List != None:
print("历史弹幕数据库中存在的视频如下:")
print("-----------------------------------------------------------")
for i in range(len(Video_Info_List)):
print(f"{i + 1}) 视频标题:{Video_Info_List[i][1]} 视频AV号:{Video_Info_List[i][2]} 保存的弹幕结束日期:{Video_Info_List[i][4]}")
print("-----------------------------------------------------------")
print("请输入您想导出的视频序号")
user_input = input(">>")
if isnum(user_input) == False:
print("请输入正确的选项!")
input("按下回车继续运行...")
os.system(clear_comand_instruction)
else:
                if int(user_input) < 1 or int(user_input) > len(Video_Info_List):
print("请输入正确的选项!")
input("按下回车继续运行...")
else:
                    ExportAllDanmu(Video_Info_List[int(user_input) - 1][2])  # export the video the user selected
input("按下回车继续运行...")
os.system(clear_comand_instruction)
else:
print("历史弹幕数据库中无可用视频历史弹幕可导出!")
input("按下回车继续运行...")
os.system(clear_comand_instruction)
elif user_input == "3":
os.system(clear_comand_instruction)
print("请输入B站视频的AV号/BV号,或者输入B站视频地址(仅支持单P视频/多P视频中的单P下载)")
user_input = str(input(">>"))
user_input = FromUrlGetAidOrBvid(user_input)
result = GetAllDanmuInfo(user_input, headers)
if result == 0:
print(f"获取视频{GetVideoTitle(user_input, headers)} 的所有历史弹幕成功.")
ExportAllDanmu(user_input)
input("按下任意键继续...")
elif result == 2:
input("按下任意键继续...")
else:
print(f"获取视频{GetVideoTitle(user_input, headers)} 的所有历史弹幕失败.")
input("按下任意键继续...")
elif user_input == "4":
sys.exit(0)
if __name__ == '__main__':
Meum()
|
[
"Sqlite3_Bilibili.Bilibili_Danmu_Index_Server",
"os.makedirs",
"GetAllDanmuInfo_V2.GetAllDanmuInfo",
"ExportAllDanmu.ExportAllDanmu",
"os.path.exists",
"os.system",
"base64.b64decode",
"GetVideoTitle.GetVideoTitle",
"CheckLoginSituation.CheckLoginSituation",
"GetClearCommandInstruction.GetClearCommandInstruction",
"sys.exit"
] |
[((1133, 1161), 'GetClearCommandInstruction.GetClearCommandInstruction', 'GetClearCommandInstruction', ([], {}), '()\n', (1159, 1161), False, 'from GetClearCommandInstruction import GetClearCommandInstruction\n'), ((1181, 1241), 'Sqlite3_Bilibili.Bilibili_Danmu_Index_Server', 'Sqlite3_Bilibili.Bilibili_Danmu_Index_Server', (['"""root"""', '"""root"""'], {}), "('root', 'root')\n", (1225, 1241), False, 'import Sqlite3_Bilibili\n'), ((1250, 1275), 'os.path.exists', 'os.path.exists', (['""".config"""'], {}), "('.config')\n", (1264, 1275), False, 'import os\n'), ((2734, 2769), 'os.system', 'os.system', (['clear_comand_instruction'], {}), '(clear_comand_instruction)\n', (2743, 2769), False, 'import os\n'), ((2070, 2098), 'CheckLoginSituation.CheckLoginSituation', 'CheckLoginSituation', (['headers'], {}), '(headers)\n', (2089, 2098), False, 'from CheckLoginSituation import CheckLoginSituation\n'), ((2117, 2152), 'os.system', 'os.system', (['clear_comand_instruction'], {}), '(clear_comand_instruction)\n', (2126, 2152), False, 'import os\n'), ((2785, 2813), 'CheckLoginSituation.CheckLoginSituation', 'CheckLoginSituation', (['headers'], {}), '(headers)\n', (2804, 2813), False, 'from CheckLoginSituation import CheckLoginSituation\n'), ((3650, 3675), 'os.path.exists', 'os.path.exists', (['"""sqlite3"""'], {}), "('sqlite3')\n", (3664, 3675), False, 'import os\n'), ((3698, 3720), 'os.makedirs', 'os.makedirs', (['"""sqlite3"""'], {}), "('sqlite3')\n", (3709, 3720), False, 'import os\n'), ((3732, 3756), 'os.path.exists', 'os.path.exists', (['"""Export"""'], {}), "('Export')\n", (3746, 3756), False, 'import os\n'), ((3779, 3800), 'os.makedirs', 'os.makedirs', (['"""Export"""'], {}), "('Export')\n", (3790, 3800), False, 'import os\n'), ((4283, 4318), 'os.system', 'os.system', (['clear_comand_instruction'], {}), '(clear_comand_instruction)\n', (4292, 4318), False, 'import os\n'), ((4509, 4545), 'GetAllDanmuInfo_V2.GetAllDanmuInfo', 'GetAllDanmuInfo', (['user_input', 'headers'], {}), '(user_input, headers)\n', (4524, 4545), False, 'from GetAllDanmuInfo_V2 import GetAllDanmuInfo\n'), ((1805, 1840), 'os.system', 'os.system', (['clear_comand_instruction'], {}), '(clear_comand_instruction)\n', (1814, 1840), False, 'import os\n'), ((1973, 2004), 'base64.b64decode', 'base64.b64decode', (['temp_sessdata'], {}), '(temp_sessdata)\n', (1989, 2004), False, 'import base64\n'), ((2673, 2708), 'os.system', 'os.system', (['clear_comand_instruction'], {}), '(clear_comand_instruction)\n', (2682, 2708), False, 'import os\n'), ((2835, 2860), 'os.path.exists', 'os.path.exists', (['""".config"""'], {}), "('.config')\n", (2849, 2860), False, 'import os\n'), ((3626, 3637), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3634, 3637), False, 'import sys\n'), ((4949, 4984), 'os.system', 'os.system', (['clear_comand_instruction'], {}), '(clear_comand_instruction)\n', (4958, 4984), False, 'import os\n'), ((3434, 3469), 'os.system', 'os.system', (['clear_comand_instruction'], {}), '(clear_comand_instruction)\n', (3443, 3469), False, 'import os\n'), ((6290, 6325), 'os.system', 'os.system', (['clear_comand_instruction'], {}), '(clear_comand_instruction)\n', (6299, 6325), False, 'import os\n'), ((6371, 6406), 'os.system', 'os.system', (['clear_comand_instruction'], {}), '(clear_comand_instruction)\n', (6380, 6406), False, 'import os\n'), ((6597, 6633), 'GetAllDanmuInfo_V2.GetAllDanmuInfo', 'GetAllDanmuInfo', (['user_input', 'headers'], {}), '(user_input, headers)\n', (6612, 6633), False, 'from GetAllDanmuInfo_V2 import GetAllDanmuInfo\n'), 
((5752, 5787), 'os.system', 'os.system', (['clear_comand_instruction'], {}), '(clear_comand_instruction)\n', (5761, 5787), False, 'import os\n'), ((6756, 6782), 'ExportAllDanmu.ExportAllDanmu', 'ExportAllDanmu', (['user_input'], {}), '(user_input)\n', (6770, 6782), False, 'from ExportAllDanmu import ExportAllDanmu\n'), ((7062, 7073), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7070, 7073), False, 'import sys\n'), ((4614, 4648), 'GetVideoTitle.GetVideoTitle', 'GetVideoTitle', (['user_input', 'headers'], {}), '(user_input, headers)\n', (4627, 4648), False, 'from GetVideoTitle import GetVideoTitle\n'), ((6016, 6053), 'ExportAllDanmu.ExportAllDanmu', 'ExportAllDanmu', (['Video_Info_List[i][2]'], {}), '(Video_Info_List[i][2])\n', (6030, 6053), False, 'from ExportAllDanmu import ExportAllDanmu\n'), ((6123, 6158), 'os.system', 'os.system', (['clear_comand_instruction'], {}), '(clear_comand_instruction)\n', (6132, 6158), False, 'import os\n'), ((4813, 4847), 'GetVideoTitle.GetVideoTitle', 'GetVideoTitle', (['user_input', 'headers'], {}), '(user_input, headers)\n', (4826, 4847), False, 'from GetVideoTitle import GetVideoTitle\n'), ((6699, 6733), 'GetVideoTitle.GetVideoTitle', 'GetVideoTitle', (['user_input', 'headers'], {}), '(user_input, headers)\n', (6712, 6733), False, 'from GetVideoTitle import GetVideoTitle\n'), ((6940, 6974), 'GetVideoTitle.GetVideoTitle', 'GetVideoTitle', (['user_input', 'headers'], {}), '(user_input, headers)\n', (6953, 6974), False, 'from GetVideoTitle import GetVideoTitle\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, <NAME> (@briantist)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import subprocess
import re
import getopt
from pathlib import Path
def get_flags(pattern, input):
patpat = r'\{([^\}]+)\}'
pats = re.findall(patpat, pattern)
matcher = re.sub(patpat, r'(.*?)', pattern)
match = re.search(matcher, input)
if match:
return [pats[i].replace('%', result) for i, result in enumerate(match.groups())]
return None
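# Illustration (hypothetical pattern and filename, not part of the original script):
# each '{...}' placeholder in the pattern is replaced by a non-greedy '(.*?)' capture
# group, and '%' inside the placeholder is substituted with the captured text, e.g.
#   get_flags('coverage-{unit-%}.xml', 'coverage-py38.xml')  ->  ['unit-py38']
# If the pattern does not match the input at all, the function returns None.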
def main(argv):
additional_flags = file_flag_pattern = directory_flag_pattern = directory = fail_on_error = None
opts, args = getopt.getopt(argv, '', [
'directory=',
'directory-flag-pattern=',
'file-flag-pattern=',
'additional-flags=',
'fail-on-error',
])
for opt, arg in opts:
if opt == '--directory':
directory = arg
elif opt == '--directory-flag-pattern':
directory_flag_pattern = arg
elif opt == '--file-flag-pattern':
file_flag_pattern = arg
elif opt == '--additional-flags':
additional_flags = arg
elif opt == '--fail-on-error':
fail_on_error = True
extra_flags = additional_flags.split(',') if additional_flags else []
flags = {}
directory = Path(directory) if directory else Path.cwd()
for f in directory.rglob('*'):
if f.is_file():
iflags = set()
if directory_flag_pattern:
for part in f.parent.parts:
dflags = get_flags(directory_flag_pattern, part)
if dflags:
iflags.update(dflags)
fflags = get_flags(file_flag_pattern, str(f.name))
if fflags:
iflags.update(fflags)
for flag in iflags:
flags.setdefault(flag, []).append(str(f.resolve()))
logextra = ' (+%r)' % extra_flags if extra_flags else ''
for flag, files in flags.items():
cmd = ['codecov', '-F', flag]
[cmd.extend(['-F', extra]) for extra in extra_flags]
[cmd.extend(['-f', file]) for file in files]
if fail_on_error:
cmd.append('-Z')
print('::group::Flag: %s%s' % (flag, logextra))
print('Executing: %r' % cmd)
subprocess.run(cmd, stderr=subprocess.STDOUT, check=True)
print('::endgroup::')
if __name__ == '__main__':
main(sys.argv[1:])
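# A hedged usage sketch: the option names come from the getopt spec above, but the
# script name and values below are made-up examples, not taken from any real CI config.
#   python codecov_flags.py --directory=coverage-reports \
#       --file-flag-pattern='coverage-{%}.xml' --additional-flags=units --fail-on-error
# This would upload each matching file to codecov with a flag derived from its name
# plus the extra 'units' flag, and pass -Z so a failed upload makes
# subprocess.run(..., check=True) raise and abort the run.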
|
[
"subprocess.run",
"getopt.getopt",
"pathlib.Path",
"re.findall",
"re.search",
"pathlib.Path.cwd",
"re.sub"
] |
[((425, 452), 're.findall', 're.findall', (['patpat', 'pattern'], {}), '(patpat, pattern)\n', (435, 452), False, 'import re\n'), ((467, 499), 're.sub', 're.sub', (['patpat', '"""(.*?)"""', 'pattern'], {}), "(patpat, '(.*?)', pattern)\n", (473, 499), False, 'import re\n'), ((513, 538), 're.search', 're.search', (['matcher', 'input'], {}), '(matcher, input)\n', (522, 538), False, 'import re\n'), ((797, 927), 'getopt.getopt', 'getopt.getopt', (['argv', '""""""', "['directory=', 'directory-flag-pattern=', 'file-flag-pattern=',\n 'additional-flags=', 'fail-on-error']"], {}), "(argv, '', ['directory=', 'directory-flag-pattern=',\n 'file-flag-pattern=', 'additional-flags=', 'fail-on-error'])\n", (810, 927), False, 'import getopt\n'), ((1484, 1499), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (1488, 1499), False, 'from pathlib import Path\n'), ((1518, 1528), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (1526, 1528), False, 'from pathlib import Path\n'), ((2482, 2539), 'subprocess.run', 'subprocess.run', (['cmd'], {'stderr': 'subprocess.STDOUT', 'check': '(True)'}), '(cmd, stderr=subprocess.STDOUT, check=True)\n', (2496, 2539), False, 'import subprocess\n')]
|
import os
import random
import string
import time
from flask_testing import TestCase
from cellphonedb.src.app.cellphonedb_app import cellphonedb_app
from cellphonedb.src.local_launchers.local_collector_launcher import LocalCollectorLauncher
from cellphonedb.utils import utils
class CellphoneFlaskTestCase(TestCase):
@staticmethod
def fixtures_dir():
current_dir = os.path.dirname(os.path.realpath(__file__))
fixtures_dir = '{}/fixtures'.format(current_dir)
return fixtures_dir
@staticmethod
def reset_db():
cellphonedb_app.cellphonedb.database_manager.database.drop_everything()
cellphonedb_app.cellphonedb.database_manager.database.create_all()
def populate_db(self):
LocalCollectorLauncher().all('collect_protein.csv', 'collect_gene.csv', 'collect_complex.csv',
'collect_interaction.csv', self.fixtures_dir())
@staticmethod
def remove_file(file):
os.remove(file)
@staticmethod
def rand_string(digits=5):
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(digits))
@staticmethod
def get_test_filename(original_namefile, extension, prefix='TESTING'):
namefile = '{}_{}_{}_{}.{}'.format(prefix, original_namefile, int(time.time()),
CellphoneFlaskTestCase.rand_string(),
extension)
return namefile
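    # Illustrative output (values vary per run; example only, not from the test suite):
    #   get_test_filename('meta', 'csv')  ->  'TESTING_meta_1600000000_AB1CD.csv'
    #   i.e. prefix, original name, current unix timestamp, 5 random chars, extension.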
def assert_file_not_empty(self, file, message=''):
if not message:
message = 'File {} is empty'.format(file)
read_data = utils.read_data_table_from_file(file)
self.assertFalse(read_data.empty, message)
def assert_file_exist(self, path_file, message=''):
if not message:
            message = 'File {} does not exist'.format(path_file)
self.assertTrue(os.path.isfile(path_file), message)
|
[
"os.remove",
"cellphonedb.src.local_launchers.local_collector_launcher.LocalCollectorLauncher",
"cellphonedb.src.app.cellphonedb_app.cellphonedb_app.cellphonedb.database_manager.database.drop_everything",
"os.path.realpath",
"random.choice",
"cellphonedb.src.app.cellphonedb_app.cellphonedb_app.cellphonedb.database_manager.database.create_all",
"time.time",
"cellphonedb.utils.utils.read_data_table_from_file",
"os.path.isfile"
] |
[((562, 633), 'cellphonedb.src.app.cellphonedb_app.cellphonedb_app.cellphonedb.database_manager.database.drop_everything', 'cellphonedb_app.cellphonedb.database_manager.database.drop_everything', ([], {}), '()\n', (631, 633), False, 'from cellphonedb.src.app.cellphonedb_app import cellphonedb_app\n'), ((642, 708), 'cellphonedb.src.app.cellphonedb_app.cellphonedb_app.cellphonedb.database_manager.database.create_all', 'cellphonedb_app.cellphonedb.database_manager.database.create_all', ([], {}), '()\n', (706, 708), False, 'from cellphonedb.src.app.cellphonedb_app import cellphonedb_app\n'), ((987, 1002), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (996, 1002), False, 'import os\n'), ((1651, 1688), 'cellphonedb.utils.utils.read_data_table_from_file', 'utils.read_data_table_from_file', (['file'], {}), '(file)\n', (1682, 1688), False, 'from cellphonedb.utils import utils\n'), ((401, 427), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (417, 427), False, 'import os\n'), ((1908, 1933), 'os.path.isfile', 'os.path.isfile', (['path_file'], {}), '(path_file)\n', (1922, 1933), False, 'import os\n'), ((745, 769), 'cellphonedb.src.local_launchers.local_collector_launcher.LocalCollectorLauncher', 'LocalCollectorLauncher', ([], {}), '()\n', (767, 769), False, 'from cellphonedb.src.local_launchers.local_collector_launcher import LocalCollectorLauncher\n'), ((1076, 1129), 'random.choice', 'random.choice', (['(string.ascii_uppercase + string.digits)'], {}), '(string.ascii_uppercase + string.digits)\n', (1089, 1129), False, 'import random\n'), ((1322, 1333), 'time.time', 'time.time', ([], {}), '()\n', (1331, 1333), False, 'import time\n')]
|
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
import re
#from kivy.config import Config
from telas import *
#from kivy.garden.mapview import MapView, MapMarker
from kivy.uix.textinput import TextInput
from kivy.clock import Clock, mainthread
from kivy.uix.popup import Popup
#from plyer import gps
from kivy.uix.label import Label
from kivy.metrics import sp
#import openssl
#For buildoze spec
# (list) Permissions
#android.permissions = INTERNET,ACCESS_FINE_LOCATION,ACCESS_COARSE_LOCATION
# (list) Application requirements
#requirements = kivy,plyer
#Config.read('config.ini')
class LabelAdap(Label):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.size_hint = (1, None)
def on_size(self,*args):
# vamos colocar um espaço de 10 sp
self.text_size = (self.width - sp(10), None)
def on_texture_size(self,*args):
self.size = self.texture_size
self.height += sp(20)
class FloatInput(TextInput):
pat = re.compile('[^0-9]')
def insert_text(self, substring, from_undo=False):
pat = self.pat
if '.' in self.text:
s = re.sub(pat, '', substring)
else:
s = '.'.join([re.sub(pat, '', s) for s in substring.split('.', 1)])
return super(FloatInput, self).insert_text(s, from_undo=from_undo)
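    # Behaviour sketch (hypothetical keystrokes, not from the original app):
    #   with an empty field, pasting '12a.5' inserts '12.5' (non-digits stripped,
    #   first '.' kept); once the text already contains a '.', any further '.'
    #   typed is dropped, so the field only ever holds one decimal point.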
class Gerenciador(ScreenManager):
def __init__(self, **kw):
super().__init__(**kw)
#def current_location(self):
# try:
# gps.configure(on_location=self.on_location)
# gps.start()
# except NotImplementedError:
# popup = Popup(title="GPS Error",
# content=Label(text="GPS support is not implemented on your platform")
# ).open()
# Clock.schedule_once(lambda d: popup.dismiss(), 3)
#
#@mainthread
#def on_location(self, **kwargs):
# print(kwargs)
class Mandiokito(App):
def build (self):
return Gerenciador()
if __name__ == '__main__':
Mandiokito().run()
'''map = MapView(zoom=11, lon=50.6394, lat=3.057)
m1 = MapMarker(lon=-34.977078, lat=-7.138594)
map.add_marker(m1)
return map'''
|
[
"re.sub",
"kivy.metrics.sp",
"re.compile"
] |
[((1021, 1041), 're.compile', 're.compile', (['"""[^0-9]"""'], {}), "('[^0-9]')\n", (1031, 1041), False, 'import re\n'), ((974, 980), 'kivy.metrics.sp', 'sp', (['(20)'], {}), '(20)\n', (976, 980), False, 'from kivy.metrics import sp\n'), ((1165, 1191), 're.sub', 're.sub', (['pat', '""""""', 'substring'], {}), "(pat, '', substring)\n", (1171, 1191), False, 'import re\n'), ((857, 863), 'kivy.metrics.sp', 'sp', (['(10)'], {}), '(10)\n', (859, 863), False, 'from kivy.metrics import sp\n'), ((1232, 1250), 're.sub', 're.sub', (['pat', '""""""', 's'], {}), "(pat, '', s)\n", (1238, 1250), False, 'import re\n')]
|
from celery import task
from c2g.models import ExamRecord, Course, Exam
from django.core.mail import EmailMessage, get_connection
from django.core.mail import send_mail
from storages.backends.s3boto import S3BotoStorage
import json
import settings
import datetime
FILE_DIR = getattr(settings, 'FILE_UPLOAD_TEMP_DIR', '/tmp')
AWS_ACCESS_KEY_ID = getattr(settings, 'AWS_ACCESS_KEY_ID', '')
AWS_SECRET_ACCESS_KEY = getattr(settings, 'AWS_SECRET_ACCESS_KEY', '')
AWS_SECURE_STORAGE_BUCKET_NAME = getattr(settings, 'AWS_SECURE_STORAGE_BUCKET_NAME', '')
@task()
def generate_submission_csv_task(course_id, exam_id, email_to):
course = Course.objects.get(id=course_id)
exam = Exam.objects.get(id=exam_id)
course_prefix = course.prefix
course_suffix = course.suffix
exam_slug = exam.slug
submitters = ExamRecord.objects.filter(exam=exam, complete=True, time_created__lt=exam.grace_period).values('student').distinct()
fname = course_prefix+"-"+course_suffix+"-"+exam_slug+"-"+datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")+".csv"
outfile = open(FILE_DIR+"/"+fname,"w+")
could_not_parse = ""
for s in submitters: #yes, there is sql in a loop here. We'll optimize later
latest_sub = ExamRecord.objects.values('student__username', 'time_created', 'json_data').filter(exam=exam, time_created__lt=exam.grace_period, student=s['student']).latest('time_created')
try:
sub_obj = json.loads(latest_sub['json_data']).iteritems()
for k,v in sub_obj:
vals = parse_val(v)
outstring = '"%s","%s","%s"\n' % (latest_sub['student__username'], k, vals)
outfile.write(outstring)
except ValueError:
could_not_parse += latest_sub['student__username']+ " " #Don't output if the latest submission was erroneous
outfile.write("\n")
#if there were items we could not parse
if could_not_parse:
#write the usernames at the beginning of the file
outfile.seek(0)
data=outfile.read()
outfile.seek(0)
outfile.truncate()
outfile.write("Could not parse data from the following users: " + could_not_parse + "\n")
outfile.write(data)
#write to S3
secure_file_storage = S3BotoStorage(bucket=AWS_SECURE_STORAGE_BUCKET_NAME, access_key=AWS_ACCESS_KEY_ID, secret_key=AWS_SECRET_ACCESS_KEY)
s3file = secure_file_storage.open("/%s/%s/reports/exams/%s" % (course_prefix, course_suffix, fname),'w')
try:
outfile.seek(0)
s3file.write(outfile.read())
finally:
s3file.close()
outfile.close()
dl_url = secure_file_storage.url_monkeypatched("/%s/%s/reports/exams/%s" % (course_prefix, course_suffix, fname), response_headers={'response-content-disposition': 'attachment'})
email = EmailMessage('%s: Submission CSV for %s' % (course.title, exam.title), "The student submissions CSV for %s is ready. Because the file can be large, please download it at %s." % (exam.title, dl_url),
settings.SERVER_EMAIL,
[email_to])
email.send()
def parse_val(v):
"""Helper function to parse AJAX submissions"""
if isinstance(v,list):
sorted_list = sorted(map(lambda li: li['value'], v))
return reduce(lambda x,y: x+y+",", sorted_list, "")
else:
try:
return v.get('value', "")
except (TypeError, AttributeError):
return str(v)
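# Rough examples of what parse_val returns (illustrative inputs, not real submissions):
#   [{'value': 'b'}, {'value': 'a'}]  ->  'a,b,'  (sorted, comma-joined, trailing comma)
#   {'value': 'x'}                    ->  'x'
#   42                                ->  '42'    (falls back to str())
# Note: the bare reduce() and iteritems() calls above mean this module targets Python 2.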
|
[
"c2g.models.ExamRecord.objects.filter",
"c2g.models.ExamRecord.objects.values",
"json.loads",
"celery.task",
"c2g.models.Exam.objects.get",
"django.core.mail.EmailMessage",
"storages.backends.s3boto.S3BotoStorage",
"datetime.datetime.now",
"c2g.models.Course.objects.get"
] |
[((552, 558), 'celery.task', 'task', ([], {}), '()\n', (556, 558), False, 'from celery import task\n'), ((641, 673), 'c2g.models.Course.objects.get', 'Course.objects.get', ([], {'id': 'course_id'}), '(id=course_id)\n', (659, 673), False, 'from c2g.models import ExamRecord, Course, Exam\n'), ((685, 713), 'c2g.models.Exam.objects.get', 'Exam.objects.get', ([], {'id': 'exam_id'}), '(id=exam_id)\n', (701, 713), False, 'from c2g.models import ExamRecord, Course, Exam\n'), ((2301, 2422), 'storages.backends.s3boto.S3BotoStorage', 'S3BotoStorage', ([], {'bucket': 'AWS_SECURE_STORAGE_BUCKET_NAME', 'access_key': 'AWS_ACCESS_KEY_ID', 'secret_key': 'AWS_SECRET_ACCESS_KEY'}), '(bucket=AWS_SECURE_STORAGE_BUCKET_NAME, access_key=\n AWS_ACCESS_KEY_ID, secret_key=AWS_SECRET_ACCESS_KEY)\n', (2314, 2422), False, 'from storages.backends.s3boto import S3BotoStorage\n'), ((2854, 3098), 'django.core.mail.EmailMessage', 'EmailMessage', (["('%s: Submission CSV for %s' % (course.title, exam.title))", "('The student submissions CSV for %s is ready. Because the file can be large, please download it at %s.'\n % (exam.title, dl_url))", 'settings.SERVER_EMAIL', '[email_to]'], {}), "('%s: Submission CSV for %s' % (course.title, exam.title), \n 'The student submissions CSV for %s is ready. Because the file can be large, please download it at %s.'\n % (exam.title, dl_url), settings.SERVER_EMAIL, [email_to])\n", (2866, 3098), False, 'from django.core.mail import EmailMessage, get_connection\n'), ((835, 927), 'c2g.models.ExamRecord.objects.filter', 'ExamRecord.objects.filter', ([], {'exam': 'exam', 'complete': '(True)', 'time_created__lt': 'exam.grace_period'}), '(exam=exam, complete=True, time_created__lt=exam.\n grace_period)\n', (860, 927), False, 'from c2g.models import ExamRecord, Course, Exam\n'), ((1014, 1037), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1035, 1037), False, 'import datetime\n'), ((1467, 1502), 'json.loads', 'json.loads', (["latest_sub['json_data']"], {}), "(latest_sub['json_data'])\n", (1477, 1502), False, 'import json\n'), ((1257, 1332), 'c2g.models.ExamRecord.objects.values', 'ExamRecord.objects.values', (['"""student__username"""', '"""time_created"""', '"""json_data"""'], {}), "('student__username', 'time_created', 'json_data')\n", (1282, 1332), False, 'from c2g.models import ExamRecord, Course, Exam\n')]
|
from collections import namedtuple
from pytest import fixture, raises
from pyorbs.orbs import Orbs
@fixture
def orbs(tmp_path):
orbs = Orbs(str(tmp_path))
orbs.orbs = ['test']
return orbs
@fixture
def orb(mocker, orbs):
mocker.patch('pyorbs.orbs.exists', return_value=True)
return orbs.orb('test')
@fixture
def make(mocker):
return mocker.patch('pyorbs.orbs.Orb.make')
@fixture
def make_actions(mocker):
execute = mocker.patch('pyorbs.orbs.execute')
execute.return_value.returncode = 0
mocker.patch('pyorbs.orbs.Orb.activate')
return {
'write_text': mocker.patch('pyorbs.orbs.Path.write_text'),
'execute': execute,
'lock_reqs': mocker.patch('pyorbs.reqs.Requirements.lock')
}
def test_list(capsys, orbs):
orbs.list()
assert 'test' in capsys.readouterr().out
def test_freeze_invalid_paths(orbs):
with raises(ValueError):
orbs.freeze('invalid')
with raises(ValueError):
orbs.freeze('tests/reqs/empty')
def test_freeze_changed(orbs, make, reqs):
orbs.freeze(reqs('changed', raw=True))
assert make.called
def test_freeze_unchanged(orbs, make, reqs):
orbs.freeze(reqs(raw=True))
assert not make.called
def test_freeze_folder(orbs, make):
orbs.freeze('tests/reqs')
assert make.called
def test_toggle_glow_invalid_name(orbs):
with raises(ValueError):
orbs.toggle_glow('invalid')
def test_toggle_glow(orbs, monkeypatch):
assert orbs.glowing() is None
orbs.toggle_glow('test')
assert orbs.glowing() == 'test'
orbs.toggle_glow('test', force_on=True)
assert orbs.glowing() == 'test'
orbs.toggle_glow('test')
assert orbs.glowing() is None
monkeypatch.setenv('PYORBS_ACTIVE_ORB', 'test')
orbs.toggle_glow()
assert orbs.glowing() == 'test'
def test_orb_errors(orbs):
with raises(ValueError):
Orbs('invalid').orb()
with raises(ValueError):
orbs.orb('invalid')
with raises(RuntimeError):
orbs.orb()
def test_orb_shell(mocker, orbs):
execute = mocker.patch('pyorbs.orbs.execute')
orbs.orb(shell=True)
assert execute.called
def test_orb_glowing(orbs):
orbs.toggle_glow('test')
assert orbs.orb().name == 'test'
def test_orb(orbs):
assert orbs.orb('test').name == 'test'
def test_make_reqs_changed(orbs, reqs):
with raises(RuntimeError):
orbs.orb('test').make(reqs('changed'))
def test_make_venv_error(make_actions, orbs, reqs):
make_actions['execute'].return_value.returncode = 1
with raises(RuntimeError):
orbs.orb('test').make(reqs())
def test_make_install_error(make_actions, orbs, reqs):
make_actions['execute'].side_effect = [
namedtuple('CompletedProcess', 'returncode')(0),
namedtuple('CompletedProcess', 'returncode')(1),
]
with raises(RuntimeError):
orbs.orb('test').make(reqs())
def test_make(make_actions, orbs, reqs):
orbs.orb('test').make(reqs())
assert make_actions['write_text'].called
assert make_actions['execute'].called
assert not make_actions['lock_reqs'].called
def test_make_reqs_new(make_actions, orbs, reqs):
orbs.orb('test').make(reqs('new'))
assert make_actions['lock_reqs'].called
def test_make_update(make_actions, orbs, reqs):
orbs.orb('test').make(reqs('changed'), update=True)
assert make_actions['lock_reqs'].called
def test_make_quiet(mocker, make_actions, orbs, reqs):
mocked_print = mocker.patch('builtins.print')
orbs.orb('test').make(reqs(), quiet=True)
assert not mocked_print.called
assert not make_actions['lock_reqs'].called
def test_destroy_exit(monkeypatch, orbs):
monkeypatch.setenv('PYORBS_ACTIVE_ORB', 'test')
with raises(RuntimeError):
orbs.orb('test').destroy()
def test_destroy(mocker, orbs):
mocker.patch('pyorbs.orbs.Orbs.glowing', return_value='test')
toggle_glow = mocker.patch('pyorbs.orbs.Orbs.toggle_glow')
rmtree = mocker.patch('pyorbs.orbs.rmtree')
orbs.orb('test').destroy()
assert toggle_glow.called
assert rmtree.called
def test_info(capsys, mocker, orb):
execute = mocker.patch('pyorbs.orbs.execute')
execute.return_value.stdout = 'outdated'
orb.info()
assert 'outdated' in capsys.readouterr().out
def test_activate_invalid(orbs):
with raises(RuntimeError):
orbs.orb('test').activate()
def test_activate(mocker, orb):
toggle_glow = mocker.patch('pyorbs.orbs.Orbs.toggle_glow')
execute = mocker.patch('pyorbs.orbs.execute')
orb.activate()
toggle_glow.assert_called_with(orb.name, force_on=True)
execute.assert_called_with(init=orb.orb(), command=None, replace=True, capture=False)
def test_activate_run(mocker, orb):
execute = mocker.patch('pyorbs.orbs.execute')
command = 'source "%s"; test' % orb.orb()
orb.activate(run='test')
execute.assert_called_with(init=None, command=command, replace=True, capture=False)
orb.activate(run='test', no_cd=True, capture=True)
execute.assert_called_with(init=None, command=command, replace=False, capture=True)
|
[
"pyorbs.orbs.Orbs",
"pytest.raises",
"collections.namedtuple"
] |
[((894, 912), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (900, 912), False, 'from pytest import fixture, raises\n'), ((954, 972), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (960, 972), False, 'from pytest import fixture, raises\n'), ((1374, 1392), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (1380, 1392), False, 'from pytest import fixture, raises\n'), ((1864, 1882), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (1870, 1882), False, 'from pytest import fixture, raises\n'), ((1923, 1941), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (1929, 1941), False, 'from pytest import fixture, raises\n'), ((1980, 2000), 'pytest.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (1986, 2000), False, 'from pytest import fixture, raises\n'), ((2370, 2390), 'pytest.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (2376, 2390), False, 'from pytest import fixture, raises\n'), ((2558, 2578), 'pytest.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (2564, 2578), False, 'from pytest import fixture, raises\n'), ((2848, 2868), 'pytest.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (2854, 2868), False, 'from pytest import fixture, raises\n'), ((3746, 3766), 'pytest.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3752, 3766), False, 'from pytest import fixture, raises\n'), ((4341, 4361), 'pytest.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (4347, 4361), False, 'from pytest import fixture, raises\n'), ((2727, 2771), 'collections.namedtuple', 'namedtuple', (['"""CompletedProcess"""', '"""returncode"""'], {}), "('CompletedProcess', 'returncode')\n", (2737, 2771), False, 'from collections import namedtuple\n'), ((2784, 2828), 'collections.namedtuple', 'namedtuple', (['"""CompletedProcess"""', '"""returncode"""'], {}), "('CompletedProcess', 'returncode')\n", (2794, 2828), False, 'from collections import namedtuple\n'), ((1892, 1907), 'pyorbs.orbs.Orbs', 'Orbs', (['"""invalid"""'], {}), "('invalid')\n", (1896, 1907), False, 'from pyorbs.orbs import Orbs\n')]
|
from django.contrib import admin
from .models import GiropayTransaction
class GiropayTransactionAdmin(admin.ModelAdmin):
list_display = ('merchant_tx_id', 'reference', 'latest_response_code')
list_filter = ('latest_response_code',)
ordering = ('-created_at',)
fields = ('merchant_tx_id', 'reference', 'latest_response_code')
admin.site.register(GiropayTransaction, GiropayTransactionAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((345, 409), 'django.contrib.admin.site.register', 'admin.site.register', (['GiropayTransaction', 'GiropayTransactionAdmin'], {}), '(GiropayTransaction, GiropayTransactionAdmin)\n', (364, 409), False, 'from django.contrib import admin\n')]
|
import pytest
from scrapyscript import Job, Processor, ScrapyScriptException
from spiders import ParamReturnSpider, TitleSpider
def test_job_raises_if_no_spider_provided():
with pytest.raises(TypeError):
Job()
def test_create_valid_job():
spider = TitleSpider
job = Job(spider)
assert isinstance(job, Job)
|
[
"pytest.raises",
"scrapyscript.Job"
] |
[((291, 302), 'scrapyscript.Job', 'Job', (['spider'], {}), '(spider)\n', (294, 302), False, 'from scrapyscript import Job, Processor, ScrapyScriptException\n'), ((185, 209), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (198, 209), False, 'import pytest\n'), ((219, 224), 'scrapyscript.Job', 'Job', ([], {}), '()\n', (222, 224), False, 'from scrapyscript import Job, Processor, ScrapyScriptException\n')]
|
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
import unittest
from test import test_rc
if __name__ == '__main__':
SeTestSuite = unittest.defaultTestLoader.discover(start_dir='./')
unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(SeTestSuite))
|
[
"unittest.defaultTestLoader.discover",
"os.path.dirname",
"unittest.TextTestRunner",
"unittest.TestSuite"
] |
[((177, 228), 'unittest.defaultTestLoader.discover', 'unittest.defaultTestLoader.discover', ([], {'start_dir': '"""./"""'}), "(start_dir='./')\n", (212, 228), False, 'import unittest\n'), ((54, 79), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (69, 79), False, 'import os\n'), ((274, 305), 'unittest.TestSuite', 'unittest.TestSuite', (['SeTestSuite'], {}), '(SeTestSuite)\n', (292, 305), False, 'import unittest\n'), ((233, 269), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (256, 269), False, 'import unittest\n')]
|
# Copyright 2014 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests Midolman agent and interface status updates
"""
from hamcrest import assert_that
from hamcrest import none
from hamcrest import not_none
from nose.plugins.attrib import attr
from mdts.lib.physical_topology_manager import PhysicalTopologyManager
from mdts.tests.utils.utils import get_midonet_api
from mdts.tests.utils.utils import start_midolman_agents
from mdts.tests.utils.utils import stop_midolman_agents
from mdts.tests.utils.utils import check_all_midolman_hosts
import time
# Only a physical topology containing a single interface.
PTM = PhysicalTopologyManager(
'../topologies/mmm_physical_test_midolman_and_interfaces.yaml')
# We don't build a Physical Topology Manager in the setup. Instead we create
# a new interface inside 'test_new_interface_becomes_visible'.
def teardown():
time.sleep(2)
PTM.destroy()
@attr(version="v1.2.0", slow=False)
def test_host_status():
"""
Title: Test host status update
Scenario:
When: The test starts up,
Then: check if all Midolman agents are alive,
Then: stops all Midolman agents,
Then: check if all Midolman agents are now dead,
    Then: restarts all Midolman agents,
And: check again if all Midolman agents are alive,
"""
midonet_api = get_midonet_api()
check_all_midolman_hosts(midonet_api, alive=True)
stop_midolman_agents()
time.sleep(5)
check_all_midolman_hosts(midonet_api, alive=False)
start_midolman_agents()
time.sleep(30)
check_all_midolman_hosts(midonet_api, alive=True)
def get_interface(midonet_api, host_name, interface_name):
"""Returns an interface with the given name.
Args:
midonet_api: A MidonetApi instance
        host_name: A MidoNet host ID (a UUID, matched against h.get_id()).
interface_name: An interface name.
Returns:
An interface if one is found with the specified host, otherwise
None.
"""
host = None
for h in midonet_api.get_hosts():
if h.get_id() == host_name: host = h
# No matching host found. Return None.
if not host: return None
interface = None
for i in host.get_interfaces():
if i.get_name() == interface_name:
interface = i
break
return interface
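# Example (IDs mirror the test below; purely illustrative):
#   get_interface(api, '00000000-0000-0000-0000-000000000001', 'interface_01')
#   returns the matching interface object, or None when the host or the
#   interface cannot be found.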
@attr(version="v1.2.0", slow=False)
def test_new_interface_becomes_visible():
"""
Title: Test new interface becomes visible
Scenario:
When: On start up, a Midolman sees no interface,
Then: adds a new interface,
And: Midolman detects a new interface.
"""
midonet_api = get_midonet_api()
new_interface = get_interface(
midonet_api, '00000000-0000-0000-0000-000000000001', 'interface_01')
# Test that no interface with name 'interface_01' exists.
assert_that(new_interface, none(), 'interface interface_01')
# Create a new interface 'interface_01'.
PTM.build()
time.sleep(5)
new_interface = get_interface(
midonet_api, '00000000-0000-0000-0000-000000000001', 'interface_01')
# Test that the created interface is visible.
assert_that(new_interface, not_none(), 'interface interface_01.')
|
[
"mdts.tests.utils.utils.get_midonet_api",
"hamcrest.not_none",
"time.sleep",
"mdts.tests.utils.utils.check_all_midolman_hosts",
"mdts.tests.utils.utils.stop_midolman_agents",
"mdts.tests.utils.utils.start_midolman_agents",
"mdts.lib.physical_topology_manager.PhysicalTopologyManager",
"nose.plugins.attrib.attr",
"hamcrest.none"
] |
[((1138, 1230), 'mdts.lib.physical_topology_manager.PhysicalTopologyManager', 'PhysicalTopologyManager', (['"""../topologies/mmm_physical_test_midolman_and_interfaces.yaml"""'], {}), "(\n '../topologies/mmm_physical_test_midolman_and_interfaces.yaml')\n", (1161, 1230), False, 'from mdts.lib.physical_topology_manager import PhysicalTopologyManager\n'), ((1432, 1466), 'nose.plugins.attrib.attr', 'attr', ([], {'version': '"""v1.2.0"""', 'slow': '(False)'}), "(version='v1.2.0', slow=False)\n", (1436, 1466), False, 'from nose.plugins.attrib import attr\n'), ((2823, 2857), 'nose.plugins.attrib.attr', 'attr', ([], {'version': '"""v1.2.0"""', 'slow': '(False)'}), "(version='v1.2.0', slow=False)\n", (2827, 2857), False, 'from nose.plugins.attrib import attr\n'), ((1397, 1410), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1407, 1410), False, 'import time\n'), ((1840, 1857), 'mdts.tests.utils.utils.get_midonet_api', 'get_midonet_api', ([], {}), '()\n', (1855, 1857), False, 'from mdts.tests.utils.utils import get_midonet_api\n'), ((1862, 1911), 'mdts.tests.utils.utils.check_all_midolman_hosts', 'check_all_midolman_hosts', (['midonet_api'], {'alive': '(True)'}), '(midonet_api, alive=True)\n', (1886, 1911), False, 'from mdts.tests.utils.utils import check_all_midolman_hosts\n'), ((1917, 1939), 'mdts.tests.utils.utils.stop_midolman_agents', 'stop_midolman_agents', ([], {}), '()\n', (1937, 1939), False, 'from mdts.tests.utils.utils import stop_midolman_agents\n'), ((1944, 1957), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1954, 1957), False, 'import time\n'), ((1962, 2012), 'mdts.tests.utils.utils.check_all_midolman_hosts', 'check_all_midolman_hosts', (['midonet_api'], {'alive': '(False)'}), '(midonet_api, alive=False)\n', (1986, 2012), False, 'from mdts.tests.utils.utils import check_all_midolman_hosts\n'), ((2018, 2041), 'mdts.tests.utils.utils.start_midolman_agents', 'start_midolman_agents', ([], {}), '()\n', (2039, 2041), False, 'from mdts.tests.utils.utils import start_midolman_agents\n'), ((2046, 2060), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (2056, 2060), False, 'import time\n'), ((2065, 2114), 'mdts.tests.utils.utils.check_all_midolman_hosts', 'check_all_midolman_hosts', (['midonet_api'], {'alive': '(True)'}), '(midonet_api, alive=True)\n', (2089, 2114), False, 'from mdts.tests.utils.utils import check_all_midolman_hosts\n'), ((3123, 3140), 'mdts.tests.utils.utils.get_midonet_api', 'get_midonet_api', ([], {}), '()\n', (3138, 3140), False, 'from mdts.tests.utils.utils import get_midonet_api\n'), ((3450, 3463), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3460, 3463), False, 'import time\n'), ((3350, 3356), 'hamcrest.none', 'none', ([], {}), '()\n', (3354, 3356), False, 'from hamcrest import none\n'), ((3662, 3672), 'hamcrest.not_none', 'not_none', ([], {}), '()\n', (3670, 3672), False, 'from hamcrest import not_none\n')]
|