id (string, length 1-7) | text (string, length 6-1.03M) | dataset_id (string, 1 class) |
---|---|---|
3325431
|
<gh_stars>10-100
# from datetime import date
from django import template
# from django.conf import settings
# from demo.models import PersonPage, BlogPage, EventPage, Advert, Page
register = template.Library()
@register.assignment_tag(takes_context=True)
def get_site_root(context):
"""
Gets the root page for the site. Returns a wagtailcore.Page model.
"""
return context['request'].site.root_page
def has_menu_children(page):
"""
Returns a boolean for whether or not the current page has child pages.
"""
return page.get_children().live().in_menu().exists()
@register.inclusion_tag('wagtailpress/tags/nav_menu.html', takes_context=True)
def nav_menu(context, parent, calling_page=None):
"""
Retrieves the top menu items - the immediate children of the parent page
The has_menu_children method is necessary because the bootstrap menu requires
a dropdown class to be applied to a parent
"""
menuitems = parent.get_children().live().in_menu()
for menuitem in menuitems:
menuitem.show_dropdown = has_menu_children(menuitem)
# We don't directly check if calling_page is None since the template
# engine can pass an empty string to calling_page
# if the variable passed as calling_page does not exist.
menuitem.active = (calling_page.url.startswith(menuitem.url)
if calling_page else False)
return {
'calling_page': calling_page,
'menuitems': menuitems,
# required by the pageurl tag that we want to use within this template
'request': context['request'],
}
# Retrieves the children of the top menu items for the drop downs
@register.inclusion_tag('wagtailpress/tags/nav_menu_children.html', takes_context=True)
def nav_menu_children(context, parent):
menuitems_children = parent.get_children()
menuitems_children = menuitems_children.live().in_menu()
return {
'parent': parent,
'menuitems_children': menuitems_children,
# required by the pageurl tag that we want to use within this template
'request': context['request'],
}
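# Hedged usage sketch (not part of this module): in a template, these tags would
# typically be loaded and called along the lines of
#   {% load <this_tag_library> %}
#   {% get_site_root as site_root %}
#   {% nav_menu site_root self %}
# where <this_tag_library> stands for whatever name this file is registered under.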
|
StarcoderdataPython
|
3261218
|
""" Prediction models.
"""
import typing as t
import numpy as np
import tensorflow as tf
import embed
import data
import op
import nn
from nn.base import Model, WeightedSoftmaxCrossEntropyMixin
from util.log import exec_log as log
from util.debug import *
class ESIM(WeightedSoftmaxCrossEntropyMixin, Model):
def __init__(self,
embeddings: embed.IndexedWordEmbedding,
class_num: int,
scale_l1: float = 0.0,
scale_l2: float = 0.0,
lstm_unit: int = 300,
seq_len: int = 0,
char_filer_width: int = 5,
char_embed_dim: int = 8,
char_conv_dim: int = 100,
class_weights: t.List[float] = [1.1, 1, 1],
) -> None:
super(ESIM, self).__init__()
self._class_num = class_num
self.class_weights = class_weights
self.scale_l1 = scale_l1
self.scale_l2 = scale_l2
self.lstm_unit = lstm_unit
self.seq_len = seq_len
self.char_filter_width = char_filer_width
self.char_embed_dim = char_embed_dim
self.char_conv_dim = char_conv_dim
op_kwargs = {'scale_l1': self.scale_l1,
'scale_l2': self.scale_l2,
'keep_prob': self.keep_prob,
'drop_after': False}
with tf.variable_scope('embed') as s:
def set_seq_len(x):
x_len = tf.shape(x)[1]
return tf.cond(
tf.less(self.seq_len, x_len),
lambda: x[:,:self.seq_len],
lambda: tf.pad(x, [[0, 0], [0, self.seq_len - x_len]]))
if self.seq_len > 0:
x1, x2 = map(set_seq_len, [self.x1, self.x2])
else:
x1, x2 = self.x1, self.x2
#embed_init_var = embeddings.get_embeddings()
#embed = op.get_variable('embeddings',
# shape=embed_init_var.shape,
# initializer=tf.constant_initializer(embed_init_var))
#embed = tf.constant(embeddings.get_embeddings(),
# dtype=tf.float32,
# name='embeddings')
#x1, x2 = map(lambda x: tf.gather(embed, x), [x1, x2])
# Word pretrained embeddings (300D)
word_embed = tf.constant(embeddings.get_embeddings(),
dtype=tf.float32,
name='word_embed')
word_embed1, word_embed2 = map(lambda x: tf.gather(word_embed, x),
[self.x1, self.x2])
embed_dim = word_embed.get_shape()[-1]
# Character convolutional embeddings (`char_conv_dim`D)
char_embed = op.get_variable('char_embed',
shape=(256, char_embed_dim))
char_filter = op.get_variable('char_filter',
shape=(1, self.char_filter_width, self.char_embed_dim,
self.char_conv_dim))
def embed_chars(x_char):
embed = tf.gather(char_embed, x_char)
# shape: [batch, seq_len, word_len, embed_dim]
conv = tf.nn.conv2d(embed, char_filter, [1, 1, 1, 1], 'VALID')
# shape: [batch, seq_len, word_len - filter_width + 1, conv_dim]
return tf.reduce_max(conv, 2)
# shape: [batch, seq_len, conv_dim]
char_embed1, char_embed2 = map(embed_chars, [self.char1, self.char2])
# Tag one-hot embeddings (72D)
def embed_tags(x_ids, x_tags, x_len):
x_tags *= tf.sequence_mask(x_len, tf.shape(x_tags)[1],
dtype=tf.int32)
# shape: [batch, seq_len]
tag_embed = tf.one_hot(x_tags, data.SNLI.TAGS,
dtype=tf.float32,
name='tag_embed')
return tag_embed[:,:tf.shape(x_ids)[1]]
tag_embed1, tag_embed2 = map(embed_tags,
*zip((self.x1, self.tag1, self.len1),
(self.x2, self.tag2, self.len2)))
# Merge embeddings
#x1 = tf.concat([word_embed1, char_embed1, tag_embed1], 2)
#x2 = tf.concat([word_embed2, char_embed2, tag_embed2], 2)
x1 = tf.concat([word_embed1, char_embed1], 2)
x2 = tf.concat([word_embed2, char_embed2], 2)
x1 = self.unfold_tree(x1, self.temp1, self.tag1, self.len1, 'x1')
x2 = self.unfold_tree(x2, self.temp2, self.tag2, self.len2, 'x2')
with tf.variable_scope('encode', reuse=tf.AUTO_REUSE) as s:
x1, x2 = map(lambda x: tf.nn.dropout(x, self.keep_prob), [x1, x2])
#import pdb; pdb.set_trace()
x1, x2 = map(self.bilstm, [x1, x2])
# shape: [batch, seq_len, embed_dim * 2]
with tf.variable_scope('attent') as s:
sim = tf.matmul(x1, tf.matrix_transpose(x2))
alpha = tf.matmul(tf.nn.softmax(tf.matrix_transpose(sim)), x1)
beta = tf.matmul(tf.nn.softmax(sim), x2)
x1 = tf.concat([x1, beta, x1 * beta, x1 - beta ], 2)
x2 = tf.concat([x2, alpha, x2 * alpha, x2 - alpha], 2)
# shape: [batch, seq_len, embed_dim * 8]
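# In ESIM terms (Chen et al., 2017): sim holds e_ij = a_i^T b_j; beta softly
# aligns x2 to each position of x1 (softmax over the x2 axis), alpha softly
# aligns x1 to each position of x2, and the concatenation
# [x, aligned, x * aligned, x - aligned] is the "enhancement" step.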
with tf.variable_scope('decode', reuse=tf.AUTO_REUSE) as s:
x1, x2 = map(lambda x: op.linear(x, dim=embed_dim, **op_kwargs),
[x1, x2])
# NOTE: dropout here in the author's code
# shape: [batch, seq_len, embed_dim]
x1, x2 = map(self.bilstm, [x1, x2])
# shape: [batch, seq_len, embed_dim * 2]
with tf.variable_scope('aggregate') as s:
def pool(x):
return tf.concat([
tf.reduce_sum(x, axis=1),
tf.reduce_max(x, axis=1)
], 1)
y_hat = op.linear(tf.concat([pool(x1), pool(x2)], 1),
dim=embed_dim,
activation_fn=tf.nn.tanh,
scope='linear-1',
**op_kwargs)
# shape: [batch, embed_dim * 8]
y_hat = op.linear(y_hat,
dim=self._class_num,
activation_fn=None,
scope='linear-2',
**op_kwargs)
# shape: [batch, class_num]
self.evaluate_and_loss(y_hat, self.class_weights)
def bilstm(self, x):
# shape: [batch, seq_len, embed_dim]
if self.seq_len > 0:
# Static RNN
#lstm = tf.contrib.cudnn_rnn.CudnnLSTM(1, self.lstm_unit,
# direction='bidirectional')
#return tf.transpose(lstm(tf.transpose(x, [1, 0, 2]))[0], [1, 0, 2])
x_seq = tf.unstack(
tf.reshape(x, [-1, self.seq_len, x.get_shape()[-1]]),
axis=1)
out, _, _ = tf.nn.static_bidirectional_rnn(
cell_fw=tf.contrib.rnn.LSTMBlockCell(self.lstm_unit),
cell_bw=tf.contrib.rnn.LSTMBlockCell(self.lstm_unit),
inputs=x_seq,
dtype=tf.float32)
return tf.stack(out, axis=1)
else:
# Dynamic RNN
(outputs_fw, outputs_bw), (states_fw, states_bw) = \
tf.nn.bidirectional_dynamic_rnn(
cell_fw=tf.contrib.rnn.LSTMBlockCell(self.lstm_unit),
cell_bw=tf.contrib.rnn.LSTMBlockCell(self.lstm_unit),
inputs=x,
dtype=tf.float32)
return tf.concat([outputs_fw, outputs_bw], 2)
# shape: [batch, seq_len, embed_dim * 2]
def unfold_tree(self,
embed: tf.Tensor, # 3D: [batch, seq_len, embed_dim]
temp: tf.Tensor, # 3D: [batch, temp_len, temp_size]
tag: tf.Tensor, # 2D: [batch, seq_len + temp_len]
len_: tf.Tensor, # 1D: [batch]
suffix: str):
with tf.name_scope('unfold_tree_%s' % suffix):
batch_size = tf.shape(embed)[0]
# Create a container of size (x.len + temp.len + 1) for the
# unfolded tree embeddings, where one zero embedding
# vector is padded at head.
tree = tf.pad(embed, [[0, 0], [1, tf.shape(temp)[1]], [0, 0]])
# NOTE: This is a trick to have a fixed embedding dimension in the
# construction time. This is used for initializing variables (e.g.
# in a linear transformation layer).
tree = tf.reshape(tree, [batch_size, -1, embed.get_shape()[-1]])
# shape: [batch, 1 + seq_len + temp_len, embed_dim]
# Add batch index to each template position.
temp = tf.expand_dims(temp, -1)
bidx = tf.tile(tf.reshape(tf.range(batch_size), [-1, 1, 1, 1]),
[1, tf.shape(temp)[1], tf.shape(temp)[2], 1])
temp = tf.concat([bidx, temp], axis=3)
# shape: [batch, temp_len, temp_size, 2]
temp = tf.cast(temp, tf.float32) # NOTE: register tf.gather in GPU.
# Pad a leading 0 to align with the unfolded tree
tag = tf.pad(tag, [[0, 0], [1, 0]])
tag = tf.cast(tag, tf.float32) # NOTE: register tf.gather in GPU.
# shape: [batch, 1 + tag_len]
# NOTE: tag_len <= seq_len + temp_len
# find the next available position (zero embedding)
top = tf.expand_dims(len_ + 1, -1)
# shape: [batch, 1]
i = tf.constant(1)
def loop_cond(i, tree, temp, tag, batch_size):
return tf.less(i, tf.shape(temp)[1])
def loop_body(i, tree, temp, tag, batch_size):
c_idx = tf.gather(temp, i, axis=1)
c_idx = tf.cast(c_idx, tf.int32) # NOTE: restore type
# shape: [batch, temp_size, 2]
p_idx = tf.concat(
[tf.expand_dims(tf.range(batch_size), -1), top + i],
axis=1)
# shape: [batch, 2]
p_tag = tf.gather_nd(tag, p_idx)
p_tag = tf.cast(p_tag, tf.int32) # NOTE: restore type
# shape: [batch]
c_embed = tf.gather_nd(tree, c_idx)
# shape: [batch, temp_size, embed_dim]
c_tag = tf.gather_nd(tag, c_idx)
c_tag = tf.cast(c_tag, tf.int32) # NOTE: restore type
# shape: [batch, temp_size]
p_embed = self.merge_fn(c_embed, c_tag, p_tag)
tree += tf.scatter_nd(
indices=p_idx,
updates=p_embed,
shape=tf.shape(tree))
i += 1
return [i, tree, temp, tag, batch_size]
_, x_loop, _, _, _ = tf.while_loop(loop_cond, loop_body,
[i, tree, temp, tag, batch_size],
parallel_iterations=1)
return x_loop
def merge_fn(self,
c_embeds: tf.Tensor, # 3D: [batch, temp_size, embed_dim]
c_tags: tf.Tensor, # 2D: [batch, temp_size]
p_tags: tf.Tensor # 1D: [batch]
)-> tf.Tensor: # 2D: [batch, embed_dim]
return tf.reduce_mean(c_embeds, axis=1)
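# Hedged illustration (not part of the model): merge_fn builds a parent node
# embedding by mean-pooling its child embeddings and ignores the tags. With
# made-up numbers, two children [[1., 3.], [3., 5.]] merge into [2., 4.].
def _merge_fn_numpy_sketch():
    # numpy (np) is already imported at the top of this module.
    c_embeds = np.array([[[1., 3.], [3., 5.]]])  # [batch=1, temp_size=2, embed_dim=2]
    return c_embeds.mean(axis=1)                 # -> [[2., 4.]]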
|
StarcoderdataPython
|
1748377
|
#!/usr/bin/python3
"""
Given a non-empty array containing only positive integers, find if the array can
be partitioned into two subsets such that the sum of elements in both subsets is
equal.
"""
from collections import defaultdict
class Solution:
def canPartition(self, nums):
"""
0/1 Knapsack problem
Carefully define the state:
Let d[i][s] be the number of subsets of nums[:i+1] that sum to s
Transition function:
d[i][s] = d[i-1][s] + d[i-1][s-nums[i]]
= case not choose nums[i] + case choose nums[i]
:type nums: List[int]
:rtype: bool
"""
if not nums:
return False
s = sum(nums)
if s % 2 != 0:
return False
target = s // 2
d = defaultdict(lambda: defaultdict(int))
d[0][0] = 1
d[0][nums[0]] = 1
for i in range(1, len(nums)):
for v in range(target + 1):
d[i][v] = d[i-1][v] + d[i-1][v-nums[i]]
return any(d[i][target] > 0 for i in range(len(nums)))
def canPartition_TLE(self, nums):
"""
subset rather than sub array
positive number only
dfs with pruning O(2^n), whether to choose the number or not
:type nums: List[int]
:rtype: bool
"""
nums.sort()
s = sum(nums)
if s % 2 != 0:
return False
target = s // 2
return self.dfs(nums, 0, target)
def dfs(self, nums, idx, target):
"""Find a subset that sum to target"""
if not idx < len(nums):
return False
if nums[idx] == target:
return True
if nums[idx] > target:
return False
return (
self.dfs(nums, idx + 1, target) or # not take nums[idx]
self.dfs(nums, idx + 1, target - nums[idx]) # take nums[idx]
)
if __name__ == "__main__":
assert Solution().canPartition([1, 5, 11, 5]) == True
assert Solution().canPartition([1, 2, 3, 5]) == False
|
StarcoderdataPython
|
1712021
|
<reponame>swapnilsparsh/HacktoberFest2020
# pass list in a function
def count(lst):
even = 0
odd = 0
for i in lst:
if i % 2 == 0:
even += 1
else:
odd += 1
return even,odd
lst = []
for i in range(1,6):
app = int(input("Enter the "+ str(i)+ " no. "))
lst.append(app)
even, odd = count(lst)
print("There are " + str(even) + " even no ")
print("There are " + str(odd) + " odd no ")
|
StarcoderdataPython
|
194162
|
<reponame>Joeffison/MachineLearningBuilder
# -*- coding: utf-8 -*-
template_model_creation = """import pandas as pd
from sklearn.model_selection import train_test_split
{model_import} as ChosenMLAlgorithm
from sklearn.metrics import accuracy_score, confusion_matrix
import pickle
csv_file = "{csv_file}"
model_file = "{model_file}"
predictors = {predictors}
targets = {targets}
historical_data = pd.read_csv(csv_file)
pred_train, pred_test, tar_train, tar_test = train_test_split(historical_data[predictors], historical_data[targets],
test_size=.3)
if len(targets) == 1:
tar_train = tar_train.values.ravel()
tar_test = tar_test.values.ravel()
model = ChosenMLAlgorithm().fit(pred_train, tar_train)
predictions = model.predict(pred_test)
# Analyze accuracy of prediction.
# Remember that the data is randomly split into training and test set, so the values below will change
# every time you create a new model
print(confusion_matrix(tar_test, predictions))
print(accuracy_score(tar_test, predictions))
pickle.dump(model, open(model_file, "wb"))
"""
template_model_predictor = """import pickle
import numpy as np
model_file = "{model_file}"
model = pickle.load(open(model_file, 'rb'))
# input_to_predict has to follow the order below:
# {predictors}
prediction = model.predict(np.array([input_to_predict]))
print(prediction)
"""
|
StarcoderdataPython
|
3349255
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 24 19:08:52 2021
@author: majdi
"""
import os
import sys
import subprocess
from setuptools import setup, find_packages
from distutils.version import LooseVersion
from neorl.version import version
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", message=r"Passing", category=FutureWarning)
#check python version
if sys.version_info.major != 3:
raise ValueError('--ERROR: This package is only compatible with Python 3, but you are running '
'Python {}. The installation will likely fail.'.format(sys.version_info.major))
# The directory containing this file
HERE = os.getcwd()
# The text of the README file
with open(os.path.join(HERE , "README.md"), encoding='utf-8') as f:
README = f.read()
long_description = """
# NEORL
NEORL (**N**euro**E**volution **O**ptimisation with **R**einforcement **L**earning) is a set of implementations of hybrid algorithms combining neural networks and evolutionary computation based on a wide range of machine learning and evolutionary intelligence architectures. NEORL aims to solve large-scale optimisation problems relevant to operation & optimisation research, engineering, business, and other disciplines.
NEORL can be used for multidisciplinary applications for research, industrial, academic, and/or teaching purposes. NEORL can be used as a standalone platform or an additional benchmarking tool to supplement or validate other optimisation packages. Our objective when we built NEORL is to give the user a simple and easy-to-use framework with access to a wide range of algorithms, covering both standalone and hybrid algorithms in evolutionary, swarm, supervised learning, deep learning, and reinforcement learning. We hope our implementation will allow beginners to enjoy more advanced optimisation algorithms without being involved in too many theoretical/implementation details.
## Copyright
This repository and its content are copyright of [Exelon Corporation](https://www.exeloncorp.com/) © in collaboration with [MIT](https://web.mit.edu/nse/) Nuclear Science and Engineering 2021. All rights reserved.
You can read the first successful application of NEORL for nuclear fuel optimisation in this [News Article](https://news.mit.edu/2020/want-cheaper-nuclear-energy-turn-design-process-game-1217).
## Links
Repository:
https://github.com/mradaideh/neorl
Main News Article:
https://news.mit.edu/2020/want-cheaper-nuclear-energy-turn-design-process-game-1217
Documentation:
https://neorl.readthedocs.io/en/latest/index.html
## Quick Example
Here is a quick example of how to use NEORL to minimize a 5-D sphere function:
```python
#---------------------------------
# Import packages
#---------------------------------
import numpy as np
import matplotlib.pyplot as plt
from neorl import DE, XNES
#---------------------------------
# Fitness
#---------------------------------
#Define the fitness function
def FIT(individual):
'''Sphere test objective function.
F(x) = sum_{i=1}^d xi^2
d=1,2,3,...
Range: [-100,100]
Minima: 0
'''
return sum(x**2 for x in individual)
#---------------------------------
# Parameter Space
#---------------------------------
#Setup the parameter space (d=5)
nx=5
BOUNDS={}
for i in range(1,nx+1):
BOUNDS['x'+str(i)]=['float', -100, 100]
#---------------------------------
# DE
#---------------------------------
de=DE(mode='min', bounds=BOUNDS, fit=FIT, npop=50, CR=0.5, F=0.7, ncores=1, seed=1)
x_best, y_best, de_hist=de.evolute(ngen=120, verbose=0)
print('---DE Results---', )
print('x:', x_best)
print('y:', y_best)
#---------------------------------
# NES
#---------------------------------
x0=[-50]*len(BOUNDS)
amat = np.eye(nx)
xnes=XNES(mode='min', bounds=BOUNDS, fit=FIT, npop=50, eta_mu=0.9,
eta_sigma=0.5, adapt_sampling=True, seed=1)
x_best, y_best, nes_hist=xnes.evolute(120, x0=x0, verbose=0)
print('---XNES Results---', )
print('x:', x_best)
print('y:', y_best)
#---------------------------------
# Plot
#---------------------------------
#Plot fitness for both methods
plt.figure()
plt.plot(np.array(de_hist), label='DE')
plt.plot(np.array(nes_hist['fitness']), label='NES')
plt.xlabel('Generation')
plt.ylabel('Fitness')
plt.legend()
plt.show()
```
## Citing the Project
To cite this repository in publications:
```
@misc{neorl,
author = {<NAME>. and <NAME> and Wang, Haijia and <NAME>},
title = {NEORL},
year = {2021},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {https://github.com/mradaideh/neorl},
}
```
"""
# Read version from file
__version__ = version()
# Check tensorflow installation to avoid
# breaking a pre-installed tf gpu setup (credit to @hill-a and stable-baselines)
def find_tf_dependency():
install_tf, tf_gpu = False, False
try:
import tensorflow as tf
if tf.__version__ < LooseVersion('1.8.0'):
install_tf = True
# check if a gpu version is needed
tf_gpu = tf.test.is_gpu_available()
except ImportError:
install_tf = True
# Check if a nvidia gpu is present
for command in ['nvidia-smi', '/usr/bin/nvidia-smi', 'nvidia-smi.exe']:
try:
if subprocess.call([command]) == 0:
tf_gpu = True
break
except IOError: # command does not exist / is not executable
pass
if os.environ.get('USE_GPU') == 'True': # force GPU even if not auto-detected
tf_gpu = True
tf_dependency = []
if install_tf:
tf_dependency = ['tensorflow-gpu>=1.8.0,<2.0.0'] if tf_gpu else ['tensorflow>=1.8.0,<2.0.0']
if tf_gpu:
print("A GPU was detected, tensorflow-gpu will be installed")
return tf_dependency
# This call to setup() does all the work
setup(
name="neorl",
packages=[package for package in find_packages() if package.startswith('neorl')],
include_package_data=True,
package_data={'neorl': ['requirements.txt', 'version.txt']},
install_requires=['tensorflow==1.14.0',
'numpy== 1.16.2',
'gym >= 0.15.4, < 0.17.0',
'scikit-optimize==0.8.1',
'cloudpickle >= 1.2.2',
'scipy',
'joblib',
'pandas',
'xlrd==1.2.0',
'matplotlib',
'pytest',
'pytest-cov',
'sphinx',
'sphinx-rtd-theme',
'sphinx-autobuild'] + find_tf_dependency(),
extras_require={'tests': ['pytest', 'pytest-cov', 'pytest-env', 'pytest-xdist', 'pytype'],
'docs': ['sphinx', 'sphinx-autobuild', 'sphinx-rtd-theme']},
description="NeuroEvolution Optimisation with Reinforcement Learning",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mradaideh/neorl",
author="<NAME>",
author_email="<EMAIL>",
entry_points={
"console_scripts": [
"neorl=neorl.scripts:main ",
]
},
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7"],
version= __version__,
)
|
StarcoderdataPython
|
3315047
|
<reponame>g10f/sso<filename>apps/smart_selects/form_fields.py<gh_stars>1-10
from django.apps import apps
from django.forms import ChoiceField
from django.forms.models import ModelChoiceField
from smart_selects.widgets import ChainedSelect
class ChainedModelChoiceField(ModelChoiceField):
def __init__(self, app_name, model_name, chain_field, model_field, show_all, auto_choose, manager=None, initial=None, *args, **kwargs):
defaults = {
'widget': ChainedSelect(app_name, model_name, chain_field, model_field, show_all, auto_choose, manager),
}
defaults.update(kwargs)
if 'queryset' not in kwargs:
queryset = apps.get_model(app_name, model_name).objects.all()
super().__init__(queryset=queryset, initial=initial, *args, **defaults)
else:
super().__init__(initial=initial, *args, **defaults)
def _get_choices(self):
self.widget.queryset = self.queryset
choices = super()._get_choices()
return choices
choices = property(_get_choices, ChoiceField._set_choices)
|
StarcoderdataPython
|
3286165
|
<reponame>TomMakkink/transformers-for-rl<gh_stars>1-10
# import math
# import random
#
# import numpy as np
# import torch
# import torch.nn.functional as F
# import torch.optim as optim
#
# from agents.agent import Agent
# from agents.replay_buffer import ReplayBuffer
# from configs.dqn_config import dqn_config
# from models.mlp import MLP
#
#
# class DQN(Agent):
# def __init__(
# self,
# state_size,
# action_size,
# device,
# lr,
# buffer_size,
# hidden_size=[50, 50],
# memory=None,
# ):
# super(DQN, self).__init__(state_size, action_size, hidden_size, memory)
# self.device = device
# self.policy_net = MLP(
# state_size, action_size, hidden_size, memory_type=memory
# ).to(self.device)
# self.target_network = MLP(
# state_size, action_size, hidden_size, memory_type=memory
# ).to(self.device)
# self.update_target_network()
# self.target_network.eval()
# self.replay_buffer = ReplayBuffer(buffer_size)
# self.optimiser = optim.Adam(self.policy_net.parameters(), lr=lr)
# self.current_timestep = 1
# self.action_size = action_size
# self.sample_sequentially = (
# True if self.policy_net.memory_network.memory is not None else False
# )
# self.episode_number = 1
#
# def act(self, state):
# """
# Select an action greedily from the Q-network given the state
# :param state: the current state
# :return: the action to take
# """
# epsilon = self.calculate_epsilon()
# self.current_timestep += 1
# if random.random() > epsilon:
# with torch.no_grad():
# q_values = self.policy_net(state)
# _, action = q_values.max(1)
# return action.item()
# else:
# return random.randrange(self.action_size)
#
# def update_target_network(self):
# """
# Update the target Q-network by copying the weights from the current Q-network
# """
# self.target_network.load_state_dict(self.policy_net.state_dict())
#
# def update_target_update_by_percentage(self, tau):
# for param, target_param in zip(
# self.policy_net.parameters(), self.target_network.parameters()
# ):
# target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
#
# def calculate_epsilon(self):
# # return dqn_config["epsilon"]["final"] + (
# # dqn_config["epsilon"]["start"] - dqn_config["epsilon"]["final"]
# # ) * math.exp(-1.0 * self.current_timestep / dqn_config["epsilon"]["decay"])
# return 0.05
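# The commented-out schedule above decays epsilon exponentially from
# dqn_config["epsilon"]["start"] towards dqn_config["epsilon"]["final"]; with
# illustrative values start=1.0, final=0.05, decay=1000 it gives roughly 0.40
# after 1000 timesteps and tends to 0.05. The hard-coded return above replaces
# it with a constant exploration rate of 0.05.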
#
# def optimize_network(self):
# if self.warm_up_timesteps <= self.current_timestep:
# self.policy_net.reset()
# states, actions, rewards, next_states, dones = self.replay_buffer.sample(
# dqn_config["batch_size"], self.device, self.sample_sequentially
# )
#
# with torch.no_grad():
# _, max_next_action = self.policy_net(next_states).max(1)
# max_next_q_values = (
# self.target_network(next_states)
# .gather(1, max_next_action.unsqueeze(1))
# .squeeze()
# )
# target_q_values = (
# rewards + (1 - dones) * dqn_config["gamma"] * max_next_q_values
# )
#
# input_q_values = self.policy_net(states)
# input_q_values = input_q_values.gather(1, actions.unsqueeze(1)).squeeze()
#
# loss = F.smooth_l1_loss(input_q_values, target_q_values)
#
# self.optimiser.zero_grad()
# loss.backward()
# self.optimiser.step()
#
# if self.episode_number % dqn_config["target_update"] == 0:
# # print(
# # self.episode_number, "updating update_target_update_by_percentage"
# # )
# self.update_target_network()
# # self.update_target_update_by_percentage()
#
# return loss.item()
# else:
# # print("Didn't optimise", self.current_timestep)
# return np.NaN
#
# def reset(self):
# self.replay_buffer.reset()
# self.policy_net.reset()
# self.target_network.reset()
# self.episode_number += 1
#
# def collect_experience(self, state, action, reward, next_state, done):
# self.replay_buffer.push(state, action, reward, next_state, done)
#
# def get_parameters(self):
# return dqn_config
|
StarcoderdataPython
|
3356981
|
<filename>website/web/__init__.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import base64
from zipfile import ZipFile, ZIP_DEFLATED
from io import BytesIO
import os
from pathlib import Path
from datetime import datetime, timedelta
import json
import http
import calendar
from flask import Flask, render_template, request, send_file, redirect, url_for, Response, flash, jsonify
from flask_bootstrap import Bootstrap # type: ignore
from flask_httpauth import HTTPDigestAuth # type: ignore
from lookyloo.helpers import get_homedir, update_user_agents, get_user_agents, get_config, get_taxonomies
from lookyloo.lookyloo import Lookyloo, Indexing
from lookyloo.exceptions import NoValidHarFile, MissingUUID
from .proxied import ReverseProxied
from typing import Optional, Dict, Any, Union
import logging
app: Flask = Flask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app) # type: ignore
secret_file_path: Path = get_homedir() / 'secret_key'
if not secret_file_path.exists() or secret_file_path.stat().st_size < 64:
with secret_file_path.open('wb') as f:
f.write(os.urandom(64))
with secret_file_path.open('rb') as f:
app.config['SECRET_KEY'] = f.read()
Bootstrap(app)
app.config['BOOTSTRAP_SERVE_LOCAL'] = True
app.config['SESSION_COOKIE_NAME'] = 'lookyloo'
app.debug = False
auth = HTTPDigestAuth()
lookyloo: Lookyloo = Lookyloo()
user = get_config('generic', 'cache_clean_user')
time_delta_on_index = get_config('generic', 'time_delta_on_index')
blur_screenshot = get_config('generic', 'enable_default_blur_screenshot')
max_depth = get_config('generic', 'max_depth')
enable_mail_notification = get_config('generic', 'enable_mail_notification')
enable_context_by_users = get_config('generic', 'enable_context_by_users')
enable_categorization = get_config('generic', 'enable_categorization')
enable_bookmark = get_config('generic', 'enable_bookmark')
auto_trigger_modules = get_config('generic', 'auto_trigger_modules')
logging.basicConfig(level=get_config('generic', 'loglevel'))
# ##### Global methods passed to jinja
# Method to make sizes in bytes human readable
# Source: https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
app.jinja_env.globals.update(sizeof_fmt=sizeof_fmt)
def http_status_description(code: int):
if code in http.client.responses:
return http.client.responses[code]
return f'Invalid code: {code}'
app.jinja_env.globals.update(http_status_description=http_status_description)
def month_name(month: int):
return calendar.month_name[month]
app.jinja_env.globals.update(month_name=month_name)
# ##### Generic/configuration methods #####
@app.after_request
def after_request(response):
ua = request.headers.get('User-Agent')
real_ip = request.headers.get('X-Real-IP')
if ua:
if real_ip:
lookyloo.cache_user_agents(ua, real_ip)
else:
lookyloo.cache_user_agents(ua, request.remote_addr)
return response
@auth.get_password
def get_pw(username: str) -> Optional[str]:
if username in user:
return user.get(username)
return None
# ##### Hostnode level methods #####
@app.route('/tree/<string:tree_uuid>/host/<string:node_uuid>/hashes', methods=['GET'])
def hashes_hostnode(tree_uuid: str, node_uuid: str):
hashes = lookyloo.get_hashes(tree_uuid, hostnode_uuid=node_uuid)
return send_file(BytesIO('\n'.join(hashes).encode()),
mimetype='text/plain', as_attachment=True, attachment_filename=f'hashes.{node_uuid}.txt')
@app.route('/tree/<string:tree_uuid>/host/<string:node_uuid>/text', methods=['GET'])
def urls_hostnode(tree_uuid: str, node_uuid: str):
hostnode = lookyloo.get_hostnode_from_tree(tree_uuid, node_uuid)
return send_file(BytesIO('\n'.join(url.name for url in hostnode.urls).encode()),
mimetype='text/plain', as_attachment=True, attachment_filename=f'urls.{node_uuid}.txt')
@app.route('/tree/<string:tree_uuid>/host/<string:node_uuid>', methods=['GET'])
def hostnode_popup(tree_uuid: str, node_uuid: str):
keys_response = {
'js': "/static/javascript.png",
'exe': "/static/exe.png",
'css': "/static/css.png",
'font': "/static/font.png",
'html': "/static/html.png",
'json': "/static/json.png",
'text': "/static/json.png", # FIXME: Need new icon
'iframe': "/static/ifr.png",
'image': "/static/img.png",
'unset_mimetype': "/static/wtf.png",
'octet-stream': "/static/wtf.png",
'unknown_mimetype': "/static/wtf.png",
'video': "/static/video.png",
'livestream': "/static/video.png",
'response_cookie': "/static/cookie_received.png",
# redirect has to be last
'redirect': "/static/redirect.png",
'redirect_to_nothing': "/static/cookie_in_url.png"
}
keys_request = {
'request_cookie': "/static/cookie_read.png",
}
hostnode, urls = lookyloo.get_hostnode_investigator(tree_uuid, node_uuid)
return render_template('hostname_popup.html',
tree_uuid=tree_uuid,
hostnode_uuid=node_uuid,
hostnode=hostnode,
urls=urls,
keys_response=keys_response,
keys_request=keys_request,
enable_context_by_users=enable_context_by_users)
# ##### Tree level Methods #####
@app.route('/tree/<string:tree_uuid>/rebuild')
@auth.login_required
def rebuild_tree(tree_uuid: str):
try:
lookyloo.remove_pickle(tree_uuid)
return redirect(url_for('tree', tree_uuid=tree_uuid))
except Exception:
return redirect(url_for('index'))
@app.route('/tree/<string:tree_uuid>/trigger_modules/', defaults={'force': False})
@app.route('/tree/<string:tree_uuid>/trigger_modules/<int:force>', methods=['GET'])
def trigger_modules(tree_uuid: str, force: int):
lookyloo.trigger_modules(tree_uuid, True if force else False)
return redirect(url_for('modules', tree_uuid=tree_uuid))
@app.route('/tree/<string:tree_uuid>/categories_capture/', defaults={'query': ''})
@app.route('/tree/<string:tree_uuid>/categories_capture/<string:query>', methods=['GET'])
def categories_capture(tree_uuid: str, query: str):
if not enable_categorization:
return redirect(url_for('tree', tree_uuid=tree_uuid))
current_categories = lookyloo.categories_capture(tree_uuid)
matching_categories = None
if query:
matching_categories = {}
t = get_taxonomies()
entries = t.search(query)
if entries:
matching_categories = {e: t.revert_machinetag(e) for e in entries}
return render_template('categories_capture.html', tree_uuid=tree_uuid,
current_categories=current_categories,
matching_categories=matching_categories)
@app.route('/tree/<string:tree_uuid>/uncategorize/', defaults={'category': ''})
@app.route('/tree/<string:tree_uuid>/uncategorize/<string:category>', methods=['GET'])
def uncategorize_capture(tree_uuid: str, category: str):
if not enable_categorization:
return jsonify({'response': 'Categorization not enabled.'})
lookyloo.uncategorize_capture(tree_uuid, category)
return jsonify({'response': f'{category} successfully added to {tree_uuid}'})
@app.route('/tree/<string:tree_uuid>/categorize/', defaults={'category': ''})
@app.route('/tree/<string:tree_uuid>/categorize/<string:category>', methods=['GET'])
def categorize_capture(tree_uuid: str, category: str):
if not enable_categorization:
return jsonify({'response': 'Categorization not enabled.'})
lookyloo.categorize_capture(tree_uuid, category)
return jsonify({'response': f'{category} successfully removed from {tree_uuid}'})
@app.route('/tree/<string:tree_uuid>/stats', methods=['GET'])
def stats(tree_uuid: str):
stats = lookyloo.get_statistics(tree_uuid)
return render_template('statistics.html', uuid=tree_uuid, stats=stats)
@app.route('/tree/<string:tree_uuid>/modules', methods=['GET'])
def modules(tree_uuid: str):
modules_responses = lookyloo.get_modules_responses(tree_uuid)
if not modules_responses:
return redirect(url_for('tree', tree_uuid=tree_uuid))
vt_short_result: Dict[str, Dict[str, Any]] = {}
if 'vt' in modules_responses:
# VirusTotal cleanup
vt = modules_responses.pop('vt')
# Get malicious entries
for url, full_report in vt.items():
if not full_report:
continue
vt_short_result[url] = {
'permaurl': f'https://www.virustotal.com/gui/url/{full_report["id"]}/detection',
'malicious': []
}
for vendor, result in full_report['attributes']['last_analysis_results'].items():
if result['category'] == 'malicious':
vt_short_result[url]['malicious'].append((vendor, result['result']))
pi_short_result: Dict[str, str] = {}
if 'pi' in modules_responses:
pi = modules_responses.pop('pi')
for url, full_report in pi.items():
if not full_report:
continue
pi_short_result[url] = full_report['results'][0]['tag_label']
return render_template('modules.html', uuid=tree_uuid, vt=vt_short_result, pi=pi_short_result)
@app.route('/tree/<string:tree_uuid>/redirects', methods=['GET'])
def redirects(tree_uuid: str):
cache = lookyloo.capture_cache(tree_uuid)
if not cache:
return Response('Not available.', mimetype='text/text')
if not cache['redirects']:
return Response('No redirects.', mimetype='text/text')
if cache['url'] == cache['redirects'][0]: # type: ignore
to_return = BytesIO('\n'.join(cache['redirects']).encode()) # type: ignore
else:
to_return = BytesIO('\n'.join([cache['url']] + cache['redirects']).encode()) # type: ignore
return send_file(to_return, mimetype='text/text',
as_attachment=True, attachment_filename='redirects.txt')
@app.route('/tree/<string:tree_uuid>/image', methods=['GET'])
def image(tree_uuid: str):
to_return = lookyloo.get_screenshot(tree_uuid)
return send_file(to_return, mimetype='image/png',
as_attachment=True, attachment_filename='image.png')
@app.route('/tree/<string:tree_uuid>/html', methods=['GET'])
def html(tree_uuid: str):
to_return = lookyloo.get_html(tree_uuid)
return send_file(to_return, mimetype='text/html',
as_attachment=True, attachment_filename='page.html')
@app.route('/tree/<string:tree_uuid>/cookies', methods=['GET'])
def cookies(tree_uuid: str):
to_return = lookyloo.get_cookies(tree_uuid)
return send_file(to_return, mimetype='application/json',
as_attachment=True, attachment_filename='cookies.json')
@app.route('/tree/<string:tree_uuid>/hashes', methods=['GET'])
def hashes_tree(tree_uuid: str):
hashes = lookyloo.get_hashes(tree_uuid)
return send_file(BytesIO('\n'.join(hashes).encode()),
mimetype='text/plain', as_attachment=True, attachment_filename='hashes.txt')
@app.route('/tree/<string:tree_uuid>/export', methods=['GET'])
def export(tree_uuid: str):
to_return = lookyloo.get_capture(tree_uuid)
return send_file(to_return, mimetype='application/zip',
as_attachment=True, attachment_filename='capture.zip')
@app.route('/tree/<string:tree_uuid>/hide', methods=['GET'])
@auth.login_required
def hide_capture(tree_uuid: str):
lookyloo.hide_capture(tree_uuid)
return redirect(url_for('tree', tree_uuid=tree_uuid))
@app.route('/tree/<string:tree_uuid>/cache', methods=['GET'])
def cache_tree(tree_uuid: str):
lookyloo.cache_tree(tree_uuid)
return redirect(url_for('index'))
@app.route('/tree/<string:tree_uuid>/send_mail', methods=['POST', 'GET'])
def send_mail(tree_uuid: str):
if not enable_mail_notification:
return redirect(url_for('tree', tree_uuid=tree_uuid))
email: str = request.form.get('email') if request.form.get('email') else '' # type: ignore
if '@' not in email:
# skip clearly incorrect emails
email = ''
comment: str = request.form.get('comment') if request.form.get('comment') else '' # type: ignore
lookyloo.send_mail(tree_uuid, email, comment)
return redirect(url_for('tree', tree_uuid=tree_uuid))
@app.route('/tree/<string:tree_uuid>', methods=['GET'])
@app.route('/tree/<string:tree_uuid>/<string:urlnode_uuid>', methods=['GET'])
def tree(tree_uuid: str, urlnode_uuid: Optional[str]=None):
if tree_uuid == 'False':
flash("Unable to process your request. The domain may not exist, or splash isn't started", 'error')
return redirect(url_for('index'))
try:
cache = lookyloo.capture_cache(tree_uuid)
except MissingUUID:
flash(f'Unable to find this UUID ({tree_uuid}). The capture may still be ongoing, try again later.', 'error')
return redirect(url_for('index'))
if not cache:
flash('Invalid cache.', 'error')
return redirect(url_for('index'))
if 'error' in cache:
flash(cache['error'], 'error')
try:
tree_json, start_time, user_agent, root_url, meta = lookyloo.load_tree(tree_uuid)
b64_thumbnail = lookyloo.get_screenshot_thumbnail(tree_uuid, for_datauri=True)
return render_template('tree.html', tree_json=tree_json, start_time=start_time,
user_agent=user_agent, root_url=root_url, tree_uuid=tree_uuid,
screenshot_thumbnail=b64_thumbnail,
meta=meta, enable_mail_notification=enable_mail_notification,
enable_context_by_users=enable_context_by_users,
enable_categorization=enable_categorization,
enable_bookmark=enable_bookmark,
blur_screenshot=blur_screenshot, urlnode_uuid=urlnode_uuid,
auto_trigger_modules=auto_trigger_modules,
has_redirects=True if cache['redirects'] else False)
except NoValidHarFile as e:
return render_template('error.html', error_message=e)
@app.route('/tree/<string:tree_uuid>/mark_as_legitimate', methods=['POST'])
@auth.login_required
def mark_as_legitimate(tree_uuid: str):
if request.data:
legitimate_entries = request.get_json(force=True)
lookyloo.add_to_legitimate(tree_uuid, **legitimate_entries)
else:
lookyloo.add_to_legitimate(tree_uuid)
return jsonify({'message': 'Legitimate entry added.'})
# ##### helpers #####
def index_generic(show_hidden: bool=False, category: Optional[str]=None):
titles = []
if time_delta_on_index:
# We want to filter the captures on the index
cut_time = datetime.now() - timedelta(**time_delta_on_index)
else:
cut_time = None # type: ignore
for cached in lookyloo.sorted_cache:
if not cached:
continue
if category:
if 'categories' not in cached or category not in cached['categories']:
continue
if show_hidden:
if 'no_index' not in cached:
# Only display the hidden ones
continue
elif 'no_index' in cached:
continue
if cut_time and datetime.fromisoformat(cached['timestamp'][:-1]) < cut_time:
continue
titles.append((cached['uuid'], cached['title'], cached['timestamp'], cached['url'],
cached['redirects'], True if cached['incomplete_redirects'] == '1' else False))
titles = sorted(titles, key=lambda x: (x[2], x[3]), reverse=True)
return render_template('index.html', titles=titles)
# ##### Index level methods #####
@app.route('/', methods=['GET'])
def index():
if request.method == 'HEAD':
# Just returns ack if the webserver is running
return 'Ack'
update_user_agents()
return index_generic()
@app.route('/hidden', methods=['GET'])
@auth.login_required
def index_hidden():
return index_generic(show_hidden=True)
@app.route('/category/<string:category>', methods=['GET'])
def index_category(category: str):
return index_generic(category=category)
@app.route('/cookies', methods=['GET'])
def cookies_lookup():
i = Indexing()
cookies_names = [(name, freq, i.cookies_names_number_domains(name)) for name, freq in i.cookies_names]
return render_template('cookies.html', cookies_names=cookies_names)
@app.route('/ressources', methods=['GET'])
def ressources():
i = Indexing()
ressources = []
for h, freq in i.ressources:
domain_freq = i.ressources_number_domains(h)
context = lookyloo.context.find_known_content(h)
capture_uuid, url_uuid, hostnode_uuid = i.get_hash_uuids(h)
ressources.append((h, freq, domain_freq, context.get(h), capture_uuid, url_uuid, hostnode_uuid))
return render_template('ressources.html', ressources=ressources)
@app.route('/categories', methods=['GET'])
def categories():
i = Indexing()
print(i.categories)
return render_template('categories.html', categories=i.categories)
@app.route('/rebuild_all')
@auth.login_required
def rebuild_all():
lookyloo.rebuild_all()
return redirect(url_for('index'))
@app.route('/rebuild_cache')
@auth.login_required
def rebuild_cache():
lookyloo.rebuild_cache()
return redirect(url_for('index'))
@app.route('/submit', methods=['POST', 'GET'])
def submit():
to_query = request.get_json(force=True)
perma_uuid = lookyloo.enqueue_capture(to_query)
return Response(perma_uuid, mimetype='text/text')
@app.route('/capture', methods=['GET', 'POST'])
def capture_web():
if request.form.get('url'):
# check if the post request has the file part
if 'cookies' in request.files and request.files['cookies'].filename:
cookie_file = request.files['cookies'].stream
else:
cookie_file = None
url = request.form.get('url')
if request.form.get('personal_ua') and request.headers.get('User-Agent'):
user_agent = request.headers.get('User-Agent')
else:
user_agent = request.form.get('user_agent')
if url:
depth: int = request.form.get('depth') if request.form.get('depth') else 1 # type: ignore
listing: bool = request.form.get('listing') if request.form.get('listing') else False # type: ignore
perma_uuid = lookyloo.capture(url=url, cookies_pseudofile=cookie_file,
depth=depth, listing=listing,
user_agent=user_agent,
referer=request.form.get('referer'), # type: ignore
os=request.form.get('os'), browser=request.form.get('browser'))
return redirect(url_for('tree', tree_uuid=perma_uuid))
user_agents: Dict[str, Any] = {}
if get_config('generic', 'use_user_agents_users'):
lookyloo.build_ua_file()
# NOTE: For now, just generate the file, so we have an idea of the size
# user_agents = get_user_agents('own_user_agents')
if not user_agents:
user_agents = get_user_agents()
user_agents.pop('by_frequency')
return render_template('capture.html', user_agents=user_agents,
max_depth=max_depth, personal_ua=request.headers.get('User-Agent'))
@app.route('/cookies/<string:cookie_name>', methods=['GET'])
def cookies_name_detail(cookie_name: str):
captures, domains = lookyloo.get_cookie_name_investigator(cookie_name)
return render_template('cookie_name.html', cookie_name=cookie_name, domains=domains, captures=captures)
@app.route('/body_hashes/<string:body_hash>', methods=['GET'])
def body_hash_details(body_hash: str):
captures, domains = lookyloo.get_body_hash_investigator(body_hash)
return render_template('body_hash.html', body_hash=body_hash, domains=domains, captures=captures)
@app.route('/stats', methods=['GET'])
def statsfull():
stats = lookyloo.get_stats()
return render_template('stats.html', stats=stats)
# ##### Methods related to a specific URLNode #####
@app.route('/tree/<string:tree_uuid>/url/<string:node_uuid>/request_cookies', methods=['GET'])
def urlnode_request_cookies(tree_uuid: str, node_uuid: str):
urlnode = lookyloo.get_urlnode_from_tree(tree_uuid, node_uuid)
if not urlnode.request_cookie:
return
return send_file(BytesIO(json.dumps(urlnode.request_cookie, indent=2).encode()),
mimetype='text/plain', as_attachment=True, attachment_filename='request_cookies.txt')
@app.route('/tree/<string:tree_uuid>/url/<string:node_uuid>/response_cookies', methods=['GET'])
def urlnode_response_cookies(tree_uuid: str, node_uuid: str):
urlnode = lookyloo.get_urlnode_from_tree(tree_uuid, node_uuid)
if not urlnode.response_cookie:
return
return send_file(BytesIO(json.dumps(urlnode.response_cookie, indent=2).encode()),
mimetype='text/plain', as_attachment=True, attachment_filename='response_cookies.txt')
@app.route('/tree/<string:tree_uuid>/url/<string:node_uuid>/rendered_content', methods=['GET'])
def urlnode_rendered_content(tree_uuid: str, node_uuid: str):
urlnode = lookyloo.get_urlnode_from_tree(tree_uuid, node_uuid)
if not urlnode.rendered_html:
return
return send_file(BytesIO(urlnode.rendered_html.getvalue()), mimetype='text/plain',
as_attachment=True, attachment_filename='rendered_content.txt')
@app.route('/tree/<string:tree_uuid>/url/<string:node_uuid>/posted_data', methods=['GET'])
def urlnode_post_request(tree_uuid: str, node_uuid: str):
urlnode = lookyloo.get_urlnode_from_tree(tree_uuid, node_uuid)
if not urlnode.posted_data:
return
posted: Union[str, bytes]
if isinstance(urlnode.posted_data, (dict, list)):
# JSON blob, pretty print.
posted = json.dumps(urlnode.posted_data, indent=2)
else:
posted = urlnode.posted_data
if isinstance(posted, str):
to_return = BytesIO(posted.encode())
is_blob = False
else:
to_return = BytesIO(posted)
is_blob = True
to_return.seek(0)
if is_blob:
return send_file(to_return, mimetype='application/octet-stream',
as_attachment=True, attachment_filename='posted_data.bin')
else:
return send_file(to_return, mimetype='text/plain',
as_attachment=True, attachment_filename='posted_data.txt')
@app.route('/tree/<string:tree_uuid>/url/<string:node_uuid>/ressource', methods=['POST', 'GET'])
def get_ressource(tree_uuid: str, node_uuid: str):
if request.method == 'POST':
h_request = request.form.get('ressource_hash')
else:
h_request = None
ressource = lookyloo.get_ressource(tree_uuid, node_uuid, h_request)
to_return = BytesIO()
with ZipFile(to_return, 'w', ZIP_DEFLATED) as zfile:
if ressource:
filename, r = ressource
zfile.writestr(filename, r.getvalue())
else:
zfile.writestr('file.txt', b'Unknown Hash')
to_return.seek(0)
return send_file(to_return, mimetype='application/zip',
as_attachment=True, attachment_filename='file.zip')
@app.route('/tree/<string:tree_uuid>/url/<string:node_uuid>/hashes', methods=['GET'])
def hashes_urlnode(tree_uuid: str, node_uuid: str):
hashes = lookyloo.get_hashes(tree_uuid, urlnode_uuid=node_uuid)
return send_file(BytesIO('\n'.join(hashes).encode()),
mimetype='text/plain', as_attachment=True, attachment_filename='hashes.txt')
@app.route('/tree/<string:tree_uuid>/url/<string:node_uuid>/add_context', methods=['POST'])
@auth.login_required
def add_context(tree_uuid: str, node_uuid: str):
if not enable_context_by_users:
return redirect(url_for('ressources'))
context_data = request.form
ressource_hash: str = context_data.get('hash_to_contextualize') # type: ignore
hostnode_uuid: str = context_data.get('hostnode_uuid') # type: ignore
callback_str: str = context_data.get('callback_str') # type: ignore
legitimate: bool = True if context_data.get('legitimate') else False
malicious: bool = True if context_data.get('malicious') else False
details: Dict[str, Dict] = {'malicious': {}, 'legitimate': {}}
if malicious:
malicious_details = {}
if context_data.get('malicious_type'):
malicious_details['type'] = context_data['malicious_type']
if context_data.get('malicious_target'):
malicious_details['target'] = context_data['malicious_target']
details['malicious'] = malicious_details
if legitimate:
legitimate_details = {}
if context_data.get('legitimate_domain'):
legitimate_details['domain'] = context_data['legitimate_domain']
if context_data.get('legitimate_description'):
legitimate_details['description'] = context_data['legitimate_description']
details['legitimate'] = legitimate_details
lookyloo.add_context(tree_uuid, node_uuid, ressource_hash, legitimate, malicious, details)
if callback_str == 'hostnode_popup':
return redirect(url_for('hostnode_popup', tree_uuid=tree_uuid, node_uuid=hostnode_uuid))
elif callback_str == 'ressources':
return redirect(url_for('ressources'))
# Query API
@app.route('/json/<string:tree_uuid>/redirects', methods=['GET'])
def json_redirects(tree_uuid: str):
cache = lookyloo.capture_cache(tree_uuid)
if not cache:
return {'error': 'UUID missing in cache, try again later.'}
to_return: Dict[str, Any] = {'response': {'url': cache['url'], 'redirects': []}}
if not cache['redirects']:
to_return['response']['info'] = 'No redirects'
return to_return
if cache['incomplete_redirects']:
# Trigger tree build, get all redirects
lookyloo.cache_tree(tree_uuid)
cache = lookyloo.capture_cache(tree_uuid)
if cache:
to_return['response']['redirects'] = cache['redirects']
else:
to_return['response']['redirects'] = cache['redirects']
return jsonify(to_return)
@app.route('/json/<string:tree_uuid>/misp_export', methods=['GET'])
def misp_export(tree_uuid: str):
event = lookyloo.misp_export(tree_uuid)
if isinstance(event, dict):
return jsonify(event)
return Response(event.to_json(indent=2), mimetype='application/json')
@app.route('/json/hash_info/<h>', methods=['GET'])
def json_hash_info(h: str):
details, body = lookyloo.get_body_hash_full(h)
if not details:
return {'error': 'Unknown Hash.'}
to_return: Dict[str, Any] = {'response': {'hash': h, 'details': details,
'body': base64.b64encode(body.getvalue()).decode()}}
return jsonify(to_return)
@app.route('/json/url_info', methods=['POST'])
def json_url_info():
to_query = request.get_json(force=True)
occurrences = lookyloo.get_url_occurrences(**to_query)
return jsonify(occurrences)
@app.route('/json/hostname_info', methods=['POST'])
def json_hostname_info():
to_query = request.get_json(force=True)
occurrences = lookyloo.get_hostname_occurrences(**to_query)
return jsonify(occurrences)
@app.route('/json/stats', methods=['GET'])
def json_stats():
to_return = lookyloo.get_stats()
return Response(json.dumps(to_return), mimetype='application/json')
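# Hedged client-side sketch (not part of this module): the JSON endpoints above
# can presumably be queried with something like
#   import requests
#   requests.post('https://<lookyloo-host>/json/url_info', json={...}).json()
# where the host is deployment-specific and the payload keys are whatever
# lookyloo.get_url_occurrences() accepts.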
|
StarcoderdataPython
|
3350666
|
"""
:mod:`zsl.application.modules.cache_module`
-------------------------------------------
"""
from __future__ import unicode_literals
import logging
from injector import Binder, Module, singleton
from zsl.cache.cache_module import CacheModule
from zsl.cache.id_helper import IdHelper
from zsl.cache.redis_cache_module import RedisCacheModule
from zsl.cache.redis_id_helper import RedisIdHelper
class RedisCacheInjectionModule(Module):
"""Adds cache modules into to current configuration using reddis as
backend.
"""
def configure(self, binder):
# type: (Binder) -> None
"""Initializer of the cache - creates the Redis cache module as the
default cache infrastructure. The module is bound to `RedisCacheModule`
and `CacheModule` keys. The initializer also creates `RedisIdHelper`
and binds it to `RedisIdHelper` and `IdHelper` keys.
:param Binder binder: The binder object holding the binding context, we\
add cache to the binder.
"""
redis_cache_module = RedisCacheModule()
binder.bind(
RedisCacheModule,
to=redis_cache_module,
scope=singleton
)
binder.bind(
CacheModule,
to=redis_cache_module,
scope=singleton
)
redis_id_helper = RedisIdHelper()
binder.bind(
RedisIdHelper,
to=redis_id_helper,
scope=singleton
)
binder.bind(
IdHelper,
to=redis_id_helper,
scope=singleton
)
logging.debug("Created RedisCache binding.")
|
StarcoderdataPython
|
1645542
|
from typing import Tuple
import torch
from torch_nlp_utils.common import Registrable
from vae_lm.models.base.torch_module import TorchModule
class Flow(TorchModule, Registrable):
"""Generic Class for Generative Flow."""
def forward(
self, z: torch.Tensor, mask: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Pass latent codes through transformation."""
raise NotImplementedError()
def backward(
self, z: torch.Tensor, mask: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute inverse of computed transformation."""
raise NotImplementedError()
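# Hedged illustration (not from this package): a concrete Flow would return the
# transformed codes together with what is presumably a log-Jacobian term, e.g.
# an identity flow could be written as
#   class IdentityFlow(Flow):
#       def forward(self, z, mask=None):
#           return z, z.new_zeros(z.size(0))
#       def backward(self, z, mask=None):
#           return z, z.new_zeros(z.size(0))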
|
StarcoderdataPython
|
1761416
|
<reponame>xinglun/TestFramework<filename>sutie/common/test_menu.py<gh_stars>0
import pytest
import allure
from util.common.login import login
from util.yaml.yaml_util import YamlUtil
from util.request.requestSend import send_request
class TestMenu:
@allure.description("menu test")
@allure.severity("normal") # blocker > critical > normal > minor > trivial
@allure.feature("common")
@allure.story("login")
# @allure.issue() bug number
@allure.testcase("正常ログインテスト ")
# @pytest.mark.run(order=1)
# @pytest.mark.smoke
@pytest.mark.parametrize("testcases",YamlUtil().read_testcases_yaml("menu.yml"))
def test_menu(self,testcases):
# send post
_,check = send_request(testcases)
assert check == True
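# Hedged note (assumption, not from the repo): menu.yml is expected to contain a
# list of request cases that send_request() can consume, each describing the
# request to send and the value that `check` is derived from.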
|
StarcoderdataPython
|
127716
|
# Copyright 2019 Indiana Biosciences Research Institute (IBRI)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='hello')
def callback(ch, method, properties, body):
print(" [x] Received %r" % body)
channel.basic_consume(callback,
queue='hello',
no_ack=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
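# Hedged companion sketch (from the standard RabbitMQ "hello world" tutorial,
# not part of this file): a matching publisher would do roughly
#   channel.basic_publish(exchange='', routing_key='hello', body='Hello World!')
# against the same 'hello' queue before this consumer prints the message.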
|
StarcoderdataPython
|
172784
|
# x = n^2 + an + b; |a| < 1000 and |b| < 1000
# b has to be prime (and therefore positive and, apart from 2, odd), because n = 0 gives x = b
# a is taken negative here: the gap between consecutive x values is 2n + 1 + a, so a large
# positive a makes x grow quickly and keeps the run of consecutive primes short
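# For reference (from the Project Euler 27 statement): n^2 + n + 41 is prime for
# n = 0..39, and n^2 - 79n + 1601 is prime for n = 0..79.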
def is_prime(num) :
if (num <= 1) :
return False
if (num <= 3) :
return True
if (num % 2 == 0 or num % 3 == 0) :
return False
i = 5
while(i * i <= num) :
if (num % i == 0 or num % (i + 2) == 0) :
return False
i = i + 6
return True
def max_prod(max_val_a , max_val_b):
max_n = 0
max_b = 0
max_a = 0
for i in range(-max_val_a, -1):
for j in range(1, max_val_b, 2):
n = 0
while is_prime(n**2 + i*n + j):
n += 1
if n > max_n:
max_a = i
max_b = j
max_n = n
return max_a * max_b
if __name__ == '__main__':
print(max_prod(1000, 1000))
|
StarcoderdataPython
|
1687401
|
<gh_stars>10-100
import asyncio
from pprint import pprint
from aiohttp import ClientSession, TCPConnector
async def fetch_url(session, url):
"""return html body of url"""
async with session.get(url, timeout=60 * 60) as response:
return await response.text()
async def fetch_all_urls(session, urls):
"""return html bodies of multiple urls"""
# futures for response html content
futures = [fetch_url(session, url) for url in urls]
# gather all responses as one future
futures = asyncio.gather(*futures, return_exceptions=True)
return await futures
def get_htmls(urls, concurrency=100):
"""
download html contents of supplied urls asynchronously
:param concurrency: amount of concurrent requests
"""
loop = asyncio.get_event_loop()
connector = TCPConnector(limit=concurrency)
session = ClientSession(loop=loop, connector=connector)
htmls = loop.run_until_complete(fetch_all_urls(session, urls))
loop.run_until_complete(session.close())
return dict(zip(urls, htmls))
def crawl():
urls = [f'http://httpbin.org/links/100/{i}' for i in range(10)]
data = get_htmls(urls)
pprint(data)
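# Hedged entry point (not in the original module): run the demo crawl directly.
if __name__ == '__main__':
    crawl()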
|
StarcoderdataPython
|
1685644
|
"""
Copyright 2008-2009 <NAME>
This file is part of PyCAM.
PyCAM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyCAM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyCAM. If not, see <http://www.gnu.org/licenses/>.
"""
from pycam.Geometry.Triangle import Triangle
from pycam.Geometry.Line import Line
from pycam.Geometry.Model import Model
def get_test_model():
points = []
points.append((-2, 1, 4))
points.append((2, 1, 4))
points.append((0, -2, 4))
points.append((-5, 2, 2))
points.append((-1, 3, 2))
points.append((5, 2, 2))
points.append((4, -1, 2))
points.append((2, -4, 2))
points.append((-2, -4, 2))
points.append((-3, -2, 2))
lines = []
lines.append(Line(points[0], points[1]))
lines.append(Line(points[1], points[2]))
lines.append(Line(points[2], points[0]))
lines.append(Line(points[0], points[3]))
lines.append(Line(points[3], points[4]))
lines.append(Line(points[4], points[0]))
lines.append(Line(points[4], points[1]))
lines.append(Line(points[4], points[5]))
lines.append(Line(points[5], points[1]))
lines.append(Line(points[5], points[6]))
lines.append(Line(points[6], points[1]))
lines.append(Line(points[6], points[2]))
lines.append(Line(points[6], points[7]))
lines.append(Line(points[7], points[2]))
lines.append(Line(points[7], points[8]))
lines.append(Line(points[8], points[2]))
lines.append(Line(points[8], points[9]))
lines.append(Line(points[9], points[2]))
lines.append(Line(points[9], points[0]))
lines.append(Line(points[9], points[3]))
model = Model()
for p1, p2, p3, l1, l2, l3 in ((0, 1, 2, 0, 1, 2),
(0, 3, 4, 3, 4, 5),
(0, 4, 1, 5, 6, 0),
(1, 4, 5, 6, 7, 8),
(1, 5, 6, 8, 9, 10),
(1, 6, 2, 10, 11, 1),
(2, 6, 7, 11, 12, 13),
(2, 7, 8, 13, 14, 15),
(2, 8, 9, 15, 16, 17),
(2, 9, 0, 17, 18, 2),
(0, 9, 3, 18, 19, 3)):
model.append(Triangle(points[p1], points[p2], points[p3]))
return model
|
StarcoderdataPython
|
1722379
|
<filename>main.py<gh_stars>0
import string
while True:
user_inp = str(input("Enter Password: "))
s1 = string.ascii_lowercase
s2 = string.ascii_uppercase
s3 = string.digits
s4 = string.punctuation
    # check that the password contains at least one character from each class
    has_lower = any(c in s1 for c in user_inp)
    has_upper = any(c in s2 for c in user_inp)
    has_digit = any(c in s3 for c in user_inp)
    has_punct = any(c in s4 for c in user_inp)
    if has_lower and has_upper and has_digit and has_punct:
        print("Password Strong!")
    elif len(user_inp) >= 8:
        print("Password Strong!")
    else:
        print("Password Weak!")
|
StarcoderdataPython
|
157090
|
import numpy as np
class NeuralNetwork:
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate,
weights_input_to_hidden=None, weights_hidden_to_output=None):
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
        if weights_input_to_hidden is None and weights_hidden_to_output is None:
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
else:
self.weights_input_to_hidden = weights_input_to_hidden
self.weights_hidden_to_output = weights_hidden_to_output
self.lr = learning_rate
def sigmoid(x):
return 1 / (1 + np.exp( -x ))
def sigmoid_prime(x):
return sigmoid(x) * (1 - sigmoid(x))
def linear(x):
return x
def linear_prime(x):
return x ** 0
# Activation functions
self.activation_function = sigmoid
self.activation_function_prime = sigmoid_prime
self.activation_function2 = linear
self.activation_function_prime2 = linear_prime
def train(self, features, targets):
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
# Forward Pass
hidden_inputs = np.dot(X, self.weights_input_to_hidden)
hidden_outputs = self.activation_function(hidden_inputs)
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
final_outputs = self.activation_function2(final_inputs)
# Backward Pass
error = y - final_outputs
output_error_term = error * self.activation_function_prime2(final_inputs)
hidden_error = np.dot(self.weights_hidden_to_output, error)
hidden_error_term = hidden_error * self.activation_function_prime(hidden_inputs)
# Weight steps
delta_weights_i_h += hidden_error_term * X[:, None]
delta_weights_h_o += output_error_term * hidden_outputs[:, None]
# Weights update
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records
def run(self, features):
hidden_inputs = np.dot(features, self.weights_input_to_hidden)
hidden_outputs = self.activation_function(hidden_inputs)
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
final_outputs = self.activation_function2(final_inputs)
return final_outputs
def get_weights(self):
return self.weights_input_to_hidden, self.weights_hidden_to_output
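# Hedged usage sketch (not part of the original module): the layer sizes,
# learning rate, and random data below are illustrative assumptions only.
if __name__ == '__main__':
    features = np.random.rand(10, 3)   # 10 samples, 3 input features
    targets = np.random.rand(10, 1)    # 10 scalar targets
    net = NeuralNetwork(input_nodes=3, hidden_nodes=4, output_nodes=1, learning_rate=0.1)
    net.train(features, targets)
    print(net.run(features).shape)     # expected: (10, 1)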
|
StarcoderdataPython
|
1709695
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-11-6 23:21:21
# @Author : poplar. (<EMAIL>)
# @Link : http://white-poplar.github.io
# @Version : $Id$
import traceback
import lib.Tool
from Action import Action
import time
import threading
def main():
    print("Current time", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
    # Main action handler
    action = Action()
    # Thread group
    threads = []
    # Shake for red packets
    t1 = threading.Thread(target=action.get_packet)
    threads.append(t1)
    # Upload receipts
    t2 = threading.Thread(target=action.up_receipt)
    threads.append(t2)
    # Start all threads first, then wait for them so they run concurrently
    for t in threads:
        # t.setDaemon(True)
        t.start()
    for t in threads:
        t.join()
if __name__ == "__main__":
try:
main()
# pass
except Exception as e:
print("主程序出错(;′⌒`)")
lib.Tool.write_error_log("主程序出错(;′⌒`)")
lib.Tool.write_error_log(traceback.format_exc())
|
StarcoderdataPython
|
3330299
|
<reponame>ChaoTzuYin/AutoPPT
# -*- coding: utf-8 -*-
"""
@author: ChaoTzuYin
"""
from pptx import Presentation
import numpy as np
import io
from PIL import Image
import copy
class info_keeper():
def __init__(self, slide_idx, shape_idx, left, top, width, height):
self.slide_idx = slide_idx
self.shape_idx = shape_idx
self.left = left
self.top = top
self.width = width
self.height = height
class ppt_recorder():
def __init__(self, template):
'''
template [string]: path to your template pptx file
'''
self.prs_ori = Presentation(template)
self.prs = Presentation(template)
self.bias = -len(self.prs_ori.slides)
self.variables = {}
for count in range(len(self.prs_ori.slides)):
slide = self.prs_ori.slides[count]
for shape_count in range(len(slide.shapes)):
shape = slide.shapes[shape_count]
if shape.has_text_frame:
text_frame = shape.text_frame
if('#(' in text_frame.text):
variable_name = text_frame.text[2:-1]
self.variables[variable_name] = info_keeper(slide_idx=count,
shape_idx=shape_count,
left=shape.left,
top=shape.top,
width=shape.width,
height=shape.height)
def _get_blank_slide_layout(self,pres):
layout_items_count = [len(layout.placeholders) for layout in pres.slide_layouts]
min_items = min(layout_items_count)
blank_layout_id = layout_items_count.index(min_items)
return pres.slide_layouts[blank_layout_id]
def new_record(self):
"""Duplicate the slide with the given index in pres.
Adds slide to the end of the presentation"""
if self.bias != -len(self.prs_ori.slides):
for source in self.prs_ori.slides:
blank_slide_layout = self._get_blank_slide_layout(self.prs)
dest = self.prs.slides.add_slide(blank_slide_layout)
for shp in source.shapes:
el = shp.element
newel = copy.deepcopy(el)
dest.shapes._spTree.insert_element_before(newel, 'p:extLst')
self.bias = self.bias + len(self.prs_ori.slides)
def placeholder(self):
return self.variables
def assign(self, feed_dict):
assert self.bias != -len(self.prs_ori.slides), 'NULL SLIDE ERROR: Please call the function "new_record()" to create a new set of slides, with the formats respected to the template, before recording.'
for holder, data in feed_dict.items():
if(isinstance(data, np.ndarray)):
if(len(data.shape)==2):
np_image = np.tile(data[:,:,None]*255,(1,1,3)).astype(np.uint8)
else:
np_image = (data*255).astype(np.uint8)
s = io.BytesIO()
Image.fromarray(np_image).save(s, format='png')
s.seek(0)
x, y, cx, cy = holder.left, holder.top, holder.width, holder.height
Image.MAX_IMAGE_PIXELS = 1000000000
self.prs.slides[holder.slide_idx+self.bias].shapes.add_picture(io.BytesIO(s.getvalue()), x, y, cx, cy)
s.truncate(0)
s.close()
elif(isinstance(data,str)):
text_frame = self.prs.slides[holder.slide_idx+self.bias].shapes[holder.shape_idx].text_frame
cur_text = text_frame.text
new_text = cur_text.replace(cur_text, data)
text_frame.text = new_text
else:
print(type(data))
def to_pptx(self, path):
self.prs.save(path)
if __name__ == '__main__':
# Create ppt recorder given the reference template.
writer = ppt_recorder(template='template_example.pptx')
# Get the placeholders that you've created in the template file.
    # Placeholders are returned as a dictionary keyed by their names.
ph = writer.placeholder()
test_image1 = np.eye(128)
test_image2 = np.random.uniform(low=0.0, high=1.0, size=[64,64,3])
for i in range(3):
        # Duplicate a set of slides with the format of your template pptx file.
# You must include .new_record() in your code even if only one set of your template is needed in your report.
writer.new_record()
# Create a placeholder-data dictionary.
feed_dict={ph['text1']:'Hello World!_'+str(i),
ph['pic1']:test_image1,
ph['pic2']:test_image2,
ph['text2']:"What's up, World?_"+str(i),
ph['pic3']:test_image1,
ph['pic4']:test_image2}
# Assign your data to pptx file.
writer.assign(feed_dict=feed_dict)
#export pptx file.
writer.to_pptx('result.pptx')
|
StarcoderdataPython
|
3310272
|
<reponame>teald/vplanet<filename>tests/IoHeat/test_IoHeat.py
from benchmark import Benchmark, benchmark
import astropy.units as u
import pytest
@benchmark(
{
"log.initial.io.PowerEqtide": {
"value": 9.380954e13,
"unit": u.kg * u.m ** 2 / u.sec ** 3,
},
"log.initial.io.SurfEnFluxEqtide": {
"value": 2.243481,
"unit": u.kg * u.m ** 2 / u.sec ** 2 / (u.m ** 2 * u.sec),
},
"log.initial.io.DsemiDtEqtide": {"value": -1.671102e-09, "unit": u.m / u.sec},
"log.initial.io.DeccDtEqtide": {"value": -8.503575e-16, "unit": 1 / u.sec},
"log.initial.io.DOblDtEqtide": {"value": -3.792877e-13, "unit": u.rad / u.sec},
}
)
class TestIoHeat(Benchmark):
pass
|
StarcoderdataPython
|
154185
|
def aumentar(preco=0, taxa=0, formatado=False):
res = preco + (preco * taxa / 100)
return res if formatado is False else moeda(res)
def diminuir(preco=0, taxa=0, formatado=False):
res = preco - (preco * taxa / 100)
return res if formatado is False else moeda(res)
def dobro(preco=0, formatado=False):
res = preco * 2
return res if formatado is False else moeda(res)
def metade(preco=0, formatado=False):
res = preco / 2
return res if formatado is False else moeda(res)
def moeda(preco=0, moeda="MTs"):
return f"{preco:.2f}{moeda}".replace(".", ",")
def resumo(p=0, taxaa=10, taxar=5):
    print("-" * 30)
    print("VALUE SUMMARY".center(30))
    print("-" * 30)
    print("Analyzed price: \t\t{}".format(moeda(p)))
    print("Double the price: \t\t{}".format(dobro(p, True)))
    print("Half the price: \t\t{}".format(metade(p, True)))
    print("With {}% increase: \t{}".format(taxaa, aumentar(p, taxaa, True)))
    print("With {}% reduction: \t{}".format(taxar, diminuir(p, taxar, True)))
print("-" * 30)
|
StarcoderdataPython
|
12861
|
<filename>python/moderation_text_token_demo.py
# -*- coding:utf-8 -*-
from moderation_sdk.gettoken import get_token
from moderation_sdk.moderation_text import moderation_text
from moderation_sdk.utils import init_global_env
if __name__ == '__main__':
# Services currently support North China-Beijing(cn-north-4),China East-Shanghai1(cn-east-3), CN-Hong Kong(ap-southeast-1),AP-Singapore(ap-southeast-3)
init_global_env('cn-north-4')
#
# access moderation text enhance,posy data by token
#
user_name = '******'
password = '******'
account_name = '******' # the same as user_name in commonly use
token = get_token(user_name, password, account_name)
# call interface use the text
result = moderation_text(token, '<PASSWORD>请+110亚砷酸钾六位qq,fuck666666666666666', 'content',
['ad', 'politics', 'porn', 'abuse', 'contraband', 'flood'])
print(result)
|
StarcoderdataPython
|
1661933
|
"""Emulates a Philips Wake-up Light / sunrise.
This cycles through a sequence of RGB tuples and then
linearly interpolates them in HSV color space as time
proceeds.
The routine can be triggered by the service `pyscript.wake_up_light`.
The sequence is canceled by turning the light on and off.
"""
import sys
sys.path.append("/config/pyscript")
from python_modules.wake_up_light import rgb_and_brightness
DEFAULT_LAMP = "light.ceiling_living_room"
DEFAULT_TOTAL_TIME = 900
DEFAULTS = {
"lamp": DEFAULT_LAMP,
"total_time": DEFAULT_TOTAL_TIME,
}
RGB_SEQUENCE = [
(255, 0, 0),
(255, 0, 0),
(255, 63, 0),
(255, 120, 0),
(255, 187, 131),
(255, 205, 166),
]
MIN_TIME_STEP = 2 # time between settings
@service
def wake_up_light(
lamp=DEFAULT_LAMP,
total_time=DEFAULT_TOTAL_TIME,
):
rgb, brightness = rgb_and_brightness(total_time, RGB_SEQUENCE)
steps = min(total_time // MIN_TIME_STEP, 255) + 1
transition = total_time / (steps - 1)
t = 0
for _ in range(steps):
service.call(
"light",
"turn_on",
entity_id=lamp,
rgb_color=rgb(t),
brightness=brightness(t),
transition=transition,
)
task.sleep(transition)
t += transition
|
StarcoderdataPython
|
1725657
|
from blockchain_users_generator import generator as blockchain_users_generator
users = blockchain_users_generator.generate(1000)
users_dict = [user.to_dict() for user in users]
print("\n\n------model------\n", users[0])
print("\n\n------dict-------\n", users_dict[0])
|
StarcoderdataPython
|
163444
|
# pylint: disable=C0303
import os
import numpy as np
import h5py
import matplotlib.pyplot as plt
from math import sqrt
from array import array
from copy import deepcopy
import pickle
import time
from keras.models import load_model
from collections import namedtuple
from sklearn import utils
from keras.utils import to_categorical
from multiprocessing import Process
import subprocess
#from measure_yields.utilities import TemplatePredictor, Prediction, Histogram
from neuralnet.utilities import select_features
from neuralnet.utilities import get_dataset_from_path
from measure_yields.basepredictors import BasePredictor, TemplatePredictor, \
Histogram, Prediction, stdout_redirected_to
from measure_yields.utilities import data_already_scaled, get_adapted_bin_edges
from ROOT import RooRealVar, RooDataHist, RooDataSet, RooArgSet, TCanvas, TFile
from ROOT import RooKeysPdf, RooAddPdf, RooArgList, RooWorkspace
from ROOT.RooFit import Binning, Save, PrintLevel
#plt.style.use('../../plot/paper.mplstyle')
from matplotlib import rcParams
class MaxLikelihoodNnPredictorBinned(TemplatePredictor):
"""
Predict based on a binned maximum likelihood fit to the output of a single
neural network. Uses HistFactory for the template fit implementation.
Arguments:
title: String
nbins: Number of bins in histograms
saved_model: Pre-trained Keras model (.h5 file)
saved_scaler: Pickled StandardScaler (.pkl file)
samplelist: List of samples to create templates for, e.g. ['A', 'H', 'bgnd']
"""
def __init__(self, title, nbins, saved_model, saved_scaler, samplelist):
        super(MaxLikelihoodNnPredictorBinned, self).__init__(title)
        self.nbins = nbins
# Load the NN model
self.model = load_model(saved_model)
# Load the data scaler
assert os.path.exists(saved_scaler)
        with open(saved_scaler, 'rb') as hfile:
self.scaler = pickle.load(hfile)
# Histogram settings
self.histrange = [0.0, 1.0]
self.samples = samplelist
self.templates = {'H' : None, 'A' : None, 'bgnd' : None}
# Get templates
for sample in self.templates:
filename = '%s/histo_%s_%s.pkl' % (self.outdir, self.title, sample)
if os.path.exists(filename):
print 'Found histogram file:', filename
                with open(filename, 'rb') as hfile:
self.templates[sample] = pickle.load(hfile)
self.binedges = self.templates[sample].bin_edges
else:
print 'Could not locate', filename
# Colors
self.colors = {
"H": "#440154",
"A": "#39568C",
"data": "#FFFFFF",
"model": "#20A387"
}
def create_template(self, x, sample, title, istrain=True, save=False):
"""
Create a histogram of network output for a given sample type
Arguments:
x: Data, un-scaled
sample: Either 'H', 'A' or 'bgnd'
title: String
istrain: Templates for training data are scaled to unit integral
save: Store the template
"""
assert sample in ['H', 'A', 'bgnd'], 'Sample %s not recognised' % sample
assert not data_already_scaled(x), 'Data already scaled?'
# Preprocess the inputs
sx = self.scaler.transform(x)
# Make predictions
preds = self.model.predict(sx) # (?, n_class) matrix, floats
# Choose prediction column
column_to_predict = 0 # -> H
if sample == 'A':
column_to_predict = 1 # -> A
elif sample == 'bgnd':
column_to_predict = 2
# Fill the histogram
hist = Histogram(preds[:, column_to_predict], self.binedges, self.title, title)
# Scale to unit integral
if istrain:
hist.th1.Scale(1.0/hist.th1.Integral())
if save:
            with open('%s/histo_%s_%s.pkl' % (self.outdir, self.title, title), 'wb') as hout:
pickle.dump(hist, hout, -1)
return hist
def create_train_templates(self, paths_to_data, adaptive_binning=False):
"""
Create templates from training files. Backgrounds merged into one.
Arguments:
paths_to_data: List of paths to directories containing training
data. Typically only one, but can also train on multiple
datasets from different models (i.e. different masses)
adaptive_binning: Use narrower bin width in dense regions
"""
if not isinstance(paths_to_data, list):
paths_to_data = [paths_to_data]
files_H = []
files_A = []
for path in paths_to_data:
# These are merged, so only one of each
if not path.endswith('/'): path += '/'
files_H.append(path+'model_H_merged.h5')
files_A.append(path+'model_A_merged.h5')
# Find background samples, put into one template
files_bgnd = []
for path in paths_to_data:
for sample in self.samples:
if sample not in ['H', 'A']:
files_bgnd.append(path+'/model_%s_merged.h5' % sample)
# Make the templates
dataH_X, _, feats = self.read_array_from_files(files_H)
dataH_X, _ = select_features(dataH_X, feats, include_mass_variables=False)
dataA_X, _, feats = self.read_array_from_files(files_A)
dataA_X, _ = select_features(dataA_X, feats, include_mass_variables=False)
dataH_X = self.scaler.transform(dataH_X)
dataA_X = self.scaler.transform(dataA_X)
print 'WARNING: LIMITING TEMPLATE EVENTS'
dataH_X = dataH_X[:5000]
dataA_X = dataA_X[:5000]
# Need all data merged in order to do adaptive binning
data_all_X = np.vstack((dataH_X, dataA_X))
if files_bgnd:
dataBgnd_X, _, feats = self.read_array_from_files(files_bgnd)
dataBgnd_X, _ = select_features(dataBgnd_X, feats, include_mass_variables=False)
dataBgnd_X = self.scaler.transform(dataBgnd_X)
data_all_X = np.vstack((data_all_X, dataBgnd_X))
# Get reasonable histogram binning
if adaptive_binning:
temp_preds = self.model.predict(data_all_X)[:, 1]
self.binedges = get_adapted_bin_edges(temp_preds, fullrange=(0,1))
else:
self.binedges = np.linspace(0, 1, self.nbins+1)
print ' DBG: bin edges:', self.binedges
# Now create the templates
self.templates['H'] = self.create_template(dataH_X, 'H', 'H', istrain=True, save=True)
self.templates['A'] = self.create_template(dataA_X, 'A', 'A', istrain=True, save=True)
if files_bgnd:
self.templates['bgnd'] = self.create_template(dataBgnd_X, 'bgnd', 'bgnd', istrain=True, save=True)
# Plot templates
"""
fig = plt.figure()
plt.hist(self.model.predict(dataH_X)[:,1].flatten(), bins=self.binedges, normed=True, histtype='step', label='H')
plt.hist(self.model.predict(dataA_X)[:,1].flatten(), bins=self.binedges, normed=True, histtype='step', label='A')
plt.legend(loc='best')
fig.show()
plt.show()
"""
print 'Created train templates for', self.title
class MaxLikelihoodNnPredictorUnbinned(BasePredictor):
"""
Predict based on an unbinned maximum likelihood fit to the output of a
single neural network. Uses kernel density estimation (KDE) to create pdfs
for network output distributions for H and A
Arguments:
title: String
saved_model: Pre-trained Keras model (.h5 file)
saved_scaler: Pickled StandardScaler (.pkl file)
samplelist: List of samples to create templates for, e.g. ['A', 'H', 'bgnd']
"""
def __init__(self, title, saved_model, saved_scaler, sample_list):
super(MaxLikelihoodNnPredictorUnbinned, self).__init__(title)
# Load the NN model
self.model = load_model(saved_model)
# Load the data scaler
assert os.path.exists(saved_scaler)
        with open(saved_scaler, 'rb') as hfile:
self.scaler = pickle.load(hfile)
# RooFit variables
self.roopred = None
self.theta_min, self.theta_max = -10, 10
self.samples = sample_list
self.pdfs = {'H' : None, 'A' : None, 'bgnd' : None}
# Load existing pdfs
filename = '%s/pdfs_%s.root' % (self.outdir, self.title)
if os.path.exists(filename):
print 'Found pdf file:', filename
fin = TFile(filename)
ws = fin.Get('ws')
self.roopred = ws.var('roopred')
self.pdfs['H'] = ws.pdf('keysH')
self.pdfs['A'] = ws.pdf('keysA')
# Tweak ROOT
from ROOT.Math import MinimizerOptions
MinimizerOptions.SetDefaultMinimizer("Minuit2");
MinimizerOptions.SetDefaultStrategy(2);
# Debug
self.max_nn_output = 0.0
self.min_nn_output = 1.0
# Colors
self.colors = {
"H": "#440154",
"A": "#39568C",
"data": "#FFFFFF",
"model": "#20A387"
}
def create_pdfs(self, paths_to_data, save=True):
"""
Create KDE pdfs for network output distribution
"""
if not isinstance(paths_to_data, list):
paths_to_data = [paths_to_data]
files_H = []
files_A = []
for path in paths_to_data:
# These are merged, so only one of each
if not path.endswith('/'): path += '/'
files_H.append(path+'model_H_merged.h5')
files_A.append(path+'model_A_merged.h5')
# Find background samples
files_bgnd = []
for path in paths_to_data:
for sample in self.samples:
if sample not in ['H', 'A']:
files_bgnd.append(path+'/model_%s_merged.h5' % sample)
# Read in the data
dataH_X, _, feats = self.read_array_from_files(files_H)
dataH_X, _ = select_features(dataH_X, feats, include_mass_variables=False)
dataH_X = self.scaler.transform(dataH_X)
rho = 0.8
#ntemplateevents = 1000000
ntemplateevents = 500000
#ntemplateevents = 50000
print 'Limiting events for pdf creation to %d' % ntemplateevents
# 700k -> 4h 42m
# 500k -> 2h 30m
# 200k -> 0h 27m
# 100k -> 0h 7m
# 50k -> 0h 2m
dataH_X = dataH_X[:ntemplateevents]
# Predict, fill a RooDataHist
starttime = time.time()
predsH = self.model.predict(dataH_X)[:,1]
self.roopred = RooRealVar('roopred', 'roopred', 0, 1)
roopreddataH = RooDataSet('roopreddataH', 'roopreddataH', RooArgSet(self.roopred))
for pred in predsH:
self.roopred.setVal(pred)
roopreddataH.add(RooArgSet(self.roopred))
# Create the KDE pdfs
def createKDE(self, data, htype):
starttime = time.time()
keys = RooKeysPdf('keys%s' % htype, 'keys%s' % htype, self.roopred, data)
self.pdfs[htype] = keys
print 'Creating KDE pdf for %s took %s' % (htype, time.strftime("%H:%M:%S", time.gmtime(time.time()-starttime)))
from ROOT.RooKeysPdf import NoMirror
keysH = RooKeysPdf('keysH', 'keysH', self.roopred, roopreddataH, NoMirror, rho)
self.pdfs['H'] = keysH
# Do the same for A
dataA_X, _, feats = self.read_array_from_files(files_A)
dataA_X, _ = select_features(dataA_X, feats, include_mass_variables=False)
dataA_X = self.scaler.transform(dataA_X)
dataA_X = dataA_X[:ntemplateevents]
starttime = time.time()
predsA = self.model.predict(dataA_X)[:,1]
roopreddataA = RooDataSet('roopreddataA', 'roopreddataA', RooArgSet(self.roopred))
for pred in predsA:
self.roopred.setVal(pred)
roopreddataA.add(RooArgSet(self.roopred))
keysA = RooKeysPdf('keysA', 'keysA', self.roopred, roopreddataA, NoMirror, rho)
self.pdfs['A'] = keysA
if save:
ws = RooWorkspace('ws', 'ws')
getattr(ws, 'import')(self.roopred)
getattr(ws, 'import')(self.pdfs['H'])
getattr(ws, 'import')(self.pdfs['A'])
ws.writeToFile('%s/pdfs_%s.root' % (self.outdir, self.title))
def predict(self, x, theta_true):
"""
Run an unbinned ML fit to make predictions
"""
# Create RooDataSet
xs = self.scaler.transform(x)
preds = self.model.predict(xs)[:, 1]
min_nn_output_local, max_nn_output_local = np.min(preds), np.max(preds)
if min_nn_output_local < self.min_nn_output:
self.min_nn_output = min_nn_output_local
if max_nn_output_local > self.max_nn_output:
self.max_nn_output = max_nn_output_local
roodata = RooDataSet('data', 'data', RooArgSet(self.roopred))
for pred in preds:
self.roopred.setVal(pred)
roodata.add(RooArgSet(self.roopred))
# Fit
theta = RooRealVar('theta', 'theta', 0.5, self.theta_min, self.theta_max)
model = RooAddPdf('model', 'model',
RooArgList(self.pdfs['A'], self.pdfs['H']),
RooArgList(theta))
with stdout_redirected_to('%s/minuit_output.log' % self.outdir):
res = model.fitTo(roodata, Save(True))
nll = res.minNll()
fitstatus = res.status()
fitstatus |= (not subprocess.call(['grep', 'p.d.f value is less than zero', 'output_MLE_unbinned/minuit_output.log']))
fitted_theta = theta.getValV()
# Get Lambda(theta_true | theta_best)
logl = model.createNLL(roodata)
theta.setVal(theta_true)
nll_theta_true = logl.getValV()
nll_ratio = nll_theta_true - nll
return fitted_theta, nll, nll_ratio, fitstatus
def predict_and_plot(self, x):
"""
Do the same as predict(), but add plots
Return -logL ratio, for external plotting
"""
rcParams['xtick.major.pad'] = 12
rcParams['ytick.major.pad'] = 12
xs = self.scaler.transform(x)
preds = self.model.predict(xs)[:, 1]
roodata = RooDataSet('data', 'data', RooArgSet(self.roopred))
for pred in preds:
self.roopred.setVal(pred)
roodata.add(RooArgSet(self.roopred))
theta = RooRealVar('theta', 'theta', 0.5, self.theta_min, self.theta_max)
model = RooAddPdf('model', 'model',
RooArgList(self.pdfs['A'], self.pdfs['H']),
RooArgList(theta))
#with stdout_redirected_to():
print '\n\nNEURAL NETWORK FIT'
res = model.fitTo(roodata, PrintLevel(10))
fitted_theta = theta.getValV()
# Histogram binning for data points
nbins = 14
xvals = np.linspace(0, 1, 300)
yvals_H = []
yvals_A = []
# Get points for pdf curves
for xval in xvals:
self.roopred.setVal(xval)
yvals_H.append(self.pdfs['H'].getValV(RooArgSet(self.roopred)))
yvals_A.append(self.pdfs['A'].getValV(RooArgSet(self.roopred)))
yvals_H = np.array(yvals_H)
yvals_A = np.array(yvals_A)
# Plot pdfs by themselves
fig = plt.figure()
plt.plot(xvals, yvals_H, color=self.colors["H"], label=r'$p_{H}(y)$')
plt.plot(xvals, yvals_A, color=self.colors["A"], label=r'$p_{A}(y)$')
plt.fill_between(xvals, 0, yvals_H, color=self.colors["H"], alpha=0.2)
plt.fill_between(xvals, 0, yvals_A, color=self.colors["A"], alpha=0.2)
plt.xlim([0, 1])
plt.ylim([0, 11])
plt.xlabel(r'Network output ($y$)')
plt.ylabel('Probability density')
plt.legend(loc='upper right')
plt.tight_layout()
fig.show()
fig.savefig('mle_nn_pdfs.pdf')
# Scale to event yield
yvals_H *= roodata.numEntries()*(1-fitted_theta)/float(nbins)
yvals_A *= roodata.numEntries()*(fitted_theta)/float(nbins)
yvals_sum = yvals_H + yvals_A
# Make plot of fit to data
fig, ax = plt.subplots(1)
histentries, binedges = np.histogram(preds, bins=nbins, range=(0, 1))
bincenters = (binedges[:-1] + binedges[1:])/2.0
yerr = np.sqrt(histentries)
plt.errorbar(bincenters, histentries, xerr=np.diff(binedges)*0.5, yerr=yerr, linestyle='None', ecolor='black', label='Data')
plt.plot(xvals, yvals_H, color=self.colors["H"], label=r'$p_{H}(y)$')
plt.plot(xvals, yvals_A, color=self.colors["A"], label=r'$p_{A}(y)$')
plt.plot(xvals, yvals_sum, color=self.colors["model"], label=r'$p(y \,|\, \alpha = %.2f)$' % fitted_theta)
plt.fill_between(xvals, 0, yvals_H, color=self.colors["H"], alpha=0.2)
plt.fill_between(xvals, 0, yvals_A, color=self.colors["A"], alpha=0.2)
# Set correct legend order
handles, labels = ax.get_legend_handles_labels()
handles = [handles[3], handles[2], handles[0], handles[1]]
labels = [labels[3], labels[2], labels[0], labels[1]]
ax.legend(handles, labels, loc='upper right')
plt.xlabel(r'Network output ($y$)')
plt.ylabel('Events / %.2f' % (1.0/nbins))
axes = plt.gca()
axes.set_xlim([0, 1])
axes.set_ylim([0, max(histentries)*2.3])
plt.tight_layout()
fig.show()
fig.savefig('mle_nn_fit.pdf')
# Create likelihood curve
logl = model.createNLL(roodata)
xvals = np.linspace(0, 1, 200)
yvals = []
ymin = 999.
for xval in xvals:
theta.setVal(xval)
yvals.append(logl.getValV())
if yvals[-1] < ymin:
ymin = yvals[-1]
yvals = np.array(yvals)
yvals -= ymin
# Return points for the NLL curve
return xvals, yvals
class MostLikelyNnPredictorBinned(BasePredictor):
"""
Do maximum likelihood fits to the output of several networks, predict based
on the result of the best fit
Arguments:
title: String
nbins: Number of bins in histograms
saved_models: List of pre-trained Keras models (.h5 files)
saved_scaler: Pickled StandardScaler (.pkl file)
sample_list: List of samples to consider, e.g. ['H', 'A', 'bgnd']
mode: Choose NLL-weighted average of all networks ('weighted'), or pick
the one with lowest NLL value ('pick-best')
"""
def __init__(self, title, nbins, saved_models, saved_scaler, sample_list, mode='pick-best'):
super(MostLikelyNnPredictorBinned, self).__init__(title)
assert mode in ['weighted', 'pick-best'], 'Invalid mode: %s' % mode
self.mode = mode
self.predictors = []
for modelfile in saved_models:
thistitle = modelfile.split('/')[-1].replace('.h5', '')
            mlpred = MaxLikelihoodNnPredictorBinned(
title=thistitle,
nbins=nbins, saved_model=modelfile, saved_scaler=saved_scaler,
samplelist=sample_list)
self.predictors.append(mlpred)
def create_train_templates(self, paths):
""" Create train templates for all models """
for pred in self.predictors:
pred.create_train_templates(paths)
def predict(self, x, plot=False):
allpreds = []
allnlls = []
for predictor in self.predictors:
preds, nll = predictor.predict(x, plot=plot)
allpreds.append(preds)
allnlls.append(nll)
if self.mode == 'weighted':
nAs = []; nHs = []; nBgnds = []
for p in allpreds:
nAs.append(p.nA)
nHs.append(p.nH)
nBgnds.append(p.nBgnd)
result = Prediction(nH=np.average(nHs, weights=allnlls),
nA=np.average(nAs, weights=allnlls),
nBgnd=np.average(nBgnds, weights=allnlls))
elif self.mode == 'pick-best':
            result = allpreds[allnlls.index(min(allnlls))]
print ' DBG: nlls are:', allnlls
return result
class MostLikelyNnPredictorUnbinned(BasePredictor):
"""
Do maximum likelihood fits to the output of several networks, predict based
on the result of the best fit
Arguments:
title: String
saved_models: List of pre-trained Keras models (.h5 files)
saved_scaler: Pickled StandardScaler (.pkl file)
sample_list: List of samples to consider, e.g. ['H', 'A', 'bgnd']
mode: Choose NLL-weighted average of all networks ('weighted'), or pick
the one with lowest NLL value ('pick-best')
"""
def __init__(self, title, saved_models, saved_scaler, sample_list, mode='pick-best'):
super(MostLikelyNnPredictorUnbinned, self).__init__(title)
assert mode in ['weighted', 'pick-best'], 'Invalid mode: %s' % mode
self.mode = mode
self.predictors = []
for modelfile in saved_models:
thistitle = modelfile.split('/')[-1].replace('.h5', '')
mlpred = MaxLikelihoodNnPredictorUnbinned(
title=thistitle,
saved_model=modelfile, saved_scaler=saved_scaler,
sample_list=sample_list)
self.predictors.append(mlpred)
def create_pdfs(self, paths):
""" Create pdfs for all models """
for pred in self.predictors:
pred.create_pdfs(paths)
def predict(self, x, plot=False):
allpreds = []
allnlls = []
for predictor in self.predictors:
            preds, nll, _, _ = predictor.predict(x, theta_true=0)  # TODO
allpreds.append(preds)
allnlls.append(nll)
if self.mode == 'weighted':
            result = np.average(allpreds, weights=allnlls)
elif self.mode == 'pick-best':
result = allpreds[allnlls.index(min(allnlls))]
return result, allnlls.index(min(allnlls))
|
StarcoderdataPython
|
1731893
|
<reponame>letuananh/pyinkscape
import logging
import platform
import subprocess
from pathlib import Path
WIN_EXE_POTENTIAL_PATHS = [
"C:\\Program Files\\Inkscape\\inkscape.exe",
"C:\\Program Files\\Inkscape\\bin\\inkscape.exe"
]
if platform.system() == "Windows":
INKSCAPE_PATH = None
for _potential_path in WIN_EXE_POTENTIAL_PATHS:
if Path(_potential_path).is_file():
INKSCAPE_PATH = _potential_path
if not INKSCAPE_PATH:
# use any inkscape.exe in PATH as backup solution
INKSCAPE_PATH = "inkscape.exe"
else:
INKSCAPE_PATH = "/usr/bin/inkscape"
try:
from PyPDF2 import PdfFileMerger
PYPDF2_ENABLED = True
except Exception as e:
PYPDF2_ENABLED = False
def getLogger():
return logging.getLogger(__name__)
def _verify_pypdf():
''' Verify that it is possible to merge PDF files with current setup (PyPDF2, pdfunite, etc.) '''
if not PYPDF2_ENABLED:
if platform.system() == "Windows":
logging.getLogger(__name__).error("pyInkscape requires PyPDF2 when running on Windows")
            raise ImportError("PyPDF2 is required on Windows but could not be imported")
else:
logging.getLogger(__name__).warning("PyPDF2 is not available. PDF files will be merged using `pdfunite`")
# TODO: Verify that pdfunite is available at runtime
return False
else:
return True
def prepare_output_dir(output_dir='output', mkdir=False):
output_dir = Path(output_dir)
if mkdir and not output_dir.exists():
output_dir.mkdir(parents=True)
return output_dir
def svg_to_pdf(filename, overwrite=False, inkscape_path=INKSCAPE_PATH):
''' Convert an SVG file into PDF using Inkscape '''
_inkscape_path_obj = Path(inkscape_path)
if not _inkscape_path_obj.is_file():
getLogger().error(f"Inkscape binary is not available at {inkscape_path}")
svg_file = Path(filename)
output_dir = svg_file.parent
pdf_file = output_dir / (svg_file.stem + ".pdf")
if not overwrite and pdf_file.exists():
getLogger().warning(f"WARNING: File {pdf_file} exists. SKIPPED")
else:
output = subprocess.run([inkscape_path, f"{svg_file}", f"--export-filename={pdf_file}", "--export-area-drawing"])
if output.returncode != 0:
getLogger().warning(f"Abnomal Inkscape exit code: {output.returncode}")
def merge_pdf(output_path, input_paths, **kwargs):
    ''' Merge different PDF files into one '''
if _verify_pypdf():
merger = PdfFileMerger()
file_objects = []
for input_path in input_paths:
input_file = open(input_path, "rb")
file_objects.append(input_file)
merger.append(input_file)
with open(output_path, "wb") as output_file:
merger.write(output_file)
for file_obj in file_objects:
file_obj.close()
else:
# use pdfunite command to merge PDF files
subprocess.run(["pdfunite"] + input_paths + [output_path])
|
StarcoderdataPython
|
1774449
|
class Solution:
def minFallingPathSum(self, A):
"""
:type A: List[List[int]]
:rtype: int
"""
rows = len(A)
if rows == 0:
return 0
cols = len(A[0])
if cols == 0:
return 0
if rows == 1:
return min(A[0])
# dp[i][j] stores the minimum falling path sum for starting point i,j
dp = [[0]*cols for _ in range(rows)]
for j in range(cols):
dp[-1][j] = A[-1][j]
for i in range(rows-2, -1, -1):
dp[i][0] = A[i][0]+min(dp[i+1][0], dp[i+1][1])
dp[i][cols-1] = A[i][cols-1] + min(dp[i+1][cols-1], dp[i+1][cols-2])
for j in range(1, cols-1):
dp[i][j] = A[i][j] + min(dp[i+1][j], dp[i+1][j-1], dp[i+1][j+1])
return min(dp[0])
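# Hedged usage sketch (not part of the original file): LeetCode 931 example,
# where the minimum falling path 1 -> 4 -> 8 sums to 13.
if __name__ == '__main__':
    print(Solution().minFallingPathSum([[2, 1, 3], [6, 5, 4], [7, 8, 9]]))  # expected: 13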
|
StarcoderdataPython
|
4816146
|
<filename>Homework/2019/Task1/6/code/hyperparams.py<gh_stars>0
# -*- coding: utf-8 -*-
#/usr/bin/python2
'''
By <NAME>. <EMAIL>.
www.github.com/kyubyong/neural_chinese_transliterator
'''
class Hyperparams:
'''Hyper parameters'''
    isqwerty = True # If False, a nine-key keyboard layout is assumed.
# model
embed_size = 300 # alias = E
encoder_num_banks = 16
num_highwaynet_blocks = 4
    maxlen = 50 # maximum length of a pinyin sentence
    minlen = 10 # minimum length of a pinyin sentence
norm_type = "bn" # Either "bn", "ln", "ins", or None
dropout_rate = 0.5
# training scheme
lr = 0.0001
logdir = "log/qwerty" if isqwerty is True else "log/nine"
batch_size = 64
num_epochs = 20
|
StarcoderdataPython
|
3213936
|
<reponame>nbeaver/fonts_with_chars<filename>fonts_with_chars.py
#! /usr/bin/env python3
import argparse
import fontconfig
def get_fonts_with_chars(chars):
for font_file in fontconfig.query():
font = fontconfig.FcFont(font_file)
if all([font.has_char(char) for char in chars]):
yield font
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Find a list of fonts that support a given list of characters.'
)
parser.add_argument(
'chars',
type=str,
help='Characters the font must support.',
)
args = parser.parse_args()
for font in get_fonts_with_chars(args.chars):
print(font.file)
|
StarcoderdataPython
|
3276925
|
<reponame>joshbaptiste/media_cache_cluster
import os
import logging
import configparser
import psutil
import scanner
log = logging.getLogger(__name__)
def parseConfig(configFile):
    """ Parses config file """
    options = {}
    config = configparser.ConfigParser()
    config.read(configFile)
    if config.has_section('global'):
        sections = config.sections()
        log.info("Loading Sections:")
        for section in sections:
            value = dict(config.items(section))
            log.info("Found Section: {} = {}".format(section, value))
            options[section] = value
    else:
        raise configparser.NoSectionError('global')
    return options
def checkDiskUsage(path):
try:
du = psutil.disk_usage(path)
return du
except OSError:
log.error("Unable to check disk usage of: " + path)
def checkSymLinks(config):
    """
    Scans the local directory which contains symlinks
    """
    log.info("Scanning directory {} for symlinks".format(config.LOCAL_DIR))
    scandir = scanner.Scanner()
    for file in scandir.scanDirectory(config.LOCAL_DIR):
        stats = os.stat(file, follow_symlinks=False)
|
StarcoderdataPython
|
198640
|
from django.conf import settings
# Port used to communicate with Discord Proxy
DISCORDNOTIFY_DISCORDPROXY_PORT = getattr(
settings, "DISCORDNOTIFY_DISCORDPROXY_PORT", 50051
)
# When set to True, only superusers will get their notifications forwarded
DISCORDNOTIFY_SUPERUSER_ONLY = getattr(settings, "DISCORDNOTIFY_SUPERUSER_ONLY", False)
# Set this to False to disable this app temporarily
DISCORDNOTIFY_ENABLED = getattr(settings, "DISCORDNOTIFY_ENABLED", True)
# When set to True, all notifications that have been successfully
# submitted to Discord will be marked as read
DISCORDNOTIFY_MARK_AS_VIEWED = getattr(settings, "DISCORDNOTIFY_MARK_AS_VIEWED", False)
|
StarcoderdataPython
|
127725
|
#
# littletable_demo.py
#
# Copyright 2010, <NAME>
#
from __future__ import print_function
from littletable import Table
from collections import namedtuple
import sys
Customer = namedtuple("Customer", "id name")
CatalogItem = namedtuple("CatalogItem", "sku descr unitofmeas unitprice")
customers = Table("customers")
customers.create_index("id", unique=True)
customer_data = """\
id,name
0010,<NAME>
0020,<NAME>
0030,<NAME>"""
customers.csv_import(customer_data, row_class=Customer)
catalog = Table("catalog")
catalog.create_index("sku", unique=True)
catalog_data = """\
sku,descr,unitofmeas,unitprice
BRDSD-001,Bird seed,LB,3
BBS-001,Steel BB's,LB,5
MGNT-001,Magnet,EA,8
MAGLS-001,Magnifying glass,EA,12
ANVIL-001,1000lb anvil,EA,100
ROPE-001,1 in. heavy rope,100FT,10
ROBOT-001,Domestic robot,EA,5000"""
catalog.csv_import(catalog_data, row_class=CatalogItem, transforms={"unitprice": int})
wishitems = Table("wishitems")
wishitems.create_index("custid")
wishitems.create_index("sku")
# there is no user-defined type for these items, just use DataObjects
wishlist_data = """\
custid,sku
0030,MAGLS-001
0020,MAGLS-001
0020,ANVIL-001
0020,ROPE-001
0020,BRDSD-001
0020,BBS-001
0020,MAGNT-001
0030,MAGNT-001
0030,ROBOT-001
0010,ROBOT-001"""
wishitems.csv_import(wishlist_data)
# print a particular customer name
print(customers.by.id["0030"].name)
print()
# print all items sold by the pound
for item in catalog.where(unitofmeas="LB"):
print(item.sku, item.descr)
print()
# if querying on an indexed item, use ".by.attribute-name[key]"
catalog.create_index("unitofmeas")
for item in catalog.by.unitofmeas["LB"]:
print(item.sku, item.descr)
print()
# print all items that cost more than 10
for item in catalog.where(lambda ob: ob.unitprice > 10):
print(item.sku, item.descr, item.unitprice)
print()
# join tables to create queryable wishlists collection - the following are all equivalent
wishlists = (customers.join_on("id") + wishitems.join_on("custid")).join_on("sku") + catalog.join_on("sku")
wishlists = (customers.join_on("id") + wishitems.join_on("custid")).join_on("sku") + catalog
wishlists = catalog + (customers.join_on("id") + wishitems.join_on("custid")).join_on("sku")
wishlists = catalog.join_on("sku") + (customers.join_on("id") + wishitems.join_on("custid"))
wishlists = customers.join_on("id") + wishitems.join_on("custid") + catalog.join_on("sku")
print(wishlists().table_name)
print(wishlists()("wishlists").table_name)
# print all wishlist items with price > 10 (use Table.gt instead of lambda)
# bigticketitems = wishlists().where(lambda ob : ob.unitprice > 10)
bigticketitems = wishlists().where(unitprice=Table.gt(10))
for bti in bigticketitems:
print(bti)
print()
# list all wishlist items by customer, then in descending order by unit price
for item in wishlists().sort("custid, unitprice desc"):
print(item)
print()
# display formatted tabular output (only on Python 3.6 and later)
if sys.version_info >= (3, 6):
wishlists().sort("custid, unitprice desc")("Wishlists").select(
"custid name sku descr"
).present()
# create simple pivot table, grouping wishlist data by customer name
wishlistsdata = wishlists()
wishlistsdata.create_index("name")
pivot = wishlistsdata.pivot("name")
pivot.dump(row_fn=lambda o: "%s %s" % (o.sku, o.descr))
print()
# pivot on both sku number and customer name, giving tabular output
piv2 = wishlistsdata.pivot("sku name")
piv2.dump_counts()
print()
# pivot on both sku number and customer name, giving tabular output
# tabulate by sum(unitprice) for all items in each pivot table cell
piv2.dump_counts(count_fn=lambda recs: sum(r.unitprice for r in recs))
print()
|
StarcoderdataPython
|
3310931
|
<gh_stars>0
import logging
from models.datamodel import DataModel
from models.datamodel import TextDataRow
import fileutils
import time
import csv_formatter
from collections import Counter
from enum import Enum
from nltk.stem.snowball import SnowballStemmer
logger = logging.getLogger()
class ProcessMode(Enum):
ROW = 1
COL = 2
def printDict(d):
sortedKeys = sorted(d.keys())
for key in sortedKeys:
print(key, ' => ', d[key])
def formatForCSV(keywords, allCounters, mode):
if mode == ProcessMode.ROW:
return csv_formatter.rowFormatForCSV(keywords, allCounters)
else:
return csv_formatter.colFormatForCSV(keywords, allCounters)
class Processor(object):
__slots__ = 'model', 'language', 'stemmer', 'keywords', 'noise'
def __init__(self, language, keywordfile, noisefile):
self.model = DataModel()
self.language = language
self.stemmer = SnowballStemmer(language)
logger.info('creating stemmer for language %s', language)
self.keywords = self.prepareWords(keywordfile)
self.noise = self.prepareWords(noisefile)
def prepareWords(self, file):
words = fileutils.safeReadWordsFromFile(file)
wordSet = set()
for w in words:
prepared = self.prepare(w)
logger.info(w + ' => ' + prepared)
wordSet.add(prepared)
return wordSet
def prepare(self, word):
return self.stemmer.stem(word.strip().lower())
def process(self, name, mode=ProcessMode.COL):
logger.info('processing %s', name)
rows = self.model.findByName(name)
if rows:
results = dict()
for row in rows:
counter = self.processRow(row, self.keywords, self.noise)
results[row.name] = counter
printDict(results)
csv = formatForCSV(self.keywords, results, mode)
fileutils.writeToFile(name + '.csv', csv)
else:
logger.error('no data for name %s', name)
def processRow(self, row, keywords, noise):
#logger.debug('processing row: %s', row.name)
start = time.time()
content = row.data.split('\n')
words = fileutils.splitLinesIntoWords(content)
#logger.debug('word count in document: %d', len(words))
counter = Counter()
for word in words:
wordLc = self.prepare(word)
if (not wordLc in noise and wordLc in keywords):
counter[wordLc] += 1
end = time.time()
elapsed = end - start
logger.debug('row %s processed in %.2f seconds', row.name, elapsed)
#logger.info(str(counter))
return counter
#logger.debug(row.data)
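# Hedged usage sketch (not part of the original module): the language, keyword
# and noise file names, and document name below are illustrative assumptions.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    processor = Processor('english', 'keywords.txt', 'noise.txt')
    processor.process('example_document', mode=ProcessMode.ROW)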
|
StarcoderdataPython
|
1778560
|
import pandas as pd
import numpy as np
file = pd.read_csv('Non_US_Cities.csv')
for country in file.Country.unique():
print(country)
temp = file[file.Country == country]
master = pd.DataFrame()
for i, row in temp.iterrows():
print(row.City)
df = pd.read_csv(row['Reviews Link'])
df['City'] = np.full(len(df), row.City)
df['Country'] = np.full(len(df), row.Country)
df['Continent'] = np.full(len(df), row.Continent)
df['Region'] = np.full(len(df), row.Region)
master = pd.concat([master, df])
master.to_csv('non_us_reviews_' + country + '.csv', index=False)
|
StarcoderdataPython
|
35787
|
<reponame>MarshallRawson/NaviGator<filename>mission_control/navigator_missions/navigator_missions/start_gate_marshall.py
#!/usr/bin/env python
from __future__ import division
import txros
import numpy as np
import mil_tools
from mil_misc_tools.text_effects import fprint
from navigator import Navigator
import math
from twisted.internet import defer
from mil_tools import rosmsg_to_numpy
from mil_misc_tools import ThrowingArgumentParser
__author__ = "<NAME>"
#This mission takes care of the second part of the qualifier objective
class StartGateMarshall(Navigator):
    # this method gets the location of the nearest black totem and the scan the code platform
    # from the PCODAR database
    # runs the scan the code perception script eventually
def get_scan_the_code(self):
#currently hard coded, no STC that works yet :/
return False
#returns the xy of target totem and unit vector from target to non target totem
@txros.util.cancellableInlineCallbacks
def get_bouy_go_round_target(self):
return_array = []
#gets the xy and state of the scan the code from the database
scan_the_code = np.array([])
res = yield self.database_query('stc_platform')
#makes sure that only 1 scan the code exists
assert len(res.objects) == 1
        # raises an error if the scan the code platform is not found
        if not res.found:
            raise Exception('stc_platform not found in object database')
point = rosmsg_to_numpy(res.objects[0].pose.position)[:2]
        # runs the function that retrieves the scan the code state: True to circle scan
        # the code, False to circle the black totem
scan_the_code = point
return_array.append(scan_the_code)
#print scan_the_code
#this portion of the method gets the location of the nearest black totem
#gets all of the black totems from the database
num_of_black_totems = 1
black_totems = yield self.database_query('totem_black')
black_totems_poses = []
for i in black_totems.objects:
point = rosmsg_to_numpy(i.pose.position)[:2]
black_totems_poses.append(point)
        # the following determines which is the closest
        # i wish python had a do while loop
        closest = black_totems_poses[0]
        dist = ((black_totems_poses[0][0]-self.pose[0][0])**2)+((black_totems_poses[0][1]-self.pose[0][1])**2)
        j = 0  # an index for iterating over the candidate totems
        while j < len(black_totems_poses):
            dist_temp = ((black_totems_poses[j][0]-self.pose[0][0])**2)+((black_totems_poses[j][1]-self.pose[0][1])**2)
            if dist_temp < dist:
                dist = dist_temp
                closest = black_totems_poses[j]
j+=1
#closest now has the position of the closest black totem
#closest is a np array
return_array.append(closest)
#returnValue has the scan the code and closest black totem location
defer.returnValue(return_array)
@txros.util.cancellableInlineCallbacks
def bouy_go_round(self):
TOTEM_MARGIN = 6 #m, distance to pass behind the totem
start_pose = self.pose[0][:2]
locations = yield self.get_bouy_go_round_target()
#target contains xy of target totem and unit vector from target to non target totem
scan_the_code = locations[0]
black_totem = locations[1]
#an ENU vector from the scan_the_code to start pose of magnitude TOTEM_MARGIN (N=0)
stc_waypoint = np.append((((start_pose-scan_the_code)/np.linalg.norm(start_pose-scan_the_code))*TOTEM_MARGIN)+scan_the_code, 0)
#go to the end of that vector and look at the scan_the_code platform
yield self.move.set_position(stc_waypoint).look_at(np.append(scan_the_code, 0)).go()
        # determine whether or not to circle the stc platform
        if self.get_scan_the_code() == True:
            # turn 90deg to the left so we circle prograde
yield self.move.yaw_left(math.pi/2).go()
            # we circle clock-wise .75 revolutions
circle = self.move.circle_point([scan_the_code[0], scan_the_code[1], 0], "cw", .75)
elif self.get_scan_the_code() == False:
#an ENU vector from black_totem to self.pose of magnitude TOTEM_MARGIN (N=0)
black_totem_waypoint = np.append(((((self.pose[0][:2]-black_totem[:2])/np.linalg.norm(self.pose[0][:2]-black_totem[:2]))*TOTEM_MARGIN)+black_totem[:2]),0)
yield self.move.set_position(black_totem_waypoint).look_at(np.append(black_totem[:2], 0)).go()
            # turn 90deg to the right so we circle prograde
            yield self.move.yaw_right(math.pi/2).go()
            # we circle counter clock-wise .5 revolutions
circle = self.move.circle_point([black_totem[0], black_totem[1], 0], "ccw", .5)
yield circle.go()
        # go back to where we were dropped off to listen for hydrophones
yield self.move.set_position(np.append(start_pose,0)).go()
@txros.util.cancellableInlineCallbacks
def go_around_black_totem(self):
TOTEM_MARGIN = 6 #m, distance to pass behind the totem
start_pose = self.pose[0][:2]
locations = yield self.get_bouy_go_round_target()
waypoint = np.append((((start_pose-locations[1])/np.linalg.norm(start_pose-locations[1]))*TOTEM_MARGIN)+locations[1], 0)
yield self.move.set_position(waypoint).look_at(np.append(locations[1], 0)).go()
circle = self.move.circle_point([locations[1][0], locations[1][1], 0], "ccw", 1)
yield circle.go()
yield self.move.yaw_right(math.pi*2).go()
@classmethod
def decode_parameters(cls, parameters):
argv = parameters.split()
return cls.parser.parse_args(argv)
@classmethod
def init(cls):
parser = ThrowingArgumentParser(description='start gate marshall',
usage='''start gate marshall''')
parser.add_argument('-q', '--quals', action='store_true',
help='set for quals')
cls.parser = parser
@txros.util.cancellableInlineCallbacks
def run (self, parameters):
if parameters.quals:
yield self.go_around_black_totem()
else:
yield self.bouy_go_round()
|
StarcoderdataPython
|
3353878
|
<reponame>mverleg/django_misc
import settings
from django.http import HttpResponseRedirect
def secure_redirect(request, url = None):
"""
turns a request into a securified redirect
"""
    if url is None:
        url = request.get_full_path()
    url = request.build_absolute_uri(url)
url = url.replace('http://', 'https://')
if settings.DEBUG:
url = url.replace(':8000/', ':8443/')
return HttpResponseRedirect(url)
def desecure_redirect(request, url = None):
"""
turns a request into a desecurified redirect
"""
if url is None:
url = request.get_full_path()
url = request.build_absolute_uri(url)
url = url.replace('https://', 'http://')
if settings.DEBUG:
url = url.replace(':8443/', ':8000/')
return HttpResponseRedirect(url)
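# Hedged usage sketch (not part of the original module): a hypothetical Django
# view that forces HTTPS for a sensitive page via secure_redirect().
#
# def checkout(request):
#     if not request.is_secure():
#         return secure_redirect(request)
#     ...  # continue rendering the page over HTTPS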
|
StarcoderdataPython
|
187071
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""bibgrep: Grep for bib(la)tex files.
To get all articles where the author contains 'Johnson' and the article is from
2010 or beyond:
>>> bibgrep --entry="article" --field="author~Johnson" --field="year>=2010"
The key, entry and field arguments take strings in a mini query language. For
keys and entries, the format is:
"[^][~]<key>"
"[^][~]<bibtype>"
where <key> is something like 'Johnson2002' and <bibtype> is 'article',
'inproceedings' etc. The caret denotes negation, and the tilde denotes
approximate matches instead of exact. For example, '~ceed' would match the
'proceedings', 'inproceedings' and 'mvproceedings' entries. The language for
fields is slightly more involved:
Field occurrence: "[^]<field_name>"
Field values : "[^]<field_name>(=|~)<value>"
Field range : "[^]<field_name>(<|>|<=|>=|=)<numeric_value>"
Field range : "[^]<numeric_value>(<|<=)<field_name>(<|<=)<numeric_value>"
Field range : "[^]<field_name>=<numeric_value>-<numeric_value>"
All punctuation has the same meaning as for keys and entries. Here are some
example queries:
Find entries that have a publisher field.
>>> bibgrep --field="publisher"
Find entries that do not have a note field.
>>> bibgrep --field="^note"
Find entries where the author is exactly '<NAME>' and the title contains
the word 'concurrency'.
>>> bibgrep --field="author=<NAME>" --field="title~concurrency"
Find entries that were published in 2001 or later and whose volume is not
between 11 and 50.
>>> bibgrep --field="year>=2001" --field="^10<volume<=50"
Find entries that were published between 2000 and 2018 inclusive.
>>> bibgrep --field="year=2000-2018"
"""
import argparse
import bibpy
import bibpy.parser
import bibpy.tools
import itertools
import operator
import re
import os
import signal
import sys
__author__ = bibpy.__author__
__version__ = '0.1.0'
__license__ = bibpy.__license__
# TODO: How to combine predicates with '&&' and '||'?
# TODO: Make approximate matches use regexes
_DESCRIPTION = """Grep bib(la)tex files satisfying some predicates."""
_NAME_TO_OPERATOR = {
'<': operator.lt,
'>': operator.gt,
'<=': operator.le,
'>=': operator.ge,
'=': operator.eq
}
def sigterm_handler(signum, stack_frame):
"""Handle SIGTERM signal."""
sys.exit('bibgrep: Caught SIGTERM')
# Set up a signal handler for SIGTERM
signal.signal(signal.SIGTERM, sigterm_handler)
class BibgrepError(Exception):
"""Exception class for errors specific to bibgrep."""
pass
def approx_field_predicate(field, value, args):
"""Return a function that does an approximate match of a string."""
flags = re.I if args.ignore_case else 0
def _approx_match(entry):
field_value = getattr(entry, field, None)
if field_value is None:
return False
else:
return re.search(value, field_value, flags)
return _approx_match
def exact_field_predicate(field, value, args):
"""Return a function that does an exact match of a string."""
func = str.lower if args.ignore_case else str
def _exact_match(entry):
return func(getattr(entry, field, '')) == func(value)
return _exact_match
def field_occurrence_predicate(field, args):
"""Return a function that checks for the occurrence of a field."""
newfield = field.lower() if args.ignore_case else field
def _field_occurrence(entry):
return bool(getattr(entry, newfield, None))
return _field_occurrence
def negate(func):
"""Return a new function that negates the boolean result of func."""
def _negate(entry):
return not func(entry)
return _negate
def operator_from_string(op_name):
"""Return an operator function from its string equivalent."""
op = _NAME_TO_OPERATOR.get(op_name, None)
if op is None:
raise BibgrepError("Invalid operator '{0}'".format(op_name))
return op
def comparison_predicate(field, op_name, value):
"""Return a predicate function that compares a field to a value."""
operator = operator_from_string(op_name)
def _comparison_predicate(entry):
if not field:
return False
attr = getattr(entry, field, None)
try:
return attr and operator(int(attr), int(value))
except ValueError:
raise BibgrepError(
"Cannot compare '{0}' with '{1}'".format(value, attr)
)
return _comparison_predicate
def check_and_get_bounds(lower, upper):
"""Convert string bounds to integers and check if lower <= upper."""
try:
ilower = int(lower)
iupper = int(upper)
except ValueError:
raise BibgrepError('Bounds cannot be converted to integers')
if ilower > iupper:
raise BibgrepError('Lower bound must be <= upper bound')
return ilower, iupper
def interval_predicate(field, lower, upper):
"""Return a predicate function that checks if a field is in an interval."""
ilower, iupper = check_and_get_bounds(lower, upper)
def _interval_predicate(entry):
if not field:
return False
attr = getattr(entry, field, None)
try:
return attr and ilower <= int(attr) <= iupper
except ValueError:
raise BibgrepError(
"Cannot compare '{0}' with interval [{1}, {2}]"
.format(attr, lower, upper)
)
return _interval_predicate
def range_predicate(lower, op_name1, field, op_name2, upper):
"""Return a predicate function that checks if a field is in a range.
Example: '1 <= series < 10'
"""
ilower, iupper = check_and_get_bounds(lower, upper)
operator1 = operator_from_string(op_name1)
operator2 = operator_from_string(op_name2)
def _range_predicate(entry):
attr = getattr(entry, field, None)
try:
if attr:
iattr = int(attr)
return operator1(ilower, iattr) and operator2(iattr, iupper)
except ValueError:
raise BibgrepError(
"Cannot compare '{0}' with range {1} {2} field {3} {4}"
.format(attr, lower, op_name1, op_name2, upper)
)
return _range_predicate
def construct_key_entry_predicate(name, key, tokens, args):
"""Return a key/entry predicate to test if they are of given types."""
f = None
prefix_op = tokens[0] if tokens[0] else ''
if prefix_op and not set(prefix_op).issubset(set('^~')):
raise BibgrepError("Invalid field operator(s) '{0}'".format(tokens[0]))
if '~' in prefix_op:
f = approx_field_predicate(key, tokens[1], args)
else:
f = exact_field_predicate(key, tokens[1], args)
if '^' in prefix_op:
f = negate(f)
return f
def construct_field_predicate(name, key, tokens, args):
"""Return a predicate function from the parsed tokens of a query."""
predicate = None
if name == 'value':
if tokens[2] == '=':
predicate = exact_field_predicate(tokens[1], tokens[-1], args)
elif tokens[2] == '~':
predicate = approx_field_predicate(tokens[1], tokens[-1], args)
else:
raise BibgrepError(
"Invalid field operator '{0}'".format(tokens[1])
)
elif name == 'occurrence':
predicate = field_occurrence_predicate(tokens[1], args)
elif name == 'comparison':
predicate = comparison_predicate(*tokens[1:])
elif name == 'interval':
predicate = interval_predicate(*tokens[1:])
elif name == 'range':
predicate = range_predicate(*tokens[1:])
elif name == 'value':
predicate = comparison_predicate(*tokens[1:])
else:
raise BibgrepError('Invalid field query syntax')
neg = tokens[0] == '^'
return negate(predicate) if neg else predicate
def construct_predicates(values, predicate_func, key, pred_combiner, args):
"""Return a list of predicates on entries."""
# Parse and compose all predicates on values given on the command line
predicates = []
for value in values:
name, tokens = bibpy.parser.parse_query(value, key)
predicates.append(predicate_func(name, key, tokens, args))
return bibpy.tools.compose_predicates(predicates, pred_combiner)
def filter_entries(entries, predicates):
"""Filter entries based on predicates on entry type, key and fields."""
for entry in entries:
if any(pred(entry) for pred in predicates):
yield entry
def unique_entries(entries):
"""Remove duplicates from a set of entries."""
return [k for k, _ in itertools.groupby(entries)]
def process_file(source, unique, predicates):
"""Process a single bibliographic file."""
entries = bibpy.read_file(source).entries
if unique:
entries = unique_entries(entries)
return filter_entries(entries, predicates)
def main():
parser = argparse.ArgumentParser(prog='bibgrep', description=_DESCRIPTION)
parser.add_argument(
'-v', '--version',
action='version',
version=bibpy.tools.format_version(__version__)
)
parser.add_argument(
'-e', '--entry',
action='append',
help="Print entries matching an entry type (e.g. '@article')"
)
parser.add_argument(
'-k', '--key',
action='append',
dest='keys',
help='Print entries with exact or similar key. For example, '
"--key='article1 | article2' prints the entries with keys that "
'match either'
)
parser.add_argument(
'-f', '--field',
type=str,
action='append',
dest='fields',
help='Print entries that satisfy a list of field constraints'
)
parser.add_argument(
'-c', '--count',
action='store_true',
help='Only a count of selected lines is written to standard output. '
'If -n is given, prints a grand total'
)
parser.add_argument(
'-i', '--ignore-case',
action='store_true',
help='Perform case insensitive matching. By default, bibgrep is case '
             'sensitive'
)
parser.add_argument(
'-r', '--recursive',
action='store_true',
help='Recursively search listed subdirectories'
)
parser.add_argument(
'-u', '--unique',
action='store_true',
help='Print only one entry if duplicates are encountered'
)
parser.add_argument(
'-n', '--no-filenames',
action='store_true',
help='Do not print filename headers before each entry when --count is '
'given. Overrides --abbreviate-filenames'
)
parser.add_argument(
'-a', '--abbreviate-filenames',
action='store_true',
help='Display only filename and not the full path when --count is '
             'given'
)
args, rest = parser.parse_known_args()
key_predicate = bibpy.tools.always_false
entry_predicate = bibpy.tools.always_false
field_predicate = bibpy.tools.always_false
try:
if args.keys:
key_predicate = construct_predicates(
args.keys,
construct_key_entry_predicate,
'bibkey',
any,
args
)
if args.entry:
bibtypes = [
e for es in args.entry for e in map(str.strip, es.split(','))
]
entry_predicate = construct_predicates(
bibtypes,
construct_key_entry_predicate,
'bibtype',
any,
args
)
if args.fields:
field_predicate = construct_predicates(
args.fields,
construct_field_predicate,
'field',
any,
args
)
except (BibgrepError, bibpy.error.ParseException) as ex:
sys.exit('{0}'.format(ex))
if not args.keys and not args.entry and not args.fields:
# If no constraints are defined, all entries pass
key_predicate = bibpy.tools.always_true
entry_predicate = bibpy.tools.always_true
field_predicate = bibpy.tools.always_true
filtered_entries = []
total_count = 0
predicates = [entry_predicate, key_predicate, field_predicate]
try:
if not rest:
            filtered_entries = list(process_file(sys.stdin, args.unique, predicates))
if args.count:
num_entries = len(list(filtered_entries))
total_count += num_entries
filtered_entries = []
else:
bib_files = bibpy.tools.iter_files(rest, '*.bib', args.recursive)
for filename in bib_files:
filtered_entries += list(
process_file(filename, args.unique, predicates)
)
if args.count:
if args.no_filenames:
total_count += len(filtered_entries)
else:
if args.abbreviate_filenames:
filename = os.path.basename(filename)
print('{0}:{1}'.format(
filename, len(filtered_entries))
)
filtered_entries = []
except (IOError, bibpy.error.ParseException, BibgrepError) as ex:
sys.exit('bibgrep: {0}'.format(ex))
except KeyboardInterrupt:
sys.exit(1)
if args.count and (args.no_filenames or not rest):
print(total_count)
if filtered_entries:
# Write all filtered entries to sys.stdout
print(bibpy.write_string(filtered_entries))
bibpy.tools.close_output_handles()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3277576
|
<gh_stars>10-100
import argparse
import torch
from copy import deepcopy
from pprint import pprint
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
pass
from torch.nn.parallel import DistributedDataParallel as torch_DDP
from zerovl.core import init_device, cfg, update_cfg
from zerovl.datasets.clip.clip_dataset import build_torch_valid_loader
from zerovl.models import PIPELINE
from zerovl.utils import build_from_cfg, ENV, logger, all_gather
from zerovl.core.hooks.checkpoint import get_dist_state_dict
from zerovl.tasks.clip.hooks.utils import RetrievalMetric, IndexedEmbInfo
from zerovl.tasks.clip.config import task_cfg_init_fn, update_clip_config
@ENV.root_only
def calculate_retrieval_metrics_and_log(collection_dict, cuda_eval=True):
retrieval = RetrievalMetric()
index = collection_dict['image_id'] if collection_dict['dataset_name'] != 'imagenet' else collection_dict['caption_id']
image_embedding = collection_dict['image_embeddings']
text_embedding = collection_dict['text_embeddings']
if not cuda_eval:
index = index.cpu()
image_embedding = image_embedding.cpu()
text_embedding = text_embedding.cpu()
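    # Retrieval sets typically pair one image with several captions, so the image
    # side is de-duplicated by id; for the imagenet split the text (label) side is
    # de-duplicated instead.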
if collection_dict["dataset_name"] != 'imagenet':
img_emb = IndexedEmbInfo(emb_name='image',group_idx=index,emb_mat=image_embedding).unique()
text_emb = IndexedEmbInfo(emb_name='text',group_idx=index,emb_mat=text_embedding)
else:
img_emb = IndexedEmbInfo(emb_name='image',group_idx=index,emb_mat=image_embedding)
text_emb = IndexedEmbInfo(emb_name='text',group_idx=index,emb_mat=text_embedding).unique()
logger.info('{} validation: image emb shape: {}, text emb shape: {}'.format(collection_dict['dataset_name'], img_emb.emb_mat.shape, text_emb.emb_mat.shape))
i2t = retrieval(img_emb, text_emb)
t2i = retrieval(text_emb, img_emb)
i2t.update(t2i)
summary_dict = {}
for k, v in i2t.items():
k = k.replace('[image] to [text]', 'I2T')
k = k.replace('[text] to [image]', 'T2I')
k = k.replace(': ', '-')
summary_dict[k] = v * 100.0
summary_dict['RSUM'] = sum(list(summary_dict.values()))
summary_dict = {'{}_{}'.format(collection_dict['dataset_name'], k): v for k, v in summary_dict.items()}
logger.emph('-------------- {} Evaluation --------------'.format(collection_dict['dataset_name']))
pprint(summary_dict)
logger.emph('-------------- {} Evaluation --------------\n'.format(collection_dict['dataset_name']))
def evaluate_benchmark(loader, model, name):
collection_keys = ['image_embeddings', 'text_embeddings', 'image_id', 'caption_id']
epoch_state = {}
for key in collection_keys:
epoch_state[key] = []
for batch in loader:
batch_dict = {}
batch_dict['image'], batch_dict['input_ids'], batch_dict['attention_mask'], \
batch_dict['caption'], batch_dict['image_id'], batch_dict['caption_id'] = batch
batch_dict = {k: v.cuda(ENV.device, non_blocking=True) for k,v in batch_dict.items() if k not in ['caption']}
image_embeddings, text_embeddings = model(batch_dict, embeddings='all')
output = {'image_embeddings': image_embeddings,
'text_embeddings': text_embeddings,
'image_id': batch_dict['image_id'],
'caption_id': batch_dict['caption_id']}
for key in collection_keys:
epoch_state[key].append(output[key])
collection_dict = {}
for key in collection_keys:
value = torch.cat(epoch_state[key], 0)
value = torch.cat(all_gather(value), 0)
collection_dict[key] = value
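    # all_gather concatenates the per-rank shards; entries with image_id == -1
    # (presumably padding from the distributed sampler) are dropped below.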
valid_index = collection_dict['image_id'] > -1
collection_dict = {k: v[valid_index] for k,v in collection_dict.items()}
collection_dict['dataset_name'] = name
    calculate_retrieval_metrics_and_log(collection_dict)
def parse_args():
# Parse args with argparse tool
parser = argparse.ArgumentParser(description='ZeroVL Evaluation')
parser.add_argument('--cfg', type=str, required=True,
help='experiment configure file name')
parser.add_argument("--local_rank", type=int, default=0) # Compatibility with torch launch.py
parser.add_argument("--ckpt_path", type=str, default='')
args, cfg_overrided = parser.parse_known_args()
# Update config from yaml and argv for override
update_cfg(task_cfg_init_fn, args.cfg, cfg_overrided, preprocess_fn=update_clip_config)
# Record the global config and its snapshot (for easy experiment reproduction)
ENV.cfg = cfg
ENV.cfg_snapshot = deepcopy(cfg)
ENV.local_rank = args.local_rank
return args
def main():
# Configuration: user config updating and global config generating
args = parse_args()
# Initialization: set device, generate global config and inform the user library
init_device(cfg)
# Build model
model = build_from_cfg(cfg.model.name, cfg, PIPELINE).to(ENV.device)
if cfg.dist.name == 'apex':
model = DDP(model, delay_allreduce=False)
elif cfg.dist.name == 'torch':
model = torch_DDP(model,
device_ids=[ENV.local_rank],
output_device=ENV.local_rank,
find_unused_parameters=False)
else:
raise NotImplementedError
# Runner: building and running
checkpoint = torch.load(args.ckpt_path, map_location="cpu")
model_checkpoint = checkpoint['state_dict']
model.load_state_dict(get_dist_state_dict(model_checkpoint), strict=False)
model.eval()
logger.emph(f'Loaded ckpt path: {args.ckpt_path}')
for name in cfg.data.valid_name:
valid_loader = build_torch_valid_loader(cfg, name, mode='valid')
with torch.no_grad():
evaluate_benchmark(valid_loader, model, name)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3246499
|
s = 9
print(f'The value of the variable s is {s}')
|
StarcoderdataPython
|
102233
|
#%%
from user_agents import parse
user_agent = "Mozilla/5.0 (Linux; Android 10; SM-N960F Build/QP1A.190711.020; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/78.0.3904.62 XWEB/2889 MMWEBSDK/20210902 Mobile Safari/537.36 MMWEBID/1696 MicroMessenger/8.0.15.2001(0x28000F41) Process/to"
ua = parse(user_agent)
print(ua.browser)
print(ua.os)
# %%
import csv
from user_agents import parse
reader = csv.reader(open('uaList.csv', 'r',encoding='UTF-8',errors='ignore'))
writer = csv.writer(open('uaOutput.csv', 'w'))
headers = next(reader)
headers.append("Browser")
headers.append("OperatingSystem")
writer.writerow(headers)
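# For each input row, parse the user-agent string in the first column and append
# the detected browser and operating system as two extra columns.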
for row in reader:
temp = parse(row[0])
row.append(str(temp.browser))
row.append(str(temp.os))
writer.writerow(row)
# os = next(next(reader))
# os.append("OperatingSystem")
# writer.writerow(os)
# for row in reader:
# temp = parse(row[0])
# row.append(str(temp.os))
# writer.writerows(row)
# print(row)
# %%
|
StarcoderdataPython
|
43969
|
import time
from tasks.capp import app
from others.affine_applications import MoveApps
@app.task(name="sdc.move11", bind=True)
def task_1(self, x):
time.sleep(1)
return MoveApps(":move", x).foo()
@app.task(name="sdc.move12", bind=True)
def task_2(self, x):
return MoveApps(":move", x + 1).foo()
|
StarcoderdataPython
|
1716155
|
<reponame>umarcor/litex<filename>test/test_led.py
#
# This file is part of LiteX.
#
# Copyright (c) 2022 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
import unittest
from migen import *
from litex.soc.cores.led import WS2812
class TestWS2812(unittest.TestCase):
test_clk_freqs = [75e6, 50e6, 25e6]
def generator(self, dut, led_signal, led_data, sys_clk_freq, iterations):
# Error Margin from WS2812 datasheet.
error_margin = 150e-9
# Cap on how long a sequence will be evaluated.
max_cycles_per_seq = int(dut.trst * sys_clk_freq * 2)
# Verify initial reset.
rst_cycles = 0
for _ in range(max_cycles_per_seq):
if (yield led_signal) != 0:
break
rst_cycles += 1
yield
rst_time = rst_cycles / sys_clk_freq
assert rst_time >= dut.trst
# Verify generated data pulses.
length = len(led_data)
for _ in range(iterations):
for i_num, num in enumerate(led_data, start=1):
for idx_bit, bit in enumerate(TestWS2812.to_bits(num), start=1):
exp_high, exp_low = {
0 : (dut.t0h, dut.t0l),
1 : (dut.t1h, dut.t1l)
}[bit]
# On end of chain, add reset time to exp_low
if i_num == length and idx_bit == 24:
exp_low += dut.trst
# Verify high cycle.
high_cycles = 0
for _ in range(max_cycles_per_seq):
if (yield led_signal) != 1:
break
high_cycles += 1
yield
high_time = high_cycles / sys_clk_freq
assert high_time >= exp_high - error_margin
assert high_time <= exp_high + error_margin
# Verify low cycle.
low_cycles = 0
for _ in range(max_cycles_per_seq):
if (yield led_signal) != 0:
break
low_cycles += 1
yield
low_time = low_cycles / sys_clk_freq
assert low_time >= exp_low - error_margin
assert low_time <= exp_low + error_margin
    @staticmethod
    def to_bits(num, length=24):
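        # e.g. to_bits(0x100000) yields the 24 bits 0, 0, 0, 1 followed by
        # twenty zeros, most significant bit first.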
return ( int(x) for x in bin(num)[2:].zfill(length) )
def run_test(self, revision, sys_clk_freq):
led_signal = Signal()
led_data = [0x100000, 0x200000, 0x300000, 0x400000, 0x500000, 0x600000, 0x700000, 0x800000, 0x900000]
iterations = 2
dut = WS2812(led_signal, len(led_data), sys_clk_freq, revision=revision, init=led_data)
run_simulation(dut, self.generator(dut, led_signal, led_data, sys_clk_freq, iterations), vcd_name="sim.vcd")
def test_WS2812_old(self):
for sys_clk_freq in self.test_clk_freqs:
self.run_test("old", sys_clk_freq)
def test_WS2812_new(self):
for sys_clk_freq in self.test_clk_freqs:
self.run_test("new", sys_clk_freq)
|
StarcoderdataPython
|
482
|
<filename>hypnettorch/data/timeseries/preprocess_audioset.py
#!/usr/bin/env python3
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# title :data/timeseries/preprocess_audioset.py
# author :be
# contact :<EMAIL>
# created :31/03/2020
# version :1.0
# python_version :3.7
"""
Script to structure the audioset dataset, which can then be used via
:class:`data.timeseries.audioset_data.AudiosetData`.
The result of this script is available at
https://www.dropbox.com/s/07dfeeuf5aq4w1h/audioset_data_balanced?dl=0
If you want to recreate or modify this dataset, download the audioset data from
https://research.google.com/audioset/download.html
and extract the tar.gz into the following folder:
``datasets/sequential/audioset/audioset_download``.
Subsequently executing this script will create a pickle file containing the 100
class subset of audioset used in this study.
The dataset is stored in tensorflow files. Since we work with pytorch and there
is no utility to read tensorflow files, we extract the data and safe them as
numpy arrays in a pickle file.
Furthermore the data are preprocessed to fit our continual learning experiments.
The original dataset provides three subsets with different compositions of
samples and classes. Since we only work with a subset of classes and samples,
we load all available data and then filter and structure them according to our
criteria.
We use the same criteria as Kemker et al. Classes and samples are restricted in
the following way:
Classes:
- no restriction according to ontology file (parsed from ontology.json)
- no parent / child relationship (parsed from ontology.json)
- confidence level > 70% (data was copied from website into txt file)
- number of samples: we only take classes that have more samples than
a certain threshold
Samples:
- since samples can have multiple labels, we only use samples which
only belong to one of the classes we use
- we exclude samples that don't have the full length of 10 seconds
The chosen classes and samples are then split into train and test data and
saved to a pickle file.
"""
import numpy as np
import pickle
import tensorflow as tf
import os
import json
from warnings import warn
warn('The script was created for one time usage and has to be adapted when ' +
'reusing it. All paths specified here are absolute.')
# Tensorflow eager mode needs to be enabled for dataset mapping to work!
tf.enable_eager_execution()
# Set paths and parameters
data_dir = '../../datasets/sequential/audioset/'
download_dir = os.path.join(data_dir,'audioset_download')
fpath_conf_data = os.path.join(data_dir, 'confidence_data.csv')
fpath_label_inds = os.path.join(data_dir, 'class_labels_indices.csv')
fpath_ontology = os.path.join(data_dir, 'ontology.json')
target_path = os.path.join(data_dir, 'audioset_data_balanced.pickle')
n_classes = 100
n_sample = 1000
test_frac = 0.20
### Load data by serializing files and applying decode function.
def decode(serialized_example):
"""Decode data from TFRecord files.
Args:
serialized_example: serialized_example as created by
tf.data.TFRecordDataset
Returns:
(tuple): Tuple containing:
- **audio** (numpy.ndarray): Array of shape (10,128) representing one
sample with 10 timesteps and 128 features
- **label** (numpy.ndarray): Array of shape (1,) containing the class
of the corresponding sample
"""
sequence_features = {
'audio_embedding': tf.FixedLenSequenceFeature([], tf.string),
}
context_features = {
'start_time_seconds': tf.FixedLenFeature([], tf.float32),
'labels': tf.VarLenFeature(dtype=tf.int64),
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized_example,
sequence_features=sequence_features,
context_features=context_features
)
audio = tf.decode_raw(sequence_parsed['audio_embedding'], tf.uint8)
label = tf.cast(context_parsed['labels'], tf.int64)
return audio, label
# Apply decode function to all dataset entries using map function.
# Take files from all three data sets since we repartition anyway.
fpaths = []
for path, subdirs, files in os.walk(download_dir):
for name in files:
if 'tfrecord' in name:
fpaths.append(os.path.join(path, name))
# Create dataset and decode
dataset = tf.data.TFRecordDataset(fpaths)
dataset = dataset.map(decode)
# Extract data to lists
x = []
y = []
for d in dataset:
x.append(d[0].numpy())
y.append(tf.sparse.to_dense(tf.sparse.reorder(d[1])).numpy())
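# x now holds one (timesteps, 128) uint8 embedding array per clip and y the
# corresponding variable-length array of label indices.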
### Filter classes as described above.
# Parse confidence values
conf_data = {}
with open(fpath_conf_data) as f:
for line in f:
tokens = line.split()
# parse confidence
c = 0
for t in tokens:
            if t.find('%') != -1:
c = int(t[:-1])
# parse class name
n = ''
for t in tokens:
if t.find('%') == -1 and t != '-':
if n == '':
n = t
else:
n = n+' '+t
else:
break
conf_data.update({n:c})
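# conf_data now maps each class name to the integer confidence percentage parsed
# from the copied website table.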
# Parse class numbers from label csv file
l = -1
csv_data = {}
with open(fpath_label_inds) as f:
for line in f:
if l == -1:
l += 1
continue
tokens = line.split('"')
n = tokens[1]
csv_data.update({n:l})
l +=1
# Parse ontology info from json file
with open(fpath_ontology, 'r') as f:
json_data = json.load(f)
# Put all data into a single list.
all_data = []
for j in json_data:
if j['name'] in conf_data.keys():
class_info = {
'name' : j['name'],
'restricted' : j['restrictions'] != [],
'has_child' : j['child_ids'] != [],
'conf' : conf_data[j['name']],
'id' : csv_data[j['name']]
}
all_data.append(class_info)
# Filter classes
classes = []
for c in all_data:
if not c['restricted'] and not c['has_child'] and c['conf'] >= 70:
classes.append(c['id'])
### Filter the samples.
# Find samples that belong to only one of the potential classes.
# We also exclude some samples that don't have data for the full 10 seconds.
# First discard labels that are not in the set of potential classes
y_fil = []
for i in range(len(y)):
y_fil.append( np.intersect1d(y[i],classes))
# Find samples with one label
n_labels = np.asarray([len(y) for y in y_fil])
single_label_idx = np.where(n_labels == 1)[0]
# Find samples that are shorter than 10 seconds (to be excluded)
too_short = np.where(np.asarray([clip.shape[0] for clip in x]) != 10)[0]
# Construct the set of valid samples
valid_idx = np.setdiff1d(single_label_idx,too_short)
# Count number of valid samples for potential classes
y_single = np.asarray([y_fil[i][0] for i in valid_idx])
num_samples = [len(np.where(y_single == i)[0]) for i in classes]
# Take the n classes with the highest number of samples
n_sample_cutoff = np.sort(num_samples)[-n_classes]
class_idx = np.where(np.asarray(num_samples) >= n_sample_cutoff)[0]
our_classes = [classes[i] for i in class_idx]
### Filter the data again according the the chosen classes
y_fil = []
for i in range(len(y)):
y_fil.append( np.intersect1d(y[i],our_classes))
# Find samples that belong to only one of the potential classes
n_labels = np.asarray([len(y) for y in y_fil])
single_label_idx = np.where(n_labels == 1)[0]
# Find samples that are shorter than 10 seconds (to be excluded)
too_short = np.where(np.asarray([clip.shape[0] for clip in x]) != 10)[0]
# Construct the set of valid samples
valid_idx = np.setdiff1d(single_label_idx,too_short)
# Restructure data and relabel the classes to be between 0 and n_classes
y_data = [y_fil[i][0] for i in valid_idx]
y_data = [np.where(np.asarray(our_classes) == i)[0][0] for i in y_data]
y_data = np.asarray(y_data)
x_data = [x[i] for i in valid_idx]
x_data = np.stack(x_data)
### Split into test and train and restrict the number of samples per class
np.random.seed(42)
n_train = int(n_sample * (1-test_frac))
n_test = int(n_sample * test_frac)
train_ind = []
test_ind = []
for i in range(n_classes):
sample_idx = np.where(y_data == i)[0]
n_sample_class = len(sample_idx)
rand_idx = np.arange(n_sample_class)
np.random.shuffle(rand_idx)
train_ind.extend(sample_idx[rand_idx[0:n_train]])
test_ind.extend(sample_idx[rand_idx[n_train:n_sample]])
train_ind = np.asarray(train_ind)
test_ind = np.asarray(test_ind)
sub_sample_idx = np.hstack((train_ind,test_ind))
x_data_sub = x_data[sub_sample_idx,:,:]
y_data_sub = y_data[sub_sample_idx]
train_ind = np.arange(0,len(train_ind))
test_ind = np.arange(len(train_ind),len(train_ind)+len(test_ind))
### Save data
with open(target_path, 'wb') as f:
pickle.dump([x_data_sub, y_data_sub, train_ind, test_ind], f)
|
StarcoderdataPython
|
1727732
|
import gemmi
import unittest
from swamp.utils import *
from conkit.core import ContactMap, Contact, Sequence
class UtilsTestCase(unittest.TestCase):
def test_1(self):
contact_map = ContactMap("test")
contact_map.add(Contact(1, 4, 1.0))
contact_map.add(Contact(2, 4, 1.0))
contact_map.add(Contact(5, 8, 1.0))
contact_map.add(Contact(3, 6, 1.0))
contact_map.sequence = Sequence("TEST", "ACDEFGHK")
inverted = invert_contactmap(contact_map)
self.assertListEqual([x.id for x in inverted], [(8, 5), (7, 5), (4, 1), (6, 3)])
def test_2(self):
pdb_content = """CRYST1 73.330 73.330 163.520 90.00 90.00 90.00 P 41 2 2 8
ATOM 760 N VAL A 100 17.668 61.385 96.142 1.00 36.12 N
ANISOU 760 N VAL A 100 4189 5832 3703 370 -20 96 N
ATOM 761 CA VAL A 100 16.510 62.175 95.720 1.00 34.76 C
ANISOU 761 CA VAL A 100 3981 5676 3550 300 62 84 C
ATOM 762 C VAL A 100 16.924 63.214 94.641 1.00 39.15 C
ANISOU 762 C VAL A 100 4461 6274 4139 307 77 -9 C
ATOM 763 O VAL A 100 16.205 63.379 93.656 1.00 38.11 O
ANISOU 763 O VAL A 100 4288 6134 4059 275 108 -15 O
ATOM 764 CB VAL A 100 15.715 62.769 96.916 1.00 37.75 C
ANISOU 764 CB VAL A 100 4379 6111 3852 257 129 130 C
ATOM 765 CG1 VAL A 100 14.623 63.727 96.450 1.00 36.89 C
ANISOU 765 CG1 VAL A 100 4216 6025 3776 215 217 110 C
ATOM 766 CG2 VAL A 100 15.112 61.661 97.786 1.00 38.05 C
ANISOU 766 CG2 VAL A 100 4485 6113 3858 228 124 244 C
ATOM 767 N GLY A 101 18.105 63.825 94.809 1.00 36.09 N
ANISOU 767 N GLY A 101 4052 5944 3718 343 50 -70 N
ATOM 768 CA GLY A 101 18.670 64.791 93.867 1.00 34.68 C
ANISOU 768 CA GLY A 101 3805 5805 3566 340 63 -145 C
ATOM 769 C GLY A 101 18.998 64.193 92.514 1.00 37.41 C
ANISOU 769 C GLY A 101 4110 6137 3967 361 26 -177 C
ATOM 770 O GLY A 101 18.818 64.843 91.481 1.00 35.74 O
ANISOU 770 O GLY A 101 3843 5954 3784 335 57 -198 O
ATOM 771 N VAL A 102 19.463 62.931 92.513 1.00 34.96 N
ANISOU 771 N VAL A 102 3830 5784 3671 410 -36 -177 N
ATOM 772 CA VAL A 102 19.819 62.187 91.297 1.00 34.18 C
ANISOU 772 CA VAL A 102 3699 5666 3623 436 -67 -233 C
ATOM 773 C VAL A 102 18.531 61.710 90.593 1.00 37.41 C
ANISOU 773 C VAL A 102 4118 6023 4073 373 -37 -212 C
ATOM 774 O VAL A 102 18.409 61.831 89.370 1.00 35.53 O
ANISOU 774 O VAL A 102 3822 5829 3850 347 -28 -263 O
ATOM 775 CB VAL A 102 20.820 61.047 91.624 1.00 38.45 C
ANISOU 775 CB VAL A 102 4268 6161 4180 528 -137 -249 C
ATOM 776 CG1 VAL A 102 21.126 60.185 90.399 1.00 38.40 C
ANISOU 776 CG1 VAL A 102 4237 6117 4236 561 -155 -331 C
ATOM 777 CG2 VAL A 102 22.111 61.608 92.229 1.00 37.89 C
ANISOU 777 CG2 VAL A 102 4155 6188 4054 582 -172 -273 C
ATOM 778 N ILE A 103 17.542 61.236 91.381 1.00 34.36 N
ANISOU 778 N ILE A 103 3794 5569 3692 337 -19 -133 N
ATOM 779 CA ILE A 103 16.260 60.794 90.844 1.00 33.66 C
ANISOU 779 CA ILE A 103 3704 5449 3636 259 10 -110 C
ATOM 780 C ILE A 103 15.544 61.966 90.187 1.00 37.70 C
ANISOU 780 C ILE A 103 4134 6061 4131 214 61 -102 C
ATOM 781 O ILE A 103 15.031 61.813 89.070 1.00 37.84 O
ANISOU 781 O ILE A 103 4097 6118 4163 169 63 -129 O
ATOM 782 CB ILE A 103 15.417 60.020 91.896 1.00 37.07 C
ANISOU 782 CB ILE A 103 4214 5798 4074 220 22 -17 C
ATOM 783 CG1 ILE A 103 16.062 58.633 92.170 1.00 37.34 C
ANISOU 783 CG1 ILE A 103 4330 5703 4156 266 -32 -15 C
ATOM 784 CG2 ILE A 103 13.920 59.876 91.451 1.00 37.66 C
ANISOU 784 CG2 ILE A 103 4258 5883 4167 115 67 14 C
ATOM 785 CD1 ILE A 103 15.598 57.949 93.432 1.00 47.42 C
ANISOU 785 CD1 ILE A 103 5694 6900 5425 246 -28 105 C
ATOM 786 N LEU A 104 15.594 63.153 90.831 1.00 33.68 N
ANISOU 786 N LEU A 104 3611 5596 3591 231 101 -71 N
ATOM 787 CA LEU A 104 14.977 64.376 90.307 1.00 33.36 C
ANISOU 787 CA LEU A 104 3499 5623 3552 211 157 -47 C
ATOM 788 C LEU A 104 15.511 64.746 88.917 1.00 34.07 C
ANISOU 788 C LEU A 104 3518 5780 3648 214 139 -90 C
ATOM 789 O LEU A 104 14.708 65.012 88.027 1.00 32.90 O
ANISOU 789 O LEU A 104 3303 5692 3505 182 158 -56 O
ATOM 790 CB LEU A 104 15.136 65.530 91.310 1.00 33.69 C
ANISOU 790 CB LEU A 104 3558 5667 3576 234 207 -34 C
ATOM 791 CG LEU A 104 14.360 66.816 91.054 1.00 38.02 C
ANISOU 791 CG LEU A 104 4051 6243 4152 231 283 6 C
ATOM 792 CD1 LEU A 104 12.849 66.546 90.864 1.00 37.78 C
ANISOU 792 CD1 LEU A 104 3981 6236 4138 203 317 79 C
ATOM 793 CD2 LEU A 104 14.564 67.790 92.218 1.00 40.00 C
ANISOU 793 CD2 LEU A 104 4341 6468 4390 248 340 -14 C
ATOM 794 N VAL A 105 16.858 64.715 88.727 1.00 31.14 N
ANISOU 794 N VAL A 105 3147 5419 3264 251 101 -159 N
ATOM 795 CA VAL A 105 17.526 64.969 87.443 1.00 30.96 C
ANISOU 795 CA VAL A 105 3053 5478 3231 250 87 -207 C
ATOM 796 C VAL A 105 17.042 63.944 86.411 1.00 35.03 C
ANISOU 796 C VAL A 105 3543 6022 3746 218 59 -243 C
ATOM 797 O VAL A 105 16.709 64.332 85.295 1.00 35.34 O
ANISOU 797 O VAL A 105 3507 6161 3761 183 71 -233 O
ATOM 798 CB VAL A 105 19.074 64.920 87.574 1.00 35.01 C
ANISOU 798 CB VAL A 105 3565 6008 3728 296 51 -284 C
ATOM 799 CG1 VAL A 105 19.755 64.761 86.208 1.00 34.91 C
ANISOU 799 CG1 VAL A 105 3479 6090 3696 293 33 -351 C
ATOM 800 CG2 VAL A 105 19.596 66.143 88.285 1.00 34.55 C
ANISOU 800 CG2 VAL A 105 3509 5958 3662 295 82 -268 C
ATOM 801 N GLY A 106 17.024 62.660 86.802 1.00 31.90 N
ANISOU 801 N GLY A 106 3210 5538 3374 226 26 -284 N
ATOM 802 CA GLY A 106 16.603 61.553 85.954 1.00 32.79 C
ANISOU 802 CA GLY A 106 3316 5644 3498 184 4 -348 C
ATOM 803 C GLY A 106 15.165 61.656 85.488 1.00 38.66 C
ANISOU 803 C GLY A 106 4015 6444 4230 96 28 -295 C
ATOM 804 O GLY A 106 14.840 61.246 84.373 1.00 39.30 O
ANISOU 804 O GLY A 106 4041 6603 4287 40 16 -354 O
ATOM 805 N CYS A 107 14.292 62.202 86.336 1.00 34.66 N
ANISOU 805 N CYS A 107 3520 5918 3732 83 64 -190 N
ATOM 806 CA CYS A 107 12.871 62.327 86.029 1.00 33.96 C
ANISOU 806 CA CYS A 107 3372 5898 3635 11 89 -126 C
ATOM 807 C CYS A 107 12.559 63.546 85.180 1.00 36.79 C
ANISOU 807 C CYS A 107 3622 6399 3958 17 112 -65 C
ATOM 808 O CYS A 107 11.462 63.650 84.641 1.00 34.93 O
ANISOU 808 O CYS A 107 3306 6262 3703 -34 121 -13 O
ATOM 809 CB CYS A 107 12.047 62.300 87.309 1.00 34.50 C
ANISOU 809 CB CYS A 107 3487 5894 3726 0 126 -44 C
ATOM 810 SG CYS A 107 12.085 60.707 88.159 1.00 39.40 S
ANISOU 810 SG CYS A 107 4227 6357 4386 -36 99 -71 S
ATOM 811 N CYS A 108 13.515 64.471 85.058 1.00 35.29 N
ANISOU 811 N CYS A 108 3424 6224 3761 77 122 -62 N
ATOM 812 CA CYS A 108 13.303 65.682 84.256 1.00 35.81 C
ANISOU 812 CA CYS A 108 3398 6402 3805 88 148 20 C
ATOM 813 C CYS A 108 13.248 65.386 82.748 1.00 39.76 C
ANISOU 813 C CYS A 108 3808 7060 4239 38 113 -8 C
ATOM 814 O CYS A 108 13.805 64.369 82.295 1.00 39.23 O
ANISOU 814 O CYS A 108 3760 7001 4146 8 73 -131 O
ATOM 815 CB CYS A 108 14.373 66.725 84.577 1.00 35.44 C
ANISOU 815 CB CYS A 108 3377 6313 3777 143 174 28 C
ATOM 816 SG CYS A 108 14.063 67.645 86.106 1.00 38.80 S
ANISOU 816 SG CYS A 108 3867 6613 4263 189 240 86 S
ATOM 817 N PRO A 109 12.626 66.278 81.941 1.00 36.74 N
ANISOU 817 N PRO A 109 3324 6811 3825 33 128 102 N
ATOM 818 CA PRO A 109 12.651 66.072 80.487 1.00 36.54 C
ANISOU 818 CA PRO A 109 3203 6973 3707 -19 92 82 C
ATOM 819 C PRO A 109 14.051 66.355 79.917 1.00 39.90 C
ANISOU 819 C PRO A 109 3632 7433 4096 -2 88 25 C
ATOM 820 O PRO A 109 14.984 66.720 80.652 1.00 38.31 O
ANISOU 820 O PRO A 109 3500 7110 3946 47 109 3 O
ATOM 821 CB PRO A 109 11.626 67.086 79.984 1.00 38.91 C
ANISOU 821 CB PRO A 109 3395 7399 3989 -7 112 256 C
ATOM 822 CG PRO A 109 11.709 68.211 80.953 1.00 43.16 C
ANISOU 822 CG PRO A 109 3985 7792 4623 78 173 354 C
ATOM 823 CD PRO A 109 11.921 67.533 82.291 1.00 38.62 C
ANISOU 823 CD PRO A 109 3527 7039 4109 85 182 254 C
ATOM 824 N GLY A 110 14.178 66.209 78.606 1.00 36.96 N
ANISOU 824 N GLY A 110 3172 7251 3622 -51 63 1 N
ATOM 825 CA GLY A 110 15.412 66.500 77.896 1.00 36.79 C
ANISOU 825 CA GLY A 110 3125 7311 3543 -47 66 -43 C
ATOM 826 C GLY A 110 15.754 67.977 77.871 1.00 39.68 C
ANISOU 826 C GLY A 110 3468 7678 3930 -11 108 116 C
ATOM 827 O GLY A 110 14.932 68.832 78.237 1.00 37.90 O
ANISOU 827 O GLY A 110 3235 7404 3761 20 137 269 O
"""
to_extract = [1, 2, 3, 4, 5, 9]
hierarchy = gemmi.read_pdb_string(pdb_content)
renumber_hierarchy(hierarchy)
self.assertListEqual(list(range(1, 12)), [x.seqid.num for x in hierarchy[0][0]])
new_hierarchy = extract_hierarchy(hierarchy, to_extract)
self.assertListEqual([x.seqid.num for x in new_hierarchy[0][0]], to_extract)
inverted_hierarchy = invert_hiearchy(new_hierarchy)
self.assertListEqual([x.name for x in inverted_hierarchy[0][0]],
list(reversed([y.name for y in new_hierarchy[0][0]])))
merged_hierarchy = merge_into_ensemble((inverted_hierarchy, new_hierarchy))
self.assertEqual(len(merged_hierarchy), 2)
self.assertListEqual([x.name for x in merged_hierarchy[0][0]],
list([y.name for y in inverted_hierarchy[0][0]]))
self.assertListEqual([x.name for x in merged_hierarchy[1][0]],
list([y.name for y in new_hierarchy[0][0]]))
models = split_ensemble_into_models(merged_hierarchy)
self.assertListEqual([x.name for x in models[0][0][0]],
list([y.name for y in inverted_hierarchy[0][0]]))
self.assertListEqual([x.name for x in models[1][0][0]],
list([y.name for y in new_hierarchy[0][0]]))
merged = merge_hierarchies((inverted_hierarchy, new_hierarchy))
self.assertListEqual([x.seqid.num for x in merged[0][0]],
[x.seqid.num for x in inverted_hierarchy[0][0]]
+ [x.seqid.num for x in new_hierarchy[0][0]])
helices = ((1, 2, 3, 4), (5, 6, 7, 8))
fragment_cmap = extract_fragment_cmap(hierarchy, helices)
self.assertListEqual([x.id for x in fragment_cmap],
[(1, 5), (2, 5), (2, 6), (3, 5), (3, 6), (3, 7), (4, 5), (4, 6), (4, 7), (4, 8)])
|
StarcoderdataPython
|
159829
|
<reponame>petrLorenc/Labelling-Tool<filename>app/modules/loader/model_utils.py
import numpy as np
import torch
class ModelUtils:
"""
Group of function to help work with embeddings and models.
"""
@staticmethod
def load_glove_mapping(path):
"""
creates a dictionary mapping words to vectors from a file in glove format.
"""
        with open(path) as f:
all_words = f.readlines()
glove_embeddings = []
mapping = {}
for index, line in enumerate(all_words):
values = line.split()
word = values[0]
vector = np.array(values[1:], dtype='float32')
glove_embeddings.append(vector)
mapping[word] = index
return mapping, glove_embeddings
@staticmethod
def load_glove(path):
"""
creates a dictionary of words with theirs vectors from a file in glove format.
"""
embeddings_index = {}
        with open(path) as f:
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
return embeddings_index
@staticmethod
def add_categories_for_model(names_of_categories):
"""
For returning corresponding tags for categories
:param names_of_categories: List of categories
:return: New list of categories with B/I prefix
"""
new_names_of_categories = ["0"]
for category in names_of_categories:
new_names_of_categories.append("B-" + category[0])
new_names_of_categories.append("I-" + category[0])
return new_names_of_categories
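# Illustrative usage (the GloVe path is hypothetical):
#   mapping, vectors = ModelUtils.load_glove_mapping('glove.6B.100d.txt')
#   hello_vec = vectors[mapping['hello']]
#   ModelUtils.add_categories_for_model([('PER',), ('LOC',)])
#   # -> ['0', 'B-PER', 'I-PER', 'B-LOC', 'I-LOC']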
|
StarcoderdataPython
|
3301493
|
<gh_stars>1-10
#!/usr/bin/env python
__tempdir__ = '/tmp'
|
StarcoderdataPython
|
4806918
|
<filename>src/main.py
import src.preprocessing.preprocessing as prep
import src.classification.classification as classification
if __name__ == '__main__':
# Apply preprocessing steps and generate features then save
prep.main()
# Run classification algorithms to predict kinship relations
classification.main()
|
StarcoderdataPython
|
1703473
|
<filename>book/book/items.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from scrapy import Item, Field
class Subject(Item):
douban_id = Field()
type = Field()
class Meta(Item):
douban_id = Field()
slug = Field()
name = Field()
sub_name = Field()
alt_name = Field()
cover = Field()
summary = Field()
authors = Field()
author_intro = Field()
translators = Field()
series = Field()
publisher = Field()
publish_date = Field()
pages = Field()
price = Field()
binding = Field()
isbn = Field()
douban_id = Field()
douban_score = Field()
douban_votes = Field()
tags = Field()
class Comment(Item):
douban_id = Field()
douban_comment_id = Field()
douban_user_nickname = Field()
douban_user_avatar = Field()
douban_user_url = Field()
content = Field()
votes = Field()
|
StarcoderdataPython
|
3306129
|
<gh_stars>0
# Import system libraries
import os
import sys
# Import 3rd party libraries
import pytest
# Import custom libraries
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../"))
import lib.common.type_validator as type_validator
class TestBaseValidator(object):
def test_base_validator_is_none(self):
assert type_validator.BaseValidator.is_none(None) == True
assert type_validator.BaseValidator.is_none(False) == False
assert type_validator.BaseValidator.is_none(1) == False
assert type_validator.BaseValidator.is_none('str') == False
assert type_validator.BaseValidator.is_none([]) == False
assert type_validator.BaseValidator.is_none({}) == False
def test_base_validator_is_not_none(self):
assert type_validator.BaseValidator.is_not_none(None) == False
assert type_validator.BaseValidator.is_not_none(False) == True
assert type_validator.BaseValidator.is_not_none(1) == True
assert type_validator.BaseValidator.is_not_none('str') == True
assert type_validator.BaseValidator.is_not_none([]) == True
assert type_validator.BaseValidator.is_not_none({}) == True
class TestArrayValidator(object):
def test_array_validator_is_array(self):
assert type_validator.ArrayValidator.is_array(None) == False
assert type_validator.ArrayValidator.is_array(False) == False
assert type_validator.ArrayValidator.is_array(1) == False
assert type_validator.ArrayValidator.is_array('str') == False
assert type_validator.ArrayValidator.is_array([]) == True
assert type_validator.ArrayValidator.is_array(['a', 'b']) == True
assert type_validator.ArrayValidator.is_array({}) == False
def test_array_validator_is_empty_array(self):
assert type_validator.ArrayValidator.is_empty_array(None) == False
assert type_validator.ArrayValidator.is_empty_array(False) == False
assert type_validator.ArrayValidator.is_empty_array(1) == False
assert type_validator.ArrayValidator.is_empty_array('str') == False
assert type_validator.ArrayValidator.is_empty_array([]) == True
assert type_validator.ArrayValidator.is_empty_array(['a', 'b']) == False
assert type_validator.ArrayValidator.is_empty_array({}) == False
    def test_array_validator_is_non_empty_array(self):
assert type_validator.ArrayValidator.is_non_empty_array(None) == False
assert type_validator.ArrayValidator.is_non_empty_array(False) == False
assert type_validator.ArrayValidator.is_non_empty_array(1) == False
assert type_validator.ArrayValidator.is_non_empty_array('str') == False
assert type_validator.ArrayValidator.is_non_empty_array([]) == False
assert type_validator.ArrayValidator.is_non_empty_array(['a', 'b']) == True
assert type_validator.ArrayValidator.is_non_empty_array({}) == False
|
StarcoderdataPython
|
118725
|
#!/usr/bin/env python3
from functools import lru_cache
from typing import NamedTuple, Dict, Any
from datetime import datetime
from pathlib import Path
import json
import pytz
from mycfg import paths
# TODO Json type?
# TODO memoised properties?
# TODO lazy mode and eager mode?
# lazy is a bit nicer in terms of more flexibility and less processing?
# eager is a bit more explicit for error handling
class Scrobble(NamedTuple):
raw: Dict[str, Any]
@property
def dt(self) -> datetime:
ts = int(self.raw['date'])
return datetime.fromtimestamp(ts, tz=pytz.utc)
@property
def artist(self) -> str:
return self.raw['artist']
@property
def name(self) -> str:
return self.raw['name']
@property
def track(self) -> str:
return f'{self.artist} — {self.name}'
# TODO __repr__, __str__
# TODO could also be nice to make generic? maybe even depending on eagerness
# TODO memoise...?
# TODO watch out, if we keep the app running it might expire
def _iter_scrobbles():
last = max(Path(paths.lastfm.export_path).glob('*.json'))
# TODO mm, no timezone? hopefuly it's UTC
j = json.loads(last.read_text())
for raw in j:
yield Scrobble(raw=raw)
@lru_cache(1)
def get_scrobbles():
return list(sorted(_iter_scrobbles(), key=lambda s: s.dt))
def test():
assert len(get_scrobbles()) > 1000
|
StarcoderdataPython
|
61594
|
#Enquiry Form
name=input('Enter your First Name ')
Class=int(input('Enter your class '))
school=input('Enter your school name ')
address=input('Enter your Address ')
number=int(input('Enter your phone number '))
#print("Name- ",name,"Class- ",Class,"School- ",school,"Address- ",address,"Phone Number- ",number,sep='\n')
print("Name- ",name)
print("Class- ",Class)
print("School- ",school)
print("Address- ",address)
print("Phone number- ",number)
|
StarcoderdataPython
|
1619777
|
nome = str(input('Enter a name here: ')).strip().upper()
noma = nome.split()
nom1 = 'SILVA' in noma
print('Does the person have the name Silva?: {}'.format(nom1))
|
StarcoderdataPython
|
1757917
|
import aiohttp
import asyncio
import json
# global dict settings
urls = {
"shezheng": "http://",
"shehuang": 'http://',
}
# NOTE: can be remove in productive code
# this is ok too
# form = aiohttp.FormData()
# form.add_field('image',
# open('/home/mory/data/face_test/10.jpg', 'rb'),
# filename='image.png',
# content_type='image/jpeg')
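# Fan the same image file out to every configured service concurrently and collect
# the decoded JSON responses keyed by service name.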
def requests_all(filepath):
files = [{'image': open(filepath, 'rb')} for _ in urls]
results = {}
async def requests(key, files):
async with aiohttp.ClientSession() as session:
async with session.post(urls[key], data=files) as resp:
content = await resp.read()
results.update({key: json.loads(content)})
loop = asyncio.get_event_loop()
tasks = [asyncio.ensure_future(requests(key, files_)) for key, files_ in zip(urls, files)]
loop.run_until_complete(asyncio.wait(tasks))
return results
def face_parse(result):
result_ = result['results']
if not result_:
return {'infotype': 'shezheng', 'rate': 0.0, 'content': ''}
positive = [r for r in result_ if r['label'] != 'unknown']
if not positive:
return {'infotype': 'shezheng', 'rate': 0.0, 'content': ''}
positive.sort(key=lambda x: x['distance'])
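    # Heuristic score: a smaller best-match distance and additional recognised
    # faces raise the rate, which is capped at 1.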
rate = min(1, 1 - positive[0]['distance'] / 2 + (len(positive) - 1) * 0.1)
content = " ".join(p['label'] for p in positive)
return {'infotype':'shezheng', 'rate': rate, 'content': content}
def construct(results):
rets = []
for key, result in results.items():
if key == 'shezheng':
ret = face_parse(result)
elif key == 'shehuang':
ret = {'infotype':'shehuang', 'rate': result['pos'], 'content': ''}
rets.append(ret)
return rets
def multitask(filepath):
results = requests_all(filepath)
results = construct(results)
return results
if __name__ == '__main__':
import pprint
filepath = 'zhou.jpg'
result = multitask(filepath)
pprint.pprint(result)
|
StarcoderdataPython
|
3347221
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-25 14:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0004_auto_20170105_1426'),
]
operations = [
migrations.AlterField(
model_name='cfauser',
name='funder_name',
field=models.CharField(blank=True, default='', max_length=256),
),
migrations.AlterField(
model_name='cfauser',
name='osa_email',
field=models.EmailField(blank=True, help_text='The email address for contacting OSA when an app is funded.', max_length=254, null=True, verbose_name='OSA Contact Email'),
),
migrations.AlterField(
model_name='cfauser',
name='user',
field=models.OneToOneField(help_text='You must first create a user before adding them to the CFA.', on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='cfauser',
name='user_type',
field=models.CharField(choices=[('R', 'REQUESTER'), ('F', 'FUNDER')], max_length=1),
),
migrations.AlterField(
model_name='eligibilityanswer',
name='answer',
field=models.CharField(choices=[('Y', 'YES'), ('N', 'NO')], max_length=1),
),
migrations.AlterField(
model_name='event',
name='status',
field=models.CharField(choices=[('S', 'SAVED'), ('B', 'SUBMITTED'), ('F', 'FUNDED'), ('W', 'FOLLOWUP'), ('O', 'OVER')], max_length=1),
),
migrations.AlterField(
model_name='funderconstraint',
name='answer',
field=models.CharField(choices=[('Y', 'YES'), ('N', 'NO')], max_length=1),
),
migrations.AlterField(
model_name='item',
name='category',
field=models.CharField(choices=[('H', 'Honoraria/Services'), ('E', 'Equipment/Supplies'), ('F', 'Food/Drinks'), ('S', 'Facilities/Security'), ('T', 'Travel/Conference'), ('P', 'Photocopies/Printing/Publicity'), ('O', 'Other')], max_length=1),
),
]
|
StarcoderdataPython
|
135839
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.lookup_service import LookupServiceClient
from .services.lookup_service import LookupServiceAsyncClient
from .services.registration_service import RegistrationServiceClient
from .services.registration_service import RegistrationServiceAsyncClient
from .types.endpoint import Endpoint
from .types.lookup_service import ResolveServiceRequest
from .types.lookup_service import ResolveServiceResponse
from .types.namespace import Namespace
from .types.registration_service import CreateEndpointRequest
from .types.registration_service import CreateNamespaceRequest
from .types.registration_service import CreateServiceRequest
from .types.registration_service import DeleteEndpointRequest
from .types.registration_service import DeleteNamespaceRequest
from .types.registration_service import DeleteServiceRequest
from .types.registration_service import GetEndpointRequest
from .types.registration_service import GetNamespaceRequest
from .types.registration_service import GetServiceRequest
from .types.registration_service import ListEndpointsRequest
from .types.registration_service import ListEndpointsResponse
from .types.registration_service import ListNamespacesRequest
from .types.registration_service import ListNamespacesResponse
from .types.registration_service import ListServicesRequest
from .types.registration_service import ListServicesResponse
from .types.registration_service import UpdateEndpointRequest
from .types.registration_service import UpdateNamespaceRequest
from .types.registration_service import UpdateServiceRequest
from .types.service import Service
__all__ = (
'LookupServiceAsyncClient',
'RegistrationServiceAsyncClient',
'CreateEndpointRequest',
'CreateNamespaceRequest',
'CreateServiceRequest',
'DeleteEndpointRequest',
'DeleteNamespaceRequest',
'DeleteServiceRequest',
'Endpoint',
'GetEndpointRequest',
'GetNamespaceRequest',
'GetServiceRequest',
'ListEndpointsRequest',
'ListEndpointsResponse',
'ListNamespacesRequest',
'ListNamespacesResponse',
'ListServicesRequest',
'ListServicesResponse',
'LookupServiceClient',
'Namespace',
'RegistrationServiceClient',
'ResolveServiceRequest',
'ResolveServiceResponse',
'Service',
'UpdateEndpointRequest',
'UpdateNamespaceRequest',
'UpdateServiceRequest',
)
|
StarcoderdataPython
|
112462
|
# Generated by Django 4.0.1 on 2022-04-07 01:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('model_api', '0005_remove_order_datetimecreated_alter_order__id_and_more'),
]
operations = [
migrations.AddField(
model_name='order',
name='dateTimeCreated',
field=models.DateTimeField(auto_now_add=True, null=True),
),
]
|
StarcoderdataPython
|
1707483
|
#!/usr/bin/env python
"""
The mosaic QGraphicsView. This is QGraphicsView in the mosaic
UI tab.
Hazen 10/18
"""
import os
from PyQt5 import QtCore, QtGui, QtWidgets
#import storm_control.steve.qtMultifieldView as multiView
import storm_control.steve.coord as coord
import storm_control.steve.steveItems as steveItems
class Crosshair(QtWidgets.QGraphicsItem):
"""
The cross-hair item to indicate the current stage position.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.ch_pen = QtGui.QPen(QtGui.QColor(0,0,255))
self.ch_size = 15.0
self.r_size = self.ch_size
self.ch_pen.setWidth(0)
self.setZValue(1001.0)
def boundingRect(self):
return QtCore.QRectF(-self.r_size,
-self.r_size,
2.0 * self.r_size,
2.0 * self.r_size)
def paint(self, painter, options, widget):
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.setPen(self.ch_pen)
painter.drawLine(-self.r_size, 0, self.r_size, 0)
painter.drawLine(0, -self.r_size, 0, self.r_size)
painter.drawEllipse(-0.5 * self.r_size,
-0.5 * self.r_size,
self.r_size,
self.r_size)
def setScale(self, scale):
"""
Resizes the cross-hair based on the current view scale.
"""
self.r_size = round(self.ch_size/scale)
class MosaicView(QtWidgets.QGraphicsView):
"""
Handles user interaction with the mosaic.
All coordinates are in pixels.
"""
extrapolateTakeMovie = QtCore.pyqtSignal(object)
mosaicViewContextMenuEvent = QtCore.pyqtSignal(object, object)
mosaicViewDropEvent = QtCore.pyqtSignal(list)
mosaicViewKeyPressEvent = QtCore.pyqtSignal(object, object)
mouseMove = QtCore.pyqtSignal(object)
scaleChange = QtCore.pyqtSignal(float)
def __init__(self, **kwds):
super().__init__(**kwds)
self.bg_brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
self.cross_hair = Crosshair()
self.currentz = 0.0
self.extrapolate_start = None
self.view_scale = 1.0
self.zoom_in = 1.2
self.zoom_out = 1.0 / self.zoom_in
self.showCrossHair(False)
self.setAcceptDrops(True)
self.setMinimumSize(QtCore.QSize(200, 200))
self.setBackgroundBrush(self.bg_brush)
self.setMouseTracking(True)
self.setRenderHint(QtGui.QPainter.SmoothPixmapTransform)
self.setToolTip("Hot keys are 'space','3','5','7','9','g','p','s'")
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
def dragMoveEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
def dropEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
# Initialize filenames variable
filenames = []
# Tranfer urls to filenames
for url in event.mimeData().urls():
filenames.append(str(url.toLocalFile()))
# Sort file names
filenames = sorted(filenames)
# Identify first type
firstType = os.path.splitext(filenames[0])[1]
# Check to see if all types are the same
sameType = []
for filename in filenames:
fileType = os.path.splitext(filename)[1]
sameType.append(fileType == firstType)
# If not, raise an error and abort load
if not all(sameType):
                QtWidgets.QMessageBox.information(self,
                                                  "Too many file types",
                                                  "")
return
self.mosaicViewDropEvent.emit(filenames)
def keyPressEvent(self, event):
"""
Handles key press events. Valid events are:
'space' Take a picture.
'3' Take a 3 picture spiral.
'5' Take a 5 picture spiral.
'7' Take a 7 picture spiral.
'9' Take a 9 picture spiral.
'g' Take a grid of pictures.
'p' Add the current cursor position to the list of positions.
's' Add the current cursor position to the list of sections.
"""
event_pos = self.mapFromGlobal(QtGui.QCursor.pos())
pointf = self.mapToScene(event_pos)
a_coord = coord.Point(pointf.x(), pointf.y(), "pix")
self.mosaicViewKeyPressEvent.emit(event, a_coord)
super().keyPressEvent(event)
def mouseMoveEvent(self, event):
"""
Tracks mouse movements across the view.
"""
pointf = self.mapToScene(event.pos())
self.mouseMove.emit(coord.Point(pointf.x(), pointf.y(), "pix"))
def mousePressEvent(self, event):
"""
If the left mouse button is pressed then the view is centered on the current cursor position.
If the right mouse button is pressed then the current location of the cursor in the scene
is recorded. If self.extrapolate_start exists then self.handleExtrapolatePict() is called,
otherwise the popup menu is displayed.
"""
if event.button() == QtCore.Qt.LeftButton:
self.centerOn(self.mapToScene(event.pos()))
elif event.button() == QtCore.Qt.RightButton:
pointf = self.mapToScene(event.pos())
a_coord = coord.Point(pointf.x(), pointf.y(), "pix")
if self.extrapolate_start:
self.extrapolateTakeMovie.emit(a_coord)
else:
self.mosaicViewContextMenuEvent.emit(event, a_coord)
def setCrossHairPosition(self, x_pos_um, y_pos_um):
x_pos = coord.umToPix(x_pos_um)
y_pos = coord.umToPix(y_pos_um)
self.cross_hair.setPos(x_pos, y_pos)
def setScale(self, scale):
self.view_scale = scale
transform = QtGui.QTransform()
transform.scale(scale, scale)
self.setTransform(transform)
self.cross_hair.setScale(scale)
def setScene(self, scene):
super().setScene(scene)
scene.addItem(self.cross_hair)
def showCrossHair(self, is_visible):
"""
True/False to show or hide the current stage position cross-hair.
"""
if (self.cross_hair.isVisible() != is_visible):
self.cross_hair.setVisible(is_visible)
def wheelEvent(self, event):
"""
Resizes the stage tracking cross-hair based on the current scale.
"""
if not event.angleDelta().isNull():
if (event.angleDelta().y() > 0):
self.view_scale = self.view_scale * self.zoom_in
self.setScale(self.view_scale)
else:
self.view_scale = self.view_scale * self.zoom_out
self.setScale(self.view_scale)
self.scaleChange.emit(self.view_scale)
event.accept()
#multiView.MultifieldView.wheelEvent(self, event)
#self.cross_hair.setScale(self.view_scale)
#
# The MIT License
#
# Copyright (c) 2018 <NAME>, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
|
StarcoderdataPython
|
1674048
|
from typing import Callable, Optional
import torch
from torch import nn
import constants
from rl_multi_agent import MultiAgent
from rl_multi_agent.experiments.experiment import ExperimentConfig
from rl_multi_agent.furnmove_episode_samplers import FurnMoveEpisodeSampler
from rl_multi_agent.furnmove_episodes import FurnMoveEgocentricEpisode
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoVision
class FurnMoveExperimentConfig(ExperimentConfig):
"""
All scenes + 1.0 reward for getting closer to the target
Discrete communication with NO discourage failed coordination loss
"""
# Env/episode config
num_agents = 2
screen_size = 84
episode_class = FurnMoveEgocentricEpisode
frame_type = "image"
episode_sampler_class = FurnMoveEpisodeSampler
visible_agents = True
include_depth_frame = False
include_move_obj_actions = True
headless = False
# Model config
state_repr_length = 512
talk_embed_length = 16
reply_embed_length = 16
agent_num_embed_length = 8
num_talk_symbols = 2
num_reply_symbols = 2
coordinate_actions = False
# Agent config
agent_class = MultiAgent
turn_off_communication = False
# Training config
max_ep_using_expert_actions = 0
train_scenes = constants.TRAIN_SCENE_NAMES[20:40]
valid_scenes = constants.VALID_SCENE_NAMES[5:10]
use_a3c_loss_when_not_expert_forcing = True
# Misc (e.g. visualization)
record_all_in_test = False
save_talk_reply_probs_path = None
include_test_eval_results = not torch.cuda.is_available()
return_likely_successfuly_move_actions = False
# Balancing
final_cnn_channels = 140
@classmethod
def get_init_train_params(cls):
init_train_params = {
"scenes": cls.train_scenes,
"num_agents": cls.num_agents,
"object_type": "Television",
"to_object_type": "Dresser",
"episode_class": cls.episode_class,
"player_screen_height": cls.screen_size,
"player_screen_width": cls.screen_size,
"max_ep_using_expert_actions": cls.max_ep_using_expert_actions,
"visible_agents": cls.visible_agents,
"include_depth_frame": cls.include_depth_frame,
"object_initial_height": 1.3,
"headless": cls.headless,
"max_distance_from_object": 0.76,
"max_episode_length": 500,
"episode_args": {
"include_move_obj_actions": cls.include_move_obj_actions,
"first_correct_coord_reward": 0.0,
"exploration_bonus": 0.0,
"failed_action_penalty": -0.02,
"step_penalty": -0.01,
# "increasing_rotate_penalty": True,
"joint_pass_penalty": -0.09,
"moved_closer_reward": 1.0,
# "moved_closer_reward": 0.50,
"min_dist_to_to_object": 0.26,
"frame_type": cls.frame_type,
"reached_target_reward": 1.0,
"return_likely_successfuly_move_actions": cls.return_likely_successfuly_move_actions,
"pass_conditioned_coordination": True,
},
}
return init_train_params
@classmethod
def get_init_valid_params(cls):
init_valid_params = {
**cls.get_init_train_params(),
"scenes": cls.valid_scenes,
"player_screen_height": 224,
"player_screen_width": 224,
"headless": False,
}
if cls.save_talk_reply_probs_path is not None:
init_valid_params[
"save_talk_reply_probs_path"
] = cls.save_talk_reply_probs_path
return init_valid_params
def __init__(self):
self._init_train_agent = self.episode_sampler_class(
**self.get_init_train_params()
)
self._init_test_agent = self.episode_sampler_class(
**self.get_init_valid_params()
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
def _create_model(**kwargs):
return A3CLSTMNStepComCoordinatedActionsEgoVision(
**{
**dict(
num_inputs_per_agent=3 + 1 * cls.include_depth_frame,
action_groups=cls.episode_class.class_available_action_groups(
include_move_obj_actions=cls.include_move_obj_actions
),
num_agents=cls.num_agents,
state_repr_length=cls.state_repr_length,
talk_embed_length=cls.talk_embed_length,
agent_num_embed_length=cls.agent_num_embed_length,
reply_embed_length=cls.reply_embed_length,
num_talk_symbols=cls.num_talk_symbols,
num_reply_symbols=cls.num_reply_symbols,
turn_off_communication=cls.turn_off_communication,
coordinate_actions=cls.coordinate_actions,
coordinate_actions_dim=13 if cls.coordinate_actions else None,
separate_actor_weights=False,
final_cnn_channels=cls.final_cnn_channels,
),
**kwargs,
}
)
return _create_model(**kwargs)
@classmethod
def create_agent(cls, **kwargs) -> MultiAgent:
return cls.agent_class(
model=kwargs["model"],
gpu_id=kwargs["gpu_id"],
include_test_eval_results=cls.include_test_eval_results,
use_a3c_loss_when_not_expert_forcing=cls.use_a3c_loss_when_not_expert_forcing,
record_all_in_test=cls.record_all_in_test,
include_depth_frame=cls.include_depth_frame,
resize_image_as=cls.screen_size,
)
@property
def init_train_agent(self) -> Callable:
return self._init_train_agent
@property
def init_test_agent(self) -> Callable:
return self._init_test_agent
@property
def saved_model_path(self) -> Optional[str]:
return None
def get_experiment():
return FurnMoveExperimentConfig()
|
StarcoderdataPython
|
3228474
|
<gh_stars>1-10
#
# Copyright 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import wiod.common
import common.more_exchange_rates as exrate
from jp import config
from common import matrixutils, sqlhelper
from common.dbconnect import db
from common.ioutils import IOMatrixGenerator, EnvMatrixGenerator
from common.counterfact import CounterfactGenerator
iogen = IOMatrixGenerator(
transaction_table=None,
from_sector_name="from_sector",
to_sector_name="to_sector",
value_column_name="value")
envgen = EnvMatrixGenerator(
envtable=None,
ind_col_name="sector",
series_col_name="series",
value_col_name="value")
sector_titles = {}
stmt = db.prepare("select distinct harmonized, description" +
" from jp.io_map_1990 order by harmonized")
for row in stmt():
sector_titles[row[0]] = row[1]
cfgen = CounterfactGenerator(iogen, envgen)
for series_code in config.env_series.keys():
cfgen.set_series_code(series_code)
for year in config.STUDY_YEARS:
iogen = cfgen.get_iogen()
iogen.set_table("%s.ixi_%d" % (config.SCHEMA, year))
iogen.set_fd_sectors(config.fd_sectors[year])
iogen.blacklist_from_sectors(config.from_blacklists[year])
iogen.blacklist_to_sectors(config.to_blacklists[year])
iogen.set_pce_col(config.pce_sector[year])
iogen.set_export_col(config.export_sector[year])
exchange_rate = wiod.common.get_exchange_rate("JPN", year)
if exchange_rate is None:
exchange_rate = 1 / exrate.get_rate("jp", year)
# tons co2 / (M jpy * exchange rate) = tons co2 / M usd
# GJ / (M jpy * exchange rate) = GJ / M usd
        # exchange_rate * 1000 gives us kilotons and terajoules
iogen.set_exchange_rate(exchange_rate * 1000)
envgen = cfgen.get_envgen()
envgen.set_table("%s.env_%d" % (config.SCHEMA, year))
env_harmonizer = matrixutils.generate_selector_matrix(
"%s.env_map_%d" % (config.SCHEMA, year),
envgen.get_sectors(), "env_sector", "harmonized")
io_harmonizer = matrixutils.generate_selector_matrix(
"%s.io_map_%d" % (config.SCHEMA, year),
iogen.get_sectors(), "io_sector", "harmonized")
series = config.env_series[series_code][year]
cfgen.prepare(year, series, io_harmonizer, env_harmonizer)
cfgen.set_sector_titles(sector_titles)
cfgen.describe()
cfgen.describe(True)
cfgen.counterfact(1995, "jp")
|
StarcoderdataPython
|
3357407
|
#! /usr/bin/env python
# run this test against an instance of uwsgi for websockets
from nose import tools
import threading
from websocket import create_connection
class WebsocketClient(threading.Thread):
"""Simulate a websocket client"""
def __init__(self, websocket_url):
self.websocket_url = websocket_url
super(WebsocketClient, self).__init__()
def run(self):
ws = create_connection(self.websocket_url)
assert ws.connected
result = ws.recv()
tools.eq_(result, 'Hello, World')
ws.close()
tools.eq_(ws.connected, False)
def test_subscribe_publish_broadcast():
# the sender
websocket_url = 'ws://localhost:8000/ws/foobar?publish-broadcast'
ws = create_connection(websocket_url)
ws.send('Hello, World')
# the receivers
websocket_url = 'ws://localhost:8000/ws/foobar?subscribe-broadcast'
clients = [WebsocketClient(websocket_url) for _ in range(0, 1000)]
for client in clients:
client.start()
for client in clients:
client.join(5)
ws.close()
|
StarcoderdataPython
|
3210752
|
import os
from os import path
from os.path import join
import sys
import json
import printj
import pyjeasy.file_utils as f
import cv2
from tqdm import tqdm
PATH = "/home/jitesh/sekisui/bolt/hexagon_bolts"
OUTPUT_PATH = "/home/jitesh/sekisui/bolt/cropped_hexagon_bolts"
f.make_dir_if_not_exists(OUTPUT_PATH)
json_list = f.get_all_filenames_of_extension(dirpath=PATH, extension="json")
# printj.blue(json_list)
for json_file in tqdm(json_list):
filename = json_file.split(".")[0]
output_image_path = os.path.join(OUTPUT_PATH, f"{filename}.jpg")
image_path = os.path.join(PATH, f"{filename}.jpg")
json_path = os.path.join(PATH, json_file)
if f.path_exists(image_path):
img = cv2.imread(image_path)
with open(json_path) as json_data:
data = json.load(json_data)
data = data["shapes"]
i = 0
for d in data:
if d["label"] =="bolt-roi":
[p1, p2] = d["points"]
xmin = int(min(p1[0], p2[0]))
ymin = int(min(p1[1], p2[1]))
xmax = int(max(p1[0], p2[0]))
ymax = int(max(p1[1], p2[1]))
output_img = img[ymin:ymax, xmin:xmax]
# cv2.imshow("", output_img)
# cv2.waitKey(0)
output_image_path = os.path.join(OUTPUT_PATH, f"{filename}_{i}.jpg")
cv2.imwrite(output_image_path, output_img)
i += 1
# printj.cyan(filename)
# sys.exit()
|
StarcoderdataPython
|
3217935
|
<reponame>alirezaghey/leetcode-solutions
from functools import cache
class Solution:
# iterative solution
# TC: O(n)
# SC: O(1)
def numTilings(self, n: int) -> int:
dp = [1,1,2]
if n <= 2:
return dp[n]
MOD = 1_000_000_007
for _ in range(2, n):
third, second, first = dp
dp[0], dp[1], dp[2] = second, first, (2*first+third)% MOD
return dp[-1]
# recursive memoized solution
# TC: O(n)
# SC: O(n) for the recursive call stack and memoization
def numTilings2(self, n: int) -> int:
MOD = 1_000_000_007
@cache
def calc(n):
if n == 0:
return 1
if n <= 2:
return n
return (2*calc(n-1)+calc(n-3)) % MOD
return calc(n)
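
# A small self-check added for illustration (not part of the original solution):
# the known answers for this tiling problem are f(1)=1, f(2)=2, f(3)=5, f(4)=11,
# and both implementations above should agree on them.
if __name__ == "__main__":
    s = Solution()
    for n, expected in [(1, 1), (2, 2), (3, 5), (4, 11)]:
        assert s.numTilings(n) == expected
        assert s.numTilings2(n) == expected
    print("numTilings sanity checks passed")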
|
StarcoderdataPython
|
164515
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import httpretty
from pyfakefs import fake_filesystem_unittest
from click import UsageError, ClickException
from mock import Mock, MagicMock, patch
from requests.exceptions import SSLError
from cloudshell.rest.api import FeatureUnavailable
from shellfoundry.commands.list_command import ListCommandExecutor
from shellfoundry.models.shell_template import ShellTemplate
from shellfoundry.utilities.template_retriever import FilteredTemplateRetriever, TemplateRetriever, TEMPLATES_YML
class TestListCommand(unittest.TestCase):
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_single_template_is_displayed(self, max_width_mock, echo_mock):
# Arrange
max_width_mock.return_value = 62 # mocking the max width to eliminate the distinction
# between the running console size
template_retriever = Mock()
template_retriever.get_templates = Mock(
return_value={'gen1/base': [ShellTemplate('gen1/base', 'description', '', '7.0')]})
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(template_retriever=template_retriever, standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(u' Template Name CloudShell Ver. Description \n'
u'---------------------------------------------\n'
u' gen1/base 7.0 and up description ')
@patch('shellfoundry.commands.list_command.Configuration')
def test_shows_informative_message_when_offline(self, conf_class):
# Arrange
configuration = MagicMock(read=MagicMock(return_value=MagicMock(online_mode="True")))
conf_class.return_value = configuration
template_retriever = Mock()
template_retriever.get_templates.side_effect = SSLError()
list_command_executor = ListCommandExecutor(template_retriever=template_retriever,
standards=Mock())
# Assert
self.assertRaisesRegexp(UsageError, "offline", list_command_executor.list)
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_two_templates_are_displayed(self, max_width_mock, echo_mock):
# Arrange
max_width_mock.return_value = 62 # mocking the max width to eliminate the distinction
# between the running console size
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen1/base': [ShellTemplate('gen1/base', 'base description', '', '7.0', 'base')],
'gen1/switch': [ShellTemplate('gen1/switch', 'switch description', '', '7.0')]})
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(template_retriever=template_retriever, standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(
u' Template Name CloudShell Ver. Description \n'
u'----------------------------------------------------\n'
u' gen1/base 7.0 and up base description \n'
u' gen1/switch 7.0 and up switch description ')
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_two_long_named_templates_are_displayed_on_normal_window(self, max_width_mock, echo_mock):
# Arrange
max_width_mock.return_value = 40 # mocking the max width to eliminate the distinction
# between the running console size
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen2/networking/switch': [ShellTemplate('gen2/networking/switch',
'TOSCA based template for standard Switch devices/virtual appliances',
'', '8.0')],
'gen2/networking/WirelessController': [ShellTemplate('gen2/networking/WirelessController',
'TOSCA based template for standard WirelessController devices/virtual appliances',
'', '8.0')]})
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(template_retriever=template_retriever, standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(
u' Template Name CloudShell Ver. Description \n'
u'-----------------------------------------------------------------------------------------------\n'
u' gen2/networking/WirelessController 8.0 and up TOSCA based template for standard \n'
u' WirelessController devices/virtual \n'
u' appliances \n'
u' gen2/networking/switch 8.0 and up TOSCA based template for standard Switch \n'
u' devices/virtual appliances '
)
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_console_size_small_description_wrapping_logic_ignored(self, max_width_mock, echo_mock):
# Arrange
max_width_mock.return_value = 0
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen2/networking/switch': [ShellTemplate('gen2/networking/switch',
'TOSCA based template for standard Switch devices/virtual appliances',
'', '8.0')],
'gen2/networking/WirelessController': [ShellTemplate('gen2/networking/WirelessController',
'TOSCA based template for standard WirelessController devices/virtual appliances',
'', '8.0')]})
standards = Mock()
standards.fetch.return_value = {"networking": ['2.0.0'],
"resource": ['5.0.0', '5.0.1'],
"vido": ['3.0.1', '3.0.2', '3.0.3']}
list_command_executor = ListCommandExecutor(template_retriever=template_retriever, standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_called_once_with(
u' Template Name CloudShell Ver. Description \n'
u'--------------------------------------------------------------------------------------------------------------------------------------\n'
u' gen2/networking/WirelessController 8.0 and up TOSCA based template for standard WirelessController devices/virtual appliances \n'
u' gen2/networking/switch 8.0 and up TOSCA based template for standard Switch devices/virtual appliances '
)
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_filter_by_tosca_shows_all_tosca_templates(self, max_width_mock, echo_mock):
# Arrange
max_width_mock.return_value = 40
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen2/networking/switch': [ShellTemplate('gen2/networking/switch',
'TOSCA based template for standard Switch devices/virtual appliances',
'', '8.0')],
'gen2/networking/WirelessController': [ShellTemplate('gen2/networking/WirelessController',
'TOSCA based template for standard WirelessController devices/virtual appliances',
'', '8.0')],
'gen1/base': [ShellTemplate('gen1/base', 'base description', '', '7.0')],
'gen1/switch': [ShellTemplate('gen1/switch', 'switch description', '', '7.0')]})
flag_value = 'gen2'
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(
template_retriever=FilteredTemplateRetriever(flag_value, template_retriever), standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(
u' Template Name CloudShell Ver. Description \n'
u'-----------------------------------------------------------------------------------------------\n'
u' gen2/networking/WirelessController 8.0 and up TOSCA based template for standard \n'
u' WirelessController devices/virtual \n'
u' appliances \n'
u' gen2/networking/switch 8.0 and up TOSCA based template for standard Switch \n'
u' devices/virtual appliances ')
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_filter_by_legacy_shows_all_legacy_templates(self, max_width_mock, echo_mock):
# Arrange
max_width_mock.return_value = 62
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen2/networking/switch': [ShellTemplate('gen2/networking/switch',
'TOSCA based template for standard Switch devices/virtual appliances',
'', '8.0')],
'gen2/networking/WirelessController': [ShellTemplate('gen2/networking/WirelessController',
'TOSCA based template for standard WirelessController devices/virtual appliances',
'', '8.0')],
'gen1/base': [ShellTemplate('gen1/base', 'base description', '', '7.0')],
'gen1/switch': [ShellTemplate('gen1/switch', 'switch description', '', '7.0')]})
flag_value = 'gen1'
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(
template_retriever=FilteredTemplateRetriever(flag_value, template_retriever), standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(
u' Template Name CloudShell Ver. Description \n'
u'----------------------------------------------------\n'
u' gen1/base 7.0 and up base description \n'
u' gen1/switch 7.0 and up switch description ')
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_filter_by_all_shows_all_templates(self, max_width_mock, echo_mock):
# Arrange
max_width_mock.return_value = 40
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen1/base': [ShellTemplate('gen1/base', 'base description', '', '7.0')],
'gen1/switch': [ShellTemplate('gen1/switch', 'switch description', '', '7.0')],
'gen2/networking/switch': [ShellTemplate('gen2/networking/switch',
'TOSCA based template for standard Switch devices/virtual appliances',
'', '8.0')],
'gen2/networking/WirelessController': [ShellTemplate('gen2/networking/WirelessController',
'TOSCA based template for standard WirelessController devices/virtual appliances',
'', '8.0')]})
flag_value = 'all'
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(
template_retriever=FilteredTemplateRetriever(flag_value, template_retriever), standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(
u' Template Name CloudShell Ver. Description \n'
u'-----------------------------------------------------------------------------------------------\n'
u' gen2/networking/WirelessController 8.0 and up TOSCA based template for standard \n'
u' WirelessController devices/virtual \n'
u' appliances \n'
u' gen1/base 7.0 and up base description \n'
u' gen1/switch 7.0 and up switch description \n'
u' gen2/networking/switch 8.0 and up TOSCA based template for standard Switch \n'
u' devices/virtual appliances ')
# @patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_list_shows_nothing_because_filter_is_set_for_templates_that_do_not_exist(self, max_width_mock):
# Arrange
max_width_mock.return_value = 40
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen2/networking/switch': [ShellTemplate('gen2/networking/switch',
'TOSCA based template for standard Switch devices/virtual appliances',
'', '8.0')],
'gen2/networking/WirelessController': [ShellTemplate('gen2/networking/WirelessController',
'TOSCA based template for standard WirelessController devices/virtual appliances',
'', '8.0')]})
flag_value = 'gen1'
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(
template_retriever=FilteredTemplateRetriever(flag_value, template_retriever), standards=standards)
# Act
with self.assertRaisesRegexp(ClickException, "No templates matched the view criteria\(gen1/gen2\) or "
"available templates and standards are not compatible"):
list_command_executor.list()
# Assert
# echo_mock.assert_called_once_with("No templates matched the criteria")
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_devguide_text_note_appears_when_no_filter_was_selected(self, max_width_mock, echo_mock):
# Arrange
max_width_mock.return_value = 40
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen2/networking/switch': [ShellTemplate('gen2/networking/switch',
'TOSCA based template for standard Switch devices/virtual appliances',
'', '8.0')],
'gen2/networking/WirelessController': [ShellTemplate('gen2/networking/WirelessController',
'TOSCA based template for standard WirelessController devices/virtual appliances',
'', '8.0')]})
flag_value = None
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(
template_retriever=FilteredTemplateRetriever(flag_value, template_retriever), standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call('''
As of CloudShell 8.0, CloudShell uses 2nd generation shells, to view the list of 1st generation shells use: shellfoundry list --gen1.
For more information, please visit our devguide: https://qualisystems.github.io/devguide/''')
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
@patch('shellfoundry.commands.list_command.Configuration')
@patch.object(TemplateRetriever, '_get_min_cs_version')
@httpretty.activate
def test_templates_are_filtered_based_upon_the_result_of_cs_standards(self, _get_min_cs_version, conf_class,
max_width_mock, echo_mock):
# Arrange
_get_min_cs_version.return_value = None
configuration = MagicMock(read=MagicMock(return_value=MagicMock(online_mode="True")))
conf_class.return_value = configuration
max_width_mock.return_value = 40
templates = """templates:
- name : gen1/resource
description : base description
repository : https://github.com/QualiSystems/shell-resource-standard
params:
project_name :
min_cs_ver: 7.0
- name : gen1/switch
description : switch description
repository : https://github.com/QualiSystems/shell-switch-standard
params:
project_name :
min_cs_ver: 7.0
- name : gen2/resource
params:
project_name :
family_name:
description : 2nd generation shell template for a standard resource
repository : https://github.com/QualiSystems/shellfoundry-tosca-resource-template
min_cs_ver: 8.0
- name : gen2/networking/switch
params:
project_name :
family_name: Switch
description : 2nd generation shell template for a standard switch
repository : https://github.com/QualiSystems/shellfoundry-tosca-networking-template
min_cs_ver: 8.0
- name : gen2/networking/wireless-controller
params:
project_name :
family_name: WirelessController
description : 2nd generation shell template for a standard wireless controller
repository : https://github.com/QualiSystems/shellfoundry-tosca-networking-template
min_cs_ver: 8.0"""
flag_value = 'all'
standards = Mock()
standards.fetch.return_value = {"resource": ['5.0.0']}
template_retriever = FilteredTemplateRetriever(flag_value, TemplateRetriever())
httpretty.register_uri(httpretty.GET, TEMPLATES_YML, body=templates)
list_command_executor = ListCommandExecutor(template_retriever=template_retriever, standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(
u' Template Name CloudShell Ver. Description \n'
u'---------------------------------------------------------------------\n'
u' gen1/resource 7.0 and up base description \n'
u' gen1/switch 7.0 and up switch description \n'
u' gen2/resource 8.0 and up 2nd generation shell template for a \n'
u' standard resource ')
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
@patch('shellfoundry.commands.list_command.Configuration')
@patch.object(TemplateRetriever, '_get_min_cs_version')
@httpretty.activate
def test_templates_are_filtered_based_upon_the_result_of_cs_standards_gen2(self, _get_min_cs_version, conf_class,
max_width_mock, echo_mock):
# Arrange
_get_min_cs_version.return_value = None
configuration = MagicMock(read=MagicMock(return_value=MagicMock(online_mode="True")))
conf_class.return_value = configuration
max_width_mock.return_value = 40
templates = """templates:
- name : gen1/resource
description : base description
repository : https://github.com/QualiSystems/shell-resource-standard
params:
project_name :
min_cs_ver: 7.0
- name : gen1/switch
description : switch description
repository : https://github.com/QualiSystems/shell-switch-standard
params:
project_name :
min_cs_ver: 7.0
- name : gen2/resource
params:
project_name :
family_name:
description : 2nd generation shell template for a standard resource
repository : https://github.com/QualiSystems/shellfoundry-tosca-resource-template
min_cs_ver: 8.0
- name : gen2/networking/switch
params:
project_name :
family_name: Switch
description : 2nd generation shell template for a standard switch
repository : https://github.com/QualiSystems/shellfoundry-tosca-networking-template
min_cs_ver: 8.0
- name : gen2/networking/wireless-controller
params:
project_name :
family_name: WirelessController
description : 2nd generation shell template for a standard wireless controller
repository : https://github.com/QualiSystems/shellfoundry-tosca-networking-template
min_cs_ver: 8.0"""
flag_value = 'gen2'
standards = Mock()
standards.fetch.return_value = {"networking": ['5.0.0']}
template_retriever = FilteredTemplateRetriever(flag_value, TemplateRetriever())
httpretty.register_uri(httpretty.GET, TEMPLATES_YML, body=templates)
list_command_executor = ListCommandExecutor(template_retriever=template_retriever, standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(
u' Template Name CloudShell Ver. Description \n'
u'-------------------------------------------------------------------------------------------\n'
u' gen2/networking/switch 8.0 and up 2nd generation shell template for a \n'
u' standard switch \n'
u' gen2/networking/wireless-controller 8.0 and up 2nd generation shell template for a \n'
u' standard wireless controller ')
class TestListCommandWithFakeFs(fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
@staticmethod
def get_8_0_templates_output():
return (
u' Template Name CloudShell Ver. Description \n'
u'-------------------------------------------------------------------------------------------------------------------\n'
u' gen1/compute 7.0 and up 1st generation shell template for compute servers \n'
u' gen1/deployed-app 7.0 and up 1st generation shell template for a deployed app \n'
u' gen1/firewall 7.0 and up 1st generation shell template for a standard firewall \n'
u' gen1/networking/router 7.0 and up 1st generation shell template for a standard router \n'
u' gen1/networking/switch 7.0 and up 1st generation shell template for a standard switch \n'
u' gen1/pdu 7.0 and up 1st generation shell template for a standard pdu \n'
u' gen1/resource 7.0 and up 1st generation shell template for basic inventory resources \n'
u' gen1/resource-clean 7.0 and up 1st generation shell template for basic inventory resources \n'
u' (without sample commands) \n'
u' gen2/compute 8.0 and up 2nd generation shell template for compute servers \n'
u' gen2/deployed-app 8.0 and up 2nd generation shell template for a deployed app \n'
u' gen2/firewall 8.0 and up 2nd generation shell template for firewall resources \n'
u' gen2/networking/router 8.0 and up 2nd generation shell template for a standard router \n'
u' gen2/networking/switch 8.0 and up 2nd generation shell template for a standard switch \n'
u' gen2/networking/wireless-controller 8.0 and up 2nd generation shell template for a standard wireless \n'
u' controller \n'
u' gen2/pdu 8.0 and up 2nd generation shell template for a standard pdu \n'
u' gen2/resource 8.0 and up 2nd generation shell template for basic inventory resources \n'
u' layer-1-switch 7.0 and up A native shell template for layer 1 switches ')
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_get_cs_standards_unavailable_shows_cs_8_0_shipped_templates(self, max_width_mock, echo_mock):
# Assert
max_width_mock.return_value = 60
from shellfoundry import ALTERNATIVE_TEMPLATES_PATH
self.fs.add_real_file(ALTERNATIVE_TEMPLATES_PATH)
standards = Mock(fetch=Mock(side_effect=FeatureUnavailable()))
template_retriever = FilteredTemplateRetriever('all', TemplateRetriever())
list_command_executor = ListCommandExecutor(template_retriever=template_retriever, standards=standards)
# Act
list_command_executor.list()
# Assert
templates_output = self.get_8_0_templates_output()
echo_mock.assert_any_call(templates_output)
|
StarcoderdataPython
|
3316734
|
from .csv_rule import CSVRule
from flask import Flask
app = Flask(__name__)
class limitAcuityRule(CSVRule):
# def __init__(self, name_in_csv, node_id):
# super().__init__(name_in_csv, node_id)
def __init__(self, name_in_csv, allowed_acuity, node_id):
super().__init__(name_in_csv, node_id)
self.allowed_acuity = allowed_acuity
    def check(self, patient):
        '''Receives a patient object and returns True if its acuity is in the allowed list.'''
app.logger.info("Limit Acuity rule")
patient_acuity = int(patient.get_attribute(self.get_name_in_csv()))
app.logger.info("patient {} acuity is {}".format(patient.get_id(), patient_acuity))
allowed_acuity = [int(x) for x in self.allowed_acuity.split(',') if x.strip().isdigit()]
for x in allowed_acuity:
app.logger.info("Allowed acuity is {}".format(x))
if patient_acuity in allowed_acuity:
return True
return False
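
# A minimal usage sketch added for illustration. It assumes the CSVRule base class
# (imported above) accepts (name_in_csv, node_id) and provides get_name_in_csv(),
# and it uses a hypothetical stub patient. Run with `python -m <package>.<module>`
# so the relative import above resolves.
if __name__ == "__main__":
    class _StubPatient:
        def get_id(self):
            return "patient-1"

        def get_attribute(self, name):
            return "3"  # pretend the CSV column holds acuity 3

    rule = limitAcuityRule(name_in_csv="acuity", allowed_acuity="2,3", node_id=0)
    print(rule.check(_StubPatient()))  # expected: True, since 3 is in "2,3"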
|
StarcoderdataPython
|
112905
|
"""
AlexNet 模型
本模型默认总参数量[参考基准:cifar10]:
Total params: 24,769,290
Trainable params: 24,768,586
Non-trainable params: 704
本模型默认总参数量[参考基准:ImageNet]:
Total params: 62,379,752
Trainable params: 62,379,048
Non-trainable params: 704
"""
from hat.models.advance import AdvNet
class alexnet(AdvNet):
"""
AlexNet
"""
def args(self):
self.CONV = [96, 256, 384, 384, 256]
self.SIZE = [11, 5, 3, 3, 3]
self.STEP = [4 if self.INPUT_SHAPE[0] >= 160 else 2, 2, 2, 2]
self.PAD = 'valid' if self.INPUT_SHAPE[0] >= 160 else 'same'
self.POOL_SIZE = 3
self.LOCAL = [4096, 4096]
self.DROP = 0.5
def build_model(self):
x_in = self.input(self.INPUT_SHAPE)
# conv
x = self.conv(x_in, self.CONV[0], self.SIZE[0], strides=self.STEP[0], padding=self.PAD, activation='relu')
x = self.bn(x)
x = self.maxpool(x, self.POOL_SIZE, self.STEP[1])
x = self.conv(x, self.CONV[1], self.SIZE[1], activation='relu')
x = self.bn(x)
x = self.maxpool(x, self.POOL_SIZE, self.STEP[2], padding=self.PAD)
x = self.conv(x, self.CONV[2], self.SIZE[2], activation='relu')
x = self.conv(x, self.CONV[3], self.SIZE[3], activation='relu')
x = self.conv(x, self.CONV[4], self.SIZE[4], activation='relu')
x = self.maxpool(x, self.POOL_SIZE, self.STEP[3], padding=self.PAD)
# local
x = self.flatten(x)
x = self.local(x, self.LOCAL[0])
x = self.dropout(x, self.DROP)
x = self.local(x, self.LOCAL[1])
x = self.dropout(x, self.DROP)
x = self.local(x, self.NUM_CLASSES, activation='softmax')
return self.Model(inputs=x_in, outputs=x, name='alexnet')
# test part
if __name__ == "__main__":
mod = alexnet(DATAINFO={'INPUT_SHAPE': (32, 32, 3), 'NUM_CLASSES': 10}, built=True)
mod.summary()
|
StarcoderdataPython
|
1659430
|
#!/usr/bin/python
import time
for x in xrange(0,10):
print x
time.sleep(2);
|
StarcoderdataPython
|
3336778
|
# ==============================================================================
# Copyright 2020 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pytest
from tensorflow.keras.layers import * # pylint: disable=wildcard-import
from tensorflow.keras.models import Model
from qkeras import * # pylint: disable=wildcard-import
from qkeras.autoqkeras.forgiving_metrics import ForgivingFactorBits # pylint: disable=line-too-long
from qkeras.utils import model_quantize
def get_model():
"""Returns sample model."""
xi = Input((28, 28, 1), name="input") # pylint: disable=undefined-variable
x = Conv2D(32, 3, strides=1, padding="same", name="c1")(xi) # pylint: disable=undefined-variable
x = BatchNormalization(name="b1")(x) # pylint: disable=undefined-variable
x = Activation("relu", name="a1")(x) # pylint: disable=undefined-variable
x = MaxPooling2D(2, 2, name="mp1")(x) # pylint: disable=undefined-variable
x = QConv2D(32, 3, kernel_quantizer="binary", bias_quantizer="binary", # pylint: disable=undefined-variable
strides=1, padding="same", name="c2")(x)
x = QBatchNormalization(name="b2")(x) # pylint: disable=undefined-variable
x = QActivation("binary", name="a2")(x) # pylint: disable=undefined-variable
x = MaxPooling2D(2, 2, name="mp2")(x) # pylint: disable=undefined-variable
x = QConv2D(32, 3, kernel_quantizer="ternary", bias_quantizer="ternary", # pylint: disable=undefined-variable
strides=1, padding="same", activation="binary", name="c3")(x)
x = Flatten(name="flatten")(x) # pylint: disable=undefined-variable
x = Dense(1, name="dense", activation="softmax")(x) # pylint: disable=undefined-variable
model = Model(inputs=xi, outputs=x)
return model
def test_forgiving_factor_bits():
"""Tests forgiving factor bits."""
delta_p = 8.0
delta_n = 8.0
rate = 2.0
stress = 1.0
input_bits = 8
output_bits = 8
ref_bits = 8
config = {
"QDense": ["parameters", "activations"],
"Dense": ["parameters", "activations"],
"QConv2D": ["parameters", "activations"],
"Conv2D": ["parameters", "activations"],
"DepthwiseConv2D": ["parameters", "activations"],
"QDepthwiseConv2D": ["parameters", "activations"],
"Activation": ["activations"],
"QActivation": ["activations"],
"QBatchNormalization": ["parameters"],
"BatchNormalization": ["parameters"],
"default": ["activations"],
}
model = get_model()
ffb = ForgivingFactorBits(
delta_p, delta_n, rate, stress,
input_bits, output_bits, ref_bits,
config
)
cached_result = ffb.compute_model_size(model)
ref_size = cached_result[0]
ref_p = cached_result[1]
ref_a = cached_result[2]
assert ref_size == 258544
assert ref_p == 43720
assert ref_a == 214824
def test_new_forgiving_factor():
"""Tests forgiving factor."""
delta_p = 8.0
delta_n = 8.0
rate = 2.0
stress = 1.0
input_bits = 8
output_bits = 8
ref_bits = 8
config = {
"QDense": ["parameters", "activations"],
"Dense": ["parameters", "activations"],
"QConv2D": ["parameters", "activations"],
"Conv2D": ["parameters", "activations"],
"DepthwiseConv2D": ["parameters", "activations"],
"QDepthwiseConv2D": ["parameters", "activations"],
"Activation": ["activations"],
"QActivation": ["activations"],
"QBatchNormalization": ["parameters"],
"BatchNormalization": ["parameters"],
"default": ["activations"]
}
model = get_model()
ffb = ForgivingFactorBits(
delta_p, delta_n, rate, stress,
input_bits, output_bits, ref_bits,
config
)
cached_result = ffb.compute_model_size(model)
ref_size = cached_result[0]
ref_p = cached_result[1]
ref_a = cached_result[2]
ref_size_dict = cached_result[3]
assert ref_size == 258544
assert ref_p == 43720
assert ref_a == 214824
q_dict = {
"c1": {
"kernel_quantizer": "binary",
"bias_quantizer": "quantized_bits(4)"
}
}
q_model = model_quantize(model, q_dict, 4)
cached_result = ffb.compute_model_size(q_model)
trial_size_dict = cached_result[3]
for name in trial_size_dict:
if name != "c1":
assert trial_size_dict[name] == ref_size_dict[name]
assert trial_size_dict["c1"]["parameters"] == 416
if __name__ == "__main__":
pytest.main([__file__])
|
StarcoderdataPython
|
53117
|
<reponame>zhigangjiang/LGT-Net
"""
@Date: 2021/07/18
@description:
"""
import os
import models
import torch.distributed as dist
import torch
from torch.nn import init
from torch.optim import lr_scheduler
from utils.time_watch import TimeWatch
from models.other.optimizer import build_optimizer
from models.other.criterion import build_criterion
def build_model(config, logger):
name = config.MODEL.NAME
w = TimeWatch(f"Build model: {name}", logger)
ddp = config.WORLD_SIZE > 1
if ddp:
logger.info(f"use ddp")
dist.init_process_group("nccl", init_method='tcp://127.0.0.1:23456', rank=config.LOCAL_RANK,
world_size=config.WORLD_SIZE)
device = config.TRAIN.DEVICE
logger.info(f"Creating model: {name} to device:{device}, args:{config.MODEL.ARGS[0]}")
net = getattr(models, name)
ckpt_dir = os.path.abspath(os.path.join(config.CKPT.DIR, os.pardir)) if config.DEBUG else config.CKPT.DIR
if len(config.MODEL.ARGS) != 0:
model = net(ckpt_dir=ckpt_dir, **config.MODEL.ARGS[0])
else:
model = net(ckpt_dir=ckpt_dir)
logger.info(f'model dropout: {model.dropout_d}')
model = model.to(device)
optimizer = None
scheduler = None
if config.MODE == 'train':
optimizer = build_optimizer(config, model, logger)
config.defrost()
config.TRAIN.START_EPOCH = model.load(device, logger, optimizer, best=config.MODE != 'train' or not config.TRAIN.RESUME_LAST)
config.freeze()
if config.MODE == 'train' and len(config.MODEL.FINE_TUNE) > 0:
for param in model.parameters():
param.requires_grad = False
for layer in config.MODEL.FINE_TUNE:
logger.info(f'Fine-tune: {layer}')
getattr(model, layer).requires_grad_(requires_grad=True)
getattr(model, layer).reset_parameters()
model.show_parameter_number(logger)
if config.MODE == 'train':
if len(config.TRAIN.LR_SCHEDULER.NAME) > 0:
if 'last_epoch' not in config.TRAIN.LR_SCHEDULER.ARGS[0].keys():
config.TRAIN.LR_SCHEDULER.ARGS[0]['last_epoch'] = config.TRAIN.START_EPOCH - 1
scheduler = getattr(lr_scheduler, config.TRAIN.LR_SCHEDULER.NAME)(optimizer=optimizer,
**config.TRAIN.LR_SCHEDULER.ARGS[0])
logger.info(f"Use scheduler: name:{config.TRAIN.LR_SCHEDULER.NAME} args: {config.TRAIN.LR_SCHEDULER.ARGS[0]}")
logger.info(f"Current scheduler last lr: {scheduler.get_last_lr()}")
else:
scheduler = None
if config.AMP_OPT_LEVEL != "O0" and 'cuda' in device:
import apex
logger.info(f"use amp:{config.AMP_OPT_LEVEL}")
model, optimizer = apex.amp.initialize(model, optimizer, opt_level=config.AMP_OPT_LEVEL, verbosity=0)
if ddp:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[config.TRAIN.DEVICE],
broadcast_buffers=True) # use rank:0 bn
criterion = build_criterion(config, logger)
if optimizer is not None:
logger.info(f"Finally lr: {optimizer.param_groups[0]['lr']}")
return model, optimizer, criterion, scheduler
|
StarcoderdataPython
|
50875
|
#!/usr/bin/env python
import yaml
from pprint import pprint as pp
from napalm import get_network_driver
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Read YAML file
with open("my_devices.yml", 'r') as stream:
devices = yaml.load(stream)
driver = get_network_driver('ios')
pynet_rtr2_conn = driver(devices['pynet-rtr2']['hostname'], devices['pynet-rtr2']['username'], devices['pynet-rtr2']['password'], optional_args = devices['pynet-rtr2']['optional_args'])
new_route = "ip route 192.168.3.11 255.255.255.255 10.220.88.1\n"
pynet_rtr2_conn.open()
pynet_rtr2_conn.load_merge_candidate(config=new_route)
pp(pynet_rtr2_conn.compare_config())
input("Hit any key to continue!")
pynet_rtr2_conn.discard_config()
pp(pynet_rtr2_conn.compare_config())
input("Hit any key to continue!")
pynet_rtr2_conn.load_merge_candidate(config=new_route)
pynet_rtr2_conn.commit_config()
pynet_rtr2_conn.close()
|
StarcoderdataPython
|
147905
|
from .__init__ import *
from ..__init__ import Generator
def percentageFunc(maxValue=99, maxpercentage=99):
a = random.randint(1, maxpercentage)
b = random.randint(1, maxValue)
problem = f"What is {a}% of {b}?"
percentage = a / 100 * b
formatted_float = "{:.2f}".format(percentage)
solution = f"Required percentage = {formatted_float}%"
return problem, solution
percentage = Generator("Percentage of a number", 80, "What is a% of b?",
"percentage", percentageFunc)
|
StarcoderdataPython
|
3378082
|
from jupyter_client import KernelManager
import queue
from jupyter_client.manager import run_kernel
class MyKernel():
    def __init__(self):
        # Start a dedicated kernel for this instance and keep a client handle to it.
        self.kernel = KernelManager()
        self.kernel.start_kernel()
        self.client = self.kernel.client()
def run_code(self, code):
print("executing code: " + code)
with run_kernel() as kc:
            msg_id = kc.execute(code)
reply = kc.get_shell_msg(msg_id)
print(reply['content'])
print()
while True:
try:
io_msg = kc.get_iopub_msg(timeout=1)
print(io_msg['content'])
except queue.Empty:
print('timeout kc.get_iopub_msg')
break
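
# A minimal usage sketch added for illustration. It assumes a local Jupyter kernel
# (e.g. ipykernel) is installed; run_code() starts a throwaway kernel via run_kernel()
# and prints the shell reply plus any iopub output for the given code string.
if __name__ == "__main__":
    MyKernel().run_code("print(1 + 2)")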
|
StarcoderdataPython
|
43995
|
#!/usr/bin/python3
# ==================================================
"""
File: RMedian - Unittest - Phase 1
Author: <NAME>
"""
# ==================================================
# Import
import math
import random
import pytest
# ==================================================
# Phase 1
def phase1(X, k, d):
# Initiation
n = len(X)
random.shuffle(X)
S = X[:k]
XS = X[k:]
S.sort()
# Keeping the list entries below k/2
if 2*(k*math.log2(n))**0.5 < k/2:
lst = [2*(k*math.log2(n))**0.5]
if 3*(k*math.log2(n))**0.5 < k/2:
lst.append(3*(k*math.log2(n))**0.5)
while d*lst[len(lst) - 1] < k/2:
lst.append(d*lst[len(lst) - 1])
lst.append(k/2)
else:
lst = [k/2]
# Buckets
L = [[] for _ in range(len(lst) - 1)]
R = [[] for _ in range(len(lst) - 1)]
C = []
for s in S[math.floor(k / 2 - lst[0]): math.ceil(k / 2 + lst[0])]:
C.append(s)
for i in range(1, len(lst)):
for s in S[math.floor(k / 2 - lst[i]): math.floor(k / 2 - lst[i - 1])]:
L[i - 1].append(s)
for s in S[math.ceil(k / 2 + lst[i - 1]): math.ceil(k / 2 + lst[i])]:
R[i - 1].append(s)
return S, XS, L, C, R
# ==================================================
# Unittest : Parameter
@pytest.mark.parametrize(('n'), [
# Randomized input
random.randint(2**9, 2**15),
# Manuel input
2**10, 2**12, 2**14, 2**12 + 1, 2**12 - 1
])
# ==================================================
# Unittest : Test
def test_p1(n):
    # Generating test cases
X0 = [i for i in range(n)]
k0 = int(n ** (2 / 3))
d0 = int(n ** (1 / 12))
S0, XS0, L0, C0, R0 = phase1(X0, k0, d0)
X1 = [i for i in range(n)]
k1 = int(n / math.log(n, 2)**(1/3))
d1 = int(math.log(n, 2)**(1/3))
S1, XS1, L1, C1, R1 = phase1(X1, k1, d1)
sumL0, sumR0, sumL1, sumR1 = 0, 0, 0, 0
for l0 in L0:
sumL0 += len(l0)
for l1 in L1:
sumL1 += len(l1)
for r0 in R0:
sumR0 += len(r0)
for r1 in R1:
sumR1 += len(r1)
# Test
assert sumL0 == sumR0 # ||L|| = ||R||
assert sumL1 == sumR1 # ||L|| = ||R||
assert len(L0) == len(R0) # |L| = |R|
assert len(L1) == len(R1) # |L| = |R|
assert sumL0 + len(C0) + sumR0 == k0 # |L| + |C| + |R| = k
assert sumL1 + len(C1) + sumR1 == k1 # |L| + |C| + |R| = k
return
# ==================================================
|
StarcoderdataPython
|
1784617
|
from flask_wtf import FlaskForm
from wtforms import TextField
from wtforms.validators import Required, Email, Length
from app.jobs.models import Jobs
from app import db
class RegisterJobsForm(FlaskForm):
name = TextField(validators=[Required()])
address = TextField()
|
StarcoderdataPython
|
3389412
|
import json
from jwkest import BadSignature
from jwkest.jwk import SYMKey
from jwkest.jws import NoSuitableSigningKeys
from jwkest.jwt import JWT
from oic.oic import OIDCONF_PATTERN
from oic.oic.message import IdToken, ProviderConfigurationResponse
from oic.utils.keyio import KeyJar, KeyBundle
import requests
class IDTokenVerificationError(Exception):
pass
def verify_signed_id_token(token, key=None, jwks=None):
jwt = JWT().unpack(token)
payload = jwt.payload()
issuer = payload['iss']
provider_keys = None
if jwt.headers['alg'].startswith('HS') and not key:
raise IDTokenVerificationError(
'No symmetric key provided for signature using \'{}\' algorithm.'.format(
jwt.headers['alg']))
if key:
provider_keys = _create_symmetric_key(issuer, key)
elif jwks:
provider_keys = _parse_provider_keys_from_jwks(issuer, jwks)
elif jwt.headers['alg'] != 'none': # don't fetch keys for unsigned JWT
provider_keys = _fetch_provider_keys(issuer)
try:
return IdToken().from_jwt(token, keyjar=provider_keys).to_json()
except (BadSignature, NoSuitableSigningKeys) as e:
raise IDTokenVerificationError(
'No key that could be used to verify the signature could be found.')
def _fetch_provider_keys(issuer):
try:
provider_config = ProviderConfigurationResponse(
**requests.get(OIDCONF_PATTERN % issuer).json())
except requests.exceptions.RequestException:
raise IDTokenVerificationError('The providers configuration could not be fetched.')
if issuer != provider_config['issuer']:
raise IDTokenVerificationError(
'Issuer in provider configuration does not match issuer of ID Token.')
provider_keys = KeyJar()
keybundle = provider_keys.add(issuer, provider_config['jwks_uri'])
try:
keybundle.update() # force fetch of remote keys from jwks_uri
except requests.exceptions.RequestException:
raise IDTokenVerificationError('Keys could not be fetched from the providers \'jwks_uri\'.')
return provider_keys
def _parse_provider_keys_from_jwks(issuer, jwks):
keys = json.loads(jwks)['keys']
provider_keys = KeyJar()
provider_keys[issuer] = [KeyBundle(keys=keys)]
return provider_keys
def _create_symmetric_key(issuer, key):
provider_keys = KeyJar()
key = SYMKey(use='sig', k=key)
kb = KeyBundle(keytype='oct')
kb.append(key)
provider_keys[issuer] = [kb]
return provider_keys
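
# A minimal usage sketch added for illustration (hypothetical environment variables):
# verify an HS256-signed ID token with the shared client secret. Nothing runs unless
# ID_TOKEN is set, so importing this module stays side-effect free.
if __name__ == "__main__":
    import os
    raw_token = os.environ.get("ID_TOKEN")
    if raw_token:
        try:
            print(verify_signed_id_token(raw_token, key=os.environ.get("CLIENT_SECRET")))
        except IDTokenVerificationError as exc:
            print("ID token verification failed:", exc)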
|
StarcoderdataPython
|
3343419
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
import os.path as op
import sys
import logging
import string
from collections import defaultdict
from itertools import product, combinations
from jcvi.formats.blast import BlastLine
from jcvi.formats.fasta import Fasta
from jcvi.formats.bed import Bed
from jcvi.formats.base import must_open, BaseFile
from jcvi.utils.grouper import Grouper
from jcvi.utils.cbook import gene_name
from jcvi.compara.synteny import AnchorFile, check_beds
from jcvi.apps.base import OptionParser, OptionGroup, glob, ActionDispatcher, \
need_update, sh, mkdir
class OMGFile (BaseFile):
def __init__(self, filename):
super(OMGFile, self).__init__(filename)
fp = open(filename)
inblock = False
components = []
component = []
for row in fp:
if inblock:
atoms = row.split()
natoms = len(atoms)
assert natoms in (0, 7)
if natoms:
gene, taxa = atoms[0], atoms[5]
component.append((gene, taxa))
else:
inblock = False
components.append(tuple(component))
if row.strip().startswith("---"):
inblock = True
component = []
if inblock:
components.append(tuple(component))
self.components = components
def best(self):
bb = set()
for component in self.components:
size = len(component)
if size > 1:
bb.add(component)
return bb
def main():
actions = (
('tandem', 'identify tandem gene groups within certain distance'),
('ortholog', 'run a combined synteny and RBH pipeline to call orthologs'),
('group', 'cluster the anchors into ortho-groups'),
('omgprepare', 'prepare weights file to run Sankoff OMG algorithm'),
('omg', 'generate a series of Sankoff OMG algorithm inputs'),
('omgparse', 'parse the OMG outputs to get gene lists'),
('enrich', 'enrich OMG output by pulling genes missed by OMG'),
('layout', 'layout the gene lists'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def get_weights(weightsfiles=None):
if weightsfiles is None:
weightsfiles = glob("*.weights")
weights = defaultdict(list)
for row in must_open(weightsfiles):
a, b, c = row.split()
weights[a].append((a, b, c))
return weights
def get_edges(weightsfiles=None):
if weightsfiles is None:
weightsfiles = glob("*.weights")
edges = {}
for row in must_open(weightsfiles):
a, b, c = row.split()
c = int(c)
edges[(a, b)] = c
edges[(b, a)] = c
return edges
def get_info():
infofiles = glob("*.info")
info = {}
for row in must_open(infofiles):
a = row.split()[0]
info[a] = row.rstrip()
return info
def enrich(args):
"""
%prog enrich omgfile groups ntaxa > enriched.omg
    Enrich OMG output by pulling genes missed by OMG.
"""
p = OptionParser(enrich.__doc__)
p.add_option("--ghost", default=False, action="store_true",
help="Add ghost homologs already used [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
omgfile, groupsfile, ntaxa = args
ntaxa = int(ntaxa)
ghost = opts.ghost
# Get gene pair => weight mapping
weights = get_edges()
info = get_info()
# Get gene => taxon mapping
info = dict((k, v.split()[5]) for k, v in info.items())
groups = Grouper()
fp = open(groupsfile)
for row in fp:
members = row.strip().split(",")
groups.join(*members)
logging.debug("Imported {0} families with {1} members.".\
format(len(groups), groups.num_members))
seen = set()
omggroups = Grouper()
fp = open(omgfile)
for row in fp:
genes, idxs = row.split()
genes = genes.split(",")
seen.update(genes)
omggroups.join(*genes)
nmembers = omggroups.num_members
logging.debug("Imported {0} OMG families with {1} members.".\
format(len(omggroups), nmembers))
assert nmembers == len(seen)
alltaxa = set(str(x) for x in range(ntaxa))
recruited = []
fp = open(omgfile)
for row in fp:
genes, idxs = row.split()
genes = genes.split(",")
a = genes[0]
idxs = set(idxs.split(","))
missing_taxa = alltaxa - idxs
if not missing_taxa:
print(row.rstrip())
continue
leftover = groups[a]
if not ghost:
leftover = set(leftover) - seen
if not leftover:
print(row.rstrip())
continue
leftover_sorted_by_taxa = dict((k, \
[x for x in leftover if info[x] == k]) \
for k in missing_taxa)
#print genes, leftover
#print leftover_sorted_by_taxa
solutions = []
for solution in product(*leftover_sorted_by_taxa.values()):
score = sum(weights.get((a, b), 0) for a in solution for b in genes)
if score == 0:
continue
score += sum(weights.get((a, b), 0) for a, b in combinations(solution, 2))
solutions.append((score, solution))
#print solution, score
best_solution = max(solutions) if solutions else None
if best_solution is None:
print(row.rstrip())
continue
#print "best ==>", best_solution
best_score, best_addition = best_solution
genes.extend(best_addition)
recruited.extend(best_addition)
genes = sorted([(info[x], x) for x in genes])
idxs, genes = zip(*genes)
if ghost: # decorate additions so it's clear that they were added
pgenes = []
for g in genes:
if g in recruited and g in seen:
pgenes.append("|{0}|".format(g))
else:
pgenes.append(g)
genes = pgenes
print("\t".join((",".join(genes), ",".join(idxs))))
if not ghost:
seen.update(best_addition)
logging.debug("Recruited {0} new genes.".format(len(recruited)))
def pairwise_distance(a, b, threadorder):
d = 0
    for x, y in list(zip(a, b))[:-1]:  # Last column not used; list() keeps this valid on Python 3
x, y = x.strip("|"), y.strip("|")
if "." in (x, y):
dd = 50
else:
xi, x = threadorder[x]
yi, y = threadorder[y]
dd = min(abs(xi - yi), 50)
d += dd
return d
def insert_into_threaded(atoms, threaded, threadorder):
min_idx, min_d = 0, 1000
for i, t in enumerate(threaded):
# calculate distance
d = pairwise_distance(atoms, t, threadorder)
if d < min_d:
min_idx = i
min_d = d
i = min_idx
t = threaded[i]
threaded.insert(i, atoms)
logging.debug("Insert {0} before {1} (d={2})".format(atoms, t, min_d))
def sort_layout(thread, listfile, column=0):
"""
    Sort the syntelog table according to chromosomal positions. First orient the
contents against threadbed, then for contents not in threadbed, insert to
the nearest neighbor.
"""
from jcvi.formats.base import DictFile
outfile = listfile.rsplit(".", 1)[0] + ".sorted.list"
threadorder = thread.order
fw = open(outfile, "w")
lt = DictFile(listfile, keypos=column, valuepos=None)
threaded = []
imported = set()
for t in thread:
accn = t.accn
if accn not in lt:
continue
imported.add(accn)
atoms = lt[accn]
threaded.append(atoms)
assert len(threaded) == len(imported)
total = sum(1 for x in open(listfile))
logging.debug("Total: {0}, currently threaded: {1}".format(total, len(threaded)))
fp = open(listfile)
for row in fp:
atoms = row.split()
accn = atoms[0]
if accn in imported:
continue
insert_into_threaded(atoms, threaded, threadorder)
for atoms in threaded:
print("\t".join(atoms), file=fw)
fw.close()
logging.debug("File `{0}` sorted to `{1}`.".format(outfile, thread.filename))
def layout(args):
"""
%prog layout omgfile taxa
Build column formatted gene lists after omgparse(). Use species list
separated by comma in place of taxa, e.g. "BR,BO,AN,CN"
"""
p = OptionParser(layout.__doc__)
p.add_option("--sort",
help="Sort layout file based on bedfile [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
omgfile, taxa = args
listfile = omgfile.rsplit(".", 1)[0] + ".list"
taxa = taxa.split(",")
ntaxa = len(taxa)
fw = open(listfile, "w")
data = []
fp = open(omgfile)
for row in fp:
genes, idxs = row.split()
row = ["."] * ntaxa
genes = genes.split(",")
ixs = [int(x) for x in idxs.split(",")]
for gene, idx in zip(genes, ixs):
row[idx] = gene
txs = ",".join(taxa[x] for x in ixs)
print("\t".join(("\t".join(row), txs)), file=fw)
data.append(row)
    coldata = list(zip(*data))  # materialize so columns can be indexed below
ngenes = []
for i, tx in enumerate(taxa):
genes = [x for x in coldata[i] if x != '.']
genes = set(x.strip("|") for x in genes)
ngenes.append((len(genes), tx))
details = ", ".join("{0} {1}".format(a, b) for a, b in ngenes)
total = sum(a for a, b in ngenes)
s = "A list of {0} orthologous families that collectively".format(len(data))
s += " contain a total of {0} genes ({1})".format(total, details)
print(s, file=sys.stderr)
fw.close()
lastcolumn = ntaxa + 1
cmd = "sort -k{0},{0} {1} -o {1}".format(lastcolumn, listfile)
sh(cmd)
logging.debug("List file written to `{0}`.".format(listfile))
sort = opts.sort
if sort:
thread = Bed(sort)
sort_layout(thread, listfile)
def omgparse(args):
"""
%prog omgparse work
Parse the OMG outputs to get gene lists.
"""
p = OptionParser(omgparse.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
work, = args
omgfiles = glob(op.join(work, "gf*.out"))
for omgfile in omgfiles:
omg = OMGFile(omgfile)
best = omg.best()
for bb in best:
genes, taxa = zip(*bb)
print("\t".join((",".join(genes), ",".join(taxa))))
def group(args):
"""
%prog group anchorfiles
Group the anchors into ortho-groups. Can input multiple anchor files.
"""
p = OptionParser(group.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
anchorfiles = args
groups = Grouper()
for anchorfile in anchorfiles:
ac = AnchorFile(anchorfile)
for a, b, idx in ac.iter_pairs():
groups.join(a, b)
logging.debug("Created {0} groups with {1} members.".\
format(len(groups), groups.num_members))
outfile = opts.outfile
fw = must_open(outfile, "w")
for g in groups:
print(",".join(sorted(g)), file=fw)
fw.close()
return outfile
def omg(args):
"""
%prog omg weightsfile
Run Sankoff's OMG algorithm to get orthologs. Download OMG code at:
<http://172.16.31.10/IsbraSoftware/OMGMec.html>
    This script only writes the partitions, but does not launch OMGMec. You may need to:
$ parallel "java -cp ~/code/OMGMec TestOMGMec {} 4 > {}.out" ::: work/gf?????
Then followed by omgparse() to get the gene lists.
"""
p = OptionParser(omg.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
weightsfiles = args
groupfile = group(weightsfiles + ["--outfile=groups"])
weights = get_weights(weightsfiles)
info = get_info()
fp = open(groupfile)
work = "work"
mkdir(work)
for i, row in enumerate(fp):
gf = op.join(work, "gf{0:05d}".format(i))
genes = row.rstrip().split(",")
fw = open(gf, "w")
contents = ""
npairs = 0
for gene in genes:
gene_pairs = weights[gene]
for a, b, c in gene_pairs:
if b not in genes:
continue
contents += "weight {0}".format(c) + '\n'
contents += info[a] + '\n'
contents += info[b] + '\n\n'
npairs += 1
header = "a group of genes :length ={0}".format(npairs)
print(header, file=fw)
print(contents, file=fw)
fw.close()
def geneinfo(bed, order, genomeidx, ploidy):
bedfile = bed.filename
p = bedfile.split(".")[0]
idx = genomeidx[p]
pd = ploidy[p]
infofile = p + ".info"
if not need_update(bedfile, infofile):
return infofile
fwinfo = open(infofile, "w")
for s in bed:
chr = "".join(x for x in s.seqid if x in string.digits)
try:
chr = int(chr)
except ValueError:
chr = "0"
print("\t".join(str(x) for x in \
(s.accn, chr, s.start, s.end, s.strand, idx, pd)), file=fwinfo)
fwinfo.close()
logging.debug("Update info file `{0}`.".format(infofile))
return infofile
def omgprepare(args):
"""
%prog omgprepare ploidy anchorsfile blastfile
Prepare to run Sankoff's OMG algorithm to get orthologs.
"""
from jcvi.formats.blast import cscore
from jcvi.formats.base import DictFile
p = OptionParser(omgprepare.__doc__)
p.add_option("--norbh", action="store_true",
help="Disable RBH hits [default: %default]")
p.add_option("--pctid", default=0, type="int",
help="Percent id cutoff for RBH hits [default: %default]")
p.add_option("--cscore", default=90, type="int",
help="C-score cutoff for RBH hits [default: %default]")
p.set_stripnames()
p.set_beds()
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
ploidy, anchorfile, blastfile = args
norbh = opts.norbh
pctid = opts.pctid
cs = opts.cscore
qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts)
fp = open(ploidy)
genomeidx = dict((x.split()[0], i) for i, x in enumerate(fp))
fp.close()
ploidy = DictFile(ploidy)
geneinfo(qbed, qorder, genomeidx, ploidy)
geneinfo(sbed, sorder, genomeidx, ploidy)
pf = blastfile.rsplit(".", 1)[0]
cscorefile = pf + ".cscore"
cscore([blastfile, "-o", cscorefile, "--cutoff=0", "--pct"])
ac = AnchorFile(anchorfile)
pairs = set((a, b) for a, b, i in ac.iter_pairs())
logging.debug("Imported {0} pairs from `{1}`.".format(len(pairs), anchorfile))
weightsfile = pf + ".weights"
fp = open(cscorefile)
fw = open(weightsfile, "w")
npairs = 0
for row in fp:
a, b, c, pct = row.split()
c, pct = float(c), float(pct)
c = int(c * 100)
if (a, b) not in pairs:
if norbh:
continue
if c < cs:
continue
if pct < pctid:
continue
c /= 10 # This severely penalizes RBH against synteny
print("\t".join((a, b, str(c))), file=fw)
npairs += 1
fw.close()
logging.debug("Write {0} pairs to `{1}`.".format(npairs, weightsfile))
def make_ortholog(blocksfile, rbhfile, orthofile):
from jcvi.formats.base import DictFile
# Generate mapping both ways
adict = DictFile(rbhfile)
bdict = DictFile(rbhfile, keypos=1, valuepos=0)
adict.update(bdict)
fp = open(blocksfile)
fw = open(orthofile, "w")
nrecruited = 0
for row in fp:
a, b = row.split()
if b == '.':
if a in adict:
b = adict[a]
nrecruited += 1
b += "'"
print("\t".join((a, b)), file=fw)
logging.debug("Recruited {0} pairs from RBH.".format(nrecruited))
fp.close()
fw.close()
def ortholog(args):
"""
%prog ortholog species_a species_b
Run a sensitive pipeline to find orthologs between two species a and b.
The pipeline runs LAST and generate .lifted.anchors.
`--full` mode would assume 1-to-1 quota synteny blocks as the backbone of
such predictions. Extra orthologs will be recruited from reciprocal best
match (RBH).
"""
from jcvi.apps.align import last as last_main
from jcvi.compara.blastfilter import main as blastfilter_main
from jcvi.compara.quota import main as quota_main
from jcvi.compara.synteny import scan, mcscan, liftover
from jcvi.formats.blast import cscore, filter
p = OptionParser(ortholog.__doc__)
p.add_option("--dbtype", default="nucl",
choices=("nucl", "prot"),
help="Molecule type of subject database")
p.add_option("--full", default=False, action="store_true",
help="Run in full mode, including blocks and RBH")
p.add_option("--cscore", default=0.7, type="float",
help="C-score cutoff [default: %default]")
p.add_option("--dist", default=20, type="int",
help="Extent of flanking regions to search")
p.add_option("--quota", help="Quota align parameter")
p.add_option("--no_strip_names", default=False, action="store_true",
help="Do not strip alternative splicing "
"(e.g. At5g06540.1 -> At5g06540)")
p.set_dotplot_opts()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
a, b = args
dbtype = opts.dbtype
suffix = ".cds" if dbtype == "nucl" else ".pep"
abed, afasta = a + ".bed", a + suffix
bbed, bfasta = b + ".bed", b + suffix
ccscore = opts.cscore
quota = opts.quota
dist = "--dist={0}".format(opts.dist)
aprefix = afasta.split(".")[0]
bprefix = bfasta.split(".")[0]
pprefix = ".".join((aprefix, bprefix))
qprefix = ".".join((bprefix, aprefix))
last = pprefix + ".last"
if need_update((afasta, bfasta), last):
last_main([bfasta, afasta], dbtype)
if a == b:
lastself = last + ".P98L0.inverse"
if need_update(last, lastself):
filter([last, "--hitlen=0", "--pctid=98", "--inverse", "--noself"])
last = lastself
filtered_last = last + ".filtered"
if need_update(last, filtered_last):
if opts.no_strip_names:
blastfilter_main([last, "--cscore={0}".format(ccscore), "--no_strip_names"])
else:
blastfilter_main([last, "--cscore={0}".format(ccscore)])
anchors = pprefix + ".anchors"
lifted_anchors = pprefix + ".lifted.anchors"
pdf = pprefix + ".pdf"
if not opts.full:
if need_update(filtered_last, lifted_anchors):
if opts.no_strip_names:
scan([filtered_last, anchors, dist,
"--liftover={0}".format(last), "--no_strip_names"])
else:
scan([filtered_last, anchors, dist,
"--liftover={0}".format(last)])
if quota:
quota_main([lifted_anchors,
"--quota={0}".format(quota), "--screen"])
if need_update(anchors, pdf):
from jcvi.graphics.dotplot import dotplot_main
dargs = [anchors]
if opts.nostdpf:
dargs += ["--nostdpf"]
if opts.nochpf:
dargs += ["--nochpf"]
if opts.skipempty:
dargs += ["--skipempty"]
if opts.genomenames:
dargs += ["--genomenames", opts.genomenames]
dotplot_main(dargs)
return
if need_update(filtered_last, anchors):
if opts.no_strip_names:
scan([filtered_last, anchors, dist, "--no_strip_names"])
else:
scan([filtered_last, anchors, dist])
ooanchors = pprefix + ".1x1.anchors"
if need_update(anchors, ooanchors):
quota_main([anchors, "--quota=1:1", "--screen"])
lifted_anchors = pprefix + ".1x1.lifted.anchors"
if need_update((last, ooanchors), lifted_anchors):
if opts.no_strip_names:
liftover([last, ooanchors, dist, "--no_strip_names"])
else:
liftover([last, ooanchors, dist])
pblocks = pprefix + ".1x1.blocks"
qblocks = qprefix + ".1x1.blocks"
if need_update(lifted_anchors, [pblocks, qblocks]):
mcscan([abed, lifted_anchors, "--iter=1", "-o", pblocks])
mcscan([bbed, lifted_anchors, "--iter=1", "-o", qblocks])
rbh = pprefix + ".rbh"
if need_update(last, rbh):
cscore([last, "-o", rbh])
portho = pprefix + ".ortholog"
qortho = qprefix + ".ortholog"
if need_update([pblocks, qblocks, rbh], [portho, qortho]):
make_ortholog(pblocks, rbh, portho)
make_ortholog(qblocks, rbh, qortho)
def tandem_main(blast_file, cds_file, bed_file, N=3, P=50, is_self=True, \
evalue=.01, strip_name=".", ofile=sys.stderr, genefam=False):
if genefam:
N = 1e5
# get the sizes for the CDS first
f = Fasta(cds_file)
sizes = dict(f.itersizes())
# retrieve the locations
bed = Bed(bed_file)
order = bed.order
if is_self:
# filter the blast file
g = Grouper()
fp = open(blast_file)
for row in fp:
b = BlastLine(row)
query_len = sizes[b.query]
subject_len = sizes[b.subject]
if b.hitlen < min(query_len, subject_len)*P/100.:
continue
query = gene_name(b.query, strip_name)
subject = gene_name(b.subject, strip_name)
qi, q = order[query]
si, s = order[subject]
if abs(qi - si) <= N and b.evalue <= evalue:
if genefam:
g.join(query, subject)
elif q.seqid == s.seqid:
g.join(query, subject)
else:
homologs = Grouper()
fp = open(blast_file)
for row in fp:
b = BlastLine(row)
query_len = sizes[b.query]
subject_len = sizes[b.subject]
if b.hitlen < min(query_len, subject_len)*P/100.:
continue
if b.evalue > evalue:
continue
query = gene_name(b.query, strip_name)
subject = gene_name(b.subject, strip_name)
homologs.join(query, subject)
if genefam:
g = homologs
else:
g = Grouper()
for i, atom in enumerate(bed):
for x in range(1, N+1):
if all([i-x >= 0, bed[i-x].seqid == atom.seqid, \
homologs.joined(bed[i-x].accn, atom.accn)]):
leni = sizes[bed[i].accn]
lenx = sizes[bed[i-x].accn]
if abs(leni - lenx) > max(leni, lenx)*(1-P/100.):
continue
g.join(bed[i-x].accn, atom.accn)
# dump the grouper
fw = must_open(ofile, "w")
ngenes, nfamilies = 0, 0
families = []
for group in sorted(g):
if len(group) >= 2:
print(",".join(sorted(group)), file=fw)
ngenes += len(group)
nfamilies += 1
families.append(sorted(group))
longest_family = max(families, key=lambda x: len(x))
# generate reports
print("Proximal paralogues (dist=%d):" % N, file=sys.stderr)
print("Total %d genes in %d families" % (ngenes, nfamilies), file=sys.stderr)
print("Longest families (%d): %s" % (len(longest_family),
",".join(longest_family)), file=sys.stderr)
return families
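# --- Added illustration (not part of jcvi; names and behaviour here are assumptions) ---
# tandem_main relies on a union-find style ``Grouper``: gene pairs are ``join``-ed and the
# resulting single-linkage clusters are iterated over when the families are dumped above.
# A minimal stand-in with the same join/iterate semantics, shown only to clarify that step:
class _ToyGrouper(object):
    def __init__(self):
        self._group_of = {}  # member -> shared set of members
    def join(self, a, b):
        merged = self._group_of.get(a, {a}) | self._group_of.get(b, {b})
        for member in merged:
            self._group_of[member] = merged
    def __iter__(self):
        seen = []
        for group in self._group_of.values():
            if not any(group is s for s in seen):
                seen.append(group)
                yield group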
def tandem(args):
"""
%prog tandem blast_file cds_file bed_file [options]
    Find tandem gene clusters that are separated by at most N genes, based on the
    blast_file filtered by requiring alignments between any two genes to cover at
    least 50% (or a user-specified percentage) of either gene.
    A pep_file can also be used in the same manner.
"""
p = OptionParser(tandem.__doc__)
p.add_option("--tandem_Nmax", dest="tandem_Nmax", type="int", default=3,
help="merge tandem genes within distance [default: %default]")
p.add_option("--percent_overlap", type="int", default=50,
help="tandem genes have >=x% aligned sequence, x=0-100 \
[default: %default]")
p.set_align(evalue=.01)
p.add_option("--not_self", default=False, action="store_true",
help="provided is not self blast file [default: %default]")
p.add_option("--strip_gene_name", dest="sep", type="string", default=".",
help="strip alternative splicing. Use None for no stripping. \
[default: %default]")
p.add_option("--genefamily", dest="genefam", action="store_true",
help="compile gene families based on similarity [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
blast_file, cds_file, bed_file = args
N = opts.tandem_Nmax
P = opts.percent_overlap
is_self = not opts.not_self
sep = opts.sep
ofile = opts.outfile
tandem_main(blast_file, cds_file, bed_file, N=N, P=P, is_self=is_self, \
evalue=opts.evalue, strip_name=sep, ofile=ofile, genefam=opts.genefam)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1796817
|
import os
from biicode.client.dev.cmake.cmaketool import CMakeTool
from biicode.common.model.blob import Blob
from biicode.common.utils.file_utils import save_blob_if_modified, save, load_resource
from biicode.client.dev.cpp import DEV_CPP_DIR
default_cmake = """
ADD_BII_TARGETS()
###############################################################################
# HELP #
###############################################################################
#
# This CMakeLists.txt file helps define your block builds
# To learn more visit http://docs.biicode.com/c++.html
#
# To include published cmake scripts:
# 1. INCLUDE(user/block/myrecipe) # include myrecipe.cmake from remote user/block
# 2. Remember to execute bii find
# Example:
# INCLUDE(biicode/cmake/tools) # Include tools.cmake file from cmake block from "biicode" user
# ACTIVATE_CPP11(INTERFACE ${BII_BLOCK_TARGET})
#
# Useful variables:
# To be modified BEFORE the call to ADD_BII_TARGETS()
# ${BII_LIB_SRC} File list to create the library
#
# To be modified AFTER the call to ADD_BII_TARGETS()
# ${BII_BLOCK_TARGET} Interface (no files) target for convenient configuration of all
# targets in this block, as the rest of targets always depend on it
# has name in the form "user_block_interface"
# ${BII_LIB_TARGET} Target library name, usually in the form "user_block". May not exist
# if BII_LIB_SRC is empty
# ${BII_BLOCK_TARGETS} List of all targets defined in this block
# ${BII_BLOCK_EXES} List of executables targets defined in this block
# ${BII_exe_name_TARGET}: Executable target (e.g. ${BII_main_TARGET}. You can also use
# directly the name of the executable target (e.g. user_block_main)
#
# > EXAMPLE: Add include directories to all targets of this block
#
# TARGET_INCLUDE_DIRECTORIES(${BII_BLOCK_TARGET} INTERFACE myincludedir)
#
# > EXAMPLE: Link with pthread:
#
# TARGET_LINK_LIBRARIES(${BII_BLOCK_TARGET} INTERFACE pthread)
# or link against library:
# TARGET_LINK_LIBRARIES(${BII_LIB_TARGET} PUBLIC pthread)
#
# NOTE: This can be also done adding pthread to ${BII_LIB_DEPS}
# BEFORE calling ADD_BIICODE_TARGETS()
#
# > EXAMPLE: how to activate C++11
#
# IF(APPLE)
# TARGET_COMPILE_OPTIONS(${BII_BLOCK_TARGET} INTERFACE "-std=c++11 -stdlib=libc++")
# ELSEIF (WIN32 OR UNIX)
# TARGET_COMPILE_OPTIONS(${BII_BLOCK_TARGET} INTERFACE "-std=c++11")
# ENDIF(APPLE)
#
# > EXAMPLE: Set properties to target
#
# SET_TARGET_PROPERTIES(${BII_BLOCK_TARGET} PROPERTIES COMPILE_DEFINITIONS "IOV_MAX=255")
#
"""
class CPPCMakeTool(CMakeTool):
def _get_project_cmakelists(self, block_targets):
cmakelists_template = load_resource(DEV_CPP_DIR, "cmake/CMakeLists.txt")
blocks_include = []
blocks_prebuild_step = []
root_block = self.bii_paths.root_block
CMAKELISTS_INCLUDES = "BII_INCLUDE_BLOCK({path_block_name})"
CMAKELISTS_PREBUILD = "BII_PREBUILD_STEP({path_block_name})"
for block_name, block_target in block_targets.iteritems():
if block_target.is_dep:
rel_block_path = self.bii_paths.deps_relative
else:
rel_block_path = self.bii_paths.blocks_relative
if block_name == root_block:
block_path = root_block
else:
block_path = os.path.join(rel_block_path, block_target.block_name).replace('\\',
'/')
blocks_include.append(CMAKELISTS_INCLUDES.format(path_block_name=block_path))
blocks_prebuild_step.append(CMAKELISTS_PREBUILD.format(path_block_name=block_path))
biicode_env_dir = self.bii_paths.user_bii_home.replace('\\', '/')
return cmakelists_template.format(project_name=self.bii_paths.project_name,
prebuild_steps="\n".join(blocks_prebuild_step),
include_blocks="\n".join(blocks_include),
biicode_env_dir=biicode_env_dir,
blocks=self.bii_paths.blocks_relative,
deps=self.bii_paths.deps_relative,
cmake=self.bii_paths.cmake_relative,
bin=self.bii_paths.bin_relative,
lib=self.bii_paths.lib_relative,
project_root=self.bii_paths.project_root.replace('\\',
'/'))
def _create_vars_cmake_files(self, block_targets):
b = False
for block_target in block_targets.itervalues():
bii_vars_path = os.path.join(self.bii_paths.cmake, block_target.filename)
modified = save_blob_if_modified(bii_vars_path, Blob(block_target.dumps()))
b = b or modified
return b
def _create_default_blocks_cmakelists(self, block_targets):
# create default cmakelists
project_block = self.bii_paths.root_block
for block_name, block_target in block_targets.iteritems():
path_folder = self.bii_paths.deps if block_target.is_dep else self.bii_paths.blocks
if block_name == project_block:
cmakelists_path = os.path.join(self.bii_paths.project_root, "CMakeLists.txt")
else:
cmakelists_path = os.path.join(path_folder, block_name, "CMakeLists.txt")
cmakelists_path = cmakelists_path.replace('\\', '/') # replace in win32
if not os.path.exists(cmakelists_path):
save(cmakelists_path, default_cmake)
def _create_cmakelists(self, block_targets):
        '''Creates 3 files:
        CMakeLists.txt, only if it does not exist, which includes the other two files;
        bii_targets.cmake, the file containing the ADD_LIBRARY and ADD_EXECUTABLE calls, with the
        configuration of flags per target and per file;
        bii_vars.cmake, a file with variable declarations that are afterwards used in
        bii_targets.cmake'''
cmakelists_path = os.path.join(self.bii_paths.cmake, "CMakeLists.txt")
bii_macros_path = os.path.join(self.bii_paths.cmake, 'biicode.cmake')
bii_macros_content = load_resource(DEV_CPP_DIR, "cmake/biicode.cmake")
self._create_default_blocks_cmakelists(block_targets)
# following is a virtual call, may call child class method
cmakelists_content = self._get_project_cmakelists(block_targets)
a = save_blob_if_modified(cmakelists_path, Blob(cmakelists_content))
b = self._create_vars_cmake_files(block_targets)
c = save_blob_if_modified(bii_macros_path, Blob(bii_macros_content))
return a or b or c
|
StarcoderdataPython
|
3319841
|
<gh_stars>1-10
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is FasterRCNN network."""
from zeus.common import ClassFactory, ClassType
from zeus.modules.module import Module
@ClassFactory.register(ClassType.NETWORK)
class GCN(Module):
"""Create ResNet Network."""
def __init__(self, blocks=[[1, 32, 64]], kernel_size=4, gru_layers=1, gcn_layers=1, keep_prob=1,
temporal_attention=False, spatial_attention=False, adjacency_matrix=None):
super().__init__()
self.kernel_size = kernel_size
self.blocks = blocks
self.gru_layers = gru_layers
self.gcn_layers = gcn_layers
self.keep_prob = keep_prob
self.temporal_attention = temporal_attention
self.spatial_attention = spatial_attention
self.adjacency_matrix = adjacency_matrix
self.graph = 'spatial'
self.use_gcn = True
def call(self, inputs):
"""Override call function."""
import tensorflow as tf
from zeus.networks.tensorflow.gcn.layers import GCN_GRU, output_layer_gru
x, spatial_mx, temporal_mx = inputs[0], inputs[1][0], inputs[2][0]
x = tf.cast(x, tf.float32)
spatial_mx = tf.cast(spatial_mx, tf.float32)
approx = self.update_with_approximation(spatial_mx, temporal_mx)
tf.add_to_collection(name='graph_kernel', value=approx)
for i, channels in enumerate(self.blocks):
x = GCN_GRU(x, self.kernel_size, channels, self.gru_layers, self.gcn_layers, self.keep_prob,
self.temporal_attention, self.spatial_attention)
return output_layer_gru(x, 'output_layer')
def update_with_approximation(self, spatial_mx, temporal_mx):
"""Update with approximation.
:param Ws: Spatial proximity adjacency matrix.
:param Wt: Functional similarity adjacency matrix.
:param W: Adjacency matrix (in the other cases: spatial OR temporal)
:param n_route: Number of base stations.
"""
from zeus.networks.tensorflow.gcn.layers import scaled_laplacian_tensor, cheb_poly_approx_tensor
laplacian = scaled_laplacian_tensor(spatial_mx)
approx = cheb_poly_approx_tensor(laplacian, self.kernel_size)
return approx
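# --- Added illustration (not part of the original module) ---
# ``cheb_poly_approx_tensor`` above is expected to follow the usual ChebNet construction:
# given the scaled Laplacian L~ = 2L/lambda_max - I, build the Chebyshev basis
# T_0 = I, T_1 = L~, T_k = 2 * L~ @ T_{k-1} - T_{k-2} and concatenate the first
# ``kernel_size`` terms. A minimal NumPy reference of that recursion (assumed semantics,
# dense matrices only), for orientation:
def _cheb_poly_reference(scaled_laplacian, kernel_size):
    import numpy as np
    laplacian = np.asarray(scaled_laplacian, dtype=float)
    polys = [np.eye(laplacian.shape[0]), laplacian]
    while len(polys) < kernel_size:
        polys.append(2.0 * laplacian @ polys[-1] - polys[-2])
    return np.concatenate(polys[:kernel_size], axis=1)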
|
StarcoderdataPython
|
15146
|
<gh_stars>100-1000
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qf_lib.backtesting.contract.contract import Contract
from qf_lib.backtesting.order.execution_style import ExecutionStyle
from qf_lib.backtesting.order.time_in_force import TimeInForce
class Order(object):
"""
Order generated by a strategy, then processed by PositionSizer.
Finally executed by ExecutionHandler.
"""
def __init__(self, contract: Contract, quantity: int, execution_style: ExecutionStyle,
time_in_force: TimeInForce, order_state=""):
"""
This __init__ shouldn't be used anywhere beyond this module. Use OrderFactory for creating Order objects.
"""
self.id = None # type:int
self.contract = contract
self.quantity = quantity
self.time_in_force = time_in_force
self.execution_style = execution_style
self.order_state = order_state
def __str__(self):
return '\nOrder:\n' \
'\tid: {}\n' \
'\tcontract: {}\n' \
'\tquantity: {}\n' \
'\ttif: {}\n' \
'\texecution_style: {}\n' \
'\torder_state: {}'.format(self.id, str(self.contract), self.quantity, str(self.time_in_force),
self.execution_style, self.order_state)
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, Order):
return False
        # one Order has an id and the other doesn't
if (self.id is None) != (other.id is None):
return False
if self.id is not None and other.id == self.id:
return True
        # when both ids are None -> compare the values
return (self.contract, self.quantity, self.time_in_force, self.execution_style) == \
(other.contract, other.quantity, other.time_in_force, other.execution_style)
def __hash__(self):
return hash((self.contract, self.quantity, self.time_in_force, self.execution_style))
|
StarcoderdataPython
|
183557
|
# simple benchmark
from generator import *
import time
# number of simulated photons
N = int(5E7)
t1 = time.time()
spectrum = Etalon()
spectrograph = MaroonX()
generate_rv_series(spectrograph, spectrum, [0.], photons_per_spectrum=N)
t2 = time.time()
print("Total time for tracing: {:.2f} s".format(t2-t1))
print("Simulating {:.2E} photons per second".format(N/(t2-t1)))
|
StarcoderdataPython
|
3335053
|
"""An implementation of SegNet (and Bayesian alternative)."""
from keras.applications.vgg16 import VGG16
from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import Dropout
from keras.layers import Input
from keras.layers import Lambda
from keras.models import Model
from keras.optimizers import SGD
from keras.regularizers import l2
from .layers import LocalContrastNormalization
from .layers import MemorizedMaxPooling2D
from .layers import MemorizedUpsampling2D
from .losses import build_categorical_crossentropy
from .metrics import build_categorical_accuracy
# static arguments used for all convolution layers in SegNet
_CONV = dict(
kernel_initializer='he_uniform',
kernel_regularizer=l2(5e-4),
)
def _conv_bn_relu(x, num_filters: int):
"""
Append a convolution + batch normalization + ReLu block to an input tensor.
Args:
        x: the input tensor to append this convolution block to
num_filters: the number of filters in the convolutional layer
Returns:
a tensor with convolution + batch normalization + ReLu block added
"""
x = Conv2D(num_filters, kernel_size=(3, 3), padding='same', **_CONV)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
return x
def _encode(x, nums_filters: list):
"""
    Append an encoder block with a given size and number of filters.
    Args:
        x: the input tensor to append this encoder block to
        nums_filters: a list of the number of filters for each block
Returns:
a tuple of:
- a tensor with convolution blocks followed by max pooling
- the pooling layer to get indexes from for up-sampling
"""
# loop over the filters list to apply convolution + BN + ReLu blocks
for num_filters in nums_filters:
x = _conv_bn_relu(x, num_filters)
# create a max pooling layer to keep indexes from for up-sampling later
pool = MemorizedMaxPooling2D(pool_size=(2, 2), strides=(2, 2))
# pass the block output through the special pooling layer
x = pool(x)
# return the output tensor and reference to pooling layer to get indexes
return x, pool
def _decode(x, pool: MemorizedMaxPooling2D, nums_filters: list):
"""
Append a decoder block with a given size and number of filters.
Args:
x: the input tensor to append this decoder block to
pool: the corresponding memorized pooling layer to reference indexes
        nums_filters: a list of the number of filters for each block
Returns:
a tensor with up-sampling followed by convolution blocks
"""
# up-sample using the max pooling indexes
x = MemorizedUpsampling2D(pool=pool)(x)
# loop over the filters list to apply convolution + BN + ReLu blocks
for num_filters in nums_filters:
x = _conv_bn_relu(x, num_filters)
return x
def _classify(x, num_classes: int):
"""
Add a Softmax classification block to an input CNN.
Args:
x: the input tensor to append this classification block to (CNN)
num_classes: the number of classes to predict with Softmax
Returns:
a tensor with dense convolution followed by Softmax activation
"""
# dense convolution (1 x 1) to filter logits for each class
x = Conv2D(num_classes, kernel_size=(1, 1), padding='valid', **_CONV)(x)
# Softmax activation to convert the logits to probability vectors
x = Activation('softmax')(x)
return x
def _transfer_vgg16_encoder(model: Model) -> None:
"""
Transfer trained VGG16 weights (ImageNet) to a SegNet encoder.
Args:
model: the SegNet model to transfer encoder weights to
Returns:
None
"""
# load the trained VGG16 model using ImageNet weights
vgg16 = VGG16(include_top=False)
# extract all the convolutional layers (encoder layers) from VGG16
vgg16_conv = [layer for layer in vgg16.layers if isinstance(layer, Conv2D)]
# extract all convolutional layers from SegNet
model_conv = [layer for layer in model.layers if isinstance(layer, Conv2D)]
# iterate over the VGG16 layers to replace the SegNet encoder weights
for idx, layer in enumerate(vgg16_conv):
model_conv[idx].set_weights(layer.get_weights())
def segnet(image_shape: tuple, num_classes: int,
class_weights=None,
lcn: bool=True,
dropout_rate: float=None,
optimizer=SGD(lr=0.1, momentum=0.9),
pretrain_encoder: bool=True,
) -> Model:
"""
Build a SegNet model for the given image shape.
Args:
image_shape: the image shape to create the model for
num_classes: the number of classes to segment for (e.g. c)
class_weights: the weights for each class
lcn: whether to use local contrast normalization on inputs
dropout_rate: the dropout rate to use for permanent dropout
optimizer: the optimizer for training the network
pretrain_encoder: whether to initialize the encoder from VGG16
Returns:
a compiled model of SegNet
"""
# ensure the image shape is legal for the architecture
div = int(2**5)
for dim in image_shape[:-1]:
# raise error if the dimension doesn't evenly divide
if dim % div:
msg = 'dimension ({}) must be divisible by {}'.format(dim, div)
raise ValueError(msg)
# the input block of the network
inputs = Input(image_shape, name='SegNet_input')
# assume 8-bit inputs and convert to floats in [0,1]
x = Lambda(lambda x: x / 255.0, name='pixel_norm')(inputs)
# apply contrast normalization if set
if lcn:
x = LocalContrastNormalization()(x)
# if no dropout rate, make the lambda return the input
if dropout_rate is None:
dropout = lambda x: x
# if there is a dropout rate, make lambda return the output from Dropout
else:
dropout = lambda x: Dropout(dropout_rate)(x, training=True)
# encoder
x, pool_1 = _encode(x, 2 * [64])
x, pool_2 = _encode(x, 2 * [128])
x, pool_3 = _encode(x, 3 * [256])
x = dropout(x)
x, pool_4 = _encode(x, 3 * [512])
x = dropout(x)
x, pool_5 = _encode(x, 3 * [512])
x = dropout(x)
# decoder
x = _decode(x, pool_5, 3 * [512])
x = dropout(x)
x = _decode(x, pool_4, [512, 512, 256])
x = dropout(x)
x = _decode(x, pool_3, [256, 256, 128])
x = dropout(x)
x = _decode(x, pool_2, [128, 64])
x = _decode(x, pool_1, [64])
# classifier
x = _classify(x, num_classes)
# compile the model
model = Model(inputs=[inputs], outputs=[x], name='SegNet')
model.compile(
optimizer=optimizer,
loss=build_categorical_crossentropy(class_weights),
metrics=[build_categorical_accuracy(weights=class_weights)],
)
# transfer weights from VGG16
if pretrain_encoder:
_transfer_vgg16_encoder(model)
return model
# explicitly define the outward facing API of this module
__all__ = [segnet.__name__]
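# --- Added usage sketch (illustrative only; the shape and class count are assumptions) ---
# Each of the five pooling stages halves the spatial resolution, so the input height and
# width must be divisible by 2**5 = 32.
def _example_build_segnet():
    # build an 11-class SegNet on 224x224 RGB inputs without loading VGG16 weights
    return segnet(image_shape=(224, 224, 3), num_classes=11, pretrain_encoder=False)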
|
StarcoderdataPython
|
32666
|
import pytest
@pytest.mark.parametrize("cli_options", [
('-k', 'notestdeselect',),
])
def test_autoexecute_yml_keywords_skipped(testdir, cli_options):
yml_file = testdir.makefile(".yml", """
---
markers:
- marker1
- marker2
---
- provider: python
type: assert
expression: "1"
""")
assert yml_file.basename.startswith('test_')
assert yml_file.basename.endswith('.yml')
result = testdir.runpytest(*cli_options)
result.assert_outcomes(passed=0, failed=0, error=0)
# Deselected, not skipped. See #3427
# result.assert_outcomes(skipped=1)
|
StarcoderdataPython
|
3392125
|
import itertools
import logging
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from sklearn import metrics
from torch.utils.data import DataLoader, Dataset
from torchvision import models
from model import CompatModel
from polyvore_dataset import CategoryDataset, TripletDataset
from Resnet_18 import resnet18
# Hyperparameters
img_size = 112
emb_size = 64
device = torch.device("cuda")
# Helper functions
def test_compatibility_auc(test_auc_dataset, embeddingnet):
""" Compute AUC of classifying compatibile and incompatible outfits
"""
scores = []
targets = []
for i in range(len(test_auc_dataset)):
print("#{}/{}\r".format(i, len(test_auc_dataset)), end="", flush=True)
conditions = torch.tensor(
[
[-1, 0, 1, 2, 3],
[0, -1, 4, 5, 6],
[1, 4, -1, 7, 8],
[2, 5, 7, -1, 9],
[3, 6, 8, 9, -1],
],
requires_grad=False,
).to(device)
images, names, offsets, set_id, labels, is_compat = test_auc_dataset[i]
images = images.to(device)
labels = list(map(lambda e: 0 if "mean" in e else 1, labels))
outfit_score = calc_outfit_score(images, labels, conditions, embeddingnet)
scores.append(outfit_score)
targets.append(is_compat)
targets, scores = np.array(targets), np.array(scores)
auc = metrics.roc_auc_score(1 - targets, scores)
print()
return auc
def test_fitb_quesitons(test_fitb_dataset, embeddingnet):
""" Compute accuracy of correctly answering the fill-in-the-blank questions
"""
is_correct = []
for i in range(len(test_fitb_dataset)):
print("#{}/{}\r".format(i, len(test_fitb_dataset)), end="", flush=True)
outfit_scores = []
conditions = torch.tensor(
[
[-1, 0, 1, 2, 3],
[0, -1, 4, 5, 6],
[1, 4, -1, 7, 8],
[2, 5, 7, -1, 9],
[3, 6, 8, 9, -1],
],
requires_grad=False,
).to(device)
items, labels, question_part, question_id, options, option_labels = test_fitb_dataset.get_fitb_quesiton(i)
question_part = {
"upper": 0,
"bottom": 1,
"shoe": 2,
"bag": 3,
"accessory": 4,
}.get(question_part)
images = items.to(device)
labels = list(map(lambda e: 0 if "mean" in e else 1, labels))
outfit_score = calc_outfit_score(images, labels, conditions, embeddingnet)
outfit_scores.append(outfit_score)
        # Calculate the distance for each option
for option in options:
images[question_part] = option
outfit_score = calc_outfit_score(images, labels, conditions, embeddingnet)
outfit_scores.append(outfit_score)
# The original outfit should have lowest distance
if min(outfit_scores) == outfit_scores[0]:
is_correct.append(True)
else:
is_correct.append(False)
print()
return sum(is_correct) / len(is_correct)
def calc_outfit_score(images, labels, conditions, embeddingnet):
"""Calculate outfit score by calculate mean of all pair distance n among this outfit.
Args:
images: [5, 3, 224, 224] torch.FloatTensor
labels: list of 5 element where 0 for mean_img, 1 for original image
conditions: [5, 5] torch.tensor store condition for reach combination
embeddingnet: A metric network get embedding for image
"""
inputs = []
conds = []
mask = []
outfit_score = 0.0
for a, b in itertools.combinations(range(0, 5), 2):
if labels[a] == 0 or labels[b] == 0:
mask.append(0)
else:
mask.append(1)
c = conditions[a][b]
inputs.append(images[a])
inputs.append(images[b])
conds.append(c)
conds.append(c)
inputs = torch.stack(inputs)
conds = torch.stack(conds)
with torch.no_grad():
embs = embeddingnet(inputs, conds)[0]
embs = embs.reshape(10, 2, -1)
embs = F.normalize(embs, dim=2)
dist = F.pairwise_distance(embs[:, 0, :], embs[:, 1, :])
mask = torch.tensor(mask).float().to(device)
outfit_score = torch.sum(dist * mask) / mask.sum()
return outfit_score.item()
def main():
# Dataloader
transform = torchvision.transforms.Compose(
[
torchvision.transforms.Scale((img_size, img_size)),
torchvision.transforms.CenterCrop(112),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
train_dataset = TripletDataset(
root_dir="../../data/images/",
data_dir="../../data/",
transform=transform
)
test_auc_dataset = CategoryDataset(
root_dir="../../data/images/",
data_dir="../../data/",
transform=transform,
use_mean_img=True,
data_file="test_no_dup_with_category_3more_name.json",
neg_samples=True,
)
# Model
tnet = CompatModel(
emb_size,
n_conditions=len(train_dataset.conditions) // 2,
learnedmask=True,
prein=False,
)
tnet.load_state_dict(torch.load("./csn_model_best.pth"))
tnet = tnet.to(device)
tnet.eval()
embeddingnet = tnet.embeddingnet
# Test
auc = test_compatibility_auc(test_auc_dataset, embeddingnet)
print("AUC: {:.4f}".format(auc))
fitb_accuracy = test_fitb_quesitons(test_auc_dataset, embeddingnet)
print("Fitb Accuracy: {:.4f}".format(fitb_accuracy))
# AUC: 0.8413 ACC: 0.5656
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
4812740
|
""" Common tools """
__version__ = "2.0.0"
from .config import Config, parse_config # noqa
from .errors import Error, InvalidConfig # noqa
from .executor import Task, run_sequence, ExecutorFailed # noqa
from .groups import GroupList, Group # noqa
from .groups import GroupNotFound, UnknownElement as UnknownGroupElement # noqa
from .repo import Repo, Remote, Copy # noqa
from .manifest import Manifest # noqa
from .manifest import load as load_manifest # noqa
from .workspace import Workspace # noqa
|
StarcoderdataPython
|
41401
|
<reponame>prachir1501/NeuralDater<filename>helper.py<gh_stars>10-100
import numpy as np, sys, unicodedata, requests, os, random, pdb, requests, json, gensim
import matplotlib.pyplot as plt, uuid, time, argparse, pickle, operator
import logging, logging.config, itertools, pathlib
import scipy.sparse as sp
from collections import defaultdict as ddict
from random import randint
from pprint import pprint
from sklearn.metrics import precision_recall_fscore_support
np.set_printoptions(precision=4)
def checkFile(filename):
"""
Check whether file is present or not
Parameters
----------
filename: Path of the file to check
	Returns
	-------
	bool: True if the file exists, False otherwise
"""
return pathlib.Path(filename).is_file()
def getEmbeddings(embed_loc, wrd_list, embed_dims):
"""
Gives embedding for each word in wrd_list
Parameters
----------
embed_loc: Path to embedding file
wrd_list: List of words for which embedding is required
embed_dims: Dimension of the embedding
Returns
-------
embed_matrix: (len(wrd_list) x embed_dims) matrix containing embedding for each word in wrd_list in the same order
"""
embed_list = []
model = gensim.models.KeyedVectors.load_word2vec_format(embed_loc, binary=False)
for wrd in wrd_list:
if wrd in model.vocab: embed_list.append(model.word_vec(wrd))
else: embed_list.append(np.random.randn(embed_dims))
return np.array(embed_list, dtype=np.float32)
def set_gpu(gpus):
"""
Sets the GPU to be used for the run
Parameters
----------
gpus: List of GPUs to be used for the run
Returns
-------
"""
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = gpus
def debug_nn(res_list, feed_dict):
"""
Function for debugging Tensorflow model
Parameters
----------
res_list: List of tensors/variables to view
feed_dict: Feed dict required for getting values
Returns
-------
	Evaluates the given tensors/variables and drops into a pdb session for inspection
"""
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
summ_writer = tf.summary.FileWriter("tf_board/debug_nn", sess.graph)
res = sess.run(res_list, feed_dict = feed_dict)
pdb.set_trace()
def get_logger(name, log_dir, config_dir):
"""
Creates a logger object
Parameters
----------
name: Name of the logger file
log_dir: Directory where logger file needs to be stored
config_dir: Directory from where log_config.json needs to be read
Returns
-------
A logger object which writes to both file and stdout
"""
config_dict = json.load(open( config_dir + 'log_config.json'))
config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')
logging.config.dictConfig(config_dict)
logger = logging.getLogger(name)
std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logging.Formatter(std_out_format))
logger.addHandler(consoleHandler)
return logger
def partition(inp_list, n):
"""
	Partitions a given list into n equal chunks
Parameters
----------
	inp_list: List to be split
n: Number of equal partitions needed
Returns
-------
Splits inp_list into n equal chunks
"""
division = len(inp_list) / float(n)
return [ inp_list[int(round(division * i)): int(round(division * (i + 1)))] for i in range(n) ]
def getChunks(inp_list, chunk_size):
"""
Splits inp_list into lists of size chunk_size
Parameters
----------
	inp_list: List to be split
chunk_size: Size of each chunk required
Returns
-------
	Chunks of inp_list, each of size chunk_size; the last one can be smaller (leftover data)
"""
return [inp_list[x:x+chunk_size] for x in range(0, len(inp_list), chunk_size)]
def mergeList(list_of_list):
"""
Merges list of list into a list
Parameters
----------
list_of_list: List of list
Returns
-------
A single list (union of all given lists)
"""
return list(itertools.chain.from_iterable(list_of_list))
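# --- Added usage sketch (illustrative only; not part of the original helper module) ---
def _list_helpers_example():
    # partition splits a list into n roughly equal parts
    assert partition([1, 2, 3, 4, 5, 6], 3) == [[1, 2], [3, 4], [5, 6]]
    # getChunks splits a list into fixed-size chunks; the last chunk may be smaller
    assert getChunks([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]
    # mergeList flattens a list of lists by concatenation
    assert mergeList([[1, 2], [3], [4, 5]]) == [1, 2, 3, 4, 5]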
|
StarcoderdataPython
|
1667492
|
<filename>ddi_search_engine/Bio/config/FormatRegistry.py
# Copyright 2002 by <NAME>, <NAME>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# This is based on some older code by Andrew Dalke.
"""Implements a Registry to store Martel-type format expressions.
Classes:
FormatRegistry Holds Biopython formats in a dictionary-like interface.
FormatObject Describes a Biopython file format.
FormatGroup Describes a group of Biopython file formats.
"""
# Private Functions:
# _parses_file Return whether an expression can parse a file.
# _parses_string Return whether an expression can parse a string.
# _normalize_expression Turn an expression or path into an expression.
# _load_first_existing Return the first format that loads successfully.
# _load_expression Load a Martel expression.
# _load_object Load a Python object.
from Bio.config.Registry import *
from Bio.config import _support
class FormatRegistry(Registry):
"""This implements a dictionary-like interface to Biopython file
formats.
Methods:
find_builder Find a builder that converts from a format to an object.
find_writer Find a writer that can write an object to a format.
"""
def __init__(self, name, load_path=None,
builder_path="Bio.builders", writer_path="Bio.writers"):
Registry.__init__(self, name, load_path=load_path)
self._builder_path = builder_path
self._writer_path = writer_path
def normalize(self, name_or_format): # XXX appropriate?
if isinstance(name_or_format, type("")):
# It's a name
return self[name_or_format]
return name_or_format
def _build_parent_path(self, format, visited=None):
if visited is None:
visited = {}
if visited.has_key(format.name):
return []
format_list = [format]
for parent in format._parents:
format_list.extend(self._build_parent_path(parent, visited))
return format_list
def _build_child_path(self, format, visited=None):
if visited is None:
visited = {}
if visited.has_key(format.name):
return []
format_list = [format]
for child in getattr(format, 'objs', []):
format_list.extend(self._build_child_path(child, visited))
return format_list
def find_builder(self, from_format, to_io):
# The directory of the builders is organized according to:
# builders/io/format
basemodulename = "%s.%s" % (self._builder_path, to_io.abbrev)
# Search through the formats in the order of most specific to
# most general.
all_formats = self._build_parent_path(from_format)
for format in all_formats:
name = basemodulename + "." + format.abbrev
module = _support.safe_load_module(name)
if module is not None:
break
else:
raise TypeError("Cannot find builder for %r" % to_io.abbrev)
return module.make_builder()
def find_writer(self, from_io, to_format, outfile):
# The directory of the writers is organized according to:
# writers/io/format
basemodulename = "%s.%s" % (self._writer_path, from_io.abbrev)
# Search through the formats in the order of most general to
# most specific.
all_formats = self._build_child_path(to_format)
for format in all_formats:
name = basemodulename + "." + format.abbrev
module = _support.safe_load_module(name)
if module is not None:
break
else:
raise TypeError("Cannot find writer for %r" % from_io.abbrev)
return module.make_writer(outfile)
formats = FormatRegistry("formats", "Bio.formatdefs")
class FormatObject(RegisterableObject):
"""This object stores Biopython file formats and provides methods
to work on them.
Methods:
identify Identify the format at a URL.
identifyFile Identify the format of a file.
identifyString Identify the format of a string.
make_parser Make a parser that can parse the format.
make_iterator Make an iterator over files of this format.
"""
def __init__(self, name, expression, abbrev=None, doc=None,
filter=None, multirecord=1):
"""FormatObject(name, expression[, abbrev][, doc]
[, filter][, multirecord])
name is the name of the object, abbrev is an abbreviation for
the name, and doc is some documentation describing the object.
expression is a Martel.Expression that can parse this format.
filter is an optional Martel.Expression that can be used to
quickly determine whether some input is parseable by this
format.
multirecord is either 0/1 indicating whether this format can
be used to parse multiple records. By default, it is 1.
"""
import operator
RegisterableObject.__init__(self, name, abbrev, doc)
self.expression = _normalize_expression(expression)
self.filter = _normalize_expression(filter) or self.expression
self.filter = _support.make_cached_expression(self.filter)
self.multirecord = operator.truth(multirecord)
self._parser_cache = {}
self._iterator_cache = {}
self._parents = []
def identifyFile(self, infile, debug_level=0):
"""S.identifyFile(infile[, debug_level]) -> FormatObject or None"""
if _parses_file(self.filter, infile, debug_level):
return self
return None
def identifyString(self, s, debug_level=0):
"""S.identifyString(s[, debug_level]) -> FormatObject or None"""
if _parses_string(self.filter, s, debug_level):
return self
return None
def identify(self, source, debug_level=0):
"""S.identify(source[, debug_level]) -> FormatObject or None"""
source = ReseekFile.prepare_input_source(source)
f = source.getCharacterStream() or source.getByteStream()
return self.identifyFile(f, debug_level)
def make_parser(self, select_names=None, debug_level=0):
"""S.make_parser([select_names][, debug_level]) -> parser"""
if select_names is not None:
select_names = list(select_names)
select_names.sort()
key = tuple(select_names), debug_level
else:
key = None, debug_level
if not self._parser_cache.has_key(key):
import Martel
exp = self.expression
if select_names is not None:
exp = Martel.select_names(exp, select_names)
p = exp.make_parser(debug_level = debug_level)
self._parser_cache[key] = p
return self._parser_cache[key].copy()
def make_iterator(self, tag="record", select_names=None, debug_level=0):
"""S.make_iterator([tag][, select_names][, debug_level]) -> iterator"""
if select_names is not None:
select_names = list(select_names)
select_names.sort()
key = tuple(select_names), debug_level
else:
key = None, debug_level
if not self._iterator_cache.has_key(key):
import Martel
exp = self.expression
if select_names is not None:
exp = Martel.select_names(exp, select_names)
p = exp.make_iterator(tag, debug_level = debug_level)
self._iterator_cache[key] = p
return self._iterator_cache[key].copy()
class FormatGroup(RegisterableGroup):
"""This object holds a group of FormatObjects.
Methods:
identify Identify the format at a URL.
identifyFile Identify the format of a file.
identifyString Identify the format of a string.
"""
def __init__(self, name, abbrev=None, filter=None, multirecord=1):
"""FormatGroup(name[, abbrev][, filter][, multirecord])
name is the name of the object, abbrev is an abbreviation for
the name.
filter is an optional Martel.Expression that can be used to
quickly determine whether some input is parseable by this
group.
multirecord is either 0/1 indicating whether this format can
be used to parse multiple records. By default, it is 1.
"""
RegisterableGroup.__init__(self, name, abbrev, None)
self.filter = _normalize_expression(filter)
if filter is not None:
self.filter = _support.make_cached_expression(self.filter)
self.multirecord = multirecord
self._parents = []
def identifyFile(self, infile, debug_level=0):
"""S.identifyFile(infile[, debug_level]) -> FormatObject or None"""
# See if the filter test weeds things out
if self.filter:
if not _parses_file(self.filter, infile, debug_level):
return None
for obj in self.objs:
format = obj.identifyFile(infile, debug_level=debug_level)
if format is not None:
return format
return None
def identifyString(self, s, debug_level=0):
"""S.identifyString(s[, debug_level]) -> FormatObject or None"""
from StringIO import StringIO
return self.identifyFile(StringIO(s), debug_level)
def identify(self, source, debug_level=0):
"""S.identify(source[, debug_level]) -> FormatObject or None"""
source = ReseekFile.prepare_input_source(source)
f = source.getCharacterStream() or source.getByteStream()
return self.identifyFile(f, debug_level)
def add(self, obj, *args, **keywds):
import weakref
RegisterableGroup.add(self, obj, *args, **keywds)
obj._parents.append(weakref.proxy(self))
def _parses_file(expression, infile, debug_level):
# Return a boolean indicating whether expression can parse infile.
from Bio import StdHandler
from Martel import Parser
parser = expression.make_parser(debug_level)
handler = StdHandler.RecognizeHandler()
parser.setErrorHandler(handler)
parser.setContentHandler(handler)
pos = infile.tell()
try:
try:
parser.parseFile(infile)
except Parser.ParserException:
pass
finally:
infile.seek(pos)
return handler.recognized
def _parses_string(expression, s, debug_level):
from StringIO import StringIO
    return _parses_file(expression, StringIO(s), debug_level)
def _normalize_expression(expression_or_path):
if expression_or_path is None:
return None
if type(expression_or_path) != type(""):
return expression_or_path
return _load_expression(expression_or_path)
def _load_expression(path):
from Martel import Expression
x = _load_object(path)
if x is not None:
if not isinstance(x, Expression.Expression):
try:
klass = x.__class__.__name__
except AttributeError:
klass = type(x)
raise TypeError("%r should be a Martel Expression but " \
"is a %r" % (path, klass))
return x
# Expression not found; make a useful error message
msg = "Could not find %r\n" % (path,)
msg = msg + "(You may need to add the top-level module to the PYTHONPATH)"
raise TypeError(msg)
def _load_object(path):
terms = path.split(".")
s = terms[0]
# Import all the needed modules
# (Don't know which are modules and which are classes, so simply
# stop when imports fail.)
# The order of appends is correct, since the last element cannot
# be a module.
x = __import__(s)
prev_term = s
for term in terms[1:]:
try:
__import__(s)
except SyntaxError, exc:
## raise SyntaxError("%s during import of %r" % (exc, s)), \
## None, sys.exc_info()[2]
raise
except ImportError, exc:
# This is the only way I know to tell if the module
# could not be loaded because it doesn't exist.
error_text = str(exc)
if error_text.find("No module named %s" % prev_term) == -1:
raise
break
if not term:
raise TypeError("There's a '.' in the wrong place: %r" % \
(path,))
s = s + "." + term
prev_term = term
# Get the requested object
s = terms[0]
for term in terms[1:]:
try:
x = getattr(x, term)
except AttributeError:
raise AttributeError("%s object (%r) has no attribute %r" % \
(type(x).__name__, s, term))
s = s + "." + term
return x
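# Added note (illustrative): _load_object walks a dotted path, importing modules as far as
# possible and then following the remaining components with getattr, e.g.
#   _load_object("os.path.join")  ->  the os.path.join function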
def _load_first_existing(basemodulename, possible_formats):
for format in possible_formats:
try:
module = _support.load_module(basemodulename + "." + format.abbrev)
except ImportError, exc:
# This is the only way I know to tell if the module
# could not be loaded because it doesn't exist.
error_text = str(exc)
if error_text.find("No module named %s" % format.abbrev) == -1:
raise
continue
return module
return None
|
StarcoderdataPython
|
138882
|
import unittest
from models.readingtip import ReadingTip
from models.tag import Tag
from models.user import User
class TestReadingTip(unittest.TestCase):
def setUp(self):
self.user = User("maija", "jahph5Ie")
def test_constructor_sets_fields_correctly(self):
tags = [Tag("kirjat"), Tag("maksulliset")]
readingtip = ReadingTip("Hyvä kirja", "https://kirjakauppa.fi/123", self.user, tags)
self.assertEqual(readingtip.title, "Hyvä kirja")
self.assertEqual(readingtip.link, "https://kirjakauppa.fi/123")
self.assertEqual(readingtip.tags[0].name, "kirjat")
self.assertEqual(readingtip.tags[1].name, "maksulliset")
def test_constructor_adds_http(self):
tags = [Tag("kirjat")]
readingtip = ReadingTip("Keskinkertainen kirja", "kirjakauppa.fi/124", self.user, tags)
self.assertEqual(readingtip.link, "http://kirjakauppa.fi/124")
def test_constructor_preserves_http(self):
tags = [Tag("kirjat")]
readingtip = ReadingTip("Huono kirja", "http://kirjakauppa.fi/125", self.user, tags)
self.assertEqual(readingtip.link, "http://kirjakauppa.fi/125")
|
StarcoderdataPython
|
1760230
|
# Generated by Django 2.2.2 on 2019-06-26 14:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('favouritesapi', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='category',
name='category_count',
field=models.IntegerField(default=0, editable=False),
),
]
|
StarcoderdataPython
|
3384787
|
from rest_framework.response import Response
from rest_framework import views
from .utils import searcher
class SearchView(views.APIView):
def get(self, request, format=None):
query_str = request.query_params.get('q', None)
if not query_str:
return Response(status=400, data=dict(message='Неверный запрос'))
return searcher(query_str, request)
|
StarcoderdataPython
|
1626588
|
<filename>AtC_Beg_Con_071-080/ABC078/B.py<gh_stars>0
x, y, z = map(int, input().split())
i = 0
while x - (y + z) * i > 0:
i += 1
t = x - (y + z) * (i - 1)
if t >= z:
print(i - 1)
else:
print(i - 2)
|
StarcoderdataPython
|
157724
|
<filename>libweasyl/libweasyl/alembic/versions/8e98a1be126e_add_index_on_submission_popularity_score.py
"""Add index on submission popularity score
Revision ID: 8e98a1be126e
Revises: <PASSWORD>
Create Date: 2019-10-24 17:06:22.092041
"""
# revision identifiers, used by Alembic.
revision = '8e98a1be126e'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_index(
'ind_submission_score',
'submission',
[
sa.text("""(
log(favorites + 1)
+ log(page_views + 1) / 2
+ unixtime / 180000.0
)"""),
],
unique=False,
postgresql_where=sa.text("favorites IS NOT NULL"),
)
def downgrade():
op.drop_index('ind_submission_score', table_name='submission')
|
StarcoderdataPython
|
1754138
|
# Copyright (c) 2019-2020 <NAME>
# License: MIT License
# Created 2019-02-15
from typing import TYPE_CHECKING, Tuple, Sequence, Iterable, cast, List, Union
import array
import copy
from contextlib import contextmanager
from ezdxf.math import Vector, Matrix44
from ezdxf.math.transformtools import OCSTransform, NonUniformScalingError
from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass, XType
from ezdxf.lldxf.const import SUBCLASS_MARKER, DXF2000, LWPOLYLINE_CLOSED
from ezdxf.lldxf.tags import Tags
from ezdxf.lldxf.types import DXFTag, DXFVertex
from ezdxf.lldxf.packedtags import VertexArray
from ezdxf.explode import virtual_lwpolyline_entities, explode_entity
from ezdxf.query import EntityQuery
from .dxfentity import base_class, SubclassProcessor
from .dxfgfx import DXFGraphic, acdb_entity
from .factory import register_entity
if TYPE_CHECKING:
from ezdxf.eztypes import TagWriter, Drawing, Vertex, DXFNamespace, UCS, Line, Arc, BaseLayout
__all__ = ['LWPolyline']
LWPointType = Tuple[float, float, float, float, float]
FORMAT_CODES = frozenset('xysebv')
DEFAULT_FORMAT = 'xyseb'
LWPOINTCODES = (10, 20, 40, 41, 42)
# Order doesn't matter, not valid for AutoCAD:
# If tag 90 is not the first TAG, AutoCAD does not close the polyline, when the `close` flag is set.
acdb_lwpolyline = DefSubclass('AcDbPolyline', {
'count': DXFAttr(90, xtype=XType.callback, getter='__len__'),
# always return actual length and set tag 90
'elevation': DXFAttr(38, default=0, optional=True),
'thickness': DXFAttr(39, default=0, optional=True),
'flags': DXFAttr(70, default=0),
'const_width': DXFAttr(43, optional=True),
'extrusion': DXFAttr(210, xtype=XType.point3d, default=Vector(0, 0, 1), optional=True),
# 10, 20 : Vertex x, y
# 91: vertex identifier ???
# 40, 41, 42: start width, end width, bulge
})
@register_entity
class LWPolyline(DXFGraphic):
""" DXF LWPOLYLINE entity """
DXFTYPE = 'LWPOLYLINE'
DXFATTRIBS = DXFAttributes(base_class, acdb_entity, acdb_lwpolyline)
MIN_DXF_VERSION_FOR_EXPORT = DXF2000
def __init__(self, doc: 'Drawing' = None):
super().__init__(doc)
self.lwpoints = LWPolylinePoints()
def _copy_data(self, entity: 'LWPolyline') -> None:
""" Copy lwpoints. """
entity.lwpoints = copy.deepcopy(self.lwpoints)
def load_dxf_attribs(self, processor: SubclassProcessor = None) -> 'DXFNamespace':
"""
Adds subclass processing for AcDbPolyline, requires previous base class and AcDbEntity processing by parent
class.
"""
dxf = super().load_dxf_attribs(processor)
if processor:
tags = processor.load_dxfattribs_into_namespace(dxf, acdb_lwpolyline)
tags = self.load_vertices(tags)
if len(tags) and not processor.r12:
processor.log_unprocessed_tags(tags, subclass=acdb_lwpolyline.name)
return dxf
def load_vertices(self, tags: 'Tags') -> Tags:
self.lwpoints, unprocessed_tags = LWPolylinePoints.from_tags(tags)
return unprocessed_tags
def preprocess_export(self, tagwriter: 'TagWriter') -> bool:
# Returns True if entity should be exported
# Do not export polylines without vertices
return len(self.lwpoints) > 0
def export_entity(self, tagwriter: 'TagWriter') -> None:
""" Export entity specific data as DXF tags. """
# base class export is done by parent class
super().export_entity(tagwriter)
# AcDbEntity export is done by parent class
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_lwpolyline.name)
self.dxf.export_dxf_attribs(tagwriter, ['count', 'flags', 'const_width', 'elevation', 'thickness'])
tagwriter.write_tags(Tags(self.lwpoints.dxftags()))
self.dxf.export_dxf_attribs(tagwriter, 'extrusion')
# xdata and embedded objects export will be done by parent class
@property
def closed(self) -> bool:
""" ``True`` if polyline is closed. A closed polyline has a connection from the last vertex to the
first vertex. (read/write)
"""
return self.get_flag_state(LWPOLYLINE_CLOSED, name='flags')
@closed.setter
def closed(self, status: bool) -> None:
self.set_flag_state(LWPOLYLINE_CLOSED, status, name='flags')
# same as POLYLINE
def close(self, state: bool = True) -> None:
""" Compatibility interface to :class:`Polyline`. """
self.closed = state
@property
def has_arc(self) -> bool:
""" Returns ``True`` if LWPOLYLINE has an arc segment. """
return any(bool(b) for x, y, s, e, b in self.lwpoints)
@property
def has_width(self) -> bool:
""" Returns ``True`` if LWPOLYLINE has any segment with width attributes or DXF attribute const_width != 0.
.. versionadded:: 0.14
"""
if self.dxf.hasattr('const_width'):
# const_width overrides all individual start- or end width settings
return self.dxf.const_width != 0.0
return any((s or e) for x, y, s, e, b in self.lwpoints)
def __len__(self) -> int:
""" Returns count of polyline points. """
return len(self.lwpoints)
def __iter__(self) -> Iterable[LWPointType]:
""" Returns iterable of tuples (x, y, start_width, end_width, bulge). """
return iter(self.lwpoints)
def __getitem__(self, index: int) -> LWPointType:
"""
Returns point at position `index` as (x, y, start_width, end_width, bulge) tuple. start_width, end_width and
        bulge are ``0`` if not present, supports extended slicing. Point format is fixed as ``'xyseb'``.
All coordinates in :ref:`OCS`.
"""
return self.lwpoints[index]
def __setitem__(self, index: int, value: Sequence[float]) -> None:
"""
Set point at position `index` as (x, y, [start_width, [end_width, [bulge]]]) tuple. If start_width or end_width
is ``0`` or left off the default value is used. If the bulge value is left off, bulge is ``0`` by default
        (straight line). Does NOT support extended slicing. Point format is fixed as ``'xyseb'``.
All coordinates in :ref:`OCS`.
Args:
index: point index
value: point value as (x, y, [start_width, [end_width, [bulge]]]) tuple
"""
self.lwpoints[index] = compile_array(value)
def __delitem__(self, index: int) -> None:
""" Delete point at position `index`, supports extended slicing. """
del self.lwpoints[index]
def vertices(self) -> Iterable[Tuple[float, float]]:
"""
Returns iterable of all polyline points as (x, y) tuples in :ref:`OCS` (:attr:`dxf.elevation` is the z-axis value).
"""
for point in self:
yield point[0], point[1]
def vertices_in_wcs(self) -> Iterable['Vertex']:
"""
Returns iterable of all polyline points as Vector(x, y, z) in :ref:`WCS`.
"""
ocs = self.ocs()
elevation = self.get_dxf_attrib('elevation', default=0.)
for x, y in self.vertices():
yield ocs.to_wcs((x, y, elevation))
def vertices_in_ocs(self) -> Iterable['Vertex']:
"""
Returns iterable of all polyline points as Vector(x, y, z) in :ref:`OCS`.
"""
elevation = self.get_dxf_attrib('elevation', default=0.)
for x, y in self.vertices():
yield Vector(x, y, elevation)
def append(self, point: Sequence[float], format: str = DEFAULT_FORMAT) -> None:
"""
        Append `point` to polyline, ``format`` specifies a user defined point format.
All coordinates in :ref:`OCS`.
Args:
point: (x, y, [start_width, [end_width, [bulge]]]) tuple
format: format string, default is ``'xyseb'``, see: `format codes`_
"""
self.lwpoints.append(point, format=format)
def insert(self, pos: int, point: Sequence[float], format: str = DEFAULT_FORMAT) -> None:
"""
        Insert a new point in front of position `pos`, `format` specifies a user defined point format.
All coordinates in :ref:`OCS`.
Args:
pos: insert position
point: point data
format: format string, default is 'xyseb', see: `format codes`_
"""
data = compile_array(point, format=format)
self.lwpoints.insert(pos, data)
def append_points(self, points: Iterable[Sequence[float]], format: str = DEFAULT_FORMAT) -> None:
"""
Append new `points` to polyline, `format` specifies a user defined point format.
All coordinates in :ref:`OCS`.
Args:
points: iterable of point, point is (x, y, [start_width, [end_width, [bulge]]]) tuple
format: format string, default is ``'xyseb'``, see: `format codes`_
"""
for point in points:
self.lwpoints.append(point, format=format)
@contextmanager
def points(self, format: str = DEFAULT_FORMAT) -> List[Sequence[float]]:
"""
Context manager for polyline points. Returns a standard Python list of points,
according to the format string.
All coordinates in :ref:`OCS`.
Args:
format: format string, see `format codes`_
"""
points = self.get_points(format=format)
yield points
self.set_points(points, format=format)
def get_points(self, format: str = DEFAULT_FORMAT) -> List[Sequence[float]]:
"""
Returns all points as list of tuples, format specifies a user defined point format.
All points in :ref:`OCS` as (x, y) tuples (:attr:`dxf.elevation` is the z-axis value).
Args:
format: format string, default is ``'xyseb'``, see `format codes`_
"""
return [format_point(p, format=format) for p in self.lwpoints]
def set_points(self, points: Iterable[Sequence[float]], format: str = DEFAULT_FORMAT) -> None:
"""
Remove all points and append new `points`.
All coordinates in :ref:`OCS`.
Args:
points: iterable of point, point is (x, y, [start_width, [end_width, [bulge]]]) tuple
format: format string, default is ``'xyseb'``, see `format codes`_
"""
self.lwpoints.clear()
self.append_points(points, format=format)
def clear(self) -> None:
""" Remove all points. """
self.lwpoints.clear()
def transform(self, m: 'Matrix44') -> 'LWPolyline':
""" Transform LWPOLYLINE entity by transformation matrix `m` inplace.
.. versionadded:: 0.13
"""
dxf = self.dxf
ocs = OCSTransform(self.dxf.extrusion, m)
if not ocs.scale_uniform:
raise NonUniformScalingError('2D POLYLINE with arcs does not support non uniform scaling')
# Parent function has to catch this Exception and explode this LWPOLYLINE into LINE and ELLIPSE entities.
vertices = list(ocs.transform_vertex(v) for v in self.vertices_in_ocs())
lwpoints = [(v[0], v[1], p[2], p[3], p[4]) for v, p in zip(vertices, self.lwpoints)]
self.set_points(lwpoints)
# all new OCS vertices must have the same z-axis, which is the elevation of the polyline
if vertices:
dxf.elevation = vertices[0][2]
if dxf.hasattr('thickness'):
# thickness can be negative
dxf.thickness = ocs.transform_length((0, 0, dxf.thickness), reflection=dxf.thickness)
dxf.extrusion = ocs.new_extrusion
return self
def virtual_entities(self) -> Iterable[Union['Line', 'Arc']]:
"""
Yields 'virtual' parts of LWPOLYLINE as LINE or ARC entities.
        These entities are located at the original positions, but are not stored in the entity database, have no handle
and are not assigned to any layout.
.. versionadded:: 0.12
"""
return virtual_lwpolyline_entities(self)
def explode(self, target_layout: 'BaseLayout' = None) -> 'EntityQuery':
"""
        Explode parts of LWPOLYLINE as LINE or ARC entities into the target layout;
        if `target_layout` is ``None``, the layout of the LWPOLYLINE is used.
Returns an :class:`~ezdxf.query.EntityQuery` container with all DXF parts.
Args:
target_layout: target layout for DXF parts, ``None`` for same layout as source entity.
.. versionadded:: 0.12
"""
return explode_entity(self, target_layout)
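# Hypothetical usage sketch (not part of ezdxf itself): it assumes the usual ezdxf
# entry points ezdxf.new() and modelspace().add_lwpolyline() and shows how the
# point methods above combine with format codes.
def _example_lwpolyline_format_codes():
    import ezdxf
    doc = ezdxf.new()
    msp = doc.modelspace()
    pline = msp.add_lwpolyline([(0, 0), (3, 0), (6, 0)])
    # 'xyb' selects x, y and bulge value per point
    pline.append((9, 0, 0.5), format='xyb')
    return pline.get_points(format='xyb')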
class LWPolylinePoints(VertexArray):
__slots__ = ('values',)
VERTEX_CODE = 10
START_WIDTH_CODE = 40
END_WIDTH_CODE = 41
BULGE_CODE = 42
VERTEX_SIZE = 5
@classmethod
def from_tags(cls, tags: Tags) -> Tuple['LWPolylinePoints', Tags]:
""" Setup point array from tags. """
def get_vertex() -> LWPointType:
point.append(attribs.get(cls.START_WIDTH_CODE, 0))
point.append(attribs.get(cls.END_WIDTH_CODE, 0))
point.append(attribs.get(cls.BULGE_CODE, 0))
return tuple(point)
unprocessed_tags = Tags()
data = []
point = None
attribs = {}
for tag in tags:
if tag.code in LWPOINTCODES:
if tag.code == 10:
if point is not None:
data.extend(get_vertex())
point = list(tag.value[0:2]) # just use x, y coordinates, z is invalid but you never know!
attribs = {}
else:
attribs[tag.code] = tag.value
else:
unprocessed_tags.append(tag)
if point is not None:
data.extend(get_vertex())
return cls(data=data), unprocessed_tags
def append(self, point: Sequence[float], format: str = DEFAULT_FORMAT) -> None:
super().append(compile_array(point, format=format))
def dxftags(self) -> Iterable[DXFTag]:
for point in self:
x, y, start_width, end_width, bulge = point
yield DXFVertex(self.VERTEX_CODE, (x, y))
if start_width or end_width:
                # always export start and end width together;
                # BricsCAD requires both, AutoCAD does not!
yield DXFTag(self.START_WIDTH_CODE, start_width)
yield DXFTag(self.END_WIDTH_CODE, end_width)
if bulge:
yield DXFTag(self.BULGE_CODE, bulge)
def format_point(point: Sequence[float], format: str = 'xyseb') -> Sequence[float]:
"""
Reformat point components.
Format codes:
- ``x`` = x-coordinate
- ``y`` = y-coordinate
- ``s`` = start width
- ``e`` = end width
- ``b`` = bulge value
- ``v`` = (x, y) as tuple
Args:
point: list or tuple of (x, y, start_width, end_width, bulge)
format: format string, default is 'xyseb'
Returns:
Sequence[float]: tuple of selected components
"""
x, y, s, e, b = point
v = (x, y)
vars = locals()
return tuple(vars[code] for code in format.lower() if code in FORMAT_CODES)
def compile_array(data: Sequence[float], format='xyseb') -> array.array:
"""
Gather point components from input data.
Format codes:
- ``x`` = x-coordinate
- ``y`` = y-coordinate
- ``s`` = start width
- ``e`` = end width
- ``b`` = bulge value
- ``v`` = (x, y [,z]) tuple (z-axis is ignored)
Args:
data: list or tuple of point components
format: format string, default is 'xyseb'
Returns:
array.array: array.array('d', (x, y, start_width, end_width, bulge))
"""
a = array.array('d', (0., 0., 0., 0., 0.))
format = [code for code in format.lower() if code in FORMAT_CODES]
for code, value in zip(format, data):
if code not in FORMAT_CODES:
continue
if code == 'v':
value = cast('Vertex', value)
a[0] = value[0]
a[1] = value[1]
else:
a['xyseb'.index(code)] = value
return a
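# Hypothetical demo of the two helpers above (not part of the public API): shows
# how format codes select and order point components.
def _example_point_format_helpers():
    point = (1.0, 2.0, 0.1, 0.2, 0.5)              # (x, y, start_width, end_width, bulge)
    xyb = format_point(point, format='xyb')        # -> (1.0, 2.0, 0.5)
    record = compile_array((7.0, 8.0, 0.25), format='xyb')
    # -> array('d', [7.0, 8.0, 0.0, 0.0, 0.25])
    return xyb, record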
|
StarcoderdataPython
|
168191
|
<filename>gen2-custom-models/concat.py
import numpy as np
import cv2
import depthai as dai
SHAPE = 300
p = dai.Pipeline()
p.setOpenVINOVersion(dai.OpenVINO.VERSION_2021_4)
camRgb = p.create(dai.node.ColorCamera)
camRgb.setPreviewSize(SHAPE, SHAPE)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
left = p.create(dai.node.MonoCamera)
left.setBoardSocket(dai.CameraBoardSocket.LEFT)
left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
# ImageManip to resize the mono frame to 300x300 (the concat model expects 300x300 inputs) and to change the frame type
manipLeft = p.create(dai.node.ImageManip)
manipLeft.initialConfig.setResize(300, 300)
# The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case)
manipLeft.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
left.out.link(manipLeft.inputImage)
right = p.create(dai.node.MonoCamera)
right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
# ImageManip to resize the mono frame to 300x300 (the concat model expects 300x300 inputs) and to change the frame type
manipRight = p.create(dai.node.ImageManip)
manipRight.initialConfig.setResize(300, 300)
# The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case)
manipRight.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
right.out.link(manipRight.inputImage)
# NN that concatenates the three 300x300 input frames
nn = p.create(dai.node.NeuralNetwork)
nn.setBlobPath("models/concat_openvino_2021.4_6shave.blob")
nn.setNumInferenceThreads(2)
manipLeft.out.link(nn.inputs['img1'])
camRgb.preview.link(nn.inputs['img2'])
manipRight.out.link(nn.inputs['img3'])
# Send the concatenated frame from the NN to the host via XLink
nn_xout = p.create(dai.node.XLinkOut)
nn_xout.setStreamName("nn")
nn.out.link(nn_xout.input)
# Pipeline is defined, now we can connect to the device
with dai.Device(p) as device:
qNn = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
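    # The model concatenates the three 300x300 BGR frames side by side, so the raw
    # output tensor is 3 x 300 x 900 (C x H x W) in FP16.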
shape = (3, SHAPE, SHAPE * 3)
while True:
inNn = np.array(qNn.get().getData())
frame = inNn.view(np.float16).reshape(shape).transpose(1, 2, 0).astype(np.uint8).copy()
cv2.imshow("Concat", frame)
if cv2.waitKey(1) == ord('q'):
break
|
StarcoderdataPython
|
1687575
|
<gh_stars>0
import sys
import json
from df import df_worklist, ANALYSES, fmt, interp
from form_blocks import form_blocks
import cfg
from cfg_to_code import cfg_to_code
from util import fresh
def br_removal(blocks):
in_, out = df_worklist(blocks, ANALYSES['cprop'])
    # overwrite branch instructions in place using the constant-propagation results
preds, succs = cfg.edges(blocks)
for index, block in blocks.items():
vals = out[index]
if block[-1] is not None and block[-1]['op'] == 'br':
guard = block[-1]['args'][0]
if guard in vals:
block[-1]['op'] = 'jmp'
if vals[guard]:
block[-1]['args'] = [block[-1]['args'][1]]
else:
block[-1]['args'] = [block[-1]['args'][2]]
return blocks
def clean_cfg(blocks):
changed = True
while changed:
preds, succs = cfg.edges(blocks)
visited = dict.fromkeys(blocks.keys(),False)
        changed = _clean_cfg(list(blocks.keys())[0], blocks, preds, succs, visited)
# for block in blocks:
# print('{}:'.format(block))
# print('{}:'.format(blocks[block]))
return blocks
def _clean_cfg(block, blocks, preds, succs, visited):
changed = False
visited[block] = True
for s in succs[block]:
if not visited[s]:
            changed = _clean_cfg(s, blocks, preds, succs, visited) or changed  # call first so the traversal is not short-circuited
if blocks[block][-1]['op'] == 'br' and blocks[block][-1]['args'][1] == blocks[block][-1]['args'][2]:
next_block = blocks[block][-1]['args'][1]
# eliminating redundant branches
# previous br both jump to this block
blocks[block][-1]['op'] = 'jmp'
blocks[block][-1]['args'] = [blocks[block][-1]['args'][1]]
succs[block].remove(next_block)
preds[next_block].remove(block)
# print(" eliminating redundant branches ")
changed = True
if len(blocks[block]) == 1 and blocks[block][-1]['op'] == 'jmp':
# eliminating empty blocks
next_block = blocks[block][-1]['args'][0]
for p in preds[next_block]:
succ = succs[p]
if blocks[p][-1]['op'] == 'jmp':
blocks[p][-1]['args'] = [block]
succ = [block]
preds[block].append(p)
else:
#br
args = blocks[p][-1]['args']
for i in range(1,3):
if args[i] == next_block:
args[i] = block
succ.remove(next_block)
succ.append(block)
preds[block].append(p)
succs[p] = succ
blocks[block] = blocks[block][:-1] + blocks[next_block]
del blocks[next_block]
# print(" eliminating empty block")
changed = True
if len(succs[block]) == 1 and len(preds[succs[block][0]]) == 1:
# eliminating non-empty blocks
s = succs[block][0]
blocks[block] = blocks[block][:-1] + blocks[s]
del blocks[s]
# print(" eliminating non empty block")
changed = True
return changed
def unreachable_removal(blocks):
preds, succs = cfg.edges(blocks)
    reachable = dict.fromkeys(blocks.keys(), False)
    entry = list(blocks.keys())[0]
    reachable[entry] = True
    stack = [entry]
while(len(stack) != 0):
b = stack.pop()
for s in succs[b]:
if not reachable[s]:
reachable[s] = True
stack.append(s)
for b, r in reachable.items():
if not r :
del blocks[b]
return blocks
def canonicalize(instr):
if instr['op'] in ['add', 'mul', 'and', 'or']:
instr['args'] = sorted(instr['args'])
return json.dumps(instr, sort_keys=True)
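# Hypothetical illustration (not called anywhere): canonicalization makes two
# equivalent commutative instructions compare equal as strings.
def _example_canonicalize():
    a = canonicalize({'op': 'add', 'dest': 'z', 'args': ['y', 'x']})
    b = canonicalize({'op': 'add', 'dest': 'z', 'args': ['x', 'y']})
    return a == b  # True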
def merge(block1, block2, names, from_start):
l = 0
for i in range(len(block1)):
if from_start:
instr1 = block1[l]
instr2 = block2[l]
else:
instr1 = block1[len(block1) - l - 1]
instr2 = block2[len(block2) - l - 1]
if canonicalize(instr1) != canonicalize(instr2):
break
l +=1
return fresh('t', names), l
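# Hypothetical illustration of merge(): the two blocks below share a two-instruction
# tail (the add and the jmp), so merge() reports a common length of 2 plus a fresh label.
def _example_merge():
    b1 = [{'op': 'const', 'dest': 'a', 'value': 1},
          {'op': 'add', 'dest': 'c', 'args': ['a', 'b']},
          {'op': 'jmp', 'args': ['exit']}]
    b2 = [{'op': 'const', 'dest': 'a', 'value': 2},
          {'op': 'add', 'dest': 'c', 'args': ['a', 'b']},
          {'op': 'jmp', 'args': ['exit']}]
    name, l = merge(b1, b2, ['entry', 'exit'], from_start=False)
    return name, l  # l == 2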
def replace_target(blocks_r, all_blocks, originals, new):
    for p in blocks_r:
        new_args = []
        for idx, arg in enumerate(all_blocks[p][-1]["args"]):
            if len(all_blocks[p][-1]["args"]) > 1 and idx == 0:
                # br guard, never a jump target
                new_args.append(arg)
            elif arg in originals:
                new_args.append(new)
            else:
                new_args.append(arg)
        all_blocks[p][-1]["args"] = new_args
def tail_merging_once(blocks):
preds, succs = cfg.edges(blocks)
bns = list(blocks.keys())
for block in bns:
count = {}
# merge two preds
for p in preds[block]:
instr = blocks[p][-1]
if instr is not None and instr['op'] == 'jmp':
k = canonicalize(instr)
if k not in count:
count[k] = p
else:
# find two blocks can be merged
b1 = blocks[count[k]]
b2 = blocks[p]
name, l = merge(b1, b2, list(blocks.keys()), from_start = False)
if l == 1:
continue
if l == min(len(b1),len(b2)):
if len(b1) == len(b2):
                            # both blocks are identical: keep a single copy under the fresh name
                            blocks[name] = b1
                            replace_target(preds[count[k]] + preds[p], blocks, [count[k], p], name)
                            del blocks[count[k]]
                            del blocks[p]
                            return True
if len(b1) < len(b2):
small_block = count[k]
large_block = p
else:
small_block = p
large_block = count[k]
blocks[large_block] = blocks[large_block][:len(blocks[large_block]) - l]
jmp = {'op': 'jmp', "args": [small_block]}
blocks[large_block].append(jmp)
else:
jmp = {'op': 'jmp', "args": [name]}
new_block = b1[len(b1) - l:]
blocks[name] = new_block
blocks[count[k]] = b1[:len(b1) - l]
blocks[count[k]].append(jmp)
blocks[p] = b2[:len(b2) - l]
blocks[p].append(jmp)
return True
# merge two succs
if blocks[block][-1] is not None and blocks[block][-1]['op'] == 'br':
args = blocks[block][-1]['args']
b1 = blocks[args[1]]
b2 = blocks[args[2]]
name, l = merge(b1, b2, list(blocks.keys()),from_start = True)
if l > 0:
if l == len(b1):
# cannot have jmp in middle of block
# so this is the only case such that a block is entirely duplicated
jmp = {'op': 'jmp', "args": [args[1]]}
blocks[block] = blocks[block][:-1]
blocks[block].append(jmp)
replace_target(preds[args[2]], blocks, [args[2]], args[1])
del blocks[args[2]]
else:
jmp = {'op': 'jmp', "args": [name]}
br = blocks[block][-1]
blocks[block] = blocks[block][:-1]
blocks[block].append(jmp)
replace_target(preds[args[1]] + preds[args[2]], blocks, args[1:3], name)
blocks[name] = b1[:l]
blocks[args[1]] = b1[l:]
blocks[args[2]] = b2[l:]
blocks[name].append(br)
return True
return False
def tail_merging(blocks):
while(tail_merging_once(blocks)):
pass
# print("=======================")
# for block in blocks:
# print('{}:'.format(block))
# print('{}:'.format(blocks[block]))
return blocks
if __name__ == '__main__':
bril = json.load(sys.stdin)
for func in bril['functions']:
# Form the CFG.
blocks = cfg.block_map(form_blocks(func['instrs']))
cfg.add_terminators(blocks)
# for block in blocks:
# print('{}:'.format(block))
# print('{}:'.format(blocks[block]))
# br_removal(blocks)
# print("================after br_removal ==================")
# for block in blocks:
# print('{}:'.format(block))
# print('{}:'.format(blocks[block]))
# unreachable_removal(blocks)
# print("================after unreachable_removal ==================")
# for block in blocks:
# print('{}:'.format(block))
# print('{}:'.format(blocks[block]))
# clean_cfg(blocks)
# print("================after clean_cfg ==================")
# for block in blocks:
# print('{}:'.format(block))
# print('{}:'.format(blocks[block]))
tail_merging(blocks)
# print("================after tail_merging ==================")
# for block in blocks:
# print('{}:'.format(block))
# print('{}:'.format(blocks[block]))
# clean_cfg(blocks)
# print("================after clean_cfg ==================")
# for block in blocks:
# print('{}:'.format(block))
# print('{}:'.format(blocks[block]))
print(cfg_to_code(blocks, func['name']))
|
StarcoderdataPython
|
4457
|
from ._movement import Movement
from .path import MovementPath
from .paths import MovementPaths
|
StarcoderdataPython
|
162793
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 0.5.0.5149 (http://hl7.org/fhir/StructureDefinition/SearchParameter) on 2015-07-06.
# 2015, SMART Health IT.
from . import contactpoint
from . import domainresource
from . import fhirdate
from . import fhirelement
class SearchParameter(domainresource.DomainResource):
""" Search Parameter for a resource.
A Search Parameter that defines a named search item that can be used to
search/filter on a resource.
"""
resource_name = "SearchParameter"
def __init__(self, jsondict=None):
""" Initialize all valid properties.
"""
self.base = None
""" The resource type this search parameter applies to.
Type `str`. """
self.contact = None
""" Contact details of the publisher.
List of `SearchParameterContact` items (represented as `dict` in JSON). """
self.date = None
""" Publication Date(/time).
Type `FHIRDate` (represented as `str` in JSON). """
self.description = None
""" Documentation for search parameter.
Type `str`. """
self.experimental = None
""" If for testing purposes, not real usage.
Type `bool`. """
self.name = None
""" Name of search parameter.
Type `str`. """
self.publisher = None
""" Name of the publisher (Organization or individual).
Type `str`. """
self.requirements = None
""" Why this search parameter is defined.
Type `str`. """
self.status = None
""" draft | active | retired.
Type `str`. """
self.target = None
""" Types of resource (if a resource reference).
List of `str` items. """
self.type = None
""" number | date | string | token | reference | composite | quantity |
uri.
Type `str`. """
self.url = None
""" Literal URL used to reference this search parameter.
Type `str`. """
self.xpath = None
""" XPath that extracts the values.
Type `str`. """
super(SearchParameter, self).__init__(jsondict)
def elementProperties(self):
js = super(SearchParameter, self).elementProperties()
js.extend([
("base", "base", str, False),
("contact", "contact", SearchParameterContact, True),
("date", "date", fhirdate.FHIRDate, False),
("description", "description", str, False),
("experimental", "experimental", bool, False),
("name", "name", str, False),
("publisher", "publisher", str, False),
("requirements", "requirements", str, False),
("status", "status", str, False),
("target", "target", str, True),
("type", "type", str, False),
("url", "url", str, False),
("xpath", "xpath", str, False),
])
return js
class SearchParameterContact(fhirelement.FHIRElement):
""" Contact details of the publisher.
Contacts to assist a user in finding and communicating with the publisher.
"""
resource_name = "SearchParameterContact"
def __init__(self, jsondict=None):
""" Initialize all valid properties.
"""
self.name = None
""" Name of a individual to contact.
Type `str`. """
self.telecom = None
""" Contact details for individual or publisher.
List of `ContactPoint` items (represented as `dict` in JSON). """
super(SearchParameterContact, self).__init__(jsondict)
def elementProperties(self):
js = super(SearchParameterContact, self).elementProperties()
js.extend([
("name", "name", str, False),
("telecom", "telecom", contactpoint.ContactPoint, True),
])
return js
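# Hypothetical usage sketch (not part of the generated model classes); it assumes the
# FHIRElement/DomainResource base classes populate the properties from the JSON dict.
def _example_search_parameter():
    data = {
        "resourceType": "SearchParameter",
        "name": "subject",
        "base": "Observation",
        "type": "reference",
        "status": "draft",
        "description": "The subject that the observation is about",
    }
    param = SearchParameter(data)
    return param.name, param.base, param.type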
|
StarcoderdataPython
|
1649213
|
<reponame>LeeDongGeon1996/Stock.Indicators.Python<filename>stock_indicators/indicators/adx.py
from typing import Iterable, Optional, TypeVar
from stock_indicators._cslib import CsIndicator
from stock_indicators._cstypes import List as CsList
from stock_indicators.indicators.common.helpers import RemoveWarmupMixin
from stock_indicators.indicators.common.results import IndicatorResults, ResultBase
from stock_indicators.indicators.common.quote import Quote
def get_adx(quotes: Iterable[Quote], lookback_periods: int = 14):
"""Get ADX calculated.
Average Directional Movement Index (ADX) is a measure of price directional movement.
It includes upward and downward indicators, and is often used to measure strength of trend.
Parameters:
`quotes` : Iterable[Quote]
Historical price quotes.
    `lookback_periods` : int, defaults to 14
Number of periods in the lookback window.
Returns:
`ADXResults[ADXResult]`
        ADXResults is a list of ADXResult and provides some useful helper methods.
See more:
- [ADX Reference](https://daveskender.github.io/Stock.Indicators.Python/indicators/Adx/#content)
- [Helper Methods](https://daveskender.github.io/Stock.Indicators.Python/utilities/#content)
"""
adx_results = CsIndicator.GetAdx[Quote](CsList(Quote, quotes), lookback_periods)
return ADXResults(adx_results, ADXResult)
class ADXResult(ResultBase):
"""
A wrapper class for a single unit of ADX results.
"""
@property
def pdi(self) -> Optional[float]:
return self._csdata.Pdi
@pdi.setter
def pdi(self, value):
self._csdata.Pdi = value
@property
def mdi(self) -> Optional[float]:
return self._csdata.Mdi
@mdi.setter
def mdi(self, value):
self._csdata.Mdi = value
@property
def adx(self) -> Optional[float]:
return self._csdata.Adx
@adx.setter
def adx(self, value):
self._csdata.Adx = value
_T = TypeVar("_T", bound=ADXResult)
class ADXResults(RemoveWarmupMixin, IndicatorResults[_T]):
"""
    A wrapper class for the list of ADX (Average Directional Movement Index) results.
    It is exactly the same as the built-in `list`, except that it provides
    some useful helper methods from the C# implementation.
"""
|
StarcoderdataPython
|
93811
|
#!/usr/bin/python3.4
# -*-coding:Utf-8 -*
'''module to manage Rendering Options'''
import xml.etree.ElementTree as xmlMod
import os
class Options:
'''class to manage Rendering Options'''
RGBA_AVAILABLE = ['IRIS', 'PNG', 'JPEG2000', 'TARGA', 'DPX', 'OPEN_EXR_MULTILAYER', 'OPEN_EXR', 'HDR' ]
def __init__(self, xml= None):
'''initialize Rendering Options with default value or values extracted from an xml object'''
if xml is None:
self.defaultInit()
else:
self.fromXml(xml)
def defaultInit(self):
'''initialize Rendering Options with default value'''
self.z = True
self.objectIndex = True
self.compositing = False
self.alpha = True
self.exposureC = 1.0
self.exposureB = 0.0
def fromXml(self, xml):
'''initialize Rendering Options with values extracted from an xml object'''
self.z = xml.find('z') is not None
self.objectIndex = xml.find('objectIndex') is not None
self.compositing = xml.find('compositing') is not None
self.alpha = xml.find('alpha') is not None
self.exposureC = float(xml.find('exposureC').get('value'))
self.exposureB = float(xml.find('exposureB').get('value'))
def toXml(self):
'''export Rendering Options into xml syntaxed string'''
txt = '<options>\n'
if self.z:
txt += '<z />\n'
if self.objectIndex:
txt += '<objectIndex />\n'
if self.compositing:
txt += '<compositing />\n'
if self.alpha:
txt += '<alpha />\n'
txt += '<exposureB value="'+str(self.exposureB)+'" />'
txt += '<exposureC value="'+str(self.exposureC)+'" />'
txt += '</options>\n'
return txt
def menu(self, log):
'''menu to explore and edit Rendering Options settings'''
change = False
log.menuIn('Rendering Options')
while True:
log.print()
self.print()
print('''\n\n Menu :
1- Switch Z Pass Setting
2- Switch Object Index Pass Setting
3- Switch Compositing Setting
4- Switch Alpha Background Setting
5- Edit Cycles Exposure
6- Edit Blender Internal Exposure
0- Quit
''')
choice = input('Action?').strip().lower()
if choice in ['0', 'q', 'quit', 'cancel']:
log.menuOut()
return change
elif choice in ['1', '2', '3', '4']:
choice = int(choice)-1
attr = ['z', 'objectIndex', 'compositing', 'alpha'][choice]
label = ['Z pass', 'Object index pass', 'Compositing',\
'Alpha background'][choice]
setattr(self, attr, not(getattr(self, attr)))
                log.write(label+' '+({True:'Enabled', False:'Disabled'}[getattr(self, attr)]))
change = True
elif choice in ['5', '6']:
change = (self.editExposure(log, choice == '5') or change)
else:
                log.error('Invalid menu choice', False)
def print(self):
'''a method to print Rendering Options'''
        ennable = {True:'Enabled', False:'Disabled'}
print('Z pass : '+ennable[self.z])
print('Object index pass : '+ennable[self.objectIndex])
print('Compositing : '+ennable[self.compositing])
print('Alpha Background : '+ennable[self.alpha])
print('Cycles Exposure : '+str(self.exposureC))
print('Blender Int. Exp. : '+str(self.exposureB))
def editExposure(self, log, cycles):
'''A method to edit rendering exposure'''
if cycles:
log.menuIn('Edit Cycles Exposure')
attr = 'exposureC'
label = 'Cycles exposure'
else:
log.menuIn('Edit Blender Internal Exposure')
attr = 'exposureB'
label = 'Blender Internal exposure'
while True:
log.print()
print('\n\n Edit '+label.capitalize()+' :')
print('Current setting : '+str(getattr(self,attr)))
choice = input('New exposure : ').strip().lower()
if choice in ['', 'q', 'quit', 'cancel']:
log.menuOut()
return False
try:
choice = float(choice)
except ValueError:
log.error('New value must be numerical.')
continue
if choice < 0:
choice = 0.0
if cycles:
if choice > 10:
choice = 10.0
choice = round(choice, 2)
else:
if choice > 1:
choice = 1.0
choice = round(choice, 3)
setattr(self, attr, choice)
log.write(label+' set to : '+str(getattr(self, attr)))
log.menuOut()
return True
def apply(self, scene):
'''apply settings to a blender scene object'''
import bpy
for RL in scene.render.layers.values():
RL.use_pass_z = self.z
RL.use_pass_object_index = self.objectIndex
scene.cycles.film_exposure = self.exposureC
for w in bpy.data.worlds.values():
w.exposure = self.exposureB
scene.render.use_compositing = self.compositing
scene.cycles.film_transparent = self.alpha
scene.render.alpha_mode = { True : 'TRANSPARENT' , False : 'SKY' }[self.alpha]
if self.alpha and scene.render.image_settings.file_format in self.RGBA_AVAILABLE:
scene.render.image_settings.color_mode = 'RGBA'
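# Hypothetical round-trip sketch (not part of the module): serialize the default
# options to XML and read them back through fromXml().
def _example_options_roundtrip():
    opts = Options()
    restored = Options(xml=xmlMod.fromstring(opts.toXml()))
    return restored.z, restored.alpha, restored.exposureC, restored.exposureB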
|
StarcoderdataPython
|
66383
|
# XXX depends on internet connectivity, so not run as part of standard tests
from __future__ import absolute_import, division, print_function
def exercise():
from mmtbx.wwpdb import rcsb_web_services
lysozyme = """<KEY>"""
homologs = rcsb_web_services.sequence_search(lysozyme, d_max=2.0)
assert (len(homologs) > 500)
atp_binding = rcsb_web_services.chemical_id_search("ATP", protein_only=True)
assert (len(atp_binding) > 650)
report = rcsb_web_services.get_high_resolution_for_structures(atp_binding)
assert (len(report) == len(atp_binding)) and (len(report[0]) == 2)
ligand_info = rcsb_web_services.get_ligand_info_for_structures(['1mru'])
assert (len(ligand_info) == 4)
print("OK")
if (__name__ == "__main__"):
exercise()
|
StarcoderdataPython
|
1746092
|
import requests
import json
import ConfigParser
class LabelManager:
def __init__(self, config):
"""
:param ConfigParser.RawConfigParser config: read config file
"""
self.nextcloudBaseUrl = 'https://api.github.com/repos/nextcloud/'
config.read('github.cfg')
self.authToken = config.get('auth', 'token')
self.headers = {'Authorization': 'token ' + self.authToken}
def get_all_labels(self, repo='server'):
"""
get all labels from the Nextcloud server repository
:return: list of all server labels
"""
url = self.nextcloudBaseUrl + repo + '/labels?per_page=100'
response = requests.get(url)
data = json.loads(response.content)
return data
def create_labels(self, repo, labels):
"""
create labels on the given Nextcloud repository
:param string repo:
:param string labels:
"""
url = self.nextcloudBaseUrl + repo + '/labels'
for label in labels:
# skip the feature: labels because they are server specific
if not label['name'].startswith('feature:'):
payload = {
'name': label['name'],
'color': label['color'],
}
response = requests.get(url + '/' + label['name'], headers=self.headers)
if response.status_code == 403:
print '[' + repo + '] Rate limit reached'
return
# if the label already exists we update it, otherwise we create a new one
if response.status_code == 200:
checkLabel = json.loads(response.content)
                    if not checkLabel['color'] == label['color']:
print '[' + repo + '] Update label: "' + label['name'] + '" (Color: #' + label['color'] + ')'
requests.patch(url + '/' + label['name'], data=json.dumps(payload), headers=self.headers)
else:
print '[' + repo + '] Skip unchanged label: "' + label['name'] + '"'
else:
print '[' + repo + '] Create label: "' + label['name'] + '" (Color: #' + label['color'] + ')'
requests.post(url, data=json.dumps(payload), headers=self.headers)
else:
print '[' + repo + '] Skip feature label: "' + label['name'] + '"'
def delete_all_labels(self, repo):
"""
delete all labels from a give repository
:param repo:
:return:
"""
if repo == 'server':
return
url = self.nextcloudBaseUrl + repo + '/labels/'
labels = self.get_all_labels(repo)
for label in labels:
            print 'Delete label: "' + label['name'] + '"'
requests.delete(url + label['name'], headers=self.headers)
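# Hypothetical usage sketch: copy the server repository's labels to another Nextcloud
# repository. Assumes a github.cfg file with an [auth] section containing a 'token'
# entry; 'calendar' is just an example target repository name.
def example_sync_labels():
    config = ConfigParser.RawConfigParser()
    manager = LabelManager(config)
    labels = manager.get_all_labels('server')
    manager.create_labels('calendar', labels)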
|
StarcoderdataPython
|