id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses, 1 value)
---|---|---|
1670547
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import time
import webbrowser
caffe_root = './'
import sys
sys.path.insert(0, caffe_root + 'python')
# 0 - debug
# 1 - info (still a LOT of outputs)
# 2 - warnings
# 3 - errors
import os
os.environ['GLOG_minloglevel'] = '0'
import caffe
net = sys.argv[1]
img = sys.argv[2]
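# Example invocation (illustrative; the script filename and image path are
# assumptions, not part of this file):
#   python classify_image.py -googlenet examples/images/cat.jpg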
# select the net
if net == '-caffenet':
net_fn = 'models/bvlc_reference_caffenet/deploy.prototxt'
param_fn = 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'
height = width = 227
elif net == '-alexnet':
net_fn = 'models/bvlc_alexnet/deploy.prototxt'
param_fn = 'models/bvlc_alexnet/bvlc_alexnet.caffemodel'
height = width = 227
elif net == '-googlenet':
net_fn = 'models/bvlc_googlenet/deploy.prototxt'
param_fn = 'models/bvlc_googlenet/bvlc_googlenet.caffemodel'
height = width = 224
elif net == '-flickrnet':
net_fn = 'models/finetune_flickr_style/deploy.prototxt'
param_fn = 'models/finetune_flickr_style/finetune_flickr_style.caffemodel'
height = width = 227
else:
net_fn = 'models/bvlc_reference_caffenet/deploy.prototxt'
param_fn = 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'
height = width = 227
# set caffe to GPU mode
caffe.set_device(0)
caffe.set_mode_gpu()
# load the net in the test phase for inference
net = caffe.Net(
caffe_root + net_fn,
caffe_root + param_fn,
caffe.TEST)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1))
transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # mean pixel
transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB
# set net to batch size of 50
net.blobs['data'].reshape(50,3,height,width)
# feed in the image (with some preprocessing) and classify with a forward pass
net.blobs['data'].data[...] = transformer.preprocess('data', caffe.io.load_image(img))
start = time.time()
out = net.forward()
print('\nDone in %.5f s.' % (time.time() - start))
print('\nPredicted class is #{}.'.format(out['prob'][0].argmax()))
# load labels
imagenet_labels_filename = caffe_root + 'data/ilsvrc12/synset_words.txt'
labels = np.loadtxt(imagenet_labels_filename, str, delimiter='\t')
# sort top k predictions from softmax output
top_k = net.blobs['prob'].data[0].flatten().argsort()[-1:-6:-1]
for i in range(len(top_k)):
print('%d: %s (%.2f%%)' % (i + 1, labels[top_k[i]], out['prob'][0][top_k[i]] * 100))
|
StarcoderdataPython
|
3354346
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""Top-level package for cards."""
__version__ = '0.2.4'
from .api import * # noqa
|
StarcoderdataPython
|
1710090
|
import unittest
from bubblesort import bubblesort
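# A minimal reference sketch of what the imported bubblesort is assumed to do
# (returns a new ascending-sorted list); kept under a different name so the
# import above is not shadowed. Illustration only, not the module under test.
def bubblesort_reference(items):
    result = list(items)
    for end in range(len(result) - 1, 0, -1):
        for i in range(end):
            if result[i] > result[i + 1]:
                result[i], result[i + 1] = result[i + 1], result[i]
    return result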
class TwoQueens(unittest.TestCase):
def test_bubblesort(self):
testcase = [1,3,8,2,9,2,5,6]
expected = [1,2,2,3,5,6,8,9]
self.assertEqual(bubblesort(testcase), expected)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
55824
|
from .video import VideoEntity
ENTITY_CLASSES = [VideoEntity]
ENTITY_TYPE_CHOICES = [
(VideoEntity.name, 'Video'),
]
ENTITY_TYPE_NAME_TO_CLASS = {
k.name: k for k in ENTITY_CLASSES
}
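# Usage note (illustrative): the mapping resolves a stored entity type name back
# to its class, e.g. ENTITY_TYPE_NAME_TO_CLASS[VideoEntity.name] is VideoEntity.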
|
StarcoderdataPython
|
3331689
|
import torch
import torch.nn as nn
from einops import rearrange, repeat
from ...common.base_model import BaseClassificationModel
from ...decoder.mlp import MLPDecoder
from ...encoder.embedding import LinearVideoEmbedding, PosEmbedding, TubeletEmbedding
from ...encoder.vanilla import VanillaEncoder
from ...encoder.vivit import ViViTEncoder
from ...utils.registry import MODEL_REGISTRY
from ...utils.utils import pair
@MODEL_REGISTRY.register()
class ViViTModel2(BaseClassificationModel):
"""
Model 2 implementation of "ViViT: A Video Vision Transformer" (https://arxiv.org/abs/2103.15691)
Parameters
----------
img_size:int
Size of single frame/ image in video
in_channels:int
Number of channels
patch_size: int
Patch size
embedding_dim: int
Embedding dimension of a patch
num_frames:int
Number of frames in each video clip
depth:int
Number of encoder layers
num_heads:int
Number of attention heads
head_dim:int
Dimension of head
num_classes:int
Number of classes
mlp_dim: int
Dimension of hidden layer
pool: str
Pooling operation,must be one of {"cls","mean"},default is "cls"
p_dropout:float
Dropout probability
attn_dropout:float
Dropout probability
drop_path_rate:float
Stochastic drop path rate
"""
def __init__(
self,
img_size,
in_channels,
patch_size,
embedding_dim,
num_frames,
depth,
num_heads,
head_dim,
num_classes,
mlp_dim=None,
pool="cls",
p_dropout=0.0,
attn_dropout=0.0,
drop_path_rate=0.02,
):
super(ViViTModel2, self).__init__(
img_size=img_size,
in_channels=in_channels,
patch_size=patch_size,
pool=pool,
)
patch_dim = in_channels * patch_size ** 2
self.patch_embedding = LinearVideoEmbedding(
embedding_dim=embedding_dim,
patch_height=patch_size,
patch_width=patch_size,
patch_dim=patch_dim,
)
self.pos_embedding = PosEmbedding(
shape=[num_frames, self.num_patches + 1], dim=embedding_dim, drop=p_dropout
)
self.space_token = nn.Parameter(
torch.randn(1, 1, embedding_dim)
) # this is similar to using cls token in vanilla vision transformer
self.spatial_transformer = VanillaEncoder(
embedding_dim=embedding_dim,
depth=depth,
num_heads=num_heads,
head_dim=head_dim,
mlp_dim=mlp_dim,
p_dropout=p_dropout,
attn_dropout=attn_dropout,
drop_path_rate=drop_path_rate,
)
self.time_token = nn.Parameter(torch.randn(1, 1, embedding_dim))
self.temporal_transformer = VanillaEncoder(
embedding_dim=embedding_dim,
depth=depth,
num_heads=num_heads,
head_dim=head_dim,
mlp_dim=mlp_dim,
p_dropout=p_dropout,
attn_dropout=attn_dropout,
drop_path_rate=drop_path_rate,
)
self.decoder = MLPDecoder(
config=[
embedding_dim,
],
n_classes=num_classes,
)
def forward(self, x):
x = self.patch_embedding(x)
(
b,
t,
n,
d,
) = x.shape # x has shape (batch, num_frames, num_patches, embedding_dim)
cls_space_tokens = repeat(self.space_token, "() n d -> b t n d", b=b, t=t)
x = torch.cat((cls_space_tokens, x), dim=2) # prepend spatial cls tokens (no nn.Parameter wrapper needed in forward)
x = self.pos_embedding(x)
x = rearrange(x, "b t n d -> (b t) n d")
x = self.spatial_transformer(x)
x = rearrange(x[:, 0], "(b t) ... -> b t ...", b=b)
cls_temporal_tokens = repeat(self.time_token, "() n d -> b n d", b=b)
x = torch.cat((cls_temporal_tokens, x), dim=1)
x = self.temporal_transformer(x)
x = x.mean(dim=1) if self.pool == "mean" else x[:, 0]
x = self.decoder(x)
return x
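# Hedged construction sketch for ViViTModel2 (illustrative values only; the
# embedding/encoder classes come from the surrounding package, so this is not
# a tested configuration):
#
#   model = ViViTModel2(
#       img_size=224, in_channels=3, patch_size=16, embedding_dim=192,
#       num_frames=16, depth=4, num_heads=3, head_dim=64, num_classes=400,
#       mlp_dim=384,
#   )
#   logits = model(video_batch)  # video_batch shaped as expected by LinearVideoEmbedding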
# model 3
@MODEL_REGISTRY.register()
class ViViTModel3(BaseClassificationModel):
"""
Model 3 of "ViViT: A Video Vision Transformer" - https://arxiv.org/abs/2103.15691
Parameters
----------
img_size:int or tuple[int]
size of a frame
patch_t:int
Temporal length of single tube/patch in tubelet embedding
patch_h:int
Height of single tube/patch in tubelet embedding
patch_w:int
Width of single tube/patch in tubelet embedding
in_channels: int
Number of input channels, default is 3
num_classes:int
Number of classes
num_frames :int
Number of frames in each video clip
embedding_dim:int
Embedding dimension of a patch
depth:int
Number of Encoder layers
num_heads: int
Number of attention heads
head_dim:int
Dimension of attention head
p_dropout:float
Dropout rate/probability, default is 0.0
mlp_dim: int
Hidden dimension, optional
"""
def __init__(
self,
img_size,
patch_t,
patch_h,
patch_w,
in_channels,
num_classes,
num_frames,
embedding_dim,
depth,
num_heads,
head_dim,
p_dropout,
mlp_dim=None,
):
super(ViViTModel3, self).__init__(
in_channels=in_channels,
patch_size=(patch_h, patch_w),
pool="mean",
img_size=img_size,
)
h, w = pair(img_size)
self.tubelet_embedding = TubeletEmbedding(
embedding_dim=embedding_dim,
tubelet_t=patch_t,
tubelet_h=patch_h,
tubelet_w=patch_w,
in_channels=in_channels,
)
self.pos_embbedding = PosEmbedding(
shape=[num_frames // patch_t, (h * w) // (patch_w * patch_h)],
dim=embedding_dim,
)
self.encoder = ViViTEncoder(
dim=embedding_dim,
num_heads=num_heads,
head_dim=head_dim,
p_dropout=p_dropout,
depth=depth,
hidden_dim=mlp_dim,
)
self.decoder = MLPDecoder(
config=[
embedding_dim,
],
n_classes=num_classes,
)
def forward(self, x):
x = self.tubelet_embedding(x)
x = self.pos_embbedding(x)
x = self.encoder(x)
x = x.mean(dim=1)
x = self.decoder(x)
return x
|
StarcoderdataPython
|
3344762
|
<reponame>Jakobis/OrderedSequences
from datastructures import AutoLoad
k = [i for i in range(10 ** 8)]
l = AutoLoad.AutoLoad(k)
print(l.size())
|
StarcoderdataPython
|
1681772
|
<filename>.venv/lib/python3.8/site-packages/findatapy/market/indices/indicesfx.py<gh_stars>0
__author__ = 'saeedamen' # <NAME>
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
"""
IndicesFX
Construct total return (spot) indices for FX. In the future this will also convert assets from local currency to foreign
currency denomination and construct indices from forwards series.
"""
import pandas
class IndicesFX:
def create_total_return_indices(self, crosses, spot, deposit, start_date, finish_date, home_curr = "USD"):
pass
def unhedged_asset_fx(self, assets, spot, asset_currency, home_curr, start_date, finish_date):
pass
def hedged_asset_fx(self, assets, total_return_indices, spot, asset_currency, home_curr, start_date, finish_date):
pass
def get_day_count_conv(self, currency):
if currency in ['AUD', 'CAD', 'GBP', 'NZD']:
return 365.0
return 360.0
def create_total_return_index(self, cross_fx, tenor, spot_df, deposit_df):
"""
create_total_return_index - Creates total return index for selected FX crosses from spot and deposit data
Parameters
----------
cross_fx : String
Crosses to construct total return indices (can be a list)
tenor : String
Tenor of deposit rates to use to compute carry (typically ON for spot)
spot_df : pandas.DataFrame
Spot data (must include crosses we select)
deposit_df : pandas.DataFrame
Deposit data
"""
if not(isinstance(cross_fx, list)):
cross_fx = [cross_fx]
total_return_index_agg = None
for cross in cross_fx:
# get the spot series, base deposit
spot = spot_df[cross + ".close"].to_frame()
base_deposit = deposit_df[cross[0:3] + tenor + ".close"].to_frame()
terms_deposit = deposit_df[cross[3:6] + tenor + ".close"].to_frame()
carry = base_deposit.join(terms_deposit, how='inner')
base_daycount = self.get_day_count_conv(cross[0:3])
terms_daycount = self.get_day_count_conv(cross[3:6]) # terms currency is characters 3-6 of the cross
# align the base & terms deposits series to spot
spot, carry = spot.align(carry, join='left', axis = 0)
carry = carry.fillna(method = 'ffill') / 100.0
spot = spot[cross + ".close"].to_frame()
base_deposit = carry[base_deposit.columns]
terms_deposit = carry[terms_deposit.columns]
# calculate the time difference between each data point
spot['index_col'] = spot.index
time = spot['index_col'].diff()
spot = spot.drop(columns='index_col')
total_return_index = pandas.DataFrame(index = spot.index, columns = spot.columns)
total_return_index.iloc[0] = 100
for i in range(1, len(total_return_index.index)):
time_diff = time.values[i].astype(float) / 86400000000000.0 # get time difference in days
# TODO: vectorise this formula
# calculate total return index as product of yesterday, changes in spot and carry accrued
total_return_index.values[i] = total_return_index.values[i-1] * \
(1 + (1 + base_deposit.values[i] * time_diff/base_daycount)*
(spot.values[i]/spot.values[i-1]) \
- (1+ terms_deposit.values[i]*time_diff/terms_daycount))
if total_return_index_agg is None:
total_return_index_agg = total_return_index
else:
total_return_index_agg = total_return_index_agg.join(total_return_index, how = 'outer')
return total_return_index_agg
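# Hedged usage sketch (illustrative numbers; assumes pandas is installed and
# follows the "<cross>.close" / "<currency><tenor>.close" column convention used above):
if __name__ == "__main__":
    dates = pandas.date_range("2020-01-01", periods=5, freq="D")
    spot_df = pandas.DataFrame({"EURUSD.close": [1.10, 1.11, 1.105, 1.12, 1.115]}, index=dates)
    deposit_df = pandas.DataFrame({"EURON.close": [0.50] * 5, "USDON.close": [1.50] * 5}, index=dates)
    print(IndicesFX().create_total_return_index("EURUSD", "ON", spot_df, deposit_df))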
|
StarcoderdataPython
|
3340846
|
<gh_stars>0
#!/usr/bin/env python3
"""@@@
Main Module: SettingsPacket.py
Classes: InputSettings
Author: <NAME>
creation date: 190829
last update: 200603 (various updates, minor bug fixes)
version: 0.0
Purpose:
Works as a packet of default settings for class FreeEnergy and
BranchEntropy.
Puts the default settings in one place and allows the
user to change them before initializing the computational programs.
"""
import sys
from os import path
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
from FileTools import FileTools
from FileTools import getHeadExt
from Seq import Seq
# set up Vienna and gMatrix and other FE parameters
from MolSystem import genRNASeq # make fake RNA sequences from structures
from MolSystem import genChrSeq # make fake Chromatin seq from structures
from MolSystem import sysDefLabels # system dictionary: RNA, Chromatin
from MolSystem import dEXTS # allowed parameter file extension dictionary
from MolSystem import MolSystem # system information packet
#from ViennaDataFmt import ViennaDataObj # Vienna parameter representation for RNA
#from ViennaParFiles import ParamData # Vienna parameter file for RNA
#from Proc_gMatrix import Build_gMatrix # gMatrix parameter representation for RNA
# system independent constants
from Constants import kB # [kcal/molK] (Boltzmann constant)
from Constants import T37C # [K] absolute temp at 37 C.
from Constants import T_0C # [K] absolute temp at 0 C.
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
PROGRAM = "SettingsPacket.py"
#
class InputSettings(object):
def __init__(self, system = "Chromatin"):
self.show_info = False # True #
# initially, we enter some placeholder names and parameters so
# that they are defined.
self.system = "unassigned" # left undefined because needs to be checked
# presently, these two are mainly used for referencing if
# problems occur.
self.source = "InputSettings"
self.program = "fake"
self.jobtype = "undefined"
# chromatin
self.f_heatmap = "noname.heat"
self.chrseq = "" # e.g., "cccccccccccc"
self.chrstr = "" # e.g., "((((....))))"
self.cSeq = None # Seq(self.chrseq)
# RNA
self.f_vseq = "noname.vseq"
self.rnaseq = "" # e.g., "GGGGuuuuCCCC"
self.rnastr = "" # e.g., "((((....))))"
self.rSeq = None # Seq(self.rnaseq)
self.N = 0 # e.g., len(self.rnaseq)
# ####################################################################
# ##### Command line control parameters that affect FreeEnergy #####
# ####################################################################
# VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV
# set the basic free energy parameter set: Turner Energy Rules
# or Vis Energy Rules
self.molsys = MolSystem()
self.parFlnm = "none"
self.vdf = None # ViennaDataObj
self.bm = None # Build_gMatrix
# thermodynamic parameters in entropy and free energy
# evaluation
self.kB = kB # [kcal/molK] Boltzmann constant
# input variables
self.T = T37C # [K] temperature
#############################################################
#### parameters requiring settings according to system ####
#############################################################
# VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV
# constants: in entropy evaluation
# coarse-grained resolution factor (segment length)
self.seg_len = 0
self.xi = 0.0 # [bp] stem Kuhn length
self.lmbd = 0.0 # bp separation distance in units of mer-to-mer distance
self.gmm = 0.0 # dimensionless self avoiding walk constant
self.delta = 0.0 # exponential weight (related to excluded volume)
"""@
# next: entropic weight function
With Chromatin, the Kuhn length (xi) is basically ignored
except for its entropy weight in the global entropy
function. As a result, I have set the entropic weight to be
self.w = self.seg_len*self.xi/(self.lmbd)**2
and this is used throughout in ChromatinModule.
However, with RNA, we have a little problem because the value
of xi depends on the proposed length of the stems in this new
approach (this new approach is also different from vsfold5
where we could pretend that the Kuhn length was fixed, set it,
and be done with it). For RNA, I think the entropic weight
should be
self.w = self.seg_len/(self.lmbd)**2
It is in fact true that I am including the actual xi of the
stem. Therefore, xi must be specified as an argument in the
calculation of the global entropy for RNA.
"""
self.w = 0.0
# secondary structure stem parameters
self.minStemLen = 0 # [bp]
self.max_bp_gap = 0 # [nt]
self.max_aa_gap = 0 # [beads]
self.max_pp_gap = 0 # [beads]
# secondary structure loop parameters
self.minLoopLen = 0 # [nt]
# minimum loop length (for chromatin it is 1, for RNA = 3)
self.dGMI_threshold = 0.0 # [kcal/mol]
"""threshold for MBL stability"""
# pseudoknot parameters
self.minPKloop = 2 # (always) minimum PK loop (2 nt)
self.add_PK = True # (always) include PK search
self.scan_ahead = 10 # default 10 nt
self.dGpk_threshold = 0.0 # [kcal/mol]
"""threshold for PK stability"""
# other settings
self.Mg_binding = False # Mg binding interactions
self.dangles = 2 # always 2
# For programs using the DPA, the search for suboptimal
# structures will range between dGmin and dGmin + dGrange.
self.dGrange = 0.0 # kcal/mol
# ###########################################
# ##### Chromatin specific parameters #####
# ###########################################
# VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV
# VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV
"""@
In general, these are not used much even with chromatin, but
they are kept here for historical reasons, backward compatibility
and so forth. """
# constants: weights for the enthalpy of binding
self.dHbase = -6.0
self.dHshift = 1.0
# PET cluster weight
self.add_PET_wt = False
self.add_PET_wt_to_edges = False
self.PETwt = 100.0
# weights for selecting out PET clusters
self.CTCF_scale = 100.0
self.ctcf_tthresh = 0.20*self.CTCF_scale
self.ctcf_cthresh = 0.40*self.CTCF_scale
self.minLoopLen = 1
# This started when I was confronted with the somewhat strange
# data I got from Nenski that had counts in almost every bin
# and very large numbers.
# these are probably not necessary
self.pssbl_ctcf = {}
self.edge_ctcf = {}
self.from_Nenski = False
# this re-scales the data by some fraction
self.rescale_wt = 1.0
# AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
# AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
# ###########################################
# ##### Chromatin specific parameters #####
# ###########################################
self.allowed_extns = []
if system == "RNA":
# general constants for RNA
from RConstants import xi # [bps]
from RConstants import lmbd # [bps]
from RConstants import gmm
# dimensionless: self avoiding random walk (app D/2)
from RConstants import delta
# dimensionless: related to excluded volume
from RConstants import seg_len # for RNA = 1
from RConstants import minStemLen # minimum stem length
from RConstants import max_bp_gap # max gap between bps
from RConstants import minLoopLen # minimum loop length
from RConstants import pk_scan_ahead # hot lead length for pk
from RConstants import dGpk_threshold # threshold FE for pks
from RConstants import dGMI_threshold # threshold FE for M-/I-loops
from RConstants import set_dangles # dangle parameter (always = 2)
from RConstants import dG_range # FE range in suboptimal structures
# default FE potential
self.set_system(system) # "RNA"
self.xi = xi # [bp] stem Kuhn length
self.lmbd = lmbd # bp separation distance
self.gmm = gmm # self avoiding walk constant
self.delta = delta # exponential weight
self.seg_len = seg_len # [bp], for RNA = 1 nt
self.w = self.seg_len/(self.lmbd)**2
self.minStemLen = minStemLen # [bp]
self.max_bp_gap = max_bp_gap # [nt]
# secondary structure loop parameters
self.minLoopLen = minLoopLen # [nt]
# minimum loop length (for chromatin it is 1, for RNA = 3)
self.dGMI_threshold = dGMI_threshold # [kcal/mol]
"""threshold for MBL stability"""
# pseudoknot parameters
self.scan_ahead = pk_scan_ahead # default 10 nt
self.dGpk_threshold = dGpk_threshold # [kcal/mol]
"""threshold for PK stability"""
self.dangles = set_dangles # in general, this should be True
self.dGrange = dG_range # kcal/mol
# file handling matters
self.allowed_extns = ["vseq", "seq"]
elif system == "Chromatin":
# general constants for Chromatin
from ChrConstants import xi # Kuhn length
from ChrConstants import lmbd # binding distance
from ChrConstants import gmm # SAW parameter (D/2)
from ChrConstants import delta # exp weight
from ChrConstants import seg_len # 5 kb
from ChrConstants import minStemLen # minimum stem length
from ChrConstants import max_aa_gap # max gap between beads anti parallel
from ChrConstants import max_pp_gap # max gap between beads parallel
from ChrConstants import minLoopLen # minimum loop length
from ChrConstants import pk_scan_ahead # hot lead length for pk
from ChrConstants import dGpk_threshold # threshold FE for pks
from ChrConstants import dGMI_threshold # threshold FE for M-/I-loops
from ChrConstants import set_dangles # dangle parameter (always = 2)
from ChrConstants import dG_range # FE range in suboptimal structures
from ChrConstants import febase # [kcal/mol]
from ChrConstants import feshift # (dimensionless, usually = 1)
# default FE potential
self.set_system(system) # "Chromatin"
self.xi = xi # [bp] stem Kuhn length
self.lmbd = lmbd # ratio "bond distance"/"bead-to-bead distance"
self.gmm = gmm # self avoiding walk constant
self.delta = delta # exponential weight
self.seg_len = seg_len # [bp], for Chromatin = 5kbp
self.w = self.seg_len*self.xi/(self.lmbd)**2
"""@
Note that with Chromatin, xi is not really an important
parameter for most issues and is there primarily as a
formality. So we can afford to incorporate xi into the
expression for w. For RNA, xi is a variable quantity, so
we cannot afford to just bury it into w.
"""
self.minStemLen = minStemLen # [bp]
self.max_aa_gap = max_aa_gap # [nt]
self.max_pp_gap = max_pp_gap # [nt]
# secondary structure loop parameters
self.minLoopLen = minLoopLen # [nt]
# minimum loop length (for chromatin it is 1, for RNA = 3)
self.dGMI_threshold = dGMI_threshold # [kcal/mol]
"""threshold for MBL stability"""
# pseudoknot parameters
self.scan_ahead = pk_scan_ahead # default 10 nt
self.dGpk_threshold = dGpk_threshold # [kcal/mol]
"""threshold for PK stability"""
self.dangles = set_dangles # in general, this should be True
self.dGrange = dG_range # kcal/mol
self.dHbase = febase # default -6.0
self.dHshift = feshift # default 1.0
# file handling matters
self.allowed_extns = ["heat", "data"]
else:
print ("ERROR(SettingsPacket): unrecognized system (%s)" % system)
sys.exit(1)
#
# AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
#############################################################
#### parameters requiring settings according to system ####
#############################################################
# AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
# ####################################################################
# ##### Command line control parameters that affect FreeEnergy #####
# ####################################################################
self.set_GetOpts = False # essentially fake!
if self.show_info:
print ("dinged here InputSettings")
#
#
def set_system(self, sysName):
# merely sets the system name, nothing more
flag_pass = False
if not sysName in sysDefLabels:
print ("ERROR: unrecognized system name (%s)" % sysName)
print (" allowed names ")
for snm in sysDefLabels.keys():
print (snm)
#|endfor
#
self.system = sysName
self.molsys.set_system(sysName)
#
def set_parFlnm(self, parflnm):
self.parFlnm = parflnm
if not self.parFlnm == "none":
ft = FileTools()
print (dEXTS[self.system])
if not ft.check_ext(self.parFlnm, dEXTS[self.system]):
# RNA -> gMtrx or par file
# Chromatin -> heat or eheat
print ("ERROR: gMatrix free energy parameter files require")
print (" the extension \"gMtrx\",")
print (" input file: %s" % args.parFlnm)
sys.exit(1)
#
if not path.isfile(self.parFlnm):
print ("ERROR: cannot find gMatrix file (%s)." % self.parFlnm)
sys.exit(1)
#
flhd, ext = getHeadExt(self.parFlnm)
print (flhd, ext)
print ("xxx molsys.set_useParamFl", self.system, self.parFlnm)
if ext == "par":
self.molsys.set_useParamFl(self.system, self.parFlnm)
elif ext == "gMtrx":
self.molsys.set_usegMatrix(self.system, self.parFlnm)
#
if self.show_info:
print ("input parameter file: ", self.parFlnm)
#
else:
print ("ERROR: gMatrix file is not specified")
sys.exit(1)
#
#print ("planned exit at InputSettings.set_parFlnm():"); sys.exit(0)
#
# set the basic free energy parameter set: Turner Energy Rules
# or Vis Energy Rules or gMatrix
def set_FEparamData(self, ptype = "Turner", paramflnm = "none"):
if self.system == "Chromatin":
self.set_ChrParams(ptype, paramflnm) # heat, or eheat file
elif self.system == "RNA":
self.set_RNAParams(ptype, paramflnm) # gMtrx or par file
else:
print ("ERROR: system must be defined before assigning FE parameter types")
sys.exit(1)
#
#
def set_ChrParams(self, ptype, paramflnm = "none"): # class str
"""@
With Chromatin, the parameter file is generally obtained from
a heat or eheat file.
"""
#self.set_parFlnm(paramflnm)
#self.molsys.set_useChromatin(paramfile)
self.molsys.set_ParamType(ptype, paramflnm)
if self.molsys.paramType == "setheat":
self.f_heatmap = self.molsys.parFlnm
#
#
def set_RNAParams(self, ptype, paramflnm):
print ("Sorry, This system is only designed to handle Chromatin")
print (" The RNA tools are available by agreement and from")
print (" the author and supporters.")
sys.exit(0)
#
def set_source(self, s):
self.source = s
#
def set_program(self, s):
# job type is a lot more clear about the meaning
self.program = s
#print ("jobtype: ", s)
self.molsys.set_program(s)
#sys.exit(0)
#
def set_JobType(self, s):
# job type is a lot more clear about the meaning
self.jobtype = s
#print ("jobtype: ", s)
self.molsys.set_JobType(s)
#sys.exit(0)
#
def set_flnm(self, inflnm):
if self.system == "Chromatin":
self.f_heatmap = inflnm
elif self.system == "RNA":
self.f_vseq = inflnm
else:
print ("ERROR: system has not been set properly")
sys.exit(1)
#
#
def set_sequence(self, mseq):
if self.show_info:
print ("set_sequence")
print ("system ", self.system)
#
if self.system == "Chromatin":
self.chrseq = mseq
self.cSeq = Seq(self.chrseq, self.system)
self.N = len(self.chrseq)
self.molsys.set_mseq(self.chrseq)
elif self.system == "RNA":
self.rnaseq = mseq
self.rSeq = Seq(self.rnaseq, self.system)
self.N = len(self.rnaseq)
self.molsys.set_mseq(self.rnaseq)
else:
print ("ERROR: system has not been set properly")
sys.exit(1)
#
#
def set_structure(self, struct):
if self.system == "Chromatin":
self.chrstr = struct
elif self.system == "RNA":
self.rnastr = struct
else:
print ("ERROR: system has not been set properly")
sys.exit(1)
#
self.molsys.set_mstr(struct)
#
def set_T(self, T):
self.T = T
#
def set_xi(self, xi):
self.xi = xi # [bp]
# as I mentioned above, I think this is WRONG!
self.w = self.seg_len*self.xi/(self.lmbd)**2
#
def set_gmm(self, gmm):
self.gmm = gmm # dimensionless self avoiding walk constant
#
def set_delta(self, delta):
self.delta = delta # exponential weight (related to excluded volume)
#
def set_dGMI_threshold(self, dGMI_threshold):
# minimum loop length (for chromatin it is 1, for RNA = 3)
self.dGMI_threshold = dGMI_threshold # [kcal/mol]
"""threshold for MBL stability"""
#
def set_add_PK(self, b):
self.add_PK = b # include PK search
#
def set_scan_ahead(self, i):
self.scan_ahead = i # default 10
#
def set_dGpk_threshold(self, dGpk_threshold):
self.dGpk_threshold = dGpk_threshold # [kcal/mol]
"""threshold for PK stability"""
#
def set_Mg_binding(self, b):
self.Mg_binding = b
#
def set_dGrange(self, dGrange):
# free energy range of the search
self.dGrange = dGrange
#
"""
## VVVVVVVV Hard Wired!!! VVVVVVVVVVV
## free energy
self.kB = kB # [kcal/molK]
self.lmbd = lmbd
self.seg_len = 1.0 # [nt]
## secondary structure stem parameters
self.minStemLen = minStemLen # [bp]
self.max_bp_gap = max_bp_gap # [nt]
## secondary structure loop parameters
self.minLoopLen = minLoopLen # [nt]
## pseudoknot parameters
self.minPKloop = 2 # minimum PK loop (2 nt)
## other parameters
self.dangles = set_dangles
## AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
"""
def show_InputSettings(self):
print ("source: %s" % self.source)
print ("program: %s" % self.program)
print ("purpose: %s" % self.molsys.jobtype)
print ("paramType: %s" % self.molsys.paramType)
print ("system: %s" % self.system)
if self.system == "Chromatin":
print ("f_heatmap: %s" % self.f_heatmap)
print ("chrseq: %s" % self.molsys.mseq)
print ("chrstr: %s" % self.molsys.mstr)
print ("cSeq: %s" % self.molsys.mSeq)
elif self.system == "RNA":
print ("f_vseq: %s" % self.f_vseq)
print ("rnaseq: %s" % self.molsys.mseq)
print ("rnastr: %s" % self.molsys.mstr)
print ("rSeq: %s" % self.molsys.mSeq)
#
print ("seq len: %d" % self.N)
print ("FE data set:")
print (self.molsys)
#
#
def testRNA(ss_seq = ""):
# some allowed RNA readable sequences
# 0 10 20 30 40 50 60 70 80 90
# | | | | | | | | | |
#ss_seq = ".(((.(((((((.[[...)))))..]].((((...))))..))))).....([)]([)].......(..)..((..)).(..)..........................."
#ss_seq = ".((............)).."
#ss_seq = ".((((........)))).."
#ss_seq = ".((((..(...).)))).."
#ss_seq = ".(((.(((.(((....))).))).)))."
if ss_seq == "":
# default RNA test sequence
ss_seq = ".(((.(((......(((....))).......))).)))."
#
rnaseq = genRNASeq(ss_seq)
print ("RNA")
iSetUpRNA = InputSettings("RNA")
iSetUpRNA.set_source("main SettingsPacket")
iSetUpRNA.set_JobType("evaluation")
iSetUpRNA.set_program("testRNA")
# options: cantata, sarabande, sonata.
iSetUpRNA.set_FEparamData("Turner") # default is "Turner", "RNA"
# set up the actual FE Data according to the settings
iSetUpRNA.set_sequence(rnaseq)
iSetUpRNA.set_structure(ss_seq)
iSetUpRNA.show_InputSettings()
#print ("stop here at 1"); sys.exit(0)
gmfl = "test3s_1mm+1mmp1+2mm+3mm_3nt-w5x5.gMtrx"
iSetUpRNA.set_FEparamData("gMatrix", gmfl)
iSetUpRNA.show_InputSettings()
#print ("stop here at 2"); sys.exit(0)
parfl = "ViSparams.par"
iSetUpRNA.set_FEparamData("ParFile", parfl) # default is "Turner", "RNA"
iSetUpRNA.show_InputSettings()
#print ("stop here at 3"); sys.exit(0)
iSetUpRNA.set_JobType("sarabande")
bb_seq = '.'*len(rnaseq)
iSetUpRNA.set_structure(bb_seq)
iSetUpRNA.show_InputSettings()
#
def testChr(ss_seq = ""):
# some chromatin test sequences
# 0 10 20 30 40 50 60 70 80 90
# | | | | | | | | | |
#ss_seq = ".(((.(((((((.[[...)))))..]].((((...))))..))))).....([)]([)].......(..)..((..)).(..)..........................."
#ss_seq = ".((............)).."
#ss_seq = ".((((........)))).."
#ss_seq = ".((((..(...).)))).."
#ss_seq = ".(((.(((.(((....))).))).)))."
if ss_seq == "":
# default chromatin test sequence
ss_seq = ".(((.(((......(((....))).......))).)))."
#
chrseq = genChrSeq(ss_seq)
print ("Chromatin")
iSetUpChr = InputSettings("Chromatin")
iSetUpChr.set_source("main SettingsPacket")
# set up the actual FE Data according to the settings
iSetUpChr.set_JobType("evaluation")
iSetUpChr.set_program("testChr")
iSetUpChr.set_FEparamData("genheat") # default is "Turner", "RNA"
iSetUpChr.set_sequence(chrseq)
iSetUpChr.set_structure(ss_seq)
iSetUpChr.show_InputSettings()
#print ("stop here at 1b"); sys.exit(0)
iSetUpChr.set_JobType("chreval")
# options: chreval.
heatflnm = "/home/dawson/python/chromatin/test.heat"
iSetUpChr.set_FEparamData("setheat", heatflnm) # default is "Turner", "RNA"
print ("vv")
# set up the actual FE Data according to the settings
iSetUpChr.set_sequence(chrseq)
bb_seq = '.'*len(chrseq)
iSetUpChr.set_structure(bb_seq)
iSetUpChr.show_InputSettings()
#print ("stop here at 2b"); sys.exit(0)
#
def usage_main():
print ("%s system [sequence]")
def main(cl):
print (cl)
if len(cl) < 2:
print ("ERROR: must specify the system and optional sequence")
sys.exit(1)
#
if not cl[1] in sysDefLabels:
print ("ERROR: first argument must be either RNA or Chromatin")
sys.exit(0)
#
struct = ""
if len(cl) == 3:
struct = cl[2]
#
if cl[1] == "RNA":
testRNA(struct)
elif cl[1] == "Chromatin":
testChr(struct)
#
#
if __name__ == '__main__':
# running the program
main(sys.argv)
#
|
StarcoderdataPython
|
22658
|
from random import randint
s = t = ma = 0
m = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
for l in range(0, 3):
for c in range(0, 3):
m[l][c] = randint(0, 100)
print('-='*15)
for l in range(0, 3):
t += m[l][2]
for c in range(0, 3):
print(f'[{m[l][c]:^5}]', end='')
if m[l][c] % 2 == 0:
s += m[l][c]
if m[1][c] > ma:
ma = m[1][c]
print()
print('-='*15)
print(f'The sum of the even numbers is {s}')
print(f'The sum of the values in the third column is {t}')
print(f'The largest value in the second row is {ma}')
|
StarcoderdataPython
|
71158
|
<reponame>sriiora/tcf<filename>examples/test_dump_kws_one_target.py
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# pylint: disable = missing-docstring
"""Testcase using one target
-------------------------
Note the data offered for the target is a superset of the testcase's,
augmented with all the target metadata exported by the server
.. literalinclude:: /examples/test_dump_kws_one_target.py
:language: python
:pyobject: _test
Execute :download:`the testcase
<../examples/test_dump_kws_one_target.py>` with::
$ tcf run -vv /usr/share/tcf/examples/test_dump_kws_one_target.py
INFO0/gcoyBwifr /usr/share/tcf/examples/test_dump_kws_one_target.py#_test @localhost/qz31b-x86: Keywords for testcase:
{'cwd': '/home/inaky/z/s/local',
'runid': '',
'srcdir': '../../../../../usr/share/tcf/examples',
'srcdir_abs': '/usr/share/tcf/examples',
...
'target_group_targets': u'localhost/qz31b-x86:x86',
'target_group_types': u'qemu-zephyr-x86',
'tc_hash': 'gcoy',
'tc_name': '/usr/share/tcf/examples/test_dump_kws_one_target.py#_test',
'tc_name_short': '/usr/share/tcf/examples/test_dump_kws_one_target.py#_test',
'tc_origin': '/usr/share/tcf/examples/test_dump_kws_one_target.py:50',
'thisfile': '/usr/share/tcf/examples/test_dump_kws_one_target.py',
'tmpdir': '/tmp/tcf.run-DmwH93/gcoy',
'type': u'qemu-zephyr-x86'}
INFO0/gcoyBwifr /usr/share/tcf/examples/test_dump_kws_one_target.py#_test @localhost/qz31b-x86: Keywords for target 0:
{u'board': u'qemu_x86',
'bsp': u'x86',
u'bsp_models': {u'x86': [u'x86']},
u'bsps': {u'x86': {u'board': u'qemu_x86',
u'console': u'x86',
...
u'interconnects': {u'nwb': {u'ic_index': 31,
u'ipv4_addr': u'192.168.98.31',
u'ipv4_prefix_len': 24,
u'ipv6_addr': u'fc00::62:1f',
u'ipv6_prefix_len': 112,
u'mac_addr': u'02:62:00:00:00:1f'}},
u'interfaces': [u'tt_power_control_mixin',
u'test_target_images_mixin',
u'test_target_console_mixin',
u'tt_debug_mixin'],
...
'url': u'https://localhost:5000/ttb-v1/targets/qz31b-x86',
u'zephyr_board': u'qemu_x86',
u'zephyr_kernelname': u'zephyr.elf'}
PASS0/ toplevel @local: 1 tests (1 passed, 0 error, 0 failed, 0 blocked, 0 skipped, in 0:00:00.302253) - passed
(depending on your installation method, location might be
*~/.local/share/tcf/examples*)
"""
import pprint
import tcfl.tc
@tcfl.tc.target()
@tcfl.tc.tags(build_only = True, ignore_example = True)
class _test(tcfl.tc.tc_c):
def build(self, target):
self.report_info("Keywords for testcase:\n%s"
% pprint.pformat(self.kws),
level = 0)
target.report_info("Keywords for target 0:\n%s"
% pprint.pformat(target.kws),
level = 0)
|
StarcoderdataPython
|
1603242
|
import pygame
from pygame.locals import *
import time
import random
import numpy as np
import player
import food
class Agaria:
def __init__(self, rendering = True):
self.agents: [player.Player] = []
self.foods: [food.Food] = []
self.player_lastID = 0
self.rendering = rendering
self.start_time = time.time()
self.screen_size = (800,600)
self.background_color = (255,255,200)
self.is_running = True
self.RAM = np.zeros(128, dtype=np.uint32)
if self.rendering:
pygame.init()
self.screen = pygame.display.set_mode(self.screen_size, pygame.DOUBLEBUF | pygame.RESIZABLE)
pygame.display.set_caption("Agar.IA")
self.setup()
def setup(self):
agent = self.newPlayer()
agent.updateRAM()
pass
# def loop(self):
# while self.is_running:
# self.update()
# self.updateRAM()
# if self.rendering:
# self.render()
# pass
def update(self):
for a in self.agents:
a.updateRAM()
def updateRAM(self):
pass
def render(self):
for e in pygame.event.get():
if e.type == pygame.VIDEORESIZE:
self.screen_size = self.screen.get_size()
if e.type == pygame.QUIT:
self.quit()
if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
self.quit()
if self.is_running:
self.screen.fill(self.background_color)
# game logic
for a in self.agents:
pygame.draw.circle(self.screen, a.getColor(), a.getPosition(), a.getSize())
for f in self.foods:
pygame.draw.circle(self.screen, f.color, f.position, f.size)
pygame.display.flip()
else:
raise RuntimeError("Render was called while self.is_running == False")
def quit(self):
self.is_running = False
print("Game ended after %s seconds." % int(time.time() - self.start_time))
pygame.quit()
quit(0)
def getRAM(self) -> np.ndarray:
return self.RAM
def newPlayer(self):
p = player.Player(self)
self.agents.append(p)
return p
def newFood(self):
f = food.Food(position=(50,50))
self.foods.append(f)
return f
def getPlayerRAM(self, p):
# update the ram ? probably not
return p.getRAM()
# ---- IA SPECIFIC BELOW ----
def observation(self, player):
pass
def reward(self, player):
pass
def done(self, player):
pass
def info(self, player):
pass
def new_player(self):
pass
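# Hedged usage sketch mirroring the commented-out loop() above; assumes pygame
# and the local player/food modules are importable.
if __name__ == "__main__":
    game = Agaria(rendering=True)
    while game.is_running:
        game.update()
        if game.rendering:
            game.render()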
|
StarcoderdataPython
|
1759871
|
<filename>mp4box/parsing/urn.py
from mp4box.box import DataEntryUrnBox
def parse_urn(reader, my_size):
box = DataEntryUrnBox(my_size)
return box
|
StarcoderdataPython
|
161108
|
# -*- coding: utf-8 -*-
"""
ABSTRACT LOGGER
"""
# %% LIBRARY IMPORT
import abc
# %% FILE IMPORT
# %% CLASSES
class AbstractLogger(metaclass = abc.ABCMeta):
""" Abstract class for Loggers """
@abc.abstractmethod
def __init__(self):
pass
@abc.abstractmethod
def debug(self):
pass
@abc.abstractmethod
def info(self):
pass
@abc.abstractmethod
def warning(self):
pass
@abc.abstractmethod
def critical(self):
pass
@abc.abstractmethod
def error(self):
pass
@abc.abstractmethod
def exception(self):
pass
@abc.abstractmethod
def wrong_input(self):
pass
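# %% EXAMPLE
# A minimal concrete subclass sketch (hypothetical, for illustration only): every
# abstract method must be overridden before the class can be instantiated.
import logging

class PrintLogger(AbstractLogger):
    """ Illustrative logger that forwards to the standard logging module """
    def __init__(self):
        self._log = logging.getLogger(__name__)
    def debug(self):
        self._log.debug("debug message")
    def info(self):
        self._log.info("info message")
    def warning(self):
        self._log.warning("warning message")
    def critical(self):
        self._log.critical("critical message")
    def error(self):
        self._log.error("error message")
    def exception(self):
        self._log.exception("exception occurred")
    def wrong_input(self):
        self._log.error("wrong input received")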
|
StarcoderdataPython
|
58045
|
#!/usr/bin/env python3
import argparse
import csv
from logging import error, warning
import requests
import urllib3
import act
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def parseargs():
""" Parse arguments """
parser = argparse.ArgumentParser(
description='Get Threat Actors (MISP Galaxy)')
parser.add_argument(
'--userid',
dest='user_id',
required=True,
help="User ID")
parser.add_argument(
'--act-baseurl',
dest='act_baseurl',
required=True,
help='ACT API URI')
parser.add_argument(
"--logfile",
dest="log_file",
help="Log to file (default = stdout)")
parser.add_argument(
"--loglevel",
dest="log_level",
default="info",
help="Loglevel (default = info)")
return parser.parse_args()
def get_misp_threat_actors():
url = "https://raw.githubusercontent.com/MISP/misp-galaxy/master/clusters/threat-actor.json"
r = requests.get(url, verify=False)
return r.json()
def countrylist():
url = "http://download.geonames.org/export/dump/countryInfo.txt"
r = requests.get(url, verify=False)
countries = {
"iso": {},
"iso3": {},
"fips": {}
}
for row in csv.reader(
[line for line in r.text.splitlines() if line[0] != '#'],
delimiter='\t'):
countries["iso"][row[0]] = row[4]
countries["iso3"][row[1]] = row[4]
countries["fips"][row[3]] = row[4]
return countries
def add_to_act(client, ta_list):
countries = countrylist()
for ta in ta_list["values"]:
name = ta["value"]
if "meta" not in ta:
warning("Missing meta information in MISP on Threat Actor {}".format(name))
continue
aliases = ta["meta"].get("synonyms", [])
country = ta["meta"].get("country", None)
location = None
if country and country in countries["iso"]:
location = countries["iso"][country]
elif country and country in countries["iso3"]:
location = countries["iso3"][country]
error(
"country code is not valid ISO code, but found match in iso3: %s\n" %
country)
elif country and country in countries["fips"]:
location = countries["fips"][country]
error(
"country code is not valid ISO code, but found match in fips3: %s\n" %
country)
else:
location = None
if location:
client.fact("sourceGeography")\
.destination("location", location)\
.source("threatActor", name)\
.add()
elif country:
warning(
"country code not found in ISO, ISO3 or FIPS: %s\n" %
country)
# Loop over all items under indicators in report
for alias in aliases:
if alias == name:
continue # Do not alias to ourself
client.fact("threatActorAlias")\
.bidirectional("threatActor", alias, "threatActor", name)\
.add()
if __name__ == '__main__':
args = parseargs()
client = act.Act(
args.act_baseurl,
args.user_id,
args.log_level,
args.log_file,
"misp-threat-actors")
# Get all reports from SCIO
ta = get_misp_threat_actors()
# Add IOCs from reports to the ACT platform
add_to_act(client, ta)
|
StarcoderdataPython
|
3338156
|
# ---------------------------------------------------------------------
# Vendor: Enterasys
# OS: EOS
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.profile.base import BaseProfile
class Profile(BaseProfile):
name = "Enterasys.EOS"
|
StarcoderdataPython
|
1604698
|
<reponame>DhananjayMukhedkar/feature-store-api
#
# Copyright 2022 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from hsfs import engine, training_dataset_feature, client, util
from hsfs.training_dataset_split import TrainingDatasetSplit
from hsfs.core import (
tags_api,
storage_connector_api,
transformation_function_engine,
feature_view_api,
code_engine,
statistics_engine,
training_dataset_engine,
query_constructor_api,
)
class FeatureViewEngine:
ENTITY_TYPE = "featureview"
_TRAINING_DATA_API_PATH = "trainingdatasets"
_OVERWRITE = "overwrite"
_APPEND = "append"
def __init__(self, feature_store_id):
self._feature_store_id = feature_store_id
self._feature_view_api = feature_view_api.FeatureViewApi(feature_store_id)
self._tags_api = tags_api.TagsApi(feature_store_id, self.ENTITY_TYPE)
self._storage_connector_api = storage_connector_api.StorageConnectorApi(
feature_store_id
)
self._transformation_function_engine = (
transformation_function_engine.TransformationFunctionEngine(
feature_store_id
)
)
self._td_code_engine = code_engine.CodeEngine(
feature_store_id, self._TRAINING_DATA_API_PATH
)
self._statistics_engine = statistics_engine.StatisticsEngine(
feature_store_id, self._TRAINING_DATA_API_PATH
)
self._training_dataset_engine = training_dataset_engine.TrainingDatasetEngine(
feature_store_id
)
self._query_constructor_api = query_constructor_api.QueryConstructorApi()
def save(self, feature_view_obj):
if feature_view_obj.labels:
feature_view_obj._features.append(
[
training_dataset_feature.TrainingDatasetFeature(
name=label_name, label=True
)
for label_name in feature_view_obj.labels
]
)
self._transformation_function_engine.attach_transformation_fn(feature_view_obj)
updated_fv = self._feature_view_api.post(feature_view_obj)
print(
"Feature view created successfully, explore it at "
+ self._get_feature_view_url(updated_fv)
)
return updated_fv
def get(self, name, version=None):
if version:
fv = self._feature_view_api.get_by_name_version(name, version)
fv.transformation_functions = self.get_attached_transformation_fn(
fv.name, fv.version
)
else:
fv = self._feature_view_api.get_by_name(name)
for _fv in fv:
_fv.transformation_functions = self.get_attached_transformation_fn(
_fv.name, _fv.version
)
return fv
def delete(self, name, version=None):
if version:
return self._feature_view_api.delete_by_name_version(name, version)
else:
return self._feature_view_api.delete_by_name(name)
def get_batch_query(self, feature_view_obj, start_time, end_time, with_label=False):
return self._feature_view_api.get_batch_query(
feature_view_obj.name,
feature_view_obj.version,
start_time,
end_time,
is_python_engine=engine.get_type() == "python",
with_label=with_label,
)
def get_batch_query_string(self, feature_view_obj, start_time, end_time):
query_obj = self._feature_view_api.get_batch_query(
feature_view_obj.name,
feature_view_obj.version,
start_time,
end_time,
is_python_engine=engine.get_type() == "python",
)
fs_query = self._query_constructor_api.construct_query(query_obj)
if fs_query.pit_query is not None:
return fs_query.pit_query
return fs_query.query
def get_attached_transformation_fn(self, name, version):
transformation_functions = (
self._feature_view_api.get_attached_transformation_fn(name, version)
)
if isinstance(transformation_functions, list):
transformation_functions_dict = dict(
[
(tf.name, tf.transformation_function)
for tf in transformation_functions
]
)
else:
transformation_functions_dict = {
transformation_functions.name: transformation_functions.transformation_function
}
return transformation_functions_dict
def create_training_dataset(
self, feature_view_obj, training_dataset_obj, user_write_options
):
self._set_event_time(feature_view_obj, training_dataset_obj)
updated_instance = self._create_training_data_metadata(
feature_view_obj, training_dataset_obj
)
td_job = self.compute_training_dataset(
feature_view_obj,
user_write_options,
training_dataset_obj=training_dataset_obj,
)
return updated_instance, td_job
def get_training_data(
self,
feature_view_obj,
read_options=None,
splits=[],
training_dataset_obj=None,
training_dataset_version=None,
):
# check if provided td version has already existed.
if training_dataset_version:
td_updated = self._get_training_data_metadata(
feature_view_obj, training_dataset_version
)
else:
self._set_event_time(feature_view_obj, training_dataset_obj)
td_updated = self._create_training_data_metadata(
feature_view_obj, training_dataset_obj
)
# check splits
if len(splits) != len(td_updated.splits):
if len(td_updated.splits) == 0:
method_name = "get_training_data"
elif len(td_updated.splits) == 2:
method_name = "get_train_test_split"
elif len(td_updated.splits) == 3:
method_name = "get_train_validation_test_splits"
raise ValueError(
f"Incorrect `get` method is used. Use `fv.{method_name}` instead."
)
read_options = engine.get_instance().read_options(
td_updated.data_format, read_options
)
if td_updated.training_dataset_type != td_updated.IN_MEMORY:
split_df = self._read_from_storage_connector(
td_updated, td_updated.splits, read_options
)
else:
self._check_feature_group_accessibility(feature_view_obj)
query = self.get_batch_query(
feature_view_obj,
start_time=td_updated.event_start_time,
end_time=td_updated.event_end_time,
with_label=True,
)
split_df = engine.get_instance().get_training_data(
td_updated, feature_view_obj, query, read_options
)
self.compute_training_dataset_statistics(
feature_view_obj, td_updated, split_df, calc_stat=True
)
# split df into features and labels df
if td_updated.splits:
for split in td_updated.splits:
split_name = split.name
split_df[split_name] = engine.get_instance().split_labels(
split_df[split_name], feature_view_obj.labels
)
else:
split_df = engine.get_instance().split_labels(
split_df, feature_view_obj.labels
)
return td_updated, split_df
def _set_event_time(self, feature_view_obj, training_dataset_obj):
event_time = feature_view_obj.query._left_feature_group.event_time
df = None
if event_time:
if training_dataset_obj.splits:
for split in training_dataset_obj.splits:
if (
split.name == TrainingDatasetSplit.TRAIN
and not split.start_time
):
df = (
feature_view_obj.query._left_feature_group.select_all().read()
if df is None
else df
)
split.start_time = self._get_start_time(df, event_time)
if split.name == TrainingDatasetSplit.TEST and not split.end_time:
df = (
feature_view_obj.query._left_feature_group.select_all().read()
if df is None
else df
)
split.end_time = self._get_end_time(df, event_time)
else:
if not training_dataset_obj.event_start_time:
df = (
feature_view_obj.query._left_feature_group.select_all().read()
if df is None
else df
)
training_dataset_obj.event_start_time = self._get_start_time(
df, event_time
)
df = (
feature_view_obj.query._left_feature_group.select_all().read()
if df is None
else df
)
training_dataset_obj.event_end_time = self._get_end_time(
df, event_time
)
def _get_start_time(self, df, event_time):
if engine.get_type() == "spark":
return df.agg({event_time: "min"}).collect()[0][0]
else:
return df[event_time].min()
def _get_end_time(self, df, event_time):
if engine.get_type() == "spark":
return df.agg({event_time: "max"}).collect()[0][0]
else:
return df[event_time].max()
def recreate_training_dataset(
self, feature_view_obj, training_dataset_version, user_write_options
):
training_dataset_obj = self._get_training_data_metadata(
feature_view_obj, training_dataset_version
)
td_job = self.compute_training_dataset(
feature_view_obj,
user_write_options,
training_dataset_obj=training_dataset_obj,
)
return training_dataset_obj, td_job
def _read_from_storage_connector(self, training_data_obj, splits, read_options):
if splits:
result = {}
for split in splits:
path = training_data_obj.location + "/" + str(split.name)
result[split.name] = self._read_dir_from_storage_connector(
training_data_obj, path, read_options
)
return result
else:
path = training_data_obj.location + "/" + training_data_obj.name
return self._read_dir_from_storage_connector(
training_data_obj, path, read_options
)
def _read_dir_from_storage_connector(self, training_data_obj, path, read_options):
try:
return training_data_obj.storage_connector.read(
# always read from materialized dataset, not query object
query=None,
data_format=training_data_obj.data_format,
options=read_options,
path=path,
)
except Exception as e:
if isinstance(e, FileNotFoundError):
raise FileNotFoundError(
f"Failed to read dataset from {path}."
" Check if path exists or recreate a training dataset."
)
else:
raise e
# This method is used by hsfs_utils to launch a job for python client
def compute_training_dataset(
self,
feature_view_obj,
user_write_options,
training_dataset_obj=None,
training_dataset_version=None,
):
if training_dataset_obj:
pass
elif training_dataset_version:
training_dataset_obj = self._get_training_data_metadata(
feature_view_obj, training_dataset_version
)
else:
raise ValueError("No training dataset object or version is provided")
batch_query = self.get_batch_query(
feature_view_obj,
training_dataset_obj.event_start_time,
training_dataset_obj.event_end_time,
with_label=True,
)
td_job = engine.get_instance().write_training_dataset(
training_dataset_obj,
batch_query,
user_write_options,
self._OVERWRITE,
feature_view_obj=feature_view_obj,
)
self._td_code_engine.save_code(training_dataset_obj)
if engine.get_type() == "spark":
if training_dataset_obj.splits:
td_df = dict(
[
(
split.name,
self._training_dataset_engine.read(
training_dataset_obj, split.name, {}
),
)
for split in training_dataset_obj.splits
]
)
else:
td_df = self._training_dataset_engine.read(
training_dataset_obj, None, {}
)
else:
td_df = None
# currently we do not save the training dataset statistics config for training datasets
self.compute_training_dataset_statistics(
feature_view_obj,
training_dataset_obj,
td_df,
calc_stat=engine.get_type() == "spark",
)
return td_job
def compute_training_dataset_statistics(
self, feature_view_obj, training_dataset_obj, td_df, calc_stat=False
):
if training_dataset_obj.statistics_config.enabled and calc_stat:
if training_dataset_obj.splits:
if not isinstance(td_df, dict):
raise ValueError(
"Provided dataframes should be in dict format "
"'split': dataframe"
)
return self._statistics_engine.register_split_statistics(
training_dataset_obj,
feature_dataframes=td_df,
feature_view_obj=feature_view_obj,
)
else:
return self._statistics_engine.compute_statistics(
training_dataset_obj,
feature_dataframe=td_df,
feature_view_obj=feature_view_obj,
)
def _get_training_data_metadata(self, feature_view_obj, training_dataset_version):
td = self._feature_view_api.get_training_dataset_by_version(
feature_view_obj.name, feature_view_obj.version, training_dataset_version
)
# schema and transformation functions need to be set for writing training data or feature serving
td.schema = feature_view_obj.schema
td.transformation_functions = feature_view_obj.transformation_functions
return td
def _create_training_data_metadata(self, feature_view_obj, training_dataset_obj):
td = self._feature_view_api.create_training_dataset(
feature_view_obj.name, feature_view_obj.version, training_dataset_obj
)
td.schema = feature_view_obj.schema
td.transformation_functions = feature_view_obj.transformation_functions
return td
def delete_training_data(self, feature_view_obj, training_data_version=None):
if training_data_version:
self._feature_view_api.delete_training_data_version(
feature_view_obj.name, feature_view_obj.version, training_data_version
)
else:
self._feature_view_api.delete_training_data(
feature_view_obj.name, feature_view_obj.version
)
def delete_training_dataset_only(
self, feature_view_obj, training_data_version=None
):
if training_data_version:
self._feature_view_api.delete_training_dataset_only_version(
feature_view_obj.name, feature_view_obj.version, training_data_version
)
else:
self._feature_view_api.delete_training_dataset_only(
feature_view_obj.name, feature_view_obj.version
)
def get_batch_data(
self,
feature_view_obj,
start_time,
end_time,
training_dataset_version,
transformation_functions,
read_options=None,
):
self._check_feature_group_accessibility(feature_view_obj)
feature_dataframe = self.get_batch_query(
feature_view_obj, start_time, end_time, with_label=False
).read(read_options=read_options)
training_dataset_obj = self._get_training_data_metadata(
feature_view_obj, training_dataset_version
)
training_dataset_obj.transformation_functions = transformation_functions
return engine.get_instance()._apply_transformation_function(
training_dataset_obj, dataset=feature_dataframe
)
def add_tag(
self, feature_view_obj, name: str, value, training_dataset_version=None
):
self._tags_api.add(
feature_view_obj,
name,
value,
training_dataset_version=training_dataset_version,
)
def delete_tag(self, feature_view_obj, name: str, training_dataset_version=None):
self._tags_api.delete(
feature_view_obj, name, training_dataset_version=training_dataset_version
)
def get_tag(self, feature_view_obj, name: str, training_dataset_version=None):
return self._tags_api.get(
feature_view_obj, name, training_dataset_version=training_dataset_version
)[name]
def get_tags(self, feature_view_obj, training_dataset_version=None):
return self._tags_api.get(
feature_view_obj, training_dataset_version=training_dataset_version
)
def _check_feature_group_accessibility(self, feature_view_obj):
if (
engine.get_type() == "python" or engine.get_type() == "hive"
) and not feature_view_obj.query.from_cache_feature_group_only():
raise NotImplementedError(
"Python kernel can only read from cached feature group."
" Please use `feature_view.create_training_dataset` instead."
)
def _get_feature_view_url(self, feature_view):
path = (
"/p/"
+ str(client.get_instance()._project_id)
+ "/fs/"
+ str(feature_view.featurestore_id)
+ "/fv/"
+ str(feature_view.name)
+ "/version/"
+ str(feature_view.version)
)
return util.get_hostname_replaced_url(path)
|
StarcoderdataPython
|
3245564
|
"""
O(n)
"""
from ds.linkedList.linkedlist import LinkedList
def find_middle_node_linkedlist(l):
node = l.get_first()
single = double = node
while single.next_node is not None and double.next_node is not None:
single = single.next_node
double = double.next_node.next_node
if double is None:
break
return single
if __name__ == "__main__":
values = ["a", "b", "c", "d", "e", "f", "g", "h"]
llist = LinkedList()
[llist.add_last(x) for x in values]
print(find_middle_node_linkedlist(llist).data)
|
StarcoderdataPython
|
143694
|
from dateutil.relativedelta import *
from eventtools.models import Rule
from eventtools_testapp.models import *
from eventtools.utils.dateranges import *
from datetime import datetime, date, timedelta
def fixture(obj):
obj.gallery = ExampleVenue.objects.create(name="Gallery A", slug="gallery-A")
obj.auditorium = ExampleVenue.objects.create(name="Auditorium", slug="auditorium")
obj.cinema_1 = ExampleVenue.objects.create(name="Cinema 1", slug="cinema-1")
obj.cinema_2 = ExampleVenue.objects.create(name="Cinema 2", slug="cinema-2")
#some simple events
obj.talk = ExampleEvent.eventobjects.create(name="Curator's Talk", venue=obj.gallery)
obj.performance = ExampleEvent.eventobjects.create(name="A performance", venue=obj.auditorium)
#some useful dates
obj.day1 = date(2010,10,10)
obj.day2 = obj.day1+timedelta(1)
#some simple occurrences
obj.talk_morning = ExampleOccurrence.objects.create(event=obj.talk, start=datetime(2010,10,10,10,00))
obj.talk_afternoon = ExampleOccurrence.objects.create(event=obj.talk, start=datetime(2010,10,10,14,00))
obj.talk_tomorrow_morning_cancelled = ExampleOccurrence.objects.create(event=obj.talk, start=datetime(2010,10,11,10,00), status='cancelled')
obj.performance_evening = ExampleOccurrence.objects.create(event=obj.performance, start=datetime(2010,10,10,20,00))
obj.performance_tomorrow = ExampleOccurrence.objects.create(event=obj.performance, start=datetime(2010,10,11,20,00))
obj.performance_day_after_tomorrow = ExampleOccurrence.objects.create(event=obj.performance, start=datetime(2010,10,12,20,00))
#an event with many occurrences
# deleting the 2nd jan, because we want to test it isn't displayed
obj.daily_tour = ExampleEvent.eventobjects.create(name="Daily Tour", slug="daily-tour")
for day in range(50):
if day !=1: #2nd of month.
d = date(2010,1,1) + timedelta(day)
obj.daily_tour.occurrences.create(start=d)
obj.weekly_talk = ExampleEvent.eventobjects.create(name="Weekly Talk", slug="weekly-talk")
for day in range(50):
d = date(2010,1,1) + timedelta(day*7)
obj.weekly_talk.occurrences.create(start=datetime.combine(d, time(10,00)), end=datetime.combine(d, time(12,00)))
#an event with some variations
obj.film = ExampleEvent.eventobjects.create(name="Film Night", venue=obj.cinema_1)
obj.film_with_popcorn = ExampleEvent.eventobjects.create(parent=obj.film, name="Film Night", difference_from_parent="free popcorn", venue=obj.cinema_1)
obj.film_with_talk = ExampleEvent.eventobjects.create(parent=obj.film, name="Film Night", difference_from_parent="director's talk", venue=obj.auditorium)
obj.film_with_talk_and_popcorn = ExampleEvent.eventobjects.create(parent=obj.film_with_talk, name="Film Night", difference_from_parent="popcorn and director's talk", venue=obj.cinema_2)
# obj.film_with_popcorn.move_to(obj.film, position='first-child')
# obj.film_with_talk.move_to(obj.film, position='first-child')
# obj.film_with_talk_and_popcorn.move_to(obj.film_with_talk, position='first-child')
# the mptt gotcha. reload the parents
reload_films(obj)
obj.film_occ = obj.film.occurrences.create(start=datetime(2010,10,10,18,30))
obj.film_occ.save()
obj.film_with_popcorn_occ = obj.film_with_popcorn.occurrences.create(start=datetime(2010,10,11,18,30))
obj.film_with_talk_occ = obj.film_with_talk.occurrences.create(start=datetime(2010,10,12,18,30))
obj.film_with_talk_and_popcorn_occ = obj.film_with_talk_and_popcorn.occurrences.create(start=datetime(2010,10,13,18,30))
def generator_fixture(obj):
#TestEvents with generators (separate models to test well)
obj.weekly = Rule.objects.create(frequency = "WEEKLY")
obj.bin_night = ExampleGEvent.eventobjects.create(name='Bin Night')
obj.one_off_generator = obj.bin_night.generators.create(event_start=datetime(2010,1,2,10,30), event_end=datetime(2010,1,2,11,30))
obj.weekly_generator = obj.bin_night.generators.create(event_start=datetime(2010,1,1,10,30), event_end=datetime(2010,1,1,11,30), rule=obj.weekly, repeat_until=date(2010,1,29))
obj.endless_generator = obj.bin_night.generators.create(event=obj.bin_night, event_start=datetime(2010,1,3,10,30), event_end=datetime(2010,1,3,11,30), rule=obj.weekly)
obj.all_day_generator = obj.bin_night.generators.create(event=obj.bin_night, event_start=date(2010,1,4), rule=obj.weekly, repeat_until=date(2010,1,25))
#this should create 0 occurrences, since it is a duplicate of weekly.
obj.dupe_weekly_generator = obj.bin_night.generators.create(event_start=datetime(2010,1,1,10,30), event_end=datetime(2010,1,1,11,30), rule=obj.weekly, repeat_until=date(2010,1,29))
obj.furniture_collection = ExampleGEvent.eventobjects.create(name='Furniture Collection Day')
def reload_films(obj):
obj.film = obj.film.reload()
obj.film_with_popcorn = obj.film_with_popcorn.reload()
obj.film_with_talk = obj.film_with_talk.reload()
obj.film_with_talk_and_popcorn = obj.film_with_talk_and_popcorn.reload()
def bigfixture(obj):
# have to create some more events since we are working from 'today'.
obj.pe = ExampleEvent.eventobjects.create(name="proliferating event")
obj.todaynow = datetime.now()
obj.today = date.today()
obj.tomorrow = obj.today + timedelta(1)
obj.yesterday = obj.today - timedelta(1)
obj.this_week = dates_in_week_of(obj.today)
obj.last_week = dates_in_week_of(obj.today-timedelta(7))
obj.next_week = dates_in_week_of(obj.today+timedelta(7))
obj.this_weekend = dates_in_weekend_of(obj.today)
obj.last_weekend = dates_in_weekend_of(obj.today-timedelta(7))
obj.next_weekend = dates_in_weekend_of(obj.today+timedelta(7))
obj.this_fortnight = dates_in_fortnight_of(obj.today)
obj.last_fortnight = dates_in_fortnight_of(obj.today-timedelta(14))
obj.next_fortnight = dates_in_fortnight_of(obj.today+timedelta(14))
obj.this_month = dates_in_month_of(obj.today)
obj.last_month = dates_in_month_of(obj.today+relativedelta(months=-1))
obj.next_month = dates_in_month_of(obj.today+relativedelta(months=+1))
obj.this_year = dates_in_year_of(obj.today)
obj.last_year = dates_in_year_of(obj.today+relativedelta(years=-1))
obj.next_year = dates_in_year_of(obj.today+relativedelta(years=+1))
obj.now = datetime.now().time()
obj.hence1 = (datetime.now() + timedelta(seconds=600)).time()
obj.hence2 = (datetime.now() + timedelta(seconds=1200)).time()
obj.earlier1 = (datetime.now() - timedelta(seconds=600)).time()
obj.earlier2 = (datetime.now() - timedelta(seconds=1200)).time()
#on each of the given days, we'll create 5 occurrences:
# all day
# earlier
# hence
# current
# multiday
present_days = \
obj.this_week + \
obj.this_weekend + \
obj.this_fortnight + \
obj.this_month + \
obj.this_year + \
[obj.today]
past_days = \
obj.last_week + \
obj.last_weekend + \
obj.last_fortnight + \
obj.last_month + \
obj.last_year + \
[obj.yesterday]
future_days = \
obj.next_week + \
obj.next_weekend + \
obj.next_fortnight + \
obj.next_month + \
obj.next_year + \
[obj.tomorrow]
for day in present_days + past_days + future_days:
#all day
obj.pe.occurrences.create(start=day)
# earlier
obj.pe.occurrences.create(start=datetime.combine(day, obj.earlier2), end=datetime.combine(day, obj.earlier1))
# later
obj.pe.occurrences.create(start=datetime.combine(day, obj.hence1), end=datetime.combine(day, obj.hence2))
# now-ish
obj.pe.occurrences.create(start=datetime.combine(day, obj.earlier1), end=datetime.combine(day, obj.hence1))
# multiday
obj.pe.occurrences.create(start=datetime.combine(day, obj.earlier1), end=datetime.combine(day+timedelta(1), obj.hence1))
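# Illustrative use from a Django test case: the fixture helpers take the test instance
# itself and attach every created object to it as an attribute (TestCase is assumed to
# come from django.test, matching the Django models used above).
#
#   class EventFixtureTests(TestCase):
#       def setUp(self):
#           fixture(self)
#           generator_fixture(self)
#       def test_talk_has_occurrences(self):
#           self.assertTrue(self.talk.occurrences.exists())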
|
StarcoderdataPython
|
3359747
|
"""
firmware
========
Provides a device firmware revision, which can be change by incoming "upgradeFirmware" events,
and rolled-back to factory firmware by incoming "factoryReset" events.
Configurable parameters::
{
}
Device properties created::
{
"firmware" : The current firmware of the device
}
"""
from .device import Device
import random
import logging
class Firmware(Device):
def __init__(self, instance_name, time, engine, update_callback, context, params):
super(Firmware,self).__init__(instance_name, time, engine, update_callback, context, params)
fw = random.choice(["0.51","0.52","0.6","0.6","0.6","0.7","0.7","0.7","0.7"])
self.set_properties({'factoryFirmware' : fw, 'firmware' : fw } )
def comms_ok(self):
return super(Firmware,self).comms_ok()
def external_event(self, event_name, arg):
super(Firmware,self).external_event(event_name, arg)
if event_name=="upgradeFirmware":
logging.info("Upgrading firmware on device "+self.properties["$id"]+" to "+str(arg))
self.set_property("firmware", arg)
if event_name=="factoryReset":
logging.info("Factory-resetting firmware on device "+self.properties["$id"])
self.set_property("firmware", self.get_property("factoryFirmware"))
def close(self):
super(Firmware,self).close()
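# Illustrative event flow; `engine`, `update_callback` and `context` are placeholders for
# the framework objects the simulator normally supplies when it instantiates devices:
#
#   dev = Firmware("device-0", 0, engine, update_callback, context, {})
#   dev.external_event("upgradeFirmware", "0.8")   # "firmware" property becomes "0.8"
#   dev.external_event("factoryReset", None)       # rolls back to the "factoryFirmware" value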
|
StarcoderdataPython
|
65142
|
import torch
import unittest
import numpy as np
from torch.autograd import Variable
from losses.svm import SmoothTop1SVM, SmoothTopkSVM, MaxTop1SVM, MaxTopkSVM
from losses.functional import Topk_Smooth_SVM
from tests.utils import assert_all_close, V
from tests.py_ref import svm_topk_smooth_py_1, svm_topk_smooth_py_2,\
smooth_svm_py, max_svm_py, svm_topk_max_py
from torch.autograd.gradcheck import gradcheck
class TestMaxSVM(unittest.TestCase):
def setUp(self):
torch.manual_seed(1234)
np.random.seed(1234)
self.n_samples = 20
self.n_classes = 7
self.alpha = 1.
self.x = torch.randn(self.n_samples, self.n_classes)
self.y = torch.from_numpy(np.random.randint(0, self.n_classes,
size=self.n_samples))
self.k = 3
def testMaxSVM(self):
max_svm_th = MaxTop1SVM(self.n_classes, alpha=self.alpha)
res_th = max_svm_th(V(self.x), V(self.y))
res_py = max_svm_py(V(self.x), V(self.y), alpha=self.alpha)
assert_all_close(res_th, res_py)
def testMaxSVMtopk(self):
max_svm_th = MaxTopkSVM(self.n_classes, k=self.k)
res_th = max_svm_th(V(self.x), V(self.y))
res_py = svm_topk_max_py(V(self.x), V(self.y), k=self.k)
assert_all_close(res_th, res_py)
class TestSmoothSVM(unittest.TestCase):
def setUp(self):
torch.manual_seed(1234)
np.random.seed(1234)
self.n_samples = 20
self.n_classes = 7
self.tau = float(2.)
self.x = torch.randn(self.n_samples, self.n_classes)
self.y = torch.from_numpy(np.random.randint(0, self.n_classes,
size=self.n_samples))
def testSmoothSVM(self):
smooth_svm_th = SmoothTop1SVM(self.n_classes, tau=self.tau)
res_th = smooth_svm_th(V(self.x), V(self.y))
res_py = smooth_svm_py(V(self.x), V(self.y), self.tau)
assert_all_close(res_th, res_py)
class TestSmoothSVMTopk(unittest.TestCase):
def setUp(self):
torch.manual_seed(1234)
np.random.seed(1234)
self.n_samples = 2
self.n_classes = 7
self.k = 5
self.tau = float(2.)
self.x = torch.randn(self.n_samples, self.n_classes)
self.y = torch.from_numpy(np.random.randint(0, self.n_classes,
size=self.n_samples))
self.labels = torch.from_numpy(np.arange(self.n_classes))
def testSmoothSVMpy(self):
res_py_1 = svm_topk_smooth_py_1(V(self.x), V(self.y), self.tau, self.k)
res_py_2 = svm_topk_smooth_py_2(V(self.x), V(self.y), self.tau, self.k)
assert_all_close(res_py_1, res_py_2)
def testSmoothSVMth_functional(self):
F = Topk_Smooth_SVM(self.labels, self.k, self.tau)
res_th = F(V(self.x), V(self.y))
res_py = svm_topk_smooth_py_1(V(self.x), V(self.y), self.tau, self.k)
assert_all_close(res_th, res_py)
def testSmoothSVMth_loss(self):
svm_topk_smooth_th = SmoothTopkSVM(self.n_classes, tau=self.tau,
k=self.k)
res_th = svm_topk_smooth_th(V(self.x), V(self.y))
res_py = svm_topk_smooth_py_1(V(self.x),
V(self.y),
self.tau, self.k).mean()
assert_all_close(res_th, res_py)
def testSmoothSVMth_loss_scales(self):
svm_topk_smooth_th = SmoothTopkSVM(self.n_classes, tau=self.tau, k=self.k)
for scale in (1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3):
x = self.x * scale
res_th = svm_topk_smooth_th(V(x), V(self.y))
res_py = svm_topk_smooth_py_1(V(x), V(self.y), self.tau, self.k).mean()
assert_all_close(res_th, res_py)
def testGradSmoothSVMth_loss(self):
svm_topk_smooth_th = SmoothTopkSVM(self.n_classes, tau=self.tau, k=self.k)
for scale in (1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4):
x = self.x * scale
x = Variable(x, requires_grad=True)
assert gradcheck(lambda x: svm_topk_smooth_th(x, V(self.y)),
(x,), atol=1e-2, rtol=1e-3, eps=max(1e-4 * scale, 1e-2)), \
"failed with scale {}".format(scale)
|
StarcoderdataPython
|
3377825
|
<filename>python/researchDev/Parse.py<gh_stars>100-1000
class Parse:
    @staticmethod
    def Parse():
        # Raw string keeps the Windows backslashes from being read as escape sequences
        with open(r'C:\users\dryft\desktop\URLlist.txt', 'r') as infile:
            data = infile.read()
        testdata = "www.bit.ly"
        my_list = data.splitlines()
        # Report a match if any wordlist entry appears in the tested URL
        for word in my_list:
            if word in testdata:
                return True
        return False
x = Parse.Parse()
print(x)
|
StarcoderdataPython
|
119168
|
<reponame>py2ai/putBText
import pathlib
from setuptools import setup
# from distutils.core import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
name="pyshine",
version="0.0.7",
description="This library contains various Audio and Video Signal Processing utilities",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/py2ai/audioCapture.git",
author="PyShine",
author_email="<EMAIL>",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
packages=["pyshine"],
include_package_data=True,
install_requires=['numpy','sounddevice','matplotlib','keras'],
entry_points={
"console_scripts": [
]
},
)
|
StarcoderdataPython
|
3340341
|
'''
Created on Apr 10, 2011
@author: <NAME> <<EMAIL>>
'''
import png
import sys
import csv
def png2rgb(file):
r=png.Reader(file);
img = r.asRGB()
print(file, img)
return img
pass
def rgb2png(img, file, depth=8):
f = open(file, 'wb')
pngWriter = png.Writer(img[0], img[1], bitdepth=depth, greyscale=False)
pngWriter.write(f, img[2])
f.close()
pass
def rgb2gray(img):
r = 0.30
g = 0.59
b = 0.11
pixels = []
for row in img[2]:
grayRow = []
for i in range(0, len(row), 3):
luminance = int(r * row[i] + g * row[i+1] + b * row[i+2] + 0.5) % 256
for j in range(3): grayRow.append(luminance)
pass
pixels.append(tuple(grayRow))
pass
return (img[0], img[1], pixels, img[3])
pass
def rgbsave(img, file):
f = open(file, 'w')
f.write(str(img[0]) + ',' + str(img[1]) + '\n')
pixels = list(img[2])
    for row in pixels:
        for p in row[:-1]:
            f.write(str(p) + ',')
        # close each row with its final value rather than the penultimate one left in `p`
        f.write(str(row[-1]) + '\n')
pass
f.write('"' + str(img[3]) + '"')
f.close()
pass
def rgbload(file):
csvReader = csv.reader(open(file, 'r'), delimiter=',', quotechar='"')
i = 0
pixels = []
width = 0
height = 0
meta = {}
for row in csvReader:
if (i == 0):
width = int(row[0])
height = int(row[1])
print(width, height)
elif (i == height + 1):
meta = row[0]
print(meta)
break
else:
row = [int(e) for e in row]
pixels.append(tuple(row))
pass
i = i + 1
pass
print(width, height, meta)
return(width, height, pixels, meta)
pass
if __name__ == '__main__':
if (len(sys.argv) < 4):
print('Error: Oops! Too few arguments!')
print('Usage: ' + sys.argv[0] + ' OPERATION INPUT_FILE OUTPUT_FILE')
exit(-1)
pass
opr = str(sys.argv[1])
input = str(sys.argv[2])
output = str(sys.argv[3])
if (opr == 'rgb'):
img = png2rgb(input)
rgbsave(img, output)
pass
if (opr == 'png'):
img = rgbload(input)
rgb2png(img, output)
pass
if (opr == 'png16'):
img = rgbload(input)
rgb2png(img, output, 16)
pass
if (opr == 'gray'):
img = png2rgb(input)
img = rgb2gray(img)
rgb2png(img, output)
pass
exit(0)
pass
|
StarcoderdataPython
|
3339225
|
<reponame>nypzxy/detection_transformer<filename>dataset/FaceDataset.py
from pathlib import Path
import torch
from torch.utils.data import Dataset, DataLoader
import torchvision
from pycocotools import mask as coco_mask
import datasets.transforms as T
class FaceDataset(torchvision.datasets.CocoDetection):
    def __init__(self, img_folder, ann_file, transforms):
        # CocoDetection handles loading the images and their COCO-style annotations
        super(FaceDataset, self).__init__(img_folder, ann_file)
        self._transforms = transforms
    def __getitem__(self, idx):
        # the base class returns a PIL image and the raw list of annotation dicts
        img, target = super(FaceDataset, self).__getitem__(idx)
        image_id = self.ids[idx]
        target = {'image_id': image_id, 'annotations': target}
        if self._transforms is not None:
            img, target = self._transforms(img, target)
        return img, target
def make_coco_transforms(image_set):
normalize = T.Compose([
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
if image_set == 'train':
return T.Compose([
T.RandomHorizontalFlip(),
T.RandomSelect(
T.RandomResize(scales, max_size=1333),
T.Compose([
T.RandomResize([400, 500, 600]),
T.RandomSizeCrop(384, 600),
T.RandomResize(scales, max_size=1333),
])
),
normalize,
])
if image_set == 'val':
return T.Compose([
T.RandomResize([800], max_size=1333),
normalize,
])
raise ValueError(f'unknown {image_set}')
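# Minimal usage sketch; the image folder and annotation-file paths below are illustrative
# placeholders rather than paths that ship with this repository.
if __name__ == "__main__":
    train_transforms = make_coco_transforms('train')
    dataset = FaceDataset('data/train/images', 'data/train/annotations.json', train_transforms)
    loader = DataLoader(dataset, batch_size=2, shuffle=True,
                        collate_fn=lambda batch: tuple(zip(*batch)))
    images, targets = next(iter(loader))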
|
StarcoderdataPython
|
4805967
|
import cv2
import base64
from fastapi import WebSocket, APIRouter
from fastapi.responses import HTMLResponse
from fastapi.websockets import WebSocketDisconnect
from starlette.websockets import WebSocketState
from logger import get_logger
from services.factories import video_reader
router = APIRouter()
logger = get_logger(__name__)
html = """
<!DOCTYPE html>
<html>
<head>
<title>Video</title>
</head>
<body>
<h1>WebSocket Video</h1>
<form action="" onsubmit="sendMessage(event)">
<input type="text" id="messageText" autocomplete="off"/>
<button>Send</button>
</form>
<ul id='messages'>
</ul>
<div id="count"></div>
<div id="img"></div>
<img id="img2"> </img>
<script>
var ws = new WebSocket("ws://localhost:8000/ws/testchannel");
ws.onmessage = function(msg) {
var image = document.getElementById('img2');
image.src = 'data:image/jpg;base64,' + msg.data;
console.log(image.src);
};
function sendMessage(event) {
var input = document.getElementById("messageText")
ws.send(input.value)
input.value = ''
event.preventDefault()
}
</script>
</body>
</html>
"""
# image.src = 'data:image/jpg;base64,' + msg.data;
@router.get("/ws")
async def get():
return HTMLResponse(html)
@router.websocket("/ws/testchannel")
async def websocket_endpoint(websocket: WebSocket):
await websocket.accept()
try:
logger.info(f"Client start connection {websocket.client.host}")
while True:
frame = video_reader.get_frame()
            if frame is not None:
_, buffer = cv2.imencode('.jpg', frame)
data = base64.b64encode(buffer).decode("utf-8")
await websocket.send_text(data)
_ = await websocket.receive_text() # Need to gracefully close client connections
#await websocket.send_text(f"you sent message: {data}")
except WebSocketDisconnect:
logger.info("Client drop connection")
finally:
#if websocket.client_state != WebSocketState.DISCONNECTED:
# _ = await websocket.receive_text() # Need to gracefully close client connections
# await websocket.close()
pass
|
StarcoderdataPython
|
1659751
|
import os
import numpy as np
import pandas as pd
import torch
from data_management import IPDataset
from operators import (
Fourier,
RadialMaskFunc,
TVAnalysisPeriodic,
noise_gaussian,
to_complex,
unprep_fft_channel,
)
from reconstruction_methods import admm_l1_rec_diag, grid_search
# ----- load configuration -----
import config # isort:skip
# ------ setup ----------
device = torch.device("cuda")
file_name = "grid_search_l1_fourier_"
save_path = os.path.join(config.RESULTS_PATH, "grid_search_l1")
# ----- operators --------
mask_func = RadialMaskFunc(config.n, 40)
mask = unprep_fft_channel(mask_func((1, 1) + config.n + (1,)))
OpA = Fourier(mask)
OpTV = TVAnalysisPeriodic(config.n, device=device)
# ----- load test data --------
samples = range(50, 100)
test_data = IPDataset("test", config.DATA_PATH)
X_0 = torch.stack([test_data[s][0] for s in samples])
X_0 = to_complex(X_0.to(device))
# ----- noise setup --------
noise_min = 1e-3
noise_max = 0.08
noise_steps = 50
noise_rel = torch.tensor(
np.logspace(np.log10(noise_min), np.log10(noise_max), num=noise_steps)
).float()
# add extra noise levels 0.00 and 0.16 for tabular evaluation
noise_rel = (
torch.cat(
[torch.zeros(1).float(), noise_rel, 0.16 * torch.ones(1).float()]
)
.float()
.to(device)
)
def meas_noise(y, noise_level):
return noise_gaussian(y, noise_level)
# ----- set up reconstruction method and grid params --------
def _reconstruct(y, lam, rho):
x, _ = admm_l1_rec_diag(
y,
OpA,
OpTV,
OpA.adj(y),
OpTV(OpA.adj(y)),
lam,
rho,
iter=1000,
silent=True,
)
return x
# parameter search grid
grid = {
"lam": np.logspace(-6, -1, 25),
"rho": np.logspace(-5, 1, 25),
}
def combine_results():
results = pd.DataFrame(
columns=["noise_rel", "grid_param", "err_min", "grid", "err"]
)
for idx in range(len(noise_rel)):
results_cur = pd.read_pickle(
os.path.join(save_path, file_name + str(idx) + ".pkl")
)
results.loc[idx] = results_cur.loc[idx]
os.makedirs(save_path, exist_ok=True)
results.to_pickle(os.path.join(save_path, file_name + "all.pkl"))
return results
# ------ perform grid search ---------
if __name__ == "__main__":
idx_noise = (int(os.environ.get("SGE_TASK_ID")) - 1,)
for idx in idx_noise:
noise_level = noise_rel[idx] * OpA(X_0).norm(
p=2, dim=(-2, -1), keepdim=True
)
Y_ref = meas_noise(OpA(X_0), noise_level)
grid_param, err_min, err = grid_search(X_0, Y_ref, _reconstruct, grid)
results = pd.DataFrame(
columns=["noise_rel", "grid_param", "err_min", "grid", "err"]
)
results.loc[idx] = {
"noise_rel": noise_rel[idx],
"grid_param": grid_param,
"err_min": err_min,
"grid": grid,
"err": err,
}
os.makedirs(save_path, exist_ok=True)
results.to_pickle(
os.path.join(save_path, file_name + str(idx) + ".pkl")
)
|
StarcoderdataPython
|
1676861
|
<reponame>ethanlu/pazudora-solver
from pazudorasolver.piece import Fire, Wood, Water, Dark, Light, Heart, Poison, Jammer, Unknown
from pazudorasolver.board import Board
from pazudorasolver.heuristics.pruned_bfs import PrunedBfs
import pytest
@pytest.fixture(scope='module')
def weights():
return {Fire.symbol: 1.0,
Wood.symbol: 1.0,
Water.symbol: 1.0,
Dark.symbol: 1.0,
Light.symbol: 1.0,
Heart.symbol: 1.0,
Poison.symbol: .5,
Jammer.symbol: .5,
Unknown.symbol: 0.1}
def test_pruned_bfs(weights):
def compare(original, final, moves, row, column, assert_error_message):
for delta_r, delta_c in moves:
original.swap(row, column, row + delta_r, column + delta_c)
row += delta_r
column += delta_c
for r in range(original.rows):
for c in range(original.columns):
assert isinstance(original.cell(r, c), type(final.cell(r, c))), assert_error_message
# solution should contain move list that gets you to the final board state starting from original board state
original = Board.create_randomized_board(5, 6)
_, moves, final_board = PrunedBfs(weights).solve(original, 20)
compare(original, final_board, moves[1:], moves[0][0], moves[0][1], "Pruned BFS yielded an incorrect 4-way move list!")
# same with diagonals enabled
original = Board.create_randomized_board(5, 6)
h = PrunedBfs(weights)
h.diagonals = True
_, moves, final_board = h.solve(original, 20)
compare(original, final_board, moves[1:], moves[0][0], moves[0][1], "Pruned BFS yielded an incorrect 8-way move list!")
|
StarcoderdataPython
|
3329811
|
"""Main."""
import logging
import os
import sys
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from .interfaces.db import db_models
from .interfaces.db.database import engine
from .external_interfaces import user_router, record_router
db_models.Base.metadata.create_all(bind=engine)
app = FastAPI()
origins = [
"http://localhost",
"http://localhost:3000",
"http://192.168.0.21:3000",
"http://0.0.0.0:8000",
]
if 'ALLOW_ORIGIN' in os.environ:
origins.append(os.environ['ALLOW_ORIGIN'])
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(user_router.router, prefix="/api")
app.include_router(record_router.router, prefix="/api")
logger = logging.getLogger(__name__)
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.DEBUG)
|
StarcoderdataPython
|
3273879
|
<reponame>avcopan/autofile<gh_stars>0
""" A script to wipe out all cscan directories
"""
import itertools
from autofile import fs
RUN_PFX = '/lcrc/project/PACC/AutoMech/data/run/'
SAVE_PFX = '/lcrc/project/PACC/AutoMech/data/save/'
for sca_fs in itertools.chain(
fs.iterate_managers(RUN_PFX, ['SPECIES', 'THEORY', 'CONFORMER',
'ZMATRIX'], 'CSCAN'),
fs.iterate_managers(SAVE_PFX, ['SPECIES', 'THEORY', 'CONFORMER',
'ZMATRIX'], 'CSCAN'),
fs.iterate_managers(RUN_PFX, ['REACTION', 'THEORY', 'TRANSITION STATE',
'CONFORMER', 'ZMATRIX'], 'CSCAN'),
fs.iterate_managers(SAVE_PFX, ['REACTION', 'THEORY', 'TRANSITION STATE',
'CONFORMER', 'ZMATRIX'], 'CSCAN')):
if sca_fs[0].exists():
print(sca_fs[0].path())
sca_fs[0].removable = True
sca_fs[0].remove()
print('removing...')
|
StarcoderdataPython
|
3283982
|
<filename>Python/Zelle/Chapter5_SequencesStringsListsFiles/ProgrammingExercises/3_ExamScoring/examScoring.py<gh_stars>0
# examScoring.py
# A program that accepts a quiz score as an input and prints out the
# corresponding grade.
# 90-100:A, 80-89:B, 70-79:C, 60-69:D, <60:F
def main():
print("Quiz scoring is a program that accepts a quiz score as an input and \
prints out the corresponding grade.")
print("90-100:A, 80-89:B, 70-79:C, 60-69:D, <60:F")
score = eval(input("Please enter the quiz score: "))
grades = ["F", "F", "F", "F", "F", "F", "F", "F", "F", "F",\
"F", "F", "F", "F", "F", "F", "F", "F", "F", "F",\
"F", "F", "F", "F", "F", "F", "F", "F", "F", "F",\
"F", "F", "F", "F", "F", "F", "F", "F", "F", "F",\
"F", "F", "F", "F", "F", "F", "F", "F", "F", "F",\
"F", "F", "F", "F", "F", "F", "F", "F", "F", "F",\
"D", "D", "D", "D", "D", "D", "D", "D", "D", "D",\
"C", "C", "C", "C", "C", "C", "C", "C", "C", "C",\
"B", "B", "B", "B", "B", "B", "B", "B", "B", "B",\
"A", "A", "A", "A", "A", "A", "A", "A", "A", "A", "A"]
grade = grades[score]
print("\nThe quiz grade is {0}".format(grade))
main()
|
StarcoderdataPython
|
1719258
|
# Python modules
# Constants specific to pulse_funcs
class ApodizationFilterType(object):
NONE = 0
COSINE = 1
HAMMING = 2
class AnalyticType(object):
NONE = 0
GAUSSIAN = 1
SINC_GAUSSIAN = 2
HYPERBOLIC_SECANT = 3
class ProfileType(object):
NONE = 0
M_XY = 1
M_X_MINUS_Y = 2
M_Z = 3
    M_MINUS_Z = 4
# Gyromagnetic ratio of 1H - the hydrogen nucleus. (units: kHz/mT)
GAMMA1H = 42.576
# Size limit on b1 for root reflection
# b1 to polynomial will automagically truncate to this
# so calling code should check and issue a warning
b1rootlimit = 65
# Small number used for floating point comparisons.
epsilon = 0.000001
# Very small number used for double precision comparisons.
small_epsilon = 0.00000000001
# An even smaller number; used to represent an acceptable fractional error
# or difference from an expected or desired value.
EPS = pow(2,-52)
|
StarcoderdataPython
|
1697332
|
<filename>DataAugScripts/cosine_similarity.py
import datetime
from absl import logging
import numpy as np
import pandas as pd
def get_cosine_similarity(sents, embed):
# Reduce logging output.
logging.set_verbosity(logging.ERROR)
# with tf.Session() as session:
# session.run([tf.global_variables_initializer(), tf.tables_initializer()])
embeddings = embed(sents)
# this is the cosine score but since these embeddings are normalised
# we can just take the inner product (similar to dot product for 1D arrays)
# this gives us a score of how closely related the sentences are.
return np.inner(embeddings[0], embeddings[1])
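# Illustrative usage with a TensorFlow Hub sentence encoder; the module handle below is an
# assumption about which encoder was intended. Any `embed(list_of_strings)` callable that
# returns unit-norm vectors works with get_cosine_similarity().
if __name__ == "__main__":
    import tensorflow_hub as hub
    embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder/4")
    score = get_cosine_similarity(["The cat sat on the mat.", "A cat sits on a mat."], embed)
    print(score)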
|
StarcoderdataPython
|
1713272
|
<gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Step generator
import numpy as np
from numpy.linalg import norm
from scipy.io import savemat
import matplotlib.pyplot as plot
import struct
import UR5Class
import socket
import time
import sys
import csv
#import json
import Transformations as tf
import os
import threading
def stepGenerator(ur5,stepType,initArray,positionToStep,step):
if (stepType == 'joint'):
print('\n\n\n')
print('Step Generator:')
print('Init Array is (joint space): ' + str(initArray))
print('Step is (radians): ' + str(step))
initArray[positionToStep] = initArray[positionToStep] + step
targetPose = ur5.ur5_direct_kinematics(initArray, vector = True, rpy = True, apply_offset = True)
print('Final Array is (joint space): ' + str(initArray))
print('Final Array is (Cartesian space rpy): ' + str(targetPose))
elif(stepType == 'cartesian'):
print('\n\n\n')
print('Step Generator:')
print('Init Array is (Cartesian space): ' + str(initArray))
print('Step is (Cartesian): ' + str(step))
targetPose = initArray
targetPose[positionToStep] = targetPose[positionToStep] + step
print('Final Array is (Cartesian space rpy): ' + str(targetPose))
else:
exit(1)
return targetPose
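# Illustrative call; the UR5Class constructor arguments are robot-specific and not shown,
# so treat this as a sketch of the expected inputs rather than a runnable script:
#
#   ur5 = UR5Class.UR5Class(...)                        # placeholder construction
#   home_joints = [0.0, -1.57, 1.57, -1.57, -1.57, 0.0]
#   target_pose = stepGenerator(ur5, 'joint', home_joints, positionToStep=5, step=0.1)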
|
StarcoderdataPython
|
1612014
|
# Copyright 2018-2019 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
from mathutils import Color
from io_scene_gltf2.io.com import gltf2_io
from io_scene_gltf2.blender.exp import gltf2_blender_gather_texture_info, gltf2_blender_search_node_tree
from io_scene_gltf2.blender.exp import gltf2_blender_get
from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached
from io_scene_gltf2.io.com.gltf2_io_debug import print_console
@cached
def gather_material_pbr_metallic_roughness(blender_material, orm_texture, export_settings):
if not __filter_pbr_material(blender_material, export_settings):
return None
material = gltf2_io.MaterialPBRMetallicRoughness(
base_color_factor=__gather_base_color_factor(blender_material, export_settings),
base_color_texture=__gather_base_color_texture(blender_material, export_settings),
extensions=__gather_extensions(blender_material, export_settings),
extras=__gather_extras(blender_material, export_settings),
metallic_factor=__gather_metallic_factor(blender_material, export_settings),
metallic_roughness_texture=__gather_metallic_roughness_texture(blender_material, orm_texture, export_settings),
roughness_factor=__gather_roughness_factor(blender_material, export_settings)
)
return material
def __filter_pbr_material(blender_material, export_settings):
return True
def __gather_base_color_factor(blender_material, export_settings):
base_color_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, "Base Color")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, "BaseColor")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket_or_texture_slot_old(blender_material, "BaseColorFactor")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, "Background")
if not isinstance(base_color_socket, bpy.types.NodeSocket):
return None
if not base_color_socket.is_linked:
return list(base_color_socket.default_value)
texture_node = __get_tex_from_socket(base_color_socket)
if texture_node is None:
return None
def is_valid_multiply_node(node):
return isinstance(node, bpy.types.ShaderNodeMixRGB) and \
node.blend_type == "MULTIPLY" and \
len(node.inputs) == 3
multiply_node = next((link.from_node for link in texture_node.path if is_valid_multiply_node(link.from_node)), None)
if multiply_node is None:
return None
def is_factor_socket(socket):
return isinstance(socket, bpy.types.NodeSocketColor) and \
(not socket.is_linked or socket.links[0] not in texture_node.path)
factor_socket = next((socket for socket in multiply_node.inputs if is_factor_socket(socket)), None)
if factor_socket is None:
return None
if factor_socket.is_linked:
print_console("WARNING", "BaseColorFactor only supports sockets without links (in Node '{}')."
.format(multiply_node.name))
return None
return list(factor_socket.default_value)
def __gather_base_color_texture(blender_material, export_settings):
base_color_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, "Base Color")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, "BaseColor")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket_or_texture_slot_old(blender_material, "BaseColor")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, "Background")
return gltf2_blender_gather_texture_info.gather_texture_info((base_color_socket,), export_settings)
def __get_tex_from_socket(blender_shader_socket: bpy.types.NodeSocket):
result = gltf2_blender_search_node_tree.from_socket(
blender_shader_socket,
gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeTexImage))
if not result:
return None
return result[0]
def __gather_extensions(blender_material, export_settings):
return None
def __gather_extras(blender_material, export_settings):
return None
def __gather_metallic_factor(blender_material, export_settings):
metallic_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, "Metallic")
if metallic_socket is None:
metallic_socket = gltf2_blender_get.get_socket_or_texture_slot_old(blender_material, "MetallicFactor")
if isinstance(metallic_socket, bpy.types.NodeSocket) and not metallic_socket.is_linked:
return metallic_socket.default_value
return None
def __gather_metallic_roughness_texture(blender_material, orm_texture, export_settings):
if orm_texture is not None:
texture_input = orm_texture
else:
metallic_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, "Metallic")
roughness_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, "Roughness")
hasMetal = metallic_socket is not None and __has_image_node_from_socket(metallic_socket)
hasRough = roughness_socket is not None and __has_image_node_from_socket(roughness_socket)
if not hasMetal and not hasRough:
metallic_roughness = gltf2_blender_get.get_socket_or_texture_slot_old(blender_material, "MetallicRoughness")
if metallic_roughness is None or not __has_image_node_from_socket(metallic_roughness):
return None
texture_input = (metallic_roughness,)
elif not hasMetal:
texture_input = (roughness_socket,)
elif not hasRough:
texture_input = (metallic_socket,)
else:
texture_input = (metallic_socket, roughness_socket)
return gltf2_blender_gather_texture_info.gather_texture_info(texture_input, export_settings)
def __gather_roughness_factor(blender_material, export_settings):
roughness_socket = gltf2_blender_get.get_socket_or_texture_slot(blender_material, "Roughness")
if roughness_socket is None:
roughness_socket = gltf2_blender_get.get_socket_or_texture_slot_old(blender_material, "RoughnessFactor")
if isinstance(roughness_socket, bpy.types.NodeSocket) and not roughness_socket.is_linked:
return roughness_socket.default_value
return None
def __has_image_node_from_socket(socket):
result = gltf2_blender_search_node_tree.from_socket(
socket,
gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeTexImage))
if not result:
return False
return True
|
StarcoderdataPython
|
3266936
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-07-24 07:21
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('smelltest', '0006_auto_20180118_0838'),
]
operations = [
migrations.AlterModelOptions(
name='scent',
options={'ordering': ['id']},
),
]
|
StarcoderdataPython
|
47123
|
from os import system
from requests import get
from pyfiglet import figlet_format
from colored import fore, back, style, attr
attr(0)
print(back.BLACK)
print(fore.BLUE_VIOLET + style.BOLD)
system("clear")
print(figlet_format("DIRETORY BRUTE\nBY MOLEEY", width=58, justify="center", font="smslant"))
site = input("Link Do Site: ")
txt = open("wordlist.txt", "r")
line = txt.readlines()
for diretory in line:
    # readlines() keeps the trailing newline on each entry, so strip it before building the URL
    diretory = diretory.strip()
    req = get(site + diretory)
    status = req.status_code
    if status != 404:
        print(f"URL: {site + diretory} Status: {status}")
|
StarcoderdataPython
|
3383812
|
<gh_stars>1-10
# Copyright YukonTR 2015
from operator import itemgetter
class DeliveryManager(object):
''' Delivery Manager manages vehicle resources and their routes
'''
def __init__(self, env):
# simulation environment
self.env = env
self._vehicle_list = None
self.vindexerGet = lambda x: dict((p.vid,i) for i,p in enumerate(
self.vehicle_list)).get(x)
self.dashboard_run_process = env.process(self.dashboard_run(env))
def set_workorder(self, workorder, remaining_workorder):
# workorder ia assigned from producer (after producer receives workorder event after arrival rate expiration
print "*******************"
print "workorder received at %f for producer %d at %s ready by %f customer location=%s" % \
(self.env.now, workorder.dest_id, workorder.location, workorder.ready_time,
remaining_workorder.location)
plocation = workorder.location
closest_dict = self.find_closest_available_vehicle(plocation)
closest_vid = closest_dict["vid"]
dist_to_producer = closest_dict["dist"]
closest_vehicle = self._vehicle_list[self.vindexerGet(closest_vid)]
closest_vehicle.dispatch(dest=plocation, wtype=workorder.wtype, dist=dist_to_producer,
ready_time=workorder.ready_time,
remaining_workorder=remaining_workorder)
def find_closest_available_vehicle(self, plocation):
distance_list = [{"vid":x.vid, "dist":x.location.distance_to(plocation)}
for x in self.vehicle_list if x.drive_event and x.has_space()]
min_dict = min(distance_list, key=itemgetter("dist"))
return min_dict
def dashboard_run(self, env):
pass
@property
def vehicle_list(self):
return self._vehicle_list
@vehicle_list.setter
def vehicle_list(self, value):
self._vehicle_list = value
|
StarcoderdataPython
|
1763399
|
<filename>lib/ram/wiz/disk_choice/utils.py
import lsblk
def TryDevice(dev_path, size, subs):
dev_errs = []
dev_warn = []
dev = lsblk.GetBlockDevice(dev_path)
if dev.btype != 'disk':
dev_errs.append("Cannot operate on disks of type: %s." % dev.btype)
if dev.ro:
dev_errs.append("The device is read-only.")
if dev.busy():
dev_errs.append("Cannot operate on disks in use.")
if size and dev.size < size:
dev_errs.append("Disk size doesn't fit requirements.")
if not dev.subs:
pass
elif subs:
dev_errs.append("Disk has %s partitions." % len(dev.subs))
else:
dev_warn.append("Disk has %s partitions." % len(dev.subs))
return dev, dev_errs, dev_warn
def IterDevices(btype='disk'):
btype_list = btype.split(',') if btype else []
return lsblk.GetBlockDeviceList(btype_list).iterkeys()
|
StarcoderdataPython
|
1615322
|
<gh_stars>0
# Quiz 2
# Name: <NAME>
# ID: 8-840-2233
for i in range(4):
    print("monto de la compra: ")
    monto = int(input())
    # Discount tiers: 30% from 500 up, 20% for 200-499, 10% for 100-199, none below 100
    if monto >= 500:
        descuento = monto * 0.30
    elif monto >= 200:
        descuento = monto * 0.20
    elif monto >= 100:
        descuento = monto * 0.10
    else:
        descuento = 0
    total = monto - descuento
    print("el total es " + str(total))
|
StarcoderdataPython
|
187558
|
<filename>lib/googlecloudsdk/command_lib/compute/os_config/declarative.py
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hooks for declarative commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
def SetDefaultPageSizeRequestHook(default_page_size):
"""Create a modify_request_hook that applies default_page_size to args.
Args:
default_page_size: The page size to use when not specified by the user.
Returns:
A modify_request_hook that updates `args.page_size` when not set by user.
"""
def Hook(unused_ref, args, request):
if not args.page_size:
args.page_size = int(default_page_size)
return request
return Hook
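# Minimal sketch of exercising the factory directly; `_Args` stands in for the argparse
# namespace and the dict for the API request object that the declarative framework passes.
if __name__ == '__main__':
  class _Args(object):
    page_size = None
  hook = SetDefaultPageSizeRequestHook('100')
  request = hook(None, _Args(), {'parent': 'projects/example'})
  print(request)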
|
StarcoderdataPython
|
3271716
|
import io
import os
import re
import sys
import csv
import urllib
import itertools
from collections import defaultdict
from indra.databases import hgnc_client
hgnc_fam_url = ('ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/csv/'
'genefamily_db_tables/')
gene_fam_file = 'gene_has_family.csv'
family_file = 'family.csv'
hier_closure_file = 'hierarchy_closure.csv'
hier_file = 'hierarchy.csv'
def read_csv_from_ftp(fname):
"""Return a generator for a CSV file opened from HGNC's FTP server."""
url = hgnc_fam_url + fname
print('Loading %s' % url)
req = urllib.request.Request(url)
res = urllib.request.urlopen(req)
reader = csv.reader(io.TextIOWrapper(res))
for row in reader:
yield row
def _read_hgnc_family_genes():
"""Return dicts representing gene/familiy relationships in HGNC."""
family_to_gene = defaultdict(list)
gene_to_family = defaultdict(list)
for gene_id, family_id in read_csv_from_ftp(gene_fam_file):
family_to_gene[family_id].append(gene_id)
gene_to_family[gene_id].append(family_id)
return gene_to_family, family_to_gene
def _read_family_info():
"""Return dict representing HGNC family information"""
families = {}
for idx, row in enumerate(read_csv_from_ftp(family_file)):
if idx == 0:
header = row
continue
families[row[0]] = {k: v for k, v in zip(header, row)}
return families
def _read_hierarchy_info():
"""Return dict representing HGNC family membership information."""
children = defaultdict(list)
for idx, (parent, child) in enumerate(read_csv_from_ftp(hier_file)):
if idx == 0:
continue
children[parent].append(child)
return children
# Read HGNC resource files
families = _read_family_info()
children = _read_hierarchy_info()
gene_to_family, family_to_gene = _read_hgnc_family_genes()
def get_famplex_id(family):
"""Generate an appropriate FPLX ID for an HGNC family"""
if family['abbreviation']:
return family['abbreviation'].strip().replace(', ', '_')
else:
replaces = {' ': '_', '-': '_', ',': ''}
name = family['name'].strip()
for k, v in replaces.items():
name = name.replace(k, v)
return name
def is_pseudogene(gene):
return re.match(r'^.*\d+P$', gene) is not None
def get_relations_from_root(root_id, relations=None):
"""Return a set of relations starting from a given root."""
if relations is None:
relations = []
family_info = families[root_id]
child_ids = children.get(root_id)
famplex_id = get_famplex_id(family_info)
# In this case this HGNC family has genes as its children
gene_members = family_to_gene[root_id]
for gene in gene_members:
gene_name = hgnc_client.get_hgnc_name(gene)
if is_pseudogene(gene_name):
print('Assuming %s is a pseudogene, skipping' % gene_name)
continue
rel = ('HGNC', gene_name, 'isa', 'FPLX', famplex_id, root_id)
relations.append(rel)
# In this case this HGNC family is an intermediate that has further
# families as its children
if child_ids is not None:
for child_id in child_ids:
# We want to skip families that only consist of a single gene,
# and therefore these genes are directly linked to their
# "grandparent" without recursively adding the intermediate
# family parent.
grandchild_ids = children.get(child_id)
child_gene_members = family_to_gene[child_id]
if not grandchild_ids and len(child_gene_members) == 1:
gene_name = hgnc_client.get_hgnc_name(child_gene_members[0])
if is_pseudogene(gene_name):
print('Assuming %s is a pseudogene, skipping' % gene_name)
continue
print('HGNC family %s has one gene member %s which will be '
'linked directly to %s' % (child_id, gene_name,
famplex_id))
rel = ('HGNC', gene_name, 'isa', 'FPLX', famplex_id, root_id)
relations.append(rel)
# In this case, the child contains either further families or
# multiple genes, and we recursively add its relations
else:
child_info = families[child_id]
child_famplex_id = get_famplex_id(child_info)
rel = ('FPLX', child_famplex_id, 'isa', 'FPLX', famplex_id,
root_id)
relations.append(rel)
get_relations_from_root(child_id, relations)
return relations
def add_relations_to_famplex(relations):
"""Append a list of relations to relations.csv"""
rel_file = os.path.join(os.path.dirname(__file__), os.pardir,
'relations.csv')
with open(rel_file, 'a') as fh:
for rel in relations:
fh.write(','.join(rel[:-1]) + '\r\n')
def add_entities_to_famplex(entities):
"""Append a list of entities to entities.csv"""
ents_file = os.path.join(os.path.dirname(__file__), os.pardir,
'entities.csv')
with open(ents_file, 'a') as fh:
for ent in entities:
fh.write('%s\r\n' % ent)
def add_equivalences(relations):
"""Based on a list of relations, append equivalences to equivalences.csv"""
hgnc_fam_ids = sorted(list(set(int(r[5]) for r in relations)))
equivs = []
for fid in hgnc_fam_ids:
equivs.append(('HGNC_GROUP', str(fid),
get_famplex_id(families[str(fid)])))
equivs_file = os.path.join(os.path.dirname(__file__), os.pardir,
'equivalences.csv')
with open(equivs_file, 'a') as fh:
for eq in equivs:
fh.write('%s\r\n' % ','.join(eq))
def find_overlaps(relations):
"""Try to detect overlaps between existing FamPlex and HGNC families."""
all_gene_names = {r[1]: r[4] for r in relations if r[0] == 'HGNC'}
rel_file = os.path.join(os.path.dirname(__file__), os.pardir,
'relations.csv')
covered_genes = set()
covered_families = set()
fam_members = defaultdict(list)
hgnc_families = set()
with open(rel_file, 'r') as fh:
for sns, sid, rel, tns, tid in csv.reader(fh):
if sns == 'HGNC' and tns == 'FPLX':
fam_members[tid].append(sid)
if sns == 'HGNC' and sid in all_gene_names:
covered_genes.add(sid)
print('%s covered already' % sid)
covered_families.add(tid)
hgnc_families.add(all_gene_names[sid])
fplx_fam_members = {}
for famplex_fam in covered_families:
fplx_fam_members[famplex_fam] = set(fam_members[famplex_fam])
fplx_fam_members = sorted(fplx_fam_members.items(),
key=lambda x: list(x[1])[0])
hgnc_fam_members = {}
for hgnc_fam in hgnc_families:
hgnc_fam_members[hgnc_fam] = set(g for g, f in all_gene_names.items()
if f == hgnc_fam)
hgnc_fam_members = sorted(hgnc_fam_members.items(),
key=lambda x: list(x[1])[0])
totally_redundant = set()
for ff, hf in zip(fplx_fam_members, hgnc_fam_members):
if set(ff[1]) == set(hf[1]):
totally_redundant.add(hf[0])
print('FamPlex %s and HGNC-derived %s are exactly the same.' %
(ff[0], hf[0]))
else:
print('FamPlex %s and HGNC-derived %s are overlapping.' %
(ff[0], hf[0]))
print('Members of %s are: %s' % (ff[0], ','.join(sorted(ff[1]))))
print('Members of %s are: %s' % (hf[0], ','.join(sorted(hf[1]))))
return totally_redundant
if __name__ == '__main__':
# Start from one or more root family IDs to process from
hgnc_group_ids = sys.argv[1:]
relations = []
for hgnc_group_id in hgnc_group_ids:
print('Loading relations for HGNC group: %s' % hgnc_group_id)
relations += get_relations_from_root(hgnc_group_id)
# Sort the relations
relations = sorted(list(set(relations)), key=lambda x: (x[4], x[1]))
# Find and eliminate families that are exactly the same as existing ones
totally_redundant = find_overlaps(relations)
relations = [r for r in relations if r[4] not in totally_redundant]
# Get a flat list of entities
entities = sorted(list(set(r[4] for r in relations)))
# Extend FamPlex resource files with new information
add_relations_to_famplex(relations)
add_entities_to_famplex(entities)
add_equivalences(relations)
|
StarcoderdataPython
|
3271015
|
<reponame>YuxinZou/mmclassification
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn.functional as F
def timm_resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
"""Timm version pos embed resize function.
copied from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
""" # noqa:E501
ntok_new = posemb_new.shape[1]
if num_tokens:
posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0,
num_tokens:]
ntok_new -= num_tokens
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
if not len(gs_new): # backwards compatibility
gs_new = [int(math.sqrt(ntok_new))] * 2
assert len(gs_new) >= 2
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old,
-1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(
posemb_grid, size=gs_new, mode='bicubic', align_corners=False)
posemb_grid = posemb_grid.permute(0, 2, 3,
1).reshape(1, gs_new[0] * gs_new[1], -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
|
StarcoderdataPython
|
3245026
|
<filename>gym_goal/envs/goal_env.py
"""
Robot Soccer Goal domain by <NAME> et al. [2016], Reinforcement Learning with Parameterized Actions
Based on code from https://github.com/WarwickMasson/aaai-goal
Author: <NAME>
June 2018
"""
import numpy as np
import math
import gym
import pygame
from gym import spaces, error
from gym.utils import seeding
import sys
from .config import PLAYER_CONFIG, BALL_CONFIG, GOAL_AREA_LENGTH, GOAL_AREA_WIDTH, GOAL_WIDTH, GOAL_DEPTH, KICKABLE, \
INERTIA_MOMENT, MINPOWER, MAXPOWER, PITCH_LENGTH, PITCH_WIDTH, CATCHABLE, CATCH_PROBABILITY, SHIFT_VECTOR, \
SCALE_VECTOR, LOW_VECTOR, HIGH_VECTOR
from .util import bound, bound_vector, angle_position, angle_between, angle_difference, angle_close, norm_angle, \
vector_to_tuple
# actions
KICK = "kick"
DASH = "dash"
TURN = "turn"
TO_BALL = "toball"
SHOOT_GOAL = "shootgoal"
TURN_BALL = "turnball"
DRIBBLE = "dribble"
KICK_TO = "kickto"
ACTION_LOOKUP = {
0: KICK_TO,
1: SHOOT_GOAL,
2: SHOOT_GOAL,
}
# field bounds seem to be 0, PITCH_LENGTH / 2, -PITCH_WIDTH / 2, PITCH_WIDTH / 2
PARAMETERS_MIN = [
np.array([0, -PITCH_WIDTH / 2]), # -15
np.array([-GOAL_WIDTH / 2]), # -7.01
np.array([-GOAL_WIDTH / 2]), # -7.01
]
PARAMETERS_MAX = [
np.array([PITCH_LENGTH, PITCH_WIDTH / 2]), # 40, 15
np.array([GOAL_WIDTH / 2]), # 7.01
np.array([GOAL_WIDTH / 2]), # 7.01
]
def norm(vec2d):
# from numpy.linalg import norm
# faster to use custom norm because we know the vectors are always 2D
assert len(vec2d) == 2
return math.sqrt(vec2d[0]*vec2d[0] + vec2d[1]*vec2d[1])
class GoalEnv(gym.Env):
# metadata = {'render.modes': ['human', 'rgb_array']}
metadata = {'render.modes': ['human']} # cannot use rgb_array at the moment due to frame skip between actions
_VISUALISER_SCALE_FACTOR = 20
_VISUALISER_DELAY = 120 # fps
def __init__(self):
""" The entities are set up and added to a space. """
self.np_random = None
self.entities = []
self.player = None
self.ball = None
self.goalie = None
self.states = []
self.render_states = []
self.window = None
self.time = 0
self.max_time = 100
num_actions = len(ACTION_LOOKUP)
self.action_space = spaces.Tuple((
spaces.Discrete(num_actions), # actions
spaces.Tuple( # parameters
tuple(spaces.Box(PARAMETERS_MIN[i], PARAMETERS_MAX[i], dtype=np.float32) for i in range(num_actions))
)
))
self.observation_space = spaces.Tuple((
# spaces.Box(low=0., high=1., shape=self.get_state().shape, dtype=np.float32), # scaled states
spaces.Box(low=LOW_VECTOR, high=HIGH_VECTOR, dtype=np.float32), # unscaled states
spaces.Discrete(200), # internal time steps (200 limit is an estimate)
))
self.seed()
def step(self, action):
"""
Take a full, stabilised update.
Parameters
----------
action (ndarray) :
Returns
-------
ob, reward, episode_over, info : tuple
ob (object) :
reward (float) :
terminal (bool) :
info (dict) :
"""
act_index = action[0]
act = ACTION_LOOKUP[act_index]
param = action[1][act_index]
param = np.clip(param, PARAMETERS_MIN[act_index], PARAMETERS_MAX[act_index])
steps = 0
self.time += 1
if self.time == self.max_time:
reward = -self.ball.goal_distance()
end_episode = True
state = self.get_state()
return (state, 0), reward, end_episode, {}
end_episode = False
run = True
reward = 0.
while run:
steps += 1
reward, end_episode = self._update(act, param)
run = not end_episode
if run:
run = not self.player.can_kick(self.ball)
if act == DRIBBLE:
run = not self.ball.close_to(param) or run
elif act == KICK_TO:
run = norm(self.ball.velocity) > 0.1 or run
elif act == TURN_BALL:
theta = angle_between(self.player.position, self.ball.position)
run = not angle_close(theta, param[0]) or run
elif act == SHOOT_GOAL:
run = not end_episode
else:
run = False
state = self.get_state()
return (state, steps), reward, end_episode, {}
def _update(self, act, param):
"""
Performs a single transition with the given action,
returns the reward and terminal status.
"""
self.states.append([
self.player.position.copy(),
self.player.orientation,
self.goalie.position.copy(),
self.goalie.orientation,
self.ball.position.copy()])
self.render_states.append(self.states[-1])
self._perform_action(act, param, self.player)
self.goalie.move(self.ball, self.player)
for entity in self.entities:
entity.update()
self._resolve_collisions()
return self._terminal_check()
def reset(self):
# TODO: implement reset for each entity to avoid creating new objects and reduce duplicate code
initial_player = np.array((0, self.np_random.uniform(-PITCH_WIDTH / 2, PITCH_WIDTH / 2)))
angle = angle_between(initial_player, np.array((PITCH_LENGTH / 2, 0)))
self.player = Player(initial_player, angle)
MACHINE_EPSILON = 1e-12 # ensure always kickable on first state
# fixes seeded runs changing between machines due to minor precision differences,
# specifically from angle_position due to cos and sin approximations
initial_ball = initial_player + (KICKABLE - MACHINE_EPSILON) * angle_position(angle)
#initial_ball = initial_player + KICKABLE * angle_position(angle)
self.ball = Ball(initial_ball)
initial_goalie = self._keeper_target(initial_ball)
angle2 = angle_between(initial_goalie, initial_ball)
self.goalie = Goalie(initial_goalie, angle2)
self.entities = [self.player, self.goalie, self.ball]
self._update_entity_seeds()
self.states = []
self.render_states = []
self.time = 0
self.states.append([
self.player.position.copy(),
self.player.orientation,
self.goalie.position.copy(),
self.goalie.orientation,
self.ball.position.copy()])
self.render_states.append(self.states[-1])
return self.get_state(), 0
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
self.reset()
self._update_entity_seeds()
return [seed]
def _update_entity_seeds(self):
# will be empty at initialisation, call again after creating all entities
for entity in self.entities:
entity.np_random = self.np_random
@staticmethod
def _keeper_line(ball):
""" Finds the line the keeper wants to stay to. """
grad = -ball[1] / (PITCH_LENGTH / 2 - ball[0])
yint = ball[1] - grad * ball[0]
return grad, yint
def _keeper_target(self, ball):
""" Target the keeper wants to move towards. """
grad, yint = self._keeper_line(ball)
if ball[0] < PITCH_LENGTH / 2 - GOAL_AREA_LENGTH:
xval = ball[0]
else:
if ball[1] < -GOAL_AREA_WIDTH / 2:
xval = (-GOAL_AREA_WIDTH / 2 - yint) / grad
else:
xval = (GOAL_AREA_WIDTH / 2 - yint) / grad
xval = bound(xval, PITCH_LENGTH / 2 - GOAL_AREA_LENGTH, PITCH_LENGTH / 2)
yval = bound(grad * xval + yint, -GOAL_AREA_WIDTH / 2, GOAL_AREA_WIDTH / 2)
return np.array((xval, yval))
def get_state(self):
""" Returns the representation of the current state. """
state = np.concatenate((
self.player.position,
self.player.velocity,
[self.player.orientation],
self.goalie.position,
self.goalie.velocity,
[self.goalie.orientation],
self.ball.position,
self.ball.velocity))
#return self.scale_state(state)
return state
def _load_from_state(self, state):
assert len(state) == len(self.get_state())
self.player.position[0] = state[0]
self.player.position[1] = state[1]
self.player.velocity[0] = state[2]
self.player.velocity[1] = state[3]
self.player.orientation = state[4]
self.goalie.position[0] = state[5]
self.goalie.position[1] = state[6]
self.goalie.velocity[0] = state[7]
self.goalie.velocity[1] = state[8]
self.goalie.orientation = state[9]
self.ball.position[0] = state[10]
self.ball.position[1] = state[11]
self.ball.velocity[0] = state[12]
self.ball.velocity[1] = state[13]
def _perform_action(self, act, parameters, agent):
""" Applies for selected action for the given agent. """
if act == KICK:
agent.kick_ball(self.ball, parameters[0], parameters[1])
elif act == DASH:
agent.dash(parameters[0])
elif act == TURN:
agent.turn(parameters[0])
elif act == TO_BALL:
agent.to_ball(self.ball)
elif act == SHOOT_GOAL:
agent.shoot_goal(self.ball, parameters[0])
elif act == TURN_BALL:
agent.turn_ball(self.ball, parameters[0])
elif act == DRIBBLE:
agent.dribble(self.ball, parameters)
elif act == KICK_TO:
agent.kick_to(self.ball, parameters[0])
else:
raise error.InvalidAction("Action not recognised: ", act)
def _resolve_collisions(self):
""" Shift apart all colliding entities with one pass. """
for index, entity1 in enumerate(self.entities):
for entity2 in self.entities[index + 1:]:
if entity1.colliding(entity2):
entity1.decollide(entity2)
def _terminal_check(self):
""" Determines if the episode is ended, and the reward. """
if self.ball.in_net():
end_episode = True
reward = 50
elif self.goalie.can_catch(self.ball) or not self.ball.in_field():
end_episode = True
reward = -self.ball.goal_distance()
else:
end_episode = False
reward = 0
if end_episode:
self.states.append([
self.player.position.copy(),
self.player.orientation,
self.goalie.position.copy(),
self.goalie.orientation,
self.ball.position.copy()])
return reward, end_episode
def _is_stable(self):
""" Determines whether objects have stopped moving. """
speeds = [norm(entity.velocity) for entity in self.entities]
return max(speeds) < 0.1
@staticmethod
def scale_state(state):
""" Scale state variables between 0 and 1. """
scaled_state = (state + SHIFT_VECTOR) / SCALE_VECTOR
return scaled_state
@staticmethod
def unscale_state(scaled_state):
""" Unscale state variables. """
state = (scaled_state * SCALE_VECTOR) - SHIFT_VECTOR
return state
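    # Illustrative note: scale_state and unscale_state are exact inverses, since
    # ((s + SHIFT_VECTOR) / SCALE_VECTOR) * SCALE_VECTOR - SHIFT_VECTOR == s.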
def __draw_internal_state(self, internal_state, fade=False):
""" Draw the field and players. """
player_position = internal_state[0]
player_orientation = internal_state[1]
goalie_position = internal_state[2]
goalie_orientation = internal_state[3]
ball_position = internal_state[4]
ball_size = BALL_CONFIG['SIZE']
self.window.blit(self.__background, (0, 0))
# Draw goal and penalty areas
length = self.__visualiser_scale(PITCH_LENGTH / 2)
width = self.__visualiser_scale(PITCH_WIDTH)
self.__draw_vertical(length, 0, width)
self.__draw_box(GOAL_AREA_WIDTH, GOAL_AREA_LENGTH)
# self.draw_box(PENALTY_AREA_WIDTH, PENALTY_AREA_LENGTH)
depth = length + self.__visualiser_scale(GOAL_DEPTH)
self.__draw_horizontal(width / 2 - self.__visualiser_scale(GOAL_WIDTH / 2), length, depth)
self.__draw_horizontal(width / 2 + self.__visualiser_scale(GOAL_WIDTH / 2), length, depth)
# self.draw_radius(vector(0, 0), CENTRE_CIRCLE_RADIUS)
# Draw Players
self.__draw_player(player_position, player_orientation, self.__white)
if not fade:
self.__draw_radius(player_position, KICKABLE)
self.__draw_player(goalie_position, goalie_orientation, self.__red)
if not fade:
self.__draw_radius(goalie_position, CATCHABLE)
# Draw ball
self.__draw_entity(ball_position, ball_size, self.__black)
pygame.display.update()
def __visualiser_scale(self, value):
''' Scale up a value. '''
return int(self._VISUALISER_SCALE_FACTOR * value)
def __upscale(self, position):
''' Maps a simulator position to a field position. '''
pos1 = self.__visualiser_scale(position[0])
pos2 = self.__visualiser_scale(position[1] + PITCH_WIDTH / 2)
return np.array([pos1, pos2])
def __draw_box(self, area_width, area_length):
""" Draw a box at the goal line. """
lower_corner = self.__visualiser_scale(PITCH_WIDTH / 2 - area_width / 2)
upper_corner = lower_corner + self.__visualiser_scale(area_width)
line = self.__visualiser_scale(PITCH_LENGTH / 2 - area_length)
self.__draw_vertical(line, lower_corner, upper_corner)
self.__draw_horizontal(lower_corner, line, self.__visualiser_scale(PITCH_LENGTH / 2))
self.__draw_horizontal(upper_corner, line, self.__visualiser_scale(PITCH_LENGTH / 2))
def __draw_player(self, position, orientation, colour):
''' Draw a player with given position and orientation. '''
size = PLAYER_CONFIG['SIZE']
self.__draw_entity(position, size, colour)
radius_end = size * angle_position(orientation)
pos = vector_to_tuple(self.__upscale(position))
end = vector_to_tuple(self.__upscale(position + radius_end))
pygame.draw.line(self.window, self.__black, pos, end)
def __draw_radius(self, position, radius):
""" Draw an empty circle. """
pos = vector_to_tuple(self.__upscale(position))
radius = self.__visualiser_scale(radius)
pygame.draw.circle(self.window, self.__white, pos, radius, 1)
def __draw_entity(self, position, size, colour):
""" Draws an entity as a ball. """
pos = vector_to_tuple(self.__upscale(position))
radius = self.__visualiser_scale(size)
pygame.draw.circle(self.window, colour, pos, radius)
def __draw_horizontal(self, yline, xline1, xline2):
""" Draw a horizontal line. """
pos1 = (xline1, yline)
pos2 = (xline2, yline)
pygame.draw.line(self.window, self.__white, pos1, pos2)
def __draw_vertical(self, xline, yline1, yline2):
""" Draw a vertical line. """
pos1 = (xline, yline1)
pos2 = (xline, yline2)
pygame.draw.line(self.window, self.__white, pos1, pos2)
def __draw_render_states(self):
"""
Draw the internal states from the last action.
"""
length = len(self.render_states)
for i in range(0, length):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.display.quit()
pygame.quit()
sys.exit()
self.__draw_internal_state(self.render_states[i])
self.__clock.tick(self._VISUALISER_DELAY)
self.render_states = [] # clear states for next render
def render(self, mode='human', close=False):
if close:
pygame.display.quit()
pygame.quit()
self.window = None
return
self._initialse_window()
self.__draw_render_states()
#img = self._get_image()
#if mode == 'rgb_array':
# return img
# elif mode == 'human':
# from gym.envs.classic_control import rendering
# if self.viewer is None:
# self.viewer = rendering.SimpleImageViewer(SCREEN_WIDTH, SCREEN_HEIGHT)
# self.viewer.imshow(img)
def _initialse_window(self):
# initialise visualiser
if self.window is None:
pygame.init()
width = self.__visualiser_scale(PITCH_LENGTH / 2 + GOAL_DEPTH)
height = self.__visualiser_scale(PITCH_WIDTH)
self.window = pygame.display.set_mode((width, height))
self.__clock = pygame.time.Clock()
size = (width, height)
self.__background = pygame.Surface(size)
self.__white = pygame.Color(255, 255, 255, 0)
self.__black = pygame.Color(0, 0, 0, 0)
self.__red = pygame.Color(255, 0, 0, 0)
self.__background.fill(pygame.Color(0, 125, 0, 0))
def save_render_states(self, dir, prefix, index=0):
self._initialse_window()
import os
for s in self.render_states:
self.__draw_internal_state(s)
pygame.image.save(self.window, os.path.join(dir, prefix+"_"+str("{:04d}".format(index))+".jpeg"))
index += 1
return index
class Entity:
""" This is a base entity class, representing moving objects. """
def __init__(self, config):
self.rand = config['RAND']
self.accel_max = config['ACCEL_MAX']
self.speed_max = config['SPEED_MAX']
self.power_rate = config['POWER_RATE']
self.decay = config['DECAY']
self.size = config['SIZE']
self.position = np.array([0., 0.])
self.velocity = np.array([0., 0.])
self.np_random = None # overwritten by seed()
def update(self):
""" Update the position and velocity. """
self.position += self.velocity
self.velocity *= self.decay
def accelerate(self, power, theta):
""" Applies a power to the entity in direction theta. """
rrand = self.np_random.uniform(-self.rand, self.rand)
theta = (1 + rrand) * theta
rmax = self.rand * norm(self.velocity)
noise = self.np_random.uniform(-rmax, rmax, size=2)
rate = float(power) * self.power_rate
acceleration = rate * angle_position(theta) + noise
acceleration = bound_vector(acceleration, self.accel_max)
self.velocity += acceleration
self.velocity = bound_vector(self.velocity, self.speed_max)
def decollide(self, other):
""" Shift overlapping entities apart. """
overlap = (self.size + other.size - self.distance(other)) / 2
theta1 = angle_between(self.position, other.position)
theta2 = angle_between(other.position, self.position)
self.position += overlap * angle_position(theta2)
other.position += overlap * angle_position(theta1)
self.velocity *= -1
other.velocity *= -1
def colliding(self, other):
""" Check if two entities are overlapping. """
dist = self.distance(other)
return dist < self.size + other.size
def distance(self, other):
""" Computes the euclidean distance to another entity. """
return norm(self.position - other.position)
def in_area(self, left, right, bot, top):
""" Checks if the entity is in the area. """
xval, yval = self.position
in_length = left <= xval <= right
in_width = bot <= yval <= top
return in_length and in_width
class Player(Entity):
""" This represents a player with a position,
velocity and an orientation. """
def __init__(self, position, orientation):
""" The values for this class are defined by the player constants. """
Entity.__init__(self, PLAYER_CONFIG)
self.position = position
self.orientation = orientation
def homothetic_centre(self, ball):
""" Computes the homothetic centre between the player and the ball. """
ratio = 1. / (self.size + ball.size)
position = (ball.position * self.size + self.position * ball.size)
return ratio * position
def tangent_points(self, htc):
""" Finds the tangent points on the player wrt to homothetic centre. """
diff = htc - self.position
square = sum(diff ** 2)
if square <= self.size ** 2:
delta = 0.0
else:
delta = np.sqrt(square - self.size ** 2)
xt1 = (diff[0] * self.size ** 2 + self.size * diff[1] * delta) / square
xt2 = (diff[0] * self.size ** 2 - self.size * diff[1] * delta) / square
yt1 = (diff[1] * self.size ** 2 + self.size * diff[0] * delta) / square
yt2 = (diff[1] * self.size ** 2 - self.size * diff[0] * delta) / square
tangent1 = np.array((xt1, yt1)) + self.position
tangent2 = np.array((xt1, yt2)) + self.position
tangent3 = np.array((xt2, yt1)) + self.position
tangent4 = np.array((xt2, yt2)) + self.position
if norm(tangent1 - self.position) == self.size:
return tangent1, tangent4
else:
return tangent2, tangent3
def ball_angles(self, ball, angle):
""" Determines which angle to kick the ball along. """
htc = self.homothetic_centre(ball)
tangent1, tangent2 = self.tangent_points(htc)
target = self.position + self.size * angle_position(angle)
if norm(tangent1 - target) < norm(tangent2 - target):
return angle_between(htc, tangent1)
else:
return angle_between(htc, tangent2)
def kick_power(self, ball):
""" Determines the kick power weighting given ball position. """
angle = angle_between(self.position, ball.position)
dir_diff = abs(angle_difference(angle, self.orientation))
dist = self.distance(ball)
return 1 - 0.25 * dir_diff / np.pi - 0.25 * dist / KICKABLE
def facing_ball(self, ball):
""" Determines whether the player is facing the ball. """
angle = angle_between(self.position, ball.position)
return self.facing_angle(angle)
def facing_angle(self, angle):
""" Determines whether the player is facing an angle. """
return angle_close(self.orientation, angle)
def turn(self, angle):
""" Turns the player. """
moment = norm_angle(angle)
speed = norm(self.velocity)
angle = moment / (1 + INERTIA_MOMENT * speed)
self.orientation = self.orientation + angle
def dash(self, power):
""" Dash forward. """
power = bound(power, MINPOWER, MAXPOWER)
self.accelerate(power, self.orientation)
def can_kick(self, ball):
""" Determines whether the player can kick the ball. """
return self.distance(ball) <= KICKABLE
def kick_ball(self, ball, power, direction):
""" Kicks the ball. """
if self.can_kick(ball):
power = bound(power, MINPOWER, MAXPOWER)
power *= self.kick_power(ball)
ball.accelerate(power, self.orientation + direction)
def kick_towards(self, ball, power, direction):
""" Kick the ball directly to a direction. """
self.kick_ball(ball, power, direction - self.orientation)
def shoot_goal(self, ball, ypos):
""" Shoot the goal at a targeted position on the goal line. """
ypos = bound(ypos, -GOAL_WIDTH / 2, GOAL_WIDTH / 2)
target = np.array((PITCH_LENGTH / 2 + ball.size, ypos))
self.kick_to(ball, target)
def face_ball(self, ball):
""" Turn the player towards the ball. """
theta = angle_between(self.position, ball.position)
self.face_angle(theta)
def face_angle(self, angle):
""" Turn the player towards and angle. """
self.turn(angle - self.orientation)
def to_ball(self, ball):
""" Move towards the ball. """
if not self.facing_ball(ball):
self.face_ball(ball)
elif not self.can_kick(ball):
self.dash(10)
def kick_to(self, ball, target):
""" Kick the ball to a target position. """
if not self.can_kick(ball):
self.to_ball(ball)
else:
accel = (1 - ball.decay) * (target - self.position) - ball.velocity
power = norm(accel) / (self.kick_power(ball) * ball.power_rate)
theta = np.arctan2(accel[1], accel[0])
self.kick_towards(ball, power, theta)
def turn_ball(self, ball, angle):
""" Turn the ball around the player. """
if not self.can_kick(ball):
self.to_ball(ball)
elif not self.facing_angle(angle):
self.face_angle(angle)
elif self.size < self.distance(ball):
theta = self.ball_angles(ball, angle)
power = 0.1 / self.kick_power(ball)
self.kick_towards(ball, power, theta)
def dribble(self, ball, target):
""" Dribble the ball to a position. """
angle = angle_between(self.position, ball.position)
theta = angle_between(self.position, target)
if not self.can_kick(ball):
self.to_ball(ball)
elif ball.close_to(target):
pass
elif not angle_close(angle, theta):
self.turn_ball(ball, theta)
elif not self.facing_angle(theta):
self.face_angle(theta)
elif self.distance(ball) < (KICKABLE + self.size + ball.size) / 2:
self.kick_towards(ball, 1.5, theta)
else:
self.dash(10)
class Goalie(Player):
""" This class defines a special goalie player. """
def move(self, ball, player):
""" This moves the goalie. """
ball_end = ball.position + ball.velocity / (1 - ball.decay)
diff = ball_end - ball.position
grad = diff[1] / diff[0] if diff[0] != 0. else 0 # avoid division by 0
yint = ball.position[1] - grad * ball.position[0]
goal_y = grad * PITCH_LENGTH / 2 + yint
if ball_end[0] > PITCH_LENGTH / 2 and -GOAL_WIDTH / 2 - CATCHABLE <= goal_y <= GOAL_WIDTH / 2 + CATCHABLE \
and grad != 0:
grad2 = -1 / grad
yint2 = self.position[1] - grad2 * self.position[0]
ballx = (yint2 - yint) / (grad - grad2)
bally = grad * ballx + yint
target = np.array((ballx, bally))
self.move_towards(20, target)
self.orientation = angle_between(self.position, target)
else:
self.orientation = angle_between(self.position, ball_end)
self.move_towards(8, ball_end)
def move_towards(self, power, target):
""" Move towards target position. """
theta = angle_between(self.position, target)
self.accelerate(power, theta)
def can_catch(self, ball):
""" Determines whether the goalie can catch the ball. """
can_catch = self.distance(ball) < CATCHABLE
return self.np_random.random_sample() <= CATCH_PROBABILITY and can_catch
class Ball(Entity):
""" This class represents the ball, which has no orientation. """
def __init__(self, position):
""" The values for this class are defined by the ball constants. """
Entity.__init__(self, BALL_CONFIG)
self.position = position
def close_to(self, position):
""" Determines whether the ball is close to a postion. """
return norm(self.position - position) <= 1.5
def goal_distance(self):
""" Returns the distance from the goal box. """
if self.position[0] < PITCH_LENGTH / 2:
if self.position[1] < -GOAL_WIDTH / 2:
bot_corner = np.array((PITCH_LENGTH / 2, -GOAL_WIDTH / 2))
return norm(self.position - bot_corner)
elif self.position[1] > GOAL_WIDTH / 2:
top_corner = np.array((PITCH_LENGTH / 2, GOAL_WIDTH / 2))
return norm(self.position - top_corner)
else:
return PITCH_LENGTH / 2 - self.position[0]
else:
if self.position[1] < -GOAL_WIDTH / 2:
return GOAL_WIDTH / 2 - self.position[1]
elif self.position[1] > GOAL_WIDTH / 2:
return self.position[1] - GOAL_WIDTH / 2
else:
return 0
def in_field(self):
""" Checks if the ball has left the field. """
return self.in_area(0, PITCH_LENGTH / 2, -PITCH_WIDTH / 2, PITCH_WIDTH / 2)
def in_net(self):
""" Checks if the ball is in the net. """
return self.in_area(PITCH_LENGTH / 2, PITCH_LENGTH / 2 + GOAL_DEPTH, -GOAL_WIDTH / 2, GOAL_WIDTH / 2)
def in_goalbox(self):
""" Checks if the ball is in the goal box. """
        return self.in_area(PITCH_LENGTH / 2 - GOAL_AREA_LENGTH, PITCH_LENGTH / 2, -GOAL_AREA_WIDTH / 2,
                            GOAL_AREA_WIDTH / 2)
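
# ---------------------------------------------------------------------------
# Illustrative sanity check of the Ball helpers above. It is a sketch, guarded
# so the module stays side-effect free on import, and assumes PITCH_LENGTH > 10
# with the goal mouth centred on y = 0, as in the constants used throughout.
if __name__ == "__main__":
    demo_ball = Ball(np.array([PITCH_LENGTH / 2 - 5.0, 0.0]))
    print(demo_ball.in_field())       # expected True: still inside the attacking half
    print(demo_ball.goal_distance())  # expected 5.0: straight-line distance to the goal line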
|
StarcoderdataPython
|
3343019
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ExportArgs', 'Export']
@pulumi.input_type
class ExportArgs:
def __init__(__self__, *,
export_set_id: pulumi.Input[str],
file_system_id: pulumi.Input[str],
path: pulumi.Input[str],
export_options: Optional[pulumi.Input[Sequence[pulumi.Input['ExportExportOptionArgs']]]] = None):
"""
The set of arguments for constructing a Export resource.
:param pulumi.Input[str] export_set_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this export's export set.
:param pulumi.Input[str] file_system_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this export's file system.
:param pulumi.Input[str] path: Path used to access the associated file system.
:param pulumi.Input[Sequence[pulumi.Input['ExportExportOptionArgs']]] export_options: (Updatable) Export options for the new export. If left unspecified, defaults to:
"""
pulumi.set(__self__, "export_set_id", export_set_id)
pulumi.set(__self__, "file_system_id", file_system_id)
pulumi.set(__self__, "path", path)
if export_options is not None:
pulumi.set(__self__, "export_options", export_options)
@property
@pulumi.getter(name="exportSetId")
def export_set_id(self) -> pulumi.Input[str]:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this export's export set.
"""
return pulumi.get(self, "export_set_id")
@export_set_id.setter
def export_set_id(self, value: pulumi.Input[str]):
pulumi.set(self, "export_set_id", value)
@property
@pulumi.getter(name="fileSystemId")
def file_system_id(self) -> pulumi.Input[str]:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this export's file system.
"""
return pulumi.get(self, "file_system_id")
@file_system_id.setter
def file_system_id(self, value: pulumi.Input[str]):
pulumi.set(self, "file_system_id", value)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
"""
Path used to access the associated file system.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@property
@pulumi.getter(name="exportOptions")
def export_options(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExportExportOptionArgs']]]]:
"""
(Updatable) Export options for the new export. If left unspecified, defaults to:
"""
return pulumi.get(self, "export_options")
@export_options.setter
def export_options(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExportExportOptionArgs']]]]):
pulumi.set(self, "export_options", value)
@pulumi.input_type
class _ExportState:
def __init__(__self__, *,
export_options: Optional[pulumi.Input[Sequence[pulumi.Input['ExportExportOptionArgs']]]] = None,
export_set_id: Optional[pulumi.Input[str]] = None,
file_system_id: Optional[pulumi.Input[str]] = None,
path: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Export resources.
:param pulumi.Input[Sequence[pulumi.Input['ExportExportOptionArgs']]] export_options: (Updatable) Export options for the new export. If left unspecified, defaults to:
:param pulumi.Input[str] export_set_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this export's export set.
:param pulumi.Input[str] file_system_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this export's file system.
:param pulumi.Input[str] path: Path used to access the associated file system.
:param pulumi.Input[str] state: The current state of this export.
:param pulumi.Input[str] time_created: The date and time the export was created, expressed in [RFC 3339](https://tools.ietf.org/rfc/rfc3339) timestamp format. Example: `2016-08-25T21:10:29.600Z`
"""
if export_options is not None:
pulumi.set(__self__, "export_options", export_options)
if export_set_id is not None:
pulumi.set(__self__, "export_set_id", export_set_id)
if file_system_id is not None:
pulumi.set(__self__, "file_system_id", file_system_id)
if path is not None:
pulumi.set(__self__, "path", path)
if state is not None:
pulumi.set(__self__, "state", state)
if time_created is not None:
pulumi.set(__self__, "time_created", time_created)
@property
@pulumi.getter(name="exportOptions")
def export_options(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExportExportOptionArgs']]]]:
"""
(Updatable) Export options for the new export. If left unspecified, defaults to:
"""
return pulumi.get(self, "export_options")
@export_options.setter
def export_options(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExportExportOptionArgs']]]]):
pulumi.set(self, "export_options", value)
@property
@pulumi.getter(name="exportSetId")
def export_set_id(self) -> Optional[pulumi.Input[str]]:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this export's export set.
"""
return pulumi.get(self, "export_set_id")
@export_set_id.setter
def export_set_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "export_set_id", value)
@property
@pulumi.getter(name="fileSystemId")
def file_system_id(self) -> Optional[pulumi.Input[str]]:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this export's file system.
"""
return pulumi.get(self, "file_system_id")
@file_system_id.setter
def file_system_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "file_system_id", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path used to access the associated file system.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
The current state of this export.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> Optional[pulumi.Input[str]]:
"""
The date and time the export was created, expressed in [RFC 3339](https://tools.ietf.org/rfc/rfc3339) timestamp format. Example: `2016-08-25T21:10:29.600Z`
"""
return pulumi.get(self, "time_created")
@time_created.setter
def time_created(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_created", value)
class Export(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
export_options: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExportExportOptionArgs']]]]] = None,
export_set_id: Optional[pulumi.Input[str]] = None,
file_system_id: Optional[pulumi.Input[str]] = None,
path: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
This resource provides the Export resource in Oracle Cloud Infrastructure File Storage service.
Creates a new export in the specified export set, path, and
file system.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_export = oci.filestorage.Export("testExport",
export_set_id=oci_file_storage_export_set["test_export_set"]["id"],
file_system_id=oci_file_storage_file_system["test_file_system"]["id"],
path=var["export_path"],
export_options=[oci.filestorage.ExportExportOptionArgs(
source=var["export_export_options_source"],
access=var["export_export_options_access"],
anonymous_gid=var["export_export_options_anonymous_gid"],
anonymous_uid=var["export_export_options_anonymous_uid"],
identity_squash=var["export_export_options_identity_squash"],
require_privileged_source_port=var["export_export_options_require_privileged_source_port"],
)])
```
## Import
Exports can be imported using the `id`, e.g.
```sh
$ pulumi import oci:filestorage/export:Export test_export "id"
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExportExportOptionArgs']]]] export_options: (Updatable) Export options for the new export. If left unspecified, defaults to:
:param pulumi.Input[str] export_set_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this export's export set.
:param pulumi.Input[str] file_system_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this export's file system.
:param pulumi.Input[str] path: Path used to access the associated file system.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ExportArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
This resource provides the Export resource in Oracle Cloud Infrastructure File Storage service.
Creates a new export in the specified export set, path, and
file system.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_export = oci.filestorage.Export("testExport",
export_set_id=oci_file_storage_export_set["test_export_set"]["id"],
file_system_id=oci_file_storage_file_system["test_file_system"]["id"],
path=var["export_path"],
export_options=[oci.filestorage.ExportExportOptionArgs(
source=var["export_export_options_source"],
access=var["export_export_options_access"],
anonymous_gid=var["export_export_options_anonymous_gid"],
anonymous_uid=var["export_export_options_anonymous_uid"],
identity_squash=var["export_export_options_identity_squash"],
require_privileged_source_port=var["export_export_options_require_privileged_source_port"],
)])
```
## Import
Exports can be imported using the `id`, e.g.
```sh
$ pulumi import oci:filestorage/export:Export test_export "id"
```
:param str resource_name: The name of the resource.
:param ExportArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ExportArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
export_options: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExportExportOptionArgs']]]]] = None,
export_set_id: Optional[pulumi.Input[str]] = None,
file_system_id: Optional[pulumi.Input[str]] = None,
path: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ExportArgs.__new__(ExportArgs)
__props__.__dict__["export_options"] = export_options
if export_set_id is None and not opts.urn:
raise TypeError("Missing required property 'export_set_id'")
__props__.__dict__["export_set_id"] = export_set_id
if file_system_id is None and not opts.urn:
raise TypeError("Missing required property 'file_system_id'")
__props__.__dict__["file_system_id"] = file_system_id
if path is None and not opts.urn:
raise TypeError("Missing required property 'path'")
__props__.__dict__["path"] = path
__props__.__dict__["state"] = None
__props__.__dict__["time_created"] = None
super(Export, __self__).__init__(
'oci:filestorage/export:Export',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
export_options: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExportExportOptionArgs']]]]] = None,
export_set_id: Optional[pulumi.Input[str]] = None,
file_system_id: Optional[pulumi.Input[str]] = None,
path: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None) -> 'Export':
"""
Get an existing Export resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExportExportOptionArgs']]]] export_options: (Updatable) Export options for the new export. If left unspecified, defaults to:
:param pulumi.Input[str] export_set_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this export's export set.
:param pulumi.Input[str] file_system_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this export's file system.
:param pulumi.Input[str] path: Path used to access the associated file system.
:param pulumi.Input[str] state: The current state of this export.
:param pulumi.Input[str] time_created: The date and time the export was created, expressed in [RFC 3339](https://tools.ietf.org/rfc/rfc3339) timestamp format. Example: `2016-08-25T21:10:29.600Z`
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ExportState.__new__(_ExportState)
__props__.__dict__["export_options"] = export_options
__props__.__dict__["export_set_id"] = export_set_id
__props__.__dict__["file_system_id"] = file_system_id
__props__.__dict__["path"] = path
__props__.__dict__["state"] = state
__props__.__dict__["time_created"] = time_created
return Export(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="exportOptions")
def export_options(self) -> pulumi.Output[Sequence['outputs.ExportExportOption']]:
"""
(Updatable) Export options for the new export. If left unspecified, defaults to:
"""
return pulumi.get(self, "export_options")
@property
@pulumi.getter(name="exportSetId")
def export_set_id(self) -> pulumi.Output[str]:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this export's export set.
"""
return pulumi.get(self, "export_set_id")
@property
@pulumi.getter(name="fileSystemId")
def file_system_id(self) -> pulumi.Output[str]:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of this export's file system.
"""
return pulumi.get(self, "file_system_id")
@property
@pulumi.getter
def path(self) -> pulumi.Output[str]:
"""
Path used to access the associated file system.
"""
return pulumi.get(self, "path")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The current state of this export.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> pulumi.Output[str]:
"""
The date and time the export was created, expressed in [RFC 3339](https://tools.ietf.org/rfc/rfc3339) timestamp format. Example: `2016-08-25T21:10:29.600Z`
"""
return pulumi.get(self, "time_created")
|
StarcoderdataPython
|
1641781
|
class Game:
""" Represents main Game class"""
def __init__(self):
self.state = 'Ready'
self.score = 0
self.objects_found = []
self.keys = {}
self.messages = []
self.player = None
def play(self):
self.new_attribute = 1
|
StarcoderdataPython
|
1618408
|
import time, os, requests, re, csv, time
from lxml import etree
from selenium import webdriver
img_folder = 'D:\\dataset_object_detect\\open_image_dataset\\train_00\\train_00'
img_files = list()
for root, dirs, files in os.walk(img_folder):
for name in files:
        file_path = os.path.join(root, name)
img_files.append(file_path)
# print(img_files[0])
# print(img_files[0].split('\\')[-1])
# img_files = ["D:\\dataset_object_detect\\food-101\\images\\apple_pie\\134.jpg", "D:\\dataset_object_detect\\food-101\\images\\apple_pie\\134.jpg"]
nb = 1
print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
with open("result.csv", "a", newline="", encoding="utf-8") as csv_file:
csv_writer = csv.writer(csv_file, dialect='excel')
for img_path in img_files:
print("\n", img_path)
option = webdriver.ChromeOptions()
option.add_argument('log-level=3')
# option.add_argument('--headless')
# option.add_argument('--disable-gpu')
# option.add_argument('--no-sandbox')
# driver = webdriver.Chrome("C:\Users\VIP\AppData\Local\Google\Chrome\Application\chrome.exe")
driver = webdriver.Chrome(options=option)
driver.implicitly_wait(2)
driver.maximize_window()
driver.get('https://www.taobao.com')
time.sleep(1)
current_address_tb = driver.current_url
s = driver.find_element_by_class_name('drop-wrapper')
s.click()
# os.system("load_image.exe D:\\dataset_object_detect\\food-101\\images\\apple_pie\\134.jpg" )
os.system("load_image.exe "+img_path)
time.sleep(2)
current_address_s = driver.current_url
if current_address_tb == current_address_s:
driver.quit()
continue
else:
def get_html(url):
r = requests.get(url)
r.encoding = 'utf-8'
html = etree.HTML(r.text)
return r.text
html_text = get_html(driver.current_url)
try:
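                # Pull the product title, thumbnail URL and displayed price out of the
                # raw page source of the image-search results; Taobao embeds them as
                # "key":"value" pairs in the HTML, which the regexes below match.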
raw_title = re.findall(r'\"raw_title\"\:\".*?\"', html_text)
pic_url = re.findall(r'\"pic_url\"\:\".*?\"', html_text)
view_price = re.findall(r'\"view_price\"\:\"[\d\.]*\"', html_text)
print("第{}张图: {} -价格:{}".format(nb, raw_title[0].split(':')[1].strip('"'), view_price[0].split(':')[1].strip('"')))
# print(nb)
# print(raw_title[0])
# print(view_price[0])
print(pic_url[0])
# result = dict()
# for k,v in zip(raw_title, view_price):
# result.update({k.split(':')[1]: v.split(':')[1]})
# print(result)
driver.quit()
csv_writer.writerow([nb, img_path, raw_title[0].split(':')[1].strip('"'), view_price[0].split(':')[1].strip('"'), pic_url[0].split(':')[1].strip('"')[2:]])
nb += 1
os.system("move "+img_path+" "+os.path.join(img_folder, "old"))
print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
except IndexError:
driver.quit()
|
StarcoderdataPython
|
104513
|
# -*- coding: UTF-8 -*-
"""
Based on ``behave tutorial``
Feature: A Step uses a User-Defined Type as Step Parameter (tutorial10)
Scenario Outline: Calculator
Given I have a calculator
When I add "<x>" and "<y>"
Then the calculator returns "<sum>"
Examples: Add Numbers
| x | y | sum |
| 1 | 1 | 2 |
| 1 | 2 | 3 |
| 2 | 1 | 3 |
| 2 | 7 | 9 |
"""
# @mark.user_defined_types
# ----------------------------------------------------------------------------
# USER-DEFINED TYPES:
# ----------------------------------------------------------------------------
from behave import register_type
def parse_number(text):
"""
Convert parsed text into a number.
:param text: Parsed text, called by :py:meth:`parse.Parser.parse()`.
:return: Number instance (integer), created from parsed text.
"""
return int(text)
# -- REGISTER: User-defined type converter (parse_type).
register_type(Number=parse_number)
# @mark.steps
# ----------------------------------------------------------------------------
# STEPS:
# ----------------------------------------------------------------------------
from behave import given, when, then
from hamcrest import assert_that, equal_to
from calculator import Calculator
@given('I have a calculator')
def step_impl(context):
context.calculator = Calculator()
@when('I add "{x:Number}" and "{y:Number}"')
def step_impl(context, x, y):
assert isinstance(x, int)
assert isinstance(y, int)
context.calculator.add2(x, y)
@then('the calculator returns "{expected:Number}"')
def step_impl(context, expected):
assert isinstance(expected, int)
assert_that(context.calculator.result, equal_to(expected))
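
# ----------------------------------------------------------------------------
# REFERENCE SKETCH:
# ----------------------------------------------------------------------------
# The real `calculator` module is not shown here, so the interface below is
# inferred from the steps above (an `add2` method and a `result` attribute);
# a minimal implementation satisfying them might look like:
#
#     class Calculator:
#         def __init__(self):
#             self.result = 0
#
#         def add2(self, x, y):
#             self.result = x + y
#             return self.result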
|
StarcoderdataPython
|
3301482
|
marks = []
total = 0
for i in range(0, 3):
    mark = float(input("Enter marks in subject {}: ".format(i + 1)))
    marks.append(mark)
    total += mark
avg = total / 3
print(int(avg))
if avg >= 80:
    print("Level 4, above agency-normalized standards")
elif avg >= 70:
    print("Level 3, at agency-normalized standards")
elif avg >= 60:
    print("Level 2, below, but approaching agency-normalized standards")
elif avg >= 50:
    print("Level 1, well below agency-normalized standards")
elif avg >= 40:
    print("Level 1-, far below agency-normalized standards")
elif avg >= 0:
    print("Remedial standards")
|
StarcoderdataPython
|
3264503
|
from pylab import *
import skrf as rf
import pdb
c = 3e8
def create_sdrkits_ideal(skrf_f):
# create ideal cal kit
media = rf.media.Freespace(skrf_f)
sdrkit_open = media.line(42.35, 'ps', z0 = 50) ** media.open() # 42.35
sdrkit_short = media.line(26.91, 'ps', z0 = 50) ** media.short()
# TODO: add parallel 2fF capacitance to load?
sdrkit_load = rf.Network(f=skrf_f.f, s=(np.ones(len(skrf_f)) * -.0126), z0=50, f_unit = 'Hz')
sdrkit_thru = media.line(41.00, 'ps', z0 = 50) # open - 1.35 ps
sdrkit_open = rf.two_port_reflect(sdrkit_open, sdrkit_open)
sdrkit_short = rf.two_port_reflect(sdrkit_short, sdrkit_short)
sdrkit_load = rf.two_port_reflect(sdrkit_load, sdrkit_load)
ideals = [sdrkit_short, sdrkit_open, sdrkit_load, sdrkit_thru]
return ideals
def plot_s2p_file(filename, cal_kit = None, show = True):
s2p = rf.Network(filename)
if cal_kit:
s2p = cal_kit.apply_cal(s2p)
s2p.plot_s_db()
if show:
plt.show()
def plot_error_terms(cal_kit):
    # error-term keys follow the commented-out lookup from the original script;
    # adjust them if the calibration's coefs_8term_ntwks uses different names
    source_match = cal_kit.coefs_8term_ntwks['source match']
    directivity = cal_kit.coefs_8term_ntwks['directivity']
    reflection_tracking = cal_kit.coefs_8term_ntwks['reflection tracking']
    source_match.plot_s_db()
    directivity.plot_s_db()
    reflection_tracking.plot_s_db()
def main():
# load cal measurements
lmr_mm = rf.Network('../cal_twoport/match_match.s2p')
lmr_mr = rf.Network('../cal_twoport/match_reflect.s2p')
lmr_rm = rf.Network('../cal_twoport/reflect_match.s2p')
lmr_rr = rf.Network('../cal_twoport/reflect_reflect.s2p')
lmr_thru = rf.Network('../cal_twoport/thru.s2p')
cal_sw_fwd = rf.Network('../cal_twoport/lmr_sw_fwd.s1p')
cal_sw_rev = rf.Network('../cal_twoport/lmr_sw_rev.s1p')
measured_cal = [lmr_thru, lmr_mm, lmr_rr, lmr_rm, lmr_mr]
media = rf.media.Freespace(lmr_thru.frequency)
sdrkit_thru = media.line(41.00, 'ps', z0 = 50) # open - 1.35 ps
cal = rf.calibration.LMR16(measured_cal, [sdrkit_thru], ideal_is_reflect = False, switch_terms = (cal_sw_fwd, cal_sw_rev))
cal.run()
barrel = rf.Network('lmr_barrel.s2p')
barrel_cal = cal.apply_cal(barrel)
barrel_cal.plot_s_db()
grid(True)
title("$|S|$ of Omni-Spectra 20600-10, 10 dB attenuator")
show()
barrel_cal.plot_s_smith()
show()
pdb.set_trace()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
104543
|
<reponame>wx-b/cockpit<gh_stars>100-1000
"""Base class for executing and hooking into a training loop to execute checks."""
from backpack import extend
from cockpit import Cockpit
from tests.utils.rand import restore_rng_state
class SimpleTestHarness:
"""Class for running a simple test loop with the Cockpit.
Args:
        problem: The (instantiated) problem to test on.
"""
def __init__(self, problem):
"""Store the instantiated problem."""
self.problem = problem
def test(self, cockpit_kwargs, *backpack_exts):
"""Run the test loop.
Args:
cockpit_kwargs (dict): Arguments for the cockpit.
*backpack_exts (list): List of user-defined BackPACK extensions.
"""
problem = self.problem
data = problem.data
device = problem.device
iterations = problem.iterations
# Extend
model = extend(problem.model)
loss_fn = extend(problem.loss_function)
individual_loss_fn = extend(problem.individual_loss_function)
# Create Optimizer
optimizer = problem.optimizer
# Initialize Cockpit
self.cockpit = Cockpit(model.parameters(), **cockpit_kwargs)
# print(cockpit_exts)
# Main training loop
global_step = 0
for inputs, labels in iter(data):
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
# forward pass
outputs = model(inputs)
loss = loss_fn(outputs, labels)
losses = individual_loss_fn(outputs, labels)
# code inside this block does not alter random number generation
with restore_rng_state():
# backward pass
with self.cockpit(
global_step,
*backpack_exts,
info={
"batch_size": inputs.shape[0],
"individual_losses": losses,
"loss": loss,
"optimizer": optimizer,
},
):
loss.backward(create_graph=self.cockpit.create_graph(global_step))
self.check_in_context()
self.check_after_context()
# optimizer step
optimizer.step()
global_step += 1
if global_step >= iterations:
break
def check_in_context(self):
"""Check that will be executed within the cockpit context."""
pass
def check_after_context(self):
"""Check that will be executed directly after the cockpit context."""
pass
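
class LoggingTestHarness(SimpleTestHarness):
    """Illustrative subclass sketch: overrides the two hooks to record that the
    Cockpit context was entered and then checked. The attribute name used here
    is an arbitrary choice for demonstration, not part of the harness API."""

    def check_in_context(self):
        """Runs inside the cockpit context on every step."""
        self._context_was_entered = True

    def check_after_context(self):
        """Runs right after the cockpit context on every step."""
        assert getattr(self, "_context_was_entered", False)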
|
StarcoderdataPython
|
1679591
|
<reponame>srikanthallu/proteuslib
###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
"""
This file demonstrates the basics of working with and using the electrolyte
database (EDB).
(1) Before we can start, you must install MongoDB (which is installed separately)
[See more information on the ReadTheDocs under 'Getting Started --> Installing WaterTAP']
(2) After installing MongoDB, you will need to 'load' the database using the
command line function 'edb load -b'. This will load the default database
that WaterTAP is bundled with.
[NOTE: If you need to 'reload' the database, simply use the command 'edb drop -d electrolytedb'
in the command line. The database on MongoDB is named "electrolytedb"]
[NOTE 2: You can invoke the command line utility with the "help" keyword to
    get more information on functionality. Command: 'edb --help' or 'edb [arg] --help']
(3) To use EDB in python, start by importing the interface class object 'ElectrolyteDB'
(4) Invoke the 'ElectrolyteDB' object to connect to the database
(5) Grab a 'base' for a configuration dictionary, and place it into a class object
(6) Get the chemical species/components for a simulation case. There are a number of ways
to do this. In this example, we will grab them by finding all components that contain
only specific elements. Then, we add those components and their associated parameters
to the configuration dictionary being built from the 'base'.
[NOTE: An alternative method is to provide a list of the names of components you want]
(7) Get the set of reactions you want in your system and put into a 'base' object.
That 'base' can be either a 'thermo' base or a 'reaction' (as in this case)
base. IF you are adding reactions to a 'thermo' base, they should be added
as 'inherent' reactions. IF you are adding reactions to a 'reaction' base,
they should be added as 'equilibrium' (or other) reactions.
(8) When using an reactor object in IDAES, you must always provide a 'reaction_config'
to match with the 'thermo_config'. We can create a base 'reaction' config from
the database and add reactions to that config in the same way we do for the
'thermo_config' when adding reactions as inherent.
    [NOTE: If a reaction is added to a 'thermo_config' as 'inherent', then it should
NOT be added to a 'reaction_config' as 'equilibrium']
"""
# ========= These imports (below) are for testing the configs from EDB ===============
# Import specific pyomo objects
from pyomo.environ import (
ConcreteModel,
)
# Import the idaes objects for Generic Properties and Reactions
from idaes.generic_models.properties.core.generic.generic_property import (
GenericParameterBlock,
)
from idaes.generic_models.properties.core.generic.generic_reaction import (
GenericReactionParameterBlock,
)
# Import the idaes object for the EquilibriumReactor unit model
from idaes.generic_models.unit_models.equilibrium_reactor import EquilibriumReactor
# Import the core idaes objects for Flowsheets and types of balances
from idaes.core import FlowsheetBlock
# ========= These imports (above) are for testing the configs from EDB ===============
# ========================== (3) ================================
# Import ElectrolyteDB object
from watertap.edb import ElectrolyteDB
__author__ = "<NAME>"
# ========================== (4) ================================
# By default, invoking the 'ElectrolyteDB' object (with no args)
# will attempt to connect to the local host database. You can
# check the connection by calling the 'can_connect' function
# and passing the 'host' and 'port' as args. If no 'host' or
# 'port' are given, then it uses the defaults.
def connect_to_edb(test_invalid_host=False):
print("connecting to " + str(ElectrolyteDB.DEFAULT_URL))
db = ElectrolyteDB()
connected = db.can_connect()
return (db, connected)
# ========================== (5) ================================
# All configuration files used in WaterTAP for electrolyte chemistry
# require a 'base' dictionary to start. For example, we need to
# create a 'thermo_config' dictionary to pass to the GenericProperty
# package in IDAES. That 'thermo_config' file will always have a
# few specific items in common with most other configuration files.
# Thus, this operation will populate a class object that holds the
# data associated with that 'base' dictionary.
#
# In the EDB, there are several different 'base' structures to start
# from. In this example, we will build from the 'default_thermo'
# configuration base.
def grab_base_thermo_config(db):
# Get the base and place into a result object
base = db.get_base("default_thermo")
return base
# ========================== (6) ================================
# Get chemical components/species for a simulation case
# NOTE: This function here also returns a 'list' of the
# components that it finds. This is not a built in
# feature of the EDB, but is very useful because
# getting reactions is dependent on the component list.
def get_components_and_add_to_idaes_config(db, base_obj, by_elements=False):
# Going to grab all components that contain ONLY "H" and "O"
# Expected behavior = Will get "H2O", "H_+", and "OH_-"
element_list = ["H","O"]
    # Alternatively, you can pass a list of individual components
# you want to grab and the EDB functions should grab explicitly
# those components/species you want.
comp_list = ["H2O","H_+","OH_-"]
# Just like before, this function returns a results object
# that contains other objects that must be iterated through
# in order to access the information. Then, call the 'add'
# function to add those components to the 'base' object
if (by_elements==True):
res_obj_comps = db.get_components(element_names=element_list)
else:
res_obj_comps = db.get_components(component_names=comp_list)
# Iterate through the results object and add the components
# to the base_obj
db_comp_list = []
for comp_obj in res_obj_comps:
print("Adding " + str(comp_obj.name) + "" )
base_obj.add(comp_obj)
db_comp_list.append(comp_obj.name)
print()
return (base_obj, db_comp_list)
# ========================== (7) ================================
# Grab the reactions associated with the list of components and add
# them to a base object (which could be a 'thermo' base or 'reaction' base)
#
def get_reactions_return_object(db, base_obj, comp_list, is_inherent=True):
react_obj = db.get_reactions(component_names=comp_list)
for r in react_obj:
print("Found reaction: " + str(r.name))
if (is_inherent == True):
r._data["type"] = "inherent"
base_obj.add(r)
return base_obj
# ========================== (8) ================================
# Create a base config for reactions.
def grab_base_reaction_config(db):
# Get the base and place into a result object
base = db.get_base("reaction")
return base
# This function will produce an error if the thermo config is not correct
def is_thermo_config_valid(thermo_config):
model = ConcreteModel()
model.fs = FlowsheetBlock(default={"dynamic": False})
model.fs.thermo_params = GenericParameterBlock(default=thermo_config)
return True
# This function will produce an error if the thermo config is not correct
# or if the pairing of the thermo and reaction config are invalid
def is_thermo_reaction_pair_valid(thermo_config, reaction_config):
model = ConcreteModel()
model.fs = FlowsheetBlock(default={"dynamic": False})
model.fs.thermo_params = GenericParameterBlock(default=thermo_config)
model.fs.rxn_params = GenericReactionParameterBlock(
default={"property_package": model.fs.thermo_params, **reaction_config}
)
return True
# Run script for testing
def run_the_basics_with_mockdb(db):
base_obj = grab_base_thermo_config(db)
(base_obj, comp_list) = get_components_and_add_to_idaes_config(db, base_obj)
# Create a reaction config
react_base = grab_base_reaction_config(db)
# Add reactions to the reaction base as 'equilibrium'
react_base = get_reactions_return_object(db, react_base, comp_list, is_inherent=False)
# If all goes well, this function returns true
return is_thermo_reaction_pair_valid(base_obj.idaes_config, react_base.idaes_config)
# Run script for testing
def run_the_basics_alt_with_mockdb(db):
base_obj = grab_base_thermo_config(db)
(base_obj, comp_list) = get_components_and_add_to_idaes_config(db, base_obj, by_elements=True)
# Add reactions to the thermo base as 'inherent'
base_obj = get_reactions_return_object(db, base_obj, comp_list, is_inherent=True)
# If all goes well, this function returns true
return is_thermo_config_valid(base_obj.idaes_config)
# Run script for testing
def run_the_basics_dummy_rxn_with_mockdb(db):
base_obj = grab_base_thermo_config(db)
(base_obj, comp_list) = get_components_and_add_to_idaes_config(db, base_obj, by_elements=True)
# Add reactions to the thermo base as 'inherent'
base_obj = get_reactions_return_object(db, base_obj, comp_list, is_inherent=True)
# Create a reaction config
react_base = grab_base_reaction_config(db)
# If no reactions are in the reaction base, this will cause an error in IDAES.
# However, we can add a 'dummy' reaction just to satisfy the IDAES code base.
react_obj = db.get_reactions(reaction_names=["dummy"])
for r in react_obj:
print("Found reaction: " + str(r.name))
react_base.add(r)
# IDAES will throw an exception when we try to do this if something is wrong
thermo_config = base_obj.idaes_config
reaction_config = react_base.idaes_config
model = ConcreteModel()
model.fs = FlowsheetBlock(default={"dynamic": False})
model.fs.thermo_params = GenericParameterBlock(default=thermo_config)
model.fs.rxn_params = GenericReactionParameterBlock(
default={"property_package": model.fs.thermo_params, **reaction_config}
)
model.fs.unit = EquilibriumReactor(
default={
"property_package": model.fs.thermo_params,
"reaction_package": model.fs.rxn_params,
"has_rate_reactions": False,
"has_equilibrium_reactions": False,
"has_heat_transfer": False,
"has_heat_of_reaction": False,
"has_pressure_change": False,
}
)
# If all goes well, this function returns true
return is_thermo_reaction_pair_valid(base_obj.idaes_config, react_base.idaes_config)
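
# Illustrative entry point (a sketch): it assumes a local MongoDB instance is
# running and already populated via `edb load -b`, as described in the module
# docstring above, and reuses the helper functions defined in this file.
if __name__ == "__main__":
    db, connected = connect_to_edb()
    if connected:
        print("Thermo + reaction configs valid:", run_the_basics_with_mockdb(db))
    else:
        print("Could not connect to the electrolyte database.")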
|
StarcoderdataPython
|
4840242
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to use the AWS SDK for Python (Boto3) with Amazon Simple Email Service
(Amazon SES) to manage email templates that contain replaceable tags.
"""
import logging
from pprint import pprint
import re
import boto3
from botocore.exceptions import ClientError
# Defines template tags, which are enclosed in two curly braces, such as {{tag}}.
TEMPLATE_REGEX = r'(?<={{).+?(?=}})'
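# For example, re.findall(TEMPLATE_REGEX, "Hello, {{name}}!") returns ['name'].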
logger = logging.getLogger(__name__)
class SesTemplate:
"""Encapsulates Amazon SES template functions."""
def __init__(self, ses_client):
"""
:param ses_client: A Boto3 Amazon SES client.
"""
self.ses_client = ses_client
self.template = None
self.template_tags = set()
def _extract_tags(self, subject, text, html):
"""
Extracts tags from a template as a set of unique values.
:param subject: The subject of the email.
:param text: The text version of the email.
:param html: The html version of the email.
"""
self.template_tags = set(re.findall(TEMPLATE_REGEX, subject + text + html))
logger.info("Extracted template tags: %s", self.template_tags)
def verify_tags(self, template_data):
"""
Verifies that the tags in the template data are part of the template.
:param template_data: Template data formed of key-value pairs of tags and
replacement text.
:return: True when all of the tags in the template data are usable with the
template; otherwise, False.
"""
diff = set(template_data) - self.template_tags
if diff:
logger.warning(
"Template data contains tags that aren't in the template: %s", diff)
return False
else:
return True
def name(self):
"""
:return: Gets the name of the template, if a template has been loaded.
"""
return self.template['TemplateName'] if self.template is not None else None
def create_template(self, name, subject, text, html):
"""
Creates an email template.
:param name: The name of the template.
:param subject: The subject of the email.
:param text: The plain text version of the email.
:param html: The HTML version of the email.
"""
try:
template = {
'TemplateName': name,
'SubjectPart': subject,
'TextPart': text,
'HtmlPart': html}
self.ses_client.create_template(Template=template)
logger.info("Created template %s.", name)
self.template = template
self._extract_tags(subject, text, html)
except ClientError:
logger.exception("Couldn't create template %s.", name)
raise
def delete_template(self):
"""
Deletes an email template.
"""
try:
self.ses_client.delete_template(TemplateName=self.template['TemplateName'])
logger.info("Deleted template %s.", self.template['TemplateName'])
self.template = None
self.template_tags = None
except ClientError:
logger.exception(
"Couldn't delete template %s.", self.template['TemplateName'])
raise
def get_template(self, name):
"""
Gets a previously created email template.
:param name: The name of the template to retrieve.
:return: The retrieved email template.
"""
try:
response = self.ses_client.get_template(TemplateName=name)
self.template = response['Template']
logger.info("Got template %s.", name)
self._extract_tags(
self.template['SubjectPart'], self.template['TextPart'],
self.template['HtmlPart'])
except ClientError:
logger.exception("Couldn't get template %s.", name)
raise
else:
return self.template
def list_templates(self):
"""
Gets a list of all email templates for the current account.
:return: The list of retrieved email templates.
"""
try:
response = self.ses_client.list_templates()
templates = response['TemplatesMetadata']
logger.info("Got %s templates.", len(templates))
except ClientError:
logger.exception("Couldn't get templates.")
raise
else:
return templates
def update_template(self, name, subject, text, html):
"""
Updates a previously created email template.
:param name: The name of the template.
:param subject: The subject of the email.
:param text: The plain text version of the email.
:param html: The HTML version of the email.
"""
try:
template = {
'TemplateName': name,
'SubjectPart': subject,
'TextPart': text,
'HtmlPart': html}
self.ses_client.update_template(Template=template)
logger.info("Updated template %s.", name)
self.template = template
self._extract_tags(subject, text, html)
except ClientError:
logger.exception("Couldn't update template %s.", name)
raise
def usage_demo():
print('-'*88)
print("Welcome to the Amazon Simple Email Service (Amazon SES) email template "
"demo!")
print('-'*88)
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
ses_template = SesTemplate(boto3.client('ses'))
template = {
'name': 'doc-example-template',
'subject': 'Example of an email template.',
'text': "This is what {{name}} will {{action}} if {{name}} can't display HTML.",
'html': "<p><i>This</i> is what {{name}} will {{action}} if {{name}} "
"<b>can</b> display HTML.</p>"}
print("Creating an email template.")
ses_template.create_template(**template)
print("Getting the list of template metadata.")
template_metas = ses_template.list_templates()
for temp_meta in template_metas:
print(f"Got template {temp_meta['Name']}:")
temp_data = ses_template.get_template(temp_meta['Name'])
pprint(temp_data)
print(f"Deleting template {template['name']}.")
ses_template.delete_template()
print("Thanks for watching!")
print('-'*88)
if __name__ == '__main__':
usage_demo()
|
StarcoderdataPython
|
1705836
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
def get_rate(check, metric_name, modifiers, global_options):
"""
Send with the `AgentCheck.rate` method.
"""
rate_method = check.rate
def rate(metric, sample_data, runtime_data):
for sample, tags, hostname in sample_data:
rate_method(metric_name, sample.value, tags=tags, hostname=hostname)
del check
del modifiers
del global_options
return rate
|
StarcoderdataPython
|
1789273
|
<filename>problems/days_between_two_dates.py
from datetime import date
first_date = date(2019, 10, 14)
second_date = date(2019, 9, 14)
diff = first_date - second_date
print(diff.days)
|
StarcoderdataPython
|
197134
|
<reponame>NatsubiSogan/comp_library<filename>Python/data_structure/wavelet_matrix.py
from bit_vector import BitVector
# Wavelet Matrix
class WaveletMatrix:
def __init__(self, array: list, log: int = 32) -> None:
self.n = len(array)
self.mat = []
self.zs = []
self.log = log
for d in range(self.log)[::-1]:
ls, rs = [], []
BV = BitVector(self.n + 1)
for ind, val in enumerate(array):
if val & (1 << d):
rs.append(val)
BV.set(ind)
else:
ls.append(val)
BV.build()
self.mat.append(BV)
self.zs.append(len(ls))
array = ls + rs
def access(self, i: int) -> int:
res = 0
for d in range(self.log):
res <<= 1
if self.mat[d][i]:
res |= 1
i = self.mat[d].rank1(i) + self.zs[d]
else:
i = self.mat[d].rank0(i)
return res
def rank(self, val: int, l: int, r: int) -> int:
for d in range(self.log):
if val >> (self.log - d - 1) & 1:
l = self.mat[d].rank1(l) + self.zs[d]
r = self.mat[d].rank1(r) + self.zs[d]
else:
l = self.mat[d].rank0(l)
r = self.mat[d].rank0(r)
return r - l
def quantile(self, l: int, r: int, k: int) -> int:
res = 0
for d in range(self.log):
res <<= 1
cntl, cntr = self.mat[d].rank1(l), self.mat[d].rank1(r)
if cntr - cntl >= k:
l = cntl + self.zs[d]
r = cntr + self.zs[d]
res |= 1
else:
l -= cntl
r -= cntr
k -= cntr - cntl
return res
def kth_smallest(self, l: int, r: int, k: int) -> int:
return self.quantile(l, r, r - l - k)
class CompressedWaveletMatrix:
def __init__(self, array: list) -> None:
self.array = sorted(set(array))
self.comp = {val: ind for ind, val in enumerate(self.array)}
array = [self.comp[val] for val in array]
log = len(self.array).bit_length()
self.WM = WaveletMatrix(array, log)
def access(self, i: int) -> int:
return self.array[self.WM.access(i)]
def rank(self, l: int, r: int, val: int) -> int:
if val not in self.comp: return 0
return self.WM.rank(self.comp[val], l, r)
def kth_smallest(self, l: int, r: int, k: int) -> int:
return self.array[self.WM.kth_smallest(l, r, k)]
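# Illustrative usage sketch (added, not part of the original snippet): the sample data and
# the expected values in the comments are assumptions chosen for demonstration.
if __name__ == "__main__":
    data = [5, 4, 5, 5, 2, 1, 5, 6, 1, 3, 5, 0]
    cwm = CompressedWaveletMatrix(data)
    print(cwm.access(3))              # value stored at index 3 -> 5
    print(cwm.rank(0, 7, 5))          # occurrences of 5 in data[0:7] -> 4
    print(cwm.kth_smallest(0, 7, 0))  # smallest value in data[0:7]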
|
StarcoderdataPython
|
3298358
|
<reponame>Puppet-Finland/puppet-trac
# -*- coding: utf-8 -*-
"""
License: BSD
(c) 2005-2008 ::: <NAME> (<EMAIL>)
(c) 2009 ::: www.CodeResort.com - BV Network AS (<EMAIL>)
"""
import os
from datetime import datetime
from trac.attachment import Attachment
from trac.core import *
from trac.mimeview import Context
from trac.resource import Resource, ResourceNotFound
from trac.wiki.api import WikiSystem, IWikiPageManipulator
from trac.wiki.model import WikiPage
from trac.wiki.formatter import wiki_to_html, format_to_html
from tracrpc.api import IXMLRPCHandler, expose_rpc, Binary
from tracrpc.util import StringIO, to_utimestamp, from_utimestamp
__all__ = ['WikiRPC']
class WikiRPC(Component):
"""Superset of the
[http://www.jspwiki.org/Wiki.jsp?page=WikiRPCInterface2 WikiRPC API]. """
implements(IXMLRPCHandler)
manipulators = ExtensionPoint(IWikiPageManipulator)
def __init__(self):
self.wiki = WikiSystem(self.env)
def xmlrpc_namespace(self):
return 'wiki'
def xmlrpc_methods(self):
yield (None, ((dict, datetime),), self.getRecentChanges)
yield ('WIKI_VIEW', ((int,),), self.getRPCVersionSupported)
yield (None, ((str, str), (str, str, int),), self.getPage)
yield (None, ((str, str, int),), self.getPage, 'getPageVersion')
yield (None, ((str, str), (str, str, int)), self.getPageHTML)
yield (None, ((str, str), (str, str, int)), self.getPageHTML, 'getPageHTMLVersion')
yield (None, ((list,),), self.getAllPages)
yield (None, ((dict, str), (dict, str, int)), self.getPageInfo)
yield (None, ((dict, str, int),), self.getPageInfo, 'getPageInfoVersion')
yield (None, ((bool, str, str, dict),), self.putPage)
yield (None, ((list, str),), self.listAttachments)
yield (None, ((Binary, str),), self.getAttachment)
yield (None, ((bool, str, Binary),), self.putAttachment)
yield (None, ((bool, str, str, str, Binary),
(bool, str, str, str, Binary, bool)),
self.putAttachmentEx)
yield (None, ((bool, str),(bool, str, int)), self.deletePage)
yield (None, ((bool, str),), self.deleteAttachment)
yield ('WIKI_VIEW', ((list, str),), self.listLinks)
yield ('WIKI_VIEW', ((str, str),), self.wikiToHtml)
def _fetch_page(self, req, pagename, version=None):
# Helper for getting the WikiPage that performs basic checks
page = WikiPage(self.env, pagename, version)
req.perm(page.resource).require('WIKI_VIEW')
if page.exists:
return page
else:
msg = 'Wiki page "%s" does not exist' % pagename
if version is not None:
msg += ' at version %s' % version
raise ResourceNotFound(msg)
def _page_info(self, name, when, author, version, comment):
return dict(name=name, lastModified=when,
author=author, version=int(version), comment=comment)
def getRecentChanges(self, req, since):
""" Get list of changed pages since timestamp """
since = to_utimestamp(since)
wiki_realm = Resource('wiki')
db = self.env.get_db_cnx()
cursor = db.cursor()
cursor.execute('SELECT name, time, author, version, comment '
'FROM wiki w1 '
'WHERE time >= %s '
'AND version = (SELECT MAX(version) '
' FROM wiki w2 '
' WHERE w2.name=w1.name) '
'ORDER BY time DESC', (since,))
result = []
for name, when, author, version, comment in cursor:
if 'WIKI_VIEW' in req.perm(wiki_realm(id=name, version=version)):
result.append(
self._page_info(name, from_utimestamp(when),
author, version, comment))
return result
def getRPCVersionSupported(self, req):
""" Returns 2 with this version of the Trac API. """
return 2
def getPage(self, req, pagename, version=None):
""" Get the raw Wiki text of page, latest version. """
page = self._fetch_page(req, pagename, version)
return page.text
def getPageHTML(self, req, pagename, version=None):
""" Return latest version of page as rendered HTML, utf8 encoded. """
page = self._fetch_page(req, pagename, version)
fields = {'text': page.text}
for manipulator in self.manipulators:
manipulator.prepare_wiki_page(req, page, fields)
context = Context.from_request(req, page.resource, absurls=True)
html = format_to_html(self.env, context, fields['text'])
return '<html><body>%s</body></html>' % html.encode('utf-8')
def getAllPages(self, req):
""" Returns a list of all pages. The result is an array of utf8 pagenames. """
pages = []
for page in self.wiki.get_pages():
if 'WIKI_VIEW' in req.perm(Resource('wiki', page)):
pages.append(page)
return pages
def getPageInfo(self, req, pagename, version=None):
""" Returns information about the given page. """
page = WikiPage(self.env, pagename, version)
req.perm(page.resource).require('WIKI_VIEW')
if page.exists:
last_update = page.get_history().next()
return self._page_info(page.name, last_update[1],
last_update[2], page.version, page.comment)
def putPage(self, req, pagename, content, attributes):
""" writes the content of the page. """
page = WikiPage(self.env, pagename)
if page.readonly:
req.perm(page.resource).require('WIKI_ADMIN')
elif not page.exists:
req.perm(page.resource).require('WIKI_CREATE')
else:
req.perm(page.resource).require('WIKI_MODIFY')
page.text = content
if req.perm(page.resource).has_permission('WIKI_ADMIN'):
page.readonly = attributes.get('readonly') and 1 or 0
page.save(attributes.get('author', req.authname),
attributes.get('comment'), req.remote_addr)
return True
def deletePage(self, req, name, version=None):
"""Delete a Wiki page (all versions) or a specific version by
including an optional version number. Attachments will also be
deleted if page no longer exists. Returns True for success."""
wp = WikiPage(self.env, name, version)
req.perm(wp.resource).require('WIKI_DELETE')
try:
wp.delete(version)
return True
except:
return False
def listAttachments(self, req, pagename):
""" Lists attachments on a given page. """
for a in Attachment.select(self.env, 'wiki', pagename):
if 'ATTACHMENT_VIEW' in req.perm(a.resource):
yield pagename + '/' + a.filename
def getAttachment(self, req, path):
""" returns the content of an attachment. """
pagename, filename = os.path.split(path)
attachment = Attachment(self.env, 'wiki', pagename, filename)
req.perm(attachment.resource).require('ATTACHMENT_VIEW')
return Binary(attachment.open().read())
def putAttachment(self, req, path, data):
""" (over)writes an attachment. Returns True if successful.
This method is compatible with WikiRPC. `putAttachmentEx` has a more
extensive set of (Trac-specific) features. """
pagename, filename = os.path.split(path)
self.putAttachmentEx(req, pagename, filename, None, data)
return True
def putAttachmentEx(self, req, pagename, filename, description, data, replace=True):
""" Attach a file to a Wiki page. Returns the (possibly transformed)
filename of the attachment.
Use this method if you don't care about WikiRPC compatibility. """
if not WikiPage(self.env, pagename).exists:
raise ResourceNotFound, 'Wiki page "%s" does not exist' % pagename
if replace:
try:
attachment = Attachment(self.env, 'wiki', pagename, filename)
req.perm(attachment.resource).require('ATTACHMENT_DELETE')
attachment.delete()
except TracError:
pass
attachment = Attachment(self.env, 'wiki', pagename)
req.perm(attachment.resource).require('ATTACHMENT_CREATE')
attachment.author = req.authname
attachment.description = description
attachment.insert(filename, StringIO(data.data), len(data.data))
return attachment.filename
def deleteAttachment(self, req, path):
""" Delete an attachment. """
pagename, filename = os.path.split(path)
if not WikiPage(self.env, pagename).exists:
raise ResourceNotFound, 'Wiki page "%s" does not exist' % pagename
attachment = Attachment(self.env, 'wiki', pagename, filename)
req.perm(attachment.resource).require('ATTACHMENT_DELETE')
attachment.delete()
return True
def listLinks(self, req, pagename):
""" ''Not implemented'' """
return []
def wikiToHtml(self, req, text):
""" Render arbitrary Wiki text as HTML. """
return unicode(wiki_to_html(text, self.env, req, absurls=1))
|
StarcoderdataPython
|
3369686
|
import sqlite3
# Total number of characters
conn = sqlite3.connect('rpg_db.sqlite3')
cur = conn.cursor()
cur.execute('SELECT name FROM charactercreator_character')
all_rows = cur.fetchall()
print(f'The number of characters is {len(all_rows)}.')
# Number by class
classlist = ['cleric', 'fighter', 'mage', 'necromancer', 'thief']
curx = conn.cursor()
class_ = []
for charclass in classlist:
curx.execute(f'SELECT * FROM charactercreator_{charclass}');
class_ = curx.fetchall();
print(f'Members of the {charclass} class: {len(class_)}')
# Number of items
cur6 = conn.cursor()
cur6.execute("SELECT * FROM armory_item")
itemrows = cur6.fetchall()
print(f'There are {len(itemrows)} items in total')
# Number of weapons
cur7 = conn.cursor()
cur7.execute("SELECT * FROM armory_weapon")
weaponrows = cur7.fetchall()
print(f'There are {len(weaponrows)} weapons in total')
# First 20 characters' number of item
print("First 20 characters' number of items:")
cur8 = conn.cursor()
for i in range(20):
cur8.execute(f"SELECT COUNT(*) FROM charactercreator_character_inventory \
WHERE character_id = {i}")
inven = cur8.fetchall()
print(inven[0][0])
# First 20 characters' number of weapons
print("First 20 characters' number of weapons:")
cur9 = conn.cursor()
for i in range(20):
cur9.execute(f"SELECT COUNT(*) FROM charactercreator_character_inventory \
WHERE character_id = {i} AND item_id IN \
(SELECT item_ptr_id FROM armory_weapon)")
weap = cur9.fetchall()
print(weap[0][0])
# Average number of items
cur1 = conn.cursor()
inv = []
for i in range(len(all_rows)):
cur1.execute(f"SELECT COUNT(*) FROM charactercreator_character_inventory \
WHERE character_id = {i}")
invno = cur1.fetchall()
inv.append(invno)
inv_ = [item[0] for item in inv]
inv3 = [item[0] for item in inv_]
print(f'Average number of items: {sum(inv3) / len(inv3)}')
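# Alternative sketch (added, not part of the original script): a similar average computed with
# a single SQL aggregate instead of a Python loop over every character. Assumes the same schema
# as above; note that GROUP BY only counts characters that own at least one item.
cur3 = conn.cursor()
cur3.execute("""SELECT AVG(cnt) FROM (
                    SELECT COUNT(*) AS cnt
                    FROM charactercreator_character_inventory
                    GROUP BY character_id)""")
print(f'Average number of items (SQL aggregate): {cur3.fetchone()[0]}')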
# Average number of weapons
cur2 = conn.cursor()
inven = []
for i in range(len(all_rows)):
cur2.execute(f"SELECT COUNT(*) FROM charactercreator_character_inventory \
WHERE character_id = {i} AND item_id IN \
(SELECT item_ptr_id FROM armory_weapon)")
invno = cur2.fetchall()
inven.append(invno)
inven_ = [item[0] for item in inven]
inven3 = [item[0] for item in inven_]
print(f'Average number of weapons: {sum(inven3) / len(inven3)}')
conn.close()
# No need to commit since we're just reading off data
|
StarcoderdataPython
|
1628969
|
<reponame>TalHadad/yolov5_pytorch
#############################################################################################
# page 4: Feedforward neural network
#############################################################################################
######################################
# 1. Neural Networks
######################################
# 1.1.
# $ Methods: nn.Module, nn.Linear, F.sigmoid
class Net(nn.Module):
def __init__(self, in_size, hidden_size, out_size):
super(Net, self).__init__()
self.linear1 = nn.Linear(in_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, out_size)
def forward(self, x):
hidden_out = F.sigmoid(self.linear1(x))
out = F.sigmoid(self.linear2(hidden_out))
return out
# 1.2. another way to build a neural network with the sequential module (same as 1.1.)
# $ Methods: nn.Sequential, nn.Linear, nn.Sigmoid
model = torch.nn.Sequential(torch.nn.Linear(in_size, hidden_size),
torch.nn.Sigmoid(),
torch.nn.Linear(hidden_size, out_size),
torch.nn.Sigmoid())
# 1.3. because our output is a logistic unit (why not a Maximum Likelihood Estimator?)
# $ Methods: nn.BCELoss
loss_function = nn.BCELoss()
# 1.4. Mean Squared Error loss (also for logistic regression?) (for linear regression it is ordinary least squares)
# $ Methods: nn.MSELoss
loss_function = nn.MSELoss()
# Note: 1. y:torch.LongTensor
# 2. y:N (not Nx1 as in logistic regression)
# 1.5. To use the neural network for regression, simply remove the sigmoid from the last layer (in 1.1.)
# $ Methods: none
out = (self.linear2(hidden_out))
######################################
# 2. Back Propagation
######################################
# didn't see in my notes
######################################
# 3. Activation Functions
######################################
# 1. Sigmoid: output ranges from 0 to 1 -> derivative is bell-shaped, peaking at x=0 with a maximum of 0.25 -> vanishing gradient
# 2. Tanh: output ranges from -1 to 1 -> derivative is bell-shaped, peaking at x=0 with a maximum of 1 -> vanishing gradient
# 3. ReLU: output ranges from 0 to inf -> derivative is 0 for x<0 and 1 for x>0 -> no vanishing gradient
# 3.1.
# $ Methods: F.tanh
hidden_out = F.tanh(self.linear1(x))
# 3.2.
# $ Methods: F.relu
hidden_out = F.relu(self.linear1(x))
# 3.3.
# $ Methods: nn.Sequential, nn.Linear, nn.Tanh
model = nn.Sequential(nn.Linear(in_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size,out_size))
# 3.4.
# $ Methods: nn.Sequential, nn.Linear, nn.ReLU
model = nn.Sequential(nn.Linear(in_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, out_size))
######################################
# 4. Build Deep Networks in Pytorch
######################################
# 4.1. using nn.Module
# $ Methods: nn.Module, nn.Linear, F.sigmoid
class Net(nn.Module):
def __init__(self, in_size, hidden1_size, hidden2_size, out_size):
super(Net, self).__init__()
self.linear1 = nn.Linear(in_size, hidden1_size)
self.linear2 = nn.Linear(hidden1_size, hidden2_size)
self.linear3 = nn.Linear(hidden2_size, out_size)
def forward(self, x):
hidden1_out = F.sigmoid(self.linear1(x))
hidden2_out = F.sigmoid(self.linear2(hidden1_out))
out = self.linear3(hidden2_out)
return out
# 4.2. using nn.Sequential
# $ Methods: nn.Sequential, nn.Linear, nn.Sigmoid
model = nn.Sequential(nn.Linear(in_size, hidden1_size),
nn.Sigmoid(),
nn.Linear(hidden1_size, hidden2_size),
nn.Sigmoid(),
                      nn.Linear(hidden2_size, out_size))
# 4.3. using nn.ModuleList
# $ Methods: nn.Module, nn.ModuleList, .append, nn.Linear, F.relu
class Net(nn.Module):
def __init__(self, layers):
super(Net, self).__init__()
self.hiddens = nn.ModuleList()
for in_size, out_size in zip(layers, layers[1:]):
self.hiddens.append(nn.Linear(in_size, out_size))
def forward(self, x):
for i, hidden in enumerate(self.hiddens):
            if i < len(self.hiddens) - 1:
                x = F.relu(hidden(x))
            else:
                out = hidden(x)
return out
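######################################
# 5. Minimal training-loop sketch (added illustration, not from the original notes)
######################################
# Hedged sketch: trains the sequential model from 1.2 on synthetic data with nn.BCELoss.
# The imports, tensor shapes, learning rate and epoch count below are assumptions.
import torch
import torch.nn as nn
X = torch.randn(100, 4)                        # 100 samples, 4 features (synthetic)
Y = torch.randint(0, 2, (100, 1)).float()      # binary targets in {0, 1}
model = nn.Sequential(nn.Linear(4, 8),
                      nn.Sigmoid(),
                      nn.Linear(8, 1),
                      nn.Sigmoid())
loss_function = nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
for epoch in range(100):
    optimizer.zero_grad()
    loss = loss_function(model(X), Y)          # forward pass + BCE loss
    loss.backward()                            # backpropagation
    optimizer.step()                           # parameter update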
|
StarcoderdataPython
|
1757556
|
<gh_stars>1-10
#! /usr/bin/env python
from __future__ import generators
"""
Module difflib -- helpers for computing deltas between objects.
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
Use SequenceMatcher to return list of the best "good enough" matches.
Function ndiff(a, b):
Return a delta: the difference between `a` and `b` (lists of strings).
Function restore(delta, which):
Return one of the two sequences that generated an ndiff delta.
Class SequenceMatcher:
A flexible class for comparing pairs of sequences of any type.
Class Differ:
For producing human-readable deltas from sequences of lines of text.
"""
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
'Differ']
class SequenceMatcher:
"""
SequenceMatcher is a flexible class for comparing pairs of sequences of
any type, so long as the sequence elements are hashable. The basic
algorithm predates, and is a little fancier than, an algorithm
published in the late 1980's by Ratcliff and Obershelp under the
hyperbolic name "gestalt pattern matching". The basic idea is to find
the longest contiguous matching subsequence that contains no "junk"
elements (R-O doesn't address junk). The same idea is then applied
recursively to the pieces of the sequences to the left and to the right
of the matching subsequence. This does not yield minimal edit
sequences, but does tend to yield matches that "look right" to people.
SequenceMatcher tries to compute a "human-friendly diff" between two
sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
longest *contiguous* & junk-free matching subsequence. That's what
catches peoples' eyes. The Windows(tm) windiff has another interesting
notion, pairing up elements that appear uniquely in each sequence.
That, and the method here, appear to yield more intuitive difference
reports than does diff. This method appears to be the least vulnerable
to synching up on blocks of "junk lines", though (like blank lines in
ordinary text files, or maybe "<P>" lines in HTML files). That may be
because this is the only method of the 3 that has a *concept* of
"junk" <wink>.
Example, comparing two strings, and considering blanks to be "junk":
>>> s = SequenceMatcher(lambda x: x == " ",
... "private Thread currentThread;",
... "private volatile Thread currentThread;")
>>>
.ratio() returns a float in [0, 1], measuring the "similarity" of the
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
>>> print round(s.ratio(), 3)
0.866
>>>
If you're only interested in where the sequences match,
.get_matching_blocks() is handy:
>>> for block in s.get_matching_blocks():
... print "a[%d] and b[%d] match for %d elements" % block
a[0] and b[0] match for 8 elements
a[8] and b[17] match for 6 elements
a[14] and b[23] match for 15 elements
a[29] and b[38] match for 0 elements
Note that the last tuple returned by .get_matching_blocks() is always a
dummy, (len(a), len(b), 0), and this is the only case in which the last
tuple element (number of elements matched) is 0.
If you want to know how to change the first sequence into the second,
use .get_opcodes():
>>> for opcode in s.get_opcodes():
... print "%6s a[%d:%d] b[%d:%d]" % opcode
equal a[0:8] b[0:8]
insert a[8:8] b[8:17]
equal a[8:14] b[17:23]
equal a[14:29] b[23:38]
See the Differ class for a fancy human-friendly file differencer, which
uses SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
See also function get_close_matches() in this module, which shows how
simple code building on SequenceMatcher can be used to do useful work.
Timing: Basic R-O is cubic time worst case and quadratic time expected
case. SequenceMatcher is quadratic time for the worst case and has
expected-case behavior dependent in a complicated way on how many
elements the sequences have in common; best case time is linear.
Methods:
__init__(isjunk=None, a='', b='')
Construct a SequenceMatcher.
set_seqs(a, b)
Set the two sequences to be compared.
set_seq1(a)
Set the first sequence to be compared.
set_seq2(b)
Set the second sequence to be compared.
find_longest_match(alo, ahi, blo, bhi)
Find longest matching block in a[alo:ahi] and b[blo:bhi].
get_matching_blocks()
Return list of triples describing matching subsequences.
get_opcodes()
Return list of 5-tuples describing how to turn a into b.
ratio()
Return a measure of the sequences' similarity (float in [0,1]).
quick_ratio()
Return an upper bound on .ratio() relatively quickly.
real_quick_ratio()
Return an upper bound on ratio() very quickly.
"""
def __init__(self, isjunk=None, a='', b=''):
"""Construct a SequenceMatcher.
Optional arg isjunk is None (the default), or a one-argument
function that takes a sequence element and returns true iff the
element is junk. None is equivalent to passing "lambda x: 0", i.e.
no elements are considered to be junk. For example, pass
lambda x: x in " \\t"
if you're comparing lines as sequences of characters, and don't
want to synch up on blanks or hard tabs.
Optional arg a is the first of two sequences to be compared. By
default, an empty string. The elements of a must be hashable. See
also .set_seqs() and .set_seq1().
Optional arg b is the second of two sequences to be compared. By
default, an empty string. The elements of b must be hashable. See
also .set_seqs() and .set_seq2().
"""
# Members:
# a
# first sequence
# b
# second sequence; differences are computed as "what do
# we need to do to 'a' to change it into 'b'?"
# b2j
# for x in b, b2j[x] is a list of the indices (into b)
# at which x appears; junk elements do not appear
# b2jhas
# b2j.has_key
# fullbcount
# for x in b, fullbcount[x] == the number of times x
# appears in b; only materialized if really needed (used
# only for computing quick_ratio())
# matching_blocks
# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
# ascending & non-overlapping in i and in j; terminated by
# a dummy (len(a), len(b), 0) sentinel
# opcodes
# a list of (tag, i1, i2, j1, j2) tuples, where tag is
# one of
# 'replace' a[i1:i2] should be replaced by b[j1:j2]
# 'delete' a[i1:i2] should be deleted
# 'insert' b[j1:j2] should be inserted
# 'equal' a[i1:i2] == b[j1:j2]
# isjunk
# a user-supplied function taking a sequence element and
# returning true iff the element is "junk" -- this has
# subtle but helpful effects on the algorithm, which I'll
# get around to writing up someday <0.9 wink>.
# DON'T USE! Only __chain_b uses this. Use isbjunk.
# isbjunk
# for x in b, isbjunk(x) == isjunk(x) but much faster;
# it's really the has_key method of a hidden dict.
# DOES NOT WORK for x in a!
self.isjunk = isjunk
self.a = self.b = None
self.set_seqs(a, b)
def set_seqs(self, a, b):
"""Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
"""
self.set_seq1(a)
self.set_seq2(b)
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None
def set_seq2(self, b):
"""Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq1().
"""
if b is self.b:
return
self.b = b
self.matching_blocks = self.opcodes = None
self.fullbcount = None
self.__chain_b()
# For each element x in b, set b2j[x] to a list of the indices in
# b where x appears; the indices are in increasing order; note that
# the number of times x appears in b is len(b2j[x]) ...
# when self.isjunk is defined, junk elements don't show up in this
# map at all, which stops the central find_longest_match method
# from starting any matching block at a junk element ...
# also creates the fast isbjunk function ...
# note that this is only called when b changes; so for cross-product
# kinds of matches, it's best to call set_seq2 once, then set_seq1
# repeatedly
def __chain_b(self):
# Because isjunk is a user-defined (not C) function, and we test
# for junk a LOT, it's important to minimize the number of calls.
# Before the tricks described here, __chain_b was by far the most
# time-consuming routine in the whole module! If anyone sees
# <NAME>, thank him again for profile.py -- I never would
# have guessed that.
# The first trick is to build b2j ignoring the possibility
# of junk. I.e., we don't call isjunk at all yet. Throwing
# out the junk later is much cheaper than building b2j "right"
# from the start.
b = self.b
self.b2j = b2j = {}
self.b2jhas = b2jhas = b2j.has_key
for i in xrange(len(b)):
elt = b[i]
if b2jhas(elt):
b2j[elt].append(i)
else:
b2j[elt] = [i]
# Now b2j.keys() contains elements uniquely, and especially when
# the sequence is a string, that's usually a good deal smaller
# than len(string). The difference is the number of isjunk calls
# saved.
isjunk, junkdict = self.isjunk, {}
if isjunk:
for elt in b2j.keys():
if isjunk(elt):
junkdict[elt] = 1 # value irrelevant; it's a set
del b2j[elt]
# Now for x in b, isjunk(x) == junkdict.has_key(x), but the
# latter is much faster. Note too that while there may be a
# lot of junk in the sequence, the number of *unique* junk
# elements is probably small. So the memory burden of keeping
# this dict alive is likely trivial compared to the size of b2j.
self.isbjunk = junkdict.has_key
def find_longest_match(self, alo, ahi, blo, bhi):
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
If isjunk is not defined:
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
alo <= i <= i+k <= ahi
blo <= j <= j+k <= bhi
and for all (i',j',k') meeting those conditions,
k >= k'
i <= i'
and if i == i', j <= j'
In other words, of all maximal matching blocks, return one that
starts earliest in a, and of all those maximal matching blocks that
start earliest in a, return the one that starts earliest in b.
>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
(0, 4, 5)
If isjunk is defined, first the longest matching block is
determined as above, but with the additional restriction that no
junk element appears in the block. Then that block is extended as
far as possible by matching (only) junk elements on both sides. So
the resulting block never matches on junk except as identical junk
happens to be adjacent to an "interesting" match.
Here's the same example as before, but considering blanks to be
junk. That prevents " abcd" from matching the " abcd" at the tail
end of the second sequence directly. Instead only the "abcd" can
match, and matches the leftmost "abcd" in the second sequence:
>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
(1, 0, 4)
If no blocks match, return (alo, blo, 0).
>>> s = SequenceMatcher(None, "ab", "c")
>>> s.find_longest_match(0, 2, 0, 1)
(0, 0, 0)
"""
# CAUTION: stripping common prefix or suffix would be incorrect.
# E.g.,
# ab
# acab
# Longest matching block is "ab", but if common prefix is
# stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
# strip, so ends up claiming that ab is changed to acab by
# inserting "ca" in the middle. That's minimal but unintuitive:
# "it's obvious" that someone inserted "ac" at the front.
# Windiff ends up at the same place as diff, but by pairing up
# the unique 'b's and then matching the first two 'a's.
a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
besti, bestj, bestsize = alo, blo, 0
# find longest junk-free match
# during an iteration of the loop, j2len[j] = length of longest
# junk-free match ending with a[i-1] and b[j]
j2len = {}
nothing = []
for i in xrange(alo, ahi):
# look at all instances of a[i] in b; note that because
# b2j has no junk keys, the loop is skipped if a[i] is junk
j2lenget = j2len.get
newj2len = {}
for j in b2j.get(a[i], nothing):
# a[i] matches b[j]
if j < blo:
continue
if j >= bhi:
break
k = newj2len[j] = j2lenget(j-1, 0) + 1
if k > bestsize:
besti, bestj, bestsize = i-k+1, j-k+1, k
j2len = newj2len
# Now that we have a wholly interesting match (albeit possibly
# empty!), we may as well suck up the matching junk on each
# side of it too. Can't think of a good reason not to, and it
# saves post-processing the (possibly considerable) expense of
# figuring out what to do with it. In the case of an empty
# interesting match, this is clearly the right thing to do,
# because no other kind of match is possible in the regions.
while besti > alo and bestj > blo and \
isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize = bestsize + 1
return besti, bestj, bestsize
def get_matching_blocks(self):
"""Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> s.get_matching_blocks()
[(0, 0, 2), (3, 2, 2), (5, 4, 0)]
"""
if self.matching_blocks is not None:
return self.matching_blocks
self.matching_blocks = []
la, lb = len(self.a), len(self.b)
self.__helper(0, la, 0, lb, self.matching_blocks)
self.matching_blocks.append( (la, lb, 0) )
return self.matching_blocks
# builds list of matching blocks covering a[alo:ahi] and
# b[blo:bhi], appending them in increasing order to answer
def __helper(self, alo, ahi, blo, bhi, answer):
i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same as b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
if k:
if alo < i and blo < j:
self.__helper(alo, i, blo, j, answer)
answer.append(x)
if i+k < ahi and j+k < bhi:
self.__helper(i+k, ahi, j+k, bhi, answer)
def get_opcodes(self):
"""Return list of 5-tuples describing how to turn a into b.
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
tuple preceding it, and likewise for j1 == the previous j2.
The tags are strings, with these meanings:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
... print ("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))
delete a[0:1] (q) b[0:0] ()
equal a[1:3] (ab) b[0:2] (ab)
replace a[3:4] (x) b[2:3] (y)
equal a[4:6] (cd) b[3:5] (cd)
insert a[6:6] () b[5:6] (f)
"""
if self.opcodes is not None:
return self.opcodes
i = j = 0
self.opcodes = answer = []
for ai, bj, size in self.get_matching_blocks():
# invariant: we've pumped out correct diffs to change
# a[:i] into b[:j], and the next matching block is
# a[ai:ai+size] == b[bj:bj+size]. So we need to pump
# out a diff to change a[i:ai] into b[j:bj], pump out
# the matching block, and move (i,j) beyond the match
tag = ''
if i < ai and j < bj:
tag = 'replace'
elif i < ai:
tag = 'delete'
elif j < bj:
tag = 'insert'
if tag:
answer.append( (tag, i, ai, j, bj) )
i, j = ai+size, bj+size
# the list of matching blocks is terminated by a
# sentinel with size 0
if size:
answer.append( ('equal', ai, i, bj, j) )
return answer
def ratio(self):
"""Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and
        M is the number of matches, this is 2.0*M / T.
Note that this is 1 if the sequences are identical, and 0 if
they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may
want to try .quick_ratio() or .real_quick_ratio() first to get an
upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
"""
matches = reduce(lambda sum, triple: sum + triple[-1],
self.get_matching_blocks(), 0)
return 2.0 * matches / (len(self.a) + len(self.b))
def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in self.b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
availhas, matches = avail.has_key, 0
for elt in self.a:
if availhas(elt):
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return 2.0 * matches / (len(self.a) + len(self.b))
def real_quick_ratio(self):
"""Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
la, lb = len(self.a), len(self.b)
# can't have more matches than the number of elements in the
# shorter sequence
return 2.0 * min(la, lb) / (la + lb)
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword as _keyword
>>> get_close_matches("wheel", _keyword.kwlist)
['while']
>>> get_close_matches("apple", _keyword.kwlist)
[]
>>> get_close_matches("accept", _keyword.kwlist)
['except']
"""
if not n > 0:
raise ValueError("n must be > 0: " + `n`)
if not 0.0 <= cutoff <= 1.0:
raise ValueError("cutoff must be in [0.0, 1.0]: " + `cutoff`)
result = []
s = SequenceMatcher()
s.set_seq2(word)
for x in possibilities:
s.set_seq1(x)
if s.real_quick_ratio() >= cutoff and \
s.quick_ratio() >= cutoff and \
s.ratio() >= cutoff:
result.append((s.ratio(), x))
# Sort by score.
result.sort()
# Retain only the best n.
result = result[-n:]
# Move best-scorer to head of list.
result.reverse()
# Strip scores.
return [x for score, x in result]
def _count_leading(line, ch):
"""
Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3
"""
i, n = 0, len(line)
while i < n and line[i] == ch:
i += 1
return i
class Differ:
r"""
Differ is a class for comparing sequences of lines of text, and
producing human-readable differences or deltas. Differ uses
SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
Each line of a Differ delta begins with a two-letter code:
'- ' line unique to sequence 1
'+ ' line unique to sequence 2
' ' line common to both sequences
'? ' line not present in either input sequence
Lines beginning with '? ' attempt to guide the eye to intraline
differences, and were not present in either input sequence. These lines
can be confusing if the sequences contain tab characters.
Note that Differ makes no claim to produce a *minimal* diff. To the
contrary, minimal diffs are often counter-intuitive, because they synch
up anywhere possible, sometimes accidental matches 100 pages apart.
Restricting synch points to contiguous matches preserves some notion of
locality, at the occasional cost of producing a longer diff.
Example: Comparing two texts.
First we set up the texts, sequences of individual single-line strings
ending with newlines (such sequences can also be obtained from the
`readlines()` method of file-like objects):
>>> text1 = ''' 1. Beautiful is better than ugly.
... 2. Explicit is better than implicit.
... 3. Simple is better than complex.
... 4. Complex is better than complicated.
... '''.splitlines(1)
>>> len(text1)
4
>>> text1[0][-1]
'\n'
>>> text2 = ''' 1. Beautiful is better than ugly.
... 3. Simple is better than complex.
... 4. Complicated is better than complex.
... 5. Flat is better than nested.
... '''.splitlines(1)
Next we instantiate a Differ object:
>>> d = Differ()
Note that when instantiating a Differ object we may pass functions to
filter out line and character 'junk'. See Differ.__init__ for details.
Finally, we compare the two:
>>> result = list(d.compare(text1, text2))
'result' is a list of strings, so let's pretty-print it:
>>> from pprint import pprint as _pprint
>>> _pprint(result)
[' 1. Beautiful is better than ugly.\n',
'- 2. Explicit is better than implicit.\n',
'- 3. Simple is better than complex.\n',
'+ 3. Simple is better than complex.\n',
'? ++\n',
'- 4. Complex is better than complicated.\n',
'? ^ ---- ^\n',
'+ 4. Complicated is better than complex.\n',
'? ++++ ^ ^\n',
'+ 5. Flat is better than nested.\n']
As a single multi-line string it looks like this:
>>> print ''.join(result),
1. Beautiful is better than ugly.
- 2. Explicit is better than implicit.
- 3. Simple is better than complex.
+ 3. Simple is better than complex.
? ++
- 4. Complex is better than complicated.
? ^ ---- ^
+ 4. Complicated is better than complex.
? ++++ ^ ^
+ 5. Flat is better than nested.
Methods:
__init__(linejunk=None, charjunk=None)
Construct a text differencer, with optional filters.
compare(a, b)
Compare two sequences of lines; generate the resulting delta.
"""
def __init__(self, linejunk=None, charjunk=None):
"""
Construct a text differencer, with optional filters.
The two optional keyword parameters are for filter functions:
- `linejunk`: A function that should accept a single string argument,
and return true iff the string is junk. The module-level function
`IS_LINE_JUNK` may be used to filter out lines without visible
characters, except for at most one splat ('#').
- `charjunk`: A function that should accept a string of length 1. The
module-level function `IS_CHARACTER_JUNK` may be used to filter out
whitespace characters (a blank or tab; **note**: bad idea to include
newline in this!).
"""
self.linejunk = linejunk
self.charjunk = charjunk
def compare(self, a, b):
r"""
Compare two sequences of lines; generate the resulting delta.
Each sequence must contain individual single-line strings ending with
newlines. Such sequences can be obtained from the `readlines()` method
of file-like objects. The delta generated also consists of newline-
terminated strings, ready to be printed as-is via the writeline()
method of a file-like object.
Example:
>>> print ''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(1),
... 'ore\ntree\nemu\n'.splitlines(1))),
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
cruncher = SequenceMatcher(self.linejunk, a, b)
for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
if tag == 'replace':
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
elif tag == 'delete':
g = self._dump('-', a, alo, ahi)
elif tag == 'insert':
g = self._dump('+', b, blo, bhi)
elif tag == 'equal':
g = self._dump(' ', a, alo, ahi)
else:
raise ValueError, 'unknown tag ' + `tag`
for line in g:
yield line
def _dump(self, tag, x, lo, hi):
"""Generate comparison results for a same-tagged range."""
for i in xrange(lo, hi):
yield '%s %s' % (tag, x[i])
def _plain_replace(self, a, alo, ahi, b, blo, bhi):
assert alo < ahi and blo < bhi
# dump the shorter block first -- reduces the burden on short-term
# memory if the blocks are of very different sizes
if bhi - blo < ahi - alo:
first = self._dump('+', b, blo, bhi)
second = self._dump('-', a, alo, ahi)
else:
first = self._dump('-', a, alo, ahi)
second = self._dump('+', b, blo, bhi)
for g in first, second:
for line in g:
yield line
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
r"""
When replacing one block of lines with another, search the blocks
for *similar* lines; the best-matching pair (if any) is used as a
synch point, and intraline difference marking is done on the
similar pair. Lots of work, but often worth it.
Example:
>>> d = Differ()
>>> d._fancy_replace(['abcDefghiJkl\n'], 0, 1, ['abcdefGhijkl\n'], 0, 1)
>>> print ''.join(d.results),
- abcDefghiJkl
? ^ ^ ^
+ abcdefGhijkl
? ^ ^ ^
"""
# don't synch up unless the lines have a similarity score of at
# least cutoff; best_ratio tracks the best score seen so far
best_ratio, cutoff = 0.74, 0.75
cruncher = SequenceMatcher(self.charjunk)
eqi, eqj = None, None # 1st indices of equal lines (if any)
# search for the pair that matches best without being identical
# (identical lines must be junk lines, & we don't want to synch up
# on junk -- unless we have to)
for j in xrange(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in xrange(alo, ahi):
ai = a[i]
if ai == bj:
if eqi is None:
eqi, eqj = i, j
continue
cruncher.set_seq1(ai)
# computing similarity is expensive, so use the quick
# upper bounds first -- have seen this speed up messy
# compares by a factor of 3.
# note that ratio() is only expensive to compute the first
# time it's called on a sequence pair; the expensive part
# of the computation is cached by cruncher
if cruncher.real_quick_ratio() > best_ratio and \
cruncher.quick_ratio() > best_ratio and \
cruncher.ratio() > best_ratio:
best_ratio, best_i, best_j = cruncher.ratio(), i, j
if best_ratio < cutoff:
# no non-identical "pretty close" pair
if eqi is None:
# no identical pair either -- treat it as a straight replace
for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
yield line
return
# no close pair, but an identical pair -- synch up on that
best_i, best_j, best_ratio = eqi, eqj, 1.0
else:
# there's a close pair, so forget the identical pair (if any)
eqi = None
# a[best_i] very similar to b[best_j]; eqi is None iff they're not
# identical
# pump out diffs from before the synch point
for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
yield line
# do intraline marking on the synch pair
aelt, belt = a[best_i], b[best_j]
if eqi is None:
# pump out a '-', '?', '+', '?' quad for the synched lines
atags = btags = ""
cruncher.set_seqs(aelt, belt)
for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
la, lb = ai2 - ai1, bj2 - bj1
if tag == 'replace':
atags += '^' * la
btags += '^' * lb
elif tag == 'delete':
atags += '-' * la
elif tag == 'insert':
btags += '+' * lb
elif tag == 'equal':
atags += ' ' * la
btags += ' ' * lb
else:
raise ValueError, 'unknown tag ' + `tag`
for line in self._qformat(aelt, belt, atags, btags):
yield line
else:
# the synch pair is identical
yield ' ' + aelt
# pump out diffs from after the synch point
for line in self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi):
yield line
def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
g = []
if alo < ahi:
if blo < bhi:
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
else:
g = self._dump('-', a, alo, ahi)
elif blo < bhi:
g = self._dump('+', b, blo, bhi)
for line in g:
yield line
def _qformat(self, aline, bline, atags, btags):
r"""
Format "?" output and deal with leading tabs.
Example:
>>> d = Differ()
>>> d._qformat('\tabcDefghiJkl\n', '\t\tabcdefGhijkl\n',
... ' ^ ^ ^ ', '+ ^ ^ ^ ')
>>> for line in d.results: print repr(line)
...
'- \tabcDefghiJkl\n'
'? \t ^ ^ ^\n'
'+ \t\tabcdefGhijkl\n'
'? \t ^ ^ ^\n'
"""
# Can hurt, but will probably help most of the time.
common = min(_count_leading(aline, "\t"),
_count_leading(bline, "\t"))
common = min(common, _count_leading(atags[:common], " "))
atags = atags[common:].rstrip()
btags = btags[common:].rstrip()
yield "- " + aline
if atags:
yield "? %s%s\n" % ("\t" * common, atags)
yield "+ " + bline
if btags:
yield "? %s%s\n" % ("\t" * common, btags)
# With respect to junk, an earlier version of ndiff simply refused to
# *start* a match with a junk element. The result was cases like this:
# before: private Thread currentThread;
# after: private volatile Thread currentThread;
# If you consider whitespace to be junk, the longest contiguous match
# not starting with junk is "e Thread currentThread". So ndiff reported
# that "e volatil" was inserted between the 't' and the 'e' in "private".
# While an accurate view, to people that's absurd. The current version
# looks for matching blocks that are entirely junk-free, then extends the
# longest one of those as far as possible but only with matching junk.
# So now "currentThread" is matched, then extended to suck up the
# preceding blank; then "private" is matched, and extended to suck up the
# following blank; then "Thread" is matched; and finally ndiff reports
# that "volatile " was inserted before "Thread". The only quibble
# remaining is that perhaps it was really the case that " volatile"
# was inserted after "private". I can live with that <wink>.
import re
def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
r"""
Return 1 for ignorable line: iff `line` is blank or contains a single '#'.
Examples:
>>> IS_LINE_JUNK('\n')
1
>>> IS_LINE_JUNK(' # \n')
1
>>> IS_LINE_JUNK('hello\n')
0
"""
return pat(line) is not None
def IS_CHARACTER_JUNK(ch, ws=" \t"):
r"""
Return 1 for ignorable character: iff `ch` is a space or tab.
Examples:
>>> IS_CHARACTER_JUNK(' ')
1
>>> IS_CHARACTER_JUNK('\t')
1
>>> IS_CHARACTER_JUNK('\n')
0
>>> IS_CHARACTER_JUNK('x')
0
"""
return ch in ws
del re
def ndiff(a, b, linejunk=IS_LINE_JUNK, charjunk=IS_CHARACTER_JUNK):
r"""
Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
Optional keyword parameters `linejunk` and `charjunk` are for filter
functions (or None):
- linejunk: A function that should accept a single string argument, and
return true iff the string is junk. The default is module-level function
IS_LINE_JUNK, which filters out lines without visible characters, except
for at most one splat ('#').
- charjunk: A function that should accept a string of length 1. The
default is module-level function IS_CHARACTER_JUNK, which filters out
whitespace characters (a blank or tab; note: bad idea to include newline
in this!).
Tools/scripts/ndiff.py is a command-line front-end to this function.
Example:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
... 'ore\ntree\nemu\n'.splitlines(1))
>>> print ''.join(diff),
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
return Differ(linejunk, charjunk).compare(a, b)
def restore(delta, which):
r"""
Generate one of the two sequences that generated a delta.
Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
lines originating from file 1 or 2 (parameter `which`), stripping off line
prefixes.
Examples:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
... 'ore\ntree\nemu\n'.splitlines(1))
>>> diff = list(diff)
>>> print ''.join(restore(diff, 1)),
one
two
three
>>> print ''.join(restore(diff, 2)),
ore
tree
emu
"""
try:
tag = {1: "- ", 2: "+ "}[int(which)]
except KeyError:
raise ValueError, ('unknown delta choice (must be 1 or 2): %r'
% which)
prefixes = (" ", tag)
for line in delta:
if line[:2] in prefixes:
yield line[2:]
def _test():
import doctest, difflib
return doctest.testmod(difflib)
if __name__ == "__main__":
_test()
|
StarcoderdataPython
|
1757726
|
import os
from flask import Flask
from .extensions import login_manager, mongo
from .config import TestingConfig
def create_app(config=TestingConfig):
""" Flask application factory """
# Setup Flask and load app.config
app = Flask(__name__)
app.config.from_object(config)
try:
os.makedirs(app.instance_path)
except OSError:
pass
@app.route("/test")
def test():
return "This is the test Page"
# Register Blueprints
register_blueprints(app)
# init Extensions
login_manager.init_app(app)
login_manager.login_view = "auth.login"
mongo.init_app(app)
@login_manager.user_loader
def load_user(user_id):
from weddingwebsite.models import Guest
from flask_pymongo import ObjectId
guest = mongo.db.guests.find_one({"_id": ObjectId(user_id)})
if guest is None:
return None
return Guest(**guest)
return app
def register_blueprints(app):
# Register Blueprints
from . import views
from weddingwebsite import admin
from weddingwebsite import auth
from weddingwebsite.lfgs import bp as lfgs_bp
app.register_blueprint(views.views)
app.register_blueprint(auth.auth)
app.register_blueprint(admin.admin)
app.register_blueprint(lfgs_bp)
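# Illustrative usage sketch (added, not part of the original module): running the factory with
# Flask's built-in development server. The debug flag below is an assumption for local use only.
if __name__ == "__main__":
    app = create_app()
    app.run(debug=True)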
|
StarcoderdataPython
|
3263506
|
import tempfile
import warnings
from pathlib import Path
import pandas as pd
import pytest
from gobbli.dataset.cmu_movie_summary import MovieSummaryDataset
from gobbli.dataset.newsgroups import NewsgroupsDataset
from gobbli.experiment.classification import (
ClassificationExperiment,
ClassificationExperimentResults,
)
from gobbli.inspect.evaluate import DEFAULT_METRICS
from gobbli.model.bert import BERT
from gobbli.model.fasttext import FastText
from gobbli.model.majority import MajorityClassifier
from gobbli.model.mtdnn import MTDNN
from gobbli.model.use import USE
from gobbli.util import dir_to_blob
def test_classification_results_checkpoint(tmpdir):
# Verify checkpoints can be extracted correctly regardless of format
tempdir_path = Path(tmpdir)
checkpoint_path = tempdir_path / "test_checkpoint"
checkpoint_path.mkdir(parents=True)
checkpoint_file = checkpoint_path / "checkpoint.txt"
checkpoint_contents = "test"
checkpoint_file.write_text(checkpoint_contents)
checkpoint_bytes = dir_to_blob(checkpoint_path)
common_args = {
"training_results": [],
"labels": [],
"X": [],
"y_true": [],
"y_pred_proba": pd.DataFrame(),
}
bytes_results = ClassificationExperimentResults(
**common_args,
best_model_checkpoint=checkpoint_bytes,
best_model_checkpoint_name=checkpoint_file.name,
)
path_results = ClassificationExperimentResults(
**common_args,
best_model_checkpoint=checkpoint_path,
best_model_checkpoint_name=checkpoint_file.name,
)
# Bytes checkpoint, no base_path (results object creates tempdir)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
"No base_path provided; checkpoint extracting to temporary directory.",
)
bytes_checkpoint = bytes_results.get_checkpoint()
assert bytes_checkpoint.read_text() == checkpoint_contents
# Bytes checkpoint, base path
with tempfile.TemporaryDirectory() as test_dir:
test_dir_path = Path(test_dir) / "test"
bytes_checkpoint = bytes_results.get_checkpoint(base_path=test_dir_path)
assert bytes_checkpoint.parent == test_dir_path
assert bytes_checkpoint.read_text() == checkpoint_contents
# Path checkpoint, no base path
path_checkpoint = path_results.get_checkpoint()
assert path_checkpoint == checkpoint_path / checkpoint_file
assert path_checkpoint.read_text() == checkpoint_contents
# Path checkpoint, base path
with tempfile.TemporaryDirectory() as test_dir:
test_dir_path = Path(test_dir) / "test"
path_checkpoint = path_results.get_checkpoint(base_path=test_dir_path)
assert path_checkpoint.parent == test_dir_path
assert path_checkpoint.read_text() == checkpoint_contents
@pytest.mark.parametrize(
"model_cls,valid", [(FastText, True), (BERT, True), (MTDNN, True), (USE, False)]
)
def test_classification_init_validation(model_cls, valid):
if valid:
ClassificationExperiment._validate_model_cls(model_cls)
else:
with pytest.raises(ValueError):
ClassificationExperiment._validate_model_cls(model_cls)
@pytest.mark.parametrize(
"bad_value",
[
# Not enough values
((0.8, 0.2),),
# Too many values
((0.6, 0.2, 0.1, 0.1),),
# sum > 1
((0.7, 0.2, 0.2),),
# sum < 1
((0.6, 0.2, 0.1),),
],
)
def test_classification_run_validation(bad_value):
with pytest.raises(ValueError):
ClassificationExperiment._validate_split(bad_value)
@pytest.mark.parametrize(
"model_cls,dataset_cls,param_grid,limit,ray_local_mode",
[
# Can't use the TrivialDataset here because it's too small for the standard
# train/valid/test split
# Trivial model, no ray
(MajorityClassifier, NewsgroupsDataset, {}, 1000, True),
# "Real" model/dataset, use ray cluster
(
FastText,
NewsgroupsDataset,
{
"lr": [0.1, 0.01],
"ws": [5],
"dim": [50],
"word_ngrams": [1],
"autotune_duration": [5],
},
200,
False,
),
# "Real" model/multilabel dataset, use ray cluster
(
FastText,
MovieSummaryDataset,
{
"lr": [0.1, 0.01],
"ws": [5],
"dim": [50],
"word_ngrams": [1],
"autotune_duration": [5],
},
200,
False,
),
# "Real" model/dataset with more complicated checkpoint structure, use ray cluster
# Use smaller limit since this model takes a while to train
(BERT, NewsgroupsDataset, {}, 50, False),
],
)
def test_classification_run(
request, model_cls, dataset_cls, param_grid, limit, ray_local_mode, gobbli_dir
):
if model_cls == BERT:
pytest.skip(
"BERT model takes up too much disk space; this test is currently disabled"
)
dataset = dataset_cls.load()
exp = ClassificationExperiment(
model_cls=model_cls,
dataset=dataset,
param_grid=param_grid,
task_num_cpus=1,
task_num_gpus=0,
worker_gobbli_dir=gobbli_dir,
worker_log_level=request.config.getoption("worker_log_level"),
limit=limit,
ignore_ray_initialized_error=True,
ray_kwargs={"local_mode": ray_local_mode, "include_webui": False},
)
results = exp.run()
if not model_cls == MajorityClassifier:
assert results.best_model_checkpoint is not None
assert results.best_model_checkpoint_name is not None
metrics = results.metrics()
assert len(metrics) == len(DEFAULT_METRICS)
for metric, value in metrics.items():
assert isinstance(value, float)
metrics_report = results.metrics_report()
assert len(metrics_report) > 0
k = 5
errors = results.errors(k=k)
for label, (false_positives, false_negatives) in errors.items():
assert len(false_positives) <= k
assert len(false_negatives) <= k
errors_report = results.errors_report(k=k)
assert len(errors_report) > 0
# Verify the plot runs without errors
results.plot()
|
StarcoderdataPython
|
4823025
|
from __future__ import division
import numpy as np
from tf.transformations import quaternion_from_euler, euler_from_quaternion, random_quaternion
from msg_helpers import numpy_quat_pair_to_pose
from geometry_msgs.msg import Quaternion
'''
A file to assist with some math that is commonly used in robotics
Some of the functions here are shared with the UF Machine
Intelligence Lab's SubjuGator robot. All hail <NAME>,
may he reclaim his honor.
'''
def normalize(vector):
return vector / np.linalg.norm(vector)
def compose_transformation(R, t):
    '''Compose a homogeneous transformation from a rotation matrix R and a translation vector t'''
transformation = np.eye(4)
transformation[:3, :3] = R
transformation[3, :3] = t
return transformation
def quat_to_euler(q):
''' Approximate a quaternion as a euler rotation vector'''
euler_rot_vec = euler_from_quaternion([q.x, q.y, q.z, q.w])
final = np.array(([euler_rot_vec[0], euler_rot_vec[1], euler_rot_vec[2]]))
return final
def euler_to_quat(rotvec):
''' convert a euler rotation vector into a ROS quaternion '''
quat = quaternion_from_euler(rotvec[0], rotvec[1], rotvec[2])
return Quaternion(quat[0], quat[1], quat[2], quat[3])
def random_pose(_min, _max):
''' Gives a random pose in the xyz range `_min` to `_max` '''
pos = np.random.uniform(low=_min, high=_max, size=3)
quat = random_quaternion()
return numpy_quat_pair_to_pose(pos, quat)
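# Illustrative round-trip sketch (added): Euler angles -> quaternion -> Euler angles using the
# helpers above. Assumes a ROS Python environment where the imports at the top of this file
# (tf.transformations, geometry_msgs, msg_helpers) are available; the angles are arbitrary.
if __name__ == '__main__':
    rotvec = np.array([0.1, -0.2, 0.3])    # roll, pitch, yaw in radians
    q = euler_to_quat(rotvec)               # geometry_msgs/Quaternion
    print(quat_to_euler(q))                 # approximately [0.1, -0.2, 0.3]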
|
StarcoderdataPython
|
24352
|
<filename>tests/test_article.py
import unittest
from app.models import Article
class ArticleTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Article class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_article = Article('NewsDaily', 'NewsDailyTrue','<NAME>', 'Hummus...thoughts?','Literally talking about hummus sir','www.newsdaily.net','www.newsdaily.net/picOfHummus6', '2020/2/3', 'lorem gang et all')
def test_instance(self):
self.assertTrue(isinstance(self.new_article,Article))
|
StarcoderdataPython
|
63334
|
#!/usr/bin/env python3
import sys
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class ImageGrabber:
def __init__(self):
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("main_camera/image_raw", Image, self.callback)
def callback(self, data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
cv2.imshow("Image window", cv_image)
cv2.waitKey(3)
if __name__ == '__main__':
rospy.init_node('image_grabber', anonymous=True)
ig = ImageGrabber()  # create the subscriber only after the node has been initialised
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv2.destroyAllWindows()
|
StarcoderdataPython
|
1784281
|
from openpyxl import Workbook, load_workbook
from halo import Halo
import time
from datetime import date, timedelta, datetime
from copy import copy
class XCel:
def __init__(self, config):
p1 = time.time()
self.config = config
spin = Halo(text="Opening template file: {0}".format(self.config['SALE_REPORT_TEMPLATE']), spinner='dots')
spin.start()
self.wb = load_workbook(self.config['SALE_REPORT_TEMPLATE'], keep_vba=True)
self.ws = self.wb[self.config['SHEET_DATA_RAW']]
self.ws2 = self.wb[self.config['SHEET_MERCHANT']]
p2 = time.time()
spin.stop()
print("[{0}] Excel file opened!".format(p2-p1))
def __del__(self):
self.wb.close()
def save(self, filename="./.data/Summary-sale-report-{0}.xls"):
p1 = time.time()
spin = Halo(text="Begin saving file to: {0}".format(filename), spinner = 'dots')
spin.start()
# For more date format:https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
yesterday = date.today() - timedelta(1)
self.wb.save(filename=filename.format(yesterday.strftime("%Y%m%d")))
p2 = time.time()
spin.stop()
print("[{0}] Excel file saved!".format(p2-p1))
def detectBlank(self, col=1, ws=1):
i = 1
if ws == 1:
wsx = self.ws
elif ws == 2:
wsx = self.ws2
while True:
if wsx.cell(column=col, row=i).value is None:
return i
i = i + 1
# Begin appending datas into first blank cell of the col
# By default, append to ws1 = Data raw
def appendNext(self, datas, col, ws=1, firstColDatetime=True):
p1 = time.time()
print("Begin appendNext with firstCol={0}".format(col))
# First, detect the first blank row
row = self.detectBlank(ws=ws, col=col)
oriRow = row
oriCol = col
# check ws
if ws == 1:
wsx = self.ws
elif ws == 2:
wsx = self.ws2
# get one sample format of 1st col
cellx = wsx.cell(column=col, row=row-1)
# Now writting, datas is a list, data is tuple
for data in datas:
print("Writing: {0} at row/col: {1}/{2}".format(data, row, col))
# data is a tuple, dat is an element
for dat in data:
if firstColDatetime == True and col == oriCol:
datx = datetime.strptime(dat,"%m/%d/%Y")
else:
datx = dat
wsx.cell(column=col, row=row, value=datx)
col = col + 1
row = row + 1
col = oriCol
# Now copy format for first col
print("Begin formatting rows ...")
for rows in wsx.iter_rows(min_row=oriRow, max_row=row, min_col=oriCol, max_col=oriCol):
for cell in rows:
if cellx.has_style:
cell.number_format = copy(cellx.number_format)
cell.alignment = copy(cellx.alignment)
p2 = time.time()
print("[{0}] appendNext complete successfully".format(p2-p1))
return True
def appendMegabank(self, datas, firstColDatetime=True):
print("Begin append data for Megabank")
return self.appendNext(datas, col=1)
def appendTopup(self, datas, firstColDatetime=True):
print("Begin append data for Topup")
return self.appendNext(datas, col=16)
def appendSQLTopup(self, datas, firstColDatetime=True):
print("Begin append data for SQL Topup")
return self.appendNext(datas, col=16)
def appendSAT(self, datas, firstColDatetime=True):
print("Begin append data for SAT")
return self.appendNext(datas, col=24)
def appendMC(self, datas, firstColDatetime=True):
print("Begin append data for MC")
return self.appendNext(datas, col=51)
def appendVA(self, datas, firstColDatetime=True):
print("Begin append data for VA")
return self.appendNext(datas, col=59)
def appendMD(self, datas, firstColDatetime=True):
print("Begin append data for MD")
return self.appendNext(datas, col=67)
def appendFD(self, datas, firstColDatetime=True):
print("Begin append data for FD")
return self.appendNext(datas, col=75)
def appendVERIFY(self, datas, firstColDatetime=True):
print("Begin append data for VERIFY")
return self.appendNext(datas, col=82)
def appendTHS(self, datas, firstColDatetime=True):
print("Begin append data for THS")
return self.appendNext(datas, col=88)
def appendNewMerchant(self, datas):
print("Begin append data for New Merchant")
return self.appendNext(datas, col=1, ws=2)
# Write something
# ws.cell(column=2, row=3, value="ahihi")
if __name__ == "__main__":
pass
|
StarcoderdataPython
|
140553
|
from django import forms
from apps.Testings.models import Phase
from .models import Argument, Source, Command
from django.utils.safestring import mark_safe
class ArgumentForm(forms.ModelForm):
class Meta:
model = Argument
fields = '__all__'
widgets = {
'command' : forms.HiddenInput(),
'name': forms.TextInput(attrs={'data-length': 30, 'id': 'args_name'}),
'description': forms.Textarea(attrs={'class': 'materialize-textarea',
'data-length': 70, 'id': 'args_description'}),
}
def __init__(self, *args, **kwargs):
cmd = None
try:
cmd = kwargs.pop('cmd')
except KeyError:
pass
super(ArgumentForm, self).__init__(*args, **kwargs)
if cmd:
self.initial["command"] = cmd.id
self.fields['include'].queryset = Argument.objects.filter(command=cmd)
self.fields['exclude'].queryset = Argument.objects.filter(command=cmd)
try:
if self.instance:
self.fields['include'].queryset = Argument.objects.filter(command=self.instance.command).exclude(id = self.instance.id)
self.fields['exclude'].queryset = Argument.objects.filter(command=self.instance.command).exclude(id = self.instance.id)
else:
self.initial["command"] = cmd.id
except:
pass
class PhaseForm(forms.ModelForm):
class Meta:
model = Phase
fields = {
'name',
'product'
}
def __init__(self, *args, **kwargs):
"""This filter only for sources in the category 4(Robot)"""
super(PhaseForm, self).__init__(*args, **kwargs)
self.fields['product'].queryset = Source.objects.filter(category=3)
class SourceProductForm(forms.ModelForm):
path = forms.CharField(widget=forms.TextInput(), required=False)
regex = forms.CharField(widget=forms.Textarea(attrs={'rows': 6, 'class': 'materialize-textarea'}), required=False)
host = forms.CharField(required=False)
port = forms.IntegerField(required=False)
username = forms.CharField(required=False)
password = forms.CharField(widget=forms.PasswordInput(), required=False)
class Meta:
model = Source
fields = [
'name',
'version',
'depends',
'host',
'port',
'username',
'password',
'path',
'regex',
]
labels = {"name": "Product Name", "depends": "Dependence Requirement (Optional)"}
def __init__(self, *args, **kwargs):
"""This filter only for sources in the category 4(Robot)"""
super(SourceProductForm, self).__init__(*args, **kwargs)
self.fields['depends'].queryset = Source.objects.filter(category=4)
self.fields[
'regex'].initial = '( {2}-\w+, --\w+[ \\n=]| {2}-\w+[ \\n=]| {2}--\w+[ \\n=]| {2}--\w+-\w+[ \\n=]| {2}-\w+, --\w+-\w+[ \\n=])(?=[ <]*)'
class SourceEditProductForm(forms.ModelForm):
class Meta:
model = Source
fields = [
'name',
'version',
'depends',
]
labels = {"name": "Product Name", "depends": "Dependence Requirement (Optional)"}
def __init__(self, *args, **kwargs):
"""This filter only for sources in the category 4(Robot)"""
super(SourceEditProductForm, self).__init__(*args, **kwargs)
self.fields['depends'].queryset = Source.objects.filter(category=4)
class SourceRobotForm(forms.ModelForm):
zip_file = forms.FileField()
class Meta:
model = Source
fields = [
'version',
'zip_file'
]
labels = {"version": "Robot Framework Version"}
class SourceLibraryForm(forms.ModelForm):
url = forms.CharField(label='Documentation URL')
class Meta:
model = Source
fields = [
'name',
'version',
'url',
'depends',
]
labels = {"name": "Library Name", "depends": "Robot Version Requirement"}
def __init__(self, *args, **kwargs):
"""This filter only for sources in the category 4(Robot)"""
super(SourceLibraryForm, self).__init__(*args, **kwargs)
self.fields['depends'].queryset = Source.objects.filter(category=4)
class SourceEditLibraryForm(forms.ModelForm):
class Meta:
model = Source
fields = [
'name',
'version',
'depends',
]
labels = {"name": "Library Name", "depends": "Robot Version Requirement"}
def __init__(self, *args, **kwargs):
"""This filter only for sources in the category 4(Robot)"""
super(SourceEditLibraryForm, self).__init__(*args, **kwargs)
self.fields['depends'].queryset = Source.objects.filter(category=4)
class CommandForm(forms.ModelForm):
class Meta:
model = Command
fields = [
'name',
'source',
'description',
]
labels = {
"name" : mark_safe('<b>Command <i style="float: right" class="tiny material-icons tooltipped" data-position="bottom" data-tooltip="Provide the command that will be used in the product">help_outline</i></b>'),
"source" : mark_safe('<b>Source <i style="float: right" class="tiny material-icons tooltipped" data-position="bottom" data-tooltip="Select the product associated with the new command">help_outline</i></b>'),
"description" : mark_safe('<b>Description <i style="float: right" class="tiny material-icons tooltipped" data-position="bottom" data-tooltip="Provide a brief description about the command">help_outline</i></b>'),
}
widgets = {
'name': forms.TextInput(attrs={'data-length': 30}),
'description': forms.Textarea(attrs={'class': 'materialize-textarea', 'data-length': 70})
}
def __init__(self, *args, **kwargs):
""" This filter exclude the control flow sentences """
super(CommandForm, self).__init__(*args, **kwargs)
self.fields['source'].queryset = Source.objects.exclude(category=1)
""" This Make required the source field """
self.fields['source'].required = True
|
StarcoderdataPython
|
59572
|
# -*- coding: utf-8 -*-
""" make Classes and functions available via a simple import
Copyright 2017, <<EMAIL>>
See COPYRIGHT for details
"""
# first party
from simplecasper.api import (
CasperAPI,
get_casper_credentials
)
from simplecasper.util import (
SimpleHTTPJSON,
SWVersions,
to_file
)
__all__ = [
'SimpleHTTPJSON', 'to_file', 'SWVersions', 'CasperAPI',
'get_casper_credentials'
]
|
StarcoderdataPython
|
1717490
|
# 🚨 Don't change the code below 👇
year = int(input("Which year do you want to check? "))
# 🚨 Don't change the code above 👆
#Write your code below this line 👇
#Test if the year is evenly divisible by 100
if year % 100 == 0:
#Tests if the year is evenly divisible by 400
if year % 400 == 0:
#The year is both evenly divisible by 100 and 400 -> leap year
print("Leap year.")
else:
#The year is evenly divisible by 100 but not by 400 -> not leap year
print("Not leap year.")
#Tests if the year is evenly divisible by 4
elif year % 4 == 0:
#The year isn't evenly divisible by 100 but is evenly divisible by 4 -> leap year
print("Leap year.")
else:
#The year isn't evenly divisible neither by 100 nor 4 -> not leap year
print("Not leap year.")
|
StarcoderdataPython
|
18061
|
# -*- coding: utf-8 -*-
import botocore.exceptions
import logging
import dockerfilegenerator.lib.constants as constants
import dockerfilegenerator.lib.exceptions as exceptions
import dockerfilegenerator.lib.versions as versions
import dockerfilegenerator.lib.jsonstore as jsonstore
import dockerfilegenerator.lib.s3store as s3store
import dockerfilegenerator.lib.github as github
logger = logging.getLogger()
TRACKED_TOOLS = {
"terraform": versions.get_latest_hashicorp_terraform_version,
"packer": versions.get_latest_hashicorp_packer_version,
"go": versions.get_latest_golango_go_version
}
class UtilsMixin:
@property
def tools_current_versions(self):
if not hasattr(self, "_tools_current_versions"):
self._tools_current_versions = None
if self._tools_current_versions is None:
self._tools_current_versions = dict(
(tool_name, self.dockerfile.version(tool_name))
for tool_name in self.dockerfile.json)
return self._tools_current_versions
@property
def tools_next_versions(self):
if not hasattr(self, "_tools_next_versions"):
self._tools_next_versions = None
if self._tools_next_versions is None:
self._tools_next_versions = dict(
(tool_name, TRACKED_TOOLS[tool_name]())
for tool_name in TRACKED_TOOLS)
return self._tools_next_versions
def update_dockerfile_versions(self):
dockerfile_changed = False
for tool in self.tools_current_versions:
# TODO: Refactor this method...
if self.dockerfile.force_version(tool):
logger.info("Update versions: %s has force_version" % tool)
continue
if tool == self.dockerfile.dockerfile_repo_name:
continue
current_version = self.tools_current_versions[tool]
next_version = self.tools_next_versions.get(tool, None)
if next_version is None:
logger.info("Update versions: %s has no next version" % tool)
continue
if current_version == next_version:
logger.info(
"Update versions: %s has no changed version" % tool)
continue
self.dockerfile.set_version(tool, next_version)
logger.info("Update versions: %s has next version %s" %
(tool, next_version))
dockerfile_changed = True
if dockerfile_changed:
self.dockerfile.set_next_version_dockerfile()
return dockerfile_changed
class DockerfileGeneratorLambda(UtilsMixin):
def __init__(self):
self.s3bucket = s3store.get_s3_bucket_manager()
self.dockerfile_repo = github.get_github_repository(
constants.DOCKERFILE_GITHUB_REPO)
self.dockerfile = jsonstore.get_dockerfile(self.dockerfile_repo)
self._internal_state = None
self.exit_code = 0
@property
def internal_state(self):
""" Get the state from AWS S3 json file, or use the one from Github,
if there is none."""
if self._internal_state is None:
internal_state = self.s3bucket.read_object(
constants.INTERNAL_STATE_FILE)
if internal_state is None:
logger.info("Internal state: No state from S3")
internal_state = self.dockerfile.dump
self.save_state_to_s3(internal_state)
self._internal_state = jsonstore.Store(internal_state)
return self._internal_state
def update_files_on_github(self):
template_dockerfile = self.dockerfile_repo.get_file_contents(
constants.TEMPLATE_GITHUB_DOCKERFILE_PATH)
template_readme = self.dockerfile_repo.get_file_contents(
constants.TEMPLATE_GITHUB_README_PATH)
commit_msg = self.dockerfile.update_summary(self.internal_state)
commit_files = [
(constants.INTERNAL_STATE_FILE, self.dockerfile.dump),
("Dockerfile", template_dockerfile.format(
**self.dockerfile.template_variables)),
("README.md", template_readme.format(
**self.dockerfile.template_variables))]
logger.info("Updating files on Github with message:\n\t%s" %
commit_msg)
self.dockerfile_repo.commit(commit_files, commit_msg)
def save_state_to_s3(self, content):
try:
logger.info("Saving state to S3")
self.s3bucket.write_object(constants.INTERNAL_STATE_FILE, content)
except (botocore.exceptions.ClientError, Exception) as e:
raise exceptions.LambdaException(
"Error: Uploading object to s3 bucket: %s" % (str(e)))
def main(self):
if self.update_dockerfile_versions():
self.update_files_on_github()
self.save_state_to_s3(self.dockerfile.dump)
return self.exit_code # Making Lambda Service happy
def lambda_handler():
return DockerfileGeneratorLambda().main()
|
StarcoderdataPython
|
179698
|
# 020 - The same teacher from the previous challenge wants to draw a random order for the students' presentations. Write a program that reads
# the names of four students and shows the drawn order:
import random
lista = []
for c in range(1,5):
nome = str(input(f'Digite o nome do aluno {c}: '))
lista.append(nome)
print(lista)
random.shuffle(lista)
print(lista)
|
StarcoderdataPython
|
1616572
|
# Linear Search Approach
def first_and_last(arr, target):
for i in range(len(arr)):
if arr[i] == target:
start = i
while i+ 1 < len(arr) and arr[i+1]== target:
i += 1
return [start, i]
return [-1, -1]
arr = [1,2,3,4,5,5,5,6,7]
target = 5
# T(n) = O(n)
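# Illustrative call (added for clarity): in the array above, the target 5 occupies indices 4 through 6.
print(first_and_last(arr, target))  # -> [4, 6]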
|
StarcoderdataPython
|
3202648
|
import numpy as np
from fitter import *
from scipy.constants import hbar
cons_w = 2*3.14*6.002e9
cons_ke = 2*3.14*0.017e6
cons_k = 2*3.14*1.4e6
cons_delta = 0
def Plin(p):
return 10.**(p/10.-3.)
def photons(power):
return Plin(power)/(hbar*cons_w)*(cons_ke/((cons_k/2)**2+cons_delta**2))
path = r'D:\data\20200223\074606_Power_Sweep_229mV'
data_name = path+path[16:]+r'.dat'
data = np.loadtxt(data_name, unpack=True)
n = 27
power= np.array(np.array_split(data[0],n))
freq = np.array_split(data[1],n)[0]
real = np.array_split(data[2],n)
imag = np.array_split(data[3],n)
absol = np.array_split(data[4],n)
f = Fitter(S21r)
# fr = Fitter(custom_real)
# fm = Fitter(custom_imag)
# plt.plot(np.real(d.data),np.imag(d.data))
# plt.show()
k = np.zeros(n)
f0 = np.zeros(n)
Q = np.zeros(n)
k_err = np.zeros(n)
f0_err = np.zeros(n)
Q_err = np.zeros(n)
left1 = 151
right1 = 246
for i in range(11):
result = f.fit(freq[left1:right1], absol[i][left1:right1], print_report = True)
# f.plot()
k[i] = np.abs(result.params['k'].value)
f0[i] = result.params['f0'].value
Q[i] = f0[i]/k[i]
k_err[i] = result.params['k'].stderr
f0_err[i] = result.params['f0'].stderr
Q_err[i] = (f0_err[i]/f0[i] + k_err[i]/k[i])*(f0[i]/k[i])
left = 81
right = 141
for i in range(20,27):
result = f.fit(freq[left:right], absol[i][left:right], print_report = True)
# f.plot()
k[i] = np.abs(result.params['k'].value)
f0[i] = result.params['f0'].value
Q[i] = f0[i]/k[i]
k_err[i] = result.params['k'].stderr
f0_err[i] = result.params['f0'].stderr
Q_err[i] = (f0_err[i]/f0[i] + k_err[i]/k[i])*(f0[i]/k[i])
# power = np.delete(power.T[0],[35])
# Q = np.delete(Q,[35])
# Q_err = np.delete(Q_err,[35])
# k = np.delete(k,[35])
# k_err = np.delete(k_err,[35])
# print(power)
fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True)
ax.errorbar(photons(power.T[0]-80), Q/1e3, fmt='.', yerr = Q_err/1e3 ,capsize=2, elinewidth=1, markeredgewidth=2)
# ax.plot(power.T[0],(f0/ki)/1e3)
ax.set_xlabel(r'Photon number')
ax.set_ylabel(r'Q (kU)')
ax.set_xscale('log')
ax.set_title(path[8:])
plt.show()
# fig1, ax1 = plt.subplots(nrows=1, ncols=1, sharex=True)
# ax1.errorbar(photons(power.T[0]-80), k/1e6, fmt='.', yerr = k_err/1e6 ,capsize=2, elinewidth=1, markeredgewidth=2)
# # ax.plot(power.T[0],(f0/ki)/1e3)
# ax1.set_xlabel(r'Photon number')
# ax1.set_ylabel(r'Linewidth (MHz)')
# ax1.set_xscale('log')
# ax1.set_title(path[8:])
# plt.show()
# np.savetxt(r'D:\data\20200217\Analysis_quality_factor\500M_below_cavity_229.txt', (photons(power.T[0]-80), k/1e6, k_err/1e6, Q/1e3, Q_err/1e3))
# fr.fit(freq, real[-1], print_report = True)
# fr.plot()
# fm.fit(freq, imag[-1], print_report = True)
# fm.plot()
|
StarcoderdataPython
|
3219300
|
<gh_stars>0
# © 2021 <NAME> (initOS GmbH)
# License Apache-2.0 (http://www.apache.org/licenses/).
import configparser
import os
import re
import sys
from contextlib import closing, contextmanager
import yaml
from . import base, utils
SubstituteRegex = re.compile(r"\$\{(?P<var>(\w|:)+)\}")
def load_config_arguments(args):
parser = utils.default_parser("config")
parser.add_argument("option", nargs="?", help="Show only specific information")
return parser.parse_known_args(args)
# pylint: disable=too-many-public-methods
class Environment:
""" Bootstrap environment """
def __init__(self, cfg):
utils.info("Loading configuration file")
self._config = {}
self._load_config(cfg)
self._load_config("odoo.versions.yaml", False)
self._post_process_config()
def _substitute(self, match, sub=True):
""" Replaces the matched parts with the variable """
var = match.groupdict().get("var", "").split(":")
if not all(var):
raise SyntaxError()
result = self.get(*var)
return str(result) if sub else result
def _substitute_string(self, line):
""" Substitute variables in strings """
match = SubstituteRegex.fullmatch(line)
if match:
return self._substitute(match, False)
return SubstituteRegex.sub(self._substitute, line)
def _substitute_dict(self, data):
""" Substitute variables in dictionaries """
tmp = {}
for sec, section in data.items():
if isinstance(section, str):
tmp[sec] = self._substitute_string(section)
elif isinstance(section, list):
tmp[sec] = self._substitute_list(section)
elif isinstance(section, dict):
tmp[sec] = self._substitute_dict(section)
else:
tmp[sec] = section
return tmp
def _substitute_list(self, ls):
""" Substitute variables in lists """
tmp = []
for x in ls:
if isinstance(x, dict):
tmp.append(self._substitute_dict(x))
elif isinstance(x, str):
tmp.append(self._substitute_string(x))
elif isinstance(x, list):
tmp.append(self._substitute_list(x))
else:
tmp.append(x)
return tmp
def _post_process_config(self):
""" Post process the configuration by replacing variables """
# Include environment variables first for later substitutions
for env, keys in base.ENVIRONMENT.items():
if os.environ.get(env):
self.set(*keys, value=os.environ[env])
options = self.get("odoo", "options", default={})
for key, value in options.items():
options[key] = os.environ.get(f"ODOO_{key.upper()}") or value
# Run the substitution on the configuration
self._config = self._substitute_dict(self._config)
# Combine the addon paths
current = set(self.get("odoo", "addons_path", default=[]))
current.update(
{
section.get("addon_path", sec)
for sec, section in self.get("repos", default={}).items()
}
)
# Generate the addon paths
current = set(map(os.path.abspath, current))
self.set("odoo", "options", "addons_path", value=current)
def get(self, *key, default=None):
""" Get a specific value of the configuration """
data = self._config
try:
for k in key:
data = data[k]
if data is None:
return default
return data
except KeyError:
return default
def opt(self, *key, default=None):
""" Short cut to directly access odoo options """
return self.get("odoo", "options", *key, default=default)
def set(self, *key, value=None):
""" Set a specific value of the configuration """
data = self._config
for k in key[:-1]:
data = data[k]
data[key[-1]] = value
def _load_config(self, cfg, raise_if_missing=True):
""" Load and process a configuration file """
if not os.path.isfile(cfg) and not raise_if_missing:
utils.warn(f" * {cfg}")
return
utils.info(f" * {cfg}")
with open(cfg) as fp:
options = yaml.load(fp, Loader=yaml.FullLoader)
# Load all base configuration files first
extend = options.get(base.SECTION, {}).get("extend")
if isinstance(extend, str):
self._load_config(extend)
elif isinstance(extend, list):
for e in extend:
self._load_config(e)
elif extend is not None:
raise TypeError(f"{base.SECTION}:extend must be str or list")
# Merge the configurations
self._config = utils.merge(self._config, options, replace=["merges"])
def _init_odoo(self):
""" Initialize Odoo to enable the module import """
path = self.get(base.SECTION, "odoo")
if not path:
utils.error(f"No {base.SECTION}:odoo defined")
return False
path = os.path.abspath(path)
if not os.path.isdir(path):
utils.error("Missing odoo folder")
return False
if path not in sys.path:
sys.path.append(path)
return path
@contextmanager
def env(self, db_name, rollback=False):
""" Create an environment from a registry """
# pylint: disable=C0415,E0401
import odoo
# Get all installed modules
reg = odoo.registry(db_name)
with closing(reg.cursor()) as cr:
yield odoo.api.Environment(cr, odoo.SUPERUSER_ID, {})
if rollback:
cr.rollback()
else:
cr.commit()
@contextmanager
def _manage(self):
"""Wrap the manage to resolve version differrences"""
import odoo
import odoo.release
if odoo.release.version_info >= (15,):
yield
else:
with odoo.api.Environment.manage():
yield
def generate_config(self):
""" Generate the Odoo configuration file """
utils.info("Generating configuration file")
cp = configparser.ConfigParser()
# Generate the configuration with the sections
options = self.get("odoo", "options", default={})
for key, value in sorted(options.items()):
if key == "load_language":
continue
if "." in key:
sec, key = key.split(".", 1)
else:
sec = "options"
if not cp.has_section(sec):
cp.add_section(sec)
if isinstance(value, (set, list)):
cp.set(sec, key, ",".join(map(str, value)))
elif value is None:
cp.set(sec, key, "")
else:
cp.set(sec, key, str(value))
os.makedirs(os.path.dirname(base.ODOO_CONFIG), exist_ok=True)
# Write the configuration
with open(base.ODOO_CONFIG, "w+") as fp:
cp.write(fp)
def config(self, args=None):
""" Simply output the rendered configuration file """
args, _ = load_config_arguments(args or [])
if args.option:
return yaml.dump(self.get(*args.option.split(":")))
return yaml.dump(self._config)
|
StarcoderdataPython
|
3259635
|
<reponame>sunlightlabs/regulations-scraper
def all_aliases():
import itertools
from regs_common.util import get_db
db = get_db()
return itertools.chain.from_iterable(
itertools.imap(
lambda entity: [(alias, entity['_id']) for alias in entity.get('filtered_aliases', [])],
db.entities.find()
)
)
def load_trie_from_mongo():
from oxtail import matching
matching._entity_trie = matching.build_token_trie(
all_aliases(),
matching._blacklist
)
|
StarcoderdataPython
|
4813178
|
<reponame>bogdandm/attrs-api-client<filename>json_to_models/utils.py
import json
from functools import wraps
from typing import Callable, Optional, Set
class Index:
def __init__(self):
self.ch = 'A'
self.i = 1
def __call__(self, *args, **kwargs):
value = f'{self.i}{self.ch}'
ch = chr(ord(self.ch) + 1)
if ch <= 'Z':
self.ch = ch
else:
self.ch = 'A'
self.i += 1
return value
def json_format(x) -> str:
return json.dumps(x, indent=4, default=str, ensure_ascii=False)
def distinct_words(*words: str) -> Set[str]:
"""
Filters strings so that only the shortest unique strings remain in the resulting set (strings that merely extend another entry are dropped), e.g.
>>> distinct_words('test', 'another_test', 'foo', 'qwerty_foo_bar')
{'test', 'foo'}
:param words:
:return:
"""
words = set(words)
filtered_words = set()
for name in words:
flag = True
for other in list(filtered_words):
if name in other:
filtered_words.add(name)
filtered_words.remove(other)
flag = False
elif other in name:
flag = False
if flag:
filtered_words.add(name)
return filtered_words
def convert_args(fn: Callable, *args_converters: Optional[type], **kwargs_converters: Optional[type]) -> Callable:
"""
Decorator. Apply ``args_converters`` to callable arguments and kwargs_converters to kwargs.
If converter is None then argument will passed as is.
:param fn: Function or class
:param args_converters: Arguments converters
:param kwargs_converters: Keyword arguments converters
:return: Callable wrapper
"""
@wraps(fn)
def wrapper(*args, **kwargs):
converted = (
t(value) if t else value
for value, t in zip(args, args_converters)
)
kwargs_converted = {
name: kwargs_converters[name](kwargs[name]) if kwargs_converters.get(name, None) else kwargs[name]
for name in kwargs.keys()
}
if len(args_converters) < len(args):
remain = args[len(args_converters):]
else:
remain = ()
return fn(*converted, *remain, **kwargs_converted)
return wrapper
def convert_args_decorator(*args_converters: type, method=False, **kwargs_converters):
"""
Decorator factory.
:param args_converters: Arguments converters
:param method: Set to True if decorated function is method or classmethod
:param kwargs_converters: Keyword arguments converters
:return:
"""
def decorator(fn):
if method:
return convert_args(fn, None, *args_converters, **kwargs_converters)
else:
return convert_args(fn, *args_converters, **kwargs_converters)
return decorator
def cached_method(func: Callable):
"""
Decorator to cache method return values
"""
@wraps(func)
def cached_fn(self, *args):
if getattr(self, '__cache__', None) is None:
setattr(self, '__cache__', {})
value = self.__cache__.get(args, ...)
if value is Ellipsis:
value = func(self, *args)
self.__cache__[args] = value
return value
return cached_fn
def cached_classmethod(func: Callable):
"""
Decorator to cache classmethod return values
"""
cache = {}
@wraps(func)
def cached_fn(cls, *args):
value = cache.get(args, ...)
if value is Ellipsis:
value = func(cls, *args)
cache[args] = value
return value
return classmethod(cached_fn)
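# Illustrative usage sketch (added for clarity, not part of the original module); the function
# name and arguments below are made up for the example:
if __name__ == "__main__":
    @convert_args_decorator(int, scale=float)
    def scaled(value, scale):
        return value * scale

    print(scaled("3", scale="2.5"))  # both string arguments are coerced, so this prints 7.5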
|
StarcoderdataPython
|
3382476
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import os
class Loss_Synonymy(nn.Module):
"""
This class contains a loss function that uses the sum of ReLu loss to make predictions for the encoded embeddings
in the synonym subspace. A lower and higher bound for synonymy are to be determined. Need to better understand the
equation found in the Asif Ali et al. paper.
"""
def __init__ (self):
super(Loss_Synonymy, self).__init__()
def forward(self, S1_out, S2_out, labels):
result_list = torch.zeros(S1_out.size(0))
#x=synonymy_score, a=S1_out, b=S2_out
for i, (x, a, b) in enumerate(zip(labels, S1_out, S2_out)):
error = torch.zeros(1,1)
#synonymous pairs
if x == 1:
error = F.relu(torch.add(torch.tensor(1), torch.neg(torch.tanh(torch.dist(a, b, 2)))))
#antonymous or irrelevant pairs
else:
error = F.relu(torch.add(torch.tensor(1), torch.tanh(torch.dist(a, b, 2))))
result_list[i] = error
result = result_list.sum()
return result
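# Usage sketch (added for clarity; the batch size and embedding dimension are illustrative assumptions):
# criterion = Loss_Synonymy()
# s1_batch = torch.randn(8, 300)       # encoded embeddings S1_out from the synonym subspace
# s2_batch = torch.randn(8, 300)       # encoded embeddings S2_out from the synonym subspace
# labels = torch.randint(0, 3, (8,))   # 1 = synonym, 2 = antonym, 0 = irrelevant
# loss = criterion(s1_batch, s2_batch, labels)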
class Loss_Antonymy(nn.Module):
"""
This class contains a loss function that uses the sum of ReLu loss to make predictions for the encoded embeddings
in the antonym subspace. A lower and higher bound for antonymy are to be determined. Need to better understand the
equation found in the Asif Ali et al. paper.
"""
def __init__(self):
super(Loss_Antonymy, self).__init__()
def forward(self, S2_out, A1_out, labels):
result_list = torch.zeros(S2_out.size(0))
#x=antonymy_score, a=A1_out, b=S2_out (to ensure trans-transitivity)
for i, (x, a, b) in enumerate(zip(labels, A1_out, S2_out)):
#error1 = antonymous pairs, error2 = non-antonymous pairs
error = torch.zeros((1, 1))
#antonymous pair
if x == 2:
error = F.relu(torch.add(torch.tensor(1), torch.neg(torch.tanh(torch.dist(a, b, 2)))))
#synonymous or irrelevant pair
else:
error = F.relu(torch.add(torch.tensor(1), torch.tanh(torch.dist(a, b, 2))))
result_list[i] = error
loss = result_list.sum()
return loss
class Loss_Labels(nn.Module):
"""
This class is the last portion (L_m) of the general loss function. Here the
predicted synonymy and antonymy scores are concatenated and compared to the
concatenated labeled synonymy and antonymy scores
"""
def __init__(self):
super(Loss_Labels, self).__init__()
def forward(self, synonymy_score, antonymy_score, labels):
batch_size = labels.size(0)
result_list = torch.zeros((batch_size, 2))
for i, (x, a, b) in enumerate(zip(labels, synonymy_score, antonymy_score)):
total_vec = torch.cat((a, b), dim = 0)
probs = F.log_softmax(total_vec, dim = 0) #class probability
pred = torch.argmax(probs, dim = 0) #predicted class
if x == 1:
error = probs[0]
if x == 2:
error = probs[1]
if x == 0: #phase 1 is not meant to distinguish irrelevant pairs
error = 0
result_list[i] = error
loss = torch.neg(result_list.mean())
return loss
class Phase1Accuracy(nn.Module):
"""
This class takes in a batch of synonymy scores, antonymy scores, and labels
to identify the accuracy for the batch in Phase 1 (Distiller)
"""
def __init__(self):
super(Phase1Accuracy, self).__init__()
def forward(self, synonymy_scores, antonymy_scores, labels):
correct_syn = 0
wrong_syn = 0
correct_ant = 0
wrong_ant = 0
correct_irrel = 0
wrong_irrel = 0
syn_size = 0
ant_size = 0
irrel_size = 0
for label, syn_sc, ant_sc in zip(labels, synonymy_scores, antonymy_scores):
total_vec = torch.cat((syn_sc, ant_sc), dim = 0)
probs = F.log_softmax(total_vec, dim = 0) #class probability
pred = torch.argmax(probs, dim = 0) #predicted class
if syn_sc <= 0.4 and ant_sc <= 0.4:
pred = 2
#word pair is synonymous
if label == 1:
syn_size += 1
if pred == 0:
correct_syn += 1
else:
wrong_syn += 1
#word pair is antonymous
if label == 2:
ant_size +=1
if pred == 1:
correct_ant +=1
else:
wrong_ant += 1
#word pair has no relationship
if label == 0:
irrel_size +=1
if pred == 2:
correct_irrel += 1
else:
wrong_irrel += 1
#need to account for division by zero in training batches
if syn_size == 0:
syn_acc = 0
else:
syn_acc = (correct_syn/syn_size)*100
if ant_size == 0:
ant_acc = 0
else:
ant_acc = (correct_ant/ant_size)*100
if irrel_size == 0:
irrel_acc = 0
else:
irrel_acc = (correct_irrel/irrel_size)*100
return [syn_acc, ant_acc, irrel_acc]
def confusion(self, synonymy_scores, antonymy_scores, labels):
"""
helper function to get lists of ground-truths and predictions for the
creation of a confusion matrix
"""
preds = np.ndarray((labels.size()[0], 2))
truths = np.ndarray((labels.size()[0], 2))
for i, (label, syn_sc, ant_sc) in enumerate(zip(labels, synonymy_scores, antonymy_scores)):
preds[i, 0] = syn_sc.item()
preds[i, 1] = ant_sc.item()
if label == 1: #synonymous pair
truths[i, 0] = 1
truths[i, 1] = 0
elif label == 2: #antonymous pair
truths[i, 0] = 0
truths[i, 1] = 1
else: #irrelevant pair
truths[i, 0] = 0
truths[i, 1] = 0
return preds, truths
#feeding the model pretrained weights
class w2v_embedding_pre_trained_weights(nn.Module):
"""
This class contains the pre-training of the Phase_I_NN neural network weights using
a list of words from which a list of weight vectors can be obtained. The list is then
converted to a tensor that can be embedded into the NN model via the from_pretrained() function
"""
def __init__(self, words, model):
super(w2v_embedding_pre_trained_weights, self).__init__()
for i in range(len(words)):
words[i] = model.wv.__getitem__(words[i]).tolist()
weight = torch.tensor(words)
self.embedding = nn.Embedding.from_pretrained(weight)
def forward(self, index):
# index_vector = self.embedding(torch.LongTensor(index))
#Internal function to F.log_softmax not implemented for "Long"
index_vector = self.embedding(index)
return index_vector
class glove_embedding_pre_trained_weights(nn.Module):
"""
This class contains the pre-training of the Phase_I_NN neural network weights using a list of words from which a list of weights can be obtained from a downloaded GloVe embedding dictionary
"""
def __init__(self, words):
super(glove_embedding_pre_trained_weights, self).__init__()
data = '/Users/wesleytatum/Desktop/post_doc/data/glove.6B'
os.chdir(data)
embeddings_dict = {}
with open("glove.6B.50d.txt", 'r') as f:
for line in f:
values = line.split()
word = values[0]
vector = np.asarray(values[1:], "float32")
embeddings_dict[word] = vector
for i in range(len(words)):
words[i] = embeddings_dict[words[i]].tolist()
weight = torch.tensor(words)
self.embedding = nn.Embedding.from_pretrained(weight)
def forward(self, index):
# index_vector = self.embedding(torch.LongTensor(index))
#Internal function to F.log_softmax not implemented for "Long"
index_vector = self.embedding(index)
return index_vector
#Network Utilities
def init_weights(model):
classname = model.__class__.__name__
if classname.find('Linear') != -1:
torch.nn.init.xavier_uniform_(model.weight)
torch.nn.init.zeros_(model.bias)
elif classname.find('Conv2d') != -1:
torch.nn.init.xavier_uniform_(model.weight)
torch.nn.init.zeros_(model.bias)
elif classname.find('BatchNorm') != -1:
torch.nn.init.xavier_uniform_(model.weight)
torch.nn.init.zeros_(model.bias)
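# Usage note (added for clarity): init_weights is meant to be passed to Module.apply,
# e.g. `model.apply(init_weights)`, which visits every submodule and runs the
# classname-based initialisation above on each one.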
|
StarcoderdataPython
|
1600319
|
<filename>oTree/reffort/ajax.py
from django.conf.urls import url
from . import views
urlpatterns = [
url(r"^validate_transcription/$" ,
views.validate_transcription, name="validate_transcription")]
|
StarcoderdataPython
|
13935
|
<reponame>tlh45342/polygon-pull
import datetime
import os
import pandas
from polygon.rest.client import RESTClient
def ts_to_datetime(ts) -> str:
return datetime.datetime.fromtimestamp(ts / 1000.0).strftime('%Y-%m-%d %H:%M')
def pull_day(Symbol, from_):
POLYGON_API_KEY = os.environ.get('POLYGON_API_KEY')
enddate = datetime.datetime.fromisoformat(from_)
enddate += datetime.timedelta(days=1)
enddate = str(enddate)[0:10]
with RESTClient(POLYGON_API_KEY) as client:
resp = client.stocks_equities_aggregates(Symbol, 1, "minute", from_, enddate, unadjusted=False)
#print(f"Minute aggregates for {resp.ticker} between {from_} and {enddate}.")
out = {}
df = pandas.DataFrame(out, columns = ['Datetime', 'Open', 'High', 'Low','Close','Adj Close','Volume'])
for result in resp.results:
#dt = ts_to_datetime(result["t"])
#print(f"{dt}\n\tO: {result['o']}\n\tH: {result['h']}\n\tL: {result['l']}\n\tC: {result['c']} ")
date = {"Datetime": result['t']}
open = {"Open": result['o']}
high = {"High": result['h']}
low = {"Low": result['l']}
close = {"Close": result['c']}
volume = {"Volume": result['v']}
bar = {**date, **open, **high, **low, **close, **volume}
df = df.append(bar,ignore_index=True)
return(df)
# ----------------------------
daystr = "2021-09-10"
df = pull_day("LC", daystr)
fname = r"M:\data\out.csv"
print("Writing: ", fname)
df.to_csv (fname, index = False, header=True)
|
StarcoderdataPython
|
1620129
|
#!/usr/bin/env python
from scipy.stats import poisson
import pickle
from trip_rules import *
from trip_definitions import *
rule_funcs = [rule1, rule2, rule3, rule4, rule5, rule6, rule7, rule8]
def rule_likelihood(test_data, rule_funcs, core_params):
(values, rains) = test_data
res = 0
num_regions = len(values[0][0])
for d in range(len(rains)):
vals = values[d]
rns = rains[d]
today = PlanningDay(weekday = d%5, periods=None)
for i in range(3):
is_raining = rns[i]
current_period = PlanningPeriod(number=i, rain=is_raining, parameters=None, values=None)
params = get_params(today, current_period, rule_funcs, core_params)
res += sum([np.log(poisson.pmf(vals[i][m,n], params[m,n]))
for m in range(num_regions)
for n in range(num_regions) if m!=n])
return res
def plain_likelihood(data, learned_params, with_rain=True):
(values, rains) = data
res = 0
num_regions = len(values[0][0])
for d in range(len(rains)):
vals = values[d]
rns = rains[d]
today = PlanningDay(weekday = d%7, periods=None)
for i in range(3):
is_raining = rns[i]
current_period = PlanningPeriod(number=i, rain=is_raining, parameters=None, values=None)
if with_rain:
params = learned_params[is_raining, i]
else:
params = learned_params[i]
res += sum([np.log(poisson.pmf(vals[i][m,n], params[m,n]))
for m in range(num_regions)
for n in range(num_regions) if m!=n])
return res
def learn_params(train_data, with_rain=True):
values, rains = train_data
values = np.array(values)
rains = np.array(rains)
if with_rain:
rain_options = [False, True]
params = np.zeros_like([values[0]]*len(rain_options), dtype='float64')
for rain in range(len(rain_options)):
for period in range(3):
vals = values[:,period,:,:][rains[:,period]==rain_options[rain]]
params[rain, period] = np.ma.average(vals, axis = 0)
else:
params = np.zeros_like(values[0], dtype='float64')
for period in range(3):
vals = values[:,period,:,:]
params[period] = np.ma.average(vals, axis = 0)
return params
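# Usage sketch (added for clarity; shapes are illustrative assumptions):
# train_data = (values, rains), where values[d] has shape (3 periods, R, R) of trip counts
# and rains[d] is a length-3 sequence of booleans. Then
# params = learn_params(train_data, with_rain=True)  # -> array of shape (2, 3, R, R)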
|
StarcoderdataPython
|
1699415
|
<gh_stars>1-10
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QLineEdit, QFrame, QPushButton, QTableWidget, QHeaderView, QMenu, \
QInputDialog, QTableWidgetItem, QSizePolicy, QMessageBox, QSpacerItem
from PyQt5.QtCore import Qt
from .filter import Filter
from lxml import etree as ET
import re, os
from dependencies.db_editor import DBEditor
from dependencies.auxiliaries import RarityList
from RainyCore.monster import Monster
from RainyCore.spell import Spell
from RainyCore.item import Item
from RainyCore.signals import sNexus
class MyTableWidget(QTableWidget):
NAME_COLUMN = 0
INDEX_COLUMN = 1
COLUMNS = 2
def __init__(self, parent):
QTableWidget.__init__(self)
self.parent = parent
self.setObjectName("SearchableTable_table")
self.setAlternatingRowColors(True)
self.format()
def format(self):
self.setColumnCount(self.COLUMNS)
# self.horizontalHeader().hide()
self.horizontalHeader().setSectionResizeMode(self.NAME_COLUMN, QHeaderView.Stretch)
self.setShowGrid(False)
self.verticalHeader().hide()
# self.setColumnHidden(1, True)
class SearchableTable(QFrame):
NAME_COLUMN = 0
HEADERS = ['Name', 'REFERENCE']
EDITABLE = False
DATABASE_ENTRY_FIELD = 'entry'
ENTRY_CLASS = None
VIEWER_INDEX = None
prev_entry = None
def __init__(self, parent, viewer):
self.old_n = None
self.order = None
self.COLUMNS = len(self.HEADERS)
self.viewer = viewer
self.idx_dict = dict()
QFrame.__init__(self)
self.filter = Filter(self.search_handle)
self.parent = parent
self.search_box = QLineEdit()
self.search_box.setMaximumWidth(parent.SEARCH_BOX_WIDTH)
self.filter_button = QPushButton("Filters")
self.table_layout = QHBoxLayout()
self.table = MyTableWidget(parent)
self.table.horizontalHeader().sectionClicked.connect(self.sort_columns)
self.table.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
# self.table.clicked.connect(self.deselect_check_handle)
self.table.selectionModel().selectionChanged.connect(self.selection_change_handle)
self.table_layout.addWidget(self.table)
self.table_layout.addWidget(self.filter.get_frame())
self.button_bar_layout = QHBoxLayout()
horizontal_layout = QHBoxLayout()
horizontal_layout.addWidget(self.search_box)
horizontal_layout.addWidget(self.filter_button)
list_layout = QVBoxLayout()
list_layout.addLayout(horizontal_layout)
list_layout.addLayout(self.table_layout)
list_layout.addLayout(self.button_bar_layout)
self.setLayout(list_layout)
self.search_box.textChanged.connect(self.search_handle)
self.filter_button.clicked.connect(self.filter_handle)
self.setup_button_bar()
self.format()
def set_database(self, db):
self.full_database = db
self.database = db[str(self.ENTRY_CLASS)]
def get_current_entry(self):
current_row = self.table.currentRow()
if current_row == -1:
return None
name = self.table.item(current_row, 0).text()
entry = self.database[name]
return entry
def selection_change_handle(self, e):
current_entry = self.get_current_entry()
self.viewer.draw_view(current_entry)
def deselect_check_handle(self, e):
current_entry = self.get_current_entry()
if self.prev_entry is current_entry:
self.viewer.set_hidden(not self.viewer.isHidden())
else:
self.prev_entry = current_entry
self.viewer.set_hidden(False)
def setup_button_bar(self):
pass
def format(self):
pass
def sort_columns(self, n, order=None):
if order is not None:
self.order = order
elif self.old_n is n: # second click, switch order
if self.order is Qt.AscendingOrder:
self.order = Qt.DescendingOrder
else:
self.order = Qt.AscendingOrder
else:
self.order = Qt.AscendingOrder
self.table.sortByColumn(n, self.order)
for itt in range(self.table.rowCount()):
name = str(self.table.item(itt, self.NAME_COLUMN))
self.idx_dict[name] = itt
self.old_n = n
def define_filters(self):
pass
# def load_all(self, s, dir, Class):
# self.dir = dir
# self.database.values() = []
# for resource in os.listdir(dir):
# self.load_list(s, dir + resource, Class)
# def load_list(self, s, resource, Class):
# xml = ElementTree.parse(resource)
# root = xml.getroot()
# for itt, entry in enumerate(root.findall(s)):
# self.database.values().append(Class(entry, itt))
# self.database.values()_dict[str(Class)] = self.database.values()
def fill_table(self):
self.table.clear()
self.table.setRowCount(len(self.database))
for itt, entry in enumerate(self.database.values()):
self.update_entry(itt, entry)
self.idx_dict[entry.name] = itt
self.table.setHorizontalHeaderLabels(self.HEADERS)
self.sort_columns(self.NAME_COLUMN, Qt.AscendingOrder)
def update_entry(self, row, entry):
item = QTableWidgetItem(entry.name)
self.table.setItem(row, self.NAME_COLUMN, item)
# self.table.setItem(row, self.INDEX_COLUMN, QTableWidgetItem(str(entry.index)))
def unique_attr(self, attr):
result = []
for entry in self.database.values():
if hasattr(entry, attr):
entry_attr = getattr(entry, attr)
if entry_attr is None:
continue
if type(entry_attr) is not list:
entry_attr = [entry_attr]
for _entry_attr in entry_attr:
if _entry_attr not in result:
result.append(_entry_attr)
return result
def filter_handle(self):
self.filter.toggle_hidden()
def search_handle(self):
s = self.search_box.text()
p = re.compile('.*{}.*'.format(s), re.IGNORECASE)
for idx in range(self.table.rowCount()):
name = self.table.item(idx, self.NAME_COLUMN).text()
entry = self.database[name]
total_cond = True if p.match(name) else False
total_cond = total_cond and self.filter.evaluate_filter(entry)
self.table.setRowHidden(idx, not total_cond)
# self._toggle_table(result)
# def _toggle_table(self, result):
# for name, cond in result.items():
# idx = self.idx_dict[name]
# self.table.setRowHidden(idx, not cond)
def extract_subtypes(self, options):
subtype_dict = dict()
type_return = []
for s in options:
if s is None:
continue
if "(" in s: # indicates that there is a subtype
type = s[:s.find("(")].strip() # find the original type
type = type.lower()
if type not in type_return:
type_return.append(type)
subtype_raw = s[s.find("(") + 1:s.find(")")]
subtype_list = subtype_raw.split(", ")
for subtype in subtype_list:
if type not in subtype_dict.keys():
subtype_dict[type] = [subtype]
elif subtype not in subtype_dict[type]:
subtype_dict[type].append(subtype)
else:
if s not in type_return:
type_return.append(s)
for key, value in subtype_dict.items():
value = [option.capitalize() for option in value]
value = list(set(value))
value.sort()
subtype_dict[key] = value
return type_return, subtype_dict
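# Behaviour sketch (added for clarity, with a hypothetical input): for options like
# ["humanoid (goblinoid, orc)", "beast"] this returns
# (['humanoid', 'beast'], {'humanoid': ['Goblinoid', 'Orc']}).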
def subset(self, attr_dict):
output_list = []
for entry in self.database.values():
valid = True
for key in attr_dict:
if not hasattr(entry, key) or getattr(entry, key) != attr_dict[key]:
valid = False
if valid:
output_list.append(entry)
return output_list
# find entry in the instantiated list of whatever is in the table
def find_entry(self, attr, value):
attr = attr.lower()
if type(value) is str:
value = value.lower()
for entry in self.database.values():
if hasattr(entry, attr):
_value = getattr(entry, attr)
if type(_value) is str:
_value = _value.lower()
if _value == value:
return entry
def new_entry(self):
if self.ENTRY_CLASS is None:
return
new_entry = self.ENTRY_CLASS(None, self.table.rowCount())
new_entry.source = 'custom'
new_entry.custom = True
if not hasattr(new_entry, "required_database_fields"):
return
self.db_editor = DBEditor(self, new_entry, copy=True)
self.db_editor.show()
def edit_entry(self, entry=None):
if entry is None:
entry = self.get_current_entry()
if entry is None:
self.parent.print("No entry selected")
return
else:
if not hasattr(entry, "custom"):
self.parent.print("Only custom entries are editable")
return
if not hasattr(entry, "required_database_fields"):
return
self.db_editor = DBEditor(self, entry)
self.db_editor.show() # the DBEditor calls the copy_entry and save_entry functions defined below
def copy_entry(self, entry):
if self.find_entry('name', entry.name) is not None:
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText('Entry with name {} already exists'.format(entry.name))
msg.setWindowTitle("Duplicate Entry")
msg.exec_()
return False
self.database[entry.name] = entry
self.table.setRowCount(len(self.database.values()))
self.update_entry(len(self.database.values()) - 1, entry)
self.sort_columns(self.NAME_COLUMN, order=Qt.AscendingOrder)
# find and return the SubElement handle for the first entry with the attribute equal to the value
def find_db_entry(self, value, root, attr='name'):
for db_entry in root.findall(self.DATABASE_ENTRY_FIELD):
for _attr in db_entry:
if _attr.tag == attr and _attr.text == value:
return db_entry
def save_entry(self, entry, old_name=None):
flat_fields = [] # list of database fields for the specific entry
for field in entry.database_fields:
if type(field) is str:
flat_fields.append(field)
else:
for _field in field:
flat_fields.append(_field)
path = os.path.join(self.dir, 'custom.xml')
if not os.path.exists(path):
with open(path, 'w') as f: # create the file
pass
root = ET.Element('compendium')
root.set('version', '5')
else:
parser = ET.XMLParser(remove_blank_text=True)
root = ET.parse(path, parser).getroot()
if old_name != None: # update old entry
db_entry = self.find_db_entry(value=old_name, root=root, attr='name')
else: # new entry
db_entry = ET.SubElement(root, self.DATABASE_ENTRY_FIELD)
# for each field
for field in flat_fields:
if hasattr(entry, field):
db_field = db_entry.find(field)
if db_field is None:
db_field = ET.SubElement(db_entry, field)
db_field.text = str(getattr(entry, field))
# a few custom fields with content pre-defined
fields = ['source', 'custom']
values = ['Custom', None]
for field, value in zip(fields, values):
db_field = db_entry.find(field)
if db_field is None:
db_field = ET.SubElement(db_entry, field)
db_field.text = value
# save lists
listnames = ['trait_list', 'action_list', 'legendary_list']
fieldnames = ['trait', 'action', 'legendary']
subfields_list = [
['name', 'text', 'attack'],
['name', 'text', 'attack'],
['name', 'text', 'attack']
]
for listname, fieldname, subfields in zip(listnames, fieldnames, subfields_list):
if hasattr(entry, listname):
for field in getattr(entry, listname):
db_field = ET.SubElement(db_entry, fieldname)
for subfield in subfields:
if hasattr(field, subfield):
pieces = getattr(field, subfield).split('<br>')
db_subfield = ET.SubElement(db_field, subfield)
db_subfield.text = getattr(field, subfield)
# save actions
# fields = ['name', 'text', 'attack']
# if hasattr(entry, 'action_list'):
# for action in entry.action_list:
# db_action = ET.SubElement(db_entry, 'action')
# for field in fields:
# if hasattr(action, field):
# # if '<br>' in getattr(trait, field): # html format break line
# pieces = getattr(action, field).split('<br>')
# for piece in pieces:
# if piece == '':
# continue
# db_field = ET.SubElement(db_trait, field)
# db_field.text = piece
mydata = ET.tostring(root, encoding="unicode", pretty_print=True)
myfile = open(path, "w")
myfile.write(mydata)
def edit_copy_of_entry(self, entry=None):
if entry is None:
entry = self.get_current_entry()
if entry is None:
self.parent.print("No entry selected")
return
new_entry = entry.copy()
new_entry.source = 'custom'
new_entry.custom = True
if not hasattr(entry, "required_database_fields"):
return
self.db_editor = DBEditor(self, new_entry, copy=True)
self.db_editor.show()
class MonsterTableWidget(SearchableTable):
NAME_COLUMN = 0
TYPE_COLUMN = 1
CR_DISPLAY_COLUMN = 2
CR_COLUMN = 3
HEADERS = ['Name', 'Type', 'CR', 'FLOAT CR']
DATABASE_ENTRY_FIELD = 'monster'
ENTRY_CLASS = Monster
VIEWER_INDEX = 0
def sort_columns(self, n, order=None):
if n is self.CR_DISPLAY_COLUMN:
n = self.CR_COLUMN
super().sort_columns(n, order=order)
def setup_button_bar(self):
# find current selected monster
current_row = self.table.currentRow()
add_enc_button = QPushButton("Add to initiative")
add_enc_button.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Minimum)
add_enc_button.clicked.connect(lambda state, x=1: self.add_monster_to_encounter(x))
add_x_enc_button = QPushButton("Add more to initiative")
add_x_enc_button.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Minimum)
add_x_enc_button.clicked.connect(self.add_monster_to_encounter)
add_tool_button = QPushButton("Add to bookmark")
add_tool_button.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Minimum)
add_tool_button.clicked.connect(self.add_monster_to_bookmark)
hspacer = QSpacerItem(0, 0, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.button_bar_layout.addWidget(add_enc_button)
self.button_bar_layout.addWidget(add_x_enc_button)
self.button_bar_layout.addWidget(add_tool_button)
self.button_bar_layout.addItem(hspacer)
def format(self):
h = self.table.horizontalHeader()
t = self.table
t.setColumnCount(self.COLUMNS)
t.setRowCount(1)
resize = QHeaderView.ResizeToContents
stretch = QHeaderView.Stretch
for column, policy in zip([self.NAME_COLUMN, self.TYPE_COLUMN, self.CR_DISPLAY_COLUMN], [stretch, resize, resize]):
h.setSectionResizeMode(column, policy)
t.setShowGrid(False)
# t.setColumnHidden(self.INDEX_COLUMN, True)
t.setColumnHidden(self.TYPE_COLUMN, True)
t.setColumnHidden(self.CR_COLUMN, True)
self.sort_columns(self.NAME_COLUMN)
def update_entry(self, row, entry):
name_item = QTableWidgetItem(entry.name)
# name_item.setFlags(Qt.TextEditable)
self.table.setItem(row, self.NAME_COLUMN, name_item)
# self.table.setItem(row, self.INDEX_COLUMN, QTableWidgetItem(str(entry.index)))
# self.table.setItem(row, self.TYPE_COLUMN, QTableWidgetItem(str(entry.type)))
if hasattr(entry, "cr"):
if entry.cr == "00" or entry.cr is None:
shown_cr = "-"
true_cr = 0
else:
shown_cr = str(entry.cr)
true_cr = eval("float({})".format(entry.cr))
cr_item = QTableWidgetItem(shown_cr)
# cr_item.setFlags(Qt.ItemIsEditable)
self.table.setItem(row, self.CR_DISPLAY_COLUMN, cr_item)
cr_item = QTableWidgetItem()
cr_item.setData(Qt.DisplayRole, true_cr)
self.table.setItem(row, self.CR_COLUMN, cr_item)
self.idx_dict[entry.name] = row
def define_filters(self, version):
if version == "5":
self.filter.add_dropdown("Type", *self.extract_subtypes(self.unique_attr("type")))
self.filter.add_dropdown("Size", self.unique_attr("size"))
self.filter.add_dropdown("Source", self.unique_attr("source"))
self.filter.add_range("CR")
# self.filter.lock("srd", "yes")
self.filter.add_dropdown("SRD", self.unique_attr("srd"))
# self.filter.add_dropdown("Alignment", self.unique_attr("alignment"))
elif version == "3.5":
self.filter.add_dropdown("Type", *self.extract_subtypes(self.unique_attr("type")))
self.filter.add_dropdown("Size", self.unique_attr("size"))
# self.filter.add_dropdown("Source", self.unique_attr("source"))
self.filter.add_range("CR")
self.search_handle()
self.define_filter_buttons()
def define_filter_buttons(self):
if not self.EDITABLE:
return
edit_entry_button = QPushButton("Edit Entry")
new_entry_button = QPushButton("New Entry")
edit_copy_button = QPushButton("Edit Copy")
edit_entry_button.clicked.connect(lambda state, entry=None: self.edit_entry(entry))
new_entry_button.clicked.connect(self.new_entry)
edit_copy_button.clicked.connect(lambda state, entry=None: self.edit_copy_of_entry(entry))
self.filter.layout.addWidget(edit_entry_button)
self.filter.layout.addWidget(new_entry_button)
self.filter.layout.addWidget(edit_copy_button)
def get_selected_monster(self):
current_row = self.table.currentRow()
if current_row == -1:
return None
monster = self.table.item(current_row, 0).text()
return self.database[monster]
def add_monster_to_encounter(self, number=False):
monster = self.get_selected_monster()
if monster is None:
return
if number is False:
number, ok = QInputDialog.getInt(self, 'Add Monster', 'How many?')
if not (ok and number < 2000):
return False
self.parent.encounterWidget.addMonsterToEncounter(monster, number)
def add_monster_to_bookmark(self):
monster = self.get_selected_monster()
if monster is None:
return
self.parent.addMonsterToBookmark(monster)
def contextMenuEvent(self, event):
menu = QMenu(self)
monster = self.get_current_entry()
addAction = menu.addAction("Add to initiative")
addXAction = menu.addAction("Add X to initiative")
menu.addSeparator()
addBookmark = menu.addAction("Add to bookmark")
if hasattr(monster, "spells"):
add_spellbook = menu.addAction("Add monster's spells to bookmark")
edit_entry = None
edit_copy_entry = None
if self.EDITABLE:
menu.addSeparator()
if hasattr(monster, 'custom'):
edit_entry = menu.addAction("Edit entry")
edit_copy_entry = menu.addAction('Edit copy of entry')
action = menu.exec_(self.mapToGlobal(event.pos()))
if action is None:
return
if action == addAction:
self.parent.encounterWidget.addMonsterToEncounter(monster, 1)
elif action == addXAction:
X, ok = QInputDialog.getInt(self, 'Add Monster', 'How many?')
if ok and X < 2000:
self.parent.encounterWidget.addMonsterToEncounter(monster, X)
elif action == addBookmark:
self.parent.addMonsterToBookmark(monster)
elif hasattr(monster, "spells") and action is add_spellbook:
self.parent.extract_and_add_spellbook(monster)
elif self.EDITABLE and action is edit_entry:
self.edit_entry(monster)
elif self.EDITABLE and action is edit_copy_entry:
self.edit_copy_of_entry(monster)
class SpellTableWidget(SearchableTable):
NAME_COLUMN = 0
INDEX_COLUMN = 1
LEVEL_COLUMN = 2
HEADERS = ['Name', 'REFERENCE', 'Spell Level']
DATABASE_ENTRY_FIELD = 'spell'
EDITABLE = True
ENTRY_CLASS = Spell
VIEWER_INDEX = 1
def update_entry(self, row, entry):
self.table.setItem(row, self.NAME_COLUMN, QTableWidgetItem(str(entry.name)))
self.table.setItem(row, self.INDEX_COLUMN, QTableWidgetItem(str(entry.index)))
self.table.setItem(row, self.LEVEL_COLUMN, QTableWidgetItem(str(entry.level)))
def format(self):
t = self.table
h = self.table.horizontalHeader()
t.setColumnCount(self.COLUMNS)
t.setColumnHidden(self.INDEX_COLUMN, True)
t.setColumnHidden(self.LEVEL_COLUMN, False)
def define_filters(self, version):
if version == "5":
self.filter.add_dropdown("School", self.unique_attr("school"))
self.filter.add_dropdown("Level", self.unique_attr("level"))
self.filter.add_dropdown('Classes', *self.extract_subtypes(self.unique_attr('classes')))
self.filter.add_dropdown("Range", *self.extract_subtypes(self.unique_attr('range')))
self.filter.add_dropdown("Source", *self.extract_subtypes(self.unique_attr("source")))
elif version == "3.5":
self.filter.add_dropdown("School", self.unique_attr("school"))
# self.filter.add_dropdown("Level", self.unique_attr("level"))
# self.filter.add_dropdown("Range", self.unique_attr("range"))
def contextMenuEvent(self, event):
menu = QMenu(self)
add_bookmark = menu.addAction("Add to bookmark")
spell = self.get_current_entry()
edit_entry = None
edit_copy_entry = None
if self.EDITABLE:
menu.addSeparator()
if hasattr(spell, 'custom'):
edit_entry = menu.addAction("Edit entry")
edit_copy_entry = menu.addAction("Edit copy of entry")
action = menu.exec_(self.mapToGlobal(event.pos()))
if action is None:
return
if action == add_bookmark:
self.parent.add_to_bookmark_spell(spell)
elif self.EDITABLE and action is edit_entry:
self.edit_entry(spell)
elif self.EDITABLE and action is edit_copy_entry:
self.edit_copy_of_entry(spell)
class ItemTableWidget(SearchableTable):
NAME_COLUMN = 0
INDEX_COLUMN = 1
DATABASE_ENTRY_FIELD = 'item'
EDITABLE = True
ENTRY_CLASS = Item
VIEWER_INDEX = 2
def format(self):
t = self.table
h = self.table.horizontalHeader()
h.hide()
t.setColumnHidden(self.INDEX_COLUMN, True)
def define_filters(self, version):
if version == "5":
self.filter.add_dropdown("Type", self.unique_attr("type"))
self.filter.add_dropdown("Rarity", RarityList, alphabetical=False)
self.filter.add_dropdown("Magic", self.unique_attr("magic"), default="Any")
self.filter.add_range("value", capitalize=True)
elif version == "3.5":
self.filter.add_dropdown("Category", self.unique_attr("category"))
def subset(self, attr_dict):
output_list = []
for entry in self.database.values():
valid = True
for key in attr_dict:
if key == "type" and attr_dict[key] == "Armor":
valid = valid and entry.type in ["Heavy Armor", "Medium Armor", "Light Armor"]
elif key == "type" and attr_dict[key] == "Weapon":
valid = valid and entry.type in ["Melee", "Ranged", "Rod", "Staff"]
elif not hasattr(entry, key) or getattr(entry, key) != attr_dict[key]:
valid = False
if valid:
output_list.append(entry)
return output_list
def contextMenuEvent(self, event):
menu = QMenu(self)
item = self.get_current_entry()
edit_entry = None
edit_copy_entry = None
if self.EDITABLE:
menu.addSeparator()
if hasattr(item, 'custom'):
edit_entry = menu.addAction("Edit entry")
edit_copy_entry = menu.addAction("Edit copy of entry")
action = menu.exec_(self.mapToGlobal(event.pos()))
if action is None:
return
if self.EDITABLE and action is edit_entry:
self.edit_entry(item)
elif self.EDITABLE and action is edit_copy_entry:
self.edit_copy_of_entry(item)
|
StarcoderdataPython
|
163284
|
import requests
from lxml import html
class GithubRepo:
name = ''
author = ''
summary = ''
tag_list = []
license = ''
lastUpdateTime = ''
language = ''
star_num = 0
def tostring(self):
print(self.__dict__)
keyWord = 'swift'
language = 'Swift'
URL = ('https://github.com/search?l=%s&o=desc&q=%s&s=stars&type=Repositories&p=5' % (language, keyWord))
session = requests.session()
response = session.get(URL)
tree = html.fromstring(response.text)
repo_list = []
# List of repository entries on the search results page
repo_html_list = tree.xpath('//ul[@class="repo-list"]/child::*')
for repo_html in repo_html_list:
repo = GithubRepo()
    # Note: the leading '.' restricts the XPath to the current element's subtree; without it, the query would search the whole document
repo.name = repo_html.xpath('.//h3/a/@href')[0].split('/')[2]
repo.author = repo_html.xpath('.//h3/a/@href')[0].split('/')[1]
    # The text attribute only contains the current node's own text, not its children's, so text_content() is used instead
repo.summary = repo_html.xpath('.//p[contains(@class,"d-inline-block")]')[0].text_content().strip()
    # The tag strings contain extra spaces and newlines that need to be stripped
repo.tag_list = list(map(lambda x: x.strip(), repo_html.xpath('.//a[contains(@class,"topic-tag")]/text()')))
repo.license = repo_html.xpath('.//div[@class="d-flex flex-wrap"]//p[position()=1]')[0].text.strip()
repo.lastUpdateTime = repo_html.xpath('.//div[@class="d-flex flex-wrap"]//relative-time/@datetime')[0]
repo.language = repo_html.xpath('.//span[@class="repo-language-color"]/parent::div[1]')[0].text_content().strip()
repo.star_num = repo_html.xpath('.//a[@class="muted-link"]')[0].text_content().strip()
repo_list.append(repo)
# Print the scraped results
for repo in repo_list:
repo.tostring()
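# Illustrative aside (not part of the scrape itself): the .text vs .text_content()
# distinction noted in the comments above, shown on a small hand-written fragment.
_demo = html.fromstring('<p>summary <a>with a child link</a></p>')
print(_demo.text)            # 'summary ' - only the element's own leading text
print(_demo.text_content())  # 'summary with a child link' - includes child nodes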
|
StarcoderdataPython
|
90455
|
<filename>stegbench/executor/embeddor_cmds.py
from collections import defaultdict
from os.path import abspath, join
import stegbench.executor.runner as runner
import stegbench.utils.filesystem as fs
import stegbench.utils.lookup as lookup
def replace(cmd: str, replacements):
for replacement_key in replacements:
cmd = cmd.replace(replacement_key, str(replacements[replacement_key]))
return cmd
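# Example of the template substitution replace() performs (the command string and
# values below are hypothetical; real commands come from the algorithm's configuration):
#   replace('embed -i INPUT_IMAGE_PATH -o OUTPUT_IMAGE_PATH',
#           {'INPUT_IMAGE_PATH': 'in.png', 'OUTPUT_IMAGE_PATH': 'out.png'})
#   -> 'embed -i in.png -o out.png'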
def process_directories(algorithm_info, to_embed_list):
sorted_by_directories = defaultdict(list)
for to_embed in to_embed_list:
input_directory = abspath(fs.get_directory(to_embed[lookup.INPUT_IMAGE_PATH]))
output_directory = abspath(fs.get_directory(to_embed[lookup.OUTPUT_IMAGE_PATH]))
        sorted_by_directories[(input_directory, output_directory)].append(to_embed)
updated_embed_list = []
db_directory = lookup.get_db_dirs()[lookup.dataset]
for directory_pair in sorted_by_directories:
temp_directory = abspath(join(db_directory, fs.get_uuid()))
fs.make_dir(temp_directory)
to_embed_files = sorted_by_directories[directory_pair]
for file in to_embed_files:
fs.copy_file(file[lookup.INPUT_IMAGE_PATH], temp_directory)
payloads = set(list(map(lambda f: f[lookup.PAYLOAD],
sorted_by_directories[directory_pair])))
assert(len(payloads) == 1)
payload = payloads.pop()
to_embed = {
lookup.INPUT_IMAGE_DIRECTORY: temp_directory,
lookup.OUTPUT_IMAGE_DIRECTORY: directory_pair[1],
lookup.PAYLOAD: payload}
updated_embed_list.append(to_embed)
return updated_embed_list
#### NATIVE ####
def preprocess_native(algorithm_info, to_embed_list):
cmd = lookup.get_cmd(algorithm_info)
for to_embed in to_embed_list:
if lookup.SECRET_TXT_FILE in cmd and lookup.SECRET_TXT_FILE not in to_embed:
txt_file_path = lookup.create_asset_file(
algorithm_info[lookup.ALGORITHM_TYPE], to_embed[lookup.SECRET_TXT_PLAINTEXT])
to_embed[lookup.SECRET_TXT_FILE] = txt_file_path
if lookup.OUTPUT_IMAGE_DIRECTORY in cmd and lookup.OUTPUT_IMAGE_DIRECTORY not in to_embed:
to_embed[lookup.OUTPUT_IMAGE_DIRECTORY] = fs.get_directory(
abspath(to_embed[lookup.OUTPUT_IMAGE_PATH]))
pre_cmd = lookup.get_pre_cmd(algorithm_info)
pre_cmds = []
if pre_cmd:
pre_cmds = [{lookup.COMMAND_TYPE: lookup.NATIVE, lookup.COMMAND: replace(
pre_cmd, to_embed)} for to_embed in to_embed_list]
return pre_cmds, to_embed_list
def generate_native_cmd(algorithm_info, to_embed):
cmd = lookup.get_cmd(algorithm_info)
new_cmd = replace(cmd, to_embed)
if lookup.WORKING_DIR in algorithm_info:
wdir = algorithm_info[lookup.WORKING_DIR]
new_cmd = ['(', 'cd', wdir, '&&', new_cmd, ')']
new_cmd = ' '.join(new_cmd)
return {lookup.COMMAND_TYPE: lookup.NATIVE, lookup.COMMAND: [new_cmd]}
def postprocess_native(algorithm_info, embedded_list):
cmd = lookup.get_cmd(algorithm_info)
post_cmd = lookup.get_post_cmd(algorithm_info)
post_cmds = []
if post_cmd:
for to_embed in embedded_list:
new_cmd = replace(post_cmd, to_embed)
post_cmds.append({lookup.COMMAND_TYPE: lookup.NATIVE, lookup.COMMAND: [new_cmd]})
for embedded in embedded_list:
if lookup.SECRET_TXT_FILE in embedded:
removal_cmd = ' '.join([lookup.removal_prefix, embedded[lookup.SECRET_TXT_FILE]])
post_cmds.append({lookup.COMMAND_TYPE: lookup.NATIVE, lookup.COMMAND: [removal_cmd]})
if lookup.INPUT_IMAGE_DIRECTORY in cmd:
removal_cmd = ' '.join([lookup.removal_directory_prefix,
embedded[lookup.INPUT_IMAGE_DIRECTORY]])
post_cmds.append({lookup.COMMAND_TYPE: lookup.NATIVE, lookup.COMMAND: [removal_cmd]})
return post_cmds
def termination_native(algorithm_info, embedded_list):
termination_cmds = []
return termination_cmds
##### DOCKER ####
def preprocess_docker(algorithm_info, to_embed_list):
"""starts docker command and updates parameters appropriately"""
cmd = lookup.get_cmd(algorithm_info)
pre_cmd = lookup.get_pre_cmd(algorithm_info)
volumes = {}
updated_embed_list = to_embed_list
for to_embed in updated_embed_list:
if lookup.SECRET_TXT_FILE in cmd:
txt_file_path = lookup.create_asset_file(
algorithm_info[lookup.ALGORITHM_TYPE], to_embed[lookup.SECRET_TXT_PLAINTEXT])
local_asset_dir = fs.get_directory(abspath(txt_file_path))
volumes[local_asset_dir] = {'bind': lookup.asset_dir, 'mode': 'rw'}
asset_filename = fs.get_filename(txt_file_path)
new_asset_path = join(lookup.asset_dir, asset_filename)
to_embed[lookup.SECRET_TXT_FILE] = new_asset_path
if lookup.INPUT_IMAGE_DIRECTORY in cmd:
docker_input_dir = '/' + fs.get_uuid()
volumes[abspath(to_embed[lookup.INPUT_IMAGE_DIRECTORY])] = {
'bind': docker_input_dir, 'mode': 'rw'}
to_embed[lookup.INPUT_IMAGE_DIRECTORY] = docker_input_dir
else:
original_input_path = to_embed[lookup.INPUT_IMAGE_PATH]
original_input_path = abspath(original_input_path)
local_input_dir = fs.get_directory(original_input_path)
volumes[local_input_dir] = {'bind': lookup.input_dir, 'mode': 'rw'}
input_filename = fs.get_filename(original_input_path)
new_input_path = join(lookup.input_dir, input_filename)
to_embed[lookup.INPUT_IMAGE_PATH] = new_input_path
if lookup.OUTPUT_IMAGE_DIRECTORY in cmd:
if lookup.OUTPUT_IMAGE_DIRECTORY in to_embed:
local_output_dir = to_embed[lookup.OUTPUT_IMAGE_DIRECTORY]
else:
local_output_dir = fs.get_directory(abspath(to_embed[lookup.OUTPUT_IMAGE_PATH]))
docker_directory = '/' + fs.get_uuid()
volumes[local_output_dir] = {'bind': docker_directory, 'mode': 'rw'}
to_embed[lookup.OUTPUT_IMAGE_DIRECTORY] = docker_directory
elif lookup.OUTPUT_IMAGE_PATH in to_embed:
original_output_path = abspath(to_embed[lookup.OUTPUT_IMAGE_PATH])
local_output_dir = fs.get_directory(original_output_path)
volumes[local_output_dir] = {'bind': lookup.output_dir, 'mode': 'rw'}
output_filename = fs.get_filename(original_output_path)
new_output_path = join(lookup.output_dir, output_filename)
to_embed[lookup.OUTPUT_IMAGE_PATH] = new_output_path
container_id = runner.start_docker(algorithm_info[lookup.DOCKER_IMAGE], volumes=volumes)
for to_embed in updated_embed_list:
to_embed[lookup.container_id] = container_id
pre_cmd = lookup.get_pre_cmd(algorithm_info)
pre_cmds = []
if pre_cmd:
for to_embed in to_embed_list:
params = [to_embed[lookup.container_id], replace(pre_cmd, to_embed)]
pre_cmds.append({lookup.COMMAND_TYPE: lookup.NATIVE, lookup.COMMAND: params})
return pre_cmds, updated_embed_list
def generate_docker_cmd(algorithm_info, to_embed):
cmd = lookup.get_cmd(algorithm_info)
new_cmd = replace(cmd, to_embed)
params = [to_embed[lookup.container_id], new_cmd]
if lookup.WORKING_DIR in algorithm_info:
params.append(algorithm_info[lookup.WORKING_DIR])
return {lookup.COMMAND_TYPE: lookup.DOCKER, lookup.COMMAND: params}
def postprocess_docker(algorithm_info, embedded_list):
# need to end the docker process
post_cmds = []
post_cmd = lookup.get_post_cmd(algorithm_info)
cmd = lookup.get_cmd(algorithm_info)
if post_cmd:
for embedded in embedded_list:
new_cmd = replace(post_cmd, embedded)
params = [embedded[lookup.container_id], new_cmd]
if lookup.WORKING_DIR in algorithm_info:
params.append(algorithm_info[lookup.WORKING_DIR])
docker_cmd = {lookup.COMMAND_TYPE: lookup.DOCKER, lookup.COMMAND: params}
post_cmds.append(docker_cmd)
cmd = lookup.get_cmd(algorithm_info)
for embedded in embedded_list:
if lookup.SECRET_TXT_FILE in embedded:
asset_file_name = fs.get_filename(embedded[lookup.SECRET_TXT_FILE])
asset_directory = lookup.get_algo_asset_dirs()[algorithm_info[lookup.ALGORITHM_TYPE]]
old_asset_file_path = join(asset_directory, asset_file_name)
removal_cmd = ' '.join([lookup.removal_prefix, old_asset_file_path])
post_cmds.append({lookup.COMMAND_TYPE: lookup.NATIVE, lookup.COMMAND: [removal_cmd]})
if lookup.INPUT_IMAGE_DIRECTORY in cmd:
removal_cmd = ' '.join([lookup.removal_directory_prefix,
embedded[lookup.INPUT_IMAGE_DIRECTORY]])
params = [embedded[lookup.container_id], removal_cmd]
post_cmds.append({lookup.COMMAND_TYPE: lookup.DOCKER, lookup.COMMAND: params})
return post_cmds
def termination_docker(algorithm_info, embedded_list):
termination_cmds = []
docker_containers = list(
set(list(map(lambda embedded: embedded[lookup.container_id], embedded_list))))
for container_id in docker_containers:
termination_cmds.append({lookup.COMMAND_TYPE: lookup.END_DOCKER,
lookup.COMMAND: [container_id]})
return termination_cmds
def generate_native(algorithm_info, to_embed_list):
pre_cmds, updated_embed_list = preprocess_native(algorithm_info, to_embed_list)
cmds = [generate_native_cmd(algorithm_info, to_embed) for to_embed in updated_embed_list]
post_cmds = postprocess_native(algorithm_info, updated_embed_list)
termination_cmds = termination_native(algorithm_info, updated_embed_list)
return pre_cmds, cmds, post_cmds, termination_cmds
def generate_docker(algorithm_info, to_embed_list):
pre_cmds, updated_embed_list = preprocess_docker(algorithm_info, to_embed_list)
cmds = [generate_docker_cmd(algorithm_info, to_embed) for to_embed in updated_embed_list]
post_cmds = postprocess_docker(algorithm_info, updated_embed_list)
    termination_cmds = termination_docker(algorithm_info, updated_embed_list)
return pre_cmds, cmds, post_cmds, termination_cmds
def generate_commands(algorithm_info, to_embed_list):
command_type = algorithm_info[lookup.COMMAND_TYPE]
generate_function = {
lookup.DOCKER: generate_docker,
lookup.NATIVE: generate_native,
}[command_type]
if lookup.INPUT_IMAGE_DIRECTORY in lookup.get_cmd(algorithm_info):
to_embed_list = process_directories(algorithm_info, to_embed_list)
return generate_function(algorithm_info, to_embed_list)
|
StarcoderdataPython
|
3306795
|
<filename>src/predict.py
import cv2
import numpy as np
import pandas as pd
from skimage import io
def prediction(test, model, model_seg):
"""
    Pipeline connecting the classification and segmentation models.
    All of the preprocessing and detection takes place in this function.
    The classification model first classifies the image as having or not having a tumor.
    If it predicts that no tumor is present, the image is marked accordingly; otherwise the
    image is passed to the segmentation model, which then predicts the pixel area of the tumor.
"""
# empty list to store results
mask, image_id, has_mask = [], [], []
    # iterating through each image in the test data
for i in test.image_path:
img = io.imread(i)
# normalizing
img = img * 1.0 / 255.0
# reshaping
img = cv2.resize(img, (256, 256))
# converting img into array
img = np.array(img, dtype=np.float64)
# reshaping the image from 256,256,3 to 1,256,256,3
img = np.reshape(img, (1, 256, 256, 3))
# making prediction for tumor in image
is_defect = model.predict(img)
# if tumour is not present we append the details of the image to the list
if np.argmax(is_defect) == 0:
image_id.append(i)
has_mask.append(0)
mask.append("No mask.")
continue
        # Creating an empty array of shape 1,256,256,3
X = np.empty((1, 256, 256, 3))
# read the image
img = io.imread(i)
        # resizing the image and converting it to an array of type float64
img = cv2.resize(img, (256, 256))
img = np.array(img, dtype=np.float64)
# standardising the image
img -= img.mean()
img /= img.std()
# converting the shape of image from 256,256,3 to 1,256,256,3
        X[0] = img
# make prediction of mask
predict = model_seg.predict(X)
        # if the sum of the predicted mask is 0 then there is no tumour
if predict.round().astype(int).sum() == 0:
image_id.append(i)
has_mask.append(0)
mask.append("No mask :)")
else:
            # if the sum of the pixel values is more than 0, then there is a tumour
image_id.append(i)
has_mask.append(1)
mask.append(predict)
return pd.DataFrame(
{"image_path": image_id, "predicted_mask": mask, "has_mask": has_mask}
)
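if __name__ == "__main__":
    # Minimal usage sketch. The paths and model files below are hypothetical
    # placeholders; the classifier and the segmentation model are assumed to be
    # Keras models produced by the training pipeline.
    from tensorflow.keras.models import load_model

    test_df = pd.DataFrame({"image_path": ["data/sample_mri.tif"]})
    clf = load_model("models/classifier.h5")
    seg = load_model("models/segmentation.h5", compile=False)

    results = prediction(test_df, clf, seg)
    print(results[["image_path", "has_mask"]])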
|
StarcoderdataPython
|
3396503
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# libcloud.org licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from distutils.core import setup
from distutils.core import Command
from unittest import TextTestRunner, TestLoader
from glob import glob
from os.path import splitext, basename, join as pjoin
HTML_VIEWSOURCE_BASE = 'https://svn.apache.org/viewvc/incubator/libcloud/trunk'
PROJECT_BASE_DIR = 'http://incubator.apache.org/libcloud/'
class TestCommand(Command):
user_options = []
def initialize_options(self):
THIS_DIR = os.path.abspath(os.path.split(__file__)[0])
sys.path.insert(0, THIS_DIR)
sys.path.insert(0, pjoin(THIS_DIR, 'test'))
self._dir = os.getcwd()
def finalize_options(self):
pass
def run(self):
testfiles = []
for t in glob(pjoin(self._dir, 'test', 'test_*.py')):
testfiles.append('.'.join(
['test', splitext(basename(t))[0]])
)
tests = TestLoader().loadTestsFromNames(testfiles)
t = TextTestRunner(verbosity = 1)
res = t.run(tests)
sys.exit(not res.wasSuccessful())
class ApiDocsCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system(
'pydoctor'
' --add-package=libcloud'
' --project-name=libcloud'
' --make-html'
' --html-viewsource-base="%s"'
' --project-base-dir=`pwd`'
' --project-url="%s"'
% (HTML_VIEWSOURCE_BASE, PROJECT_BASE_DIR)
)
setup(
name='apache-libcloud',
version='0.2.1',
description='A unified interface into many cloud server providers',
author='<NAME>',
author_email='<EMAIL>',
packages=[
'libcloud',
'libcloud.drivers'
],
package_dir={
'libcloud': 'libcloud',
'libcloud.drivers': 'libcloud/drivers'
},
license='Apache License (2.0)',
url='http://incubator.apache.org/libcloud/',
cmdclass={
'test': TestCommand,
'apidocs': ApiDocsCommand
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
StarcoderdataPython
|
3255174
|
import versioneer
from setuptools import find_packages, setup
setup(
name="fastscore",
description="FastScore SDK",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(),
use_2to3=True,
author="Open Data Group",
author_email="<EMAIL>",
install_requires=[
"iso8601>=0.1.11",
"PyYAML>=3.11",
"requests>=2.11.1",
"tabulate>=0.7.5",
"websocket-client>=0.37.0",
"six",
"urllib3>=1.20",
"certifi>=2017.4.17",
"ordered_set"
],
test_suite="test",
tests_require=[
"mock>=2.0.0",
"iso8601>=0.1.11",
"PyYAML>=3.11",
"requests>=2.11.1",
"tabulate>=0.7.5",
"websocket-client>=0.37.0",
"urllib3>=1.20",
"six",
"mock"
]
)
|
StarcoderdataPython
|
3226060
|
<filename>RestPy/SampleScripts/evpnNgpf2.py
"""
Evpn_NGPF.py:
Tested with two back-to-back Ixia ports; variables control the VTEP scale (L2/L3 VNI, number of VTEPs, etc.)
- Connect to the API server
- Assign ports:
- If variable forceTakePortOwnership is True, take over the ports if they're owned by another user.
- If variable forceTakePortOwnership if False, abort test.
- Configure two Topology Groups: IPv4/BGP/EVPN
- Configure Network Group for each topology
- Configure a Traffic Item
- Start all protocols
- Verify all protocols
- Start traffic
- Get Traffic Item
- Get Flow Statistics stats
Supports IxNetwork API servers:
- Windows, Windows Connection Mgr and Linux
Requirements:
- Minimum IxNetwork 8.52
- Python 2.7 and 3+
- pip install requests
- pip install ixnetwork_restpy
RestPy Doc:
https://www.openixia.github.io/ixnetwork_restpy/#/
Usage:
- Enter: python <script>
"""
import sys, os, time, traceback
from ixnetwork_restpy import SessionAssistant
apiServerIp = '172.16.101.3'
ixChassisIpList = ['172.16.102.5']
portList = [[ixChassisIpList[0], 1,1], [ixChassisIpList[0], 1, 2]]
# For Linux API server only
username = 'admin'
password = '<PASSWORD>'
# For linux and connection_manager only. Set to True to leave the session alive for debugging.
debugMode = False
forceTakePortOwnership = True
vtepMultiplier = '64'
l2LabelStartValue = '1001'
l3LabelStartValue = '1001001'
# eviCount has to be evenly divisible by l3RtCount for the script to work.
eviCount = '128'
l3RtCount = 8
l3RtRepeatValue = int(eviCount) // l3RtCount
try:
# LogLevel: none, info, warning, request, request_response, all
session = SessionAssistant(IpAddress=apiServerIp, RestPort=None, UserName=username, Password=password,
SessionName=None, SessionId=None, ApiKey=None,
ClearConfig=True, LogLevel='all', LogFilename='restpy.log')
ixNetwork = session.Ixnetwork
ixNetwork.info('Assign ports')
portMap = session.PortMapAssistant()
vport = dict()
for index,port in enumerate(portList):
portName = 'Port_{}'.format(index+1)
vport[portName] = portMap.Map(IpAddress=port[0], CardId=port[1], PortId=port[2], Name=portName)
portMap.Connect(forceTakePortOwnership)
ixNetwork.info('Creating Topology Group 1')
topology1 = ixNetwork.Topology.add(Name='Topo1', Ports=vport['Port_1'])
deviceGroup1 = topology1.DeviceGroup.add(Name='DG1', Multiplier=vtepMultiplier)
ethernet1 = deviceGroup1.Ethernet.add(Name='Eth1')
ethernet1.Mac.Increment(start_value='00:01:01:01:00:01', step_value='00:00:00:00:00:01')
ixNetwork.info('Configuring IPv4')
ipv4 = ethernet1.Ipv4.add(Name='Ipv4')
ipv4.Address.Increment(start_value='10.1.1.1', step_value='0.0.0.1')
ipv4.GatewayIp.Increment(start_value='10.1.2.1', step_value='0.0.0.1')
ixNetwork.info('Configuring BgpIpv4Peer 1')
bgp1 = ipv4.BgpIpv4Peer.add(Name='Bgp1')
bgp1.DutIp.Increment(start_value='10.1.2.1', step_value='0.0.0.1')
bgp1.Type.Single('internal')
bgp1.LocalAs2Bytes.Increment(start_value=101, step_value=0)
bgp1.AdvertiseEvpnRoutesForOtherVtep = 'True'
bgp1.FilterEvpn.Single(True)
ixNetwork.info('Configuring EVPN 1')
evpn1 = bgp1.BgpIPv4EvpnVXLAN.add(Name='EVPN_VXLAN_1')
bgp1.BgpEthernetSegmentV4.EvisCount = eviCount
bgp1.BgpEthernetSegmentV4.VtepIpv4Address.Increment(start_value='10.1.1.1', step_value='0.0.0.1')
evpn1.NumBroadcastDomainV4 = '1'
evpn1.RdEvi.Custom(start_value='1', step_value='0',increments=[('1', l3RtCount, [('0', l3RtRepeatValue , [])])])
evpn1.BgpL3VNIExportRouteTargetList.find()[0].TargetAssignedNumber.Custom(start_value=l3LabelStartValue, step_value='0',increments=[('1', l3RtCount, [('0', l3RtRepeatValue , [])])])
evpn1.BgpExportRouteTargetList.find()[0].TargetAssignedNumber.Custom(start_value=l2LabelStartValue, step_value='0',increments=[('1', eviCount, [])])
ixNetwork.info('Configuring Network Group 1')
networkGroup1 = deviceGroup1.NetworkGroup.add(Name='BGP-Routes1', Multiplier='1')
macPool = networkGroup1.MacPools.add(NumberOfAddresses='1')
CMacProperties = macPool.CMacProperties.add()
connectorMac= macPool.Connector.find()
connectorMac.ConnectedTo='/api/v1/sessions/1/ixnetwork/topology/1/deviceGroup/1/ethernet/1/ipv4/1/bgpIpv4Peer/1/bgpIPv4EvpnVXLAN/1'
ipv4PrefixPool = macPool.Ipv4PrefixPools.add(NumberOfAddresses='1')
ipv4PrefixPool.NetworkAddress.Increment(start_value='172.16.58.3', step_value='0.0.1.0')
ipv4PrefixPool.PrefixLength.Single(16)
macPool.CMacProperties.find()[0].FirstLabelStart.Custom(start_value=l2LabelStartValue, step_value='0',increments=[('1', eviCount, [])])
macPool.CMacProperties.find()[0].EnableSecondLabel.Single (True)
macPool.CMacProperties.find()[0].SecondLabelStart.Custom(start_value=l3LabelStartValue, step_value='0',increments=[('1', l3RtCount, [('0', l3RtRepeatValue , [])])])
macPool.CMacProperties.find()[0].AdvertiseIpv4Address.Single (True)
macPool.CMacProperties.find()[0].Ipv4AddressPrefixLength.Single(32)
ixNetwork.info('Creating Topology Group 2')
topology2 = ixNetwork.Topology.add(Name='Topo2', Ports=vport['Port_2'])
deviceGroup2 = topology2.DeviceGroup.add(Name='DG2', Multiplier=vtepMultiplier)
ethernet2 = deviceGroup2.Ethernet.add(Name='Eth2')
ethernet2.Mac.Increment(start_value='00:01:01:02:00:01', step_value='00:00:00:00:00:01')
ixNetwork.info('Configuring IPv4 2')
ipv4 = ethernet2.Ipv4.add(Name='Ipv4-2')
ipv4.Address.Increment(start_value='10.1.2.1', step_value='0.0.0.1')
ipv4.GatewayIp.Increment(start_value='10.1.1.1', step_value='0.0.0.1')
ixNetwork.info('Configuring BgpIpv4Peer 2')
bgp2 = ipv4.BgpIpv4Peer.add(Name='Bgp2')
bgp2.DutIp.Increment(start_value='10.1.1.1', step_value='0.0.0.1')
bgp2.Type.Single('internal')
bgp2.LocalAs2Bytes.Increment(start_value=101, step_value=0)
bgp2.AdvertiseEvpnRoutesForOtherVtep = 'True'
bgp2.FilterEvpn.Single(True)
ixNetwork.info('Configuring EVPN 2')
evpn2 = bgp2.BgpIPv4EvpnVXLAN.add(Name= 'EVPN_VXLAN_2')
bgp2.BgpEthernetSegmentV4.EvisCount = eviCount
bgp2.BgpEthernetSegmentV4.VtepIpv4Address.Increment(start_value='10.1.2.1', step_value='0.0.0.1')
evpn2.NumBroadcastDomainV4 = '1'
evpn2.RdEvi.Custom(start_value='1', step_value='0',increments=[('1', l3RtCount, [('0', l3RtRepeatValue , [])])])
evpn2.BgpL3VNIExportRouteTargetList.find()[0].TargetAssignedNumber.Custom(start_value=l3LabelStartValue, step_value='0',increments=[('1', l3RtCount, [('0', l3RtRepeatValue , [])])])
evpn2.BgpExportRouteTargetList.find()[0].TargetAssignedNumber.Custom(start_value=l2LabelStartValue, step_value='0',increments=[('1', eviCount, [])])
ixNetwork.info('Configuring Network Group 2')
networkGroup2=deviceGroup2.NetworkGroup.add(Name='BGP-Routes-2', Multiplier='1')
macPool2 = networkGroup2.MacPools.add(NumberOfAddresses='1')
CMacProperties2 = macPool2.CMacProperties.add()
connectorMac2= macPool2.Connector.find()
connectorMac2.ConnectedTo='/api/v1/sessions/1/ixnetwork/topology/2/deviceGroup/1/ethernet/1/ipv4/1/bgpIpv4Peer/1/bgpIPv4EvpnVXLAN/1'
ipv4PrefixPool = macPool2.Ipv4PrefixPools.add(NumberOfAddresses='1')
ipv4PrefixPool.NetworkAddress.Increment(start_value='192.168.127.12', step_value='0.0.1.0')
ipv4PrefixPool.PrefixLength.Single(16)
macPool2.CMacProperties.find()[0].FirstLabelStart.Custom(start_value=l2LabelStartValue, step_value='0',increments=[('1', eviCount, [])])
macPool2.CMacProperties.find()[0].EnableSecondLabel.Single (True)
macPool2.CMacProperties.find()[0].SecondLabelStart.Custom(start_value=l3LabelStartValue, step_value='0',increments=[('1', l3RtCount, [('0', l3RtRepeatValue , [])])])
macPool2.CMacProperties.find()[0].AdvertiseIpv4Address.Single (True)
macPool2.CMacProperties.find()[0].Ipv4AddressPrefixLength.Single(32)
ixNetwork.StartAllProtocols(Arg1='sync')
ixNetwork.info('Verify protocol sessions\n')
protocolSummary = session.StatViewAssistant('Protocols Summary')
protocolSummary.CheckCondition('Sessions Not Started', protocolSummary.EQUAL, 0)
protocolSummary.CheckCondition('Sessions Down', protocolSummary.EQUAL, 0)
ixNetwork.info(protocolSummary)
ixNetwork.info('Create Traffic Item')
trafficItem = ixNetwork.Traffic.TrafficItem.add(Name='Traffic Item 1', BiDirectional=False, TrafficType='ipv4')
ixNetwork.info('Add endpoint flow group')
trafficItem.EndpointSet.add(Sources=topology1, Destinations=topology2)
ixNetwork.info('Configuring config elements')
configElement = trafficItem.ConfigElement.find()[0]
configElement.FrameRate.update(Type='percentLineRate', Rate=50)
configElement.FrameRateDistribution.PortDistribution = 'splitRateEvenly'
configElement.FrameSize.FixedSize = 128
trafficItem.Tracking.find()[0].TrackBy = ['flowGroup0']
trafficItem.Generate()
ixNetwork.Traffic.Apply()
ixNetwork.Traffic.StartStatelessTrafficBlocking()
flowStatistics = session.StatViewAssistant('Flow Statistics')
ixNetwork.info('{}\n'.format(flowStatistics))
for rowNumber,flowStat in enumerate(flowStatistics.Rows):
ixNetwork.info('\n\nSTATS: {}\n\n'.format(flowStat))
ixNetwork.info('\nRow:{} TxPort:{} RxPort:{} TxFrames:{} RxFrames:{}\n'.format(
rowNumber, flowStat['Tx Port'], flowStat['Rx Port'],
flowStat['Tx Frames'], flowStat['Rx Frames']))
ixNetwork.Traffic.StopStatelessTrafficBlocking()
if debugMode == False:
# For linux and connection_manager only
for vport in ixNetwork.Vport.find():
vport.ReleasePort()
session.Session.remove()
print ('Releasing ports and Removing Session')
except Exception as errMsg:
print('\n%s' % traceback.format_exc(None, errMsg))
if debugMode == False and 'session' in locals():
session.Session.remove()
|
StarcoderdataPython
|
3287231
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
Created on : Tue Apr 17 22:44:31 2018
@author : Sourabh
"""
# %%
class HttpException(Exception):
def __init__(self, message, code):
super().__init__(message)
self.errorCode = code
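# Example usage (the message and status code are illustrative):
if __name__ == '__main__':
    try:
        raise HttpException('Resource not found', 404)
    except HttpException as err:
        print(err, err.errorCode)  # Resource not found 404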
|
StarcoderdataPython
|
1637269
|
# Make a program that reads any angle and shows on the screen the value of the sine, cosine and tangent of that angle
from math import radians, sin, cos, tan
a = float(input('Enter an angle: '))
s = sin(radians(a))
c = cos(radians(a))
t = tan(radians(a))
print('The SINE of this angle is {:.2f} \nThe COSINE of this angle is {:.2f} \nThe TANGENT of this angle is {:.2f}'
.format(s, c, t))
|
StarcoderdataPython
|
1748993
|
#!/usr/bin/env python
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import json
import random
import h5py
import numpy as np
class GenerateSampleCoords(object): # pylint: disable=too-few-public-methods
"""
Generate a sample 3D models file if one does not exist
"""
@staticmethod
def main(): # pylint: disable=too-many-locals,too-many-statements
"""
Main Function
"""
resolution = 1000
clusters = [0, 1, 2, 3, 4, 5, 6]
clusters_hierarchy = [[0, 1, 2], [3, 4], [5], [6]]
centroids = [1, 10, 100, 150, 200]
chromosomes = ['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'X']
for uuid in range(10):
# Create the HDF5 file
filename = os.path.join(os.path.dirname(__file__), "../tests/data/sample_coords.hdf5")
hdf5_handle = h5py.File(filename, "a")
if str(resolution) in hdf5_handle:
grp = hdf5_handle[str(resolution)]
dset = grp['data']
meta = grp['meta']
mpgrp = meta['model_params']
clustersgrp = meta['clusters']
centroidsgrp = meta['centroids']
else:
# Create the initial dataset with minimum values
grp = hdf5_handle.create_group(str(resolution))
meta = grp.create_group('meta')
mpgrp = meta.create_group('model_params')
clustersgrp = meta.create_group('clusters')
centroidsgrp = meta.create_group('centroids')
dset = grp.create_dataset(
'data',
(1, 1000, 3),
maxshape=(None, 1000, 3),
dtype='int32',
chunks=True,
compression="gzip"
)
dset.attrs['title'] = 'title'
dset.attrs['experimentType'] = 'experimentType'
dset.attrs['species'] = 'species'
dset.attrs['project'] = 'project'
dset.attrs['identifier'] = 'identifier'
dset.attrs['assembly'] = 'assembly'
dset.attrs['cellType'] = 'cellType'
dset.attrs['resolution'] = 'resolution'
dset.attrs['datatype'] = 'datatype'
dset.attrs['components'] = 'components'
dset.attrs['source'] = 'source'
dset.attrs['dependencies'] = json.dumps({'test': 'test'})
clustergrps = clustersgrp.create_group(str(uuid))
ch_size = len(clusters_hierarchy)
for cluster in range(ch_size):
clustergrps.create_dataset(
str(cluster),
data=clusters_hierarchy[cluster],
chunks=True,
compression="gzip"
)
centroidsgrp.create_dataset(
str(uuid),
data=centroids,
chunks=True,
compression="gzip"
)
current_size = len(dset)
if current_size == 1:
current_size = 0
model_size = random.randint(500, 2000)
dset.resize((current_size + model_size, 1000, 3))
dnp = np.zeros([model_size, 1000, 3], dtype='int32')
model_param = []
for ref in range(1000):
cluster_id = random.choice(clusters)
model_param.append([ref, cluster_id])
for pos in range(model_size):
x_pos = random.randint(-1000, 1000)
y_pos = random.randint(-1000, 1000)
z_pos = random.randint(-1000, 1000)
dnp[pos][ref] = [x_pos, y_pos, z_pos]
start = random.randint(1, 30000000)
end = start + random.randint(5000, 100000)
model_param_ds = mpgrp.create_dataset(str(uuid), data=model_param)
model_param_ds.attrs['i'] = current_size
model_param_ds.attrs['j'] = current_size + model_size
model_param_ds.attrs['chromosome'] = random.choice(chromosomes)
model_param_ds.attrs['start'] = start
model_param_ds.attrs['end'] = end
dset[current_size:current_size + model_size, 0:1000, 0:3] += dnp
hdf5_handle.close()
if __name__ == '__main__':
GSC = GenerateSampleCoords()
GSC.main()
|
StarcoderdataPython
|
4198
|
<gh_stars>1000+
"""Implementation of Rule L024."""
from sqlfluff.core.rules.doc_decorators import document_fix_compatible
from sqlfluff.rules.L023 import Rule_L023
@document_fix_compatible
class Rule_L024(Rule_L023):
"""Single whitespace expected after USING in JOIN clause.
| **Anti-pattern**
.. code-block:: sql
SELECT b
FROM foo
LEFT JOIN zoo USING(a)
| **Best practice**
| The • character represents a space.
| Add a space after USING, to avoid confusing it
| for a function.
.. code-block:: sql
:force:
SELECT b
FROM foo
LEFT JOIN zoo USING•(a)
"""
expected_mother_segment_type = "join_clause"
pre_segment_identifier = ("name", "using")
post_segment_identifier = ("type", "bracketed")
expand_children = None
allow_newline = True
|
StarcoderdataPython
|
152777
|
import os
from nmapy.classification import *
if __name__ == "__main__":
in_image = "/mnt/GATES/UserDirs/4ja/data/johannesburg_cw_wv2_000024000_000078000_00114.tif"
out = None
block = "30 meters"
model_dir = "/mnt/GATES/UserDirs/4ja/models"
classifier_file = os.path.join(model_dir, "svm_model.pkl")
scaler_file = os.path.join(model_dir, "data_scaler.pkl")
feature_file = os.path.join(model_dir, "feature_params.pkl")
sequentials = []
twos = []
fours = []
sixes = []
eights = []
tens = []
twelves = []
times = [sequentials, twos, fours, sixes, eights, tens, twelves]
cpus = [-1, 2, 4, 6, 8, 10, 12]
total_trials = 30
results_file = open("/mnt/GATES/UserDirs/4ja/experiments/speedup.txt", "w")
header_str = "t0"
for n in range(1, total_trials):
header_str += ", t" + str(n)
results_file.write(header_str + "\n")
t = 0
while t < len(times):
num_trials = 0
while num_trials < total_trials:
print("CPUs: " + str(cpus[t]) + " trial: " + str(num_trials))
_, tot_time = classify.classify_pixels(in_image,
out,
block,
classifier_file,
scaler_file,
feature_file,
tile_size=None,
n_data_chunks=cpus[t],
n_jobs=cpus[t])
results_file.write(str(tot_time) + ",")
times[t].append(tot_time)
num_trials+=1
results_file.write("\n")
t+=1
results_file.close()
for r in times:
print(r)
print()
|
StarcoderdataPython
|
3314220
|
<reponame>isaacmorneau/3-CPO<filename>c3po/output.py
from __future__ import print_function
verbose = True
#so you can globally disable all prints
def vprint(*args, **kwargs):
if verbose:
print(*args, **kwargs)
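# Example: callers use vprint exactly like print; flipping the module-level flag
# silences every vprint call at once (importers would set c3po.output.verbose = False).
if __name__ == '__main__':
    vprint('shown while verbose is True')
    verbose = False
    vprint('suppressed')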
|
StarcoderdataPython
|
1664575
|
entity_uri = '/ngsi-ld/v1/entities/'
header={'content-type': 'application/ld+json', 'Accept-Charset': 'UTF-8'}
subscribe_uri='/ngsi10/subscribeContext'
context_url="https://forge.etsi.org/gitlab/NGSI-LD/NGSI-LD/raw/master/coreContext/ngsi-ld-core-context.jsonld"
brand_url="http://example.org/"
id_value="urn:ngsi-ld:"
update_uri='/ngsi10/updateContext'
create_status=201
update_status=204
internal_server_error=500
resource_not_found=404
multistatus=207
fog_header={'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
|
StarcoderdataPython
|
1797173
|
'''
Module containing the online game class called mayhem
Written by <NAME>
'''
import pygame as pg
import numpy as np
import pickle
import time
import itertools
from multiprocessing import Pipe, Process
from importlib import reload
import user_settings as cng
from states import state_config as state_cng
from instances import instance_config as game_cng
from states import state
from states import game_state
from classes import spaceship
from classes import projectile
from classes import maps
from images import images as imgs
from instances import game_instances as game_i
from instances import ui_instances as ui_i
from server import network as net
FRAME_TIME = 1/cng.fps
COLORS = pg.colordict.THECOLORS
def handshake(bridge, ship):
'''
Initialize handshake with server
This function returns the client_id (int)
'''
out_data = pickle.dumps(ship)
bridge.send(out_data)
client_id = pickle.loads(bridge.recv(state_cng.recv_size))
return client_id
def server_comm_protocol(bridge, pipe, ship, bullets):
'''
    Stands for server communication protocol. This function handles the
    client-server communication and is meant to be run by a parallel process
    to reduce in-game stuttering.
'''
# TODO: Contemplate buffersize, should at least be 16kb
recv_size = 1024*32
bridge.client_socket.settimeout(5.0)
while True:
try:
kill_signal, ship, bullets = pipe.recv()
if(kill_signal):
pipe.close()
return
data_to_send = pickle.dumps([ship, bullets])
bridge.send(data_to_send)
try:
all_ships, all_bullets, planets, asteroids = pickle.loads(bridge.recv(state_cng.recv_size))
except:
pass
all_bullets = list(itertools.chain.from_iterable(all_bullets))
pipe.send([all_ships, all_bullets, planets, asteroids])
except:
print("Client comm process terminated")
pipe.send([0,0,0,0])
pipe.close()
return
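def _demo_worker(conn):
    # Stand-in for server_comm_protocol in a self-contained toy example: it echoes a
    # doubled payload back through the pipe until it receives a kill signal.
    while True:
        kill, payload = conn.recv()
        if kill:
            conn.close()
            return
        conn.send(payload * 2)


def _pipe_pattern_demo():
    # Minimal sketch of the Pipe/Process pattern used by this module (independent of the
    # game objects): the main side sends state, the worker replies, and a kill flag ends it.
    parent, child = Pipe()
    worker = Process(target=_demo_worker, args=(child,))
    worker.start()
    parent.send((0, 21))
    print(parent.recv())  # 42
    parent.send((1, None))
    worker.join()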
class mayhem(game_state.mayhem):
'''
Multiplayer (LAN) mayhem class, inherits the single-player mayhem class
and is meant to be a part of a state machine.
'''
def __init__(self, MANAGER, WINDOW):
super().__init__(MANAGER, WINDOW)
def init_attributes(self):
'''Helper method for initializing attributes, used for soft-reloading'''
self.planets = []
self.asteroids = []
self.all_ships = []
self.bullets = []
self.ship = game_i.ship
self.camera = np.array([self.w_shape[0], self.w_shape[1]])/2 - self.ship.pos
self.show_refuel = False
self.show_loading_ammo = False
def update_graphics(self):
'''
Blits and flips
        The objects do not contain any surface objects to be blitted and therefore
        need to be told which image to blit. This is because the objects should
        be lightweight (and you can't pickle surfaces anyway) in order to be sent
        to a server during an online session.
'''
self.WINDOW.fill(self.bg_color)
self.WINDOW.blit(
imgs.bg_img,
self.camera
)
self.planets[0].draw(self.WINDOW, self.camera, imgs.earth_img)
self.planets[1].draw(self.WINDOW, self.camera, imgs.venus_img)
game_i.sun.draw(self.WINDOW, self.camera, imgs.sun_img)
for ship in self.all_ships:
ship.draw(self.WINDOW, self.camera, imgs.enemyship_img)
for asteroid, img in zip(self.asteroids, imgs.asteroid_imgs):
asteroid.draw(self.WINDOW, self.camera, img)
for bullet in self.all_bullets:
bullet.draw(self.WINDOW, self.camera, imgs.bullet_img)
self.ship.draw(
self.WINDOW,
self.camera,
imgs.ship_img
)
self.minimap.draw(
WINDOW=self.WINDOW,
colors=game_i.minimap_colors_online,
sizes=game_i.minimap_sizes_online,
bullets=self.all_bullets,
sun=[game_i.sun],
celestials=self.planets,
asteroids=self.asteroids,
others=self.all_ships,
)
self.minimap.draw_player(
self.WINDOW,
game_i.ship,
2
)
for text in ui_i.indicators:
text.draw(self.WINDOW)
pg.display.update()
def logic(self):
'''
Method for handling logic
'''
self.ship.update(self)
for bullet in self.bullets:
bullet.update(self)
bullet.interact_ship(self.all_ships, self.bullets)
def parallell_comm_protocol(self, pipe):
'''
        Stands for parallel communication protocol; this is the function that the
        main process calls to communicate with the parallel worker process via a pipe.
'''
self.all_ships, self.all_bullets, self.planets, self.asteroids = pipe.recv()
if self.all_ships == 0:
return 0
del(self.all_ships[self.client_id])
self.all_ships = list(self.all_ships.values())
return 1
def run(self):
'''
The "main" loop
'''
self.socket = net.Network()
# If socket fails to connect to server
if not self.socket.connect():
self._active = False
return self.MANAGER.get_state('main_menu')
state_pipe, comm_pipe = Pipe()
self.client_id = handshake(self.socket, self.ship)
print(f"Client id = {self.client_id}")
# Run server communication protocol in separate process
p = Process(target=server_comm_protocol, args=(self.socket, comm_pipe, self.ship, self.bullets))
p.start()
while(self._active):
state_pipe.send((0, self.ship, self.bullets))
if not self.parallell_comm_protocol(state_pipe):
self._active = False
self.next_state = self.MANAGER.get_state('main_menu')
break
self.dt = self.clock.tick(cng.fps)
# TODO: Check if this works properly for high fps
self.lag_correction = self.dt/self.target_timestep
self.update_graphics()
self.update_user_input()
self.logic()
# This needs to be below update graphics for some reason
self.animations()
# Terminate parallell process by telling it to kill itself
state_pipe.send((1, None, None))
self.socket.client_socket.close()
p.join()
# p.close happens automatically during garbage collection, and using p.close raises attribute error for some computers
# p.close()
return self.next_state
|
StarcoderdataPython
|
3294514
|
import filecmp
import os
import tempfile
import unittest
import sbol3
import tyto
from sbol_utilities.component import contained_components, contains, add_feature, add_interaction, constitutive, \
regulate, order, in_role, all_in_role, ensure_singleton_feature
from sbol_utilities.component import dna_component_with_sequence, rna_component_with_sequence, \
protein_component_with_sequence, media, functional_component, promoter, rbs, cds, terminator, \
protein_stability_element, gene, operator, engineered_region, mrna, transcription_factor, \
strain, ed_simple_chemical, ed_protein
from sbol_utilities.sbol_diff import doc_diff
class TestComponent(unittest.TestCase):
def test_system_building(self):
doc = sbol3.Document()
sbol3.set_namespace('http://sbolstandard.org/testfiles')
system = sbol3.Component('system', sbol3.SBO_FUNCTIONAL_ENTITY)
doc.add(system)
# make a couple of stand-alone components
gfp_cds = sbol3.Component('gfp_cds', sbol3.SBO_DNA, roles=[tyto.SO.CDS])
doc.add(gfp_cds)
# make a functional unit
expression = add_feature(system, sbol3.LocalSubComponent([sbol3.SBO_DNA], roles=[tyto.SO.engineered_region]))
contains(expression, gfp_cds)
rbs = contains(expression, sbol3.LocalSubComponent([sbol3.SBO_DNA], roles=[tyto.SO.ribosome_entry_site]))
regulate(rbs, gfp_cds)
terminator = contains(expression, sbol3.LocalSubComponent([sbol3.SBO_DNA], roles=[tyto.SO.terminator]))
order(gfp_cds, terminator)
constitutive(expression)
# link it to a product
gfp_mut3_ncbi = 'https://www.ncbi.nlm.nih.gov/protein/AAB18957.1'
gfp = add_feature(system, sbol3.ExternallyDefined([sbol3.SBO_PROTEIN], gfp_mut3_ncbi))
prod = add_interaction(sbol3.SBO_GENETIC_PRODUCTION,
participants={gfp: sbol3.SBO_PRODUCT, gfp_cds: sbol3.SBO_TEMPLATE})
assert contained_components(system) == {system, gfp_cds}
assert in_role(prod, sbol3.SBO_PRODUCT) == gfp
assert all_in_role(prod, sbol3.SBO_TEMPLATE) == [ensure_singleton_feature(system, gfp_cds)]
# confirm that the system constructed is exactly as expected
tmp_out = tempfile.mkstemp(suffix='.nt')[1]
doc.write(tmp_out, sbol3.SORTED_NTRIPLES)
test_dir = os.path.dirname(os.path.realpath(__file__))
comparison_file = os.path.join(test_dir, 'test_files', 'component_construction.nt')
assert filecmp.cmp(tmp_out, comparison_file), f'Converted file {tmp_out} is not identical'
def test_high_level_constructors(self):
hlc_doc = sbol3.Document()
doc = sbol3.Document()
sbol3.set_namespace('http://sbolstandard.org/testfiles')
dna_identity = 'dna_component_with_sequence'
dna_sequence = 'ttt'
test_description = 'test'
hlc_dna_comp, hlc_dna_seq = dna_component_with_sequence(dna_identity, dna_sequence, description=test_description)
dna_seq = sbol3.Sequence(f'{dna_identity}_seq',
elements=dna_sequence,
encoding=sbol3.IUPAC_DNA_ENCODING)
dna_comp = sbol3.Component(dna_identity, sbol3.SBO_DNA,
sequences=[dna_seq],
description=test_description)
hlc_doc.add([hlc_dna_comp, hlc_dna_seq])
doc.add([dna_comp, dna_seq])
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {dna_identity}'
rna_identity = 'rna_component_with_sequence'
rna_sequence = 'uuu'
hlc_rna_comp, hlc_rna_seq = rna_component_with_sequence(rna_identity, rna_sequence, description=test_description)
rna_seq = sbol3.Sequence(f'{rna_identity}_seq',
elements=rna_sequence,
encoding=sbol3.IUPAC_RNA_ENCODING)
rna_comp = sbol3.Component(rna_identity, sbol3.SBO_RNA,
sequences=[rna_seq],
description=test_description)
hlc_doc.add([hlc_rna_comp, hlc_rna_seq])
doc.add([rna_comp, rna_seq])
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {rna_identity}'
pro_identity = 'pro_component_with_sequence'
pro_sequence = 'F'
hlc_pro_comp, hlc_pro_seq = protein_component_with_sequence(pro_identity, pro_sequence, description=test_description)
pro_seq = sbol3.Sequence(f'{pro_identity}_seq',
elements=pro_sequence,
encoding=sbol3.IUPAC_PROTEIN_ENCODING)
pro_comp = sbol3.Component(pro_identity, sbol3.SBO_PROTEIN,
sequences =[pro_seq],
description=test_description)
hlc_doc.add([hlc_pro_comp, hlc_pro_seq])
doc.add([pro_comp, pro_seq])
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {pro_identity}'
fun_identity = 'fun_component_with_sequence'
hlc_fun_comp = functional_component(fun_identity, description=test_description)
fun_comp = sbol3.Component(fun_identity, sbol3.SBO_FUNCTIONAL_ENTITY, description=test_description)
hlc_doc.add(hlc_fun_comp)
doc.add(fun_comp)
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {fun_identity}'
pro_identity = 'promoter'
hlc_pro_comp, hlc_pro_seq = promoter(pro_identity, dna_sequence, description=test_description)
promoter_comp, promoter_seq = dna_component_with_sequence(pro_identity, dna_sequence, description=test_description)
promoter_comp.roles.append(sbol3.SO_PROMOTER)
hlc_doc.add([hlc_pro_comp, hlc_pro_seq])
doc.add([promoter_comp, promoter_seq])
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {pro_identity}'
rbs_identity = 'rbs'
hlc_rbs_comp, hlc_rbs_seq = rbs(rbs_identity, dna_sequence, description=test_description)
rbs_comp, rbs_seq = dna_component_with_sequence(rbs_identity, dna_sequence, description=test_description)
rbs_comp.roles. append(sbol3.SO_RBS)
hlc_doc.add([hlc_rbs_comp, hlc_rbs_seq])
doc.add([rbs_comp, rbs_seq])
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {rbs_identity}'
cds_identity = 'cds'
hlc_cds_comp, hlc_cds_seq = cds(cds_identity, dna_sequence, description=test_description)
cds_comp, cds_seq = dna_component_with_sequence(cds_identity, dna_sequence, description=test_description)
cds_comp.roles. append(sbol3.SO_CDS)
hlc_doc.add([hlc_cds_comp, hlc_cds_seq])
doc.add([cds_comp, cds_seq])
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {cds_identity}'
ter_identity = 'terminator'
hlc_ter_comp, hlc_ter_seq = terminator(ter_identity, dna_sequence, description=test_description)
ter_comp, ter_seq = dna_component_with_sequence(ter_identity, dna_sequence, description=test_description)
ter_comp.roles. append(sbol3.SO_TERMINATOR)
hlc_doc.add([hlc_ter_comp, hlc_ter_seq])
doc.add([ter_comp, ter_seq])
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {ter_identity}'
pse_identity = 'protein_stability_element'
hlc_pse_comp, hlc_pse_seq = protein_stability_element(pse_identity, dna_sequence, description=test_description)
pse_comp, pse_seq = dna_component_with_sequence(pse_identity, dna_sequence, description=test_description)
pse_comp.roles. append(tyto.SO.protein_stability_element)
hlc_doc.add([hlc_pse_comp, hlc_pse_seq])
doc.add([pse_comp, pse_seq])
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {pse_identity}'
gene_identity = 'gene'
hlc_gene_comp, hlc_gene_seq = gene(gene_identity, dna_sequence, description=test_description)
gene_comp, gene_seq = dna_component_with_sequence(gene_identity, dna_sequence, description=test_description)
gene_comp.roles. append(sbol3.SO_GENE)
hlc_doc.add([hlc_gene_comp, hlc_gene_seq])
doc.add([gene_comp, gene_seq])
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {gene_identity}'
operator_identity = 'operator'
hlc_ope_comp, hlc_ope_seq = operator(operator_identity, dna_sequence, description=test_description)
ope_comp, ope_seq = dna_component_with_sequence(operator_identity, dna_sequence, description=test_description)
ope_comp.roles. append(sbol3.SO_OPERATOR)
hlc_doc.add([hlc_ope_comp, hlc_ope_seq])
doc.add([ope_comp, ope_seq])
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {operator_identity}'
enr_identity = 'engineered_region'
enr_features = [pro_comp, rbs_comp, cds_comp, ter_comp]
hlc_enr_comp = engineered_region(enr_identity, enr_features, description=test_description)
enr_comp = sbol3.Component(enr_identity, sbol3.SBO_DNA, description=test_description)
enr_comp.roles.append(sbol3.SO_ENGINEERED_REGION)
for to_add in enr_features:
if isinstance(to_add, sbol3.Component):
to_add = sbol3.SubComponent(to_add)
enr_comp.features.append(to_add)
if len(enr_comp.features) > 1:
for i in range(len(enr_comp.features)-1):
enr_comp.constraints = [sbol3.Constraint(sbol3.SBOL_PRECEDES, enr_comp.features[i], enr_comp.features[i+1])]
else: pass
hlc_doc.add(hlc_enr_comp)
doc.add(enr_comp)
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {enr_identity}'
mrna_identity = 'mrna'
hlc_mrna_comp, hlc_mrna_seq = mrna(mrna_identity, rna_sequence, description=test_description)
mrna_comp, mrna_seq = rna_component_with_sequence(mrna_identity, rna_sequence, description=test_description)
mrna_comp.roles. append(sbol3.SO_MRNA)
hlc_doc.add([hlc_mrna_comp, hlc_mrna_seq])
doc.add([mrna_comp, mrna_seq])
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {mrna_identity}'
tf_identity = 'transcription_factor'
hlc_tf_comp, hlc_tf_seq = transcription_factor(tf_identity, rna_sequence, description=test_description)
tf_comp, tf_seq = protein_component_with_sequence(tf_identity, rna_sequence, description=test_description)
tf_comp.roles. append(sbol3.SO_TRANSCRIPTION_FACTOR)
hlc_doc.add([hlc_tf_comp, hlc_tf_seq])
doc.add([tf_comp, tf_seq])
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {tf_identity}'
strain_identity = 'strain'
hlc_strain_comp = strain(strain_identity, description=test_description)
strain_comp = functional_component(strain_identity, description=test_description)
strain_comp.roles.append(tyto.NCIT.Strain)
hlc_doc.add(hlc_strain_comp)
doc.add(strain_comp)
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {strain_identity}'
cds_ed_sch_identity = 'cds_ed_sch_identity'
hlc_cds_ed_sch_comp, _ = cds(cds_ed_sch_identity, dna_sequence, description=test_description)
cds_ed_sch_comp, _ = dna_component_with_sequence(cds_ed_sch_identity, dna_sequence, description=test_description)
cds_ed_sch_comp.roles. append(sbol3.SO_CDS)
ed_sch_identity = 'ed_simple_chemical'
ed_sch_definition = 'http://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:177976'
hlc_ed_sch = ed_simple_chemical(ed_sch_definition, description=test_description)
ed_sch = sbol3.ExternallyDefined([sbol3.SBO_SIMPLE_CHEMICAL], ed_sch_definition, description=test_description)
hlc_cds_ed_sch_comp.features.append(hlc_ed_sch)
cds_ed_sch_comp.features.append(ed_sch)
hlc_doc.add(hlc_cds_ed_sch_comp)
doc.add(cds_ed_sch_comp)
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {ed_sch_identity}'
cds_ed_pro_identity = 'cds_ed_pro_identity'
hlc_cds_ed_pro_comp, _ = cds(cds_ed_pro_identity, dna_sequence, description=test_description)
cds_ed_pro_comp, _ = dna_component_with_sequence(cds_ed_pro_identity, dna_sequence, description=test_description)
cds_ed_pro_comp.roles. append(sbol3.SO_CDS)
ed_pro_identity = 'ed_protein'
ed_pro_definition = 'https://www.uniprot.org/uniprot/P12747'
hlc_ed_pro = ed_protein(ed_pro_identity, description=test_description)
ed_pro = sbol3.ExternallyDefined([sbol3.SBO_PROTEIN], ed_pro_identity, description=test_description)
hlc_cds_ed_pro_comp.features.append(hlc_ed_pro)
cds_ed_pro_comp.features.append(ed_pro)
hlc_doc.add(hlc_cds_ed_pro_comp)
doc.add(cds_ed_pro_comp)
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {ed_pro_identity}'
peptone = sbol3.Component('Bacto_Peptone',
tyto.SBO.functional_entity,
name = 'Bacto_Peptone',
derived_from = ['https://www.thermofisher.com/order/catalog/product/211820'])
nacl = sbol3.Component('NaCl',
tyto.SBO.functional_entity,
name = 'NaCl',
derived_from = ['https://www.sigmaaldrich.com/AU/en/product/sigald/s9888'])
yeast_extract = sbol3.Component('Yeast_Extract',
tyto.SBO.functional_entity,
name = 'Yeast_Extract',
derived_from = ['https://www.thermofisher.com/order/catalog/product/212720'])
recipe = {
peptone:[10,tyto.OM.gram],
nacl:[5,tyto.OM.gram],
yeast_extract:[5,tyto.OM.gram]}
media_identity = 'media'
hlc_media_comp = media(media_identity, recipe, description = test_description)
media_comp = functional_component(media_identity, description = test_description)
media_comp.roles.append(tyto.NCIT.Media)
if recipe:
for key, value in recipe.items():
if isinstance(key, sbol3.Component):
key = sbol3.SubComponent(key)
key.measures.append(sbol3.Measure(value[0], value[1]))
media_comp.features.append(key)
hlc_doc.add(hlc_media_comp)
doc.add(media_comp)
assert doc_diff(doc, hlc_doc) == 0, f'Constructor Error: {media_identity}'
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3301864
|
<reponame>gyeongmoon/CNN-DM<filename>model/memoryModel.py
import time
import torch
import torch.nn as nn
from model import utils
from model import LwFLoss
from torchvision import models
from torch.autograd import Variable
############################################################
# Defining the CNN with Developmental Memory (CNN-DM) model.
# ----------------------------------------------------------
class Model(nn.Module):
def __init__(self, model_name, dataset, num_classes, GB=False, is_WWA=True, is_scale=True, guided_learning=True,
k_init=0.5, memory_test=False, is_fine_tuning=True, pretrained=True, network_name='memoryModel'):
super(Model, self).__init__()
# Set options.
self.GB = GB
self.is_WWA = is_WWA
self.is_scale = is_scale
self.guided_learning = guided_learning
self.memory_test = memory_test
prev_model = eval(model_name)(pretrained=True)
if not is_fine_tuning: # Feature-extraction.
for param in prev_model.parameters():
param.requires_grad = False
# Total number of classifiers.
self.num_classifiers = len(num_classes)
# Define the base model.
self.features = prev_model.features
self.fc6 = nn.Sequential(*list(prev_model.classifier.children())[:3])
self.fc7 = nn.Sequential(*list(prev_model.classifier.children())[3:6])
# self.classifier = nn.Linear(prev_model.classifier._modules['6'].in_features, num_classes).
for i, num_class in enumerate(num_classes):
classifier_name = 'classifier' + str(i)
setattr(self, classifier_name, nn.Linear(prev_model.classifier._modules['6'].in_features + 2048, num_class))
augClassifier_name = 'augClassifier' + str(i)
if GB: # WWA-CNN from "Growing a Brain" paper.
if is_WWA:
setattr(self, augClassifier_name + '_0', nn.Sequential(
nn.Linear(256 * 6 * 6, 1024),
nn.ReLU(inplace=True),
))
setattr(self, augClassifier_name + '_1', nn.Sequential(
nn.Linear(4096 + 1024, 2048),
nn.ReLU(inplace=True),
))
else: # WA-CNN
setattr(self, augClassifier_name, nn.Sequential(
nn.Linear(4096, 2048),
nn.ReLU(inplace=True),
))
else:
if is_WWA: # Both features and fc6.
if 'vgg16' in model_name:
setattr(self, augClassifier_name, nn.Linear(512 * 7 * 7 + 4096, 2048))
else:
setattr(self, augClassifier_name, nn.Linear(256 * 6 * 6 + 4096, 2048))
else: # Only fc6.
setattr(self, augClassifier_name, nn.Linear(4096, 2048))
if is_scale:
scale = torch.randn(4096 + 2048).fill_(20) # scale = [h^k, h^k+]
if guided_learning:
k = k_init
scale[:4096] = scale[:4096] * k
setattr(self, 'scale' + str(i), nn.Parameter(scale.cuda(), requires_grad=True))
# If continual_learning & pretrained & before a new classifier, load the saved model.
if (self.num_classifiers > 1) and pretrained and (i == self.num_classifiers - 2):
if 'imagenet' in dataset[i]:
setattr(self, classifier_name, prev_model.classifier[6])
else:
self.load_model(dataset[0:-1], model_name, network_name)
# Load the saved model.
def load_model(self, dataset, model_name, network_name):
saved_model_name = network_name + '_'
for data_name in dataset:
saved_model_name = saved_model_name + data_name + '_'
if 'vgg16' in model_name: # vgg16 model.
saved_model_name = saved_model_name + 'vgg'
else: # alexnet model.
saved_model_name = saved_model_name + 'model'
checkpoint = torch.load(saved_model_name)
self.load_state_dict(checkpoint['state_dict']) # Containing ['bias', 'weight'].
# Define parameters to be trained.
def params(self, lr, is_fine_tuning=True):
if is_fine_tuning:
if self.num_classifiers > 1:
if self.GB: # From here.
params = [{'params': self.features.parameters(), 'lr': 0.01 * lr},
{'params': self.fc6.parameters(), 'lr': 0.01 * lr},
{'params': self.fc7.parameters(), 'lr': 0.01 * lr}]
for i in range(self.num_classifiers):
if i != self.num_classifiers - 1:
params = params + [{'params': getattr(self, 'classifier' + str(i)).parameters(), 'lr': 0.001 * lr},
{'params': getattr(self, 'augClassifier' + str(i) + '_0').parameters(), 'lr': 0.001 * lr},
{'params': getattr(self, 'augClassifier' + str(i) + '_1').parameters(), 'lr': 0.001 * lr},
{'params': getattr(self, 'scale' + str(i)), 'lr': 0.001 * lr}]
else:
params = params + [{'params': getattr(self, 'classifier' + str(i)).parameters()},
{'params': getattr(self, 'augClassifier' + str(i) + '_0').parameters(), 'lr': 0.001 * lr},
{'params': getattr(self, 'augClassifier' + str(i) + '_1').parameters(), 'lr': 0.001 * lr},
{'params': getattr(self, 'scale' + str(i))}]
else:
params = [{'params': self.features.parameters(), 'lr': 0.01 * lr},
{'params': self.fc6.parameters(), 'lr': 0.01 * lr},
{'params': self.fc7.parameters(), 'lr': 0.05 * lr}]
for i in range(self.num_classifiers):
if i != self.num_classifiers - 1:
params = params + [{'params': getattr(self, 'classifier' + str(i)).parameters(), 'lr': 0.001 * lr},
{'params': getattr(self, 'augClassifier' + str(i)).parameters(), 'lr': 0.001 * lr},
{'params': getattr(self, 'scale' + str(i)), 'lr': 0.001 * lr}]
else:
params = params + [{'params': getattr(self, 'classifier' + str(i)).parameters()},
{'params': getattr(self, 'augClassifier' + str(i)).parameters()},
{'params': getattr(self, 'scale' + str(i))}]
else:
# To train the memory for imagenet..
# if (self.num_classifiers == 1) and 'imagenet' in dataset[0]:
# params = [{'params': getattr(self, 'classifier' + str(0)).parameters()},
# {'params': getattr(self, 'augClassifier' + str(0)).parameters()},
# {'params': getattr(self, 'scale' + str(0))}]
params = self.parameters()
else: # Feature-Extraction.
classifier_name = 'classifier' + str(self.num_classifiers - 1)
params = [{'params': getattr(self, classifier_name).parameters()}]
return params
def forward(self, x):
features = self.features(x)
features = features.view(features.size(0), -1)
fc6 = self.fc6(features)
fc7 = self.fc7(fc6)
outputs, augOutputs = [], []
for i in range(self.num_classifiers):
if self.GB:
if self.is_WWA: # Both features and fc6.
fc6_plus = getattr(self, 'augClassifier' + str(i) + '_0')(features)
fc7_plus = getattr(self, 'augClassifier' + str(i) + '_1')(torch.cat((fc6, fc6_plus), 1))
else: # Only fc6.
fc7_plus = getattr(self, 'augClassifier' + str(i))(fc6)
else:
if self.is_WWA: # Both features and fc6.
fc7_plus = getattr(self, 'augClassifier' + str(i))(torch.cat((features, fc6), 1))
else: # Only fc6.
fc7_plus = getattr(self, 'augClassifier' + str(i))(fc6) # Only fc6.
classifier_name = 'classifier' + str(i)
if self.is_scale:
# ParseNet Normalization.
norm_fc7 = fc7.div(torch.norm(fc7, 2, 1, keepdim=True).expand_as(fc7))
norm_fc7_plus = fc7_plus.div(torch.norm(fc7_plus, 2, 1, keepdim=True).expand_as(fc7_plus))
output = getattr(self, classifier_name)(torch.cat(
(getattr(self, 'scale' + str(i))[:4096].expand_as(norm_fc7) * norm_fc7,
getattr(self, 'scale' + str(i))[4096:].expand_as(norm_fc7_plus) * norm_fc7_plus), 1))
zero_inputs = Variable(torch.zeros(norm_fc7.size()).cuda(), requires_grad=False)
augOutput = getattr(self, classifier_name)(torch.cat(
(zero_inputs, getattr(self, 'scale' + str(i))[4096:].expand_as(norm_fc7_plus) * norm_fc7_plus), 1))
augOutputs = augOutputs + [augOutput]
else:
output = getattr(self, classifier_name)(torch.cat((fc7, fc7_plus), 1))
outputs = outputs + [output]
return outputs, augOutputs
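# Usage sketch (hedged): a minimal smoke test of the Model defined above. It
# assumes a torchvision-style constructor (e.g. alexnet) is importable under
# the name passed as model_name -- the class resolves it via eval -- and that a
# CUDA device is available, since the scale parameters are created on the GPU.
# The dataset/num_classes values are illustrative only.
def _example_forward():
    model = Model(model_name='alexnet',
                  dataset=['cubs'],      # hypothetical single-task setup
                  num_classes=[200],     # one classifier head with 200 classes
                  GB=False, is_WWA=True, is_scale=True,
                  guided_learning=True, pretrained=False).cuda()
    dummy = torch.randn(2, 3, 227, 227).cuda()
    outputs, augOutputs = model(dummy)
    print([o.shape for o in outputs], [a.shape for a in augOutputs])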
#####################
# Training the model.
def train_model(model, optimizer, scheduler, start_epoch, num_epochs, dataloaders, dataset_sizes, ld=0.02, zeta=1):
# Define dataloader & dataset_size
dataloader, dataset_size = dataloaders[model.num_classifiers-1], dataset_sizes[model.num_classifiers-1]
# Define Criterion for loss.
criterion = nn.CrossEntropyLoss()
LwF_criterion = LwFLoss.LwFLoss() # LwF_Loss
# Gen_output for LwFLoss.
prev_labels = {}
if model.num_classifiers > 1:
prev_labels = utils.gen_output(model, dataloader, prev_labels, network_name='memoryModel')
best_model_wts = model.state_dict()
torch.save({'model': best_model_wts}, 'curr_best_model_wts')
best_loss = 0.0
best_acc = 0.0
since = time.time()
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(start_epoch + epoch, start_epoch + num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'test']:
if phase == 'train':
scheduler.step()
model.train(True) # Set model to training mode
else:
model.train(False) # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for i, data in enumerate(dataloader[phase]):
# get the inputs
inputs, labels, _ = data
# wrap them in Variable
if torch.cuda.is_available():
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
# zero the parameter gradients
optimizer.zero_grad()
# forward
outputs, augOutputs = model(inputs)
_, preds = torch.max(outputs[-1].data, 1) # You can use "topk" function.
if phase == 'train':
LwF_Loss = 0
for k in range(model.num_classifiers - 1):
# wrap prev_labels in Variable for out of memory.
if torch.cuda.is_available():
prev_labels_i = Variable(prev_labels[k][i].cuda())
else:
prev_labels_i = prev_labels[k][i]
LwF_Loss = LwF_Loss + LwF_criterion(outputs[k], prev_labels_i)
# CrossEntropyLoss + Knowledge Distillation Loss.
if model.guided_learning:
loss = criterion(outputs[-1], labels) + zeta * criterion(augOutputs[-1], labels) + ld * LwF_Loss
else:
loss = criterion(outputs[-1], labels) + ld * LwF_Loss
else:
loss = criterion(outputs[-1], labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item()
running_corrects += torch.sum(preds == labels.data).item()
epoch_loss = running_loss / dataset_size[phase]
epoch_acc = running_corrects / dataset_size[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'test' and epoch_acc > best_acc:
best_loss = epoch_loss
best_acc = epoch_acc
best_model_wts = model.state_dict()
torch.save({'model': best_model_wts}, 'curr_best_model_wts')
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best test Loss: {:.4f} Acc: {:.4f}'.format(best_loss, best_acc))
# load the best model.
checkpoint = torch.load('curr_best_model_wts')
model.load_state_dict(checkpoint['model'])
return model
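# Usage sketch (hedged): wiring train_model to the parameter groups returned by
# Model.params(). dataloaders/dataset_sizes are assumed to be per-task lists of
# {'train': ..., 'test': ...} dicts, as implied by the indexing in train_model;
# the learning rate, step size and epoch count below are illustrative only.
def run_training(model, dataloaders, dataset_sizes, lr=0.001, num_epochs=30):
    import torch.optim as optim
    from torch.optim import lr_scheduler
    optimizer = optim.SGD(model.params(lr), lr=lr, momentum=0.9)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
    return train_model(model, optimizer, scheduler,
                       start_epoch=0, num_epochs=num_epochs,
                       dataloaders=dataloaders, dataset_sizes=dataset_sizes)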
#################
# Test the model.
def test_model(model, dataloaders, dataset_sizes, num_task):
# Define dataloader & dataset_size
dataloader, dataset_size = dataloaders[num_task], dataset_sizes[num_task]
# Define Criterion for loss.
criterion = nn.CrossEntropyLoss()
model.train(False)
running_loss = 0.0
running_corrects = 0
for i, data in enumerate(dataloader['test']):
inputs, labels, _ = data
if torch.cuda.is_available():
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
# forward
outputs, augOutputs = model(inputs)
if model.memory_test:
output = augOutputs[num_task]
else:
output = outputs[num_task]
_, preds = torch.max(output.data, 1) # To check Ac (Accuracy of model).
loss = criterion(output, labels)
# statistics
running_loss += loss.item()
running_corrects += torch.sum(preds == labels.data).item()
epoch_loss = running_loss / dataset_size['test']
epoch_acc = running_corrects / dataset_size['test']
print('Test Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))
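# Usage sketch (hedged): evaluating every task head after training; assumes the
# same per-task dataloaders/dataset_sizes lists used by train_model above.
def evaluate_all_tasks(model, dataloaders, dataset_sizes):
    for task_idx in range(model.num_classifiers):
        print('Task {}:'.format(task_idx))
        test_model(model, dataloaders, dataset_sizes, num_task=task_idx)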
|
StarcoderdataPython
|
1764086
|
# vim:fileencoding=utf-8:noet
""" python function """
# Copyright (c) 2010 - 2019, © Badassops LLC / <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*
#* File : bao_create.py
#* Description : python class to create or delete a VPC and it components
#* Author : <NAME> <<EMAIL>>
#* Version : 0.2
#* Date : Feb 21, 2019
#*
#* History :
#* Date: Author: Info:
#* Jun 1, 2010 LIS First Release
#* Mar 30, 2019 LIS refactored
import sys
from time import strftime
from bao_network import set_network_config, get_cidr
from bao_vpc import AwsVPC
from bao_eip import AwsEIP
from bao_subnet import AwsSubnet
from bao_internet_gateway import AwsInternetGateway
from bao_nat_gateway import AwsNatGateway
from bao_route_table import AwsRouteTable
class BaoCreate():
""" Class to create or delete a VPC and it components
"""
def __init__(self, **kwargs):
""" initial the object """
self.aws_conn = kwargs.get('aws_conn', {})
self.net_config = kwargs.get('net_config', {})
self.tag = kwargs.get('tag', {})
self.ipv6 = kwargs.get('ipv6', False)
self.vpc_conn = None
self.vpc_id = None
self.vpc_route_table = None
self.subnet_conn = None
self.subnets_fe_id = []
self.subnets_be_id = {}
self.int_gate_id = None
self.nat_gate_info = {}
def _create_vpc(self):
""" create the VPC and update the network configuration with the ipv6 detail """
# for create we only need to know the CIDR block first
vpc_cidr = get_cidr(dc_cfg=self.net_config)
self.vpc_conn = AwsVPC(aws_conn=self.aws_conn, tag=self.tag, cidr=vpc_cidr)
if not self.vpc_conn:
print('error AwsVPC\n')
sys.exit(-1)
# there should be no ID yet
if self.vpc_conn.get_vpc_id() is not None:
print('There is already a VPC with the given tag: {}, aborted.'.format(self.tag))
sys.exit(-1)
print('\t--< Start creating the VPC: {} >--'.format(strftime("%c")))
if self.vpc_conn.create() is None:
print('error creating the VPC\n')
sys.exit(-1)
# get the vpc id and ipv6 details and update the net_config dict
self.vpc_id = self.vpc_conn.get_vpc_id()
networkv6, cidrv6 = self.vpc_conn.get_vpc_cidr(ip_type='v6', split_cidr=True)
self.net_config.update(set_network_config(dc_cfg=self.net_config, \
dc_cidr_v6=cidrv6, dc_network_v6=networkv6))
# get the main route table
self.vpc_route_table = self.vpc_conn.get_main_route_table()
if self.vpc_route_table is None:
print('error getting main route of the VPC\n')
sys.exit(-1)
def _create_subnets(self):
""" create the subnets and keep list of their id """
print('\t--< create the VPC\'s subnet >--')
self.subnet_conn = AwsSubnet(aws_conn=self.aws_conn, tag=self.tag, vpc_id=self.vpc_id)
if not self.subnet_conn:
print('error AwsSubnet\n')
sys.exit(-1)
subnet_position = 0
for _ in self.net_config['vpc_fe_subnets']:
subnet_id = self.subnet_conn.create_subnet(
zone_name=self.net_config['dc_zones_names'][subnet_position], \
subnet_cidr=self.net_config['vpc_fe_subnets'][subnet_position], \
subnet_cidrv6=self.net_config['vpc_fe_subnets_v6'][subnet_position], \
subnet_type='fe',
ipv6=self.ipv6
)
if subnet_id is None:
sys.exit(-1)
subnet_position += 1
self.subnets_fe_id.append(subnet_id)
subnet_position = 0
for _ in self.net_config['vpc_be_subnets']:
subnet_id = self.subnet_conn.create_subnet(
zone_name=self.net_config['dc_zones_names'][subnet_position], \
subnet_cidr=self.net_config['vpc_be_subnets'][subnet_position], \
subnet_cidrv6=self.net_config['vpc_be_subnets_v6'][subnet_position], \
subnet_type='be',
ipv6=self.ipv6
)
if subnet_id is None:
sys.exit(-1)
self.subnets_be_id[self.net_config['dc_zones_names'][subnet_position]] = {'subnet_id': subnet_id}
subnet_position += 1
def _create_internet_gateway(self):
""" create the internet gateway and attach to VPC """
print('\t--< create the internet gateway and attach to the VPC >--')
int_gate_conn = AwsInternetGateway(aws_conn=self.aws_conn, tag=self.tag, vpc_id=self.vpc_id)
if not int_gate_conn:
print('error AwsInternetGateway\n')
sys.exit(-1)
self.int_gate_id = int_gate_conn.create_internet_gateway()
if self.int_gate_id is None:
sys.exit(-1)
result = int_gate_conn.attach_internet_gateway()
if result is None:
sys.exit(-1)
def _create_nat_gateways(self):
""" create the NAT gateways and attach one to each fe-subnet with it own EIP """
# get the subnet ids
subnet_data_fe, _, _ = self.subnet_conn.get_subnet_info(fe_subnet=self.net_config['vpc_fe_subnets'], \
be_subnet=self.net_config['vpc_be_subnets'])
        print('\t--< create the NAT gateway and attach to each fe-subnet with its own EIP >--')
nat_gate_conn = AwsNatGateway(aws_conn=self.aws_conn, tag=self.tag)
if not nat_gate_conn:
print('error nat_gate_conn\n')
sys.exit(-1)
eip_conn = AwsEIP(aws_conn=self.aws_conn)
if not eip_conn:
print('error AwsEIP\n')
sys.exit(-1)
for subnet_id in subnet_data_fe:
zone_name = subnet_data_fe[subnet_id]['zone_name']
eip_id = eip_conn.create_eip(tag=self.tag + '-' + 'nat_gate' + '-' + zone_name)
if eip_id is None:
sys.exit(-1)
nat_gateway_id = nat_gate_conn.create_nat_gateway(eip_id=eip_id, subnet_id=subnet_id, \
tag=self.tag + '-' + zone_name)
if nat_gateway_id is None:
sys.exit(-1)
self.nat_gate_info[zone_name] = nat_gateway_id
def _create_routes(self):
"""
create the route for the fe-subnets
        create the route for the be-subnets, each subnet gets its own route and its own NAT gateway
"""
print('\t--< create the route for the fe-subnets >--')
route_conn = AwsRouteTable(aws_conn=self.aws_conn, vpc_id=self.vpc_id, tag=self.tag)
if not route_conn:
print('error AwsRouteTable\n')
sys.exit(-1)
if route_conn.create_fe_route_table(subnets_id=self.subnets_fe_id, \
internet_gateway=self.int_gate_id, main_route_table=self.vpc_route_table) is False:
sys.exit(1)
        print('\t--< create the route for the be-subnets, 1 route per subnet with its own NAT gateway >--')
for subnet in self.subnets_be_id:
zone_name = subnet
subnet_id = self.subnets_be_id[zone_name]['subnet_id']
nat_gate_id = self.nat_gate_info[zone_name]
if route_conn.create_be_route_table(subnet_id=subnet_id, \
nat_gateway=nat_gate_id, zone_name=zone_name) is False:
sys.exit(1)
def create(self):
""" create tge VPC and is components """
# start the creation process
self._create_vpc()
self._create_subnets()
self._create_internet_gateway()
self._create_nat_gateways()
self._create_routes()
def get_vpc_detail(self):
""" get the new vpc detail """
vpc_detail = {}
vpc_detail['vpc_id'] = self.vpc_id
vpc_detail['subnets_fe'] = self.subnets_fe_id
vpc_detail['subnets_be'] = self.subnets_be_id
vpc_detail['vpc_int_gate'] = self.int_gate_id
vpc_detail['vpc_nat_gate'] = self.nat_gate_info
vpc_detail['vpc_route_table'] = self.vpc_route_table
return vpc_detail
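# Usage sketch (hedged): a small driver around BaoCreate. aws_conn and net_config
# are assumed to be the connection object and network-configuration dict built
# elsewhere by the bao_* helpers; tag is the name prefix for created resources.
def build_vpc(aws_conn, net_config, tag, ipv6=True):
    vpc_builder = BaoCreate(aws_conn=aws_conn, net_config=net_config,
                            tag=tag, ipv6=ipv6)
    vpc_builder.create()
    detail = vpc_builder.get_vpc_detail()
    print('Created VPC {} with {} fe-subnets and {} be-subnets'.format(
        detail['vpc_id'], len(detail['subnets_fe']), len(detail['subnets_be'])))
    return detail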
|
StarcoderdataPython
|
3244387
|
# repo: bobo-care/bobo-care, gh_stars: 0
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Baby(models.Model):
name = models.CharField(max_length=255)
born = models.DateField()
class Meta:
verbose_name_plural = "babies"
def __str__(self):
return self.name
class Guardian(models.Model):
class GuardianStatus(models.TextChoices):
NEW = 'NEW', 'New'
INVITATION_SENT = 'INVITATION_SENT', 'Invitation sent'
REJECTED = 'REJECTED', 'Rejected'
ACTIVE = 'ACTIVE', 'Active'
owner = models.ForeignKey(User,
on_delete=models.DO_NOTHING,
null=True,
related_name='guardian_owner')
status = models.CharField(max_length=50, choices=GuardianStatus.choices, default='NEW')
email = models.CharField(max_length=255, null=True)
user = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)
baby = models.ForeignKey(Baby, on_delete=models.CASCADE)
def __str__(self):
return self.user.username
class Diaper(models.Model):
list_display = ('baby', 'time')
baby = models.ForeignKey(Baby, on_delete=models.CASCADE)
poop = models.BooleanField()
wet = models.BooleanField()
time = models.DateTimeField()
class Nap(models.Model):
baby = models.ForeignKey(Baby, on_delete=models.CASCADE)
start_time = models.DateTimeField()
end_time = models.DateTimeField(null=True)
class Feed(models.Model):
baby = models.ForeignKey(Baby, on_delete=models.CASCADE)
start_time = models.DateTimeField()
quantity = models.IntegerField(null=True)
unit = models.CharField(max_length=255, null=True)
food_type = models.CharField(max_length=255, null=True)
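# Usage sketch (hedged): how these models fit together. The helper below is
# illustrative only and not part of the original app; field names match the
# models above.
def record_feed(baby, quantity, unit='ml', food_type='formula'):
    from django.utils import timezone
    return Feed.objects.create(baby=baby, start_time=timezone.now(),
                               quantity=quantity, unit=unit, food_type=food_type)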
|
StarcoderdataPython
|
3310124
|
# filename: firmwares/models.py
# Copyright 2016 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import models
from users.models import User
MENDER_ARTIFACT = 3
class TargetDevice(models.Model):
full_name = models.CharField(max_length=80)
short_name = models.CharField(max_length=40)
class Distro(models.Model):
full_name = models.CharField(max_length=80)
short_name = models.CharField(max_length=40)
class BuildType(models.Model):
full_name = models.CharField(max_length=80)
class UnknownBuildTypeId(Exception):
"""Exception raised when attempting to get an unknow build type. """
class Firmware(models.Model):
DONE = 'done'
FAILED = 'failed'
BUILDING = 'building'
INITIALIZED = 'initialized'
TAR_GZ = 'tar.gz'
IMG_GZ = 'img.gz'
ART_MENDER = 'mender'
STATUS_CHOICES = (
(DONE, 'Done'),
(FAILED, 'Failed'),
(BUILDING, 'Building'),
(INITIALIZED, 'Initialized')
)
    FORMAT_CHOICES = (
(TAR_GZ, 'Old tar.gz'),
(IMG_GZ, 'New img.gz'),
(ART_MENDER, 'Mender artifact')
)
name = models.CharField(max_length=36)
user = models.ForeignKey(User)
status = models.CharField(
max_length=11,
choices=STATUS_CHOICES,
default=DONE,
)
started_at = models.DateTimeField(
auto_now_add=True,
editable=False,
blank=True,
null=True,
)
finished_at = models.DateTimeField(
blank=True,
null=True,
)
log = models.TextField(blank=True)
pro_only = models.BooleanField(
default=False
)
format = models.CharField(
max_length=6,
        choices=FORMAT_CHOICES,
default=TAR_GZ,
)
targetdevice = models.ForeignKey(
TargetDevice,
blank=True,
null=True,
)
distro = models.ForeignKey(
Distro,
blank=True,
null=True,
)
build_type = models.ForeignKey(
BuildType,
blank=True,
null=True,
)
notes = models.TextField(
default='',
)
    def set_build_type(self, build_type_id):
        # .get() would raise DoesNotExist instead of returning None, so use
        # .filter().first() to make the explicit None check below meaningful.
        self.build_type = BuildType.objects.filter(pk=build_type_id).first()
        if self.build_type is None:
            raise UnknownBuildTypeId('Unknown build_type_id {}'.format(build_type_id))
if self.build_type.id == MENDER_ARTIFACT:
self.format = Firmware.ART_MENDER
else:
self.format = Firmware.IMG_GZ
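# Usage sketch (hedged): creating a firmware build record and resolving its build
# type. The user object and build_type_id are assumed to exist already; the
# status transition shown is illustrative only.
def start_build(user, name, build_type_id):
    firmware = Firmware(name=name, user=user, status=Firmware.BUILDING)
    firmware.set_build_type(build_type_id)  # also picks the artifact format
    firmware.save()
    return firmware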
|
StarcoderdataPython
|
96554
|
from Button import Button
import pygame
class SmallButton(Button):
def __init__(self, position, value, label):
Button.__init__(self, position, value, label)
self.unPressedImage = pygame.transform.smoothscale(self.unPressedImage, (104, 32))
self.pressedImage = pygame.transform.smoothscale(self.pressedImage, (104, 32))
self.collisionArea = self.pressedImage.get_rect(topleft=position)
|
StarcoderdataPython
|