id | text | dataset_id
---|---|---
3266750
|
<filename>backend/massiliarp/migrations/0001_initial.py
# Generated by Django 3.2.7 on 2021-09-24 14:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CityPopulation',
fields=[
('name', models.CharField(max_length=40, primary_key=True, serialize=False, verbose_name='City name')),
('population', models.PositiveIntegerField(verbose_name='Population')),
],
),
migrations.CreateModel(
name='MassiliaSettings',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('turn', models.PositiveSmallIntegerField(verbose_name='Turn')),
('year', models.PositiveSmallIntegerField(verbose_name='Year')),
('taxation', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Taxation')),
('trade', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Trade')),
('polis_tributes', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Polis tributes')),
('miscellaneous', models.DecimalField(decimal_places=2, max_digits=5, verbose_name='Misc.')),
('garrison_upkeep', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Garrison upkeep')),
('balance', models.DecimalField(decimal_places=2, max_digits=7, verbose_name='Balance')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProfitableBuilding',
fields=[
('name', models.CharField(max_length=30, primary_key=True, serialize=False, verbose_name='Building name')),
('construction_cost', models.DecimalField(decimal_places=2, max_digits=5, verbose_name='Construction cost')),
('building_income', models.DecimalField(decimal_places=2, max_digits=4, verbose_name='Income')),
('settings', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='massiliarp.massiliasettings')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='NavyUnit',
fields=[
('name', models.CharField(max_length=25, primary_key=True, serialize=False, verbose_name='Building name')),
('recruitment_cost', models.DecimalField(decimal_places=2, max_digits=4, verbose_name='Recruitment cost')),
('upkeep_cost', models.DecimalField(decimal_places=2, max_digits=4, verbose_name='Unit upkeep')),
('units_recruited', models.PositiveIntegerField(verbose_name='Units recruited')),
('settings', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='massiliarp.massiliasettings')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MaintainableBuilding',
fields=[
('name', models.CharField(max_length=30, primary_key=True, serialize=False, verbose_name='Building name')),
('construction_cost', models.DecimalField(decimal_places=2, max_digits=5, verbose_name='Construction cost')),
('building_maintenance', models.DecimalField(decimal_places=2, max_digits=4, verbose_name='Maintenance expenses')),
('settings', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='massiliarp.massiliasettings')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BalanceSheet',
fields=[
('turn', models.PositiveSmallIntegerField(primary_key=True, serialize=False)),
('taxation', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Taxation')),
('trade', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Trade')),
('polis_tributes', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Polis tributes')),
('miscellaneous', models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True, verbose_name='Misc.')),
('army_upkeep', models.DecimalField(blank=True, decimal_places=2, max_digits=6, null=True, verbose_name='Army upkeep')),
('navy_upkeep', models.DecimalField(blank=True, decimal_places=2, max_digits=6, null=True, verbose_name='Navy upkeep')),
('garrison_upkeep', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Garrison upkeep')),
('infrastructure_maintenance', models.DecimalField(blank=True, decimal_places=2, max_digits=6, null=True, verbose_name='Infrastructure maintenance')),
('total_income', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True, verbose_name='Total income')),
('total_expenses', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True, verbose_name='Total expenses')),
('new_balance', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True, verbose_name='New balance')),
('archived', models.BooleanField(default=False, verbose_name='Archived')),
('settings', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='massiliarp.massiliasettings')),
],
),
migrations.CreateModel(
name='ArmyUnit',
fields=[
('name', models.CharField(max_length=25, primary_key=True, serialize=False, verbose_name='Building name')),
('recruitment_cost', models.DecimalField(decimal_places=2, max_digits=4, verbose_name='Recruitment cost')),
('upkeep_cost', models.DecimalField(decimal_places=2, max_digits=4, verbose_name='Unit upkeep')),
('units_recruited', models.PositiveIntegerField(verbose_name='Units recruited')),
('raised', models.BooleanField(default=False, verbose_name='Raised')),
('settings', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='massiliarp.massiliasettings')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='UniqueEvent',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='Event name')),
('event_type', models.CharField(choices=[('I', 'Income'), ('E', 'Expense')], max_length=1, verbose_name='Event type')),
('talents', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Talents')),
('turn', models.PositiveSmallIntegerField(verbose_name='Turn')),
('expired', models.BooleanField(default=False, verbose_name='Expired')),
('balance_sheet', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='massiliarp.balancesheet')),
],
options={
'unique_together': {('name', 'turn')},
},
),
]
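# For reference, a rough sketch of models that would generate a migration like this
# (reconstructed from the fields above; the real models.py is not shown here, so any
# abstract base classes and field ordering are assumptions):
#
#   class CityPopulation(models.Model):
#       name = models.CharField('City name', max_length=40, primary_key=True)
#       population = models.PositiveIntegerField('Population')
#
#   class UniqueEvent(models.Model):
#       name = models.CharField('Event name', max_length=50)
#       event_type = models.CharField('Event type', max_length=1,
#                                     choices=[('I', 'Income'), ('E', 'Expense')])
#       talents = models.DecimalField('Talents', max_digits=6, decimal_places=2)
#       turn = models.PositiveSmallIntegerField('Turn')
#       expired = models.BooleanField('Expired', default=False)
#       balance_sheet = models.ForeignKey('BalanceSheet', null=True, blank=True,
#                                         on_delete=models.CASCADE)
#
#       class Meta:
#           unique_together = ('name', 'turn')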
|
StarcoderdataPython
|
1773528
|
# coding: utf-8
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# <NAME> <<EMAIL>>,
# <<EMAIL>>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Sequana class to plot a CanvasJS linegraph from an embedded csv file.
"""
from sequana import logger
from sequana.plots.canvasjs_base import CanvasJS
class CanvasJSLineGraph(CanvasJS):
""" Class to create a CanvasJS linegraphe for an HTML page. It creates a
hidden pre section with your CSV. It is necessary embedded because browsers
forbid the reading of data stored locally. Your html page need CanvasJS and
PapaParse.
"""
def __init__(self, csv, html_id, x_column, y_columns):
""".. rubric:: constructor
:param str csv: data in CSV format.
:param str html_id: the ID used in your HTML. All generated functions
and elements are tagged with it.
:param str x_column: column used as the X axis of the plot.
:param list y_columns: columns used as the Y axis of the plot.
"""
super().__init__(html_id)
self.csv = csv.strip()
self.html_id = html_id
self.x_column = x_column
self.y_columns = y_columns
# create hidden csv
self.html_cjs = self._create_hidden_csv()
self.html_cjs += '<script type="text/javascript">{0}\n'.format(
self._create_js_csv_parser())
def _create_hidden_csv(self):
""" Return the HTML code and the CSV code for your hidden CSV section.
"""
html = '<pre id="{0}">{1}</pre>'.format(self.html_id, self.csv)
css = '<style>#{0}{{display:none}}</style>'.format(self.html_id)
return '{0}\n{1}\n'.format(html, css)
def _create_js_csv_parser(self):
""" Create the efficient javascript csv parser with PapaParse.
"""
# Create variable name
variable = ["data_{0}".format(name) for name in self.y_columns]
# Create variable for javascript array
init_var = ["var {0} = [];".format(name) for name in variable]
init_var = "\n".join(init_var)
# Fill arrays
fill_array = ["{0}.push({{x: curow.{1}, y: curow.{2}}});".format(v,
self.x_column, n) for n, v in
zip(self.y_columns, variable)]
fill_array = "\n".join(fill_array)
self.variables = ", ".join(variable)
function = """
function processData_{0}(csv) {{
{1}
var curow;
Papa.parse(csv, {{
comments: '#',
delimiter: ',',
header: true,
dynamicTyping: true,
error: function(reason) {{
console.log(reason);
}},
step: function(row){{
curow = row.data[0];
{2}
}},
complete: function(results) {{
drawChart_{0}({3});
}}
}});
}};
""".format(self.html_id, init_var, fill_array, self.variables)
self.data_section = [{'dataPoints': var} for var in variable]
return function
def set_axis_x(self, axis_attr=dict()):
""" Method to configure X axis of the line graph.
:param dict axis_attr: dictionary with canvasjs axisX
Attributes.
axisX: http://canvasjs.com/docs/charts/chart-options/axisx/
Example:
::
line_graph = CanvasJSLineGraph(csv, html_id, x_column, y_columns)
line_graph.set_axis_x({
'title': 'Title of X data',
'titleFontSize': 16,
'labelFontSize': 12,
'lineColor': '#5BC0DE',
'titleFontColor': '#5BC0DE',
'labelFontColor': '#5BC0DE'})
"""
self._set_axis("axisX", axis_attr)
def set_axis_y(self, axis_attr=dict()):
""" Method to configure first axis Y of the line graph.
:param dict axis_attr: dictionary with canvasjs axisY
Attributes.
axisY: http://canvasjs.com/docs/charts/chart-options/axisy/
Example:
::
line_graph = CanvasJSLineGraph(csv, html_id, x_column, y_columns)
line_graph.set_axis_y({'title': 'Title of Y data',
'titleFontSize': 16,
'labelFontSize': 12,
'lineColor': '#5BC0DE',
'titleFontColor': '#5BC0DE',
'labelFontColor': '#5BC0DE'})
"""
self._set_axis('axisY', axis_attr)
def set_axis_y2(self, axis_attr=dict()):
""" Method to configure second axis Y of the line graph.
:param dict axis_attr: dictionary with canvasjs axisY
Attributes.
axisY: http://canvasjs.com/docs/charts/chart-options/axisy/
Example:
::
line_graph = CanvasJSLineGraph(csv, html_id, x_column, y_columns)
line_graph.set_axis_y2({
'title': 'Title of Y data',
'titleFontSize': 16,
'labelFontSize': 12,
'lineColor': '#5BC0DE',
'titleFontColor': '#5BC0DE',
'labelFontColor': '#5BC0DE'})
"""
self._set_axis('axisY2', axis_attr)
def create_canvasjs(self):
""" Method to convert all section as javascript function.
Return a string which contains command line to launch generation of plot,
js function to create CanvasJS object and the html div that contains
CanvasJS plot.
"""
js = self.html_cjs
# js function to create the chart container
js_canvas = """
function drawChart_{0}({1}) {{
console.log("Start drawChart");
var chart = new CanvasJS.Chart("chartContainer_{0}",{2}
);
chart.render()
}};
"""
canvas_attr = super().create_canvas_js_object()
js += js_canvas.format(self.html_id, self.variables, canvas_attr)
# js command to run the CanvasJS
js += """
$(document).ready(function(){{
var csv_{0} = document.getElementById('{0}').innerText;
processData_{0}(csv_{0});
}});
</script>
""".format(self.html_id)
js += self.create_div_chart_container("height: 450px; width: 100%;")
return js
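# Hypothetical usage sketch (not from the Sequana sources): the page embedding the
# returned fragment is assumed to already load jQuery, CanvasJS and PapaParse, and
# the helpers inherited from CanvasJS are assumed to behave as their names suggest.
# CSV content and column names are illustrative only.
#
#   csv_data = "position,coverage\n1,10\n2,12\n3,9"
#   graph = CanvasJSLineGraph(csv_data, "cov", x_column="position", y_columns=["coverage"])
#   graph.set_axis_x({"title": "position"})
#   graph.set_axis_y({"title": "coverage"})
#   html_fragment = graph.create_canvasjs()  # string to embed in the report page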
|
StarcoderdataPython
|
190608
|
<filename>NC_optical_properties_v_alt_variable_coating.py<gh_stars>1-10
from pymiecoated import Mie
import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
import mysql.connector
import math
import matplotlib.pyplot as plt
import matplotlib.colors
import calendar
from scipy.optimize import curve_fit
cloud_droplet_conc = 0.5
wavelength = 550 #nm
rBC_RI = complex(2.26,1.26)
min_coat = 0 #assumed minimum coating thickness for particles with LEO failure or outside of detection range = 0
max_coat = 100 #assumed maximum coating thickness for particles with LEO failure or outside of detection range = 100
savefig = False
show_distr_plots = False
#alt parameters
min_alt = 0
max_alt =6000
alt_incr = 1000#800
#distr parameters
min_BC_VED = 80
max_BC_VED = 220
bin_incr = 10
flight_times = {
#'science 1' : [datetime(2015,4,5,9,43),datetime(2015,4,5,13,48),15.6500, 78.2200, 'Longyearbyen (sc1)'] , #longyearbyen
#'ferry 1' : [datetime(2015,4,6,9,0),datetime(2015,4,6,11,0),15.6500, 78.2200] ,
#'ferry 2' : [datetime(2015,4,6,15,0),datetime(2015,4,6,18,0),-16.6667, 81.6000] ,
'science 2' : [datetime(2015,4,7,16,31),datetime(2015,4,7,20,48),-62.338, 82.5014,'Alert (sc2-5)'] , #Alert
'science 3' : [datetime(2015,4,8,13,51),datetime(2015,4,8,16,43),-62.338, 82.5014,'Alert (sc2-5)'] , #Alert
'science 4' : [datetime(2015,4,8,17,53),datetime(2015,4,8,21,22),-70.338, 82.5014,'Alert (sc2-5)'] , #Alert
'science 5' : [datetime(2015,4,9,13,50),datetime(2015,4,9,17,47),-62.338, 82.0,'Alert (sc2-5)'] , #Alert
##'ferry 3' : [datetime(2015,4,10,14,0),datetime(2015,4,10,17,0),-75.338, 81] ,
'science 6' : [datetime(2015,4,11,15,57),datetime(2015,4,11,21,16),-90.9408, 80.5,'Eureka (sc6-7)'] , #eureka
'science 7' : [datetime(2015,4,13,15,14),datetime(2015,4,13,20,52),-95, 80.1,'Eureka (sc6-7)'] , #eureka
#'science 8' : [datetime(2015,4,20,15,49),datetime(2015,4,20,19,49),-133.7306, 67.1,'Inuvik (sc8-10)'], #inuvik
#'science 9' : [datetime(2015,4,20,21,46),datetime(2015,4,21,1,36),-133.7306, 69.3617,'Inuvik (sc8-10)'] , #inuvik
#'science 10' : [datetime(2015,4,21,16,07),datetime(2015,4,21,21,24),-131, 69.55,'Inuvik (sc8-10)'], #inuvik
}
#database connection
cnx = mysql.connector.connect(user='root', password='<PASSWORD>', host='localhost', database='black_carbon')
cursor = cnx.cursor()
#define methods
def lognorm(x_vals, A, w, xc):
return A/(np.sqrt(2*math.pi)*w*x_vals)*np.exp(-(np.log(x_vals/xc))**2/(2*w**2))
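#lognorm is a three-parameter lognormal, A/(sqrt(2*pi)*w*x) * exp(-(ln(x/xc))**2/(2*w**2)),
#fitted to the binned, log-normalized concentrations; fit_distrs later reports
#sigma = exp(w) and takes Dg as the numerically located peak of the fitted curve (see find_dg)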
def MieCalc(wavelength,core_dia,coat_th):
mie = Mie()
wl = wavelength
core_rad = core_dia/2 #nm
shell_thickness = coat_th
size_par = 2*math.pi*core_rad*1/wl
#Refractive indices PSL 1.59-0.0i rBC 2.26- 1.26i shell 1.5-0.0i
core_RI = rBC_RI
shell_rad = core_rad + shell_thickness
shell_RI = complex(1.5,0.0)
mie.x = 2*math.pi*core_rad/wl
mie.m = core_RI
mie.y = 2*math.pi*shell_rad/wl
mie.m2 = shell_RI
abs = mie.qabs()
abs_xs_nm2 = abs*math.pi*shell_rad**2 #in nm^2
abs_xs = abs_xs_nm2*1e-14 #in cm^2
sca = mie.qsca()
sca_xs_nm2 = sca*math.pi*shell_rad**2 #in nm^2
sca_xs = sca_xs_nm2*1e-14 #in cm^2
ext_xs = sca_xs+abs_xs
return [abs_xs,sca_xs,ext_xs]
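#example call (illustrative sizes only): absorption, scattering and extinction
#cross-sections in cm^2 for a 170 nm rBC core with a 50 nm coating at 550 nm:
# abs_xs, sca_xs, ext_xs = MieCalc(wavelength, 170, 50)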
def find_dg(A, w, xc):
fit_vals = {}
for bin_val in range (35,1000,1):
fit_val = lognorm(bin_val, A, w, xc)
fit_vals[bin_val] = fit_val
return max(fit_vals.iterkeys(), key=(lambda key: fit_vals[key]))
def fraction_sampled(A, w, xc):
fit_vals = []
fit_vals_m = []
for bin_val in range (35,1000,1):
fit_val = lognorm(bin_val, A, w, xc)
fit_vals.append(fit_val)
full_distr = np.sum(fit_vals)
for bin_val in range (min_BC_VED,max_BC_VED,1):
fit_val_m = lognorm(bin_val, A, w, xc)
fit_vals_m.append(fit_val_m)
sampled_distr = np.sum(fit_vals_m)
return sampled_distr/full_distr
def sampling_time_at_alt(start_time,end_time,min_alt,max_alt):
cursor.execute(('''SELECT ftd.UNIX_UTC_ts, ftd.alt
FROM polar6_flight_track_details ftd
JOIN polar6_fssp_cloud_data fssp on ftd.fssd_id = fssp.id
WHERE ftd.UNIX_UTC_ts >= %s and ftd.UNIX_UTC_ts < %s and fssp.FSSPTotalConc <=%s and ftd.alt >=%s and ftd.alt < %s ORDER BY ftd.UNIX_UTC_ts'''),
(start_time,end_time,cloud_droplet_conc,min_alt,max_alt))
alt_data = cursor.fetchall()
first_line = True
temp_list = []
interval_list = []
for line in alt_data:
current_ts = line[0]
alt = line[1]
if first_line == True:
prior_ts = current_ts
first_line = False
if (current_ts - prior_ts) <= 1:
temp_list.append(current_ts)
prior_ts = current_ts
else:
time_interval = (temp_list[-1]-temp_list[0]) #in sec
interval_list.append(time_interval)
temp_list = []
temp_list.append(current_ts)
prior_ts = current_ts
#add in last interval
if len(temp_list):
time_interval = (temp_list[-1]-temp_list[0]) #in sec
interval_list.append(time_interval)
total_sampling_time = np.sum(interval_list)
else:
total_sampling_time = 0
return total_sampling_time
def assign_min_max_coat(VED):
Dp_Dc = np.nan
optical_properties_max_coat = MieCalc(wavelength,VED,max_coat)
abs_xsec_max_coat = optical_properties_max_coat[0]
sca_xsec_max_coat = optical_properties_max_coat[1]
optical_properties_min_coat = MieCalc(wavelength,VED,min_coat)
abs_xsec_min_coat = optical_properties_min_coat[0]
sca_xsec_min_coat = optical_properties_min_coat[1]
return [Dp_Dc,abs_xsec_max_coat,sca_xsec_max_coat,abs_xsec_min_coat,sca_xsec_min_coat]
def assemble_bin_data(retrieved_records):
#set up data structure
LEO_successes = 0
LEO_failures = 0
bin_data = {
'mass':[],
'Dp_Dc':[],
'STP_correction_factor':[],
'sample_flow':[],
'abs_xsec_max_coat':[],
'sca_xsec_max_coat':[],
'abs_xsec_min_coat':[],
'sca_xsec_min_coat':[],
'abs_xsec_bare':[],
}
#parse each row in results
for row in retrieved_records:
mass = row[0]
coat = row[1]
LEO_amp = row[2]
sample_flow = row[3]
temperature = row[4] + 273.15 #convert to Kelvin
pressure = row[5]
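#VED: volume-equivalent diameter in nm from rBC mass in fg, assuming a void-free
#density of 1.8 g/cm3 (mass*1e-15 g / 1.8 -> volume in cm3 -> sphere diameter in cm -> *1e7 nm)
#STP_correction_factor rescales measured concentrations to standard temperature
#and pressure (273.15 K, 101325 Pa)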
VED = (((mass/(10**15*1.8))*6/math.pi)**(1/3.0))*10**7
STP_correction_factor = (101325/pressure)*(temperature/273.15)
#successful LEO fitting and positive coating
if (0 <= LEO_amp < 45000):
LEO_successes += 1
if coat >0:
optical_properties = MieCalc(wavelength,VED,coat)
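#note: 'bin' is the module-level loop variable (lower edge of the current VED bin)
#set in the retrieval loop at the end of this script; only cores in the 160-180 nm
#bins get a Dp/Dc value here, all others are set to NaN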
if 160 <= bin < 180:
Dp_Dc = (VED+2*coat)/VED
else:
Dp_Dc = np.nan
opt_results = [Dp_Dc,optical_properties[0],optical_properties[1],optical_properties[0],optical_properties[1]] #[Dp_Dc,abs_xsec_max_coat,sca_xsec_max_coat,abs_xsec_min_coat,sca_xsec_min_coat]
else:
opt_results = assign_min_max_coat(VED)
#failed LEO fitting or neg coating, we calc a max and min case for these
else:
LEO_failures += 1
opt_results = assign_min_max_coat(VED)
bare_optical_properties = MieCalc(wavelength,VED,0.0)
abs_xsec_bare = bare_optical_properties[0]
bin_data['mass'].append(mass)
bin_data['Dp_Dc'].append(opt_results[0])
bin_data['STP_correction_factor'].append(STP_correction_factor)
bin_data['sample_flow'].append(sample_flow)
bin_data['abs_xsec_max_coat'].append(opt_results[1])
bin_data['sca_xsec_max_coat'].append(opt_results[2])
bin_data['abs_xsec_min_coat'].append(opt_results[3])
bin_data['sca_xsec_min_coat'].append(opt_results[4])
bin_data['abs_xsec_bare'].append(abs_xsec_bare)
bin_data_list = [bin_data,LEO_successes,LEO_failures]
return bin_data_list
def calc_bin_optical_properties(bin_start, binning_incr, bin_data_list,binned_data):
bin_data = bin_data_list[0]
LEO_successes = bin_data_list[1]
LEO_failures = bin_data_list[2]
bin_mid = bin_start + (binning_incr/2)
total_mass = np.sum(bin_data['mass']) #in fg
mean_sample_flow = np.nanmean(bin_data['sample_flow']) #in cm3/min
mean_STP_correction_factor = np.nanmean(bin_data['STP_correction_factor']) #no units
total_sampling_time = sampling_time_at_alt(UNIX_start_time,UNIX_end_time,lower_alt,(lower_alt + alt_incr))
total_vol = mean_sample_flow*total_sampling_time/60 #flow is per minute and time is in seconds, so divide by 60; result is in cc
mass_conc = (total_mass/total_vol)*mean_STP_correction_factor #in fg/cm3
numb_conc = (len(bin_data['mass'])/total_vol)*mean_STP_correction_factor #in #/cm3
bin_mass_conc_norm = mass_conc/(math.log((bin_start+binning_incr))-math.log(bin_start)) #normalize mass
bin_numb_conc_norm = numb_conc/(math.log((bin_start+binning_incr))-math.log(bin_start)) #normalize number
mean_Dp_Dc = np.nanmean(bin_data['Dp_Dc'])
bin_vol_abs_coeff_max = np.nanmean(bin_data['abs_xsec_max_coat']) * bin_numb_conc_norm #in cm-1 (cm2 * /cm3)
bin_vol_sca_coeff_max = np.nanmean(bin_data['sca_xsec_max_coat']) * bin_numb_conc_norm #in cm-1
bin_vol_abs_coeff_min = np.nanmean(bin_data['abs_xsec_min_coat']) * bin_numb_conc_norm #in cm-1
bin_vol_sca_coeff_min = np.nanmean(bin_data['sca_xsec_min_coat']) * bin_numb_conc_norm #in cm-1
bin_vol_abs_coeff_bare = np.nanmean(bin_data['abs_xsec_bare']) * bin_numb_conc_norm #in cm-1 - need to calc absorption enhancement
fraction_successful = LEO_successes*1.0/(LEO_successes+LEO_failures)
binned_data[bin_mid] = {
'bin_mass_conc':mass_conc,
'bin_numb_conc':numb_conc,
'bin_Dp_Dc':mean_Dp_Dc,
'bin_mass_conc_norm': bin_mass_conc_norm ,
'bin_numb_conc_norm': bin_numb_conc_norm ,
'bin_vol_abs_coeff_max': bin_vol_abs_coeff_max,
'bin_vol_sca_coeff_max': bin_vol_sca_coeff_max,
'bin_vol_abs_coeff_min': bin_vol_abs_coeff_min,
'bin_vol_sca_coeff_min': bin_vol_sca_coeff_min,
'bin_vol_abs_coeff_bare':bin_vol_abs_coeff_bare,
'fraction_successful': fraction_successful,
}
return binned_data
def fit_distrs(binned_data_dict,bin_increment):
#create bin and step size for extrapolating to the full distr
fit_bins = []
for x in range(50,1000,bin_increment):
fit_bins.append(x)
fit_concs = {}
bin_midpoints = binned_data_dict.keys()
number_concs_norm = []
mass_concs_norm = []
LEO_fractions = []
#fit the number binned data so we can extrapolate outside of the detection range
for key in bin_midpoints:
number_concs_norm.append(binned_data_dict[key]['bin_numb_conc_norm'])
mass_concs_norm.append(binned_data_dict[key]['bin_mass_conc_norm'])
LEO_fractions.append(binned_data_dict[key]['fraction_successful'])
try:
popt, pcov = curve_fit(lognorm, np.array(bin_midpoints), np.array(number_concs_norm))
integrated_number = 0
for bin_val in fit_bins:
fit_number_val = lognorm(bin_val, popt[0], popt[1], popt[2])
fit_concs[bin_val] = [fit_number_val]
un_normed_numb = fit_number_val*(math.log((bin_val+bin_increment/2))-math.log(bin_val-bin_increment/2))
integrated_number = integrated_number + un_normed_numb
Dg_number = find_dg(popt[0], popt[1], popt[2])
sigma_number = math.exp(popt[1])
print Dg_number
except Exception,e:
integrated_number = np.nan
for bin_val in fit_bins:
fit_concs[bin_val]= [np.nan]
print str(e)
print 'number fit failure'
#fit the mass binned data so we can extrapolate outside of the detection range
try:
popt, pcov = curve_fit(lognorm, np.array(bin_midpoints), np.array(mass_concs_norm))
for bin_val in fit_bins:
fit_mass_val = lognorm(bin_val, popt[0], popt[1], popt[2])
fit_concs[bin_val].append(fit_mass_val)
Dg_mass_result = find_dg(popt[0], popt[1], popt[2])
fraction_mass_meas = fraction_sampled(popt[0], popt[1], popt[2])
sigma_mass_result = math.exp(popt[1])
print Dg_mass_result
except Exception,e:
Dg_mass_result = np.nan
sigma_mass_result = np.nan
fraction_mass_meas = np.nan
for bin_val in fit_bins:
fit_concs[bin_val].append(np.nan)
print str(e)
print 'mass fit failure'
fitted_data = []
for key,val in fit_concs.iteritems():
fitted_data.append([key, val[0],val[1]])
fitted_data.sort()
return [fitted_data,Dg_mass_result,sigma_mass_result,fraction_mass_meas,integrated_number]
def plot_distrs(fitted_concs,binned_data_results):
#####plotting distrs if desired
#fitted data
fitted_bin_mids = [row[0] for row in fitted_concs]
fit_binned_number_conc_vals = [row[1] for row in fitted_concs]
fit_binned_mass_conc_vals = [row[2] for row in fitted_concs]
#Leo successful fraction data
LEO_pts = []
binned_distrs = []
for bin_midpt in binned_data_results:
binned_distrs.append([bin_midpt,binned_data_results[bin_midpt]['bin_numb_conc_norm'],binned_data_results[bin_midpt]['bin_mass_conc_norm'],binned_data_results[bin_midpt]['fraction_successful']])
LEO_fraction = binned_data_results[bin_midpt]['fraction_successful']
if LEO_fraction > 0.97:
LEO_pts.append(bin_midpt)
LEO_cutoff = min(LEO_pts or [np.nan])
bin_midpt = [row[0] for row in binned_distrs]
number_concs_norm = [row[1] for row in binned_distrs]
mass_concs_norm = [row[2] for row in binned_distrs]
LEO_fractions = [row[3] for row in binned_distrs]
#plots
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(bin_midpt,number_concs_norm, color = 'k',marker='o')
ax1.plot(fitted_bin_mids,fit_binned_number_conc_vals, color = 'k',marker=None, label = 'number')
ax1.scatter(bin_midpt,mass_concs_norm, color = 'b',marker='o')
ax1.plot(fitted_bin_mids,fit_binned_mass_conc_vals, color = 'b',marker=None, label = 'mass')
ax1.set_xscale('log')
ax1.set_xlabel('rBC core VED (nm)')
ax1.set_ylabel('d/dlog(VED)')
ax1.set_ylim(0,35)
ax1.set_xlim(40,700)
plt.legend()
ax2 = ax1.twinx()
ax2.scatter(bin_midpt,LEO_fractions, color = 'r',marker='s')
ax2.set_ylim(0,1)
ax2.set_xlim(40,700)
ax2.set_ylabel('fraction successful LEO fits', color='r')
ax2.axvspan(min_BC_VED, LEO_cutoff, alpha=0.15, color='yellow')
ax2.axvspan(LEO_cutoff, max_BC_VED, alpha=0.15, color='green')
ax2.fill([160,180,180,160],[0,0,1,1], fill=False, hatch='\\',color ='grey')
ax2.fill([130,220,220,130],[0,0,1,1], fill=False, hatch='//',color ='grey')
ax2.axvspan(35, min_BC_VED, alpha=0.15, color='grey')
ax2.axvspan(max_BC_VED, 1000, alpha=0.15, color='grey')
ax2.set_xticks([40,50,60,80,100,150,200,300,400,600])
ax2.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.show()
def add_vals_outside_range(fit_concs,binned_data_dict):
for row in fit_concs:
bin_mid = row[0]
if bin_mid > max_BC_VED or bin_mid < min_BC_VED:
bin_mass_conc_norm = row[2]
bin_numb_conc_norm = row[1]
fitted_optical_properties_max = MieCalc(wavelength,bin_mid,max_coat) #resturns [abs xsec, sca xsec, ext xsec]
fitted_optical_properties_min = MieCalc(wavelength,bin_mid,min_coat)
fitted_optical_properties_bare = MieCalc(wavelength,bin_mid,0.0)
bin_vol_abs_coeff_max = fitted_optical_properties_max[0] * bin_numb_conc_norm #in cm-1 (cm2 * /cm3)
bin_vol_sca_coeff_max = fitted_optical_properties_max[1] * bin_numb_conc_norm #in cm-1
bin_vol_abs_coeff_min = fitted_optical_properties_min[0] * bin_numb_conc_norm #in cm-1
bin_vol_sca_coeff_min = fitted_optical_properties_min[1] * bin_numb_conc_norm #in cm-1
bin_vol_abs_coeff_bare = fitted_optical_properties_bare[0] * bin_numb_conc_norm #in cm-1
binned_data_dict[bin_mid] = {
'bin_mass_conc': 0,
'bin_Dp_Dc': np.nan,
'bin_mass_conc_norm': bin_mass_conc_norm ,
'bin_numb_conc_norm': bin_numb_conc_norm ,
'bin_vol_abs_coeff_max': bin_vol_abs_coeff_max,
'bin_vol_sca_coeff_max': bin_vol_sca_coeff_max,
'bin_vol_abs_coeff_min': bin_vol_abs_coeff_min,
'bin_vol_sca_coeff_min': bin_vol_sca_coeff_min,
'bin_vol_abs_coeff_bare':bin_vol_abs_coeff_bare,
#'fraction_successful': fraction_successful,
}
return binned_data_dict
#calc optical parameters for each altitude
def calc_opti_params(binned_data_dict,Dg_mass,sigma_mass,plot_data,fraction_measured,BC_frac):
mass_concs_raw_sum = 0
mass_concs_sum = 0
vol_abs_coeff_sum_max = 0
vol_sca_coeff_sum_max = 0
vol_abs_coeff_sum_min = 0
vol_sca_coeff_sum_min = 0
vol_abs_coeff_sum_bare = 0
Dp_Dcs = []
for bin_mid in binned_data_dict: #integrate
Dp_Dcs.append(binned_data_dict[bin_mid]['bin_Dp_Dc'])
mass_concs_raw_sum = mass_concs_raw_sum + binned_data_dict[bin_mid]['bin_mass_conc']
mass_concs_sum = mass_concs_sum + binned_data_dict[bin_mid]['bin_mass_conc_norm']
vol_abs_coeff_sum_max = vol_abs_coeff_sum_max + binned_data_dict[bin_mid]['bin_vol_abs_coeff_max']
vol_sca_coeff_sum_max = vol_sca_coeff_sum_max + binned_data_dict[bin_mid]['bin_vol_sca_coeff_max']
vol_abs_coeff_sum_min = vol_abs_coeff_sum_min + binned_data_dict[bin_mid]['bin_vol_abs_coeff_min']
vol_sca_coeff_sum_min = vol_sca_coeff_sum_min + binned_data_dict[bin_mid]['bin_vol_sca_coeff_min']
vol_abs_coeff_sum_bare = vol_abs_coeff_sum_bare + binned_data_dict[bin_mid]['bin_vol_abs_coeff_bare']
Dp_Dc_mean = np.nanmean(Dp_Dcs)
MAC_max = vol_abs_coeff_sum_max*(10**11)/mass_concs_sum
MAC_min = vol_abs_coeff_sum_min*(10**11)/mass_concs_sum
SSA_max = vol_sca_coeff_sum_max/(vol_abs_coeff_sum_max+vol_sca_coeff_sum_max)
SSA_min = vol_sca_coeff_sum_min/(vol_abs_coeff_sum_min+vol_sca_coeff_sum_min)
AE_max = vol_abs_coeff_sum_max/vol_abs_coeff_sum_bare
AE_min = vol_abs_coeff_sum_min/vol_abs_coeff_sum_bare
mass_conc_total = mass_concs_raw_sum/fraction_measured
#add overall data to dict
mean_alt = lower_alt + alt_incr/2
if mean_alt not in plot_data:
plot_data[mean_alt] = {
'mass_concs' :[],
'Dp_Dcs' :[],
'Dgs_mass' :[],
'numb_frac_w_BC':[],
'sigmas_mass' :[],
'MAC_maxs' :[],
'MAC_mins' :[],
'SSA_maxs' :[],
'SSA_mins' :[],
'AE_maxs' :[],
'AE_mins' :[],
}
plot_data[mean_alt]['Dgs_mass' ].append(Dg_mass)
plot_data[mean_alt]['Dp_Dcs' ].append(Dp_Dc_mean)
plot_data[mean_alt]['sigmas_mass'].append(sigma_mass)
plot_data[mean_alt]['mass_concs'].append(mass_conc_total)
plot_data[mean_alt]['numb_frac_w_BC'].append(BC_frac)
plot_data[mean_alt]['MAC_maxs' ].append(MAC_max)
plot_data[mean_alt]['MAC_mins' ].append(MAC_min)
plot_data[mean_alt]['SSA_maxs' ].append(SSA_max)
plot_data[mean_alt]['SSA_mins' ].append(SSA_min)
plot_data[mean_alt]['AE_maxs' ].append(AE_max)
plot_data[mean_alt]['AE_mins' ].append(AE_min)
return plot_data
##start script
plot_data={}
for flight in flight_times:
print flight
lower_alt = min_alt
start_time = flight_times[flight][0]
end_time = flight_times[flight][1]
UNIX_start_time = calendar.timegm(start_time.utctimetuple())
UNIX_end_time = calendar.timegm(end_time.utctimetuple())
print
while (lower_alt + alt_incr) <= max_alt:
binned_data = {}
print lower_alt, lower_alt + alt_incr
for bin in range(min_BC_VED,max_BC_VED,bin_incr):
#retrieve the data for this bin
cursor.execute(('''SELECT bc.rBC_mass_fg, bc.coat_thickness_nm_jancalib, bc.LF_scat_amp, hk.sample_flow, ftd.temperature_C, ftd.BP_Pa
FROM polar6_coating_2015 bc
JOIN polar6_fssp_cloud_data fssp on bc.fssp_id = fssp.id
JOIN polar6_flight_track_details ftd on bc.flight_track_data_id = ftd.id
JOIN polar6_hk_data_2015 hk on bc.hk_data_id = hk.id
WHERE bc.rBC_mass_fg IS NOT NULL and bc.UNIX_UTC_ts >= %s and bc.UNIX_UTC_ts < %s and bc.particle_type = %s and fssp.FSSPTotalConc <=%s and ftd.alt >=%s and ftd.alt < %s and (POW(bc.rBC_mass_fg,(1/3.0))*101.994391398)>=%s and (POW( bc.rBC_mass_fg,(1/3.0))*101.994391398) <%s and hk.sample_flow >%s and hk.sample_flow <%s ORDER BY bc.UNIX_UTC_ts'''),
(UNIX_start_time,UNIX_end_time,'incand',cloud_droplet_conc,lower_alt, (lower_alt + alt_incr),bin, bin+bin_incr,100,200))
coat_data = cursor.fetchall()
#assemble the data for this bin
bin_data = assemble_bin_data(coat_data)
#calc the overall properties for this bin and add them to the dictionary for this alt
binned_data = calc_bin_optical_properties(bin,bin_incr,bin_data,binned_data)
#for this altitude, fit the mass and number distributions
distr_fit_results = fit_distrs(binned_data,bin_incr)
fit_conc_values = distr_fit_results[0]
Dg_mass = distr_fit_results[1]
sigma_mass = distr_fit_results[2]
fraction_mass_meas = distr_fit_results[3]
integrated_SP2_number = distr_fit_results[4]
if show_distr_plots == True:
plot_distrs(fit_conc_values,binned_data)
#add values from outside detection range to the binned data
binned_data = add_vals_outside_range(fit_conc_values,binned_data)
#get UHSAS values
cursor.execute(('''SELECT AVG(uh.number_per_sccm)
FROM polar6_uhsas_total_number uh
JOIN polar6_fssp_cloud_data fssp on uh.fssp_id = fssp.id
JOIN polar6_flight_track_details ftd on uh.flight_track_data_id = ftd.id
WHERE uh.UNIX_UTC_ts >= %s and uh.UNIX_UTC_ts < %s and fssp.FSSPTotalConc <=%s and ftd.alt >=%s and ftd.alt < %s'''),
(UNIX_start_time,UNIX_end_time,cloud_droplet_conc,lower_alt, (lower_alt + alt_incr)))
uhsas_data = cursor.fetchall()
uhsas_number_conc = uhsas_data[0][0]
if uhsas_number_conc == None:
uhsas_number_conc = np.nan
numb_frac_w_BC = integrated_SP2_number/uhsas_number_conc
#calculate optical parameters for this altitude and add them to the overall dict
plot_data = calc_opti_params(binned_data,Dg_mass,sigma_mass,plot_data,fraction_mass_meas,numb_frac_w_BC)
lower_alt += alt_incr
cnx.close()
print 'next step . . .'
## make plots
plot_data_list = []
for mean_alt in plot_data:
mean_Dg = np.nanmean(plot_data[mean_alt]['Dgs_mass'])
neg_err_Dg = mean_Dg - np.nanmin(plot_data[mean_alt]['Dgs_mass'])
pos_err_Dg = np.nanmax(plot_data[mean_alt]['Dgs_mass']) - mean_Dg
mean_sigma = np.nanmean(plot_data[mean_alt]['sigmas_mass'])
neg_err_sigma = mean_sigma - np.nanmin(plot_data[mean_alt]['sigmas_mass'])
pos_err_sigma = np.nanmax(plot_data[mean_alt]['sigmas_mass']) - mean_sigma
mean_mass_conc = np.nanmean(plot_data[mean_alt]['mass_concs'])
neg_err_mass_conc = mean_mass_conc - np.nanmin(plot_data[mean_alt]['mass_concs'])
pos_err_mass_conc = np.nanmax(plot_data[mean_alt]['mass_concs']) - mean_mass_conc
Dp_Dc_mean = np.nanmean(plot_data[mean_alt]['Dp_Dcs'])
neg_err_Dp_Dc = Dp_Dc_mean - np.nanmin(plot_data[mean_alt]['Dp_Dcs'])
pos_err_Dp_Dc = np.nanmax(plot_data[mean_alt]['Dp_Dcs']) - Dp_Dc_mean
BC_frac_mean = np.nanmean(plot_data[mean_alt]['numb_frac_w_BC'])
neg_err_BC_frac = BC_frac_mean - np.nanmin(plot_data[mean_alt]['numb_frac_w_BC'])
pos_err_BC_frac = np.nanmax(plot_data[mean_alt]['numb_frac_w_BC']) - BC_frac_mean
mean_MAC_max = np.nanmean(plot_data[mean_alt]['MAC_maxs'])
mean_MAC_min = np.nanmean(plot_data[mean_alt]['MAC_mins'])
mean_SSA_max = np.nanmean(plot_data[mean_alt]['SSA_maxs'])
mean_SSA_min = np.nanmean(plot_data[mean_alt]['SSA_mins'])
mean_abs_e_max = np.nanmean(plot_data[mean_alt]['AE_maxs'])
mean_abs_e_min = np.nanmean(plot_data[mean_alt]['AE_mins'])
plot_data_list.append([mean_alt,mean_Dg,neg_err_Dg,pos_err_Dg,mean_sigma,neg_err_sigma,pos_err_sigma,mean_mass_conc,neg_err_mass_conc,pos_err_mass_conc,mean_MAC_max,mean_MAC_min,mean_SSA_max, mean_SSA_min,mean_abs_e_max,mean_abs_e_min,Dp_Dc_mean,neg_err_Dp_Dc,pos_err_Dp_Dc,BC_frac_mean,neg_err_BC_frac,pos_err_BC_frac])
plot_data_list.sort()
bar_height = 800
altitudes_bar = [(row[0]-bar_height/2) for row in plot_data_list]
altitudes = [row[0] for row in plot_data_list]
p_mean_Dg = [row[1] for row in plot_data_list]
p_neg_err_Dg = [row[2] for row in plot_data_list]
p_pos_err_Dg = [row[3] for row in plot_data_list]
p_mean_sigma = [row[4] for row in plot_data_list]
p_neg_err_sigma = [row[5] for row in plot_data_list]
p_pos_err_sigma = [row[6] for row in plot_data_list]
p_mean_mass_conc = [row[7] for row in plot_data_list]
p_neg_err_mass_conc = [row[8] for row in plot_data_list]
p_pos_err_mass_conc = [row[9] for row in plot_data_list]
p_mean_MAC_max = [row[10] for row in plot_data_list]
p_mean_MAC_min = [row[11] for row in plot_data_list]
p_mean_MAC_mid = [(row[10] +(row[11]-row[10])/2) for row in plot_data_list]
p_mean_MAC_poserr = [(row[10] - (row[10] +(row[11]-row[10])/2)) for row in plot_data_list]
p_mean_MAC_negerr = [((row[10] +(row[11]-row[10])/2) - row[11]) for row in plot_data_list]
p_mean_MAC_width = [(row[10] - row[11]) for row in plot_data_list]
p_mean_SSA_max = [row[12] for row in plot_data_list]
p_mean_SSA_min = [row[13] for row in plot_data_list]
p_mean_SSA_mid = [(row[12] +(row[13]-row[12])/2) for row in plot_data_list]
p_mean_SSA_poserr = [(row[12] - (row[12] +(row[13]-row[12])/2)) for row in plot_data_list]
p_mean_SSA_negerr = [((row[12] +(row[13]-row[12])/2) - row[13]) for row in plot_data_list]
p_mean_SSA_width = [(row[12] - row[13]) for row in plot_data_list]
p_mean_ae_max = [row[14] for row in plot_data_list]
p_mean_ae_min = [row[15] for row in plot_data_list]
p_mean_ae_mid = [(row[14] +(row[15]-row[14])/2) for row in plot_data_list]
p_mean_ae_poserr = [(row[14] - (row[14] +(row[15]-row[14])/2)) for row in plot_data_list]
p_mean_ae_negerr = [((row[14] +(row[15]-row[14])/2) - row[15]) for row in plot_data_list]
p_mean_ae_width = [(row[14] - row[15]) for row in plot_data_list]
p_mean_Dp_Dc = [row[16] for row in plot_data_list]
p_neg_err_Dp_Dc = [row[17] for row in plot_data_list]
p_pos_err_Dp_Dc = [row[18] for row in plot_data_list]
p_BC_frac_mean = [row[19] for row in plot_data_list]
p_neg_err_BC_frac_mean = [row[20] for row in plot_data_list]
p_pos_err_BC_frac_mean = [row[21] for row in plot_data_list]
dir = 'C:/Users/<NAME>/Documents/Data/Netcare/Spring 2015/coating data/'
os.chdir(dir)
max_alt = 6200
bar_height = 800
fig = plt.figure(figsize=(10,10))
ax1 = plt.subplot2grid((2,2), (0,0), colspan=1)
ax2 = plt.subplot2grid((2,2), (0,1), colspan=1)
ax3 = plt.subplot2grid((2,2), (1,0), colspan=1)
ax1.errorbar(p_mean_MAC_mid,altitudes,xerr=[p_mean_MAC_negerr,p_mean_MAC_poserr],linestyle='', color = 'grey', elinewidth=2, capsize=8,capthick = 2)
#ax1.barh(altitudes_bar,p_mean_MAC_width,height=bar_height, left=p_mean_MAC_min,alpha = 0.5,edgecolor = None, color = 'grey')
#ax1.plot(p_mean_MAC_max,altitudes,marker='o',linestyle='-', color = 'b', label = 'coated rBC')
#ax1.plot(p_mean_MAC_min,altitudes,marker='o',linestyle='--', color = 'b',alpha = 0.5, label = 'bare rBC')
ax1.set_ylabel('altitude (m)')
ax1.set_xlabel(r'MAC $\mathregular{(m^2/g)}$')
ax1.set_xlim(6,18)
ax1.set_ylim(0,max_alt)
ax1.text(0.06,0.93,'A)', transform=ax1.transAxes)
#ax2.fill_betweenx(altitudes, p_mean_SSA_min, p_mean_SSA_max,alpha = 0.5, color = 'grey')
ax2.errorbar(p_mean_SSA_mid,altitudes,xerr=[p_mean_SSA_negerr,p_mean_SSA_poserr],linestyle='', color = 'b', elinewidth=2, capsize=8,capthick = 2)
#ax2.barh(altitudes_bar,p_mean_SSA_width,height=bar_height, left=p_mean_SSA_min,alpha = 0.5, color = 'grey')
#ax2.plot(p_mean_SSA_max,altitudes,marker='o',linestyle='-', color = 'grey')
#ax2.plot(p_mean_SSA_min,altitudes,marker='o',linestyle='-', color = 'grey')
ax2.set_xlabel('SSA')
ax2.set_ylabel('altitude (m)')
ax2.set_xlim(0.4,0.8)
ax2.set_ylim(0,max_alt)
ax2.text(0.06,0.93,'B)', transform=ax2.transAxes)
ax3.errorbar(p_mean_ae_mid,altitudes,xerr=[p_mean_ae_negerr,p_mean_ae_poserr],linestyle='', color = 'g', elinewidth=2, capsize=8,capthick = 2)
#ax3.barh(altitudes_bar,p_mean_ae_width,height=bar_height, left=p_mean_ae_min,alpha = 0.5, color = 'grey')
#ax3.plot(p_mean_ae_max,altitudes,marker='o',linestyle='-', color = 'b')
#ax3.plot(p_mean_ae_min,altitudes,marker='o',linestyle='--', color = 'b',alpha = 0.5)
ax3.set_xlabel('Abs enhancement')
ax3.set_ylabel('altitude (m)')
ax3.set_xlim(1,2)
ax3.set_ylim(0,max_alt)
ax3.text(0.06,0.93,'C)', transform=ax3.transAxes)
if savefig == True:
plt.savefig(dir + 'MAC SSA AE - 550nm - Sc 1-7 full mass range - using variable coating - neg coats given max-min.png', bbox_inches='tight')
plt.show()
####
fig = plt.figure()
ax4 = plt.subplot2grid((1,1), (0,0), colspan=1)
ax4.errorbar(p_mean_Dp_Dc,altitudes,xerr=[p_neg_err_Dp_Dc,p_pos_err_Dp_Dc],fmt='o',linestyle='-', color = 'red')
ax4.set_xlabel(r'$\mathregular{D_p/D_c}$ (rBC cores from 160-180nm)')
ax4.set_ylabel('altitude (m)')
ax4.set_xlim(0.8,2.4)
ax4.set_ylim(0,max_alt)
if savefig == True:
plt.savefig(dir + 'Dp_Dc 160-180nm - Sc 1-7 full mass range using variable coating - neg coats given max-min.png', bbox_inches='tight')
plt.show()
#####
fig = plt.figure(figsize=(10,10))
ax1 = plt.subplot2grid((2,2), (0,0), colspan=1)
ax2 = plt.subplot2grid((2,2), (0,1), colspan=1)
ax3 = plt.subplot2grid((2,2), (1,0), colspan=1)
ax1.errorbar(p_mean_Dg,altitudes,xerr = [p_neg_err_Dg,p_pos_err_Dg],fmt='o',linestyle='-', color = 'b')
ax1.set_ylabel('altitude (m)')
ax1.set_xlabel(r'Dg (from dM/dlog(D) $\mathregular{ng/m^3-STP}$)')
ax1.set_xlim(100,220)
ax1.set_ylim(0,max_alt)
ax1.text(0.06,0.93,'A)', transform=ax1.transAxes)
ax2.errorbar(p_mean_sigma,altitudes,xerr = [p_neg_err_sigma,p_pos_err_sigma],fmt='o',linestyle='-', color = 'grey')
ax2.set_xlabel(r'sigma (from dM/dlog(D) $\mathregular{ng/m^3-STP}$)')
ax2.set_ylabel('altitude (m)')
ax2.set_xlim(1,2)
ax2.set_ylim(0,max_alt)
ax2.text(0.06,0.93,'B)', transform=ax2.transAxes)
ax3.errorbar(p_mean_mass_conc,altitudes,xerr = [p_neg_err_mass_conc,p_pos_err_mass_conc],fmt='o',linestyle='-', color = 'green')
ax3.set_xlabel(r'total mass conc ($\mathregular{ng/m^3-STP}$)')
ax3.set_ylabel('altitude (m)')
ax3.set_xlim(0,100)
ax3.set_ylim(0,max_alt)
ax3.text(0.06,0.93,'C)', transform=ax3.transAxes)
if savefig == True:
#plt.savefig('altitude dependent plots - '+flight_times[flight][4]+' - cloud-free.png', bbox_inches='tight')
plt.savefig(dir + 'altitude dependent plots Dp sig mass DpDc fracBC - using variable coating - sc1-7 - cloud-free - neg coats given max-min.png', bbox_inches='tight')
plt.show()
####
fig = plt.figure()
ax4 = plt.subplot2grid((1,1), (0,0), colspan=1)
ax4.errorbar(p_BC_frac_mean,altitudes,xerr = [p_neg_err_BC_frac_mean,p_pos_err_BC_frac_mean],fmt='o',linestyle='-', color = 'b')
ax4.set_xlabel('Fraction of all particles which contain rBC')
ax4.set_ylabel('altitude (m)')
ax4.set_xlim(0,0.1)
ax4.set_ylim(0,max_alt)
#ax4.text(0.06,0.93,'D)', transform=ax4.transAxes)
if savefig == True:
plt.savefig(dir + 'fraction particles conatining rBC - Sc 1-7.png', bbox_inches='tight')
plt.show()
|
StarcoderdataPython
|
3365710
|
<filename>printing/PSWriter.py<gh_stars>0
"""Wrapper for the PSStream to support the standard AbstractWriter interface.
"""
__version__ = '$Revision: 1.10 $'
import formatter
import string
import utils
class PSWriter(formatter.AbstractWriter):
"""Class PSWriter supports the backend interface expected by
Grail, actually the HTMLParser class. It does this by deriving
from AbstractWriter and overriding methods to interface with the
PSStream class, which performs the real PostScript work.
Exported methods:
__init__(OUTPUT_FILE_OBJECT, optional:TITLE)
close()
new_font(FONT_TUPLE)
new_margin(MARGIN_TAG(ignored) LEVEL)
new_spacing(SPACING)
new_styles(STYLE_TUPLE)
send_paragraph(NUMBER_OF_BLANKLINES)
send_line_break()
send_hor_rule()
send_label_data(LABEL_TAG)
send_flowing_data(TEXT)
send_literal_data(TEXT)
send_indentation(WIDTH)
suppress_indentation([suppress=1])
Exported ivars:
"""
__detab_pos = 0
__pending_indentation = None
__suppress_indentation = 0
def __init__(self, ofile, title='', url='',
varifamily='Times', fixedfamily='Courier', paper=None,
settings=None):
if not title:
title = url
import PSFont
import PSStream
fontsize, leading = settings.get_fontsize()
font = PSFont.PSFont(varifamily=varifamily,
fixedfamily=fixedfamily,
size=fontsize)
self.ps = PSStream.PSStream(font, ofile, title, url, paper=paper)
self.settings = settings
if leading:
self.ps.set_leading(leading)
self.ps.start()
## self.new_alignment = self.ps.push_alignment
## self.new_font = self.ps.push_font_change
def close(self):
## utils.debug('close')
self.ps.push_end()
def new_alignment(self, align):
## utils.debug('new_alignment: %s' % `align`)
self.__alignment = align
self.ps.push_alignment(align)
def new_font(self, font):
## utils.debug('new_font: %s' % `font`)
self.ps.push_font_change(font)
def new_margin(self, margin, level):
## utils.debug('new_margin: margin=%s, level=%s' % (margin, level))
self.ps.push_margin(level)
self.__detab_pos = 0
def new_spacing(self, spacing):
raise RuntimeError('not yet implemented')
# semantics of STYLES is a tuple of single char strings.
# Right now the only styles we support are lower case 'underline' for
# underline and a 'blockquote' for each right-hand indentation.
def new_styles(self, styles):
## utils.debug('new_styles: %s' % styles)
self.ps.push_underline('underline' in styles)
self.ps.push_rightmargin(map(None, styles).count('blockquote'))
def send_paragraph(self, blankline):
## utils.debug('send_paragraph: %s' % blankline)
self.ps.push_paragraph(blankline, self.settings.paragraph_skip)
self.__detab_pos = 0
self.__pending_indentation = None
self.__suppress_indentation = 0
def suppress_indentation(self, suppress=1):
"""Controll suppression of the *next* indentation sent."""
self.__suppress_indentation = suppress
if suppress:
self.__pending_indentation = None
def send_indentation(self, width):
"""Add some 'pended' paragraph indentation which might get cancelled
later."""
## utils.debug('send_indentation: %s' % width)
if self.__suppress_indentation:
self.__suppress_indentation = 0
else:
self.__pending_indentation = width
def send_line_break(self):
## utils.debug('send_line_break')
self.ps.push_hard_newline()
self.__detab_pos = 0
self.__pending_indentation = None
self.__suppress_indentation = 0
def send_hor_rule(self, abswidth=None, percentwidth=None,
height=None, align=None):
## utils.debug('send_hor_rule')
self.ps.push_horiz_rule(abswidth, percentwidth, height, align)
self.__detab_pos = 0
self.__pending_indentation = None
self.__suppress_indentation = 0
def send_label_data(self, data):
## utils.debug('send_label_data: %s' % data)
self.ps.push_label(data)
self.__detab_pos = 0
self.__pending_indentation = None
self.__suppress_indentation = 0
def send_flowing_data(self, data):
## utils.debug('send_flowing_data: %s' % data)
self.ps.push_literal(0)
if self.__pending_indentation:
self.ps.push_horiz_space(self.__pending_indentation)
self.__pending_indentation = None
else:
self.__suppress_indentation = 0
self.ps.push_string_flowing(data)
self.__detab_pos = 0
def send_literal_data(self, data):
## utils.debug('send_literal_data: %s' % data)
self.ps.push_literal(1)
if self.__pending_indentation:
self.ps.push_horiz_space(self.__pending_indentation)
self.__pending_indentation = None
else:
self.__suppress_indentation = 0
self.ps.push_string(self.__detab_data(data))
def send_eps_data(self, image, align):
## utils.debug('send_eps_data: <epsdata>, ' + `bbox`)
if self.__pending_indentation:
self.ps.push_horiz_space(self.__pending_indentation)
self.__pending_indentation = None
else:
self.__suppress_indentation = 0
self.ps.push_eps(image, align)
self.__detab_pos = 0
def __detab_data(self, data):
pos = self.__detab_pos
s = []
append = s.append
for c in data:
if c == '\n':
append('\n')
pos = 0
elif c == '\t':
append(' ' * (8 - (pos % 8)))
pos = 0
else:
append(c)
pos = pos + 1
self.__detab_pos = pos
return string.joinfields(s, '')
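# Hypothetical usage sketch (Grail's printing package is assumed importable;
# 'settings' stands in for Grail's print-settings object, which must provide
# get_fontsize() and a paragraph_skip attribute):
#
#   import formatter
#   writer = PSWriter(open('out.ps', 'w'), title='Example', settings=settings)
#   fmt = formatter.AbstractFormatter(writer)
#   fmt.add_flowing_data('Hello, PostScript world.')
#   fmt.end_paragraph(1)
#   writer.close()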
|
StarcoderdataPython
|
3255300
|
<filename>train_byol.py
import torch
from byol_pytorch import BYOL
import torchvision
from torchvision import models
import torchvision.transforms as transforms
import torch.nn as nn
from torch.autograd import Variable
use_gpu = torch.cuda.is_available()
resnet = models.resnet50(pretrained=True)
learner = BYOL(
resnet,
image_size = 256
# ,
# hidden_layer = 'avgpool'
)
learner=learner.cuda()
# if torch.cuda.is_available:
# learner=nn.DataParallel(learner,device_ids=[0,1,2]) # multi-GPU
opt = torch.optim.Adam(learner.parameters(), lr=3e-4)
transform = transforms.ToTensor()
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64,
shuffle=True, num_workers=2)
# def sample_unlabelled_images():
# return torch.randn(20, 3, 256, 256)
for epoch in range(1000):
f = open('pytorch_out.txt', 'a')
for batch_id, data in enumerate(trainloader):
images, _ = data
# note: ToTensor already yields (C, H, W) tensors, so the batch is (B, 3, H, W)
# and no permute is needed here
if use_gpu:
images = Variable(images.cuda())
# print(images.shape)
loss = learner(images)
opt.zero_grad()
loss.backward()
opt.step()
learner.update_moving_average() # update moving average of target encoder
if batch_id % 50 == 0:
item = "[Epoch %d, batch %d] loss: %.5f" % (epoch, batch_id, loss)
print(item)
f.write(str(item)+'\n')
f.close()
# save your improved network
torch.save(resnet.state_dict(), './improved-net.txt')
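# After training, the improved backbone can be reloaded and used as a frozen feature
# extractor. A minimal sketch (file name matches the torch.save call above):
#
#   backbone = models.resnet50()
#   backbone.load_state_dict(torch.load('./improved-net.txt'))
#   backbone.fc = nn.Identity()          # drop the classification head
#   backbone.eval()
#   with torch.no_grad():
#       embeddings = backbone(images)    # (batch, 2048) representations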
|
StarcoderdataPython
|
3254820
|
materiais = ['caneta', 'caderno', 'livro', 'lapis']
print(materiais)
#printing each element from the list
materiais = ['caneta', 'caderno', 'livro', 'e-book']
for material in materiais :
print(material) #this is the for body
print(material.title())
print(len(materiais))
# "for" makes variable "material" write each element from the list per time
#numeric lists. In python we use Range to generate number in "for"
for valor in range(1,5):
print(valor)
numeros = list(range(1,6))
print(numeros)
for numeros in range(1,7):
print(numeros)
quadrados = []
for valor in range(1,11):
quadrado = valor ** 2
quadrados.append(quadrado)
print(quadrados)
# squares of a list of numbers
digitos = [1,2,3,4,5,6,7,8,9,0]
print(min(digitos))
print(max(digitos))
print(sum(digitos))
# stats basics
materiais = ['caneta','caderno','livro','e-book']
print(materiais[0:4])
# manipulating some list contents
materiais = ['caneta','caderno','livro','e-book','etc']
print(materiais[2:])
# manipulating some list contents
materiais = ['caneta','caderno','livro','e-book']
objetos = materiais[1:3]
print(materiais)
print(objetos)
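#aliasing vs copying: "objetos = materiais" makes both names point to the same list,
#so the append below shows up in both prints; "materiais[:]" creates a shallow copy,
#so only the copy gains the extra element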
materiais = [1,2,3,4]
objetos = materiais
objetos.append(5)
print(materiais)
print(objetos)
materiais = [1,2,3,4]
objetos = materiais[:]
objetos.append(5)
print(materiais)
print(objetos)
for i in range(1,11):
...
for j in range (1,11):
...
for i in range(2,4):
print("Tabuada do " + str(i))
for j in range(0,11):
print(str(j) + "" + str(j*i))
|
StarcoderdataPython
|
1618221
|
# _*_ coding: utf-8 _*_
from django.db import models
from django.contrib.auth.models import AbstractUser
from fastrunner.models import Project
class User(AbstractUser):
belong_project = models.ManyToManyField(Project, blank=True, help_text="所属项目", verbose_name="所属项目",
related_name="user_set", related_query_name="user")
class Meta:
verbose_name = "用户信息"
verbose_name_plural = verbose_name
db_table = 'auth_user'
def __str__(self):
return self.username
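# Note: a custom user model like this also has to be pointed to from settings.py,
# e.g. AUTH_USER_MODEL = '<app_label>.User'; the app label depends on where this
# models.py lives, which is not shown in this file.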
|
StarcoderdataPython
|
4817398
|
from tkinter import *
import sqlite3
import random
import pickle
conn = sqlite3.connect('clickbait.db')
c = conn.cursor()
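# The 'history' table is assumed to already exist in clickbait.db; if it does not,
# it could be created once with a single text column, e.g.:
# c.execute("CREATE TABLE IF NOT EXISTS history(title TEXT)")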
def close():
conn.close()
window.destroy()
exit()
def history():
output.delete(0.0, END)
c.execute("SELECT * FROM history")
history = c.fetchall()
numout = 1.0
numarr = 0
for i in history:
output.insert(numout, f'''{history[numarr]}
''')
numout = numout + 1.0
numarr = numarr + 1
def generate():
output.delete(0.0, END)
abbreviations = ["OMG", "LMAO", "LMFAO", "ROFL", "OMFG", "STFU", "VIRAL", "NSFW", "GTFO"]
begin = ["I", "My Friend", "My GF", "My BF", "Obama", "Trump", "Trudeau", "Biden"]
mid = ["Contacted The Dead Spirit Of XxxTentacion", "Filled My House With SLIME", "MURDERED ME", "ORDERED HUMAN SLAVES", "Drank Among Us Potion", "Drank MINECRAFT Potion", "Browsed The Dark Web!", "Got High On Skittles!", "Filled A Pool Of Orbeez!", "Blew Up My Toilet!", "Killed My SISTER?!"]
end = ["ACTUALLY HAPPENED", "COPS CALLED", "GONE WRONG", "GONE SEXUAL?", "GONE DEADLY", "I LITERALLY CAN'T BELIEVE IT HAPPENED", "666", "GONE SPOOKY", "420"]
whole = f"{random.choice(abbreviations)}!!! {random.choice(begin)} {random.choice(mid)}! ({random.choice(end)}!!!!)"
pickle.dump( whole, open( "clickbait.p", "wb" ) )
wholefinal = pickle.load( open( "clickbait.p", "rb" ) )
sql = ("INSERT INTO history(title) VALUES(?)")
val = (wholefinal,)
c.execute(sql, val)
conn.commit()
output.insert(1.0, f'''{wholefinal}
''')
window = Tk()
window.title('Clickbait 2021 Abridged The Manga')
window.configure(background="black")
window.resizable(0,0)
Label (window, text="Clickbait 2021 Abridged The Manga", bg="black", fg='white', font="none 12") .grid(row=1, column=0, sticky=N)
Button(window, text="Generate", width=6, bg="black", fg="white", command=generate) .grid(row=2, column=0, sticky=N)
Button(window, text="History", width=6, bg="black", fg="white", command=history) .grid(row=3, column=0, sticky=N)
output = Text(window, width=40, height=10, wrap=WORD, fg="white", background="black")
output.grid(row=4, column=0, sticky=N)
window.mainloop()
|
StarcoderdataPython
|
3227736
|
from PIL import Image, ImageTk
from tkinter import messagebox
import tkinter as tk
import numpy as np
import matplotlib.pyplot as plt
import cv2 as cv
#from tkinter import ttk
window = tk.Tk() # create window
window.title( 'B063040061 hw2' ) # name title
flagOpen = False # file open flag
flagOB = False # preserve/black flag
flagSS = False # smoothing/sharpening
glsvar1 = tk.IntVar() # gray level slicing varible
glsvar2 = tk.IntVar() # ''
bpivar = tk.IntVar() # bit plane image varible
def WarningMessage() :
# warning message shown if the image file is not found
a = tk.messagebox.showwarning( 'Warning', 'File Not Found!\nPlease try again!' )
def ShowInfo() :
# success message shown when the file is saved
a = tk.messagebox.showinfo( 'Success', 'File Saved!' )
def ShowWarning() :
# warning shown when saving fails
a = tk.messagebox.showwarning( 'Failed', 'Unknown type' )
def oCBOpen() :
# open or reset the image
global imageOri, flagOpen
if flagOpen == False :
try :
# load the image from directory
# detect edges in it
imageOri = cv.imread( o.get() )
imageOri = cv.cvtColor( imageOri, cv.COLOR_BGR2GRAY )
image = cv.resize( imageOri, ( 300,300 ), interpolation = cv.INTER_CUBIC )
edged = cv.Canny( imageOri, 50, 100 )
# OpenCV represents images in BGR order ;
# however PIL represents images in RGB order, so we need to swap the channels
imgRender = cv.cvtColor( image, cv.COLOR_BGR2RGB )
# convert the images to PIL format
imgRender = Image.fromarray( imgRender )
edged = Image.fromarray( edged )
imgRender = ImageTk.PhotoImage( imgRender )
edged = ImageTk.PhotoImage( edged )
imgPanelL.config( image = imgRender )
imgPanelL.image = imgRender ;
imgPanelR.config( image = imgRender )
imgPanelR.image = imgRender ;
scaleSS.config( command = Smoothing_Sharpening )
except Exception : # or IOError
# when something wrong with image
WarningMessage()
def oCBSave() :
# save current image
try :
cv.imwrite( s.get(), imgCur )
ShowInfo()
except Exception :
ShowWarning()
def oCBchangeOB() :
# buttom to change preserve/black
global flagOB
if flagOB == False :
flagOB = True
btnOorB.config( text = 'Preserve' )
else :
flagOB = False
btnOorB.config( text = 'Black' )
def oCBchangeSS() :
# buttom to change sharpening/smoothing
global flagSS
if flagSS == False :
flagSS = True
btnSS.config( text = 'Sharpening' )
scaleSS.config( from_ = 0 )
else :
flagSS = False
btnSS.config( text = 'Smoothing' )
scaleSS.config( from_ = 1 )
def Gray_Level_Slicing() :
# change the selected range to 255
global imgCur
row,col = imageOri.shape
imgGLS = np.zeros((row, col),dtype = 'uint8')
min_range = glsvar1.get()
max_range = glsvar2.get()
# get from user input of range
for i in range ( row ) :
for j in range ( col ) :
if imageOri[i,j] > min_range and imageOri[i,j] < max_range :
imgGLS[i,j] = 255
else :
if flagOB == False : # change to black
imgGLS[i,j] = 0
else : # preserve unchoose value
imgGLS[i,j] = imageOri[i,j]
imgCur = imgGLS
imgGLS = cv.resize( imgCur, ( 300,300 ), interpolation = cv.INTER_CUBIC )
imgRender = Image.fromarray( imgGLS )
#edged = Image.fromarray( edged )
imgRender = ImageTk.PhotoImage( imgRender )
#edged = ImageTk.PhotoImage( edged )
imgPanelL.config( image = imgRender )
imgPanelL.image = imgRender ;
def Bit_Plane_Image() :
# slice bit wise for bit #0 to bit #8
global imgCur
row,col = imageOri.shape
imgBPI = np.zeros((row, col),dtype = 'uint8') # initial
level = np.power( 2, bpivar.get()-1 ) # 2^n-1
# slicing bit level to white and black
for i in range ( row ) :
for j in range ( col ) :
if imageOri[i,j] & level == 0 :
imgBPI[i,j] = 0
else :
imgBPI[i,j] = 255
imgCur = imgBPI
imgBPI = cv.resize( imgCur, ( 300,300 ), interpolation = cv.INTER_CUBIC )
imgRender = Image.fromarray( imgBPI )
#edged = Image.fromarray( edged )
imgRender = ImageTk.PhotoImage( imgRender )
#edged = ImageTk.PhotoImage( edged )
imgPanelL.config( image = imgRender )
imgPanelL.image = imgRender ;
def Smoothing_Sharpening(v) :
# smoothing or sharpening function, chosen by the toggle button; v is the slider value
global imgCur
if flagSS == False : # smoothing
imgSmoo = cv.blur(imageOri, (int(v),int(v)))
imgCur = imgSmoo
imgSmoo = cv.resize( imgCur, ( 300,300 ), interpolation = cv.INTER_CUBIC )
imgRender = Image.fromarray( imgSmoo )
else : # sharpening
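#unsharp-style sharpening: filter2D applies (identity + (v/5) * 8-neighbour Laplacian),
#i.e. the original image plus a scaled edge image; larger slider values v sharpen more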
kernel = np.array([[-1,-1,-1],
[-1, 8,-1],
[-1,-1,-1]])
ori = np.array([[0,0,0],
[0,1,0],
[0,0,0]])
imgSharp = cv.filter2D( imageOri, -1, kernel*int(v)/5+ori )
imgCur = imgSharp
imgSharp = cv.resize( imgCur, ( 300,300 ), interpolation = cv.INTER_CUBIC )
imgRender = Image.fromarray( imgSharp )
#edged = Image.fromarray( edged )
imgRender = ImageTk.PhotoImage( imgRender )
#edged = ImageTk.PhotoImage( edged )
imgPanelL.config( image = imgRender )
imgPanelL.image = imgRender ;
def FFT() :
# Fast Fourier Transform
global imgCur
imgFFT = np.fft.fft2( imageOri )
imgFFT = np.fft.fftshift(imgFFT) # shift to right way
imgFFT = 20*np.log( np.abs( imgFFT )+1 )
# remap by log
imgCur = imgFFT
imgFFT = cv.resize( imgCur, ( 300,300 ), interpolation = cv.INTER_CUBIC )
imgRender = Image.fromarray( imgFFT )
#edged = Image.fromarray( edged )
imgRender = ImageTk.PhotoImage( imgRender )
#edged = ImageTk.PhotoImage( edged )
imgPanelL.config( image = imgRender )
imgPanelL.image = imgRender ;
def Phase_Image() :
# show the phase-only image via inverse FFT of the phase spectrum
f = np.fft.fft2( imageOri )
fshift = np.fft.fftshift( f )
fre = np.abs(fshift)
fre = fre.clip( min=1 )
phase = fshift/fre # phase = fshift/np.abs(fshift) # np.angle( fshift )
imgPhase = np.fft.ifft2(np.fft.ifftshift(phase))
imgPhase = np.abs( imgPhase )
phase = phase.clip( min=0 )
plt.subplot(121),plt.imshow( np.log10( np.abs(phase)+0.1 ), cmap = 'gray')
plt.title('Phase Spectrum'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow( np.log10( imgPhase+0.01 ), cmap = 'gray')
plt.title('Phase Image'), plt.xticks([]), plt.yticks([])
plt.show()
def Amplitude_Image() :
# show the amplitude-only image via inverse FFT of the amplitude spectrum
f= np.fft.fft2( imageOri )
fshift = np.fft.fftshift( f )
amplitude = np.abs( fshift )
imgAmplitude = np.fft.ifft2(np.fft.ifftshift(amplitude))
imgAmplitude = np.abs( imgAmplitude )
plt.subplot(121),plt.imshow( 20*np.log ( amplitude+1 ), cmap = 'gray')
plt.title('Amplitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow( 20*np.log( imgAmplitude+1 ), cmap = 'gray')
plt.title('Amplitude Image'), plt.xticks([]), plt.yticks([])
plt.show()
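# Background note (illustrative, not from the original author): writing the spectrum as
# F = |F| * exp(i*phi), Phase_Image() reconstructs from exp(i*phi) alone (magnitude set
# to 1) while Amplitude_Image() reconstructs from |F| alone (phase set to 0). Phase-only
# reconstructions keep most of the structural/edge information, whereas amplitude-only
# reconstructions look largely featureless, which is what these two panels demonstrate.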
#--------------------using tkinter GUI-----------------------
imgPanelL = tk.Label( window, width = 300, height = 300 )
imgPanelR = tk.Label( window, width = 300, height = 300 )
oName = tk.Label( window, text = 'Open File : ', width = 10, height = 1 )
sName = tk.Label( window, text = 'Save File : ', width = 10, height = 1 )
o = tk.Entry( window )
s = tk.Entry( window )
btnOpen = tk.Button( window, width = 10, height = 1,
text = 'open / reset', command = oCBOpen )
btnSave = tk.Button( window, width = 10, height = 1,
text = 'save', command = oCBSave )
labelGLS = tk.Label( window, text = 'Gray-level slicing', width = 15, height = 1 )
labelBPI = tk.Label( window, text = 'Bit-Plane images', width = 15, height = 1 )
btnSS = tk.Button( window, width = 10, height = 1,
text = 'Smoothing', command = oCBchangeSS )
labelrange1 = tk.Label( window, text = 'range from : ',
width = 10, height = 1, fg = 'gray35' )
labelrange2 = tk.Label( window, text = 'to : ', width = 10, height = 1, fg = 'gray35' )
labelOB = tk.Label( window, text = 'unselected area : ',
width = 15, height = 1, fg = 'gray35' )
btnOorB = tk.Button( window, width = 10, height = 1,
text = 'Black', command = oCBchangeOB )
btnGLSshow = tk.Button( window, width = 10, height = 1,
text = 'show', command = Gray_Level_Slicing )
btnBPIshow = tk.Button( window, width = 10, height = 1,
text = 'show', command = Bit_Plane_Image )
spinboxGLS1 = tk.Spinbox( window, width = 10, from_ = 0, to = 255,
textvariable = glsvar1 )
spinboxGLS2 = tk.Spinbox( window, width = 10, from_ = 0, to = 255,
textvariable = glsvar2 )
labelshowbpi = tk.Label( window, text = 'show bit-plane : ',
width = 15, height = 1, fg = 'gray35' )
radiobtn1 = tk.Radiobutton( window, text = '1', variable = bpivar, value = 1 )
radiobtn2 = tk.Radiobutton( window, text = '2', variable = bpivar, value = 2 )
radiobtn3 = tk.Radiobutton( window, text = '3', variable = bpivar, value = 3 )
radiobtn4 = tk.Radiobutton( window, text = '4', variable = bpivar, value = 4 )
radiobtn5 = tk.Radiobutton( window, text = '5', variable = bpivar, value = 5 )
radiobtn6 = tk.Radiobutton( window, text = '6', variable = bpivar, value = 6 )
radiobtn7 = tk.Radiobutton( window, text = '7', variable = bpivar, value = 7 )
radiobtn8 = tk.Radiobutton( window, text = '8', variable = bpivar, value = 8 )
scaleSS = tk.Scale( window, orient = tk.HORIZONTAL, length = 630, width = 10, showvalue = 0, from_ = 1, to = 10, command = Smoothing_Sharpening )
labelFFT = tk.Label( window, text = 'FFT image', width = 15, height = 1 )
labelAP = tk.Label( window, text = 'Amplitude/Phase', width = 15, height = 1 )
btnFFTshow = tk.Button( window, width = 20, height = 1,
text = 'Display FFT image', command = FFT )
btnAmplitudeshow = tk.Button( window, width = 20, height = 1,
text = 'Display Amplitude image', command = Amplitude_Image )
btnPhaseshow = tk.Button( window, width = 20, height = 1,
text = 'Display Phase image', command = Phase_Image )
imgPanelL.place( x = 120, y = 40 )
imgPanelR.place( x = 450, y = 40 )
oName.place( x = 30, y = 5 )
sName.place( x = 360, y = 5 )
o.place( x = 120, y = 5 )
s.place( x = 450, y = 5 )
btnOpen.place( x = 10 , y = 40 )
btnSave.place( x = 10 , y = 80 )
labelGLS.place( x = 10, y = 350 )
labelBPI.place( x = 10, y = 390 )
btnSS.place( x = 10, y = 430 )
labelrange1.place( x = 120, y = 350 )
labelrange2.place( x = 270, y = 350 )
labelOB.place( x = 420, y = 350 )
btnOorB.place( x = 540, y = 350 )
btnGLSshow.place( x = 655, y = 350 )
btnBPIshow.place( x = 655, y = 390 )
spinboxGLS1.place( x = 200, y = 350 )
spinboxGLS2.place( x = 320, y = 350 )
labelshowbpi.place( x = 120, y = 390 )
radiobtn1.place( x = 220, y = 390 )
radiobtn2.place( x = 260, y = 390 )
radiobtn3.place( x = 300, y = 390 )
radiobtn4.place( x = 340, y = 390 )
radiobtn5.place( x = 380, y = 390 )
radiobtn6.place( x = 420, y = 390 )
radiobtn7.place( x = 460, y = 390 )
radiobtn8.place( x = 500, y = 390 )
scaleSS.place ( x = 120, y = 430 )
labelFFT.place( x = 10, y = 470 )
labelAP.place( x = 10, y = 510 )
btnFFTshow.place( x = 120, y = 470 )
btnAmplitudeshow.place( x = 120, y = 510 )
btnPhaseshow.place( x = 300, y = 510 )
#--------------------using tkinter GUI-----------------------
window.geometry( "800x600" )
window.mainloop()
|
StarcoderdataPython
|
882
|
# Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module with assertion helpers.
The advantages of using a method like
assert_eq(expected, actual)
instead of
assert expected == actual
include:
1 - On failures, assert_eq prints an informative message of the actual
values compared (e.g. AssertionError: 1 != 2) for free, which makes it
faster and easier to iterate on tests.
2 - In the context of refactors, basic asserts incorrectly shift the burden of
adding printouts and writing good test code to people refactoring code
rather than the person who initially wrote the code.
"""
__all__ = [
"assert_is",
"assert_is_not",
"assert_is_instance",
"assert_eq",
"assert_dict_eq",
"assert_ne",
"assert_gt",
"assert_ge",
"assert_lt",
"assert_le",
"assert_in",
"assert_not_in",
"assert_in_with_tolerance",
"assert_unordered_list_eq",
"assert_raises",
"AssertRaises",
# Strings
"assert_is_substring",
"assert_is_not_substring",
"assert_startswith",
"assert_endswith",
]
# The unittest.py testing framework checks for this variable in a module to
# filter out stack frames from that module from the test output, in order to
# make the output more concise.
# __unittest = 1
import traceback
from .inspection import get_full_name
_number_types = (int, float, complex)
def _assert_fail_message(message, expected, actual, comparison_str, extra):
if message:
return message
if extra:
return "%a %s %a (%s)" % (expected, comparison_str, actual, extra)
return "%a %s %a" % (expected, comparison_str, actual)
def assert_is(expected, actual, message=None, extra=None):
"""Raises an AssertionError if expected is not actual."""
assert expected is actual, _assert_fail_message(
message, expected, actual, "is not", extra
)
def assert_is_not(expected, actual, message=None, extra=None):
"""Raises an AssertionError if expected is actual."""
assert expected is not actual, _assert_fail_message(
message, expected, actual, "is", extra
)
def assert_is_instance(value, types, message=None, extra=None):
"""Raises an AssertionError if value is not an instance of type(s)."""
assert isinstance(value, types), _assert_fail_message(
message, value, types, "is not an instance of", extra
)
def assert_eq(expected, actual, message=None, tolerance=None, extra=None):
"""Raises an AssertionError if expected != actual.
If tolerance is specified, raises an AssertionError if either
- expected or actual isn't a number, or
- the difference between expected and actual is larger than the tolerance.
"""
if tolerance is None:
assert expected == actual, _assert_fail_message(
message, expected, actual, "!=", extra
)
else:
assert isinstance(tolerance, _number_types), (
"tolerance parameter to assert_eq must be a number: %a" % tolerance
)
assert isinstance(expected, _number_types) and isinstance(
actual, _number_types
), "parameters must be numbers when tolerance is specified: %a, %a" % (
expected,
actual,
)
diff = abs(expected - actual)
assert diff <= tolerance, _assert_fail_message(
message, expected, actual, "is more than %a away from" % tolerance, extra
)
def _dict_path_string(path):
if len(path) == 0:
return "(root)"
return "->".join(map(ascii, path))
def assert_dict_eq(expected, actual, number_tolerance=None, dict_path=[]):
"""Asserts that two dictionaries are equal, producing a custom message if they are not."""
assert_is_instance(expected, dict)
assert_is_instance(actual, dict)
expected_keys = set(expected.keys())
actual_keys = set(actual.keys())
assert expected_keys <= actual_keys, "Actual dict at %s is missing keys: %a" % (
_dict_path_string(dict_path),
expected_keys - actual_keys,
)
assert actual_keys <= expected_keys, "Actual dict at %s has extra keys: %a" % (
_dict_path_string(dict_path),
actual_keys - expected_keys,
)
for k in expected_keys:
key_path = dict_path + [k]
assert_is_instance(
actual[k],
type(expected[k]),
extra="Types don't match for %s" % _dict_path_string(key_path),
)
assert_is_instance(
expected[k],
type(actual[k]),
extra="Types don't match for %s" % _dict_path_string(key_path),
)
if isinstance(actual[k], dict):
assert_dict_eq(
expected[k],
actual[k],
number_tolerance=number_tolerance,
dict_path=key_path,
)
elif isinstance(actual[k], _number_types):
assert_eq(
expected[k],
actual[k],
extra="Value doesn't match for %s" % _dict_path_string(key_path),
tolerance=number_tolerance,
)
else:
assert_eq(
expected[k],
actual[k],
extra="Value doesn't match for %s" % _dict_path_string(key_path),
)
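# Illustrative usage sketch (not part of the original module):
#   expected = {"a": 1.0, "b": {"c": 2.0}}
#   actual   = {"a": 1.0005, "b": {"c": 2.0}}
#   assert_dict_eq(expected, actual, number_tolerance=1e-2)   # passes
#   assert_dict_eq(expected, actual)                          # fails: value differs at 'a'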
def assert_ne(expected, actual, message=None, tolerance=None, extra=None):
"""Raises an AssertionError if expected == actual.
If tolerance is specified, raises an AssertionError if either
- expected or actual isn't a number, or
- the difference between expected and actual is smaller than the tolerance.
"""
if tolerance is None:
assert expected != actual, _assert_fail_message(
message, expected, actual, "==", extra
)
else:
assert isinstance(tolerance, _number_types), (
"tolerance parameter to assert_eq must be a number: %a" % tolerance
)
assert isinstance(expected, _number_types) and isinstance(
actual, _number_types
), "parameters must be numbers when tolerance is specified: %a, %a" % (
expected,
actual,
)
diff = abs(expected - actual)
assert diff > tolerance, _assert_fail_message(
message, expected, actual, "is less than %a away from" % tolerance, extra
)
def assert_gt(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand <= right_hand."""
assert left > right, _assert_fail_message(message, left, right, "<=", extra)
def assert_ge(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand < right_hand."""
assert left >= right, _assert_fail_message(message, left, right, "<", extra)
def assert_lt(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand >= right_hand."""
assert left < right, _assert_fail_message(message, left, right, ">=", extra)
def assert_le(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand > right_hand."""
assert left <= right, _assert_fail_message(message, left, right, ">", extra)
def assert_in(obj, seq, message=None, extra=None):
"""Raises an AssertionError if obj is not in seq."""
assert obj in seq, _assert_fail_message(message, obj, seq, "is not in", extra)
def assert_not_in(obj, seq, message=None, extra=None):
"""Raises an AssertionError if obj is in iter."""
# for very long strings, provide a truncated error
if isinstance(seq, str) and obj in seq and len(seq) > 200:
index = seq.find(obj)
start_index = index - 50
if start_index > 0:
truncated = "(truncated) ..."
else:
truncated = ""
start_index = 0
end_index = index + len(obj) + 50
truncated += seq[start_index:end_index]
if end_index < len(seq):
truncated += "... (truncated)"
assert False, _assert_fail_message(message, obj, truncated, "is in", extra)
assert obj not in seq, _assert_fail_message(message, obj, seq, "is in", extra)
def assert_in_with_tolerance(obj, seq, tolerance, message=None, extra=None):
"""Raises an AssertionError if obj is not in seq using assert_eq cmp."""
for i in seq:
try:
assert_eq(obj, i, tolerance=tolerance, message=message, extra=extra)
return
except AssertionError:
pass
assert False, _assert_fail_message(message, obj, seq, "is not in", extra)
def assert_unordered_list_eq(expected, actual, message=None):
"""Raises an AssertionError if the objects contained
in expected are not equal to the objects contained
in actual without regard to their order.
This takes quadratic time in the number of elements in actual; don't use it for very long lists.
"""
missing_in_actual = []
missing_in_expected = list(actual)
for x in expected:
try:
missing_in_expected.remove(x)
except ValueError:
missing_in_actual.append(x)
if missing_in_actual or missing_in_expected:
if not message:
message = (
"%a not equal to %a; missing items: %a in expected, %a in actual."
% (expected, actual, missing_in_expected, missing_in_actual)
)
assert False, message
def assert_raises(fn, *expected_exception_types):
"""Raises an AssertionError if calling fn does not raise one of the expected_exception-types."""
with AssertRaises(*expected_exception_types):
fn()
class AssertRaises(object):
"""With-context that asserts that the code within the context raises the specified exception."""
def __init__(self, *expected_exception_types, **kwargs):
# when you don't specify the exception expected, it's easy to write buggy tests that appear
# to pass but actually throw an exception different from the expected one
assert (
len(expected_exception_types) >= 1
), "You must specify the exception type when using AssertRaises"
self.expected_exception_types = set(expected_exception_types)
self.expected_exception_found = None
self.extra = kwargs.pop("extra", None)
assert_eq({}, kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type in self.expected_exception_types:
# Return True to suppress the Exception if the type matches. For details,
# see: http://docs.python.org/release/2.5.2/lib/typecontextmanager.html
self.expected_exception_found = exc_val
return True
for t in self.expected_exception_types:
if isinstance(exc_val, t):
self.expected_exception_found = exc_val
return True
expected = ", ".join(map(get_full_name, self.expected_exception_types))
if exc_type is None:
message = "No exception raised, but expected: %s" % expected
if self.extra is not None:
message += " (%s)" % self.extra
else:
template = (
"{TYPE}: {VAL} is raised, but expected:"
" {EXPECTED}{EXTRA_STR}\n\n{STACK}"
)
message = template.format(
TYPE=get_full_name(exc_type),
VAL=exc_val,
EXPECTED=expected,
STACK="".join(traceback.format_tb(exc_tb)),
EXTRA_STR=(" (%s)" % self.extra) if self.extra is not None else "",
)
raise AssertionError(message)
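# Illustrative usage sketch (not part of the original module):
#   with AssertRaises(ValueError):
#       int("not a number")          # the ValueError is caught and the block passes
#   assert_raises(lambda: int("x"), ValueError)
#   with AssertRaises(KeyError) as ctx:
#       {}["missing"]
#   # ctx.expected_exception_found now holds the raised KeyError instance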
# ===================================================
# Strings
# ===================================================
def assert_is_substring(substring, subject, message=None, extra=None):
"""Raises an AssertionError if substring is not a substring of subject."""
assert (
(subject is not None)
and (substring is not None)
and (subject.find(substring) != -1)
), _assert_fail_message(message, substring, subject, "is not in", extra)
def assert_is_not_substring(substring, subject, message=None, extra=None):
"""Raises an AssertionError if substring is a substring of subject."""
assert (
(subject is not None)
and (substring is not None)
and (subject.find(substring) == -1)
), _assert_fail_message(message, substring, subject, "is in", extra)
def assert_startswith(prefix, subject, message=None, extra=None):
"""Raises an AssertionError if the subject string does not start with prefix."""
assert (
(type(subject) is str)
and (type(prefix) is str)
and (subject.startswith(prefix))
), _assert_fail_message(message, subject, prefix, "does not start with", extra)
def assert_endswith(suffix, subject, message=None, extra=None):
"""Raises an AssertionError if the subject string does not end with suffix."""
assert (
(type(subject) is str) and (type(suffix) is str) and (subject.endswith(suffix))
), _assert_fail_message(message, subject, suffix, "does not end with", extra)
|
StarcoderdataPython
|
3344961
|
<gh_stars>1-10
#!/usr/bin/env python
#
# pKaTool - analysis of systems of titratable groups
# Copyright (C) 2010 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_<EMAIL>
# Normal mail:
# <NAME>
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
def get_net_charge(pdbfile,HIS):
"""Get the net charge within 20 A of the HIS"""
import Protool
X=Protool.structureIO()
X.readpdb(pdbfile)
close=[]
HIS_ND1='%s:ND1' %HIS
HIS_NE2='%s:NE2' %HIS
for residue in X.residues.keys():
for atom in X.residues[residue]:
#print atom
mdist=min(X.dist(HIS_ND1,atom),X.dist(HIS_NE2,atom))
if mdist<50.0:
close.append(residue)
break
elif mdist>355.0:
break
# Got all close residues, now count charge
charge=0.0
nc={'ASP':-1,'GLU':-1,'LYS':+1,'ARG':+1,'HIS':+1}
close.sort()
print close
for res in close:
restype=X.resname(res)
if nc.has_key(restype):
charge=charge+nc[restype]
print res,restype,nc[restype],charge
print 'Net charge',charge
return charge
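# Illustrative example (not from the original author): if the residues close to the HIS
# are one ASP, one GLU and two LYS, the returned net charge is (-1) + (-1) + (+1) + (+1) = 0.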
def get_HIS(pdbfilename,PD,Nuc):
#
# find the residue number of the HIS
#
import Protool
X=Protool.structureIO()
X.readpdb(pdbfilename)
dists=[]
for residue in X.residues.keys():
if X.resname(residue)=='HIS':
for PD_atom in ['OE1','OE2']:
for HIS_atom in ['ND1','NE2']:
dist=X.distance(PD+':%s' %PD_atom,residue+':%s' %HIS_atom)
dists.append([dist,residue,PD+':%s' %PD_atom,residue+':%s' %HIS_atom])
dists.sort()
if dists[0][0]<3.5:
print 'HIS Hbonded to PD: %s %5.2f' %(dists[0][1],dists[0][0])
HIS=dists[0][1]
HIS_atom=dists[0][3][-3:]
#print dists
#print HIS_atom
if HIS_atom=='ND1':
other_atom=dists[0][3][:-3]+'NE2'
elif HIS_atom=='NE2':
other_atom=dists[0][3][:-3]+'ND1'
else:
raise Exception('Something is rotten')
print 'Other atom',other_atom
#
# find the other Hbond partner of this HIS
#
HisDists=[]
for residue in X.residues.keys():
if residue==HIS or residue==PD:
continue
for atom in X.residues[residue]:
dist=X.distance(other_atom,atom)
if dist>30.0:
break
HisDists.append([dist,atom])
HisDists.sort()
Hbond_res=[]
for dist,atom in HisDists:
if dist<3.2:
Hbond_res.append([dist,atom])
if len(Hbond_res)==0:
Hbond_res.append(HisDists[0])
print Hbond_res
if len(Hbond_res)>2:
raise Exception('More than two Hbond donors')
other_side=[]
for dist,atom in Hbond_res:
print 'The residue "on the other side" is %s %s. dist: %5.2f A' %(atom,X.resname(atom),dist)
other_side.append([pdbfilename,X.resname(atom)])
#
# Can we say anything about the protonation state of the HIS?
#
pass
elif pdbfilename.find('H241A')!=-1:
return 'Mutated',[]
else:
print 'No HIS Hbond found'
print dists
HIS=None
stop
return HIS,other_side
def main():
import os
scores={}
for calctype in ['APO','HOLO','Helens_calcs']:
dirs=os.listdir(calctype)
for direc in dirs:
if direc in ['CCPS','CCPS_3','CCPS_HIS+','CCPS_HIS-']:
continue
rdir=os.path.join(os.getcwd(),calctype,direc)
if not os.path.isdir(rdir):
continue
files=os.listdir(rdir)
found=False
for fn in files:
if fn.find('sensresult_10')!=-1:
found=fn
print os.path.join(calctype,direc)
name=direc
if scores.has_key(direc):
raise Exception('Duplicate PDBID!!: %s' %direc)
#
if found:
fd=open(os.path.join(rdir,found))
import pickle
data=pickle.load(fd)
fd.close()
pdbfile=data[0]
SR=data[1]
PD=data[2]
Nuc=data[3]
#
# get the Histidine pKa, and figure out if the His should be positive in the CCPS
#
if PD and Nuc:
HIS,other_side=get_HIS(os.path.join(rdir,pdbfile),PD[:-4],Nuc[:-4])
#print 'HIS',HIS
#
# Get the net charge within 20 A
#
if HIS.lower()!='mutated' and HIS:
net_charge=get_net_charge(os.path.join(rdir,pdbfile),HIS)
else:
net_charge=None
#
# Do sensitivity analysis
#
import pKaTool.Do_Sensitivity_analysis as dosens
score,csvline=dosens.print_result(pdbfile,SR,PD,Nuc,resultdir=rdir)
scores[direc]={'score':score,'HIS':HIS,'HisPartner':other_side,'net_charge':net_charge}
scores[direc]['PD_pKa']=dosens.get_pKa(PD,resultdir=rdir,calc=pdbfile)
scores[direc]['Nuc_pKa']=dosens.get_pKa(Nuc,resultdir=rdir,calc=pdbfile)
scores[direc]['His_pKa']=dosens.get_pKa(HIS,resultdir=rdir,calc=pdbfile)
#
# Get intrinsic pKa values
#
scores[direc]['PD_intpKa']=dosens.get_intpKa(PD,resultdir=rdir,calc=pdbfile)
scores[direc]['Nuc_intpKa']=dosens.get_intpKa(Nuc,resultdir=rdir,calc=pdbfile)
scores[direc]['His_intpKa']=dosens.get_intpKa(HIS,resultdir=rdir,calc=pdbfile)
else:
scores[direc]={'score':'notdone'}
print 'No sensitivity analysis found'
print '---------------------------------'
if direc.find('ligand')!=-1 or direc.find('lignad')!=-1:
ligand=True
else:
ligand=False
scores[direc]['ligand']=ligand
scores[direc]['csvline']=csvline
#
# Save the scores
#
fd=open('scores.pickle','w')
import pickle
pickle.dump(scores,fd)
fd.close()
return
def make_plots():
fd=open('scores.pickle')
import pickle
scores=pickle.load(fd)
fd.close()
#
# Do final stats
#
ligpdbs=[]
for pdb in scores.keys():
if scores[pdb]['ligand']:
ligpdbs.append(pdb[:4])
print 'Final stats'
for structure in ['true_apo','apo_generated','holo']:
notdone=0
missingpka=0
correct=0
corrects=[]
wrong=0
wrongs=[]
inconclusive=0
incon=[]
totcalcs=0
printlines=[]
partners={'normal':[],'reversed':[],'incon':[]}
print 'Type of structures',structure
PD_pKas=[]
Nuc_pKas=[]
His_pKas=[]
PD_intpKas=[]
Nuc_intpKas=[]
His_intpKas=[]
nc=[]
cor_nc=[]
rev_nc=[]
inc_nc=[]
for pdb in scores.keys():
score=scores[pdb]['score']
partner=None
if scores[pdb].has_key('HisPartner'):
partner=scores[pdb]['HisPartner']
# Look only at the correct set of structures
if structure=='true_apo':
if pdb[:4] in ligpdbs or scores[pdb]['ligand']:
continue
elif structure=='apo_generated':
if not pdb[:4] in ligpdbs or scores[pdb]['ligand']:
continue
elif structure=='holo':
if not scores[pdb]['ligand']:
continue
if scores[pdb].has_key('csvline'):
printlines.append(scores[pdb]['csvline'])
if scores[pdb].has_key('net_charge'):
if scores[pdb]['net_charge']:
nc.append(scores[pdb]['net_charge'])
#
# Keep track of the pKas
#
if score!='notdone':
#print scores[pdb]
for name,l1,l2 in [['PD_pKa',PD_pKas,PD_intpKas],
['Nuc_pKa',Nuc_pKas,Nuc_intpKas],
['His_pKa',His_pKas,His_intpKas]]:
pKa=scores[pdb][name]
intpKa=scores[pdb][name.replace('pKa','intpKa')]
if not pKa is None:
l1.append(pKa)
l2.append(intpKa)
#PD_pKas.append(scores[pdb]['PD_pKa'])
#Nuc_pKas.append(scores[pdb]['Nuc_pKa'])
#His_pKas.append(scores[pdb]['His_pKa'])
#
# Tally the scores
#
totcalcs=totcalcs+1
if score is None:
missingpka=missingpka+1
print 'MISSING',pdb
raw_input('kkk')
elif score == 'notdone':
notdone=notdone+1
elif score>0.0:
correct=correct+1
corrects.append(pdb)
partners['normal'].append(partner)
if scores[pdb].has_key('net_charge'):
if scores[pdb]['net_charge']:
cor_nc.append(scores[pdb]['net_charge'])
elif score<0.0:
wrong=wrong+1
wrongs.append(pdb)
partners['reversed'].append(partner)
if scores[pdb].has_key('net_charge'):
if scores[pdb]['net_charge']:
rev_nc.append(scores[pdb]['net_charge'])
elif score==0:
inconclusive=inconclusive+1
partners['incon'].append(partner)
if scores[pdb].has_key('net_charge'):
if scores[pdb]['net_charge']:
inc_nc.append(scores[pdb]['net_charge'])
# If inconclusive, then what is the other residue?
printlines.sort()
for line in printlines:
print line
wrongs.sort()
corrects.sort()
incon.sort()
print 'Total calcs : %d' %totcalcs
print 'Calcs not analyzed: %d' %notdone
print 'Calcs analyzed: %d' %(totcalcs-notdone)
print 'Correct IDs : %d' %correct
print 'Reverse IDs : %d' %wrong
print 'Inconclusive: %d' %inconclusive
print 'Missing pKas: %d' %missingpka
#print 'Correct pdbs',corrects
#print 'Wrong pdbs',wrongs
print 'inconclusive'
for ptype in partners.keys():
print ptype
print partners[ptype]
print
print '---------------------------'
print
import pylab
print cor_nc
print rev_nc
print inc_nc
n,bins,patches=pylab.hist([cor_nc,rev_nc,inc_nc],histtype='bar')
print n
print bins
print patches
#pylab.hist(rev_nc,label='reverse',histtype='barstacked')
#pylab.hist(inc_nc,label='inconclusive',histtype='barstacked')
pylab.legend([patches[0][0],patches[1][0],patches[2][0]],['normal','reverse','outlier'])
pylab.title(structure)
#pylab.show()
# Histograms of pKa values
#pylab.hist(PD_pKas,20,label='PD')
#pylab.hist(Nuc_pKas,20,label='Nuc')
#pylab.hist(His_pKas,20,label='His')
#pylab.title(structure)
#pylab.legend()
#pylab.show()
# Effect of delec
delec_PD=[]
delec_Nuc=[]
ID_PD=[]
ID_PDint=[]
ID_Nuc=[]
ID_Nucint=[]
for count in range(len(PD_pKas)):
delec_PD.append(PD_pKas[count]-PD_intpKas[count])
if PD_pKas[count]>5.0 and PD_pKas[count]-Nuc_pKas[count]>=1.5:
ID_PD.append(PD_pKas[count])
ID_PDint.append(PD_intpKas[count])
elif Nuc_pKas[count]>5.0 and Nuc_pKas[count]-PD_pKas[count]>=1.5:
ID_Nuc.append(Nuc_pKas[count])
ID_Nucint.append(Nuc_intpKas[count])
for count in range(len(Nuc_pKas)):
delec_Nuc.append(Nuc_pKas[count]-Nuc_intpKas[count])
#Plot of pKa value against intrinsic pKa value
#pylab.scatter(PD_intpKas,PD_pKas,len(PD_pKas),'b',label='PD',alpha=0.4)
#pylab.scatter(ID_PDint,ID_PD,len(ID_PDint),'b',label='PD ID',alpha=1)
#pylab.scatter(Nuc_intpKas,Nuc_pKas,len(Nuc_pKas),'r',label='Nuc',alpha=0.4)
#pylab.scatter(ID_Nucint,ID_Nuc,len(ID_Nuc),'r',label='Nuc',alpha=1.0)
# Plot the ones with ID
#pylab.ylabel('pKa value')
#pylab.xlabel('Intrinsic pKa value')
#pylab.plot([0,12],[0,12],c='k',alpha=0.2)
#pylab.plot([0,12],[5,5],c='g')
#pylab.title('pKa values vs. Intrinsic pKa values')
#pylab.legend()
#pylab.xlim(0.0,12.0)
#pylab.ylim(0.0,12.0)
#pylab.show()
#pylab.scatter(delec_PD,PD_pKas,len(PD_pKas),'b',label='PD',alpha=0.4)
#pylab.scatter(delec_Nuc,Nuc_pKas,len(Nuc_pKas),'r',label='Nuc',alpha=0.4)
#pylab.ylabel('pKa value')
#pylab.xlabel('dpKa(elec)')
#pylab.plot([0,12],[0,12],c='r')
#pylab.plot([0,12],[5,5],c='g')
#pylab.title('pKa values vs. delec values')
#pylab.legend()
#pylab.show()
if __name__=='__main__':
#import pylab
#pkas=[4,4,5,5,4,3,8,8,9,7]
#pylab.scatter(pkas,pkas,len(pkas))
#pylab.hist(pkas,20)
#pylab.show()
#main()
make_plots()
|
StarcoderdataPython
|
3387622
|
<gh_stars>1-10
#!/usr/bin/python
import os
import sys
import appdirs
import json
from pathlib import Path as plPath
from operator import itemgetter
from settings import *
from tkinter import filedialog
from pygubu import Builder as pgBuilder
# if dist fails to start because it's missing these, uncomment these two imports
# import pygubu.builder.ttkstdwidgets
# import pygubu.builder.widgets.dialog
from PyPDF2 import PdfFileMerger, PdfFileReader, PdfFileWriter
# check to see if we're running from stand-alone one-file executable:
if hasattr(sys, '_MEIPASS'):
CURRENT_DIR = sys._MEIPASS
else:
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
USER_DIR = str(plPath.home())
CONFIG_DIR = appdirs.user_config_dir(APPNAME)
DATA_DIR = appdirs.user_data_dir(APPNAME)
class SettingsData:
'''Class for managing current user's application settings'''
def __init__(self):
self.__settings_data_path = os.path.join(CONFIG_DIR, 'data.json')
self.__settings_defaults = {
'use_poppler_tools': False,
}
self.__settings_data = self.__get_settings_data()
@property
def use_poppler_tools(self):
'''If set to True, PyPDF Builder will first try to use Poppler Tools where possible
to produce the desired PDFs.
The getter will first try to return the value stored in the
instance, then try to read it out of the user data file, and if all else fails,
set it to False and return that value.
The setter will set the according class instance property and save that property to
a settings data file. If no such file exists yet, one will be created.
'''
return self.__settings_data.get('use_poppler_tools', self.__get_settings_data()['use_poppler_tools'])
@use_poppler_tools.setter
def use_poppler_tools(self, val):
self.__settings_data['use_poppler_tools'] = val
self.__save_settings_data()
def __get_settings_data(self):
'''Method to retrieve current user's settings data
Return:
dict: Dictionary of settings data with keys:
* `use_poppler_tools`: use Poppler PDF tools by default
'''
try:
with (open(self.__settings_data_path, 'r')) as datafile:
settings_data = json.load(datafile)
# make sure all values are returned. If a key is non-existent, fill it with the default value
for key, val in self.__settings_defaults.items():
if key not in settings_data:
settings_data[key] = val
except FileNotFoundError:
settings_data = self.__settings_defaults
return settings_data
def __save_settings_data(self):
if not os.path.exists(os.path.dirname(self.__settings_data_path)):
plPath(os.path.dirname(self.__settings_data_path)).mkdir(parents=True, exist_ok=True)
try:
with (open(self.__settings_data_path, 'w')) as datafile:
json.dump(self.__settings_data, datafile)
except FileNotFoundError:
print('Something went horribly wrong while trying to save your current user data.')
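# Illustrative usage sketch (not part of the original module):
#   settings = SettingsData()
#   settings.use_poppler_tools          # -> False on first run (the documented default)
#   settings.use_poppler_tools = True   # persisted immediately to CONFIG_DIR/data.json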
class UserData:
'''Class for storing current user's application data'''
def __init__(self):
self.__user_data_path = os.path.join(DATA_DIR, 'data.json')
self.__data_defaults = {
'filedialog_path': USER_DIR,
'number_of_processed_files': 0,
}
self.__user_data = self.__get_user_data()
@property
def filedialog_path(self):
'''The last directory the user visited while opening or saving a file
using a Tk File Dialog.
The getter will first try to return the value stored in the
instance, then try to read it out of the user data file, and if all else fails,
set it to the user's home directory and return that value.
The setter will set the according class instance property and save that property to
a user data file. If no such file exists yet, one will be created.
'''
return self.__user_data.get('filedialog_path', self.__get_user_data()['filedialog_path'])
@filedialog_path.setter
def filedialog_path(self, val):
self.__user_data['filedialog_path'] = val
self.__save_user_data()
@property
def number_of_processed_files(self):
'''Simple counter of PDFs produced with PyPDF Builder
The getter will first try to return the value stored in the state of the
instance, then try to read it out of the user data file, and if all else fails,
set it to 0 and return that value.
The setter will set the according class instance property and save that property to
a user data file. If no such file exists yet, one will be created.
'''
return self.__user_data.get('number_of_processed_files', self.__get_user_data()['number_of_processed_files'])
@number_of_processed_files.setter
def number_of_processed_files(self, val):
self.__user_data['number_of_processed_files'] = val
self.__save_user_data()
def __get_user_data(self):
'''Method to retrieve current user's data
Return:
dict: Dictionary of user data with keys:
* `filedialog_path`: last accessed file path
* `number_of_processed_files`: number of processed files
'''
try:
with (open(self.__user_data_path, 'r')) as datafile:
user_data = json.load(datafile)
# make sure all values are returned. If a key is non-existent, fill it with the default value
for key, val in self.__data_defaults.items():
if key not in user_data:
user_data[key] = val
except FileNotFoundError:
user_data = self.__data_defaults
return user_data
def __save_user_data(self):
if not os.path.exists(os.path.dirname(self.__user_data_path)):
plPath(os.path.dirname(self.__user_data_path)).mkdir(parents=True, exist_ok=True)
try:
with (open(self.__user_data_path, 'w')) as datafile:
json.dump(self.__user_data, datafile)
except FileNotFoundError:
print('Something went horribly wrong while trying to save your current user data.')
class PDFInfo:
'''File info class for PDF files.
Instances of this class show information about PDF files that are being edited in
PyPDF Builder.
Args:
filepath (str): Path to PDF File
'''
def __init__(self, filepath):
self.__filepath = filepath
@property
def pages(self):
'''int: Number of pages contained in PDF file'''
with open(self.__filepath, 'rb') as in_pdf:
pdf_handler = PdfFileReader(in_pdf)
return pdf_handler.getNumPages()
def concat_filename(self, max_length=35):
'''Truncate a filename to a certain maximum length.
Args:
max_length (int): Maximum length of the truncated string (default: 35)
Returns:
str: Filename of PDFInfo-object truncated to a max length of `max_length`
'''
basename = os.path.basename(self.__filepath)
concat_filename = f'{basename[0:max_length]}'
if len(basename) > max_length:
concat_filename += '…'
return concat_filename
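# Illustrative example (not part of the original module): with max_length=10,
# 'annual_report_2021_final.pdf' becomes 'annual_rep…' while 'short.pdf' is returned unchanged.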
def pdf_info_string(self, concat_length=35):
'''Fetch a standard info-string about the PDFInfo-object.
Args:
concat_length (int): Maximum length of concatenated filename string (default: 35)
Returns:
str: Information in the format `Filename (pages)` of PDFInfo-object
'''
concat_filename = self.concat_filename(max_length=concat_length)
return f'{concat_filename} ({self.pages} pages)'
class BgTabManager:
def __init__(self, parent=None):
self.parent = parent
self.__source_filepath = None
self.__bg_filepath = None
self.__source_file_info = None
self.__bg_file_info = None
self.__bg_pdf_pages = None
self.__source_file_info_widget = self.parent.builder.get_variable('source_file_info')
self.__bg_file_info_widget = self.parent.builder.get_variable('bg_file_info')
self.__bg_command = self.parent.builder.get_variable('bg_command')
self.__bg_only_first_page = self.parent.builder.get_variable('bg_only_first_page')
self.__bg_button_label = self.parent.builder.get_variable('bg_options_bg_button')
self.__only_first_button_label = self.parent.builder.get_variable('bg_options_only_first_button')
self.__bg_command.set('BG')
@property
def parent(self):
return self.__parent
@parent.setter
def parent(self, val):
self.__parent = val
def choose_source_file(self):
choose_source_file = self.parent.get_file_dialog(
func=filedialog.askopenfilename, widget_title='Choose Source PDF …')
if choose_source_file:
self.__source_filepath = choose_source_file
self.__source_file_info = PDFInfo(self.__source_filepath)
self.__show_source_file_info()
def choose_bg_file(self):
choose_bg_file = self.parent.get_file_dialog(
func=filedialog.askopenfilename, widget_title='Choose Background PDF …')
if choose_bg_file:
self.__bg_filepath = choose_bg_file
self.__bg_file_info = PDFInfo(self.__bg_filepath)
self.__show_bg_file_info()
def __show_source_file_info(self):
self.__source_file_info_widget.set(self.__source_file_info.pdf_info_string(concat_length=80))
def __show_bg_file_info(self):
self.__bg_file_info_widget.set(self.__bg_file_info.pdf_info_string(concat_length=80))
def choose_stamp_option(self):
self.__only_first_button_label.set('Apply stamp to only the first page')
self.__bg_button_label.set('Choose Stamp …')
def choose_bg_option(self):
self.__only_first_button_label.set('Apply background to only the first page')
self.__bg_button_label.set('Choose Background …')
def save_as(self):
save_filepath = self.parent.get_file_dialog(func=filedialog.asksaveasfilename, widget_title='Save New PDF to …')
if self.__source_filepath and self.__bg_filepath:
out_pdf = PdfFileWriter()
command = self.__bg_command.get()
with open(self.__source_filepath, "rb") as source_pdf_stream, \
open(self.__bg_filepath, "rb") as bg_pdf_stream:
for p in range(self.__source_file_info.pages):
# new PdfFileReader instances needed for every page merged. See here:
# https://github.com/mstamy2/PyPDF2/issues/100#issuecomment-43145634
source_pdf = PdfFileReader(source_pdf_stream)
bg_pdf = PdfFileReader(bg_pdf_stream)
if not self.__bg_only_first_page.get() or (self.__bg_only_first_page.get() and p < 1):
if command == 'STAMP':
top_page = bg_pdf.getPage(0)
bottom_page = source_pdf.getPage(p)
elif command == 'BG':
top_page = source_pdf.getPage(p)
bottom_page = bg_pdf.getPage(0)
bottom_page.mergePage(top_page)
else:
bottom_page = source_pdf.getPage(p)
out_pdf.addPage(bottom_page)
with open(save_filepath, "wb") as out_pdf_stream:
out_pdf.write(out_pdf_stream)
self.parent.save_success(status_text=BG_FILE_SUCCESS.format(os.path.basename(save_filepath)))
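# Note on the merge above (illustrative, not from the original author): PyPDF2's
# bottom_page.mergePage(top_page) draws top_page's content on top of bottom_page.
# So for 'STAMP' the chosen stamp PDF page ends up as the top layer over each source
# page, while for 'BG' each source page is layered over the background PDF's first page.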
class SplitTabManager:
'''Manager class for the Split Tab
An instance of this class manages all aspects of the Split Tab in the calling `PyPDFBuilderApplication` instance
Args:
parent (PyPDFBuilderApplication): Application that created the instance and that contains the Split Tab.
'''
def __init__(self, parent=None):
self.parent = parent
self.__split_filepath = None
self.__split_file_info = None
self.__split_file_info_widget = self.parent.builder.get_variable('split_file_info')
@property
def parent(self):
'''PyPDFBuilderApplication: Application that created the instance and that contains the Split Tab.'''
return self.__parent
@parent.setter
def parent(self, val):
self.__parent = val
def open_file(self):
choose_split_file = self.parent.get_file_dialog(
func=filedialog.askopenfilename, widget_title='Choose PDF to Split…')
if choose_split_file:
self.__split_filepath = choose_split_file
self.__split_file_info = PDFInfo(self.__split_filepath)
self.__show_file_info()
def __show_file_info(self):
self.__split_file_info_widget.set(self.__split_file_info.pdf_info_string())
def save_as(self):
if self.__split_filepath:
basepath = os.path.splitext(self.__split_filepath)[0]
# in spite of discussion here https://stackoverflow.com/a/2189814
# we'll just go the lazy way to count the number of needed digits:
num_length = len(str(abs(self.__split_file_info.pages)))
in_pdf = PdfFileReader(open(self.__split_filepath, "rb"))
for p in range(self.__split_file_info.pages):
output_path = f"{basepath}_{str(p+1).rjust(num_length, '0')}.pdf"
out_pdf = PdfFileWriter()
out_pdf.addPage(in_pdf.getPage(p))
with open(output_path, "wb") as out_pdf_stream:
out_pdf.write(out_pdf_stream)
self.parent.save_success(status_text=SPLIT_FILE_SUCCESS.format(os.path.dirname(self.__split_filepath)))
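# Illustrative example (not part of the original module; path is hypothetical): splitting
# a 12-page '/tmp/report.pdf' gives num_length=2 and writes '/tmp/report_01.pdf' through
# '/tmp/report_12.pdf', so the files sort correctly in a directory listing.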
class RotateTabManager:
def __init__(self, parent=None):
self.parent = parent
self.__rotate_filepath = None
self.__rotate_file_info = None
self.__rotate_file_info_widget = self.parent.builder.get_variable('rotate_file_info')
self.__rotate_from_page_widget = self.parent.builder.get_variable('rotate_from_page')
self.__rotate_to_page_widget = self.parent.builder.get_variable('rotate_to_page')
self.__rotate_amount_widget = self.parent.builder.get_variable('rotate_amount')
self.__do_page_extract_widget = self.parent.builder.get_variable('do_extract_pages')
# Set default values. No idea how to avoid this using only the UI file, so I'm
# breaking the MVC principle here.
self.__rotate_amount_widget.set('NO_ROTATE')
self.__rotate_from_page_widget.set('')
self.__rotate_to_page_widget.set('')
self.__do_page_extract_widget.set(True)
@property
def parent(self):
return self.__parent
@parent.setter
def parent(self, val):
self.__parent = val
def open_file(self):
chose_rotate_file = self.parent.get_file_dialog(
func=filedialog.askopenfilename, widget_title='Choose PDF to Rotate…')
if chose_rotate_file:
self.__rotate_filepath = chose_rotate_file
self.__rotate_file_info = PDFInfo(self.__rotate_filepath)
self.__show_file_info()
self.__show_rotate_pages()
def __show_rotate_pages(self):
self.__rotate_from_page_widget.set(1)
self.__rotate_to_page_widget.set(self.__rotate_file_info.pages)
def __show_file_info(self):
self.__rotate_file_info_widget.set(self.__rotate_file_info.pdf_info_string())
def save_as(self):
page_range = (self.__rotate_from_page_widget.get()-1, self.__rotate_to_page_widget.get())
save_filepath = self.parent.get_file_dialog(func=filedialog.asksaveasfilename, widget_title='Save New PDF to…')
if self.__rotate_filepath:
in_pdf = PdfFileReader(open(self.__rotate_filepath, "rb"))
out_pdf = PdfFileWriter()
for p in range(self.__rotate_file_info.pages):
if p in range(*page_range):
if ROTATE_DEGREES[self.__rotate_amount_widget.get()] != 0:
out_pdf.addPage(in_pdf.getPage(p).rotateClockwise(
ROTATE_DEGREES[self.__rotate_amount_widget.get()]))
else:
out_pdf.addPage(in_pdf.getPage(p))
elif not self.__do_page_extract_widget.get():
out_pdf.addPage(in_pdf.getPage(p))
with open(save_filepath, "wb") as out_pdf_stream:
out_pdf.write(out_pdf_stream)
self.parent.save_success(status_text=ROTATE_FILE_SUCCESS.format(os.path.basename(save_filepath)))
class JoinTabManager:
def __init__(self, parent=None):
self.parent = parent
self.__current_file_info = None
self.__files_tree_widget = self.parent.builder.get_object('JoinFilesList')
self.__files_tree_widget['displaycolumns'] = ('FileNameColumn', 'PageSelectColumn')
self.__current_file_info_widget = self.parent.builder.get_variable('current_file_info')
self.__page_select_input_widget = self.parent.builder.get_variable('page_select_input')
self.__selected_files = []
@property
def parent(self):
return self.__parent
@parent.setter
def parent(self, val):
self.__parent = val
def on_file_select(self, event):
self.__selected_files = self.__files_tree_widget.selection()
self.__current_file_info = PDFInfo(
self.__files_tree_widget.item(self.__selected_files[0], 'values')[PDF_FILEPATH])
self.__show_file_info()
self.__show_selected_pages()
def enter_page_selection(self, event):
'''
This method is called when the page selection input field loses focus,
i.e. when input is completed.
'''
for f in self.__selected_files:
file_data = self.__files_tree_widget.item(f, 'values')
page_select = self.__page_select_input_widget.get()
new_tuple = (file_data[PDF_FILENAME], page_select, file_data[PDF_FILEPATH], file_data[PDF_PAGES])
self.__files_tree_widget.item(f, values=new_tuple)
def __show_file_info(self):
self.__current_file_info_widget.set(self.__current_file_info.pdf_info_string(concat_length=25))
def __show_selected_pages(self):
file_data = self.__files_tree_widget.item(self.__selected_files[0], 'values')
self.__page_select_input_widget.set(file_data[PDF_PAGESELECT])
def __get_join_files(self):
return [self.__files_tree_widget.item(i)['values'] for i in self.__files_tree_widget.get_children()]
def __parse_page_select(self, page_select):
'''
As this method deals with raw user input, there will have to be a whole lot of error checking
built into this function at a later time. Really don't look forward to this… at all.
'''
for page_range in page_select.replace(' ', '').split(','):
if '-' in page_range:
range_list = page_range.split('-')
yield tuple(sorted((int(range_list[0])-1, int(range_list[1]))))
else:
yield tuple(sorted((int(page_range)-1, int(page_range))))
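# Illustrative example (not part of the original module): the generator above turns a user
# string such as '1-3, 7' into the 0-based, end-exclusive ranges (0, 3) and (6, 7), which
# PdfFileMerger.append() accepts as its `pages` argument.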
def add_file(self):
add_filepaths = self.parent.get_file_dialog(
func=filedialog.askopenfilenames,
widget_title='Choose PDFs to Add…'
)
if add_filepaths:
for filepath in list(add_filepaths):
filename = os.path.basename(filepath)
file_info = PDFInfo(filepath)
file_data = (filename, '', filepath, file_info.pages)
self.__files_tree_widget.insert('', 'end', values=file_data)
def save_as(self):
if len(self.__get_join_files()) > 0:
save_filepath = self.parent.get_file_dialog(
func=filedialog.asksaveasfilename, widget_title='Save Joined PDF to…')
if save_filepath:
merger = PdfFileMerger()
for f in self.__get_join_files():
if not f[PDF_PAGESELECT]:
merger.append(fileobj=open(f[PDF_FILEPATH], 'rb'))
else:
for page_range in self.__parse_page_select(str(f[PDF_PAGESELECT])):
merger.append(fileobj=open(f[PDF_FILEPATH], 'rb'), pages=page_range)
with open(save_filepath, 'wb') as out_pdf:
merger.write(out_pdf)
self.parent.save_success(status_text=JOIN_FILE_SUCCESS.format(os.path.basename(save_filepath)))
def move_up(self):
selected_files = self.__selected_files
first_idx = self.__files_tree_widget.index(selected_files[0])
parent = self.__files_tree_widget.parent(selected_files[0])
if first_idx > 0:
for f in selected_files:
swap_item = self.__files_tree_widget.prev(f)
new_idx = self.__files_tree_widget.index(swap_item)
self.__files_tree_widget.move(f, parent, new_idx)
def move_down(self):
selected_files = list(reversed(self.__selected_files))
last_idx = self.__files_tree_widget.index(selected_files[0])
parent = self.__files_tree_widget.parent(selected_files[0])
last_idx_in_widget = self.__files_tree_widget.index(self.__files_tree_widget.get_children()[-1])
if last_idx < last_idx_in_widget:
for f in selected_files:
swap_item = self.__files_tree_widget.next(f)
own_idx = self.__files_tree_widget.index(f)
new_idx = self.__files_tree_widget.index(swap_item)
self.__files_tree_widget.move(f, parent, new_idx)
def remove_file(self):
for f in self.__selected_files:
self.__files_tree_widget.detach(f)
class PyPDFBuilderApplication:
'''Main application class. Handles setup and running of all application parts.'''
def __init__(self):
self.builder = pgBuilder()
self.builder.add_from_file(os.path.join(CURRENT_DIR, 'mainwindow.ui'))
self.__mainwindow = self.builder.get_object('MainWindow')
self.__settings_dialog = self.builder.get_object('SettingsDialog', self.__mainwindow)
self.__notebook = self.builder.get_object('AppNotebook')
self.__tabs = {
'join': self.builder.get_object('JoinFrame'),
'split': self.builder.get_object('SplitFrame'),
'bg': self.builder.get_object('BgFrame'),
'rotate': self.builder.get_object('RotateFrame'),
}
self.__mainmenu = self.builder.get_object('MainMenu')
self.__mainwindow.config(menu=self.__mainmenu)
self.__status_text_variable = self.builder.get_variable('application_status_text')
self.__settings_use_poppler_variable = self.builder.get_variable('settings_use_poppler')
self.status_text = None
self.builder.connect_callbacks(self)
self.user_data = UserData()
self.settings_data = SettingsData()
self.__jointab = JoinTabManager(self)
self.__splittab = SplitTabManager(self)
self.__bgtab = BgTabManager(self)
self.__rotatetab = RotateTabManager(self)
self.status_text = DEFAULT_STATUS
@property
def status_text(self):
return self.__status_text_variable.get()
@status_text.setter
def status_text(self, val):
self.__status_text_variable.set(val)
# boy oh boy, if there's any way to do these callbacks more elegantly, please let me gain that knowledge!
def select_tab_join(self, *args, **kwargs):
'''Gets called when menu item "View > Join Files" is selected.
Pops appropriate tab into view.'''
self.__notebook.select(self.__tabs['join'])
def select_tab_split(self, *args, **kwargs):
'''Gets called when menu item "View > Split File" is selected.
Pops appropriate tab into view.'''
self.__notebook.select(self.__tabs['split'])
def select_tab_bg(self, *args, **kwargs):
'''Gets called when menu item "View > Background/Stamp/Number" is selected.
Pops appropriate tab into view.'''
self.__notebook.select(self.__tabs['bg'])
def select_tab_rotate(self, *args, **kwargs):
'''Gets called when menu item "View > Rotate Pages" is selected.
Pops appropriate tab into view.'''
self.__notebook.select(self.__tabs['rotate'])
def jointab_add_file(self):
self.__jointab.add_file()
def jointab_on_file_select(self, event):
self.__jointab.on_file_select(event)
def jointab_enter_page_selection(self, event):
self.__jointab.enter_page_selection(event)
def jointab_save_as(self):
self.__jointab.save_as()
def jointab_move_up(self):
self.__jointab.move_up()
def jointab_move_down(self):
self.__jointab.move_down()
def jointab_remove(self):
self.__jointab.remove_file()
def splittab_open_file(self):
self.__splittab.open_file()
def splittab_save_as(self):
self.__splittab.save_as()
def bgtab_choose_bg_option(self):
self.__bgtab.choose_bg_option()
def bgtab_choose_stamp_option(self):
self.__bgtab.choose_stamp_option()
def bgtab_choose_number_option(self):
'''
Numbering pages is currently not supported by PyPDF2 so this option will remain
disabled for now
'''
pass
def bgtab_choose_source_file(self):
self.__bgtab.choose_source_file()
def bgtab_choose_bg_file(self):
self.__bgtab.choose_bg_file()
def bgtab_save_as(self):
self.__bgtab.save_as()
def rotatetab_open_file(self):
self.__rotatetab.open_file()
def rotatetab_save_as(self):
self.__rotatetab.save_as()
def save_success(self, status_text=DEFAULT_STATUS):
'''Gets called when a PDF file was processed successfully. Currently only
increases the `number_of_processed_files`-counter by 1
'''
self.user_data.number_of_processed_files += 1
self.status_text = status_text
def show_settings(self, *args, **kwargs):
'''Shows the settings dialog. The close event is handled by `self.close_settings()`
and all the settings management is handled there. Args and kwargs are included in
method definition in case it is triggered by the keyboard shortcut, in which
case `event` gets passed into the call.'''
self.__settings_dialog.run()
self.__settings_use_poppler_variable.set(self.settings_data.use_poppler_tools)
def close_settings(self, *args, **kwargs):
self.settings_data.use_poppler_tools = self.__settings_use_poppler_variable.get()
self.__settings_dialog.close()
def cancel_settings(self, *args, **kwargs):
pass
def get_file_dialog(self, func, widget_title='Choose File(s) …'):
f = func(
initialdir=self.user_data.filedialog_path,
title=widget_title,
filetypes=(("PDF File", "*.pdf"), ("All Files", "*.*"))
)
if f:
if type(f) == list or type(f) == tuple:
self.user_data.filedialog_path = os.path.dirname(f[-1])
elif type(f) == str:
self.user_data.filedialog_path = os.path.dirname(f)
return f
def quit(self, event=None):
self.__mainwindow.quit()
def run(self):
self.__mainwindow.mainloop()
if __name__ == '__main__':
app = PyPDFBuilderApplication()
app.run()
|
StarcoderdataPython
|
3285339
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/12/6 12:17
# @Author : weihuchao
class Solution(object):
def largestTimeFromDigits(self, arr):
"""
:type arr: List[int]
:rtype: str
"""
arr.sort()
ret_val, ret_str = -1, ""
def get_time(a, b, c, d):
hour, min = a * 10 + b, c * 10 + d
if hour > 23:
return False, 0
if min > 59:
return False, 0
return True, hour * 100 + min
for a in range(4):
for b in range(4):
if b == a:
continue
for c in range(4):
if c in [a, b]:
continue
for d in range(4):
if d in [a, b, c]:
continue
z, v = get_time(arr[a], arr[b], arr[c], arr[d])
if z and v > ret_val:
ret_str = "%s%s:%s%s" % (arr[a], arr[b], arr[c], arr[d])
return ret_str
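# Illustrative usage (not part of the original solution):
#   Solution().largestTimeFromDigits([1, 2, 3, 4])   # -> "23:41"
#   Solution().largestTimeFromDigits([5, 5, 5, 5])   # -> "" (no valid time can be formed)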
|
StarcoderdataPython
|
1717427
|
<filename>velocidade.py
'''Write a program that asks the user for their car's speed. If the value entered is greater than 80 km/h, display a message saying the driver was fined. In that case, show the fine amount, charging R$ 5.00 for each km/h above 80 km/h.'''
velocidade = int(input("Enter the car's speed: "))
if velocidade > 80:
print("Driver fined!")
multa = (velocidade - 80) * 5
print("The fine amount will be: R$", multa)
else:
print("Phew! No fine this time...")
|
StarcoderdataPython
|
9829
|
"""Tests joulia.unit_conversions.
"""
from django.test import TestCase
from joulia import unit_conversions
class GramsToPoundsTest(TestCase):
def test_grams_to_pounds(self):
self.assertEquals(unit_conversions.grams_to_pounds(1000.0), 2.20462)
class GramsToOuncesTest(TestCase):
def test_grams_to_ounces(self):
self.assertEquals(unit_conversions.grams_to_ounces(1000.0), 35.27392)
|
StarcoderdataPython
|
1787489
|
import datetime as dt
import dateutil.tz
import pandas as pd
import matplotlib.pyplot as plt
ams = dateutil.tz.gettz('Europe/Amsterdam')
utc = dateutil.tz.tzutc()
start_graph = dt.datetime(2021, 3, 20, 18, tzinfo=utc)
end_graph = start_graph + dt.timedelta(minutes=240)
def trivial_interpolation_windnet():
base_windnet_df = pd.read_csv('../data/windnet/cleaned_windnet_data_aug_2020_sep_2021.csv')
base_windnet_df.index = pd.to_datetime(base_windnet_df['date'], utc=True, errors='coerce')
base_windnet_df.index = base_windnet_df.index - dt.timedelta(minutes=5)
base_windnet_df = base_windnet_df.drop(['date'], axis=1)
base_windnet_df['nht_usage_kwh'] = base_windnet_df['nht_usage_kwh'] / 5
base_windnet_df['nht_production_kwh'] = base_windnet_df['nht_production_kwh'] / 5
base_windnet_df['mmt_usage_kwh'] = base_windnet_df['mmt_usage_kwh'] / 5
base_windnet_df['mmt_production_kwh'] = base_windnet_df['mmt_production_kwh'] / 5
base_windnet_df = base_windnet_df.resample('1T').pad()
return base_windnet_df
def pandas_linear_interpolation_windnet():
base_windnet_df = pd.read_csv('../data/windnet/cleaned_windnet_data_aug_2020_sep_2021.csv')
base_windnet_df.index = pd.to_datetime(base_windnet_df['date'], utc=True, errors='coerce')
base_windnet_df.index = base_windnet_df.index - dt.timedelta(minutes=5)
base_windnet_df = base_windnet_df.drop(['date', 'nht_usage_kwh', 'nht_production_kwh', 'mmt_usage_kwh', 'mmt_production_kwh'], axis=1)
base_windnet_df = base_windnet_df.resample('1T').interpolate()
base_windnet_df['nht_usage_kwh'] = base_windnet_df['nht_usage_kw'] / 60
base_windnet_df['nht_production_kwh'] = base_windnet_df['nht_production_kw'] / 60
base_windnet_df['mmt_usage_kwh'] = base_windnet_df['mmt_usage_kw'] / 60
base_windnet_df['mmt_production_kwh'] = base_windnet_df['mmt_production_kw'] / 60
return base_windnet_df
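# Note on the two approaches above (illustrative, not from the original author): the
# "trivial" version spreads each 5-minute energy reading evenly by dividing the kWh
# columns by 5 and forward-filling with `.pad()`, while the pandas version linearly
# interpolates the power (kW) columns to 1-minute resolution and derives per-minute
# energy as kW / 60. Both should roughly preserve the original 5-minute totals, with
# the linear version producing smoother curves.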
def csv_maker():
trivial_df = trivial_interpolation_windnet()
pandas_df = pandas_linear_interpolation_windnet()
print(trivial_df)
print(pandas_df)
trivial_df.to_csv('../data/windnet/trivial_interpolation_windnet.csv')
pandas_df.to_csv('../data/windnet/pandas_interpolation_windnet.csv')
def make_simple_graph(filtered_df, title):
plt.plot(filtered_df.index, filtered_df['nht_production_kw'], marker='o')
plt.title(title)
plt.xlabel('Time')
plt.ylabel('Produced power (kW)')
plt.show()
if __name__ == '__main__':
# csv_maker()
original_df = pd.read_csv('../data/windnet/cleaned_windnet_data_aug_2020_sep_2021.csv')
original_df.index = pd.to_datetime(original_df['date'], utc=True, errors='coerce')
original_df.index = original_df.index - dt.timedelta(minutes=5)
original_df = original_df.drop(['date'], axis=1)
trivial_df = pd.read_csv('../data/windnet/trivial_interpolation_windnet.csv')
trivial_df.index = pd.to_datetime(trivial_df['date'])
pandas_df = pd.read_csv('../data/windnet/pandas_interpolation_windnet.csv')
pandas_df.index = pd.to_datetime(pandas_df['date'])
original_df = original_df[original_df.index.to_series().between(start_graph, end_graph)]
trivial_df = trivial_df[trivial_df.index.to_series().between(start_graph, end_graph)]
pandas_df = pandas_df[pandas_df.index.to_series().between(start_graph, end_graph)]
make_simple_graph(original_df, title='Original data')
make_simple_graph(trivial_df, title='Trivial interpolation.')
make_simple_graph(pandas_df, title='Pandas interpolation.')
# csv_maker()
|
StarcoderdataPython
|
181563
|
from typing import Tuple
from NewDeclarationInQueue.processfiles.customprocess.formulars.davere import DAvere
from NewDeclarationInQueue.processfiles.customprocess.search_text_line_parameter import SearchTextLineParameter
from NewDeclarationInQueue.processfiles.customprocess.table_config_detail import TableConfigDetail
from NewDeclarationInQueue.processfiles.tableobjects.art import Art
from NewDeclarationInQueue.processfiles.tableobjects.building import Building
from NewDeclarationInQueue.processfiles.tableobjects.debt import Debt
from NewDeclarationInQueue.processfiles.tableobjects.finance import Finance
from NewDeclarationInQueue.processfiles.tableobjects.gift import Gift
from NewDeclarationInQueue.processfiles.tableobjects.income import Income
from NewDeclarationInQueue.processfiles.tableobjects.investment import Investment
from NewDeclarationInQueue.processfiles.tableobjects.mobile import Mobile
from NewDeclarationInQueue.processfiles.tableobjects.parcel import Parcel
from NewDeclarationInQueue.processfiles.tableobjects.transport import Transport
#from NewDeclarationInQueue.processfiles.customprocess.text_with_special_ch import TextWithSpecialCharacters
from NewDeclarationInQueue.processfiles.process_messages import ProcessMessages
class Davere01(DAvere):
"""
Class for a specific formular for Wealth Declaration
"""
def __init__(self, no_of_pages: int):
self.no_of_pages = no_of_pages
def get_parcels(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
n_page (int): page number where the parcel table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('parcels', tables, lambda x: Parcel(), message)
if message.has_errors() or result is not None:
json['parcels'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_buildings(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
            n_page (int): page number where the table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('buildings', tables, lambda x: Building(), message)
if message.has_errors() or result is not None:
json['buildings'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_transport(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
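        """
        Get the info from the table of the specific object
        Args:
            data (dict): table info from the Form Recognizer service
            n_page (int): page number where the table is
            json (dict): output JSON info
            message (ProcessMessages): processing message collector
        Returns:
            Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
                and the page number where the table ends
        """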
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('transport', tables, lambda x: Transport(), message)
if message.has_errors() or result is not None:
json['transport'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_art(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
            n_page (int): page number where the table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('art', tables, lambda x: Art(), message)
if message.has_errors() or result is not None:
json['art'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_mobile(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
            n_page (int): page number where the table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('mobile', tables, lambda x: Mobile(), message)
if message.has_errors() or result is not None:
json['mobile'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_finances(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
            n_page (int): page number where the table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('finances', tables, lambda x: Finance(), message)
if message.has_errors() or result is not None:
json['finance'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_investments(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
            n_page (int): page number where the table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('investments', tables, lambda x: Investment(), message)
if message.has_errors() or result is not None:
json['investment'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_extra_finance_info(self, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get text from a specific section
Args:
data (dict): text info from the Form Recognizer service
            n_page (int): page number where the section is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
            Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
                and the page number where the section ends
"""
lines, end_page_no = self.find_lines_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, \
'3. Alte active producatoare de venituri nete,', None, False, \
'NOTA:', None, False)
result = self.extract_lines_info_to_json(lines)
if result is not None and len(result) > 0:
json['finance_extra_info'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_debt(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
            n_page (int): page number where the table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('debt', tables, lambda x: Debt(), message)
if message.has_errors() or result is not None:
json['debt'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_gift(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
            n_page (int): page number where the table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_one_level_to_json(tables, \
config.first_level, lambda x: Gift(), message)
if message.has_errors() or result is not None:
json['gift'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_income(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
            n_page (int): page number where the table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_two_level_to_json(tables, \
config.second_level, config.first_level, lambda x: Income(), message)
if message.has_errors() or result is not None:
json['income'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
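# Hypothetical usage sketch, added for illustration and not part of the original module.
# The contents of the TableConfigDetail and the shape of the OCR payload are assumptions;
# only the method signatures above come from the code itself.
#
#   form = Davere01(no_of_pages=12)
#   messages = ProcessMessages()                      # constructor arguments, if any, are omitted here
#   json_out, messages, next_page = form.get_parcels(
#       parcel_config,                                # a TableConfigDetail with upper/lower/header markers
#       {'ocr_form_response': ocr_payload},           # raw Form Recognizer output
#       n_page=1, json={}, message=messages)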
|
StarcoderdataPython
|
1636928
|
<filename>turtlebot4_bringup/launch/rplidar.launch.py
# Copyright 2021 Clearpath Robotics, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author <NAME> (<EMAIL>)
from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
rplidar_node = Node(
name='rplidar_composition',
package='rplidar_ros',
executable='rplidar_composition',
output='screen',
parameters=[{
'serial_port': '/dev/RPLIDAR',
'serial_baudrate': 115200,
'frame_id': 'rplidar_link',
'inverted': False,
'angle_compensate': True,
}],
)
ld = LaunchDescription()
ld.add_action(rplidar_node)
return ld
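# A typical invocation for this launch file (an assumption, not stated in the file itself):
#   ros2 launch turtlebot4_bringup rplidar.launch.py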
|
StarcoderdataPython
|
3348269
|
<reponame>luiz-fischer/python<gh_stars>1-10
#WHILE
i = 1
while i < 6:
print(i)
i += 1
#The Break Statement
i = 1
while i < 6:
print(i)
if i == 3:
break
i += 1
#The Continue Statement
i = 0
while i < 6:
i += 1
if i == 3:
continue
print(i)
#The Else Statement
i = 1
while i < 6:
print(i)
i += 1
else:
print("i is no longer less than 6")
#FOR
fruits = ["apple", "banana", "cherry"]
for x in fruits:
print(x)
#Looping Through a String
for x in "banana":
print(x)
#The Break Statement
fruits = ["Apple", "banana", "cherry"]
for x in fruits:
print(x)
if x == "banana":
break
#Exit the loop when x is "banana", but this time the break comes before the print
fruits = ["apple", "banana", "cherry"]
for x in fruits:
if x == "banana":
break
print(x)
#The Continue Statement
fruits = ["apple", "banana", "cherry"]
for x in fruits:
if x == "banana":
continue
print(x)
#The Range Function
for x in range(6):
print(x)
for x in range(2, 6):
print(x)
for x in range(2, 30, 3):
print(x)
#Else in For Loop
for x in range(6):
print(x)
else:
print("Finally Finished")
#Nested Loops
adj = ["Red", "Big", "Tasty"]
fruits = ["Apple", "Banana", "Cherry"]
for x in adj:
for y in fruits:
print(x, y)
#The Pass Statement
for x in [0, 1, 2]:
pass
|
StarcoderdataPython
|
1772301
|
# coding: utf-8
from __future__ import absolute_import
from django.conf import settings
from sentry.models import Project, Team, User
from sentry.receivers.core import create_default_project
from sentry.testutils import TestCase
class CreateDefaultProjectTest(TestCase):
def test_simple(self):
user, _ = User.objects.get_or_create(is_superuser=True, defaults={
'username': 'test'
})
Team.objects.filter(project__id=settings.SENTRY_PROJECT).delete()
Project.objects.filter(id=settings.SENTRY_PROJECT).delete()
create_default_project(created_models=[Project])
project = Project.objects.filter(id=settings.SENTRY_PROJECT)
assert project.exists() is True
project = project.get()
assert project.owner == user
assert project.public is False
assert project.name == 'Sentry (Internal)'
assert project.slug == 'sentry'
team = project.team
assert team.owner == user
assert team.slug == 'sentry'
|
StarcoderdataPython
|
188674
|
<gh_stars>10-100
from __future__ import absolute_import
from .classification import accuracy
from .eval_reid import eval_func
__all__ = [
'accuracy',
'eval_func'
]
|
StarcoderdataPython
|
3226916
|
import functools as ft
import itertools as it
import typing as t
from .. import openapi as api
from .references import references
@ft.singledispatch
def generate_imports(
input: t.Union[api.Operation, api.Schema, api.Reference], module: t.Sequence[str]
) -> t.Iterator[str]:
raise NotImplementedError(f"generate_imports not defined for {type(input)}")
@generate_imports.register
def generate_imports_operation(
operation: api.Operation, module: t.Sequence[str]
) -> t.Iterator[str]:
schema: t.Union[api.Schema, api.Reference]
for schema in it.chain( # type: ignore
(parameter.schema for parameter in operation.parameters),
(operation.response,),
(operation.request_body,) if operation.request_body else [],
):
yield from generate_imports(schema, module)
@generate_imports.register
def generate_imports_schema(
schema: api.Schema, module: t.Sequence[str]
) -> t.Iterator[str]:
for reference in references(schema):
yield from generate_import(reference, module)
@generate_imports.register
def generate_import(
reference: api.Reference, module: t.Sequence[str]
) -> t.Iterator[str]:
if not reference.local(module):
yield f"from {'.'.join(reference.module)} import {reference.name}"
|
StarcoderdataPython
|
161290
|
<gh_stars>0
from .firebase import *
|
StarcoderdataPython
|
3204244
|
"""
test_run_chronostar.py
Integration test, testing some simple scenarios for NaiveFit
"""
import logging
import numpy as np
import sys
from distutils.dir_util import mkpath
sys.path.insert(0, '..')
from chronostar.naivefit import NaiveFit
from chronostar.synthdata import SynthData
from chronostar.component import SphereComponent
from chronostar import tabletool
from chronostar import expectmax
PY_VERS = sys.version[0]
def dummy_trace_orbit_func(loc, times=None):
"""
Integrating orbits takes a long time. So we can run this test quickly,
we enforce the age of all components to be ~0, and then use this
"dummy" trace orbit function.
This function doesn't do anything, and it definitely should not be
    used in any actual computation. It is merely a placeholder to
skip irrelevant computation when running integration tests.
"""
if times is not None:
if np.all(times > 1.0):
return loc + 1000.
return loc
def test_2comps_and_background():
"""
Synthesise a file with negligible error, retrieve initial
parameters
Takes a while... maybe this belongs in integration unit_tests
    Performance of this test is a bit tricky to calibrate. Since we are skipping
    any temporal evolution for speed reasons, we model two
    isotropic Gaussians. Now if these Gaussians are too far apart, NaiveFit
    will gravitate to one of the Gaussians during the 1 component fit, and then
    struggle to discover the second Gaussian.
    If the Gaussians are too close, then both will be characterised by the
    1 component fit, and the BIC will decide two Gaussian components are
    overkill.
    I think I've addressed this by having the two groups have
    a large number of stars.
"""
using_bg = True
run_name = '2comps_and_background'
logging.info(60 * '-')
logging.info(15 * '-' + '{:^30}'.format('TEST: ' + run_name) + 15 * '-')
logging.info(60 * '-')
savedir = 'temp_data/{}_naive_{}/'.format(PY_VERS, run_name)
mkpath(savedir)
data_filename = savedir + '{}_naive_{}_data.fits'.format(PY_VERS,
run_name)
log_filename = 'temp_data/{}_naive_{}/log.log'.format(PY_VERS,
run_name)
logging.basicConfig(level=logging.INFO, filemode='w',
filename=log_filename)
### INITIALISE SYNTHETIC DATA ###
# DON'T CHANGE THE AGE! BECAUSE THIS TEST DOESN'T USE ANY ORBIT INTEGRATION!!!
# Note: if peaks are too far apart, it will be difficult for
# chronostar to identify the 2nd when moving from a 1-component
# to a 2-component fit.
uniform_age = 1e-10
sphere_comp_pars = np.array([
# X, Y, Z, U, V, W, dX, dV, age,
[ 0, 0, 0, 0, 0, 0, 5., 2, uniform_age],
[ 30, 0, 0, 0, 5, 0, 5., 2, uniform_age],
])
starcounts = [100, 150]
ncomps = sphere_comp_pars.shape[0]
nstars = np.sum(starcounts)
background_density = 1e-9
# initialise z appropriately
true_memb_probs = np.zeros((np.sum(starcounts), ncomps))
start = 0
for i in range(ncomps):
true_memb_probs[start:start + starcounts[i], i] = 1.0
start += starcounts[i]
try:
# Check if the synth data has already been constructed
data_dict = tabletool.build_data_dict_from_table(data_filename)
except:
synth_data = SynthData(pars=sphere_comp_pars, starcounts=starcounts,
Components=SphereComponent,
background_density=background_density,
)
synth_data.synthesise_everything()
tabletool.convert_table_astro2cart(synth_data.table,
write_table=True,
filename=data_filename)
background_count = len(synth_data.table) - np.sum(starcounts)
# insert background densities
synth_data.table['background_log_overlap'] =\
len(synth_data.table) * [np.log(background_density)]
synth_data.table.write(data_filename, overwrite=True)
origins = [SphereComponent(pars) for pars in sphere_comp_pars]
### SET UP PARAMETER FILE ###
fit_pars = {
'results_dir':savedir,
'data_table':data_filename,
'trace_orbit_func':'dummy_trace_orbit_func',
'return_results':True,
'par_log_file':'fit_pars.log',
'overwrite_prev_run':True,
# 'nthreads':18,
'nthreads':3,
}
### INITIALISE AND RUN A NAIVE FIT ###
naivefit = NaiveFit(fit_pars=fit_pars)
result, score = naivefit.run_fit()
best_comps = result['comps']
memb_probs = result['memb_probs']
# Check membership has ncomps + 1 (bg) columns
n_fitted_comps = memb_probs.shape[-1] - 1
assert ncomps == n_fitted_comps
### CHECK RESULT ###
# No guarantee of order, so check if result is permutated
# also we drop the bg memberships for permutation reasons
perm = expectmax.get_best_permutation(memb_probs[:nstars,:ncomps], true_memb_probs)
memb_probs = memb_probs[:nstars]
logging.info('Best permutation is: {}'.format(perm))
n_misclassified_stars = np.sum(np.abs(true_memb_probs - np.round(memb_probs[:,perm])))
# Check fewer than 15% of association stars are misclassified
try:
assert n_misclassified_stars / nstars * 100 < 15
except AssertionError:
import pdb; pdb.set_trace()
for origin, best_comp in zip(origins, np.array(best_comps)[perm,]):
assert (isinstance(origin, SphereComponent) and
isinstance(best_comp, SphereComponent))
o_pars = origin.get_pars()
b_pars = best_comp.get_pars()
logging.info("origin pars: {}".format(o_pars))
logging.info("best fit pars: {}".format(b_pars))
assert np.allclose(origin.get_mean(),
best_comp.get_mean(),
atol=5.)
assert np.allclose(origin.get_sphere_dx(),
best_comp.get_sphere_dx(),
atol=2.5)
assert np.allclose(origin.get_sphere_dv(),
best_comp.get_sphere_dv(),
atol=2.5)
assert np.allclose(origin.get_age(),
best_comp.get_age(),
atol=1.)
if __name__ == '__main__':
test_2comps_and_background()
|
StarcoderdataPython
|
57175
|
import json
from app import create_app, db
from app.models import User, UserType
from .base import BaseTest
class TestOrders(BaseTest):
def setUp(self):
self.app = create_app(config_name='testing')
self.client = self.app.test_client()
with self.app.app_context():
db.create_all()
self.setUpAuth()
def data(self):
return json.dumps({
'quantity': 2,
'user_id': self.user['id'],
'menu_item_id': self.create_menu_item()['menu_item']['id']
})
def test_can_create_order(self):
res = self.client.post(
'api/v1/orders', data=self.data(), headers=self.user_headers)
self.assertEqual(res.status_code, 201)
self.assertIn(b'Successfully saved order', res.data)
def test_cannot_create_order_without_user_id(self):
res = self.client.post(
'api/v1/orders',
data=self.data_without(['user_id']),
headers=self.user_headers)
self.assertEqual(res.status_code, 400)
self.assertIn(b'user id field is required', res.data)
def test_cannot_create_order_without_quantity(self):
res = self.client.post(
'api/v1/orders',
data=self.data_without(['quantity']),
headers=self.user_headers)
self.assertEqual(res.status_code, 400)
self.assertIn(b'quantity field is required', res.data)
def test_cannot_create_order_without_menu_item_id(self):
res = self.client.post(
'api/v1/orders',
data=self.data_without(['menu_item_id']),
headers=self.user_headers)
self.assertEqual(res.status_code, 400)
self.assertIn(b'menu item id field is required', res.data)
    def test_cannot_create_order_with_more_quantity_than_available(self):
res = self.client.post(
'api/v1/orders',
data=self.data_with({
'quantity': 1000
}),
headers=self.user_headers)
self.assertEqual(res.status_code, 400)
self.assertIn(b'meal(s) are available', res.data)
def test_can_update_order(self):
json_res = self.create_order()
res = self.client.put(
'api/v1/orders/{}'.format(json_res['order']['id']),
data=json.dumps({
'quantity': 20,
'menu_item_id': json_res['order']['menu_item_id'],
}),
headers=self.user_headers)
json_res = self.to_dict(res)
self.assertEqual(res.status_code, 200)
self.assertEqual(json_res['order']['quantity'], 20)
self.assertIn(b'successfully updated', res.data)
def test_admin_can_update_order(self):
json_res = self.create_order()
res = self.client.put(
'api/v1/orders/{}'.format(json_res['order']['id']),
data=json.dumps({
'quantity': 20,
'menu_item_id': json_res['order']['menu_item_id'],
}),
headers=self.admin_headers)
json_res = self.to_dict(res)
self.assertEqual(res.status_code, 200)
self.assertEqual(json_res['order']['quantity'], 20)
self.assertIn(b'successfully updated', res.data)
def test_cannot_update_another_users_order(self):
json_res = self.create_order()
user, headers = self.authUser(email='<EMAIL>')
res = self.client.put(
'api/v1/orders/{}'.format(json_res['order']['id']),
data=json.dumps({
'quantity': 20,
'menu_item_id': json_res['order']['menu_item_id'],
}),
headers=headers)
self.assertEqual(res.status_code, 401)
self.assertIn(b'Unauthorized access', res.data)
def test_can_get_order(self):
json_res = self.create_order()
res = self.client.get(
'api/v1/orders/{}'.format(json_res['order']['id']),
headers=self.user_headers)
self.assertEqual(res.status_code, 200)
self.assertIn(b'successfully retrieved', res.data)
def test_can_get_many_orders(self):
json_res = self.create_order()
res = self.client.get(
'api/v1/orders',
headers=self.user_headers)
self.assertEqual(res.status_code, 200)
self.assertIn(b'Successfully retrieved orders', res.data)
def test_can_get_many_orders_history(self):
json_res = self.create_order()
res = self.client.get(
'api/v1/orders?history=1',
headers=self.user_headers)
self.assertEqual(res.status_code, 200)
self.assertIn(b'Successfully retrieved orders', res.data)
def test_can_delete_order(self):
json_res = self.create_order()
res = self.client.delete(
'api/v1/orders/{}'.format(json_res['order']['id']),
headers=self.user_headers)
self.assertEqual(res.status_code, 200)
self.assertIn(b'successfully deleted', res.data)
res = self.client.get(
'api/v1/orders/{}'.format(json_res['order']['id']),
headers=self.user_headers)
self.assertEqual(res.status_code, 404)
def test_cannot_delete_another_users_order(self):
json_res = self.create_order()
user, headers = self.authUser(email='<EMAIL>')
res = self.client.delete(
'api/v1/orders/{}'.format(json_res['order']['id']),
headers=headers)
self.assertEqual(res.status_code, 401)
self.assertIn(b'Unauthorized access', res.data)
def create_order(self):
res = self.client.post(
'api/v1/orders', data=self.data(), headers=self.user_headers)
self.assertEqual(res.status_code, 201)
self.assertIn(b'Successfully saved order', res.data)
return self.to_dict(res)
def create_menu_item(self):
# create a meal
res = self.client.post(
'api/v1/meals',
data=json.dumps({
'name': 'ugali',
'cost': 30,
}),
headers=self.admin_headers)
self.assertEqual(res.status_code, 201)
self.assertIn(b'Successfully saved meal', res.data)
meal_id = self.to_dict(res)['meal']['id']
# now create a menu
res = self.client.post(
'api/v1/menus',
data=json.dumps({
'name': 'Lunch'
}),
headers=self.admin_headers)
self.assertEqual(res.status_code, 201)
self.assertIn(b'Successfully saved menu', res.data)
menu_id = self.to_dict(res)['menu']['id']
# finally create a menu item
res = self.client.post(
'api/v1/menu-items',
data=json.dumps({
'quantity': 100,
'menu_id': menu_id,
'meal_id': meal_id
}),
headers=self.admin_headers)
self.assertEqual(res.status_code, 201)
self.assertIn(b'Successfully saved menu item', res.data)
return self.to_dict(res)
def tearDown(self):
with self.app.app_context():
db.drop_all()
|
StarcoderdataPython
|
4811009
|
<gh_stars>0
from pyspark import RDD
from pyspark.sql import SparkSession
from pyspark.sql.types import *
if __name__ == "__main__":
spark = SparkSession.builder.appName("Triangles").getOrCreate()
sc = spark.sparkContext
edges: RDD = sc.textFile("./data/graph.txt")
edges = edges.map(lambda l: l.split()).map(lambda p: (int(p[0]), int(p[1]))) \
.filter(lambda v: v[0] != v[1]).map(lambda v: v if v[0] < v[1] else (v[1], v[0])).distinct().cache()
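    # Sketch of the counting idea (comment added for clarity, not in the original source):
    # ab(A,B), bc1(B,C1) and ac2(A,C2) below are three relational views of the same
    # ordered edge list (each edge stored once with the smaller endpoint first).
    # Joining ab with bc1 on B produces open paths A-B-C1, and joining with ac2 on A
    # proposes a closing edge A-C2; rows where C1 = C2 are triangles with A < B < C,
    # so each triangle is counted exactly once.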
abFields = [StructField("A", IntegerType(), False), StructField("B", IntegerType(), False)]
abSchema = StructType(abFields)
ab = spark.createDataFrame(edges, abSchema)
bc1Fields = [StructField("B", IntegerType(), False), StructField("C1", IntegerType(), False)]
bc1Schema = StructType(bc1Fields)
bc1 = spark.createDataFrame(edges, bc1Schema)
ac2Fields = [StructField("A", IntegerType(), False), StructField("C2", IntegerType(), False)]
ac2Schema = StructType(ac2Fields)
ac2 = spark.createDataFrame(edges, ac2Schema)
abc1c2 = ab.join(bc1, "B").join(ac2, "A")
print(abc1c2.filter("C1 = C2").count())
|
StarcoderdataPython
|
3367643
|
<filename>v0.10.0/lnclipb/__init__.py
from .lncli_pb2 import *
from .lncli_pb2_grpc import *
|
StarcoderdataPython
|
132190
|
from view import screen, images
import common
from model import maps
commands = "Enter a (#) to purchase an item, (L)eave Shop"
# This function controls our interactions at the weapons store
def enter_the_map_shop(our_hero):
is_leaving_the_shop = False
message = "Welcome to Tina's Cartography, mighty warrior! Would you like to buy a map of the dungeon? They are " \
"incredibly useful, and many warriors died to produce them! "
left_pane = images.scroll
right_pane = draw_map_list()
while not is_leaving_the_shop:
screen.paint(screen.State(
common.get_stats(None, our_hero),
commands,
message,
left_pane,
right_pane
))
next_move = input("Next? ")
if next_move.lower() == 'l':
is_leaving_the_shop = True
elif next_move.isdigit():
number_picked = int(next_move)
if number_picked < len(maps.map_list):
m = maps.map_list[number_picked]
if m in our_hero.inventory:
message = "You already own that map!"
elif our_hero.gold < m["cost"]:
message = "You don't have enough money for that!"
else:
our_hero.gold -= m["cost"]
our_hero.inventory.append(m)
message = "You have boughten the " + m["name"] + "!"
else:
message = "There is no map for that number!"
def draw_map_list():
border = "<====================<o>====================>\n"
response = border
response += " # | Item | Cost " + '\n'
response += border
for number, m in enumerate(maps.map_list):
response += common.front_padding(str(number), 3) + " | " \
+ common.back_padding(m["name"], 17) + " | " \
+ common.front_padding(str(m["cost"]), 4) + ' Gold\n'
response += border
return response
|
StarcoderdataPython
|
1718008
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# Meta-info
Author: <NAME>
Created: 10/03/2017
Updated: 10/03/2017
# Description
Unit tests for the functions in the ands.algorithms.dac.select module.
"""
import unittest
from random import randint, sample, randrange
from ands.algorithms.dac.select import select
class TestSelect(unittest.TestCase):
def test_when_empty_list(self):
# No matter which value for k, with an empty list ValueError is always raised.
self.assertRaises(ValueError, select, [], 2)
def test_when_list_size_1_invalid_k(self):
self.assertRaises(ValueError, select, [3], 1)
self.assertRaises(ValueError, select, [3], -1)
def test_when_list_size_2_invalid_k(self):
self.assertRaises(ValueError, select, [3, 5], 2)
self.assertRaises(ValueError, select, [3, 5], -1)
def test_when_list_size_1_k_is_zero(self):
self.assertEqual(select([7], 0), 7)
def test_when_list_size_2_k_is_zero(self):
self.assertEqual(select([7, 5], 0), 5)
self.assertEqual(select([5, 7], 0), 5)
def test_when_list_random_size_k_is_zero(self):
a = [randint(-100, 100) for _ in range(randint(3, 100))]
self.assertEqual(select(a, 0), min(a))
def test_when_list_random_size_all_elements_equal(self):
x = randint(-100, 100)
a = [x] * randint(1, 100)
self.assertEqual(select(a, randint(0, len(a) - 1)), x)
def test_when_list_random_size_random_k(self):
a = sample(range(100), 100)
self.assertIn(select(a, randrange(0, len(a))), a)
|
StarcoderdataPython
|
139143
|
<filename>alipay/aop/api/domain/AlipayDataAiserviceSmartpriceGetModel.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayDataAiserviceSmartpriceGetModel(object):
def __init__(self):
self._base_price_cent = None
self._channel = None
self._city_code = None
self._default_promo_price_cent = None
self._from = None
self._high_price_cent = None
self._lower_price_cent = None
self._trace_id = None
self._user_id = None
@property
def base_price_cent(self):
return self._base_price_cent
@base_price_cent.setter
def base_price_cent(self, value):
self._base_price_cent = value
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value
@property
def city_code(self):
return self._city_code
@city_code.setter
def city_code(self, value):
self._city_code = value
@property
def default_promo_price_cent(self):
return self._default_promo_price_cent
@default_promo_price_cent.setter
def default_promo_price_cent(self, value):
self._default_promo_price_cent = value
    @property
    def from_(self):
        # 'from' is a reserved keyword in Python, so the attribute is exposed as 'from_'
        return self._from
    @from_.setter
    def from_(self, value):
        self._from = value
@property
def high_price_cent(self):
return self._high_price_cent
@high_price_cent.setter
def high_price_cent(self, value):
self._high_price_cent = value
@property
def lower_price_cent(self):
return self._lower_price_cent
@lower_price_cent.setter
def lower_price_cent(self, value):
self._lower_price_cent = value
@property
def trace_id(self):
return self._trace_id
@trace_id.setter
def trace_id(self, value):
self._trace_id = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.base_price_cent:
if hasattr(self.base_price_cent, 'to_alipay_dict'):
params['base_price_cent'] = self.base_price_cent.to_alipay_dict()
else:
params['base_price_cent'] = self.base_price_cent
if self.channel:
if hasattr(self.channel, 'to_alipay_dict'):
params['channel'] = self.channel.to_alipay_dict()
else:
params['channel'] = self.channel
if self.city_code:
if hasattr(self.city_code, 'to_alipay_dict'):
params['city_code'] = self.city_code.to_alipay_dict()
else:
params['city_code'] = self.city_code
if self.default_promo_price_cent:
if hasattr(self.default_promo_price_cent, 'to_alipay_dict'):
params['default_promo_price_cent'] = self.default_promo_price_cent.to_alipay_dict()
else:
params['default_promo_price_cent'] = self.default_promo_price_cent
        if self.from_:
            if hasattr(self.from_, 'to_alipay_dict'):
                params['from'] = self.from_.to_alipay_dict()
            else:
                params['from'] = self.from_
if self.high_price_cent:
if hasattr(self.high_price_cent, 'to_alipay_dict'):
params['high_price_cent'] = self.high_price_cent.to_alipay_dict()
else:
params['high_price_cent'] = self.high_price_cent
if self.lower_price_cent:
if hasattr(self.lower_price_cent, 'to_alipay_dict'):
params['lower_price_cent'] = self.lower_price_cent.to_alipay_dict()
else:
params['lower_price_cent'] = self.lower_price_cent
if self.trace_id:
if hasattr(self.trace_id, 'to_alipay_dict'):
params['trace_id'] = self.trace_id.to_alipay_dict()
else:
params['trace_id'] = self.trace_id
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayDataAiserviceSmartpriceGetModel()
if 'base_price_cent' in d:
o.base_price_cent = d['base_price_cent']
if 'channel' in d:
o.channel = d['channel']
if 'city_code' in d:
o.city_code = d['city_code']
if 'default_promo_price_cent' in d:
o.default_promo_price_cent = d['default_promo_price_cent']
if 'from' in d:
            o.from_ = d['from']
if 'high_price_cent' in d:
o.high_price_cent = d['high_price_cent']
if 'lower_price_cent' in d:
o.lower_price_cent = d['lower_price_cent']
if 'trace_id' in d:
o.trace_id = d['trace_id']
if 'user_id' in d:
o.user_id = d['user_id']
return o
|
StarcoderdataPython
|
39450
|
import json
from aio_pika import Message, DeliveryMode, ExchangeType
from lib.ipc.util import poll_for_async_connection
class Emitter:
def __init__(self):
self.connection = None
self.event_exchange = None
async def connect(self, loop):
# Perform connection
self.connection = await poll_for_async_connection(loop)
channel = await self.connection.channel()
self.event_exchange = await channel.declare_exchange(
'events', ExchangeType.TOPIC
)
return self
async def close(self):
await self.connection.close()
async def emit(self, routing_key, body):
message = Message(
json.dumps(body).encode(),
delivery_mode=DeliveryMode.PERSISTENT
)
# Sending the message
await self.event_exchange.publish(
message, routing_key=routing_key
)
|
StarcoderdataPython
|
1761040
|
from .value_list_content_block import ValueListContentBlockRenderer
from .table_content_block import TableContentBlockRenderer
from .bullet_list_content_block import PrescriptiveBulletListContentBlockRenderer
|
StarcoderdataPython
|
139429
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 9 01:58:58 2019
@author: iqbalsublime
"""
#================================================================================================================
#----------------------------------------------------------------------------------------------------------------
# K NEAREST NEIGHBOURS
#----------------------------------------------------------------------------------------------------------------
#================================================================================================================
# Details of implementation/tutorial is in : http://madhugnadig.com/articles/machine-learning/2017/01/13/implementing-k-nearest-neighbours-from-scratch-in-python.html
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import pandas as pd
import random
from collections import Counter
from sklearn import preprocessing
import time
#for plotting
plt.style.use('ggplot')
class CustomKNN:
def __init__(self):
self.accurate_predictions = 0
self.total_predictions = 0
self.accuracy = 0.0
def predict(self, training_data, to_predict, k = 3):
if len(training_data) >= k:
print("K cannot be smaller than the total voting groups(ie. number of training data points)")
return
distributions = []
for group in training_data:
for features in training_data[group]:
euclidean_distance = np.linalg.norm(np.array(features)- np.array(to_predict))
distributions.append([euclidean_distance, group])
results = [i[1] for i in sorted(distributions)[:k]]
result = Counter(results).most_common(1)[0][0]
confidence = Counter(results).most_common(1)[0][1]/k
return result, confidence
def test(self, test_set, training_set):
for group in test_set:
for data in test_set[group]:
predicted_class,confidence = self.predict(training_set, data, k =3)
if predicted_class == group:
self.accurate_predictions += 1
else:
print("Wrong classification with confidence " + str(confidence * 100) + " and class " + str(predicted_class))
self.total_predictions += 1
self.accuracy = 100*(self.accurate_predictions/self.total_predictions)
print("\nAcurracy :", str(self.accuracy) + "%")
def mod_data(df):
df.replace('?', -999999, inplace = True)
df.replace('yes', 4, inplace = True)
df.replace('no', 2, inplace = True)
df.replace('notpresent', 4, inplace = True)
df.replace('present', 2, inplace = True)
df.replace('abnormal', 4, inplace = True)
df.replace('normal', 2, inplace = True)
df.replace('poor', 4, inplace = True)
df.replace('good', 2, inplace = True)
df.replace('ckd', 4, inplace = True)
df.replace('notckd', 2, inplace = True)
def main():
df = pd.read_csv("chronic_kidney_disease.csv")
mod_data(df)
dataset = df.astype(float).values.tolist()
#Normalize the data
x = df.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df = pd.DataFrame(x_scaled) #Replace df with normalized values
#Shuffle the dataset
random.shuffle(dataset)
#20% of the available data will be used for testing
test_size = 0.2
    #The keys of the dict are the classes that the data is classified into
training_set = {2: [], 4:[]}
test_set = {2: [], 4:[]}
#Split data into training and test for cross validation
training_data = dataset[:-int(test_size * len(dataset))]
test_data = dataset[-int(test_size * len(dataset)):]
#Insert data into the training set
for record in training_data:
        training_set[record[-1]].append(record[:-1]) # Append all elements of the record except the class label to that class's list in the dict
#Insert data into the test set
for record in test_data:
        test_set[record[-1]].append(record[:-1]) # Append all elements of the record except the class label to that class's list in the dict
    s = time.perf_counter()
knn = CustomKNN()
knn.test(test_set, training_set)
    e = time.perf_counter()
print("Exec Time:" ,e-s)
if __name__ == "__main__":
main()
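# Minimal usage sketch of CustomKNN on toy data (added for illustration, not part of
# the original script); class labels follow the 2/4 convention used in mod_data():
#
#   training = {2: [[1.0, 1.0], [1.5, 2.0], [2.0, 2.5]],
#               4: [[8.0, 8.0], [8.5, 9.0], [9.0, 9.5]]}
#   knn = CustomKNN()
#   label, confidence = knn.predict(training, [8.4, 8.7], k=3)
#   # -> label 4 with confidence 1.0, since the three nearest points all belong to class 4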
|
StarcoderdataPython
|
3298952
|
<reponame>thekiosk/i3ot3commas<gh_stars>1-10
from flask import (
Flask,
request,
json
)
import re
import socket
from datetime import datetime
import time
import requests
from collections import defaultdict
from telegramManager import telegramManager
class i3ot3commas:
def __init__(self):
self.config = self.read_config()
def read_config(self):
configFile = 'config.ini'
oConfig = self.nested_dict(1,list)
f = open(configFile, "r")
for x in f:
checkComment = re.match('^[#]', x.strip())
if not checkComment and x.strip() != '':
confDetails = x.strip().split('=')
if len(confDetails) > 2 :
j = 0
strData = ""
for item in confDetails :
if j > 0:
if j == 1 :
strData = item
else :
strData = strData + "=" + item
j = j+1
oConfig[confDetails[0]] = strData
else:
oConfig[confDetails[0]] = confDetails[1]
return oConfig
def nested_dict(self,n, type):
if n == 1:
return defaultdict(type)
else:
            return defaultdict(lambda: self.nested_dict(n-1, type))
def initialBot(self):
self.config3commas={}
configLocation = './'
with open(configLocation + "3commas.json", "r") as fin:
self.config3commas = json.load(fin)
def getTimeStamp(self):
dateTimeObj = datetime.now()
timestampStr = dateTimeObj.strftime('[%Y-%m-%d] %H:%M:%S.%f')[:-3] + " GMT +7"
return timestampStr
api = Flask(__name__)
#trading for 3commas
@api.route('/trade_signal', methods=['GET', 'POST'])
def trade_signal():
if request.method == 'POST':
#load all JSON from source
json_request_data = request.get_json()
print(json_request_data, flush=True)
#Sample JSON
### {'owner': 'Knot', 'data': [{'side': 'buy', 'symbol': 'ALGOBTC', 'exchange': 'BINANCE'}]}
#dump JSON after load
dumpJson = json.dumps(json_request_data['data'])
msgIn = json.loads( dumpJson )
print(msgIn, flush=True)
raw_msgBot = ({
"message_type": "bot",
"bot_id": 999999,
"email_token": "<PASSWORD> be replaced by code",
"delay_seconds": 0,
"pair": "will be replaced by code"
})
msgBot = json.loads(json.dumps(raw_msgBot))
#set end point for 3commas
url = robot.config['3commas']
telegramBot = robot.config['enableTelegram']
rfq_details = {}
# assign owner
owner = str(json_request_data['owner'])
rfq_details['owner'] = str(json_request_data['owner'])
#check direction
if str(msgIn['side']).upper() == 'SELL' :
print("Sell !!!", flush=True)
msgBot['side'] = "sell"
rfq_details['side'] = "sell"
msgBot['action'] = "close_at_market_price"
else :
print("Buy !!!", flush=True)
msgBot['side'] = "buy"
rfq_details['side'] = "buy"
if str(msgIn['exchange']).upper() == 'KUCOIN':
print("KUCOIN", flush=True)
if owner in robot.config3commas['owner'] :
loadPairs = json.loads(json.dumps( robot.config3commas['pairs']['KUCOIN'] ))
loadAmount = json.loads(json.dumps( robot.config3commas['owner'][owner]['amount']['KUCOIN'] ))
ccyPair = str(loadPairs[ str(msgIn['symbol']) ])
amount = str(loadAmount[ str(msgIn['symbol']) ])
side = str(msgBot['side'])
msgBot['pair'] = str(ccyPair)
elif str(msgIn['exchange']).upper() == 'BINANCE':
print("BINANCE", flush=True)
#check user profiles
if owner in robot.config3commas['owner'] :
loadPairs = json.loads(json.dumps( robot.config3commas['pairs']['BINANCE'] ))
loadAmount = json.loads(json.dumps( robot.config3commas['owner'][owner]['amount']['BINANCE'] ))
ccyPair = str(loadPairs[ str(msgIn['symbol']) ])
amount = str(loadAmount[ str(msgIn['symbol']) ])
side = str(msgBot['side'])
msgBot['pair'] = str(ccyPair)
#prepare bot for send request
msgBot['exchange'] = msgIn['exchange']
msgBot['bot_id'] = robot.config3commas['owner'][owner]['credentials'][str(msgBot['exchange']).upper()]['BOT_ID']
msgBot['email_token'] = robot.config3commas['owner'][owner]['credentials'][str(msgBot['exchange']).upper()]['EMAIL_TOKEN']
msgBot['timestamp'] = robot.getTimeStamp()
rfq_details['chat_id'] = json.loads(json.dumps( robot.config3commas['owner'][owner]['telegram']['chat_id'] ))
r = requests.post(url, json = msgBot)
if str(r.status_code) == '200':
print("Place order COMPLETED !!!", flush=True)
else:
print("Place order FAILED !!!", flush=True)
#send message service to Telegram
tf = msgIn['tf']
message_alert = "Signal: " +side.upper()+ " on " + msgIn['symbol'] + " exchange " + msgIn['exchange']
message_alert = message_alert + " TimeFrame "+ tf +" !!!"
data = {"type" : "individual",
"token" : robot.config['telegram_token'],
"chat_id" : rfq_details['chat_id'],
"message" : message_alert
}
msgService = json.dumps( data )
messageService = telegramManager()
if telegramBot.upper() == 'TRUE':
messageService.send_alert_message(msgService)
print(msgBot, flush=True)
return request.method, 201
else :
print("false")
return request.method, 404
#default route is 404
@api.route('/', methods=['GET', 'POST'])
def root_landing():
return request.method, 404
if __name__ == '__main__':
#create obj robot
robot = i3ot3commas()
robot.initialBot()
hostname = robot.config['domainName']
remoteServerIP = str(socket.gethostbyname(hostname))
api.run(host=remoteServerIP, port=8080)
|
StarcoderdataPython
|
3277562
|
<reponame>EmilPi/PuzzleLib<gh_stars>10-100
import numpy as np
from PuzzleLib import Config
from PuzzleLib.Backend import gpuarray, Blas
from PuzzleLib.Backend.Dnn import PoolMode, poolNd, poolNdBackward, mapLRN, mapLRNBackward
from PuzzleLib.Modules.Module import ModuleError
from PuzzleLib.Modules.LRN import LRN
class LCN(LRN):
def __init__(self, N=5, alpha=1e-4, beta=0.75, K=2.0, includePad=True, name=None):
super().__init__(N, alpha, beta, K, name)
self.registerBlueprint(locals())
if N % 2 != 1 or N == 1:
raise ModuleError("LCN size must be odd and > 1")
self.size = self.repeat(N, 2)
self.pad = (self.size[0] // 2, self.size[1] // 2)
self.gradUsesOutData = Config.backend != Config.Backend.cuda
self.includePad = includePad
self.mode = PoolMode.avgWithPad if includePad else PoolMode.avgNoPad
self.means = None
self.poolspace = None
self.lrnspace = None
def updateData(self, data):
self.means, self.poolspace = poolNd(
data, size=self.size, stride=1, pad=self.pad, mode=self.mode, test=not self.train
)
self.data, self.lrnspace = mapLRN(
data, self.means, N=self.N, alpha=self.alpha, beta=self.beta, K=self.K, test=not self.train
)
def updateGrad(self, grad):
self.grad, meansGrad = mapLRNBackward(
self.inData, self.data, grad, self.means, None, N=self.N, alpha=self.alpha, beta=self.beta, K=self.K
)
if self.includePad:
meansGrad = poolNdBackward(
self.inData, self.means, meansGrad, self.workspace, size=self.size, stride=1, pad=self.pad,
mode=self.mode
)
Blas.addVectorToVector(self.grad.ravel(), meansGrad.ravel(), out=self.grad.ravel(), beta=-1.0)
def reset(self):
super().reset()
self.means = None
self.poolspace = None
self.lrnspace = None
def unittest():
batchsize, maps, h, w = 1, 1, 5, 5
data = gpuarray.to_gpu(np.random.randn(batchsize, maps, h, w).astype(np.float32))
lcn = LCN(N=5)
lcn(data)
lookBehind = int((lcn.N - 1) / 2)
lookAhead = lcn.N - lookBehind
hsize, wsize = lcn.size
hpad, wpad = lcn.pad
hostData = np.zeros(shape=(batchsize, maps, h + 2 * hpad, w + 2 * wpad), dtype=np.float32)
hostData[:, :, hpad:-hpad, wpad:-wpad] = data.get()
hostMeans = np.empty(lcn.data.shape, dtype=np.float32)
for b in range(batchsize):
for c in range(maps):
for y in range(lcn.data.shape[2]):
for x in range(lcn.data.shape[3]):
hostMeans[b, c, y, x] = np.sum(hostData[b, c, y:y + hsize, x:x + wsize]) / (hsize * wsize)
assert np.allclose(hostMeans, lcn.means.get())
norms = np.empty(lcn.data.shape, dtype=np.float32)
for b in range(batchsize):
for c in range(maps):
for y in range(h):
for x in range(w):
norm = 0.0
for dy in range(max(0, y - lookBehind), min(h, y + lookAhead)):
for dx in range(max(0, x - lookBehind), min(w, x + lookAhead)):
norm += (hostData[b, c, dy, dx] - hostMeans[b, c, y, x])**2
norms[b, c, y, x] = lcn.K + norm * lcn.alpha / lcn.N**2
hostOutData = hostData[:, :, hpad:-hpad, wpad:-wpad] / (norms**lcn.beta)
assert np.allclose(hostOutData, lcn.data.get(), atol=1e-5)
grad = gpuarray.to_gpu(np.random.randn(*lcn.data.shape).astype(np.float32))
lcn.backward(grad)
hostGrad = grad.get()
hostInGrad, hostMeansGrad = np.zeros(data.shape, dtype=np.float32), np.zeros(data.shape, dtype=np.float32)
k = 2.0 * lcn.alpha * lcn.beta / lcn.N**2
for b in range(batchsize):
for c in range(maps):
for y in range(h):
for x in range(w):
hostInGrad[b, c, y, x] += hostGrad[b, c, y, x] / norms[b, c, y, x]**lcn.beta
for dy in range(max(0, y - lookBehind), min(h, y + lookAhead)):
for dx in range(max(0, x - lookBehind), min(w, x + lookAhead)):
hostInGrad[b, c, y, x] -= k * hostGrad[b, c, dy, dx] * (
hostData[b, c, y, x] - hostMeans[b, c, dy, dx]
) * hostData[b, c, dy, dx] / norms[b, c, dy, dx]**(lcn.beta + 1)
hostMeansGrad[b, c, y, x] += hostData[b, c, dy, dx] - hostMeans[b, c, y, x]
K = 2.0 * lcn.alpha * lcn.beta * hostData[b, c, y, x] / lcn.N**2 / \
norms[b, c, y, x]**(lcn.beta + 1)
hostMeansGrad[b, c, y, x] *= K * hostGrad[b, c, y, x]
extInGrad = np.zeros(hostData.shape, dtype=np.float32)
extInGrad[:, :, hpad:-hpad, wpad:-wpad] = hostInGrad
hostInGrad = extInGrad
for b in range(batchsize):
for c in range(maps):
for y in range(hostGrad.shape[2]):
for x in range(hostGrad.shape[3]):
for dy in range(lcn.N):
for dx in range(lcn.N):
hostInGrad[b, c, y + dy, x + dx] -= hostMeansGrad[b, c, y, x] / lcn.N**2
assert np.allclose(hostInGrad[:, :, hpad:-hpad, wpad:-wpad], lcn.grad.get(), atol=1e-4)
if __name__ == "__main__":
unittest()
|
StarcoderdataPython
|
1674540
|
import pandas as pd
import math
import parser
# Worksheets with indemnification funds and temporary remuneration
# For active members there are spreadsheets as of July 2019
# Adjust existing spreadsheet variations
def format_value(element):
if element == None:
return 0.0
if type(element) == str and "-" in element:
return 0.0
if element == "#N/DISP":
return 0.0
return element
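# For reference (comment added for clarity), the behaviour implied by the branches above:
#   format_value(None)       -> 0.0
#   format_value("-")        -> 0.0     (any string containing "-")
#   format_value("#N/DISP")  -> 0.0
#   format_value(123.45)     -> 123.45  (numeric values pass through unchanged)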
# July and August 2019
def update_employee_indemnity_jul_aug_2019(file_name, employees):
rows = parser.read_data(file_name).to_numpy()
begin_row = parser.get_begin_row(rows)
end_row = parser.get_end_row(rows, begin_row, file_name)
curr_row = 0
for row in rows:
if curr_row < begin_row:
curr_row += 1
continue
matricula = str(int(row[0])) # convert to string by removing the '.0'
alimentacao = format_value(row[4])
ferias_pc = format_value(row[5])
cumulativa = format_value(row[6]) # Gratificação Cumulativa
grat_natureza = format_value(row[7]) # Gratificação de Natureza Especial
if (
matricula in employees.keys()
        ):  # Only update employees that are present in the monthly remuneration spreadsheet
emp = employees[matricula]
emp["income"].update(
{
"perks": {
"total": round(ferias_pc + alimentacao, 2),
"food": alimentacao,
"vacation_pecuniary": ferias_pc,
}
}
)
total_outras_gratificacoes = round(
emp["income"]["other"]["others_total"] + cumulativa + grat_natureza, 2
)
total_gratificacoes = round(
emp["income"]["other"]["total"] + cumulativa + grat_natureza, 2
)
emp["income"].update(
{"total": round(emp["income"]["total"] + cumulativa + grat_natureza, 2)}
)
emp["income"]["other"].update(
{
"total": total_gratificacoes,
"others_total": total_outras_gratificacoes,
}
)
emp["income"]["other"]["others"].update(
{
"GRAT. CUMULATIVA": cumulativa,
"GRAT. NATUREZA ESPECIAL": grat_natureza,
}
)
employees[matricula] = emp
curr_row += 1
if curr_row > end_row:
break
return employees
# September to December 2019 / January and November 2020
def update_employee_indemnity_sept_2019_to_jan_and_nov_2020(file_name, employees):
rows = parser.read_data(file_name).to_numpy()
begin_row = parser.get_begin_row(rows)
end_row = parser.get_end_row(rows, begin_row, file_name)
curr_row = 0
for row in rows:
if curr_row < begin_row:
curr_row += 1
continue
matricula = str(int(row[0])) # convert to string by removing the '.0'
alimentacao = format_value(row[4])
ferias_pc = format_value(row[5])
licensa_pc = format_value(row[6])
cumulativa = format_value(row[7]) # Gratificação Cumulativa
grat_natureza = format_value(row[8]) # Gratificação de Natureza Especial
atuacao_especial = format_value(
row[9]
) # Gratificação de Grupo de Atuação Especial
if (
matricula in employees.keys()
        ):  # Only update employees that are present in the monthly remuneration spreadsheet
emp = employees[matricula]
total_outras_gratificacoes = round(
emp["income"]["other"]["others_total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
total_gratificacoes = round(
emp["income"]["other"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
emp["income"].update(
{"total": round(emp["income"]["total"] + cumulativa + grat_natureza, 2)}
)
emp["income"]["perks"].update(
{
"total": round(ferias_pc + alimentacao + licensa_pc, 2),
"food": alimentacao,
"vacation_pecuniary": ferias_pc,
"premium_license_pecuniary": licensa_pc,
}
)
emp["income"]["other"].update(
{
"total": total_gratificacoes,
"others_total": total_outras_gratificacoes,
}
)
emp["income"]["other"]["others"].update(
{
"GRAT. CUMULATIVA": cumulativa,
"GRAT. NATUREZA ESPECIAL": grat_natureza,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
}
)
employees[matricula] = emp
curr_row += 1
if curr_row > end_row:
break
return employees
# February and March 2020
def update_employee_indemnity_feb_mar_2020(file_name, employees):
rows = parser.read_data(file_name).to_numpy()
begin_row = parser.get_begin_row(rows)
end_row = parser.get_end_row(rows, begin_row, file_name)
curr_row = 0
for row in rows:
if curr_row < begin_row:
curr_row += 1
continue
matricula = str(int(row[0])) # convert to string by removing the '.0'
alimentacao = format_value(row[4])
licensa_compensatoria = format_value(
row[5]
) # Licença Compensatória ato 1124/18
ferias_pc = format_value(row[6])
licensa_pc = format_value(row[7])
cumulativa = format_value(row[8]) # Gratificação Cumulativa
grat_natureza = format_value(row[9]) # Gratificação de Natureza Especial
atuacao_especial = format_value(
row[10]
) # Gratificação de Grupo de Atuação Especial
if (
matricula in employees.keys()
        ):  # Only update employees that are present in the monthly remuneration spreadsheet
emp = employees[matricula]
total_outras_gratificacoes = round(
emp["income"]["other"]["others_total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
total_gratificacoes = round(
emp["income"]["other"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
emp["income"].update(
{
"total": round(
emp["income"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
}
)
emp["income"]["perks"].update(
{
"total": round(ferias_pc + alimentacao + licensa_compensatoria, 2),
"food": alimentacao,
"compensatory_leave": licensa_compensatoria,
"vacation_pecuniary": ferias_pc,
"premium_license_pecuniary": licensa_pc,
}
)
emp["income"]["other"].update(
{
"total": total_gratificacoes,
"others_total": total_outras_gratificacoes,
}
)
emp["income"]["other"]["others"].update(
{
"GRAT. CUMULATIVA": cumulativa,
"GRAT. NATUREZA ESPECIAL": grat_natureza,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
}
)
employees[matricula] = emp
curr_row += 1
if curr_row > end_row:
break
return employees
# April to July 2020
def update_employee_indemnity_apr_to_july_2020(file_name, employees):
rows = parser.read_data(file_name).to_numpy()
begin_row = parser.get_begin_row(rows)
end_row = parser.get_end_row(rows, begin_row, file_name)
curr_row = 0
for row in rows:
if curr_row < begin_row:
curr_row += 1
continue
matricula = str(int(row[0])) # convert to string by removing the '.0'
alimentacao = format_value(row[4])
licensa_compensatoria = format_value(
row[5]
) # Licença Compensatória ato 1124/18
ferias_pc = format_value(row[6])
cumulativa = format_value(row[7]) # Gratificação Cumulativa
grat_natureza = format_value(row[8]) # Gratificação de Natureza Especial
atuacao_especial = format_value(
row[9]
) # Gratificação de Grupo de Atuação Especial
if (
matricula in employees.keys()
        ):  # Only update employees that are present in the monthly remuneration spreadsheet
emp = employees[matricula]
total_outras_gratificacoes = round(
emp["income"]["other"]["others_total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
total_gratificacoes = round(
emp["income"]["other"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
emp["income"].update(
{
"total": round(
emp["income"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
}
)
emp["income"]["perks"].update(
{
"total": round(ferias_pc + alimentacao + licensa_compensatoria, 2),
"food": alimentacao,
"compensatory_leave": licensa_compensatoria,
"vacation_pecuniary": ferias_pc,
}
)
emp["income"]["other"].update(
{
"total": total_gratificacoes,
"others_total": total_outras_gratificacoes,
}
)
emp["income"]["other"]["others"].update(
{
"GRAT. CUMULATIVA": cumulativa,
"GRAT. NATUREZA ESPECIAL": grat_natureza,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
}
)
employees[matricula] = emp
curr_row += 1
if curr_row > end_row:
break
return employees
# August and September 2020
def update_employee_indemnity_aug_sept_2020(file_name, employees):
rows = parser.read_data(file_name).to_numpy()
begin_row = parser.get_begin_row(rows)
end_row = parser.get_end_row(rows, begin_row, file_name)
curr_row = 0
for row in rows:
if curr_row < begin_row:
curr_row += 1
continue
matricula = str(int(row[0])) # convert to string by removing the '.0'
alimentacao = format_value(row[4])
transporte = format_value(row[5]) # Auxilio Transporte
creche = format_value(row[6]) # Auxilio Creche
ferias_pc = format_value(row[7])
licensa_pc = format_value(row[8]) # Licensa em pecunia
licensa_compensatoria = format_value(
row[9]
) # Licença Compensatória ato 1124/18
insalubridade = format_value(row[10]) # Adicional de Insalubridade
subs_funcao = format_value(row[11]) # Substituição de Função
viatura = format_value(row[12]) # Viatura
cumulativa = format_value(row[13]) # Gratificação Cumulativa
grat_qualificacao = format_value(row[14])
grat_natureza = format_value(row[15]) # Gratificação de Natureza Especial
atuacao_especial = format_value(
row[16]
) # Gratificação de Grupo de Atuação Especial
if (
matricula in employees.keys()
        ):  # Only update employees that are present in the monthly remuneration spreadsheet
emp = employees[matricula]
total_outras_gratificacoes = round(
emp["income"]["other"]["others_total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial
+ grat_qualificacao
+ viatura
+ insalubridade
+ subs_funcao,
2,
)
total_gratificacoes = round(
emp["income"]["other"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial
+ grat_qualificacao
+ viatura
+ insalubridade
+ subs_funcao,
2,
)
emp["income"].update(
{
"total": round(
emp["income"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial
+ grat_qualificacao
+ viatura
+ insalubridade
+ subs_funcao,
2,
)
}
)
emp["income"]["perks"].update(
{
"total": round(
ferias_pc
+ alimentacao
+ transporte
+ creche
+ licensa_compensatoria
+ licensa_pc,
2,
),
"food": alimentacao,
"transportation": transporte,
"pre_school": creche,
"vacation_pecuniary": ferias_pc,
"premium_license_pecuniary": licensa_pc,
"compensatory_leave": licensa_compensatoria,
}
)
emp["income"]["other"].update(
{
"total": total_gratificacoes,
"others_total": total_outras_gratificacoes,
}
)
emp["income"]["other"]["others"].update(
{
"INSALUBRIDADE": insalubridade,
"SUBS. DE FUNÇÃO": subs_funcao,
"VIATURA": viatura,
"GRAT. CUMULATIVA": cumulativa,
"GRAT. DE QUALIFICAÇÃO": grat_qualificacao,
"GRAT. NATUREZA ESPECIAL": grat_natureza,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
}
)
employees[matricula] = emp
curr_row += 1
if curr_row > end_row:
break
return employees
# October 2020
def update_employee_indemnity_oct_2020(file_name, employees):
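# October 2020 layout: only vacation pay plus the three gratification columns.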
rows = parser.read_data(file_name).to_numpy()
begin_row = parser.get_begin_row(rows)
end_row = parser.get_end_row(rows, begin_row, file_name)
curr_row = 0
for row in rows:
if curr_row < begin_row:
curr_row += 1
continue
matricula = str(int(row[0])) # convert to string by removing the '.0'
ferias_pc = format_value(row[4])
cumulativa = format_value(row[5]) # Gratificação Cumulativa
grat_natureza = format_value(row[6]) # Gratificação de Natureza Especial
atuacao_especial = format_value(
row[7]
) # Gratificação de Grupo de Atuação Especial
if (
matricula in employees.keys()
): # Perform the update only for employees that appear in the monthly remuneration spreadsheet
emp = employees[matricula]
total_outras_gratificacoes = round(
emp["income"]["other"]["others_total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
total_gratificacoes = round(
emp["income"]["other"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
emp["income"].update(
{
"total": round(
emp["income"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
}
)
emp["income"]["perks"].update(
{
"vacation_pecuniary": ferias_pc,
}
)
emp["income"]["other"].update(
{
"total": total_gratificacoes,
"others_total": total_outras_gratificacoes,
}
)
emp["income"]["other"]["others"].update(
{
"GRAT. CUMULATIVA": cumulativa,
"GRAT. NATUREZA ESPECIAL": grat_natureza,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
}
)
employees[matricula] = emp
curr_row += 1
if curr_row > end_row:
break
return employees
# December 2020
def update_employee_indemnity_dec_2020(file_name, employees):
rows = parser.read_data(file_name).to_numpy()
begin_row = parser.get_begin_row(rows)
end_row = parser.get_end_row(rows, begin_row, file_name)
curr_row = 0
for row in rows:
if curr_row < begin_row:
curr_row += 1
continue
matricula = str(int(row[0])) # convert to string by removing the '.0'
alimentação = format_value(row[4])
ferias_pc = format_value(row[5])
cumulativa = format_value(row[6]) # Gratificação Cumulativa
grat_natureza = format_value(row[7]) # Gratificação de Natureza Especial
atuacao_especial = format_value(
row[8]
) # Gratificação de Grupo de Atuação Especial
if (
matricula in employees.keys()
): # Perform the update only for employees that appear in the monthly remuneration spreadsheet
emp = employees[matricula]
total_outras_gratificacoes = round(
emp["income"]["other"]["others_total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
total_gratificacoes = round(
emp["income"]["other"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
total_bruto = round(
emp["income"]["total"] + cumulativa + grat_natureza + atuacao_especial,
2,
)
emp["income"].update(
{
"total": round(
total_bruto,
2,
)
}
)
emp["income"]["perks"].update(
{
"food": alimentação,
"vacation_pecuniary": ferias_pc,
}
)
emp["income"]["other"].update(
{
"total": total_gratificacoes,
"others_total": total_outras_gratificacoes,
}
)
emp["income"]["other"]["others"].update(
{
"GRAT. CUMULATIVA": cumulativa,
"GRAT. NATUREZA ESPECIAL": grat_natureza,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
}
)
employees[matricula] = emp
curr_row += 1
if curr_row > end_row:
break
return employees
# January 2021
def update_employee_indemnity_jan_2021(file_name, employees):
rows = parser.read_data(file_name).to_numpy()
begin_row = parser.get_begin_row(rows)
end_row = parser.get_end_row(rows, begin_row, file_name)
curr_row = 0
for row in rows:
if curr_row < begin_row:
curr_row += 1
continue
matricula = str(int(row[0])) # convert to string by removing the '.0'
alimentação = format_value(row[4])
cumulativa = format_value(row[5]) # Gratificação Cumulativa
grat_natureza = format_value(row[6]) # Gratificação de Natureza Especial
atuacao_especial = format_value(
row[7]
) # Gratificação de Grupo de Atuação Especial
if (
matricula in employees.keys()
): # Perform the update only for employees that appear in the monthly remuneration spreadsheet
emp = employees[matricula]
total_outras_gratificacoes = round(
emp["income"]["other"]["others_total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
total_gratificacoes = round(
emp["income"]["other"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
total_bruto = round(
emp["income"]["total"] + cumulativa + grat_natureza + atuacao_especial,
2,
)
emp["income"].update(
{
"total": round(
total_bruto,
2,
)
}
)
emp["income"]["perks"].update(
{
"food": alimentação
}
)
emp["income"]["other"].update(
{
"total": total_gratificacoes,
"others_total": total_outras_gratificacoes,
}
)
emp["income"]["other"]["others"].update(
{
"GRAT. CUMULATIVA": cumulativa,
"GRAT. NATUREZA ESPECIAL": grat_natureza,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
}
)
employees[matricula] = emp
curr_row += 1
if curr_row > end_row:
break
return employees
# February 2021
def update_employee_indemnity_feb_2021(file_name, employees):
rows = parser.read_data(file_name).to_numpy()
begin_row = parser.get_begin_row(rows)
end_row = parser.get_end_row(rows, begin_row, file_name)
curr_row = 0
for row in rows:
if curr_row < begin_row:
curr_row += 1
continue
matricula = str(int(row[0])) # convert to string by removing the '.0'
alimentação = format_value(row[4])
ferias_pc = format_value(row[5])
cumulativa = format_value(row[6]) # Gratificação Cumulativa
grat_natureza = format_value(row[7]) # Gratificação de Natureza Especial
atuacao_especial = format_value(
row[8]
) # Gratificação de Grupo de Atuação Especial
if (
matricula in employees.keys()
): # Perform the update only for employees that appear in the monthly remuneration spreadsheet
emp = employees[matricula]
total_outras_gratificacoes = round(
emp["income"]["other"]["others_total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
total_gratificacoes = round(
emp["income"]["other"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
total_bruto = round(
emp["income"]["total"] + cumulativa + grat_natureza + atuacao_especial,
2,
)
emp["income"].update(
{
"total": round(
total_bruto,
2,
)
}
)
emp["income"]["perks"].update(
{
"food": alimentação,
"vacation_pecuniary": ferias_pc
}
)
emp["income"]["other"].update(
{
"total": total_gratificacoes,
"others_total": total_outras_gratificacoes,
}
)
emp["income"]["other"]["others"].update(
{
"GRAT. CUMULATIVA": cumulativa,
"GRAT. NATUREZA ESPECIAL": grat_natureza,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
}
)
employees[matricula] = emp
curr_row += 1
if curr_row > end_row:
break
return employees
# March and April 2021
def update_employee_indemnity_mar_apr_2021(file_name, employees):
rows = parser.read_data(file_name).to_numpy()
begin_row = parser.get_begin_row(rows)
end_row = parser.get_end_row(rows, begin_row, file_name)
curr_row = 0
for row in rows:
if curr_row < begin_row:
curr_row += 1
continue
matricula = str(int(row[0])) # convert to string by removing the '.0'
alimentação = format_value(row[4])
ferias_pc = format_value(row[5])
saude = format_value(row[6])
cumulativa = format_value(row[7]) # Gratificação Cumulativa
grat_natureza = format_value(row[8]) # Gratificação de Natureza Especial
atuacao_especial = format_value(
row[9]
) # Gratificação de Grupo de Atuação Especial
if (
matricula in employees.keys()
): # Perform the update only for employees that appear in the monthly remuneration spreadsheet
emp = employees[matricula]
total_outras_gratificacoes = round(
emp["income"]["other"]["others_total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
total_gratificacoes = round(
emp["income"]["other"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
total_bruto = round(
emp["income"]["total"] + cumulativa + grat_natureza + atuacao_especial,
2,
)
emp["income"].update(
{
"total": round(
total_bruto,
2,
)
}
)
emp["income"]["perks"].update(
{
"food": alimentação,
"vacation_pecuniary": ferias_pc,
"health": saude
}
)
emp["income"]["other"].update(
{
"total": total_gratificacoes,
"others_total": total_outras_gratificacoes,
}
)
emp["income"]["other"]["others"].update(
{
"GRAT. CUMULATIVA": cumulativa,
"GRAT. NATUREZA ESPECIAL": grat_natureza,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
}
)
employees[matricula] = emp
curr_row += 1
if curr_row > end_row:
break
return employees
# June and July 2021
def update_employee_indemnity_june_jul_2021(file_name, employees):
rows = parser.read_data(file_name).to_numpy()
begin_row = parser.get_begin_row(rows)
end_row = parser.get_end_row(rows, begin_row, file_name)
curr_row = 0
for row in rows:
if curr_row < begin_row:
curr_row += 1
continue
matricula = str(int(row[0])) # convert to string by removing the '.0'
alimentação = format_value(row[4])
ferias_pc = format_value(row[5])
licensa_pc = format_value(row[6])
saude = format_value(row[7])
cumulativa = format_value(row[8]) # Gratificação Cumulativa
grat_natureza = format_value(row[9]) # Gratificação de Natureza Especial
atuacao_especial = format_value(
row[10]
) # Gratificação de Grupo de Atuação Especial
if (
matricula in employees.keys()
): # Perform the update only for employees that appear in the monthly remuneration spreadsheet
emp = employees[matricula]
total_outras_gratificacoes = round(
emp["income"]["other"]["others_total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
total_gratificacoes = round(
emp["income"]["other"]["total"]
+ cumulativa
+ grat_natureza
+ atuacao_especial,
2,
)
total_bruto = round(
emp["income"]["total"] + cumulativa + grat_natureza + atuacao_especial,
2,
)
emp["income"].update(
{
"total": round(
total_bruto,
2,
)
}
)
emp["income"]["perks"].update(
{
"food": alimentação,
"vacation_pecuniary": ferias_pc,
"premium_license_pecuniary": licensa_pc,
"health": saude
}
)
emp["income"]["other"].update(
{
"total": total_gratificacoes,
"others_total": total_outras_gratificacoes,
}
)
emp["income"]["other"]["others"].update(
{
"GRAT. CUMULATIVA": cumulativa,
"GRAT. NATUREZA ESPECIAL": grat_natureza,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": atuacao_especial,
}
)
employees[matricula] = emp
curr_row += 1
if curr_row > end_row:
break
return employees
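# Hedged sketch (not part of the original module): one way to route a spreadsheet to the right
# parser above is a small dispatch table keyed by the competence month. The "YYYY-MM" keys and
# the dispatch_indemnity_update name are illustrative assumptions; only the handlers visible in
# this module are listed.
def dispatch_indemnity_update(month, file_name, employees):
    handlers = {
        "2020-08": update_employee_indemnity_aug_sept_2020,
        "2020-09": update_employee_indemnity_aug_sept_2020,
        "2020-10": update_employee_indemnity_oct_2020,
        "2020-12": update_employee_indemnity_dec_2020,
        "2021-01": update_employee_indemnity_jan_2021,
        "2021-02": update_employee_indemnity_feb_2021,
        "2021-03": update_employee_indemnity_mar_apr_2021,
        "2021-04": update_employee_indemnity_mar_apr_2021,
        "2021-06": update_employee_indemnity_june_jul_2021,
        "2021-07": update_employee_indemnity_june_jul_2021,
    }
    handler = handlers.get(month)
    if handler is None:
        raise ValueError("No indemnity parser registered for month: " + month)
    return handler(file_name, employees)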
<gh_stars>100-1000
import os
import sys
import json
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
class WindowClassificationTrainUpdateLossParam(QtWidgets.QWidget):
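# Qt widget for the "update loss parameters" step of the training wizard: it loads the
# experiment config from base_classification.json, shows the loss list that matches the
# selected backend (MXNet, Keras or PyTorch), and writes the chosen loss and its params back.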
forward_train = QtCore.pyqtSignal();
backward_scheduler_param = QtCore.pyqtSignal();
def __init__(self):
super().__init__()
self.cfg_setup()
self.title = 'Experiment {} - Update Loss Params'.format(self.system["experiment"])
self.left = 10
self.top = 10
self.width = 900
self.height = 600
self.loss_ui_mxnet = [];
self.loss_ui_keras = [];
self.loss_ui_pytorch = [];
self.current_loss_ = {};
self.current_loss_["name"] = "";
self.current_loss_["params"] = {};
self.initUI()
def cfg_setup(self):
with open('base_classification.json') as json_file:
self.system = json.load(json_file)
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height);
# Backward
self.b1 = QPushButton('Back', self)
self.b1.move(600,550)
self.b1.clicked.connect(self.backward)
# Forward
self.b2 = QPushButton('Next', self)
self.b2.move(700,550)
self.b2.clicked.connect(self.forward)
# Quit
self.b3 = QPushButton('Quit', self)
self.b3.move(800,550)
self.b3.clicked.connect(self.close)
self.cb1 = QComboBox(self);
self.cb1.move(20, 20);
self.cb1.activated.connect(self.select_loss);
self.cb2 = QComboBox(self);
self.cb2.move(20, 20);
self.cb2.activated.connect(self.select_loss);
self.cb3 = QComboBox(self);
self.cb3.move(20, 20);
self.cb3.activated.connect(self.select_loss);
self.mxnet_losses_list = ["select", "loss_l1", "loss_l2", "loss_softmax_crossentropy", "loss_crossentropy",
"loss_sigmoid_binary_crossentropy", "loss_binary_crossentropy",
"loss_kldiv", "loss_poisson_nll", "loss_huber", "loss_hinge",
"loss_squared_hinge"];
self.keras_losses_list = ["select", "loss_l1", "loss_l2", "loss_crossentropy", "loss_binary_crossentropy",
"loss_kldiv", "loss_hinge", "loss_squared_hinge"];
self.pytorch_losses_list = ["select", "loss_l1", "loss_l2", "loss_softmax_crossentropy", "loss_crossentropy",
"loss_sigmoid_binary_crossentropy", "loss_binary_crossentropy",
"loss_kldiv", "loss_poisson_nll", "loss_huber", "loss_hinge",
"loss_squared_hinge", "loss_multimargin", "loss_squared_multimargin",
"loss_multilabel_margin", "loss_multilabel_softmargin"];
if(self.system["backend"] == "Mxnet-1.5.1"):
self.cb1.addItems(self.mxnet_losses_list);
self.cb1.show();
self.cb2.hide();
self.cb3.hide();
elif(self.system["backend"] == "Keras-2.2.5_Tensorflow-1"):
self.cb2.addItems(self.keras_losses_list);
self.cb2.show();
self.cb1.hide();
self.cb3.hide();
elif(self.system["backend"] == "Pytorch-1.3.1"):
self.cb3.addItems(self.pytorch_losses_list);
self.cb3.show();
self.cb1.hide();
self.cb2.hide();
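# Each entry of loss_ui_mxnet / loss_ui_keras / loss_ui_pytorch built below is the group of
# widgets for one loss in the corresponding *_losses_list (offset by the leading "select");
# select_loss() shows only the group that matches the currently selected loss.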
tmp = [];
self.mx_lo1_l1 = QLabel(self);
self.mx_lo1_l1.setText("1. Scalar Weight: ");
self.mx_lo1_l1.move(20, 100);
tmp.append(self.mx_lo1_l1);
self.mx_lo1_e1 = QLineEdit(self)
self.mx_lo1_e1.move(150, 100);
self.mx_lo1_e1.setText("1.0");
tmp.append(self.mx_lo1_e1);
self.mx_lo1_l2 = QLabel(self);
self.mx_lo1_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo1_l2.move(20, 150);
tmp.append(self.mx_lo1_l2);
self.mx_lo1_e2 = QLineEdit(self)
self.mx_lo1_e2.move(290, 150);
self.mx_lo1_e2.setText("0");
tmp.append(self.mx_lo1_e2);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo2_l1 = QLabel(self);
self.mx_lo2_l1.setText("1. Scalar Weight: ");
self.mx_lo2_l1.move(20, 100);
tmp.append(self.mx_lo2_l1);
self.mx_lo2_e1 = QLineEdit(self)
self.mx_lo2_e1.move(150, 100);
self.mx_lo2_e1.setText("1.0");
tmp.append(self.mx_lo2_e1);
self.mx_lo2_l2 = QLabel(self);
self.mx_lo2_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo2_l2.move(20, 150);
tmp.append(self.mx_lo2_l2);
self.mx_lo2_e2 = QLineEdit(self)
self.mx_lo2_e2.move(290, 150);
self.mx_lo2_e2.setText("0");
tmp.append(self.mx_lo2_e2);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo3_l1 = QLabel(self);
self.mx_lo3_l1.setText("1. Scalar Weight: ");
self.mx_lo3_l1.move(20, 100);
tmp.append(self.mx_lo3_l1);
self.mx_lo3_e1 = QLineEdit(self)
self.mx_lo3_e1.move(150, 100);
self.mx_lo3_e1.setText("1.0");
tmp.append(self.mx_lo3_e1);
self.mx_lo3_l2 = QLabel(self);
self.mx_lo3_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo3_l2.move(20, 150);
tmp.append(self.mx_lo3_l2);
self.mx_lo3_e2 = QLineEdit(self)
self.mx_lo3_e2.move(290, 150);
self.mx_lo3_e2.setText("0");
tmp.append(self.mx_lo3_e2);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo4_l1 = QLabel(self);
self.mx_lo4_l1.setText("1. Scalar Weight: ");
self.mx_lo4_l1.move(20, 100);
tmp.append(self.mx_lo4_l1);
self.mx_lo4_e1 = QLineEdit(self)
self.mx_lo4_e1.move(150, 100);
self.mx_lo4_e1.setText("1.0");
tmp.append(self.mx_lo4_e1);
self.mx_lo4_l2 = QLabel(self);
self.mx_lo4_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo4_l2.move(20, 150);
tmp.append(self.mx_lo4_l2);
self.mx_lo4_e2 = QLineEdit(self)
self.mx_lo4_e2.move(290, 150);
self.mx_lo4_e2.setText("0");
tmp.append(self.mx_lo4_e2);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo5_l1 = QLabel(self);
self.mx_lo5_l1.setText("1. Scalar Weight: ");
self.mx_lo5_l1.move(20, 100);
tmp.append(self.mx_lo5_l1);
self.mx_lo5_e1 = QLineEdit(self)
self.mx_lo5_e1.move(150, 100);
self.mx_lo5_e1.setText("1.0");
tmp.append(self.mx_lo5_e1);
self.mx_lo5_l2 = QLabel(self);
self.mx_lo5_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo5_l2.move(20, 150);
tmp.append(self.mx_lo5_l2);
self.mx_lo5_e2 = QLineEdit(self)
self.mx_lo5_e2.move(290, 150);
self.mx_lo5_e2.setText("0");
tmp.append(self.mx_lo5_e2);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo6_l1 = QLabel(self);
self.mx_lo6_l1.setText("1. Scalar Weight: ");
self.mx_lo6_l1.move(20, 100);
tmp.append(self.mx_lo6_l1);
self.mx_lo6_e1 = QLineEdit(self)
self.mx_lo6_e1.move(150, 100);
self.mx_lo6_e1.setText("1.0");
tmp.append(self.mx_lo6_e1);
self.mx_lo6_l2 = QLabel(self);
self.mx_lo6_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo6_l2.move(20, 150);
tmp.append(self.mx_lo6_l2);
self.mx_lo6_e2 = QLineEdit(self)
self.mx_lo6_e2.move(290, 150);
self.mx_lo6_e2.setText("0");
tmp.append(self.mx_lo6_e2);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo7_l1 = QLabel(self);
self.mx_lo7_l1.setText("1. Scalar Weight: ");
self.mx_lo7_l1.move(20, 100);
tmp.append(self.mx_lo7_l1);
self.mx_lo7_e1 = QLineEdit(self)
self.mx_lo7_e1.move(150, 100);
self.mx_lo7_e1.setText("1.0");
tmp.append(self.mx_lo7_e1);
self.mx_lo7_l2 = QLabel(self);
self.mx_lo7_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo7_l2.move(20, 150);
tmp.append(self.mx_lo7_l2);
self.mx_lo7_e2 = QLineEdit(self)
self.mx_lo7_e2.move(290, 150);
self.mx_lo7_e2.setText("0");
tmp.append(self.mx_lo7_e2);
self.mx_lo7_l3 = QLabel(self);
self.mx_lo7_l3.setText("3. Input has log pre-applied: ");
self.mx_lo7_l3.move(20, 200);
tmp.append(self.mx_lo7_l3);
self.mx_lo7_cb3 = QComboBox(self);
self.mx_lo7_cb3.move(290, 200);
self.mx_lo7_cb3.addItems(["No", "Yes"]);
tmp.append(self.mx_lo7_cb3);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo8_l1 = QLabel(self);
self.mx_lo8_l1.setText("1. Scalar Weight: ");
self.mx_lo8_l1.move(20, 100);
tmp.append(self.mx_lo8_l1);
self.mx_lo8_e1 = QLineEdit(self)
self.mx_lo8_e1.move(150, 100);
self.mx_lo8_e1.setText("1.0");
tmp.append(self.mx_lo8_e1);
self.mx_lo8_l2 = QLabel(self);
self.mx_lo8_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo8_l2.move(20, 150);
tmp.append(self.mx_lo8_l2);
self.mx_lo8_e2 = QLineEdit(self)
self.mx_lo8_e2.move(290, 150);
self.mx_lo8_e2.setText("0");
tmp.append(self.mx_lo8_e2);
self.mx_lo8_l3 = QLabel(self);
self.mx_lo8_l3.setText("3. Input has log pre-applied: ");
self.mx_lo8_l3.move(20, 200);
tmp.append(self.mx_lo8_l3);
self.mx_lo8_cb3 = QComboBox(self);
self.mx_lo8_cb3.move(290, 200);
self.mx_lo8_cb3.addItems(["No", "Yes"]);
tmp.append(self.mx_lo8_cb3);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo9_l1 = QLabel(self);
self.mx_lo9_l1.setText("1. Scalar Weight: ");
self.mx_lo9_l1.move(20, 100);
tmp.append(self.mx_lo9_l1);
self.mx_lo9_e1 = QLineEdit(self)
self.mx_lo9_e1.move(150, 100);
self.mx_lo9_e1.setText("1.0");
tmp.append(self.mx_lo9_e1);
self.mx_lo9_l2 = QLabel(self);
self.mx_lo9_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo9_l2.move(20, 150);
tmp.append(self.mx_lo9_l2);
self.mx_lo9_e2 = QLineEdit(self)
self.mx_lo9_e2.move(290, 150);
self.mx_lo9_e2.setText("0");
tmp.append(self.mx_lo9_e2);
self.mx_lo9_l3 = QLabel(self);
self.mx_lo9_l3.setText("3. Threshold for mean estimator: ");
self.mx_lo9_l3.move(20, 200);
tmp.append(self.mx_lo9_l3);
self.mx_lo9_e3 = QLineEdit(self)
self.mx_lo9_e3.move(290, 200);
self.mx_lo9_e3.setText("1.0");
tmp.append(self.mx_lo9_e3);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo10_l1 = QLabel(self);
self.mx_lo10_l1.setText("1. Scalar Weight: ");
self.mx_lo10_l1.move(20, 100);
tmp.append(self.mx_lo10_l1);
self.mx_lo10_e1 = QLineEdit(self)
self.mx_lo10_e1.move(150, 100);
self.mx_lo10_e1.setText("1.0");
tmp.append(self.mx_lo10_e1);
self.mx_lo10_l2 = QLabel(self);
self.mx_lo10_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo10_l2.move(20, 150);
tmp.append(self.mx_lo10_l2);
self.mx_lo10_e2 = QLineEdit(self)
self.mx_lo10_e2.move(290, 150);
self.mx_lo10_e2.setText("0");
tmp.append(self.mx_lo10_e2);
self.mx_lo10_l3 = QLabel(self);
self.mx_lo10_l3.setText("3. Margin: ");
self.mx_lo10_l3.move(20, 200);
tmp.append(self.mx_lo10_l3);
self.mx_lo10_e3 = QLineEdit(self)
self.mx_lo10_e3.move(150, 200);
self.mx_lo10_e3.setText("1.0");
tmp.append(self.mx_lo10_e3);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.mx_lo11_l1 = QLabel(self);
self.mx_lo11_l1.setText("1. Scalar Weight: ");
self.mx_lo11_l1.move(20, 100);
tmp.append(self.mx_lo11_l1);
self.mx_lo11_e1 = QLineEdit(self)
self.mx_lo11_e1.move(150, 100);
self.mx_lo11_e1.setText("1.0");
tmp.append(self.mx_lo11_e1);
self.mx_lo11_l2 = QLabel(self);
self.mx_lo11_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.mx_lo11_l2.move(20, 150);
tmp.append(self.mx_lo11_l2);
self.mx_lo11_e2 = QLineEdit(self)
self.mx_lo11_e2.move(290, 150);
self.mx_lo11_e2.setText("0");
tmp.append(self.mx_lo11_e2);
self.mx_lo11_l3 = QLabel(self);
self.mx_lo11_l3.setText("3. Margin: ");
self.mx_lo11_l3.move(20, 200);
tmp.append(self.mx_lo11_l3);
self.mx_lo11_e3 = QLineEdit(self)
self.mx_lo11_e3.move(150, 200);
self.mx_lo11_e3.setText("1.0");
tmp.append(self.mx_lo11_e3);
self.loss_ui_mxnet.append(tmp)
tmp = [];
self.ke_lo1_l1 = QLabel(self);
self.ke_lo1_l1.setText("1. Scalar Weight: ");
self.ke_lo1_l1.move(20, 100);
tmp.append(self.ke_lo1_l1);
self.ke_lo1_e1 = QLineEdit(self)
self.ke_lo1_e1.move(150, 100);
self.ke_lo1_e1.setText("1.0");
tmp.append(self.ke_lo1_e1);
self.ke_lo1_l2 = QLabel(self);
self.ke_lo1_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.ke_lo1_l2.move(20, 150);
tmp.append(self.ke_lo1_l2);
self.ke_lo1_e2 = QLineEdit(self)
self.ke_lo1_e2.move(290, 150);
self.ke_lo1_e2.setText("0");
tmp.append(self.ke_lo1_e2);
self.loss_ui_keras.append(tmp)
tmp = [];
self.ke_lo2_l1 = QLabel(self);
self.ke_lo2_l1.setText("1. Scalar Weight: ");
self.ke_lo2_l1.move(20, 100);
tmp.append(self.ke_lo2_l1);
self.ke_lo2_e1 = QLineEdit(self)
self.ke_lo2_e1.move(150, 100);
self.ke_lo2_e1.setText("1.0");
tmp.append(self.ke_lo2_e1);
self.ke_lo2_l2 = QLabel(self);
self.ke_lo2_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.ke_lo2_l2.move(20, 150);
tmp.append(self.ke_lo2_l2);
self.ke_lo2_e2 = QLineEdit(self)
self.ke_lo2_e2.move(290, 150);
self.ke_lo2_e2.setText("0");
tmp.append(self.ke_lo2_e2);
self.loss_ui_keras.append(tmp)
tmp = [];
self.ke_lo3_l1 = QLabel(self);
self.ke_lo3_l1.setText("1. Scalar Weight: ");
self.ke_lo3_l1.move(20, 100);
tmp.append(self.ke_lo3_l1);
self.ke_lo3_e1 = QLineEdit(self)
self.ke_lo3_e1.move(150, 100);
self.ke_lo3_e1.setText("1.0");
tmp.append(self.ke_lo3_e1);
self.ke_lo3_l2 = QLabel(self);
self.ke_lo3_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.ke_lo3_l2.move(20, 150);
tmp.append(self.ke_lo3_l2);
self.ke_lo3_e2 = QLineEdit(self)
self.ke_lo3_e2.move(290, 150);
self.ke_lo3_e2.setText("0");
tmp.append(self.ke_lo3_e2);
self.loss_ui_keras.append(tmp)
tmp = [];
self.ke_lo4_l1 = QLabel(self);
self.ke_lo4_l1.setText("1. Scalar Weight: ");
self.ke_lo4_l1.move(20, 100);
tmp.append(self.ke_lo4_l1);
self.ke_lo4_e1 = QLineEdit(self)
self.ke_lo4_e1.move(150, 100);
self.ke_lo4_e1.setText("1.0");
tmp.append(self.ke_lo4_e1);
self.ke_lo4_l2 = QLabel(self);
self.ke_lo4_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.ke_lo4_l2.move(20, 150);
tmp.append(self.ke_lo4_l2);
self.ke_lo4_e2 = QLineEdit(self)
self.ke_lo4_e2.move(290, 150);
self.ke_lo4_e2.setText("0");
tmp.append(self.ke_lo4_e2);
self.loss_ui_keras.append(tmp)
tmp = [];
self.ke_lo5_l1 = QLabel(self);
self.ke_lo5_l1.setText("1. Scalar Weight: ");
self.ke_lo5_l1.move(20, 100);
tmp.append(self.ke_lo5_l1);
self.ke_lo5_e1 = QLineEdit(self)
self.ke_lo5_e1.move(150, 100);
self.ke_lo5_e1.setText("1.0");
tmp.append(self.ke_lo5_e1);
self.ke_lo5_l2 = QLabel(self);
self.ke_lo5_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.ke_lo5_l2.move(20, 150);
tmp.append(self.ke_lo5_l2);
self.ke_lo5_e2 = QLineEdit(self)
self.ke_lo5_e2.move(290, 150);
self.ke_lo5_e2.setText("0");
tmp.append(self.ke_lo5_e2);
self.ke_lo5_l3 = QLabel(self);
self.ke_lo5_l3.setText("3. Input has log pre-applied: ");
self.ke_lo5_l3.move(20, 200);
tmp.append(self.ke_lo5_l3);
self.ke_lo5_cb3 = QComboBox(self);
self.ke_lo5_cb3.move(290, 200);
self.ke_lo5_cb3.addItems(["No", "Yes"]);
tmp.append(self.ke_lo5_cb3);
self.loss_ui_keras.append(tmp)
tmp = [];
self.ke_lo6_l1 = QLabel(self);
self.ke_lo6_l1.setText("1. Scalar Weight: ");
self.ke_lo6_l1.move(20, 100);
tmp.append(self.ke_lo6_l1);
self.ke_lo6_e1 = QLineEdit(self)
self.ke_lo6_e1.move(150, 100);
self.ke_lo6_e1.setText("1.0");
tmp.append(self.ke_lo6_e1);
self.ke_lo6_l2 = QLabel(self);
self.ke_lo6_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.ke_lo6_l2.move(20, 150);
tmp.append(self.ke_lo6_l2);
self.ke_lo6_e2 = QLineEdit(self)
self.ke_lo6_e2.move(290, 150);
self.ke_lo6_e2.setText("0");
tmp.append(self.ke_lo6_e2);
self.ke_lo6_l3 = QLabel(self);
self.ke_lo6_l3.setText("3. Margin: ");
self.ke_lo6_l3.move(20, 200);
tmp.append(self.ke_lo6_l3);
self.ke_lo6_e3 = QLineEdit(self)
self.ke_lo6_e3.move(150, 200);
self.ke_lo6_e3.setText("1.0");
tmp.append(self.ke_lo6_e3);
self.loss_ui_keras.append(tmp)
tmp = [];
self.ke_lo7_l1 = QLabel(self);
self.ke_lo7_l1.setText("1. Scalar Weight: ");
self.ke_lo7_l1.move(20, 100);
tmp.append(self.ke_lo7_l1);
self.ke_lo7_e1 = QLineEdit(self)
self.ke_lo7_e1.move(150, 100);
self.ke_lo7_e1.setText("1.0");
tmp.append(self.ke_lo7_e1);
self.ke_lo7_l2 = QLabel(self);
self.ke_lo7_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.ke_lo7_l2.move(20, 150);
tmp.append(self.ke_lo7_l2);
self.ke_lo7_e2 = QLineEdit(self)
self.ke_lo7_e2.move(290, 150);
self.ke_lo7_e2.setText("0");
tmp.append(self.ke_lo7_e2);
self.ke_lo7_l3 = QLabel(self);
self.ke_lo7_l3.setText("3. Margin: ");
self.ke_lo7_l3.move(20, 200);
tmp.append(self.ke_lo7_l3);
self.ke_lo7_e3 = QLineEdit(self)
self.ke_lo7_e3.move(150, 200);
self.ke_lo7_e3.setText("1.0");
tmp.append(self.ke_lo7_e3);
self.loss_ui_keras.append(tmp)
tmp = [];
self.py_lo1_l1 = QLabel(self);
self.py_lo1_l1.setText("1. Scalar Weight: ");
self.py_lo1_l1.move(20, 100);
tmp.append(self.py_lo1_l1);
self.py_lo1_e1 = QLineEdit(self)
self.py_lo1_e1.move(150, 100);
self.py_lo1_e1.setText("1.0");
tmp.append(self.py_lo1_e1);
self.py_lo1_l2 = QLabel(self);
self.py_lo1_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo1_l2.move(20, 150);
tmp.append(self.py_lo1_l2);
self.py_lo1_e2 = QLineEdit(self)
self.py_lo1_e2.move(290, 150);
self.py_lo1_e2.setText("0");
tmp.append(self.py_lo1_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo2_l1 = QLabel(self);
self.py_lo2_l1.setText("1. Scalar Weight: ");
self.py_lo2_l1.move(20, 100);
tmp.append(self.py_lo2_l1);
self.py_lo2_e1 = QLineEdit(self)
self.py_lo2_e1.move(150, 100);
self.py_lo2_e1.setText("1.0");
tmp.append(self.py_lo2_e1);
self.py_lo2_l2 = QLabel(self);
self.py_lo2_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo2_l2.move(20, 150);
tmp.append(self.py_lo2_l2);
self.py_lo2_e2 = QLineEdit(self)
self.py_lo2_e2.move(290, 150);
self.py_lo2_e2.setText("0");
tmp.append(self.py_lo2_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo3_l1 = QLabel(self);
self.py_lo3_l1.setText("1. Scalar Weight: ");
self.py_lo3_l1.move(20, 100);
tmp.append(self.py_lo3_l1);
self.py_lo3_e1 = QLineEdit(self)
self.py_lo3_e1.move(150, 100);
self.py_lo3_e1.setText("1.0");
tmp.append(self.py_lo3_e1);
self.py_lo3_l2 = QLabel(self);
self.py_lo3_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo3_l2.move(20, 150);
tmp.append(self.py_lo3_l2);
self.py_lo3_e2 = QLineEdit(self)
self.py_lo3_e2.move(290, 150);
self.py_lo3_e2.setText("0");
tmp.append(self.py_lo3_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo4_l1 = QLabel(self);
self.py_lo4_l1.setText("1. Scalar Weight: ");
self.py_lo4_l1.move(20, 100);
tmp.append(self.py_lo4_l1);
self.py_lo4_e1 = QLineEdit(self)
self.py_lo4_e1.move(150, 100);
self.py_lo4_e1.setText("1.0");
tmp.append(self.py_lo4_e1);
self.py_lo4_l2 = QLabel(self);
self.py_lo4_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo4_l2.move(20, 150);
tmp.append(self.py_lo4_l2);
self.py_lo4_e2 = QLineEdit(self)
self.py_lo4_e2.move(290, 150);
self.py_lo4_e2.setText("0");
tmp.append(self.py_lo4_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo5_l1 = QLabel(self);
self.py_lo5_l1.setText("1. Scalar Weight: ");
self.py_lo5_l1.move(20, 100);
tmp.append(self.py_lo5_l1);
self.py_lo5_e1 = QLineEdit(self)
self.py_lo5_e1.move(150, 100);
self.py_lo5_e1.setText("1.0");
tmp.append(self.py_lo5_e1);
self.py_lo5_l2 = QLabel(self);
self.py_lo5_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo5_l2.move(20, 150);
tmp.append(self.py_lo5_l2);
self.py_lo5_e2 = QLineEdit(self)
self.py_lo5_e2.move(290, 150);
self.py_lo5_e2.setText("0");
tmp.append(self.py_lo5_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo6_l1 = QLabel(self);
self.py_lo6_l1.setText("1. Scalar Weight: ");
self.py_lo6_l1.move(20, 100);
tmp.append(self.py_lo6_l1);
self.py_lo6_e1 = QLineEdit(self)
self.py_lo6_e1.move(150, 100);
self.py_lo6_e1.setText("1.0");
tmp.append(self.py_lo6_e1);
self.py_lo6_l2 = QLabel(self);
self.py_lo6_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo6_l2.move(20, 150);
tmp.append(self.py_lo6_l2);
self.py_lo6_e2 = QLineEdit(self)
self.py_lo6_e2.move(290, 150);
self.py_lo6_e2.setText("0");
tmp.append(self.py_lo6_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo7_l1 = QLabel(self);
self.py_lo7_l1.setText("1. Scalar Weight: ");
self.py_lo7_l1.move(20, 100);
tmp.append(self.py_lo7_l1);
self.py_lo7_e1 = QLineEdit(self)
self.py_lo7_e1.move(150, 100);
self.py_lo7_e1.setText("1.0");
tmp.append(self.py_lo7_e1);
self.py_lo7_l2 = QLabel(self);
self.py_lo7_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo7_l2.move(20, 150);
tmp.append(self.py_lo7_l2);
self.py_lo7_e2 = QLineEdit(self)
self.py_lo7_e2.move(290, 150);
self.py_lo7_e2.setText("0");
tmp.append(self.py_lo7_e2);
self.py_lo7_l3 = QLabel(self);
self.py_lo7_l3.setText("3. Input has log pre-applied: ");
self.py_lo7_l3.move(20, 200);
tmp.append(self.py_lo7_l3);
self.py_lo7_cb3 = QComboBox(self);
self.py_lo7_cb3.move(290, 200);
self.py_lo7_cb3.addItems(["No", "Yes"]);
tmp.append(self.py_lo7_cb3);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo8_l1 = QLabel(self);
self.py_lo8_l1.setText("1. Scalar Weight: ");
self.py_lo8_l1.move(20, 100);
tmp.append(self.py_lo8_l1);
self.py_lo8_e1 = QLineEdit(self)
self.py_lo8_e1.move(150, 100);
self.py_lo8_e1.setText("1.0");
tmp.append(self.py_lo8_e1);
self.py_lo8_l2 = QLabel(self);
self.py_lo8_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo8_l2.move(20, 150);
tmp.append(self.py_lo8_l2);
self.py_lo8_e2 = QLineEdit(self)
self.py_lo8_e2.move(290, 150);
self.py_lo8_e2.setText("0");
tmp.append(self.py_lo8_e2);
self.py_lo8_l3 = QLabel(self);
self.py_lo8_l3.setText("3. Input has log pre-applied: ");
self.py_lo8_l3.move(20, 200);
tmp.append(self.py_lo8_l3);
self.py_lo8_cb3 = QComboBox(self);
self.py_lo8_cb3.move(290, 200);
self.py_lo8_cb3.addItems(["No", "Yes"]);
tmp.append(self.py_lo8_cb3);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo9_l1 = QLabel(self);
self.py_lo9_l1.setText("1. Scalar Weight: ");
self.py_lo9_l1.move(20, 100);
tmp.append(self.py_lo9_l1);
self.py_lo9_e1 = QLineEdit(self)
self.py_lo9_e1.move(150, 100);
self.py_lo9_e1.setText("1.0");
tmp.append(self.py_lo9_e1);
self.py_lo9_l2 = QLabel(self);
self.py_lo9_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo9_l2.move(20, 150);
tmp.append(self.py_lo9_l2);
self.py_lo9_e2 = QLineEdit(self)
self.py_lo9_e2.move(290, 150);
self.py_lo9_e2.setText("0");
tmp.append(self.py_lo9_e2);
self.py_lo9_l3 = QLabel(self);
self.py_lo9_l3.setText("3. Threshold for mean estimator: ");
self.py_lo9_l3.move(20, 200);
tmp.append(self.py_lo9_l3);
self.py_lo9_e3 = QLineEdit(self)
self.py_lo9_e3.move(290, 200);
self.py_lo9_e3.setText("1.0");
tmp.append(self.py_lo9_e3);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo10_l1 = QLabel(self);
self.py_lo10_l1.setText("1. Scalar Weight: ");
self.py_lo10_l1.move(20, 100);
tmp.append(self.py_lo10_l1);
self.py_lo10_e1 = QLineEdit(self)
self.py_lo10_e1.move(150, 100);
self.py_lo10_e1.setText("1.0");
tmp.append(self.py_lo10_e1);
self.py_lo10_l2 = QLabel(self);
self.py_lo10_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo10_l2.move(20, 150);
tmp.append(self.py_lo10_l2);
self.py_lo10_e2 = QLineEdit(self)
self.py_lo10_e2.move(290, 150);
self.py_lo10_e2.setText("0");
tmp.append(self.py_lo10_e2);
self.py_lo10_l3 = QLabel(self);
self.py_lo10_l3.setText("3. Margin: ");
self.py_lo10_l3.move(20, 200);
tmp.append(self.py_lo10_l3);
self.py_lo10_e3 = QLineEdit(self)
self.py_lo10_e3.move(150, 200);
self.py_lo10_e3.setText("1.0");
tmp.append(self.py_lo10_e3);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo11_l1 = QLabel(self);
self.py_lo11_l1.setText("1. Scalar Weight: ");
self.py_lo11_l1.move(20, 100);
tmp.append(self.py_lo11_l1);
self.py_lo11_e1 = QLineEdit(self)
self.py_lo11_e1.move(150, 100);
self.py_lo11_e1.setText("1.0");
tmp.append(self.py_lo11_e1);
self.py_lo11_l2 = QLabel(self);
self.py_lo11_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo11_l2.move(20, 150);
tmp.append(self.py_lo11_l2);
self.py_lo11_e2 = QLineEdit(self)
self.py_lo11_e2.move(290, 150);
self.py_lo11_e2.setText("0");
tmp.append(self.py_lo11_e2);
self.py_lo11_l3 = QLabel(self);
self.py_lo11_l3.setText("3. Margin: ");
self.py_lo11_l3.move(20, 200);
tmp.append(self.py_lo11_l3);
self.py_lo11_e3 = QLineEdit(self)
self.py_lo11_e3.move(150, 200);
self.py_lo11_e3.setText("1.0");
tmp.append(self.py_lo11_e3);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo12_l1 = QLabel(self);
self.py_lo12_l1.setText("1. Scalar Weight: ");
self.py_lo12_l1.move(20, 100);
tmp.append(self.py_lo12_l1);
self.py_lo12_e1 = QLineEdit(self)
self.py_lo12_e1.move(150, 100);
self.py_lo12_e1.setText("1.0");
tmp.append(self.py_lo12_e1);
self.py_lo12_l2 = QLabel(self);
self.py_lo12_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo12_l2.move(20, 150);
tmp.append(self.py_lo12_l2);
self.py_lo12_e2 = QLineEdit(self)
self.py_lo12_e2.move(290, 150);
self.py_lo12_e2.setText("0");
tmp.append(self.py_lo12_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo13_l1 = QLabel(self);
self.py_lo13_l1.setText("1. Scalar Weight: ");
self.py_lo13_l1.move(20, 100);
tmp.append(self.py_lo13_l1);
self.py_lo13_e1 = QLineEdit(self)
self.py_lo13_e1.move(150, 100);
self.py_lo13_e1.setText("1.0");
tmp.append(self.py_lo13_e1);
self.py_lo13_l2 = QLabel(self);
self.py_lo13_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo13_l2.move(20, 150);
tmp.append(self.py_lo13_l2);
self.py_lo13_e2 = QLineEdit(self)
self.py_lo13_e2.move(290, 150);
self.py_lo13_e2.setText("0");
tmp.append(self.py_lo13_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo14_l1 = QLabel(self);
self.py_lo14_l1.setText("1. Scalar Weight: ");
self.py_lo14_l1.move(20, 100);
tmp.append(self.py_lo14_l1);
self.py_lo14_e1 = QLineEdit(self)
self.py_lo14_e1.move(150, 100);
self.py_lo14_e1.setText("1.0");
tmp.append(self.py_lo14_e1);
self.py_lo14_l2 = QLabel(self);
self.py_lo14_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo14_l2.move(20, 150);
tmp.append(self.py_lo14_l2);
self.py_lo14_e2 = QLineEdit(self)
self.py_lo14_e2.move(290, 150);
self.py_lo14_e2.setText("0");
tmp.append(self.py_lo14_e2);
self.loss_ui_pytorch.append(tmp)
tmp = [];
self.py_lo15_l1 = QLabel(self);
self.py_lo15_l1.setText("1. Scalar Weight: ");
self.py_lo15_l1.move(20, 100);
tmp.append(self.py_lo15_l1);
self.py_lo15_e1 = QLineEdit(self)
self.py_lo15_e1.move(150, 100);
self.py_lo15_e1.setText("1.0");
tmp.append(self.py_lo15_e1);
self.py_lo15_l2 = QLabel(self);
self.py_lo15_l2.setText("2. Batch Axis (0, 1, 2, 3): ");
self.py_lo15_l2.move(20, 150);
tmp.append(self.py_lo15_l2);
self.py_lo15_e2 = QLineEdit(self)
self.py_lo15_e2.move(290, 150);
self.py_lo15_e2.setText("0");
tmp.append(self.py_lo15_e2);
self.loss_ui_pytorch.append(tmp)
self.select_loss();
self.tb1 = QTextEdit(self)
self.tb1.move(550, 20)
self.tb1.resize(300, 500)
if(self.system["update"]["losses"]["active"]):
wr = "";
wr = json.dumps(self.system["update"]["losses"]["value"], indent=4)
self.tb1.setText(wr);
else:
self.tb1.setText("Using Default loss.")
self.b4 = QPushButton('Select loss', self)
self.b4.move(400,400)
self.b4.clicked.connect(self.add_loss)
self.b6 = QPushButton('Clear ', self)
self.b6.move(400,500)
self.b6.clicked.connect(self.clear_loss)
def select_loss(self):
self.current_loss = {};
self.current_loss["name"] = "";
self.current_loss["params"] = {};
if(self.system["backend"] == "Mxnet-1.5.1"):
self.current_loss["name"] = self.cb1.currentText();
index = self.mxnet_losses_list.index(self.cb1.currentText());
for i in range(len(self.loss_ui_mxnet)):
for j in range(len(self.loss_ui_mxnet[i])):
if((index-1)==i):
self.loss_ui_mxnet[i][j].show();
else:
self.loss_ui_mxnet[i][j].hide();
for i in range(len(self.loss_ui_keras)):
for j in range(len(self.loss_ui_keras[i])):
self.loss_ui_keras[i][j].hide();
for i in range(len(self.loss_ui_pytorch)):
for j in range(len(self.loss_ui_pytorch[i])):
self.loss_ui_pytorch[i][j].hide();
elif(self.system["backend"] == "Keras-2.2.5_Tensorflow-1"):
self.current_loss["name"] = self.cb2.currentText();
index = self.keras_losses_list.index(self.cb2.currentText());
for i in range(len(self.loss_ui_keras)):
for j in range(len(self.loss_ui_keras[i])):
if((index-1)==i):
self.loss_ui_keras[i][j].show();
else:
self.loss_ui_keras[i][j].hide();
for i in range(len(self.loss_ui_mxnet)):
for j in range(len(self.loss_ui_mxnet[i])):
self.loss_ui_mxnet[i][j].hide();
for i in range(len(self.loss_ui_pytorch)):
for j in range(len(self.loss_ui_pytorch[i])):
self.loss_ui_pytorch[i][j].hide();
elif(self.system["backend"] == "Pytorch-1.3.1"):
self.current_loss["name"] = self.cb3.currentText();
index = self.pytorch_losses_list.index(self.cb3.currentText());
for i in range(len(self.loss_ui_pytorch)):
for j in range(len(self.loss_ui_pytorch[i])):
if((index-1)==i):
self.loss_ui_pytorch[i][j].show();
else:
self.loss_ui_pytorch[i][j].hide();
for i in range(len(self.loss_ui_keras)):
for j in range(len(self.loss_ui_keras[i])):
self.loss_ui_keras[i][j].hide();
for i in range(len(self.loss_ui_mxnet)):
for j in range(len(self.loss_ui_mxnet[i])):
self.loss_ui_mxnet[i][j].hide();
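# add_loss() mirrors select_loss(): it reads the edit-box/combo values of the currently
# selected loss and stores them (as strings) under system["update"]["losses"]["value"].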
def add_loss(self):
self.system["update"]["losses"]["active"] = True;
if(self.system["backend"] == "Mxnet-1.5.1"):
if(self.current_loss["name"] == self.mxnet_losses_list[1]):
self.current_loss["params"]["weight"] = self.mx_lo1_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo1_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[2]):
self.current_loss["params"]["weight"] = self.mx_lo2_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo2_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[3]):
self.current_loss["params"]["weight"] = self.mx_lo3_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo3_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[4]):
self.current_loss["params"]["weight"] = self.mx_lo4_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo4_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[5]):
self.current_loss["params"]["weight"] = self.mx_lo5_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo5_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[6]):
self.current_loss["params"]["weight"] = self.mx_lo6_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo6_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[7]):
self.current_loss["params"]["weight"] = self.mx_lo7_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo7_e2.text();
self.current_loss["params"]["log_pre_applied"] = self.mx_lo7_cb3.currentText();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[8]):
self.current_loss["params"]["weight"] = self.mx_lo8_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo8_e2.text();
self.current_loss["params"]["log_pre_applied"] = self.mx_lo8_cb3.currentText();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[9]):
self.current_loss["params"]["weight"] = self.mx_lo9_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo9_e2.text();
self.current_loss["params"]["threshold_for_mean_estimator"] = self.mx_lo9_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[10]):
self.current_loss["params"]["weight"] = self.mx_lo10_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo10_e2.text();
self.current_loss["params"]["margin"] = self.mx_lo10_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.mxnet_losses_list[11]):
self.current_loss["params"]["weight"] = self.mx_lo11_e1.text();
self.current_loss["params"]["batch_axis"] = self.mx_lo11_e2.text();
self.current_loss["params"]["margin"] = self.mx_lo11_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.system["backend"] == "Keras-2.2.5_Tensorflow-1"):
if(self.current_loss["name"] == self.keras_losses_list[1]):
self.current_loss["params"]["weight"] = self.ke_lo1_e1.text();
self.current_loss["params"]["batch_axis"] = self.ke_lo1_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.keras_losses_list[2]):
self.current_loss["params"]["weight"] = self.ke_lo2_e1.text();
self.current_loss["params"]["batch_axis"] = self.ke_lo2_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.keras_losses_list[3]):
self.current_loss["params"]["weight"] = self.ke_lo3_e1.text();
self.current_loss["params"]["batch_axis"] = self.ke_lo3_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.keras_losses_list[4]):
self.current_loss["params"]["weight"] = self.ke_lo4_e1.text();
self.current_loss["params"]["batch_axis"] = self.ke_lo4_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.keras_losses_list[5]):
self.current_loss["params"]["weight"] = self.ke_lo5_e1.text();
self.current_loss["params"]["batch_axis"] = self.ke_lo5_e2.text();
self.current_loss["params"]["log_pre_applied"] = self.ke_lo5_cb3.currentText();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.keras_losses_list[6]):
self.current_loss["params"]["weight"] = self.ke_lo6_e1.text();
self.current_loss["params"]["batch_axis"] = self.ke_lo6_e2.text();
self.current_loss["params"]["margin"] = self.ke_lo6_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.keras_losses_list[7]):
self.current_loss["params"]["weight"] = self.ke_lo7_e1.text();
self.current_loss["params"]["batch_axis"] = self.ke_lo7_e2.text();
self.current_loss["params"]["margin"] = self.ke_lo7_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.system["backend"] == "Pytorch-1.3.1"):
if(self.current_loss["name"] == self.pytorch_losses_list[1]):
self.current_loss["params"]["weight"] = self.py_lo1_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo1_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[2]):
self.current_loss["params"]["weight"] = self.py_lo2_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo2_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[3]):
self.current_loss["params"]["weight"] = self.py_lo3_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo3_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[4]):
self.current_loss["params"]["weight"] = self.py_lo4_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo4_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[5]):
self.current_loss["params"]["weight"] = self.py_lo5_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo5_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[6]):
self.current_loss["params"]["weight"] = self.py_lo6_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo6_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[7]):
self.current_loss["params"]["weight"] = self.py_lo7_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo7_e2.text();
self.current_loss["params"]["log_pre_applied"] = self.py_lo7_cb3.currentText();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[8]):
self.current_loss["params"]["weight"] = self.py_lo8_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo8_e2.text();
self.current_loss["params"]["log_pre_applied"] = self.py_lo8_cb3.currentText();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[9]):
self.current_loss["params"]["weight"] = self.py_lo9_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo9_e2.text();
self.current_loss["params"]["threshold_for_mean_estimator"] = self.py_lo9_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[10]):
self.current_loss["params"]["weight"] = self.py_lo10_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo10_e2.text();
self.current_loss["params"]["margin"] = self.py_lo10_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[11]):
self.current_loss["params"]["weight"] = self.py_lo11_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo11_e2.text();
self.current_loss["params"]["margin"] = self.py_lo11_e3.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[12]):
self.current_loss["params"]["weight"] = self.py_lo12_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo12_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[13]):
self.current_loss["params"]["weight"] = self.py_lo13_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo13_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[14]):
self.current_loss["params"]["weight"] = self.py_lo14_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo14_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
elif(self.current_loss["name"] == self.pytorch_losses_list[15]):
self.current_loss["params"]["weight"] = self.py_lo15_e1.text();
self.current_loss["params"]["batch_axis"] = self.py_lo15_e2.text();
self.system["update"]["losses"]["value"] = self.current_loss;
wr = "";
wr = json.dumps(self.system["update"]["losses"]["value"], indent=4)
self.tb1.setText(wr);
def clear_loss(self):
self.system["update"]["losses"]["value"] = "";
self.system["update"]["losses"]["active"] = False;
wr = "";
self.tb1.setText(wr);
def forward(self):
with open('base_classification.json', 'w') as outfile:
json.dump(self.system, outfile)
self.forward_train.emit();
def backward(self):
with open('base_classification.json', 'w') as outfile:
json.dump(self.system, outfile)
self.backward_scheduler_param.emit();
'''
app = QApplication(sys.argv)
screen = WindowClassificationTrainUpdateLossParam()
screen.show()
sys.exit(app.exec_())
'''
<filename>resnet.py
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : resnet.py
# Author : YunYang1994
# Created date: 2019-10-11 19:16:55
# Description :
#
#================================================================
''' A copy of the above code used to possibly customize the ResNet architecture'''
import tensorflow as tf
class BasicBlock(tf.keras.Model):
expansion = 1
def __init__(self, in_channels, out_channels, strides=1):
super().__init__()
self.conv1 = tf.keras.layers.Conv2D(out_channels, kernel_size=3, strides=strides,
padding="same", use_bias=False)
self.bn1 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Conv2D(out_channels, kernel_size=3, strides=1,
padding="same", use_bias=False)
self.bn2 = tf.keras.layers.BatchNormalization()
"""
Adds a shortcut between input and residual block and merges them with "sum"
"""
if strides != 1 or in_channels != self.expansion * out_channels:
self.shortcut = tf.keras.Sequential([
tf.keras.layers.Conv2D(self.expansion*out_channels, kernel_size=1,
strides=strides, use_bias=False),
tf.keras.layers.BatchNormalization()]
)
else:
self.shortcut = lambda x,_: x
self.activation = tf.keras.layers.ReLU()
def call(self, x, training=False):
# if training: print("=> training network ... ")
out = self.activation(self.bn1(self.conv1(x), training=training))
out = self.bn2(self.conv2(out), training=training)
out += self.shortcut(x, training)
return self.activation(out)
class Bottleneck(tf.keras.Model):
expansion = 4
def __init__(self, in_channels, out_channels, strides=1):
super().__init__()
self.conv1 = tf.keras.layers.Conv2D(out_channels, 1, 1, use_bias=False)
self.bn1 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Conv2D(out_channels, 3, strides, padding="same", use_bias=False)
self.bn2 = tf.keras.layers.BatchNormalization()
self.conv3 = tf.keras.layers.Conv2D(out_channels*self.expansion, 1, 1, use_bias=False)
self.bn3 = tf.keras.layers.BatchNormalization()
if strides != 1 or in_channels != self.expansion * out_channels:
self.shortcut = tf.keras.Sequential([
tf.keras.layers.Conv2D(self.expansion*out_channels, kernel_size=1,
strides=strides, use_bias=False),
tf.keras.layers.BatchNormalization()]
)
else:
self.shortcut = lambda x,_: x
self.activation = tf.keras.layers.ReLU()
def call(self, x, training=False):
out = self.activation(self.bn1(self.conv1(x), training))
out = self.activation(self.bn2(self.conv2(out), training))
out = self.bn3(self.conv3(out), training)
out += self.shortcut(x, training)
return self.activation(out)
class ResNet(tf.keras.Model):
def __init__(self, block, num_blocks, num_classes=10):
super().__init__()
self.in_channels = 64
self.conv1 = tf.keras.layers.Conv2D(64, 3, 1, padding="same", use_bias=False)
self.bn1 = tf.keras.layers.BatchNormalization()
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.avg_pool2d = tf.keras.layers.GlobalAveragePooling2D()
self.linear = tf.keras.layers.Dense(units=num_classes, activation="sigmoid")
self.activation = tf.keras.layers.ReLU()
def _make_layer(self, block, out_channels, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_channels, out_channels, stride))
self.in_channels = out_channels * block.expansion
return tf.keras.Sequential(layers)
def call(self, x, training=False):
out = x
out = self.activation(self.bn1(self.conv1(x), training))
out = self.layer1(out, training=training)
out = self.layer2(out, training=training)
out = self.layer3(out, training=training)
out = self.layer4(out, training=training)
# For classification
out = self.avg_pool2d(out)
out = self.linear(out)
return out
def ResNet18(num_classes=10):
return ResNet(BasicBlock, [2,2,2,2], num_classes)
def ResNet34(num_classes=10):
return ResNet(BasicBlock, [3,4,6,3], num_classes)
def ResNet50(num_classes=10):
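# Note: the canonical ResNet-50 stacks [3, 4, 6, 3] bottleneck blocks; the [3, 4, 14, 3]
# below appears to be a local customization of this copied file.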
return ResNet(Bottleneck, [3,4,14,3], num_classes)
def ResNet101(num_classes=10):
return ResNet(Bottleneck, [3,4,23,3], num_classes)
def ResNet152(num_classes=10):
return ResNet(Bottleneck, [3,8,36,3], num_classes)
if __name__ == "__main__":
from utils import allow_growth
allow_growth()
model = ResNet18(1024)
model.build(input_shape=[1, 64, 64, 1])
print(model.summary())
print(model.predict_on_batch(tf.ones([1, 64, 64, 1], tf.float32)).shape)
<reponame>Rayhane-mamah/Efficient-VDVAE
import torch
import os
import numpy as np
from hparams import HParams
from collections import defaultdict
from torch.utils.tensorboard import SummaryWriter
from prettytable import PrettyTable
hparams = HParams.get_hparams_by_name("efficient_vdvae")
def get_logdir():
return f'logs-{hparams.run.name}'
def transpose_dicts(dct):
d = defaultdict(dict)
for key1, inner in dct.items():
for key2, value in inner.items():
d[key2][key1] = value
return d
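# get_variate_masks: boolean mask over `stats` keeping the variates whose statistic falls in
# the top `variates_masks_quantile` fraction.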
def get_variate_masks(stats):
thresh = np.quantile(stats, 1 - hparams.synthesis.variates_masks_quantile)
return stats > thresh
def scale_pixels(img):
img = np.floor(img / np.uint8(2 ** (8 - hparams.data.num_bits))) * 2 ** (8 - hparams.data.num_bits)
shift = scale = (2 ** 8 - 1) / 2
img = (img - shift) / scale # Images are between [-1, 1]
return img
def effective_pixels():
if hparams.data.dataset_source == 'binarized_mnist':
return 28 * 28 * hparams.data.channels
else:
return hparams.data.target_res * hparams.data.target_res * hparams.data.channels
def one_hot(indices, depth, dim):
indices = indices.unsqueeze(dim)
size = list(indices.size())
size[dim] = depth
y_onehot = torch.zeros(size, device=torch.device('cuda'))
y_onehot.zero_()
y_onehot.scatter_(dim, indices, 1)
return y_onehot
def count_parameters(model):
table = PrettyTable(["Modules", "Parameters"])
total_params = 0
for name, parameter in model.named_parameters():
if not parameter.requires_grad: continue
param = parameter.numel()
table.add_row([name, param])
total_params += param
print(table)
print(f"Total Trainable Params: {total_params}")
return total_params
def assert_CUDA_and_hparams_gpus_are_equal():
print('Running on: ', torch.cuda.device_count(), ' GPUs')
assert hparams.run.num_gpus == torch.cuda.device_count()
def load_checkpoint_if_exists(checkpoint_path, rank):
try:
checkpoint = torch.load(checkpoint_path, map_location='cuda:{}'.format(rank))
except FileNotFoundError:
checkpoint = {'global_step': -1,
'model_state_dict': None,
'ema_model_state_dict': None,
'optimizer_state_dict': None,
'scheduler_state_dict': None}
return checkpoint
def create_checkpoint_manager_and_load_if_exists(model_directory='.', rank=0):
checkpoint_path = os.path.join(model_directory, f'checkpoints-{hparams.run.name}')
checkpoint = load_checkpoint_if_exists(checkpoint_path, rank)
return checkpoint, checkpoint_path
def create_tb_writer(mode):
logdir = get_logdir()
tbdir = os.path.join(logdir, mode)
os.makedirs(logdir, exist_ok=True)
os.makedirs(tbdir, exist_ok=True)
writer = SummaryWriter(log_dir=tbdir)
return writer, logdir
def get_same_padding(kernel_size, strides, dilation_rate, n_dims=2):
p_ = []
# Reverse order for F.pad
for i in range(n_dims - 1, -1, -1):
if strides[i] > 1 and dilation_rate[i] > 1:
raise ValueError("Can't have the stride and dilation rate over 1")
p = (kernel_size[i] - strides[i]) * dilation_rate[i]
if p % 2 == 0:
p = (p // 2, p // 2)
else:
p = (int(np.ceil(p / 2)), int(np.floor(p / 2)))
p_ += p
return tuple(p_)
def get_valid_padding(n_dims=2):
p_ = (0,) * 2 * n_dims
return p_
def get_causal_padding(kernel_size, strides, dilation_rate, n_dims=2):
p_ = []
for i in range(n_dims - 1, -1, -1):
if strides[i] > 1 and dilation_rate[i] > 1:
raise ValueError("can't have the stride and dilation over 1")
p = (kernel_size[i] - strides[i]) * dilation_rate[i]
p_ += (p, 0)
return p_
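# Hedged usage sketch (illustrative, not part of the original utils): the padding helpers
# return tuples in the reversed order expected by torch.nn.functional.pad, so a "same"-padded
# convolution can be written as below. Layer sizes and the function name are assumptions.
def _same_padded_conv_example():
    import torch.nn.functional as F
    conv = torch.nn.Conv2d(8, 8, kernel_size=3, stride=1)
    x = torch.randn(1, 8, 16, 16)
    pad = get_same_padding(kernel_size=(3, 3), strides=(1, 1), dilation_rate=(1, 1), n_dims=2)
    y = conv(F.pad(x, pad))  # spatial size is preserved by the computed padding
    assert y.shape == x.shape
    return y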
def compute_latent_dimension():
assert np.prod(hparams.model.down_strides) == np.prod(hparams.model.up_strides)
return hparams.data.target_res // np.prod(hparams.model.down_strides)
|
StarcoderdataPython
|
3386978
|
<gh_stars>0
from importlib import reload
import aiohttp
import settings
from tgapi.apimethods import get_me
from tgapi.tgtypes import base, bot_entity
async def setup_bot_settings(session: aiohttp.ClientSession,
bot_url: str) -> dict:
"""
Args:
session:
bot_url:
Returns:
"""
result = await get_me(session,
bot_url)
bot = base.dataclass_factory(bot_entity.BotEntity, result)
settings.BOT_ID = bot._id
settings.BOT_NAME = bot.username
reload(settings)
return bot.to_dict()
|
StarcoderdataPython
|
3218378
|
<reponame>crispzips/IsisCB<filename>isiscb/curation/forms.py
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import str
from builtins import object
from django import forms
from django.http import QueryDict
from isisdata.models import *
from isisdata import export # This never gets old...
from isisdata import export_authority
from curation import actions
import rules
class CCRelationForm(forms.ModelForm):
subject = forms.CharField(widget=forms.HiddenInput(), required=True)
object = forms.CharField(widget=forms.HiddenInput(), required=True)
"""We will set these dynamically in the rendered form."""
record_status_value = forms.ChoiceField(choices=CuratedMixin.STATUS_CHOICES, required=False)
INCLUDES_CHAPTER = 'IC'
INCLUDES_SERIES_ARTICLE = 'ISA'
INCLUDES_CITATION_OBJECT = "ICO"
REVIEWED_BY = 'RB'
RESPONDS_TO = 'RE'
ASSOCIATED_WITH = 'AS'
TYPE_CHOICES = (
(INCLUDES_CHAPTER, 'Includes Chapter'),
(INCLUDES_SERIES_ARTICLE, 'Includes Series Article'),
(INCLUDES_CITATION_OBJECT, 'Includes'),
(ASSOCIATED_WITH, 'Is Associated With'),
(REVIEWED_BY, 'Is Reviewed By')
)
type_controlled = forms.ChoiceField(choices=TYPE_CHOICES)
def __init__(self, *args, **kwargs):
super(CCRelationForm, self).__init__(*args, **kwargs)
if not self.is_bound:
if not self.fields['record_status_value'].initial:
self.fields['record_status_value'].initial = CuratedMixin.ACTIVE
def clean(self):
super(CCRelationForm, self).clean()
subject_id = self.cleaned_data.get('subject', None)
if subject_id:
self.cleaned_data['subject'] = Citation.objects.get(pk=subject_id)
object_id = self.cleaned_data.get('object', None)
if object_id:
self.cleaned_data['object'] = Citation.objects.get(pk=object_id)
class Meta:
model = CCRelation
fields = [
'type_controlled', 'data_display_order', 'subject',
'object', 'record_status_value', 'record_status_explanation',
'administrator_notes', 'record_history',
]
labels = {
'administrator_notes': 'Staff notes'
}
class ACRelationForm(forms.ModelForm):
authority = forms.CharField(widget=forms.HiddenInput(), required=False)
citation = forms.CharField(widget=forms.HiddenInput(), required=False)
"""We will set these dynamically in the rendered form."""
record_status_value = forms.ChoiceField(choices=CuratedMixin.STATUS_CHOICES, required=False)
type_controlled = forms.ChoiceField(choices=ACRelation.TYPE_CHOICES, required=False)
confidence_measure = forms.TypedChoiceField(**{
'choices': [
(1.0, 'Certain/very likely'),
(0.5, 'Likely'),
(0.0, 'Unsure'),
],
'coerce': float,
'required': True,
})
class Meta(object):
model = ACRelation
fields = [
'type_controlled',
'name_for_display_in_citation', 'data_display_order',
'confidence_measure', 'authority', 'citation',
'record_status_value', 'record_status_explanation',
'administrator_notes', 'record_history'
]
labels = {
'administrator_notes': 'Staff notes',
}
def __init__(self, *args, **kwargs):
super(ACRelationForm, self).__init__(*args, **kwargs)
if not self.is_bound:
if not self.fields['record_status_value'].initial:
self.fields['record_status_value'].initial = CuratedMixin.ACTIVE
def clean(self):
super(ACRelationForm, self).clean()
authority_id = self.cleaned_data.get('authority', None)
if authority_id:
self.cleaned_data['authority'] = Authority.objects.get(pk=authority_id)
else:
self.cleaned_data['authority'] = None
citation_id = self.cleaned_data.get('citation', None)
if citation_id:
self.cleaned_data['citation'] = Citation.objects.get(pk=citation_id)
else:
self.cleaned_data['citation'] = None
class AARelationForm(forms.ModelForm):
authority_subject = forms.CharField(widget=forms.HiddenInput(), required=False)
authority_object = forms.CharField(widget=forms.HiddenInput(), required=False)
"""We will set these dynamically in the rendered form."""
record_status_value = forms.ChoiceField(choices=CuratedMixin.STATUS_CHOICES, required=False)
type_controlled = forms.ChoiceField(choices=AARelation.TYPE_CHOICES, required=False)
confidence_measure = forms.TypedChoiceField(**{
'choices': [
(1.0, 'Certain/very likely'),
(0.5, 'Likely'),
(0.0, 'Unsure'),
],
'coerce': float,
'required': False,
})
class Meta(object):
model = AARelation
fields = [
'type_controlled', 'aar_type',
'confidence_measure', 'subject', 'object',
'record_status_value', 'record_status_explanation',
'administrator_notes', 'record_history'
]
labels = {
'administrator_notes': 'Staff notes',
}
def __init__(self, *args, **kwargs):
super(AARelationForm, self).__init__(*args, **kwargs)
self.fields['subject'].required=False
self.fields['object'].required=False
if not self.is_bound:
if not self.fields['record_status_value'].initial:
self.fields['record_status_value'].initial = CuratedMixin.ACTIVE
if not self.fields['authority_subject'].initial and self.instance.subject:
self.fields['authority_subject'].initial = self.instance.subject.id
if not self.fields['authority_object'].initial and self.instance.object:
self.fields['authority_object'].initial = self.instance.object.id
def clean(self):
super(AARelationForm, self).clean()
if self.cleaned_data.get('aar_type', None):
            # store the combined relation type (assumed intent: append the selected AARelationType's base_type)
            self.cleaned_data['type_controlled'] = self.cleaned_data['type_controlled'] + self.cleaned_data.get('aar_type').base_type
authority_subject_id = self.cleaned_data.get('authority_subject', None)
if authority_subject_id:
self.cleaned_data['subject'] = Authority.objects.get(pk=authority_subject_id)
else:
self.cleaned_data['subject'] = None
authority_object_id = self.cleaned_data.get('authority_object', None)
if authority_object_id:
self.cleaned_data['object'] = Authority.objects.get(pk=authority_object_id)
else:
self.cleaned_data['object'] = None
class ISODateValueForm(forms.ModelForm):
value = forms.CharField()
def __init__(self, *args, **kwargs):
super(ISODateValueForm, self).__init__(*args, **kwargs)
instance = kwargs.get('instance')
if instance and not self.is_bound:
self.fields['value'].initial = instance.__unicode__()
def clean_value(self):
value = self.cleaned_data['value']
try:
ISODateValue.convert(value)
except:
raise forms.ValidationError('Please enter an ISO8601-compliant date.')
return value
def save(self, *args, **kwargs):
self.instance.value = self.cleaned_data.get('value')
super(ISODateValueForm, self).save(*args, **kwargs)
class Meta(object):
model = ISODateValue
fields = []
class AuthorityValueForm(forms.ModelForm):
value = forms.CharField(label="Authority ID")
authority_name = forms.CharField(label='Name of stored authority')
def __init__(self, *args, **kwargs):
super(AuthorityValueForm, self).__init__(*args, **kwargs)
instance = kwargs.get('instance')
if instance and not self.is_bound:
self.fields['value'].initial = instance.pk
self.fields['authority_name'].initial = instance.value.name
self.fields['authority_name'].widget.attrs['readonly'] = True
def clean_value(self):
value = self.cleaned_data['value']
try:
value = Authority.objects.get(id=value)
except:
raise forms.ValidationError('Authority record does not exist.')
return value
def save(self, *args, **kwargs):
self.instance.value = self.cleaned_data.get('value')
super(AuthorityValueForm, self).save(*args, **kwargs)
class Meta(object):
model = AuthorityValue
fields = ['value']
class CitationValueForm(forms.ModelForm):
value = forms.CharField(label="Citation ID", widget=forms.TextInput(attrs={'data-type':'citation_id'}))
citation_name = forms.CharField(label='Name of stored citation', widget=forms.TextInput(attrs={'readonly': True}))
def __init__(self, *args, **kwargs):
super(CitationValueForm, self).__init__(*args, **kwargs)
instance = kwargs.get('instance')
if instance and not self.is_bound:
self.fields['value'].initial = instance.pk
self.fields['citation_name'].initial = instance.value.title_for_display
def clean_value(self):
value = self.cleaned_data['value']
try:
value = Citation.objects.get(id=value)
except:
raise forms.ValidationError('Citation record does not exist.')
return value
def save(self, *args, **kwargs):
self.instance.value = self.cleaned_data.get('value')
super(CitationValueForm, self).save(*args, **kwargs)
class Meta(object):
model = CitationValue
fields = ['value']
class PartDetailsForm(forms.ModelForm):
extent_note = forms.CharField(widget=forms.widgets.Textarea({'rows': '1'}), required=False)
def __init__(self, user, citation_id=None, *args, **kwargs):
super(PartDetailsForm, self).__init__( *args, **kwargs)
self.user = user
self.citation_id = citation_id
self.fields['volume_begin'].widget.attrs['placeholder'] = "Begin #"
self.fields['volume_end'].widget.attrs['placeholder'] = "End #"
self.fields['volume_free_text'].widget.attrs['placeholder'] = "Volume"
self.fields['issue_begin'].widget.attrs['placeholder'] = "Begin #"
self.fields['issue_end'].widget.attrs['placeholder'] = "End #"
self.fields['issue_free_text'].widget.attrs['placeholder'] = "Issue"
self.fields['page_begin'].widget.attrs['placeholder'] = "Begin #"
self.fields['page_end'].widget.attrs['placeholder'] = "End #"
self.fields['pages_free_text'].widget.attrs['placeholder'] = "Pages"
self.fields['extent'].widget.attrs['placeholder'] = "Extent"
self.fields['extent_note'].widget.attrs['placeholder'] = "Extent note"
if citation_id:
can_update = rules.test_rule('can_update_citation_field', user, ('part_details', citation_id))
can_view = rules.test_rule('can_view_citation_field', user, ('part_details', citation_id))
set_field_access(can_update, can_view, self.fields)
class Meta(object):
model = PartDetails
exclude =['volume', 'sort_order']
def _get_validation_exclusions(self):
exclude = super(PartDetailsForm, self)._get_validation_exclusions()
# remove fields that user isn't allowed to modify
if self.citation_id:
can_update = rules.test_rule('can_update_citation_field', self.user, ('part_details', self.citation_id))
can_view = rules.test_rule('can_view_citation_field', self.user, ('part_details', self.citation_id))
for field in self.fields:
if not can_update or not can_view:
exclude.append(field)
return exclude
def set_field_access(can_update, can_view, fields):
for field in fields:
if not can_update:
fields[field].widget.attrs['readonly'] = True
if not can_view:
fields[field] = forms.CharField(widget=NoViewInput())
fields[field].widget.attrs['readonly'] = True
class StubCheckboxInput(forms.widgets.CheckboxInput):
def __init__(self, attrs=None, check_test=None):
super().__init__(attrs, lambda v: v == Citation.STUB_RECORD)
class CitationForm(forms.ModelForm):
abstract = forms.CharField(widget=forms.widgets.Textarea({'rows': '7'}), required=False)
complete_citation = forms.CharField(widget=forms.widgets.Textarea({'rows': '7'}), required=False)
description = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False)
record_history = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False)
additional_titles = forms.CharField(widget=forms.widgets.Textarea({'rows': '2'}), required=False)
edition_details = forms.CharField(widget=forms.widgets.Textarea({'rows': '2'}), required=False)
physical_details = forms.CharField(widget=forms.widgets.Textarea({'rows': '2'}), required=False)
language = forms.ModelMultipleChoiceField(queryset=Language.objects.all(), required=False)
belongs_to = forms.ModelChoiceField(queryset=Dataset.objects.all(), label='Dataset', required=False)
record_status_value = forms.ChoiceField(choices=CuratedMixin.STATUS_CHOICES, required=False)
administrator_notes = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False, label="Staff notes")
title = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False)
subtype = forms.ModelChoiceField(queryset=CitationSubtype.objects.all(), label='Subtype', required=False)
stub_record_status = forms.BooleanField(label='Stub', widget=StubCheckboxInput(), required=False)
class Meta(object):
model = Citation
fields = [
'type_controlled', 'title', 'description', 'edition_details',
'physical_details', 'abstract', 'additional_titles',
'book_series', 'record_status_value', 'record_status_explanation',
'belongs_to', 'administrator_notes', 'record_history', 'subtype',
'complete_citation', 'stub_record_status'
]
labels = {
'belongs_to': 'Dataset',
'administrator_notes': 'Staff notes',
'complete_citation': 'Stub text'
}
def __init__(self, user, *args, **kwargs):
super(CitationForm, self).__init__( *args, **kwargs)
self.user = user
if not self.is_bound:
if not self.fields['record_status_value'].initial:
self.fields['record_status_value'].initial = CuratedMixin.ACTIVE
# disable fields user doesn't have access to
if self.instance.pk:
self.fields['title'].widget.attrs['placeholder'] = "No title"
self.fields['type_controlled'].widget = forms.widgets.HiddenInput()
if self.instance.type_controlled in [Citation.REVIEW, Citation.CHAPTER, Citation.ARTICLE, Citation.ESSAY_REVIEW]:
self.fields['book_series'].widget = forms.widgets.HiddenInput()
if self.instance.type_controlled in [Citation.THESIS]:
self.fields['book_series'].widget = forms.widgets.HiddenInput()
self.fields['subtype'].queryset = CitationSubtype.objects.filter(related_citation_type=self.instance.type_controlled)
for field in self.fields:
can_update = rules.test_rule('can_update_citation_field', user, (field, self.instance.pk))
if not can_update:
self.fields[field].widget.attrs['readonly'] = True
self.fields[field].widget.attrs['disabled'] = True
can_view = rules.test_rule('can_view_citation_field', user, (field, self.instance.pk))
if not can_view:
self.fields[field] = forms.CharField(widget=NoViewInput())
self.fields[field].widget.attrs['readonly'] = True
self.fields[field].widget.attrs['disabled'] = True
def clean(self):
super(CitationForm, self).clean()
stub_record_status = self.cleaned_data.get('stub_record_status', False)
if stub_record_status:
self.cleaned_data['stub_record_status'] = Citation.STUB_RECORD
else:
self.cleaned_data['stub_record_status'] = None
def _get_validation_exclusions(self):
exclude = super(CitationForm, self)._get_validation_exclusions()
# remove fields that user isn't allowed to modify
if self.instance.pk:
for field in self.fields:
can_update = rules.test_rule('can_update_citation_field', self.user, (field, self.instance.pk))
can_view = rules.test_rule('can_view_citation_field', self.user, (field, self.instance.pk))
if not can_update or not can_view:
exclude.append(field)
return exclude
class LinkedDataForm(forms.ModelForm):
class Meta(object):
model = LinkedData
fields = [
'universal_resource_name', 'resource_name', 'url',
'type_controlled', 'record_status_value',
'record_status_explanation', 'administrator_notes',
'record_history'
]
labels = {
'universal_resource_name': 'URN (link to authority)'
}
def __init__(self, *args, **kwargs):
super(LinkedDataForm, self).__init__(*args, **kwargs)
if not self.is_bound:
if not self.fields['record_status_value'].initial:
self.fields['record_status_value'].initial = CuratedMixin.ACTIVE
def save(self, *args, **kwargs):
super(LinkedDataForm, self).save(*args, **kwargs)
class NoViewInput(forms.TextInput):
def render(self, name, value, attrs=None):
value = "You do not have sufficient permissions to view this field."
return super(NoViewInput, self).render(name, value, attrs)
class AuthorityForm(forms.ModelForm):
description = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False)
record_status_value = forms.ChoiceField(choices=CuratedMixin.STATUS_CHOICES, required=False)
redirect_to = forms.CharField(widget=forms.HiddenInput(), required = False)
record_history = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False)
belongs_to = forms.ModelChoiceField(queryset=Dataset.objects.all(), label='Dataset', required=False)
class Meta(object):
model = Authority
fields = [
'type_controlled', 'name', 'description', 'classification_system',
'classification_code', 'classification_hierarchy',
'record_status_value', 'record_status_explanation', 'redirect_to',
'administrator_notes', 'record_history', 'belongs_to'
]
labels = {
'belongs_to': 'Dataset',
'administrator_notes': 'Staff notes',
}
def __init__(self, user, *args, **kwargs):
super(AuthorityForm, self).__init__(*args, **kwargs)
if not self.is_bound:
if not self.fields['record_status_value'].initial:
self.fields['record_status_value'].initial = CuratedMixin.ACTIVE
self.user = user
# disable fields user doesn't have access to
if self.instance.pk:
for field in self.fields:
can_update = rules.test_rule('can_update_authority_field', user, (field, self.instance.pk))
if not can_update:
self.fields[field].widget.attrs['readonly'] = True
can_view = rules.test_rule('can_view_authority_field', user, (field, self.instance.pk))
if not can_view:
self.fields[field] = forms.CharField(widget=NoViewInput())
self.fields[field].widget.attrs['readonly'] = True
def clean(self):
super(AuthorityForm, self).clean()
authority_id = self.cleaned_data['redirect_to']
if authority_id:
self.cleaned_data['redirect_to'] = Authority.objects.get(pk=authority_id)
else:
self.cleaned_data['redirect_to'] = None
def _get_validation_exclusions(self):
exclude = super(AuthorityForm, self)._get_validation_exclusions()
# remove fields that user isn't allowed to modify
if self.instance.pk:
for field in self.fields:
can_update = rules.test_rule('can_update_authority_field', self.user, (field, self.instance.pk))
can_view = rules.test_rule('can_view_authority_field', self.user, (field, self.instance.pk))
if not can_update or not can_view:
exclude.append(field)
return exclude
class CitationTrackingForm(forms.ModelForm):
HSTM_UPLOAD = 'HS'
PRINTED = 'PT'
AUTHORIZED = 'AU'
PROOFED = 'PD'
FULLY_ENTERED = 'FU'
BULK_DATA = 'BD'
TYPE_CHOICES = (
(HSTM_UPLOAD, 'HSTM Upload'),
(PRINTED, 'Printed'),
(AUTHORIZED, 'Authorized'),
(PROOFED, 'Proofed'),
(FULLY_ENTERED, 'Fully Entered'),
(BULK_DATA, 'Bulk Data Update')
)
type_controlled = forms.ChoiceField(required=True,
choices=TYPE_CHOICES)
class Meta(object):
model = Tracking
fields = [
'tracking_info', 'notes', 'type_controlled'
]
class AuthorityTrackingForm(forms.ModelForm):
HSTM_UPLOAD = 'HS'
PRINTED = 'PT'
AUTHORIZED = 'AU'
PROOFED = 'PD'
FULLY_ENTERED = 'FU'
BULK_DATA = 'BD'
TYPE_CHOICES = (
(HSTM_UPLOAD, 'HSTM Upload'),
(PRINTED, 'Printed'),
(AUTHORIZED, 'Authorized'),
(PROOFED, 'Proofed'),
(FULLY_ENTERED, 'Fully Entered'),
(BULK_DATA, 'Bulk Data Update')
)
type_controlled = forms.ChoiceField(required=True,
choices=TYPE_CHOICES)
class Meta(object):
model = AuthorityTracking
fields = [
'tracking_info', 'notes', 'type_controlled'
]
class PersonForm(forms.ModelForm):
description = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False)
def __init__(self, user, authority_id, *args, **kwargs):
super(PersonForm, self).__init__( *args, **kwargs)
self.user = user
self.authority_id = authority_id
if authority_id:
can_update = rules.test_rule('can_update_authority_field', user, ('person', authority_id))
can_view = rules.test_rule('can_view_authority_field', user, ('person', authority_id))
set_field_access(can_update, can_view, self.fields)
class Meta(object):
model = Person
fields = [
'personal_name_last', 'personal_name_first', 'personal_name_suffix',
'personal_name_preferred',
]
def _get_validation_exclusions(self):
exclude = super(PersonForm, self)._get_validation_exclusions()
if self.authority_id:
# remove fields that user isn't allowed to modify
can_update = rules.test_rule('can_update_authority_field', self.user, ('person', self.authority_id))
can_view = rules.test_rule('can_view_authority_field', self.user, ('person', self.authority_id))
for field in self.fields:
if not can_update or not can_view:
exclude.append(field)
return exclude
class RoleForm(forms.ModelForm):
class Meta(object):
model = IsisCBRole
fields = [
'name', 'description',
]
class DatasetRuleForm(forms.ModelForm):
dataset = forms.ChoiceField(required=False)
def __init__(self, *args, **kwargs):
super(DatasetRuleForm, self).__init__( *args, **kwargs)
dataset_values = Dataset.objects.all()
choices = set()
choices.add((None, "No Dataset"))
for ds in dataset_values:
choices.add((ds.pk, ds.name))
self.fields['dataset'].choices = choices
def clean_field(self):
data = self.cleaned_data['dataset']
if data == '':
data = None
return data
class Meta(object):
model = DatasetRule
fields = [
'dataset', 'role'
]
class AddRoleForm(forms.Form):
role = forms.ChoiceField(required=True)
def __init__(self, *args, **kwargs):
super(AddRoleForm, self).__init__( *args, **kwargs)
roles = IsisCBRole.objects.all()
choices = []
for role in roles:
choices.append((role.pk, role.name))
self.fields['role'].choices = choices
class CRUDRuleForm(forms.ModelForm):
class Meta(object):
model = CRUDRule
fields = [
'crud_action'
]
labels = {
'crud_action': 'Allowed Action',
}
class FieldRuleCitationForm(forms.ModelForm):
field_name = forms.ChoiceField(required = True)
def __init__(self, *args, **kwargs):
super(FieldRuleCitationForm, self).__init__( *args, **kwargs)
all_citation_fields = Citation._meta.get_fields()
choices = []
for field in all_citation_fields:
choices.append((field.name, field.name))
choices.sort()
self.fields['field_name'].choices = choices
class Meta(object):
model = FieldRule
fields = [
'field_action', 'field_name',
]
class FieldRuleAuthorityForm(forms.ModelForm):
field_name = forms.ChoiceField(required = True)
def __init__(self, *args, **kwargs):
super(FieldRuleAuthorityForm, self).__init__( *args, **kwargs)
all_authority_fields = Authority._meta.get_fields()
authority_choices = []
for field in all_authority_fields:
authority_choices.append((field.name, field.name))
authority_choices.sort()
self.fields['field_name'].choices = authority_choices
class Meta(object):
model = FieldRule
fields = [
'field_action', 'field_name',
]
class UserModuleRuleForm(forms.ModelForm):
class Meta(object):
model = UserModuleRule
fields = [
'module_action',
]
class AttributeForm(forms.ModelForm):
description = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False)
type_controlled = forms.ModelChoiceField(queryset=AttributeType.objects.all(), required=False)
record_status_value = forms.ChoiceField(choices=CuratedMixin.STATUS_CHOICES)
class Meta(object):
model = Attribute
fields = [
'type_controlled',
'description',
'value_freeform',
'record_status_value',
'record_status_explanation',
'record_history'
]
def __init__(self, *args, **kwargs):
super(AttributeForm, self).__init__(*args, **kwargs)
# if self.instance.id:
# self.fields['type_controlled'].widget.attrs['disabled'] = True
if not self.is_bound:
if not self.fields['record_status_value'].initial:
self.fields['record_status_value'].initial = CuratedMixin.ACTIVE
def save(self, *args, **kwargs):
if self.instance.id:
self.fields['type_controlled'].initial = self.instance.type_controlled
return super(AttributeForm, self).save(*args, **kwargs)
class BulkActionForm(forms.Form):
def apply(self, user, filter_params_raw, extra=None):
selected_actions = self.cleaned_data.get('action')
tasks = []
for action_name in selected_actions:
action_value = self.cleaned_data.get(action_name)
extra_data = {
k.split('__')[1]: v for k, v in list(self.cleaned_data.items())
if k.startswith(action_name) and not k == action_name and '__' in k
}
if extra:
extra_data.update(extra)
# Load and instantiate the corresponding action class.
action = getattr(actions, action_name)() # Object is callable.
tasks.append(action.apply(user, filter_params_raw, action_value, **extra_data))
return tasks
# Emulates django's modelform_factory
def bulk_action_form_factory(form=BulkActionForm, **kwargs):
attrs = {} # For the form's Meta inner class.
# For the Media inner class.
media_attrs = {'js': ('curation/js/bulkaction.js', )}
queryset = kwargs.pop('queryset', None)
object_type = kwargs.pop('object_type', 'CITATION')
parent = (object,)
if hasattr(form, 'Meta'):
parent = (form.Meta, object)
Meta = type(str('Meta'), parent, attrs)
form_class_attrs = {'Meta': Meta}
action_choices = []
extra_data = {}
# hack until we also make tracking status work
avail_actions = actions.AVAILABLE_ACTIONS_AUTHORITY if object_type == 'AUTHORITY' else actions.AVAILABLE_ACTIONS
for action_class in avail_actions:
if hasattr(action_class, 'extra_js'):
media_attrs['js'] = tuple(list(media_attrs['js']) + [action_class.extra_js])
if hasattr(action_class, 'get_extra_data'):
extra_data[action_class.__name__] = action_class.get_extra_data(queryset=queryset)
action = action_class()
action_choices.append((action_class.__name__, action.label))
form_class_attrs[action_class.__name__] = action.get_value_field(required=False)
extras = action.get_extra_fields()
if extras:
form_class_attrs.update({'%s__%s' % (action_class.__name__, name): field for name, field in extras})
form_class_attrs['Media'] = type(str('Media'), (object,), media_attrs)
form_class_attrs['extra_data'] = extra_data
form_class_attrs['action'] = forms.MultipleChoiceField(choices=action_choices)
form_class_attrs['filters'] = forms.CharField(widget=forms.widgets.HiddenInput())
return type(form)('BulkChangeForm', (form,), form_class_attrs)
class CitationCollectionForm(forms.ModelForm):
filters = forms.CharField(widget=forms.widgets.HiddenInput())
class Meta(object):
model = CitationCollection
exclude = ('created', 'createdBy', 'citations')
class AuthorityCollectionForm(forms.ModelForm):
filters = forms.CharField(widget=forms.widgets.HiddenInput())
class Meta(object):
model = AuthorityCollection
exclude = ('created', 'createdBy', 'authorities')
class AARSetForm(forms.ModelForm):
class Meta(object):
model = AARSet
fields = ['name', 'description']
class AARelationTypeForm(forms.ModelForm):
class Meta(object):
model = AARelationType
fields = ['name', 'description', 'relation_type_controlled', 'base_type', 'aarset']
class SelectCitationCollectionForm(forms.Form):
collection = forms.ModelChoiceField(queryset=CitationCollection.objects.all())
filters = forms.CharField(widget=forms.widgets.HiddenInput())
class SelectAuthorityCollectionForm(forms.Form):
collection = forms.ModelChoiceField(queryset=AuthorityCollection.objects.all())
filters = forms.CharField(widget=forms.widgets.HiddenInput())
class ExportCitationsForm(forms.Form):
export_name = forms.CharField(help_text='This tag will be added to the export filename')
export_format = forms.ChoiceField(choices=[('CSV', 'Comma-separated values (CSV)'), ('EBSCO_CSV', 'Comma-separated values (CSV) in EBSCO format (disregard column selection below)'), ('ITEM_COUNT', 'Export for Item Counts'), ('SWP_ANALYSIS', "Export for SPW Analysis")])
export_linked_records = forms.BooleanField(label="Export linked records (make sure that the 'Link to Record' Field is selected in the field list)", required=False)
export_metadata = forms.BooleanField(label="Export metadata", required=False)
use_pipe_delimiter = forms.BooleanField(label='Use "||" to separate related authority and citation fields', required=False)
fields = forms.MultipleChoiceField(choices=[(c.slug, c.label) for c in export.CITATION_COLUMNS], required=False)
filters = forms.CharField(widget=forms.widgets.HiddenInput())
# compress_output = forms.BooleanField(required=False, initial=True,
# help_text="If selected, the output"
# " will be gzipped.")
def clean_fields(self):
field_data = self.cleaned_data['fields']
export_type = self.cleaned_data['export_format']
if export_type == 'CSV':
if not field_data:
raise forms.ValidationError("Please select fields to export.")
return field_data
class ExportAuthorityForm(forms.Form):
export_name = forms.CharField(help_text='This tag will be added to the export filename')
export_format = forms.ChoiceField(choices=[('CSV', 'Comma-separated values (CSV)')])
export_metadata = forms.BooleanField(label="Export metadata", required=False)
fields = forms.MultipleChoiceField(choices=[(c.slug, c.label) for c in export_authority.AUTHORITY_COLUMNS])
filters = forms.CharField(widget=forms.widgets.HiddenInput())
class BulkChangeCSVForm(forms.Form):
csvFile = forms.FileField()
NO_CHOICE = None
CREATE_ATTR = 'CRATT'
UPDATE_ATTR = 'UPATT'
CREATE_LINKED_DATA = 'CRLD'
CREATE_ACRELATIONS = 'CRACR'
CREATE_AARELATIONS = 'CRAAR'
CREATE_CCRELATIONS = 'CRCCR'
CREATE_AUTHORITIES = 'CRAUTH'
CREATE_CITATIONS = 'CRCIT'
MERGE_AUTHORITIES = 'MGAUTH'
CHOICES = [
(NO_CHOICE, '-------------'),
(CREATE_ATTR, 'Create Attributes'),
(UPDATE_ATTR, 'Update Elements'),
(CREATE_LINKED_DATA, 'Create Linked Data'),
(CREATE_ACRELATIONS, 'Create ACRelations'),
(CREATE_AARELATIONS, 'Create AARelations'),
(CREATE_CCRELATIONS, 'Create CCRelations'),
(CREATE_AUTHORITIES, 'Create Authorities'),
(CREATE_CITATIONS, 'Create Citations'),
(MERGE_AUTHORITIES, 'Duplicate Authority Merge and Redirect'),
]
action = forms.ChoiceField(choices=CHOICES)
|
StarcoderdataPython
|
3381838
|
<gh_stars>10-100
from helper import *
def doTest():
_color()
_complicated_color()
_special()
def _complicated_color():
fixer, msg = doFix('.test {background0:#dddddd url(dddddd) no-repeat left top;}', '')
styleSheet = fixer.getStyleSheet()
ruleSet = styleSheet.getRuleSets()[0]
equal(ruleSet.getRuleByName('background0').fixedValue, '#DDD url(dddddd) no-repeat left top', 'bgcolor 0 ok')
fixer, msg = doFix('.test {border:1px solid #ffffff;}', '')
styleSheet = fixer.getStyleSheet()
ruleSet = styleSheet.getRuleSets()[0]
equal(ruleSet.getRuleByName('border').fixedValue, '1px solid #FFF', 'border is ok')
fixer, msg = doFix('.test {border:1px solid red;}', '')
styleSheet = fixer.getStyleSheet()
ruleSet = styleSheet.getRuleSets()[0]
equal(ruleSet.getRuleByName('border').fixedValue, '1px solid red', 'red border is ok')
def _color():
fixer, msg = doFix('.test {color0:red;color1:#DDD;color2:#DDDDDD;color3:#dddddd;color4:#ddd;color5:#DDFFCC;color6:#ABCDEF;color7:#ABCDEFGH;color8:#abcdef;color9:#ffff;color10:#f;}', '')
styleSheet = fixer.getStyleSheet()
equal(len(styleSheet.getRuleSets()), 1, 'one ruleset')
equal(len(styleSheet.getRuleSets()[0].getRules()), 11, 'eleven rules')
ruleSet = styleSheet.getRuleSets()[0]
equal(ruleSet.getRuleByName('color0').fixedValue, 'red', 'color0 ok')
equal(ruleSet.getRuleByName('color1').fixedValue, '#DDD', 'color1 ok')
equal(ruleSet.getRuleByName('color2').fixedValue, '#DDD', 'color2 ok')
equal(ruleSet.getRuleByName('color3').fixedValue, '#DDD', 'color3 ok')
equal(ruleSet.getRuleByName('color4').fixedValue, '#DDD', 'color4 ok')
equal(ruleSet.getRuleByName('color5').fixedValue, '#DFC', 'color5 ok')
equal(ruleSet.getRuleByName('color6').fixedValue, '#ABCDEF', 'color6 ok')
equal(ruleSet.getRuleByName('color7').fixedValue, '#ABCDEFGH', 'color7 ok')
equal(ruleSet.getRuleByName('color8').fixedValue, '#ABCDEF', 'color8 ok')
equal(ruleSet.getRuleByName('color9').fixedValue, '#FFF', 'color9 ok')
equal(ruleSet.getRuleByName('color10').fixedValue, '#FFF', 'color10 ok')
def _special():
css = '.t{box-shadow:0 4px 5px 1px rgba(74, 116, 161, 0.1), inset 0 -1px #cadaea, inset 0 -2px #fbfcfe;}'
fixer, msg = doFix(css, '')
ruleSet = fixer.getStyleSheet().getRuleSets()[0]
rule = ruleSet.getRules()[0]
equal(rule.fixedValue, '0 4px 5px 1px rgba(74, 116, 161, .1), inset 0 -1px #CADAEA, inset 0 -2px #FBFCFE', 'fixed ok')
|
StarcoderdataPython
|
1750901
|
<reponame>sgarg18/arshadowgan<filename>shadow_class/networks.py<gh_stars>0
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import segmentation_models_pytorch as smp
class Generator_with_Refin(nn.Module):
def __init__(self, encoder):
"""Generator initialization
Args:
encoder: an encoder for Unet generator
"""
super(Generator_with_Refin, self).__init__()
# declare Unet generator
self.generator = smp.Unet(
encoder_name=encoder,
classes=1,
activation='identity',
encoder_depth=4,
decoder_channels=[128, 64, 32, 16],
)
        # replace the first conv block in the generator (4-channel tensor as input, e.g. image + mask)
self.generator.encoder.conv1 = nn.Conv2d(4, 64, kernel_size=(6, 6), stride=(2, 2), padding=(2, 2), bias=False)
self.generator.segmentation_head = nn.Identity()
# RGB-shadow mask as output before refinement module
self.SG_head = nn.Conv2d(in_channels=16, out_channels=3, kernel_size=3, stride=1, padding=1)
# refinement module
self.refinement = torch.nn.Sequential()
for i in range(4):
self.refinement.add_module(f'refinement{3*i+1}', nn.BatchNorm2d(16))
self.refinement.add_module(f'refinement{3*i+2}', nn.ReLU())
self.refinement.add_module(f'refinement{3*i+3}', nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=1))
# RGB-shadow mask as output after refinement module
self.output1 = nn.Conv2d(in_channels=16, out_channels=3, kernel_size=3, stride=1, padding=1)
def forward(self, x):
"""Forward for generator
Args:
x: torch.FloatTensor or torch.cuda.FloatTensor - input tensor with images and masks
"""
x = self.generator(x)
out1 = self.SG_head(x)
x = self.refinement(x)
x = self.output1(x)
return out1, x
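# Illustrative note (not part of the original file): given the 4-channel first conv above,
# x is expected to be e.g. an RGB image concatenated with a 1-channel mask, i.e. shape
# (batch, 4, H, W); forward() returns the coarse SG_head output and the refined output,
# both with 3 channels.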
class Discriminator(nn.Module):
def __init__(self, input_shape):
"""Discriminator initialization
Args:
input_shape (tuple): shape of input image
"""
super(Discriminator, self).__init__()
self.input_shape = input_shape
in_channels, in_height, in_width = self.input_shape
patch_h, patch_w = int(in_height / 2 ** 4), int(in_width / 2 ** 4)
self.output_shape = (1, patch_h, patch_w)
def discriminator_block(in_filters, out_filters, first_block=False):
layers = []
layers.append(nn.Conv2d(in_filters, out_filters, kernel_size=3, stride=1, padding=1))
if not first_block:
layers.append(nn.BatchNorm2d(out_filters))
layers.append(nn.LeakyReLU(0.2, inplace=True))
layers.append(nn.Conv2d(out_filters, out_filters, kernel_size=4, stride=2, padding=1)) #k=3,p=1
layers.append(nn.BatchNorm2d(out_filters))
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
layers = []
in_filters = in_channels
for i, out_filters in enumerate([64, 128, 256, 512]):
layers.extend(discriminator_block(in_filters, out_filters, first_block=(i == 0)))
in_filters = out_filters
layers.append(nn.Conv2d(out_filters, 1, kernel_size=3, stride=1, padding=1))
self.model = nn.Sequential(*layers)
def forward(self, img):
"""Discriminator forward
"""
return self.model(img)
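# Illustrative example (not part of the original file): Discriminator(input_shape=(3, 256, 256))
# stacks four stride-2 blocks, so each spatial dimension is divided by 2 ** 4 == 16 and
# self.output_shape becomes (1, 16, 16) -- a PatchGAN-style map of per-patch scores.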
|
StarcoderdataPython
|
1710768
|
# from labels import default_labeler
import numpy as np
from six import string_types
class unitsDict(dict):
"""
A dictionary sub-class for tracking units.
unitsDict instances support simple math operations (multiply,
divide, power)
The *key* of unitsDicts objects are the units, the values
represent the power of that unit. For example:
a unitsDict({'s':-1,'m':1}) object represents units of m/s.
"""
def copy(self,):
"""
Return a shallow copy of the present object.
"""
return unitsDict([(ky, val) for ky, val in list(self.items())])
def __mul__(self, other):
"""
Multiple the units in this instance by the units in the *other* object.
"""
out = self.copy()
if other.__class__ is unitsDict:
for u, vl in list(other.items()):
if u in list(out.keys()):
out[u] += vl
else:
out[u] = vl
return out
def __pow__(self, other):
"""
Raise the units in this object to the power of *other*.
"""
out = self.copy()
for u in self:
out[u] *= other
return out
def __div__(self, other):
"""
Divide the units in this instance by the units in the *other* object.
"""
out = self.copy()
if other.__class__ is unitsDict:
for u, vl in list(other.items()):
if u in list(out.keys()):
out[u] -= vl
else:
out[u] = -vl
return out
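# Illustrative example (not part of the original file):
# unitsDict({'m': 1}) * unitsDict({'s': -1}) == unitsDict({'m': 1, 's': -1}), i.e. m/s, and
# raising that to the power 2 gives {'m': 2, 's': -2}. Note that __div__ is only used by the
# / operator under Python 2; Python 3 would also need __truediv__.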
class varMeta(object):
"""
A class for variable metadata.
In particular, the units and name of the variable are stored here.
*units_style* specifies how to format the units.
0: no fractions (e.g. units of acceleration are: ms^{-2})
1: fractions (e.g. units of acceleration are: m/s^{2})
***Currently only units_style=0 is supported.***
"""
_units_style = 0
latex = True
_scale_place = 'top'
dim_names = []
def __eq__(self, other):
"""
Test for equivalence between varMeta objects.
"""
if (other.__class__ is varMeta and
self.name == other.name and
self._units == other._units):
return True
return False
def __mul__(self, other):
out = self.copy()
out.name = self.name + other.name
out._units = self._units * other._units
return out
def __pow__(self, other):
out = self.copy()
out.name = self.name + '^%d' % (other)
out._units = self._units ** other
return out
def __div__(self, other):
out = self.copy()
if other.name != '':
out.name = self.name + '/' + other.name
out._units = self._units / other._units
return out
def __init__(self, name, units=None, dim_names=[],
units_style=None, scale=0, vecnames={}):
self.vecnames = vecnames
self.dim_names = dim_names
        # a plain string such as 'm' denotes a single unit with power 1; otherwise coerce to unitsDict
        if isinstance(units, string_types):
            self._units = unitsDict({units: 1})
        elif units.__class__ is not unitsDict:
            self._units = unitsDict(units)
        else:
            self._units = units
self.name = name
self.xformat = r'$%s/[\mathrm{%s}]$'
self.scale = scale
if units_style is not None:
self._units_style = units_style
self.yformat = r'$%s/[\mathrm{%s}]$'
def _copy_rep(self, name=None):
"""
A copy method for use in constructing new varMeta objects from
a basic type.
It behaves as follows:
1) If self.name is None, it return None.
2) If the input is None, it returns a copy of itself.
3) Otherwise, it does a % replace of self.name with the input.
e.g. this is for use such as:
vm=varMeta(r"\overline{%s'%s'}",{'m':2,'s':-2})
vm._copy_rep(('u','u'))
"""
if self.name is None:
return None
if name is None:
name = self.name
else:
name = self.name % name
return varMeta(name,
(self._units and self._units.copy()),
list(self.dim_names),
self._units_style,
self.scale)
def copy(self, name=None):
"""
Return a copy of this varMeta object.
Optional variable *name* may be used to create a copy of these
units, with a new 'name'.
"""
if self.name is None and name is None:
return None
if name is None:
name = self.name
return varMeta(name,
(self._units and self._units.copy()),
list(self.dim_names),
self._units_style,
self.scale)
def __repr__(self,):
return "<varMeta for %s (%s)>" % (self.name, self.units)
def get_label(self, form=None, units_style=None):
"""
Get a formatted label for the variable.
"""
unit = self.get_units(units_style=units_style)
if unit is None:
return '$' + self.get_numer() + '$'
if form is None:
form = r'$%s/[\mathrm{%s}]$'
return form % (self.get_numer(), unit,)
def get_numer(self,):
if self.scale != 0 and self._scale_place == 'top':
return '10^{%d}%s' % (-self.scale, self.name)
else:
return self.name
@property
def units(self,):
"""
A shortcut to the units string.
"""
return self.get_units()
@property
def label(self,):
"""
A shortcut to the label.
"""
return self.get_label()
@property
def ylabel(self,):
"""
A shortcut to the ylabel.
"""
return self.get_label(form=self.yformat)
@property
def xlabel(self,):
"""
A shortcut to the xlabel.
"""
return self.label
def get_units(self, units_style=None,):
"""
Get the properly formatted units string.
"""
if self.scale != 0 and self._scale_place != 'top':
st = r'10^{%d}' % self.scale
else:
st = ''
if self._units is None:
return None
elif self._units.__class__ is str:
return self._units
elif None in self._units:
return self._units[None]
if units_style is None:
units_style = self._units_style
if units_style == 0:
            ks = np.unique(np.array(list(self._units.values())))
ups = np.sort([ks[ks > 0]])[0][::-1]
dns = np.sort([ks[ks < 0]])[0]
st = r''
for ik in ups:
for ky, vl in list(self._units.items()):
if vl == ik:
st += ky
if ik != 1: # If the power is not 1, add an exponent:
st += '^{%d}' % ik
for ik in dns:
for ky, vl in list(self._units.items()):
if vl == ik:
st += '%s^{%d}' % (ky, ik)
return st
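# Illustrative example (not part of the original file), assuming units_style 0:
# varMeta('u', units={'m': 1, 's': -1}).units renders as 'ms^{-1}', and .label wraps it as
# '$u/[\mathrm{ms^{-1}}]$' via the default label format string.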
|
StarcoderdataPython
|
198754
|
#!/usr/bin/env python3
"""
Simple tool for collating multiple mbox files into a single one, sorted by message ID.
If the message-ID is missing, use the Date or Subject and prefix the sort key to appear last.
Can optionally sort by ezmlm number.
This should be less likely to have missing numbers or duplicate entries.
However duplicates can occur in archive files if:
- the sequence number was reset at any point
- multiple mailing lists were merged
- messages were somehow duplicated before archival
Used for multi-import tests where you wish to check that multiple sources give the same ID
Emails with duplicate sort keys are logged and dropped
"""
import argparse
import mailbox
import re
import sys
parser = argparse.ArgumentParser(description='Command line options.')
parser.add_argument('--ezmlm', dest='ezmlm', action='store_true',
help="Use ezmlm numbering for sorting")
parser.add_argument('args', nargs=argparse.REMAINDER)
args = parser.parse_args()
outmbox = args.args[0]
msgfiles = args.args[1:] # multiple input files allowed
allmessages = {}
noid = 0
skipped = 0
crlf = None # assume that all emails have the same EOL
for msgfile in msgfiles:
messages = mailbox.mbox(
msgfile, None, create=False
)
sortkey = None
for key in messages.iterkeys():
message = messages.get(key)
if args.ezmlm:
from_ = message.get_from()
m = re.search(r"return-(\d+)-", from_)
if m:
sortkey = m.group(1)
else:
print("Failed to find ezmlm id in %s" % from_)
skipped += 1
continue
else:
msgid = message.get('message-id')
if msgid:
sortkey = msgid.strip()
else:
print("No message id, sorting by date or subject: ", message.get_from())
noid += 1
altid = message.get('date') or message.get('subject')
sortkey = "~" + altid.strip() # try to ensure it sorts last
# store the data
file = messages.get_file(key, True)
message_raw = b''
if crlf is None:
message_raw = file.readline()
crlf = (message_raw.endswith(b'\r\n'))
message_raw += file.read()
file.close()
if sortkey in allmessages:
print("Duplicate sort key: %s" % sortkey)
skipped += 1
allmessages[sortkey] = message_raw
nw = 0
with open(outmbox, "wb") as f:
for key in sorted(allmessages.keys()):
f.write(allmessages[key])
if crlf:
f.write(b'\r\n')
else:
f.write(b'\n')
nw += 1
print("Wrote %u emails to %s with CRLF %s (%u without message-id) WARN: %u skipped" % (nw, outmbox, crlf, noid, skipped))
|
StarcoderdataPython
|
3350360
|
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..utils import (
int_or_none,
js_to_json,
qualities,
)
class PornHdIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?pornhd\.com/(?:[a-z]{2,4}/)?videos/(?P<id>\d+)(?:/(?P<display_id>.+))?'
_TEST = {
'url': 'http://www.pornhd.com/videos/1962/sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
'md5': '956b8ca569f7f4d8ec563e2c41598441',
'info_dict': {
'id': '1962',
'display_id': 'sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
'ext': 'mp4',
'title': 'Sierra loves doing laundry',
'description': 'md5:8ff0523848ac2b8f9b065ba781ccf294',
'thumbnail': 're:^https?://.*\.jpg',
'view_count': int,
'age_limit': 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id or video_id)
title = self._html_search_regex(
r'<title>(.+) porn HD.+?</title>', webpage, 'title')
description = self._html_search_regex(
r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False)
view_count = int_or_none(self._html_search_regex(
r'(\d+) views\s*</span>', webpage, 'view count', fatal=False))
thumbnail = self._search_regex(
r"'poster'\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False)
quality = qualities(['sd', 'hd'])
sources = json.loads(js_to_json(self._search_regex(
r"(?s)'sources'\s*:\s*(\{.+?\})\s*\}[;,)]",
webpage, 'sources')))
formats = []
for qname, video_url in sources.items():
if not video_url:
continue
formats.append({
'url': video_url,
'format_id': qname,
'quality': quality(qname),
})
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'view_count': view_count,
'formats': formats,
'age_limit': 18,
}
|
StarcoderdataPython
|
3217983
|
#!/usr/bin/env python3
"""
Run SPBuild on all sequences in a fasta files
"""
import sys
import os
import multiprocessing
import subprocess
import itertools
import tempfile
import argparse
import pandas as pd
from Bio import SeqIO
def run_spbuild(seq, tempdir):
"""
Run SPBuild
"""
node_name = multiprocessing.current_process().name
print(f'Processing {seq.id} on {node_name}', file=sys.stderr)
# Write required Fasta
fasta_path = f'{tempdir}/{node_name}.fa'
SeqIO.write(seq, fasta_path, 'fasta')
# Run SPBuild and cleanup
mtx_path = f'{tempdir}/{node_name}.mtx'
spbuild = subprocess.run(['spbuild', '-i', fasta_path, '-m', mtx_path])
if not spbuild.returncode == 0:
        print(f'Error processing {seq.id}: spbuild exited with code {spbuild.returncode}', file=sys.stderr)
return None
# Process Output
    mtx = pd.read_csv(mtx_path, skiprows=2, sep=r'\s+')
mtx = mtx.reset_index().rename(columns={'level_0': 'position', 'level_1': 'wt'})
mtx['protein'] = seq.id
cols = mtx.columns.to_list()
mtx = mtx[['protein'] + cols[:-1]]
os.remove(fasta_path)
os.remove(mtx_path)
return mtx
def main(args):
"""
Run SPBuild on input Fasta and format into a single output file
"""
seqs = SeqIO.parse(args.fasta, 'fasta')
with multiprocessing.Pool(processes=args.processes) as pool,\
tempfile.TemporaryDirectory(dir=args.temp) as tempdir:
profiles = pool.starmap(run_spbuild, zip(seqs, itertools.cycle([tempdir])))
profiles = pd.concat(profiles, axis=0)
profiles.to_csv(sys.stdout, sep='\t', index=False)
def parse_args():
"""Process arguments"""
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('fasta', metavar='F', help="Input Fasta")
parser.add_argument('--processes', '-p', default=1, type=int,
help="Number of processes available")
parser.add_argument('--temp', '-t', default='.', type=str,
help="Root location for tempory storage")
return parser.parse_args()
if __name__ == "__main__":
main(parse_args())
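# Illustrative usage (not part of the original file; file names are hypothetical):
#   ./spbuild_profiles.py proteins.fa --processes 4 --temp /tmp > profiles.tsv
# Each sequence is written to a per-worker fasta, run through spbuild, and the resulting
# .mtx profiles are concatenated into a single tab-separated table on stdout.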
|
StarcoderdataPython
|
3329702
|
salary = int(input('Enter the salary of the employee:: '))
work_year = int(input('\nEnter the number of years the employee has work in company:: '))
if work_year > 5:
bonus = salary * 0.05 # 5 percent bonus on salary
print('\nThe bonus you get on your salary is :: ', int(bonus))
print('\nYour net salary will be::', salary + bonus)
else:
print("\nYou do not get bonus and your salary will be same")
|
StarcoderdataPython
|
109554
|
<filename>Month 02/Week 03/Day 01/c.py
# Pow(x, n): https://leetcode.com/problems/powx-n/
# Implement pow(x, n), which calculates x raised to the power n (i.e., xn).
# This problem is pretty straightforward: we simply iterate n times, multiplying
# by the input every time. The only tricky thing is that if n is negative we need
# to make sure to set x to 1/x before multiplying.
class Solution:
def myPow(self, x: float, n: int) -> float:
if n == 0:
return 1.0
if x == 0:
return 0
if n < 0:
x = 1/x
n *= -1
result = 1
for _ in range(n):
result *= x
return result
    # The above works! It runs in O(n) steps and uses O(1) space; the problem is that this isn't the fastest we can do.
    # The faster approach is based on the math that 2 ^ 10 == 4 ^ 5 == 4 * 16 ^ 2 == 4 * 256 ^ 1 == 1024.
    # If you look at the above, we can reduce n by half in every single iteration, which gives
    # an O(log n) solution.
def myPowOptimus(self, x: float, n: int) -> float:
if n == 0:
return 1.0
if x == 0:
return 0
if n < 0:
x = 1/x
n *= -1
result = 1.0
current = x
index = n
while index > 0:
            # If we have an odd exponent we multiply the result by the current factor so the remaining exponent is even
            # and we can multiply the factor by itself to reduce the exponent by half
if index % 2 == 1:
result = result * current
# Multiply current by current to reduce exponent in half
current = current * current
# Make sure our iterator is decreased by half
index = index // 2
return result
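    # Illustrative trace (not part of the original file) for x=2, n=10:
    # index 10 -> current 4, index 5; index 5 is odd -> result 4, current 16, index 2;
    # index 2 -> current 256, index 1; index 1 is odd -> result 1024, and the loop ends.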
# Score Card
# Did I need hints? Yes because I forgot that I switched my index to equal n to make things more clear
# Did you finish within 30 min? 20
# Was the solution optimal? Oh yeah this runs in o(logn) and o(1)
# Were there any bugs? Yeah i forgot to make my odd exponent check use the right variable
# 3 5 5 3 = 4
|
StarcoderdataPython
|
4805092
|
# Time: O(n)
# Space: O(1)
class Solution(object):
def convert(self, s, numRows):
"""
:type s: str
:type numRows: int
:rtype: str
"""
if numRows == 1:
return s
step, zigzag = 2 * numRows - 2, ""
for i in xrange(numRows):
for j in xrange(i, len(s), step):
zigzag += s[j]
if 0 < i < numRows - 1 and j + step - 2 * i < len(s):
zigzag += s[j + step - 2 * i]
return zigzag
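    # Illustrative example (not part of the original file): convert("PAYPALISHIRING", 3)
    # reads row 0 as "PAHN", row 1 as "APLSIIG" and row 2 as "YIR", returning "PAHNAPLSIIGYIR".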
|
StarcoderdataPython
|
118270
|
<reponame>gnott/elife-bot
import json
import os
import importlib
from optparse import OptionParser
import boto.swf
import settings as settingsLib
import workflow
import activity
# Add parent directory for imports, so activity classes can use elife-poa-xml-generation
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)
"""
Amazon SWF register workflow or activity utility
"""
def start(ENV="dev"):
# Specify run environment settings
settings = settingsLib.get_settings(ENV)
# Simple connect
conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
workflow_names = []
workflow_names.append("CopyGlencoeStillImages")
workflow_names.append("SilentCorrectionsIngest")
workflow_names.append("SilentCorrectionsProcess")
workflow_names.append("IngestArticleZip")
workflow_names.append("ProcessArticleZip")
workflow_names.append("ArticleInformationSupplier")
workflow_names.append("Ping")
workflow_names.append("Sum")
workflow_names.append("ApproveArticlePublication")
workflow_names.append("NewS3File")
workflow_names.append("S3Monitor")
workflow_names.append("LensArticlePublish")
workflow_names.append("AdminEmail")
workflow_names.append("SendQueuedEmail")
workflow_names.append("PackagePOA")
workflow_names.append("PublishPOA")
workflow_names.append("DepositCrossref")
workflow_names.append("PubmedArticleDeposit")
workflow_names.append("PublicationEmail")
workflow_names.append("FTPArticle")
workflow_names.append("PubRouterDeposit")
workflow_names.append("PMCDeposit")
workflow_names.append("PublishPerfectArticle")
workflow_names.append("PostPerfectPublication")
for workflow_name in workflow_names:
# Import the workflow libraries
class_name = "workflow_" + workflow_name
module_name = "workflow." + class_name
importlib.import_module(module_name)
full_path = "workflow." + class_name + "." + class_name
# Create the workflow object
f = eval(full_path)
logger = None
workflow_object = f(settings, logger, conn)
# Now register it
response = workflow_object.register()
print 'got response: \n%s' % json.dumps(response, sort_keys=True, indent=4)
activity_names = []
activity_names.append("InvalidateCdn")
activity_names.append("ConvertImagesToJPG")
activity_names.append("DepositIngestAssets")
activity_names.append("CopyGlencoeStillImages")
activity_names.append("VerifyImageServer")
activity_names.append("GeneratePDFCovers")
activity_names.append("VerifyGlencoe")
activity_names.append("UpdateRepository")
activity_names.append("SetEIFPublish")
activity_names.append("VersionLookup")
activity_names.append("VersionDateLookup")
activity_names.append("VerifyPublishResponse")
activity_names.append("PublishToLax")
activity_names.append("VerifyLaxResponse")
activity_names.append("IngestToLax")
activity_names.append("PostEIFBridge")
activity_names.append("PingWorker")
activity_names.append("SetPublicationStatus")
activity_names.append("ConvertJATS")
activity_names.append("ExpandArticle")
activity_names.append("ApplyVersionNumber")
activity_names.append("ArchiveArticle")
activity_names.append("RewriteEIF")
activity_names.append("UpdateLAX")
activity_names.append("DepositAssets")
activity_names.append("ApprovePublication")
activity_names.append("ResizeImages")
activity_names.append("PreparePostEIF")
activity_names.append("Sum")
activity_names.append("S3Monitor")
activity_names.append("AdminEmailHistory")
activity_names.append("SendQueuedEmail")
activity_names.append("LensArticle")
activity_names.append("PackagePOA")
activity_names.append("PublishFinalPOA")
activity_names.append("DepositCrossref")
activity_names.append("PubmedArticleDeposit")
activity_names.append("PublicationEmail")
activity_names.append("FTPArticle")
activity_names.append("PubRouterDeposit")
activity_names.append("PMCDeposit")
activity_names.append("ScheduleCrossref")
activity_names.append("ScheduleDownstream")
for activity_name in activity_names:
# Import the activity libraries
class_name = "activity_" + activity_name
module_name = "activity." + class_name
importlib.import_module(module_name)
full_path = "activity." + class_name + "." + class_name
# Create the workflow object
f = eval(full_path)
logger = None
activity_object = f(settings, logger, conn)
# Now register it
response = activity_object.register()
print 'got response: \n%s' % json.dumps(response, sort_keys=True, indent=4)
if __name__ == "__main__":
# Add options
parser = OptionParser()
parser.add_option("-e", "--env", default="dev", action="store", type="string",
dest="env", help="set the environment to run, either dev or live")
(options, args) = parser.parse_args()
if options.env:
ENV = options.env
start(ENV)
|
StarcoderdataPython
|
77952
|
<filename>Utils/Classes/discordwebuser.py
from Utils.Classes.undefined import UNDEFINED
from Utils.Classes.contentclass import ContentClass
class DiscordWebUser(ContentClass):
"""
    Contains information about a discord web user;
    this object is supposed to be appended to an `AuthDiscordWebUser` object.
    It contains data similar to discord.User,
    but it is actually only filled with the data we got from discord when the user authorised phaaze access.
"""
def __init__(self, data:dict):
self.user_id:str = self.asString(data.get("id", UNDEFINED))
self.username:str = self.asString(data.get("username", UNDEFINED))
self.email:str = self.asString(data.get("email", UNDEFINED))
self.verified:bool = self.asBoolean(data.get("verified", UNDEFINED))
self.locale:str = self.asString(data.get("locale", UNDEFINED))
self.premium_type:int = self.asInteger(data.get("premium_type", UNDEFINED))
self.flags:int = self.asInteger(data.get("flags", UNDEFINED))
self.avatar:str = self.asString(data.get("avatar", UNDEFINED))
self.discriminator:str = self.asString(data.get("discriminator", UNDEFINED))
def __repr__(self):
return f"<{self.__class__.__name__} id='{self.user_id}' name='{self.username}'>"
def toJSON(self) -> dict:
""" Returns a json save dict representation of all values for API, storage, etc... """
j:dict = dict()
j["user_id"] = self.asString(self.user_id)
j["username"] = self.asString(self.username)
j["email"] = self.asString(self.email)
j["verified"] = self.asBoolean(self.verified)
j["locale"] = self.asString(self.locale)
j["premium_type"] = self.asInteger(self.premium_type)
j["flags"] = self.asInteger(self.flags)
j["avatar"] = self.asString(self.avatar)
j["discriminator"] = self.asString(self.discriminator)
return j
|
StarcoderdataPython
|
3213506
|
<gh_stars>1-10
# TOOL list_zip.py: "List contents of a zip file" (List the contents of a zip file.)
# INPUT input_file: ".zip file" TYPE GENERIC (Zip file.)
# OUTPUT output_file: "List of files in the zip package"
# PARAMETER OPTIONAL full_paths: "Keep directories" TYPE [yes: Yes, no: No] DEFAULT no (Use the whole file path for the filename.)
# RUNTIME python3
import os
import zipfile
from tool_utils import *
def main():
document_python_version()
input_name = read_input_definitions()['input_file']
input_basename = remove_postfix(input_name, ".zip")
output_name = input_basename + '_list.txt'
with zipfile.ZipFile("input_file", "r") as zip_file:
with open('output_file', 'w') as list_file:
for member in zip_file.infolist():
# member.is_dir() after python 3.6
if member.filename[-1] == '/':
print('skipping directory: ' + member.filename)
continue
# remove paths from dataset names, because those aren't supported in client
dataset_name = member.filename
if (full_paths == 'no'):
dataset_name = os.path.basename(dataset_name)
list_file.write(dataset_name + '\n')
# set dataset names
write_output_definitions({
'output_file': output_name
})
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1676293
|
from django.contrib import admin
from django.urls import path, include
from core.views import Cadastrar, home, carinho, cadastrar_cliente, cadastrar_produto, \
listar_produto, editar_produto, excluir_produto, exibir_produto, cadastrar_funcionario, \
listar_funcionario, editar_funcionario, excluir_funcionario, cadastrar_cargo, listar_cargo, \
editar_cargo, excluir_cargo, cadastrar_setor, listar_setor, editar_setor, excluir_setor, \
cadastrar_fabricante, listar_fabricante, editar_fabricante, excluir_fabricante, cadastrar_status, \
listar_status, editar_status, excluir_status, cadastrar_tipo_solicitacao, listar_tipo_solicitacao, \
editar_tipo_solicitacao, excluir_tipo_solicitacao, cadastrar_pagamento, listar_pagamento, \
editar_pagamento, excluir_pagamento, login_cliente, login_funcionario
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('account/', include('django.contrib.auth.urls')),
path('', home, name='url_home'),
path('cadastrar/', Cadastrar.as_view(), name='url_cadastrar'),
path('login_cliente/', login_cliente, name='url_login_cliente'),
path('login_funcionario/', login_funcionario, name='url_login_funcionario'),
path('carrinho/', carinho, name='url_carrinho'),
path('cadastrar_cliente/', cadastrar_cliente, name='url_cadastrar_cliente'),
path('cadastrar_produto/', cadastrar_produto, name='url_cadastrar_produto'),
path('listar_produto/', listar_produto, name='url_listar_produto'),
path('editar_produto/<int:id>/', editar_produto, name='url_editar_produto'),
path('excluir_produto/<int:id>/', excluir_produto, name='url_excluir_produto'),
path('exibir_produto/<int:id>/', exibir_produto, name='url_exibir_produto'),
path('cadastrar_funcionario/', cadastrar_funcionario, name='url_cadastrar_funcionario'),
path('listar_funcionario/', listar_funcionario, name='url_listar_funcionario'),
path('editar_funcionario/<int:id>/', editar_funcionario, name='url_editar_funcionario'),
path('excluir_funcionario/<int:id>/', excluir_funcionario, name='url_excluir_funcionario'),
path('cadastrar_cargo/', cadastrar_cargo, name='url_cadastrar_cargo'),
path('listar_cargo/', listar_cargo, name='url_listar_cargo'),
path('editar_cargo/<int:id>/', editar_cargo, name='url_editar_cargo'),
path('excluir_cargo/<int:id>/', excluir_cargo, name='url_excluir_cargo'),
path('cadastrar_setor/', cadastrar_setor, name='url_cadastrar_setor'),
path('listar_setor/', listar_setor, name='url_listar_setor'),
path('editar_setor/<int:id>/', editar_setor, name='url_editar_setor'),
path('excluir_setor/<int:id>/', excluir_setor, name='url_excluir_setor'),
path('cadastrar_fabricante/', cadastrar_fabricante, name='url_cadastrar_fabricante'),
path('listar_fabricante/', listar_fabricante, name='url_listar_fabricante'),
path('editar_fabricante/<int:id>/', editar_fabricante, name='url_editar_fabricante'),
path('excluir_fabricante/<int:id>/', excluir_fabricante, name='url_excluir_fabricante'),
path('cadastrar_status/', cadastrar_status, name='url_cadastrar_status'),
path('listar_status/', listar_status, name='url_listar_status'),
path('editar_status/<int:id>/', editar_status, name='url_editar_status'),
path('excluir_status/<int:id>/', excluir_status, name='url_excluir_status'),
path('cadastrar_tipo_solicitacao/', cadastrar_tipo_solicitacao, name='url_cadastrar_tipo_solicitacao'),
path('listar_tipo_solicitacao/', listar_tipo_solicitacao, name='url_listar_tipo_solicitacao'),
path('editar_tipo_solicitacao/<int:id>/', editar_tipo_solicitacao, name='url_editar_tipo_solicitacao'),
path('excluir_tipo_solicitacao/<int:id>/', excluir_tipo_solicitacao, name='url_excluir_tipo_solicitacao'),
path('cadastrar_pagamento/', cadastrar_pagamento, name='url_cadastrar_pagamento'),
path('listar_pagamento/', listar_pagamento, name='url_listar_pagamento'),
path('editar_pagamento/<int:id>/', editar_pagamento, name='url_editar_pagamento'),
path('excluir_pagamento/<int:id>/', excluir_pagamento, name='url_excluir_pagamento')
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
StarcoderdataPython
|
3252132
|
from mock import patch
from nose.tools import assert_equal
from gittip.elsewhere import github
from gittip.models import Elsewhere
from gittip.testing import Harness, DUMMY_GITHUB_JSON
from gittip.testing.client import TestClient
class TestElsewhereGithub(Harness):
def test_github_resolve_resolves_correctly(self):
alice = self.make_participant('alice')
alice_on_github = Elsewhere(platform='github', user_id="1",
user_info={'login': 'alice'})
alice.accounts_elsewhere.append(alice_on_github)
self.session.commit()
expected = 'alice'
actual = github.resolve(u'alice')
assert actual == expected, actual
@patch('gittip.elsewhere.github.requests')
def test_github_user_info_status_handling(self, requests):
client = TestClient()
# Check that different possible github statuses are handled correctly
for (github_status, github_content), expected_gittip_response in [
((200, DUMMY_GITHUB_JSON), 200),
((404, ""), 404),
((500, ""), 502),
((777, ""), 502)]:
requests.get().status_code = github_status
requests.get().text = github_content
response = client.get('/on/github/not-in-the-db/')
print response.code, expected_gittip_response, response.body
assert_equal(response.code, expected_gittip_response)
|
StarcoderdataPython
|
4829741
|
<gh_stars>1-10
import sys
import os
from glob import glob
from scipy.signal import detrend
from time import time
from geoNet.geoNet_file import GeoNet_File
from geoNet.process import Process, adjust_gf_for_time_delay
init_time = time()
#enter location of ObservedGroundMotions directory
#files are placed after download
LOC=os.path.join(os.getcwd(), 'ObservedGroundMotions')
#open file with eventid
f = open('list_all')
#loop over events
for line in f:
eventid = line.strip()
LOC_V1A = os.path.join(LOC,eventid,'Vol1','data')
#get list of V1A files for this event
FILE_NAMES = []
event_stats_V1A = glob(os.path.join(LOC_V1A,'*.V1A'))
FILE_NAMES = [os.path.basename(_) for _ in event_stats_V1A]
print("\n Processing %d stations in Vol1 data ..." %len(FILE_NAMES))
#loop over each V1A file and process
for station_file_name in FILE_NAMES:
print("\n**************************")
print("%s" %station_file_name)
print("\n**************************")
try:
gf = GeoNet_File(station_file_name, LOC_V1A, vol=1)
if gf.comp_1st.acc.size < 5./gf.comp_1st.delta_t:
print("%s has less than 5 secs of data" %station_file_name)
print("skipping %s" %station_file_name)
continue
#When appended zeroes at the beginning of the record are removed, the
            #record might then be empty; skip processing in such a case
agf=adjust_gf_for_time_delay(gf)
if agf.comp_1st.acc.size <= 10:
print("no elements in %s. Skipping it." %station_file_name)
continue
gf.comp_1st.acc -= gf.comp_1st.acc.mean()
gf.comp_2nd.acc -= gf.comp_2nd.acc.mean()
gf.comp_up.acc -= gf.comp_up.acc.mean()
gf.comp_1st.acc = detrend(gf.comp_1st.acc, type='linear')
gf.comp_2nd.acc = detrend(gf.comp_2nd.acc, type='linear')
gf.comp_up.acc = detrend(gf.comp_up.acc, type='linear')
pgf = Process(gf, lowcut=0.05, gpInt=False)
#pgf = Process(gf, lowcut=0.05, ft=0.25)
except Exception as e:
print(e)
print("%s is problematic, skipping it" %station_file_name)
continue
#raise
#extract stat code
stat_code = station_file_name.split(".")[0].split("_")[2]
try:
#expects that velBB etc directories already exist, created when getData.py is used
pgf.save2disk(LOC_V1A+"/velBB/", stat_code, 'velBB')
pgf.save2disk(LOC_V1A+"/velLF/", stat_code, 'velLF')
pgf.save2disk(LOC_V1A+"/accBB/", stat_code, 'accBB')
except Exception as e:
print(e)
print("Skipping this station %s\n" %station_file_name)
continue
final_time = time()
print("Done in {:10.1f} secs".format(final_time-init_time))
|
StarcoderdataPython
|
1619564
|
<filename>demo.py
from kuro import Worker
import random
def run(guesser_name, hyper_parameters):
worker = Worker('nibel')
experiment = worker.experiment(
'guesser', guesser_name,
metrics=[('test_acc', 'max'), 'test_loss'],
hyper_parameters=hyper_parameters,
        n_trials=3  # Used to group models together and compare them
)
# Run 5 trials of same parameters
for _ in range(5):
trial = experiment.trial() # If worker doesn't have a trial for experiment make one, otherwise fetch it
# If there is nothing more to run, skip running a trial
if trial is None:
continue
for step in range(10): # Steps can be epochs or batch steps etc
acc, loss = 9 + step, -1 - step # Model results here from testing data
trial.report_metric('test_acc', acc + random.uniform(0, 5), step=step) # Explicitly pass step, no need for mode since it was passed in metrics
trial.report_metric('test_loss', loss + random.uniform(0, 10), step=step) # For common things such as loss/accuracy, these are automatically inferred if not given
trial.report_metric('optimality', 0, mode='min', step=step)# Allow step to be auto-computed, need mode since its a new metric on first iteration
trial.report_metric('final_metric', 98 + random.uniform(-3, 0), mode='max') # similarly new metric needs mode
trial.report_metric('final_nums', 93 + random.uniform(-10, 10), mode='min')
trial.report_metric('final_digits', random.randint(0, 100), mode='min')
trial.end() # Mark trial as complete
if __name__ == '__main__':
run('qanta.guesser.dan.DanGuesser', {'lr': .001, 'dropout': .5})
run('qanta.guesser.rnn.RnnGuesser', {'lr': .1, 'dropout': .1})
|
StarcoderdataPython
|
171351
|
# A collection of common functions used in the creation and manipulation of the data for this project.
from approaches.approach import Multiclass_Logistic_Regression, Perceptron, Sklearn_SVM
import comp_vis.img_tools as it
import numpy as np
import sys
def images_to_data(images, label, already_cropped=True):
'''
    # TODO: Should this be fixed or modular? I think just returning the most successful arrangement of data from our tests is best, but who knows!
:param images: A list of OpenCV images
:param already_cropped: if these images have already been cropped. If not, they will be.
:param label: An integer label for the data.
:return: A numpy matrix of the data, with the label if any.
'''
# Crop the images appropriately
cropped_images = []
if already_cropped:
cropped_images = images
else:
for image in images:
cropped_img = it.crop_image(image)
if np.shape(cropped_img) != np.shape(image):
cropped_images += [cropped_img]
if len(cropped_images) == 0:
sys.stderr.write('Error: No objects detected in images.\n(Did you mistakenly set already_cropped to false?)')
img_avgs = [it.average_color(image) for image in cropped_images]
img_dims = it.get_images_dimensions(cropped_images, ordered=True)
data = [img_avgs[i] + list(img_dims[i]) + [label] for i in range(len(images))]
data = np.array([np.array(x) for x in data])
return data
def string_to_model(approach_name):
'''
:param approach_name: The string name of the model to be returned
:return: The model (subset of the Approach class) with a name matching approach_name
:raises ValueError: Raises if approach_name not recognized
'''
if approach_name == "perceptron":
return Perceptron()
elif approach_name == "multiclass":
return Multiclass_Logistic_Regression()
elif approach_name == "sklearn_svm":
return Sklearn_SVM()
else:
raise ValueError('Model type ' + approach_name + ' not recognized.')
def training_and_testing_sep(data, training_fraction):
'''
:param data: Numpy matrix of data
:param training_fraction: Float between 0.00 and 1.00 denoting the size of the training set
:return: A training and testing set.
'''
# Randomly shuffle the data
np.random.shuffle(data)
training_size = int(training_fraction*np.shape(data)[0])
training_data, testing_data = data[0:training_size], data[training_size:]
return training_data, testing_data
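# A small illustrative check (not part of the original module): it exercises the
# train/test split helper on synthetic data; the 10x5 matrix and 80/20 split are
# made-up values chosen only for the example.
if __name__ == '__main__':
    fake_data = np.arange(50).reshape(10, 5)  # 10 samples, 5 features each
    train, test = training_and_testing_sep(fake_data, training_fraction=0.8)
    print(np.shape(train), np.shape(test))    # expected: (8, 5) (2, 5)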
|
StarcoderdataPython
|
172138
|
import matplotlib.pyplot as plt
def load_tsne_coordinates_from(filename):
file = open(filename)
lines = file.readlines()
line_xy_dict = {}
line_to_xy_dict = {}
for line in lines:
row = line.split()
x = float(row[0])
y = float(row[1])
try:
line_id = row[2]
except:
continue
line_xy_dict[line_id] = (x, y)
line_to_xy_dict[(x, y)] = line_id
return line_xy_dict, line_to_xy_dict
def plot_sentiment(line_xy_dict):
xs = []
ys = []
for key, (x, y) in line_xy_dict.items():
xs.append(x)
ys.append(y)
plt.scatter(xs, ys, marker='.', c='r', s=1.3, linewidth=0)
plt.legend(loc='best')
def plot_temporal_cluster(line_xy_dict, start, end):
sequence_x = []
sequence_y = []
sequence_labels_dict = {}
index = 0
for line_id in range(start, end):
line_str = 'LINES_' + str(line_id)
x, y = line_xy_dict[line_str]
index += 1
sequence_x.append(x)
sequence_y.append(y)
sequence_labels_dict[str(index)] = (x, y)
for key in sequence_labels_dict.keys():
plt.annotate(key, xy=sequence_labels_dict[key], fontsize=30)
plt.plot(sequence_x, sequence_y, marker='.', color='k', )
def plot_temporal_cluster_using_path(line_xy_dict, path):
sequence_x = []
sequence_y = []
sequence_labels_dict = {}
index = 0
for line_id in path:
line_str = 'LINES_' + str(line_id)
x, y = line_xy_dict[line_str]
index += 1
sequence_x.append(x)
sequence_y.append(y)
sequence_labels_dict[str(index)] = (x, y)
for key in sequence_labels_dict.keys():
plt.annotate(key, xy=sequence_labels_dict[key], fontsize=30)
plt.plot(sequence_x, sequence_y, marker='.', color='k', )
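# A hypothetical driver (not part of the original file): it assumes a whitespace-separated
# coordinate file named "tsne_coords.txt" with lines of the form "<x> <y> LINES_<n>",
# which is the layout load_tsne_coordinates_from() parses above.
if __name__ == '__main__':
    line_xy, xy_line = load_tsne_coordinates_from('tsne_coords.txt')
    plot_sentiment(line_xy)                          # scatter every embedded line
    plot_temporal_cluster(line_xy, start=1, end=6)   # overlay a short temporal path
    plt.show()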
|
StarcoderdataPython
|
3361136
|
<gh_stars>0
from flask import render_template
from app import app
# Error handlers
@app.errorhandler(500)
def internal_error(error):
return render_template('errors/500.html'), 500
@app.errorhandler(404)
def not_found_error(error):
return render_template('errors/404.html'), 404
@app.errorhandler(403)
def permission_error(error):
return render_template('errors/403.html', error=error.description), 403
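# A hypothetical smoke test (not part of this module), assuming the `app` object imported
# above is fully configured and the errors/404.html template exists:
#
#     with app.test_client() as client:
#         assert client.get('/definitely-not-a-route').status_code == 404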
|
StarcoderdataPython
|
1649776
|
<reponame>prashant-rathod/deep-time-series<filename>models/darnn/dataset.py
import numpy as np
import math
class Dataset:
def __init__(self, X_train, y_train, T, split_ratio=0.7, normalized=False):
self.train_size = int(split_ratio * (y_train.shape[0] - T - 1))
self.test_size = y_train.shape[0] - T - 1 - self.train_size
self.time_step = T
if normalized:
y_train = y_train - y_train.mean()
self.X, self.y, self.y_seq = self.time_series_gen(X_train, y_train, T)
def get_time_step(self):
return self.time_step
def get_size(self):
return self.train_size, self.test_size
def get_num_features(self):
return self.X.shape[1]
def get_train_set(self):
return self.X[:self.train_size], self.y[:self.train_size], self.y_seq[:self.train_size]
def get_test_set(self):
return self.X[self.train_size:], self.y[self.train_size:], self.y_seq[self.train_size:]
def time_series_gen(self, X, y, T):
ts_x, ts_y, ts_y_seq = [], [], []
for i in range(len(X) - T - 1):
last = i + T
ts_x.append(X[i: last])
ts_y.append(y[last])
ts_y_seq.append(y[i: last])
return np.array(ts_x), np.array(ts_y), np.array(ts_y_seq)
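# A minimal sketch (not in the original module) of driving this wrapper with synthetic
# arrays; the 200-step series, 4 features and T=10 window are illustrative assumptions.
if __name__ == '__main__':
    X = np.random.rand(200, 4)   # 200 time steps, 4 driving series
    y = np.random.rand(200)      # target series
    ds = Dataset(X, y, T=10, split_ratio=0.7, normalized=True)
    X_tr, y_tr, y_seq_tr = ds.get_train_set()
    print(ds.get_size(), X_tr.shape, y_tr.shape, y_seq_tr.shape)
    # expected: (132, 57) (132, 10, 4) (132,) (132, 10)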
|
StarcoderdataPython
|
3337599
|
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from haystack.forms import SearchForm
from haystack.views import SearchView
from . import views
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^$', views.HomePageView.as_view(), name="home"),
url(r'^blog/', include("blog.urls", namespace="blog")),
url(r'^search/', SearchView(form_class=SearchForm)),
]
urlpatterns += [
# url(r'^about/$', include('django.contrib.flatpages.urls'), name='about'),
# url(r'^contact/$', include('django.contrib.flatpages.urls'), name='contact'),
url(r'^about/', include('django.contrib.flatpages.urls')),
url(r'^contact/', include('django.contrib.flatpages.urls')),
]
urlpatterns += [
# ... the rest of your URLconf goes here ...
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
handler404 = "blavanet_web.views.custom_404"
handler500 = "blavanet_web.views.custom_500"
|
StarcoderdataPython
|
3382629
|
import json
logic = """
{
"and": [
{
"or": [
{"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018A/DQM"]},
{"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018B/DQM"]},
{"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018C/DQM"]},
{"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018D/DQM"]},
{"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018E/DQM"]},
{"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018F/DQM"]},
{"==": [{"var": "dataset.name"}, "/PromptReco/Collisions2018G/DQM"]}
]
},
{ ">=": [{ "var": "run.oms.energy" }, 6000] },
{ "<=": [{ "var": "run.oms.energy" }, 7000] },
{ ">=": [{ "var": "run.oms.b_field" }, 3.7] },
{ "in": [{ "var": "run.oms.injection_scheme" }, "25ns"] },
{ "==": [{ "in": [{ "var": "run.oms.hlt_key" }, "WMass"] }, false] },
{ "==": [{ "var": "lumisection.rr.dt-dt" }, "GOOD"] },
{ "==": [{ "var": "lumisection.rr.csc-csc" }, "GOOD"] },
{ "==": [{ "var": "lumisection.rr.l1t-l1tmu" }, "GOOD"] },
{ "==": [{ "var": "lumisection.rr.hlt-hlt" }, "GOOD"] },
{ "==": [{ "var": "lumisection.rr.tracker-pixel" }, "GOOD"] },
{ "==": [{ "var": "lumisection.rr.tracker-strip" }, "GOOD"] },
{ "==": [{ "var": "lumisection.rr.tracker-tracking" }, "GOOD"] },
{ "==": [{ "var": "lumisection.rr.ecal-ecal" }, "GOOD"] },
{ "==": [{ "var": "lumisection.rr.ecal-es" }, "GOOD"] },
{ "==": [{ "var": "lumisection.rr.hcal-hcal" }, "GOOD"] },
{ "==": [{ "var": "lumisection.rr.muon-muon" }, "GOOD"] },
{ "==": [{ "var": "lumisection.rr.jetmet-jetmet" }, "GOOD"] },
{ "==": [{ "var": "lumisection.rr.lumi-lumi" }, "GOOD"] },
{ "==": [{ "var": "lumisection.rr.dc-lowlumi" }, "GOOD"] },
{ "==": [{ "var": "lumisection.oms.cms_active" }, true] },
{ "==": [{ "var": "lumisection.oms.bpix_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.fpix_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.tibtid_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.tecm_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.tecp_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.castor_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.tob_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.ebm_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.ebp_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.eem_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.eep_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.esm_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.esp_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.hbhea_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.hbheb_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.hbhec_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.hf_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.ho_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.dtm_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.dtp_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.dt0_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.cscm_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.cscp_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.rpc_ready" }, true] },
{ "==": [{ "var": "lumisection.oms.beam1_present" }, true] },
{ "==": [{ "var": "lumisection.oms.beam2_present" }, true] },
{ "==": [{ "var": "lumisection.oms.beam1_stable" }, true] },
{ "==": [{ "var": "lumisection.oms.beam2_stable" }, true] }
]
}
"""
logicobj = json.loads(logic)
def sqlsafe(arg):
# TODO
return str(arg)
def indent(string):
return " " + "\n ".join(string.split("\n"))
class Expr():
pass
class ConstExpr(Expr):
def __str__(self):
return sqlsafe(self.val)
class AndExpr(Expr):
def __str__(self):
return "(\n" + indent(" AND \n".join([str(e) for e in self.subexp])) + "\n)"
class OrExpr(Expr):
def __str__(self):
return "(\n" + indent(" OR \n".join([str(e) for e in self.subexp])) + "\n)"
class EqExprExpr(Expr):
def __str__(self):
return "(" + str(self.lhs) + " = " + str(self.rhs) + ")"
class EqLumiValueExpr(Expr):
def __str__(self):
return "(" + str(self.lhs) + "." + str(self.rhs) + " > 0 )"
class EqRunValueExpr(Expr):
def __str__(self):
return "(" + str(self.lhs) + " = " + str(self.rhs) + ")"
class RelLumiValueExpr(Expr):
pass
class RelRunValueExpr(Expr):
def __str__(self):
return "(" + str(self.lhs) + " " + self.op + " " + str(self.rhs) + ")"
class InRunValueExpr(Expr):
pass
class ConstValue():
def __str__(self):
return '"' + sqlsafe(self.val) + '"'
class RunValue():
def __str__(self):
return sqlsafe(self.name)
class LumiValue():
def __str__(self):
return sqlsafe(self.name)
def parse_const(obj):
assert(isinstance(obj, str) or isinstance(
obj, int) or isinstance(obj, float))
val = ConstValue()
val.val = obj
return val
def parse_runvalue(obj):
items = list(obj.items())
assert(len(items) == 1)
assert(items[0][0] == "var")
name = items[0][1]
assert(isinstance(name, str))
assert(name.split(".")[0] in ["dataset", "run"])
val = RunValue()
val.name = name
return val
def parse_lumivalue(obj):
items = list(obj.items())
assert(len(items) == 1)
assert(items[0][0] == "var")
name = items[0][1]
assert(isinstance(name, str))
assert(name.split(".")[0] == "lumisection")
val = LumiValue()
val.name = name
return val
def parse_eq(op, args):
assert(len(args) == 2)
options = [
(EqExprExpr, lambda lhs, rhs: (parse_expr(lhs), parse_expr(rhs))),
(EqLumiValueExpr, lambda lhs, rhs: (parse_lumivalue(lhs), parse_const(rhs))),
(EqRunValueExpr, lambda lhs, rhs: (parse_runvalue(lhs), parse_const(rhs))),
]
for exprclass, argparse in options:
exp = exprclass()
try:
exp.lhs, exp.rhs = argparse(args[0], args[1])
except:
continue
return exp
# out of options
print(args)
assert(False)
def parse_rel(op, args):
assert(len(args) == 2)
options = [
(RelLumiValueExpr, lambda lhs, rhs: (
parse_lumivalue(lhs), parse_const(rhs))),
(RelRunValueExpr, lambda lhs, rhs: (parse_runvalue(lhs), parse_const(rhs))),
]
for exprclass, argparse in options:
exp = exprclass()
try:
exp.lhs, exp.rhs = argparse(args[0], args[1])
except:
continue
exp.op = op
return exp
# out of options
print(args)
assert(False)
def parse_in(op, args):
assert(len(args) == 2)
exp = InRunValueExpr()
exp.lhs = parse_runvalue(args[0])
exp.rhs = parse_const(args[1])
return exp
def parse_and(op, args):
exp = AndExpr()
exp.subexp = [parse_expr(obj) for obj in args]
return exp
def parse_or(op, args):
exp = OrExpr()
exp.subexp = [parse_expr(obj) for obj in args]
return exp
def parse_expr(obj):
# If its a boolean:
if isinstance(obj, bool):
exp = ConstExpr()
exp.val = obj
return exp
else:
# It must be an expression:
items = list(obj.items())
assert(len(items) == 1)
op = items[0][0]
args = items[0][1]
decode = {
"and": parse_and,
"or": parse_or,
"==": parse_eq,
"<": parse_rel,
">": parse_rel,
">=": parse_rel,
"<=": parse_rel,
"in": parse_in,
}
return decode[op](op, args)
print(parse_expr(logicobj))
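# A small additional check (not in the original script): a tiny JSON-logic expression fed
# through the same parser, to show the SQL-like text it emits for run-level variables.
tiny_logic = {"and": [
    {"==": [{"var": "run.oms.b_field"}, 3.8]},
    {">=": [{"var": "run.oms.energy"}, 6000]},
]}
print(parse_expr(tiny_logic))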
|
StarcoderdataPython
|
3284475
|
<filename>src/testcase/GN_APP/input_case/GN_APP_Register.py
# coding=utf-8
try:
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_001 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_002 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_003 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_004 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_005 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_006 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_007 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_008 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_009 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_010 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_011 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_012 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_013 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_014 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_015 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_016 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_017 import *
from src.testcase.GN_APP.case.GN_APP_REGISTER.GN_APP_REGISTER_018 import *
except ImportError as e:
print(e)
|
StarcoderdataPython
|
1669336
|
<filename>physlearn/supervised/__init__.py<gh_stars>1-10
from __future__ import absolute_import
from .interface import RegressorDictionaryInterface
from .regression import BaseRegressor, Regressor
from .interpretation.interpret_regressor import ShapInterpret
from .model_selection.cv_comparison import plot_cv_comparison
from .model_selection.learning_curve import LearningCurve, plot_learning_curve
__all__ = ['RegressorDictionaryInterface',
           'BaseRegressor', 'Regressor',
           'ShapInterpret', 'plot_cv_comparison',
           'LearningCurve', 'plot_learning_curve']
|
StarcoderdataPython
|
1644321
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='user_information_main_page'),
    # Not yet enabled: url(r'^user_comment/', views.user_comment, name='user_information_comment'),
    # Edit user information
url('edit/(?P<id>[0-9]+)/', views.profile_edit, name='profile_edit'),
    # View user information
url('view/(?P<id>[0-9]+)/', views.profile_view, name='profile_view'),
url('view/(?P<id>[0-9]+)/(?P<option>[\w]+)', views.event_view, name='event_view'),
]
|
StarcoderdataPython
|
1673827
|
#-*-coding:utf8-*-
import copy, os
from gen_conf_file import *
from dataset_cfg import *
def gen_conv_lstm(d_mem, init):
net = {}
# dataset = 'tb_fine'
dataset = 'mr'
if dataset == 'mr':
net['cross_validation'] = 10
ds = DatasetCfg(dataset)
g_filler = gen_uniform_filter_setting(init)
zero_filler = gen_zero_filter_setting()
g_updater = gen_adadelta_setting()
g_layer_setting = {}
g_layer_setting['no_bias'] = True
g_layer_setting['phrase_type'] = 2
g_layer_setting['w_filler'] = g_filler
g_layer_setting['u_filler'] = g_filler
g_layer_setting['b_filler'] = zero_filler
g_layer_setting['w_updater'] = g_updater
g_layer_setting['u_updater'] = g_updater
g_layer_setting['b_updater'] = g_updater
net['net_name'] = 'cnn_lstm'
# net['log'] = 'log.cnnlstm.cv'
net_cfg_train, net_cfg_valid, net_cfg_test = {}, {}, {}
net['net_config'] = [net_cfg_train, net_cfg_valid, net_cfg_test]
net_cfg_train["tag"] = "Train"
net_cfg_train["max_iters"] = (ds.n_train * 10)/ ds.batch_size
net_cfg_train["display_interval"] = (ds.n_train/ds.batch_size)/3
net_cfg_train["out_nodes"] = ['acc']
net_cfg_valid["tag"] = "Valid"
net_cfg_valid["max_iters"] = int(ds.n_valid/ds.batch_size)
net_cfg_valid["display_interval"] = (ds.n_train/ds.batch_size)/3
net_cfg_valid["out_nodes"] = ['acc']
net_cfg_test["tag"] = "Test"
net_cfg_test["max_iters"] = int(ds.n_test/ds.batch_size)
net_cfg_test["display_interval"] = (ds.n_train/ds.batch_size)/3
net_cfg_test["out_nodes"] = ['acc']
layers = []
net['layers'] = layers
layer = {}
layers.append(layer)
layer['bottom_nodes'] = []
layer['top_nodes'] = ['y', 'x']
layer['layer_name'] = 'train_data'
layer['layer_type'] = 72
layer['tag'] = ['Train']
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['phrase_type'] = 0
setting['batch_size'] = ds.batch_size
setting['data_file'] = ds.train_data_file
setting['max_doc_len'] = ds.max_doc_len
layer = {}
layers.append(layer)
layer['bottom_nodes'] = []
layer['top_nodes'] = ['y', 'x']
layer['layer_name'] = 'valid_data'
layer['layer_type'] = 72
layer['tag'] = ['Valid']
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['phrase_type'] = 1
setting['batch_size'] = ds.batch_size
setting['data_file'] = ds.valid_data_file
setting['max_doc_len'] = ds.max_doc_len
layer = {}
layers.append(layer)
layer['bottom_nodes'] = []
layer['top_nodes'] = ['y', 'x']
layer['layer_name'] = 'test_data'
layer['layer_type'] = 72
layer['tag'] = ['Test']
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['phrase_type'] = 1
setting['batch_size'] = ds.batch_size
setting['data_file'] = ds.test_data_file
setting['max_doc_len'] = ds.max_doc_len
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['x']
layer['top_nodes'] = ['word_rep_seq']
layer['layer_name'] = 'embedding'
layer['layer_type'] = 21
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['embedding_file'] = ds.embedding_file
# print "ORC, use static word rep"
# setting['update_indication_file'] = update_indication_file
setting['feat_size'] = ds.d_word_rep
setting['word_count'] = ds.vocab_size
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['word_rep_seq']
layer['top_nodes'] = ['word_low_rep_seq']
layer['layer_name'] = 'word_dim_reduction'
layer['layer_type'] = 28
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['num_hidden'] = d_mem
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['word_rep_seq']
layer['top_nodes'] = ['l_lstm_seq']
layer['layer_name'] = 'l_lstm'
layer['layer_type'] = 24
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['d_mem'] = d_mem
setting['reverse'] = False
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['word_rep_seq']
layer['top_nodes'] = ['r_lstm_seq']
layer['layer_name'] = 'r_lstm'
layer['layer_type'] = 24
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['d_mem'] = d_mem
setting['reverse'] = True
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['l_lstm_seq', 'word_low_rep_seq', 'r_lstm_seq']
layer['top_nodes'] = ['conv_lstm_seq']
layer['layer_name'] = 'conv_lstm'
layer['layer_type'] = 26
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['num_hidden'] = d_mem * 2
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['conv_lstm_seq']
layer['top_nodes'] = ['conv_lstm_activation']
layer['layer_name'] = 'nonlinear'
layer['layer_type'] = 1
setting = {"phrase_type":2}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['conv_lstm_activation']
layer['top_nodes'] = ['pool_rep']
layer['layer_name'] = 'wholePooling'
layer['layer_type'] = 25
setting = {"phrase_type":2, "pool_type":"max"}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['pool_rep']
layer['top_nodes'] = ['drop_rep']
layer['layer_name'] = 'dropout'
layer['layer_type'] = 13
setting = {'phrase_type':2, 'rate':ds.dp_rate}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['drop_rep']
layer['top_nodes'] = ['softmax_ret']
layer['layer_name'] = 'softmax_fullconnect'
layer['layer_type'] = 11
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['num_hidden'] = ds.num_class
setting['w_filler'] = zero_filler
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['softmax_ret', 'y']
layer['top_nodes'] = ['loss']
layer['layer_name'] = 'softmax_activation'
layer['layer_type'] = 51
setting = {'phrase_type':2}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['softmax_ret', 'y']
layer['top_nodes'] = ['acc']
layer['layer_name'] = 'accuracy'
layer['layer_type'] = 56
setting = {'phrase_type':2, 'topk':1}
layer['setting'] = setting
# gen_conf_file(net, '../bin/conv_lstm_simulation.model')
return net
idx = 0
for d_mem in [50, 70, 100]:
for _, init in enumerate([0.5, 0.3, 0.2, 0.1, 0.03, 0.01, 0.003]):
net = gen_conv_lstm(d_mem = d_mem, init = init)
net['log'] = 'log.cnnlstm.max.mr.' + str(idx)
gen_conf_file(net, '/home/wsx/exp/mr/log/run.7/cnnlstm.max.mr.model.' + str(idx))
idx += 1
# os.system("../bin/textnet ../bin/cnn_lstm_mr.model")
# os.system("../bin/textnet ../bin/conv_lstm_simulation.model > ../bin/simulation/neg.gen.train.{0}".format(d_mem))
|
StarcoderdataPython
|
3383702
|
# -*- coding: utf-8 -*-
from epicstore_api.api import EpicGamesStoreAPI
from epicstore_api.models.categories import EGSCategory
Categories = {
    'CATEGORY_ACTION': EGSCategory.CATEGORY_ACTION, # Action
    'CATEGORY_ADVENTURE': EGSCategory.CATEGORY_ADVENTURE, # Adventure
    'CATEGORY_EDITOR': EGSCategory.CATEGORY_EDITOR, # Building
    'CATEGORY_MULTIPLAYER': EGSCategory.CATEGORY_MULTIPLAYER, # Multiplayer
    'CATEGORY_PUZZLE': EGSCategory.CATEGORY_PUZZLE, # Puzzle
    'CATEGORY_RACING': EGSCategory.CATEGORY_RACING, # Racing
    'CATEGORY_RPG': EGSCategory.CATEGORY_RPG, # RPG
    'CATEGORY_SHOOTER': EGSCategory.CATEGORY_SHOOTER, # Shooter
    'CATEGORY_SINGLE_PLAYER': EGSCategory.CATEGORY_SINGLE_PLAYER, # Single player
    'CATEGORY_STRATEGY': EGSCategory.CATEGORY_STRATEGY, # Strategy
    'CATEGORY_SURVIVAL': EGSCategory.CATEGORY_SURVIVAL, # Survival
    'CATEGORY_OSX': EGSCategory.CATEGORY_OSX, # Mac OS
    'CATEGORY_WINDOWS': EGSCategory.CATEGORY_WINDOWS # Windows
}
__request = EpicGamesStoreAPI(locale='ru-RU', country='RU', session=None)
def open_info(game):
# pprint.pprint(game)
price_info = game['price']['totalPrice']
images_info = {img['type']: img['url'] for img in game['keyImages']}
developer_info = ''
for elem in game['linkedOffer']['customAttributes']:
if 'developerName' in elem['key']:
developer_info = elem['value']
info = {
'title': game['title'],
'original_price': price_info['originalPrice'] / 100,
'discount': price_info['discount'] / 100,
'discount_price': price_info['discountPrice'] / 100,
'developer_name': developer_info,
'published_date': game['linkedOffer']['effectiveDate'].split('T')[0],
'image_urls': {'Wide': images_info['DieselStoreFrontWide'],
'Tall': images_info['DieselStoreFrontTall']}}
# pprint.pprint(info)
return info
def game_find_similar(result_count=5, start=0, count=50, keywords='', categories=[]):
data = __request.fetch_catalog(start=start, count=count, product_type='games', sort_by='effectiveDate',
keywords=keywords, categories=categories)
data = data['data']['Catalog']['catalogOffers']['elements']
result = []
for index, game in enumerate(data):
if index == result_count:
break
result.append(open_info(game))
return result
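# A hypothetical driver (not part of the original module): it needs live access to the
# Epic Games Store API and simply prints a few matching action games.
if __name__ == '__main__':
    for game in game_find_similar(result_count=3, categories=[Categories['CATEGORY_ACTION']]):
        print(game['title'], game['discount_price'])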
|
StarcoderdataPython
|
4816423
|
from .mean_base import MeanBase
from .meanKronSum import MeanKronSum
|
StarcoderdataPython
|
115794
|
<filename>read_visualdl_data.py
from visualdl import LogReader
log_reader = LogReader("./log")
print("Data associated with the train loss:\n")
with log_reader.mode("train") as logger:
text_reader = logger.scalar("scalars/train_loss")
print("Train loss =", text_reader.records())
print("Ids = ", text_reader.ids())
print("Timestamps =", text_reader.timestamps())
print("\nData associated with the test loss:\n")
with log_reader.mode("test") as logger:
text_reader = logger.scalar("scalars/test_loss")
print("Test losses =", text_reader.records())
print("Ids = ", text_reader.ids())
print("Timestamps =", text_reader.timestamps())
print("\nData associated with the test accuracy:\n")
with log_reader.mode("test") as logger:
text_reader = logger.scalar("scalars/test_accuracy")
print("Test accuracy =", text_reader.records())
print("Ids = ", text_reader.ids())
print("Timestamps =", text_reader.timestamps())
|
StarcoderdataPython
|
1760055
|
<gh_stars>0
from django.shortcuts import render
from rest_framework import viewsets
from .models import Author, Book, BookInstance
from .serializers import AuthorSerializer, BookSerializer, BookInstanceSerializer
class BookViewSet(viewsets.ModelViewSet):
serializer_class = BookSerializer
queryset = Book.objects.all()
class BookInstanceViewSet(viewsets.ModelViewSet):
serializer_class = BookInstanceSerializer
queryset = BookInstance.objects.all()
def get_queryset(self):
return self.queryset.filter(status="m")
class AuthorViewSet(viewsets.ModelViewSet):
serializer_class = AuthorSerializer
queryset = Author.objects.all()
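# A minimal wiring sketch (not part of this file): these viewsets are typically registered
# with a DRF router in the project's urls.py; the route prefixes below are assumptions.
#
#     from rest_framework.routers import DefaultRouter
#     router = DefaultRouter()
#     router.register(r'books', BookViewSet)
#     router.register(r'copies', BookInstanceViewSet)
#     router.register(r'authors', AuthorViewSet)
#     urlpatterns = router.urls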
|
StarcoderdataPython
|
3384329
|
from pumapy.utilities.workspace import Workspace
from pumapy.physicsmodels.boundary_conditions import ConductivityBC
from pumapy.physicsmodels.linear_solvers import PropertySolver
import numpy as np
class Conductivity(PropertySolver):
def __init__(self, workspace, cond_map, direction, side_bc, prescribed_bc, tolerance, maxiter, solver_type, display_iter):
allowed_solvers = ['direct', 'gmres', 'cg', 'bicgstab']
super().__init__(workspace, solver_type, allowed_solvers, tolerance, maxiter, display_iter)
self.cond_map = cond_map
self.direction = direction
self.side_bc = side_bc
self.prescribed_bc = prescribed_bc
self.keff = [-1., -1., -1.]
self.solve_time = -1
self.T = np.zeros([1, 1, 1])
self.q = np.zeros([1, 1, 1, 3])
def log_input(self):
self.ws.log.log_section("Computing Conductivity")
self.ws.log.log_line("Domain Size: " + str(self.ws.get_shape()))
self.ws.log.log_line("Conductivity Map: ")
for i in range(self.cond_map.get_size()):
low, high, cond = self.cond_map.get_material(i)
self.ws.log.log_line(
" - Material " + str(i) + "[" + str(low) + "," + str(high) + "," + str(cond) + "]")
self.ws.log.log_line("Solver Type: " + str(self.solver_type))
self.ws.log.log_line("Solver Tolerance: " + str(self.tolerance))
self.ws.log.log_line("Max Iterations: " + str(self.maxiter))
self.ws.log.write_log()
def log_output(self):
self.ws.log.log_section("Finished Conductivity Calculation")
self.ws.log.log_line("Conductivity: " + "[" + str(self.keff) + "]")
self.ws.log.log_line("Solver Time: " + str(self.solve_time))
self.ws.log.write_log()
def error_check(self):
# ws checks
if not isinstance(self.ws, Workspace):
raise Exception("Workspace must be a puma.Workspace.")
if self.ws.len_x() < 3 or self.ws.len_y() < 3 or self.ws.len_z() < 3:
raise Exception("Workspace must be at least 3x3x3 for Conductivity solver.")
# direction checks
if self.direction == "x" or self.direction == "X":
self.direction = "x"
elif self.direction == "y" or self.direction == "Y":
self.direction = "y"
elif self.direction == "z" or self.direction == "Z":
self.direction = "z"
else:
raise Exception("Invalid simulation direction.")
# side_bc checks
if self.side_bc == "periodic" or self.side_bc == "Periodic" or self.side_bc == "p":
self.side_bc = "p"
elif self.side_bc == "symmetric" or self.side_bc == "Symmetric" or self.side_bc == "s":
self.side_bc = "s"
elif self.side_bc == "dirichlet" or self.side_bc == "Dirichlet" or self.side_bc == "d":
self.side_bc = "d"
else:
raise Exception("Invalid side boundary conditions.")
# prescribed_bc checks
if self.prescribed_bc is not None:
if not isinstance(self.prescribed_bc, ConductivityBC):
raise Exception("prescribed_bc must be a puma.ConductivityBC.")
if self.prescribed_bc.dirichlet.shape != self.ws.matrix.shape:
raise Exception("prescribed_bc must be of the same size as the domain.")
# rotate it
if self.direction == 'y':
self.prescribed_bc.dirichlet = self.prescribed_bc.dirichlet.transpose((1, 0, 2))
elif self.direction == 'z':
self.prescribed_bc.dirichlet = self.prescribed_bc.dirichlet.transpose((2, 1, 0))
if np.any((self.prescribed_bc.dirichlet[[0, -1]] == np.Inf)):
raise Exception("prescribed_bc must be defined on the direction sides")
|
StarcoderdataPython
|
1696801
|
<filename>tests/fields/test_charfield.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
import logging
import pytest
from django_dynamicfields.exceptions import ValidationError
from django_dynamicfields.models import CustomFieldDefs
from django_dynamicfields.fields import CharField
from demoproject.models import DocumentField, DocumentTable
logger = logging.getLogger(__name__)
@pytest.mark.parametrize("input", [DocumentField, DocumentTable])
@pytest.mark.django_db
def test_max_length(input):
CustomFieldDefs.objects.add_to_model(input,
name='custom',
init_string=json.dumps({'max_length': 1}),
default_value='a',
field_type=CharField)
c1 = input(title='1')
with pytest.raises(ValidationError):
c1.dynamic.custom = 'abc'
@pytest.mark.parametrize("input", [DocumentField, DocumentTable])
@pytest.mark.django_db
def test_min_length(input):
CustomFieldDefs.objects.add_to_model(input,
name='custom',
init_string=json.dumps({'min_length': 10}),
default_value='a',
field_type=CharField)
c1 = input(title='1')
with pytest.raises(ValidationError):
c1.dynamic.custom = 'abc'
@pytest.mark.parametrize("input", [DocumentField, DocumentTable])
@pytest.mark.django_db
def test_mandatory(input):
CustomFieldDefs.objects.add_to_model(input,
name='custom',
mandatory=True,
field_type=CharField)
c1 = input(title='1')
with pytest.raises(ValidationError):
c1.save()
|
StarcoderdataPython
|
1607466
|
#######################################################
#
# ClientInformationController.py
# Python implementation of the Class ClientInformationController
# Generated by Enterprise Architect
# Created on: 19-May-2020 6:56:00 PM
# Original author: <NAME>
#
#######################################################
from lxml import etree
from model.Event import Event
from model.ClientInformation import ClientInformation
from BasicModelInstantiate import BasicModelInstantiate
import uuid
from logging.handlers import RotatingFileHandler
import logging
from configuration.LoggingConstants import LoggingConstants
import sys
from CreateLoggerController import CreateLoggerController
logger = CreateLoggerController("ClientInformationController").getLogger()
loggingConstants = LoggingConstants()
class ClientInformationController(BasicModelInstantiate):
def __init__(self):
pass
'''
connection setup is obsolete with intstantiateClientInformationModelFromController
'''
def intstantiateClientInformationModelFromConnection(self, rawClientInformation, queue):
try:
self.m_clientInformation = ClientInformation()
argument = "initialConnection"
self.m_clientInformation.dataQueue = queue
self.modelObject = Event(argument)
self.m_clientInformation.socket = rawClientInformation[0]
self.m_clientInformation.IP = rawClientInformation[1]
self.m_clientInformation.idData = rawClientInformation[2]
self.m_clientInformation.alive = 1
self.m_clientInformation.ID = uuid.uuid1().int
super().__init__(self.m_clientInformation.idData, self.modelObject)
self.m_clientInformation.modelObject = self.modelObject
return self.m_clientInformation
except Exception as e:
logger.error('error in client information controller '+str(e))
def connectionSetup(self, client, address):
pass
'''
try:
sqliteServer = sqlite3.connect(const.DATABASE)
cursor = sqliteServer.cursor()
first_run = 1
        #create a per-client dictionary inside the main dictionary, holding data and chat arrays plus other state for the client's initial connection
current_id = 0
total_clients_connected = 0
total_clients_connected += 1
id_data = client.recv(const.STARTBUFFER)
print(id_data)
print('\n'+str(id_data))
print('\n \n')
tree = ET.fromstring(id_data)
uid = tree.get('uid')
if uid == self.bandaidUID:
return 'Bandaid'
callsign = tree[1][1].attrib['callsign']
current_id = uuid.uuid1().int
#add identifying information
self.client_dict[current_id] = {'id_data': '', 'main_data': [], 'alive': 1, 'uid': '', 'client':client, 'callsign':callsign}
self.client_dict[current_id]['id_data'] = id_data
self.client_dict[current_id]['uid'] = uid
cursor.execute(sql.INSERTNEWUSER,(str(current_id), str(uid), str(callsign)))
sqliteServer.commit()
cursor.close()
sqliteServer.close()
#print(self.client_dict)
logger.info('client connected, information is as follows initial'+ '\n'+ 'connection data:'+str(id_data)+'\n'+'current id:'+ str(current_id))
return str(first_run)+' ? '+str(total_clients_connected)+' ? '+str(id_data)+' ? '+str(current_id)
except Exception as e:
logger.warning('error in connection setup: ' + str(e))
logger.warning(id_data)
return "error"
'''
#rawClientInformation = ['abc', 'def', b'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n<event version="2.0" uid="ANDROID-359975090666199" type="a-f-G-U-C" time="2020-05-25T12:23:13.288Z" start="2020-05-25T12:23:13.288Z" stale="2020-05-25T12:29:28.288Z" how="h-e"><point lat="43.855596" lon="-66.10805" hae="20.395709421887993" ce="62.1" le="9999999.0"/><detail><takv os="28" version="3.12.0-45691.45691-CIV" device="SAMSUNG SM-G950W" platform="ATAK-CIV"/><contact endpoint="*:-1:stcp" callsign="SUMMER"/><uid Droid="SUMMER"/><precisionlocation altsrc="GPS" geopointsrc="GPS"/><__group role="Sniper" name="Cyan"/><status battery="4"/><track course="191.76600028243948" speed="0.0"/></detail></event>']
#ClientInformationController().intstantiateClientInformationModelFromConnection(rawClientInformation)
|
StarcoderdataPython
|
3218483
|
from django.contrib import admin
# Register your models here.
# We don't need to register the User model here since Django's admin already provides it.
|
StarcoderdataPython
|
1661964
|
import datetime
import logging
import random
import re
from zoneinfo import ZoneInfo
import pytz
import requests
from telegram import Update
import database
import rules_of_acquisition
import main
import git_promotions
from ranks import ranks
from weather_command import weather_command
logger = logging.getLogger(__name__)
def message_handler(update: Update, context=None):
del context
database.update_chat_in_db(update)
database.update_user_in_db(update)
if update.message is not None and update.message.text is not None:
if update.message.reply_to_message is not None:
reply_handler(update)
elif update.message.text == "1337":
leet_command(update)
elif update.message.text.startswith((".", "/", "!")):
command_handler(update)
elif re.search(r'..*\s.vai\s..*', update.message.text) is not None:
or_command(update)
elif update.message.text.lower() == "huutista":
update.message.reply_text('...joka tuutista! 😂')
else:
low_probability_reply(update)
def command_handler(update):
incoming_message_text = update.message.text
chat = database.get_chat(update.effective_chat.id)
is_ruoka_command = (incoming_message_text[1:] == "ruoka")
is_space_command = (incoming_message_text[1:] == "space")
is_user_command = (incoming_message_text[1:] == "käyttäjät")
is_kuulutus_command = incoming_message_text[1:].startswith("kuulutus")
is_aika_command = (incoming_message_text[1:] == "aika")
is_rules_of_acquisition = (incoming_message_text[1:].startswith("sääntö"))
is_weather_command = incoming_message_text[1:].startswith("sää")
is_leaderboard_command = (incoming_message_text[1:].startswith("tulostaulu"))
if update.message.reply_to_message is not None:
reply_handler(update)
elif is_ruoka_command and chat.ruoka_enabled:
ruoka_command(update)
elif is_space_command and chat.space_enabled:
space_command(update)
elif is_user_command:
        users_command(update)  # TODO: put this behind an admin toggle
elif is_kuulutus_command:
broadcast_toggle_command(update)
elif is_aika_command and chat.time_enabled:
time_command(update)
elif is_rules_of_acquisition:
rules_of_acquisition_command(update)
elif is_weather_command and chat.weather_enabled:
weather_command(update)
elif is_leaderboard_command:
leaderboard_command(update)
def reply_handler(update):
if update.message.reply_to_message.from_user.is_bot:
# Reply to bot, so most probably to me! (TODO: Figure out my own ID and use that instead)
if update.message.reply_to_message.text.startswith("Git käyttäjä "):
git_promotions.process_entities(update)
def leet_command(update: Update):
now = datetime.datetime.now(pytz.timezone('Europe/Helsinki'))
chat = database.get_chat(update.effective_chat.id)
sender = database.get_chat_member(chat_id=update.effective_chat.id,
tg_user_id=update.effective_user.id)
if chat.latest_leet != now.date() and \
now.hour == 13 and \
now.minute == 37:
chat.latest_leet = now.date()
chat.save()
reply_text = promote(sender)
else:
reply_text = demote(sender)
update.message.reply_text(reply_text, quote=False)
def promote(sender):
if sender.rank < len(ranks) - 1:
sender.rank += 1
up = u"\U0001F53C"
reply_text = "Asento! " + str(sender.tg_user) + " ansaitsi ylennyksen arvoon " + \
ranks[sender.rank] + "! " + up + " Lepo. "
else:
sender.prestige += 1
reply_text = "Asento! " + str(sender.tg_user) + \
" on saavuttanut jo korkeimman mahdollisen sotilasarvon! Näin ollen " + str(sender.tg_user) + \
" lähtee uudelle kierrokselle. Onneksi olkoon! " + \
"Juuri päättynyt kierros oli hänen " + str(sender.prestige) + ". Lepo. "
sender.rank = 0
sender.save()
return reply_text
def demote(sender):
if sender.rank > 0:
sender.rank -= 1
down = u"\U0001F53D"
reply_text = "Alokasvirhe! " + str(sender.tg_user) + " alennettiin arvoon " + \
ranks[sender.rank] + ". " + down
sender.save()
return reply_text
def ruoka_command(update: Update) -> None:
"""
Send a message when the command /ruoka is issued.
Returns link to page in https://www.soppa365.fi
"""
with open("recipes.txt", "r") as recipe_file:
recipes = recipe_file.readlines()
reply_text = random.choice(recipes)
update.message.reply_text(reply_text, quote=False)
def space_command(update: Update) -> None:
"""
Send a message when the command /space is issued.
Queries next spacex launch time from public API:
https://github.com/r-spacex/SpaceX-API
"""
helsinki_tz = ZoneInfo('Europe/Helsinki')
try:
r = requests.get('https://api.spacexdata.com/v4/launches/next')
r = r.json()
name = r.get('name', None)
launch_date = r.get('date_utc', None)
waiting_time = "T-: "
if launch_date:
launch_date = datetime.datetime.fromisoformat(launch_date[:-1])
delta = launch_date - datetime.datetime.now()
days, hours, minutes = delta.days, delta.seconds // 3600, delta.seconds // 60 % 60
if days > 0:
waiting_time += "{} päivää, ".format(days)
if hours > 0:
waiting_time += "{} tuntia ja ".format(hours)
if minutes > 0:
waiting_time += "{} minuuttia.".format(minutes)
launch_date = launch_date.astimezone(helsinki_tz).strftime('%d.%m.%Y klo %H:%M:%S (Helsinki)')
reply_text = 'Seuraava SpaceX laukaisu {}:\n{}\n{}\n'.format(name, launch_date, waiting_time)
except requests.exceptions.RequestException:
reply_text = 'Ei tietoa seuraavasta lähdöstä :( API ehkä rikki.'
update.message.reply_text(reply_text, quote=False)
def users_command(update: Update):
chat_members = database.get_chat_members_for_chat(chat_id=update.effective_chat.id)
reply_text = ""
# code in place if we want to get the chat name and use it
# chat_name = str(update.effective_chat.title)
# if chat_name != "None":
# reply_text = chat_name + " -ryhmän käyttäjät " + "\U0001F913 " + "\n" + "\n"
# else:
# reply_text = "Käyttäjät " + "\U0001F913 " + "\n" + "\n"
reply_text = "*Käyttäjät* " + "\U0001F913 " + "\n" + "\n" + \
"*Nimi* ⌇ Arvo ⌇ Kunnia ⌇ Viestit" + "\n" # nerd face emoji
for chat_member in chat_members:
reply_text += "*" + str(chat_member) + " ⌇*" + " " + \
str(chat_member.rank) + " ⌇ " + \
str(chat_member.prestige) + " ⌇ " + \
str(chat_member.message_count) + "\n"
update.message.reply_markdown(reply_text, quote=False)
def broadcast_toggle_command(update):
chat = database.get_chat(chat_id=update.effective_chat.id)
if update.message.text.casefold() == "/kuulutus on".casefold():
chat.broadcast_enabled = True
update.message.reply_text("Kuulutukset ovat nyt päällä tässä ryhmässä.", quote=False)
elif update.message.text.casefold() == "/kuulutus off".casefold():
chat.broadcast_enabled = False
update.message.reply_text("Kuulutukset ovat nyt pois päältä.", quote=False)
else:
update.message.reply_text("Käyttö: \n"
"'/kuulutus on' - Kytkee kuulutukset päälle \n"
"'/kuulutus off' - Kytkee kuulutukset pois päältä\n")
if chat.broadcast_enabled:
update.message.reply_text("Tällä hetkellä kuulutukset ovat päällä.", quote=False)
else:
update.message.reply_text("Tällä hetkellä kuulutukset ovat pois päältä.", quote=False)
chat.save()
async def broadcast_command(update):
message = update.message.text
await main.broadcast(update.bot, message)
def time_command(update: Update):
    date_time_obj = datetime.datetime.now(pytz.timezone('Europe/Helsinki')).strftime('%H:%M:%S.%f')[:-4]
time_stamps_str = str(date_time_obj)
reply_text = '\U0001F551 ' + time_stamps_str
update.message.reply_text(reply_text, quote=False)
def or_command(update):
options = re.split(r'\s.vai\s', update.message.text)
options = [i.strip() for i in options]
reply = random.choice(options)
reply = reply.rstrip("?")
if reply and reply is not None:
update.message.reply_text(reply)
def rules_of_acquisition_command(update):
rule_number = update.message.text.split(" ")[1]
try:
update.message.reply_text(rules_of_acquisition.dictionary[int(rule_number)], quote=False)
except (KeyError, ValueError) as e:
logger.info("Rule not found with key: \"" + str(e) + "\" Sending random rule instead.")
random_rule_number = random.choice(list(rules_of_acquisition.dictionary))
random_rule = rules_of_acquisition.dictionary[random_rule_number]
update.message.reply_text(str(random_rule_number) + ". " + random_rule, quote=False)
def leaderboard_command(update):
# TODO
pass
def low_probability_reply(update, integer=0): # added int argument for unit testing
if integer == 0:
        random_int = random.randint(1, 10000)  # 0.01 % probability
else:
random_int = integer
if random_int == 1:
reply_text = "Vaikuttaa siltä että olette todella onnekas " + "\U0001F340" # clover emoji
update.message.reply_text(reply_text, quote=True)
|
StarcoderdataPython
|
1741082
|
<gh_stars>0
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
#current related damage rate
a_I = 1.23 * 10**(-17) #A/cm
k_0I = 1.2 * 10**(13)*60 #1/min
E_I = 1.11 * 1.6 * 10**(-19) #j
b = 3.07*10**(-18) #A/cm
t_0 = 1 #min
k_B = 1.38064852 * 10**(-23) # Boltzmann constant in J/K
t, phi, T, T_2, T_3, T_4 = np.genfromtxt('Daten/daten.txt', unpack=True)
t_5, T_5 = np.genfromtxt('Daten/tdata_1.txt', unpack=True)
#t_1, T_1 = np.genfromtxt('Daten/tdata.txt', unpack=True)
def tau_I(T): #time constant
return 1/(k_0I* np.exp(-E_I/(k_B*T)))
#def gett_I(t, tau_I0, T):
# timediff_I = np.zeros(len(t))
# timediff_I = np.ediff1d(t, to_begin=0)
# tau_I0 = np.roll(tau_I0, shift=1) # shifting array by one to the right
# tau_I1 = tau_I(T)
# timediff_I /= (tau_I0 + tau_I1)/2
# t_I = np.zeros(len(t))
# for i in range(0, len(t)):
# t_I[i] = np.sum(timediff_I[0:i+1])
# return t_I
def a_0(T): #part of the longterm annealing
return -8.9*10**(-17) + 4.6*10**(-14) * 1/T
def damage(t, T):
#tau_I0 = tau_I(T) #tau_I0 = Array [egal, tau_I(T[0]), tau_I(T[1]),...]
#t_I = gett_I(t, tau_I0, T) #damage rate
return (a_I * np.exp(-t/tau_I(T)) + a_0(T) - b * np.log((t+10**(-50))/t_0))
fig, ax1 = plt.subplots()
plt.semilogx(t_5/60, T_5, 'r.', label='Temperatur', markersize=6)
ax1.set_ylabel(r"Temperatur / $^{\circ}$C", color = 'red')
ax1.tick_params('y',colors='red')
ax1.set_xlabel("Zeit / min")
ax1.legend(loc='upper left')
ax2 = ax1.twinx()
plt.semilogx(t_5/60, damage(t_5/60, T_5+273.15), 'b.', label=r'$\Delta N_{\mathrm{eff}}$ von R1', markersize=6)
#plt.semilogx(t_2/60, N_eff(t_2/60, 5*10**(15), 80+273.15), 'k--', label=r'$\Delta N_{\mathrm{eff}}$@80°C', Markersize=6)
ax2.set_ylabel(r"$\alpha $ /$\mathrm{A cm^{-1}} $",color='blue')
plt.ylim(0, 1*10**(-16))
ax2.tick_params('y',colors='blue')
ax1.grid()
ax2.legend(loc='best')
plt.xlabel(r'Zeit / $\mathrm{min}$')
plt.savefig('build/damage_ohne_korrektur.pdf')
#plt.show()
plt.clf()
#t = np.logspace(-1, 5, 30, endpoint=True)
#plt.gcf().subplots_adjust(bottom=0.18)
#plt.semilogx(t, damage(t, 60+273.15), 'r.', label=r'$\alpha @60°\mathrm{C}$', Markersize=6)
#plt.semilogx(t, damage(t, 80+273.15), 'b.', label=r'$\alpha @80°\mathrm{C}$', Markersize=6)
#plt.legend()
#plt.grid()
#
#plt.xlabel(r'Zeit / $\mathrm{min}$')
#plt.ylabel(r"$\alpha $ /$\mathrm{A cm^{-1}} $")
#plt.ylim(0, 10*10**(-17))
#plt.savefig('build/damage.pdf')
#plt.clf()
|
StarcoderdataPython
|
1642025
|
<reponame>tkf/compapp
from compapp import Computer
class SimpleApp(Computer):
x = 1.0
y = 2.0
def run(self):
self.results.sum = self.x + self.y
if __name__ == '__main__':
app = SimpleApp.cli()
|
StarcoderdataPython
|
34894
|
# Copyright (C) 2017 <NAME> and <NAME>
#
# This file is part of WESTPA.
#
# WESTPA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WESTPA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WESTPA. If not, see <http://www.gnu.org/licenses/>.
'''
Function(s) for the postanalysis toolkit
'''
import logging
log = logging.getLogger(__name__)
import _reweight
from _reweight import (stats_process, reweight_for_c) #@UnresolvedImport
from matrix import FluxMatrix
|
StarcoderdataPython
|
1677767
|
from abc import ABC, abstractmethod
import os
from nltk import pos_tag
import gzip
from a3t.dataset.download import download_ppdb, download_artifacts
class Transformation(ABC):
def __init__(self, length_preserving=False):
"""
A default init function.
"""
super().__init__()
self.length_preserving = length_preserving
@abstractmethod
def get_pos(self, ipt):
# get matched positions in input
pass
@abstractmethod
def transformer(self, ipt, start_pos, end_pos):
# transformer for a segment of input
pass
def sub_transformer(self, ipt, start_pos, end_pos):
# substring transformer for length preserving transformation
assert self.length_preserving
class Sub(Transformation):
def __init__(self, use_fewer_sub, dataset_home="/tmp/.A3T", ppdb_type="s"):
"""
Init Sub transformation
Sub substitutes one word with its synonym with a requirement that they have the same pos_tag.
:param use_fewer_sub: if True, one word can only be substituted by one of its synonyms.
:param dataset_home: the home of the dataset
        :param ppdb_type: the type of the PPDB synonyms; can be one of ["s", "m", "l", "xl", "xxl", "xxxl"]
"""
assert ppdb_type in ["s", "m", "l", "xl", "xxl", "xxxl"]
self.use_fewer_sub = use_fewer_sub
self.synonym_dict = {}
self.synonym_dict_pos_tag = {}
pddb_path = os.path.join(dataset_home, "ppdb")
if not os.path.exists(pddb_path):
os.mkdir(pddb_path)
pddb_file = os.path.join(pddb_path, "ppdb-2.0-%s-lexical" % ppdb_type)
if not os.path.exists(pddb_file):
download_ppdb(pddb_path, ppdb_type)
with gzip.open(pddb_file, 'rb') as f:
lines = f.readlines()
for line in lines:
line = line.decode()
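                # Each PPDB 2.0 lexical entry is assumed to look like
                # "[POS] ||| word ||| paraphrase ||| ...": keep the POS tag
                # (brackets stripped) and record the word pair in both directions.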
tmp = line.strip().split(" ||| ")
postag, x, y = tmp[0][1:-1], tmp[1], tmp[2]
self.synonym_dict_add_str(x, y, postag)
self.synonym_dict_add_str(y, x, postag)
print("Compute synonym_dict success!")
super(Sub, self).__init__(True)
def synonym_dict_add_str(self, x, y, postag):
if x not in self.synonym_dict:
self.synonym_dict[x] = [y]
self.synonym_dict_pos_tag[x] = [postag]
elif not self.use_fewer_sub:
self.synonym_dict[x].append(y)
self.synonym_dict_pos_tag[x].append(postag)
def get_pos(self, ipt):
ipt_pos_tag = pos_tag(ipt)
ret = []
for (start_pos, (x, pos_tagging)) in enumerate(ipt_pos_tag):
if x in self.synonym_dict:
if any(pos_tagging == t for t in self.synonym_dict_pos_tag[x]):
ret.append((start_pos, start_pos + 1))
return ret
def transformer(self, ipt, start_pos, end_pos):
ipt_pos_tag = pos_tag(ipt)
x = ipt[start_pos]
pos_tagging = ipt_pos_tag[start_pos][1]
for (w, t) in zip(self.synonym_dict[x], self.synonym_dict_pos_tag[x]):
if t == pos_tagging:
new_ipt = ipt[:start_pos] + [w] + ipt[end_pos:]
yield new_ipt
def sub_transformer(self, ipt, start_pos, end_pos):
ipt_pos_tag = pos_tag(ipt)
x = ipt[start_pos]
pos_tagging = ipt_pos_tag[start_pos][1]
for (w, t) in zip(self.synonym_dict[x], self.synonym_dict_pos_tag[x]):
if t == pos_tagging:
yield [w]
class SubChar(Transformation):
def __init__(self, use_fewer_sub, dataset_home="/tmp/.A3T"):
"""
Init SubChar transformation
        SubChar substitutes one char with other chars. The substitution relation is one direction (x => y, but not y => x).
        :param use_fewer_sub: if True, one char can only be substituted by one of its synonyms.
        :param dataset_home: the home of the dataset
"""
download_artifacts(dataset_home)
a3t_sst2test_file = os.path.join(dataset_home, "A3T-artifacts", "dataset", "en.key")
lines = open(a3t_sst2test_file).readlines()
self.synonym_dict = {}
for line in lines:
tmp = line.strip().split()
x, y = tmp[0], tmp[1]
if x not in self.synonym_dict:
self.synonym_dict[x] = [y]
elif not use_fewer_sub:
self.synonym_dict[x].append(y)
print("Compute set success!")
super(SubChar, self).__init__(True)
def get_pos(self, ipt):
ret = []
for (start_pos, x) in enumerate(ipt):
if x in self.synonym_dict:
ret.append((start_pos, start_pos + 1))
return ret
def transformer(self, ipt, start_pos, end_pos):
x = ipt[start_pos]
for w in self.synonym_dict[x]:
new_ipt = ipt[:start_pos] + [w] + ipt[end_pos:]
yield new_ipt
def sub_transformer(self, ipt, start_pos, end_pos):
x = ipt[start_pos]
for w in self.synonym_dict[x]:
yield [w]
class Del(Transformation):
def __init__(self, stop_words=None):
"""
Init Del transformation
Del removes one stop word.
:param stop_words: stop words to delete
"""
if stop_words is None:
stop_words = {"a", "and", "the", "of", "to"}
self.stop_words = stop_words
super(Del, self).__init__()
def get_pos(self, ipt):
ret = []
for (start_pos, x) in enumerate(ipt):
if x in self.stop_words:
ret.append((start_pos, start_pos + 1))
return ret
def transformer(self, ipt, start_pos, end_pos):
new_ipt = ipt[:start_pos] + ipt[end_pos:]
yield new_ipt
class Ins(Transformation):
def __init__(self):
"""
Init Ins transformation
        Ins duplicates a word, inserting the copy right behind it.
"""
super(Ins, self).__init__()
def get_pos(self, ipt):
return [(x, x + 1) for x in range(len(ipt))]
def transformer(self, ipt, start_pos, end_pos):
new_ipt = ipt[:start_pos] + [ipt[start_pos], ipt[start_pos]] + ipt[end_pos:]
yield new_ipt
class InsChar(Transformation):
def __init__(self, use_fewer_sub, dataset_home="/tmp/.A3T"):
"""
        Init InsChar transformation
        InsChar keeps the original char and inserts a confusable char right behind it. The insertion relation is one direction (x => y, but not y => x).
        :param use_fewer_sub: if True, only one candidate char can be inserted behind a given char.
        :param dataset_home: the home of the dataset
"""
download_artifacts(dataset_home)
a3t_sst2test_file = os.path.join(dataset_home, "A3T-artifacts", "dataset", "en.key")
lines = open(a3t_sst2test_file).readlines()
self.synonym_dict = {}
for line in lines:
tmp = line.strip().split()
x, y = tmp[0], tmp[1]
if x not in self.synonym_dict:
self.synonym_dict[x] = [y]
elif not use_fewer_sub:
self.synonym_dict[x].append(y)
print("Compute set success!")
super(InsChar, self).__init__()
def get_pos(self, ipt):
ret = []
for (start_pos, x) in enumerate(ipt):
if x in self.synonym_dict:
ret.append((start_pos, start_pos + 1))
return ret
def transformer(self, ipt, start_pos, end_pos):
x = ipt[start_pos]
for w in self.synonym_dict[x]:
new_ipt = ipt[:start_pos] + [x, w] + ipt[end_pos:]
yield new_ipt
class Swap(Transformation):
def __init__(self):
"""
Init Swap transformation
Swap two adjacent tokens.
"""
super(Swap, self).__init__(True)
def get_pos(self, ipt):
return [(x, x + 2) for x in range(len(ipt) - 1)]
def transformer(self, ipt, start_pos, end_pos):
new_ipt = ipt[:start_pos] + [ipt[start_pos + 1], ipt[start_pos]] + ipt[end_pos:]
yield new_ipt
def sub_transformer(self, ipt, start_pos, end_pos):
yield [ipt[start_pos + 1], ipt[start_pos]]
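# --- Usage sketch (not part of the original module) ---
# A minimal demonstration of the download-free transformations on a tokenized
# sentence; Sub, SubChar and InsChar are omitted here because they fetch
# external resources (PPDB / A3T artifacts) in their constructors.
if __name__ == "__main__":
    tokens = "the cat sat on the mat".split()
    for name, t in [("Del", Del()), ("Ins", Ins()), ("Swap", Swap())]:
        positions = t.get_pos(tokens)
        if positions:
            start, end = positions[0]
            print(name, "->", next(t.transformer(tokens, start, end)))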
|
StarcoderdataPython
|
4804754
|
<gh_stars>10-100
__all__ = ['Addition',
'Concat',
'Division',
'FloorDivision',
'Modulo',
'Multiplication',
'Power',
'StrMultiplication',
'Subtraction'
]
from boa3.model.operation.binary.arithmetic.addition import Addition
from boa3.model.operation.binary.arithmetic.concat import Concat
from boa3.model.operation.binary.arithmetic.division import Division
from boa3.model.operation.binary.arithmetic.floordivision import FloorDivision
from boa3.model.operation.binary.arithmetic.modulo import Modulo
from boa3.model.operation.binary.arithmetic.multiplication import Multiplication
from boa3.model.operation.binary.arithmetic.power import Power
from boa3.model.operation.binary.arithmetic.strmultiplication import StrMultiplication
from boa3.model.operation.binary.arithmetic.subtraction import Subtraction
|
StarcoderdataPython
|
37371
|
<gh_stars>10-100
from anyrun.client import AnyRunClient, AnyRunException
__version__ = '0.1'
__all__ = ['AnyRunClient', 'AnyRunException']
|
StarcoderdataPython
|
127251
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 10 13:01:49 2020
@author: saksh
"""
import numpy as np
np.random.seed(1337)
import tensorflow as tf
import pandas as pd
from statsmodels.tsa.api import VAR
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.svm import SVR
from sklearn.preprocessing import MinMaxScaler
from matplotlib.pyplot import *
from datetime import datetime
"""
Calculate VAR residuals. Information criterion for optimal lag selection: AIC
"""
def var_resids(label1, label2, data_cache):
model = VAR(data_cache[[label1,label2]])
model_fit = model.fit(maxlags = 10, ic = 'aic', trend = 'c')
return model_fit.resid[label1]
"""
Data split = 80-10-10
MinMaxScaler applied to both input and output, range = [-1, 1]
LSTM model uses windowing of 3 input steps
"""
def make_datasets(df, target_column = True, train_size = 0.9, model_name = 'dense', input_steps = 3):
if target_column:
data = df.iloc[:, :-1]
data = np.array(data, dtype = np.float32)
targets = np.array(df.iloc[:,-1], dtype = np.float32)
X_train, X_test, y_train, y_test = train_test_split(data,targets, train_size = train_size, shuffle = False)
input_scaler = MinMaxScaler(feature_range = (-1,1))
input_scaler.fit(X_train)
X_train = input_scaler.transform(X_train)
X_test = input_scaler.transform(X_test)
y_train = y_train.reshape(len(y_train), 1)
y_test = y_test.reshape(len(y_test), 1)
output_scaler = MinMaxScaler(feature_range = (-1,1))
output_scaler.fit(y_train)
y_train = output_scaler.transform(y_train)
y_test = output_scaler.transform(y_test)
if model_name == 'dense':
return X_train, X_test, y_train, y_test, output_scaler
elif model_name == 'lstm':
y_train = y_train.reshape(len(y_train), 1)
input_ds_train = np.hstack((X_train, y_train))
X_train, y_train = split_sequences(input_ds_train, input_steps)
y_test = y_test.reshape(len(y_test), 1)
input_ds_test = np.hstack((X_test, y_test))
X_test, y_test = split_sequences(input_ds_test, input_steps)
return X_train, X_test, y_train, y_test, output_scaler
else:
data = np.array(df, dtype = np.float32)
X_train, X_test = train_test_split(data, train_size = train_size)
return X_train, X_test
"""
Early stopping is defined but not enabled by default; enable it by adding early_stopping to the callbacks list
Inputs are batched: batch size = 32
Provides TensorBoard logging via a callback
"""
def nn_model_compile(model, X_train_data, y_train_data, patience = 2, MAX_EPOCHS = 20):
tf.keras.backend.clear_session()
model.compile(optimizer = tf.optimizers.SGD(), loss = tf.losses.MeanSquaredError(), metrics = [tf.metrics.RootMeanSquaredError()])
logdir = "logs\\fit\\" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, mode='min')
final_res = model.fit(x = X_train_data, y = y_train_data, validation_split = 0.1, epochs = MAX_EPOCHS, batch_size = 32, callbacks=[tensorboard_callback])
return final_res
"""
epsilon = 0.0001
"""
def svr_model(X_train, y_train, param_grid):
model = GridSearchCV(SVR(epsilon = 0.0001), param_grid, return_train_score=True)
model.fit(X_train, y_train)
return model
def split_sequences(input_arr, n_steps):
X, y = list(), list()
for i in range(len(input_arr)):
end_ix = i + n_steps
# check if we are beyond the dataset
if end_ix > len(input_arr):
break
# gather input and output parts of the pattern
_x, _y = input_arr[i:end_ix, :-1], input_arr[end_ix-1, -1]
X.append(_x)
y.append(_y)
return np.array(X), np.array(y)
def make_save_plot(index, y_test, y_pred, figsize = (6, 6), xlabel = "Date", ylabel = "Market Volatility (Normalized Data)", y_lim = [0.0000, 0.0015], filepath = "default.svg"):
df_plot = pd.DataFrame(index = index[-len(y_test):])
df_plot['target_variable'] = y_test
df_plot['predictions'] = np.abs(y_pred)
fig, ax = subplots()
df_plot.plot(figsize=figsize, ax=ax, ylabel = ylabel, xlabel = xlabel)
ax.legend()
ax.set_ylim(y_lim)
savefig(filepath, transparent = True, bbox_inches = 'tight')
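# --- Usage sketch (not part of the original script) ---
# Illustrates the windowing done by split_sequences: with n_steps = 3 each
# sample holds 3 consecutive rows of features, and the target is taken from
# the last row of the window.
if __name__ == "__main__":
    demo = np.arange(20, dtype=np.float32).reshape(5, 4)  # 3 feature columns + 1 target column
    X_demo, y_demo = split_sequences(demo, 3)
    print(X_demo.shape, y_demo.shape)  # expected: (3, 3, 3) (3,)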
|
StarcoderdataPython
|
3296145
|
import calendar
import time
import unittest
from unittest.mock import call
from unittest.mock import patch
from unittest.mock import sentinel
import httpie_ovh_auth
import httpie.models
class TestSuite(unittest.TestCase):
def test_signature(self):
"""Check signature generation."""
# time_o without any timezone info
time_o = time.strptime("2020-02-10 +0000", "%Y-%m-%d %z")
# time_o interpreted as UTC to provide epoch
timestamp = calendar.timegm(time_o)
def sign(method):
return httpie_ovh_auth.sign(
"secret_key",
"consumer_key",
method,
"https://url.com/path",
'{"json": "content"}',
timestamp,
)
signature1 = sign("get")
signature2 = sign("GET")
# check signature
assert (
signature1 == "cfcd791f25a9786eda88926c7a7ad68580e6bb45"
), "signature differs from expected value"
# check method serialization
        assert signature1 == signature2, "signature must not be sensitive to method case"
return
@patch("os.getenv")
def test_env(self, GetEnv):
"""Check that plugin init load configuration from environment."""
GetEnv.return_value = None
p = httpie_ovh_auth.OvhAuthPlugin()
GetEnv.assert_has_calls(
[
call("OVH_CLIENT_ID", None),
call("OVH_CLIENT_SECRET", None),
call("OVH_CONSUMER_KEY", None),
],
any_order=True,
)
@patch("os.getenv")
def test_env_empty(self, GetEnv):
"""Check that an exception is raised if environment variables
are empty. Check that message provides environment variables
names."""
GetEnv.return_value = None
p = httpie_ovh_auth.OvhAuthPlugin()
with (self.assertRaises(httpie_ovh_auth.OvhAuthException)) as e:
p.get_auth()
assert "OVH_CLIENT_ID" in str(e.exception)
assert "OVH_CLIENT_SECRET" in str(e.exception)
assert "OVH_CONSUMER_KEY" in str(e.exception)
@patch("httpie_ovh_auth.OvhAuth")
@patch("os.getenv")
def test_env_not_empty(self, GetEnv, OvhAuth):
"""Check that OvhAuth is correctly built if credentials are
present."""
def side_effect(arg1, arg2):
return getattr(sentinel, arg1)
GetEnv.side_effect = side_effect
p = httpie_ovh_auth.OvhAuthPlugin()
p.get_auth()
OvhAuth.assert_called_once_with(
getattr(sentinel, "OVH_CLIENT_ID"),
getattr(sentinel, "OVH_CLIENT_SECRET"),
getattr(sentinel, "OVH_CONSUMER_KEY"),
)
@patch("time.time", return_value=12345)
@patch("httpie_ovh_auth.sign", return_value="XX_SIGN_XX")
@patch("httpie.models.HTTPRequest")
def test_ovh_auth(self, request, sign, time):
"""Test that headers are filled with expected values by OvhAuth
implementation."""
client_id = sentinel.client_id
client_secret = sentinel.client_secret
consumer_key = sentinel.consumer_key
o = httpie_ovh_auth.OvhAuth(client_id, client_secret, consumer_key)
request.headers = {}
o(request)
sign.assert_called_once_with(
client_secret,
consumer_key,
request.method,
request.url,
request.body,
time.return_value,
)
assert request.headers["X-Ovh-Application"] == client_id
assert request.headers["X-Ovh-Consumer"] == consumer_key
assert request.headers["X-Ovh-Timestamp"] == str(time.return_value)
assert request.headers["X-Ovh-Signature"] == "$1$" + sign.return_value
|
StarcoderdataPython
|
1685149
|
# Generated by Django 3.0.7 on 2020-08-10 08:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('leave', '0004_auto_20200810_1107'),
]
operations = [
migrations.AlterField(
model_name='leaveplan',
name='approval_status',
field=models.CharField(choices=[('Pending', 'Pending'), ('Approved', 'Approved'), ('Rejected', 'Rejected')], default='Pending', max_length=8),
),
]
|
StarcoderdataPython
|
1763857
|
<filename>shapenet_train.py
import argparse
import json
import copy
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from datasets.shapenet import build_shapenet
from models.nerf import build_nerf
from models.rendering import get_rays_shapenet, sample_points, volume_render
import wandb
from shapenet_test import test
from rich import print
from rich import pretty
pretty.install()
from rich import traceback
traceback.install()
from torchvision.utils import make_grid
from utils.shape_video import create_360_video
from pathlib import Path
import numpy as np
import random
SEED=42
torch.manual_seed(SEED)
random.seed(SEED)
np.random.seed(SEED)
import logging
def inner_loop(args, model, optim, imgs, poses, hwf, bound, num_samples, raybatch_size, inner_steps,
device, idx, log_round=False, setup="train/"):
"""
train the inner model for a specified number of iterations
"""
pixels = imgs.reshape(-1, 3)
rays_o, rays_d = get_rays_shapenet(hwf, poses)
rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)
num_rays = rays_d.shape[0]
logs = dict()
for i in range(1, inner_steps+1):
if log_round and ((i % args.tto_log_steps == 0) or (i == inner_steps) or (i==1)):
with torch.no_grad():
scene_psnr = report_result(model, imgs,
poses, hwf,
bound, num_samples, raybatch_size)
vid_frames = create_360_video(args, model, hwf, bound,
device,
idx, args.savedir)
logs[setup + "scene_psnr tto_step=" + str(i)] = scene_psnr
logs[setup + "vid_post tto_step=" + str(i)] = wandb.Video(
vid_frames.transpose(0, 3, 1, 2), fps=30,
format="mp4")
indices = torch.randint(num_rays, size=[raybatch_size])
raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]
pixelbatch = pixels[indices]
t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],
num_samples, perturb=True)
optim.zero_grad()
rgbs, sigmas = model(xyz)
colors = volume_render(rgbs, sigmas, t_vals, white_bkgd=True)
loss = F.mse_loss(colors, pixelbatch)
loss.backward()
optim.step()
return logs
def train_meta(args, epoch_idx, meta_model, meta_optim, data_loader, device):
"""
train the meta_model for one epoch using reptile meta learning
https://arxiv.org/abs/1803.02999
"""
step = (epoch_idx - 1) * len(data_loader)
avg_psnr = 0
psnr_accum = dict()
for idx,(imgs, poses, hwf, bound) in enumerate(data_loader):
log_round = (step % args.log_interval == 0)
imgs, poses, hwf, bound = imgs.to(device), poses.to(device), hwf.to(device), bound.to(device)
imgs, poses, hwf, bound = imgs.squeeze(), poses.squeeze(), hwf.squeeze(), bound.squeeze()
meta_optim.zero_grad()
inner_model = copy.deepcopy(meta_model)
inner_optim = torch.optim.SGD(inner_model.parameters(), args.inner_lr)
logs=inner_loop(args, inner_model, inner_optim, imgs, poses,
hwf, bound, args.num_samples,
args.train_batchsize, args.inner_steps, device=device, idx=idx, log_round=log_round,
setup="train/")
with torch.no_grad():
for meta_param, inner_param in zip(meta_model.parameters(), inner_model.parameters()):
meta_param.grad = meta_param - inner_param
meta_optim.step()
if log_round:
avg_psnr += logs["train/scene_psnr tto_step=" + str(args.inner_steps)]
# logs["train/gen_model_mse_loss"] = float(loss)
logs = {**logs, "train_step": step,
"train/imgs": wandb.Image(
make_grid(imgs.permute(0, 3, 1, 2)))}
wandb.log(logs)
for (key, val) in logs.items():
if "psnr" in key:
if psnr_accum.get(key) is None:
psnr_accum[key] = 0
psnr_accum[key] += val
step+=1
psnr_mean = dict()
for (key, val) in psnr_accum.items():
psnr_mean[key + "_mean"] = val / len(data_loader)
avg_psnr /= len(data_loader)
    wandb.log({**psnr_mean, "train/avg_psnr": avg_psnr, "epoch_step": epoch_idx})
def report_result(model, imgs, poses, hwf, bound, num_samples, raybatch_size):
"""
report view-synthesis result on heldout views
"""
ray_origins, ray_directions = get_rays_shapenet(hwf, poses)
view_psnrs = []
for img, rays_o, rays_d in zip(imgs, ray_origins, ray_directions):
rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)
t_vals, xyz = sample_points(rays_o, rays_d, bound[0], bound[1],
num_samples, perturb=False)
synth = []
num_rays = rays_d.shape[0]
with torch.no_grad():
for i in range(0, num_rays, raybatch_size):
rgbs_batch, sigmas_batch = model(xyz[i:i+raybatch_size])
color_batch = volume_render(rgbs_batch, sigmas_batch,
t_vals[i:i+raybatch_size],
white_bkgd=True)
synth.append(color_batch)
synth = torch.cat(synth, dim=0).reshape_as(img)
error = F.mse_loss(img, synth)
psnr = -10*torch.log10(error)
view_psnrs.append(psnr)
scene_psnr = torch.stack(view_psnrs).mean()
return scene_psnr
def val_meta(args, epoch_idx, model, val_loader, device):
"""
validate the meta trained model for few-shot view synthesis
"""
meta_trained_state = model.state_dict()
val_model = copy.deepcopy(model)
avg_psnr = 0
psnr_accum = dict()
val_step = max((epoch_idx - 1) * len(val_loader) + 1, 0)
for idx, (imgs, poses, hwf, bound) in enumerate(val_loader):
imgs, poses, hwf, bound = imgs.to(device), poses.to(device), hwf.to(device), bound.to(device)
imgs, poses, hwf, bound = imgs.squeeze(), poses.squeeze(), hwf.squeeze(), bound.squeeze()
tto_imgs, test_imgs = torch.split(imgs, [args.tto_views, args.test_views], dim=0)
tto_poses, test_poses = torch.split(poses, [args.tto_views, args.test_views], dim=0)
val_model.load_state_dict(meta_trained_state)
val_optim = torch.optim.SGD(val_model.parameters(), args.tto_lr)
logs = inner_loop(args, val_model, val_optim, tto_imgs, tto_poses, hwf,
bound, args.num_samples, args.tto_batchsize, args.tto_steps,
device=device, idx=idx, log_round=True, setup="val/")
avg_psnr += logs["val/scene_psnr tto_step=" + str(args.tto_steps)]
logs["val/tto_views"] = wandb.Image(
make_grid(tto_imgs.permute(0, 3, 1, 2)))
logs["val/test_views"] = wandb.Image(
make_grid(test_imgs.permute(0, 3, 1, 2)))
logs["val_step"] = val_step
wandb.log(logs)
for (key,val) in logs.items():
if "psnr" in key:
if psnr_accum.get(key) is None:
psnr_accum[key] = 0
psnr_accum[key] += val
val_step+=1
psnr_mean = dict()
for (key,val) in psnr_accum.items():
psnr_mean[key+"_mean"] = val/len(val_loader)
avg_psnr /= len(val_loader)
wandb.log({**psnr_mean, "val/avg_psnr":avg_psnr, "epoch_step":epoch_idx})
def main():
parser = argparse.ArgumentParser(description='shapenet few-shot view synthesis')
parser.add_argument('--config', type=str, required=True,
help='config file for the shape class (cars, chairs or lamps)')
parser.add_argument('--weight_path', type=str, default=None)
args = parser.parse_args()
with open(args.config) as config:
info = json.load(config)
for key, value in info.items():
args.__dict__[key] = value
args.savedir = Path(args.savedir)
wandb.init(name="train_"+args.exp_name, dir="/root/nerf-meta/", project="meta_NeRF", entity="stereo",
save_code=True, job_type="train")
wandb.config.update(args)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_set = build_shapenet(image_set="train", dataset_root=args.dataset_root,
splits_path=args.splits_path, num_views=args.train_views)
train_loader = DataLoader(train_set, batch_size=1, shuffle=True)
val_set = build_shapenet(image_set="val", dataset_root=args.dataset_root,
splits_path=args.splits_path,
num_views=args.tto_views+args.test_views)
val_loader = DataLoader(val_set, batch_size=1, shuffle=False)
meta_model = build_nerf(args)
meta_model.to(device)
if hasattr(args, "weight_path") and args.weight_path is not None:
checkpoint = torch.load(args.weight_path, map_location=device)
meta_state = checkpoint['meta_model_state_dict']
meta_model.load_state_dict(meta_state)
meta_optim = torch.optim.Adam(meta_model.parameters(), lr=args.meta_lr)
logging.info("starting to train...")
val_meta(args, 0, meta_model, val_loader, device)
for epoch in range(1, args.meta_epochs+1):
logging.info("Epoch: " + str(epoch))
train_meta(args, epoch, meta_model, meta_optim, train_loader, device)
val_meta(args, epoch, meta_model, val_loader, device)
        ckpt_name = str(args.savedir / (args.exp_name + "_epoch" + str(epoch) + ".pth"))
torch.save({
'epoch': epoch,
'meta_model_state_dict': meta_model.state_dict(),
'meta_optim_state_dict': meta_optim.state_dict(),
}, ckpt_name)
wandb.save(ckpt_name)
args.weight_path = ckpt_name
test(args)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3247493
|
import __about__
import argparse
from rogify.base.util import (file_exists, load_items, store_items)
from rogify.__config__ import slot_synonyms
def resolve_unknown_slots(items):
fixed_items = []
choice_dict = {i: k for i, k in enumerate(slot_synonyms.keys())}
for item in items:
if item.slot not in slot_synonyms.keys():
print('\n{}\n-> Unknown Slot Type!'.format(item))
choice = -1
while choice not in choice_dict.keys():
print(choice_dict)
choice = int(input('Slot: '))
item.slot = choice_dict[choice]
fixed_items.append(item)
else:
fixed_items.append(item)
return fixed_items
def main():
parser = argparse.ArgumentParser(
description='DAoC Rogify DB Slot Fix! v{}.\nClassify Unknown Slot Items!'.format(
__about__.__version__))
parser.add_argument('database', type=str,
help='Rogify DB file')
parser.add_argument('-o', '--output', type=str, default='items.json',
help='output file name')
args = parser.parse_args()
if not file_exists(args.database):
print("Error: file '%s' does not exists." % args.database)
return 1
if file_exists(args.output):
print("Error: output file '%s' already exists." % args.output)
return 1
items = load_items(args.database)
fixed_items = resolve_unknown_slots(items)
store_items(args.output, fixed_items)
print("Done!")
if __name__ == "__main__":
main()
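# Example invocation sketch (script filename assumed, not part of the original):
#   python rogify_slot_fix.py items_db.json -o items_fixed.json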
|
StarcoderdataPython
|
3239820
|
<filename>tests/test_algorithms.py
#
# Copyright 2020 <NAME>
#
# This file is part of Library of Graph Algorithms for Python.
#
# Library of Graph Algorithms for Python is free software developed for
# educational and experimental purposes. It is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the graphlib.algorithms module.
"""
from itertools import permutations
from pytest import raises
from graphlib.algorithms import MinimumSpanningTreeAlgorithm, ShortestPathSearchRequest, ShortestPathSearchResult
from graphlib.algorithms import MinimumSpanningTreeSearchRequest, MinimumSpanningTreeSearchResult
from graphlib.algorithms import find_minimum_spanning_tree, find_shortest_path, sort_topologically
from graphlib.algorithms import _DistanceTable
from graphlib.graph import AdjacencySetGraph, Edge, GraphType
class TestDistanceTable: # pylint: disable=R0201,C0116
"""Collection of test methods exercising the :class:
graphlib.algorithms._DistanceTable.
"""
def test_starting_vertex_has_distance_zero_and_itself_as_predecessor(self):
distance_table = _DistanceTable('A')
assert distance_table.get_distance_from_start('A') == 0
assert distance_table.get_predecessor('A') == 'A'
def test_in_operator_tests_presence_of_entry_for_the_given_vertex(self):
distance_table = _DistanceTable('A')
distance_table.update('B', 'A', 4)
distance_table.update('C', 'A', 2)
assert 'A' in distance_table
assert 'B' in distance_table
assert 'C' in distance_table
assert 'X' not in distance_table
def test_updates_leading_to_shorter_distance_are_accepted(self):
distance_table = _DistanceTable('A')
distance_table.update('B', 'A', 4)
distance_table.update('C', 'A', 2)
distance_table.update('D', 'C', 8)
assert distance_table.update('D', 'B', 5) == True
assert distance_table.get_distance_from_start('D') == 5
assert distance_table.get_predecessor('D') == 'B'
    def test_updates_leading_to_equal_distance_are_ignored(self):
distance_table = _DistanceTable('A')
distance_table.update('B', 'A', 2)
distance_table.update('C', 'A', 2)
distance_table.update('D', 'C', 5)
assert distance_table.update('D', 'B', 5) == False
    def test_updates_leading_to_longer_distance_are_ignored(self):
distance_table = _DistanceTable('A')
distance_table.update('B', 'A', 4)
distance_table.update('C', 'A', 2)
distance_table.update('D', 'C', 8)
assert distance_table.update('D', 'B', 9) == False
assert distance_table.get_distance_from_start('D') == 8
assert distance_table.get_predecessor('D') == 'C'
def test_backtracking_reconstructs_proper_shortest_path(self):
distance_table = _DistanceTable('A')
distance_table.update('B', 'A', 2)
distance_table.update('C', 'A', 3)
distance_table.update('D', 'C', 6)
distance_table.update('D', 'B', 4)
distance_table.update('F', 'B', 4)
distance_table.update('E', 'D', 9)
distance_table.update('F', 'E', 12)
distance_table.update('G', 'F', 15)
distance_table.backtrack_shortest_path('G') == ShortestPathSearchResult((
Edge(start='A', destination='B', weight=2),
Edge(start='B', destination='F', weight=2),
Edge(start='F', destination='G', weight=3),
))
def test_attempt_to_get_distance_for_non_existent_vertex_leads_to_error(self):
distance_table = _DistanceTable('A')
distance_table.update('B', 'A', 4)
with raises(ValueError, match='No distance table entry found for the vertex X.'):
distance_table.get_distance_from_start('X')
def test_attempt_to_get_predecessor_for_non_existent_vertex_leads_to_error(self):
distance_table = _DistanceTable('A')
distance_table.update('B', 'A', 4)
with raises(ValueError, match='No distance table entry found for the vertex Y.'):
distance_table.get_predecessor('Y')
def test_attempt_to_backtrack_non_existent_path_leads_to_error(self):
distance_table = _DistanceTable('A')
with raises(ValueError, match='There is no path from A to X.'):
distance_table.backtrack_shortest_path('X')
class TestTopologicalSort: # pylint: disable=R0201,C0116
"""Collection of test methods exercising the :method:
    graphlib.algorithms.sort_topologically.
"""
def test_topological_sort_returns_vertices_in_proper_order_case_01(self):
graph = AdjacencySetGraph(GraphType.DIRECTED)
graph.add_edge('A', 'C')
graph.add_edge('B', 'C')
graph.add_edge('C', 'E')
graph.add_edge('D', 'E')
graph.add_edge('E', 'F')
sort_result = sort_topologically(graph)
sort_result = list(sort_result)
assert sort_result in [
['A', 'B', 'C', 'D', 'E', 'F'],
['B', 'A', 'C', 'D', 'E', 'F'],
['A', 'B', 'D', 'C', 'E', 'F'],
['B', 'A', 'D', 'C', 'E', 'F'],
['A', 'D', 'B', 'C', 'E', 'F'],
['B', 'D', 'A', 'C', 'E', 'F'],
['D', 'A', 'B', 'C', 'E', 'F'],
['D', 'B', 'A', 'C', 'E', 'F'],
]
def test_topological_sort_returns_vertices_in_proper_order_case_02(self):
graph = AdjacencySetGraph(GraphType.DIRECTED)
graph.add_edge('A', 'B')
graph.add_edge('B', 'C')
graph.add_edge('B', 'D')
graph.add_edge('B', 'E')
graph.add_edge('C', 'F')
graph.add_edge('D', 'F')
graph.add_edge('E', 'F')
graph.add_edge('F', 'G')
sort_result = sort_topologically(graph)
sort_result = list(sort_result)
assert sort_result in [
['A', 'B'] + list(p) + ['F', 'G'] for p in permutations('CDE', 3)
]
def test_topological_sort_returns_vertices_in_proper_order_case_03(self):
graph = AdjacencySetGraph(GraphType.DIRECTED)
graph.add_edge('A', 'B')
graph.add_edge('C', 'D')
graph.add_edge('B', 'E')
graph.add_edge('D', 'E')
graph.add_edge('E', 'F')
sort_result = sort_topologically(graph)
sort_result = list(sort_result)
assert sort_result in [
['A', 'B', 'C', 'D', 'E', 'F'],
['A', 'C', 'B', 'D', 'E', 'F'],
['A', 'C', 'D', 'B', 'E', 'F'],
['C', 'D', 'A', 'B', 'E', 'F'],
['C', 'A', 'D', 'B', 'E', 'F'],
['C', 'A', 'B', 'D', 'E', 'F'],
]
def test_topological_sort_returns_vertices_in_proper_order_case_04(self):
graph = AdjacencySetGraph(GraphType.DIRECTED)
graph.add_edge('A', 'C')
graph.add_edge('B', 'C')
graph.add_edge('D', 'F')
graph.add_edge('E', 'F')
graph.add_edge('C', 'G')
graph.add_edge('F', 'G')
sort_result = sort_topologically(graph)
sort_result = list(sort_result)
assert len(sort_result) == 7
for vertex in 'ABCDEFG':
assert vertex in sort_result
assert sort_result.index('A') < sort_result.index('C')
assert sort_result.index('B') < sort_result.index('C')
assert sort_result.index('D') < sort_result.index('F')
assert sort_result.index('E') < sort_result.index('F')
assert sort_result.index('C') < sort_result.index('G')
assert sort_result.index('F') < sort_result.index('G')
def test_attempt_to_apply_topological_sort_to_undirected_graph_leads_to_exception(self):
graph = AdjacencySetGraph(GraphType.UNDIRECTED)
graph.add_edge('A', 'C')
graph.add_edge('B', 'C')
graph.add_edge('C', 'D')
with raises(ValueError, match=r'.+ applied to directed graphs\.'):
sort_topologically(graph)
    def test_attempt_to_apply_topological_sort_to_cyclic_graph_leads_to_exception(self):
graph = AdjacencySetGraph(GraphType.DIRECTED)
graph.add_edge('A', 'B')
graph.add_edge('B', 'C')
graph.add_edge('C', 'A')
with raises(ValueError, match=r'.+ applied to acyclic graphs\.'):
sort_topologically(graph)
class TestShortestPathSearchResult: # pylint: disable=R0201,C0116
"""Collection of test methods exercising the :class:
graphlib.algorithms.ShortestPathSearchResult class.
"""
def test_shortest_path_search_result_provides_proper_derived_properties(self):
path = (
Edge(start='A', destination='B', weight=2),
Edge(start='B', destination='C', weight=3),
Edge(start='C', destination='F', weight=5),
Edge(start='F', destination='H', weight=2),
Edge(start='H', destination='L', weight=7),
)
shortest_path = ShortestPathSearchResult(path)
assert shortest_path.start == 'A'
assert shortest_path.destination == 'L'
assert shortest_path.overall_distance == 19
class TestShortestPathSearchForUnweightedGraphSuiteOne: # pylint: disable=R0201,C0116,C0103
"""Collection of test methods exercising the method :method:
graphlib.algorithms.find_shortest_path for unweighted graphs.
"""
def _create_tested_graph(self):
graph = AdjacencySetGraph(GraphType.DIRECTED)
graph.add_edge('A', 'B')
graph.add_edge('A', 'C')
graph.add_edge('B', 'D')
graph.add_edge('C', 'D')
graph.add_edge('D', 'E')
graph.add_edge('E', 'F')
graph.add_edge('C', 'E')
graph.add_edge('B', 'F')
return graph
def test_path_from_A_to_F(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='A', destination='F')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='A', destination='B', weight=1),
Edge(start='B', destination='F', weight=1),
))
def test_path_from_A_to_E(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='A', destination='E')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='A', destination='C', weight=1),
Edge(start='C', destination='E', weight=1),
))
class TestShortestPathSearchForUnweightedGraphSuiteTwo: # pylint: disable=R0201,C0116,C0103
"""Collection of test methods exercising the method :method:
graphlib.algorithms.find_shortest_path for unweighted graphs.
"""
def _create_tested_graph(self):
graph = AdjacencySetGraph(GraphType.DIRECTED)
graph.add_edge('A', 'B')
graph.add_edge('A', 'C')
graph.add_edge('B', 'D')
graph.add_edge('C', 'D')
graph.add_edge('C', 'E')
graph.add_edge('D', 'E')
graph.add_edge('E', 'F')
graph.add_edge('F', 'G')
graph.add_edge('F', 'H')
graph.add_edge('G', 'H')
graph.add_edge('H', 'I')
return graph
def test_path_from_A_to_I(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='A', destination='I')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='A', destination='C', weight=1),
Edge(start='C', destination='E', weight=1),
Edge(start='E', destination='F', weight=1),
Edge(start='F', destination='H', weight=1),
Edge(start='H', destination='I', weight=1),
))
def test_path_from_B_to_G(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='B', destination='G')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='B', destination='D', weight=1),
Edge(start='D', destination='E', weight=1),
Edge(start='E', destination='F', weight=1),
Edge(start='F', destination='G', weight=1),
))
class TestShortestPathSearchForUnweightedGraphSuiteThree: # pylint: disable=R0201,C0116,C0103
"""Collection of test methods exercising the method :method:
graphlib.algorithms.find_shortest_path for unweighted graphs.
"""
def _create_tested_graph(self):
graph = AdjacencySetGraph(GraphType.DIRECTED)
graph.add_edge('A', 'B')
graph.add_edge('B', 'C')
graph.add_edge('C', 'B')
graph.add_edge('C', 'D')
graph.add_edge('A', 'E')
graph.add_edge('B', 'F')
graph.add_edge('F', 'A')
graph.add_edge('C', 'G')
graph.add_edge('H', 'D')
graph.add_edge('F', 'E')
graph.add_edge('F', 'G')
graph.add_edge('G', 'F')
graph.add_edge('H', 'G')
graph.add_edge('E', 'I')
graph.add_edge('J', 'F')
graph.add_edge('F', 'K')
graph.add_edge('K', 'H')
graph.add_edge('L', 'H')
graph.add_edge('J', 'I')
graph.add_edge('K', 'J')
graph.add_edge('L', 'K')
graph.add_edge('M', 'I')
graph.add_edge('J', 'N')
graph.add_edge('P', 'J')
graph.add_edge('P', 'L')
graph.add_edge('N', 'M')
graph.add_edge('N', 'O')
graph.add_edge('O', 'P')
graph.add_edge('P', 'O')
return graph
def test_path_from_A_to_D(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='A', destination='D')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='A', destination='B', weight=1),
Edge(start='B', destination='C', weight=1),
Edge(start='C', destination='D', weight=1),
))
def test_path_from_A_to_I(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='A', destination='I')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='A', destination='E', weight=1),
Edge(start='E', destination='I', weight=1),
))
def test_path_from_A_to_M(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='A', destination='M')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='A', destination='B', weight=1),
Edge(start='B', destination='F', weight=1),
Edge(start='F', destination='K', weight=1),
Edge(start='K', destination='J', weight=1),
Edge(start='J', destination='N', weight=1),
Edge(start='N', destination='M', weight=1),
))
def test_path_from_A_to_P(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='A', destination='P')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='A', destination='B', weight=1),
Edge(start='B', destination='F', weight=1),
Edge(start='F', destination='K', weight=1),
Edge(start='K', destination='J', weight=1),
Edge(start='J', destination='N', weight=1),
Edge(start='N', destination='O', weight=1),
Edge(start='O', destination='P', weight=1),
))
def test_path_from_P_to_A(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='P', destination='A')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='P', destination='J', weight=1),
Edge(start='J', destination='F', weight=1),
Edge(start='F', destination='A', weight=1),
))
def test_path_from_P_to_C(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='P', destination='C')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='P', destination='J', weight=1),
Edge(start='J', destination='F', weight=1),
Edge(start='F', destination='A', weight=1),
Edge(start='A', destination='B', weight=1),
Edge(start='B', destination='C', weight=1),
))
def test_path_from_P_to_G(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='P', destination='G')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='P', destination='J', weight=1),
Edge(start='J', destination='F', weight=1),
Edge(start='F', destination='G', weight=1),
))
def test_path_from_P_to_M(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='P', destination='M')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='P', destination='J', weight=1),
Edge(start='J', destination='N', weight=1),
Edge(start='N', destination='M', weight=1),
))
class TestShortestPathSearchForWeightedGraphSuiteOne: # pylint: disable=R0201,C0116,C0103
"""Collection of test methods exercising the method :method:
graphlib.algorithms.find_shortest_path for weighted graphs.
"""
def _create_tested_graph(self):
graph = AdjacencySetGraph(GraphType.DIRECTED)
graph.add_edge('A', 'B', 2)
graph.add_edge('A', 'C', 4)
graph.add_edge('A', 'D', 7)
graph.add_edge('B', 'C', 1)
graph.add_edge('D', 'C', 3)
graph.add_edge('B', 'E', 5)
graph.add_edge('C', 'F', 3)
graph.add_edge('D', 'F', 8)
graph.add_edge('E', 'F', 8)
graph.add_edge('F', 'E', 5)
graph.add_edge('E', 'G', 3)
graph.add_edge('F', 'G', 2)
return graph
def test_path_from_A_to_G(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='A', destination='G')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='A', destination='B', weight=2),
Edge(start='B', destination='C', weight=1),
Edge(start='C', destination='F', weight=3),
Edge(start='F', destination='G', weight=2),
))
def test_path_from_B_to_G(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='B', destination='G')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='B', destination='C', weight=1),
Edge(start='C', destination='F', weight=3),
Edge(start='F', destination='G', weight=2),
))
def test_path_from_D_to_G(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='D', destination='G')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='D', destination='C', weight=3),
Edge(start='C', destination='F', weight=3),
Edge(start='F', destination='G', weight=2),
))
def test_path_from_A_to_E(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='A', destination='E')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='A', destination='B', weight=2),
Edge(start='B', destination='E', weight=5),
))
def test_path_from_D_to_E(self):
graph = self._create_tested_graph()
search_request = ShortestPathSearchRequest(graph, start='D', destination='E')
assert find_shortest_path(search_request) == ShortestPathSearchResult((
Edge(start='D', destination='C', weight=3),
Edge(start='C', destination='F', weight=3),
Edge(start='F', destination='E', weight=5),
))
class TestShortestPathSearchForWeightedGraphSuiteTwo: # pylint: disable=R0201,C0116,C0103
"""Collection of test methods exercising the method :method:
graphlib.algorithms.find_shortest_path for weighted graphs.
"""
def _create_tested_graph(self):
graph = AdjacencySetGraph(GraphType.DIRECTED)
graph.add_edge('A', 'D', 3)
graph.add_edge('B', 'D', 2)
graph.add_edge('D', 'C', 2)
graph.add_edge('C', 'E', 2)
graph.add_edge('D', 'F', 8)
graph.add_edge('D', 'G', 6)
graph.add_edge('E', 'F', 1)
graph.add_edge('E', 'H', 4)
graph.add_edge('F', 'H', 2)
graph.add_edge('F', 'G', 3)
graph.add_edge('H', 'G', 4)
return graph
def test_path_from_A_to_G(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='A', destination='G')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='A', destination='D', weight=3),
Edge(start='D', destination='G', weight=6),
))
def test_path_from_A_to_H(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='A', destination='H')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='A', destination='D', weight=3),
Edge(start='D', destination='C', weight=2),
Edge(start='C', destination='E', weight=2),
Edge(start='E', destination='F', weight=1),
Edge(start='F', destination='H', weight=2),
))
def test_path_from_C_to_G(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='C', destination='G')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='C', destination='E', weight=2),
Edge(start='E', destination='F', weight=1),
Edge(start='F', destination='G', weight=3),
))
class TestShortestPathSearchForWeightedGraphSuiteThree: # pylint: disable=R0201,C0116,C0103
"""Collection of test methods exercising the method :method:
graphlib.algorithms.find_shortest_path for weighted graphs.
"""
def _create_tested_graph(self):
graph = AdjacencySetGraph(GraphType.DIRECTED)
graph.add_edge('A', 'C', 14)
graph.add_edge('A', 'D', 3)
graph.add_edge('B', 'D', 5)
graph.add_edge('B', 'E', 15)
graph.add_edge('C', 'F', 4)
graph.add_edge('G', 'C', 5)
graph.add_edge('D', 'G', 4)
graph.add_edge('D', 'H', 4)
graph.add_edge('H', 'E', 3)
graph.add_edge('E', 'I', 2)
graph.add_edge('F', 'J', 7)
graph.add_edge('G', 'J', 32)
graph.add_edge('G', 'K', 16)
graph.add_edge('K', 'H', 22)
graph.add_edge('H', 'L', 16)
graph.add_edge('I', 'L', 3)
graph.add_edge('J', 'M', 24)
graph.add_edge('J', 'N', 6)
graph.add_edge('K', 'N', 9)
graph.add_edge('K', 'O', 4)
graph.add_edge('L', 'O', 18)
graph.add_edge('L', 'P', 2)
graph.add_edge('Q', 'M', 5)
graph.add_edge('N', 'Q', 4)
graph.add_edge('R', 'N', 6)
graph.add_edge('O', 'R', 5)
graph.add_edge('S', 'O', 4)
graph.add_edge('P', 'S', 6)
graph.add_edge('Q', 'T', 7)
graph.add_edge('R', 'T', 28)
graph.add_edge('R', 'U', 3)
graph.add_edge('S', 'U', 17)
return graph
def test_path_from_A_to_J(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='A', destination='J')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='A', destination='D', weight=3),
Edge(start='D', destination='G', weight=4),
Edge(start='G', destination='C', weight=5),
Edge(start='C', destination='F', weight=4),
Edge(start='F', destination='J', weight=7),
))
def test_path_from_B_to_I(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='B', destination='I')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='B', destination='D', weight=5),
Edge(start='D', destination='H', weight=4),
Edge(start='H', destination='E', weight=3),
Edge(start='E', destination='I', weight=2),
))
def test_path_from_B_to_M(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='B', destination='M')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='B', destination='D', weight=5),
Edge(start='D', destination='G', weight=4),
Edge(start='G', destination='C', weight=5),
Edge(start='C', destination='F', weight=4),
Edge(start='F', destination='J', weight=7),
Edge(start='J', destination='N', weight=6),
Edge(start='N', destination='Q', weight=4),
Edge(start='Q', destination='M', weight=5),
))
def test_path_from_K_to_L(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='K', destination='L')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='K', destination='H', weight=22),
Edge(start='H', destination='E', weight=3),
Edge(start='E', destination='I', weight=2),
Edge(start='I', destination='L', weight=3),
))
def test_path_from_L_to_M(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='L', destination='M')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='L', destination='P', weight=2),
Edge(start='P', destination='S', weight=6),
Edge(start='S', destination='O', weight=4),
Edge(start='O', destination='R', weight=5),
Edge(start='R', destination='N', weight=6),
Edge(start='N', destination='Q', weight=4),
Edge(start='Q', destination='M', weight=5),
))
class TestShortestPathSearchForWeightedGraphSuiteFour: # pylint: disable=R0201,C0116,C0103
"""Collection of test methods exercising the method :method:
graphlib.algorithms.find_shortest_path for weighted graphs.
"""
def _create_tested_graph(self):
graph = AdjacencySetGraph(GraphType.DIRECTED)
graph.add_edge('A', 'B', 2)
graph.add_edge('C', 'B', 3)
graph.add_edge('A', 'D', 8)
graph.add_edge('B', 'E', 3)
graph.add_edge('C', 'F', 4)
graph.add_edge('E', 'D', 2)
graph.add_edge('F', 'E', 5)
graph.add_edge('D', 'G', 2)
graph.add_edge('E', 'H', 10)
graph.add_edge('I', 'F', 3)
graph.add_edge('G', 'H', 4)
graph.add_edge('H', 'I', 2)
graph.add_edge('J', 'G', 2)
graph.add_edge('H', 'K', 3)
graph.add_edge('I', 'L', 12)
graph.add_edge('K', 'J', 2)
graph.add_edge('K', 'L', 3)
return graph
def test_path_from_A_to_J(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='A', destination='J')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='A', destination='B', weight=2),
Edge(start='B', destination='E', weight=3),
Edge(start='E', destination='D', weight=2),
Edge(start='D', destination='G', weight=2),
Edge(start='G', destination='H', weight=4),
Edge(start='H', destination='K', weight=3),
Edge(start='K', destination='J', weight=2),
))
def test_path_from_F_to_I(self):
graph = self._create_tested_graph()
request = ShortestPathSearchRequest(graph, start='F', destination='I')
assert find_shortest_path(request) == ShortestPathSearchResult((
Edge(start='F', destination='E', weight=5),
Edge(start='E', destination='D', weight=2),
Edge(start='D', destination='G', weight=2),
Edge(start='G', destination='H', weight=4),
Edge(start='H', destination='I', weight=2),
))
class TestMinimumSpanningTreeSearchResult: # pylint: disable=R0201,C0116
"""Collection of test methods exercising the :class:
graphlib.algorithms.MinimumSpanningTree class.
"""
def _create_minimum_spanning_tree(self):
edges = (
Edge(start='A', destination='B', weight=2),
Edge(start='B', destination='C', weight=3),
Edge(start='C', destination='F', weight=5),
Edge(start='D', destination='D', weight=2),
)
return MinimumSpanningTreeSearchResult(MinimumSpanningTreeAlgorithm.PRIM, 'A', edges)
def test_overall_weight_is_calculated_properly(self):
minimum_spanning_tree = self._create_minimum_spanning_tree()
assert minimum_spanning_tree.overall_weight == 12
def test_len_function_provides_number_of_edges(self):
minimum_spanning_tree = self._create_minimum_spanning_tree()
assert len(minimum_spanning_tree) == 4
def test_in_operator_verifies_presence_of_edge(self):
minimum_spanning_tree = self._create_minimum_spanning_tree()
assert Edge(start='A', destination='B', weight=2) in minimum_spanning_tree
assert Edge(start='B', destination='C', weight=3) in minimum_spanning_tree
assert Edge(start='A', destination='B', weight=3) not in minimum_spanning_tree
assert Edge(start='A', destination='X', weight=2) not in minimum_spanning_tree
assert Edge(start='X', destination='B', weight=2) not in minimum_spanning_tree
class TestMinimumSpanningTreeSearch: # pylint: disable=R0201,C0116
"""Collection of test methods exercising the :method:
    graphlib.algorithms.find_minimum_spanning_tree.
"""
def test_prims_algorithm_01(self):
graph = AdjacencySetGraph(GraphType.UNDIRECTED)
graph.add_edge('A', 'B', 5)
graph.add_edge('A', 'C', 7)
graph.add_edge('B', 'C', 2)
graph.add_edge('B', 'D', 3)
graph.add_edge('B', 'E', 4)
graph.add_edge('C', 'D', 4)
graph.add_edge('C', 'E', 6)
graph.add_edge('D', 'E', 8)
graph.add_edge('D', 'F', 10)
graph.add_edge('E', 'F', 4)
search_request = MinimumSpanningTreeSearchRequest(graph, MinimumSpanningTreeAlgorithm.PRIM, 'A')
search_result = find_minimum_spanning_tree(search_request)
assert search_result.overall_weight == 18
assert len(search_result) == 5
assert Edge(start='A', destination='B', weight=5) in search_result
assert Edge(start='B', destination='C', weight=2) in search_result
assert Edge(start='B', destination='D', weight=3) in search_result
assert Edge(start='B', destination='E', weight=4) in search_result
assert Edge(start='E', destination='F', weight=4) in search_result
def test_prims_algorithm_02(self):
graph = AdjacencySetGraph(GraphType.UNDIRECTED)
graph.add_edge('A', 'B', 7)
graph.add_edge('A', 'C', 8)
graph.add_edge('A', 'D', 3)
graph.add_edge('B', 'C', 5)
graph.add_edge('B', 'E', 2)
graph.add_edge('C', 'D', 1)
graph.add_edge('C', 'F', 2)
graph.add_edge('D', 'G', 6)
graph.add_edge('E', 'F', 1)
graph.add_edge('E', 'H', 3)
graph.add_edge('F', 'G', 2)
graph.add_edge('F', 'H', 8)
graph.add_edge('G', 'H', 9)
search_request = MinimumSpanningTreeSearchRequest(graph, MinimumSpanningTreeAlgorithm.PRIM, 'A')
search_result = find_minimum_spanning_tree(search_request)
assert search_result.overall_weight == 14
assert len(search_result) == 7
assert Edge(start='A', destination='D', weight=3) in search_result
assert Edge(start='D', destination='C', weight=1) in search_result
assert Edge(start='C', destination='F', weight=2) in search_result
assert Edge(start='F', destination='E', weight=1) in search_result
assert Edge(start='E', destination='B', weight=2) in search_result
assert Edge(start='F', destination='G', weight=2) in search_result
assert Edge(start='E', destination='H', weight=3) in search_result
def test_prims_algorithm_03(self):
graph = AdjacencySetGraph(GraphType.UNDIRECTED)
graph.add_edge('A', 'B', 7)
graph.add_edge('B', 'C', 4)
graph.add_edge('A', 'D', 2)
graph.add_edge('B', 'E', 6)
graph.add_edge('C', 'F', 1)
graph.add_edge('D', 'E', 3)
graph.add_edge('E', 'F', 5)
graph.add_edge('D', 'G', 5)
graph.add_edge('E', 'H', 2)
graph.add_edge('F', 'I', 2)
graph.add_edge('G', 'H', 2)
graph.add_edge('H', 'I', 3)
graph.add_edge('G', 'J', 1)
graph.add_edge('H', 'K', 7)
graph.add_edge('I', 'L', 3)
graph.add_edge('J', 'K', 2)
graph.add_edge('K', 'L', 8)
search_request = MinimumSpanningTreeSearchRequest(graph, MinimumSpanningTreeAlgorithm.PRIM, 'A')
search_result = find_minimum_spanning_tree(search_request)
assert search_result.overall_weight == 25
assert len(search_result) == 11
assert Edge(start='A', destination='D', weight=2) in search_result
assert Edge(start='D', destination='E', weight=3) in search_result
assert Edge(start='E', destination='H', weight=2) in search_result
assert Edge(start='H', destination='G', weight=2) in search_result
assert Edge(start='G', destination='J', weight=1) in search_result
assert Edge(start='H', destination='I', weight=3) in search_result
assert Edge(start='I', destination='L', weight=3) in search_result
assert Edge(start='I', destination='F', weight=2) in search_result
assert Edge(start='F', destination='C', weight=1) in search_result
assert Edge(start='C', destination='B', weight=4) in search_result
assert Edge(start='J', destination='K', weight=2) in search_result
def test_kruskals_algorithm_connected_graph_01(self):
graph = AdjacencySetGraph(GraphType.UNDIRECTED)
graph.add_edge('A', 'B', 2)
graph.add_edge('A', 'C', 3)
graph.add_edge('A', 'D', 4)
graph.add_edge('B', 'C', 3)
graph.add_edge('B', 'E', 1)
graph.add_edge('C', 'D', 6)
graph.add_edge('C', 'E', 2)
graph.add_edge('C', 'F', 2)
graph.add_edge('C', 'G', 4)
graph.add_edge('D', 'G', 1)
graph.add_edge('E', 'F', 5)
graph.add_edge('F', 'G', 2)
search_request = MinimumSpanningTreeSearchRequest(graph, MinimumSpanningTreeAlgorithm.KRUSKAL)
search_result = find_minimum_spanning_tree(search_request)
assert len(search_result) == 6
assert Edge(start='A', destination='B', weight=2) in search_result
assert Edge(start='B', destination='E', weight=1) in search_result
assert Edge(start='C', destination='E', weight=2) in search_result
assert Edge(start='C', destination='F', weight=2) in search_result
assert Edge(start='F', destination='G', weight=2) in search_result
assert Edge(start='D', destination='G', weight=1) in search_result
def test_kruskals_algorithm_connected_graph_02(self):
graph = AdjacencySetGraph(GraphType.UNDIRECTED)
graph.add_edge('A', 'B', 6)
graph.add_edge('A', 'C', 5)
graph.add_edge('A', 'D', 4)
graph.add_edge('B', 'C', 3)
graph.add_edge('B', 'E', 2)
graph.add_edge('B', 'F', 3)
graph.add_edge('C', 'D', 1)
graph.add_edge('C', 'F', 2)
graph.add_edge('D', 'F', 3)
graph.add_edge('D', 'G', 5)
graph.add_edge('E', 'F', 1)
graph.add_edge('E', 'H', 3)
graph.add_edge('F', 'G', 4)
graph.add_edge('F', 'H', 4)
graph.add_edge('G', 'H', 3)
search_request = MinimumSpanningTreeSearchRequest(graph, MinimumSpanningTreeAlgorithm.KRUSKAL)
search_result = find_minimum_spanning_tree(search_request)
assert len(search_result) == 7
assert Edge(start='C', destination='D', weight=1) in search_result
assert Edge(start='E', destination='F', weight=1) in search_result
assert Edge(start='B', destination='E', weight=2) in search_result
assert Edge(start='C', destination='F', weight=2) in search_result
assert Edge(start='E', destination='H', weight=3) in search_result
assert Edge(start='G', destination='H', weight=3) in search_result
assert Edge(start='A', destination='D', weight=4) in search_result
def test_kruskals_algorithm_connected_graph_03(self):
graph = AdjacencySetGraph(GraphType.UNDIRECTED)
graph.add_edge('A', 'B', 3)
graph.add_edge('B', 'C', 3)
graph.add_edge('A', 'D', 4)
graph.add_edge('A', 'E', 2)
graph.add_edge('B', 'E', 2)
graph.add_edge('C', 'E', 4)
graph.add_edge('C', 'F', 8)
graph.add_edge('D', 'E', 5)
graph.add_edge('E', 'F', 7)
graph.add_edge('D', 'G', 6)
graph.add_edge('E', 'G', 8)
graph.add_edge('E', 'H', 8)
graph.add_edge('E', 'I', 7)
graph.add_edge('F', 'I', 3)
graph.add_edge('G', 'H', 5)
graph.add_edge('H', 'I', 2)
graph.add_edge('G', 'J', 4)
graph.add_edge('G', 'K', 3)
graph.add_edge('H', 'K', 4)
graph.add_edge('I', 'K', 5)
graph.add_edge('I', 'L', 6)
graph.add_edge('J', 'K', 5)
graph.add_edge('K', 'L', 8)
search_request = MinimumSpanningTreeSearchRequest(graph, MinimumSpanningTreeAlgorithm.KRUSKAL)
search_result = find_minimum_spanning_tree(search_request)
assert len(search_result) == 11
assert Edge(start='A', destination='E', weight=2) in search_result
assert Edge(start='B', destination='E', weight=2) in search_result
assert Edge(start='H', destination='I', weight=2) in search_result
assert Edge(start='B', destination='C', weight=3) in search_result
assert Edge(start='F', destination='I', weight=3) in search_result
assert Edge(start='G', destination='K', weight=3) in search_result
assert Edge(start='A', destination='D', weight=4) in search_result
assert Edge(start='G', destination='J', weight=4) in search_result
assert Edge(start='H', destination='K', weight=4) in search_result
assert Edge(start='D', destination='G', weight=6) in search_result
assert Edge(start='I', destination='L', weight=6) in search_result
def test_kruskals_algorithm_connected_graph_04(self):
graph = AdjacencySetGraph(GraphType.UNDIRECTED)
graph.add_edge('A', 'B', 4)
graph.add_edge('B', 'C', 1)
graph.add_edge('A', 'F', 4)
graph.add_edge('B', 'G', 6)
graph.add_edge('C', 'H', 5)
graph.add_edge('F', 'G', 2)
graph.add_edge('G', 'H', 5)
graph.add_edge('F', 'K', 5)
graph.add_edge('G', 'L', 1)
graph.add_edge('H', 'M', 3)
graph.add_edge('K', 'L', 4)
graph.add_edge('L', 'M', 4)
graph.add_edge('A', 'D', 3)
graph.add_edge('B', 'D', 3)
graph.add_edge('D', 'F', 5)
graph.add_edge('D', 'G', 5)
graph.add_edge('B', 'E', 5)
graph.add_edge('C', 'E', 6)
graph.add_edge('E', 'G', 3)
graph.add_edge('E', 'H', 2)
graph.add_edge('F', 'I', 4)
graph.add_edge('G', 'I', 3)
graph.add_edge('I', 'K', 5)
graph.add_edge('I', 'L', 4)
graph.add_edge('G', 'J', 4)
graph.add_edge('H', 'J', 1)
graph.add_edge('J', 'L', 4)
graph.add_edge('J', 'M', 2)
search_request = MinimumSpanningTreeSearchRequest(graph, MinimumSpanningTreeAlgorithm.KRUSKAL)
search_result = find_minimum_spanning_tree(search_request)
assert len(search_result) == 12
assert Edge(start='B', destination='C', weight=1) in search_result
assert Edge(start='G', destination='L', weight=1) in search_result
assert Edge(start='H', destination='J', weight=1) in search_result
assert Edge(start='E', destination='H', weight=2) in search_result
assert Edge(start='F', destination='G', weight=2) in search_result
assert Edge(start='J', destination='M', weight=2) in search_result
assert Edge(start='A', destination='D', weight=3) in search_result
assert Edge(start='B', destination='D', weight=3) in search_result
assert Edge(start='E', destination='G', weight=3) in search_result
assert Edge(start='G', destination='I', weight=3) in search_result
assert Edge(start='A', destination='F', weight=4) in search_result
assert Edge(start='K', destination='L', weight=4) in search_result
def test_kruskals_algorithm_disconnected_graph_01(self):
graph = AdjacencySetGraph(GraphType.UNDIRECTED)
# left part
graph.add_edge('A', 'B', 3)
graph.add_edge('A', 'D', 2)
graph.add_edge('B', 'E', 3)
graph.add_edge('D', 'E', 6)
graph.add_edge('D', 'G', 4)
graph.add_edge('E', 'H', 3)
graph.add_edge('G', 'H', 5)
graph.add_edge('A', 'C', 1)
graph.add_edge('B', 'C', 2)
graph.add_edge('C', 'D', 4)
graph.add_edge('C', 'E', 4)
graph.add_edge('D', 'F', 4)
graph.add_edge('E', 'F', 1)
graph.add_edge('F', 'G', 3)
graph.add_edge('F', 'H', 2)
# right part
graph.add_edge('I', 'J', 3)
graph.add_edge('I', 'L', 4)
graph.add_edge('J', 'M', 4)
graph.add_edge('L', 'M', 2)
graph.add_edge('L', 'O', 6)
graph.add_edge('M', 'P', 3)
graph.add_edge('O', 'P', 5)
graph.add_edge('I', 'K', 4)
graph.add_edge('J', 'K', 1)
graph.add_edge('K', 'L', 4)
graph.add_edge('K', 'M', 3)
graph.add_edge('L', 'N', 1)
graph.add_edge('M', 'N', 3)
graph.add_edge('N', 'O', 6)
graph.add_edge('N', 'P', 4)
search_request = MinimumSpanningTreeSearchRequest(graph, MinimumSpanningTreeAlgorithm.KRUSKAL)
search_result = find_minimum_spanning_tree(search_request)
assert len(search_result) == 14
# left part
assert Edge(start='A', destination='C', weight=1) in search_result
assert Edge(start='E', destination='F', weight=1) in search_result
assert Edge(start='A', destination='D', weight=2) in search_result
assert Edge(start='B', destination='C', weight=2) in search_result
assert Edge(start='F', destination='H', weight=2) in search_result
assert Edge(start='B', destination='E', weight=3) in search_result
assert Edge(start='F', destination='G', weight=3) in search_result
# right part
assert Edge(start='J', destination='K', weight=1) in search_result
assert Edge(start='L', destination='N', weight=1) in search_result
assert Edge(start='L', destination='M', weight=2) in search_result
assert Edge(start='I', destination='J', weight=3) in search_result
assert Edge(start='K', destination='M', weight=3) in search_result
assert Edge(start='M', destination='P', weight=3) in search_result
assert Edge(start='O', destination='P', weight=5) in search_result
def test_kruskals_algorithm_disconnected_graph_02(self):
graph = AdjacencySetGraph(GraphType.UNDIRECTED)
# left part
graph.add_edge('A', 'C', 2)
graph.add_edge('C', 'E', 4)
graph.add_edge('E', 'G', 3)
graph.add_edge('G', 'H', 2)
graph.add_edge('H', 'F', 3)
graph.add_edge('F', 'D', 2)
graph.add_edge('D', 'B', 3)
graph.add_edge('B', 'A', 4)
graph.add_edge('B', 'C', 3)
graph.add_edge('C', 'G', 2)
graph.add_edge('G', 'F', 3)
graph.add_edge('F', 'B', 1)
graph.add_edge('B', 'G', 4)
graph.add_edge('C', 'F', 1)
# right part
graph.add_edge('I', 'K', 3)
graph.add_edge('K', 'M', 6)
graph.add_edge('M', 'O', 5)
graph.add_edge('O', 'P', 4)
graph.add_edge('P', 'N', 5)
graph.add_edge('N', 'L', 3)
graph.add_edge('L', 'J', 4)
graph.add_edge('J', 'I', 1)
graph.add_edge('J', 'K', 4)
graph.add_edge('K', 'O', 6)
graph.add_edge('O', 'N', 4)
graph.add_edge('N', 'J', 5)
graph.add_edge('J', 'O', 1)
graph.add_edge('K', 'N', 2)
search_request = MinimumSpanningTreeSearchRequest(graph, MinimumSpanningTreeAlgorithm.KRUSKAL)
search_result = find_minimum_spanning_tree(search_request)
assert len(search_result) == 14
# left part
assert Edge(start='B', destination='F', weight=1) in search_result
assert Edge(start='C', destination='F', weight=1) in search_result
assert Edge(start='A', destination='C', weight=2) in search_result
assert Edge(start='C', destination='G', weight=2) in search_result
assert Edge(start='D', destination='F', weight=2) in search_result
assert Edge(start='G', destination='H', weight=2) in search_result
assert Edge(start='E', destination='G', weight=3) in search_result
# right part
assert Edge(start='I', destination='J', weight=1) in search_result
assert Edge(start='J', destination='O', weight=1) in search_result
assert Edge(start='K', destination='N', weight=2) in search_result
assert Edge(start='I', destination='K', weight=3) in search_result
assert Edge(start='L', destination='N', weight=3) in search_result
assert Edge(start='O', destination='P', weight=4) in search_result
assert Edge(start='M', destination='O', weight=5) in search_result
def test_request_for_prims_algorithm_without_start_vertex_leads_to_error(self):
graph = AdjacencySetGraph(GraphType.UNDIRECTED)
graph.add_edge('A', 'B', 3)
graph.add_edge('B', 'C', 7)
graph.add_edge('A', 'C', 2)
search_request = MinimumSpanningTreeSearchRequest(graph, MinimumSpanningTreeAlgorithm.PRIM)
with raises(ValueError, match="Prim's algorithm is requested, but starting vertex is undefined."):
find_minimum_spanning_tree(search_request)
def test_request_for_kruskals_algorithm_with_start_vertex_leads_to_error(self):
graph = AdjacencySetGraph(GraphType.UNDIRECTED)
graph.add_edge('A', 'B', 3)
graph.add_edge('B', 'C', 7)
graph.add_edge('A', 'C', 2)
search_request = MinimumSpanningTreeSearchRequest(graph, MinimumSpanningTreeAlgorithm.KRUSKAL, 'A')
with raises(ValueError, match="Kruskal's algorithm is requested, but starting vertex is specified."):
find_minimum_spanning_tree(search_request)
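
# The tests above only exercise the public find_minimum_spanning_tree() API; the
# implementation itself is not part of this file. As a rough, self-contained
# illustration of the idea Kruskal's algorithm relies on (sort edges by weight and
# add an edge unless it would close a cycle, tracked with a union-find structure),
# a minimal standalone sketch could look like the helper below. The name
# _kruskal_sketch and its edge-tuple format are hypothetical and independent of
# the module under test.
def _kruskal_sketch(edges):
    """edges: iterable of (weight, u, v) tuples; returns the list of MST edges."""
    parent = {}

    def find(x):
        parent.setdefault(x, x)
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    tree = []
    for weight, u, v in sorted(edges):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:           # the edge joins two separate components
            parent[root_u] = root_v    # union the components
            tree.append((weight, u, v))
    return tree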
|
StarcoderdataPython
|
1798509
|
# Generated by Django 3.1.3 on 2020-11-19 06:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tracker', '0002_auto_20201118_0923'),
]
operations = [
migrations.RemoveField(
model_name='timer',
name='is_paused',
),
migrations.AddField(
model_name='timer',
name='is_running',
field=models.BooleanField(default=True),
),
]
|
StarcoderdataPython
|
3319504
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Density compensation for non-uniform acquried data."""
import numpy as np
def cmp(k):
"""Golden angle density compensation.
Simple linear ramp based density compensation function
Parameters
----------
k : numpy.array
Trajectory which should be density compensated.
Returns
-------
    numpy.array :
        The density compensation array of shape (nspokes, N);
        -5 is returned if the trajectory is neither 2- nor 3-dimensional.
"""
if len(np.shape(k)) == 2:
nspokes, N = np.shape(k)
elif len(np.shape(k)) == 3:
_, nspokes, N = np.shape(k)
else:
return -5
w = np.abs(np.linspace(-N / 2, N / 2, N)) # -N/2 N/2
w = w * (np.pi / 4) / nspokes # no scaling seems to work better??
w = np.repeat(w, nspokes, 0)
w = np.reshape(w, (N, nspokes)).T
return np.array(w)
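# A minimal usage sketch (illustrative only, not part of the original module):
# cmp() only inspects the shape of the trajectory, so a zero array standing in
# for a radial trajectory with 64 spokes of 256 samples each is enough to show
# the shape of the returned ramp weighting.
if __name__ == "__main__":
    traj = np.zeros((64, 256))
    weights = cmp(traj)
    print(weights.shape)  # (64, 256): one linear-ramp row per spoke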
|
StarcoderdataPython
|
1781243
|
<reponame>finswimmer/clikit
from contextlib import contextmanager
from clikit.api.io import IO
from clikit.ui import Component
from clikit.ui.alignment import LabelAlignment
from clikit.ui.components import LabeledParagraph
class BlockLayout:
"""
Renders renderable objects in indented blocks.
"""
def __init__(self): # type: () -> None
self._current_indentation = 0
self._elements = []
self._indentations = []
self._alignment = LabelAlignment()
def add(self, element): # type: (Component) -> BlockLayout
self._elements.append(element)
self._indentations.append(self._current_indentation)
if isinstance(element, LabeledParagraph):
self._alignment.add(element, self._current_indentation)
element.set_alignment(self._alignment)
return self
@contextmanager
def block(self): # type: () -> BlockLayout
self._current_indentation += 2
yield self
self._current_indentation -= 2
def render(self, io, indentation=0): # type: (IO, int) -> None
self._alignment.align(io, indentation)
for i, element in enumerate(self._elements):
element.render(io, self._indentations[i] + indentation)
self._elements = []
|
StarcoderdataPython
|
33954
|
import fastai
from neptune.new.integrations.fastai import NeptuneCallback
from fastai.vision.all import *
import neptune.new as neptune
run = neptune.init(
project="common/fastai-integration", api_token="<PASSWORD>", tags="basic"
)
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_csv(path)
# Log all training phases of the learner
learn = cnn_learner(dls, resnet18, cbs=[NeptuneCallback(run=run, base_namespace="experiment")])
learn.fit_one_cycle(2)
learn.fit_one_cycle(1)
run.stop()
|
StarcoderdataPython
|
3265537
|
# https://leetcode.com/problems/two-sum/
from typing import List
# class Solution:
# def twoSum(self, nums: List[int], target: int) -> List[int]:
# #for loop for the range of the nums []
# for i in range(len(nums)):
# for j in range(len(nums)):
# #check to see if each num is equal to target
# if (nums[i] + nums[j]) == target and i != j:
#             # return the two indices whose values add up to the target
# return i, j
# return None
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
# make a dictionary from element in nums to index.
elem_to_index = {}
for i in range(len(nums)):
# find the complement at each element in nums
complement = target - nums[i]
# if complement has been seen before, return index from dict, and i
if complement in elem_to_index:
return elem_to_index[complement], i
# add nums[i] to the dictionary to mark it as seen
elem_to_index[nums[i]] = i
return None
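# A minimal usage sketch (illustrative only, not part of the original solution),
# exercising the hash-map approach above:
if __name__ == "__main__":
    assert Solution().twoSum([2, 7, 11, 15], 9) == (0, 1)  # 2 + 7 == 9
    assert Solution().twoSum([3, 2, 4], 6) == (1, 2)       # 2 + 4 == 6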
|
StarcoderdataPython
|
184481
|
#%%
import asyncio
from datetime import datetime
import aiofiles
import aiohttp
import pandas as pd
OPEN_DATA_BUCKET_URL = "https://open-neurodata.s3.amazonaws.com"
#%%
def return_url_dataset(coll, exp, ch):
return f"{OPEN_DATA_BUCKET_URL}/{coll}/{exp}/{ch}/info"
#%%
# read the data
df = pd.read_csv("scripts/public_datasets_downsample.csv", na_filter=False)
# removing empty/dev channels in boss
df = df[(df["ch"] != "empty") & (df["ch"] != "dev")]
# %%
df["url"] = df.apply(
lambda x: return_url_dataset(x["coll"], x["exp"], x["ch"]), axis=1
)
# %%
outfname = "scripts/datasets_status_async.csv"
header = df.to_csv(header=None)
with open(outfname, mode='w') as f:
f.write(",".join(df.columns.to_list()) + ",status_code" + "\n")
async with aiohttp.ClientSession() as session:
async with aiofiles.open(outfname, mode='a') as f:
for _, dataset in df.iterrows():
async with session.get(dataset["url"]) as resp:
await resp.text()
data = ",".join(map(str, dataset.to_list())) + "," + str(resp.status) + "\n"
await f.write(data)
#%%
df_to_do = pd.read_csv(outfname, na_filter=False)
# we transferred these into a different prefix
df_to_do.loc[(df_to_do["coll"] == "bock") | (df_to_do["exp"] == "kasthuri14s1colANNO"), "status_code"] = 200
df_to_do = df_to_do[df_to_do["status_code"] != 200]
# %%
df_to_do.to_csv("scripts/public_data_sets_to_tx.csv", index=False)
print("done")
|
StarcoderdataPython
|
1713967
|
import json
import logging
import time
import sys
import ipaddress
if sys.version_info[0] != 3:
raise Exception('Can only run under python3')
LOGFILE = __file__ + '.log'
formatter = logging.Formatter('%(asctime)s: %(levelname)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
def waitfor(seconds=2, reason=None):
if reason is not None:
logger.info('Waiting for {} seconds. Reason: {}'.format(seconds, reason))
else:
logger.info('Waiting for {} seconds'.format(seconds))
time.sleep(seconds)
# def to_json(data):
# return codecs.encode(json.dumps(data))
def to_json(data, indent=2):
return json.dumps(data, indent=indent)
def from_json(data):
return json.loads(data)
def get_gateway_ip(cidr):
return [str(ip) for ip in ipaddress.IPv4Network(cidr)][1]
def cidr_to_netmask(cidr):
return (str(ipaddress.IPv4Network(cidr).network_address), str(ipaddress.IPv4Network(cidr).netmask))
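# A minimal usage sketch for the helpers above (illustrative only, not part of
# the original module); the CIDR value is an arbitrary example:
if __name__ == "__main__":
    print(get_gateway_ip('192.168.1.0/24'))   # '192.168.1.1' (first host address)
    print(cidr_to_netmask('192.168.1.0/24'))  # ('192.168.1.0', '255.255.255.0')
    print(to_json({'a': 1}))                  # pretty-printed JSON with indent=2
    waitfor(1, reason='demonstrating the logging helper')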
|
StarcoderdataPython
|
3231485
|
<reponame>Signal-Kinetics/alexa-apis-for-python
# -*- coding: utf-8 -*-
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
#
import six
import unittest
from dateutil import tz
import datetime
from pytest import raises
from ask_sdk_model_runtime import (
ApiClient, ApiClientResponse, ApiConfiguration,
AuthenticationConfiguration, Serializer, ServiceException)
from ask_sdk_model_runtime.lwa import (
AccessToken, AccessTokenResponse, LwaClient)
if six.PY3:
from unittest import mock
else:
import mock
class MockedApiClient(ApiClient):
def __init__(self):
self.request = None
def invoke(self, request):
self.request = request
return self.empty_response()
def empty_response(self):
fake_response = ApiClientResponse()
fake_response.status_code = 200
return fake_response
class TestBaseServiceClient(unittest.TestCase):
def test_lwa_client_init_no_auth_config_throw_exception(self):
with raises(ValueError) as exc:
lwa_client = LwaClient(
api_configuration=ApiConfiguration(),
authentication_configuration=None)
self.assertEqual(str(exc.value), (
"authentication_configuration must be provided"), (
"LwaClient Initialization didn't throw exception if a null "
"Authentication Configuration is passed"))
def test_get_access_token_for_null_scope_throw_exception(self):
test_lwa_client = LwaClient(
api_configuration=ApiConfiguration(),
authentication_configuration=AuthenticationConfiguration())
with raises(ValueError) as exc:
test_lwa_client.get_access_token_for_scope(scope=None)
self.assertEqual(str(exc.value), "scope must be provided", (
"LWA Client get access token call didn't throw exception if a "
"null scope is passed"))
def test_get_access_token_retrieve_from_cache(self):
test_lwa_client = LwaClient(
api_configuration=ApiConfiguration(),
authentication_configuration=AuthenticationConfiguration())
test_scope = "test"
expected_token_value = "<PASSWORD>"
test_token_expiry = (
datetime.datetime.now(tz.tzutc()) +
datetime.timedelta(hours=1))
test_access_token = AccessToken(
token=expected_token_value, expiry=test_token_expiry)
test_lwa_client._scoped_token_cache[test_scope] = test_access_token
actual_token_value = test_lwa_client.get_access_token_for_scope(
scope=test_scope)
self.assertEqual(expected_token_value, actual_token_value, (
"LWA Client get access token call didn't retrieve unexpired "
"scoped access token from cache when available"))
def test_get_access_token_cache_miss_api_call_success(self):
mocked_api_client = MockedApiClient()
mocked_serializer = mock.MagicMock(spec=Serializer)
mocked_serializer.serialize.return_value = "access token request"
local_now = datetime.datetime.now(tz.tzutc())
test_scope = "test"
test_client_id = "test_client_id"
test_client_secret = "test_client_secret"
expected_token_value = "<PASSWORD>"
expected_headers = [(
'Content-type', 'application/x-www-form-urlencoded')]
expected_request_method = "POST"
expected_request_url = "https://api.amazon.com/auth/O2/token"
expected_request_body = (
"grant_type=client_credentials&client_id={}&client_secret={}"
"&scope={}").format(test_client_id, test_client_secret, test_scope)
mocked_serializer.deserialize.return_value = AccessTokenResponse(
access_token=expected_token_value, expires_in=10, scope=test_scope)
test_lwa_client = LwaClient(
api_configuration=ApiConfiguration(
serializer=mocked_serializer,
api_client=mocked_api_client),
authentication_configuration=AuthenticationConfiguration(
client_id=test_client_id,
client_secret=test_client_secret))
with mock.patch(
"ask_sdk_model_runtime.lwa.lwa_client.datetime") as mock_date:
mock_date.now.return_value = local_now
actual_token_value = test_lwa_client.get_access_token_for_scope(
scope=test_scope)
self.assertEqual(expected_token_value, actual_token_value, (
"LWA Client get access token call didn't retrieve scoped access token"))
actual_token_expiry = test_lwa_client._scoped_token_cache[
test_scope].expiry
self.assertEqual((local_now + datetime.timedelta(
seconds=10)), actual_token_expiry, (
"LWA Client get access token call cached wrong access token "
"expiry date"))
self.assertEqual(mocked_api_client.request.headers, expected_headers, (
"LWA Client get access token called API with wrong headers"))
self.assertEqual(mocked_api_client.request.method, expected_request_method, (
"LWA Client get access token called API with wrong HTTP method"))
self.assertEqual(mocked_api_client.request.url, expected_request_url, (
"LWA Client get access token called API with wrong HTTP URL"))
mocked_serializer.serialize.assert_called_with(expected_request_body)
def test_get_access_token_for_smapi_cache_miss_api_call_success(
self):
mocked_api_client = MockedApiClient()
mocked_serializer = mock.MagicMock(spec=Serializer)
mocked_serializer.serialize.return_value = "access token request"
local_now = datetime.datetime.now(tz.tzutc())
refresh_access_token = "<PASSWORD>_access_token"
test_refresh_token = "<PASSWORD>_refresh_token"
test_grant_type = "refresh_token"
test_client_id = "test_client_id"
test_client_secret = "test_client_secret"
expected_token_value = "test_token"
expected_headers = [(
'Content-type', 'application/x-www-form-urlencoded')]
expected_request_method = "POST"
expected_request_url = "https://api.amazon.com/auth/O2/token"
expected_request_body = (
"grant_type={}&client_id={}&client_secret={}"
"&refresh_token={}").format(test_grant_type, test_client_id,
test_client_secret, test_refresh_token)
mocked_serializer.deserialize.return_value = AccessTokenResponse(
access_token=expected_token_value, expires_in=10, scope=None)
test_lwa_client = LwaClient(
api_configuration=ApiConfiguration(
serializer=mocked_serializer,
api_client=mocked_api_client),
authentication_configuration=AuthenticationConfiguration(
client_id=test_client_id,
client_secret=test_client_secret,
refresh_token=test_refresh_token),
grant_type=test_grant_type
)
with mock.patch(
"ask_sdk_model_runtime.lwa.lwa_client.datetime") as mock_date:
mock_date.now.return_value = local_now
actual_token_value = test_lwa_client.get_access_token_from_refresh_token()
self.assertEqual(expected_token_value, actual_token_value, (
"LWA Client get access token call didn't retrieve unexpired "
"scoped access token from cache when available"))
actual_token_expiry = test_lwa_client._scoped_token_cache[
refresh_access_token].expiry
self.assertEqual((local_now + datetime.timedelta(
seconds=10)), actual_token_expiry, (
"LWA Client get access token call cached wrong access token "
"expiry date"))
self.assertEqual(mocked_api_client.request.headers, expected_headers, (
"LWA Client get access token called API with wrong headers"))
self.assertEqual(mocked_api_client.request.method, expected_request_method, (
"LWA Client get access token called API with wrong HTTP method"))
self.assertEqual(mocked_api_client.request.url, expected_request_url, (
"LWA Client get access token called API with wrong HTTP URL"))
mocked_serializer.serialize.assert_called_with(expected_request_body)
def test_get_access_token_for_default_endpoint_api_success(
self):
mocked_api_client = MockedApiClient()
mocked_serializer = mock.MagicMock(spec=Serializer)
mocked_serializer.serialize.return_value = "access token request"
local_now = datetime.datetime.now(tz.tzutc())
test_scope = "test"
test_client_id = "test_client_id"
test_client_secret = "test_client_secret"
test_endpoint = "https://foo.com"
expected_token_value = "test_token"
expected_request_url = "{}/auth/O2/token".format(test_endpoint)
mocked_serializer.deserialize.return_value = AccessTokenResponse(
access_token=expected_token_value, expires_in=10, scope=test_scope)
test_lwa_client = LwaClient(
api_configuration=ApiConfiguration(
serializer=mocked_serializer,
api_client=mocked_api_client,
api_endpoint=test_endpoint),
authentication_configuration=AuthenticationConfiguration(
client_id=test_client_id,
client_secret=test_client_secret))
with mock.patch(
"ask_sdk_model_runtime.lwa.lwa_client.datetime") as mock_date:
mock_date.now.return_value = local_now
actual_token_value = test_lwa_client.get_access_token_for_scope(
scope=test_scope)
self.assertEqual(expected_token_value, actual_token_value, (
"LWA Client get access token call didn't retrieve scoped access "
"token when a custom endpoint is passed"))
self.assertEqual(mocked_api_client.request.url, expected_request_url, (
"LWA Client get access token called API with wrong HTTP URL, "
"when a custom endpoint is passed"))
def test_get_access_token_api_call_fails_throws_exception(
self):
mocked_serializer = mock.MagicMock(spec=Serializer)
mocked_serializer.serialize.return_value = "access token request"
test_scope = "test"
fake_response = ApiClientResponse()
fake_response.status_code = 400
fake_response.body = "test_body"
fake_api_client = mock.MagicMock(spec=ApiClient)
fake_api_client.invoke.return_value = fake_response
mocked_serializer.deserialize.return_value = "test error body"
test_lwa_client = LwaClient(
api_configuration=ApiConfiguration(
serializer=mocked_serializer,
api_client=fake_api_client),
authentication_configuration=AuthenticationConfiguration())
with raises(ServiceException) as exc:
_actual_token_value = test_lwa_client.get_access_token_for_scope(
scope=test_scope)
self.assertIn("Bad Request", str(exc.value), (
"LWA Client get access token threw unknown exception when "
"the LWA API call failed with an known exception"))
def test_get_access_token_for_null_lwa_response_throw_exception(
self):
mocked_api_client = MockedApiClient()
mocked_serializer = mock.MagicMock(spec=Serializer)
test_scope = "test"
test_lwa_client = LwaClient(
api_configuration=ApiConfiguration(
serializer=mocked_serializer,
api_client=mocked_api_client),
authentication_configuration=AuthenticationConfiguration())
with mock.patch.object(
test_lwa_client, "_generate_access_token", return_value=None):
with raises(ValueError) as exc:
test_lwa_client.get_access_token_for_scope(scope=test_scope)
self.assertEqual(str(exc.value), "Invalid response from LWA Client " \
"generate access token call", (
"LWA Client get access token call didn't throw exception if a "
"generate access token returns None "))
|
StarcoderdataPython
|
36454
|
import tensorflow as tf
from tensorflow.keras.layers import Layer, Dense, Reshape, Embedding, Concatenate, Conv2D
from tensorflow.keras.models import Model
import numpy as np
class SelfAttention(Model):
def __init__(self, d_model, spatial_dims, positional_encoding=True, name="self_attention"):
'''
d_model : number of output channels
spatial_dim : spatial dimensions of input tensor (x , y)
if positional_encoding: depth must correspond to input channel number
adapted from: https://www.tensorflow.org/tutorials/text/transformer
'''
super().__init__(name=name)
self.d_model = d_model
self.spatial_dims=spatial_dims
self.spatial_dim = np.prod(spatial_dims)
self.wq = Dense(self.d_model, name=name+"_q")
self.wk = Dense(self.d_model, name=name+"_k")
self.wv = Dense(self.d_model, name=name+"_w")
self.positional_encoding=positional_encoding
if positional_encoding:
self.pos_embedding = Embedding(self.spatial_dim, d_model, name=name+"pos_enc") # TODO test other positional encoding. in particular that encodes X and Y
def call(self, x):
'''
x : tensor with shape (batch_size, y, x, channels)
'''
shape = tf.shape(x)
batch_size = shape[0]
#spatial_dims = shape[1:-1]
#spatial_dim = tf.reduce_prod(spatial_dims)
depth_dim = shape[3]
if self.positional_encoding:
x_index = tf.range(self.spatial_dim, dtype=tf.int32)
pos_emb = self.pos_embedding(x_index) # (spa_dim, d_model)
pos_emb = tf.reshape(pos_emb, (self.spatial_dims[0], self.spatial_dims[1], self.d_model)) #for broadcasting purpose
x = x + pos_emb # broadcast
q = self.wq(x) # (batch_size, *spa_dims, d_model)
k = self.wk(x) # (batch_size, *spa_dims, d_model)
v = self.wv(x) # (batch_size, *spa_dims, d_model)
q = tf.reshape(q, (batch_size, -1, depth_dim)) # (batch_size, spa_dim, d_model)
k = tf.reshape(k, (batch_size, -1, depth_dim))
v = tf.reshape(v, (batch_size, -1, depth_dim))
# scaled_attention.shape == (batch_size, spa_dims, depth)
# attention_weights.shape == (batch_size, spa_dims, spa_dims)
scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v)
output = tf.reshape(scaled_attention, (batch_size, self.spatial_dims[0], self.spatial_dims[1], self.d_model))
tf.identity(attention_weights, name=self.name+"_attention_weights")
return output, attention_weights
def compute_output_shape(self, input_shape):
return input_shape[:-1]+(self.d_model,), (input_shape[0],self.spatial_dim,self.spatial_dim)
def scaled_dot_product_attention(q, k, v):
"""Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
Returns:
output, attention_weights
from : https://www.tensorflow.org/tutorials/text/transformer
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
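# A minimal usage sketch (illustrative only, not part of the original module):
# a batch of two 4x4 feature maps with 16 channels, so the input depth matches
# d_model as the positional-encoding note in the constructor requires.
if __name__ == "__main__":
    layer = SelfAttention(d_model=16, spatial_dims=(4, 4))
    x = tf.random.normal((2, 4, 4, 16))
    out, weights = layer(x)
    print(out.shape)      # (2, 4, 4, 16)
    print(weights.shape)  # (2, 16, 16): one attention row per spatial position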
|
StarcoderdataPython
|
1723940
|
<reponame>cyandterry/Python-Study
"""
There are N gas stations along a circular route, where the amount of gas at station i is gas[i].
You have a car with an unlimited gas tank and it costs cost[i] of gas to travel from station i to its next station (i+1). You begin the journey with an empty tank at one of the gas stations.
Return the starting gas station's index if you can travel around the circuit once, otherwise return -1.
Note:
The solution is guaranteed to be unique.
"""
class Solution:
# @param gas, a list of integers
# @param cost, a list of integers
# @return an integer
def canCompleteCircuit(self, gas, cost):
N = len(gas)
start_node = 0
total_gas = 0
cur_gas = 0
for i in range(N):
total_gas += gas[i] - cost[i]
cur_gas += gas[i] - cost[i]
if cur_gas < 0:
start_node = i + 1
cur_gas = 0
if total_gas < 0:
return -1
else:
return start_node % N
# Note:
# 1. Notice how start_node is reset to i + 1 whenever cur_gas drops below zero,
#    and how the final return value is taken modulo N.
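# A minimal usage sketch (illustrative only, not part of the original solution):
if __name__ == "__main__":
    solution = Solution()
    # Surplus only builds up from station 3 onwards, so the answer is index 3.
    assert solution.canCompleteCircuit([1, 2, 3, 4, 5], [3, 4, 5, 1, 2]) == 3
    # Total cost exceeds total gas, so no starting station works.
    assert solution.canCompleteCircuit([2, 3, 4], [3, 4, 3]) == -1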
|
StarcoderdataPython
|
3399762
|
# -*- coding: utf-8 -*-
import sys
import pytest
import requests_mock
from chaoslib.exceptions import InvalidActivity
from chaoslib.activity import ensure_activity_is_valid
from chaoslib.types import Action
from fixtures import actions
def test_empty_action_is_invalid():
with pytest.raises(InvalidActivity) as exc:
ensure_activity_is_valid(actions.EmptyAction)
assert "empty activity is no activity" in str(exc.value)
|
StarcoderdataPython
|
1790017
|
<filename>poetry_model.py
# *-* coding:utf-8 *-*
'''
@author: ioiogoo
@date: 2018/1/31 19:33
'''
import random
import os
import keras
import numpy as np
from keras.callbacks import LambdaCallback
from keras.models import Input, Model, load_model
from keras.layers import LSTM, Dropout, Dense, Flatten, Bidirectional, Embedding, GRU
from keras.optimizers import Adam
from data_utils import *
class PoetryModel(object):
def __init__(self, config):
self.model = None
self.do_train = True
self.loaded_model = False
self.config = config
        # Preprocess the corpus file
self.word2numF, self.num2word, self.words, self.files_content = preprocess_file(self.config)
        # If a saved model file exists, load it directly; otherwise start training
if os.path.exists(self.config.weight_file):
self.model = load_model(self.config.weight_file)
self.model.summary()
else:
self.train()
self.do_train = False
self.loaded_model = True
def build_model(self):
        '''Build the model.'''
        # Input dimension
input_tensor = Input(shape=(self.config.max_len,))
embedd = Embedding(len(self.num2word) + 2, 300, input_length=self.config.max_len)(input_tensor)
lstm = Bidirectional(GRU(128, return_sequences=True))(embedd)
# dropout = Dropout(0.6)(lstm)
# lstm = LSTM(256)(dropout)
# dropout = Dropout(0.6)(lstm)
flatten = Flatten()(lstm)
dense = Dense(len(self.words), activation='softmax')(flatten)
self.model = Model(inputs=input_tensor, outputs=dense)
optimizer = Adam(lr=self.config.learning_rate)
self.model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
def sample(self, preds, temperature=1.0):
'''
        Sample an index from the prediction distribution, rescaled by temperature.
        With temperature=1.0 the distribution is used as-is; lower values (e.g. 0.5)
        sharpen it, giving more conservative output, while higher values (e.g. 1.5)
        flatten it, giving more varied output. During training you can see how
        different temperatures produce noticeably different samples.
'''
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
def generate_sample_result(self, epoch, logs):
        '''During training, print sample generations at the end of each epoch.'''
# if epoch % 5 != 0:
# return
print("\n==================Epoch {}=====================".format(epoch))
for diversity in [0.5, 1.0, 1.5]:
print("------------Diversity {}--------------".format(diversity))
start_index = random.randint(0, len(self.files_content) - self.config.max_len - 1)
generated = ''
sentence = self.files_content[start_index: start_index + self.config.max_len]
generated += sentence
for i in range(20):
x_pred = np.zeros((1, self.config.max_len))
for t, char in enumerate(sentence[-6:]):
x_pred[0, t] = self.word2numF(char)
preds = self.model.predict(x_pred, verbose=0)[0]
next_index = self.sample(preds, diversity)
next_char = self.num2word[next_index]
generated += next_char
sentence = sentence + next_char
print(sentence)
def predict(self, text):
        '''Generate lines of poetry from the given text.'''
if not self.loaded_model:
return
with open(self.config.poetry_file, 'r', encoding='utf-8') as f:
file_list = f.readlines()
random_line = random.choice(file_list)
        # If the given text is shorter than four characters, pad it with random characters
if not text or len(text) != 4:
for _ in range(4 - len(text)):
random_str_index = random.randrange(0, len(self.words))
text += self.num2word.get(random_str_index) if self.num2word.get(random_str_index) not in [',', '。',
','] else self.num2word.get(
random_str_index + 1)
seed = random_line[-(self.config.max_len):-1]
res = ''
seed = 'c' + seed
for c in text:
seed = seed[1:] + c
for j in range(5):
x_pred = np.zeros((1, self.config.max_len))
for t, char in enumerate(seed):
x_pred[0, t] = self.word2numF(char)
preds = self.model.predict(x_pred, verbose=0)[0]
next_index = self.sample(preds, 1.0)
next_char = self.num2word[next_index]
seed = seed[1:] + next_char
res += seed
return res
def data_generator(self):
        '''Generator that yields training samples one at a time.'''
i = 0
while 1:
x = self.files_content[i: i + self.config.max_len]
y = self.files_content[i + self.config.max_len]
puncs = [']', '[', '(', ')', '{', '}', ':', '《', '》', ':']
if len([i for i in puncs if i in x]) != 0:
i += 1
continue
if len([i for i in puncs if i in y]) != 0:
i += 1
continue
y_vec = np.zeros(
shape=(1, len(self.words)),
dtype=np.bool
)
y_vec[0, self.word2numF(y)] = 1.0
x_vec = np.zeros(
shape=(1, self.config.max_len),
dtype=np.int32
)
for t, char in enumerate(x):
x_vec[0, t] = self.word2numF(char)
yield x_vec, y_vec
i += 1
def train(self):
        '''Train the model.'''
number_of_epoch = len(self.files_content) // self.config.batch_size
if not self.model:
self.build_model()
self.model.summary()
self.model.fit_generator(
generator=self.data_generator(),
verbose=True,
steps_per_epoch=self.config.batch_size,
epochs=number_of_epoch,
callbacks=[
keras.callbacks.ModelCheckpoint(self.config.weight_file, save_weights_only=False),
LambdaCallback(on_epoch_end=self.generate_sample_result)
]
)
if __name__ == '__main__':
from config import Config
model = PoetryModel(Config)
while 1:
text = input("text:")
sentence = model.predict(text)
print(sentence)
|
StarcoderdataPython
|