code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from typing import List
from bs4 import BeautifulSoup
from base_parser import BaseParser
class SquarefactionRu_Parser(BaseParser):
def _parse(self) -> List[str]:
url = f'http://squarefaction.ru/main/search/games?q={self.game_name}'
rs = self.send_get(url)
root = BeautifulSoup(rs.content, 'html.parser')
# http://squarefaction.ru/main/search/games?q=dead+space
if '/main/search/games' in rs.url:
self.log_info(f'Parsing of game list')
for game_block in root.select('#games > .entry'):
title = self.get_norm_text(game_block.select_one('.name'))
if not self.is_found_game(title):
continue
# <div class="infos">TPS,Survival Horror,Action</div>
genres = self.get_norm_text(game_block.select_one('.infos')).split(',')
# Сойдет первый, совпадающий по имени, вариант
return genres
# http://squarefaction.ru/game/dead-space
else:
self.log_info(f'Parsing of game page')
game_block = root.select_one('#page-info')
if game_block:
title = self.get_norm_text(game_block.select_one('#title'))
if not self.is_found_game(title):
self.log_warn(f'Not match game title {title!r}')
# <td class="nowraps-links">
# <a href="/games?genre=tps">TPS</a>,
# <a href="/games?genre=survival-horror">Survival Horror</a>,
# <a href="/games?genre=action">Action</a>
# </td>
genres = [
self.get_norm_text(a) for a in game_block.select('a') if '?genre=' in a['href']
]
# Сойдет первый, совпадающий по имени, вариант
return genres
self.log_info(f'Not found game {self.game_name!r}')
return []
def get_game_genres(game_name: str, *args, **kwargs) -> List[str]:
return SquarefactionRu_Parser(*args, **kwargs).get_game_genres(game_name)
if __name__ == '__main__':
from common import _common_test
_common_test(get_game_genres)
# Search 'Hellgate: London'...
# Genres: ['Action RPG']
#
# Search 'The Incredible Adventures of Van Helsing'...
# Genres: ['Action RPG']
#
# Search 'Dark Souls: Prepare to Die Edition'...
# Genres: []
#
# Search 'Twin Sector'...
# Genres: []
#
# Search 'Call of Cthulhu: Dark Corners of the Earth'...
# Genres: ['Survival Horror']
|
[
"bs4.BeautifulSoup",
"common._common_test"
] |
[((2240, 2269), 'common._common_test', '_common_test', (['get_game_genres'], {}), '(get_game_genres)\n', (2252, 2269), False, 'from common import _common_test\n'), ((368, 408), 'bs4.BeautifulSoup', 'BeautifulSoup', (['rs.content', '"""html.parser"""'], {}), "(rs.content, 'html.parser')\n", (381, 408), False, 'from bs4 import BeautifulSoup\n')]
|
#!/usr/bin/env python
"""
Unit tests for Regional Intersection Graph -- NetworkX
- test_nxgraph_create
- test_nxgraph_sweepctor
- test_nxgraph_mdsweepctor
- test_nxgraph_sweepctor_graph
- test_nxgraph_sweepctor_random
"""
from io import StringIO
from typing import List, Tuple
from unittest import TestCase
from pprint import pprint
from networkx import networkx as nx
from slig.datastructs.rigraph import RIGraph
from slig.datastructs.region import Region
class TestRIGraph(TestCase):
test_regions: List[Region]
def setUp(self):
self.test_regions = []
self.test_regions.append(Region([0, 0], [5, 5]))
self.test_regions.append(Region([2, 2], [5, 10]))
self.test_regions.append(Region([1, 5], [3, 7]))
self.test_regions.append(Region([-5, 5], [1, 7]))
self.test_regions.append(Region([-5, 5], [2, 7]))
def test_nxgraph_create(self):
graph = RIGraph(dimension=1)
self.assertTrue(graph.G is not None)
self.assertTrue(isinstance(graph.G, nx.Graph))
def test_nxgraph_contains(self):
dimension = self.test_regions[0].dimension
graph = RIGraph(dimension=dimension)
for region in self.test_regions[0:3]:
graph.put_region(region)
self.assertTrue(self.test_regions[0].id in graph)
def test_nxgraph_put_region(self):
dimension = self.test_regions[0].dimension
graph = RIGraph(dimension=dimension)
for region in self.test_regions:
graph.put_region(region)
self.assertEqual(self.test_regions, list(graph.regions))
def test_nxgraph_put_intersect(self):
dimension = self.test_regions[0].dimension
graph = RIGraph(dimension=dimension)
graph.put_region(self.test_regions[0])
graph.put_region(self.test_regions[1])
graph.put_intersection(self.test_regions[0], self.test_regions[1])
intersection = self.test_regions[0].get_intersection(self.test_regions[1])
self.assertEqual(intersection, list(graph.intersections)[0])
def test_nxgraph_to_dict(self):
dimension = self.test_regions[0].dimension
graph = RIGraph(dimension=dimension)
graph.put_region(self.test_regions[0])
graph.put_region(self.test_regions[1])
graph.put_intersection(self.test_regions[0], self.test_regions[1])
intersection = self.test_regions[0].get_intersection(self.test_regions[1])
graphdict = {'id':graph.id,'dimension':dimension,'json_graph':'node_link',
'graph':{
'directed': False, 'multigraph': False, 'graph':{},
'nodes':[{'id':r.id, 'region':r} for r in graph.regions],
'links':[{'source': self.test_regions[0].id,
'target': self.test_regions[1].id,
'region': intersection}]
}}
self.assertEqual(graphdict, graph.to_dict())
def test_nxgraph_from_dict(self):
dimension = self.test_regions[0].dimension
graph = RIGraph(dimension=dimension)
graph.put_region(self.test_regions[0])
graph.put_region(self.test_regions[1])
graph.put_intersection(self.test_regions[0], self.test_regions[1])
self.assertEqual(graph.to_dict(),
RIGraph.from_dict(graph.to_dict()).to_dict())
|
[
"slig.datastructs.rigraph.RIGraph",
"slig.datastructs.region.Region"
] |
[((888, 908), 'slig.datastructs.rigraph.RIGraph', 'RIGraph', ([], {'dimension': '(1)'}), '(dimension=1)\n', (895, 908), False, 'from slig.datastructs.rigraph import RIGraph\n'), ((1103, 1131), 'slig.datastructs.rigraph.RIGraph', 'RIGraph', ([], {'dimension': 'dimension'}), '(dimension=dimension)\n', (1110, 1131), False, 'from slig.datastructs.rigraph import RIGraph\n'), ((1364, 1392), 'slig.datastructs.rigraph.RIGraph', 'RIGraph', ([], {'dimension': 'dimension'}), '(dimension=dimension)\n', (1371, 1392), False, 'from slig.datastructs.rigraph import RIGraph\n'), ((1636, 1664), 'slig.datastructs.rigraph.RIGraph', 'RIGraph', ([], {'dimension': 'dimension'}), '(dimension=dimension)\n', (1643, 1664), False, 'from slig.datastructs.rigraph import RIGraph\n'), ((2074, 2102), 'slig.datastructs.rigraph.RIGraph', 'RIGraph', ([], {'dimension': 'dimension'}), '(dimension=dimension)\n', (2081, 2102), False, 'from slig.datastructs.rigraph import RIGraph\n'), ((2868, 2896), 'slig.datastructs.rigraph.RIGraph', 'RIGraph', ([], {'dimension': 'dimension'}), '(dimension=dimension)\n', (2875, 2896), False, 'from slig.datastructs.rigraph import RIGraph\n'), ((598, 620), 'slig.datastructs.region.Region', 'Region', (['[0, 0]', '[5, 5]'], {}), '([0, 0], [5, 5])\n', (604, 620), False, 'from slig.datastructs.region import Region\n'), ((651, 674), 'slig.datastructs.region.Region', 'Region', (['[2, 2]', '[5, 10]'], {}), '([2, 2], [5, 10])\n', (657, 674), False, 'from slig.datastructs.region import Region\n'), ((705, 727), 'slig.datastructs.region.Region', 'Region', (['[1, 5]', '[3, 7]'], {}), '([1, 5], [3, 7])\n', (711, 727), False, 'from slig.datastructs.region import Region\n'), ((758, 781), 'slig.datastructs.region.Region', 'Region', (['[-5, 5]', '[1, 7]'], {}), '([-5, 5], [1, 7])\n', (764, 781), False, 'from slig.datastructs.region import Region\n'), ((812, 835), 'slig.datastructs.region.Region', 'Region', (['[-5, 5]', '[2, 7]'], {}), '([-5, 5], [2, 7])\n', (818, 835), False, 'from slig.datastructs.region import Region\n')]
|
# -*- coding: utf-8 -*-
# Written by <NAME> (<EMAIL>)
import os
import pprint
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
from wbia_orientation.config.default import _C as cfg # NOQA
from wbia_orientation.config.default import update_config
from wbia_orientation.core.function import validate
from wbia_orientation.dataset import custom_transforms
from wbia_orientation.dataset.animal import AnimalDataset
from wbia_orientation.train import parse_args, _make_model, _model_to_gpu, _make_loss
from wbia_orientation.utils.utils import create_logger
def _make_test_data(cfg, logger):
"""Initialise train and validation loaders as per config parameters
Input:
cfg: config object
logger: logging object
Returns:
test_loader: Data Loader over test dataset
test_dataset: test dataset object
"""
test_transform = transforms.Compose(
[
custom_transforms.CropObjectAlignedArea(noise=0.0),
custom_transforms.Resize(cfg.MODEL.IMSIZE),
custom_transforms.ToTensor(),
custom_transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
input_size=cfg.MODEL.IMSIZE[0],
),
]
)
test_dataset = AnimalDataset(cfg, cfg.DATASET.TEST_SET, test_transform)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=cfg.TEST.BS * len(cfg.GPUS),
shuffle=False,
num_workers=cfg.WORKERS,
pin_memory=cfg.PIN_MEMORY,
)
return test_loader, test_dataset
def main():
args = parse_args()
update_config(cfg, args)
logger, final_output_dir = create_logger(cfg, args.cfg, 'test', False)
logger.info(pprint.pformat(args))
logger.info(cfg)
# cudnn related setting
cudnn.benchmark = cfg.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
# Initialise models
model = _make_model(cfg, is_train=False)
# Load model weights
if cfg.TEST.MODEL_FILE:
model_state_file = cfg.TEST.MODEL_FILE
else:
model_state_file = os.path.join(final_output_dir, 'best.pth')
logger.info('=> loading model from {}'.format(model_state_file))
if cfg.USE_GPU:
model.load_state_dict(torch.load(model_state_file))
else:
model.load_state_dict(
torch.load(model_state_file, map_location=torch.device('cpu'))
)
model = _model_to_gpu(model, cfg)
# Initialise losses
loss_func = _make_loss(cfg)
# Initialise data loaders
test_loader, test_dataset = _make_test_data(cfg, logger)
# Evaluate on validation set
perf_indicator = validate(
cfg,
test_loader,
test_dataset,
model,
loss_func,
cfg.DATASET.TEST_SET,
final_output_dir,
)
logger.info(
'Final results. Accuracy@{} on {} {} is {:.2%}'.format(
cfg.TEST.THETA_THR, cfg.DATASET.NAME, cfg.DATASET.TEST_SET, perf_indicator
)
)
if __name__ == '__main__':
main()
|
[
"wbia_orientation.train._make_loss",
"pprint.pformat",
"wbia_orientation.dataset.custom_transforms.ToTensor",
"wbia_orientation.config.default.update_config",
"torch.load",
"wbia_orientation.train.parse_args",
"wbia_orientation.train._make_model",
"wbia_orientation.dataset.custom_transforms.Resize",
"wbia_orientation.dataset.animal.AnimalDataset",
"wbia_orientation.train._model_to_gpu",
"wbia_orientation.core.function.validate",
"torch.device",
"wbia_orientation.dataset.custom_transforms.CropObjectAlignedArea",
"wbia_orientation.dataset.custom_transforms.Normalize",
"os.path.join",
"wbia_orientation.utils.utils.create_logger"
] |
[((1435, 1491), 'wbia_orientation.dataset.animal.AnimalDataset', 'AnimalDataset', (['cfg', 'cfg.DATASET.TEST_SET', 'test_transform'], {}), '(cfg, cfg.DATASET.TEST_SET, test_transform)\n', (1448, 1491), False, 'from wbia_orientation.dataset.animal import AnimalDataset\n'), ((1770, 1782), 'wbia_orientation.train.parse_args', 'parse_args', ([], {}), '()\n', (1780, 1782), False, 'from wbia_orientation.train import parse_args, _make_model, _model_to_gpu, _make_loss\n'), ((1787, 1811), 'wbia_orientation.config.default.update_config', 'update_config', (['cfg', 'args'], {}), '(cfg, args)\n', (1800, 1811), False, 'from wbia_orientation.config.default import update_config\n'), ((1844, 1887), 'wbia_orientation.utils.utils.create_logger', 'create_logger', (['cfg', 'args.cfg', '"""test"""', '(False)'], {}), "(cfg, args.cfg, 'test', False)\n", (1857, 1887), False, 'from wbia_orientation.utils.utils import create_logger\n'), ((2174, 2206), 'wbia_orientation.train._make_model', '_make_model', (['cfg'], {'is_train': '(False)'}), '(cfg, is_train=False)\n', (2185, 2206), False, 'from wbia_orientation.train import parse_args, _make_model, _model_to_gpu, _make_loss\n'), ((2677, 2702), 'wbia_orientation.train._model_to_gpu', '_model_to_gpu', (['model', 'cfg'], {}), '(model, cfg)\n', (2690, 2702), False, 'from wbia_orientation.train import parse_args, _make_model, _model_to_gpu, _make_loss\n'), ((2743, 2758), 'wbia_orientation.train._make_loss', '_make_loss', (['cfg'], {}), '(cfg)\n', (2753, 2758), False, 'from wbia_orientation.train import parse_args, _make_model, _model_to_gpu, _make_loss\n'), ((2906, 3009), 'wbia_orientation.core.function.validate', 'validate', (['cfg', 'test_loader', 'test_dataset', 'model', 'loss_func', 'cfg.DATASET.TEST_SET', 'final_output_dir'], {}), '(cfg, test_loader, test_dataset, model, loss_func, cfg.DATASET.\n TEST_SET, final_output_dir)\n', (2914, 3009), False, 'from wbia_orientation.core.function import validate\n'), ((1905, 1925), 'pprint.pformat', 'pprint.pformat', (['args'], {}), '(args)\n', (1919, 1925), False, 'import pprint\n'), ((2345, 2387), 'os.path.join', 'os.path.join', (['final_output_dir', '"""best.pth"""'], {}), "(final_output_dir, 'best.pth')\n", (2357, 2387), False, 'import os\n'), ((1058, 1108), 'wbia_orientation.dataset.custom_transforms.CropObjectAlignedArea', 'custom_transforms.CropObjectAlignedArea', ([], {'noise': '(0.0)'}), '(noise=0.0)\n', (1097, 1108), False, 'from wbia_orientation.dataset import custom_transforms\n'), ((1122, 1164), 'wbia_orientation.dataset.custom_transforms.Resize', 'custom_transforms.Resize', (['cfg.MODEL.IMSIZE'], {}), '(cfg.MODEL.IMSIZE)\n', (1146, 1164), False, 'from wbia_orientation.dataset import custom_transforms\n'), ((1178, 1206), 'wbia_orientation.dataset.custom_transforms.ToTensor', 'custom_transforms.ToTensor', ([], {}), '()\n', (1204, 1206), False, 'from wbia_orientation.dataset import custom_transforms\n'), ((1220, 1339), 'wbia_orientation.dataset.custom_transforms.Normalize', 'custom_transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]', 'input_size': 'cfg.MODEL.IMSIZE[0]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, \n 0.225], input_size=cfg.MODEL.IMSIZE[0])\n', (1247, 1339), False, 'from wbia_orientation.dataset import custom_transforms\n'), ((2508, 2536), 'torch.load', 'torch.load', (['model_state_file'], {}), '(model_state_file)\n', (2518, 2536), False, 'import torch\n'), ((2633, 2652), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2645, 2652), False, 'import torch\n')]
|
# Generated by Django 3.0.9 on 2020-08-04 16:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("resources", "0083_auto_20200804_1634"),
("users", "0059_auto_20200706_1659"),
]
operations = [
migrations.AlterField(
model_name="authorization",
name="partners",
field=models.ManyToManyField(
blank=True,
help_text="The partner(s) for which the editor is authorized.",
limit_choices_to=models.Q(status__in=[0, 2]),
to="resources.Partner",
),
),
migrations.AlterField(
model_name="authorization",
name="stream",
field=models.ForeignKey(
blank=True,
help_text="The stream for which the editor is authorized.",
limit_choices_to=models.Q(partner__status__in=[0, 2]),
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="resources.Stream",
),
),
]
|
[
"django.db.models.Q"
] |
[((591, 618), 'django.db.models.Q', 'models.Q', ([], {'status__in': '[0, 2]'}), '(status__in=[0, 2])\n', (599, 618), False, 'from django.db import migrations, models\n'), ((958, 994), 'django.db.models.Q', 'models.Q', ([], {'partner__status__in': '[0, 2]'}), '(partner__status__in=[0, 2])\n', (966, 994), False, 'from django.db import migrations, models\n')]
|
import discord
from discord.ext import commands, tasks
from discord.ext.commands import has_permissions, CheckFailure
from utils.converters import CtxRoleConverter
from utils.utils import str2bool
from functools import reduce
import random
import json
import utils.embed as embed
from utils.colors import *
import os
#DB
from pymongo import MongoClient
import logging
# ENV
from dotenv import dotenv_values
ENV = dotenv_values(os.path.dirname(os.path.abspath(__file__)) + "/../.env")
class RoleManager(commands.Cog):
"""
Manager is useful to create and delete roles.
You can link a role to a chat or just create a role with a name that you like!
"""
def __init__(self, client):
self.client = client
# Some good paramters like timer and other shits
with open(os.path.dirname(os.path.abspath(__file__)) + '/../database/utils.json', 'r') as f:
info = json.load(f)
# Just to log everything :D
self.log = logging.getLogger(__name__)
# TODO: Loading things :P (I want to put it in a parent class, but i'm not sure at this moment)
self.delete_user_message = info['utils']['delete_user_message']
self.delete_system_message = info['utils']['delete_system_message']
self.db_client = MongoClient(ENV['MONGODB'])
self.guild_preferences_db = self.db_client[info['mongo']['database']][info['mongo']['collection']]
self.channel_permissions = [
"add_reactions",
"administrator",
"attach_files",
"ban_members",
"change_nickname",
"connect",
"create_instant_invite",
"deafen_members",
"embed_links",
"external_emojis",
"kick_members",
"manage_channels",
"manage_emojis",
"manage_guild",
"manage_messages",
"manage_nicknames",
"manage_permissions",
"manage_roles",
"manage_webhooks",
"mention_everyone",
"move_members",
"mute_members",
"priority_speaker",
"read_message_history",
"read_messages",
"request_to_speak",
"send_messages",
"send_tts_messages",
"speak",
"stream",
"use_external_emojis",
"use_slash_commands",
"use_voice_activation",
"value",
"view_audit_log",
"view_channel",
"view_guild_insights"
]
@commands.Cog.listener()
async def on_guild_channel_update(self, before, after):
'''
Function to monitor guild channels and delete a role linked to a channel if the channel was moved to trash
'''
# Mudou de categoria
if after.category == None:
return
elif (before.category == None and after.category != None) or (before.category.id != after.category.id):
guild = after.guild
info = self.guild_preferences_db.find_one({"_id": guild.id})
# Nome criado sempre que um chat é linkado a uma categoria!
if before.category != None:
role_name = before.category.name + " - " + before.name
else:
role_name = before.name
# Categoria que devo deletar o cargo
if after.category.id == info['archives']:
for r in guild.roles:
if r.name == role_name:
await r.delete()
embedmsg = embed.createEmbed(title="Cargo associado excluído!",
description= f"O cargo '{role_name}' associado ao canal foi excluído devido a movimentação do mesmo para os arquivos.",
color=rgb_to_int((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))),
fields=[
],
img="https://cdn.discordapp.com/emojis/753575574546415656.png?v=1")
# Send that shit
await after.send(embed=embedmsg)
self.log.debug(f"Role {role_name} deleted (Channel moved to archives)!")
return
@commands.Cog.listener()
async def on_guild_channel_delete(self, channel):
target_type_channels = ["text", "category"]
if channel.type.name.lower() not in target_type_channels:
return
elif channel.type.name.lower() == "text" and channel.category != None:
option = channel.category.name + " - " + channel.name
# I don't know why i did that shit, but i won't change
elif channel.type.name.lower() == "text":
option = channel.name
else:
option = channel.name
for r in channel.guild.roles:
if r.name == option:
role = r
await role.delete()
self.log.debug(f"Role '{option}' deleted because linked channel was deleted")
break
return
@commands.command(aliases=['criar'], pass_context=True)
@has_permissions(manage_roles = True)
async def create(self, ctx, *, args: str = "channel"):
"""Create a new role with the given name
"""
await ctx.message.delete(delay = self.delete_user_message)
linked_keys = ["channel", "category"]
role_name = self.linked_role(ctx, args) if args in linked_keys else args
# Defining useful variables
guild = ctx.guild
author = ctx.author
msg = ctx.message
role_exists, role = await self.role_exists(ctx, role_name)
if role_exists:
embedmsg = embed.createEmbed(title="CARGO JÁ EXISTE!",
description= f"O cargo <@&{role.id}> já está no servidor, não precisa criar de novo!🍻",
color=rgb_to_int((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))),
fields=[
("Como pegar?", f"Apenas digite '.get' e ele será adicionado na sua conta", False)
],
img="https://cdn.discordapp.com/emojis/814010519022600192.png?v=1")
await msg.channel.send(embed=embedmsg, delete_after= self.delete_system_message)
else:
# New Role Created!
new_role = await guild.create_role(name=role_name, mentionable=True)
self.log.info( (f"New role '{new_role.name}' created in guild {guild.name} : {guild.id}").encode('ascii', 'ignore').decode('ascii') )
# TODO: Especificar a mensagem de acordo com o cargo que foi criado!
embedmsg = embed.createEmbed(title="Novo Cargo!",
description= f"O cargo <@&{new_role.id}> foi criado por <@{author.id}>",
color=rgb_to_int((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))),
fields=[
("Como pegar?", f"Apenas digite .get no chat do cargo ou .get {new_role.name} e ele será adicionado na sua conta", False)
],
img="https://cdn.discordapp.com/emojis/859150737509580800.gif?v=1")
await msg.channel.send(embed=embedmsg)
return
@create.error
async def create_error(self, ctx, error):
await ctx.message.delete(delay = self.delete_user_message)
if isinstance(error, CheckFailure):
await ctx.send("**Erro:** Você não pode criar um cargo!", delete_after = self.delete_system_message)
else:
self.log.error(f"{error} - creation of a new role failed")
await ctx.send(error, delete_after = self.delete_system_message)
# TODO: Parent class too
async def role_exists(self, ctx, role_name):
"""
Method to check if a role exists in the current context, return a status and the role, if it exists.
"""
conv = commands.RoleConverter()
# If found it
# The role already exists
try:
r = await conv.convert(ctx, role_name)
return True, r
except commands.RoleNotFound:
return False, None
# TODO: Put it in a parent class
def linked_role(self, ctx, type: str):
"""
This function is used to return a name to a role linked to a channel or category
"""
guild = ctx.guild
author = ctx.author
msg = ctx.message
if type.lower() == "channel" and msg.channel.category != None:
option = msg.channel.category.name + " - " + msg.channel.name
elif type.lower() == "channel":
option = msg.channel.name
elif type.lower() == "category":
option = msg.channel.category.name
else:
raise ValueError("")
return option;
@commands.command(aliases=['deletar'], pass_context=True)
@has_permissions(manage_roles = True)
async def delete(self, ctx, *, role: commands.RoleConverter):
await ctx.message.delete(delay= self.delete_user_message)
await role.delete()
await ctx.send(f"**AVISO:** Cargo '{role.name}' apagado do servidor por <@{ctx.author.id}>!")
@delete.error
async def delete_error(self, ctx, error):
await ctx.message.delete(delay = self.delete_user_message)
if isinstance(error, CheckFailure):
await ctx.send("**Erro:** Você não pode deletar um cargo!", delete_after = self.delete_system_message)
else:
self.log.error(f"{error} - delete role failed")
await ctx.send(error, delete_after = self.delete_system_message)
async def _permission(self, ctx, role: CtxRoleConverter, mode: str, perm: str, can: bool):
guild = ctx.guild
author = ctx.author
msg = ctx.message
overwrite = discord.PermissionOverwrite()
# Fundamental
# x.attr_name = s
# setattr(x, 'attr_name', s)
if perm not in channel_permissions:
self.log.debug( f"[.permission] Permission {perm} not found!")
return
setattr(overwrite, perm, can)
if mode == 'category':
category = ctx.channel.category
await category.set_permissions(role, overwrite = overwrite)
elif mode == 'channel':
channel = ctx.channel
await channel.set_permissions(role, overwrite = overwrite)
else:
# TODO: N ta funcionando
await role.edit(permission = overwrite)
self.log.debug( (f'Permission {perm} was changed to {can} in role {role.name} in current category').encode('ascii', 'ignore').decode('ascii') )
fb = 'Permitido' if can else 'Proibido'
embedmsg = embed.createEmbed(title="Permissão alterada!",
description= f"O cargo <@&{role.id}> foi atualizado por <@{author.id}>",
color=rgb_to_int((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))),
fields=[
(f"Permissão '{perm}'", f"Atualizada para {fb}", False)
],
img="https://cdn.discordapp.com/emojis/765969524897218594.png?v=1")
await msg.channel.send(embed=embedmsg)
return
@commands.command(pass_context=True)
@has_permissions(manage_roles = True, manage_channels = True)
async def permission(self, ctx, *, args: str = ""):
"""
Arg List:
ctx -> Discord Context
role -> CtxRoleConverter
mode -> channel, category or role
perm -> permission to change
bool -> bool
"""
await ctx.message.delete(delay = self.delete_user_message)
splitted_args = args.split(' ')
if len(splitted_args) < 4 or args == "":
# Just for now
self.log.debug("[.permission] Missing args")
await self.permission_tutorial(ctx)
return;
can = str2bool(splitted_args[-1])
perm = splitted_args[-2]
mode = splitted_args[-3]
role_name = ' '.join(splitted_args[:-3])
status, role = await self.role_exists(ctx, role_name)
await self._permission(ctx, role, mode, perm, can)
async def permission_tutorial(self, ctx):
embedmsg = embed.createEmbed(title="Configurações de Permissões!",
description= f"Verifique a lista de argumentos e permissões",
color=rgb_to_int((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))),
fields=[
(f"Argumentos", f"""role -> Role
mode -> channel, category or role
perm -> permission to change
bool -> bool""", False),
(f"Permissões", "\n".join([item for item in self.channel_permissions]), False)
],
img="https://cdn.discordapp.com/emojis/767241157003837460.png?v=1")
await ctx.send(embed=embedmsg)
# Setup
def setup(client):
client.add_cog(RoleManager(client))
|
[
"pymongo.MongoClient",
"os.path.abspath",
"utils.utils.str2bool",
"discord.ext.commands.command",
"json.load",
"discord.ext.commands.RoleConverter",
"random.randint",
"discord.ext.commands.has_permissions",
"discord.ext.commands.Cog.listener",
"discord.PermissionOverwrite",
"logging.getLogger"
] |
[((2683, 2706), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (2704, 2706), False, 'from discord.ext import commands, tasks\n'), ((4513, 4536), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (4534, 4536), False, 'from discord.ext import commands, tasks\n'), ((5392, 5446), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['criar']", 'pass_context': '(True)'}), "(aliases=['criar'], pass_context=True)\n", (5408, 5446), False, 'from discord.ext import commands, tasks\n'), ((5453, 5487), 'discord.ext.commands.has_permissions', 'has_permissions', ([], {'manage_roles': '(True)'}), '(manage_roles=True)\n', (5468, 5487), False, 'from discord.ext.commands import has_permissions, CheckFailure\n'), ((9298, 9354), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['deletar']", 'pass_context': '(True)'}), "(aliases=['deletar'], pass_context=True)\n", (9314, 9354), False, 'from discord.ext import commands, tasks\n'), ((9361, 9395), 'discord.ext.commands.has_permissions', 'has_permissions', ([], {'manage_roles': '(True)'}), '(manage_roles=True)\n', (9376, 9395), False, 'from discord.ext.commands import has_permissions, CheckFailure\n'), ((11839, 11874), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (11855, 11874), False, 'from discord.ext import commands, tasks\n'), ((11881, 11937), 'discord.ext.commands.has_permissions', 'has_permissions', ([], {'manage_roles': '(True)', 'manage_channels': '(True)'}), '(manage_roles=True, manage_channels=True)\n', (11896, 11937), False, 'from discord.ext.commands import has_permissions, CheckFailure\n'), ((1030, 1057), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1047, 1057), False, 'import logging\n'), ((1343, 1370), 'pymongo.MongoClient', 'MongoClient', (["ENV['MONGODB']"], {}), "(ENV['MONGODB'])\n", (1354, 1370), False, 'from pymongo import MongoClient\n'), ((8325, 8349), 'discord.ext.commands.RoleConverter', 'commands.RoleConverter', ([], {}), '()\n', (8347, 8349), False, 'from discord.ext import commands, tasks\n'), ((10359, 10388), 'discord.PermissionOverwrite', 'discord.PermissionOverwrite', ([], {}), '()\n', (10386, 10388), False, 'import discord\n'), ((12560, 12587), 'utils.utils.str2bool', 'str2bool', (['splitted_args[-1]'], {}), '(splitted_args[-1])\n', (12568, 12587), False, 'from utils.utils import str2bool\n'), ((473, 498), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (488, 498), False, 'import os\n'), ((958, 970), 'json.load', 'json.load', (['f'], {}), '(f)\n', (967, 970), False, 'import json\n'), ((871, 896), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (886, 896), False, 'import os\n'), ((11479, 11501), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (11493, 11501), False, 'import random\n'), ((11503, 11525), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (11517, 11525), False, 'import random\n'), ((11527, 11549), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (11541, 11549), False, 'import random\n'), ((13079, 13101), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (13093, 13101), False, 'import random\n'), ((13103, 13125), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (13117, 13125), False, 'import random\n'), ((13127, 13149), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (13141, 13149), False, 'import random\n'), ((6242, 6264), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (6256, 6264), False, 'import random\n'), ((6266, 6288), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (6280, 6288), False, 'import random\n'), ((6290, 6312), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (6304, 6312), False, 'import random\n'), ((7185, 7207), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (7199, 7207), False, 'import random\n'), ((7209, 7231), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (7223, 7231), False, 'import random\n'), ((7233, 7255), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (7247, 7255), False, 'import random\n'), ((4028, 4050), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (4042, 4050), False, 'import random\n'), ((4052, 4074), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (4066, 4074), False, 'import random\n'), ((4076, 4098), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (4090, 4098), False, 'import random\n')]
|
from sqlalchemy import Column, String, Boolean, ForeignKey, Integer, Float
from ..db import Base
class Attribute(Base):
__tablename__ = "attribute"
id = Column(Integer, autoincrement=True, primary_key=True, unique=True, nullable=False)
type = Column(String(length=256), nullable=False)
remote_reference = Column(String(256), nullable=False)
key = Column(String(length=256), unique=True)
__mapper_args__ = {
'polymorphic_identity': 'attribute',
'polymorphic_on': type
}
class BooleanAttribute(Attribute):
__tablename__ = "boolean_attribute"
id = Column(Integer, ForeignKey('attribute.id'), primary_key=True)
value = Column(Boolean)
__mapper_args__ = {
'polymorphic_identity': bool.__name__
}
class IntegerAttribute(Attribute):
__tablename__ = "integer_attribute"
id = Column(Integer, ForeignKey('attribute.id'), primary_key=True)
value = Column(Integer)
__mapper_args__ = {
'polymorphic_identity': int.__name__
}
class FloatAttribute(Attribute):
__tablename__ = "float_attribute"
id = Column(Integer, ForeignKey('attribute.id'), primary_key=True)
value = Column(Float)
__mapper_args__ = {
'polymorphic_identity': float.__name__
}
class StringAttribute(Attribute):
__tablename__ = "string_attribute"
id = Column(Integer, ForeignKey('attribute.id'), primary_key=True)
value = Column(String(length=4096))
__mapper_args__ = {
'polymorphic_identity': str.__name__
}
|
[
"sqlalchemy.String",
"sqlalchemy.ForeignKey",
"sqlalchemy.Column"
] |
[((163, 250), 'sqlalchemy.Column', 'Column', (['Integer'], {'autoincrement': '(True)', 'primary_key': '(True)', 'unique': '(True)', 'nullable': '(False)'}), '(Integer, autoincrement=True, primary_key=True, unique=True, nullable\n =False)\n', (169, 250), False, 'from sqlalchemy import Column, String, Boolean, ForeignKey, Integer, Float\n'), ((676, 691), 'sqlalchemy.Column', 'Column', (['Boolean'], {}), '(Boolean)\n', (682, 691), False, 'from sqlalchemy import Column, String, Boolean, ForeignKey, Integer, Float\n'), ((929, 944), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (935, 944), False, 'from sqlalchemy import Column, String, Boolean, ForeignKey, Integer, Float\n'), ((1177, 1190), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (1183, 1190), False, 'from sqlalchemy import Column, String, Boolean, ForeignKey, Integer, Float\n'), ((264, 282), 'sqlalchemy.String', 'String', ([], {'length': '(256)'}), '(length=256)\n', (270, 282), False, 'from sqlalchemy import Column, String, Boolean, ForeignKey, Integer, Float\n'), ((330, 341), 'sqlalchemy.String', 'String', (['(256)'], {}), '(256)\n', (336, 341), False, 'from sqlalchemy import Column, String, Boolean, ForeignKey, Integer, Float\n'), ((376, 394), 'sqlalchemy.String', 'String', ([], {'length': '(256)'}), '(length=256)\n', (382, 394), False, 'from sqlalchemy import Column, String, Boolean, ForeignKey, Integer, Float\n'), ((618, 644), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""attribute.id"""'], {}), "('attribute.id')\n", (628, 644), False, 'from sqlalchemy import Column, String, Boolean, ForeignKey, Integer, Float\n'), ((871, 897), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""attribute.id"""'], {}), "('attribute.id')\n", (881, 897), False, 'from sqlalchemy import Column, String, Boolean, ForeignKey, Integer, Float\n'), ((1119, 1145), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""attribute.id"""'], {}), "('attribute.id')\n", (1129, 1145), False, 'from sqlalchemy import Column, String, Boolean, ForeignKey, Integer, Float\n'), ((1369, 1395), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""attribute.id"""'], {}), "('attribute.id')\n", (1379, 1395), False, 'from sqlalchemy import Column, String, Boolean, ForeignKey, Integer, Float\n'), ((1434, 1453), 'sqlalchemy.String', 'String', ([], {'length': '(4096)'}), '(length=4096)\n', (1440, 1453), False, 'from sqlalchemy import Column, String, Boolean, ForeignKey, Integer, Float\n')]
|
# -*- coding: utf-8 -*-
"""
SciHub client
"""
import logging
import os
import random
import urllib
import requests
from bs4 import BeautifulSoup
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
class SciHubClient:
"""
Client for accessing SciHub
"""
DEFAULT_HEADERS = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0",
}
SCIHUB_NOW_URL = "https://sci-hub.now.sh"
FALLBACK_BASE_URL = "https://sci-hub.tw"
def __init__(self, proxy=None, fallback_base_url=FALLBACK_BASE_URL):
self._sess = requests.Session()
self._sess.headers.update(self.DEFAULT_HEADERS)
self._fallback_base_url = fallback_base_url
self._available_base_url_list = self._get_available_scihub_urls()
self._set_base_url()
if proxy is not None:
self._set_proxy(proxy)
def _get(self, url, raise_for_status=True, **kwargs):
response = self._sess.get(url, **kwargs)
if raise_for_status is True:
response.raise_for_status()
return response
def _post(self, url, raise_for_status=True, **kwargs):
response = self._sess.post(url, **kwargs)
if raise_for_status is True:
response.raise_for_status()
return response
def _get_available_scihub_urls(self):
response = self._get(self.SCIHUB_NOW_URL, raise_for_status=False)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
LOG.debug("falling back to %s", self._fallback_base_url)
return [self._fallback_base_url]
parsed_content = BeautifulSoup(response.content, "html.parser")
urls = []
for a_tag in parsed_content.find_all("a", href=True):
link = a_tag["href"]
if (
"sci-hub" in link # pylint: disable=C0330
and link.startswith("https") # pylint: disable=C0330
and link != self.SCIHUB_NOW_URL # pylint: disable=C0330
):
urls.append(a_tag["href"])
return urls
def _set_proxy(self, proxy):
self._sess.proxies = {
"http": proxy,
"https": proxy,
}
def _set_base_url(self):
"""
Pick a random url from the available scihub urls
set the current base url to the new url
"""
if not self._available_base_url_list:
raise ValueError("Ran out of valid sci-hub urls")
(base_url,) = random.sample(self._get_available_scihub_urls(), 1)
self._base_url = base_url
LOG.debug("url changing to %s", self._base_url)
@staticmethod
def _get_doi(parsed_response):
((doi,),) = [
[
line.strip().split("'")[1]
for line in script.string.split("\n")
if "var doi" in line
]
for script in parsed_response.find_all("script")
if script.string and "var doi" in script.string
]
return doi
def query(self, query):
"""
Query for a paper hosted by sci-hub
"""
response = self._post(
self._base_url,
data={"request": query},
headers={"Content-Type": "application/x-www-form-urlencoded"},
)
parsed_response = BeautifulSoup(response.content, "html.parser")
if parsed_response.find("div").text.endswith("article not found"):
raise ValueError(f"Article not found: {query}")
cleaned_url = urllib.parse.urlparse(
urllib.parse.urldefrag(parsed_response.find("iframe").get("src")).url,
scheme="https",
).geturl()
return {
"doi": self._get_doi(parsed_response),
"pdf_url": cleaned_url,
}
def _download_pdf(self, url):
result = self._get(url)
if result.headers["Content-Type"] != "application/pdf":
raise ValueError("File is not a pdf")
return result.content
def _get_paper_meta(self, doi):
return self._get(
urllib.parse.urljoin("https://doi.org", doi),
headers={"Accept": "application/vnd.citationstyles.csl+json"},
).json()
def _generate_file_name(self, doi):
paper_meta = self._get_paper_meta(doi)
# date = "-".join(map(str, paper_meta["indexed"]["date-parts"][0]))
((year, _, _),) = paper_meta["published-print"]["date-parts"]
title = paper_meta["title"]
# return f"({date}) {title}.pdf"
return f"({year}) {title}.pdf"
def download(self, query, destination="", filename=None):
"""
Download paper from sci-hub
"""
query_result = self.query(query)
pdf_string = self._download_pdf(query_result["pdf_url"])
filename = (
self._generate_file_name(query_result["doi"])
if filename is None
else filename
)
out_path = os.path.join(destination, filename)
with open(out_path, "wb") as out_fp:
out_fp.write(pdf_string)
return {"out_path": out_path, **query_result}
|
[
"urllib.parse.urljoin",
"requests.Session",
"logging.NullHandler",
"bs4.BeautifulSoup",
"os.path.join",
"logging.getLogger"
] |
[((157, 184), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (174, 184), False, 'import logging\n'), ((200, 221), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (219, 221), False, 'import logging\n'), ((604, 622), 'requests.Session', 'requests.Session', ([], {}), '()\n', (620, 622), False, 'import requests\n'), ((1677, 1723), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""html.parser"""'], {}), "(response.content, 'html.parser')\n", (1690, 1723), False, 'from bs4 import BeautifulSoup\n'), ((3389, 3435), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""html.parser"""'], {}), "(response.content, 'html.parser')\n", (3402, 3435), False, 'from bs4 import BeautifulSoup\n'), ((5032, 5067), 'os.path.join', 'os.path.join', (['destination', 'filename'], {}), '(destination, filename)\n', (5044, 5067), False, 'import os\n'), ((4148, 4192), 'urllib.parse.urljoin', 'urllib.parse.urljoin', (['"""https://doi.org"""', 'doi'], {}), "('https://doi.org', doi)\n", (4168, 4192), False, 'import urllib\n')]
|
from BusArrivalItem import BusArrivalItem
from api import call
# bus_arrival_item = BusArrivalItem(xml_root.find('msgBody').find('busArrivalItem'))
# print(bus_arrival_item)
def fetch(station_id: str, route_id: str):
response = call(
'busarrivalservice',
{
'stationId': station_id,
'routeId': route_id
}
)
if response is None:
return None
return ''.join(
map(
lambda list_element: str(BusArrivalItem(list_element)),
response
)
)
if __name__ == '__main__':
print(fetch('218000952', '241449005'))
|
[
"BusArrivalItem.BusArrivalItem",
"api.call"
] |
[((235, 308), 'api.call', 'call', (['"""busarrivalservice"""', "{'stationId': station_id, 'routeId': route_id}"], {}), "('busarrivalservice', {'stationId': station_id, 'routeId': route_id})\n", (239, 308), False, 'from api import call\n'), ((482, 510), 'BusArrivalItem.BusArrivalItem', 'BusArrivalItem', (['list_element'], {}), '(list_element)\n', (496, 510), False, 'from BusArrivalItem import BusArrivalItem\n')]
|
"""
Author: <NAME>
GitHub: wafflescore
"""
from minisom import MiniSom, asymptotic_decay
import numpy as np
import matplotlib.pyplot as plt
import itertools
from skimage import measure
from skimage.segmentation import random_walker
from skimage import filters
from scipy.spatial import distance
from collections import Counter
from timeit import default_timer as timer
import random
from acse_9_irp_wafflescore import MiscHelpers as mh
import logging
import sys
logging.basicConfig(format='%(asctime)s | %(levelname)s : %(message)s',
level=logging.INFO, stream=sys.stdout)
def compute_dim(num_sample):
"""
Compute a default dimension of the SOMs.
This function returns the dimension size of the SOMs.
The size returned is sqrt(5 * sqrt(num_sample)), with the exception
that the minimum dimension size = 10
Parameters
----------
num_sample : int
Total number of data points that will populate the SOMs
Returns
-------
int
Ideal dimension.
"""
dim = 5 * np.sqrt(num_sample)
dim = np.int(np.sqrt(dim))
if dim < 10:
return 10
else:
return dim
def som_assemble(in_data, seed, dim, lr=0.5, sigma=2.5):
"""Initialize the SOMs model for training
Parameters
----------
in_data : np.array or list
data matrix
seed : integer
random seed for reproducibility
dim : int
dimension of the SOMs distance matrix
lr : float, optional
learning rate, by default 0.5
sigma : float, optional
spread of the neighborhood function, by default 2.5
Returns
-------
MiniSom
an object of Minisom class, see minisom.py for further details
"""
# Initialization som and weights
num_features = np.shape(in_data)[1]
som = MiniSom(dim, dim, num_features, sigma=sigma, learning_rate=lr,
neighborhood_function='gaussian', random_seed=seed)
som.pca_weights_init(in_data)
return som
def plot_som(som, in_data, label, save=False, save_name='temp'):
"""plots the distance map / u-matrix of the SOMs along with the label
Parameters
----------
som : MiniSom
trained Minisom object
in_data : np.array or list
data matrix
label : np.array or list
the true label of each data point
save : bool, optional
flag, by default False
save_name : str, optional
the name which will be used to save the plot as png file,
by default 'temp'
"""
plt.figure(figsize=(9, 7))
# Plotting the response for each litho-class
plt.pcolor(som.distance_map().T, cmap='bone_r')
# plotting the distance map as background
plt.colorbar()
for t, xx in zip(label, in_data):
w = som.winner(xx) # getting the winner
# palce a marker on the winning position for the sample xx
plt.text(w[0]+.5, w[1]+.5, str(t),
color=plt.cm.rainbow(t/10.))
plt.axis([0, som.get_weights().shape[0], 0, som.get_weights().shape[1]])
if(save):
save_dir = 'SOMs_results/' + save_name + '_plot.png'
plt.savefig(save_dir)
print('Plot saved at:', save_dir)
plt.show()
def save_som_report(som, save_name, it, et, report=None):
param_vals = str(save_name) + '\n---' + \
'\niterations,' + str(it) + \
'\nelapsed time,' + str(et) + '\n\n'
# save report to file
fdir = save_name + '_report.csv'
print('Report saved at', fdir)
mode = 'w'
f1 = open(fdir, mode)
f1.write(param_vals)
if(report):
f1.write(str(report))
f1.write('\n\n--------------------\n\n')
f1.close()
print('Report saved at:', fdir)
def histedges_equalN(in_data, nbin=10):
"""generates a histogram where each bin will contain the same number of
data points
Parameters
----------
in_data : np.array or list
data array
nbin : int
number of bins to populate, by default 10
Returns
-------
np.array
numpy array of all the histogram bins
"""
ttl_dtp = len(in_data)
return np.interp(np.linspace(0, ttl_dtp, nbin + 1),
np.arange(ttl_dtp),
np.sort(in_data))
def plot_u_matrix(som_u_mat):
"""Plots the distance map / u-matrix of the SOMs
Parameters
----------
som : MiniSom
trained Minisom object
Returns
-------
np.array
numpy array of all the histogram bins
"""
f_image = som_u_mat.flatten()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
fig.show()
ax1.pcolor(som_u_mat, cmap='bone_r')
hist = plt.hist(f_image, histedges_equalN(f_image, 10), density=True)
return hist[1]
def gen_e_model(n_map, som_label):
"""generates the Earth model from neuron map"""
som_class = []
for i in range(len(n_map)):
som_class.append(som_label[n_map[i][0]][n_map[i][1]])
return np.array(som_class)
def closest_n(value):
"""Assign cluster number to the mask's border indexes by using the
closest neighbor's value
Parameters
----------
value : np.array
numpy array of the cluster number, noted that the borders are marked
with 0
Returns
-------
np.array
new label with all the border index populated
"""
borders = np.array(np.where(value == 0)).T
new_label = np.array(value)
vals = np.where(value != 0)
vals = np.array(vals).T
for b in borders:
# find index of the closest value
c_idx = distance.cdist([b], vals).argmin()
new_label[b[0], b[1]] = value[vals[c_idx, 0]][vals[c_idx, 1]]
return new_label
def KNN(value, k=5, border_val=0):
"""Assign cluster number to the mask's border indexes by using the
K-nearest neighbor method
Parameters
----------
value : np.array
numpy array of the cluster number, noted that the borders are marked
with 0
k : int, optional
number of neighbor to consider, by default 5
Returns
-------
np.array
new label with all the border index populated
"""
borders = np.array(np.where(value == border_val)).T
new_label = np.array(value)
vals = np.where(value != 0)
if(len(vals[0]) < 5):
logging.info("Not enough labeled neighbor to perform KNN.\n\
Will return the original inputted value.")
return value
vals = np.array(vals).T
for b in borders:
# find index of the closest k neighbors
dist = distance.cdist([b], vals)
c_idx = np.argpartition(dist, k)
c_idx = c_idx[0, :k]
mins_idx = np.array(list(zip(vals[c_idx, 0], vals[c_idx, 1])))
class_counter = Counter()
for idx in mins_idx:
class_counter[value[idx[0], idx[1]]] += 1
cl = class_counter.most_common(1)[0][0]
new_label[b[0], b[1]] = cl
return new_label
def watershed_level(image, bins, border_width=0.1, plot=False, conn=None):
num_bins = len(bins)
"""Computes and classify the SOM's u-matrix or total gradient using
watershed classification method
Parameters
----------
image : np.array
u-matrix or total gradient of the SOMs
bins : np.array
numpy array of all the histogram bins
plot : bool, optional
flag whether to plot the watershed level or not, by default False
conn : int, optional
connectivity flag for measure.label, by default None
Returns
-------
np.array
numpy array of predicted cluster labels from each watershed level
"""
ncols = 6
if(plot):
fig, axes = plt.subplots(ncols=ncols, nrows=num_bins,
figsize=(12, num_bins*3),
sharex=True, sharey=True)
ax = axes.ravel()
ws_labels = np.zeros((num_bins * ncols, image.shape[0], image.shape[1]))
for i in range(num_bins):
val = filters.threshold_local(image, block_size=3 + 2*i)
block_mask = (image < val)
markers = measure.label(block_mask, connectivity=conn)
ws_labels[i*ncols] = closest_n(markers) - 1
ws_labels[i*ncols + 1] = KNN(markers) - 1
ws_labels[i*ncols + 2] = random_walker(image, markers)
if(plot):
ax[i*ncols].imshow(ws_labels[i*ncols + 0], origin='lower')
ax[i*ncols].title.set_text('b_cn: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 0]))))
ax[i*ncols + 1].imshow(ws_labels[i*ncols + 1], origin='lower')
ax[i*ncols + 1].title.set_text('b_knn: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 1]))))
ax[i*ncols + 2].imshow(ws_labels[i*ncols + 2], origin='lower')
ax[i*ncols + 2].title.set_text('b_rw: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 2]))))
thres_mask = (image <= bins[i])
markers = measure.label(thres_mask, connectivity=conn)
ws_labels[i*ncols + 3] = closest_n(markers) - 1
ws_labels[i*ncols + 4] = KNN(markers) - 1
ws_labels[i*ncols + 5] = random_walker(image, markers)
if(plot):
ax[i*ncols + 3].imshow(ws_labels[i*ncols + 3], origin='lower')
ax[i*ncols + 3].title.set_text('b_cn: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 3]))))
ax[i*ncols + 4].imshow(ws_labels[i*ncols + 4], origin='lower')
ax[i*ncols + 4].title.set_text('b_knn: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 4]))))
ax[i*ncols + 5].imshow(ws_labels[i*ncols + 5], origin='lower')
ax[i*ncols + 5].title.set_text('b_rw: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 5]))))
return ws_labels
def eval_ws(in_data, ws_labels, n_map, label=None, re_all=False):
"""Evaluate and return the best watershed prediction result
Parameters
----------
in_data : np.array or list
data matrix
ws_labels : np.array
predicted cluster labels from watershed segmentation
n_map : np.array
array of the winner neuron
label : np.array or list, optional
the true label of each data point
Returns
-------
np.array
list of best watershed labels, may contain more than one set
"""
len_watershed = ws_labels.shape[0]
cluster_labels = np.zeros((len_watershed, len(in_data)))
avg_sils = np.full(len_watershed, np.nan)
ch_scs = np.full(len_watershed, np.nan)
if(label is not None):
avg_ents = np.full(len_watershed, np.nan)
avg_purs = np.full(len_watershed, np.nan)
for i in range(len_watershed):
param = {'watershed idx': i}
if(len(np.unique(ws_labels[i])) > 1):
cluster_labels[i] = gen_e_model(n_map, ws_labels[i])
avg_sils[i] = mh.int_eval_silhouette(in_data, cluster_labels[i],
method='som_watershed',
param=param)
try:
ch_scs[i] = mh.cal_har_sc(in_data, cluster_labels[i])
except:
ch_scs[i] = -1
if(label is not None):
avg_ents[i], avg_purs[i] = mh.ext_eval_entropy(label,
cluster_labels[i])
best_idx = []
best_idx.append(np.nanargmax(np.array(avg_sils))) # closest to 1
best_idx.append(np.nanargmax(ch_scs)) # higher = better
if(label is not None):
best_idx.append(np.nanargmin(np.array(avg_ents))) # closest to 0
best_idx.append(np.nanargmax(np.array(avg_purs))) # closest to 1
best_idx = np.unique(best_idx)
if(re_all):
return (cluster_labels, avg_sils,
ch_scs, best_idx)
else:
return (cluster_labels[best_idx], avg_sils[best_idx],
ch_scs[best_idx])
def run_SOMs(in_data, dim, iter_cnt, lr, sigma, seed=10):
"""Method to fully run SOMs
Parameters
----------
in_data : np.array or list
data matrix
dim : int
dimension of the SOMs distance matrix
iter_cnt : integer
number of iterations for SOMs to perform
lr : float
learning rate
sigma : float
spread of the neighborhood function, by default 2.5dim : int
seed : integer, optional
random seed for reproducibility, by default 10
Returns
-------
minisom
minisom object
np.array
cluster label
"""
som = som_assemble(in_data, seed, dim, lr, sigma)
som.train_random(in_data, iter_cnt, verbose=False)
u_matrix = som.distance_map().T
watershed_bins = histedges_equalN(u_matrix.flatten())
ws_labels = watershed_level(u_matrix, watershed_bins)
n_map = som.neuron_map(in_data)
cluster_labels, _, _ = eval_ws(in_data, ws_labels, n_map)
return som, cluster_labels
def gen_param_grid(init_guess):
g_dim, g_it, g_lr, g_sigma = init_guess
min_dim = g_dim - 10 if g_dim - 5 > 10 else 10
max_dim = g_dim + 10 if g_dim + 10 > 10 else 20
param_grid = {
'dim': list(range(min_dim, max_dim+1)),
'iter_cnt': list(range(g_it - 500, g_it + 500, 200)),
'learning_rate': list(np.logspace(np.log10(0.25), np.log10(0.75),
base=10, num=100)),
'sigma': list(np.linspace(g_sigma-1, g_sigma+1, num=30)),
}
return param_grid
def random_search_som(in_data, init_guess, max_eval=20, label=None, seed=10,
re_all=False):
"""perform random search for SOMs best parameters.
Parameters
----------
in_data : np.array or list
data matrix
init_guess : tuple
list of initial guess of the parameters, in order of dimension,
number of iterations, learning rate, and sigma
max_eval : int, optional
number of max iterartion to perform the search, by default 20
label : np.array or list, optional
the true label of each data point, by default None
seed : integer, optional
random seed for reproducibility, by default 10
Returns
-------
All cluster label and its counterpart parameters.
"""
random.seed(seed)
param_grid = gen_param_grid(init_guess)
dims = np.zeros(max_eval)
iters = np.zeros(max_eval)
lrs = np.zeros(max_eval)
sigmas = np.zeros(max_eval)
avg_sils = np.full(max_eval, np.nan)
ch_scs = np.full(max_eval, np.nan)
cluster_labels = np.zeros((max_eval, len(in_data)))
if(label is not None):
avg_ents = np.full(max_eval, np.nan)
avg_purs = np.full(max_eval, np.nan)
i = 0
while i < max_eval:
random_params = {k: random.sample(v, 1)[0]
for k, v in param_grid.items()}
dims[i], iters[i], lrs[i], sigmas[i] = list(random_params.values())
som = som_assemble(in_data, seed, int(dims[i]), lr=lrs[i], sigma=sigmas[i])
som.train_random(in_data, int(iters[i]), verbose=False)
u_matrix = som.distance_map().T
watershed_bins = histedges_equalN(u_matrix.flatten())
ws_labels = watershed_level(u_matrix, watershed_bins)
n_map = som.neuron_map(in_data)
_c, _as, _ch = eval_ws(in_data, ws_labels, n_map)
cluster_labels[i], avg_sils[i], ch_scs[i] = _c[0], _as[0], _ch[0]
n_clusters = len(np.unique(cluster_labels[i]))
if(n_clusters < 5 or n_clusters > 30):
logging.info("Random search using dim=%d, iter=%d, lr=%.6f, sigma=%.6f\
result to very small / large number of clusters (n_clusters = %d)\
" % (dims[i], iters[i], lrs[i], sigmas[i], n_clusters))
continue
logging.info("dim=%d, iter=%d, lr=%.6f, sigma=%.6f, sil=%.6f, ch=%.6f" % (dims[i], iters[i], lrs[i], sigmas[i], avg_sils[i], ch_scs[i]))
if(label is not None):
avg_ents[i], avg_purs[i] = mh.ext_eval_entropy(label, cluster_labels[i], init_clus=-1)
logging.info("ent=%.6f, pur=%.6f" % (avg_ents[i], avg_purs[i]))
i += 1
best_idx = []
best_idx.append(np.nanargmax(np.array(avg_sils))) # closest to 1
best_idx.append(np.nanargmax(ch_scs)) # higher = better
if(label is not None):
best_idx.append(np.nanargmin(np.array(avg_ents))) # closest to 0
best_idx.append(np.nanargmax(np.array(avg_purs))) # closest to 1
best_idx = np.unique(best_idx)
if(re_all):
return (cluster_labels, avg_sils,
ch_scs, dims, iters, lrs, sigmas, best_idx)
else:
return (cluster_labels[best_idx], avg_sils[best_idx],
ch_scs[best_idx], dims[best_idx], iters[best_idx],
lrs[best_idx], sigmas[best_idx])
|
[
"random.sample",
"matplotlib.pyplot.cm.rainbow",
"acse_9_irp_wafflescore.MiscHelpers.ext_eval_entropy",
"numpy.shape",
"numpy.argpartition",
"matplotlib.pyplot.figure",
"skimage.measure.label",
"numpy.arange",
"numpy.unique",
"numpy.full",
"matplotlib.pyplot.colorbar",
"random.seed",
"numpy.linspace",
"collections.Counter",
"numpy.log10",
"matplotlib.pyplot.subplots",
"scipy.spatial.distance.cdist",
"matplotlib.pyplot.show",
"skimage.filters.threshold_local",
"numpy.sort",
"numpy.nanargmax",
"acse_9_irp_wafflescore.MiscHelpers.cal_har_sc",
"logging.basicConfig",
"skimage.segmentation.random_walker",
"numpy.zeros",
"acse_9_irp_wafflescore.MiscHelpers.int_eval_silhouette",
"logging.info",
"numpy.where",
"numpy.array",
"minisom.MiniSom",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((468, 582), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s | %(levelname)s : %(message)s"""', 'level': 'logging.INFO', 'stream': 'sys.stdout'}), "(format='%(asctime)s | %(levelname)s : %(message)s',\n level=logging.INFO, stream=sys.stdout)\n", (487, 582), False, 'import logging\n'), ((1828, 1946), 'minisom.MiniSom', 'MiniSom', (['dim', 'dim', 'num_features'], {'sigma': 'sigma', 'learning_rate': 'lr', 'neighborhood_function': '"""gaussian"""', 'random_seed': 'seed'}), "(dim, dim, num_features, sigma=sigma, learning_rate=lr,\n neighborhood_function='gaussian', random_seed=seed)\n", (1835, 1946), False, 'from minisom import MiniSom, asymptotic_decay\n'), ((2546, 2572), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 7)'}), '(figsize=(9, 7))\n', (2556, 2572), True, 'import matplotlib.pyplot as plt\n'), ((2724, 2738), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2736, 2738), True, 'import matplotlib.pyplot as plt\n'), ((3212, 3222), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3220, 3222), True, 'import matplotlib.pyplot as plt\n'), ((4583, 4618), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 5)'}), '(1, 2, figsize=(12, 5))\n', (4595, 4618), True, 'import matplotlib.pyplot as plt\n'), ((4983, 5002), 'numpy.array', 'np.array', (['som_class'], {}), '(som_class)\n', (4991, 5002), True, 'import numpy as np\n'), ((5437, 5452), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (5445, 5452), True, 'import numpy as np\n'), ((5465, 5485), 'numpy.where', 'np.where', (['(value != 0)'], {}), '(value != 0)\n', (5473, 5485), True, 'import numpy as np\n'), ((6251, 6266), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (6259, 6266), True, 'import numpy as np\n'), ((6279, 6299), 'numpy.where', 'np.where', (['(value != 0)'], {}), '(value != 0)\n', (6287, 6299), True, 'import numpy as np\n'), ((7918, 7978), 'numpy.zeros', 'np.zeros', (['(num_bins * ncols, image.shape[0], image.shape[1])'], {}), '((num_bins * ncols, image.shape[0], image.shape[1]))\n', (7926, 7978), True, 'import numpy as np\n'), ((10748, 10778), 'numpy.full', 'np.full', (['len_watershed', 'np.nan'], {}), '(len_watershed, np.nan)\n', (10755, 10778), True, 'import numpy as np\n'), ((10792, 10822), 'numpy.full', 'np.full', (['len_watershed', 'np.nan'], {}), '(len_watershed, np.nan)\n', (10799, 10822), True, 'import numpy as np\n'), ((12026, 12045), 'numpy.unique', 'np.unique', (['best_idx'], {}), '(best_idx)\n', (12035, 12045), True, 'import numpy as np\n'), ((14588, 14605), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (14599, 14605), False, 'import random\n'), ((14671, 14689), 'numpy.zeros', 'np.zeros', (['max_eval'], {}), '(max_eval)\n', (14679, 14689), True, 'import numpy as np\n'), ((14702, 14720), 'numpy.zeros', 'np.zeros', (['max_eval'], {}), '(max_eval)\n', (14710, 14720), True, 'import numpy as np\n'), ((14731, 14749), 'numpy.zeros', 'np.zeros', (['max_eval'], {}), '(max_eval)\n', (14739, 14749), True, 'import numpy as np\n'), ((14763, 14781), 'numpy.zeros', 'np.zeros', (['max_eval'], {}), '(max_eval)\n', (14771, 14781), True, 'import numpy as np\n'), ((14798, 14823), 'numpy.full', 'np.full', (['max_eval', 'np.nan'], {}), '(max_eval, np.nan)\n', (14805, 14823), True, 'import numpy as np\n'), ((14837, 14862), 'numpy.full', 'np.full', (['max_eval', 'np.nan'], {}), '(max_eval, np.nan)\n', (14844, 14862), True, 'import numpy as np\n'), ((16878, 16897), 'numpy.unique', 'np.unique', (['best_idx'], {}), '(best_idx)\n', (16887, 16897), True, 'import numpy as np\n'), ((1052, 1071), 'numpy.sqrt', 'np.sqrt', (['num_sample'], {}), '(num_sample)\n', (1059, 1071), True, 'import numpy as np\n'), ((1089, 1101), 'numpy.sqrt', 'np.sqrt', (['dim'], {}), '(dim)\n', (1096, 1101), True, 'import numpy as np\n'), ((1797, 1814), 'numpy.shape', 'np.shape', (['in_data'], {}), '(in_data)\n', (1805, 1814), True, 'import numpy as np\n'), ((3144, 3165), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_dir'], {}), '(save_dir)\n', (3155, 3165), True, 'import matplotlib.pyplot as plt\n'), ((4154, 4187), 'numpy.linspace', 'np.linspace', (['(0)', 'ttl_dtp', '(nbin + 1)'], {}), '(0, ttl_dtp, nbin + 1)\n', (4165, 4187), True, 'import numpy as np\n'), ((4210, 4228), 'numpy.arange', 'np.arange', (['ttl_dtp'], {}), '(ttl_dtp)\n', (4219, 4228), True, 'import numpy as np\n'), ((4251, 4267), 'numpy.sort', 'np.sort', (['in_data'], {}), '(in_data)\n', (4258, 4267), True, 'import numpy as np\n'), ((5497, 5511), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (5505, 5511), True, 'import numpy as np\n'), ((6334, 6470), 'logging.info', 'logging.info', (['"""Not enough labeled neighbor to perform KNN.\n Will return the original inputted value."""'], {}), '(\n """Not enough labeled neighbor to perform KNN.\n Will return the original inputted value."""\n )\n', (6346, 6470), False, 'import logging\n'), ((6492, 6506), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (6500, 6506), True, 'import numpy as np\n'), ((6595, 6620), 'scipy.spatial.distance.cdist', 'distance.cdist', (['[b]', 'vals'], {}), '([b], vals)\n', (6609, 6620), False, 'from scipy.spatial import distance\n'), ((6637, 6661), 'numpy.argpartition', 'np.argpartition', (['dist', 'k'], {}), '(dist, k)\n', (6652, 6661), True, 'import numpy as np\n'), ((6787, 6796), 'collections.Counter', 'Counter', ([], {}), '()\n', (6794, 6796), False, 'from collections import Counter\n'), ((7715, 7814), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': 'ncols', 'nrows': 'num_bins', 'figsize': '(12, num_bins * 3)', 'sharex': '(True)', 'sharey': '(True)'}), '(ncols=ncols, nrows=num_bins, figsize=(12, num_bins * 3),\n sharex=True, sharey=True)\n', (7727, 7814), True, 'import matplotlib.pyplot as plt\n'), ((8024, 8076), 'skimage.filters.threshold_local', 'filters.threshold_local', (['image'], {'block_size': '(3 + 2 * i)'}), '(image, block_size=3 + 2 * i)\n', (8047, 8076), False, 'from skimage import filters\n'), ((8128, 8172), 'skimage.measure.label', 'measure.label', (['block_mask'], {'connectivity': 'conn'}), '(block_mask, connectivity=conn)\n', (8141, 8172), False, 'from skimage import measure\n'), ((8308, 8337), 'skimage.segmentation.random_walker', 'random_walker', (['image', 'markers'], {}), '(image, markers)\n', (8321, 8337), False, 'from skimage.segmentation import random_walker\n'), ((9115, 9159), 'skimage.measure.label', 'measure.label', (['thres_mask'], {'connectivity': 'conn'}), '(thres_mask, connectivity=conn)\n', (9128, 9159), False, 'from skimage import measure\n'), ((9299, 9328), 'skimage.segmentation.random_walker', 'random_walker', (['image', 'markers'], {}), '(image, markers)\n', (9312, 9328), False, 'from skimage.segmentation import random_walker\n'), ((10870, 10900), 'numpy.full', 'np.full', (['len_watershed', 'np.nan'], {}), '(len_watershed, np.nan)\n', (10877, 10900), True, 'import numpy as np\n'), ((10920, 10950), 'numpy.full', 'np.full', (['len_watershed', 'np.nan'], {}), '(len_watershed, np.nan)\n', (10927, 10950), True, 'import numpy as np\n'), ((11776, 11796), 'numpy.nanargmax', 'np.nanargmax', (['ch_scs'], {}), '(ch_scs)\n', (11788, 11796), True, 'import numpy as np\n'), ((14966, 14991), 'numpy.full', 'np.full', (['max_eval', 'np.nan'], {}), '(max_eval, np.nan)\n', (14973, 14991), True, 'import numpy as np\n'), ((15011, 15036), 'numpy.full', 'np.full', (['max_eval', 'np.nan'], {}), '(max_eval, np.nan)\n', (15018, 15036), True, 'import numpy as np\n'), ((16129, 16270), 'logging.info', 'logging.info', (["('dim=%d, iter=%d, lr=%.6f, sigma=%.6f, sil=%.6f, ch=%.6f' % (dims[i],\n iters[i], lrs[i], sigmas[i], avg_sils[i], ch_scs[i]))"], {}), "('dim=%d, iter=%d, lr=%.6f, sigma=%.6f, sil=%.6f, ch=%.6f' % (\n dims[i], iters[i], lrs[i], sigmas[i], avg_sils[i], ch_scs[i]))\n", (16141, 16270), False, 'import logging\n'), ((16628, 16648), 'numpy.nanargmax', 'np.nanargmax', (['ch_scs'], {}), '(ch_scs)\n', (16640, 16648), True, 'import numpy as np\n'), ((5397, 5417), 'numpy.where', 'np.where', (['(value == 0)'], {}), '(value == 0)\n', (5405, 5417), True, 'import numpy as np\n'), ((6202, 6231), 'numpy.where', 'np.where', (['(value == border_val)'], {}), '(value == border_val)\n', (6210, 6231), True, 'import numpy as np\n'), ((11161, 11252), 'acse_9_irp_wafflescore.MiscHelpers.int_eval_silhouette', 'mh.int_eval_silhouette', (['in_data', 'cluster_labels[i]'], {'method': '"""som_watershed"""', 'param': 'param'}), "(in_data, cluster_labels[i], method='som_watershed',\n param=param)\n", (11183, 11252), True, 'from acse_9_irp_wafflescore import MiscHelpers as mh\n'), ((11714, 11732), 'numpy.array', 'np.array', (['avg_sils'], {}), '(avg_sils)\n', (11722, 11732), True, 'import numpy as np\n'), ((13733, 13778), 'numpy.linspace', 'np.linspace', (['(g_sigma - 1)', '(g_sigma + 1)'], {'num': '(30)'}), '(g_sigma - 1, g_sigma + 1, num=30)\n', (13744, 13778), True, 'import numpy as np\n'), ((15773, 15801), 'numpy.unique', 'np.unique', (['cluster_labels[i]'], {}), '(cluster_labels[i])\n', (15782, 15801), True, 'import numpy as np\n'), ((15862, 16096), 'logging.info', 'logging.info', (["('Random search using dim=%d, iter=%d, lr=%.6f, sigma=%.6f result to very small / large number of clusters (n_clusters = %d) '\n % (dims[i], iters[i], lrs[i], sigmas[i], n_clusters))"], {}), "(\n 'Random search using dim=%d, iter=%d, lr=%.6f, sigma=%.6f result to very small / large number of clusters (n_clusters = %d) '\n % (dims[i], iters[i], lrs[i], sigmas[i], n_clusters))\n", (15874, 16096), False, 'import logging\n'), ((16345, 16404), 'acse_9_irp_wafflescore.MiscHelpers.ext_eval_entropy', 'mh.ext_eval_entropy', (['label', 'cluster_labels[i]'], {'init_clus': '(-1)'}), '(label, cluster_labels[i], init_clus=-1)\n', (16364, 16404), True, 'from acse_9_irp_wafflescore import MiscHelpers as mh\n'), ((16418, 16481), 'logging.info', 'logging.info', (["('ent=%.6f, pur=%.6f' % (avg_ents[i], avg_purs[i]))"], {}), "('ent=%.6f, pur=%.6f' % (avg_ents[i], avg_purs[i]))\n", (16430, 16481), False, 'import logging\n'), ((16566, 16584), 'numpy.array', 'np.array', (['avg_sils'], {}), '(avg_sils)\n', (16574, 16584), True, 'import numpy as np\n'), ((2960, 2984), 'matplotlib.pyplot.cm.rainbow', 'plt.cm.rainbow', (['(t / 10.0)'], {}), '(t / 10.0)\n', (2974, 2984), True, 'import matplotlib.pyplot as plt\n'), ((5595, 5620), 'scipy.spatial.distance.cdist', 'distance.cdist', (['[b]', 'vals'], {}), '([b], vals)\n', (5609, 5620), False, 'from scipy.spatial import distance\n'), ((11039, 11062), 'numpy.unique', 'np.unique', (['ws_labels[i]'], {}), '(ws_labels[i])\n', (11048, 11062), True, 'import numpy as np\n'), ((11386, 11427), 'acse_9_irp_wafflescore.MiscHelpers.cal_har_sc', 'mh.cal_har_sc', (['in_data', 'cluster_labels[i]'], {}), '(in_data, cluster_labels[i])\n', (11399, 11427), True, 'from acse_9_irp_wafflescore import MiscHelpers as mh\n'), ((11557, 11602), 'acse_9_irp_wafflescore.MiscHelpers.ext_eval_entropy', 'mh.ext_eval_entropy', (['label', 'cluster_labels[i]'], {}), '(label, cluster_labels[i])\n', (11576, 11602), True, 'from acse_9_irp_wafflescore import MiscHelpers as mh\n'), ((11898, 11916), 'numpy.array', 'np.array', (['avg_ents'], {}), '(avg_ents)\n', (11906, 11916), True, 'import numpy as np\n'), ((11973, 11991), 'numpy.array', 'np.array', (['avg_purs'], {}), '(avg_purs)\n', (11981, 11991), True, 'import numpy as np\n'), ((13617, 13631), 'numpy.log10', 'np.log10', (['(0.25)'], {}), '(0.25)\n', (13625, 13631), True, 'import numpy as np\n'), ((13633, 13647), 'numpy.log10', 'np.log10', (['(0.75)'], {}), '(0.75)\n', (13641, 13647), True, 'import numpy as np\n'), ((15104, 15123), 'random.sample', 'random.sample', (['v', '(1)'], {}), '(v, 1)\n', (15117, 15123), False, 'import random\n'), ((16750, 16768), 'numpy.array', 'np.array', (['avg_ents'], {}), '(avg_ents)\n', (16758, 16768), True, 'import numpy as np\n'), ((16825, 16843), 'numpy.array', 'np.array', (['avg_purs'], {}), '(avg_purs)\n', (16833, 16843), True, 'import numpy as np\n'), ((8544, 8579), 'numpy.unique', 'np.unique', (['ws_labels[i * ncols + 0]'], {}), '(ws_labels[i * ncols + 0])\n', (8553, 8579), True, 'import numpy as np\n'), ((8782, 8817), 'numpy.unique', 'np.unique', (['ws_labels[i * ncols + 1]'], {}), '(ws_labels[i * ncols + 1])\n', (8791, 8817), True, 'import numpy as np\n'), ((9019, 9054), 'numpy.unique', 'np.unique', (['ws_labels[i * ncols + 2]'], {}), '(ws_labels[i * ncols + 2])\n', (9028, 9054), True, 'import numpy as np\n'), ((9547, 9582), 'numpy.unique', 'np.unique', (['ws_labels[i * ncols + 3]'], {}), '(ws_labels[i * ncols + 3])\n', (9556, 9582), True, 'import numpy as np\n'), ((9785, 9820), 'numpy.unique', 'np.unique', (['ws_labels[i * ncols + 4]'], {}), '(ws_labels[i * ncols + 4])\n', (9794, 9820), True, 'import numpy as np\n'), ((10022, 10057), 'numpy.unique', 'np.unique', (['ws_labels[i * ncols + 5]'], {}), '(ws_labels[i * ncols + 5])\n', (10031, 10057), True, 'import numpy as np\n')]
|
import json
from urllib.request import urlopen
from urllib.error import URLError
from urllib.parse import urljoin
VERSION_REQUIRED = 3
EXTERNAL_LIST = 'https://pastebin.com/raw/aKjmATab'
# Returns repo index dictionary object, or None in case of failure
def fetch_index(repo_url):
try:
with urlopen(urljoin(repo_url, 'index.json')) as index_req:
index = json.loads(index_req.read().decode('utf-8'))
except URLError:
return None
if 'repo' not in index or not index['repo'] == 'wapkg':
return None
if not index['version'] == VERSION_REQUIRED:
if index['version'] > VERSION_REQUIRED:
print("! Source '" + repo_url + "' requires newer version of wapkg, " +
'consider upgrading your software in order to use this repo.')
return None
return index
def fetch_external_sources():
sources = []
try:
with urlopen(EXTERNAL_LIST) as lst_req:
for src in lst_req.read().decode('utf-8').split('\n'):
src_ = src.strip()
if len(src_) and not src_.startswith('#'):
sources.append(src_)
except URLError:
pass
return sources
# Unwraps the 'switch' content
def select_pkg(pkg, vs):
if not pkg:
return None
if 'switch' in pkg:
if not vs:
return None
switch = pkg['switch']
for v in switch:
if vs in v.split(','):
return switch[v]
if '*' in switch:
return switch['*']
return None
return pkg
# Returns True if package and all it's dependencies can be successfully installed
def trace_pkg_deps(pkgs_bundle, vs, name):
pkg = None
for pkgs in pkgs_bundle:
if name in pkgs:
pkg = pkgs[name]
break
pkg = select_pkg(pkg, vs)
if not pkg:
return False
if 'requirements' in pkg:
for req in pkg['requirements']:
if not trace_pkg_deps(pkgs_bundle, vs, req):
return False
return True
|
[
"urllib.parse.urljoin",
"urllib.request.urlopen"
] |
[((920, 942), 'urllib.request.urlopen', 'urlopen', (['EXTERNAL_LIST'], {}), '(EXTERNAL_LIST)\n', (927, 942), False, 'from urllib.request import urlopen\n'), ((315, 346), 'urllib.parse.urljoin', 'urljoin', (['repo_url', '"""index.json"""'], {}), "(repo_url, 'index.json')\n", (322, 346), False, 'from urllib.parse import urljoin\n')]
|
from flask import Blueprint,request
from app import pa_domain,pa_ip
from .tasks import scan_ip_task
from celery_app.utils.utils import get_current_time,insert_taskid_db
ipscan_blueprint = Blueprint("ipscan", __name__, url_prefix='/ipscan')
#通过传入一个一级域名,对这个域名下的所有ip进行scan
@ipscan_blueprint.route('/scan')
def scan_ip():
domain = request.args.get("domain")
#在数据库搜索该domain的索引
domain_index=pa_domain.find_one({"domain":domain})
if domain_index:
# 声明ip_list
ip_list = []
#获取整个domain所对应的ip
for item in domain_index['subdomain']:
for ip_s in item['ip']:
ip_list.append(ip_s)
#对ip_list去重
ip_list=list(set(ip_list))
#调用scan_ip 任务 传入主域名和对应的ip列表
r=scan_ip_task.delay(domain,ip_list)
# taskid入库
insert_taskid_db({"task_id":r.task_id,"add_time":get_current_time(),"task_type":"ip_scan","ip_list":ip_list,"task_info":"对{0}域名下的{1}等{2}个ip进行端口扫描".format(domain,ip_list[0],len(ip_list))})
return {"code":200,"msg":"添加扫描任务成功"}
return {"code":201,"msg":"未找到该域名所对应ip"}
#获取ip总数
@ipscan_blueprint.route('/getipnum')
def get_ip_num():
return {"ip_num":pa_ip.find({}).count()}
#获取ip列表,index为起始索引 offset为数量
@ipscan_blueprint.route('/getiplist')
def get_ip_list():
result = []
tmp = {}
domain_index = int(request.args.get("index"))
domain_offset = int(request.args.get("offset"))
cursor = pa_ip.find().sort([('_id', -1)]).skip(domain_index).limit(domain_offset)
for document in cursor:
tmp['ip'] = document['ip']
tmp['add_time'] = document['add_time']
tmp['port'] = document['port']
result.append(tmp)
tmp = {}
return {"ip_list": result}
|
[
"celery_app.utils.utils.get_current_time",
"flask.Blueprint",
"app.pa_domain.find_one",
"flask.request.args.get",
"app.pa_ip.find"
] |
[((189, 240), 'flask.Blueprint', 'Blueprint', (['"""ipscan"""', '__name__'], {'url_prefix': '"""/ipscan"""'}), "('ipscan', __name__, url_prefix='/ipscan')\n", (198, 240), False, 'from flask import Blueprint, request\n'), ((333, 359), 'flask.request.args.get', 'request.args.get', (['"""domain"""'], {}), "('domain')\n", (349, 359), False, 'from flask import Blueprint, request\n'), ((399, 437), 'app.pa_domain.find_one', 'pa_domain.find_one', (["{'domain': domain}"], {}), "({'domain': domain})\n", (417, 437), False, 'from app import pa_domain, pa_ip\n'), ((1341, 1366), 'flask.request.args.get', 'request.args.get', (['"""index"""'], {}), "('index')\n", (1357, 1366), False, 'from flask import Blueprint, request\n'), ((1392, 1418), 'flask.request.args.get', 'request.args.get', (['"""offset"""'], {}), "('offset')\n", (1408, 1418), False, 'from flask import Blueprint, request\n'), ((860, 878), 'celery_app.utils.utils.get_current_time', 'get_current_time', ([], {}), '()\n', (876, 878), False, 'from celery_app.utils.utils import get_current_time, insert_taskid_db\n'), ((1177, 1191), 'app.pa_ip.find', 'pa_ip.find', (['{}'], {}), '({})\n', (1187, 1191), False, 'from app import pa_domain, pa_ip\n'), ((1433, 1445), 'app.pa_ip.find', 'pa_ip.find', ([], {}), '()\n', (1443, 1445), False, 'from app import pa_domain, pa_ip\n')]
|
from notebook.utils import url_path_join as ujoin
from notebook.base.handlers import IPythonHandler
import os, json, git, urllib, requests
from git import Repo, GitCommandError
from subprocess import check_output
import subprocess
repo = None
htable = []
config = {
"GIT_USER": "alpaca",
"GIT_PARENT_DIR": os.path.expanduser("~/Desktop/jupyter_versioning"),
"GIT_BRANCH_NAME": "main",
# "GIT_REMOTE_URL" : "alpaca",
# "GIT_REMOTE_UPSTREAM": "alpaca",
# "GITHUB_ACCESS_TOKEN": "<PASSWORD>"
}
# def delete_cell():
# if cell in htable:
# del htable[cell]
# return True
# return False
# def register_cell(cell, content):
# filename = str(config['GIT_PARENT_DIR'] + "/" + os.environ.get('GIT_REPO_NAME') + str(cell) + filename.replace('ipynb', 'txt'))
# subprocess.run(['cat', content, '>', filename])
# print(repo.git.add(filename))
# print(repo.git.commit( a=False, m="\nUpdated {}".format(filename) ))
|
[
"os.path.expanduser"
] |
[((312, 362), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Desktop/jupyter_versioning"""'], {}), "('~/Desktop/jupyter_versioning')\n", (330, 362), False, 'import os, json, git, urllib, requests\n')]
|
from pymongo import MongoClient
from settings import MONGO_URL
client = MongoClient(MONGO_URL)
db = client.rolz_database
|
[
"pymongo.MongoClient"
] |
[((76, 98), 'pymongo.MongoClient', 'MongoClient', (['MONGO_URL'], {}), '(MONGO_URL)\n', (87, 98), False, 'from pymongo import MongoClient\n')]
|
"""
Samples of the various charts. Run this script to generate the reference
samples.
"""
import os
from svg.charts.plot import Plot
from svg.charts import bar
from svg.charts import time_series
from svg.charts import pie
from svg.charts import schedule
from svg.charts import line
def sample_Plot():
g = Plot(
{
'min_x_value': 0,
'min_y_value': 0,
'area_fill': True,
'stagger_x_labels': True,
'stagger_y_labels': True,
'show_x_guidelines': True,
}
)
g.add_data({'data': [[1, 25], [2, 30], [3, 45]], 'title': 'series 1'})
g.add_data({'data': [[1, 30], [2, 31], [3, 40]], 'title': 'series 2'})
g.add_data({'data': [[0.5, 35], [1, 20], [3, 10.5]], 'title': 'series 3'})
return g
def sample_PlotTextLabels():
g = Plot(
{
'draw_lines_between_points': False,
'min_x_value': 0,
'min_y_value': 0,
'show_x_guidelines': True,
}
)
# Processed Apple production 2015
# Any object with a .text attribute will do;
# we like namedtuple().
from collections import namedtuple
Datum = namedtuple("Datum", "x y text")
g.add_data(
{
'data': [
Datum(8.24, 80.85, 'ES'),
Datum(0.17, 6.73, 'IE'),
Datum(0, 0, 'IS'),
],
'title': 'Processed Apple',
}
)
return g
def sample_TimeSeries():
g = time_series.Plot({})
g.timescale_divisions = '4 hours'
g.stagger_x_labels = True
g.x_label_format = '%d-%b %H:%M'
# g.max_y_value = 200
g.add_data(
{
'data': ['2005-12-21T00:00:00', 20, '2005-12-22T00:00:00', 21],
'title': 'series 1',
}
)
return g
def generate_samples():
yield 'Plot', sample_Plot()
yield 'PlotTextLabels', sample_PlotTextLabels()
yield 'TimeSeries', sample_TimeSeries()
yield 'VerticalBar', SampleBar.vertical()
yield 'HorizontalBar', SampleBar.horizontal()
yield 'VerticalBarLarge', SampleBar.vertical_large()
yield 'VerticalBarStackTop', SampleBar.vertical_top()
yield 'Pie', sample_Pie()
yield 'Schedule', sample_Schedule()
yield 'Line', sample_Line()
class SampleBar:
fields = ['Internet', 'TV', 'Newspaper', 'Magazine', 'Radio']
@classmethod
def vertical(cls):
g = bar.VerticalBar(cls.fields)
g.stack = 'side'
g.scale_integers = True
g.width, g.height = 640, 480
g.graph_title = 'Question 7'
g.show_graph_title = True
g.add_data({'data': [-2, 3, 1, 3, 1], 'title': 'Female'})
g.add_data({'data': [0, 2, 1, 5, 4], 'title': 'Male'})
return g
@classmethod
def horizontal(cls):
g = bar.HorizontalBar(cls.fields)
g.stack = 'side'
g.scale_integers = True
g.width, g.height = 640, 480
g.graph_title = 'Question 7'
g.show_graph_title = True
g.add_data({'data': [-2, 3, 1, 3, 1], 'title': 'Female'})
g.add_data({'data': [0, 2, 1, 5, 4], 'title': 'Male'})
return g
@classmethod
def vertical_large(cls):
g = bar.VerticalBar(cls.fields)
options = dict(
scale_integers=True,
stack='side',
width=640,
height=480,
graph_title='Question 8',
show_graph_title=True,
no_css=False,
)
g.__dict__.update(options)
g.add_data(dict(data=[2, 22, 98, 143, 82], title='intermediate'))
g.add_data(dict(data=[2, 26, 106, 193, 105], title='old'))
return g
@classmethod
def vertical_top(cls):
g = bar.VerticalBar(cls.fields, dict(stack='top'))
assert g.stack == 'top'
g.scale_integers = True
g.width, g.height = 640, 480
g.graph_title = 'Question 7'
g.show_graph_title = True
g.add_data({'data': [-2, 3, 1, 3, 1], 'title': 'Female'})
g.add_data({'data': [0, 2, 1, 5, 4], 'title': 'Male'})
return g
def sample_Line():
g = line.Line()
options = dict(
scale_integers=True,
area_fill=True,
width=640,
height=480,
fields=SampleBar.fields,
graph_title='Question 7',
show_graph_title=True,
no_css=False,
)
g.__dict__.update(options)
g.add_data({'data': [-2, 3, 1, 3, 1], 'title': 'Female'})
g.add_data({'data': [0, 2, 1, 5, 4], 'title': 'Male'})
return g
def sample_Pie():
g = pie.Pie({})
options = dict(
width=640,
height=480,
fields=SampleBar.fields,
graph_title='Question 7',
expand_greatest=True,
show_data_labels=True,
)
g.__dict__.update(options)
g.add_data({'data': [-2, 3, 1, 3, 1], 'title': 'Female'})
g.add_data({'data': [0, 2, 1, 5, 4], 'title': 'Male'})
return g
def sample_Schedule():
title = "Billy's Schedule"
data1 = [
"History 107",
"5/19/04",
"6/30/04",
"Algebra 011",
"6/2/04",
"8/11/04",
"Psychology 101",
"6/28/04",
"8/9/04",
"Acting 105",
"7/7/04",
"8/16/04",
]
g = schedule.Schedule(
dict(
width=640,
height=480,
graph_title=title,
show_graph_title=True,
key=False,
scale_x_integers=True,
scale_y_integers=True,
show_data_labels=True,
show_y_guidelines=False,
show_x_guidelines=True,
# show_x_title=True, # not yet implemented
x_title="Time",
show_y_title=False,
rotate_x_labels=True,
rotate_y_labels=False,
x_label_format="%m/%d",
timescale_divisions="1 week",
popup_format="%m/%d/%y",
area_fill=True,
min_y_value=0,
)
)
g.add_data(dict(data=data1, title="Data"))
return g
def save_samples():
root = os.path.dirname(__file__)
for sample_name, sample in generate_samples():
res = sample.burn()
with open(os.path.join(root, sample_name + '.py.svg'), 'w') as f:
f.write(res)
if __name__ == '__main__':
save_samples()
|
[
"svg.charts.plot.Plot",
"svg.charts.pie.Pie",
"svg.charts.bar.VerticalBar",
"svg.charts.bar.HorizontalBar",
"os.path.dirname",
"collections.namedtuple",
"svg.charts.line.Line",
"svg.charts.time_series.Plot",
"os.path.join"
] |
[((313, 461), 'svg.charts.plot.Plot', 'Plot', (["{'min_x_value': 0, 'min_y_value': 0, 'area_fill': True, 'stagger_x_labels':\n True, 'stagger_y_labels': True, 'show_x_guidelines': True}"], {}), "({'min_x_value': 0, 'min_y_value': 0, 'area_fill': True,\n 'stagger_x_labels': True, 'stagger_y_labels': True, 'show_x_guidelines':\n True})\n", (317, 461), False, 'from svg.charts.plot import Plot\n'), ((832, 942), 'svg.charts.plot.Plot', 'Plot', (["{'draw_lines_between_points': False, 'min_x_value': 0, 'min_y_value': 0,\n 'show_x_guidelines': True}"], {}), "({'draw_lines_between_points': False, 'min_x_value': 0, 'min_y_value': \n 0, 'show_x_guidelines': True})\n", (836, 942), False, 'from svg.charts.plot import Plot\n'), ((1179, 1210), 'collections.namedtuple', 'namedtuple', (['"""Datum"""', '"""x y text"""'], {}), "('Datum', 'x y text')\n", (1189, 1210), False, 'from collections import namedtuple\n'), ((1497, 1517), 'svg.charts.time_series.Plot', 'time_series.Plot', (['{}'], {}), '({})\n', (1513, 1517), False, 'from svg.charts import time_series\n'), ((4137, 4148), 'svg.charts.line.Line', 'line.Line', ([], {}), '()\n', (4146, 4148), False, 'from svg.charts import line\n'), ((4580, 4591), 'svg.charts.pie.Pie', 'pie.Pie', (['{}'], {}), '({})\n', (4587, 4591), False, 'from svg.charts import pie\n'), ((6090, 6115), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (6105, 6115), False, 'import os\n'), ((2422, 2449), 'svg.charts.bar.VerticalBar', 'bar.VerticalBar', (['cls.fields'], {}), '(cls.fields)\n', (2437, 2449), False, 'from svg.charts import bar\n'), ((2819, 2848), 'svg.charts.bar.HorizontalBar', 'bar.HorizontalBar', (['cls.fields'], {}), '(cls.fields)\n', (2836, 2848), False, 'from svg.charts import bar\n'), ((3222, 3249), 'svg.charts.bar.VerticalBar', 'bar.VerticalBar', (['cls.fields'], {}), '(cls.fields)\n', (3237, 3249), False, 'from svg.charts import bar\n'), ((6213, 6256), 'os.path.join', 'os.path.join', (['root', "(sample_name + '.py.svg')"], {}), "(root, sample_name + '.py.svg')\n", (6225, 6256), False, 'import os\n')]
|
import data_reader
import time
import tensorflow as tf
def worker(num):
time.sleep(0.5)
print(num)
return num
if __name__=='__main__':
data = list(range(100))
bsize = 10
reader = data_reader.data_reader(data, worker, bsize)
for i in range(10):
a = reader.get_next_batch()
print(a)
|
[
"data_reader.data_reader",
"time.sleep"
] |
[((75, 90), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (85, 90), False, 'import time\n'), ((190, 234), 'data_reader.data_reader', 'data_reader.data_reader', (['data', 'worker', 'bsize'], {}), '(data, worker, bsize)\n', (213, 234), False, 'import data_reader\n')]
|
import mimetypes
from django.core.files.base import ContentFile
from django.core.files.storage import Storage
from django.utils.deconstruct import deconstructible
from django.conf import settings
from azure.storage.blob.models import ContentSettings
from azure.storage.blob.baseblobservice import BaseBlobService
from azure.storage.blob.blockblobservice import BlockBlobService
from azure.common import AzureMissingResourceHttpError
@deconstructible
class AzureStorage(Storage):
def __init__(self, azure_container=settings.AZURE_STATICFILES_CONTAINER, *args, **kwargs):
super(AzureStorage, self).__init__(*args, **kwargs)
self.account_name = settings.AZURE_STORAGE_ACCOUNT_NAME
self.account_key = settings.AZURE_STORAGE_ACCOUNT_KEY
self.azure_container = azure_container
self.azure_ssl = settings.AZURE_STATICFILES_SSL
self._base_blob_service = None
self._block_blob_service = None
@property
def base_blob_service(self):
if self._base_blob_service is None:
self._base_blob_service = BaseBlobService(
self.account_name, self.account_key)
return self._base_blob_service
@property
def block_blob_service(self):
if self._block_blob_service is None:
self._block_blob_service = BlockBlobService(
self.account_name, self.account_key)
return self._block_blob_service
@property
def azure_protocol(self):
if self.azure_ssl:
return 'https'
return 'http' if self.azure_ssl is not None else None
def _open(self, name, mode="rb"):
blob = self.base_blob_service.get_blob_to_bytes(self.azure_container, name)
return ContentFile(blob.content)
def exists(self, name):
return self.base_blob_service.exists(self.azure_container, name)
def delete(self, name):
try:
self.base_blob_service.delete_blob(self.azure_container, name)
except AzureMissingResourceHttpError: # pragma: no cover
pass
def size(self, name):
blob = self.base_blob_service.get_blob_properties(self.azure_container, name)
return blob.properties.content_length
def _save(self, name, content):
if hasattr(content.file, 'content_type'):
content_type = content.file.content_type
else:
content_type = mimetypes.guess_type(name)[0]
if hasattr(content, 'chunks'):
content_data = b''.join(chunk for chunk in content.chunks())
else:
content_data = content.read()
self.block_blob_service.create_blob_from_bytes(
self.azure_container, name,
content_data,
content_settings=ContentSettings(content_type=content_type))
return name
def url(self, name):
return self.base_blob_service.make_blob_url(
container_name=self.azure_container,
blob_name=name,
protocol=self.azure_protocol,
)
def get_modified_time(self, name):
blob = self.base_blob_service.get_blob_properties(
self.azure_container,
name
)
return blob.properties.last_modified
|
[
"azure.storage.blob.blockblobservice.BlockBlobService",
"azure.storage.blob.models.ContentSettings",
"django.core.files.base.ContentFile",
"azure.storage.blob.baseblobservice.BaseBlobService",
"mimetypes.guess_type"
] |
[((1728, 1753), 'django.core.files.base.ContentFile', 'ContentFile', (['blob.content'], {}), '(blob.content)\n', (1739, 1753), False, 'from django.core.files.base import ContentFile\n'), ((1076, 1128), 'azure.storage.blob.baseblobservice.BaseBlobService', 'BaseBlobService', (['self.account_name', 'self.account_key'], {}), '(self.account_name, self.account_key)\n', (1091, 1128), False, 'from azure.storage.blob.baseblobservice import BaseBlobService\n'), ((1318, 1371), 'azure.storage.blob.blockblobservice.BlockBlobService', 'BlockBlobService', (['self.account_name', 'self.account_key'], {}), '(self.account_name, self.account_key)\n', (1334, 1371), False, 'from azure.storage.blob.blockblobservice import BlockBlobService\n'), ((2396, 2422), 'mimetypes.guess_type', 'mimetypes.guess_type', (['name'], {}), '(name)\n', (2416, 2422), False, 'import mimetypes\n'), ((2747, 2789), 'azure.storage.blob.models.ContentSettings', 'ContentSettings', ([], {'content_type': 'content_type'}), '(content_type=content_type)\n', (2762, 2789), False, 'from azure.storage.blob.models import ContentSettings\n')]
|
#--------------------------------------------------------------
# By <NAME>
# Painted Harmony Group, Inc
# June 26, 2017
# Please See LICENSE.txt
#--------------------------------------------------------------
import unittest
import SentimentAnalyzer as analyzer
class SentimentAnalyzerTest(unittest.TestCase):
def test_analyze_sentiment(self):
sa = analyzer.SentimentAnalyzer()
self.assertTrue(sa.analyze_sentiment("This is a happy tweet. Have a nice day.")=="pos")
self.assertTrue(sa.analyze_sentiment("I am angry. He is very disonest. Sad.")=="neg")
|
[
"SentimentAnalyzer.SentimentAnalyzer"
] |
[((371, 399), 'SentimentAnalyzer.SentimentAnalyzer', 'analyzer.SentimentAnalyzer', ([], {}), '()\n', (397, 399), True, 'import SentimentAnalyzer as analyzer\n')]
|
import numpy as np
class DecisionTreeClassifierTranspiler(object):
def __init__(self, model):
self.model = model
self.build_classes()
self.build_feature_idx()
self.build_right_nodes()
self.build_thresholds()
def build_feature_idx(self):
self.features_idx = ','.join(self.model.tree_.feature.astype(str))
def build_classes(self):
class_aux = list(map(lambda x : x[0], self.model.tree_.value))
self.classes = np.argmax(class_aux, axis = 1)
self.classes = ','.join(self.classes.astype(str))
def build_right_nodes(self):
self.right_nodes = ','.join(self.model.tree_.children_right.astype(str)).replace('-1', '0')
def build_thresholds(self):
self.thresholds = ','.join(self.model.tree_.threshold.astype(str))
def generate_code(self):
return """
/*
The following code was generated using Clara.Transpiler. For more information please visit: https://github.com/asergiobranco/clara
*/
#define NO_NODES %s
unsigned char classes[NO_NODES] = {%s};
int FEATURE_IDX_NODE[NO_NODES] = {%s};
int RIGHT_CHILDS[NO_NODES] = {%s};
float THRESHOLDS[NO_NODES] = {%s};
int predict(double * sample){
unsigned int current_node = 0;
int feature_idx = FEATURE_IDX_NODE[0];
while(feature_idx >= 0){
if(sample[feature_idx] <= THRESHOLDS[current_node]){
current_node++;
}
else{
current_node = RIGHT_CHILDS[current_node];
}
feature_idx = FEATURE_IDX_NODE[current_node];
}
return classes[current_node];
}
""" % (self.model.tree_.node_count, self.classes, self.features_idx, self.right_nodes, self.thresholds)
|
[
"numpy.argmax"
] |
[((488, 516), 'numpy.argmax', 'np.argmax', (['class_aux'], {'axis': '(1)'}), '(class_aux, axis=1)\n', (497, 516), True, 'import numpy as np\n')]
|
import unittest
import requests
from helpers.fake_http_server import FakeServer
class FakeServerTest(unittest.TestCase):
SERVER = None
@classmethod
def setUpClass(cls):
cls.SERVER = FakeServer()
cls.SERVER.start_server()
cls.SERVER.serve_forever()
def setUp(self):
self.server = FakeServerTest.SERVER
def test_is_server_alive(self):
self.assertTrue(self.server.is_alive())
self.assertTrue(self.server.is_ready_to_process())
def test_server_process_forever(self):
self.assertTrue(self.server.is_ready_to_process())
send_and_check_request(self.server.get_url(), "request1")
self.assertTrue(self.server.is_ready_to_process())
send_and_check_request(self.server.get_url(), "request2")
self.assertTrue(self.server.is_ready_to_process())
def test_server_overlapped_listeners(self):
self.assertTrue(self.server.is_ready_to_process())
self.assertRaises(FakeServer.ServerStateException, self.server.serve_once)
self.assertRaises(FakeServer.ServerStateException, self.server.serve_forever)
def test_server_start_overlapped_instances(self):
self.assertRaises(FakeServer.ServerStateException, self.server.start_server)
def test_timeout_triggers_only_once_per_call(self):
timeout = 0.3
self.server.set_timeout_delay(timeout)
with self.assertRaises(requests.exceptions.ReadTimeout):
requests.get(self.server.get_url(), timeout=timeout)
requests.get(self.server.get_url(), timeout=timeout)
def test_server_stop_multiple_times(self):
self.server.stop_server()
self.assertRaises(FakeServer.ServerStateException, self.server.stop_server)
self.server.start_server()
self.server.serve_forever()
def test_set_custom_response(self):
expected_response = "Expected Response"
expected_response_code = 404
self.server.set_expected_response(expected_response, expected_response_code)
response = requests.get(self.server.get_url() + "request")
self.assertEquals(expected_response, response.text)
self.assertEquals(expected_response_code, response.status_code)
@classmethod
def tearDownClass(cls):
try:
cls.SERVER.stop_server()
except:
pass
def send_and_check_request(url, request):
url = url + request
response = requests.get(url)
received_request = open(FakeServer.REQUEST_FILE).read()
assert request in received_request[1:] # skip first character which always is '/'
assert response.status_code == FakeServer.DEFAULT_RESPONSE_CODE
assert response.text == FakeServer.DEFAULT_RESPONSE
|
[
"helpers.fake_http_server.FakeServer",
"requests.get"
] |
[((2488, 2505), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2500, 2505), False, 'import requests\n'), ((206, 218), 'helpers.fake_http_server.FakeServer', 'FakeServer', ([], {}), '()\n', (216, 218), False, 'from helpers.fake_http_server import FakeServer\n')]
|
#!/usr/bin/env python
import textgrid
import sys
if len(sys.argv) != 2:
print("textgrid-to-audacity.py [filename]")
quit()
tg = textgrid.TextGrid.fromFile(sys.argv[1])
started = False
start=0.0
end=0.0
text=list()
for i in tg[0]:
if i.mark != '':
if not started:
start = i.minTime
started = True
else:
started = True
end = i.maxTime
text.append(i.mark)
else:
if started:
print('{}\t{}\t{}'.format(start, end, ' '.join(text)))
start = 0.0
end = 0.0
text.clear()
started = False
|
[
"textgrid.TextGrid.fromFile"
] |
[((138, 177), 'textgrid.TextGrid.fromFile', 'textgrid.TextGrid.fromFile', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (164, 177), False, 'import textgrid\n')]
|
import contextlib
import os
import logging
from django.test.runner import DiscoverRunner
from django.conf import settings
from django.db import connections
logger = logging.getLogger(__name__)
class LegacyDiscoverRunner(DiscoverRunner):
"""
See https://docs.djangoproject.com/en/1.7/topics/testing/advanced/#defining-a-test-runner
"""
def setup_databases(self, **kwargs):
"""Though our schema is readonly in shared environments we assume DB control in testing"""
# Super will create an empty test_<db name> automatically
config = super(LegacyDiscoverRunner, self).setup_databases(**kwargs)
# Invoke any custom ddl to create the schema after that.
script_path = os.path.join(settings.MANAGE_ROOT, 'legacy-schema.sql')
logger.info("Initializing DB with script. [Path: {}]".format(script_path))
with open(script_path, 'r') as sql_file:
ddl = sql_file.read()
cursor = connections['legacy'].cursor()
cursor.executescript(ddl)
return config
|
[
"os.path.join",
"logging.getLogger"
] |
[((167, 194), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (184, 194), False, 'import logging\n'), ((724, 779), 'os.path.join', 'os.path.join', (['settings.MANAGE_ROOT', '"""legacy-schema.sql"""'], {}), "(settings.MANAGE_ROOT, 'legacy-schema.sql')\n", (736, 779), False, 'import os\n')]
|
# coding: utf8
import datetime
import logging
import random
import re
import ssl
import subprocess
import threading
import time
from multiprocessing import Process as Thread
import telebot
from aiohttp import web
from telebot import types
import api
import cherrypy
import config
import secret_config
import text
import ujson
import utils
WEBHOOK_HOST = utils.get_my_ip()
WEBHOOK_PORT = 8443 # 443, 80, 88 или 8443 (порт должен быть открыт!)
# На некоторых серверах придется указывать такой же IP, что и выше
WEBHOOK_LISTEN = '0.0.0.0'
WEBHOOK_SSL_CERT = './webhook_cert.pem' # Путь к сертификату
WEBHOOK_SSL_PRIV = './webhook_pkey.pem' # Путь к приватному ключу
WEBHOOK_URL_BASE = "https://%s:%s" % (WEBHOOK_HOST, WEBHOOK_PORT)
WEBHOOK_URL_PATH = "/%s/" % (secret_config.token)
start_time = int(time.time())
bot = telebot.TeleBot(token = secret_config.token)
my_info = bot.get_me()
telebot_logger = logging.getLogger('telebot')
sqlite_info = logging.getLogger('sqlite')
main_info = logging.getLogger('main_info')
report_info = logging.getLogger('reports')
if __name__ == '__main__':
log_name = 'logs.txt'
f = open(log_name,'w')
f.close()
print('Файл логов создан')
telebot_logger = logging.getLogger('telebot')
mysql_info = logging.getLogger('mysql')
main_info = logging.getLogger('main_info')
report_info = logging.getLogger('reports')
print('Список логгеров создан')
logging.basicConfig(
format='%(filename)s [LINE:%(lineno)-3d]# %(levelname)-8s - %(name)-9s [%(asctime)s] - %(message)-50s ',
datefmt='%m/%d/%Y %I:%M:%S %p',
level = logging.INFO
)
app = web.Application()
t = Thread(target = utils.check_deleting_queue)
t.start()
async def handle(request):
if request.match_info.get('token') == bot.token:
request_body_dict = await request.json()
update = telebot.types.Update.de_json(request_body_dict)
bot.process_new_updates([update])
return web.Response()
else:
return web.Response(status=403)
app.router.add_post('/{token}/', handle)
def create_user_language_keyboard():
lang_keyboard = types.InlineKeyboardMarkup()
for i in config.languages:
lang_keyboard.add(types.InlineKeyboardButton(text = i['title'], callback_data = 'lang::{lang_code}'.format(lang_code = i['code'])))
return lang_keyboard
def group_setting(chat_id):
keyboard = types.InlineKeyboardMarkup(row_width=1)
curr_settings = api.get_group_params(chat_id)
btn = types.InlineKeyboardButton(text = 'Принимать рассылки{}'.format(config.settings_statuses[curr_settings['get_notifications']]), callback_data = 'get_notifications::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Удалять ссылки{}'.format(config.settings_statuses[curr_settings['deletions']['url']]), callback_data = 'del_url::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Удалять системные сообщения{}'.format(config.settings_statuses[curr_settings['deletions']['system']]), callback_data = 'del_system::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Исключать ботов{}'.format(config.settings_statuses[curr_settings['kick_bots']]), callback_data='kick_bots::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Фильтры', callback_data='deletions_settings::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Ограничения новых пользователей', callback_data = 'new_users_restrictions::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Настройка предупреждений', callback_data = 'warns_settings::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Настройка приветствий', callback_data = 'welcome_settings::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Получить дамп настроек', callback_data = 'get_settings_json::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Получить топ инвайтеров', callback_data = 'get_chat_refs::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
keyboard.add(types.InlineKeyboardButton(text = 'К списку групп', callback_data = 'to_groups_list'))
return keyboard
def welcome_settings_kb(chat_id):
kb = types.InlineKeyboardMarkup(row_width = 4)
curr_settings = api.get_group_params(chat_id)
btn = types.InlineKeyboardButton(text = 'Отправлять приветствие в чат: {}'.format(config.settings_statuses[curr_settings['greeting']['is_enabled']]), callback_data = 'welcome_state::{chat_id}'.format(chat_id = chat_id))
kb.add(btn)
btn = types.InlineKeyboardButton(text = 'Задержка перед удалением приветствия: {} сек.'.format(curr_settings['greeting']['delete_timer']), callback_data = 'welcome_get::{chat_id}'.format(chat_id = chat_id))
kb.add(btn)
btn1 = types.InlineKeyboardButton(text = '➖10', callback_data = 'welcome_timer_-10::{chat_id}'.format(chat_id = chat_id))
btn2 = types.InlineKeyboardButton(text = '➖5', callback_data = 'welcome_timer_-5::{chat_id}'.format(chat_id = chat_id))
btn3 = types.InlineKeyboardButton(text = '➕5', callback_data = 'welcome_timer_+5::{chat_id}'.format(chat_id = chat_id))
btn4 = types.InlineKeyboardButton(text = '➕10', callback_data = 'welcome_timer_+10::{chat_id}'.format(chat_id = chat_id))
kb.add(btn1, btn2, btn3, btn4)
btn = types.InlineKeyboardButton(text = 'Показать приветствие', callback_data = 'welcome_get::{chat_id}'.format(chat_id = chat_id))
kb.add(btn)
btn = types.InlineKeyboardButton(text = 'Назад', callback_data='to_group_settings_menu::{chat_id}'.format(chat_id = chat_id))
kb.add(btn)
return kb
def new_users_restrictions_kb(chat_id):
keyboard = types.InlineKeyboardMarkup(row_width = 4)
curr_settings = api.get_group_params(chat_id)
btn = types.InlineKeyboardButton(text = 'Автоматический read-only на {} час - {}'.format(curr_settings['restrictions']['for_time'], config.settings_statuses[curr_settings['restrictions']['read_only']]), callback_data = 'read_only::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn1 = types.InlineKeyboardButton(text = '➖2', callback_data = 'time_ro_-2::{chat_id}'.format(chat_id = chat_id))
btn2 = types.InlineKeyboardButton(text = '➖1', callback_data = 'time_ro_-1::{chat_id}'.format(chat_id = chat_id))
btn3 = types.InlineKeyboardButton(text = '➕1', callback_data = 'time_ro_+1::{chat_id}'.format(chat_id = chat_id))
btn4 = types.InlineKeyboardButton(text = '➕2', callback_data = 'time_ro_+2::{chat_id}'.format(chat_id = chat_id))
btn5 = types.InlineKeyboardButton(text = 'Навсегда', callback_data = 'time_ro_+10000::{chat_id}'.format(chat_id = chat_id))
btn6 = types.InlineKeyboardButton(text = 'Сброс', callback_data = 'time_ro_-10000000000::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn1, btn2, btn3, btn4)
keyboard.add(btn5, btn6)
btn = types.InlineKeyboardButton(text = 'Снятие ограничений разрешено для: {}'.format(config.new_users[curr_settings['restrictions']['admins_only']]), callback_data = 'new_restrictions_admins_only_{state}::{chat_id}'.format(state = config.settings_states[curr_settings['restrictions']['admins_only']], chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Назад', callback_data='to_group_settings_menu::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
return keyboard
def warns_settings_kb(chat_id):
keyboard = types.InlineKeyboardMarkup(row_width = 4)
curr_settings = api.get_group_params(chat_id)
btn = types.InlineKeyboardButton(text = 'Максимальное количество исключений: {}'.format(curr_settings['warns']['count']), callback_data = 'empty_callback::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn1 = types.InlineKeyboardButton(text = '➖2', callback_data = 'warns_count_-2::{chat_id}'.format(chat_id = chat_id))
btn2 = types.InlineKeyboardButton(text = '➖1', callback_data = 'warns_count_-1::{chat_id}'.format(chat_id = chat_id))
btn3 = types.InlineKeyboardButton(text = '➕1', callback_data = 'warns_count_+1::{chat_id}'.format(chat_id = chat_id))
btn4 = types.InlineKeyboardButton(text = '➕2', callback_data = 'warns_count_+2::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn1, btn2, btn3, btn4)
btn = types.InlineKeyboardButton(text = 'Действие при максимальном кол-ве варнов: {}'.format(config.warns_states[curr_settings['warns']['action']]), callback_data='empty_callback::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn1 = types.InlineKeyboardButton(text = 'Ничего', callback_data = 'warns_action_0::{chat_id}'.format(chat_id = chat_id))
btn2 = types.InlineKeyboardButton(text = 'Кик', callback_data = 'warns_action_1::{chat_id}'.format(chat_id = chat_id))
btn3 = types.InlineKeyboardButton(text = 'Бан', callback_data = 'warns_action_2::{chat_id}'.format(chat_id = chat_id))
btn4 = types.InlineKeyboardButton(text = 'Read-only на сутки', callback_data = 'warns_action_3::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn1, btn2, btn3, btn4)
btn = types.InlineKeyboardButton(text = 'Назад', callback_data='to_group_settings_menu::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
return keyboard
def remove_warns_kb(user_id):
kb = types.InlineKeyboardMarkup(row_width=1)
btn = types.InlineKeyboardButton(text = 'Удалить предупреждения', callback_data = 'delete_warns::{user_id}'.format(user_id = user_id))
kb.add(btn)
return kb
def unban_new_user_kb(msg):
kb = types.InlineKeyboardMarkup(row_width=1)
btn = types.InlineKeyboardButton(text = 'Разблокировать', callback_data = 'unban_new_user::{chat_id}::{user_id}'.format(user_id = msg.new_chat_member.id, chat_id = msg.chat.id))
kb.add(btn)
return kb
def user_settings_main_menu(msg):
keyboard = types.InlineKeyboardMarkup(row_width=1)
curr_settings = api.get_user_param(msg.chat.id, 'settings')
btn = types.InlineKeyboardButton(text = 'Принимать рассылки{}'.format(config.settings_statuses['get_notifications']), callback_data='get_notifications')
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Выбор языка'.format(config.settings_statuses['get_notifications']), callback_data='open_lang_menu')
keyboard.add(btn)
return keyboard
def delete_settings(chat_id):
keyboard = types.InlineKeyboardMarkup(row_width=1)
curr_settings = api.get_group_params(chat_id)
for cont_type in config.available_attachments:
btn = types.InlineKeyboardButton(text=config.available_attachments_str[cont_type].format(config.settings_statuses[curr_settings['deletions']['files'][cont_type]]), callback_data='delete::{content_type}::{chat_id}'.format(content_type = cont_type, chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Переключить все', callback_data = 'change_all::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Назад', callback_data='to_group_settings_menu::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
return keyboard
def generate_leave_kb(msg):
chat_id = msg.chat.id
keyboard = types.InlineKeyboardMarkup(row_width=1)
btn = types.InlineKeyboardButton(text = 'Да, выйди из чата', callback_data='leave_cancel::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Нет, останься', callback_data='leave_confirm::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
return keyboard
def generate_user_menu_kb(user_id):
kb = types.InlineKeyboardMarkup(row_width = 1)
btn1 = types.InlineKeyboardButton(text = 'Мои чаты', callback_data = 'my_chats')
btn2 = types.InlineKeyboardButton(text = 'Изменить язык', callback_data = 'change_lang')
kb.add(btn1, btn2)
if utils.check_super_user(user_id):
kb.add(types.InlineKeyboardButton(text = 'Админка бота', callback_data = 'admin_menu'))
return kb
def generate_admin_menu_kb():
kb = types.InlineKeyboardMarkup(row_width = 2)
btn1 = types.InlineKeyboardButton(text = 'Рассылка', callback_data = 'broadcast_menu')
btn2 = types.InlineKeyboardButton(text = 'Статистика', callback_data = 'stats_menu')
kb.add(btn1, btn2)
kb.add(types.InlineKeyboardButton(text = 'В главное меню', callback_data = 'to_main_menu'))
return kb
def generate_broadcast_vars_menu_kb():
kb = types.InlineKeyboardMarkup(row_width = 1)
btn1 = types.InlineKeyboardButton(text = 'Рассылка-проверка', callback_data = 'check_broadcast')
btn2 = types.InlineKeyboardButton(text = 'Рассылка сообщения', callback_data = 'broadcast_settings')
kb.add(btn1, btn2)
kb.add(types.InlineKeyboardButton(text = 'В главное меню', callback_data = 'to_main_menu'))
return kb
def generate_broadcast_settings_menu_kb():
kb = types.InlineKeyboardMarkup(row_width = 2)
btn1 = types.InlineKeyboardButton(text = 'Ввести сообщение', callback_data = 'broadcast_message::input')
btn2 = types.InlineKeyboardButton(text = 'Просмотреть сообщение', callback_data = 'broadcast_message::show')
btn3 = types.InlineKeyboardButton(text = 'Начать рассылку', callback_data = 'broadcast_message::start')
kb.add(btn1, btn2, btn3)
return kb
def generate_broadcast_check_menu_kb():
kb = types.InlineKeyboardMarkup(row_width = 3)
curr_settings = ujson.loads(api.get_bot_settings(secret_config.token))
s = {
'users': 'пользователи',
'chats': 'диалоги',
'all': 'все'
}
btn1 = types.InlineKeyboardButton(text = 'Только диалоги', callback_data = 'broadcast_check::users')
btn2 = types.InlineKeyboardButton(text = 'Только чаты', callback_data = 'broadcast_check::chats')
btn3 = types.InlineKeyboardButton(text = 'Все', callback_data = 'broadcast_check::all')
btn4 = types.InlineKeyboardButton(text = 'Сейчас: {}'.format(s[curr_settings['broadcast']['check']['receivers']]), callback_data = 'empty_callback')
btn5 = types.InlineKeyboardButton(text = 'Начать рассылку', callback_data = 'broadcast_check::start')
kb.add(btn1, btn2, btn3)
kb.add(btn4, btn5)
return kb
def generate_user_groups(user_id):
kb = types.InlineKeyboardMarkup(row_width=2)
user_settings = ujson.loads(api.get_user_param(user_id, 'settings'))
btns = []
for i in user_settings['admined_groups']:
btn = types.InlineKeyboardButton(text = i['title'], callback_data = 'settings::{chat_id}'.format(chat_id = i['chat_id']))
btns.append(btn)
kb.add(*btns)
kb.add(types.InlineKeyboardButton(text = 'В главное меню', callback_data = 'to_main_menu'))
return kb
@bot.channel_post_handler(content_types=['text'], func = lambda msg: msg.chat.id == secret_config.channel_ID)
def bot_broadcast(msg):
r = bot.forward_message(secret_config.official_chat, msg.chat.id, msg.message_id)
bot.pin_chat_message(
r.chat.id,
r.message_id
)
@bot.message_handler(commands =['setlog'], func = lambda msg:
msg.chat.type in ['group', 'supergroup'] and
msg.forward_from_chat is not None and
utils.check_status(msg.from_user.id, msg.chat.id) and
not utils.check_log(msg.chat.id)
)
def bot_set_log(msg):
user_id = msg.from_user.id
try:
admins = bot.get_chat_administrators(msg.forward_from_chat.id)
status1 = False
status2 = False
for i in admins:
if i.user.id == user_id:
if i.status == 'creator':
status1 = True
if i.user.id == my_info.id:
status2 = True
if status1 is True and status2 is True:
utils.set_log_channel(msg.chat.id, msg.forward_from_chat.id)
elif status1 is not True:
bot.send_message(
msg.chat.id,
text = text.group_commands[utils.get_group_lang(chat_id)]['log_channel']['confirmation']['errors']['user_is_not_creator']
)
elif status2 is not True:
bot.send_message(
msg.chat.id,
text = text.group_commands[utils.get_group_lang(chat_id)]['log_channel']['confirmation']['errors']['bot_is_not_admin']
)
except Exception as e:
print(e)
@bot.message_handler(commands = ['dellog'], func = lambda msg:
msg.chat.type in ['group', 'supergroup'] and
msg.forward_from_chat is not None and
utils.check_status(msg.from_user.id, msg.chat.id) and
msg.forward_from_chat.id == utils.get_log_id(msg.chat.id) and
utils.check_log(msg.chat.id)
)
def bot_del_log(msg):
print(1)
user_id = msg.from_user.id
try:
admins = bot.get_chat_administrators(msg.forward_from_chat.id)
status1 = False
status2 = False
for i in admins:
if i.user.id == user_id:
if i.status == 'creator':
status1 = True
if i.user.id == my_info.id:
status2 = True
if status1 is True and status2 is True:
utils.remove_log_channel(msg.chat.id)
elif status1 is not True:
bot.send_message(
msg.chat.id,
text = text.group_commands[utils.get_group_lang(chat_id)]['log_channel']['confirmation']['errors']['user_is_not_creator']
)
elif status2 is not True:
bot.send_message(
msg.chat.id,
text = text.group_commands[utils.get_group_lang(chat_id)]['log_channel']['confirmation']['errors']['bot_is_not_admin']
)
except Exception as e:
print(e)
@bot.message_handler(commands = ['infolog'], func = lambda msg: msg.chat.type in ['group', 'supergroup'])
def bot_info_log(msg):
if utils.check_log(msg.chat.id):
m = text.group_commands[utils.get_group_lang(msg.chat.id)]['log_channel']['info']['is_on'].format(
chat_id = utils.get_log_id(msg.chat.id),
chat_name = bot.get_chat(utils.get_log_id(msg.chat.id)).title
)
else:
m = text.group_commands[utils.get_group_lang(msg.chat.id)]['log_channel']['info']['is_off']
bot.send_message(
msg.chat.id,
m,
parse_mode = 'HTML'
)
@bot.message_handler(commands = ['leave'], func = lambda msg: msg.chat.type != 'private' and utils.check_status(msg.from_user.id, msg.chat.id))
def bot_leave(msg):
bot.send_message(
msg.chat.id,
text.group_commands[utils.get_group_lang(msg.chat.id)]['leave']['question'],
reply_markup = generate_leave_kb(msg),
parse_mode = 'HTML'
)
@bot.message_handler(commands = ['rmkb'], func = lambda msg: msg.chat.type in ['group', 'supergroup'])
def bot_remove_kb(msg):
kb = types.ReplyKeyboardMarkup(one_time_keyboard=True)
kb.add(types.KeyboardButton(text='/rmkb'))
r = bot.send_message(
msg.chat.id,
text = text.group_commands[utils.get_group_lang(msg.chat.id)]['remove_keyboard'],
reply_markup = kb
)
bot.delete_message(
msg.chat.id,
r.message_id
)
bot.delete_message(
msg.chat.id,
msg.message_id
)
@bot.message_handler(commands = ['settings'], func = lambda msg: msg.chat.type == 'supergroup')
def bot_answ(msg):
start_time = time.time()
message = msg
kb = types.InlineKeyboardMarkup()
r = bot.reply_to(
msg,
'Настройки отправлены вам в личные сообщения',
)
kb.add(types.InlineKeyboardButton(text = 'Удалить', callback_data = 'settings_delete {} {}'.format(msg.message_id, r.message_id)))
bot.edit_message_reply_markup(
chat_id = msg.chat.id,
message_id = r.message_id,
reply_markup = kb
)
bot.send_message(
msg.from_user.id,
'<b>Настройки группы {}</b>'.format(msg.chat.title),
reply_markup=group_setting(msg.chat.id),
parse_mode='HTML'
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['start'], func=lambda msg: msg.chat.type == 'private')
def bot_user_start(msg):
message = msg
start_time = time.time()
if utils.is_user_new(msg):
if utils.have_args(msg):
referrer = utils.parse_arg(msg)[1]
bot.send_message(
msg.chat.id,
text.user_messages['start'],
reply_markup=generate_user_menu_kb(msg.from_user.id)
)
api.register_new_user(msg.from_user, 'ru')
else:
bot.send_message(
msg.chat.id,
text.user_messages[utils.get_user_lang(msg)]['start'],
reply_markup=generate_user_menu_kb(msg.from_user.id)
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['start'], func=lambda msg: msg.chat.type != 'private')
def bot_group_start(msg):
start_time = time.time()
api.register_new_chat(msg.chat)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands = ['get_logs'], func = lambda msg: msg.chat.id == -1001236256304 and utils.check_super_user(msg.from_user.id))
def bot_logs(msg):
bot.send_document(msg.chat.id, open('logs.txt', 'rb'))
@bot.message_handler(commands = ['menu'])
def bot_user_menu(msg):
bot.send_message(
msg.from_user.id,
'Ваше меню',
reply_markup = generate_user_menu_kb(msg.from_user.id)
)
@bot.message_handler(commands=['set_text'], func = lambda msg: msg.chat.type != 'private')
def bot_set_text(msg):
start_time = time.time()
message = msg
if len(msg.text) not in [9, 21]:
new_greeting = msg.text[len(msg.text):msg.entities[0].length:-1][::-1]
if utils.check_text(new_greeting):
utils.set_greeting(msg, new_greeting)
bot.send_message(
msg.chat.id,
'Приветствие изменено'
)
else:
bot.send_message(
msg.chat.id,
text = 'Данное приветствие не работает'
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['kick'], func=lambda msg: msg.chat.type != 'private')
def bot_kick(msg):
start_time = time.time()
utils.kick_user(msg)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands = ['ban', 'ban_me_please'], func = lambda msg: msg.chat.type == 'supergroup')
def bot_ban_me_please(msg):
start_time = time.time()
if msg.text == '/ban_me_please':
t = random.randint(1, 10)
ban_time = 60*t
try:
if not utils.check_status(msg.from_user.id, msg.chat.id):
bot.restrict_chat_member(
msg.chat.id,
msg.from_user.id,
until_date=str(time.time() + ban_time))
bot.reply_to(
msg,
text.group_commands[utils.get_group_lang(msg.chat.id)]['ban_me_please'].format(
t = t
),
parse_mode = 'HTML'
)
else:
bot.reply_to(
msg,
text.group_commands[utils.get_group_lang(msg.chat.id)]['errors']['prefix'].format(
reason = text.group_commands[utils.get_group_lang(msg.chat.id)]['errors']['reasons']['user_is_admin']
),
parse_mode='HTML'
)
except Exception as e:
logging.error(e)
else:
utils.ban_user(msg)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['ping'])
def bot_ping(msg):
start_timee = time.time()
uptime = datetime.timedelta(seconds = int(time.time()-start_time))
working_time = datetime.timedelta(seconds = int(time.time()-msg.date))
uptime_str = str(uptime).replace('day', 'days').replace('dayss', 'days')
working_time_str = str(working_time).replace('day', 'days').replace('dayss', 'days')
if uptime.days != 0:
uptime_str = uptime_str.replace(uptime_str.split(',')[0], utils.get_text_translation(uptime_str.split(',')[0]), 'ru')
if working_time.days != 0:
working_time_str = working_time_str.replace(working_time_str.split(',')[0], utils.get_text_translation(working_time_str.split(',')[0], 'ru'))
bot.send_message(
msg.chat.id,
text.user_messages['ru']['commands']['ping'].format(
unix_time = datetime.datetime.fromtimestamp(int(time.time())),
working_time = working_time_str,
uptime_sec = uptime
),
reply_to_message_id=msg.message_id,
parse_mode='HTML'
)
utils.new_update(msg, time.time()-start_timee)
@bot.message_handler(content_types=['new_chat_members'])
def bot_users_new(msg):
start_time = time.time()
api.register_new_chat(msg.chat)
chat_id = msg.chat.id
utils.new_member_logs(msg)
if api.get_group_params(msg.chat.id)['deletions']['system']:
bot.delete_message(
msg.chat.id,
msg.message_id
)
if msg.chat.type == 'channel':
bot.send_message(
msg.chat.id,
text.promotion_message,
parse_mode='HTML'
)
bot.leave_chat(
msg.chat.id
)
if msg.new_chat_member.id == 495038140:
api.change_group_params(msg.chat.id, ujson.dumps(config.default_group_settings))
else:
if api.get_group_params(msg.chat.id)['restrictions']['read_only']:
bot.restrict_chat_member(
msg.chat.id,
msg.new_chat_member.id,
until_date = int(time.time()+api.get_group_params(msg.chat.id)['restrictions']['for_time']*3600)
)
r = bot.send_message(
msg.chat.id,
text.group_commands['ru']['restricted']['new_user']['read_only'].format(
user_id = msg.new_chat_member.id,
user_name = api.replacer(msg.new_chat_member.first_name),
ban_time = api.get_group_params(msg.chat.id)['restrictions']['for_time']
),
reply_markup = unban_new_user_kb(msg),
parse_mode = 'HTML'
)
utils.add_to_delete_queue(msg.chat.id, r.message_id, api.get_group_params(msg.chat.id)['restrictions']['for_time']*3600)
if msg.new_chat_member.is_bot and api.get_group_params(msg.chat.id)['kick_bots']:
bot.kick_chat_member(
msg.chat.id,
msg.new_chat_member.id
)
bot.send_message(
msg.chat.id,
text.group_commands['ru']['restricted']['bot'],
parse_mode = 'HTML',
reply_markup = types.ReplyKeyboardRemove()
)
elif utils.check_global_ban(msg):
bot.kick_chat_member(
msg.chat.id,
msg.new_chat_member.id
)
bot.send_message(
msg.chat.id,
text.group_commands['ru']['restricted']['global_ban'].format(
user_id = msg.new_chat_member.id,
user_name = msg.new_chat_member.first_name
),
parse_mode = 'HTML'
)
else:
utils.new_user_in_chat(msg)
if utils.need_greeting(msg):
r = bot.send_message(
msg.chat.id,
utils.generate_welcome_text(msg),
parse_mode='HTML'
)
utils.add_to_delete_queue(msg.chat.id, r.message_id, api.get_group_params(msg.chat.id)['greeting']['delete_timer'])
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(content_types=[
'new_chat_members',
'left_chat_member',
'new_chat_title',
'new_chat_photo',
'delete_chat_photo',
'group_chat_created',
'supergroup_chat_created',
'channel_chat_created',
'migrate_to_chat_id',
'migrate_from_chat_id',
'pinned_message'
])
def bot_check_system(msg):
start_time = time.time()
if api.get_group_params(msg.chat.id)['deletions']['system']:
bot.delete_message(
msg.chat.id,
msg.message_id
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['report'])
def bot_report(msg):
start_time = time.time()
admins = bot.get_chat_administrators(msg.chat.id)
chat = bot.get_chat(msg.chat.id)
msg_id = ''
if chat.username:
if msg.reply_to_message:
msg_id = msg.reply_to_message.message_id
txt = text.reports_messages['report']['to_admin']['have_username']['reply']
else:
msg_id = msg.message_id
txt = text.reports_messages['report']['to_admin']['have_username']['no_reply']
else:
txt = text.reports_messages['report']['to_admin']['no_username']
for i in admins:
try:
bot.send_message(
i.user.id,
txt.format(
group_name = api.replacer(msg.chat.title),
group_username = chat.username,
message_id = msg_id,
user_id = msg.from_user.id,
user_name = api.replacer(msg.from_user.first_name),
),
parse_mode='HTML'
)
except Exception as e:
print(e)
bot.reply_to(
msg,
text.reports_messages['report']['to_user'],
parse_mode = 'HTML'
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands = ['unban'], func = lambda msg: msg.chat.type == 'supergroup')
def bot_user_unban(msg):
start_time = time.time()
if utils.check_status(msg.from_user.id, msg.chat.id) and utils.have_args(msg):
words = utils.parse_arg(msg)[1]
user_id = int(words)
utils.unban_user(msg, user_id)
elif utils.check_status(msg.from_user.id, msg.chat.id) and msg.reply_to_message is not None:
user_id = msg.reply_to_message.from_user.id
utils.unban_user(msg, user_id)
elif utils.check_status(msg.from_user.id, msg.chat.id) and not utils.have_args(msg):
utils.send_err_report(msg, 'no_args_provided')
else:
utils.send_err_report(msg, 'not_enought_rights')
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands = ['reregister'], func = lambda msg: msg.chat.type == 'supergroup')
def bot_reregister(msg):
start_time = time.time()
if utils.check_status(msg.from_user.id, msg.chat.id):
api.register_new_chat(msg.chat)
api.change_group_params(msg.chat.id, ujson.dumps(config.default_group_settings))
bot.send_message(
msg.chat.id,
text.group_commands[utils.get_group_lang(msg.chat.id)]['registration'],
parse_mode = 'HTML'
)
@bot.message_handler(commands=['ro'], func=lambda msg: msg.chat.type == 'supergroup')
def bot_users_ro(msg):
start_time = time.time()
if utils.check_status(msg.from_user.id, msg.chat.id):
utils.read_only(msg)
else:
utils.send_err_report(msg, 'not_enought_rights')
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['stickerpack_ban'],func=lambda msg: msg.chat.type == 'supergroup')
def bot_stickerpack_ban(msg):
start_time = time.time()
if utils.check_status(msg.from_user.id, msg.chat.id):
utils.ban_stickerpack(msg)
else:
utils.send_err_report(msg, 'not_enought_rights')
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['stickerpack_unban'], func=lambda msg: msg.chat.type != 'private')
def bot_stickerpack_unban(msg):
start_time = time.time()
if utils.check_status(msg.from_user.id, msg.chat.id) and utils.have_args(msg):
stickerpack_name = utils.parse_arg(msg)[1]
utils.unban_stickerpack(msg, stickerpack_name)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['sticker_ban'], func=lambda msg: msg.chat.type == 'supergroup')
def bot_sticker_ban(msg):
start_time = time.time()
if utils.check_status(msg.from_user.id, msg.chat.id):
sticker_id = msg.reply_to_message.sticker.file_id
utils.ban_sticker(msg, sticker_id)
elif not utils.check_status(msg.from_user.id, msg.chat.id):
utils.send_err_report(msg, 'not_enought_rights')
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['sticker_unban'], func=lambda msg: msg.chat.type == 'supergroup')
def bot_sticker_unban(msg):
start_time = time.time()
if utils.have_args(msg) and utils.check_status(msg.from_user.id, msg.chat.id):
sticker_id = utils.parse_arg(msg)[1]
utils.unban_sticker(msg, sticker_id)
elif utils.check_status(msg.from_user.id, msg.chat.id) and not utils.have_args(msg):
utils.send_err_report(msg, 'not_enought_rights')
elif utils.have_args(msg) and not check_status(msg.from_user.id):
utils.send_err_report(msg, 'no_args_provided')
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['help'])
def bot_help(msg):
start_time = time.time()
bot.send_message(
msg.from_user.id,
text.user_messages[utils.get_user_lang(msg)]['help'],
parse_mode='HTML'
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['about'], func=lambda msg: msg.chat.type == 'private')
def bot_about(msg):
start_time = time.time()
bot.send_message(
msg.chat.id,
text.user_messages[utils.get_user_lang(msg)]['about'],
parse_mode='HTML'
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['warn'], func=lambda msg: msg.chat.type != 'private')
def bot_new_warn(msg):
start_time = time.time()
if utils.check_status(msg.from_user.id, msg.chat.id) and msg.reply_to_message is not None and not utils.check_status(msg.reply_to_message.from_user.id, msg.chat.id):
utils.new_warn(msg)
elif not utils.check_status(msg.from_user.id, msg.chat.id):
utils.send_err_report(msg, 'not_enought_rights')
elif utils.check_status(msg.reply_to_message.from_user.id, msg.chat.id):
utils.send_err_report(msg, 'user_is_admin')
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['donate'])
def bot_donate(msg):
start_time = time.time()
bot.send_message(
msg.chat.id,
text.group_commands['ru']['donate'],
parse_mode = 'HTML'
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands = ['get_id'])
def bot_get_id(msg):
bot.send_message(
msg.chat.id,
msg.chat.id
)
# @bot.message_handler(commands = ['voteban'])
# def bot_voteban(msg):
# utils.new_voteban(msg)
# bot.send_message(
# msg.chat.id,
# text.
# )
@bot.message_handler(commands = ['version'])
def bot_version(msg):
bot.send_message(
msg.chat.id,
text.user_messages[utils.get_user_lang(msg)]['commands']['version'].format(version = text.VERSION),
parse_mode = 'HTML'
)
@bot.message_handler(commands = ['set_rules'], func = lambda msg: utils.check_status(msg.from_user.id, msg.chat.id))
def bot_set_rules(msg):
start_time = time.time()
message = msg
if len(msg.text) not in [9, 21]:
new_rules = msg.text[len(msg.text):msg.entities[0].length:-1][::-1]
if utils.check_text(new_rules):
utils.set_rules(msg, new_rules)
bot.send_message(
msg.chat.id,
'Правила изменены'
)
else:
bot.send_message(
msg.chat.id,
text = 'Правила составлены неверно'
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands = ['rules'], func = lambda msg: msg.chat.type != 'private')
def bot_get_rules(msg):
start_time = time.time()
try:
bot.send_message(
msg.from_user.id,
utils.generate_rules_text(msg),
parse_mode = 'HTML'
)
except Exception:
bot.reply_to(
msg,
text = ''
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands = ['reset_settings'], func = lambda msg: msg.chat.type != 'private')
def bot_reset_settings(msg):
start_time = time.time()
kb = types.InlineKeyboardMarkup()
kb.add(types.InlineKeyboardButton(text = 'Да, выполнить сброс', callback_data = 'reset_settings_confirmation::{chat_id}'.format(chat_id = msg.chat.id)))
kb.add(types.InlineKeyboardButton(text = 'Нет, не стоит', callback_data = 'reset_settings_abort::{chat_id}'.format(chat_id = msg.chat.id)))
if utils.check_status(msg.from_user.id, msg.chat.id):
bot.send_message(
msg.chat.id,
'Вы действительно хотите сбросить настройки?',
reply_markup = kb
)
@bot.message_handler(commands = ['update_time'], func = lambda msg: utils.check_super_user(msg.from_user.id))
def bot_update_time(msg):
bot_ping(msg)
subprocess.run("timedatectl set-time '{time}'".format(time = datetime.datetime.fromtimestamp(msg.date+1).strftime("%Y-%m-%d %H:%M:%S")), shell=True)
bot_ping(msg)
@bot.message_handler(content_types=['text'], func = lambda msg: msg.chat.type != 'private')
def bot_check_text(msg):
start_time = time.time()
msg_text = msg.text
msg_text_low = msg_text.lower()
if utils.is_restricted(msg) and not utils.check_status(msg.from_user.id, msg.chat.id):
bot.delete_message(
msg.chat.id,
msg.message_id
)
if msg_text_low.startswith('разбан'):
if utils.check_super_user(msg.from_user.id):
utils.global_unban(msg)
elif msg_text.lower() in ['глобал бан']:
if utils.check_super_user(msg.from_user.id):
utils.global_ban(msg)
elif not utils.check_status(msg.from_user.id, msg.chat.id):
# if utils.is_new_in_chat(msg) and api.get_group_params(msg.chat.id)['restrict_new'] == '1':
if utils.check_for_urls(msg) and api.get_group_params(msg.chat.id)['deletions']['url']:
bot.delete_message(
msg.chat.id,
msg.message_id
)
bot.send_message(
msg.chat.id,
text.group_commands[utils.get_group_lang(msg.chat.id)]['restricted']['url'].format(
user_id = msg.from_user.id,
user_name = api.replacer(msg.from_user.first_name)
),
parse_mode='HTML'
)
# elif utils.check_for_forward(msg) and api.get_group_params(msg.chat.id)['deletions']['forward']:
# bot.delete_message(
# msg.chat.id,
# msg.message_id
# )
# bot.send_message(
# msg.chat.id,
# text.group_commands[utils.get_group_lang(msg.chat.id)]['restricted']['url'].format(
# user_id = msg.from_user.id,
# user_name = api.replacer(msg.from_user.first_name)
# ),
# parse_mode='HTML'
# )
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(content_types=['photo'], func = lambda msg: msg.chat.id == 303986717)
def bot_text(msg):
start_time = time.time()
bot.reply_to(msg, "<code>'{}': '{}',</code>".format(msg.photo[0].file_id, msg.caption), parse_mode ='HTML')
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(content_types = ['sticker'], func = lambda msg: not utils.check_status(msg.from_user.id, msg.chat.id))
def bot_check_sticker(msg):
start_time = time.time()
if utils.is_restricted(msg) or utils.is_sticker_restricted(msg):
bot.delete_message(
msg.chat.id,
msg.message_id
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(content_types = ['audio', 'document', 'photo', 'sticker', 'video', 'video_note', 'voice', 'location', 'contact'], func = lambda msg: not utils.check_status(msg.from_user.id, msg.chat.id))
def testt(msg):
start_time = time.time()
if utils.is_restricted(msg):
bot.delete_message(
msg.chat.id,
msg.message_id
)
utils.new_update(msg, time.time()-start_time)
# Кнопки
@bot.callback_query_handler(func = lambda c: c.data.startswith('get_chat_refs::'))
def bot_get_chat_refs(c):
chat_id = utils.parse_chat_id(c)
user_id = c.from_user.id
inviters = utils.get_top_inviters(chat_id)
m = text.group_commands[utils.get_group_lang(chat_id)]['refs_stats']['header']
counter = 0
for i in inviters:
inviter_info = bot.get_chat_member(chat_id, i['inviter'])
counter += 1
m += text.group_commands[utils.get_group_lang(chat_id)]['refs_stats']['body'].format(
inviter_pos = counter,
inviter_id = inviter_info.user.id,
inviter_firstname = inviter_info.user.first_name,
invited_count = int(i['COUNT(`inviter`)'])
)
bot.send_message(
user_id,
m,
parse_mode = 'HTML'
)
bot.answer_callback_query(
c.id,
text = 'Список отправлен',
show_alert = True
)
@bot.callback_query_handler(func = lambda c: c.data in ['my_chats', 'to_groups_list'])
def my_chats_list(c):
user_id = c.from_user.id
user_settings = api.get_user_param(user_id, 'settings')
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = 'Список ваших групп'
)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = generate_user_groups(user_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Переход выполнен'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('get_settings_json'))
def bot_get_settings_json(c):
chat_id = utils.parse_chat_id(c)
bot.send_message(
chat_id = c.from_user.id,
text = 'Эти настройки можно получить в любое время и отправить @f0rden для восстановления их, в случае сбоя:\n'+ujson.dumps(api.get_group_params(chat_id))
)
bot.answer_callback_query(
c.id,
text = 'Настройки отправлены',
show_alert = True
)
@bot.callback_query_handler(func = lambda c: c.data == 'stats_menu')
def bot_stats_menu(c):
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = text.service_messages['stats'].format(
all_users = api.get_users_count(),
all_chats = api.get_chats_count(),
unblocked_users = api.get_unblocked_users_count(),
unblocked_chats = api.get_unblocked_chats_count()
)
)
@bot.callback_query_handler(func = lambda c: c.data == 'change_lang')
def bot_change_lang(c):
user_id = c.from_user.id
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = text.user_messages['start'],
parse_mode = 'HTML'
)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = create_user_language_keyboard()
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Переход выполнен'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('settings::'))
def chat_settings(c):
chat_id = utils.parse_chat_id(c)
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = '<b>Настройки группы {}</b>'.format(bot.get_chat(chat_id).title),
parse_mode = 'HTML'
)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = group_setting(chat_id),
)
@bot.callback_query_handler(func = lambda c: c.data == 'to_main_menu')
def bot_to_main_menu(c):
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = 'Ваше меню'
)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = generate_user_menu_kb(c.from_user.id)
)
@bot.callback_query_handler(func = lambda c: c.data == 'broadcast_menu')
def bot_admin_menu(c):
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = 'Выберите тип рассылки'
)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = generate_broadcast_vars_menu_kb()
)
@bot.callback_query_handler(func = lambda c: c.data == 'check_broadcast')
def bot_admin_menu(c):
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = 'Рассылка начата'
)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = generate_broadcast_check_menu_kb()
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('broadcast_check'))
def bot_broadcast_check(c):
arg = c.data.split('::')[1]
curr_bot_settings = ujson.loads(api.get_bot_settings(secret_config.token))
if arg in ['users', 'chats', 'all']:
curr_bot_settings['broadcast']['check']['recievers'] = arg
api.change_bot_settings(secret_config.token, ujson.dumps(curr_bot_settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = generate_broadcast_check_menu_kb()
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
t = Thread(target = utils.make_broadcast, kwargs = {
'is_test': True,
'receivers': curr_bot_settings['broadcast']['check']['recievers'],
'cont_type': 'text',
'msg_text': '',
'file_id': '',
'user_id': c.from_user.id,
'message_id': c.message.message_id
}
)
kb = types.InlineKeyboardMarkup()
kb.add(types.InlineKeyboardButton(text = 'В главное меню', callback_data = 'to_main_menu'))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = kb
)
t.start()
t.join()
@bot.callback_query_handler(func = lambda c: c.data == 'admin_menu')
def bot_admin_menu(c):
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = 'Админка'
)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = generate_admin_menu_kb()
)
@bot.callback_query_handler(func=lambda c: c.data.startswith('lang::'))
def change_language(c):
words = re.split('::', c.data)
lang = words[1]
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = text.user_messages[lang]['chosen_language'])
api.register_new_user(c.from_user, lang)
@bot.callback_query_handler(func = lambda c: c.data.startswith('get_notifications'))
def notify_change(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
utils.change_state_main(chat_id, 'get_notifications')
bot.edit_message_reply_markup(
chat_id=c.message.chat.id,
message_id=c.message.message_id,
reply_markup=group_setting(utils.parse_chat_id(c))
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)[c.data.split('::')[0]]])
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'Вы не являетесь администратором. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)[c.data.split('::')[0]]])
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('del_url'))
def del_url(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
utils.change_state_deletions_main(chat_id, 'url')
bot.edit_message_reply_markup(
chat_id=c.message.chat.id,
message_id=c.message.message_id,
reply_markup=group_setting(utils.parse_chat_id(c))
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)['deletions']['url']])
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'Вы не являетесь администратором. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)['deletions']['url']])
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('del_system'))
def del_system(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
utils.change_state_deletions_main(chat_id, 'system')
bot.edit_message_reply_markup(
chat_id=c.message.chat.id,
message_id=c.message.message_id,
reply_markup=group_setting(utils.parse_chat_id(c))
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)['deletions']['system']])
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'Вы не являетесь администратором. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)['deletions']['system']])
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('kick_bots'))
def kick_bots(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
utils.change_state_main(chat_id, 'kick_bots')
bot.edit_message_reply_markup(
chat_id=c.message.chat.id,
message_id=c.message.message_id,
reply_markup=group_setting(utils.parse_chat_id(c))
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)[c.data.split('::')[0]]])
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)[c.data.split('::')[0]]])
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('deletions_settings'))
def to_deletions(c):
chat_id = utils.parse_chat_id(c)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = delete_settings(utils.parse_chat_id(c))
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Переход выполнен.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('delete::'))
def group_settings_deletions(c):
chat_id = utils.parse_chat_id(c)
cont_type = re.split('::', c.data)[1]
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
utils.change_state_deletions_files(chat_id, cont_type)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = delete_settings(utils.parse_chat_id(c))
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены. Статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)['deletions']['files'][cont_type]])
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)['deletions']['files'][cont_type]])
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('change_all'))
def group_settings_deletions_all(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
for i in config.available_attachments:
utils.change_state_deletions_files(chat_id, i)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = delete_settings(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('to_group_settings_menu'))
def group_settings_deletions_photo(c):
chat_id = utils.parse_chat_id(c)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup=group_setting(utils.parse_chat_id(c))
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('warns_del'))
def del_warns(c):
user_id = utils.parse_user_id(c)
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
api.zeroing_warns(user_id, chat_id)
bot.edit_message_text(
text = 'Предупреждения обнулены.',
chat_id = c.message.chat.id,
message_id = c.message.message_id
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('new_users_restrictions'))
def new_users_restrictions(c):
chat_id = utils.parse_chat_id(c)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = new_users_restrictions_kb(chat_id)
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('read_only'))
def new_users_ro(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
settings = api.get_group_params(chat_id)
settings['restrictions']['read_only'] = config.settings_states[settings['restrictions']['read_only']]
api.change_group_params(chat_id, ujson.dumps(settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = new_users_restrictions_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('time_ro_'))
def ro_time_change(c):
change_time = int(c.data.split('_')[2].split('::')[0])
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
settings = api.get_group_params(chat_id)
settings['restrictions']['for_time'] = settings['restrictions']['for_time'] + change_time
if settings['restrictions']['for_time'] < 1:
settings['restrictions']['for_time'] = 1
api.change_group_params(chat_id, ujson.dumps(settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = new_users_restrictions_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('warns_count_'))
def ro_time_change(c):
change_count = int(c.data.split('_')[2].split('::')[0])
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
settings = api.get_group_params(chat_id)
settings['warns']['count'] = settings['warns']['count'] + change_count
if settings['warns']['count'] < 1:
settings['warns']['count'] = 1
api.change_group_params(chat_id, ujson.dumps(settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = warns_settings_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('warns_settings'))
def warns_count_change(c):
chat_id = utils.parse_chat_id(c)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = warns_settings_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('warns_action_'))
def warns_count_change(c):
new_mod = int(c.data.split('_')[2].split('::')[0])
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
settings = api.get_group_params(chat_id)
settings['warns']['action'] = new_mod
api.change_group_params(chat_id, ujson.dumps(settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = warns_settings_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('unban_new_user'))
def unban_new_user(c):
chat_id = utils.parse_chat_id(c)
user_id = utils.parse_user_id(c)
if api.get_group_params(chat_id)['restrictions']['admins_only']:
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
utils.unban_user_button(c)
user = bot.get_chat_member(
chat_id,
user_id
)
bot.edit_message_text(
text = text.group_commands[utils.get_group_lang(c.message.chat.id)]['restricted']['new_user']['button_pressed'].format(
user_id = user.user.id,
user_name = api.replacer(user.user.first_name)
),
parse_mode = 'HTML',
chat_id = c.message.chat.id,
message_id = c.message.message_id
)
utils.add_to_delete_queue(msg.chat.id, r.message_id, api.get_group_params(msg.chat.id)['greeting']['delete_timer'])
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
else:
if c.from_user.id == user_id or utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
user = bot.get_chat_member(
chat_id,
user_id
)
if user.status in ['restricted']:
bot.restrict_chat_member(
chat_id,
user_id,
can_send_media_messages=True,
can_add_web_page_previews=True,
can_send_messages=True,
can_send_other_messages=True
)
bot.edit_message_text(
text = text.group_commands[utils.get_group_lang(c.message.chat.id)]['restricted']['new_user']['button_pressed'].format(
user_id = user.user.id,
user_name = api.replacer(user.user.first_name)
),
parse_mode = 'HTML',
chat_id = c.message.chat.id,
message_id = c.message.message_id
)
utils.add_to_delete_queue(chat_id, c.message.message_id, api.get_group_params(chat_id)['greeting']['delete_timer'])
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('new_restrictions_admins_only_'))
def warns_count_change(c):
chat_id = utils.parse_chat_id(c)
state = c.data.split('_')[4].split('::')[0]
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
settings = api.get_group_params(chat_id)
settings['restrictions']['admins_only'] = utils.to_bool(state)
api.change_group_params(chat_id, ujson.dumps(settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = new_users_restrictions_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('welcome_settings'))
def welcome_settings(c):
chat_id = utils.parse_chat_id(c)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = welcome_settings_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('welcome_state'))
def welcome_settings_state(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
settings = api.get_group_params(chat_id)
curr_state = settings['greeting']['is_enabled']
new_state = config.settings_states[curr_state]
settings['greeting']['is_enabled'] = new_state
api.change_group_params(chat_id, ujson.dumps(settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = welcome_settings_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('welcome_timer'))
def welcome_timer_change(c):
change_count = int(c.data.split('_')[2].split('::')[0])
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
settings = api.get_group_params(chat_id)
settings['greeting']['delete_timer'] = settings['greeting']['delete_timer'] + change_count
if settings['greeting']['delete_timer'] < 0:
settings['greeting']['delete_timer'] = 0
api.change_group_params(chat_id, ujson.dumps(settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = welcome_settings_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('settings_delete'))
def del_settings(c):
words = c.data.split()
bot.delete_message(
c.message.chat.id,
words[2]
)
bot.delete_message(
c.message.chat.id,
words[1]
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('welcome_get'))
def get_welcome_text(c):
chat_id = utils.parse_chat_id(c)
bot.send_message(
c.message.chat.id,
utils.get_greeting(chat_id),
parse_mode = 'HTML'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('reset_settings'))
def reset_settings_button(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
if c.data.startswith('reset_settings_confirmation'):
api.register_new_chat(c.message.chat)
api.change_group_params(chat_id, ujson.dumps(config.default_group_settings))
bot.send_message(
c.message.chat.id,
'Настройки сброшены.'
)
bot.delete_message(
c.message.chat.id,
c.message.message_id
)
else:
bot.delete_message(
c.message.chat.id,
c.message.message_id
)
bot.send_message(
c.message.chat.id,
'Сброс отменен'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('leave_'))
def bot_leave_cb(c):
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
if c.data.endswith('confirm'):
bot.delete_message(
c.message.chat.id,
c.message.message_id
)
bot.send_message(
c.message.chat.id,
text.group_commands[utils.get_group_lang(c.message.chat.id)]['leave']['accepted']
)
bot.leave_chat(
c.message.chat.id
)
else:
bot.send_message(
c.message.chat.id,
text.group_commands[utils.get_group_lang(c.message.chat.id)]['leave']['cancelled']
)
bot.delete_message(
c.message.chat.id,
c.message.message_id
)
# @bot.callback_query_handler(func = lambda c: c.data.startswith('settings_captcha'))
# def change_captcha_settings(c):
# chat_id = utils.parse_chat_id(c)
# if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
# settings = api.get_group_params(chat_id)
# settings['']
# api.change_group_params(chat_id, )
# Вебхук
bot.remove_webhook()
bot.set_webhook(
url=WEBHOOK_URL_BASE + WEBHOOK_URL_PATH,
certificate=open(WEBHOOK_SSL_CERT, 'r'))
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(WEBHOOK_SSL_CERT, WEBHOOK_SSL_PRIV)
# Start aiohttp server
web.run_app(
app,
host=WEBHOOK_LISTEN,
port=WEBHOOK_PORT,
ssl_context=context,
)
# bot.remove_webhook()
# bot.polling()
|
[
"utils.check_text",
"ssl.SSLContext",
"api.get_chats_count",
"api.get_user_param",
"utils.have_args",
"utils.send_err_report",
"utils.new_user_in_chat",
"utils.ban_stickerpack",
"utils.to_bool",
"utils.get_my_ip",
"utils.unban_stickerpack",
"utils.get_greeting",
"utils.parse_chat_id",
"utils.check_global_ban",
"utils.get_user_lang",
"telebot.TeleBot",
"telebot.types.Update.de_json",
"utils.change_state_deletions_main",
"utils.remove_log_channel",
"telebot.types.ReplyKeyboardRemove",
"utils.check_super_user",
"logging.error",
"aiohttp.web.Response",
"api.get_unblocked_chats_count",
"random.randint",
"telebot.types.ReplyKeyboardMarkup",
"utils.change_state_main",
"utils.set_log_channel",
"utils.ban_user",
"utils.check_for_urls",
"utils.is_sticker_restricted",
"api.zeroing_warns",
"utils.change_state_deletions_files",
"utils.get_top_inviters",
"aiohttp.web.run_app",
"telebot.types.InlineKeyboardMarkup",
"utils.unban_user",
"utils.get_log_id",
"utils.is_user_new",
"utils.check_status",
"utils.read_only",
"aiohttp.web.Application",
"ujson.dumps",
"utils.generate_rules_text",
"utils.check_log",
"utils.get_group_lang",
"utils.new_warn",
"re.split",
"telebot.types.KeyboardButton",
"api.register_new_chat",
"api.get_users_count",
"api.get_group_params",
"utils.is_restricted",
"utils.parse_arg",
"utils.ban_sticker",
"utils.set_rules",
"utils.set_greeting",
"datetime.datetime.fromtimestamp",
"utils.unban_sticker",
"utils.global_ban",
"api.register_new_user",
"utils.unban_user_button",
"utils.global_unban",
"utils.need_greeting",
"logging.basicConfig",
"telebot.types.InlineKeyboardButton",
"utils.parse_user_id",
"utils.kick_user",
"api.get_bot_settings",
"api.get_unblocked_users_count",
"time.time",
"utils.new_member_logs",
"utils.generate_welcome_text",
"multiprocessing.Process",
"api.replacer",
"logging.getLogger"
] |
[((358, 375), 'utils.get_my_ip', 'utils.get_my_ip', ([], {}), '()\n', (373, 375), False, 'import utils\n'), ((826, 868), 'telebot.TeleBot', 'telebot.TeleBot', ([], {'token': 'secret_config.token'}), '(token=secret_config.token)\n', (841, 868), False, 'import telebot\n'), ((911, 939), 'logging.getLogger', 'logging.getLogger', (['"""telebot"""'], {}), "('telebot')\n", (928, 939), False, 'import logging\n'), ((954, 981), 'logging.getLogger', 'logging.getLogger', (['"""sqlite"""'], {}), "('sqlite')\n", (971, 981), False, 'import logging\n'), ((994, 1024), 'logging.getLogger', 'logging.getLogger', (['"""main_info"""'], {}), "('main_info')\n", (1011, 1024), False, 'import logging\n'), ((1039, 1067), 'logging.getLogger', 'logging.getLogger', (['"""reports"""'], {}), "('reports')\n", (1056, 1067), False, 'import logging\n'), ((1212, 1240), 'logging.getLogger', 'logging.getLogger', (['"""telebot"""'], {}), "('telebot')\n", (1229, 1240), False, 'import logging\n'), ((1254, 1280), 'logging.getLogger', 'logging.getLogger', (['"""mysql"""'], {}), "('mysql')\n", (1271, 1280), False, 'import logging\n'), ((1293, 1323), 'logging.getLogger', 'logging.getLogger', (['"""main_info"""'], {}), "('main_info')\n", (1310, 1323), False, 'import logging\n'), ((1338, 1366), 'logging.getLogger', 'logging.getLogger', (['"""reports"""'], {}), "('reports')\n", (1355, 1366), False, 'import logging\n'), ((1400, 1586), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(filename)s [LINE:%(lineno)-3d]# %(levelname)-8s - %(name)-9s [%(asctime)s] - %(message)-50s """', 'datefmt': '"""%m/%d/%Y %I:%M:%S %p"""', 'level': 'logging.INFO'}), "(format=\n '%(filename)s [LINE:%(lineno)-3d]# %(levelname)-8s - %(name)-9s [%(asctime)s] - %(message)-50s '\n , datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)\n", (1419, 1586), False, 'import logging\n'), ((1668, 1685), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (1683, 1685), False, 'from aiohttp import web\n'), ((1691, 1732), 'multiprocessing.Process', 'Thread', ([], {'target': 'utils.check_deleting_queue'}), '(target=utils.check_deleting_queue)\n', (1697, 1732), True, 'from multiprocessing import Process as Thread\n'), ((69728, 69764), 'ssl.SSLContext', 'ssl.SSLContext', (['ssl.PROTOCOL_TLSv1_2'], {}), '(ssl.PROTOCOL_TLSv1_2)\n', (69742, 69764), False, 'import ssl\n'), ((69849, 69926), 'aiohttp.web.run_app', 'web.run_app', (['app'], {'host': 'WEBHOOK_LISTEN', 'port': 'WEBHOOK_PORT', 'ssl_context': 'context'}), '(app, host=WEBHOOK_LISTEN, port=WEBHOOK_PORT, ssl_context=context)\n', (69860, 69926), False, 'from aiohttp import web\n'), ((806, 817), 'time.time', 'time.time', ([], {}), '()\n', (815, 817), False, 'import time\n'), ((2163, 2191), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {}), '()\n', (2189, 2191), False, 'from telebot import types\n'), ((2432, 2471), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {'row_width': '(1)'}), '(row_width=1)\n', (2458, 2471), False, 'from telebot import types\n'), ((2492, 2521), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (2512, 2521), False, 'import api\n'), ((4579, 4618), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {'row_width': '(4)'}), '(row_width=4)\n', (4605, 4618), False, 'from telebot import types\n'), ((4641, 4670), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (4661, 4670), False, 'import api\n'), ((6042, 6081), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {'row_width': '(4)'}), '(row_width=4)\n', (6068, 6081), False, 'from telebot import types\n'), ((6104, 6133), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (6124, 6133), False, 'import api\n'), ((7798, 7837), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {'row_width': '(4)'}), '(row_width=4)\n', (7824, 7837), False, 'from telebot import types\n'), ((7860, 7889), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (7880, 7889), False, 'import api\n'), ((9650, 9689), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {'row_width': '(1)'}), '(row_width=1)\n', (9676, 9689), False, 'from telebot import types\n'), ((9897, 9936), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {'row_width': '(1)'}), '(row_width=1)\n', (9923, 9936), False, 'from telebot import types\n'), ((10199, 10238), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {'row_width': '(1)'}), '(row_width=1)\n', (10225, 10238), False, 'from telebot import types\n'), ((10259, 10302), 'api.get_user_param', 'api.get_user_param', (['msg.chat.id', '"""settings"""'], {}), "(msg.chat.id, 'settings')\n", (10277, 10302), False, 'import api\n'), ((10715, 10754), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {'row_width': '(1)'}), '(row_width=1)\n', (10741, 10754), False, 'from telebot import types\n'), ((10775, 10804), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (10795, 10804), False, 'import api\n'), ((11551, 11590), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {'row_width': '(1)'}), '(row_width=1)\n', (11577, 11590), False, 'from telebot import types\n'), ((11962, 12001), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {'row_width': '(1)'}), '(row_width=1)\n', (11988, 12001), False, 'from telebot import types\n'), ((12015, 12084), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Мои чаты"""', 'callback_data': '"""my_chats"""'}), "(text='Мои чаты', callback_data='my_chats')\n", (12041, 12084), False, 'from telebot import types\n'), ((12100, 12177), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Изменить язык"""', 'callback_data': '"""change_lang"""'}), "(text='Изменить язык', callback_data='change_lang')\n", (12126, 12177), False, 'from telebot import types\n'), ((12212, 12243), 'utils.check_super_user', 'utils.check_super_user', (['user_id'], {}), '(user_id)\n', (12234, 12243), False, 'import utils\n'), ((12399, 12438), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {'row_width': '(2)'}), '(row_width=2)\n', (12425, 12438), False, 'from telebot import types\n'), ((12452, 12527), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Рассылка"""', 'callback_data': '"""broadcast_menu"""'}), "(text='Рассылка', callback_data='broadcast_menu')\n", (12478, 12527), False, 'from telebot import types\n'), ((12543, 12616), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Статистика"""', 'callback_data': '"""stats_menu"""'}), "(text='Статистика', callback_data='stats_menu')\n", (12569, 12616), False, 'from telebot import types\n'), ((12807, 12846), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {'row_width': '(1)'}), '(row_width=1)\n', (12833, 12846), False, 'from telebot import types\n'), ((12860, 12950), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Рассылка-проверка"""', 'callback_data': '"""check_broadcast"""'}), "(text='Рассылка-проверка', callback_data=\n 'check_broadcast')\n", (12886, 12950), False, 'from telebot import types\n'), ((12961, 13055), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Рассылка сообщения"""', 'callback_data': '"""broadcast_settings"""'}), "(text='Рассылка сообщения', callback_data=\n 'broadcast_settings')\n", (12987, 13055), False, 'from telebot import types\n'), ((13241, 13280), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {'row_width': '(2)'}), '(row_width=2)\n', (13267, 13280), False, 'from telebot import types\n'), ((13294, 13392), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Ввести сообщение"""', 'callback_data': '"""broadcast_message::input"""'}), "(text='Ввести сообщение', callback_data=\n 'broadcast_message::input')\n", (13320, 13392), False, 'from telebot import types\n'), ((13403, 13505), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Просмотреть сообщение"""', 'callback_data': '"""broadcast_message::show"""'}), "(text='Просмотреть сообщение', callback_data=\n 'broadcast_message::show')\n", (13429, 13505), False, 'from telebot import types\n'), ((13516, 13613), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Начать рассылку"""', 'callback_data': '"""broadcast_message::start"""'}), "(text='Начать рассылку', callback_data=\n 'broadcast_message::start')\n", (13542, 13613), False, 'from telebot import types\n'), ((13710, 13749), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {'row_width': '(3)'}), '(row_width=3)\n', (13736, 13749), False, 'from telebot import types\n'), ((13936, 14030), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Только диалоги"""', 'callback_data': '"""broadcast_check::users"""'}), "(text='Только диалоги', callback_data=\n 'broadcast_check::users')\n", (13962, 14030), False, 'from telebot import types\n'), ((14041, 14132), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Только чаты"""', 'callback_data': '"""broadcast_check::chats"""'}), "(text='Только чаты', callback_data=\n 'broadcast_check::chats')\n", (14067, 14132), False, 'from telebot import types\n'), ((14143, 14219), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Все"""', 'callback_data': '"""broadcast_check::all"""'}), "(text='Все', callback_data='broadcast_check::all')\n", (14169, 14219), False, 'from telebot import types\n'), ((14388, 14483), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Начать рассылку"""', 'callback_data': '"""broadcast_check::start"""'}), "(text='Начать рассылку', callback_data=\n 'broadcast_check::start')\n", (14414, 14483), False, 'from telebot import types\n'), ((14598, 14637), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {'row_width': '(2)'}), '(row_width=2)\n', (14624, 14637), False, 'from telebot import types\n'), ((18132, 18160), 'utils.check_log', 'utils.check_log', (['msg.chat.id'], {}), '(msg.chat.id)\n', (18147, 18160), False, 'import utils\n'), ((19117, 19166), 'telebot.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (19142, 19166), False, 'from telebot import types\n'), ((19662, 19673), 'time.time', 'time.time', ([], {}), '()\n', (19671, 19673), False, 'import time\n'), ((19701, 19729), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {}), '()\n', (19727, 19729), False, 'from telebot import types\n'), ((20488, 20499), 'time.time', 'time.time', ([], {}), '()\n', (20497, 20499), False, 'import time\n'), ((20507, 20529), 'utils.is_user_new', 'utils.is_user_new', (['msg'], {}), '(msg)\n', (20524, 20529), False, 'import utils\n'), ((21218, 21229), 'time.time', 'time.time', ([], {}), '()\n', (21227, 21229), False, 'import time\n'), ((21234, 21265), 'api.register_new_chat', 'api.register_new_chat', (['msg.chat'], {}), '(msg.chat)\n', (21255, 21265), False, 'import api\n'), ((21873, 21884), 'time.time', 'time.time', ([], {}), '()\n', (21882, 21884), False, 'import time\n'), ((22539, 22550), 'time.time', 'time.time', ([], {}), '()\n', (22548, 22550), False, 'import time\n'), ((22555, 22575), 'utils.kick_user', 'utils.kick_user', (['msg'], {}), '(msg)\n', (22570, 22575), False, 'import utils\n'), ((22780, 22791), 'time.time', 'time.time', ([], {}), '()\n', (22789, 22791), False, 'import time\n'), ((24018, 24029), 'time.time', 'time.time', ([], {}), '()\n', (24027, 24029), False, 'import time\n'), ((25168, 25179), 'time.time', 'time.time', ([], {}), '()\n', (25177, 25179), False, 'import time\n'), ((25184, 25215), 'api.register_new_chat', 'api.register_new_chat', (['msg.chat'], {}), '(msg.chat)\n', (25205, 25215), False, 'import api\n'), ((25246, 25272), 'utils.new_member_logs', 'utils.new_member_logs', (['msg'], {}), '(msg)\n', (25267, 25272), False, 'import utils\n'), ((28499, 28510), 'time.time', 'time.time', ([], {}), '()\n', (28508, 28510), False, 'import time\n'), ((28797, 28808), 'time.time', 'time.time', ([], {}), '()\n', (28806, 28808), False, 'import time\n'), ((30153, 30164), 'time.time', 'time.time', ([], {}), '()\n', (30162, 30164), False, 'import time\n'), ((30946, 30957), 'time.time', 'time.time', ([], {}), '()\n', (30955, 30957), False, 'import time\n'), ((30965, 31014), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (30983, 31014), False, 'import utils\n'), ((31429, 31440), 'time.time', 'time.time', ([], {}), '()\n', (31438, 31440), False, 'import time\n'), ((31448, 31497), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (31466, 31497), False, 'import utils\n'), ((31791, 31802), 'time.time', 'time.time', ([], {}), '()\n', (31800, 31802), False, 'import time\n'), ((31810, 31859), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (31828, 31859), False, 'import utils\n'), ((32161, 32172), 'time.time', 'time.time', ([], {}), '()\n', (32170, 32172), False, 'import time\n'), ((32552, 32563), 'time.time', 'time.time', ([], {}), '()\n', (32561, 32563), False, 'import time\n'), ((32571, 32620), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (32589, 32620), False, 'import utils\n'), ((33037, 33048), 'time.time', 'time.time', ([], {}), '()\n', (33046, 33048), False, 'import time\n'), ((33620, 33631), 'time.time', 'time.time', ([], {}), '()\n', (33629, 33631), False, 'import time\n'), ((33948, 33959), 'time.time', 'time.time', ([], {}), '()\n', (33957, 33959), False, 'import time\n'), ((34274, 34285), 'time.time', 'time.time', ([], {}), '()\n', (34283, 34285), False, 'import time\n'), ((34865, 34876), 'time.time', 'time.time', ([], {}), '()\n', (34874, 34876), False, 'import time\n'), ((35773, 35784), 'time.time', 'time.time', ([], {}), '()\n', (35782, 35784), False, 'import time\n'), ((36429, 36440), 'time.time', 'time.time', ([], {}), '()\n', (36438, 36440), False, 'import time\n'), ((36881, 36892), 'time.time', 'time.time', ([], {}), '()\n', (36890, 36892), False, 'import time\n'), ((36902, 36930), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {}), '()\n', (36928, 36930), False, 'from telebot import types\n'), ((37239, 37288), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (37257, 37288), False, 'import utils\n'), ((37910, 37921), 'time.time', 'time.time', ([], {}), '()\n', (37919, 37921), False, 'import time\n'), ((40142, 40153), 'time.time', 'time.time', ([], {}), '()\n', (40151, 40153), False, 'import time\n'), ((40486, 40497), 'time.time', 'time.time', ([], {}), '()\n', (40495, 40497), False, 'import time\n'), ((40950, 40961), 'time.time', 'time.time', ([], {}), '()\n', (40959, 40961), False, 'import time\n'), ((40969, 40993), 'utils.is_restricted', 'utils.is_restricted', (['msg'], {}), '(msg)\n', (40988, 40993), False, 'import utils\n'), ((41271, 41293), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (41290, 41293), False, 'import utils\n'), ((41338, 41369), 'utils.get_top_inviters', 'utils.get_top_inviters', (['chat_id'], {}), '(chat_id)\n', (41360, 41369), False, 'import utils\n'), ((42237, 42276), 'api.get_user_param', 'api.get_user_param', (['user_id', '"""settings"""'], {}), "(user_id, 'settings')\n", (42255, 42276), False, 'import api\n'), ((42835, 42857), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (42854, 42857), False, 'import utils\n'), ((44414, 44436), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (44433, 44436), False, 'import utils\n'), ((48099, 48121), 're.split', 're.split', (['"""::"""', 'c.data'], {}), "('::', c.data)\n", (48107, 48121), False, 'import re\n'), ((48313, 48353), 'api.register_new_user', 'api.register_new_user', (['c.from_user', 'lang'], {}), '(c.from_user, lang)\n', (48334, 48353), False, 'import api\n'), ((48476, 48498), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (48495, 48498), False, 'import utils\n'), ((49468, 49490), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (49487, 49490), False, 'import utils\n'), ((50456, 50478), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (50475, 50478), False, 'import utils\n'), ((51451, 51473), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (51470, 51473), False, 'import utils\n'), ((52473, 52495), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (52492, 52495), False, 'import utils\n'), ((52910, 52932), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (52929, 52932), False, 'import utils\n'), ((54011, 54033), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (54030, 54033), False, 'import utils\n'), ((54867, 54889), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (54886, 54889), False, 'import utils\n'), ((55292, 55314), 'utils.parse_user_id', 'utils.parse_user_id', (['c'], {}), '(c)\n', (55311, 55314), False, 'import utils\n'), ((55329, 55351), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (55348, 55351), False, 'import utils\n'), ((55974, 55996), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (55993, 55996), False, 'import utils\n'), ((56289, 56311), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (56308, 56311), False, 'import utils\n'), ((57302, 57324), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (57321, 57324), False, 'import utils\n'), ((58414, 58436), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (58433, 58436), False, 'import utils\n'), ((59425, 59447), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (59444, 59447), False, 'import utils\n'), ((59909, 59931), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (59928, 59931), False, 'import utils\n'), ((60797, 60819), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (60816, 60819), False, 'import utils\n'), ((60834, 60856), 'utils.parse_user_id', 'utils.parse_user_id', (['c'], {}), '(c)\n', (60853, 60856), False, 'import utils\n'), ((63489, 63511), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (63508, 63511), False, 'import utils\n'), ((64462, 64484), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (64481, 64484), False, 'import utils\n'), ((64901, 64923), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (64920, 64923), False, 'import utils\n'), ((65980, 66002), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (65999, 66002), False, 'import utils\n'), ((67308, 67330), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (67327, 67330), False, 'import utils\n'), ((67578, 67600), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (67597, 67600), False, 'import utils\n'), ((1892, 1939), 'telebot.types.Update.de_json', 'telebot.types.Update.de_json', (['request_body_dict'], {}), '(request_body_dict)\n', (1920, 1939), False, 'import telebot\n'), ((1997, 2011), 'aiohttp.web.Response', 'web.Response', ([], {}), '()\n', (2009, 2011), False, 'from aiohttp import web\n'), ((2037, 2061), 'aiohttp.web.Response', 'web.Response', ([], {'status': '(403)'}), '(status=403)\n', (2049, 2061), False, 'from aiohttp import web\n'), ((4428, 4514), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""К списку групп"""', 'callback_data': '"""to_groups_list"""'}), "(text='К списку групп', callback_data=\n 'to_groups_list')\n", (4454, 4514), False, 'from telebot import types\n'), ((12655, 12734), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""В главное меню"""', 'callback_data': '"""to_main_menu"""'}), "(text='В главное меню', callback_data='to_main_menu')\n", (12681, 12734), False, 'from telebot import types\n'), ((13089, 13168), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""В главное меню"""', 'callback_data': '"""to_main_menu"""'}), "(text='В главное меню', callback_data='to_main_menu')\n", (13115, 13168), False, 'from telebot import types\n'), ((13784, 13825), 'api.get_bot_settings', 'api.get_bot_settings', (['secret_config.token'], {}), '(secret_config.token)\n', (13804, 13825), False, 'import api\n'), ((14670, 14709), 'api.get_user_param', 'api.get_user_param', (['user_id', '"""settings"""'], {}), "(user_id, 'settings')\n", (14688, 14709), False, 'import api\n'), ((14955, 15034), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""В главное меню"""', 'callback_data': '"""to_main_menu"""'}), "(text='В главное меню', callback_data='to_main_menu')\n", (14981, 15034), False, 'from telebot import types\n'), ((19178, 19212), 'telebot.types.KeyboardButton', 'types.KeyboardButton', ([], {'text': '"""/rmkb"""'}), "(text='/rmkb')\n", (19198, 19212), False, 'from telebot import types\n'), ((20542, 20562), 'utils.have_args', 'utils.have_args', (['msg'], {}), '(msg)\n', (20557, 20562), False, 'import utils\n'), ((20790, 20832), 'api.register_new_user', 'api.register_new_user', (['msg.from_user', '"""ru"""'], {}), "(msg.from_user, 'ru')\n", (20811, 20832), False, 'import api\n'), ((22030, 22060), 'utils.check_text', 'utils.check_text', (['new_greeting'], {}), '(new_greeting)\n', (22046, 22060), False, 'import utils\n'), ((22841, 22862), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (22855, 22862), False, 'import random\n'), ((23870, 23889), 'utils.ban_user', 'utils.ban_user', (['msg'], {}), '(msg)\n', (23884, 23889), False, 'import utils\n'), ((30172, 30221), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (30190, 30221), False, 'import utils\n'), ((30226, 30246), 'utils.have_args', 'utils.have_args', (['msg'], {}), '(msg)\n', (30241, 30246), False, 'import utils\n'), ((30325, 30355), 'utils.unban_user', 'utils.unban_user', (['msg', 'user_id'], {}), '(msg, user_id)\n', (30341, 30355), False, 'import utils\n'), ((31024, 31055), 'api.register_new_chat', 'api.register_new_chat', (['msg.chat'], {}), '(msg.chat)\n', (31045, 31055), False, 'import api\n'), ((31507, 31527), 'utils.read_only', 'utils.read_only', (['msg'], {}), '(msg)\n', (31522, 31527), False, 'import utils\n'), ((31546, 31594), 'utils.send_err_report', 'utils.send_err_report', (['msg', '"""not_enought_rights"""'], {}), "(msg, 'not_enought_rights')\n", (31567, 31594), False, 'import utils\n'), ((31869, 31895), 'utils.ban_stickerpack', 'utils.ban_stickerpack', (['msg'], {}), '(msg)\n', (31890, 31895), False, 'import utils\n'), ((31914, 31962), 'utils.send_err_report', 'utils.send_err_report', (['msg', '"""not_enought_rights"""'], {}), "(msg, 'not_enought_rights')\n", (31935, 31962), False, 'import utils\n'), ((32180, 32229), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (32198, 32229), False, 'import utils\n'), ((32234, 32254), 'utils.have_args', 'utils.have_args', (['msg'], {}), '(msg)\n', (32249, 32254), False, 'import utils\n'), ((32315, 32361), 'utils.unban_stickerpack', 'utils.unban_stickerpack', (['msg', 'stickerpack_name'], {}), '(msg, stickerpack_name)\n', (32338, 32361), False, 'import utils\n'), ((32688, 32722), 'utils.ban_sticker', 'utils.ban_sticker', (['msg', 'sticker_id'], {}), '(msg, sticker_id)\n', (32705, 32722), False, 'import utils\n'), ((33056, 33076), 'utils.have_args', 'utils.have_args', (['msg'], {}), '(msg)\n', (33071, 33076), False, 'import utils\n'), ((33081, 33130), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (33099, 33130), False, 'import utils\n'), ((33185, 33221), 'utils.unban_sticker', 'utils.unban_sticker', (['msg', 'sticker_id'], {}), '(msg, sticker_id)\n', (33204, 33221), False, 'import utils\n'), ((34293, 34342), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (34311, 34342), False, 'import utils\n'), ((34464, 34483), 'utils.new_warn', 'utils.new_warn', (['msg'], {}), '(msg)\n', (34478, 34483), False, 'import utils\n'), ((35927, 35954), 'utils.check_text', 'utils.check_text', (['new_rules'], {}), '(new_rules)\n', (35943, 35954), False, 'import utils\n'), ((37989, 38013), 'utils.is_restricted', 'utils.is_restricted', (['msg'], {}), '(msg)\n', (38008, 38013), False, 'import utils\n'), ((40505, 40529), 'utils.is_restricted', 'utils.is_restricted', (['msg'], {}), '(msg)\n', (40524, 40529), False, 'import utils\n'), ((40533, 40565), 'utils.is_sticker_restricted', 'utils.is_sticker_restricted', (['msg'], {}), '(msg)\n', (40560, 40565), False, 'import utils\n'), ((46289, 46330), 'api.get_bot_settings', 'api.get_bot_settings', (['secret_config.token'], {}), '(secret_config.token)\n', (46309, 46330), False, 'import api\n'), ((46874, 47129), 'multiprocessing.Process', 'Thread', ([], {'target': 'utils.make_broadcast', 'kwargs': "{'is_test': True, 'receivers': curr_bot_settings['broadcast']['check'][\n 'recievers'], 'cont_type': 'text', 'msg_text': '', 'file_id': '',\n 'user_id': c.from_user.id, 'message_id': c.message.message_id}"}), "(target=utils.make_broadcast, kwargs={'is_test': True, 'receivers':\n curr_bot_settings['broadcast']['check']['recievers'], 'cont_type':\n 'text', 'msg_text': '', 'file_id': '', 'user_id': c.from_user.id,\n 'message_id': c.message.message_id})\n", (46880, 47129), True, 'from multiprocessing import Process as Thread\n'), ((47244, 47272), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {}), '()\n', (47270, 47272), False, 'from telebot import types\n'), ((48541, 48563), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (48560, 48563), False, 'import utils\n'), ((48574, 48627), 'utils.change_state_main', 'utils.change_state_main', (['chat_id', '"""get_notifications"""'], {}), "(chat_id, 'get_notifications')\n", (48597, 48627), False, 'import utils\n'), ((49533, 49555), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (49552, 49555), False, 'import utils\n'), ((49566, 49615), 'utils.change_state_deletions_main', 'utils.change_state_deletions_main', (['chat_id', '"""url"""'], {}), "(chat_id, 'url')\n", (49599, 49615), False, 'import utils\n'), ((50521, 50543), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (50540, 50543), False, 'import utils\n'), ((50554, 50606), 'utils.change_state_deletions_main', 'utils.change_state_deletions_main', (['chat_id', '"""system"""'], {}), "(chat_id, 'system')\n", (50587, 50606), False, 'import utils\n'), ((51516, 51538), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (51535, 51538), False, 'import utils\n'), ((51549, 51594), 'utils.change_state_main', 'utils.change_state_main', (['chat_id', '"""kick_bots"""'], {}), "(chat_id, 'kick_bots')\n", (51572, 51594), False, 'import utils\n'), ((52949, 52971), 're.split', 're.split', (['"""::"""', 'c.data'], {}), "('::', c.data)\n", (52957, 52971), False, 'import re\n'), ((53017, 53039), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (53036, 53039), False, 'import utils\n'), ((53050, 53104), 'utils.change_state_deletions_files', 'utils.change_state_deletions_files', (['chat_id', 'cont_type'], {}), '(chat_id, cont_type)\n', (53084, 53104), False, 'import utils\n'), ((54076, 54098), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (54095, 54098), False, 'import utils\n'), ((55394, 55416), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (55413, 55416), False, 'import utils\n'), ((55427, 55462), 'api.zeroing_warns', 'api.zeroing_warns', (['user_id', 'chat_id'], {}), '(user_id, chat_id)\n', (55444, 55462), False, 'import api\n'), ((56354, 56376), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (56373, 56376), False, 'import utils\n'), ((56398, 56427), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (56418, 56427), False, 'import api\n'), ((57367, 57389), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (57386, 57389), False, 'import utils\n'), ((57411, 57440), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (57431, 57440), False, 'import api\n'), ((58479, 58501), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (58498, 58501), False, 'import utils\n'), ((58523, 58552), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (58543, 58552), False, 'import api\n'), ((59974, 59996), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (59993, 59996), False, 'import utils\n'), ((60018, 60047), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (60038, 60047), False, 'import api\n'), ((63602, 63624), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (63621, 63624), False, 'import utils\n'), ((63646, 63675), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (63666, 63675), False, 'import api\n'), ((63726, 63746), 'utils.to_bool', 'utils.to_bool', (['state'], {}), '(state)\n', (63739, 63746), False, 'import utils\n'), ((64970, 64992), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (64989, 64992), False, 'import utils\n'), ((65014, 65043), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (65034, 65043), False, 'import api\n'), ((66045, 66067), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (66064, 66067), False, 'import utils\n'), ((66089, 66118), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (66109, 66118), False, 'import api\n'), ((67388, 67415), 'utils.get_greeting', 'utils.get_greeting', (['chat_id'], {}), '(chat_id)\n', (67406, 67415), False, 'import utils\n'), ((67643, 67665), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (67662, 67665), False, 'import utils\n'), ((68484, 68506), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (68503, 68506), False, 'import utils\n'), ((12260, 12335), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""Админка бота"""', 'callback_data': '"""admin_menu"""'}), "(text='Админка бота', callback_data='admin_menu')\n", (12286, 12335), False, 'from telebot import types\n'), ((16054, 16114), 'utils.set_log_channel', 'utils.set_log_channel', (['msg.chat.id', 'msg.forward_from_chat.id'], {}), '(msg.chat.id, msg.forward_from_chat.id)\n', (16075, 16114), False, 'import utils\n'), ((17426, 17463), 'utils.remove_log_channel', 'utils.remove_log_channel', (['msg.chat.id'], {}), '(msg.chat.id)\n', (17450, 17463), False, 'import utils\n'), ((20317, 20328), 'time.time', 'time.time', ([], {}), '()\n', (20326, 20328), False, 'import time\n'), ((21064, 21075), 'time.time', 'time.time', ([], {}), '()\n', (21073, 21075), False, 'import time\n'), ((21292, 21303), 'time.time', 'time.time', ([], {}), '()\n', (21301, 21303), False, 'import time\n'), ((22074, 22111), 'utils.set_greeting', 'utils.set_greeting', (['msg', 'new_greeting'], {}), '(msg, new_greeting)\n', (22092, 22111), False, 'import utils\n'), ((22393, 22404), 'time.time', 'time.time', ([], {}), '()\n', (22402, 22404), False, 'import time\n'), ((22602, 22613), 'time.time', 'time.time', ([], {}), '()\n', (22611, 22613), False, 'import time\n'), ((23916, 23927), 'time.time', 'time.time', ([], {}), '()\n', (23925, 23927), False, 'import time\n'), ((25043, 25054), 'time.time', 'time.time', ([], {}), '()\n', (25052, 25054), False, 'import time\n'), ((25280, 25313), 'api.get_group_params', 'api.get_group_params', (['msg.chat.id'], {}), '(msg.chat.id)\n', (25300, 25313), False, 'import api\n'), ((25745, 25787), 'ujson.dumps', 'ujson.dumps', (['config.default_group_settings'], {}), '(config.default_group_settings)\n', (25756, 25787), False, 'import ujson\n'), ((27195, 27222), 'utils.check_global_ban', 'utils.check_global_ban', (['msg'], {}), '(msg)\n', (27217, 27222), False, 'import utils\n'), ((28098, 28109), 'time.time', 'time.time', ([], {}), '()\n', (28107, 28109), False, 'import time\n'), ((28518, 28551), 'api.get_group_params', 'api.get_group_params', (['msg.chat.id'], {}), '(msg.chat.id)\n', (28538, 28551), False, 'import api\n'), ((28692, 28703), 'time.time', 'time.time', ([], {}), '()\n', (28701, 28703), False, 'import time\n'), ((29993, 30004), 'time.time', 'time.time', ([], {}), '()\n', (30002, 30004), False, 'import time\n'), ((30264, 30284), 'utils.parse_arg', 'utils.parse_arg', (['msg'], {}), '(msg)\n', (30279, 30284), False, 'import utils\n'), ((30365, 30414), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (30383, 30414), False, 'import utils\n'), ((30513, 30543), 'utils.unban_user', 'utils.unban_user', (['msg', 'user_id'], {}), '(msg, user_id)\n', (30529, 30543), False, 'import utils\n'), ((30781, 30792), 'time.time', 'time.time', ([], {}), '()\n', (30790, 30792), False, 'import time\n'), ((31101, 31143), 'ujson.dumps', 'ujson.dumps', (['config.default_group_settings'], {}), '(config.default_group_settings)\n', (31112, 31143), False, 'import ujson\n'), ((31621, 31632), 'time.time', 'time.time', ([], {}), '()\n', (31630, 31632), False, 'import time\n'), ((31989, 32000), 'time.time', 'time.time', ([], {}), '()\n', (31998, 32000), False, 'import time\n'), ((32283, 32303), 'utils.parse_arg', 'utils.parse_arg', (['msg'], {}), '(msg)\n', (32298, 32303), False, 'import utils\n'), ((32388, 32399), 'time.time', 'time.time', ([], {}), '()\n', (32397, 32399), False, 'import time\n'), ((32736, 32785), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (32754, 32785), False, 'import utils\n'), ((32795, 32843), 'utils.send_err_report', 'utils.send_err_report', (['msg', '"""not_enought_rights"""'], {}), "(msg, 'not_enought_rights')\n", (32816, 32843), False, 'import utils\n'), ((32870, 32881), 'time.time', 'time.time', ([], {}), '()\n', (32879, 32881), False, 'import time\n'), ((33153, 33173), 'utils.parse_arg', 'utils.parse_arg', (['msg'], {}), '(msg)\n', (33168, 33173), False, 'import utils\n'), ((33231, 33280), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (33249, 33280), False, 'import utils\n'), ((33319, 33367), 'utils.send_err_report', 'utils.send_err_report', (['msg', '"""not_enought_rights"""'], {}), "(msg, 'not_enought_rights')\n", (33340, 33367), False, 'import utils\n'), ((33519, 33530), 'time.time', 'time.time', ([], {}), '()\n', (33528, 33530), False, 'import time\n'), ((33800, 33811), 'time.time', 'time.time', ([], {}), '()\n', (33809, 33811), False, 'import time\n'), ((34124, 34135), 'time.time', 'time.time', ([], {}), '()\n', (34133, 34135), False, 'import time\n'), ((34388, 34454), 'utils.check_status', 'utils.check_status', (['msg.reply_to_message.from_user.id', 'msg.chat.id'], {}), '(msg.reply_to_message.from_user.id, msg.chat.id)\n', (34406, 34454), False, 'import utils\n'), ((34497, 34546), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (34515, 34546), False, 'import utils\n'), ((34556, 34604), 'utils.send_err_report', 'utils.send_err_report', (['msg', '"""not_enought_rights"""'], {}), "(msg, 'not_enought_rights')\n", (34577, 34604), False, 'import utils\n'), ((34614, 34680), 'utils.check_status', 'utils.check_status', (['msg.reply_to_message.from_user.id', 'msg.chat.id'], {}), '(msg.reply_to_message.from_user.id, msg.chat.id)\n', (34632, 34680), False, 'import utils\n'), ((34760, 34771), 'time.time', 'time.time', ([], {}), '()\n', (34769, 34771), False, 'import time\n'), ((35025, 35036), 'time.time', 'time.time', ([], {}), '()\n', (35034, 35036), False, 'import time\n'), ((35968, 35999), 'utils.set_rules', 'utils.set_rules', (['msg', 'new_rules'], {}), '(msg, new_rules)\n', (35983, 35999), False, 'import utils\n'), ((36273, 36284), 'time.time', 'time.time', ([], {}), '()\n', (36282, 36284), False, 'import time\n'), ((35681, 35730), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (35699, 35730), False, 'import utils\n'), ((36518, 36548), 'utils.generate_rules_text', 'utils.generate_rules_text', (['msg'], {}), '(msg)\n', (36543, 36548), False, 'import utils\n'), ((36711, 36722), 'time.time', 'time.time', ([], {}), '()\n', (36720, 36722), False, 'import time\n'), ((37518, 37558), 'utils.check_super_user', 'utils.check_super_user', (['msg.from_user.id'], {}), '(msg.from_user.id)\n', (37540, 37558), False, 'import utils\n'), ((38022, 38071), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (38040, 38071), False, 'import utils\n'), ((38224, 38264), 'utils.check_super_user', 'utils.check_super_user', (['msg.from_user.id'], {}), '(msg.from_user.id)\n', (38246, 38264), False, 'import utils\n'), ((39990, 40001), 'time.time', 'time.time', ([], {}), '()\n', (39999, 40001), False, 'import time\n'), ((40292, 40303), 'time.time', 'time.time', ([], {}), '()\n', (40301, 40303), False, 'import time\n'), ((40683, 40694), 'time.time', 'time.time', ([], {}), '()\n', (40692, 40694), False, 'import time\n'), ((41111, 41122), 'time.time', 'time.time', ([], {}), '()\n', (41120, 41122), False, 'import time\n'), ((46493, 46523), 'ujson.dumps', 'ujson.dumps', (['curr_bot_settings'], {}), '(curr_bot_settings)\n', (46504, 46523), False, 'import ujson\n'), ((47288, 47367), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""В главное меню"""', 'callback_data': '"""to_main_menu"""'}), "(text='В главное меню', callback_data='to_main_menu')\n", (47314, 47367), False, 'from telebot import types\n'), ((54160, 54206), 'utils.change_state_deletions_files', 'utils.change_state_deletions_files', (['chat_id', 'i'], {}), '(chat_id, i)\n', (54194, 54206), False, 'import utils\n'), ((56579, 56600), 'ujson.dumps', 'ujson.dumps', (['settings'], {}), '(settings)\n', (56590, 56600), False, 'import ujson\n'), ((57686, 57707), 'ujson.dumps', 'ujson.dumps', (['settings'], {}), '(settings)\n', (57697, 57707), False, 'import ujson\n'), ((58759, 58780), 'ujson.dumps', 'ujson.dumps', (['settings'], {}), '(settings)\n', (58770, 58780), False, 'import ujson\n'), ((60135, 60156), 'ujson.dumps', 'ujson.dumps', (['settings'], {}), '(settings)\n', (60146, 60156), False, 'import ujson\n'), ((60864, 60893), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (60884, 60893), False, 'import api\n'), ((60972, 60994), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (60991, 60994), False, 'import utils\n'), ((61009, 61035), 'utils.unban_user_button', 'utils.unban_user_button', (['c'], {}), '(c)\n', (61032, 61035), False, 'import utils\n'), ((63788, 63809), 'ujson.dumps', 'ujson.dumps', (['settings'], {}), '(settings)\n', (63799, 63809), False, 'import ujson\n'), ((65251, 65272), 'ujson.dumps', 'ujson.dumps', (['settings'], {}), '(settings)\n', (65262, 65272), False, 'import ujson\n'), ((66365, 66386), 'ujson.dumps', 'ujson.dumps', (['settings'], {}), '(settings)\n', (66376, 66386), False, 'import ujson\n'), ((67741, 67778), 'api.register_new_chat', 'api.register_new_chat', (['c.message.chat'], {}), '(c.message.chat)\n', (67762, 67778), False, 'import api\n'), ((15510, 15559), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (15528, 15559), False, 'import utils\n'), ((16807, 16856), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (16825, 16856), False, 'import utils\n'), ((16931, 16959), 'utils.check_log', 'utils.check_log', (['msg.chat.id'], {}), '(msg.chat.id)\n', (16946, 16959), False, 'import utils\n'), ((18291, 18320), 'utils.get_log_id', 'utils.get_log_id', (['msg.chat.id'], {}), '(msg.chat.id)\n', (18307, 18320), False, 'import utils\n'), ((18698, 18747), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (18716, 18747), False, 'import utils\n'), ((20587, 20607), 'utils.parse_arg', 'utils.parse_arg', (['msg'], {}), '(msg)\n', (20602, 20607), False, 'import utils\n'), ((21416, 21456), 'utils.check_super_user', 'utils.check_super_user', (['msg.from_user.id'], {}), '(msg.from_user.id)\n', (21438, 21456), False, 'import utils\n'), ((22919, 22968), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (22937, 22968), False, 'import utils\n'), ((23835, 23851), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (23848, 23851), False, 'import logging\n'), ((25810, 25843), 'api.get_group_params', 'api.get_group_params', (['msg.chat.id'], {}), '(msg.chat.id)\n', (25830, 25843), False, 'import api\n'), ((26784, 26817), 'api.get_group_params', 'api.get_group_params', (['msg.chat.id'], {}), '(msg.chat.id)\n', (26804, 26817), False, 'import api\n'), ((27689, 27716), 'utils.new_user_in_chat', 'utils.new_user_in_chat', (['msg'], {}), '(msg)\n', (27711, 27716), False, 'import utils\n'), ((27732, 27756), 'utils.need_greeting', 'utils.need_greeting', (['msg'], {}), '(msg)\n', (27751, 27756), False, 'import utils\n'), ((30553, 30602), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (30571, 30602), False, 'import utils\n'), ((30641, 30687), 'utils.send_err_report', 'utils.send_err_report', (['msg', '"""no_args_provided"""'], {}), "(msg, 'no_args_provided')\n", (30662, 30687), False, 'import utils\n'), ((30706, 30754), 'utils.send_err_report', 'utils.send_err_report', (['msg', '"""not_enought_rights"""'], {}), "(msg, 'not_enought_rights')\n", (30727, 30754), False, 'import utils\n'), ((31216, 31249), 'utils.get_group_lang', 'utils.get_group_lang', (['msg.chat.id'], {}), '(msg.chat.id)\n', (31236, 31249), False, 'import utils\n'), ((33289, 33309), 'utils.have_args', 'utils.have_args', (['msg'], {}), '(msg)\n', (33304, 33309), False, 'import utils\n'), ((33377, 33397), 'utils.have_args', 'utils.have_args', (['msg'], {}), '(msg)\n', (33392, 33397), False, 'import utils\n'), ((33446, 33492), 'utils.send_err_report', 'utils.send_err_report', (['msg', '"""no_args_provided"""'], {}), "(msg, 'no_args_provided')\n", (33467, 33492), False, 'import utils\n'), ((33707, 33731), 'utils.get_user_lang', 'utils.get_user_lang', (['msg'], {}), '(msg)\n', (33726, 33731), False, 'import utils\n'), ((34030, 34054), 'utils.get_user_lang', 'utils.get_user_lang', (['msg'], {}), '(msg)\n', (34049, 34054), False, 'import utils\n'), ((34690, 34733), 'utils.send_err_report', 'utils.send_err_report', (['msg', '"""user_is_admin"""'], {}), "(msg, 'user_is_admin')\n", (34711, 34733), False, 'import utils\n'), ((38282, 38305), 'utils.global_unban', 'utils.global_unban', (['msg'], {}), '(msg)\n', (38300, 38305), False, 'import utils\n'), ((38374, 38414), 'utils.check_super_user', 'utils.check_super_user', (['msg.from_user.id'], {}), '(msg.from_user.id)\n', (38396, 38414), False, 'import utils\n'), ((40390, 40439), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (40408, 40439), False, 'import utils\n'), ((40866, 40915), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (40884, 40915), False, 'import utils\n'), ((41398, 41427), 'utils.get_group_lang', 'utils.get_group_lang', (['chat_id'], {}), '(chat_id)\n', (41418, 41427), False, 'import utils\n'), ((52650, 52672), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (52669, 52672), False, 'import utils\n'), ((55040, 55062), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (55059, 55062), False, 'import utils\n'), ((62024, 62046), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (62043, 62046), False, 'import utils\n'), ((67824, 67866), 'ujson.dumps', 'ujson.dumps', (['config.default_group_settings'], {}), '(config.default_group_settings)\n', (67835, 67866), False, 'import ujson\n'), ((15572, 15600), 'utils.check_log', 'utils.check_log', (['msg.chat.id'], {}), '(msg.chat.id)\n', (15587, 15600), False, 'import utils\n'), ((16893, 16922), 'utils.get_log_id', 'utils.get_log_id', (['msg.chat.id'], {}), '(msg.chat.id)\n', (16909, 16922), False, 'import utils\n'), ((18840, 18873), 'utils.get_group_lang', 'utils.get_group_lang', (['msg.chat.id'], {}), '(msg.chat.id)\n', (18860, 18873), False, 'import utils\n'), ((19296, 19329), 'utils.get_group_lang', 'utils.get_group_lang', (['msg.chat.id'], {}), '(msg.chat.id)\n', (19316, 19329), False, 'import utils\n'), ((20926, 20950), 'utils.get_user_lang', 'utils.get_user_lang', (['msg'], {}), '(msg)\n', (20945, 20950), False, 'import utils\n'), ((24076, 24087), 'time.time', 'time.time', ([], {}), '()\n', (24085, 24087), False, 'import time\n'), ((24153, 24164), 'time.time', 'time.time', ([], {}), '()\n', (24162, 24164), False, 'import time\n'), ((27140, 27167), 'telebot.types.ReplyKeyboardRemove', 'types.ReplyKeyboardRemove', ([], {}), '()\n', (27165, 27167), False, 'from telebot import types\n'), ((30611, 30631), 'utils.have_args', 'utils.have_args', (['msg'], {}), '(msg)\n', (30626, 30631), False, 'import utils\n'), ((38436, 38457), 'utils.global_ban', 'utils.global_ban', (['msg'], {}), '(msg)\n', (38452, 38457), False, 'import utils\n'), ((38475, 38524), 'utils.check_status', 'utils.check_status', (['msg.from_user.id', 'msg.chat.id'], {}), '(msg.from_user.id, msg.chat.id)\n', (38493, 38524), False, 'import utils\n'), ((43076, 43105), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (43096, 43105), False, 'import api\n'), ((43481, 43502), 'api.get_users_count', 'api.get_users_count', ([], {}), '()\n', (43500, 43502), False, 'import api\n'), ((43528, 43549), 'api.get_chats_count', 'api.get_chats_count', ([], {}), '()\n', (43547, 43549), False, 'import api\n'), ((43581, 43612), 'api.get_unblocked_users_count', 'api.get_unblocked_users_count', ([], {}), '()\n', (43610, 43612), False, 'import api\n'), ((43644, 43675), 'api.get_unblocked_chats_count', 'api.get_unblocked_chats_count', ([], {}), '()\n', (43673, 43675), False, 'import api\n'), ((48790, 48812), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (48809, 48812), False, 'import utils\n'), ((49778, 49800), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (49797, 49800), False, 'import utils\n'), ((50769, 50791), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (50788, 50791), False, 'import utils\n'), ((51757, 51779), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (51776, 51779), False, 'import utils\n'), ((53275, 53297), 'utils.parse_chat_id', 'utils.parse_chat_id', (['c'], {}), '(c)\n', (53294, 53297), False, 'import utils\n'), ((18359, 18388), 'utils.get_log_id', 'utils.get_log_id', (['msg.chat.id'], {}), '(msg.chat.id)\n', (18375, 18388), False, 'import utils\n'), ((18448, 18481), 'utils.get_group_lang', 'utils.get_group_lang', (['msg.chat.id'], {}), '(msg.chat.id)\n', (18468, 18481), False, 'import utils\n'), ((24838, 24849), 'time.time', 'time.time', ([], {}), '()\n', (24847, 24849), False, 'import time\n'), ((26346, 26390), 'api.replacer', 'api.replacer', (['msg.new_chat_member.first_name'], {}), '(msg.new_chat_member.first_name)\n', (26358, 26390), False, 'import api\n'), ((27849, 27881), 'utils.generate_welcome_text', 'utils.generate_welcome_text', (['msg'], {}), '(msg)\n', (27876, 27881), False, 'import utils\n'), ((29488, 29516), 'api.replacer', 'api.replacer', (['msg.chat.title'], {}), '(msg.chat.title)\n', (29500, 29516), False, 'import api\n'), ((29691, 29729), 'api.replacer', 'api.replacer', (['msg.from_user.first_name'], {}), '(msg.from_user.first_name)\n', (29703, 29729), False, 'import api\n'), ((37669, 37714), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['(msg.date + 1)'], {}), '(msg.date + 1)\n', (37700, 37714), False, 'import datetime\n'), ((38646, 38671), 'utils.check_for_urls', 'utils.check_for_urls', (['msg'], {}), '(msg)\n', (38666, 38671), False, 'import utils\n'), ((61651, 61684), 'api.get_group_params', 'api.get_group_params', (['msg.chat.id'], {}), '(msg.chat.id)\n', (61671, 61684), False, 'import api\n'), ((26014, 26025), 'time.time', 'time.time', ([], {}), '()\n', (26023, 26025), False, 'import time\n'), ((26674, 26707), 'api.get_group_params', 'api.get_group_params', (['msg.chat.id'], {}), '(msg.chat.id)\n', (26694, 26707), False, 'import api\n'), ((35494, 35518), 'utils.get_user_lang', 'utils.get_user_lang', (['msg'], {}), '(msg)\n', (35513, 35518), False, 'import utils\n'), ((41612, 41641), 'utils.get_group_lang', 'utils.get_group_lang', (['chat_id'], {}), '(chat_id)\n', (41632, 41641), False, 'import utils\n'), ((49046, 49075), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (49066, 49075), False, 'import api\n'), ((49347, 49376), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (49367, 49376), False, 'import api\n'), ((52013, 52042), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (52033, 52042), False, 'import api\n'), ((52340, 52369), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (52360, 52369), False, 'import api\n'), ((61386, 61420), 'api.replacer', 'api.replacer', (['user.user.first_name'], {}), '(user.user.first_name)\n', (61398, 61420), False, 'import api\n'), ((63067, 63096), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (63087, 63096), False, 'import api\n'), ((68767, 68806), 'utils.get_group_lang', 'utils.get_group_lang', (['c.message.chat.id'], {}), '(c.message.chat.id)\n', (68787, 68806), False, 'import utils\n'), ((69034, 69073), 'utils.get_group_lang', 'utils.get_group_lang', (['c.message.chat.id'], {}), '(c.message.chat.id)\n', (69054, 69073), False, 'import utils\n'), ((18194, 18227), 'utils.get_group_lang', 'utils.get_group_lang', (['msg.chat.id'], {}), '(msg.chat.id)\n', (18214, 18227), False, 'import utils\n'), ((23118, 23129), 'time.time', 'time.time', ([], {}), '()\n', (23127, 23129), False, 'import time\n'), ((26423, 26456), 'api.get_group_params', 'api.get_group_params', (['msg.chat.id'], {}), '(msg.chat.id)\n', (26443, 26456), False, 'import api\n'), ((28009, 28042), 'api.get_group_params', 'api.get_group_params', (['msg.chat.id'], {}), '(msg.chat.id)\n', (28029, 28042), False, 'import api\n'), ((38676, 38709), 'api.get_group_params', 'api.get_group_params', (['msg.chat.id'], {}), '(msg.chat.id)\n', (38696, 38709), False, 'import api\n'), ((50034, 50063), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (50054, 50063), False, 'import api\n'), ((50332, 50361), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (50352, 50361), False, 'import api\n'), ((51025, 51054), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (51045, 51054), False, 'import api\n'), ((51326, 51355), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (51346, 51355), False, 'import api\n'), ((62774, 62808), 'api.replacer', 'api.replacer', (['user.user.first_name'], {}), '(user.user.first_name)\n', (62786, 62808), False, 'import api\n'), ((23239, 23272), 'utils.get_group_lang', 'utils.get_group_lang', (['msg.chat.id'], {}), '(msg.chat.id)\n', (23259, 23272), False, 'import utils\n'), ((39148, 39186), 'api.replacer', 'api.replacer', (['msg.from_user.first_name'], {}), '(msg.from_user.first_name)\n', (39160, 39186), False, 'import api\n'), ((53516, 53545), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (53536, 53545), False, 'import api\n'), ((53870, 53899), 'api.get_group_params', 'api.get_group_params', (['chat_id'], {}), '(chat_id)\n', (53890, 53899), False, 'import api\n'), ((23524, 23557), 'utils.get_group_lang', 'utils.get_group_lang', (['msg.chat.id'], {}), '(msg.chat.id)\n', (23544, 23557), False, 'import utils\n'), ((26026, 26059), 'api.get_group_params', 'api.get_group_params', (['msg.chat.id'], {}), '(msg.chat.id)\n', (26046, 26059), False, 'import api\n'), ((16251, 16280), 'utils.get_group_lang', 'utils.get_group_lang', (['chat_id'], {}), '(chat_id)\n', (16271, 16280), False, 'import utils\n'), ((17600, 17629), 'utils.get_group_lang', 'utils.get_group_lang', (['chat_id'], {}), '(chat_id)\n', (17620, 17629), False, 'import utils\n'), ((23640, 23673), 'utils.get_group_lang', 'utils.get_group_lang', (['msg.chat.id'], {}), '(msg.chat.id)\n', (23660, 23673), False, 'import utils\n'), ((61217, 61256), 'utils.get_group_lang', 'utils.get_group_lang', (['c.message.chat.id'], {}), '(c.message.chat.id)\n', (61237, 61256), False, 'import utils\n'), ((16496, 16525), 'utils.get_group_lang', 'utils.get_group_lang', (['chat_id'], {}), '(chat_id)\n', (16516, 16525), False, 'import utils\n'), ((17845, 17874), 'utils.get_group_lang', 'utils.get_group_lang', (['chat_id'], {}), '(chat_id)\n', (17865, 17874), False, 'import utils\n'), ((38988, 39021), 'utils.get_group_lang', 'utils.get_group_lang', (['msg.chat.id'], {}), '(msg.chat.id)\n', (39008, 39021), False, 'import utils\n'), ((62597, 62636), 'utils.get_group_lang', 'utils.get_group_lang', (['c.message.chat.id'], {}), '(c.message.chat.id)\n', (62617, 62636), False, 'import utils\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 22:08:31 2019
@author: iaricanli
"""
import copy
T = True
F = False
D = "_"
"""
Generate the inputs for the algorithm. A list of dictionarys.
Each element of the list represents a different boolean expression input --
say if we are trying to reduce something for a 7 segment display over a period
of multiple timesteps.
Each dictionary represents a boolean function. The key is the boolean input
represented in integer form (aka A^!B^!C^D -> 1001 -> 9) and the value is
either True, False, or Don't Care.
Reading from a file not yet supported.
Do whatever you want in here.
Arguments:
LEN : integer
- The dimensionality of the desired output truth table. AKA,
the number of boolean variables.
Return:
tt (truth table): list (dict (int, Booleanish))
- The only question here really is what is a booleanish?
in addition to True and False there is a third concept of Don'tCare
which is being represented here as "_". It fails "is True" but
passes "== True". this is abused heavily.
"""
def Get_Truth_Table(LEN):
tt = list()
e = dict()
e = {
0: T,
1: T,
2: D,
3: T,
4: F,
5: F,
6: T,
7: T,
}
tt.append(e)
return tt
"""
This is a helper function, existant in case I want to
expand the code and allow different iteration over the inputs.
If this returned randomly, the code would no longer necessarily
output predictable results.
"""
def _Provide_Index(dim):
return range(0, dim)
"""
Performs the expansion of a cube through the Nspace
Attempting to expand through ever dimension, one at a time.
While it does this, it maps the boolean expressions to minterms
and it maps the minterms to the boolean expressions. Thus providing
the program a quick view of the rows and columns regarding the results found.
Arguments:
boolean array: dict (int, truth)
- The key maps to the integer representation of the inputs
- The value points to whether that is mapped to True, False, or DC
idx: int
- The space in the boolean array we are beginning at, where the
expansion begins from.
dim: int
- The total number of dimensions we are operating in.
# REFERENCED BY VALUE
minterms2bln: dict (int, boolean_expression)
- Maps the minterms (tracked by integer -- same as in the idx we see
above), and keeps track of which minterms are related to what
boolean expressions.
Return:
covered_minterms: set
- The defined set of minterms covered by the boolean expression
bln_expr: boolean_expression
- The boolean expression (defined by a point and then a mask)
that covered the aforementioned minterms.
"""
def Expand_Cube(boolean_array, idx, dim, minterms2bln):
bln_expr = boolean_expression(idx, 0)
# Define the space of the cube
space = [idx]
covered_minterms = {idx}
if idx in minterms2bln:
minterms2bln[idx].add(bln_expr)
else:
minterms2bln[idx] = {bln_expr}
# Iterate over the indices however we decide
for i in _Provide_Index(dim):
# Control variable to exit a loop
_continue = False
# Convert the index into the representitive integer
dim2int = 2**i
# The space being explored
new_space = list()
for index in space:
# MAGIC LINE
# We need to turn 1s into 0s and 0s into 1s, depending on the index
new_index = index ^ dim2int
# We're expanding the cube, verify that we're expanding it into
# valid space. If it is valid, add the expanding indices into list
if new_index in boolean_array and boolean_array[new_index]:
new_space.append(new_index)
else:
# If the new space doesn't pan out _perfectly_, keep going to
# the the next index
_continue = True
break
# We don't want to extend into the space of the selected index
# if it didn't pan out. So skip this one and move on to the next
# dimension.
if not _continue:
# We like the new dimension, and are going to cover all the new
# elements into it.
space.extend(new_space)
for ns in new_space:
# If the value at the boolean array is specifically
# True and not just Don't Care, add it to the Covered Minterms
if boolean_array[ns] is T:
covered_minterms.add(ns)
if ns in minterms2bln:
minterms2bln[ns].add(bln_expr)
else:
minterms2bln[ns] = {bln_expr}
# Allow the Mask to contain the information regarding the dimension
# that was just covered.
bln_expr.mask += dim2int
return covered_minterms, bln_expr
class boolean_expression(object):
def __init__(self, idx, mask):
self.idx = idx
self.mask = mask
def __eq__(self, b):
return self.idx == b.idx and self.mask == b.mask
def __hash__(self):
return hash((self.idx, self.mask))
def __str__(self):
return "boolean_expression({0}, {1})".format(self.idx, self.mask)
def __repr__(self):
return self.__str__()
def Expand(truth_table, dim):
#
# Iterate over every boolean output
#
expr_per_output = list()
for boolean_array in truth_table:
bln2minterms= dict()
minterms2bln = dict()
for idx, bln in boolean_array.items():
if bln is T:
covered_minterms, bln_expr = Expand_Cube(boolean_array,
idx,
dim,
minterms2bln)
bln2minterms[bln_expr] = covered_minterms
# bln2minterms and minterms2bln
# are two dictionaries that are dually referent
# in order to keep computations fast.
expr_per_output.append((bln2minterms, minterms2bln))
return expr_per_output
def Intersect( list_of_maps ):
#
# Finds intersections between boolean statements and
# the minterms they cover
#
lom = list()
# Iterate over every solution-set per output
itr_list_of_maps = copy.deepcopy(list_of_maps)
for bln2minterms, minterms2bln in itr_list_of_maps:
# First we're going to look for any case where a minterm
# maps to only one boolean expression.
required_blns = set()
todelete = list()
itr_minterms2bln = copy.deepcopy(minterms2bln)
for minterm, set_of_blns in itr_minterms2bln.items():
if len(set_of_blns) == 1:
# WE found one!
# Take it
required_bln = set_of_blns.pop()
# Now find all the minterms related to the boolean
minterms_correlated_to_bln = bln2minterms[required_bln]
# Iterate over them
for correlated_minterm in minterms_correlated_to_bln:
# and remove the boolean from their knowledge
minterms2bln[correlated_minterm].remove(required_bln)
# Then delete the entire boolean from the booly-books
del bln2minterms[required_bln]
todelete.append(minterm)
# And remember what we've done on this day, this evil day.
required_blns.add( required_bln )
for i in todelete:
del minterms2bln[i]
# Now we get rid of booleans as we determine that they are "the best candidate
while len(minterms2bln):
# We are looking at only a SINGLE minterm.
# Scanning a subspace to decrease overall computation time
# and keep everything in linear time.
minterm = Select_Minterm(minterms2bln)
most = 0
best_candidate = None
# We determine the "Best candidate" as the boolean expression
# with the greatest number of related minterms
for bln in minterms2bln[minterm]:
if len(bln2minterms[bln]) > most:
best_candidate = bln
most = len(bln2minterms[bln])
required_blns.add( best_candidate )
# Now find all the minterms related to the boolean
minterms_correlated_to_bln = bln2minterms[best_candidate]
# Iterate over them
todelete = list()
for correlated_minterm in minterms_correlated_to_bln:
# Delete all minterms correlated to the highest-scoring boolean
for related_bln in minterms2bln[correlated_minterm]:
todelete.append((related_bln, correlated_minterm))
# Forreal, delete them
del minterms2bln[correlated_minterm]
for related_bln, correlated_minterm in todelete:
bln2minterms[related_bln].remove(correlated_minterm)
# The ndelete the aforementioned best candidate
del bln2minterms[best_candidate]
lom.append(required_blns)
return lom
"""
This is a helper function, existant in case I want to
expand the code and allow different iteration over the inputs.
If this returned randomly, the code would no longer necessarily
output predictable results.
"""
def Select_Minterm(minterms2bln):
return list(minterms2bln.keys())[0]
def main(dim):
#
# Define truth table
#
truth_table = Get_Truth_Table(dim)
# Perform the Expand operation on every output set
list_of_maps = Expand(truth_table, dim)
list_of_covering_blns = Intersect(list_of_maps)
return list_of_covering_blns
|
[
"copy.deepcopy"
] |
[((6780, 6807), 'copy.deepcopy', 'copy.deepcopy', (['list_of_maps'], {}), '(list_of_maps)\n', (6793, 6807), False, 'import copy\n'), ((7059, 7086), 'copy.deepcopy', 'copy.deepcopy', (['minterms2bln'], {}), '(minterms2bln)\n', (7072, 7086), False, 'import copy\n')]
|
import yaml
from pathlib import Path
from os import environ
class Config:
def __init__(self, file_path="config.yml"):
try:
with open(file_path, encoding="UTF-8") as file:
self.config = yaml.full_load(file.read())
except Exception:
self.config = {}
self.environ = environ
def get(self, param, default=None):
globals()[param.upper()] = (
self.environ.get(param.upper()) or
self.config.get(param, default))
config = Config()
config.get("db_path", default="jdanbot.db")
config.get("delay", default=30)
config.get("rss_feeds", default=[])
config.get("rss", default=False)
config.get("image_path", default="bot/cache/{image}.jpg")
config.get("token")
config.get("status", default="unknown")
config.get("vk", default=False)
config.get("vk_channels", default=())
config.get("access_token", default="")
config.get("katz_bots", default=False)
config.get("youtube", default=False)
config.get("youtube_channels", default=())
config.get("youtube_key", default=None)
config.get("langs_list", default=[
"ru", "en", "sv", "de", "ce",
"tt", "ba", "pl", "uk", "be",
"es", "he", "xh", "ab"])
config.get("unique_commands", default={
"ru": ["wikiru2", "w", "wiki"],
"en": ["van", "wen", "v"],
"uk": ["wikiua", "wua", "pawuk"],
"be-tarask": ["wikibe-tarask", "wikibet", "wbet", "xbet"]
})
config.get("admin_notes", default=[
"__rules__",
"__enable_bot__",
"__ban__",
"__welcome__",
"__enable_response__",
"__enable_welcome__",
"__enable_greatings__",
"__warns_to_ban__"
])
config.get("eggs", default=[
{"commands": ["java1"], "audio": "java.ogg"},
{"commands": ["cool_music"], "audio": "music.ogg"},
{"commands": ["cum"], "audio": "cum.ogg"},
{"commands": ["longcum"], "audio": "longcum.ogg"},
{"commands": ["frog"], "audio": "lyagushka.ogg"}])
config.get("stickers", {
"pizda": "<KEY>",
"net_pizdy": "<KEY>",
"pizda_tebe": "<KEY>",
"xui": "<KEY>",
"net_xua": "<KEY>"
})
BASE_DIR = Path(__file__).parent.parent
LOCALES_DIR = BASE_DIR / "i18n"
|
[
"pathlib.Path"
] |
[((2076, 2090), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2080, 2090), False, 'from pathlib import Path\n')]
|
#! /usr/bin/env python
import pyslet.xml.structures as xml
from pyslet.wsgi import SessionApp, session_decorator
class MyApp(SessionApp):
settings_file = 'samples/wsgi_session/settings.json'
def init_dispatcher(self):
super(MyApp, self).init_dispatcher()
self.set_method("/", self.home)
self.set_method("/setname", self.setname)
@session_decorator
def home(self, context):
page = """<html><head><title>Session Page</title></head><body>
<h1>Session Page</h1>
%s
</body></html>"""
if context.session.entity['UserName']:
noform = """<p>Welcome: %s</p>"""
page = page % (
noform % xml.EscapeCharData(
context.session.entity['UserName'].value))
else:
form = """<form method="POST" action="setname">
<p>Please enter your name: <input type="text" name="name"/>
<input type="hidden" name=%s value=%s />
<input type="submit" value="Set"/></p>
</form>"""
page = page % (
form % (xml.EscapeCharData(self.csrf_token, True),
xml.EscapeCharData(context.session.sid(),
True)))
context.set_status(200)
return self.html_response(context, page)
@session_decorator
def setname(self, context):
user_name = context.get_form_string('name')
if user_name:
context.session.entity['UserName'].set_from_value(user_name)
context.session.touch()
return self.redirect_page(context, context.get_app_root())
if __name__ == "__main__":
MyApp.main()
|
[
"pyslet.xml.structures.EscapeCharData"
] |
[((715, 775), 'pyslet.xml.structures.EscapeCharData', 'xml.EscapeCharData', (["context.session.entity['UserName'].value"], {}), "(context.session.entity['UserName'].value)\n", (733, 775), True, 'import pyslet.xml.structures as xml\n'), ((1147, 1188), 'pyslet.xml.structures.EscapeCharData', 'xml.EscapeCharData', (['self.csrf_token', '(True)'], {}), '(self.csrf_token, True)\n', (1165, 1188), True, 'import pyslet.xml.structures as xml\n')]
|
import pathlib
from typing import (
Iterable
)
CONTRACTS_ROOT = "./scripts/benchmark/contract_data/"
CONTRACTS = [
"erc20.sol"
]
def get_contracts() -> Iterable[pathlib.Path]:
for val in CONTRACTS:
yield pathlib.Path(CONTRACTS_ROOT) / pathlib.Path(val)
|
[
"pathlib.Path"
] |
[((227, 255), 'pathlib.Path', 'pathlib.Path', (['CONTRACTS_ROOT'], {}), '(CONTRACTS_ROOT)\n', (239, 255), False, 'import pathlib\n'), ((258, 275), 'pathlib.Path', 'pathlib.Path', (['val'], {}), '(val)\n', (270, 275), False, 'import pathlib\n')]
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import random
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from pgadmin.tools.sqleditor.tests.execute_query_test_utils \
import execute_query
from datetime import date
class TestQueryUpdatableResultset(BaseTestGenerator):
""" This class will test the detection of whether the query
result-set is updatable. """
scenarios = [
('When selecting all columns of the table', dict(
sql='SELECT * FROM {0};',
expected_primary_keys={
'pk_col1': 'int4',
'pk_col2': 'int4'
},
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[True, True, True, True]
)),
('When selecting all primary keys of the table', dict(
sql='SELECT pk_col1, pk_col2 FROM {0};',
expected_primary_keys={
'pk_col1': 'int4',
'pk_col2': 'int4'
},
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[True, True]
)),
('When selecting some of the primary keys of the table', dict(
sql='SELECT pk_col2 FROM {0};',
expected_primary_keys=None,
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[False]
)),
('When selecting none of the primary keys of the table', dict(
sql='SELECT normal_col1 FROM {0};',
expected_primary_keys=None,
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[False]
)),
('When renaming a primary key', dict(
sql='SELECT pk_col1 as some_col, pk_col2 FROM "{0}";',
expected_primary_keys=None,
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[False, False]
)),
('When renaming a normal column', dict(
sql='SELECT pk_col1, pk_col2, normal_col1 as some_col FROM "{0}";',
expected_primary_keys={
'pk_col1': 'int4',
'pk_col2': 'int4'
},
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[True, True, False]
)),
('When renaming a normal column to a primary key name', dict(
sql='SELECT normal_col1 as pk_col1, pk_col1, pk_col2 FROM {0};',
expected_primary_keys={
'pk_col1': 'int4',
'pk_col2': 'int4'
},
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[False, True, True]
)),
('When selecting a normal column twice', dict(
sql='SELECT pk_col1, pk_col2, normal_col1, normal_col1 FROM {0};',
expected_primary_keys={
'pk_col1': 'int4',
'pk_col2': 'int4'
},
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[True, True, True, False]
)),
('When selecting a non-table column', dict(
sql='SELECT pk_col1, pk_col2, normal_col1 || normal_col2 FROM {0};',
expected_primary_keys={'pk_col1': 'int4',
'pk_col2': 'int4'
},
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[True, True, False]
)),
('When selecting primary keys and oids (table with oids)', dict(
sql='SELECT *, oid FROM {0};',
expected_primary_keys={
'pk_col1': 'int4',
'pk_col2': 'int4'
},
expected_has_oids=True,
table_has_oids=True,
expected_cols_is_editable=[True, True, True, True, False]
)),
('When selecting oids without primary keys (table with oids)', dict(
sql='SELECT oid, normal_col1, normal_col2 FROM {0};',
expected_primary_keys=None,
expected_has_oids=True,
table_has_oids=True,
expected_cols_is_editable=[False, True, True]
)),
('When selecting none of the primary keys or oids (table with oids)',
dict(
sql='SELECT normal_col1, normal_col2 FROM {0};',
expected_primary_keys=None,
expected_has_oids=False,
table_has_oids=True,
expected_cols_is_editable=[False, False]
))
]
def setUp(self):
self.test_table_name = "test_for_updatable_resultset" + \
str(random.randint(1000, 9999))
self._initialize_database_connection()
self._initialize_query_tool()
self._initialize_urls()
def runTest(self):
self._create_test_table(table_has_oids=self.table_has_oids)
response_data = self._execute_select_sql()
self._check_primary_keys(response_data)
self._check_oids(response_data)
self._check_editable_columns(response_data)
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id)
def _execute_select_sql(self):
sql = self.sql.format(self.test_table_name)
is_success, response_data = \
execute_query(tester=self.tester,
query=sql,
poll_url=self.poll_url,
start_query_tool_url=self.start_query_tool_url)
self.assertEquals(is_success, True)
return response_data
def _check_primary_keys(self, response_data):
primary_keys = response_data['data']['primary_keys']
self.assertEquals(primary_keys, self.expected_primary_keys)
def _check_oids(self, response_data):
has_oids = response_data['data']['has_oids']
self.assertEquals(has_oids, self.expected_has_oids)
def _check_editable_columns(self, response_data):
columns_info = response_data['data']['colinfo']
for col, expected_is_editable in \
zip(columns_info, self.expected_cols_is_editable):
self.assertEquals(col['is_editable'], expected_is_editable)
def _initialize_database_connection(self):
database_info = parent_node_dict["database"][-1]
self.db_name = database_info["db_name"]
self.server_id = database_info["server_id"]
self.server_version = parent_node_dict["schema"][-1]["server_version"]
if self.server_version >= 120000 and self.table_has_oids:
self.skipTest('Tables with OIDs are not supported starting '
'PostgreSQL 12')
driver_version = utils.get_driver_version()
driver_version = float('.'.join(driver_version.split('.')[:2]))
if driver_version < 2.8:
self.skipTest('Updatable resultsets require pyscopg 2.8 or later')
self.db_id = database_info["db_id"]
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
self.server_id,
self.db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to the database.")
def _initialize_query_tool(self):
self.trans_id = str(random.randint(1, 9999999))
url = '/datagrid/initialize/query_tool/{0}/{1}/{2}/{3}'.format(
self.trans_id, utils.SERVER_GROUP, self.server_id, self.db_id)
response = self.tester.post(url)
self.assertEquals(response.status_code, 200)
def _initialize_urls(self):
self.start_query_tool_url = \
'/sqleditor/query_tool/start/{0}'.format(self.trans_id)
self.poll_url = '/sqleditor/poll/{0}'.format(self.trans_id)
def _create_test_table(self, table_has_oids=False):
create_sql = """
DROP TABLE IF EXISTS {0};
CREATE TABLE {0}(
pk_col1 SERIAL,
pk_col2 SERIAL,
normal_col1 VARCHAR,
normal_col2 VARCHAR,
PRIMARY KEY(pk_col1, pk_col2)
)
""".format(self.test_table_name)
if table_has_oids:
create_sql += ' WITH OIDS;'
else:
create_sql += ';'
utils.create_table_with_query(self.server, self.db_name, create_sql)
class TestTemporaryTable(TestQueryUpdatableResultset):
""" This class will test the query result-set for temporary tables """
scenarios = [
('When selecting all columns of the Temporary table, on commit drop',
dict(sql='''
DROP TABLE IF EXISTS {0};
CREATE TEMPORARY TABLE {0} ON COMMIT DROP AS
SELECT
CURRENT_DATE AS today;
SELECT * FROM {0};''',
expected_primary_keys=None,
expected_results_column_data=[[date.today().strftime(
"%Y-%m-%d")]],
expected_has_oids=False,
expected_results_column_is_editable=False,
table_has_oids=False,
expected_cols_is_editable=[False]
))
]
def runTest(self):
response_data = self._execute_select_sql()
self._check_primary_keys(response_data)
self._check_oids(response_data)
# Verifying Temporary table result data on Commit Drop
self._check_results_column_data(response_data)
self._check_editable_columns(response_data)
def _check_results_column_data(self, response_data):
results_column_data = response_data['data']['result']
for result_data, expected_is_editable in \
zip(results_column_data, self.expected_results_column_data):
self.assertEquals(result_data, expected_is_editable)
|
[
"random.randint",
"regression.python_test_utils.test_utils.get_driver_version",
"datetime.date.today",
"regression.python_test_utils.test_utils.create_table_with_query",
"pgadmin.browser.server_groups.servers.databases.tests.utils.connect_database",
"pgadmin.browser.server_groups.servers.databases.tests.utils.disconnect_database",
"pgadmin.tools.sqleditor.tests.execute_query_test_utils.execute_query"
] |
[((5722, 5790), 'pgadmin.browser.server_groups.servers.databases.tests.utils.disconnect_database', 'database_utils.disconnect_database', (['self', 'self.server_id', 'self.db_id'], {}), '(self, self.server_id, self.db_id)\n', (5756, 5790), True, 'from pgadmin.browser.server_groups.servers.databases.tests import utils as database_utils\n'), ((5929, 6049), 'pgadmin.tools.sqleditor.tests.execute_query_test_utils.execute_query', 'execute_query', ([], {'tester': 'self.tester', 'query': 'sql', 'poll_url': 'self.poll_url', 'start_query_tool_url': 'self.start_query_tool_url'}), '(tester=self.tester, query=sql, poll_url=self.poll_url,\n start_query_tool_url=self.start_query_tool_url)\n', (5942, 6049), False, 'from pgadmin.tools.sqleditor.tests.execute_query_test_utils import execute_query\n'), ((7320, 7346), 'regression.python_test_utils.test_utils.get_driver_version', 'utils.get_driver_version', ([], {}), '()\n', (7344, 7346), True, 'from regression.python_test_utils import test_utils as utils\n'), ((7594, 7683), 'pgadmin.browser.server_groups.servers.databases.tests.utils.connect_database', 'database_utils.connect_database', (['self', 'utils.SERVER_GROUP', 'self.server_id', 'self.db_id'], {}), '(self, utils.SERVER_GROUP, self.server_id,\n self.db_id)\n', (7625, 7683), True, 'from pgadmin.browser.server_groups.servers.databases.tests import utils as database_utils\n'), ((9147, 9215), 'regression.python_test_utils.test_utils.create_table_with_query', 'utils.create_table_with_query', (['self.server', 'self.db_name', 'create_sql'], {}), '(self.server, self.db_name, create_sql)\n', (9176, 9215), True, 'from regression.python_test_utils import test_utils as utils\n'), ((8017, 8043), 'random.randint', 'random.randint', (['(1)', '(9999999)'], {}), '(1, 9999999)\n', (8031, 8043), False, 'import random\n'), ((5227, 5253), 'random.randint', 'random.randint', (['(1000)', '(9999)'], {}), '(1000, 9999)\n', (5241, 5253), False, 'import random\n'), ((9785, 9797), 'datetime.date.today', 'date.today', ([], {}), '()\n', (9795, 9797), False, 'from datetime import date\n')]
|
import numpy as np
import pickle as pkl
def function_generator(init_num):
seq = np.array([], dtype='int')
n = init_num
seq = np.append(seq, n)
while True:
if ((n%2)==0):
next_number = n/2
next_number = np.asarray(next_number, dtype='int')
seq = np.append(seq, next_number)
if next_number==1:
break
else:
next_number = (3*n)+1
next_number = np.asarray(next_number, dtype='int')
seq = np.append(seq, next_number)
n = next_number
return seq
output_seq_data = []
output_seq_length = []
x_train = []
y_train = []
num = 0
for n in range(0,10000):
sequence = function_generator(n+1)
seq_len = len(sequence)
x_training = sequence[:(seq_len-1)]
x_training = np.array(x_training, dtype='int')
y_training = sequence[1:seq_len]
y_training = np.array(y_training, dtype='int')
output_seq_data.append(sequence)
output_seq_length.append(seq_len)
x_train.append(x_training)
y_train.append(y_training)
output_seq_data = np.asarray(output_seq_data)
x_train = np.asarray(x_train)
y_train = np.asarray(y_train)
print(y_train[26])
output_seq_length = np.asarray(output_seq_length)
max_length = output_seq_length.max()
# print(max_length)
# print(x_train[26])
# np.save('generated_data.npy', gen_data)
# np.save('x_train.npy', x_train)
# np.save('y_train.npy', y_train)
|
[
"numpy.append",
"numpy.asarray",
"numpy.array"
] |
[((1092, 1119), 'numpy.asarray', 'np.asarray', (['output_seq_data'], {}), '(output_seq_data)\n', (1102, 1119), True, 'import numpy as np\n'), ((1130, 1149), 'numpy.asarray', 'np.asarray', (['x_train'], {}), '(x_train)\n', (1140, 1149), True, 'import numpy as np\n'), ((1160, 1179), 'numpy.asarray', 'np.asarray', (['y_train'], {}), '(y_train)\n', (1170, 1179), True, 'import numpy as np\n'), ((1220, 1249), 'numpy.asarray', 'np.asarray', (['output_seq_length'], {}), '(output_seq_length)\n', (1230, 1249), True, 'import numpy as np\n'), ((86, 111), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int"""'}), "([], dtype='int')\n", (94, 111), True, 'import numpy as np\n'), ((139, 156), 'numpy.append', 'np.append', (['seq', 'n'], {}), '(seq, n)\n', (148, 156), True, 'import numpy as np\n'), ((813, 846), 'numpy.array', 'np.array', (['x_training'], {'dtype': '"""int"""'}), "(x_training, dtype='int')\n", (821, 846), True, 'import numpy as np\n'), ((901, 934), 'numpy.array', 'np.array', (['y_training'], {'dtype': '"""int"""'}), "(y_training, dtype='int')\n", (909, 934), True, 'import numpy as np\n'), ((252, 288), 'numpy.asarray', 'np.asarray', (['next_number'], {'dtype': '"""int"""'}), "(next_number, dtype='int')\n", (262, 288), True, 'import numpy as np\n'), ((307, 334), 'numpy.append', 'np.append', (['seq', 'next_number'], {}), '(seq, next_number)\n', (316, 334), True, 'import numpy as np\n'), ((462, 498), 'numpy.asarray', 'np.asarray', (['next_number'], {'dtype': '"""int"""'}), "(next_number, dtype='int')\n", (472, 498), True, 'import numpy as np\n'), ((517, 544), 'numpy.append', 'np.append', (['seq', 'next_number'], {}), '(seq, next_number)\n', (526, 544), True, 'import numpy as np\n')]
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import time
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
logger = logging.getLogger(__name__)
class FullModel(nn.Module):
"""
Distribute the loss on multi-gpu to reduce
the memory cost in the main gpu.
You can check the following discussion.
https://discuss.pytorch.org/t/dataparallel-imbalanced-memory-usage/22551/21
"""
def __init__(self, model, loss):
super(FullModel, self).__init__()
self.model = model
self.loss = loss
def forward(self, inputs, labels):
outputs = self.model(inputs)
loss = self.loss(outputs, labels)
return loss, outputs
def get_world_size():
if not torch.distributed.is_initialized():
return 1
return torch.distributed.get_world_size()
def get_rank():
if not torch.distributed.is_initialized():
return 0
return torch.distributed.get_rank()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.initialized = False
self.val = None
self.avg = None
self.sum = None
self.count = None
def initialize(self, val, weight):
self.val = val
self.avg = val
self.sum = val * weight
self.count = weight
self.initialized = True
def update(self, val, weight=1):
if not self.initialized:
self.initialize(val, weight)
else:
self.add(val, weight)
def add(self, val, weight):
self.val = val
self.sum += val * weight
self.count += weight
self.avg = self.sum / self.count
def value(self):
return self.val
def average(self):
return self.avg
def create_logger(cfg, cfg_name, phase='train'):
root_output_dir = Path(cfg.OUTPUT_DIR)
# set up logger
if not root_output_dir.exists():
print('=> creating {}'.format(root_output_dir))
root_output_dir.mkdir()
dataset = cfg.DATASET.DATASET
model = cfg.MODEL.NAME
cfg_name = os.path.basename(cfg_name).split('.')[0]
final_output_dir = root_output_dir / dataset / cfg_name
print('=> creating {}'.format(final_output_dir))
final_output_dir.mkdir(parents=True, exist_ok=True)
time_str = time.strftime('%Y-%m-%d-%H-%M')
log_file = '{}_{}_{}.log'.format(cfg_name, time_str, phase)
final_log_file = final_output_dir / log_file
head = '%(asctime)-15s %(message)s'
logging.basicConfig(filename=str(final_log_file),
format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
logging.getLogger('').addHandler(console)
tensorboard_log_dir = Path(cfg.LOG_DIR) / dataset / model / \
(cfg_name + '_' + time_str)
print('=> creating {}'.format(tensorboard_log_dir))
tensorboard_log_dir.mkdir(parents=True, exist_ok=True)
return logger, str(final_output_dir), str(tensorboard_log_dir)
def get_confusion_matrix(label, pred, size, num_class, ignore=-1):
"""
Calcute the confusion matrix by given label and pred
"""
output = pred.cpu().numpy().transpose(0, 2, 3, 1)
seg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)
seg_gt = np.asarray(
label.cpu().numpy()[:, :size[-2], :size[-1]], dtype=np.int)
ignore_index = seg_gt != ignore
seg_gt = seg_gt[ignore_index]
seg_pred = seg_pred[ignore_index]
index = (seg_gt * num_class + seg_pred).astype('int32')
label_count = np.bincount(index)
confusion_matrix = np.zeros((num_class, num_class))
for i_label in range(num_class):
for i_pred in range(num_class):
cur_index = i_label * num_class + i_pred
if cur_index < len(label_count):
confusion_matrix[i_label,
i_pred] = label_count[cur_index]
return confusion_matrix
def get_optimizer(config, model):
_nwd_keys = ('bias', 'bn', 'norm', 'prelu', 'nwd')
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
lr = config.TRAIN.LR
weight_decay = config.TRAIN.WD
if 'head' in key:
lr *= 10
if any(key.find(sub) != -1 for sub in _nwd_keys):
weight_decay = 0
logger.info(f'Params: {key}, LR: {lr}, Weight_Decay: {weight_decay}')
elif 'base' in key:
if any(key.find(sub) != -1 for sub in _nwd_keys):
weight_decay = 0
logger.info(f'Params: {key}, LR: {lr}, Weight_Decay: {weight_decay}')
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
if config.TRAIN.OPTIMIZER == 'sgd':
optimizer = torch.optim.SGD(params,
lr=config.TRAIN.LR,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WD,
nesterov=config.TRAIN.NESTEROV,
)
elif config.TRAIN.OPTIMIZER == 'adam':
optimizer = torch.optim.Adam(params,
lr=config.TRAIN.LR,
amsgrad=config.TRAIN.AMSGRAD
)
else:
raise NotImplementedError
return optimizer
def get_group_gn(dim, dim_per_gp, num_groups):
"""get number of groups used by GroupNorm, based on number of channels."""
assert dim_per_gp == -1 or num_groups == -1, \
"GroupNorm: can only specify G or C/G."
if dim_per_gp > 0:
assert dim % dim_per_gp == 0, \
"dim: {}, dim_per_gp: {}".format(dim, dim_per_gp)
group_gn = dim // dim_per_gp
else:
assert dim % num_groups == 0, \
"dim: {}, num_groups: {}".format(dim, num_groups)
group_gn = num_groups
return group_gn
|
[
"torch.distributed.is_initialized",
"numpy.argmax",
"torch.distributed.get_rank",
"os.path.basename",
"logging.StreamHandler",
"numpy.zeros",
"time.strftime",
"pathlib.Path",
"torch.optim.Adam",
"torch.distributed.get_world_size",
"numpy.bincount",
"logging.getLogger",
"torch.optim.SGD"
] |
[((491, 518), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (508, 518), False, 'import logging\n'), ((1111, 1145), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (1143, 1145), False, 'import torch\n'), ((1239, 1267), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (1265, 1267), False, 'import torch\n'), ((2178, 2198), 'pathlib.Path', 'Path', (['cfg.OUTPUT_DIR'], {}), '(cfg.OUTPUT_DIR)\n', (2182, 2198), False, 'from pathlib import Path\n'), ((2649, 2680), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d-%H-%M"""'], {}), "('%Y-%m-%d-%H-%M')\n", (2662, 2680), False, 'import time\n'), ((2938, 2957), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2955, 2957), False, 'import logging\n'), ((3006, 3029), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (3027, 3029), False, 'import logging\n'), ((3908, 3926), 'numpy.bincount', 'np.bincount', (['index'], {}), '(index)\n', (3919, 3926), True, 'import numpy as np\n'), ((3950, 3982), 'numpy.zeros', 'np.zeros', (['(num_class, num_class)'], {}), '((num_class, num_class))\n', (3958, 3982), True, 'import numpy as np\n'), ((1047, 1081), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (1079, 1081), False, 'import torch\n'), ((1175, 1209), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (1207, 1209), False, 'import torch\n'), ((3588, 3613), 'numpy.argmax', 'np.argmax', (['output'], {'axis': '(3)'}), '(output, axis=3)\n', (3597, 3613), True, 'import numpy as np\n'), ((5150, 5291), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'lr': 'config.TRAIN.LR', 'momentum': 'config.TRAIN.MOMENTUM', 'weight_decay': 'config.TRAIN.WD', 'nesterov': 'config.TRAIN.NESTEROV'}), '(params, lr=config.TRAIN.LR, momentum=config.TRAIN.MOMENTUM,\n weight_decay=config.TRAIN.WD, nesterov=config.TRAIN.NESTEROV)\n', (5165, 5291), False, 'import torch\n'), ((3034, 3055), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (3051, 3055), False, 'import logging\n'), ((5533, 5607), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {'lr': 'config.TRAIN.LR', 'amsgrad': 'config.TRAIN.AMSGRAD'}), '(params, lr=config.TRAIN.LR, amsgrad=config.TRAIN.AMSGRAD)\n', (5549, 5607), False, 'import torch\n'), ((2421, 2447), 'os.path.basename', 'os.path.basename', (['cfg_name'], {}), '(cfg_name)\n', (2437, 2447), False, 'import os\n'), ((3103, 3120), 'pathlib.Path', 'Path', (['cfg.LOG_DIR'], {}), '(cfg.LOG_DIR)\n', (3107, 3120), False, 'from pathlib import Path\n')]
|
import sys
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from virtualization.models import VirtualMachine, VMInterface
interfaces = load_yaml("/opt/netbox/initializers/virtualization_interfaces.yml")
if interfaces is None:
sys.exit()
required_assocs = {"virtual_machine": (VirtualMachine, "name")}
for params in interfaces:
custom_field_data = pop_custom_fields(params)
for assoc, details in required_assocs.items():
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
interface, created = VMInterface.objects.get_or_create(**params)
if created:
set_custom_fields_values(interface, custom_field_data)
print("🧷 Created interface", interface.name, interface.virtual_machine.name)
|
[
"virtualization.models.VMInterface.objects.get_or_create",
"startup_script_utils.pop_custom_fields",
"startup_script_utils.set_custom_fields_values",
"startup_script_utils.load_yaml",
"sys.exit"
] |
[((176, 243), 'startup_script_utils.load_yaml', 'load_yaml', (['"""/opt/netbox/initializers/virtualization_interfaces.yml"""'], {}), "('/opt/netbox/initializers/virtualization_interfaces.yml')\n", (185, 243), False, 'from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values\n'), ((272, 282), 'sys.exit', 'sys.exit', ([], {}), '()\n', (280, 282), False, 'import sys\n'), ((399, 424), 'startup_script_utils.pop_custom_fields', 'pop_custom_fields', (['params'], {}), '(params)\n', (416, 424), False, 'from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values\n'), ((629, 672), 'virtualization.models.VMInterface.objects.get_or_create', 'VMInterface.objects.get_or_create', ([], {}), '(**params)\n', (662, 672), False, 'from virtualization.models import VirtualMachine, VMInterface\n'), ((698, 752), 'startup_script_utils.set_custom_fields_values', 'set_custom_fields_values', (['interface', 'custom_field_data'], {}), '(interface, custom_field_data)\n', (722, 752), False, 'from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values\n')]
|
from __future__ import (
annotations,
)
from typing import (
Generator,
NoReturn
)
class StdReader:
def __init__(
self,
) -> NoReturn:
import sys
self.buf = sys.stdin.buffer
self.lines = (
self.async_readlines()
)
self.chunks: Generator
def async_readlines(
self,
) -> Generator:
while True:
gen = self.line_chunks()
yield gen
def line_chunks(
self,
) -> Generator:
ln = self.buf.readline()
for chunk in ln.split():
yield chunk
def __call__(
self,
) -> bytes:
try:
chunk = next(self.chunks)
except:
self.chunks = next(
self.lines,
)
chunk = self()
return chunk
def str(
self,
) -> str:
b = self()
return b.decode()
def int(
self,
) -> int:
return int(self.str())
from abc import (
ABC,
abstractmethod,
)
class Solver(ABC):
def __init__(self):
self.reader = StdReader()
def __call__(
self,
):
self.prepare()
self.solve()
@abstractmethod
def prepare(self):
...
@abstractmethod
def solve(self):
...
import numpy as np
from typing import (
List,
)
from dataclasses import (
dataclass,
)
@dataclass
class Node:
id_: int = None
@dataclass
class Edge:
id_: int = None
from_ : int = ...
to: int = ...
weight: int = 1
capacity: int = 0
@dataclass
class Graph:
nodes: List[Node]
edges: List[List[Edge]]
def __init__(
self,
n: int,
):
nodes = [
Node(i)
for i in range(n)
]
edges = [
[] for _ in range(n)
]
self.nodes = nodes
self.edges = edges
def add_edge(
self,
e: Edge,
):
i = e.from_
self.edges[i].append(e)
def add_edges(
self,
edges: List[Edge],
):
for e in edges:
self.add_edge(e)
@property
def size(self):
return len(self.nodes)
from collections import (
deque,
)
class GraphBFS:
level: List[int]
def __init__(
self,
graph: Graph,
):
self.g = graph
self.inf = float('inf')
def search(
self,
src: int,
):
self.init_level()
self.level[src] = 0
self.set_queue()
que = self.queue
que.append(src)
while que:
x = que.popleft()
self.explore(x)
def explore(
self,
u: int,
):
g = self.g
lv = self.level
que = self.queue
for e in g.edges[u]:
v = e.to
if lv[v] is not None:
continue
lv[v] = lv[u] + 1
que.append(v)
def set_queue(self):
que = deque()
self.queue = que
def init_level(self):
lv = [None] * self.g.size
self.level = lv
class Problem(
Solver,
):
def prepare(self):
reader = self.reader
r = reader.int()
c = reader.int()
sy = reader.int() - 1
sx = reader.int() - 1
gy = reader.int() - 1
gx = reader.int() - 1
maze = [None] * r
for i in range(r):
maze[i] = reader.str()
maze = ''.join(maze)
self.r = r
self.c = c
self.sy = sy
self.sx = sx
self.gy = gy
self.gx = gx
self.maze = maze
def solve(self):
c = self.c
self.moves = (-c, -1, 1, c)
self.make_graph()
print(self.calc_dist())
def calc_dist(self) -> int:
g = self.g
c = self.c
src = self.sy * c + self.sx
dst = self.gy * c + self.gx
bfs = GraphBFS(graph=g)
bfs.search(src)
dist = bfs.level[dst]
return dist
def make_graph(
self,
):
r, c = self.r, self.c
n = r * c
g = Graph(n)
for i in range(n):
edges = self.gen_edges(i)
g.add_edges(edges)
self.g = g
def gen_edges(
self,
i: int,
):
edges = []
maze = self.maze
if maze[i] == '#':
return edges
for d in self.moves:
j = i + d
if maze[j] == '#':
continue
e = Edge(
from_ = i,
to = j,
)
edges.append(e)
return edges
def main():
t = 1
# t = StdReader().int()
for _ in range(t):
Problem()()
if __name__ == '__main__':
main()
|
[
"collections.deque"
] |
[((2602, 2609), 'collections.deque', 'deque', ([], {}), '()\n', (2607, 2609), False, 'from collections import deque\n')]
|
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.timezone import now
from django.template.defaultfilters import slugify
import uuid
import os
class Article(models.Model):
STATUS_CHOICES = (
('d', '草稿'),
('p', '发表'),
)
title = models.CharField('标题', max_length=200, unique=True)
slug = models.SlugField('slug', max_length=60)
body = models.TextField('正文')
pub_date = models.DateTimeField('发布时间', default=now, null=True)
create_date = models.DateTimeField('创建时间', auto_now_add=True)
mod_date = models.DateTimeField('修改时间', auto_now=True)
status = models.CharField('文章状态', max_length=1, choices=STATUS_CHOICES)
views = models.PositiveIntegerField('浏览量', default=0)
author = models.ForeignKey(User, verbose_name="作者", on_delete=models.CASCADE)
tags = models.ManyToManyField('Tag', verbose_name="标签集合", blank=True)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('blog:article_detail', args=[str(self.id)])
def viewed(self):
self.views += 1
self.save(update_fields=['views'])
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
if not self.slug or not self.id:
self.slug = slugify(self.title)
super(Article, self).save()
# do something
class Meta:
ordering = ['-pub_date']
verbose_name = 'article'
def user_directory_path(instance, filename):
ext = filename.split('.')[-1]
filename = '{}.{}'.format(uuid.uuid4().hex[:10], ext)
# return the whole path to the file
return os.path.join(instance.user.id, 'avatar', filename)
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
avatar = models.ImageField(upload_to=user_directory_path, verbose_name='头像')
class AuthorManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(role='A')
class EditorManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(role='E')
class Person(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
role = models.CharField(max_length=1, choices=(('A', 'Author'), ('E', 'Editor')))
objects = models.Manager()
authors = AuthorManager()
editors = EditorManager()
|
[
"django.db.models.TextField",
"django.db.models.OneToOneField",
"django.db.models.ManyToManyField",
"uuid.uuid4",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.PositiveIntegerField",
"django.db.models.Manager",
"django.db.models.SlugField",
"django.db.models.ImageField",
"django.template.defaultfilters.slugify",
"django.db.models.DateTimeField",
"os.path.join"
] |
[((331, 382), 'django.db.models.CharField', 'models.CharField', (['"""标题"""'], {'max_length': '(200)', 'unique': '(True)'}), "('标题', max_length=200, unique=True)\n", (347, 382), False, 'from django.db import models\n'), ((394, 433), 'django.db.models.SlugField', 'models.SlugField', (['"""slug"""'], {'max_length': '(60)'}), "('slug', max_length=60)\n", (410, 433), False, 'from django.db import models\n'), ((445, 467), 'django.db.models.TextField', 'models.TextField', (['"""正文"""'], {}), "('正文')\n", (461, 467), False, 'from django.db import models\n'), ((483, 535), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""发布时间"""'], {'default': 'now', 'null': '(True)'}), "('发布时间', default=now, null=True)\n", (503, 535), False, 'from django.db import models\n'), ((554, 601), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""创建时间"""'], {'auto_now_add': '(True)'}), "('创建时间', auto_now_add=True)\n", (574, 601), False, 'from django.db import models\n'), ((617, 660), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""修改时间"""'], {'auto_now': '(True)'}), "('修改时间', auto_now=True)\n", (637, 660), False, 'from django.db import models\n'), ((674, 736), 'django.db.models.CharField', 'models.CharField', (['"""文章状态"""'], {'max_length': '(1)', 'choices': 'STATUS_CHOICES'}), "('文章状态', max_length=1, choices=STATUS_CHOICES)\n", (690, 736), False, 'from django.db import models\n'), ((749, 794), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (['"""浏览量"""'], {'default': '(0)'}), "('浏览量', default=0)\n", (776, 794), False, 'from django.db import models\n'), ((808, 876), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'verbose_name': '"""作者"""', 'on_delete': 'models.CASCADE'}), "(User, verbose_name='作者', on_delete=models.CASCADE)\n", (825, 876), False, 'from django.db import models\n'), ((888, 950), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""Tag"""'], {'verbose_name': '"""标签集合"""', 'blank': '(True)'}), "('Tag', verbose_name='标签集合', blank=True)\n", (910, 950), False, 'from django.db import models\n'), ((1714, 1764), 'os.path.join', 'os.path.join', (['instance.user.id', '"""avatar"""', 'filename'], {}), "(instance.user.id, 'avatar', filename)\n", (1726, 1764), False, 'import os\n'), ((1811, 1887), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE', 'related_name': '"""profile"""'}), "(User, on_delete=models.CASCADE, related_name='profile')\n", (1831, 1887), False, 'from django.db import models\n'), ((1901, 1968), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'user_directory_path', 'verbose_name': '"""头像"""'}), "(upload_to=user_directory_path, verbose_name='头像')\n", (1918, 1968), False, 'from django.db import models\n'), ((2260, 2291), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (2276, 2291), False, 'from django.db import models\n'), ((2308, 2339), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (2324, 2339), False, 'from django.db import models\n'), ((2351, 2425), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': "(('A', 'Author'), ('E', 'Editor'))"}), "(max_length=1, choices=(('A', 'Author'), ('E', 'Editor')))\n", (2367, 2425), False, 'from django.db import models\n'), ((2440, 2456), 'django.db.models.Manager', 'models.Manager', ([], {}), '()\n', (2454, 2456), False, 'from django.db import models\n'), ((1362, 1381), 'django.template.defaultfilters.slugify', 'slugify', (['self.title'], {}), '(self.title)\n', (1369, 1381), False, 'from django.template.defaultfilters import slugify\n'), ((1635, 1647), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1645, 1647), False, 'import uuid\n')]
|
# Copyright 2017 Quantum Information Science, University of Parma, Italy. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Quantum Information Science, University of Parma, Italy"
__license__ = "Apache"
__version__ = "2.0"
__email__ = "<EMAIL>"
import os
from time import sleep
from devices import *
import logging
import myLogger
import operator
import sys
sys.path.append( # solve the relative dependencies if you clone QISKit from the Git repo and use like a global.
"../qiskit-sdk-py")
from qiskit import QuantumProgram
import Qconfig
logger = logging.getLogger('utility')
logger.addHandler(myLogger.MyHandler())
logger.setLevel(logging.CRITICAL)
logger.propagate = False
class Utility(object):
def __init__(self, coupling_map):
self.__coupling_map = dict()
self.__inverse_coupling_map = dict()
self.__plain_map = dict()
self.__path = dict()
self.__n_qubits = 0
self.__ranks = dict()
self.__connected = dict()
self.__most_connected = []
if coupling_map:
self.__coupling_map = coupling_map.copy()
logger.log(logging.DEBUG, 'init() - coupling_map:\n%s', str(self.__coupling_map))
self.invert_graph(coupling_map, self.__inverse_coupling_map)
logger.log(logging.DEBUG, 'init() - inverse coupling map:\n%s', str(self.__inverse_coupling_map))
for i in coupling_map:
self.__plain_map.update({i: self.__inverse_coupling_map[i] + coupling_map[i]})
logger.debug('init() - plain map:\n%s', str(self.__plain_map))
self.start_explore(self.__coupling_map, self.__ranks)
self.__most_connected = self.find_max(self.__ranks)
self.create_path(self.__most_connected[0], plain_map=self.__plain_map)
else:
logger.critical('init() - Null argument: coupling_map')
exit(1)
def close(self):
self.__ranks.clear()
self.__inverse_coupling_map.clear()
self.__coupling_map.clear()
self.__path.clear()
self.__most_connected.clear()
def explore(self, source, visiting, visited, ranks):
for next in self.__coupling_map[visiting]:
if next not in visited[source]:
visited[source].append(next)
if next not in ranks:
ranks.update({next: 0})
ranks[next] = ranks[next] + 1
self.explore(source, next, visited, ranks)
# TODO Try using some sort of centrality algorithm
def start_explore(self, graph, ranks):
visited = dict()
for source in graph:
visited.update({source: []})
self.explore(source, source, visited, ranks)
# create an inverted coupling-map for further use
@staticmethod
def invert_graph(graph, inverse_graph=None):
if inverse_graph is None:
inverse_graph = {}
for end in graph:
for start in graph[end]:
if start not in inverse_graph:
inverse_graph.update({start: [end]})
else:
inverse_graph[start].append(end)
for node in graph:
if node not in inverse_graph:
inverse_graph.update({node: []})
# find the most connected qubit
@staticmethod
def find_max(ranks):
logger.debug('ranks:\n%s', str(ranks))
most_connected = max(ranks.items(), key=operator.itemgetter(1))[0]
found = [most_connected, ranks[most_connected]]
logger.debug('max: %s', str(found))
return found
# create a valid path that connect qubits used in the circuit
def create_path(self, start, plain_map):
self.__path.update({start: -1})
to_connect = [start]
max = len(self.__coupling_map)
logger.debug('create_path() - max:\n%s', str(max))
count = max - 1
changed = True
visiting = 0
while count > 0:
logger.debug('create_path() - visiting:\n%s - %s', str(visiting), str(to_connect[visiting]))
# for visiting in to_connect:
if count <= 0:
break
for node in plain_map[to_connect[visiting]]:
if count <= 0:
break
if node not in self.__path:
self.__path.update({node: to_connect[visiting]})
count -= 1
logger.debug('create_path() - path:\n%s', str(self.__path))
if node not in to_connect:
to_connect.append(node)
visiting += 1
logger.debug('create_path() - path:\n%s', str(self.__path))
def cx(self, circuit, control_qubit, target_qubit, control, target):
if target in self.__coupling_map[control]:
logger.log(logging.VERBOSE, 'cx() - cnot: (%s, %s)', str(control), str(target))
circuit.cx(control_qubit, target_qubit)
elif control in self.__coupling_map[target]:
logger.log(logging.VERBOSE, 'cx() - inverse-cnot: (%s, %s)', str(control), str(target))
circuit.h(control_qubit)
circuit.h(target_qubit)
circuit.cx(target_qubit, control_qubit)
circuit.h(control_qubit)
circuit.h(target_qubit)
else:
logger.critical('cx() - Cannot connect qubit %s to qubit %s', str(control), str(target))
exit(3)
# place cnot gates based on the path created in create_path method
def place_cx(self, circuit, quantum_r, oracle='11'):
if not oracle == '00':
logger.log(logging.VERBOSE, 'place_cx() - oracle != 00')
stop = self.__n_qubits // 2
for qubit in self.__connected:
if self.__connected[qubit] != -1:
if oracle == '11':
logger.log(logging.VERBOSE, 'place_cx() - oracle = 11')
self.cx(circuit, quantum_r[qubit], quantum_r[self.__connected[qubit]], qubit,
self.__connected[qubit])
elif oracle == '10':
logger.log(logging.VERBOSE, 'place_cx() - oracle = 10')
if stop > 0:
self.cx(circuit, quantum_r[qubit], quantum_r[self.__connected[qubit]], qubit,
self.__connected[qubit])
stop -= 1
# place Hadamard gates
def place_h(self, circuit, start, quantum_r, initial=True, x=True):
for qubit in self.__connected:
if qubit != start:
circuit.h(quantum_r[qubit])
else:
if initial is True:
if x is True:
circuit.x(quantum_r[qubit])
else:
circuit.h(quantum_r[qubit])
# place Pauli-X gates
def place_x(self, circuit, quantum_r):
sorted_c = sorted(self.__connected.items(), key=operator.itemgetter(0))
logger.log(logging.VERBOSE, 'place_x() - sorted_c:\n%s', str(sorted_c))
s_0 = self.__n_qubits // 2
i = 0
count = self.__n_qubits - 1
for qubit in sorted_c:
if count <= 0:
break
if i >= s_0:
circuit.x(quantum_r[qubit[0]])
else:
circuit.iden(quantum_r[qubit[0]])
i += 1
i = 0
for qubit in sorted_c:
if i >= s_0:
circuit.iden(quantum_r[qubit[0]])
else:
circuit.x(quantum_r[qubit[0]])
i += 1
# final measure
def measure(self, circuit, quantum_r, classical_r):
for qubit in self.__connected:
circuit.measure(quantum_r[qubit], classical_r[qubit])
# create the circuit
def create(self, circuit, quantum_r, classical_r, n_qubits, x=True, oracle='11'):
self.__n_qubits = n_qubits
max_qubits = len(self.__path)
logger.debug('create() - N qubits: %s', str(self.__n_qubits))
logger.debug('create() - Max qubits: %s', str(max_qubits))
if max_qubits < self.__n_qubits:
logger.critical('create() - Can use only up to %s qubits', str(max_qubits))
exit(2)
count = self.__n_qubits
for qubit in self.__path:
if count <= 0:
break
self.__connected.update({qubit: self.__path[qubit]})
count -= 1
logger.debug('create() - connected:\n%s', str(self.__connected))
self.place_h(circuit, self.__most_connected[0], quantum_r, x=x)
self.place_cx(circuit, quantum_r, oracle=oracle)
self.place_h(circuit, self.__most_connected[0], quantum_r, initial=False)
if x is True:
self.place_x(circuit, quantum_r)
self.measure(circuit, quantum_r, classical_r)
def envariance(self, circuit, quantum_r, classical_r, n_qubits):
self.create(circuit, quantum_r, classical_r, n_qubits)
sorted_c = sorted(self.__connected.items(), key=operator.itemgetter(0))
connected = list(zip(*sorted_c))[0]
logger.debug('envariance() - connected:\n%s', str(connected))
self.__n_qubits = 0
self.__connected.clear()
return connected
def parity(self, circuit, quantum_r, classical_r, n_qubits, oracle='11'):
self.create(circuit, quantum_r, classical_r, n_qubits, x=False, oracle=oracle)
connected = list(self.__connected.keys())
logger.debug('parity() - connected:\n%s', str(connected))
self.__n_qubits = 0
self.__connected.clear()
return connected
# launch envariance experiment on the given device
def envariance_exec(execution, device, utility, n_qubits, num_shots=1024, directory='Data_Envariance/'):
os.makedirs(os.path.dirname(directory), exist_ok=True)
size = 0
results = dict()
if device == qx2 or device == qx4:
if n_qubits <= 5:
size = 5
# device = 'ibmqx_qasm_simulator'
else:
logger.critical('launch_exp() - Too much qubits for %s !', device)
exit(1)
elif device == qx3 or device == qx5:
if n_qubits <= 16:
size = 16
# device = 'ibmqx_qasm_simulator'
else:
logger.critical('launch_exp() - Too much qubits for %s !', device)
exit(2)
elif device == online_sim:
if n_qubits <= 5:
size = 5
elif n_qubits <= 16:
size = 16
else:
logger.critical('launch_exp() - Unknown device.')
exit(3)
Q_program = QuantumProgram()
try:
Q_program.set_api(Qconfig.APItoken, Qconfig.config["url"]) # set the APIToken and API url
except ConnectionError:
sleep(900)
logger.critical('API Exception occurred, retrying\nQubits %d - Execution %d - Shots %d', n_qubits, execution,
num_shots)
envariance_exec(execution, device, utility, n_qubits=n_qubits, num_shots=num_shots, directory=directory)
return
quantum_r = Q_program.create_quantum_register("qr", size)
classical_r = Q_program.create_classical_register("cr", size)
circuit = Q_program.create_circuit("envariance", [quantum_r], [classical_r])
connected = utility.envariance(circuit=circuit, quantum_r=quantum_r, classical_r=classical_r, n_qubits=n_qubits)
QASM_source = Q_program.get_qasm("envariance")
logger.debug('launch_exp() - QASM:\n%s', str(QASM_source))
while True:
try:
backend_status = Q_program.get_backend_status(device)
if ('available' in backend_status and backend_status['available'] is False) \
or ('busy' in backend_status and backend_status['busy'] is True):
logger.critical('%s currently offline, waiting...', device)
while Q_program.get_backend_status(device)['available'] is False:
sleep(1800)
logger.critical('%s is back online, resuming execution', device)
except ConnectionError:
logger.critical('Error getting backend status, retrying...')
sleep(900)
continue
except ValueError:
logger.critical('Backend is not available, waiting...')
sleep(900)
continue
break
if Q_program.get_api().get_my_credits()['remaining'] < 3:
logger.critical('Qubits %d - Execution %d - Shots %d ---- Waiting for credits to replenish...',
n_qubits, execution, num_shots)
while Q_program.get_api().get_my_credits()['remaining'] < 3:
sleep(900)
logger.critical('Credits replenished, resuming execution')
try:
result = Q_program.execute(["envariance"], backend=device, wait=2, timeout=1000, shots=num_shots, max_credits=5)
except Exception:
sleep(900)
logger.critical('Exception occurred, retrying\nQubits %d - Execution %d - Shots %d', n_qubits, execution,
num_shots)
envariance_exec(execution, device, utility, n_qubits=n_qubits, num_shots=num_shots, directory=directory)
return
try:
counts = result.get_counts("envariance")
except Exception:
logger.critical('Exception occurred, retrying\nQubits %d - Execution %d - Shots %d', n_qubits, execution,
num_shots)
envariance_exec(execution, device, utility, n_qubits=n_qubits, num_shots=num_shots, directory=directory)
return
logger.debug('launch_exp() - counts:\n%s', str(counts))
sorted_c = sorted(counts.items(), key=operator.itemgetter(1), reverse=True)
filename = directory + device + '/' + 'execution' + str(
execution) + '/' + device + '_' + str(num_shots) + '_' + str(
n_qubits) + '_qubits_envariance.txt'
os.makedirs(os.path.dirname(filename), exist_ok=True)
out_f = open(filename, 'w')
# store counts in txt file and xlsx file
out_f.write('VALUES\t\tCOUNTS\n\n')
stop = n_qubits // 2
for i in sorted_c:
reverse = i[0][::-1]
sorted_v = []
for n in range(n_qubits - stop):
sorted_v.append(reverse[connected[n + stop]])
for n in range(stop):
sorted_v.append(reverse[connected[n]])
value = ''.join(str(v) for v in sorted_v)
results.update({value: i[1]})
out_f.write(value + '\t' + str(i[1]) + '\n')
out_f.close()
# launch parity experiment on the given device
def parity_exec(execution, device, utility, n_qubits, oracle='11', num_shots=1024, directory='Data_Parity/'):
os.makedirs(os.path.dirname(directory), exist_ok=True)
size = 0
results = dict()
if device == qx2 or device == qx4:
if n_qubits <= 5:
size = 5
# device = 'ibmqx_qasm_simulator'
else:
logger.critical('launch_exp() - Too much qubits for %s !', device)
exit(1)
elif device == qx3 or device == qx5:
if n_qubits <= 16:
size = 16
# device = 'ibmqx_qasm_simulator'
else:
logger.critical('launch_exp() - Too much qubits for %s !', device)
exit(2)
elif device == online_sim:
if n_qubits <= 5:
size = 5
elif n_qubits <= 16:
size = 16
else:
logger.critical('launch_exp() - Unknown device.')
exit(3)
Q_program = QuantumProgram()
try:
Q_program.set_api(Qconfig.APItoken, Qconfig.config["url"]) # set the APIToken and API url
except ConnectionError:
sleep(900)
logger.critical('API Exception occurred, retrying\nQubits %d - Oracle %s - Execution %d - Queries %d', n_qubits,
oracle,
execution, num_shots)
parity_exec(execution, device, utility, n_qubits=n_qubits, oracle=oracle, num_shots=num_shots, directory=directory)
return
quantum_r = Q_program.create_quantum_register("qr", size)
classical_r = Q_program.create_classical_register("cr", size)
circuit = Q_program.create_circuit('parity', [quantum_r], [classical_r])
connected = utility.parity(circuit=circuit, quantum_r=quantum_r, classical_r=classical_r, n_qubits=n_qubits,
oracle=oracle)
QASM_source = Q_program.get_qasm('parity')
logger.debug('launch_exp() - QASM:\n%s', str(QASM_source))
while True:
try:
backend_status = Q_program.get_backend_status(device)
if ('available' in backend_status and backend_status['available'] is False) \
or ('busy' in backend_status and backend_status['busy'] is True):
logger.critical('%s currently offline, waiting...', device)
while Q_program.get_backend_status(device)['available'] is False:
sleep(1800)
logger.critical('%s is back online, resuming execution', device)
except ConnectionError:
logger.critical('Error getting backend status, retrying...')
sleep(900)
continue
except ValueError:
logger.critical('Backend is not available, waiting...')
sleep(900)
continue
break
if Q_program.get_api().get_my_credits()['remaining'] < 3:
logger.critical('Qubits %d - Oracle %s - Execution %d - Queries %d ---- Waiting for credits to replenish...',
n_qubits, oracle,
execution, num_shots)
while Q_program.get_api().get_my_credits()['remaining'] < 3:
sleep(900)
logger.critical('Credits replenished, resuming execution')
try:
result = Q_program.execute(['parity'], backend=device, wait=2, timeout=1000, shots=num_shots, max_credits=5)
except Exception:
sleep(900)
logger.critical('Exception occurred, retrying\nQubits %d - Oracle %s - Execution %d - Queries %d', n_qubits, oracle,
execution, num_shots)
parity_exec(execution, device, utility, n_qubits=n_qubits, oracle=oracle, num_shots=num_shots, directory=directory)
return
try:
counts = result.get_counts('parity')
except Exception:
logger.critical('Exception occurred, retrying\nQubits %d - Oracle %s - Execution %d - Queries %d', n_qubits, oracle,
execution, num_shots)
parity_exec(execution, device, utility, n_qubits=n_qubits, oracle=oracle, num_shots=num_shots, directory=directory)
return
logger.debug('launch_exp() - counts:\n%s', str(counts))
sorted_c = sorted(counts.items(), key=operator.itemgetter(1), reverse=True)
filename = directory + device + '/' + oracle + '/' + 'execution' + str(
execution) + '/' + device + '_' + str(
num_shots) + 'queries_' + oracle + '_' + str(
n_qubits) + '_qubits_parity.txt'
os.makedirs(os.path.dirname(filename), exist_ok=True)
out_f = open(filename, 'w')
# store counts in txt file and xlsx file
out_f.write('VALUES\t\tCOUNTS\n\n')
logger.debug('launch_exp() - oredred_q:\n%s', str(connected))
stop = n_qubits // 2
for i in sorted_c:
reverse = i[0][::-1]
logger.log(logging.VERBOSE, 'launch_exp() - reverse in for 1st loop: %s', str(reverse))
sorted_v = [reverse[connected[0]]]
logger.log(logging.VERBOSE, 'launch_exp() - connected[0] in 1st for loop: %s', str(connected[0]))
logger.log(logging.VERBOSE, 'launch_exp() - sorted_v in 1st for loop: %s', str(sorted_v))
for n in range(stop):
sorted_v.append(reverse[connected[n + 1]])
logger.log(logging.VERBOSE, 'launch_exp() - connected[n+1], sorted_v[n+1] in 2nd for loop: %s,%s',
str(connected[n + 1]), str(sorted_v[n + 1]))
if (n + stop + 1) != n_qubits:
sorted_v.append(reverse[connected[n + stop + 1]])
logger.log(logging.VERBOSE, 'launch_exp() - connected[n+stop+1], sorted_v[n+2] in 2nd for loop: %s%s',
str(connected[n + stop + 1]), str(sorted_v[n + 2]))
value = ''.join(str(v) for v in sorted_v)
results.update({value: i[1]})
out_f.write(value + '\t' + str(i[1]) + '\n')
out_f.close()
|
[
"sys.path.append",
"os.path.dirname",
"time.sleep",
"myLogger.MyHandler",
"qiskit.QuantumProgram",
"operator.itemgetter",
"logging.getLogger"
] |
[((1015, 1050), 'sys.path.append', 'sys.path.append', (['"""../qiskit-sdk-py"""'], {}), "('../qiskit-sdk-py')\n", (1030, 1050), False, 'import sys\n'), ((1212, 1240), 'logging.getLogger', 'logging.getLogger', (['"""utility"""'], {}), "('utility')\n", (1229, 1240), False, 'import logging\n'), ((1259, 1279), 'myLogger.MyHandler', 'myLogger.MyHandler', ([], {}), '()\n', (1277, 1279), False, 'import myLogger\n'), ((11314, 11330), 'qiskit.QuantumProgram', 'QuantumProgram', ([], {}), '()\n', (11328, 11330), False, 'from qiskit import QuantumProgram\n'), ((16162, 16178), 'qiskit.QuantumProgram', 'QuantumProgram', ([], {}), '()\n', (16176, 16178), False, 'from qiskit import QuantumProgram\n'), ((10510, 10536), 'os.path.dirname', 'os.path.dirname', (['directory'], {}), '(directory)\n', (10525, 10536), False, 'import os\n'), ((14583, 14608), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (14598, 14608), False, 'import os\n'), ((15358, 15384), 'os.path.dirname', 'os.path.dirname', (['directory'], {}), '(directory)\n', (15373, 15384), False, 'import os\n'), ((19644, 19669), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (19659, 19669), False, 'import os\n'), ((11476, 11486), 'time.sleep', 'sleep', (['(900)'], {}), '(900)\n', (11481, 11486), False, 'from time import sleep\n'), ((13363, 13373), 'time.sleep', 'sleep', (['(900)'], {}), '(900)\n', (13368, 13373), False, 'from time import sleep\n'), ((13602, 13612), 'time.sleep', 'sleep', (['(900)'], {}), '(900)\n', (13607, 13612), False, 'from time import sleep\n'), ((14352, 14374), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (14371, 14374), False, 'import operator\n'), ((16324, 16334), 'time.sleep', 'sleep', (['(900)'], {}), '(900)\n', (16329, 16334), False, 'from time import sleep\n'), ((18332, 18342), 'time.sleep', 'sleep', (['(900)'], {}), '(900)\n', (18337, 18342), False, 'from time import sleep\n'), ((18567, 18577), 'time.sleep', 'sleep', (['(900)'], {}), '(900)\n', (18572, 18577), False, 'from time import sleep\n'), ((19371, 19393), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (19390, 19393), False, 'import operator\n'), ((7659, 7681), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (7678, 7681), False, 'import operator\n'), ((9744, 9766), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (9763, 9766), False, 'import operator\n'), ((12874, 12884), 'time.sleep', 'sleep', (['(900)'], {}), '(900)\n', (12879, 12884), False, 'from time import sleep\n'), ((13013, 13023), 'time.sleep', 'sleep', (['(900)'], {}), '(900)\n', (13018, 13023), False, 'from time import sleep\n'), ((17805, 17815), 'time.sleep', 'sleep', (['(900)'], {}), '(900)\n', (17810, 17815), False, 'from time import sleep\n'), ((17944, 17954), 'time.sleep', 'sleep', (['(900)'], {}), '(900)\n', (17949, 17954), False, 'from time import sleep\n'), ((4108, 4130), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (4127, 4130), False, 'import operator\n'), ((12664, 12675), 'time.sleep', 'sleep', (['(1800)'], {}), '(1800)\n', (12669, 12675), False, 'from time import sleep\n'), ((17595, 17606), 'time.sleep', 'sleep', (['(1800)'], {}), '(1800)\n', (17600, 17606), False, 'from time import sleep\n')]
|
import os
import time
import run_services
from Basic_linux_commands.chown_chmod import chown
from bigdata_logs.logger import getLoggingInstance
log = getLoggingInstance()
username = os.getenv("user")
groupname = username
def copy(src, file_list, dest, user_pass, *args):
log.info("\nCopying\n")
try:
file_not_exists = []
if src.endswith('/'):
src = src[:-1]
if dest.endswith('/'):
dest = dest[:-1]
if not os.path.exists(dest):
os.makedirs(dest)
for file in file_list:
file_to_copy = src + '/%s' % file
if not os.path.exists(file_to_copy):
file_not_exists.append(file_to_copy)
else:
if os.path.isdir(file_to_copy):
unique_id = str(time.time()).split('.')[0]
copied_file = dest + '/%s_%s' % (file, unique_id)
run_services.run_basic_services("echo %s | sudo -S cp -r %s %s" % (user_pass, file_to_copy, copied_file))
chown(copied_file, username, groupname, user_pass)
else:
unique_id = str(time.time()).split('.')[0]
copied_file = dest + '/%s_%s' % (file, unique_id)
run_services.run_basic_services("echo %s | sudo -S cp %s %s" % (user_pass, file_to_copy, copied_file))
chown(copied_file, username, groupname, user_pass)
if file_not_exists:
return '{"success": 0, "msg": ["%s", file does not exists!!!]}' % file_not_exists
return '{"success": 1}'
except Exception as e:
log.error("Exception in copy_move_delete ==> copy()")
log.error(e)
return '{"success": 0, "msg": ["%s"]}' % e
def move_rename(src, file_list, dest, user_pass):
try:
file_not_exists = []
existing_file_list = []
if src.endswith('/'):
src = src[:-1]
if dest.endswith('/'):
dest = dest[:-1]
for file in file_list:
file_to_move = src+ '/%s' % file
dest_path = dest + '/%s' % file
if os.path.exists(dest_path):
existing_file_list.append(dest_path)
continue
if not os.path.exists(file_to_move):
file_not_exists.append(file_to_move)
else:
run_services.run_basic_services("echo %s | sudo -S mv %s %s" % (user_pass, file_to_move, dest))
if file_not_exists:
return '{"success": 0, "msg": ["%s", file does not exists!!!]}' % file_not_exists
elif existing_file_list:
return '{"success": 0, "msg": ["%s", file already exists!!!]}' % existing_file_list
return '{"success": 1}'
except Exception as e:
log.error("Exception in copy_move_delete ==> move()")
log.error(e)
return '{"success": 0, "msg": ["%s"]}' % e
def delete(src, files, user_pass):
try:
file_not_exists = []
if src.endswith('/'):
src = src[:-1]
for file in files:
file_path = src + '/%s' % file
if not os.path.exists(file_path):
file_not_exists.append(file_path)
run_services.run_basic_services("echo %s | sudo -S rm -rf %s" % (user_pass, file_path))
if file_not_exists:
return '{"success": 0, "msg": ["%s", file does not exists!!!]}' % file_not_exists
return '{"success": 1}'
except Exception as e:
log.error("Exception in copy_move_delete ==> move()")
log.error(e)
return '{"success": 0, "msg": ["%s"]}' % e
|
[
"os.makedirs",
"os.path.isdir",
"os.path.exists",
"run_services.run_basic_services",
"time.time",
"bigdata_logs.logger.getLoggingInstance",
"os.getenv",
"Basic_linux_commands.chown_chmod.chown"
] |
[((152, 172), 'bigdata_logs.logger.getLoggingInstance', 'getLoggingInstance', ([], {}), '()\n', (170, 172), False, 'from bigdata_logs.logger import getLoggingInstance\n'), ((185, 202), 'os.getenv', 'os.getenv', (['"""user"""'], {}), "('user')\n", (194, 202), False, 'import os\n'), ((476, 496), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (490, 496), False, 'import os\n'), ((510, 527), 'os.makedirs', 'os.makedirs', (['dest'], {}), '(dest)\n', (521, 527), False, 'import os\n'), ((2145, 2170), 'os.path.exists', 'os.path.exists', (['dest_path'], {}), '(dest_path)\n', (2159, 2170), False, 'import os\n'), ((3239, 3330), 'run_services.run_basic_services', 'run_services.run_basic_services', (["('echo %s | sudo -S rm -rf %s' % (user_pass, file_path))"], {}), "('echo %s | sudo -S rm -rf %s' % (user_pass,\n file_path))\n", (3270, 3330), False, 'import run_services\n'), ((626, 654), 'os.path.exists', 'os.path.exists', (['file_to_copy'], {}), '(file_to_copy)\n', (640, 654), False, 'import os\n'), ((746, 773), 'os.path.isdir', 'os.path.isdir', (['file_to_copy'], {}), '(file_to_copy)\n', (759, 773), False, 'import os\n'), ((2269, 2297), 'os.path.exists', 'os.path.exists', (['file_to_move'], {}), '(file_to_move)\n', (2283, 2297), False, 'import os\n'), ((2386, 2485), 'run_services.run_basic_services', 'run_services.run_basic_services', (["('echo %s | sudo -S mv %s %s' % (user_pass, file_to_move, dest))"], {}), "('echo %s | sudo -S mv %s %s' % (user_pass,\n file_to_move, dest))\n", (2417, 2485), False, 'import run_services\n'), ((3149, 3174), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (3163, 3174), False, 'import os\n'), ((928, 1038), 'run_services.run_basic_services', 'run_services.run_basic_services', (["('echo %s | sudo -S cp -r %s %s' % (user_pass, file_to_copy, copied_file))"], {}), "('echo %s | sudo -S cp -r %s %s' % (\n user_pass, file_to_copy, copied_file))\n", (959, 1038), False, 'import run_services\n'), ((1054, 1104), 'Basic_linux_commands.chown_chmod.chown', 'chown', (['copied_file', 'username', 'groupname', 'user_pass'], {}), '(copied_file, username, groupname, user_pass)\n', (1059, 1104), False, 'from Basic_linux_commands.chown_chmod import chown\n'), ((1280, 1386), 'run_services.run_basic_services', 'run_services.run_basic_services', (["('echo %s | sudo -S cp %s %s' % (user_pass, file_to_copy, copied_file))"], {}), "('echo %s | sudo -S cp %s %s' % (user_pass,\n file_to_copy, copied_file))\n", (1311, 1386), False, 'import run_services\n'), ((1403, 1453), 'Basic_linux_commands.chown_chmod.chown', 'chown', (['copied_file', 'username', 'groupname', 'user_pass'], {}), '(copied_file, username, groupname, user_pass)\n', (1408, 1453), False, 'from Basic_linux_commands.chown_chmod import chown\n'), ((811, 822), 'time.time', 'time.time', ([], {}), '()\n', (820, 822), False, 'import time\n'), ((1163, 1174), 'time.time', 'time.time', ([], {}), '()\n', (1172, 1174), False, 'import time\n')]
|
import os
import pytest
from dj_database_url import parse
from django.conf import settings
from testing.postgresql import Postgresql
postgres = os.environ.get("POSTGRESQL_PATH")
initdb = os.environ.get("INITDB_PATH")
_POSTGRESQL = Postgresql(postgres=postgres, initdb=initdb)
@pytest.hookimpl(tryfirst=True)
def pytest_load_initial_conftests(early_config, parser, args):
os.environ["DJANGO_SETTINGS_MODULE"] = early_config.getini("DJANGO_SETTINGS_MODULE")
settings.DATABASES["default"] = parse(_POSTGRESQL.url())
settings.DATABASES["dashboard"] = parse(_POSTGRESQL.url())
def pytest_unconfigure(config):
_POSTGRESQL.stop()
|
[
"os.environ.get",
"pytest.hookimpl",
"testing.postgresql.Postgresql"
] |
[((146, 179), 'os.environ.get', 'os.environ.get', (['"""POSTGRESQL_PATH"""'], {}), "('POSTGRESQL_PATH')\n", (160, 179), False, 'import os\n'), ((189, 218), 'os.environ.get', 'os.environ.get', (['"""INITDB_PATH"""'], {}), "('INITDB_PATH')\n", (203, 218), False, 'import os\n'), ((233, 277), 'testing.postgresql.Postgresql', 'Postgresql', ([], {'postgres': 'postgres', 'initdb': 'initdb'}), '(postgres=postgres, initdb=initdb)\n', (243, 277), False, 'from testing.postgresql import Postgresql\n'), ((281, 311), 'pytest.hookimpl', 'pytest.hookimpl', ([], {'tryfirst': '(True)'}), '(tryfirst=True)\n', (296, 311), False, 'import pytest\n')]
|
"""
Script that scrapes Jaimini's box website to retrieve
when was the last One Piece chapter released to then ask you if
you want to read the chapter in your browser or download it
"""
from bs4 import BeautifulSoup
import requests
import webbrowser
from os import getcwd
url = 'https://jaiminisbox.com/reader/series/one-piece-2/' #website with chapters
r = requests.get(url).text
soup = BeautifulSoup(r, 'lxml')
#the first <div> tag with class 'element' is the latest chapter
#there are 3 anchor tags <a> with 'href' attributes within the div
#links respectively redirect to:download, reading and the uploader's profile
chapter = soup.find('div', class_='element')
chapter_title = chapter.find('div', class_='title').text
chapter_date = chapter.find('div', class_='meta_r').text
print('the latest one piece chapter is...\n')
print(chapter_title)
print(chapter_date+'(yyyy/mm/dd)\n')
links = chapter.find_all('a') #list of len 3 with links inside <a> tags
#convert links' content into strings to use split() to get the urls as strings
for i in range(len(links)):
links[i] = str(links[i])
links[i] = links[i].split('"')
#the 3 items in links are now lists where the second element for each are urls
#print(links)
#visually: [[htmlstuff, url, htmlstuff], [idem], [idem]]
#This format has been consistently used, which allows to hardcode this
#without the use of regular expressions for 'http*'
action = input('would you like to download or read the chapter [d/r]?')
if action == 'd':
d_url = links[0][1]
print('Downloading...')
r2 = requests.get(d_url)
chapter_zip = open('%s' %chapter_title, 'wb')
chapter_zip.write(r2.content)
chapter_zip.close()
print('Zip file in:', getcwd())
elif action == 'r':
r_url = links[1][1]
webbrowser.open(r_url)
else:
print('that was neither d nor r, quitting...')
|
[
"bs4.BeautifulSoup",
"webbrowser.open",
"os.getcwd",
"requests.get"
] |
[((390, 414), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r', '"""lxml"""'], {}), "(r, 'lxml')\n", (403, 414), False, 'from bs4 import BeautifulSoup\n'), ((360, 377), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (372, 377), False, 'import requests\n'), ((1566, 1585), 'requests.get', 'requests.get', (['d_url'], {}), '(d_url)\n', (1578, 1585), False, 'import requests\n'), ((1720, 1728), 'os.getcwd', 'getcwd', ([], {}), '()\n', (1726, 1728), False, 'from os import getcwd\n'), ((1778, 1800), 'webbrowser.open', 'webbrowser.open', (['r_url'], {}), '(r_url)\n', (1793, 1800), False, 'import webbrowser\n')]
|
import json
import xmltodict
with open('complexes.json', 'r', encoding='UTF-8') as f:
jsonString = f.read()
print('JSON input (json_to_xml.json):')
print(jsonString)
xmlString = xmltodict.unparse(json.loads(jsonString), pretty=True)
print('\nXML output(json_to_xml.xml):')
print(xmlString)
with open('json_to_xml.xml', 'w') as f:
f.write(xmlString)
|
[
"json.loads"
] |
[((207, 229), 'json.loads', 'json.loads', (['jsonString'], {}), '(jsonString)\n', (217, 229), False, 'import json\n')]
|
import numpy as np
from openmdao.api import CaseReader
from optigurator.utils import recording_filename
def get_case_reader(data_dir, problem_constants):
return CaseReader(recording_filename(data_dir, problem_constants.id))
def generate_valid_points(problem_constants, crm):
for (i, case_id) in enumerate(crm.list_cases()):
model_case = crm.get_case(case_id)
if (
model_case.outputs["usability.min_max_step_height"][1]
<= problem_constants.step_height.upper
and model_case.outputs["usability.min_max_step_depth"][0]
>= problem_constants.step_depth.lower
and model_case.outputs["usability.min_free_height"][0]
> problem_constants.free_height_lower
):
yield [
model_case.outputs["price_availability.total_price"][0],
model_case.outputs["usability.usability_penalty"][0],
model_case.outputs["price_availability.total_delivery_time"][0],
i,
]
def calculate(inputPoints, dominates):
paretoPoints = set()
candidateRowNr = 0
dominatedPoints = set()
normalizedRowNr = 0
# skapar en kopia på matrisen som normaliseras senare
normalizedPoints = np.array(inputPoints.copy())
sum1 = 0
sum2 = 0
sum3 = 0
sum4 = 0
for i in range(0, len(normalizedPoints)):
# summerar värden kolonnvis till nämnare för normalisering
sum1 = sum1 + normalizedPoints[i, 0] ** 2
sum2 = sum2 + normalizedPoints[i, 1] ** 2
sum3 = sum3 + normalizedPoints[i, 2] ** 2
# definerar en vektor med normaliseringsvärden
myarray_normalize = [sum1 ** 0.5, sum2 ** 0.5, sum3 ** 0.5, 1]
# Normaliserar matrisen
normalizedPoints = np.array(inputPoints) / np.array(myarray_normalize)
while True:
candidateRow = inputPoints[candidateRowNr]
normalized = normalizedPoints[normalizedRowNr]
normalizedPoints = np.delete(normalizedPoints, normalizedRowNr, 0)
inputPoints.remove(candidateRow)
rowNr = 0
nonDominated = True
while len(normalizedPoints) != 0 and rowNr < len(normalizedPoints):
row = normalizedPoints[rowNr]
rowIP = inputPoints[rowNr]
if dominates(
row, normalized
): # Går in om candidateRow är bättre än utmanaren.
normalizedPoints = np.delete(normalizedPoints, rowNr, 0)
inputPoints.remove(rowIP)
dominatedPoints.add(tuple(rowIP))
elif dominates(
normalized, row
): # Går in om utmanare är större än kandidaten.
nonDominated = False
dominatedPoints.add(tuple(candidateRow))
rowNr += 1
else:
rowNr += 1
if nonDominated: # Lägg till nondominated punkter till pareto
ID = int(normalized[3])
paretoPoints.add(tuple(candidateRow))
if len(normalizedPoints) == 0: # SLutar när man gått igenom alla punkter.
break
dp = np.array(list(dominatedPoints))
pp = np.array(list(paretoPoints))
return paretoPoints, dominatedPoints, dp, pp
def dominates(row, normalized): # Beräknar om utmanare är bättre än candidate.
return sum([row[x] >= normalized[x] for x in range(len(row) - 1)]) == len(row) - 1
def WeightPPpoints(pp, my_weights):
Pareto_points = pp
np.size(Pareto_points)
Nrofrows_pareto = np.size(Pareto_points, 0)
# skapar en vektor med ID
ID_vektor = np.delete(Pareto_points, [0, 1, 2], 1).tolist()
# skapar matris med outputvärden utan ID kolonn
A = np.delete(Pareto_points, 3, 1)
np.size(A)
# definerar storleken på matrisen som kommer som paretopoints output
Nrofcolumns = np.size(A, 1)
Nrofrows = np.size(A, 0)
sizeofA = (Nrofrows, Nrofcolumns)
# Skapar matris som sedan fylls med bästa lösningarnas ID
IDpoints = []
# skapar en kopia på matrisen som normaliseras senare
B = A.copy()
sum1 = 0
sum2 = 0
sum3 = 0
for i in range(0, Nrofrows):
# summerar värden kolonnvis till nämnare för normalisering
sum1 = sum1 + A[i, 0] ** 2
sum2 = sum2 + A[i, 1] ** 2
sum3 = sum3 + A[i, 2] ** 2
# definerar en vektor med normaliseringsvärden
myarray_normalize = [sum1 ** 0.5, sum2 ** 0.5, sum3 ** 0.5]
# Normaliserar matrisen
B = A / myarray_normalize
# kopierar matrisen och multiplicerar kolonnvis med viktningar
C = B.copy()
# Loop för 5 olika viktningar -> 5 optimala pareto punkter som output
for j in range(0, len(my_weights)):
for i in range(0, Nrofrows):
C[i, 0] = B[i, 0] * my_weights[j, 0]
C[i, 1] = B[i, 1] * my_weights[j, 1]
C[i, 2] = B[i, 2] * my_weights[j, 2]
# Definera ideala värden A_positive samt icke ideala värden A_negative
A_positive = [C[:, 0].min(), C[:, 1].min(), C[:, 2].min()]
A_negative = [C[:, 0].max(), C[:, 1].max(), C[:, 2].max()]
S_positive = np.zeros((Nrofrows, 1))
S_negative = np.zeros((Nrofrows, 1))
C_value = np.zeros((Nrofrows, 1))
# Vektor_ID_optimala=np.zeros((1,5))
for i in range(0, Nrofrows):
S_positive[i] = (
(C[i, 0] - A_positive[0]) ** 2
+ (C[i, 1] - A_positive[1]) ** 2
+ (C[i, 2] - A_positive[2]) ** 2
) ** 0.5
S_negative[i] = (
(C[i, 0] - A_negative[0]) ** 2
+ (C[i, 1] - A_negative[1]) ** 2
+ (C[i, 2] - A_negative[2]) ** 2
) ** 0.5
C_value[i] = S_negative[i] / (S_negative[i] + S_positive[i])
Best_value = C_value.max()
# ta fram vilken rad i C_vektorn som har det största värdet
Row_best_option = np.argmax(C_value)
# ta fram vilket ingående ID lösningen har
Vektor_ID_optimala = np.array(ID_vektor[Row_best_option]).tolist()
IDpoints.append(int(max(Vektor_ID_optimala)))
return IDpoints
def generate_pareto_cases(data_dir, problem_constants):
crm = get_case_reader(data_dir, problem_constants)
input_points = list(generate_valid_points(problem_constants, crm))
pareto_points, dominated_points, dp, pp = calculate(input_points, dominates)
my_weights = np.matrix(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]]
) # Weights used to pick points from the ParetoFront
pareto_case_ids = WeightPPpoints(pp, my_weights)
for i in pareto_case_ids:
yield crm.get_case(i)
|
[
"numpy.matrix",
"numpy.size",
"numpy.argmax",
"numpy.zeros",
"numpy.array",
"optigurator.utils.recording_filename",
"numpy.delete"
] |
[((3474, 3496), 'numpy.size', 'np.size', (['Pareto_points'], {}), '(Pareto_points)\n', (3481, 3496), True, 'import numpy as np\n'), ((3519, 3544), 'numpy.size', 'np.size', (['Pareto_points', '(0)'], {}), '(Pareto_points, 0)\n', (3526, 3544), True, 'import numpy as np\n'), ((3701, 3731), 'numpy.delete', 'np.delete', (['Pareto_points', '(3)', '(1)'], {}), '(Pareto_points, 3, 1)\n', (3710, 3731), True, 'import numpy as np\n'), ((3737, 3747), 'numpy.size', 'np.size', (['A'], {}), '(A)\n', (3744, 3747), True, 'import numpy as np\n'), ((3839, 3852), 'numpy.size', 'np.size', (['A', '(1)'], {}), '(A, 1)\n', (3846, 3852), True, 'import numpy as np\n'), ((3868, 3881), 'numpy.size', 'np.size', (['A', '(0)'], {}), '(A, 0)\n', (3875, 3881), True, 'import numpy as np\n'), ((6413, 6457), 'numpy.matrix', 'np.matrix', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (6422, 6457), True, 'import numpy as np\n'), ((179, 229), 'optigurator.utils.recording_filename', 'recording_filename', (['data_dir', 'problem_constants.id'], {}), '(data_dir, problem_constants.id)\n', (197, 229), False, 'from optigurator.utils import recording_filename\n'), ((1777, 1798), 'numpy.array', 'np.array', (['inputPoints'], {}), '(inputPoints)\n', (1785, 1798), True, 'import numpy as np\n'), ((1801, 1828), 'numpy.array', 'np.array', (['myarray_normalize'], {}), '(myarray_normalize)\n', (1809, 1828), True, 'import numpy as np\n'), ((1979, 2026), 'numpy.delete', 'np.delete', (['normalizedPoints', 'normalizedRowNr', '(0)'], {}), '(normalizedPoints, normalizedRowNr, 0)\n', (1988, 2026), True, 'import numpy as np\n'), ((5119, 5142), 'numpy.zeros', 'np.zeros', (['(Nrofrows, 1)'], {}), '((Nrofrows, 1))\n', (5127, 5142), True, 'import numpy as np\n'), ((5164, 5187), 'numpy.zeros', 'np.zeros', (['(Nrofrows, 1)'], {}), '((Nrofrows, 1))\n', (5172, 5187), True, 'import numpy as np\n'), ((5206, 5229), 'numpy.zeros', 'np.zeros', (['(Nrofrows, 1)'], {}), '((Nrofrows, 1))\n', (5214, 5229), True, 'import numpy as np\n'), ((5910, 5928), 'numpy.argmax', 'np.argmax', (['C_value'], {}), '(C_value)\n', (5919, 5928), True, 'import numpy as np\n'), ((3592, 3630), 'numpy.delete', 'np.delete', (['Pareto_points', '[0, 1, 2]', '(1)'], {}), '(Pareto_points, [0, 1, 2], 1)\n', (3601, 3630), True, 'import numpy as np\n'), ((2430, 2467), 'numpy.delete', 'np.delete', (['normalizedPoints', 'rowNr', '(0)'], {}), '(normalizedPoints, rowNr, 0)\n', (2439, 2467), True, 'import numpy as np\n'), ((6010, 6046), 'numpy.array', 'np.array', (['ID_vektor[Row_best_option]'], {}), '(ID_vektor[Row_best_option])\n', (6018, 6046), True, 'import numpy as np\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
# Module: alter_statement
# Purpose: Represents SQL alter statements
#
# Notes:
#
###############################################################################
import data_pipeline.sql.utils as sql_utils
import data_pipeline.constants.const as const
from .ddl_statement import DdlStatement
class AlterStatement(DdlStatement):
"""Contains data necessary to produce a valid SQL ALTER statement"""
def __init__(self, table_name):
super(AlterStatement, self).__init__(table_name)
self.statement_type = const.ALTER
def add_entry(self, **kwargs):
if const.ALTER_ENTRY in kwargs:
self.entries.append(kwargs[const.ALTER_ENTRY])
else:
alter_entry = {
const.OPERATION: kwargs[const.OPERATION],
const.FIELD_NAME: kwargs[const.FIELD_NAME],
const.DATA_TYPE: kwargs[const.DATA_TYPE],
const.PARAMS: kwargs[const.PARAMS],
const.CONSTRAINTS: kwargs[const.CONSTRAINTS]
}
self.add_entry(alter_entry=alter_entry)
def tosql(self, applier):
return applier.build_alter_sql(self)
def __str__(self):
return sql_utils.build_alter_sql(self)
|
[
"data_pipeline.sql.utils.build_alter_sql"
] |
[((2063, 2094), 'data_pipeline.sql.utils.build_alter_sql', 'sql_utils.build_alter_sql', (['self'], {}), '(self)\n', (2088, 2094), True, 'import data_pipeline.sql.utils as sql_utils\n')]
|
"""
Read in the data from csv files
"""
import pandas as pd
import os
import glob
from save_data import save_object, load_object
def load_csv(filename):
# load data from pickle file if it exists
obj = load_object(filename)
if obj != None:
return obj
# otherwise load from csv
else:
data = pd.read_csv(filename, encoding="latin_1")
save_object(filename, data)
return data
def load_data():
pickle_fn = "data/loaded_data"
data = load_object(pickle_fn)
# load data from pickle file if it exists
if data != None:
return data
# otherwise load from csv
else:
data = {}
# load all csv files in data directory
for f in glob.glob(os.path.join("data", "*.csv")):
# key based on their filename
f_key = os.path.basename(f).split('.')[0]
print("Loading:", f_key)
data[f_key] = load_csv(f)
save_object(pickle_fn, data)
return data
def test_load_csv():
print("Test loading csv")
data = load_csv("data/NCAATourneySeeds.csv")
print(data)
def main():
data = load_data()
print("Available DataSet Keys: ")
for key in data.keys():
print("\t"+key)
if __name__ == "__main__":
main()
|
[
"os.path.join",
"os.path.basename",
"pandas.read_csv",
"save_data.load_object",
"save_data.save_object"
] |
[((212, 233), 'save_data.load_object', 'load_object', (['filename'], {}), '(filename)\n', (223, 233), False, 'from save_data import save_object, load_object\n'), ((493, 515), 'save_data.load_object', 'load_object', (['pickle_fn'], {}), '(pickle_fn)\n', (504, 515), False, 'from save_data import save_object, load_object\n'), ((329, 370), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'encoding': '"""latin_1"""'}), "(filename, encoding='latin_1')\n", (340, 370), True, 'import pandas as pd\n'), ((379, 406), 'save_data.save_object', 'save_object', (['filename', 'data'], {}), '(filename, data)\n', (390, 406), False, 'from save_data import save_object, load_object\n'), ((950, 978), 'save_data.save_object', 'save_object', (['pickle_fn', 'data'], {}), '(pickle_fn, data)\n', (961, 978), False, 'from save_data import save_object, load_object\n'), ((737, 766), 'os.path.join', 'os.path.join', (['"""data"""', '"""*.csv"""'], {}), "('data', '*.csv')\n", (749, 766), False, 'import os\n'), ((831, 850), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (847, 850), False, 'import os\n')]
|
from urllib.parse import ParseResult
from os.path import realpath, dirname, join as _path_join
import requests
from json import load as json_load
script_loc = realpath(__file__)
script_dir = dirname(script_loc)
del dirname
del realpath
mime_types: dict
with open(_path_join(script_dir, "mimes.json")) as f:
mime_types = json_load(f)
UA_m = "Mozilla/5.0 (Linux; Android 8.1.0; Pixel Build/OPM2.171019.029; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/68.0.3325.109 Mobile Safari/537.36"
UA_d = "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3526.73 Safari/537.36"
basic_headers = {
"Accept-Encoding": "gzip, deflate",
"User-Agent": UA_d,
"Upgrade-Insecure-Requests": "1",
"Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
"dnt": "1",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
}
def _abort_request_after(url: str, byte_len: int = 1024):
with requests.get(
url, headers=basic_headers, allow_redirects=True, stream=True
) as chunk:
for _ in chunk.iter_content(byte_len):
headers, url = chunk.headers, chunk.url
chunk.close()
return (headers, url)
def _normalise_url(parsed, remove_frag: bool = True):
d: dict = parsed._asdict()
d["scheme"] = d["scheme"].lower()
d["netloc"] = d["netloc"].lower()
d["fragment"] = ""
return ParseResult(**d)
def remove_quotes(s):
if s is None or len(s) < 2:
return s
for quote in ('"', "'"):
if s[0] == quote and s[-1] == quote:
return s[1:-1]
return s
def int_or_none(i: any):
if isinstance(i, int):
return i
try:
return int(i)
except:
return None
|
[
"json.load",
"urllib.parse.ParseResult",
"os.path.realpath",
"os.path.dirname",
"requests.get",
"os.path.join"
] |
[((160, 178), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (168, 178), False, 'from os.path import realpath, dirname, join as _path_join\n'), ((192, 211), 'os.path.dirname', 'dirname', (['script_loc'], {}), '(script_loc)\n', (199, 211), False, 'from os.path import realpath, dirname, join as _path_join\n'), ((326, 338), 'json.load', 'json_load', (['f'], {}), '(f)\n', (335, 338), True, 'from json import load as json_load\n'), ((1441, 1457), 'urllib.parse.ParseResult', 'ParseResult', ([], {}), '(**d)\n', (1452, 1457), False, 'from urllib.parse import ParseResult\n'), ((265, 301), 'os.path.join', '_path_join', (['script_dir', '"""mimes.json"""'], {}), "(script_dir, 'mimes.json')\n", (275, 301), True, 'from os.path import realpath, dirname, join as _path_join\n'), ((993, 1068), 'requests.get', 'requests.get', (['url'], {'headers': 'basic_headers', 'allow_redirects': '(True)', 'stream': '(True)'}), '(url, headers=basic_headers, allow_redirects=True, stream=True)\n', (1005, 1068), False, 'import requests\n')]
|
from disnake.ext import commands
from utils.clash import client, pingToChannel, getClan
import disnake
usafam = client.usafam
clans = usafam.clans
server = usafam.server
class autoB(commands.Cog, name="Board Setup"):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.slash_command(name="autoboard")
async def autoboard(self, ctx):
pass
@autoboard.sub_command(name="create", description="Create server autoposting leaderboards")
async def setupboard(self, ctx: disnake.ApplicationCommandInteraction, channel: disnake.TextChannel, autoboard_type: str = commands.Param(choices=["Player Leaderboard", "Clan Leaderboard"])):
perms = ctx.author.guild_permissions.manage_guild
if not perms:
embed = disnake.Embed(description="Command requires you to have `Manage Server` permissions.",
color=disnake.Color.red())
return await ctx.send(embed=embed)
await ctx.response.defer()
msg = await ctx.original_message()
country = None
if autoboard_type == "Clan Leaderboard":
rr = []
tracked = clans.find({"server": ctx.guild.id})
limit = await clans.count_documents(filter={"server": ctx.guild.id})
for clan in await tracked.to_list(length=limit):
tag = clan.get("tag")
c = await getClan(tag)
location = str(c.location)
if location not in rr:
rr.append(str(location))
options = []
for country in rr:
options.append(disnake.SelectOption(label=f"{country}", value=f"{country}"))
select1 = disnake.ui.Select(
options=options,
placeholder="Page Navigation",
min_values=1, # the minimum number of options a user must select
max_values=1 # the maximum number of options a user can select
)
action_row = disnake.ui.ActionRow()
action_row.append_item(select1)
embed = disnake.Embed(title="**For what country would you like the leaderboard autoboard?**",
color=disnake.Color.green())
await ctx.edit_original_message(embed=embed, components=[action_row])
def check(res: disnake.MessageInteraction):
return res.message.id == msg.id
country = False
while country == False:
try:
res: disnake.MessageInteraction = await self.bot.wait_for("message_interaction", check=check,
timeout=600)
except:
await msg.edit(components=[])
break
if res.author.id != ctx.author.id:
await res.send(content="You must run the command to interact with components.", ephemeral=True)
continue
country = str(res.values[0])
tex = ""
if autoboard_type == "Player Leaderboard":
await server.update_one({"server": ctx.guild.id}, {'$set': {"topboardchannel": channel.id}})
await server.update_one({"server": ctx.guild.id}, {'$set': {"tophour": 5}})
else:
await server.update_one({"server": ctx.guild.id}, {'$set': {"lbboardChannel": channel.id}})
await server.update_one({"server": ctx.guild.id}, {'$set': {"country": country}})
await server.update_one({"server": ctx.guild.id}, {'$set': {"lbhour": 5}})
tex = f"\nCountry: {country}"
time = f"<t:{1643263200}:t>"
embed = disnake.Embed(title="**Autoboard Successfully Setup**",
description=f"Channel: {channel.mention}\n"
f"Time: {time}\n"
f"Type: {autoboard_type}{tex}",
color=disnake.Color.green())
await msg.edit(embed=embed)
@autoboard.sub_command(name="remove", description="Remove a server autoboard")
async def removeboard(self, ctx: disnake.ApplicationCommandInteraction, autoboard_type: str = commands.Param(choices=["Player Leaderboard", "Clan Leaderboard"])):
perms = ctx.author.guild_permissions.manage_guild
if not perms:
embed = disnake.Embed(description="Command requires you to have `Manage Server` permissions.",
color=disnake.Color.red())
return await ctx.send(embed=embed)
if autoboard_type == "Player Leaderboard":
await server.update_one({"server": ctx.guild.id}, {'$set': {"topboardchannel": None}})
await server.update_one({"server": ctx.guild.id}, {'$set': {"tophour": None}})
else:
await server.update_one({"server": ctx.guild.id}, {'$set': {"lbboardChannel": None}})
await server.update_one({"server": ctx.guild.id}, {'$set': {"country": None}})
await server.update_one({"server": ctx.guild.id}, {'$set': {"lbhour": None}})
embed = disnake.Embed(description=f"{autoboard_type} autoboard has been removed.",
color=disnake.Color.green())
await ctx.send(embed=embed, components=[])
@autoboard.sub_command(name="list", description="View server autoboards")
async def boardlist(self, ctx):
tbc = None
th = None
lbc = None
lbh = None
country = None
results = await server.find_one({"server": ctx.guild.id})
real_times = []
start_time = 1643263200
for x in range(0, 24):
t = start_time + (x * 3600)
real_times.append(t)
try:
tbc = results.get("topboardchannel")
tbc = await pingToChannel(ctx, tbc)
tbc = tbc.mention
except:
pass
try:
th = results.get("tophour")
th = real_times[th - 5]
th = f"<t:1643263200:t>"
except:
pass
try:
lbc = results.get("lbboardChannel")
lbc = await pingToChannel(ctx, lbc)
lbc = lbc.mention
except:
pass
try:
lbh = results.get("lbhour")
lbh = real_times[lbh - 5]
lbh = f"<t:1643263200:t>"
except:
pass
try:
country = results.get("country")
except:
pass
embed = disnake.Embed(title="**Autoboard List**",
description=f"Player leaderboard Channel: {tbc}\n"
f"Player leaderboard Post Time: {th}\n"
f"Clan leaderboard Channel: {lbc}\n"
f"Clan leaderboard Post Time: {lbh}\n"
f"Clan leaderboard Country: {country}\n",
color=disnake.Color.green())
await ctx.send(embed=embed)
def setup(bot: commands.Bot):
bot.add_cog(autoB(bot))
|
[
"disnake.ui.ActionRow",
"disnake.Color.red",
"disnake.ext.commands.Param",
"disnake.SelectOption",
"utils.clash.getClan",
"utils.clash.pingToChannel",
"disnake.ui.Select",
"disnake.ext.commands.slash_command",
"disnake.Color.green"
] |
[((292, 332), 'disnake.ext.commands.slash_command', 'commands.slash_command', ([], {'name': '"""autoboard"""'}), "(name='autoboard')\n", (314, 332), False, 'from disnake.ext import commands\n'), ((606, 672), 'disnake.ext.commands.Param', 'commands.Param', ([], {'choices': "['Player Leaderboard', 'Clan Leaderboard']"}), "(choices=['Player Leaderboard', 'Clan Leaderboard'])\n", (620, 672), False, 'from disnake.ext import commands\n'), ((4281, 4347), 'disnake.ext.commands.Param', 'commands.Param', ([], {'choices': "['Player Leaderboard', 'Clan Leaderboard']"}), "(choices=['Player Leaderboard', 'Clan Leaderboard'])\n", (4295, 4347), False, 'from disnake.ext import commands\n'), ((1721, 1818), 'disnake.ui.Select', 'disnake.ui.Select', ([], {'options': 'options', 'placeholder': '"""Page Navigation"""', 'min_values': '(1)', 'max_values': '(1)'}), "(options=options, placeholder='Page Navigation',\n min_values=1, max_values=1)\n", (1738, 1818), False, 'import disnake\n'), ((2021, 2043), 'disnake.ui.ActionRow', 'disnake.ui.ActionRow', ([], {}), '()\n', (2041, 2043), False, 'import disnake\n'), ((4039, 4060), 'disnake.Color.green', 'disnake.Color.green', ([], {}), '()\n', (4058, 4060), False, 'import disnake\n'), ((5308, 5329), 'disnake.Color.green', 'disnake.Color.green', ([], {}), '()\n', (5327, 5329), False, 'import disnake\n'), ((5912, 5935), 'utils.clash.pingToChannel', 'pingToChannel', (['ctx', 'tbc'], {}), '(ctx, tbc)\n', (5925, 5935), False, 'from utils.clash import client, pingToChannel, getClan\n'), ((6246, 6269), 'utils.clash.pingToChannel', 'pingToChannel', (['ctx', 'lbc'], {}), '(ctx, lbc)\n', (6259, 6269), False, 'from utils.clash import client, pingToChannel, getClan\n'), ((7066, 7087), 'disnake.Color.green', 'disnake.Color.green', ([], {}), '()\n', (7085, 7087), False, 'import disnake\n'), ((902, 921), 'disnake.Color.red', 'disnake.Color.red', ([], {}), '()\n', (919, 921), False, 'import disnake\n'), ((1408, 1420), 'utils.clash.getClan', 'getClan', (['tag'], {}), '(tag)\n', (1415, 1420), False, 'from utils.clash import client, pingToChannel, getClan\n'), ((1636, 1696), 'disnake.SelectOption', 'disnake.SelectOption', ([], {'label': 'f"""{country}"""', 'value': 'f"""{country}"""'}), "(label=f'{country}', value=f'{country}')\n", (1656, 1696), False, 'import disnake\n'), ((2235, 2256), 'disnake.Color.green', 'disnake.Color.green', ([], {}), '()\n', (2254, 2256), False, 'import disnake\n'), ((4577, 4596), 'disnake.Color.red', 'disnake.Color.red', ([], {}), '()\n', (4594, 4596), False, 'import disnake\n')]
|
# coding: utf-8
# Third Party Libraries
from sanic_transmute import add_route
from transmute_core.compat import string_type
from transmute_core.function import TransmuteAttributes
def describe_add_route(blueprint, **kwargs):
# if we have a single method, make it a list.
if isinstance(kwargs.get("paths"), string_type):
kwargs["paths"] = [kwargs["paths"]]
if isinstance(kwargs.get("methods"), string_type):
kwargs["methods"] = [kwargs["methods"]]
attrs = TransmuteAttributes(**kwargs)
def decorator(fnc):
if hasattr(fnc, "transmute"):
fnc.transmute = fnc.transmute | attrs
else:
fnc.transmute = attrs
add_route(blueprint, fnc)
return fnc
return decorator
|
[
"transmute_core.function.TransmuteAttributes",
"sanic_transmute.add_route"
] |
[((491, 520), 'transmute_core.function.TransmuteAttributes', 'TransmuteAttributes', ([], {}), '(**kwargs)\n', (510, 520), False, 'from transmute_core.function import TransmuteAttributes\n'), ((690, 715), 'sanic_transmute.add_route', 'add_route', (['blueprint', 'fnc'], {}), '(blueprint, fnc)\n', (699, 715), False, 'from sanic_transmute import add_route\n')]
|
import os
from typing import Dict
from abc import ABC
from easy_sdm.data import ShapefileRegion
import geopandas as gpd
import numpy as np
import pandas as pd
import requests
from easy_sdm.configs import configs
from easy_sdm.utils import logger
from typing import Dict, Optional
from pathlib import Path
class GBIFOccurencesRequester:
"""[This class makes request to GBIF]
"""
def __init__(self, taxon_key: int, species_name: str):
self.taxon_key = taxon_key
self.species_name = species_name
self.base_url = "http://api.gbif.org/v1/occurrence/search"
def request(self, offset: int = 0):
"""[ Request GBIF information about an species]
Args:
offset (int, optional): [Offsset is a parameter to where starting the
request in GBIF databse, since the requests have a
limit of 300 row for request]. Defaults to 0.
Returns:
[type]: [int]
"""
gbif_configs = configs["gbif"]
params = {
"taxonKey": str(self.taxon_key),
"limit": gbif_configs["one_request_limit"],
"hasCoordinate": True,
"year": f"{gbif_configs['low_year']},{gbif_configs['up_year']}",
"country": gbif_configs["country"],
"offset": offset,
}
r = requests.get(self.base_url, params=params)
status_code = r.status_code
if r.status_code != 200:
logger.logging.info(
f"API call failed at offset {offset} with a status code of {r.status_code}."
)
end_of_records = True
else:
r = r.json()
end_of_records = r["endOfRecords"]
return r, end_of_records, status_code
class Species:
def __init__(self, taxon_key: int, name: str):
self.taxon_key = taxon_key
self.name = name
def __str__(self) -> str:
return "Species {self.name} with taxon key {self.taxon_key}"
class SpeciesDFBuilder:
"""[This class organize data requested to GBIF into pandas dataframes]
"""
def __init__(self, species: Species):
self.gbif_occ_requester = GBIFOccurencesRequester(
species.taxon_key, species.name
)
self.__df_memory = None
def get_specie_df(self):
"""Get species as DataFrame"""
if self.__df_memory:
df = self.__df_memory
else:
df = self.__request_species_df()
df = self.__clean_species_df(df)
self.__df_memory = df
return df
def __request_species_df(self):
"""[Organizes GBIF information in a dataframe considering offsets ]"""
end_of_records = False
offset = 0
status = 200
df = None
while end_of_records == False and status == 200:
r, end_of_records, status = self.gbif_occ_requester.request(offset)
df = self.__build_species_df(r, df)
offset = len(df) + 1
self.__clean_species_df(df)
return df
def __build_species_df(self, request, df=None):
"""[Create species dataframe with the request data]
Args:
df ([type]): [description]
request ([type]): [description]
Returns:
[df]: [description]
"""
if df is None:
df = pd.DataFrame(
columns=[
"SCIENTIFIC_NAME",
"LONGITUDE",
"LATITUDE",
"COUNTRY",
"STATE_PROVINCE",
"IDENTIFICATION_DATE",
"DAY",
"MONTH",
"YEAR",
]
)
for result in request["results"]:
result = self.__refact_dict(result)
df = df.append(
{
"SCIENTIFIC_NAME": result["scientificName"],
"LONGITUDE": result["decimalLongitude"],
"LATITUDE": result["decimalLatitude"],
"COUNTRY": result["country"],
"STATE_PROVINCE": result["stateProvince"],
"IDENTIFICATION_DATE": result["eventDate"],
"DAY": result["day"],
"MONTH": result["month"],
"YEAR": result["year"],
},
ignore_index=True,
)
return df
def __refact_dict(self, result: Dict):
"""Refact dict placing None in empty cells"""
columns = result.keys()
desired_columns = [
"scientificName",
"decimalLongitude",
"decimalLatitude",
"country",
"stateProvince",
"eventDate",
"day",
"month",
"year",
"occurrenceRemarks",
]
for d_col in desired_columns:
if d_col not in columns:
result[d_col] = None
return result
def __clean_species_df(self, df: pd.DataFrame):
"""[Cleaning Gbif Data]
Args:
df ([pd.DaraFrame]): [description]
Returns:
[pd.DaraFrame]: [description]
"""
# Double check to certify there is no empty lat/long data
df = df[pd.notnull(df["LATITUDE"])]
df = df[pd.notnull(df["LONGITUDE"])]
# Removing duplicate data
df = (
df.drop_duplicates(ignore_index=True)
if configs["gbif"]["drop_duplicates"]
else df
)
# Sorting Data by STATE_PROVINCE
df.sort_values("STATE_PROVINCE", inplace=True, ignore_index=True)
return df
class SpeciesGDFBuilder(SpeciesDFBuilder):
"""[This class organize data requested to GBIF into geopandas geodataframes]
"""
def __init__(
self, species: Species, proposed_region: Optional[ShapefileRegion] = None
):
super().__init__(species)
self.proposed_region = proposed_region
self.__gdf_memory = None
def save_species_gdf(self, output_path: Path):
if not str(output_path).endswith(".shp"):
raise TypeError("output_path must ends with shp")
output_path.parent.mkdir(parents=True, exist_ok=True)
gdf = self.get_species_gdf()
gdf.to_file(output_path)
def get_species_gdf(self):
if not (self.__gdf_memory is None):
gdf = self.__gdf_memory
else:
df = self.get_specie_df()
gdf = gpd.GeoDataFrame(
df, geometry=gpd.points_from_xy(df.LONGITUDE, df.LATITUDE)
)
gdf = gdf.set_crs(f"EPSG:{configs['maps']['default_epsg']}")
gdf = (
self.__filter_species_in_region(gdf)
if not (self.proposed_region is None)
else gdf
)
self.__gdf_memory = gdf
return gdf
def __filter_species_in_region(self, gdf: gpd.GeoDataFrame):
return self.proposed_region.get_points_inside(gdf)
class SpeciesInfoExtractor:
"""[A Wrapper to extract relevant information from spescies geodataframes]
"""
def __init__(self, species_geodataframe: gpd.GeoDataFrame) -> None:
self.species_geodataframe = species_geodataframe
def get_coordinates(self,):
coordinates = np.array(
(
np.array(self.species_geodataframe["LATITUDE"]),
np.array(self.species_geodataframe["LONGITUDE"]),
)
).T
return coordinates
def get_longitudes(self,):
coordinates = self.get_coordinates()
return coordinates[:, 1]
def get_latitudes(self,):
coordinates = self.get_coordinates()
return coordinates[:, 0]
|
[
"pandas.DataFrame",
"pandas.notnull",
"easy_sdm.utils.logger.logging.info",
"geopandas.points_from_xy",
"numpy.array",
"requests.get"
] |
[((1336, 1378), 'requests.get', 'requests.get', (['self.base_url'], {'params': 'params'}), '(self.base_url, params=params)\n', (1348, 1378), False, 'import requests\n'), ((1460, 1567), 'easy_sdm.utils.logger.logging.info', 'logger.logging.info', (['f"""API call failed at offset {offset} with a status code of {r.status_code}."""'], {}), "(\n f'API call failed at offset {offset} with a status code of {r.status_code}.'\n )\n", (1479, 1567), False, 'from easy_sdm.utils import logger\n'), ((3360, 3506), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['SCIENTIFIC_NAME', 'LONGITUDE', 'LATITUDE', 'COUNTRY', 'STATE_PROVINCE',\n 'IDENTIFICATION_DATE', 'DAY', 'MONTH', 'YEAR']"}), "(columns=['SCIENTIFIC_NAME', 'LONGITUDE', 'LATITUDE', 'COUNTRY',\n 'STATE_PROVINCE', 'IDENTIFICATION_DATE', 'DAY', 'MONTH', 'YEAR'])\n", (3372, 3506), True, 'import pandas as pd\n'), ((5316, 5342), 'pandas.notnull', 'pd.notnull', (["df['LATITUDE']"], {}), "(df['LATITUDE'])\n", (5326, 5342), True, 'import pandas as pd\n'), ((5360, 5387), 'pandas.notnull', 'pd.notnull', (["df['LONGITUDE']"], {}), "(df['LONGITUDE'])\n", (5370, 5387), True, 'import pandas as pd\n'), ((6584, 6629), 'geopandas.points_from_xy', 'gpd.points_from_xy', (['df.LONGITUDE', 'df.LATITUDE'], {}), '(df.LONGITUDE, df.LATITUDE)\n', (6602, 6629), True, 'import geopandas as gpd\n'), ((7405, 7452), 'numpy.array', 'np.array', (["self.species_geodataframe['LATITUDE']"], {}), "(self.species_geodataframe['LATITUDE'])\n", (7413, 7452), True, 'import numpy as np\n'), ((7470, 7518), 'numpy.array', 'np.array', (["self.species_geodataframe['LONGITUDE']"], {}), "(self.species_geodataframe['LONGITUDE'])\n", (7478, 7518), True, 'import numpy as np\n')]
|
from __future__ import division
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.fpn import Head
from chainercv.links.model.fpn import head_loss_post
from chainercv.links.model.fpn import head_loss_pre
def _random_array(xp, shape):
return xp.array(
np.random.uniform(-1, 1, size=shape), dtype=np.float32)
@testing.parameterize(
{'n_class': 1 + 1},
{'n_class': 5 + 1},
{'n_class': 20 + 1},
)
class TestHead(unittest.TestCase):
def setUp(self):
self.link = Head(n_class=self.n_class, scales=(1 / 2, 1 / 4, 1 / 8))
def _check_call(self):
hs = [
chainer.Variable(_random_array(self.link.xp, (2, 64, 32, 32))),
chainer.Variable(_random_array(self.link.xp, (2, 64, 16, 16))),
chainer.Variable(_random_array(self.link.xp, (2, 64, 8, 8))),
]
rois = [
self.link.xp.array(((4, 1, 6, 3),), dtype=np.float32),
self.link.xp.array(
((0, 1, 2, 3), (5, 4, 10, 6)), dtype=np.float32),
self.link.xp.array(((10, 4, 12, 10),), dtype=np.float32),
]
roi_indices = [
self.link.xp.array((0,), dtype=np.int32),
self.link.xp.array((1, 0), dtype=np.int32),
self.link.xp.array((1,), dtype=np.int32),
]
locs, confs = self.link(hs, rois, roi_indices)
self.assertIsInstance(locs, chainer.Variable)
self.assertIsInstance(locs.array, self.link.xp.ndarray)
self.assertEqual(locs.shape, (4, self.n_class, 4))
self.assertIsInstance(confs, chainer.Variable)
self.assertIsInstance(confs.array, self.link.xp.ndarray)
self.assertEqual(confs.shape, (4, self.n_class))
def test_call_cpu(self):
self._check_call()
@attr.gpu
def test_call_gpu(self):
self.link.to_gpu()
self._check_call()
def _check_distribute(self):
rois = self.link.xp.array((
(0, 0, 10, 10),
(0, 1000, 0, 1000),
(0, 0, 224, 224),
(100, 100, 224, 224),
), dtype=np.float32)
roi_indices = self.link.xp.array((0, 1, 0, 0), dtype=np.int32)
rois, roi_indices = self.link.distribute(rois, roi_indices)
self.assertEqual(len(rois), 3)
self.assertEqual(len(roi_indices), 3)
for l in range(3):
self.assertIsInstance(rois[l], self.link.xp.ndarray)
self.assertIsInstance(roi_indices[l], self.link.xp.ndarray)
self.assertEqual(rois[l].shape[0], roi_indices[l].shape[0])
self.assertEqual(rois[l].shape[1:], (4,))
self.assertEqual(roi_indices[l].shape[1:], ())
self.assertEqual(sum(rois[l].shape[0] for l in range(3)), 4)
def test_distribute_cpu(self):
self._check_distribute()
@attr.gpu
def test_distribute_gpu(self):
self.link.to_gpu()
self._check_distribute()
def _check_decode(self):
rois = [
self.link.xp.array(((4, 1, 6, 3),), dtype=np.float32),
self.link.xp.array(
((0, 1, 2, 3), (5, 4, 10, 6)), dtype=np.float32),
self.link.xp.array(((10, 4, 12, 10),), dtype=np.float32),
]
roi_indices = [
self.link.xp.array((0,), dtype=np.int32),
self.link.xp.array((1, 0), dtype=np.int32),
self.link.xp.array((1,), dtype=np.int32),
]
locs = chainer.Variable(_random_array(
self.link.xp, (4, self.n_class, 4)))
confs = chainer.Variable(_random_array(
self.link.xp, (4, self.n_class)))
bboxes, labels, scores = self.link.decode(
rois, roi_indices,
locs, confs,
(0.4, 0.2), ((100, 100), (200, 200)),
0.5, 0.1)
self.assertEqual(len(bboxes), 2)
self.assertEqual(len(labels), 2)
self.assertEqual(len(scores), 2)
for n in range(2):
self.assertIsInstance(bboxes[n], self.link.xp.ndarray)
self.assertIsInstance(labels[n], self.link.xp.ndarray)
self.assertIsInstance(scores[n], self.link.xp.ndarray)
self.assertEqual(bboxes[n].shape[0], labels[n].shape[0])
self.assertEqual(bboxes[n].shape[0], scores[n].shape[0])
self.assertEqual(bboxes[n].shape[1:], (4,))
self.assertEqual(labels[n].shape[1:], ())
self.assertEqual(scores[n].shape[1:], ())
def test_decode_cpu(self):
self._check_decode()
@attr.gpu
def test_decode_gpu(self):
self.link.to_gpu()
self._check_decode()
class TestHeadLoss(unittest.TestCase):
def _check_head_loss_pre(self, xp):
rois = [
xp.array(((4, 1, 6, 3),), dtype=np.float32),
xp.array(
((0, 1, 2, 3), (5, 4, 10, 6)), dtype=np.float32),
xp.array(((10, 4, 12, 10),), dtype=np.float32),
]
roi_indices = [
xp.array((0,), dtype=np.int32),
xp.array((1, 0), dtype=np.int32),
xp.array((1,), dtype=np.int32),
]
bboxes = [
xp.array(((2, 4, 6, 7), (1, 12, 3, 30)), dtype=np.float32),
xp.array(((10, 2, 12, 12),), dtype=np.float32),
]
labels = [
xp.array((10, 4), dtype=np.float32),
xp.array((1,), dtype=np.float32),
]
rois, roi_indices, gt_locs, gt_labels = head_loss_pre(
rois, roi_indices, (0.1, 0.2), bboxes, labels)
self.assertEqual(len(rois), 3)
self.assertEqual(len(roi_indices), 3)
self.assertEqual(len(gt_locs), 3)
self.assertEqual(len(gt_labels), 3)
for l in range(3):
self.assertIsInstance(rois[l], xp.ndarray)
self.assertIsInstance(roi_indices[l], xp.ndarray)
self.assertIsInstance(gt_locs[l], xp.ndarray)
self.assertIsInstance(gt_labels[l], xp.ndarray)
self.assertEqual(rois[l].shape[0], roi_indices[l].shape[0])
self.assertEqual(rois[l].shape[0], gt_locs[l].shape[0])
self.assertEqual(rois[l].shape[0], gt_labels[l].shape[0])
self.assertEqual(rois[l].shape[1:], (4,))
self.assertEqual(roi_indices[l].shape[1:], ())
self.assertEqual(gt_locs[l].shape[1:], (4,))
self.assertEqual(gt_labels[l].shape[1:], ())
def test_head_loss_pre_cpu(self):
self._check_head_loss_pre(np)
@attr.gpu
def test_head_loss_pre_gpu(self):
import cupy
self._check_head_loss_pre(cupy)
def _check_head_loss_post(self, xp):
locs = chainer.Variable(_random_array(xp, (20, 81, 4)))
confs = chainer.Variable(_random_array(xp, (20, 81)))
roi_indices = [
xp.random.randint(0, 2, size=5).astype(np.int32),
xp.random.randint(0, 2, size=7).astype(np.int32),
xp.random.randint(0, 2, size=8).astype(np.int32),
]
gt_locs = [
_random_array(xp, (5, 4)),
_random_array(xp, (7, 4)),
_random_array(xp, (8, 4)),
]
gt_labels = [
xp.random.randint(0, 80, size=5).astype(np.int32),
xp.random.randint(0, 80, size=7).astype(np.int32),
xp.random.randint(0, 80, size=8).astype(np.int32),
]
loc_loss, conf_loss = head_loss_post(
locs, confs, roi_indices, gt_locs, gt_labels, 2)
self.assertIsInstance(loc_loss, chainer.Variable)
self.assertIsInstance(loc_loss.array, xp.ndarray)
self.assertEqual(loc_loss.shape, ())
self.assertIsInstance(conf_loss, chainer.Variable)
self.assertIsInstance(conf_loss.array, xp.ndarray)
self.assertEqual(conf_loss.shape, ())
def test_head_loss_post_cpu(self):
self._check_head_loss_post(np)
@attr.gpu
def test_head_loss_post_gpu(self):
import cupy
self._check_head_loss_post(cupy)
testing.run_module(__name__, __file__)
|
[
"numpy.random.uniform",
"chainercv.links.model.fpn.head_loss_pre",
"chainercv.links.model.fpn.Head",
"chainer.testing.parameterize",
"chainercv.links.model.fpn.head_loss_post",
"chainer.testing.run_module"
] |
[((414, 499), 'chainer.testing.parameterize', 'testing.parameterize', (["{'n_class': 1 + 1}", "{'n_class': 5 + 1}", "{'n_class': 20 + 1}"], {}), "({'n_class': 1 + 1}, {'n_class': 5 + 1}, {'n_class': 20 +\n 1})\n", (434, 499), False, 'from chainer import testing\n'), ((8017, 8055), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (8035, 8055), False, 'from chainer import testing\n'), ((355, 391), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'shape'}), '(-1, 1, size=shape)\n', (372, 391), True, 'import numpy as np\n'), ((588, 644), 'chainercv.links.model.fpn.Head', 'Head', ([], {'n_class': 'self.n_class', 'scales': '(1 / 2, 1 / 4, 1 / 8)'}), '(n_class=self.n_class, scales=(1 / 2, 1 / 4, 1 / 8))\n', (592, 644), False, 'from chainercv.links.model.fpn import Head\n'), ((5493, 5553), 'chainercv.links.model.fpn.head_loss_pre', 'head_loss_pre', (['rois', 'roi_indices', '(0.1, 0.2)', 'bboxes', 'labels'], {}), '(rois, roi_indices, (0.1, 0.2), bboxes, labels)\n', (5506, 5553), False, 'from chainercv.links.model.fpn import head_loss_pre\n'), ((7417, 7480), 'chainercv.links.model.fpn.head_loss_post', 'head_loss_post', (['locs', 'confs', 'roi_indices', 'gt_locs', 'gt_labels', '(2)'], {}), '(locs, confs, roi_indices, gt_locs, gt_labels, 2)\n', (7431, 7480), False, 'from chainercv.links.model.fpn import head_loss_post\n')]
|
#!/usr/bin/env python
"""
Module for reading STM files
Expected file format is derived from http://www1.icsi.berkeley.edu/Speech/docs/sctk-1.2/infmts.htm#stm_fmt_name_0
This expects a segment from class derived in convert_text
"""
from asrtoolkit.data_structures.segment import segment
def format_segment(seg):
"""
Formats a segment assuming it's an instance of class segment with elements
audiofile, channel, speaker, start and stop times, label, and text
"""
return " ".join(seg.__dict__[_] for _ in ('audiofile', 'channel', 'speaker', 'start', 'stop', 'label', 'text'))
def parse_line(line):
" parse a single line of an stm file"
data = line.strip().split()
seg = None
if len(data) > 6:
audiofile, channel, speaker, start, stop, label = data[:6]
text = " ".join(data[6:])
seg = segment(
{
'audiofile': audiofile,
'channel': channel,
'speaker': speaker,
'start': start,
'stop': stop,
'label': label,
'text': text
}
)
return seg if seg and seg.validate() else None
def read_file(file_name):
"""
Reads an STM file, skipping any gap lines
"""
segments = []
with open(file_name, encoding="utf-8") as f:
for line in f:
seg = parse_line(line)
if seg is not None:
segments.append(seg)
return segments
|
[
"asrtoolkit.data_structures.segment.segment"
] |
[((824, 961), 'asrtoolkit.data_structures.segment.segment', 'segment', (["{'audiofile': audiofile, 'channel': channel, 'speaker': speaker, 'start':\n start, 'stop': stop, 'label': label, 'text': text}"], {}), "({'audiofile': audiofile, 'channel': channel, 'speaker': speaker,\n 'start': start, 'stop': stop, 'label': label, 'text': text})\n", (831, 961), False, 'from asrtoolkit.data_structures.segment import segment\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 5 14:29:04 2020
@author: ptrda
"""
import os
os.chdir(os.path.dirname(os.path.abspath('../tests')))
import sys
sys.path.append(os.path.abspath('../Team-4-Code/src/UserStories'))
sys.path.append(os.path.abspath('../Team-4-Code/src'))
cwd = os.getcwd()
os.chdir(os.path.join(cwd, 'seeds'))
from us23 import us23
import Project02
import unittest
class us23_test(unittest.TestCase):
def test1(self):
individuals = Project02.createIndividualsDataFrame('seed.ged')
df = individuals[0:3]
df = df.append(individuals.iloc[0]).reset_index(drop = True)
res = "not unique"
self.assertEqual(us23(df), res)
def test2(self):
individuals = Project02.createIndividualsDataFrame('seed.ged')
df = individuals[0:5]
res = "unique"
self.assertEqual(us23(df), res)
def test3(self):
individuals = Project02.createIndividualsDataFrame('seed.ged')
df = individuals[0:7]
df = df.append(individuals.iloc[3]).reset_index(drop = True)
res = "not unique"
self.assertEqual(us23(df), res)
def test4(self):
individuals = Project02.createIndividualsDataFrame('seed.ged')
df = individuals[0:9]
df = df.append(individuals.iloc[11]).reset_index(drop = True)
res = "unique"
self.assertEqual(us23(df), res)
def test5(self):
individuals = Project02.createIndividualsDataFrame('seed.ged')
df = individuals[0:11]
df = df.append(individuals.iloc[4]).reset_index(drop = True)
res = "not unique"
self.assertEqual(us23(df), res)
unittest.main(argv=['first-arg-is-ignored'], exit=False)
|
[
"unittest.main",
"Project02.createIndividualsDataFrame",
"os.path.abspath",
"us23.us23",
"os.getcwd",
"os.path.join"
] |
[((290, 301), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (299, 301), False, 'import os\n'), ((1683, 1739), 'unittest.main', 'unittest.main', ([], {'argv': "['first-arg-is-ignored']", 'exit': '(False)'}), "(argv=['first-arg-is-ignored'], exit=False)\n", (1696, 1739), False, 'import unittest\n'), ((177, 226), 'os.path.abspath', 'os.path.abspath', (['"""../Team-4-Code/src/UserStories"""'], {}), "('../Team-4-Code/src/UserStories')\n", (192, 226), False, 'import os\n'), ((244, 281), 'os.path.abspath', 'os.path.abspath', (['"""../Team-4-Code/src"""'], {}), "('../Team-4-Code/src')\n", (259, 281), False, 'import os\n'), ((311, 337), 'os.path.join', 'os.path.join', (['cwd', '"""seeds"""'], {}), "(cwd, 'seeds')\n", (323, 337), False, 'import os\n'), ((119, 146), 'os.path.abspath', 'os.path.abspath', (['"""../tests"""'], {}), "('../tests')\n", (134, 146), False, 'import os\n'), ((478, 526), 'Project02.createIndividualsDataFrame', 'Project02.createIndividualsDataFrame', (['"""seed.ged"""'], {}), "('seed.ged')\n", (514, 526), False, 'import Project02\n'), ((745, 793), 'Project02.createIndividualsDataFrame', 'Project02.createIndividualsDataFrame', (['"""seed.ged"""'], {}), "('seed.ged')\n", (781, 793), False, 'import Project02\n'), ((935, 983), 'Project02.createIndividualsDataFrame', 'Project02.createIndividualsDataFrame', (['"""seed.ged"""'], {}), "('seed.ged')\n", (971, 983), False, 'import Project02\n'), ((1202, 1250), 'Project02.createIndividualsDataFrame', 'Project02.createIndividualsDataFrame', (['"""seed.ged"""'], {}), "('seed.ged')\n", (1238, 1250), False, 'import Project02\n'), ((1466, 1514), 'Project02.createIndividualsDataFrame', 'Project02.createIndividualsDataFrame', (['"""seed.ged"""'], {}), "('seed.ged')\n", (1502, 1514), False, 'import Project02\n'), ((678, 686), 'us23.us23', 'us23', (['df'], {}), '(df)\n', (682, 686), False, 'from us23 import us23\n'), ((872, 880), 'us23.us23', 'us23', (['df'], {}), '(df)\n', (876, 880), False, 'from us23 import us23\n'), ((1135, 1143), 'us23.us23', 'us23', (['df'], {}), '(df)\n', (1139, 1143), False, 'from us23 import us23\n'), ((1399, 1407), 'us23.us23', 'us23', (['df'], {}), '(df)\n', (1403, 1407), False, 'from us23 import us23\n'), ((1667, 1675), 'us23.us23', 'us23', (['df'], {}), '(df)\n', (1671, 1675), False, 'from us23 import us23\n')]
|
from json import loads, dumps
from random import randint
import stanza
import praw
import re
import os
from urllib.parse import quote
from stanza import Pipeline
def log_into_reddit():
reddit = praw.Reddit('bot1')
print(reddit.user.me())
return reddit
def get_posts_replied_to():
# Have we run this code before? If not, create an empty list
if not os.path.isfile("posts_replied_to.txt"):
posts_replied_to = []
# If we have run the code before, load the list of posts we have replied to
else:
# Read the file into a list and remove any empty values
with open("posts_replied_to.txt", "r") as f:
posts_replied_to = f.read()
posts_replied_to = posts_replied_to.split("\n")
posts_replied_to = list(filter(None, posts_replied_to))
return posts_replied_to
# variable
LOCATIONS = [
" Sales of Hitler's political autobiography \"<NAME>\"sometimes referred to as the bible of the Nazi Party, made him a millionaire. ",
"Hitler had dreams of playing a musical instrument. He had short but unsuccessful lessons in piano and violin and also dabbled in the flute and harmonica. In the end, he settled for whistling, which he did frequently.",
"Though he shunned meat, Hitler was a voracious ‘sweet tooth’, consuming large amounts of cake, pastries, chocolate and sugar. He sometimes took as many as five teaspoons of sugar in his tea.",
"When the regime came into power in 1933, they passed a comprehensive set of laws for animal protection. When all of these were in place, Hitler said something about animal cruelty. With the new Reich, there will be no grounds for any form of animal abuse and cruelty.",
"It’s already a known fact that during Hitler’s reign, their main objective was to free the world of Jews. However, Hilter unknowingly had a Jewish chauffeur. <NAME> was also his friend and personal chauffeur. When it got known to many, <NAME> was ready to target Maurice for expulsion. Hitler came to the rescue and made an exception for him and his brothers. He called them “honorary Aryans”.",
"In a pre-cursor to modern stances and laws in this area, the Nazi party were the first people to ban smoking. Nazi doctors were the first to establish a link between smoking and lung cancer which meant that a fierce anti-smoking campaign began under Hitler. The Nazi leadership strongly condemned smoking and advised the general population to give it up.",
"During the Second World War, German doctors came up with a methamphetamine based experimental drug to increase soldier’s performance. This was very successful in trials when tested and made the troops super tough. It was found that they could march 55 miles without any tiredness which is pretty amazing. The plan was to roll it out to all soldiers serving in the war but the German’s lost before it could be put into place."]
ANALYTICS_JSON = "posts_analytics.json"
def get_posts_analytics():
if not os.path.isfile(ANALYTICS_JSON):
posts_analytics = []
# If we have run the code before, load the list of posts we have replied to
else:
# Read the file into a list and remove any empty values
with open(ANALYTICS_JSON, "r") as f:
posts_analytics = loads(f.read())
return posts_analytics
def initiate_nlp() -> Pipeline:
stanza.download('en')
nlp_pipe = stanza.Pipeline('en', processors="tokenize,pos")
return nlp_pipe
def fetch_reddit_posts(selected_subreddit: str, limit: int) -> list:
subreddit = reddit.subreddit(selected_subreddit)
return subreddit.hot(limit=limit)
def process_post(post, nlp_pipe: Pipeline):
doc = nlp_pipe(post.title)
keywords = get_keywords_from_post(doc.sentences)
# write all keywords in lower case
keywords = [keyword.lower() for keyword in keywords]
# remove all duplicates and keep original order
unique_keywords = list(dict.fromkeys(keywords))
print(" ".join(unique_keywords))
print(f"{post.title}")
print(f"https://www.reddit.com/r/fakehistoryporn/comments/{post.id}")
URL_keywords = quote(' '.join(unique_keywords))
print(f"https://en.wikipedia.org/w/index.php?search={URL_keywords}")
return post.id, post.title, unique_keywords
def filter_analytics(posts, posts_analytics):
post_ids = [post_id for post_id, _, _ in posts_analytics]
filtered_posts = []
for post in posts:
if post.id in post_ids:
continue
filtered_posts.append(post)
return filtered_posts
def get_keywords_from_post(sentences: list):
keywords = []
for sentence in sentences:
for word in sentence.words:
if word.upos not in ['NOUN', 'VERB', 'NUM', 'PROPN']:
continue
keywords.append(word.text)
return keywords
def filter_posts(posts, posts_replied_to):
filtered_posts = []
for post in posts:
if post.id in posts_replied_to:
continue
if not re.search("(nazi|hitler|hilter|german)", post.title, re.IGNORECASE):
continue
filtered_posts.append(post)
return filtered_posts
def reply_to_post(post):
# Reply to the post
randomnumber = randint(0, len(LOCATIONS))
post.reply(f"Did you know that: {LOCATIONS[randomnumber]}")
print(f"Bot replying to: {post.title} https://www.reddit.com/r/fakehistoryporn/comments/{post.id}")
def store_line(f, line):
f.write(line + "\n")
if __name__ == '__main__':
# log into reddit
reddit = log_into_reddit()
# check posts replied to
posts_replied_to = get_posts_replied_to()
# initiate nlp
nlp_pipe = initiate_nlp()
# create posts_analytics
posts_analytics = get_posts_analytics()
# fetch reddit posts
posts = fetch_reddit_posts("fakehistoryporn", 10)
analytics_filtered = filter_analytics(posts, posts_analytics)
# read submission titles
for post in analytics_filtered:
nlp_data = process_post(post, nlp_pipe)
posts_analytics.append(nlp_data)
# store nlp doc in posts_analytics
with open(ANALYTICS_JSON, "w") as f:
f.write(dumps(posts_analytics))
# filter for keywords
filtered_posts = filter_posts(posts, posts_replied_to)
# respond to filtered posts
with open("posts_replied_to.txt", "a") as f:
for post in filtered_posts:
reply_to_post(post)
# store post_id in posts_replied_to
store_line(f, post.id)
|
[
"json.dumps",
"os.path.isfile",
"stanza.Pipeline",
"praw.Reddit",
"re.search",
"stanza.download"
] |
[((200, 219), 'praw.Reddit', 'praw.Reddit', (['"""bot1"""'], {}), "('bot1')\n", (211, 219), False, 'import praw\n'), ((3354, 3375), 'stanza.download', 'stanza.download', (['"""en"""'], {}), "('en')\n", (3369, 3375), False, 'import stanza\n'), ((3391, 3439), 'stanza.Pipeline', 'stanza.Pipeline', (['"""en"""'], {'processors': '"""tokenize,pos"""'}), "('en', processors='tokenize,pos')\n", (3406, 3439), False, 'import stanza\n'), ((371, 409), 'os.path.isfile', 'os.path.isfile', (['"""posts_replied_to.txt"""'], {}), "('posts_replied_to.txt')\n", (385, 409), False, 'import os\n'), ((2983, 3013), 'os.path.isfile', 'os.path.isfile', (['ANALYTICS_JSON'], {}), '(ANALYTICS_JSON)\n', (2997, 3013), False, 'import os\n'), ((4981, 5048), 're.search', 're.search', (['"""(nazi|hitler|hilter|german)"""', 'post.title', 're.IGNORECASE'], {}), "('(nazi|hitler|hilter|german)', post.title, re.IGNORECASE)\n", (4990, 5048), False, 'import re\n'), ((6121, 6143), 'json.dumps', 'dumps', (['posts_analytics'], {}), '(posts_analytics)\n', (6126, 6143), False, 'from json import loads, dumps\n')]
|
import pytest
from sys import version_info
import fstr
def test_basic():
template = fstr("{x} + {y} = {x + y}", x=1)
assert template.format(y=2) == "1 + 2 = 3"
assert template.format(y=3) == "1 + 3 = 4"
def test_basic_format_language():
template = fstr("{x!r} + {y!r} = {x + y!r}", x="a")
assert template.format(y="b") == "'a' + 'b' = 'ab'"
assert template.format(y="c") == "'a' + 'c' = 'ac'"
_A_GLOBAL = 1
def test_simple_fstr_evaluate():
a_local = 2 # noqa: F841
assert fstr("{_A_GLOBAL} {a_local}").evaluate() == "1 2"
def test_format_language_with_inner_fstr():
template = fstr("{x:{width}}")
assert template.format(x=10, width=3) == " 10"
assert template.format(x=3, width=4) == " 3"
template = fstr("{x:{width}.{precision}}")
assert template.format(x=1.2345, width=4, precision=2) == " 1.2"
def test_dict():
d = {'"': "double-quote", "'": "single-quote", "foo": "bar"}
assert fstr("""{d["'"]}""").format(d=d) == "single-quote"
assert fstr("""{d['"']}""").format(d=d) == "double-quote"
assert fstr('{d["foo"]}').format(d=d) == "bar"
assert fstr("{d['foo']}").format(d=d) == "bar"
def test_format_with_function():
def add(x, y):
return x + y
template = fstr("{add(x, y)}", add=add)
assert template.format(x=1, y=2) == "3"
def test_even_double_brace_replacement():
template = fstr("{{}}")
assert template.format() == "{}"
def test_odd_double_brace_replacement():
template = fstr("{{{x}}}")
assert template.format(x=1) == "{1}"
def test_trailing_and_leading_space():
assert fstr("{ 1 + 2}").format() == "3"
assert fstr("{1 + 2 }").format() == "3"
assert fstr("{ 1 + 2 }").format() == "3"
def dict_inside_braces_with_padding():
template = fstr("{ {x: y} }", x="a")
assert template.format(y=1) == "{'a': 1}"
def test_hash_in_string():
# These aren't comments, since they're in strings.
d = {"#": "hash"}
assert fstr("{'#'}").format() == "#"
assert fstr("{d['#']}").format(d=d) == "hash"
@pytest.mark.parametrize("brace", "])}")
def test_unclosed_braces(brace):
with pytest.raises(SyntaxError):
fstr("{%s}" % brace).format()
def test_many_expressions():
context = {"x": "X", "width": 1}
def make_template(n, extra=""):
return fstr(("{x} " + extra) * n)
for n in range(250, 260):
make_template(n).format(**context)
# Test around 256.
for i in range(250, 260):
actual = make_template(i).format(**context)
expected = (context["x"] + " ") * i
assert actual == expected
actual = make_template(250, "{x:{width}} ").format(**context)
expected = (context["x"] + " ") * 500
assert actual == expected
# Test lots of expressions and constants.
assert fstr("{1} {'x'} {'y'} " * 1000).format() == "1 x y " * 1000
_format_specifier_width_precision_templates = [
"result: {value:{width}.{precision}}",
"result: {value:{width!r}.{precision}}",
"result: {value:{width:0}.{precision:1}}",
"result: {value:{1}{0:0}.{precision:1}}",
"result: {value:{ 1}{ 0:0}.{ precision:1}}",
]
_format_specifier_expression_expecteds = [
"result: 12.35",
"result: 12.35",
"result: 12.35",
"result: 12.35",
"result: 12.35",
" 0xa",
" 0xa",
" -0xa",
" -0xa",
" 0xa",
]
@pytest.mark.parametrize("template", _format_specifier_width_precision_templates)
def test_format_width_precision_specifier_expressions(template):
context = {"width": 10, "precision": 4, "value": 12.34567}
assert fstr(template).format(**context) == "result: 12.35"
_format_hex_specifier_templates = [
(10, "{value:#{1}0x}"),
(10, "{value:{'#'}1{0}{'x'}}"),
(-10, "{value:-{'#'}1{0}x}"),
(-10, "{value:{'-'}#{1}0{'x'}}"),
(10, "{value:#{3 != {4:5} and width}x}"),
]
@pytest.mark.parametrize("value, template", _format_hex_specifier_templates)
def test_format_hex_specifier_expressions(value, template):
expected = " -0xa" if value < 0 else " 0xa"
assert fstr(template).format(value=value, width=10) == expected
_invalid_format_specifier_templates = ["{'s'!r{':10'}}", "{4:{/5}}", "{'s'!{'r'}}"]
@pytest.mark.parametrize("template", _invalid_format_specifier_templates)
def test_invalid_format_specifier_expressions(template):
with pytest.raises(SyntaxError):
fstr(template).format()
def test_side_effect_order():
class X:
def __init__(self):
self.i = 0
def __format__(self, spec):
self.i += 1
return str(self.i)
fstr("{x} {x}").format(x=X()) == "1 2"
_bad_missing_expressions = [
"{}",
"{ '" " {} ",
"{!r}",
"{ !r}",
"{10:{ }}",
" { } ",
# The Python parser ignores also the following
# whitespace characters in additional to a space.
"{\t\f\r\n}",
# Catch the empty expression before the
# invalid conversion.
"{!x}",
"{ !xr}",
"{!x:}",
"{!x:a}",
"{ !xr:}",
"{ !xr:a}",
"{!}",
"{:}",
# We find the empty expression before the
# missing closing brace.
"{!",
"{!s:",
"{:",
"{:x",
"{\xa0}",
]
@pytest.mark.parametrize("template", _bad_missing_expressions)
def test_missing_expression(template):
with pytest.raises(SyntaxError):
fstr(template).format()
_bad_parens_in_expressions = ["{,}", "{,}", "{3)+(4}", "{\n}"]
@pytest.mark.parametrize("template", _bad_parens_in_expressions)
def test_bad_parens_in_expressions(template):
with pytest.raises(SyntaxError):
fstr(template).format()
_backlashes_in_string_part = [
("\t", "\t"),
(r"\t", "\\t"),
("{2}\t", "2\t"),
("{2}\t{3}", "2\t3"),
("\t{3}", "\t3"),
("\u0394", "\u0394"),
(r"\u0394", "\\u0394"),
(r"\u0394", "\\u0394"),
("{2}\u0394", "2\u0394"),
("{2}\u0394{3}", "2\u03943"),
("\u0394{3}", "\u03943"),
("\x20", " "),
(r"\x20", "\\x20"),
(r"\x20", "\\x20"),
("{2}\x20", "2 "),
("{2}\x20{3}", "2 3"),
("\x20{3}", " 3"),
("2\x20", "2 "),
("2\x203", "2 3"),
("\x203", " 3"),
("\\{6*7}", "\\42"),
(r"\{6*7}", "\\42"),
]
if version_info >= (3, 0):
_backlashes_in_string_part.extend(
[
("\U00000394", "\u0394"),
(r"\U00000394", "\\U00000394"),
(r"\U00000394", "\\U00000394"),
("{2}\U00000394", "2\u0394"),
("{2}\U00000394{3}", "2\u03943"),
("\U00000394{3}", "\u03943"),
("\N{GREEK CAPITAL LETTER DELTA}", "\u0394"),
("{2}\N{GREEK CAPITAL LETTER DELTA}", "2\u0394"),
("{2}\N{GREEK CAPITAL LETTER DELTA}{3}", "2\u03943"),
("\N{GREEK CAPITAL LETTER DELTA}{3}", "\u03943"),
("2\N{GREEK CAPITAL LETTER DELTA}", "2\u0394"),
("2\N{GREEK CAPITAL LETTER DELTA}3", "2\u03943"),
("\N{GREEK CAPITAL LETTER DELTA}3", "\u03943"),
]
)
@pytest.mark.parametrize("template, expected", _backlashes_in_string_part)
def test_backslashes_in_string_part(template, expected):
assert fstr(template).format() == expected
_backslashes_in_expression = [r"{\}", r"{\'a\'}", r"{\t3}", "{\n}"]
@pytest.mark.parametrize("template", _backslashes_in_expression)
def test_no_backslashes_in_expression_part(template):
with pytest.raises(SyntaxError):
fstr(template).format()
def test_newlines_in_expressions():
assert fstr("{0}").format() == "0"
assert (
fstr(
"""{3+
4}"""
).format()
== "7" # noqa: W503
)
_empty_format_specifiers = [
("{x}", "test"),
("{x:}", "test"),
("{x!s:}", "test"),
("{x!r:}", "'test'"),
]
@pytest.mark.parametrize("template, expected", _empty_format_specifiers)
def test_empty_format_specifier(template, expected):
assert fstr(template).format(x="test") == expected
_bad_mismatched_braces = [
"{{}",
"{{}}}",
"}",
"x}",
"x}x",
"{3:}>10}",
"{3:}}>10}",
"{3:{{>10}",
"{3",
"{3!",
"{3:",
"{3!s",
"{3!s:",
"{3!s:3",
"x{",
"x{x",
"{x",
"{3:s",
"{{{",
"{{}}{",
"{",
]
@pytest.mark.parametrize("template", _bad_mismatched_braces)
def test_bad_mismatched_braces(template):
with pytest.raises(SyntaxError):
fstr(template).format()
_ok_mismatched_braces = [("{'{'}", "{"), ("{'}'}", "}")]
@pytest.mark.parametrize("template, expected", _ok_mismatched_braces)
def test_ok_mistmatched_braces(template, expected):
assert fstr(template).format() == expected
_ok_lambdas = [
("{(lambda y:x*y)('8')!r}", "'88888'"),
("{(lambda y:x*y)('8')!r:10}", "'88888' "),
("{(lambda y:x*y)('8'):10}", "88888 "),
]
@pytest.mark.parametrize("template, expected", _ok_lambdas)
def test_lambda(template, expected):
assert fstr(template, x=5).format() == expected
_triple_quoted_strings = [
("{'''x'''}", "x"),
("{'''eric's'''}", "eric's"),
('{"x" """eric"s""" "y"}', 'xeric"sy'),
('{"x" """eric"s"""}', 'xeric"s'),
('{"""eric"s""" "y"}', 'eric"sy'),
('{"""x""" """eric"s""" "y"}', 'xeric"sy'),
('{"""x""" """eric"s""" """y"""}', 'xeric"sy'),
('{r"""x""" """eric"s""" """y"""}', 'xeric"sy'),
]
@pytest.mark.parametrize("template, expected", _triple_quoted_strings)
def test_expressions_with_triple_quoted_strings(template, expected):
assert fstr(template).format() == expected
def test_missing_variable():
with pytest.raises(NameError):
fstr("v:{value}").format()
def test_missing_format_spec():
class Obj:
def __format__(self, spec):
if not spec:
return "*"
return spec
assert fstr("{Obj():x}").format(Obj=Obj) == "x"
assert fstr("{Obj()}").format(Obj=Obj) == "*"
assert fstr("{Obj():}").format(Obj=Obj) == "*"
assert fstr("{3:}").format() == "3"
assert fstr("{3!s:}").format() == "3"
def test_call():
def foo(x):
return "x=" + str(x)
assert fstr("{foo(10)}").format(foo=foo) == "x=10"
def test_leading_trailing_spaces():
assert fstr("{ 3}").format() == "3"
assert fstr("{ 3}").format() == "3"
assert fstr("{3 }").format() == "3"
assert fstr("{3 }").format() == "3"
assert fstr("expr={ {x: y for x, y in [(1, 2), ]} }").format() == "expr={1: 2}"
assert fstr("expr={ {x: y for x, y in [(1, 2), ]}}").format() == "expr={1: 2}"
def test_not_equal():
# There's a special test for this because there's a special
# case in the f-string parser to look for != as not ending an
# expression. Normally it would, while looking for !s or !r.
assert fstr("{3!=4}").format() == "True"
assert fstr("{3!=4:}").format() == "True"
assert fstr("{3!=4!s}").format() == "True"
assert fstr("{3!=4!s:.3}").format() == "Tru"
def test_conversions():
assert fstr("{3.14:10.10}").format() == " 3.14"
assert fstr("{3.14!s:10.10}").format() == "3.14 "
assert fstr("{3.14!r:10.10}").format() == "3.14 "
if version_info >= (3, 0):
assert fstr("{3.14!a:10.10}").format() == "3.14 "
assert fstr('{"a"}').format() == "a"
assert fstr('{"a"!r}').format() == "'a'"
if version_info >= (3, 0):
assert fstr('{"a"!a}').format() == "'a'"
# Not a conversion.
assert fstr('{"a!r"}').format() == "a!r"
# Not a conversion, but show that ! is allowed in a format spec.
assert fstr("{3.14:!<10.10}").format() == "3.14!!!!!!"
bad_conversions = [
"{3!g}"
"{3!A}"
"{3!3}"
"{3!G}"
"{3!!}"
"{3!:}"
"{3! s}" # no space before conversion char
"{x!s{y}}",
"{3!ss}",
"{3!ss:}",
"{3!ss:s}",
]
for bad in bad_conversions:
with pytest.raises(SyntaxError):
fstr(bad).format()
_invalid_expressions = ["{a[4)}", "{a(4]}"]
@pytest.mark.parametrize("invalid", _invalid_expressions)
def test_invalid_expressions(invalid):
with pytest.raises(SyntaxError):
fstr(invalid).format()
if version_info < (3, 0):
_causes_errors = [("{1000:j}", SyntaxError)]
elif version_info < (3, 6):
_causes_errors = [
("{(lambda: 0):x}", TypeError),
("{(0,):x}", TypeError),
("{1000:j}", SyntaxError),
]
else:
_causes_errors = [
("{(lambda: 0):x}", TypeError),
("{(0,):x}", TypeError),
("{1000:j}", ValueError),
]
@pytest.mark.parametrize("bad, etype", _causes_errors)
def test_errors(bad, etype):
with pytest.raises(etype):
fstr(bad).format()
|
[
"pytest.mark.parametrize",
"pytest.raises",
"fstr"
] |
[((2072, 2111), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""brace"""', '"""])}"""'], {}), "('brace', '])}')\n", (2095, 2111), False, 'import pytest\n'), ((3433, 3518), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""template"""', '_format_specifier_width_precision_templates'], {}), "('template', _format_specifier_width_precision_templates\n )\n", (3456, 3518), False, 'import pytest\n'), ((3935, 4010), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value, template"""', '_format_hex_specifier_templates'], {}), "('value, template', _format_hex_specifier_templates)\n", (3958, 4010), False, 'import pytest\n'), ((4287, 4359), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""template"""', '_invalid_format_specifier_templates'], {}), "('template', _invalid_format_specifier_templates)\n", (4310, 4359), False, 'import pytest\n'), ((5269, 5330), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""template"""', '_bad_missing_expressions'], {}), "('template', _bad_missing_expressions)\n", (5292, 5330), False, 'import pytest\n'), ((5507, 5570), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""template"""', '_bad_parens_in_expressions'], {}), "('template', _bad_parens_in_expressions)\n", (5530, 5570), False, 'import pytest\n'), ((7042, 7115), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""template, expected"""', '_backlashes_in_string_part'], {}), "('template, expected', _backlashes_in_string_part)\n", (7065, 7115), False, 'import pytest\n'), ((7293, 7356), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""template"""', '_backslashes_in_expression'], {}), "('template', _backslashes_in_expression)\n", (7316, 7356), False, 'import pytest\n'), ((7792, 7863), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""template, expected"""', '_empty_format_specifiers'], {}), "('template, expected', _empty_format_specifiers)\n", (7815, 7863), False, 'import pytest\n'), ((8257, 8316), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""template"""', '_bad_mismatched_braces'], {}), "('template', _bad_mismatched_braces)\n", (8280, 8316), False, 'import pytest\n'), ((8490, 8558), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""template, expected"""', '_ok_mismatched_braces'], {}), "('template, expected', _ok_mismatched_braces)\n", (8513, 8558), False, 'import pytest\n'), ((8823, 8881), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""template, expected"""', '_ok_lambdas'], {}), "('template, expected', _ok_lambdas)\n", (8846, 8881), False, 'import pytest\n'), ((9338, 9407), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""template, expected"""', '_triple_quoted_strings'], {}), "('template, expected', _triple_quoted_strings)\n", (9361, 9407), False, 'import pytest\n'), ((11989, 12045), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""invalid"""', '_invalid_expressions'], {}), "('invalid', _invalid_expressions)\n", (12012, 12045), False, 'import pytest\n'), ((12540, 12593), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bad, etype"""', '_causes_errors'], {}), "('bad, etype', _causes_errors)\n", (12563, 12593), False, 'import pytest\n'), ((91, 123), 'fstr', 'fstr', (['"""{x} + {y} = {x + y}"""'], {'x': '(1)'}), "('{x} + {y} = {x + y}', x=1)\n", (95, 123), False, 'import fstr\n'), ((269, 309), 'fstr', 'fstr', (['"""{x!r} + {y!r} = {x + y!r}"""'], {'x': '"""a"""'}), "('{x!r} + {y!r} = {x + y!r}', x='a')\n", (273, 309), False, 'import fstr\n'), ((625, 644), 'fstr', 'fstr', (['"""{x:{width}}"""'], {}), "('{x:{width}}')\n", (629, 644), False, 'import fstr\n'), ((763, 794), 'fstr', 'fstr', (['"""{x:{width}.{precision}}"""'], {}), "('{x:{width}.{precision}}')\n", (767, 794), False, 'import fstr\n'), ((1265, 1293), 'fstr', 'fstr', (['"""{add(x, y)}"""'], {'add': 'add'}), "('{add(x, y)}', add=add)\n", (1269, 1293), False, 'import fstr\n'), ((1397, 1409), 'fstr', 'fstr', (['"""{{}}"""'], {}), "('{{}}')\n", (1401, 1409), False, 'import fstr\n'), ((1505, 1520), 'fstr', 'fstr', (['"""{{{x}}}"""'], {}), "('{{{x}}}')\n", (1509, 1520), False, 'import fstr\n'), ((1800, 1825), 'fstr', 'fstr', (['"""{ {x: y} }"""'], {'x': '"""a"""'}), "('{ {x: y} }', x='a')\n", (1804, 1825), False, 'import fstr\n'), ((2154, 2180), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {}), '(SyntaxError)\n', (2167, 2180), False, 'import pytest\n'), ((2340, 2366), 'fstr', 'fstr', (["(('{x} ' + extra) * n)"], {}), "(('{x} ' + extra) * n)\n", (2344, 2366), False, 'import fstr\n'), ((4426, 4452), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {}), '(SyntaxError)\n', (4439, 4452), False, 'import pytest\n'), ((5379, 5405), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {}), '(SyntaxError)\n', (5392, 5405), False, 'import pytest\n'), ((5626, 5652), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {}), '(SyntaxError)\n', (5639, 5652), False, 'import pytest\n'), ((7420, 7446), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {}), '(SyntaxError)\n', (7433, 7446), False, 'import pytest\n'), ((8368, 8394), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {}), '(SyntaxError)\n', (8381, 8394), False, 'import pytest\n'), ((9564, 9588), 'pytest.raises', 'pytest.raises', (['NameError'], {}), '(NameError)\n', (9577, 9588), False, 'import pytest\n'), ((12094, 12120), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {}), '(SyntaxError)\n', (12107, 12120), False, 'import pytest\n'), ((12632, 12652), 'pytest.raises', 'pytest.raises', (['etype'], {}), '(etype)\n', (12645, 12652), False, 'import pytest\n'), ((11881, 11907), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {}), '(SyntaxError)\n', (11894, 11907), False, 'import pytest\n'), ((514, 543), 'fstr', 'fstr', (['"""{_A_GLOBAL} {a_local}"""'], {}), "('{_A_GLOBAL} {a_local}')\n", (518, 543), False, 'import fstr\n'), ((959, 976), 'fstr', 'fstr', (['"""{d["\'"]}"""'], {}), '(\'{d["\\\'"]}\')\n', (963, 976), False, 'import fstr\n'), ((1021, 1039), 'fstr', 'fstr', (['"""{d[\'"\']}"""'], {}), '(\'{d[\\\'"\\\']}\')\n', (1025, 1039), False, 'import fstr\n'), ((1083, 1101), 'fstr', 'fstr', (['"""{d["foo"]}"""'], {}), '(\'{d["foo"]}\')\n', (1087, 1101), False, 'import fstr\n'), ((1134, 1152), 'fstr', 'fstr', (['"""{d[\'foo\']}"""'], {}), '("{d[\'foo\']}")\n', (1138, 1152), False, 'import fstr\n'), ((1614, 1632), 'fstr', 'fstr', (['"""{ 1 + 2}"""'], {}), "('{ 1 + 2}')\n", (1618, 1632), False, 'import fstr\n'), ((1660, 1678), 'fstr', 'fstr', (['"""{1 + 2 }"""'], {}), "('{1 + 2 }')\n", (1664, 1678), False, 'import fstr\n'), ((1706, 1727), 'fstr', 'fstr', (['"""{ 1 + 2 }"""'], {}), "('{ 1 + 2 }')\n", (1710, 1727), False, 'import fstr\n'), ((1989, 2002), 'fstr', 'fstr', (['"""{\'#\'}"""'], {}), '("{\'#\'}")\n', (1993, 2002), False, 'import fstr\n'), ((2030, 2046), 'fstr', 'fstr', (['"""{d[\'#\']}"""'], {}), '("{d[\'#\']}")\n', (2034, 2046), False, 'import fstr\n'), ((2190, 2210), 'fstr', 'fstr', (["('{%s}' % brace)"], {}), "('{%s}' % brace)\n", (2194, 2210), False, 'import fstr\n'), ((2822, 2853), 'fstr', 'fstr', (['("{1} {\'x\'} {\'y\'} " * 1000)'], {}), '("{1} {\'x\'} {\'y\'} " * 1000)\n', (2826, 2853), False, 'import fstr\n'), ((3653, 3667), 'fstr', 'fstr', (['template'], {}), '(template)\n', (3657, 3667), False, 'import fstr\n'), ((4141, 4155), 'fstr', 'fstr', (['template'], {}), '(template)\n', (4145, 4155), False, 'import fstr\n'), ((4462, 4476), 'fstr', 'fstr', (['template'], {}), '(template)\n', (4466, 4476), False, 'import fstr\n'), ((4679, 4694), 'fstr', 'fstr', (['"""{x} {x}"""'], {}), "('{x} {x}')\n", (4683, 4694), False, 'import fstr\n'), ((5415, 5429), 'fstr', 'fstr', (['template'], {}), '(template)\n', (5419, 5429), False, 'import fstr\n'), ((5662, 5676), 'fstr', 'fstr', (['template'], {}), '(template)\n', (5666, 5676), False, 'import fstr\n'), ((7184, 7198), 'fstr', 'fstr', (['template'], {}), '(template)\n', (7188, 7198), False, 'import fstr\n'), ((7456, 7470), 'fstr', 'fstr', (['template'], {}), '(template)\n', (7460, 7470), False, 'import fstr\n'), ((7529, 7540), 'fstr', 'fstr', (['"""{0}"""'], {}), "('{0}')\n", (7533, 7540), False, 'import fstr\n'), ((7578, 7593), 'fstr', 'fstr', (['"""{3+\n4}"""'], {}), "('{3+\\n4}')\n", (7582, 7593), False, 'import fstr\n'), ((7928, 7942), 'fstr', 'fstr', (['template'], {}), '(template)\n', (7932, 7942), False, 'import fstr\n'), ((8404, 8418), 'fstr', 'fstr', (['template'], {}), '(template)\n', (8408, 8418), False, 'import fstr\n'), ((8622, 8636), 'fstr', 'fstr', (['template'], {}), '(template)\n', (8626, 8636), False, 'import fstr\n'), ((8930, 8949), 'fstr', 'fstr', (['template'], {'x': '(5)'}), '(template, x=5)\n', (8934, 8949), False, 'import fstr\n'), ((9488, 9502), 'fstr', 'fstr', (['template'], {}), '(template)\n', (9492, 9502), False, 'import fstr\n'), ((9598, 9615), 'fstr', 'fstr', (['"""v:{value}"""'], {}), "('v:{value}')\n", (9602, 9615), False, 'import fstr\n'), ((9798, 9815), 'fstr', 'fstr', (['"""{Obj():x}"""'], {}), "('{Obj():x}')\n", (9802, 9815), False, 'import fstr\n'), ((9850, 9865), 'fstr', 'fstr', (['"""{Obj()}"""'], {}), "('{Obj()}')\n", (9854, 9865), False, 'import fstr\n'), ((9900, 9916), 'fstr', 'fstr', (['"""{Obj():}"""'], {}), "('{Obj():}')\n", (9904, 9916), False, 'import fstr\n'), ((9952, 9964), 'fstr', 'fstr', (['"""{3:}"""'], {}), "('{3:}')\n", (9956, 9964), False, 'import fstr\n'), ((9992, 10006), 'fstr', 'fstr', (['"""{3!s:}"""'], {}), "('{3!s:}')\n", (9996, 10006), False, 'import fstr\n'), ((10099, 10116), 'fstr', 'fstr', (['"""{foo(10)}"""'], {}), "('{foo(10)}')\n", (10103, 10116), False, 'import fstr\n'), ((10192, 10204), 'fstr', 'fstr', (['"""{ 3}"""'], {}), "('{ 3}')\n", (10196, 10204), False, 'import fstr\n'), ((10232, 10245), 'fstr', 'fstr', (['"""{ 3}"""'], {}), "('{ 3}')\n", (10236, 10245), False, 'import fstr\n'), ((10273, 10285), 'fstr', 'fstr', (['"""{3 }"""'], {}), "('{3 }')\n", (10277, 10285), False, 'import fstr\n'), ((10313, 10326), 'fstr', 'fstr', (['"""{3 }"""'], {}), "('{3 }')\n", (10317, 10326), False, 'import fstr\n'), ((10355, 10401), 'fstr', 'fstr', (['"""expr={ {x: y for x, y in [(1, 2), ]} }"""'], {}), "('expr={ {x: y for x, y in [(1, 2), ]} }')\n", (10359, 10401), False, 'import fstr\n'), ((10439, 10484), 'fstr', 'fstr', (['"""expr={ {x: y for x, y in [(1, 2), ]}}"""'], {}), "('expr={ {x: y for x, y in [(1, 2), ]}}')\n", (10443, 10484), False, 'import fstr\n'), ((10742, 10756), 'fstr', 'fstr', (['"""{3!=4}"""'], {}), "('{3!=4}')\n", (10746, 10756), False, 'import fstr\n'), ((10787, 10802), 'fstr', 'fstr', (['"""{3!=4:}"""'], {}), "('{3!=4:}')\n", (10791, 10802), False, 'import fstr\n'), ((10833, 10849), 'fstr', 'fstr', (['"""{3!=4!s}"""'], {}), "('{3!=4!s}')\n", (10837, 10849), False, 'import fstr\n'), ((10880, 10899), 'fstr', 'fstr', (['"""{3!=4!s:.3}"""'], {}), "('{3!=4!s:.3}')\n", (10884, 10899), False, 'import fstr\n'), ((10955, 10975), 'fstr', 'fstr', (['"""{3.14:10.10}"""'], {}), "('{3.14:10.10}')\n", (10959, 10975), False, 'import fstr\n'), ((11012, 11034), 'fstr', 'fstr', (['"""{3.14!s:10.10}"""'], {}), "('{3.14!s:10.10}')\n", (11016, 11034), False, 'import fstr\n'), ((11071, 11093), 'fstr', 'fstr', (['"""{3.14!r:10.10}"""'], {}), "('{3.14!r:10.10}')\n", (11075, 11093), False, 'import fstr\n'), ((11225, 11238), 'fstr', 'fstr', (['"""{"a"}"""'], {}), '(\'{"a"}\')\n', (11229, 11238), False, 'import fstr\n'), ((11266, 11281), 'fstr', 'fstr', (['"""{"a"!r}"""'], {}), '(\'{"a"!r}\')\n', (11270, 11281), False, 'import fstr\n'), ((11416, 11431), 'fstr', 'fstr', (['"""{"a!r"}"""'], {}), '(\'{"a!r"}\')\n', (11420, 11431), False, 'import fstr\n'), ((11531, 11553), 'fstr', 'fstr', (['"""{3.14:!<10.10}"""'], {}), "('{3.14:!<10.10}')\n", (11535, 11553), False, 'import fstr\n'), ((12130, 12143), 'fstr', 'fstr', (['invalid'], {}), '(invalid)\n', (12134, 12143), False, 'import fstr\n'), ((12662, 12671), 'fstr', 'fstr', (['bad'], {}), '(bad)\n', (12666, 12671), False, 'import fstr\n'), ((11165, 11187), 'fstr', 'fstr', (['"""{3.14!a:10.10}"""'], {}), "('{3.14!a:10.10}')\n", (11169, 11187), False, 'import fstr\n'), ((11346, 11361), 'fstr', 'fstr', (['"""{"a"!a}"""'], {}), '(\'{"a"!a}\')\n', (11350, 11361), False, 'import fstr\n'), ((11921, 11930), 'fstr', 'fstr', (['bad'], {}), '(bad)\n', (11925, 11930), False, 'import fstr\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
from builders import deb, rpm, amazon
from builders.util import shell_call
__author__ = "<NAME>"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
if __name__ == '__main__':
package = 'nginx-amplify-agent' if len(sys.argv) == 1 else sys.argv[1]
if os.path.isfile('/etc/debian_version'):
deb.build(package=package)
elif os.path.isfile('/etc/redhat-release'):
rpm.build(package=package)
else:
os_release = shell_call('cat /etc/os-release', important=False)
if 'amazon linux' in os_release.lower():
amazon.build(package=package)
else:
print("sorry, it will be done later\n")
|
[
"builders.amazon.build",
"builders.deb.build",
"builders.util.shell_call",
"os.path.isfile",
"builders.rpm.build"
] |
[((404, 441), 'os.path.isfile', 'os.path.isfile', (['"""/etc/debian_version"""'], {}), "('/etc/debian_version')\n", (418, 441), False, 'import os\n'), ((451, 477), 'builders.deb.build', 'deb.build', ([], {'package': 'package'}), '(package=package)\n', (460, 477), False, 'from builders import deb, rpm, amazon\n'), ((487, 524), 'os.path.isfile', 'os.path.isfile', (['"""/etc/redhat-release"""'], {}), "('/etc/redhat-release')\n", (501, 524), False, 'import os\n'), ((534, 560), 'builders.rpm.build', 'rpm.build', ([], {'package': 'package'}), '(package=package)\n', (543, 560), False, 'from builders import deb, rpm, amazon\n'), ((592, 642), 'builders.util.shell_call', 'shell_call', (['"""cat /etc/os-release"""'], {'important': '(False)'}), "('cat /etc/os-release', important=False)\n", (602, 642), False, 'from builders.util import shell_call\n'), ((705, 734), 'builders.amazon.build', 'amazon.build', ([], {'package': 'package'}), '(package=package)\n', (717, 734), False, 'from builders import deb, rpm, amazon\n')]
|
# Generated by Django 3.0.8 on 2020-12-19 21:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('category', '0001_initial'),
('product', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='product',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='category.Category'),
),
migrations.AlterField(
model_name='product',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='images/'),
),
migrations.AlterField(
model_name='product',
name='is_active',
field=models.BooleanField(blank=True, default=True),
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.ImageField"
] |
[((397, 512), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""category.Category"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='category.Category')\n", (414, 512), False, 'from django.db import migrations, models\n'), ((629, 690), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': '"""images/"""'}), "(blank=True, null=True, upload_to='images/')\n", (646, 690), False, 'from django.db import migrations, models\n'), ((816, 861), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'blank': '(True)', 'default': '(True)'}), '(blank=True, default=True)\n', (835, 861), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
from setuptools import find_packages, setup
def read(fname):
file_path = os.path.join(os.path.dirname(__file__), fname)
return codecs.open(file_path, encoding="utf-8").read()
setup(
name="pardal",
version="0.1.0",
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
license="MIT",
url="https://github.com/anapaulagomes/pardal",
description="An accessible and customizable Twitter client",
packages=find_packages(exclude=["tests", "docs"]),
python_requires=">=3.7",
install_requires=[""], # FIXME
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: End Users/Desktop",
"Topic :: Adaptive Technologies",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
],
entry_points={},
)
|
[
"os.path.dirname",
"codecs.open",
"setuptools.find_packages"
] |
[((164, 189), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (179, 189), False, 'import os\n'), ((560, 600), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests', 'docs']"}), "(exclude=['tests', 'docs'])\n", (573, 600), False, 'from setuptools import find_packages, setup\n'), ((209, 249), 'codecs.open', 'codecs.open', (['file_path'], {'encoding': '"""utf-8"""'}), "(file_path, encoding='utf-8')\n", (220, 249), False, 'import codecs\n')]
|
"""Create dataframe and check the quality
This script downloads a dataset from Seattle Open Data Portal and imports
as a Pandas Dataframe.
This tool checks if the dataframe:
1. Has at least 10 rows of data
2. Contains only the columns that specified as the second argument
3. Values in each column have the same python type
This script requires that `pandas` be installed within the Python
environment you are running this script in.
This file can also be imported as a module and contains the following
functions:
* test_column_names - returns bool if the column name match
* test_nan_values - returns bool if the dataframe has nan value
* test_least_row_counts - returns bool if the dataframe has at least one row of data
* main - the main function of the script
"""
import pandas as pd
DATAFRAMES = pd.read_csv(
'https://data.seattle.gov/api/views/tw7j-df_importaw/rows.csv?accessType=DOWNLOAD')
def test_datatype(df_import):
"""Test if all columns have values of the correct type
Parameters
----------
df_import : Pandas Dataframe
The dataset imported as Pandas Dataframe
Returns
-------
bool
a bool value: True if the datatype of each column match
"""
columns = list(df_import)
for name in columns:
try:
tp_name = (
isinstance(
df_import[name].iloc[1].item(),
df_import[name].map(type))).any().tolist()
except AttributeError:
tp_name = (
isinstance(
df_import[name].iloc[1],
df_import[name].map(type))).any().tolist()
return tp_name
def test_column_names(df_import):
"""Test if the dataframe has expected columns
Parameters
----------
df_import : Pandas Dataframe
The dataset imported as Pandas Dataframe
Returns
-------
bool
a bool value: True if the dataframe has expected columns
"""
df_import_columns = sorted(df_import.columns.tolist())
df_import_checklist = ['trip_id',
'starttime',
'stoptime',
'bikeid',
'tripduration',
'from_station_name',
'to_station_name',
'from_station_id',
'to_station_id',
'usertype',
'gender',
'birthyear']
if df_import_columns == sorted(df_import_checklist):
return True
def test_nan_values(df_import):
"""Test if the dataframe has non value
Parameters
----------
df_import : Pandas Dataframe
The dataset imported as Pandas Dataframe
Returns
-------
bool
a bool value: True if the dataframe has non value
"""
return df_import.notnull().values.any()
def test_least_row_counts(df_import):
"""Test if the dataframe has at least one row of data
Parameters
----------
df_import : Pandas Dataframe
The dataset imported as Pandas Dataframe
Returns
-------
bool
a bool value: True if the dataframe has at least one row of data
"""
return df_import.shape[0] >= 1
if __name__ == '__main__':
"""Main function
Returns
-------
bool
a bool value if the dataframe pass all the tests
"""
DATAFRAME = pd.read_csv(
'https://data.seattle.gov/api/views/tw7j-df_importaw/rows.csv?accessType=DOWNLOAD')
# only fetch first 10 rows for testing
DATAFRAME = DATAFRAME.head(10)
print(test_column_names(DATAFRAME) & test_datatype(DATAFRAME) &
test_least_row_counts(DATAFRAME) & test_nan_values(DATAFRAME))
|
[
"pandas.read_csv"
] |
[((824, 929), 'pandas.read_csv', 'pd.read_csv', (['"""https://data.seattle.gov/api/views/tw7j-df_importaw/rows.csv?accessType=DOWNLOAD"""'], {}), "(\n 'https://data.seattle.gov/api/views/tw7j-df_importaw/rows.csv?accessType=DOWNLOAD'\n )\n", (835, 929), True, 'import pandas as pd\n'), ((3476, 3581), 'pandas.read_csv', 'pd.read_csv', (['"""https://data.seattle.gov/api/views/tw7j-df_importaw/rows.csv?accessType=DOWNLOAD"""'], {}), "(\n 'https://data.seattle.gov/api/views/tw7j-df_importaw/rows.csv?accessType=DOWNLOAD'\n )\n", (3487, 3581), True, 'import pandas as pd\n')]
|
import unittest
import zserio
from testutils import getZserioApi
class StructureConstraintsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "constraints.zs").structure_constraints
def testReadCorrectColors(self):
writer = zserio.BitStreamWriter()
self.__class__._write(writer, self.api.BasicColor.BLACK, self.api.BasicColor.WHITE,
self.api.ExtendedColor.PURPLE)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
structureConstraints = self.api.StructureConstraints()
structureConstraints.read(reader)
self.assertEqual(self.api.BasicColor.BLACK, structureConstraints.black_color)
self.assertEqual(self.api.BasicColor.WHITE, structureConstraints.white_color)
self.assertEqual(self.api.ExtendedColor.PURPLE, structureConstraints.purple_color)
def testReadWrongBlackConstraint(self):
writer = zserio.BitStreamWriter()
self.__class__._write(writer, self.api.BasicColor.RED, self.api.BasicColor.WHITE,
self.api.ExtendedColor.PURPLE)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
structureConstraints = self.api.StructureConstraints()
with self.assertRaises(zserio.PythonRuntimeException):
structureConstraints.read(reader)
def testReadWrongWhiteConstraint(self):
writer = zserio.BitStreamWriter()
self.__class__._write(writer, self.api.BasicColor.BLACK, self.api.BasicColor.RED,
self.api.ExtendedColor.PURPLE)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
structureConstraints = self.api.StructureConstraints()
with self.assertRaises(zserio.PythonRuntimeException):
structureConstraints.read(reader)
def testReadWrongPurpleConstraint(self):
writer = zserio.BitStreamWriter()
self.__class__._write(writer, self.api.BasicColor.BLACK, self.api.BasicColor.WHITE,
self.api.ExtendedColor.LIME)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
structureConstraints = self.api.StructureConstraints()
with self.assertRaises(zserio.PythonRuntimeException):
structureConstraints.read(reader)
def testWriteCorrectConstraints(self):
structureConstraints = self.api.StructureConstraints(self.api.BasicColor.BLACK,
self.api.BasicColor.WHITE,
True,
self.api.ExtendedColor.PURPLE)
bitBuffer = zserio.serialize(structureConstraints)
readStructureConstraints = zserio.deserialize(self.api.StructureConstraints, bitBuffer)
self.assertEqual(self.api.BasicColor.BLACK, readStructureConstraints.black_color)
self.assertEqual(self.api.BasicColor.WHITE, readStructureConstraints.white_color)
self.assertEqual(self.api.ExtendedColor.PURPLE, readStructureConstraints.purple_color)
self.assertEqual(structureConstraints, readStructureConstraints)
def testWriteWrongBlackConstraint(self):
structureConstraints = self.api.StructureConstraints(self.api.BasicColor.RED,
self.api.BasicColor.WHITE,
True,
self.api.ExtendedColor.PURPLE)
writer = zserio.BitStreamWriter()
with self.assertRaises(zserio.PythonRuntimeException):
structureConstraints.write(writer)
def testWriteWrongWhiteConstraint(self):
structureConstraints = self.api.StructureConstraints(self.api.BasicColor.BLACK,
self.api.BasicColor.RED,
True,
self.api.ExtendedColor.PURPLE)
writer = zserio.BitStreamWriter()
with self.assertRaises(zserio.PythonRuntimeException):
structureConstraints.write(writer)
def testWriteWrongPurpleConstraint(self):
structureConstraints = self.api.StructureConstraints(self.api.BasicColor.BLACK,
self.api.BasicColor.WHITE,
True,
self.api.ExtendedColor.LIME)
writer = zserio.BitStreamWriter()
with self.assertRaises(zserio.PythonRuntimeException):
structureConstraints.write(writer)
@staticmethod
def _write(writer, blackColor, whiteColor, purpleColor):
writer.write_bits(blackColor.value, 8)
writer.write_bool(True)
writer.write_bits(whiteColor.value, 8)
writer.write_bool(True)
writer.write_bits(purpleColor.value, 16)
|
[
"zserio.serialize",
"zserio.deserialize",
"zserio.BitStreamReader",
"testutils.getZserioApi",
"zserio.BitStreamWriter"
] |
[((296, 320), 'zserio.BitStreamWriter', 'zserio.BitStreamWriter', ([], {}), '()\n', (318, 320), False, 'import zserio\n'), ((491, 552), 'zserio.BitStreamReader', 'zserio.BitStreamReader', (['writer.byte_array', 'writer.bitposition'], {}), '(writer.byte_array, writer.bitposition)\n', (513, 552), False, 'import zserio\n'), ((984, 1008), 'zserio.BitStreamWriter', 'zserio.BitStreamWriter', ([], {}), '()\n', (1006, 1008), False, 'import zserio\n'), ((1177, 1238), 'zserio.BitStreamReader', 'zserio.BitStreamReader', (['writer.byte_array', 'writer.bitposition'], {}), '(writer.byte_array, writer.bitposition)\n', (1199, 1238), False, 'import zserio\n'), ((1474, 1498), 'zserio.BitStreamWriter', 'zserio.BitStreamWriter', ([], {}), '()\n', (1496, 1498), False, 'import zserio\n'), ((1667, 1728), 'zserio.BitStreamReader', 'zserio.BitStreamReader', (['writer.byte_array', 'writer.bitposition'], {}), '(writer.byte_array, writer.bitposition)\n', (1689, 1728), False, 'import zserio\n'), ((1965, 1989), 'zserio.BitStreamWriter', 'zserio.BitStreamWriter', ([], {}), '()\n', (1987, 1989), False, 'import zserio\n'), ((2158, 2219), 'zserio.BitStreamReader', 'zserio.BitStreamReader', (['writer.byte_array', 'writer.bitposition'], {}), '(writer.byte_array, writer.bitposition)\n', (2180, 2219), False, 'import zserio\n'), ((2792, 2830), 'zserio.serialize', 'zserio.serialize', (['structureConstraints'], {}), '(structureConstraints)\n', (2808, 2830), False, 'import zserio\n'), ((2866, 2926), 'zserio.deserialize', 'zserio.deserialize', (['self.api.StructureConstraints', 'bitBuffer'], {}), '(self.api.StructureConstraints, bitBuffer)\n', (2884, 2926), False, 'import zserio\n'), ((3671, 3695), 'zserio.BitStreamWriter', 'zserio.BitStreamWriter', ([], {}), '()\n', (3693, 3695), False, 'import zserio\n'), ((4202, 4226), 'zserio.BitStreamWriter', 'zserio.BitStreamWriter', ([], {}), '()\n', (4224, 4226), False, 'import zserio\n'), ((4734, 4758), 'zserio.BitStreamWriter', 'zserio.BitStreamWriter', ([], {}), '()\n', (4756, 4758), False, 'import zserio\n'), ((178, 218), 'testutils.getZserioApi', 'getZserioApi', (['__file__', '"""constraints.zs"""'], {}), "(__file__, 'constraints.zs')\n", (190, 218), False, 'from testutils import getZserioApi\n')]
|
import yaml
import pandas as pd
from more_itertools import flatten
import os
os.chdir('notebook')
fbgn2symbol = (
pd.read_feather('../references/gene_annotation_dmel_r6-26.feather', columns=['FBgn', 'gene_symbol'])
.set_index('FBgn')
.to_dict()['gene_symbol']
)
config = yaml.safe_load(open('../config/common.yaml'))
CLUSTER_ANNOT = config['cluster_annot']
CLUSTER_ORDER = config['cluster_order']
lit_genes = yaml.safe_load(open('../config/literature_genes.yaml'))
lit_genes_all = list(flatten([
v
for k, v in lit_genes.items()
]))
bm = (
pd.read_feather('../output/seurat3-cluster-wf/combined_n3_biomarkers.feather', columns=['FBgn', 'gene_symbol', 'cluster', 'p_val_adj', 'pct.1'])
.query('p_val_adj <= 0.01')
.drop_duplicates(subset='FBgn', keep=False)
.reset_index(drop=True)
.assign(cluster=lambda df: df.cluster.cat.rename_categories(CLUSTER_ANNOT))
.assign(cluster=lambda df: df.cluster.cat.reorder_categories(CLUSTER_ORDER))
.set_index('FBgn')
.groupby('cluster')
)
def get_lit(cluster):
print(cluster)
df = bm.get_group(cluster)
return df.reindex(lit_genes_all).dropna()
get_lit('SP')
get_lit('EPS')
get_lit('PS1')
get_lit('PS2')
get_lit('PS3')
get_lit('ECY')
get_lit("CY1")
get_lit("CY2")
get_lit("TE")
get_lit("PC")
|
[
"pandas.read_feather",
"os.chdir"
] |
[((78, 98), 'os.chdir', 'os.chdir', (['"""notebook"""'], {}), "('notebook')\n", (86, 98), False, 'import os\n'), ((120, 225), 'pandas.read_feather', 'pd.read_feather', (['"""../references/gene_annotation_dmel_r6-26.feather"""'], {'columns': "['FBgn', 'gene_symbol']"}), "('../references/gene_annotation_dmel_r6-26.feather', columns\n =['FBgn', 'gene_symbol'])\n", (135, 225), True, 'import pandas as pd\n'), ((569, 717), 'pandas.read_feather', 'pd.read_feather', (['"""../output/seurat3-cluster-wf/combined_n3_biomarkers.feather"""'], {'columns': "['FBgn', 'gene_symbol', 'cluster', 'p_val_adj', 'pct.1']"}), "('../output/seurat3-cluster-wf/combined_n3_biomarkers.feather',\n columns=['FBgn', 'gene_symbol', 'cluster', 'p_val_adj', 'pct.1'])\n", (584, 717), True, 'import pandas as pd\n')]
|
"""
<NAME>
json from yr. Location Remmen, Halden: lat: 59.1304, lon: 11.3546, altitude: ca. 80
https://api.met.no/weatherapi/locationforecast/2.0/#!/data/get_compact_format
request api: https://api.met.no/weatherapi/locationforecast/2.0/compact?altitude=80&lat=63.4305&lon=10.3950
curl: curl -X GET --header 'Accept: application/json' 'https://api.met.no/weatherapi/locationforecast/2.0/compact?altitude=80&lat=59.1304&lon=11.3545'
"""
import requests # api module
import json #Save as Json
#Remmen location og altitude:
lat = "59.1304"
lon = "11.3546"
alt = "80"
# url to yr api
url = "https://api.met.no/weatherapi/locationforecast/2.0/complete.json?altitude=" + alt + "&lat=" + lat + "&lon=" + lon
# Header to tell yr where the request is coming from.
# NB! Find your user-agent and put the feeld
headers = {
"Content-type": "application/json",
"Cache-Control": "no-cache",
"user-agent": "Put your user-agent here"
}
# get the json api
response = requests.request("GET", url, headers=headers)
if response:
print('Success!')
else:
print('An error has occurred.')
data = response.json()
def write_json_file():
""" Save data as json file """
with open('yr_data_complete_format.json', 'w') as f:
json.dump(data, f)
write_json_file()
# TODO! If-Modified-Since
def updated_time():
""" Time updated at yr """
updated = (data["properties"]["meta"]["updated_at"])
return updated
#print(data["properties"]["timeseries"][0]["data"]["instant"]["details"])
def air_temperature():
""" Return the instant air temperature in celsius """
air_temp = (data["properties"]["timeseries"][0]["data"]["instant"]["details"]["air_temperature"])
return air_temp
def wind_speed():
""" Wind speed in m/s """
wind_speed = (data["properties"]["timeseries"][0]["data"]["instant"]["details"]["wind_speed"])
return wind_speed
# Precentage value of the total cloud cover at all heights
cloud_area = (data["properties"]["timeseries"][0]["data"]["instant"]["details"]["cloud_area_fraction"])
rel_humidity = (data["properties"]["timeseries"][0]["data"]["instant"]["details"]["relative_humidity"])
def summary_1_hour():
""" String value giving a summary for +1 hour """
summary_1_hour = (data["properties"]["timeseries"][0]["data"]["next_1_hours"]["summary"]["symbol_code"])
return summary_1_hour
def precipitation_1_hour():
""" Precipitation for +1 hour in mm """
precipitation_1_hour = (data["properties"]["timeseries"][0]["data"]["next_1_hours"]["details"]["precipitation_amount"])
return precipitation_1_hour
def wind_direction():
""" Return the wind from direction """
wind_from_direction = (data["properties"]["timeseries"][0]["data"]["instant"]["details"]["wind_from_direction"])
if wind_from_direction > 326.25 or wind_from_direction < 11.25:
print("Nord")
return "North"
elif wind_from_direction < 56.25:
print("Nordøst")
return "Northeast"
elif wind_from_direction < 101.25:
print("Øst")
return "East"
elif wind_from_direction < 146.25:
print("Sørøst")
return "Southeast"
elif wind_from_direction < 191.25:
print("Sør")
return "South"
elif wind_from_direction < 236.25:
print("Sørvest")
return "Southwest"
elif wind_from_direction < 281.25:
print("Vest")
return "West"
elif wind_from_direction < 326.25:
print("Nordvest")
return "Northwest"
#print(wind_direction())
|
[
"json.dump",
"requests.request"
] |
[((987, 1032), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'headers': 'headers'}), "('GET', url, headers=headers)\n", (1003, 1032), False, 'import requests\n'), ((1257, 1275), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (1266, 1275), False, 'import json\n')]
|
import re
from glom import glom
import json
from jsonschema import validate
from jsonschema import ValidationError
from jsonpath_ng import parse
class HarVerifications:
def __init__(self, har):
self.har = har
def rmatch(self, val, str_rxp):
if val is None:
return False
if isinstance(val, bytes):
val = str(val, "utf-8")
return re.search(str_rxp, val, flags=re.IGNORECASE)
def rmatch_any(self, items, rxp):
if items is None or len(items) == 0 or rxp is None:
return False
for item in items:
if isinstance(item, bytes):
item = str(item, "utf-8")
if self.rmatch(item, rxp):
return True
return False
def rmatch_key_val(self, items, kv):
name = True
value = True
if items is None or len(items) == 0 or kv is None:
return False
for item in items:
if 'name' in kv:
name = self.rmatch(item['name'], kv['name'])
if 'value'in kv:
value = self.rmatch(kv['value'], item['value'])
if name and value:
return True
return False
def schema_validate(self, item, schema):
try:
if type(item) == str:
item = json.loads(item)
validate(instance=item, schema=schema)
except (ValidationError, ValueError) as e:
return False
return True
def valid_json(self, item):
if item is None:
return False
try:
json.loads(item)
except ValueError as e:
return False
return True
def has_json_path(self, json_str, json_path):
if self.valid_json(json_str) == False:
return False
jsonpath_expr = parse(json_path)
matches = jsonpath_expr.find(json.loads(json_str))
return len(matches) > 0
def current_page(self):
return self.har['log']['pages'][-1]['id']
# Use glom to dig into the har entries, responses and websockets to get down to an array of something or others
# (headers, websocket messages, content then we execute our test against that item
# current, *, or filter
def entries(self, criteria=False):
entry_list = self.har['log']['entries']
har_entry_filters = {
'page': (lambda item, pgref: glom(item, 'pageref', default='') == pgref ),
'status': (lambda item, status: self.rmatch(str(glom(item, 'response.status', default=None)), status)),
'url': (lambda item, url_rxp: self.rmatch(str(glom(item, 'request.url', default=None)), url_rxp)),
'content': (lambda item, content_rxp: self.rmatch(str(glom(item, 'response.content.text', default=None)), content_rxp)),
'content_type': (lambda item, content_type_rxp: self.rmatch(str(glom(item, 'response.content.mimeType', default=None)), content_type_rxp)),
'request_header': (lambda item, match_rgxp: self.rmatch_key_val(glom(item, 'request.headers',default=[]), match_rgxp)),
'response_header': (lambda item, match_rgxp: self.rmatch_key_val(glom(item, 'response.headers', default=[]), match_rgxp)),
'request_cookie': (lambda item, match_rgxp: self.rmatch_key_val(glom(item, 'request.cookies', default=[]), match_rgxp)),
'response_cookie': (lambda item, match_rgxp: self.rmatch_key_val(glom(item, 'response.cookies', default=[]), match_rgxp)),
'websocket_message': (lambda item, ws_rxp: self.rmatch_any(glom(item, ('_webSocketMessages', ['data']), default=[]), ws_rxp)),
'json_valid': (lambda item, _: self.valid_json(str(glom(item, 'response.content.text', default=None)))),
'json_path': (lambda item, path: self.has_json_path(str(glom(item, 'response.content.text', default=None)), path)),
'json_schema': (lambda item, schema: self.schema_validate(str(glom(item, 'response.content.text', default=None)),schema)),
}
for filter_name, target_value in criteria.items():
filter_lambda = har_entry_filters[filter_name]
if filter_name == 'page' and target_value == 'current':
target_value = self.current_page()
entry_list = [entry for entry in entry_list if filter_lambda(entry, target_value)]
return entry_list
def gsize(self, item, path):
return self.not_neg(glom(item, 'request.headersSize', default=0))
def not_neg(self, val):
val = int(val)
return 0 if val == -1 or val is None else val
def measure(self, items, measurement):
measurements = {
'request_headers': (lambda item: self.gsize(item, 'request.headersSize')),
'response_headers': (lambda item: self.gsize(item, 'response.headersSize')),
'request_body': (lambda item: self.gsize(item, 'request.bodySize')),
'response_body': (lambda item: self.gsize(item, 'request.bodySize')),
'request': (lambda item: self.gsize(item, 'request.bodySize') + self.gsize(item, 'request.headerSize')),
'response': (lambda item: self.gsize(item, 'response.bodySize') + self.gsize(item, 'response.headerSize')),
'time': (lambda item: self.gsize(item, 'time')),
}
method = measurements[measurement]
return list(map(method, items))
def present(self, criteria):
return len(self.entries(criteria)) > 0
def not_present(self, criteria):
return len(self.entries(criteria)) == 0
def max(self, criteria, measurement_name):
items = self.entries(criteria)
return max(self.measure(items, measurement_name), default=0)
def get_sum(self, criteria, measurement_name):
items = self.entries(criteria)
return sum(self.measure(items, measurement_name))
|
[
"jsonschema.validate",
"glom.glom",
"json.loads",
"jsonpath_ng.parse",
"re.search"
] |
[((395, 439), 're.search', 're.search', (['str_rxp', 'val'], {'flags': 're.IGNORECASE'}), '(str_rxp, val, flags=re.IGNORECASE)\n', (404, 439), False, 'import re\n'), ((1853, 1869), 'jsonpath_ng.parse', 'parse', (['json_path'], {}), '(json_path)\n', (1858, 1869), False, 'from jsonpath_ng import parse\n'), ((1369, 1407), 'jsonschema.validate', 'validate', ([], {'instance': 'item', 'schema': 'schema'}), '(instance=item, schema=schema)\n', (1377, 1407), False, 'from jsonschema import validate\n'), ((1612, 1628), 'json.loads', 'json.loads', (['item'], {}), '(item)\n', (1622, 1628), False, 'import json\n'), ((1907, 1927), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (1917, 1927), False, 'import json\n'), ((4482, 4526), 'glom.glom', 'glom', (['item', '"""request.headersSize"""'], {'default': '(0)'}), "(item, 'request.headersSize', default=0)\n", (4486, 4526), False, 'from glom import glom\n'), ((1340, 1356), 'json.loads', 'json.loads', (['item'], {}), '(item)\n', (1350, 1356), False, 'import json\n'), ((2431, 2464), 'glom.glom', 'glom', (['item', '"""pageref"""'], {'default': '""""""'}), "(item, 'pageref', default='')\n", (2435, 2464), False, 'from glom import glom\n'), ((3068, 3109), 'glom.glom', 'glom', (['item', '"""request.headers"""'], {'default': '[]'}), "(item, 'request.headers', default=[])\n", (3072, 3109), False, 'from glom import glom\n'), ((3201, 3243), 'glom.glom', 'glom', (['item', '"""response.headers"""'], {'default': '[]'}), "(item, 'response.headers', default=[])\n", (3205, 3243), False, 'from glom import glom\n'), ((3336, 3377), 'glom.glom', 'glom', (['item', '"""request.cookies"""'], {'default': '[]'}), "(item, 'request.cookies', default=[])\n", (3340, 3377), False, 'from glom import glom\n'), ((3470, 3512), 'glom.glom', 'glom', (['item', '"""response.cookies"""'], {'default': '[]'}), "(item, 'response.cookies', default=[])\n", (3474, 3512), False, 'from glom import glom\n'), ((3600, 3656), 'glom.glom', 'glom', (['item', "('_webSocketMessages', ['data'])"], {'default': '[]'}), "(item, ('_webSocketMessages', ['data']), default=[])\n", (3604, 3656), False, 'from glom import glom\n'), ((2539, 2582), 'glom.glom', 'glom', (['item', '"""response.status"""'], {'default': 'None'}), "(item, 'response.status', default=None)\n", (2543, 2582), False, 'from glom import glom\n'), ((2653, 2692), 'glom.glom', 'glom', (['item', '"""request.url"""'], {'default': 'None'}), "(item, 'request.url', default=None)\n", (2657, 2692), False, 'from glom import glom\n'), ((2772, 2821), 'glom.glom', 'glom', (['item', '"""response.content.text"""'], {'default': 'None'}), "(item, 'response.content.text', default=None)\n", (2776, 2821), False, 'from glom import glom\n'), ((2915, 2968), 'glom.glom', 'glom', (['item', '"""response.content.mimeType"""'], {'default': 'None'}), "(item, 'response.content.mimeType', default=None)\n", (2919, 2968), False, 'from glom import glom\n'), ((3733, 3782), 'glom.glom', 'glom', (['item', '"""response.content.text"""'], {'default': 'None'}), "(item, 'response.content.text', default=None)\n", (3737, 3782), False, 'from glom import glom\n'), ((3855, 3904), 'glom.glom', 'glom', (['item', '"""response.content.text"""'], {'default': 'None'}), "(item, 'response.content.text', default=None)\n", (3859, 3904), False, 'from glom import glom\n'), ((3989, 4038), 'glom.glom', 'glom', (['item', '"""response.content.text"""'], {'default': 'None'}), "(item, 'response.content.text', default=None)\n", (3993, 4038), False, 'from glom import glom\n')]
|
from django.http import JsonResponse, HttpResponseNotFound
from django.template import RequestContext, loader, Template, TemplateDoesNotExist
import logging
logger = logging.getLogger(__name__)
def json_html_response(request, template_name, code, message):
"""
Provide response in json or html format accordingly content-type
:param request: HttpRequest
:param template_name: String
:param code: Integer
:param message: String
:return: JsonResponse|HttpResponseNotFound
:TODO fix format data duplication for v1/utils format for error response
"""
if request.META.get('CONTENT_TYPE') == 'application/json':
response = JsonResponse(status=code, data={'results': {'code': code, 'msg': message}})
else:
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
template = Template(message)
response = HttpResponseNotFound(template.render(RequestContext(request)))
return response
def e500(request, template_name='500.html'):
return json_html_response(request, template_name, 500, 'Internal Server Error')
def e404(request, template_name='404.html'):
return json_html_response(request, template_name, 404, 'Not Found')
def csrf_failure(request, reason=""):
logger.error('error 403: ' + str(request))
return json_html_response(request, '403.html', 403, 'Forbidden')
|
[
"django.http.JsonResponse",
"django.template.loader.get_template",
"django.template.Template",
"logging.getLogger",
"django.template.RequestContext"
] |
[((167, 194), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (184, 194), False, 'import logging\n'), ((668, 743), 'django.http.JsonResponse', 'JsonResponse', ([], {'status': 'code', 'data': "{'results': {'code': code, 'msg': message}}"}), "(status=code, data={'results': {'code': code, 'msg': message}})\n", (680, 743), False, 'from django.http import JsonResponse, HttpResponseNotFound\n'), ((790, 824), 'django.template.loader.get_template', 'loader.get_template', (['template_name'], {}), '(template_name)\n', (809, 824), False, 'from django.template import RequestContext, loader, Template, TemplateDoesNotExist\n'), ((885, 902), 'django.template.Template', 'Template', (['message'], {}), '(message)\n', (893, 902), False, 'from django.template import RequestContext, loader, Template, TemplateDoesNotExist\n'), ((959, 982), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (973, 982), False, 'from django.template import RequestContext, loader, Template, TemplateDoesNotExist\n')]
|
from ..parser import Atomic, Variable, Compound, List
from ..parser import isvariable, isatom, isnumber, islist, ispartiallist, iscallable
from ..core import BuiltIn
###
### Term unification (ISO 8.2)
###
class Unify_2(BuiltIn):
"""'='(?term, ?term)
If X and Y are NSTO (Not Subject To Occur-check) then '='(X, Y) is true
iff X and Y are unifiable."""
def execute(self, x, y):
# TODO prologlib crashes if you attempt to unify two STO terms by =/2
# instead of using the proper unify_with_occur_check/2 predicate.
return self.unify(x, y)
class NotUnifiable_2(BuiltIn):
"""'\\='(@term, @term)
If X and Y are NSTO (Not Subject To Occur-check) then '\\='(X, Y) is true
iff X and Y are not unifiable."""
def execute(self, x, y):
from .. import core
return core.unify(x, y) is None
###
### Type testing (ISO 8.3)
###
class Var_1(BuiltIn):
'''var(@term)
var(X) is true iff X is a member of the set V.'''
def execute(self, x):
return isvariable(x)
class Atom_1(BuiltIn):
'''atom(@term)
atom(X) is true iff X is a member of the set A.'''
def execute(self, x):
return isatom(x)
class Integer_1(BuiltIn):
'''integer(@term)
integer(X) is true iff X is a member of the set I.'''
def execute(self, x):
return x.arity == 0 and isinstance(x.value, int)
class Float_1(BuiltIn):
'''float(@term)
float(X) is true iff X is a member of the set F.'''
def execute(self, x):
return x.arity == 0 and isinstance(x.value, float)
class Atomic_1(BuiltIn):
'''atomic(@term)
atomic(X) is true if X is a member of the set A or I
or F and is false if X is a member of the set V or CT.'''
def execute(self, x):
return isinstance(x, Atomic)
class Compound_1(BuiltIn):
'''compound(@term)
compound(X) is true iff X is a member of the set CT.'''
def execute(self, x):
return isinstance(x, (Compound, List))
class Nonvar_1(BuiltIn):
'''nonvar(@term)
nonvar(X) is true iff X is not a member of the set V.'''
def execute(self, x):
return not isvariable(x)
class Number_1(BuiltIn):
'''number(@term)
number(X) is true if X is a member of the set I or F
and is false if X is a member of the set V, A, or CT.'''
def execute(self, x):
return isnumber(x)
###
### Term comparison (ISO 8.4)
###
class TermLessThanOrEqual_2(BuiltIn):
"""'@=<'(@term, @term)
Test the ordering of two terms. '@=<'(X, Y) is true iff
X preceeds Y or X and Y are identical terms."""
def execute(self, x, y):
# The Python __eq__ method does not hold Prolog
# semantics for anonymous variables
if (isvariable(x) and isvariable(y) and
x.name == '_' and y.name == '_'):
return True
return x <= y
class TermIdentical_2(BuiltIn):
"""'=='(@term, @term)
Test the ordering of two terms. '=='(X, Y) is true iff
X and Y are identical terms."""
def execute(self, x, y):
# The Python __eq__ method does not hold Prolog
# semantics for anonymous variables
if (isvariable(x) and isvariable(y) and
x.name == '_' and y.name == '_'):
return False
return x == y
class TermNotIdentical_2(BuiltIn):
"""'\=='(@term, @term)
Test the ordering of two terms. '\=='(X, Y) is true iff
X and Y are not identical terms."""
def execute(self, x, y):
# The Python __ne__ method does not hold Prolog
# semantics for anonymous variables
if (isvariable(x) and isvariable(y) and
x.name == '_' and y.name == '_'):
return True
return x != y
class TermLessThan_2(BuiltIn):
"""'@<'(@term, @term)
Test the ordering of two terms. '@<'(X, Y) is true iff
X preceeds Y."""
def execute(self, x, y):
return x < y
class TermGreaterThan_2(BuiltIn):
"""'@>(@term, @term)
Test the ordering of two terms. '@>'(X, Y) is true iff
Y preceeds X."""
def execute(self, x, y):
return x > y
class TermGreaterThanOrEqual_2(BuiltIn):
"""'@>=(@term, @term)
Test the ordering of two terms. '@>='(X, Y) is true iff
Y preceeds X or Y and X are identical terms."""
def execute(self, x, y):
# The Python __eq__ method does not hold Prolog
# semantics for anonymous variables
if (isvariable(x) and isvariable(y) and
x.name == '_' and y.name == '_'):
return False
return x >= y
###
### Term creation and decomposition (ISO 8.5)
###
class Functor_3(BuiltIn):
'''functor(-nonvar, +atomic, +integer)
functor(+nonvar, ?atomic, ?integer)
functor(Term, Name, Arity) is true iff:
* Term is a compound term with a functor whose identifier
is Name and arity Arity, or
* Term is an atomic term equal to Name and Arity is 0.'''
def execute(self, term, name, arity):
if isvariable(term) and isvariable(name):
self.throw_instantiation_error()
if isvariable(term) and isvariable(arity):
self.throw_instantiation_error()
if isvariable(term) and not isinstance(arity.value, int):
self.throw_type_error('integer', arity)
# TODO Missing max_arity related error
if isvariable(term) and arity.value < 0:
self.throw_domain_error('not_less_than_zero', arity)
if isvariable(term) and not isinstance(name, Atomic):
self.throw_type_error('atomic', name)
if isvariable(term) and not isatom(name) and arity.value > 0:
self.throw_type_error('atom', name)
if isinstance(term, Atomic):
return self.unify(term, name) and self.unify(arity, Atomic(0))
if isinstance(term, (Compound, List)):
return (self.unify(Atomic(term.name), name) and
self.unify(Atomic(term.arity), arity))
if isinstance(term, Variable):
if isinstance(name, Atomic) and arity.value == 0:
return self.unify(term, name)
if isatom(name) and arity.value > 0:
t = (Variable('_') for i in range(arity.value))
c = Compound(name.name, *t)
return self.unify(term, c)
return False
class Arg_3(BuiltIn):
'''arg(+integer, +compound_term, ?term)
arg(N, Term, Arg) is true iff the Nth argument of Term is Arg.'''
def execute(self, n, term, arg):
if isvariable(n) or isvariable(term):
self.throw_instantiation_error()
if not isinstance(n.value, int):
self.throw_type_error('integer', n)
if not isinstance(term, Compound):
self.throw_type_error('compound', term)
if n.value < 0:
self.throw_domain_error('not_less_than_zero', n)
if n.value >= len(term.value):
return False
return self.unify(arg, term.value[n.value])
class Univ_2(BuiltIn):
"""'=..'(+nonvar, ?list)
'=..'(-nonvar, +list)
'=..'(Term, List) is true iff:
* Term is an atomic term and List is the list whose only
element is Term, or
* Term is a compound term and List is the list whose head
is the functor name of Term and whose tail is a list of the
arguments of Term."""
def execute(self, term, elements):
if isvariable(term) and ispartiallist(elements):
self.throw_instantiation_error()
if not islist(elements) and not ispartiallist(elements):
self.throw_type_error('list', elements)
if isvariable(term) and islist(elements) and isvariable(elements.head):
self.throw_instantiation_error()
if islist(elements) and not isatom(elements.head) and len(elements) > 1:
self.throw_type_error('atom', elements.head)
if islist(elements) and isinstance(elements.head, Compound) and len(elements) > 1:
self.throw_type_error('atomic', elements.head)
if isvariable(term) and elements == List.EMPTY_LIST:
self.throw_domain_error('non_empty_list', elements)
# TODO Missing max_arity related error
if isinstance(term, Atomic):
l = List(term)
return self.unify(elements, l)
if isinstance(term, Compound):
l = List.from_list([Atomic(term.name)] + list(term.value[1:]))
return self.unify(elements, l)
if isinstance(term, Variable):
# elements is a list
if elements.name == '.' and elements.arity == 2:
if len(elements) == 1:
t = elements.head
return self.unify(term, t)
elif len(elements) > 1:
name = elements.head.name
t = Compound(name, *elements.as_list()[1:])
return self.unify(term, t)
else:
return False
else:
return False
class CopyTerm_2(BuiltIn):
'''copy_term(?term, ?term)
copy_term(Term_1, Term_2) is true iff Term_2 unifies with
a term T which is a renamed copy of Term_1.'''
def execute(self, t1, t2):
from .. import core
#t = core.renamed_copy(t1)
t = t1._copy_term()
# Can't directly use BuiltIn.unify because the bindings
# between the renamed copy of t1 and t2 retain validity
# only in the context of the copy_term/2 built-in
mgu = core.unify(t2, t)
if mgu is not None:
if mgu:
t2.apply(mgu)
# Do not propagate renamed term variables bindings
# outside the context of the copy_term/2 built-in
if t2.name in mgu:
# Still preserve the binding for t2 just in
# case t2 were a renamed variable (e.g. coming
# from a clause renaming)
temp = mgu[t2.name]
mgu.reduce()
mgu.update({t2.name : temp})
else:
mgu.reduce()
self.substitution.update(mgu)
return True
return False
###
### Arithmetic evaluation (ISO 8.6)
### Simple arithmetic functors (ISO 9.1)
### Other arithmetic functors (ISO 9.3)
### Bitwise functors (ISO 9.4)
###
class Is_2(BuiltIn):
"""is(?term, @evaluable)
'is'(Result, Expression) is true iff the value of evaluating
Expression as an expression is Result."""
def execute(self, result, expression):
if isvariable(expression):
self.throw_instantiation_error()
c = evaluate_expression(expression)
return self.unify(result, Atomic(c))
def evaluate_expression(term):
# TODO No overflow/underflow errors
# TODO No undefined errors
if isvariable(term):
from ..core import PrologInstantiationError
raise PrologInstantiationError()
if term.arity == 0 and term._isnumber():
return term.value
if isinstance(term, Compound):
from ..core import deref
args = (evaluate_expression(deref(a)) for a in term.value[1:])
pi = term.predicate_indicator()
functor = search_evaluable_functor(pi)
if not functor:
from ..core import PrologTypeError
raise PrologTypeError('evaluable', Atomic(pi))
return functor(*args)
from ..core import PrologTypeError
raise PrologTypeError('number', term)
def search_evaluable_functor(name):
import math
import operator
d = {'+/2' : operator.add, '*/2' : operator.mul, '-/2' : operator.sub,
'-/1' : operator.neg, '//2' : divide, '///2' : intdivide,
'mod/2' : module, 'rem/2' : module, 'floor/1' : math.floor,
'round/1' : round, 'ceiling/1' : math.ceil, 'truncate/1' : math.trunc,
'float/1' : float, 'abs/1' : operator.abs, 'sign/1' : sign,
'float_integer_part/1' : float_integer_part,
'float_fractional_part/1' : float_fractional_part,
'**/2' : power, 'sin/1' : math.sin, 'cos/1' : math.cos,
'atan/1' : math.atan, 'exp/1' : math.exp, 'log/1' : logarithm,
'sqrt/1' : squareroot,
'>>/2' : rightshift, '<</2' : leftshift,
'/\\/2' : bitand, '\\//2' : bitor, '\\/1' : bitnot}
return d.get(name)
def divide(x, y):
'''Redefined w.r.t. Python because in ISO Prolog div(x, y)
with x and y integers is equivalent to intdiv(x, y). Also,
we need to manage ZeroDivisionError errors on our own.'''
if not y:
from ..core import PrologEvaluationError
raise PrologEvaluationError('zero_divisor')
if isinstance(x, int) and isinstance(y, int):
return x // y
return x / y
def intdivide(x, y):
'''Redefined w.r.t. Python because in ISO Prolog x // y
is valid only when x and y are integers. Also, we need to
manage ZeroDivisionError errors on our own.'''
if not y:
from ..core import PrologEvaluationError
raise PrologEvaluationError('zero_divisor')
if not isinstance(x, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(x))
if not isinstance(y, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(y))
return x // y
def module(x, y):
'''Redefined w.r.t. Python because in ISO Prolog mod(x, y)
is valid only when x and y are integers. Also, we need to
manage ZeroDivisionError errors on our own.'''
if not y:
from ..core import PrologEvaluationError
raise PrologEvaluationError('zero_divisor')
if not isinstance(x, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(x))
if not isinstance(y, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(y))
return x % y
def sign(x):
'''Redefined w.r.t. Python because in ISO Prolog sign(x)
must return the same type of number as its input.'''
if not x:
return 0 if isinstance(x, int) else 0.0
from math import copysign
s = copysign(1, x)
return int(s) if isinstance(x, int) else s
def float_integer_part(x):
'''Redefined w.r.t. Python because in ISO Prolog
float_integer_part(x) is valid only when x is a float.'''
if not isinstance(x, float):
from ..core import PrologTypeError
raise PrologTypeError('float', Atomic(x))
from math import modf
f, i = modf(x)
return i
def float_fractional_part(x):
'''Redefined w.r.t. Python because in ISO Prolog
float_fractional_part(x) is valid only when x is a float.'''
if not isinstance(x, float):
from ..core import PrologTypeError
raise PrologTypeError('float', Atomic(x))
from math import modf
f, i = modf(x)
return f
def power(x, y):
'''Redefined w.r.t. Python because in ISO Prolog x ** y
with x < 0 is defined only when y is an integer, and
always returns a float. Also, we need to manage
ZeroDivisionError errors on our own.'''
if x < 0 and isinstance(y, float):
from ..core import PrologEvaluationError
raise PrologEvaluationError('undefined')
if not x and y < 0:
from ..core import PrologEvaluationError
raise PrologEvaluationError('undefined')
return float(x ** y)
def logarithm(x):
'''Redefined w.r.t. Python because we need to manage
ValueError errors (e.g. for log(0)) on our own.'''
if not x:
from ..core import PrologEvaluationError
raise PrologEvaluationError('undefined')
from math import log
return log(x)
def squareroot(x):
'''Redefined w.r.t. Python because we need to manage
ValueError errors (e.g. for x < 0) on our own.'''
if x < 0:
from ..core import PrologEvaluationError
raise PrologEvaluationError('undefined')
from math import sqrt
return sqrt(x)
def rightshift(n, s):
'''Redefined w.r.t. Python because we need to manage
TypeError errors (e.g. n as float) on our own.'''
if not isinstance(n, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(n))
if not isinstance(s, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(s))
return n >> s
def leftshift(n, s):
'''Redefined w.r.t. Python because we need to manage
TypeError errors (e.g. n as float) on our own.'''
if not isinstance(n, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(n))
if not isinstance(s, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(s))
return n << s
def bitand(x, y):
'''Redefined w.r.t. Python because we need to manage
TypeError errors (e.g. x or y as float) on our own.'''
if not isinstance(x, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(n))
if not isinstance(y, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(s))
return x & y
def bitor(x, y):
'''Redefined w.r.t. Python because we need to manage
TypeError errors (e.g. x or y as float) on our own.'''
if not isinstance(x, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(n))
if not isinstance(y, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(s))
return x | y
def bitnot(x):
'''Redefined w.r.t. Python because we need to manage
TypeError errors (e.g. x or y as float) on our own.'''
if not isinstance(x, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(x))
return ~x
###
### Arithmetic comparison (ISO 8.7)
###
class ArithmeticEqual_2(BuiltIn):
"""'=:='(@evaluable, @evaluable)
'=:='(E1, E2) is true iff evaluating E1 and E2 as expressions
the corresponding arithmetic values are equal."""
def execute(self, e1, e2):
if isvariable(e1) or isvariable(e2):
self.throw_instantiation_error()
v1 = evaluate_expression(e1)
v2 = evaluate_expression(e2)
return v1 == v2
class ArithmeticNotEqual_2(BuiltIn):
"""'=\='(@evaluable, @evaluable)
'=\='(E1, E2) is true iff evaluating E1 and E2 as expressions
the corresponding arithmetic values are not equal."""
def execute(self, e1, e2):
if isvariable(e1) or isvariable(e2):
self.throw_instantiation_error()
v1 = evaluate_expression(e1)
v2 = evaluate_expression(e2)
return v1 != v2
class ArithmeticLessThan_2(BuiltIn):
"""'<'(@evaluable, @evaluable)
'<'(E1, E2) is true iff evaluating E1 and E2 as expressions
the corresponding arithmetic value of E1 is less than the
corresponding arithmetic value of E2."""
def execute(self, e1, e2):
if isvariable(e1) or isvariable(e2):
self.throw_instantiation_error()
v1 = evaluate_expression(e1)
v2 = evaluate_expression(e2)
return v1 < v2
class ArithmeticLessThanOrEqual_2(BuiltIn):
"""'=<'(@evaluable, @evaluable)
'=<'(E1, E2) is true iff evaluating E1 and E2 as expressions
the corresponding arithmetic value of E1 is less than or
equal to the corresponding arithmetic value of E2."""
def execute(self, e1, e2):
if isvariable(e1) or isvariable(e2):
self.throw_instantiation_error()
v1 = evaluate_expression(e1)
v2 = evaluate_expression(e2)
return v1 <= v2
class ArithmeticGreaterThan_2(BuiltIn):
"""'>'(@evaluable, @evaluable)
'>'(E1, E2) is true iff evaluating E1 and E2 as expressions
the corresponding arithmetic value of E1 is greater than
the corresponding arithmetic value of E2."""
def execute(self, e1, e2):
if isvariable(e1) or isvariable(e2):
self.throw_instantiation_error()
v1 = evaluate_expression(e1)
v2 = evaluate_expression(e2)
return v1 > v2
class ArithmeticGreaterThanOrEqual_2(BuiltIn):
"""'>='(@evaluable, @evaluable)
'>='(E1, E2) is true iff evaluating E1 and E2 as expressions
the corresponding arithmetic value of E1 is greater than or
equal to the corresponding arithmetic value of E2."""
def execute(self, e1, e2):
if isvariable(e1) or isvariable(e2):
self.throw_instantiation_error()
v1 = evaluate_expression(e1)
v2 = evaluate_expression(e2)
return v1 >= v2
###
### Clause retrival and information (ISO 8.8)
###
class Clause_2(BuiltIn):
'''clause(+head, ?callable_term)
clause(Head, Body) is true iff:
* the predicate of Head is public, and
* there is a clause in the database which corresponds
to a term H :- B which unifies with Head :- Body.'''
def execute(self, head, body):
if isvariable(head):
self.throw_instantiation_error()
if not iscallable(head):
self.throw_type_error('callable', head)
if not (isvariable(body) or iscallable(body)):
self.throw_type_error('callable', body)
self.clauses = []
procedure = self.kb.search(head)
if not procedure:
return False
if not procedure._public:
pi = Compound('/', Atomic(head.name), Atomic(head.arity))
self.throw_permission_error('access', 'private_procedure', pi)
from .. import core
for clause in procedure.clauses():
h, b = convert_clause_to_term(clause.head(), clause.body())
if (core.unify(h, head) is not None and
core.unify(b, body) is not None):
self.clauses.append(Compound('clause', h, b))
return self.pick_clause(head, body)
def reexecute(self, head, body):
self.reset_substitution()
return self.pick_clause(head, body)
def pick_clause(self, head, body):
if not self.clauses:
return False
c = self.clauses.pop(0)
return self.unify(Compound('clause', head, body), c)
def convert_clause_to_term(head, body):
return (convert_to_term(head), convert_to_term(body))
def convert_to_term(head):
if head.arity == 0:
return Atomic(head.name)
from ..core import renamed_copy
return renamed_copy(head)
class CurrentPredicate_1(BuiltIn):
'''current_predicate(?predicate_indicator)
current_predicate(PI) is true iff PI is a predicate indicator
for one of the user-defined procedures in the database.'''
def execute(self, pi):
if not isvariable(pi) and not (pi.name == '/' and pi.arity == 2):
self.throw_type_error('predicate_indicator', pi)
self.indicators = []
for i in self.kb:
n, a = i.split('/')
indicator = Compound('/', Atomic(n), Atomic(int(a)))
from .. import core
if core.unify(pi, indicator) is not None:
self.indicators.append(indicator)
return self.pick_indicator(pi)
def reexecute(self, pi):
self.reset_substitution()
return self.pick_indicator(pi)
def pick_indicator(self, pi):
if not self.indicators:
return False
# the order in which predicate indicators are found by
# current_predicate/1 is implementation dependent
i = self.indicators.pop()
return self.unify(pi, i)
###
### Clause creation and destruction (ISO 8.9)
###
class Asserta_1(BuiltIn):
'''asserta(@clause)
asserta(Clause) is true. It is used to add Clause to the
database before all existing clauses of the procedure whose
predicate is equal to the functor of the head of Clause.'''
def execute(self, clause):
head = clause.value[1] if clause.predicate_indicator() == ':-/2' else clause
if isvariable(head):
self.throw_instantiation_error()
if isnumber(head):
self.throw_type_error('callable', head)
# errors on the conversion of the clause body to a
# goal and on access permission to a user-defined
# procedure are handled directly by the database
from ..builtin import search_builtin
if search_builtin(head):
pi = Compound('/', Atomic(head.name), Atomic(head.arity))
self.throw_permission_error('modify', 'static_procedure', pi)
self.kb.assert_clause(clause, append=False)
return True
class Assertz_1(BuiltIn):
'''assertz(@clause)
assertz(Clause) is true. It is used to add Clause to the
database after all existing clauses of the procedure whose
predicate is equal to the functor of the head of Clause.'''
def execute(self, clause):
head = clause.value[1] if clause.predicate_indicator() == ':-/2' else clause
if isvariable(head):
self.throw_instantiation_error()
if isnumber(head):
self.throw_type_error('callable', head)
# errors on the conversion of the clause body to a
# goal and on access permission to a user-defined
# procedure are handled directly by the database
from ..builtin import search_builtin
if search_builtin(head):
pi = Compound('/', Atomic(head.name), Atomic(head.arity))
self.throw_permission_error('modify', 'static_procedure', pi)
self.kb.assert_clause(clause, append=True)
return True
class Retract_1(BuiltIn):
'''retract(+clause)
retract(Clause) is true iff the database contains at least
one dynamic procedure with a clause Clause which unifies
with Head :- Body. It is used to remove those unifying
clauses from the database.'''
def execute(self, clause):
if clause.predicate_indicator() == ':-/2':
head = clause.value[1]
body = clause.value[2]
else:
head = clause
body = Atomic.TRUE
if isvariable(head):
self.throw_instantiation_error()
if isnumber(head):
self.throw_type_error('callable', head)
# error on access permission to a user-defined
# procedure is handled directly by the database
from ..builtin import search_builtin
if search_builtin(head):
pi = Compound('/', Atomic(head.name), Atomic(head.arity))
self.throw_permission_error('modify', 'static_procedure', pi)
self.clauses_to_unify = []
self.clauses_to_remove = []
procedure = self.kb.search(head)
if not procedure:
return False
from .. import core
for clause in procedure.clauses():
h, b = convert_clause_to_term(clause.head(), clause.body())
if (core.unify(h, head) is not None and
core.unify(b, body) is not None):
self.clauses_to_unify.append(Compound('clause', h, b))
self.clauses_to_remove.append(clause)
return self.pick_clause(head, body)
def reexecute(self, clause):
self.reset_substitution()
if clause.predicate_indicator() == ':-/2':
head = clause.value[1]
body = clause.value[2]
else:
head = clause
body = Atomic.TRUE
return self.pick_clause(head, body)
def pick_clause(self, head, body):
if not self.clauses_to_remove:
return False
self.kb.retract(self.clauses_to_remove.pop(0))
c = self.clauses_to_unify.pop(0)
return self.unify(Compound('clause', head, body), c)
class Abolish_1(BuiltIn):
'''abolish(@predicate_indicator)
abolish(Pred) is true. It is used to remove from the database
the procedure specified by the predicate indicator Pred and
all its clauses, leaving the database in the same state as if
the procedure identified by Pred had never existed.'''
def execute(self, pi):
if isvariable(pi):
self.throw_instantiation_error()
if pi.name == '/' and pi.arity == 2:
name, arity = pi.value[1:]
if isvariable(name) or isvariable(arity):
self.throw_instantiation_error()
if not isinstance(arity.value, int):
self.throw_type_error('integer', arity)
if not isatom(name):
self.throw_type_error('atom', name)
# TODO Missing max_arity related errors
if arity.value < 0:
self.throw_domain_error('not_less_than_zero', arity)
# error on access permission to a user-defined
# procedure is handled directly by the database
t = tuple(Variable('_') for i in range(arity.value))
c = Compound(name.name, *t)
from ..builtin import search_builtin
if search_builtin(c):
self.throw_permission_error('modify', 'static_procedure', pi)
else:
self.throw_type_error('predicate_indicator', pi)
self.kb.abolish(pi)
return True
###
### All solutions (ISO 8.10)
###
class Findall_3(BuiltIn):
'''findall(?term, +callable_term, ?list)
findall(Template, Goal, Instances) is true iff Instances
unifies with the list of values to which a variable X not
occurring in Template or Goal would be instantiated by
successive re-executions of "call(Goal), X=Template" after
systematic replacement of all variables in X by new
variables.'''
def execute(self, template, goal, instances):
if isvariable(goal):
self.throw_instantiation_error()
if isnumber(goal):
self.throw_type_error('callable', goal)
if (not isvariable(instances) and
(not islist(instances) and not ispartiallist(instances))):
self.throw_type_error('list', instances)
from .. import core
caller = core.Caller()
caller._kb = self.kb
values = []
result = caller.solve(goal)
while result:
from copy import deepcopy
v = ground(deepcopy(template), caller.currsubst())
#values.append(core.renamed_copy(v))
values.append(v._copy_term())
result = caller.solve_next()
values = List.EMPTY_LIST if not values else List.from_list(values)
return self.unify(values, instances)
def ground(term, mgu):
if isinstance(term, Variable):
if not term.value:
value = mgu.get(term.name)
if value:
return value
else:
return ground(term.binding(), mgu)
if isinstance(term, Compound):
args = []
for arg in term.value[1:]:
args.append(ground(arg, mgu))
return Compound(term.name, *args)
return term
class Bagof_3(BuiltIn):
'''bagof(?term, +callable_term, ?list)
bagof(Template, Goal, Instances) assembles as a list the
solutions of Goal for each different instantiation of the
free variables in it. The elements of each list are in
order of solution, but the order in which each list is
found is undefined.'''
def execute(self, template, goal, instances):
fvs = free_variable_set(goal, template)
self.witness = Compound('witness', *fvs) if fvs else Atomic('witness')
g = iterated_goal_term(goal)
findall = Findall_3(self.kb)
findall.execute(Compound('+', self.witness, template), g, Variable('S'))
s = findall.substitution['S']
self.s = self._create_solution_list(s)
if not self.s:
return False
return self.pick_bag(template, goal, instances)
def reexecute(self, template, goal, instances):
self.reset_substitution()
if not self.s:
return False
return self.pick_bag(template, goal, instances)
def pick_bag(self, template, goal, instances):
wt = self.s[0]
wt_list = [e for e in self.s if isvariant(wt.value[1], e.value[1])]
t_list = [e.value[2] for e in wt_list]
s_next = [e for e in self.s if e not in wt_list]
from .. import core
for wwtt, t in zip(wt_list, t_list):
ww = wwtt.value[1]
#from copy import deepcopy
#subst = core.unify(ww, deepcopy(self.witness))
subst = core.unify(ww, self.witness)
ww.apply(subst)
t.apply(subst)
self.substitution.update(subst)
t_list = List.from_list(t_list)
self.s = s_next
return self.unify(t_list, instances)
def _create_solution_list(self, s):
return [] if s == List.EMPTY_LIST else s.as_list()
class Setof_3(Bagof_3):
'''setof(?term, +callable_term, ?list)
setof/3 assembles as a list the solutions of a goal for each different
instantiation of the free variables in that goal. Each list is a sorted
list, but the order in which each list is found is undefined.'''
def _create_solution_list(self, s):
solutions = [] if s == List.EMPTY_LIST else s.as_list()
solutions = list(set(solutions))
solutions.sort()
return solutions
###
### Logic and control (ISO 8.15)
###
class Not_1(BuiltIn):
"""not(@callable_term)
not(Term) is true iff call(Term) is false."""
# """'\\+'(@callable_term)
#
# '\\+'(Term) is true iff call(Term) is false."""
def execute(self, term):
if isvariable(term):
self.throw_instantiation_error()
if isnumber(term):
self.throw_type_error('callable', term)
from .. import core
caller = core.Caller()
caller._kb = self.kb
result = caller.solve(term)
return not result
class Repeat_0(BuiltIn):
'''repeat
repeat is true. repeat is re-executable.
'''
def execute(self):
return True
def reexecute(self):
return True
###
### Atomic term processing (ISO 8.16)
###
class AtomLength_2(BuiltIn):
'''atom_length(+atom, ?integer)
atom_length(Atom, Length) is true iff integer Length
equals the number of characters of the name of the
atom Atom.'''
def execute(self, atom, length):
if isvariable(atom):
self.throw_instantiation_error()
if not isatom(atom):
self.throw_type_error('atom', atom)
if (not isvariable(length) and
not (isnumber(length) and isinstance(length.value, int))):
self.throw_type_error('integer', length)
if isnumber(length) and length.value < 0:
self.throw_domain_error('not_less_than_zero', length)
size = Atomic(len(atom.name))
return self.unify(length, size)
class AtomConcat_3(BuiltIn):
'''atom_concat(?atom, ?atom, +atom)\natom_concat(+atom, +atom, -atom)
atom_concat(Atom_1, Atom_2, Atom_12) is true iff characters
of the name of the atom Atom_12 are the result of concatenating
the characters of the name of the atom Atom_2 to the characters
of the name of the atom Atom_1.'''
def execute(self, atom1, atom2, atom12):
if isvariable(atom1) and isvariable(atom12):
self.throw_instantiation_error()
if isvariable(atom2) and isvariable(atom12):
self.throw_instantiation_error()
if not isvariable(atom1) and not isatom(atom1):
self.throw_type_error('atom', atom1)
if not isvariable(atom2) and not isatom(atom2):
self.throw_type_error('atom', atom2)
if not isvariable(atom12) and not isatom(atom12):
self.throw_type_error('atom', atom12)
if isvariable(atom1) and isvariable(atom2):
s = atom12.name
self.data = [(s[:i], s[i:], s) for i in range(len(s) + 1)]
elif isvariable(atom1):
s = atom12.name
if not s.endswith(atom2.name):
return False
else:
i = s.index(atom2.name)
self.data = [(s[:i], s[i:], s)]
elif isvariable(atom2):
s = atom12.name
if not s.startswith(atom1.name):
return False
else:
i = len(atom1.name)
self.data = [(s[:i], s[i:], s)]
else:
n1 = atom1.name
n2 = atom2.name
self.data = [(n1, n2, n1 + n2)]
return self.pick_data(atom1, atom2, atom12)
def reexecute(self, atom1, atom2, atom12):
self.reset_substitution()
if not self.data:
return False
return self.pick_data(atom1, atom2, atom12)
def pick_data(self, atom1, atom2, atom12):
c = self.data.pop(0)
return (self.unify(atom1, Atomic(c[0])) and
self.unify(atom2, Atomic(c[1])) and
self.unify(atom12, Atomic(c[2])))
class SubAtom_5(BuiltIn):
'''sub_atom(+atom, ?integer, ?integer, ?integer, ?atom)
sub_atom(Atom, Before, Length, After, Sub_atom) is true iff atom Atom can
be broken into three pieces, AtomL, Sub_atom, and AtomR, such that Before
is the number of characters of the name of AtomL, Length is the number of
characters of the name of Sub_atom, and After is the number of characters
of the name of AtomR.'''
def execute(self, atom, before, length, after, subatom):
if isvariable(atom):
self.throw_instantiation_error()
if not isvariable(atom) and not isatom(atom):
self.throw_type_error('atom', atom)
if not isvariable(subatom) and not isatom(subatom):
self.throw_type_error('atom', subatom)
if (not isvariable(before) and
not (isnumber(before) and isinstance(before.value, int))):
self.throw_type_error('integer', before)
if (not isvariable(length) and
not (isnumber(length) and isinstance(length.value, int))):
self.throw_type_error('integer', length)
if (not isvariable(after) and
not (isnumber(after) and isinstance(after.value, int))):
self.throw_type_error('integer', after)
if isnumber(before) and before.value < 0:
self.throw_domain_error('not_less_than_zero', before)
if isnumber(length) and length.value < 0:
self.throw_domain_error('not_less_than_zero', length)
if isnumber(after) and after.value < 0:
self.throw_domain_error('not_less_than_zero', after)
n = atom.name
start = before.value if isinstance(before, Atomic) else 0
end = len(n) - (after.value if isinstance(after, Atomic) else 0)
self.data = []
while start <= end:
for i in range(start, end + 1):
self.data.append((n[start:i], start))
start += 1
if isinstance(before, Atomic):
self.data = [(d, p) for (d, p) in self.data if n.index(d, p) == before.value]
if isinstance(length, Atomic):
self.data = [(d, p) for (d, p) in self.data if len(d) == length.value]
if isinstance(after, Atomic):
self.data = [(d, p) for (d, p) in self.data if len(n) - n.index(d, p) - len(d) == after.value]
if isinstance(subatom, Atomic):
self.data = [(d, p) for (d, p) in self.data if d == subatom.value]
if not self.data:
return False
return self.pick_data(atom, before, length, after, subatom)
def reexecute(self, atom, before, length, after, subatom):
self.reset_substitution()
if not self.data:
return False
return self.pick_data(atom, before, length, after, subatom)
def pick_data(self, atom, before, length, after, subatom):
s, p = self.data.pop(0)
b = atom.name.index(s, p)
l = len(s)
a = len(atom.name) - (b + l)
return (self.unify(before, Atomic(b)) and
self.unify(length, Atomic(l)) and
self.unify(after, Atomic(a)) and
self.unify(subatom, Atomic(s)))
class AtomChars_2(BuiltIn):
'''atom_chars(+atom, ?character_list)\natom_chars(-atom, +character_list)
atom_chars(Atom, List) is true iff List is a list whose elements
are the one-char atoms whose names are the successive characters
of the name of atom Atom.'''
def execute(self, atom, charlist):
if not isvariable(atom) and not isatom(atom):
self.throw_type_error('atom', atom)
if isvariable(atom):
if isvariable(charlist):
self.throw_instantiation_error()
if not islist(charlist) and not ispartiallist(charlist):
self.throw_type_error('list', charlist)
for element in charlist.as_list():
if isvariable(element):
self.throw_instantiation_error()
if isatom(element) and len(element.name) != 1:
self.throw_type_error('character', element)
if isvariable(atom):
from ..core import deref
chars = [deref(c).name for c in charlist.as_list()]
return self.unify(atom, Atomic(''.join(chars)))
elif isvariable(charlist) or islist(charlist) or ispartiallist(charlist):
chars = [Atomic(c) for c in atom.name]
return self.unify(charlist, List.from_list(chars))
else:
chars = [c.name for c in charlist.as_list()]
return atom.name == ''.join(chars)
class AtomCodes_2(BuiltIn):
'''atom_codes(+atom, ?character_code_list)\natom_codes(-atom, +character_code_list)
atom_codes(Atom, List) is true iff List is a list whose elements
correspond to the successive characters of the name of atom Atom,
and the value of each element is the character code for the
corresponding character of the name.'''
def execute(self, atom, codelist):
if not isvariable(atom) and not isatom(atom):
self.throw_type_error('atom', atom)
if isvariable(atom):
if ispartiallist(codelist):
self.throw_instantiation_error()
if not islist(codelist) and not ispartiallist(codelist):
self.throw_type_error('list', codelist)
for element in codelist.as_list():
if isvariable(element):
self.throw_instantiation_error()
if not isvariable(element):
try:
chr(element.value)
except UnicodeDecodeError:
self.throw_representation_error(element)
if isvariable(atom):
chars = [chr(code.value) for code in codelist.as_list()]
return self.unify(atom, Atomic(''.join(chars)))
elif isvariable(codelist) or ispartiallist(codelist):
codes = [Atomic(ord(char)) for char in atom.name]
return self.unify(codelist, List.from_list(codes))
else:
chars = [chr(code.value) for code in codelist.as_list()]
return atom.name == ''.join(chars)
class CharCode_2(BuiltIn):
'''char_code(+character, ?character_code)\nchar_code(-character, +character_code)
char_code(Char, Code) is true iff the character code for the one-char atom
Char is Code.'''
def execute(self, char, code):
if isvariable(char) and isvariable(code):
self.throw_instantiation_error()
if not isvariable(char) and len(char.name) != 1:
self.throw_type_error('character', char)
if not isvariable(code) and not isinstance(code.value, int):
self.throw_type_error('integer', code)
if not isvariable(code):
try:
chr(code.value)
except UnicodeDecodeError:
self.throw_representation_error(code)
if isvariable(char):
c = chr(code.value)
return self.unify(char, Atomic(c))
elif isvariable(code):
c = ord(char.name)
return self.unify(code, Atomic(c))
else:
return ord(char.name) == code.value
class NumberChars_2(BuiltIn):
'''number_chars(+number, ?character_list)\nnumber_chars(-number, +character_list)
number_chars(Number, List) is true iff List is a list whose elements are
the one-char atoms corresponding to a character sequence of Number which
could be output.'''
def execute(self, number, charlist):
if isvariable(number) and ispartiallist(charlist):
self.throw_instantiation_error()
if isvariable(number):
for element in charlist.as_list():
if isvariable(element):
self.throw_instantiation_error()
if not isvariable(number) and not isnumber(number):
self.throw_type_error('number', number)
if isvariable(number) and not islist(charlist) and not ispartiallist(charlist):
self.throw_type_error('list', charlist)
if islist(charlist):
for element in charlist.as_list():
if isatom(element) and len(element.name) != 1:
self.throw_type_error('character', element)
if isvariable(number) or islist(charlist):
from ..parser import PrologParser, InvalidTermException
s = ''.join([char.name for char in charlist.as_list()])
try:
# the parser needs an End Token
n = PrologParser(s + '.').read_term()
return self.unify(number, n)
except InvalidTermException as e:
self.throw_syntax_error(Atomic(s))
else:
chars = list(str(number.value)) # FIXME this should use write_canonical/1
lst = [Atomic(c) for c in chars]
return self.unify(charlist, List.from_list(lst))
class NumberCodes_2(BuiltIn):
'''number_codes(+number, ?character_code_list)\nnumber_codes(-number, ?character_code_list)
number_codes(Number, List) is true iff List is a list whose elements are
the character codes corresponding to a character sequence of Number which
could be output.'''
def execute(self, number, codelist):
if isvariable(number) and ispartiallist(codelist):
self.throw_instantiation_error()
if isvariable(number):
for element in codelist.as_list():
if isvariable(element):
self.throw_instantiation_error()
if not isvariable(number) and not isnumber(number):
self.throw_type_error('number', number)
if isvariable(number) and not islist(codelist) and not ispartiallist(codelist):
self.throw_type_error('list', codelist)
if islist(codelist):
for element in codelist.as_list():
if not isvariable(element):
try:
chr(element.value)
except UnicodeDecodeError:
self.throw_representation_error(element)
if isvariable(number) or islist(codelist):
from ..parser import PrologParser, InvalidTermException
s = ''.join([chr(code.value) for code in codelist.as_list()])
try:
# the parser needs an End Token
n = PrologParser(s + '.').read_term()
return self.unify(number, n)
except InvalidTermException as e:
self.throw_syntax_error(Atomic(s))
else:
chars = list(str(number.value)) # FIXME this should use write_canonical/1
lst = [Atomic(ord(c)) for c in chars]
return self.unify(codelist, List.from_list(lst))
###
### Implementation defined hooks (ISO 8.17)
###
class SetPrologFlag_2(BuiltIn):
'''set_prolog_flag(+flag, @nonvar)
A goal set_prolog_flag(Flag, Value) enables the value associated with a
Prolog flag to be altered.'''
def execute(self, flag, value):
if isvariable(flag) or isvariable(value):
self.throw_instantiation_error()
if not isvariable(flag) and not isatom(flag):
self.throw_type_error('atom', flag)
from .. import core # for flags
if flag.name not in core._FLAGS:
self.throw_domain_error('prolog_flag', flag)
f = core._FLAGS[flag.name]
if len(f.allowed) == 1:
self.throw_permission_error('modify', 'flag', flag)
if value.name not in f.allowed:
culprit = Compound('+', flag, value)
self.throw_domain_error('flag_value', culprit)
core._FLAGS[flag.name] = f._replace(value=value.name)
return True
class CurrentPrologFlag_2(BuiltIn):
'''current_prolog_flag(?flag, ?term)
current_prolog_flag(Flag, Value) is true iff Flag is a flag supported by
the processor, and Value is the value currently associated with it.'''
def execute(self, flag, value):
from .. import core # for flags
if not isvariable(flag) and not isatom(flag):
self.throw_type_error('atom', flag)
if isatom(flag) and not core._FLAGS[flag.name]:
self.throw_domain_error('prolog_flag', flag)
self.flags = {f for f in core._FLAGS.values() if core.unify(flag, Atomic(f.name)) is not None}
if not self.flags:
return False
return self.pick_flag(flag, value)
def reexecute(self, flag, value):
self.reset_substitution()
if not self.flags:
return False
return self.pick_flag(flag, value)
def pick_flag(self, flag, value):
f = self.flags.pop()
return self.unify(flag, Atomic(f.name)) and self.unify(value, Atomic(f.value))
class Halt_0(BuiltIn):
'''halt
halt neither succeeds nor fails. It has the side effect of exiting from the
processor and returning to whatever system invoked Prolog.'''
def execute(self):
exit(0)
class Halt_1(BuiltIn):
'''halt(+integer)
halt(X) neither succeeds nor fails. It has the side effect of exiting from
the processor and returning to whatever system invoked Prolog, passing the
value of X as a message.'''
def execute(self, x):
if isvariable(x):
self.throw_instantiation_error()
if not isvariable(x) and not isnumber(x) and not isinstance(x.value, int):
self.throw_type_error('integer', x)
exit(x.value)
# Utility functions
def free_variable_set(t, v):
'''The free variable set FV of a term T with respect to
a term V is a set of variables defined as the set
difference of the variable set of T and BV where BV is
a set of variables defined as the union of the variable
set of V and the existential variable set of T.'''
vst = variable_set(t)
vsv = variable_set(v)
est = existential_variable_set(t)
return vst.difference(vsv.union(est))
# TODO This should be distributed onto the Term hierarchy classes
def variable_set(term):
'''The variable set Sv of a term T is a set of variables
defined recursively as follows:
* if T is an atomic term, then Sv is the empty set
* else if T is a variable then Sv is {T}
* else if T is a compound term then Sv is the union of
the variable sets for each of the arguments of T.'''
from ..core import deref
if isinstance(term, Variable):
if term.isfree():
return {term}
else:
term = deref(term)
if isinstance(term, Atomic):
return set()
s = set()
if isinstance(term, Compound):
for arg in term.value[1:]:
s.update(variable_set(arg))
else: # a list
for e in term.as_list():
s.update(variable_set(e))
return s
def existential_variable_set(term):
'''The existential variables set EV of a term T is a set
of variables defined recursively as follows:
* if T is a variable or an atomic term, then EV is the
empty set
* else if T unifies with ^(V, G) then EV is the union
of the variable set of V and the existential variables
set of the term G
* else EV is the empty set.'''
s = set()
if isinstance(term, Atomic) or isvariable(term):
return s
if term.name == '^' and term.arity == 2:
s.update(variable_set(term.value[1]))
s.update(existential_variable_set(term.value[2]))
return s
return s
def iterated_goal_term(term):
'''The iterated goal term G of a term T is a term defined
recursively as follows:
* if T unifies with ^(_, Goal) then G is the iterated
goal term of Goal
* else G is T.'''
if term.name == '^' and term.arity == 2:
return iterated_goal_term(term.value[2])
return term
def isvariant(t, v):
'''Two terms are variants if there is a bijection s of
the variables of the former to the variables of the
latter such that the latter term results from replacing
each variable X in the former by Xs.'''
from ..core import deref
t = deref(t)
v = deref(v)
if isinstance(t, Atomic) and isinstance(v, Atomic):
return t == v
if isvariable(t) and isvariable(v):
return True
if isinstance(t, Compound) and isinstance(v, Compound):
if t.name != v.name or t.arity != v.arity:
return False
bijection = {}
for a1, a2 in zip(t.value[1:], v.value[1:]):
if isvariable(a1) and isvariable(a2) and not a1.name.startswith('_'):
a = bijection.get(a1)
if a is not None and a2 != a:
return False
else:
bijection[a1] = a2
else:
if not isvariant(a1, a2):
return False
return True
return False
PREDICATES = {
# Term unification (ISO 8.2)
'=/2' : Unify_2,
'\=/2' : NotUnifiable_2,
# Type testing (ISO 8.3)
'var/1' : Var_1,
'atom/1' : Atom_1,
'integer/1' : Integer_1,
'float/1' : Float_1,
'atomic/1' : Atomic_1,
'compound/1' : Compound_1,
'nonvar/1' : Nonvar_1,
'number/1' : Number_1,
# Term comparison (ISO 8.4)
'@=</2' : TermLessThanOrEqual_2,
'==/2' : TermIdentical_2,
'\==/2' : TermNotIdentical_2,
'@</2' : TermLessThan_2,
'@>/2' : TermGreaterThan_2,
'@>=/2' : TermGreaterThanOrEqual_2,
# Term creation and decomposition (ISO 8.5)
'functor/3' : Functor_3,
'arg/3' : Arg_3,
'=../2' : Univ_2,
'copy_term/2' : CopyTerm_2,
# Arithmetic evaluation (ISO 8.6)
'is/2' : Is_2,
# Arithmetic comparison (ISO 8.7)
'=:=/2' : ArithmeticEqual_2,
'=\=/2' : ArithmeticNotEqual_2,
'</2' : ArithmeticLessThan_2,
'=</2' : ArithmeticLessThanOrEqual_2,
'>/2' : ArithmeticGreaterThan_2,
'>=/2' : ArithmeticGreaterThanOrEqual_2,
# Clause retrival and information (ISO 8.8)
'clause/2' : Clause_2,
'current_predicate/1' : CurrentPredicate_1,
# Clause creation and destruction (ISO 8.9)
'asserta/1' : Asserta_1,
'assertz/1' : Assertz_1,
'retract/1' : Retract_1,
'abolish/1' : Abolish_1,
# All solutions (ISO 8.10)
'findall/3' : Findall_3,
'bagof/3' : Bagof_3,
'setof/3' : Setof_3,
# Logic and control (ISO 8.15)
# FIXME \+ does not work because of what is probably a parser
# bug: the operator's "scope" is much wider than the single
# goal, even when using parentheses!
'\+/1' : Not_1, 'not/1' : Not_1,
'repeat/0' : Repeat_0,
# Atomic term processing (ISO 8.16)
'atom_length/2' : AtomLength_2,
'atom_concat/3' : AtomConcat_3,
'sub_atom/5' : SubAtom_5,
'atom_chars/2' : AtomChars_2,
'atom_codes/2' : AtomCodes_2,
'char_code/2' : CharCode_2,
'number_chars/2' : NumberChars_2,
'number_codes/2' : NumberCodes_2,
# Implementation defined hooks (ISO 8.17)
'set_prolog_flag/2' : SetPrologFlag_2,
'current_prolog_flag/2' : CurrentPrologFlag_2,
'halt/0' : Halt_0,
'halt/1' : Halt_1
}
|
[
"copy.deepcopy",
"math.sqrt",
"math.modf",
"math.copysign",
"math.log"
] |
[((14667, 14681), 'math.copysign', 'copysign', (['(1)', 'x'], {}), '(1, x)\n', (14675, 14681), False, 'from math import copysign\n'), ((15045, 15052), 'math.modf', 'modf', (['x'], {}), '(x)\n', (15049, 15052), False, 'from math import modf\n'), ((15388, 15395), 'math.modf', 'modf', (['x'], {}), '(x)\n', (15392, 15395), False, 'from math import modf\n'), ((16226, 16232), 'math.log', 'log', (['x'], {}), '(x)\n', (16229, 16232), False, 'from math import log\n'), ((16522, 16529), 'math.sqrt', 'sqrt', (['x'], {}), '(x)\n', (16526, 16529), False, 'from math import sqrt\n'), ((31211, 31229), 'copy.deepcopy', 'deepcopy', (['template'], {}), '(template)\n', (31219, 31229), False, 'from copy import deepcopy\n')]
|
#!/usr/bin/env python
from itertools import izip
import numpy as np
import h5py
from progress.bar import Bar
import sys
import rospy
import rosbag
from sensor_msgs.msg import Imu, Image
def main():
if len(sys.argv) < 2:
print("Usage: {} dataset_name".format(sys.argv[0]))
exit(1)
file_name = sys.argv[1]
log_file = h5py.File('../dataset/log/{}.h5'.format(file_name))
camera_file = h5py.File('../dataset/camera/{}.h5'.format(file_name))
zipped_log = izip(
log_file['times'],
log_file['fiber_accel'],
log_file['fiber_gyro'])
with rosbag.Bag('{}.bag'.format(file_name), 'w') as bag:
bar = Bar('Camera', max=len(camera_file['X']))
for i, img_data in enumerate(camera_file['X']):
m_img = Image()
m_img.header.stamp = rospy.Time.from_sec(0.01 * i)
m_img.height = img_data.shape[1]
m_img.width = img_data.shape[2]
m_img.step = 3 * img_data.shape[2]
m_img.encoding = 'rgb8'
m_img.data = np.transpose(img_data, (1, 2, 0)).flatten().tolist()
bag.write('/camera/image_raw', m_img, m_img.header.stamp)
bar.next()
bar.finish()
bar = Bar('IMU', max=len(log_file['times']))
for time, v_accel, v_gyro in zipped_log:
m_imu = Imu()
m_imu.header.stamp = rospy.Time.from_sec(time)
[setattr(m_imu.linear_acceleration, c, v_accel[i]) for i, c in enumerate('xyz')]
[setattr(m_imu.angular_velocity, c, v_gyro[i]) for i, c in enumerate('xyz')]
bag.write('/fiber_imu', m_imu, m_imu.header.stamp)
bar.next()
bar.finish()
if __name__ == "__main__":
main()
|
[
"sensor_msgs.msg.Image",
"numpy.transpose",
"sensor_msgs.msg.Imu",
"rospy.Time.from_sec",
"itertools.izip"
] |
[((474, 546), 'itertools.izip', 'izip', (["log_file['times']", "log_file['fiber_accel']", "log_file['fiber_gyro']"], {}), "(log_file['times'], log_file['fiber_accel'], log_file['fiber_gyro'])\n", (478, 546), False, 'from itertools import izip\n'), ((737, 744), 'sensor_msgs.msg.Image', 'Image', ([], {}), '()\n', (742, 744), False, 'from sensor_msgs.msg import Imu, Image\n'), ((772, 801), 'rospy.Time.from_sec', 'rospy.Time.from_sec', (['(0.01 * i)'], {}), '(0.01 * i)\n', (791, 801), False, 'import rospy\n'), ((1243, 1248), 'sensor_msgs.msg.Imu', 'Imu', ([], {}), '()\n', (1246, 1248), False, 'from sensor_msgs.msg import Imu, Image\n'), ((1276, 1301), 'rospy.Time.from_sec', 'rospy.Time.from_sec', (['time'], {}), '(time)\n', (1295, 1301), False, 'import rospy\n'), ((969, 1002), 'numpy.transpose', 'np.transpose', (['img_data', '(1, 2, 0)'], {}), '(img_data, (1, 2, 0))\n', (981, 1002), True, 'import numpy as np\n')]
|
from django.db import models
class Pokemon(models.Model):
title = models.CharField(max_length=200, verbose_name="Русское название")
title_en = models.CharField(max_length=200, verbose_name="Английское название", blank=True)
title_jp = models.CharField(max_length=200, verbose_name="Японское название", blank=True)
image = models.ImageField(verbose_name="Картинка покемона", null=True, blank=True)
description = models.TextField(verbose_name="Описание", blank=True)
previous_evolution = models.ForeignKey("self", verbose_name="Из кого эволюционирует",
on_delete=models.SET_NULL, null=True, blank=True,
related_name="evolution")
def __str__(self):
return self.title
class PokemonEntity(models.Model):
pokemon = models.ForeignKey(Pokemon, on_delete=models.CASCADE, related_name="entities")
lat = models.FloatField(verbose_name="Lat")
lon = models.FloatField(verbose_name="Lon")
appeared_at = models.DateTimeField(null=True, verbose_name="Appeared at", blank=True)
disappeared_at = models.DateTimeField(null=True, verbose_name="Disappeared at", blank=True)
level = models.IntegerField(null=True, verbose_name="Level", blank=True)
health = models.IntegerField(null=True, verbose_name="Health", blank=True)
strength = models.IntegerField(null=True, verbose_name="Strength", blank=True)
defence = models.IntegerField(null=True, verbose_name="Defence", blank=True)
stamina = models.IntegerField(null=True, verbose_name="Stamina", blank=True)
def __str__(self):
return f'{self.pokemon}, lvl: {self.level}'
|
[
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.FloatField",
"django.db.models.ImageField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((72, 137), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Русское название"""'}), "(max_length=200, verbose_name='Русское название')\n", (88, 137), False, 'from django.db import models\n'), ((153, 238), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Английское название"""', 'blank': '(True)'}), "(max_length=200, verbose_name='Английское название', blank=True\n )\n", (169, 238), False, 'from django.db import models\n'), ((249, 327), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Японское название"""', 'blank': '(True)'}), "(max_length=200, verbose_name='Японское название', blank=True)\n", (265, 327), False, 'from django.db import models\n'), ((340, 414), 'django.db.models.ImageField', 'models.ImageField', ([], {'verbose_name': '"""Картинка покемона"""', 'null': '(True)', 'blank': '(True)'}), "(verbose_name='Картинка покемона', null=True, blank=True)\n", (357, 414), False, 'from django.db import models\n'), ((433, 486), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Описание"""', 'blank': '(True)'}), "(verbose_name='Описание', blank=True)\n", (449, 486), False, 'from django.db import models\n'), ((512, 657), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""self"""'], {'verbose_name': '"""Из кого эволюционирует"""', 'on_delete': 'models.SET_NULL', 'null': '(True)', 'blank': '(True)', 'related_name': '"""evolution"""'}), "('self', verbose_name='Из кого эволюционирует', on_delete=\n models.SET_NULL, null=True, blank=True, related_name='evolution')\n", (529, 657), False, 'from django.db import models\n'), ((840, 917), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Pokemon'], {'on_delete': 'models.CASCADE', 'related_name': '"""entities"""'}), "(Pokemon, on_delete=models.CASCADE, related_name='entities')\n", (857, 917), False, 'from django.db import models\n'), ((928, 965), 'django.db.models.FloatField', 'models.FloatField', ([], {'verbose_name': '"""Lat"""'}), "(verbose_name='Lat')\n", (945, 965), False, 'from django.db import models\n'), ((976, 1013), 'django.db.models.FloatField', 'models.FloatField', ([], {'verbose_name': '"""Lon"""'}), "(verbose_name='Lon')\n", (993, 1013), False, 'from django.db import models\n'), ((1032, 1103), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'verbose_name': '"""Appeared at"""', 'blank': '(True)'}), "(null=True, verbose_name='Appeared at', blank=True)\n", (1052, 1103), False, 'from django.db import models\n'), ((1125, 1199), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'verbose_name': '"""Disappeared at"""', 'blank': '(True)'}), "(null=True, verbose_name='Disappeared at', blank=True)\n", (1145, 1199), False, 'from django.db import models\n'), ((1212, 1276), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'verbose_name': '"""Level"""', 'blank': '(True)'}), "(null=True, verbose_name='Level', blank=True)\n", (1231, 1276), False, 'from django.db import models\n'), ((1290, 1355), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'verbose_name': '"""Health"""', 'blank': '(True)'}), "(null=True, verbose_name='Health', blank=True)\n", (1309, 1355), False, 'from django.db import models\n'), ((1371, 1438), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'verbose_name': '"""Strength"""', 'blank': '(True)'}), "(null=True, verbose_name='Strength', blank=True)\n", (1390, 1438), False, 'from django.db import models\n'), ((1453, 1519), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'verbose_name': '"""Defence"""', 'blank': '(True)'}), "(null=True, verbose_name='Defence', blank=True)\n", (1472, 1519), False, 'from django.db import models\n'), ((1534, 1600), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'verbose_name': '"""Stamina"""', 'blank': '(True)'}), "(null=True, verbose_name='Stamina', blank=True)\n", (1553, 1600), False, 'from django.db import models\n')]
|
import random
import asyncio
from discord.ext import commands
import discord
import typing
from base import BaseCog
# https://github.com/Rapptz/discord.py/blob/v1.7.2/examples/guessing_game.py
class GuessingGame(BaseCog, name="Free guessing game -- with nothing at stake."):
def __init__(self, bot):
self.bot = bot
async def tick(self, ctx, correct):
emoji = '\N{WHITE HEAVY CHECK MARK}' if correct else '\N{CROSS MARK}'
try:
await ctx.message.add_reaction(emoji)
except discord.HTTPException:
pass
@commands.command(
name='free_guess_now',
help='Guess a random number from 1-9',
)
async def free_guess_now(self, ctx, num: int):
answer = random.randint(1, 9)
correct = num == answer
await self.tick(ctx, correct)
await ctx.reply('Correct!' if correct else f'Incorrect. The answer is {answer}', mention_author=True)
@commands.command(
name='free_guess',
help='Guess a random number between 1-99 or a provided range.'
)
async def free_guess(self, ctx, start: typing.Optional[int] = 1, end: typing.Optional[int]= 99):
await ctx.send(f'Guess a number between {start}-{end}')
def is_correct(m):
return m.author == ctx.message.author and m.content.isdigit()
answer = random.randint(start, end)
try:
guess = await self.bot.wait_for('message', check=is_correct, timeout=5.0)
except asyncio.TimeoutError:
return await ctx.reply(f'Sorry, you took too long. The answer is {answer}')
correct = int(guess.content) == answer
await self.tick(ctx, correct)
await ctx.reply('Correct!' if correct else f'Incorrect. The answer is {answer}', mention_author=True)
def setup(bot):
bot.add_cog(GuessingGame(bot))
|
[
"discord.ext.commands.command",
"random.randint"
] |
[((578, 656), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""free_guess_now"""', 'help': '"""Guess a random number from 1-9"""'}), "(name='free_guess_now', help='Guess a random number from 1-9')\n", (594, 656), False, 'from discord.ext import commands\n'), ((951, 1055), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""free_guess"""', 'help': '"""Guess a random number between 1-99 or a provided range."""'}), "(name='free_guess', help=\n 'Guess a random number between 1-99 or a provided range.')\n", (967, 1055), False, 'from discord.ext import commands\n'), ((744, 764), 'random.randint', 'random.randint', (['(1)', '(9)'], {}), '(1, 9)\n', (758, 764), False, 'import random\n'), ((1354, 1380), 'random.randint', 'random.randint', (['start', 'end'], {}), '(start, end)\n', (1368, 1380), False, 'import random\n')]
|
# -*- coding: utf-8 -*-
"""
Tests for py33_exceptions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from trollius import py33_exceptions
class TestWrapErrors(unittest.TestCase):
def test_ebadf_wrapped_to_OSError(self):
# https://github.com/jamadden/trollius/issues/17
import socket
import os
import errno
s = socket.socket()
os.close(s.fileno())
with self.assertRaises(socket.error) as exc:
s.send(b'abc')
self.assertEqual(exc.exception.errno, errno.EBADF)
with self.assertRaises(OSError) as exc:
py33_exceptions.wrap_error(s.send, b'abc')
self.assertEqual(exc.exception.errno, errno.EBADF)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"trollius.py33_exceptions.wrap_error",
"socket.socket"
] |
[((824, 839), 'unittest.main', 'unittest.main', ([], {}), '()\n', (837, 839), False, 'import unittest\n'), ((442, 457), 'socket.socket', 'socket.socket', ([], {}), '()\n', (455, 457), False, 'import socket\n'), ((689, 731), 'trollius.py33_exceptions.wrap_error', 'py33_exceptions.wrap_error', (['s.send', "b'abc'"], {}), "(s.send, b'abc')\n", (715, 731), False, 'from trollius import py33_exceptions\n')]
|
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
import tensorflow_datasets as tfds
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import json
from glob import glob
from PIL import Image
import pickle
import re
import os
import time
import datetime
from tqdm import tqdm
# our visual transformer code
import visual_transformer as vt
####### GPU CONFIGS FOR RTX 2070/NVidia GPU ###############
## Please comment out if not training on GPU ##
## this is important for running CuDNN on GPU ##
tf.keras.backend.clear_session() # - for easy reset of notebook state
# chck if GPU can be seen by TF
tf.config.list_physical_devices('GPU')
# tf.debugging.set_log_device_placement(True) # only to check GPU usage
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
# Restrict TensorFlow to only use the first GPU
try:
tf.config.experimental.set_memory_growth(gpus[0], True)
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
except RuntimeError as e:
# Visible devices must be set before GPUs have been initialized
print(e)
###############################################
#########################
# Load Data file mapping captions to images
#########################
prefix = './data/'
save_prefix = prefix + "features/" # for storing prefixes
annot = prefix + 'data.csv'
inputs = pd.read_csv(annot, header=None, names=["caption", "image"])
print("Data file loaded")
#########################
# Tokenize Captions
#########################
cap_tokenizer = tfds.features.text.SubwordTextEncoder.load_from_file(
"captions")
print(cap_tokenizer.encode("A man riding a wave on top of a surfboard.".lower()))
print("Tokenizer hydrated")
# Max length of captions split by spaces
lens = inputs['caption'].map(lambda x: len(x.split()))
# Max length of captions after tokenization
# tfds demonstrated in earlier chapters
# This is a quick way if data fits in memory
lens = inputs['caption'].map(lambda x: len(cap_tokenizer.encode(x.lower())))
# We will set this as the max length of captions
# which cover 99% of the captions without truncation
max_len = int(lens.quantile(0.99) + 1) # for special tokens
start = '<s>'
end = '</s>'
inputs['tokenized'] = inputs['caption'].map(
lambda x: start + x.lower().strip() + end)
print("Some prepared captions: ", inputs.tokenized[:5])
def tokenize_pad(x):
x = cap_tokenizer.encode(x)
if len(x) < max_len:
x = x + [0] * int(max_len - len(x))
return x[:max_len]
inputs['tokens'] = inputs.tokenized.map(lambda x: tokenize_pad(x))
print("Captions tokenized and padded/truncated")
# now to compute a column with the new name of the saved image feature file
inputs['img_features'] = inputs['image'].map(lambda x:
save_prefix +
x.split('/')[-1][:-3]
+ 'npy')
#########################
# Prepare tf.DataSet for training
#########################
captions = inputs.tokens.tolist()
img_names = inputs.img_features.tolist()
# we only took half validation examples so we dont need to split
# img_train, img_val, cap_train, cap_val = train_test_split(img_names,
# captions,
# test_size=0.2,
# random_state=42)
img_train, cap_train = img_names, captions
# Load the numpy file with extracted ResNet50 feature
def load_image_feature(img_name, cap):
img_tensor = np.load(img_name.decode('utf-8'))
return img_tensor, cap
dataset = tf.data.Dataset.from_tensor_slices((img_train, cap_train))
# Use map to load the numpy files in parallel
dataset = dataset.map(lambda item1, item2: tf.numpy_function(
load_image_feature, [item1, item2], [tf.float32, tf.int32]),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# To verify
for img, cap in dataset.take(2):
print(img.shape)
print(cap.numpy())
print("Training dataset prepared.")
#########################
# Build Transformer Model
#########################
# These parameters control the size and complexity of the model
# BERT (base) uses 12 layers, 768 as embedding dim, 12 attention heads
# and 4H (4x768) as feedforward size
# Small Model
num_layers = 4
d_model = 128
dff = d_model * 4
num_heads = 8
# BERT Base Model
# num_layers = 12
# d_model = 768
# dff = d_model * 4 # as per BERT paper
# num_heads = 12
target_vocab_size = cap_tokenizer.vocab_size # already includes start/end tokens
dropout_rate = 0.1
EPOCHS = 20 # should see results in 4-10 epochs also
transformer = vt.Transformer(num_layers, d_model, num_heads, dff,
target_vocab_size,
pe_input=49, # 7x7 pixels
pe_target=target_vocab_size,
rate=dropout_rate,
use_pe=False
)
#########################
# Training Setup
#########################
# Learning Rate Schedule, as per `Attention is All You Need' paper
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, d_model, warmup_steps=4000):
super(CustomSchedule, self).__init__()
self.d_model = d_model
self.d_model = tf.cast(self.d_model, tf.float32)
self.warmup_steps = warmup_steps
def __call__(self, step):
arg1 = tf.math.rsqrt(step)
arg2 = step * (self.warmup_steps ** -1.5)
return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
learning_rate = CustomSchedule(d_model)
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98,
epsilon=1e-9)
# Visualize the schedule: uncomment to plot
# import matplotlib.pyplot as plt
# temp_learning_rate_schedule = CustomSchedule(d_model)
#
# plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32)))
# plt.ylabel("Learning Rate")
# plt.xlabel("Train Step")
#########################
# Loss and Metrics
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction='none')
def loss_function(real, pred):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_sum(loss_) / tf.reduce_sum(mask)
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
name='train_accuracy')
#########################
# Helper function for creating masks
def create_masks(inp, tar):
# Encoder padding mask - This should just be 1's
# input shape should be (batch_size, 49, 2048)
inp_seq = tf.ones([inp.shape[0], inp.shape[1]]) # all pixels to be used
enc_padding_mask = vt.create_padding_mask(inp_seq)
# Used in the 2nd attention block in the decoder.
# This padding mask is used to mask the encoder outputs.
dec_padding_mask = vt.create_padding_mask(inp_seq)
# Used in the 1st attention block in the decoder.
# It is used to pad and mask future tokens in the input received by
# the decoder.
look_ahead_mask = vt.create_look_ahead_mask(tf.shape(tar)[1])
dec_target_padding_mask = vt.create_padding_mask(tar)
combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)
return enc_padding_mask, combined_mask, dec_padding_mask
# Checkpoints setup
checkpoint_path = "./checkpoints/train-small-model-nope-20ep"
ckpt = tf.train.Checkpoint(transformer=transformer,
optimizer=optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path,
max_to_keep=5)
# if a checkpoint exists, restore the latest checkpoint.
if ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
print('Latest checkpoint restored!!')
#########################
# Training Loops
#########################
# setup training parameters
BUFFER_SIZE = 1000
BATCH_SIZE = 64 # can reduce or increase depending on GPU capacity
# Shuffle and batch
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
# Perform one step of raining on one batch in an epoch
@tf.function
def train_step(inp, tar):
tar_inp = tar[:, :-1]
tar_real = tar[:, 1:]
enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp,
tar_inp)
with tf.GradientTape() as tape:
predictions, _ = transformer(inp, tar_inp,
True,
enc_padding_mask,
combined_mask,
dec_padding_mask)
loss = loss_function(tar_real, predictions)
gradients = tape.gradient(loss, transformer.trainable_variables)
optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))
train_loss(loss)
train_accuracy(tar_real, predictions)
# Begin Training
for epoch in range(EPOCHS):
start_tm = time.time()
train_loss.reset_states()
train_accuracy.reset_states()
# inp -> images, tar -> caption
for (batch, (inp, tar)) in enumerate(dataset):
train_step(inp, tar)
if batch % 100 == 0:
ts = datetime.datetime.now().strftime("%d-%b-%Y (%H:%M:%S)")
print('[{}] Epoch {} Batch {} Loss {:.6f} Accuracy {:.6f}'.format(
ts, epoch + 1, batch, train_loss.result(),
train_accuracy.result()))
if (epoch + 1) % 2 == 0:
ckpt_save_path = ckpt_manager.save()
print('Saving checkpoint for epoch {} at {}'.format(epoch + 1,
ckpt_save_path))
print('Epoch {} Loss {:.6f} Accuracy {:.6f}'.format(epoch + 1,
train_loss.result(),
train_accuracy.result()))
print('Time taken for 1 epoch: {} secs\n'.format(time.time() - start_tm))
transformer.summary()
|
[
"tensorflow.reduce_sum",
"pandas.read_csv",
"tensorflow.keras.metrics.Mean",
"tensorflow.maximum",
"tensorflow.numpy_function",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.train.Checkpoint",
"tensorflow.math.equal",
"tensorflow.cast",
"tensorflow.keras.optimizers.Adam",
"datetime.datetime.now",
"tensorflow.ones",
"visual_transformer.create_padding_mask",
"tensorflow.config.experimental.set_visible_devices",
"tensorflow.keras.backend.clear_session",
"tensorflow.config.experimental.set_memory_growth",
"visual_transformer.Transformer",
"tensorflow.config.experimental.list_logical_devices",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.math.minimum",
"tensorflow_datasets.features.text.SubwordTextEncoder.load_from_file",
"tensorflow.config.list_physical_devices",
"tensorflow.data.Dataset.from_tensor_slices",
"time.time",
"tensorflow.shape",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.train.CheckpointManager",
"tensorflow.GradientTape",
"tensorflow.math.rsqrt"
] |
[((705, 737), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (735, 737), True, 'import tensorflow as tf\n'), ((809, 847), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (840, 847), True, 'import tensorflow as tf\n'), ((928, 979), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (972, 979), True, 'import tensorflow as tf\n'), ((1712, 1771), 'pandas.read_csv', 'pd.read_csv', (['annot'], {'header': 'None', 'names': "['caption', 'image']"}), "(annot, header=None, names=['caption', 'image'])\n", (1723, 1771), True, 'import pandas as pd\n'), ((1887, 1951), 'tensorflow_datasets.features.text.SubwordTextEncoder.load_from_file', 'tfds.features.text.SubwordTextEncoder.load_from_file', (['"""captions"""'], {}), "('captions')\n", (1939, 1951), True, 'import tensorflow_datasets as tfds\n'), ((4035, 4093), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(img_train, cap_train)'], {}), '((img_train, cap_train))\n', (4069, 4093), True, 'import tensorflow as tf\n'), ((5059, 5208), 'visual_transformer.Transformer', 'vt.Transformer', (['num_layers', 'd_model', 'num_heads', 'dff', 'target_vocab_size'], {'pe_input': '(49)', 'pe_target': 'target_vocab_size', 'rate': 'dropout_rate', 'use_pe': '(False)'}), '(num_layers, d_model, num_heads, dff, target_vocab_size,\n pe_input=49, pe_target=target_vocab_size, rate=dropout_rate, use_pe=False)\n', (5073, 5208), True, 'import visual_transformer as vt\n'), ((6080, 6159), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['learning_rate'], {'beta_1': '(0.9)', 'beta_2': '(0.98)', 'epsilon': '(1e-09)'}), '(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-09)\n', (6104, 6159), True, 'import tensorflow as tf\n'), ((6525, 6611), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': '"""none"""'}), "(from_logits=True, reduction=\n 'none')\n", (6570, 6611), True, 'import tensorflow as tf\n'), ((6869, 6909), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""train_loss"""'}), "(name='train_loss')\n", (6890, 6909), True, 'import tensorflow as tf\n'), ((6927, 6992), 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""train_accuracy"""'}), "(name='train_accuracy')\n", (6969, 6992), True, 'import tensorflow as tf\n'), ((7996, 8061), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'transformer': 'transformer', 'optimizer': 'optimizer'}), '(transformer=transformer, optimizer=optimizer)\n', (8015, 8061), True, 'import tensorflow as tf\n'), ((8105, 8169), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['ckpt', 'checkpoint_path'], {'max_to_keep': '(5)'}), '(ckpt, checkpoint_path, max_to_keep=5)\n', (8131, 8169), True, 'import tensorflow as tf\n'), ((6748, 6780), 'tensorflow.cast', 'tf.cast', (['mask'], {'dtype': 'loss_.dtype'}), '(mask, dtype=loss_.dtype)\n', (6755, 6780), True, 'import tensorflow as tf\n'), ((7209, 7246), 'tensorflow.ones', 'tf.ones', (['[inp.shape[0], inp.shape[1]]'], {}), '([inp.shape[0], inp.shape[1]])\n', (7216, 7246), True, 'import tensorflow as tf\n'), ((7296, 7327), 'visual_transformer.create_padding_mask', 'vt.create_padding_mask', (['inp_seq'], {}), '(inp_seq)\n', (7318, 7327), True, 'import visual_transformer as vt\n'), ((7467, 7498), 'visual_transformer.create_padding_mask', 'vt.create_padding_mask', (['inp_seq'], {}), '(inp_seq)\n', (7489, 7498), True, 'import visual_transformer as vt\n'), ((7741, 7768), 'visual_transformer.create_padding_mask', 'vt.create_padding_mask', (['tar'], {}), '(tar)\n', (7763, 7768), True, 'import visual_transformer as vt\n'), ((7789, 7841), 'tensorflow.maximum', 'tf.maximum', (['dec_target_padding_mask', 'look_ahead_mask'], {}), '(dec_target_padding_mask, look_ahead_mask)\n', (7799, 7841), True, 'import tensorflow as tf\n'), ((9650, 9661), 'time.time', 'time.time', ([], {}), '()\n', (9659, 9661), False, 'import time\n'), ((1058, 1113), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpus[0]', '(True)'], {}), '(gpus[0], True)\n', (1098, 1113), True, 'import tensorflow as tf\n'), ((1122, 1180), 'tensorflow.config.experimental.set_visible_devices', 'tf.config.experimental.set_visible_devices', (['gpus[0]', '"""GPU"""'], {}), "(gpus[0], 'GPU')\n", (1164, 1180), True, 'import tensorflow as tf\n'), ((1204, 1254), 'tensorflow.config.experimental.list_logical_devices', 'tf.config.experimental.list_logical_devices', (['"""GPU"""'], {}), "('GPU')\n", (1247, 1254), True, 'import tensorflow as tf\n'), ((4184, 4261), 'tensorflow.numpy_function', 'tf.numpy_function', (['load_image_feature', '[item1, item2]', '[tf.float32, tf.int32]'], {}), '(load_image_feature, [item1, item2], [tf.float32, tf.int32])\n', (4201, 4261), True, 'import tensorflow as tf\n'), ((5759, 5792), 'tensorflow.cast', 'tf.cast', (['self.d_model', 'tf.float32'], {}), '(self.d_model, tf.float32)\n', (5766, 5792), True, 'import tensorflow as tf\n'), ((5881, 5900), 'tensorflow.math.rsqrt', 'tf.math.rsqrt', (['step'], {}), '(step)\n', (5894, 5900), True, 'import tensorflow as tf\n'), ((6676, 6698), 'tensorflow.math.equal', 'tf.math.equal', (['real', '(0)'], {}), '(real, 0)\n', (6689, 6698), True, 'import tensorflow as tf\n'), ((6811, 6831), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss_'], {}), '(loss_)\n', (6824, 6831), True, 'import tensorflow as tf\n'), ((6834, 6853), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask'], {}), '(mask)\n', (6847, 6853), True, 'import tensorflow as tf\n'), ((9040, 9057), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (9055, 9057), True, 'import tensorflow as tf\n'), ((5967, 5994), 'tensorflow.math.rsqrt', 'tf.math.rsqrt', (['self.d_model'], {}), '(self.d_model)\n', (5980, 5994), True, 'import tensorflow as tf\n'), ((5997, 6024), 'tensorflow.math.minimum', 'tf.math.minimum', (['arg1', 'arg2'], {}), '(arg1, arg2)\n', (6012, 6024), True, 'import tensorflow as tf\n'), ((7693, 7706), 'tensorflow.shape', 'tf.shape', (['tar'], {}), '(tar)\n', (7701, 7706), True, 'import tensorflow as tf\n'), ((10631, 10642), 'time.time', 'time.time', ([], {}), '()\n', (10640, 10642), False, 'import time\n'), ((9891, 9914), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9912, 9914), False, 'import datetime\n')]
|
#!/usr/bin/env python3
"""
main.py - The main module for processing data and creating visual summaries
for this study.
"""
# =========================================================================== #
# METADATA
# =========================================================================== #
__author__ = 'Robert (Bob) <NAME>'
__credits__ = ['Robert (Bob) <NAME>']
__created_date__ = 'Sep 16, 2020'
__modified_date__ = 'Sep 16, 2020'
# =========================================================================== #
# EXPORTS
# =========================================================================== #
# Define the module's API -- the list of exportable objects (classes,
# functions, etc.) -- when performing a "wild import" (`from field import *`).
__all__ = [
'DEBUG',
]
# =========================================================================== #
# IMPORTS
# =========================================================================== #
# -- Python Standard Library -- #
import os
# -- 3rd Party -- #
import matplotlib.dates as mpl_dates
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# =========================================================================== #
# CONSTANTS
# =========================================================================== #
# -- Data -- #
DAILY = 'daily'
WEEKLY = 'weekly'
COLUMNS = {
'positive': 'pos',
'negative': 'neg',
'negativeIncrease': 'negIncrease',
'positiveIncrease': 'posIncrease',
}
DOW = [
'Sunday', 'Monday', 'Tuesday', 'Wednesday',
'Thursday', 'Friday', 'Saturday',
]
# -- Debugging -- #
DEBUG = True
# -- Filesytem -- #
ROOT_DIR = os.path.join(os.getcwd(), '..')
DATA_DIR = '../data'
RESULTS_DIR = '../results'
# -- URLs -- #
SOURCE_URL = 'https://covidtracking.com/api/v1/us/daily.csv'
# =========================================================================== #
# FUNCTIONS
# =========================================================================== #
# -- Data Analytics -- #
def plot_series(df: pd.DataFrame):
fig = plt.figure()
ax = plt.subplot(111)
ax.xaxis.set_major_formatter(
mpl_dates.DateFormatter('%m-%d-%Y'),
)
sns.lineplot(
data=df,
x='date',
y='posIncrease',
marker='o',
)
ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases')
ax.set_xlabel('Date')
ax.set_ylabel('Count of Cases')
ax.xaxis_date()
# plt.show()
# Debug data frame.
DEBUG and preview(df, plot_series.__name__)
fig.savefig(f'{RESULTS_DIR}/plot_series.png')
def set_figure_defaults():
# Use seaborn style defaults. Set the default figure size.
sns.set(
style='darkgrid',
rc={'figure.figsize': (16, 9)},
)
def summarize_by_dow(df: pd.DataFrame):
fig = plt.figure()
ax = plt.subplot(111)
sns.boxplot(
data=df,
x='dow',
y='posIncrease',
order=DOW,
)
ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases')
ax.set_xlabel('Day of Week')
ax.set_ylabel('Count of Cases')
# Debug data frame.
DEBUG and preview(df, summarize_by_dow.__name__)
# plt.show()
fig.savefig(f'{RESULTS_DIR}/summarize_dow.png')
def summarize_by_dow_percent(df: pd.DataFrame):
fig = plt.figure()
ax = plt.subplot(111)
sns.boxplot(
data=df,
x='dow',
y='pctWeeklyPosIncrease',
order=DOW,
)
ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases')
ax.set_xlabel('Day of Week')
ax.set_ylabel('Percent of Weekly Count of Cases')
# Debug data frame.
DEBUG and preview(df, summarize_by_dow_percent.__name__)
# plt.show()
fig.savefig(f'{RESULTS_DIR}/summarize_dow_percent.png')
def summarize_by_dow_zscore(df: pd.DataFrame):
fig = plt.figure()
ax = plt.subplot(111)
sns.boxplot(
data=df,
x='dow',
y='zscoreWeeklyPosIncrease',
order=DOW,
)
ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases')
ax.set_xlabel('Day of Week')
ax.set_ylabel('Z-Score of Weekly Count of Cases')
# Debug data frame.
DEBUG and preview(df, summarize_by_dow_zscore.__name__)
# plt.show()
fig.savefig(f'{RESULTS_DIR}/summarize_dow_zscore.png')
def summarize_maxima(df: pd.DataFrame):
fig = plt.figure()
ax = plt.subplot(111)
sns.countplot(
data=df,
x='dow',
order=DOW,
)
ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases')
ax.set_xlabel('Day of Week')
ax.set_ylabel('Count of Local Maxima of Cases')
# Debug data frame.
DEBUG and preview(df, summarize_maxima.__name__)
# plt.show()
fig.savefig(f'{RESULTS_DIR}/summarize_maxima.png')
def visualize_data(df: pd.DataFrame):
set_figure_defaults()
plot_series(df.sort_values('date'))
summarize_by_dow(df)
summarize_by_dow_percent(df)
summarize_by_dow_zscore(df)
summarize_maxima(df[df['localMaximum'].eq(True)])
# Debug data frame.
DEBUG and preview(df, visualize_data.__name__)
# Return data frame for reuse.
return df
# -- Data Processing: Extract -- #
def extract_data() -> pd.DataFrame:
# Download source data as CSV from an API.
df = pd.read_csv(SOURCE_URL)
# Save a copy of the extracted data.
df.to_csv(
f'{DATA_DIR}/01_raw/{DAILY}_extract_data.csv',
index=False,
)
# Debug data frame.
DEBUG and preview(df, extract_data.__name__)
# Return data frame for reuse.
return df
# -- Data Processing: Transform -- #
def transform_data(df: pd.DataFrame) -> pd.DataFrame:
df = rename_columns(df)
df = add_columns(df)
# Debug data frame.
DEBUG and preview(df, transform_data.__name__)
# Return data frame for reuse.
return df
def add_columns(df: pd.DataFrame):
# Format date.
df.date = pd.to_datetime(df.date, format='%Y%m%d')
# Set the date as the DataFrame's index.
df = df.set_index('date')
# Add date-derived columns.
df['date'] = df.index.date
df['year'] = df.index.year
df['month'] = df.index.month
df['week'] = df.index.week
df['dow'] = df.index.day_name()
df['dowIndex'] = df.index.dayofweek
# Add group-summarization columns.
df_weekly = df.groupby('week', as_index=False)['posIncrease'].agg(
{
'weeklyPosIncrease': 'sum',
'meanWeeklyPosIncrease': 'mean',
'stdWeeklyPosIncrease': 'std',
},
)
df = pd.merge(
df, df_weekly,
how='left', on='week',
)
df['pctWeeklyPosIncrease'] = percent(df.posIncrease, df.weeklyPosIncrease)
df['zscoreWeeklyPosIncrease'] = zScore(
df.posIncrease,
df.meanWeeklyPosIncrease,
df.stdWeeklyPosIncrease,
)
# Add delta columns.
df['day1LagDelta'] = lag_delta(df.posIncrease, 1)
df['day1LeadDelta'] = lead_delta(df.posIncrease, 1)
# Add local extrema columns.
df['localMaximum'] = df.apply(local_max, axis=1)
df['localMinimum'] = df.apply(local_min, axis=1)
# Save a copy of the processed data.
df.to_csv(
f'{DATA_DIR}/02_intermediate/{DAILY}_add_columns.csv',
index=True,
)
# Debug data frame.
DEBUG and preview(df, add_columns.__name__)
# Return data frame for reuse.
return df
def rename_columns(df: pd.DataFrame) -> pd.DataFrame:
# Rename columns.
df.rename(columns=COLUMNS, inplace=True)
# Save a copy of the processed data.
df.to_csv(
f'{DATA_DIR}/02_intermediate/{DAILY}_rename_columns.csv',
index=True,
)
# Debug data frame.
DEBUG and preview(df, rename_columns.__name__)
# Return data frame for reuse.
return df
# -- Data Processing: Load -- #
# -- Utilities -- #
def lag_delta(series, period):
return series - series.shift(period)
def lead_delta(series, period):
return series.shift(-period) - series
def local_max(row):
if row['day1LagDelta'] > 0 and row['day1LeadDelta'] < 0:
return True
else:
return False
def local_min(row):
if row['day1LagDelta'] < 0 and row['day1LeadDelta'] > 0:
return True
else:
return False
def percent(num, denom):
return 100 * num / denom
def preview(df: pd.DataFrame, func_name: str):
print(f'INSIDE {func_name}(): type =', type(df).__name__)
print(df.head(5))
def zScore(x, mean, std):
return (x - mean) / std
# -- Main Program -- #
def main():
df = extract_data()
df = transform_data(df)
visualize_data(df)
# =========================================================================== #
# MAIN EXECUTION
# =========================================================================== #
# -- Main Program -- #
# If this module is in the main module, call the main() function.
if __name__ == '__main__':
main()
|
[
"matplotlib.pyplot.subplot",
"seaborn.lineplot",
"os.getcwd",
"pandas.read_csv",
"pandas.merge",
"matplotlib.pyplot.figure",
"seaborn.boxplot",
"pandas.to_datetime",
"seaborn.countplot",
"matplotlib.dates.DateFormatter",
"seaborn.set"
] |
[((1681, 1692), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1690, 1692), False, 'import os\n'), ((2073, 2085), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2083, 2085), True, 'import matplotlib.pyplot as plt\n'), ((2096, 2112), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (2107, 2112), True, 'import matplotlib.pyplot as plt\n'), ((2204, 2264), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'df', 'x': '"""date"""', 'y': '"""posIncrease"""', 'marker': '"""o"""'}), "(data=df, x='date', y='posIncrease', marker='o')\n", (2216, 2264), True, 'import seaborn as sns\n'), ((2699, 2756), 'seaborn.set', 'sns.set', ([], {'style': '"""darkgrid"""', 'rc': "{'figure.figsize': (16, 9)}"}), "(style='darkgrid', rc={'figure.figsize': (16, 9)})\n", (2706, 2756), True, 'import seaborn as sns\n'), ((2832, 2844), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2842, 2844), True, 'import matplotlib.pyplot as plt\n'), ((2855, 2871), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (2866, 2871), True, 'import matplotlib.pyplot as plt\n'), ((2877, 2934), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'df', 'x': '"""dow"""', 'y': '"""posIncrease"""', 'order': 'DOW'}), "(data=df, x='dow', y='posIncrease', order=DOW)\n", (2888, 2934), True, 'import seaborn as sns\n'), ((3326, 3338), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3336, 3338), True, 'import matplotlib.pyplot as plt\n'), ((3349, 3365), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (3360, 3365), True, 'import matplotlib.pyplot as plt\n'), ((3371, 3437), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'df', 'x': '"""dow"""', 'y': '"""pctWeeklyPosIncrease"""', 'order': 'DOW'}), "(data=df, x='dow', y='pctWeeklyPosIncrease', order=DOW)\n", (3382, 3437), True, 'import seaborn as sns\n'), ((3862, 3874), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3872, 3874), True, 'import matplotlib.pyplot as plt\n'), ((3885, 3901), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (3896, 3901), True, 'import matplotlib.pyplot as plt\n'), ((3907, 3976), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'df', 'x': '"""dow"""', 'y': '"""zscoreWeeklyPosIncrease"""', 'order': 'DOW'}), "(data=df, x='dow', y='zscoreWeeklyPosIncrease', order=DOW)\n", (3918, 3976), True, 'import seaborn as sns\n'), ((4392, 4404), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4402, 4404), True, 'import matplotlib.pyplot as plt\n'), ((4415, 4431), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (4426, 4431), True, 'import matplotlib.pyplot as plt\n'), ((4437, 4479), 'seaborn.countplot', 'sns.countplot', ([], {'data': 'df', 'x': '"""dow"""', 'order': 'DOW'}), "(data=df, x='dow', order=DOW)\n", (4450, 4479), True, 'import seaborn as sns\n'), ((5329, 5352), 'pandas.read_csv', 'pd.read_csv', (['SOURCE_URL'], {}), '(SOURCE_URL)\n', (5340, 5352), True, 'import pandas as pd\n'), ((5960, 6000), 'pandas.to_datetime', 'pd.to_datetime', (['df.date'], {'format': '"""%Y%m%d"""'}), "(df.date, format='%Y%m%d')\n", (5974, 6000), True, 'import pandas as pd\n'), ((6587, 6633), 'pandas.merge', 'pd.merge', (['df', 'df_weekly'], {'how': '"""left"""', 'on': '"""week"""'}), "(df, df_weekly, how='left', on='week')\n", (6595, 6633), True, 'import pandas as pd\n'), ((2156, 2191), 'matplotlib.dates.DateFormatter', 'mpl_dates.DateFormatter', (['"""%m-%d-%Y"""'], {}), "('%m-%d-%Y')\n", (2179, 2191), True, 'import matplotlib.dates as mpl_dates\n')]
|
"""Govee API client package."""
import asyncio
import logging
import time
import math
from contextlib import asynccontextmanager
from dataclasses import dataclass
from datetime import datetime
from events import Events
from typing import Any, List, Optional, Tuple, Union
import aiohttp
from govee_api_laggat.__version__ import VERSION
from govee_api_laggat.learning_storage import (
GoveeAbstractLearningStorage,
GoveeLearnedInfo,
)
_LOGGER = logging.getLogger(__name__)
_API_BASE_URL = "https://developer-api.govee.com"
_API_PING = _API_BASE_URL + "/ping"
_API_DEVICES = _API_BASE_URL + "/v1/devices"
_API_DEVICES_CONTROL = _API_BASE_URL + "/v1/devices/control"
_API_DEVICES_STATE = _API_BASE_URL + "/v1/devices/state"
# API rate limit header keys
_RATELIMIT_TOTAL = "Rate-Limit-Total" # The maximum number of requests you're permitted to make per minute.
_RATELIMIT_REMAINING = "Rate-Limit-Remaining" # The number of requests remaining in the current rate limit window.
_RATELIMIT_RESET = "Rate-Limit-Reset" # The time at which the current rate limit window resets in UTC epoch seconds.
# return state from hisory for n seconds after controlling the device
DELAY_GET_FOLLOWING_SET_SECONDS = 2
# do not send another control within n seconds after controlling the device
DELAY_SET_FOLLOWING_SET_SECONDS = 1
@dataclass
class GoveeDevice(object):
""" Govee Device DTO """
device: str
model: str
device_name: str
controllable: bool
retrievable: bool
support_cmds: List[str]
support_turn: bool
support_brightness: bool
support_color: bool
support_color_tem: bool
online: bool
power_state: bool
brightness: int
color: Tuple[int, int, int]
color_temp: int
timestamp: int
source: str
error: str
lock_set_until: int
lock_get_until: int
learned_set_brightness_max: int
learned_get_brightness_max: int
before_set_brightness_turn_on: bool
config_offline_is_off: bool # this is the learning config, possibly overridden by a global config
class GoveeError(Exception):
"""Base Exception thrown from govee_api_laggat."""
class GoveeDeviceNotFound(GoveeError):
"""Device is unknown."""
class Govee(object):
"""Govee API client."""
async def __aenter__(self):
"""Async context manager enter."""
self._session = aiohttp.ClientSession()
return self
async def __aexit__(self, *err):
"""Async context manager exit."""
if self._session:
await self._session.close()
self._session = None
def __init__(
self,
api_key: str,
*,
learning_storage: Optional[GoveeAbstractLearningStorage] = None,
):
"""Init with an API_KEY and storage for learned values."""
_LOGGER.debug("govee_api_laggat v%s", VERSION)
self._online = True # assume we are online
self.events = Events()
self._api_key = api_key
self._devices = {}
self._rate_limit_on = 5 # safe available call count for multiple processes
self._limit = 100
self._limit_remaining = 100
self._limit_reset = 0
self._config_offline_is_off = None
self._learning_storage = learning_storage
if not self._learning_storage:
# use an internal learning storage as long as we run.
# we will need to re-learn every time again.
self._learning_storage = GoveeAbstractLearningStorage()
@classmethod
async def create(
cls,
api_key: str,
*,
learning_storage: Optional[GoveeAbstractLearningStorage] = None,
):
"""Use create method if you want to use this Client without an async context manager."""
self = Govee(api_key, learning_storage=learning_storage)
await self.__aenter__()
return self
async def close(self):
"""Use close when your are finished with the Client without using an async context manager."""
await self.__aexit__()
def _getHeaders(self, auth: bool):
"""Return Request headers with/without authentication."""
if auth:
return {"Govee-API-Key": self._api_key}
return {}
@asynccontextmanager
async def _api_put(self, *, auth=True, url: str, json):
"""API HTTP Put call."""
async with self._api_request_internal(
lambda: self._session.put(
url=url, headers=self._getHeaders(auth), json=json
)
) as response:
yield response
@asynccontextmanager
async def _api_get(self, *, auth=True, url: str, params=None):
"""API HTTP Get call."""
async with self._api_request_internal(
lambda: self._session.get(
url=url, headers=self._getHeaders(auth), params=params
)
) as response:
yield response
@asynccontextmanager
async def _api_request_internal(self, request_lambda):
"""API Methond handling all HTTP calls.
This also handles:
- rate-limiting
- online/offline status
"""
err = None
await self.rate_limit_delay()
try:
async with request_lambda() as response:
self._set_online(True) # we got something, so we are online
self._track_rate_limit(response)
# return the async content manager response
yield response
except aiohttp.ClientError as ex:
# we are offline
self._set_online(False)
err = "error from aiohttp: %s" % repr(ex)
except Exception as ex:
err = "unknown error: %s" % repr(ex)
if err:
class error_response:
def __init__(self, err_msg):
self._err_msg = err_msg
status = -1
async def text(self):
return self._err_msg
yield error_response("_api_request_internal: " + err)
def _utcnow(self):
"""Helper method to get utc now as seconds."""
return datetime.timestamp(datetime.now())
def _track_rate_limit(self, response):
"""Track rate limiting."""
if response.status == 429:
_LOGGER.warning(
f"Rate limit exceeded, check if other devices also utilize the govee API"
)
limit_unknown = True
if (
_RATELIMIT_TOTAL in response.headers
and _RATELIMIT_REMAINING in response.headers
and _RATELIMIT_RESET in response.headers
):
try:
self._limit = int(response.headers[_RATELIMIT_TOTAL])
self._limit_remaining = int(response.headers[_RATELIMIT_REMAINING])
self._limit_reset = float(response.headers[_RATELIMIT_RESET])
_LOGGER.debug(
f"Rate limit total: {self._limit}, remaining: {self._limit_remaining} in {self.rate_limit_reset_seconds} seconds"
)
limit_unknown = False
except Exception as ex:
_LOGGER.warning(f"Error trying to get rate limits: {ex}")
if limit_unknown:
self._limit_remaining -= 1
async def rate_limit_delay(self):
"""Delay a call when rate limiting is active."""
# do we have requests left?
if self.rate_limit_remaining <= self.rate_limit_on:
# do we need to sleep?
sleep_sec = self.rate_limit_reset_seconds
if sleep_sec > 0:
_LOGGER.warning(
f"Rate limiting active, {self._limit_remaining} of {self._limit} remaining, sleeping for {sleep_sec}s."
)
await asyncio.sleep(sleep_sec)
@property
def rate_limit_total(self):
"""Rate limit is counted down from this value."""
return self._limit
@property
def rate_limit_remaining(self):
"""Remaining Rate limit."""
return self._limit_remaining
@property
def rate_limit_reset(self):
"""UTC time in seconds when the rate limit will be reset."""
return self._limit_reset
@property
def rate_limit_reset_seconds(self):
"""Seconds until the rate limit will be reset."""
return self._limit_reset - self._utcnow()
@property
def rate_limit_on(self):
"""Remaining calls that trigger rate limiting.
Defaults to 5, which means there is some room for other clients.
"""
return self._rate_limit_on
@rate_limit_on.setter
def rate_limit_on(self, val):
"""Set the remaining calls that trigger rate limiting."""
if val > self._limit:
raise GoveeError(
f"Rate limiter threshold {val} must be below {self._limit}"
)
if val < 1:
raise GoveeError(f"Rate limiter threshold {val} must be above 1")
self._rate_limit_on = val
@property
def config_offline_is_off(self):
"""Get the global config option config_offline_is_off."""
return self._config_offline_is_off
@config_offline_is_off.setter
def config_offline_is_off(self, val: bool):
"""
Set global behavour when device is offline.
None: default, use config_offline_is_off from learning, or False by default.
False: an offline device doesn't change power state.
True: an offline device is shown as off.
"""
self._config_offline_is_off = val
@property
def devices(self) -> List[GoveeDevice]:
"""Cached devices list."""
lst = []
for dev in self._devices:
lst.append(self._devices[dev])
return lst
def device(self, device) -> GoveeDevice:
"""Single device from cache."""
_, device = self._get_device(device)
return device
@property
def online(self):
"""Last request was able to connect to the API."""
return self._online
def _set_online(self, online: bool):
"""Set the online state and fire an event on change."""
if self._online != online:
self._online = online
# inform about state change
self.events.online(self._online)
if not online:
# show all devices as offline
for device in self.devices:
device.online = False
async def check_connection(self) -> bool:
"""Check connection to API."""
try:
# this will set self.online
await self.ping()
except:
pass
return self.online
async def ping(self) -> Tuple[float, str]:
"""Ping the api endpoint. No API_KEY is needed."""
_LOGGER.debug("ping")
start = time.time()
ping_ok_delay = None
err = None
async with self._api_get(url=_API_PING, auth=False) as response:
result = await response.text()
delay = int((time.time() - start) * 1000)
if response.status == 200:
if "Pong" == result:
ping_ok_delay = max(1, delay)
else:
err = f"API-Result wrong: {result}"
else:
result = await response.text()
err = f"API-Error {response.status}: {result}"
return ping_ok_delay, err
async def get_devices(self) -> Tuple[List[GoveeDevice], str]:
"""Get and cache devices."""
_LOGGER.debug("get_devices")
devices = {}
err = None
async with self._api_get(url=_API_DEVICES) as response:
if response.status == 200:
result = await response.json()
timestamp = self._utcnow()
learning_infos = await self._learning_storage._read_cached()
for item in result["data"]["devices"]:
device_str = item["device"]
model_str = item["model"]
is_retrievable = item["retrievable"]
# assuming defaults for learned/configured values
learned_set_brightness_max = None
learned_get_brightness_max = None
before_set_brightness_turn_on = False
config_offline_is_off = False # effenctive state
# defaults by some conditions
if not is_retrievable:
learned_get_brightness_max = -1
if model_str == "H6104":
before_set_brightness_turn_on = True
# load learned/configured values
if device_str in learning_infos:
learning_info = learning_infos[device_str]
learned_set_brightness_max = learning_info.set_brightness_max
learned_get_brightness_max = learning_info.get_brightness_max
before_set_brightness_turn_on = learning_info.before_set_brightness_turn_on
config_offline_is_off = learning_info.config_offline_is_off
# create device DTO
devices[device_str] = GoveeDevice(
device=device_str,
model=model_str,
device_name=item["deviceName"],
controllable=item["controllable"],
retrievable=is_retrievable,
support_cmds=item["supportCmds"],
support_turn="turn" in item["supportCmds"],
support_brightness="brightness" in item["supportCmds"],
support_color="color" in item["supportCmds"],
support_color_tem="colorTem" in item["supportCmds"],
# defaults for state
online=True,
power_state=False,
brightness=0,
color=(0, 0, 0),
color_temp=0,
timestamp=timestamp,
source="history",
error=None,
lock_set_until=0,
lock_get_until=0,
learned_set_brightness_max=learned_set_brightness_max,
learned_get_brightness_max=learned_get_brightness_max,
before_set_brightness_turn_on=before_set_brightness_turn_on,
config_offline_is_off=config_offline_is_off
)
else:
result = await response.text()
err = f"API-Error {response.status}: {result}"
# cache last get_devices result
self._devices = devices
return self.devices, err
def _get_device(self, device: Union[str, GoveeDevice]) -> Tuple[str, GoveeDevice]:
"""Get a device by address or GoveeDevice DTO.
returns: device_address, device_dto
"""
device_str = device
if isinstance(device, GoveeDevice):
device_str = device.device
if not device_str in self._devices:
device = None # disallow unknown devices
elif isinstance(device, str) and device_str in self._devices:
device = self._devices[device_str]
else:
raise GoveeDeviceNotFound(device_str)
return device_str, device
def _is_success_result_message(self, result) -> bool:
"""Given an aiohttp result checks if it is a success result."""
return "message" in result and result["message"] == "Success"
async def turn_on(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]:
"""Turn on a device, return success and error message."""
return await self._turn(device, "on")
async def turn_off(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]:
"""Turn off a device, return success and error message."""
return await self._turn(device, "off")
async def _turn(
self, device: Union[str, GoveeDevice], onOff: str
) -> Tuple[bool, str]:
"""Turn command called by turn_on and turn_off."""
success = False
err = None
device_str, device = self._get_device(device)
if not device:
err = f"Invalid device {device_str}, {device}"
else:
command = "turn"
params = onOff
result, err = await self._control(device, command, params)
success = False
if not err:
success = self._is_success_result_message(result)
if success:
self._devices[device_str].timestamp = self._utcnow
self._devices[device_str].source = "history"
self._devices[device_str].power_state = onOff == "on"
return success, err
async def set_brightness(
self, device: Union[str, GoveeDevice], brightness: int
) -> Tuple[bool, str]:
"""Set brightness to 0-254."""
success = False
err = None
device_str, device = self._get_device(device)
if not device:
err = f"Invalid device {device_str}, {device}"
else:
if brightness < 0 or brightness > 254:
err = f"set_brightness: invalid value {brightness}, allowed range 0 .. 254"
else:
if brightness > 0 and device.before_set_brightness_turn_on:
await self.turn_on(device)
# api doesn't work if we don't sleep
await asyncio.sleep(1)
# set brightness as 0..254
brightness_set = brightness
brightness_result = brightness_set
brightness_set_100 = 0
if brightness_set > 0:
brightness_set_100 = max(1, math.floor(brightness * 100 / 254))
brightness_result_100 = math.ceil(brightness_set_100 * 254 / 100)
if device.learned_set_brightness_max == 100:
# set brightness as 0..100
brightness_set = brightness_set_100
brightness_result = brightness_result_100
command = "brightness"
result, err = await self._control(device, command, brightness_set)
if err:
# try again with 0-100 range
if "API-Error 400" in err: # Unsupported Cmd Value
# set brightness as 0..100 as 0..254 didn't work
brightness_set = brightness_set_100
brightness_result = brightness_result_100
result, err = await self._control(
device, command, brightness_set
)
if not err:
device.learned_set_brightness_max = 100
await self._learn(device)
else:
if brightness_set > 100:
device.learned_set_brightness_max = 254
await self._learn(device)
if not err:
success = self._is_success_result_message(result)
if success:
self._devices[device_str].timestamp = self._utcnow
self._devices[device_str].source = "history"
self._devices[device_str].brightness = brightness_result
self._devices[device_str].power_state = brightness_result > 0
return success, err
async def _learn(self, device):
"""Persist learned information from device DTO."""
learning_infos: Dict[
str, GoveeLearnedInfo
] = await self._learning_storage._read_cached()
changed = False
# init Dict and entry for device
if learning_infos == None:
learning_infos = {}
if device.device not in learning_infos:
learning_infos[device.device] = GoveeLearnedInfo()
# output what was lerned, and learn
if (
learning_infos[device.device].set_brightness_max
!= device.learned_set_brightness_max
):
_LOGGER.debug(
"learned device %s uses range 0-%s for setting brightness.",
device.device,
device.learned_set_brightness_max,
)
learning_infos[
device.device
].set_brightness_max = device.learned_set_brightness_max
changed = True
if (
learning_infos[device.device].get_brightness_max
!= device.learned_get_brightness_max
):
_LOGGER.debug(
"learned device %s uses range 0-%s for getting brightness state.",
device.device,
device.learned_get_brightness_max,
)
if device.learned_get_brightness_max == 100:
_LOGGER.info(
"brightness range for %s is assumed. If the brightness slider doesn't match the actual brightness pull the brightness up to max once.",
device.device,
)
changed = True
learning_infos[
device.device
].get_brightness_max = device.learned_get_brightness_max
if changed:
await self._learning_storage._write_cached(learning_infos)
async def set_color_temp(
self, device: Union[str, GoveeDevice], color_temp: int
) -> Tuple[bool, str]:
"""Set color temperature to 2000-9000."""
success = False
err = None
device_str, device = self._get_device(device)
if not device:
err = f"Invalid device {device_str}, {device}"
else:
if color_temp < 2000 or color_temp > 9000:
err = f"set_color_temp: invalid value {color_temp}, allowed range 2000-9000"
else:
command = "colorTem"
result, err = await self._control(device, command, color_temp)
if not err:
success = self._is_success_result_message(result)
if success:
self._devices[device_str].timestamp = self._utcnow
self._devices[device_str].source = "history"
self._devices[device_str].color_temp = color_temp
return success, err
async def set_color(
self, device: Union[str, GoveeDevice], color: Tuple[int, int, int]
) -> Tuple[bool, str]:
"""Set color (r, g, b) where each value may be in range 0-255 """
success = False
err = None
device_str, device = self._get_device(device)
if not device:
err = f"Invalid device {device_str}, {device}"
else:
if len(color) != 3:
err = f"set_color: invalid value {color}, must be tuple with (r, g, b) values"
else:
red = color[0]
green = color[1]
blue = color[2]
if red < 0 or red > 255:
err = (
f"set_color: invalid value {color}, red must be within 0 .. 254"
)
elif green < 0 or green > 255:
err = f"set_color: invalid value {color}, green must be within 0 .. 254"
elif blue < 0 or blue > 255:
err = f"set_color: invalid value {color}, blue must be within 0 .. 254"
else:
command = "color"
command_color = {"r": red, "g": green, "b": blue}
result, err = await self._control(device, command, command_color)
if not err:
success = self._is_success_result_message(result)
if success:
self._devices[device_str].timestamp = self._utcnow
self._devices[device_str].source = "history"
self._devices[device_str].color = color
return success, err
def _get_lock_seconds(self, utcSeconds: int) -> int:
"""Get seconds to wait."""
seconds_lock = utcSeconds - self._utcnow()
if seconds_lock < 0:
seconds_lock = 0
return seconds_lock
async def _control(
self, device: Union[str, GoveeDevice], command: str, params: Any
) -> Tuple[Any, str]:
"""Control led strips and bulbs."""
device_str, device = self._get_device(device)
cmd = {"name": command, "value": params}
_LOGGER.debug(f"control {device_str}: {cmd}")
result = None
err = None
if not device:
err = f"Invalid device {device_str}, {device}"
else:
if not device.controllable:
err = f"Device {device.device} is not controllable"
_LOGGER.debug(f"control {device_str} not possible: {err}")
elif not command in device.support_cmds:
err = f"Command {command} not possible on device {device.device}"
_LOGGER.warning(f"control {device_str} not possible: {err}")
else:
while True:
seconds_locked = self._get_lock_seconds(device.lock_set_until)
if not seconds_locked:
break;
_LOGGER.debug(f"control {device_str} is locked for {seconds_locked} seconds. Command waiting: {cmd}")
await asyncio.sleep(seconds_locked)
json = {"device": device.device, "model": device.model, "cmd": cmd}
await self.rate_limit_delay()
async with self._api_put(
url=_API_DEVICES_CONTROL, json=json
) as response:
if response.status == 200:
device.lock_set_until = (
self._utcnow() + DELAY_SET_FOLLOWING_SET_SECONDS
)
device.lock_get_until = (
self._utcnow() + DELAY_GET_FOLLOWING_SET_SECONDS
)
result = await response.json()
else:
text = await response.text()
err = f"API-Error {response.status} on command {cmd}: {text} for device {device}"
_LOGGER.warning(f"control {device_str} not possible: {err}")
return result, err
async def get_states(self) -> List[GoveeDevice]:
"""Request states for all devices from API."""
_LOGGER.debug("get_states")
for device_str in self._devices:
state, err = await self._get_device_state(device_str)
if err:
_LOGGER.warning("error getting state for device %s: %s",
device_str, err,
)
self._devices[device_str].error = err
else:
self._devices[device_str] = state
self._devices[device_str].error = None
return self.devices
async def _get_device_state(
self, device: Union[str, GoveeDevice]
) -> Tuple[GoveeDevice, str]:
"""Get state for one specific device."""
device_str, device = self._get_device(device)
result = None
err = None
seconds_locked = self._get_lock_seconds(device.lock_get_until)
if not device:
err = f"Invalid device {device_str}"
elif not device.retrievable:
# device {device_str} isn't able to return state, return 'history' state
self._devices[device_str].source = "history"
result = self._devices[device_str]
elif seconds_locked:
# we just changed something, return state from history
self._devices[device_str].source = "history"
result = self._devices[device_str]
_LOGGER.debug(
f"state object returned from cache: {result}, next state for {device.device} from api allowed in {seconds_locked} seconds"
)
else:
params = {"device": device.device, "model": device.model}
async with self._api_get(url=_API_DEVICES_STATE, params=params) as response:
if response.status == 200:
timestamp = self._utcnow()
json_obj = await response.json()
prop_online = False
prop_power_state = False
prop_brightness = False
prop_color = (0, 0, 0)
prop_color_temp = 0
for prop in json_obj["data"]["properties"]:
# somehow these are all dicts with one element
if "online" in prop:
prop_online = prop["online"] is True
elif "powerState" in prop:
prop_power_state = prop["powerState"] == "on"
elif "brightness" in prop:
prop_brightness = prop["brightness"]
elif "color" in prop:
prop_color = (
prop["color"]["r"],
prop["color"]["g"],
prop["color"]["b"],
)
elif "colorTemInKelvin" in prop:
prop_color_temp = prop["colorTemInKelvin"]
else:
_LOGGER.debug(f"unknown state property '{prop}'")
if not prop_online:
if self.config_offline_is_off is not None:
# global option
if self.config_offline_is_off:
prop_power_state = False
elif device.config_offline_is_off:
# learning option
prop_power_state = False
# autobrightness learning
if device.learned_get_brightness_max == None or (
device.learned_get_brightness_max == 100
and prop_brightness > 100
):
device.learned_get_brightness_max = (
100 # assumption, as we didn't get anything higher
)
if prop_brightness > 100:
device.learned_get_brightness_max = 254
await self._learn(device)
if device.learned_get_brightness_max == 100:
# scale range 0-100 up to 0-254
prop_brightness = math.floor( prop_brightness * 254 / 100 )
result = self._devices[device_str]
result.online = prop_online
result.power_state = prop_power_state
result.brightness = prop_brightness
result.color = prop_color
result.color_temp = prop_color_temp
result.timestamp = timestamp
result.source = "api"
result.error = None
_LOGGER.debug(
f"state returned from API: {json_obj}, resulting state object: {result}"
)
else:
errText = await response.text()
err = f"API-Error {response.status}: {errText}"
return result, err
|
[
"govee_api_laggat.learning_storage.GoveeLearnedInfo",
"govee_api_laggat.learning_storage.GoveeAbstractLearningStorage",
"math.ceil",
"asyncio.sleep",
"math.floor",
"time.time",
"aiohttp.ClientSession",
"events.Events",
"datetime.datetime.now",
"logging.getLogger"
] |
[((456, 483), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (473, 483), False, 'import logging\n'), ((2352, 2375), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (2373, 2375), False, 'import aiohttp\n'), ((2913, 2921), 'events.Events', 'Events', ([], {}), '()\n', (2919, 2921), False, 'from events import Events\n'), ((10823, 10834), 'time.time', 'time.time', ([], {}), '()\n', (10832, 10834), False, 'import time\n'), ((3449, 3479), 'govee_api_laggat.learning_storage.GoveeAbstractLearningStorage', 'GoveeAbstractLearningStorage', ([], {}), '()\n', (3477, 3479), False, 'from govee_api_laggat.learning_storage import GoveeAbstractLearningStorage, GoveeLearnedInfo\n'), ((6146, 6160), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6158, 6160), False, 'from datetime import datetime\n'), ((20195, 20213), 'govee_api_laggat.learning_storage.GoveeLearnedInfo', 'GoveeLearnedInfo', ([], {}), '()\n', (20211, 20213), False, 'from govee_api_laggat.learning_storage import GoveeAbstractLearningStorage, GoveeLearnedInfo\n'), ((18067, 18108), 'math.ceil', 'math.ceil', (['(brightness_set_100 * 254 / 100)'], {}), '(brightness_set_100 * 254 / 100)\n', (18076, 18108), False, 'import math\n'), ((7774, 7798), 'asyncio.sleep', 'asyncio.sleep', (['sleep_sec'], {}), '(sleep_sec)\n', (7787, 7798), False, 'import asyncio\n'), ((11025, 11036), 'time.time', 'time.time', ([], {}), '()\n', (11034, 11036), False, 'import time\n'), ((17710, 17726), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (17723, 17726), False, 'import asyncio\n'), ((17991, 18025), 'math.floor', 'math.floor', (['(brightness * 100 / 254)'], {}), '(brightness * 100 / 254)\n', (18001, 18025), False, 'import math\n'), ((25787, 25816), 'asyncio.sleep', 'asyncio.sleep', (['seconds_locked'], {}), '(seconds_locked)\n', (25800, 25816), False, 'import asyncio\n'), ((31119, 31158), 'math.floor', 'math.floor', (['(prop_brightness * 254 / 100)'], {}), '(prop_brightness * 254 / 100)\n', (31129, 31158), False, 'import math\n')]
|
"""
Obtain the single photoelectron response for an SiPM. Can be used as an input
to sim_telarray after normalisation with Konrads script
"""
import argparse
from argparse import ArgumentDefaultsHelpFormatter as Formatter
import numpy as np
from scipy.special import binom
from scipy.stats import norm
from IPython import embed
from matplotlib import pyplot as plt
import os
def sipm_enf(x, spe_sigma, opct, pap, dap):
"""
SiPM formula from Gentile 2010
http://adsabs.harvard.edu/abs/2010arXiv1006.3263G
This implementation only considers the case for a 100% probability of a
single inital fired microcell
Parameters
----------
x : ndarray
X points to evaluate at
spe_sigma : float
Width of the single photoelectron peak
opct : float
Probability of optical crosstalk
pap : float
Probability of afterpulse
dap : float
Distance of afterpulse peak from main peak
"""
n_peaks = 100
N = np.arange(n_peaks)[:, None]
K = np.arange(1, n_peaks)[:, None]
# Probability of n fired microcells due to optical crosstalk
pct = ((1 - opct) * np.power(opct, N - 1) * binom(N - 1, 0))[:, 0]
sap = spe_sigma
papk = np.power(1 - pap, N[:, 0])
p0ap = pct * papk
pap1 = pct * (1-papk) * papk
pe_sigma = np.sqrt(K * spe_sigma ** 2)
ap_sigma = np.sqrt(K * sap ** 2)
signal = p0ap[K] * norm.pdf(x, K, pe_sigma)
signal += pap1[K] * norm.pdf(x, K * (1.0-dap), ap_sigma)
return signal.sum(0)
def main():
description = ('Obtain the single photoelectron response for an SiPM. '
'Can be used as an input to sim_telarray after '
'normalisation with Konrads script')
parser = argparse.ArgumentParser(description=description,
formatter_class=Formatter)
parser.add_argument('-o', '--output', dest='output_dir', action='store',
required=True,
help='Output directory for the files')
parser.add_argument('--spe_sigma', dest='spe_sigma', action='store',
default=0.1, type=float,
help='Value for the standard deviation of the single '
'photoelectron peak')
parser.add_argument('--opct', dest='opct', action='store', default=0.1,
type = float,
help='Value for optical crosstalk')
parser.add_argument('--pap', dest='pap', action='store', default=0,
type=float,
help='Value for the probability of afterpulses')
parser.add_argument('--dap', dest='dap', action='store', default=0,
type=float,
help='Value for the distance of the afterpulse peak '
'from main peak')
args = parser.parse_args()
output_dir = args.output_dir
spe_sigma = args.spe_sigma
opct = args.opct
pap = args.pap
dap = args.dap
print(
"""
SPE Parameters: spe_sigma = {}
opct = {}
pap = {}
dap = {}
""".format(spe_sigma, opct, pap, dap)
)
x = np.linspace(0, 100, 1000)
y = sipm_enf(x, spe_sigma, opct, pap, dap)
gt = y > 1E-15
x = x[gt]
y = y[gt]
# Resample
x = np.linspace(x.min(), x.max(), 1000)
y = sipm_enf(x, spe_sigma, opct, pap, dap)
if not os.path.exists(output_dir):
print("Creating directory: {}".format(output_dir))
os.makedirs(output_dir)
output_path = os.path.join(output_dir, "checs_spe_spectrum.txt")
np.savetxt(output_path, np.column_stack((x, y, y)))
print("Created config : {}".format(output_path))
output_path = os.path.join(output_dir, "checs_spe_spectrum.pdf")
plt.semilogy(x, y)
plt.savefig(output_path, bbox_inches='tight')
print("Created figure : {}".format(output_path))
if __name__ == '__main__':
main()
|
[
"scipy.special.binom",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.power",
"os.path.exists",
"scipy.stats.norm.pdf",
"numpy.arange",
"numpy.linspace",
"numpy.column_stack",
"matplotlib.pyplot.semilogy",
"os.path.join",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((1223, 1249), 'numpy.power', 'np.power', (['(1 - pap)', 'N[:, 0]'], {}), '(1 - pap, N[:, 0])\n', (1231, 1249), True, 'import numpy as np\n'), ((1321, 1348), 'numpy.sqrt', 'np.sqrt', (['(K * spe_sigma ** 2)'], {}), '(K * spe_sigma ** 2)\n', (1328, 1348), True, 'import numpy as np\n'), ((1364, 1385), 'numpy.sqrt', 'np.sqrt', (['(K * sap ** 2)'], {}), '(K * sap ** 2)\n', (1371, 1385), True, 'import numpy as np\n'), ((1749, 1824), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description', 'formatter_class': 'Formatter'}), '(description=description, formatter_class=Formatter)\n', (1772, 1824), False, 'import argparse\n'), ((3253, 3278), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(1000)'], {}), '(0, 100, 1000)\n', (3264, 3278), True, 'import numpy as np\n'), ((3630, 3680), 'os.path.join', 'os.path.join', (['output_dir', '"""checs_spe_spectrum.txt"""'], {}), "(output_dir, 'checs_spe_spectrum.txt')\n", (3642, 3680), False, 'import os\n'), ((3809, 3859), 'os.path.join', 'os.path.join', (['output_dir', '"""checs_spe_spectrum.pdf"""'], {}), "(output_dir, 'checs_spe_spectrum.pdf')\n", (3821, 3859), False, 'import os\n'), ((3864, 3882), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['x', 'y'], {}), '(x, y)\n', (3876, 3882), True, 'from matplotlib import pyplot as plt\n'), ((3887, 3932), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_path'], {'bbox_inches': '"""tight"""'}), "(output_path, bbox_inches='tight')\n", (3898, 3932), True, 'from matplotlib import pyplot as plt\n'), ((986, 1004), 'numpy.arange', 'np.arange', (['n_peaks'], {}), '(n_peaks)\n', (995, 1004), True, 'import numpy as np\n'), ((1022, 1043), 'numpy.arange', 'np.arange', (['(1)', 'n_peaks'], {}), '(1, n_peaks)\n', (1031, 1043), True, 'import numpy as np\n'), ((1410, 1434), 'scipy.stats.norm.pdf', 'norm.pdf', (['x', 'K', 'pe_sigma'], {}), '(x, K, pe_sigma)\n', (1418, 1434), False, 'from scipy.stats import norm\n'), ((1459, 1497), 'scipy.stats.norm.pdf', 'norm.pdf', (['x', '(K * (1.0 - dap))', 'ap_sigma'], {}), '(x, K * (1.0 - dap), ap_sigma)\n', (1467, 1497), False, 'from scipy.stats import norm\n'), ((3492, 3518), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (3506, 3518), False, 'import os\n'), ((3587, 3610), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (3598, 3610), False, 'import os\n'), ((3709, 3735), 'numpy.column_stack', 'np.column_stack', (['(x, y, y)'], {}), '((x, y, y))\n', (3724, 3735), True, 'import numpy as np\n'), ((1167, 1182), 'scipy.special.binom', 'binom', (['(N - 1)', '(0)'], {}), '(N - 1, 0)\n', (1172, 1182), False, 'from scipy.special import binom\n'), ((1143, 1164), 'numpy.power', 'np.power', (['opct', '(N - 1)'], {}), '(opct, N - 1)\n', (1151, 1164), True, 'import numpy as np\n')]
|
# %%
import pandas as pd
from collections import defaultdict
import pickle
from typing import DefaultDict
cmap_data = pickle.load(open("./cmap_transformer.pkl", "rb"))
mm_data = pickle.load(open("./mm_report_transformer.pkl", "rb"))
# %%
def convert_to_metric_first(data):
rows = defaultdict(dict)
for model, metrics in data.items():
for metric, values in metrics.items():
for i, value in enumerate(values):
rows[metric][model + f"_{i}"] = value
return rows
def save_to_csv(data, save_path):
df = pd.DataFrame(data)
df.to_csv(save_path)
save_to_csv(
convert_to_metric_first(cmap_data), "./cmap_report_transformer.csv"
)
save_to_csv(convert_to_metric_first(mm_data), "./mm_report_transformer.csv")
# %%
|
[
"collections.defaultdict",
"pandas.DataFrame"
] |
[((286, 303), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (297, 303), False, 'from collections import defaultdict\n'), ((555, 573), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (567, 573), True, 'import pandas as pd\n')]
|
# -*- coding: utf-8 -*-
"""
New Discussions -- Provides a list of new discussions within a WikiProject's scope
Copyright (C) 2015 <NAME>, 2016 <NAME>
Licensed under MIT License: http://mitlicense.org
"""
from collections import namedtuple
from datetime import datetime
import re
from reportsbot.task import Task
from reportsbot.util import join_full_title
import mwparserfromhell
from pywikibot.data.api import Request
__all__ = ["NewDiscussions"]
_Section = namedtuple("_Section", ["name", "timestamp"])
_Discussion = namedtuple("_Discussion", ["title", "name", "timestamp"])
class NewDiscussions(Task):
"""Updates a list of new discussions within a WikiProject's scope."""
DISCUSSION_TEMPLATE = "WPX new discussion"
DISCUSSIONS_PER_PAGE = 15
DISCUSSIONS_BEFORE_FOLD = 4
@staticmethod
def _parse_timestamp(text):
"""Return a datetime for the given timestamp string, or ValueError."""
return datetime.strptime(str(text), "%H:%M, %d %B %Y (UTC)")
def _extract_sections(self, text):
"""Return a list of section tuples for the given page."""
code = mwparserfromhell.parse(text)
sections = set()
for section in code.get_sections(levels=[2]):
clean = section.strip_code()
match = re.search(r"\d\d:\d\d,\s\d\d?\s\w+\s\d{4}\s\(UTC\)", clean)
if not match:
continue
try:
timestamp = self._parse_timestamp(match.group(0))
except ValueError:
continue
name = str(section.get(0).title.strip_code()).strip()
sections.add(_Section(name, timestamp))
return sections
def _load_pages(self, titles):
"""Load a chunk of pages from the API."""
def _get_rev(page):
try:
return page["revisions"][0]["slots"]["main"]["content"]
except (KeyError, IndexError):
return ""
req = Request(self._bot.site, parameters={
"action": "query", "prop": "revisions", "rvprop": "content",
"rvslots": "main", "formatversion": "2", "titles": "|".join(titles)
})
data = req.submit()
return [(page["title"], _get_rev(page))
for page in data["query"]["pages"]]
def _get_updated_discussions(self, start, end):
"""Return a dict mapping talk page titles to lists of section tuples.
The only pages included in the dict are those that have been updated
in the given time range.
"""
query = """SELECT DISTINCT rc_namespace, rc_title
FROM recentchanges
WHERE rc_timestamp >= ? AND rc_timestamp < ?
AND rc_namespace % 2 = 1 AND rc_namespace != 3
AND (rc_type = 0 OR rc_type = 1 OR rc_type = 3) AND rc_bot = 0"""
startts = start.strftime("%Y%m%d%H%M%S")
endts = end.strftime("%Y%m%d%H%M%S")
self._logger.info("Fetching discussions updated between %s and %s",
startts, endts)
with self._bot.wikidb as cursor:
cursor.execute(query, (startts, endts))
titles = [join_full_title(self._bot.site, ns, title.decode("utf8"))
for (ns, title) in cursor.fetchall()]
self._logger.debug("Fetching sections for %s pages", len(titles))
sections = {}
chunksize = 50
for start in range(0, len(titles), chunksize):
chunk = titles[start:start+chunksize]
pages = self._load_pages(chunk)
for title, text in pages:
try:
sections[title] = self._extract_sections(text)
except mwparserfromhell.parser.ParserError:
self._logger.exception("Failed to parse [[%s]]", title)
return sections
def _get_current_discussions(self, title):
"""Return a dict mapping talk page titles to lists of section tuples.
Given a WikiProject new discussions page, return all discussions
currently listed.
"""
text = self._bot.get_page(title).text
code = mwparserfromhell.parse(text)
discussions = {}
for tmpl in code.filter_templates():
if tmpl.name != self.DISCUSSION_TEMPLATE:
continue
if not (tmpl.has("title") and tmpl.has("section") and
tmpl.has("timestamp")):
continue
try:
timestamp = self._parse_timestamp(tmpl.get("timestamp").value)
except ValueError:
continue
title = str(tmpl.get("title").value)
section = _Section(str(tmpl.get("section").value), timestamp)
if title in discussions:
discussions[title].add(section)
else:
discussions[title] = {section}
return discussions
def _process_discussions(self, pages, current, updated):
"""Return a sorted list of the most recent discussion tuples."""
sections = {}
for page in pages:
title = join_full_title(self._bot.site, page.ns + 1, page.title)
if title in updated:
sections[title] = updated[title]
elif title in current:
sections[title] = current[title]
discussions = [_Discussion(title, section.name, section.timestamp)
for title in sections for section in sections[title]]
discussions.sort(key=lambda disc: disc.timestamp, reverse=True)
discussions = discussions[:self.DISCUSSIONS_PER_PAGE]
for disc in discussions:
self._logger.debug(" [[%s#%s]] at %s", disc.title, disc.name,
disc.timestamp.strftime("%Y %m %d, %H:%M:%S"))
news = [disc.title for disc in discussions
if disc.title not in current][:3]
return discussions, news
def _save_discussions(self, project, title, discussions, news):
"""Save the given list of discussions to the given page title."""
text = """<noinclude><div style="padding-bottom:1em;">{{Clickable button 2|%(projname)s|Return to WikiProject|class=mw-ui-neutral}}</div></noinclude>
{{WPX action box|color={{{2|#086}}}|title=Have a question?|content=
{{Clickable button 2|url={{fullurl:%(projtalk)s|action=edit§ion=new}}|Ask the WikiProject|class=mw-ui-progressive mw-ui-block}}
{{Clickable button 2|%(projtalk)s|View Other Discussions|class=mw-ui-block}}
}}
{{WPX list start|intro={{WPX last updated|%(title)s}}}}
%(discussions)s
{{WPX list end|more=%(title)s}}
"""
template = "{{WPX new discussion|color={{{1|#37f}}}|title=%(title)s|section=%(name)s|timestamp=%(timestamp)s}}"
discitems = [
template % {
"title": disc.title,
"name": disc.name,
"timestamp": disc.timestamp.strftime("%H:%M, %d %B %Y (UTC)")
}
for disc in discussions]
fold = self.DISCUSSIONS_BEFORE_FOLD
if len(discitems) > fold:
before = "\n".join(discitems[:fold])
after = "\n".join(discitems[fold:])
disclist = before + "<noinclude>\n" + after + "</noinclude>"
else:
disclist = "\n".join(discitems)
projtalk = self._bot.get_page(project.name).toggleTalkPage().title()
page = self._bot.get_page(title)
page.text = text % {
"title": title,
"projname": project.name,
"projtalk": projtalk,
"discussions": disclist
}
summary = "Updating new discussions"
if news:
summary += ": " + ", ".join("[[%s]]" % item for item in news)
page.save(summary, minor=False)
def _process(self, project, updated):
"""Process new discussions for the given project."""
self._logger.debug("Updating new discussions for %s", project.name)
title = project.name + "/Discussions"
pages = project.get_members()
current = self._get_current_discussions(title)
discussions, news = self._process_discussions(pages, current, updated)
self._save_discussions(project, title, discussions, news)
def run(self):
start = self._bot.get_last_updated("new_discussions")
end = datetime.utcnow()
updated = self._get_updated_discussions(start, end)
self._logger.info("Updating discussion reports")
for project in self._bot.get_configured_projects():
if project.config.get("new_discussions"):
self._process(project, updated)
self._bot.set_last_updated("new_discussions", end)
|
[
"reportsbot.util.join_full_title",
"mwparserfromhell.parse",
"datetime.datetime.utcnow",
"collections.namedtuple",
"re.search"
] |
[((465, 510), 'collections.namedtuple', 'namedtuple', (['"""_Section"""', "['name', 'timestamp']"], {}), "('_Section', ['name', 'timestamp'])\n", (475, 510), False, 'from collections import namedtuple\n'), ((525, 582), 'collections.namedtuple', 'namedtuple', (['"""_Discussion"""', "['title', 'name', 'timestamp']"], {}), "('_Discussion', ['title', 'name', 'timestamp'])\n", (535, 582), False, 'from collections import namedtuple\n'), ((1115, 1143), 'mwparserfromhell.parse', 'mwparserfromhell.parse', (['text'], {}), '(text)\n', (1137, 1143), False, 'import mwparserfromhell\n'), ((4136, 4164), 'mwparserfromhell.parse', 'mwparserfromhell.parse', (['text'], {}), '(text)\n', (4158, 4164), False, 'import mwparserfromhell\n'), ((8350, 8367), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (8365, 8367), False, 'from datetime import datetime\n'), ((1285, 1357), 're.search', 're.search', (['"""\\\\d\\\\d:\\\\d\\\\d,\\\\s\\\\d\\\\d?\\\\s\\\\w+\\\\s\\\\d{4}\\\\s\\\\(UTC\\\\)"""', 'clean'], {}), "('\\\\d\\\\d:\\\\d\\\\d,\\\\s\\\\d\\\\d?\\\\s\\\\w+\\\\s\\\\d{4}\\\\s\\\\(UTC\\\\)', clean)\n", (1294, 1357), False, 'import re\n'), ((5109, 5165), 'reportsbot.util.join_full_title', 'join_full_title', (['self._bot.site', '(page.ns + 1)', 'page.title'], {}), '(self._bot.site, page.ns + 1, page.title)\n', (5124, 5165), False, 'from reportsbot.util import join_full_title\n')]
|
##########################
# Test script to check if "is_triggered_only = yes" events are triggered from somewhere
# If they not - they'll never be triggered
# By Pelmen, https://github.com/Pelmen323
##########################
import re
from ..test_classes.generic_test_class import DataCleaner, ResultsReporter
from ..test_classes.events_class import Events
FALSE_POSITIVES = ['ace_promoted.1', 'ace_promoted.2', 'ace_died.1',
'ace_killed_by_ace.1', 'ace_killed_other_ace.1',
'aces_killed_each_other.1', 'nuke_dropped.0']
def test_check_triggered_events(test_runner: object):
all_events = []
triggered_events_id = dict()
invoked_events_id = []
# 1. Get all events code
all_events = Events.get_all_events(test_runner=test_runner, lowercase=True)
# 2. Get the "triggered only events"
for event in all_events:
if "is_triggered_only = yes" in event:
pattern_matches = re.findall('id = .*', event)
event_id = pattern_matches[0].strip('\t').strip() # Only first match is taken
if '#' in event_id:
event_id = event_id[:event_id.index('#')].strip() # Clean up comments
event_id = event_id[5:].strip() # Remove "id =" part
triggered_events_id[event_id] = 0 # Default value is set to zero
# 3. Get all events triggered in files
triggered_events_id = DataCleaner.clear_false_positives(input_iter=triggered_events_id, false_positives=FALSE_POSITIVES)
invoked_events_id = Events.get_all_triggered_events_names(test_runner=test_runner, lowercase=True)
# 4. Check if events are used
for event in invoked_events_id:
if event in triggered_events_id.keys():
triggered_events_id[event] += 1
results = [i for i in triggered_events_id.keys() if triggered_events_id[i] == 0]
ResultsReporter.report_results(results=results, message="Those events have 'is_triggered_only = yes' attr but are never triggered from outside. Check console output")
|
[
"re.findall"
] |
[((954, 982), 're.findall', 're.findall', (['"""id = .*"""', 'event'], {}), "('id = .*', event)\n", (964, 982), False, 'import re\n')]
|
from pathlib import Path
from typing import Dict, Type, Iterator, List, Tuple
import pyarrow as pa
from pyarrow import csv as pcsv
from pyarrow import parquet as pq
from sqlalchemy import MetaData as AlchemyMetadata, Table as AlchemyTable
from sqlalchemy import Integer, SmallInteger, Float, String, CHAR, Text, Boolean, Date, DateTime
from sqlalchemy.sql.type_api import TypeEngine
from src.schemas import all_metadata
from src import EXTRACT_PATH_PREFIX, TRANSFORM_PATH_PREFIX
PARQUET_PREFIX = TRANSFORM_PATH_PREFIX.joinpath("parquet")
CSV_PREFIX = TRANSFORM_PATH_PREFIX.joinpath("csv")
# How many bytes in each CSV chunk to bring into memory.
# Larger sizes result in better compression and slightly faster time,
# but don't want to risk OOM issues on small build boxes.
BUFFER_SIZE_BYTES = 1000000000
sql_type_lookup: Dict[Type[TypeEngine], str] = {
Integer: 'int32',
SmallInteger: 'int16',
Float: 'float64',
String: 'str',
CHAR: 'str',
Text: 'str',
Boolean: 'bool',
# Some Parquet targets can't handle Parquet dates, so we need to parse and pass timestamps
Date: 'timestamp[ms]',
DateTime: 'timestamp[ms]'
}
def get_fields(table: AlchemyTable) -> List[Tuple[str, str]]:
cols = [(c.name, c.type) for c in table.columns.values() if c.autoincrement is not True]
return [(name, sql_type_lookup[type(dtype)]) for name, dtype in cols]
def write_files(metadata: AlchemyMetadata) -> None:
"""
Creates a Parquet file for each table in the schema.
"""
tables: Iterator[AlchemyTable] = metadata.tables.values()
for table in tables:
name = table.name
print(name)
def get_path(prefix: Path, suffix: str):
parent_dir = prefix.joinpath(metadata.schema)
parent_dir.mkdir(exist_ok=True, parents=True)
return parent_dir.joinpath(name).with_suffix(suffix)
extract_file = get_path(EXTRACT_PATH_PREFIX, ".csv.zst")
parquet_file = get_path(PARQUET_PREFIX, ".parquet")
arrow_schema = pa.schema(get_fields(table))
column_names = [name for name, dtype in get_fields(table)]
read_options = pcsv.ReadOptions(column_names=column_names, block_size=1000000000)
parse_options = pcsv.ParseOptions(newlines_in_values=True)
convert_options = pcsv.ConvertOptions(column_types=arrow_schema, timestamp_parsers=["%Y%m%d", "%Y-%m-%d"],
true_values=["1", "T"], false_values=["0", "F"], strings_can_be_null=True)
parquet_writer = pq.ParquetWriter(parquet_file, schema=arrow_schema, compression='zstd',
version="2.0", use_dictionary=True)
stream_reader = pcsv.open_csv(extract_file, read_options=read_options, parse_options=parse_options,
convert_options=convert_options)
for batch in stream_reader:
table = pa.Table.from_batches([batch])
parquet_writer.write_table(table)
parquet_writer.close()
if __name__ == "__main__":
for m in all_metadata:
write_files(m)
|
[
"pyarrow.csv.ParseOptions",
"pyarrow.Table.from_batches",
"pyarrow.csv.ConvertOptions",
"pyarrow.csv.ReadOptions",
"pyarrow.csv.open_csv",
"pyarrow.parquet.ParquetWriter",
"src.TRANSFORM_PATH_PREFIX.joinpath"
] |
[((499, 540), 'src.TRANSFORM_PATH_PREFIX.joinpath', 'TRANSFORM_PATH_PREFIX.joinpath', (['"""parquet"""'], {}), "('parquet')\n", (529, 540), False, 'from src import EXTRACT_PATH_PREFIX, TRANSFORM_PATH_PREFIX\n'), ((554, 591), 'src.TRANSFORM_PATH_PREFIX.joinpath', 'TRANSFORM_PATH_PREFIX.joinpath', (['"""csv"""'], {}), "('csv')\n", (584, 591), False, 'from src import EXTRACT_PATH_PREFIX, TRANSFORM_PATH_PREFIX\n'), ((2149, 2215), 'pyarrow.csv.ReadOptions', 'pcsv.ReadOptions', ([], {'column_names': 'column_names', 'block_size': '(1000000000)'}), '(column_names=column_names, block_size=1000000000)\n', (2165, 2215), True, 'from pyarrow import csv as pcsv\n'), ((2240, 2282), 'pyarrow.csv.ParseOptions', 'pcsv.ParseOptions', ([], {'newlines_in_values': '(True)'}), '(newlines_in_values=True)\n', (2257, 2282), True, 'from pyarrow import csv as pcsv\n'), ((2309, 2480), 'pyarrow.csv.ConvertOptions', 'pcsv.ConvertOptions', ([], {'column_types': 'arrow_schema', 'timestamp_parsers': "['%Y%m%d', '%Y-%m-%d']", 'true_values': "['1', 'T']", 'false_values': "['0', 'F']", 'strings_can_be_null': '(True)'}), "(column_types=arrow_schema, timestamp_parsers=['%Y%m%d',\n '%Y-%m-%d'], true_values=['1', 'T'], false_values=['0', 'F'],\n strings_can_be_null=True)\n", (2328, 2480), True, 'from pyarrow import csv as pcsv\n'), ((2545, 2656), 'pyarrow.parquet.ParquetWriter', 'pq.ParquetWriter', (['parquet_file'], {'schema': 'arrow_schema', 'compression': '"""zstd"""', 'version': '"""2.0"""', 'use_dictionary': '(True)'}), "(parquet_file, schema=arrow_schema, compression='zstd',\n version='2.0', use_dictionary=True)\n", (2561, 2656), True, 'from pyarrow import parquet as pq\n'), ((2719, 2840), 'pyarrow.csv.open_csv', 'pcsv.open_csv', (['extract_file'], {'read_options': 'read_options', 'parse_options': 'parse_options', 'convert_options': 'convert_options'}), '(extract_file, read_options=read_options, parse_options=\n parse_options, convert_options=convert_options)\n', (2732, 2840), True, 'from pyarrow import csv as pcsv\n'), ((2930, 2960), 'pyarrow.Table.from_batches', 'pa.Table.from_batches', (['[batch]'], {}), '([batch])\n', (2951, 2960), True, 'import pyarrow as pa\n')]
|
# -*- coding: utf-8 -*-
import tools
import time
import db
import threading
from .threads import ThreadPool
class DetectorBase(object):
"""the base class for detecting"""
def __init__(self):
self.T = tools.Tools()
self.now = int(time.time() * 1000)
def getItems(self):
pass
def onCompleted(self):
pass
def getSource(self):
sourceItems = self.getItems()
threads = ThreadPool(20)
for info in sourceItems:
threads.add_task(self.checkData, item = info)
threads.wait_completion()
self.onCompleted()
def checkData(self, item):
if 'url' in item and len(item['url']) > 0:
self.T.logger('正在分析[ %s ]: %s' % (item['name'], item['url']))
netstat = self.T.chkPlayable(item['url'])
item['online'] = 1 if netstat > 0 else 0
item['delay'] = netstat
item['udTime'] = self.now
if netstat == 0:
item['failcount'] += 1
self.addData(item)
def addData (self, data) :
DB = db.DataBase()
sql = "SELECT * FROM %s WHERE url = '%s'" % (DB.table, data['url'])
result = DB.query(sql)
if len(result) == 0 :
DB.insert(data)
else :
id = result[0][0]
if data['failcount'] >= 10:
DB.delete(id)
else:
DB.edit(id, data)
|
[
"tools.Tools",
"db.DataBase",
"time.time"
] |
[((218, 231), 'tools.Tools', 'tools.Tools', ([], {}), '()\n', (229, 231), False, 'import tools\n'), ((1085, 1098), 'db.DataBase', 'db.DataBase', ([], {}), '()\n', (1096, 1098), False, 'import db\n'), ((255, 266), 'time.time', 'time.time', ([], {}), '()\n', (264, 266), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
from django.db import models
from model_utils.models import TimeStampedModel
from apps.post.models import Post
class Comment(TimeStampedModel):
"""Comment for Post
"""
class Meta:
db_table = "comment"
ordering = ["-created"]
post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name="comments")
text = models.CharField(max_length=200)
author = models.CharField(max_length=20)
def __str__(self):
return self.text
|
[
"django.db.models.ForeignKey",
"django.db.models.CharField"
] |
[((293, 367), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Post'], {'on_delete': 'models.CASCADE', 'related_name': '"""comments"""'}), "(Post, on_delete=models.CASCADE, related_name='comments')\n", (310, 367), False, 'from django.db import models\n'), ((379, 411), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (395, 411), False, 'from django.db import models\n'), ((425, 456), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (441, 456), False, 'from django.db import models\n')]
|
from django.core.validators import MaxValueValidator
from django.core.validators import MaxValueValidator
from django.db import models
from django.utils import timezone
class Faculty(models.Model):
name = models.CharField(max_length=80, blank=True, null=True)
faculty_describtion = models.TextField(blank=True, null=True)
class Meta:
managed = False
db_table = 'faculty'
def __str__(self):
return f'Faculty {self.name}'
class Course(models.Model):
# id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=80, blank=True, null=True)
course_describtion = models.TextField(blank=True, null=True)
faculty = models.ForeignKey('Faculty', models.DO_NOTHING, db_column='faculty', blank=True, null=True)
category = models.IntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'course'
def __str__(self):
return f'Course {self.id} | Name: {self.name}'
class Category(models.Model):
name = models.CharField(max_length=80, blank=True, null=True)
parent = models.ForeignKey('self', models.DO_NOTHING, db_column='parent', blank=True, null=True)
class Meta:
managed = False
db_table = 'category'
def __str__(self):
return f'Category {self.name}'
class Subject(models.Model):
name = models.CharField(max_length=80, blank=True, null=True)
category = models.ForeignKey(Category, models.DO_NOTHING, db_column='category', blank=True, null=True)
thumb = models.CharField(max_length=100, blank=True, null=True)
pic = models.CharField(max_length=200, blank=True, null=True)
description = models.CharField(max_length=1000, blank=True, null=True)
class Meta:
managed = False
db_table = 'subject'
def __str__(self):
return f'Subject {self.id} | Name: {self.name}'
class SubjectRating(models.Model):
subject = models.ForeignKey(Subject, models.DO_NOTHING, db_column='subject', blank=True, null=True)
student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student', blank=True, null=True)
rating = models.IntegerField(blank=True, null=True)
commence = models.TextField(blank=True, null=True)
timestamp = models.DateTimeField(blank=True, null=True)
class Meta:
managed = False
db_table = 'subject_rating'
def __str__(self):
return f'Subject: {self.subject.name} | Student {self.student.account.username} | Rating: {self.rating}'
class Enrollment(models.Model):
subject = models.ForeignKey('Subject', models.DO_NOTHING, db_column='subject')
student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student')
status = models.IntegerField(blank=True, null=True)
# lesson = models.ForeignKey('Lesson', models.DO_NOTHING, db_column='lesson', blank=True, null=True)
class Meta:
managed = False
db_table = 'enrollment'
def __str__(self):
return f'Student {self.student.account.username} | Subject: {self.subject.name}'
|
[
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((211, 265), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(80)', 'blank': '(True)', 'null': '(True)'}), '(max_length=80, blank=True, null=True)\n', (227, 265), False, 'from django.db import models\n'), ((292, 331), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (308, 331), False, 'from django.db import models\n'), ((554, 608), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(80)', 'blank': '(True)', 'null': '(True)'}), '(max_length=80, blank=True, null=True)\n', (570, 608), False, 'from django.db import models\n'), ((634, 673), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (650, 673), False, 'from django.db import models\n'), ((688, 784), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Faculty"""', 'models.DO_NOTHING'], {'db_column': '"""faculty"""', 'blank': '(True)', 'null': '(True)'}), "('Faculty', models.DO_NOTHING, db_column='faculty', blank=\n True, null=True)\n", (705, 784), False, 'from django.db import models\n'), ((795, 837), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (814, 837), False, 'from django.db import models\n'), ((1033, 1087), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(80)', 'blank': '(True)', 'null': '(True)'}), '(max_length=80, blank=True, null=True)\n', (1049, 1087), False, 'from django.db import models\n'), ((1101, 1192), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""self"""', 'models.DO_NOTHING'], {'db_column': '"""parent"""', 'blank': '(True)', 'null': '(True)'}), "('self', models.DO_NOTHING, db_column='parent', blank=True,\n null=True)\n", (1118, 1192), False, 'from django.db import models\n'), ((1369, 1423), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(80)', 'blank': '(True)', 'null': '(True)'}), '(max_length=80, blank=True, null=True)\n', (1385, 1423), False, 'from django.db import models\n'), ((1439, 1535), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Category', 'models.DO_NOTHING'], {'db_column': '"""category"""', 'blank': '(True)', 'null': '(True)'}), "(Category, models.DO_NOTHING, db_column='category', blank=\n True, null=True)\n", (1456, 1535), False, 'from django.db import models\n'), ((1543, 1598), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (1559, 1598), False, 'from django.db import models\n'), ((1609, 1664), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(True)', 'null': '(True)'}), '(max_length=200, blank=True, null=True)\n', (1625, 1664), False, 'from django.db import models\n'), ((1683, 1739), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'blank': '(True)', 'null': '(True)'}), '(max_length=1000, blank=True, null=True)\n', (1699, 1739), False, 'from django.db import models\n'), ((1941, 2035), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Subject', 'models.DO_NOTHING'], {'db_column': '"""subject"""', 'blank': '(True)', 'null': '(True)'}), "(Subject, models.DO_NOTHING, db_column='subject', blank=\n True, null=True)\n", (1958, 2035), False, 'from django.db import models\n'), ((2045, 2146), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""users.Student"""', 'models.DO_NOTHING'], {'db_column': '"""student"""', 'blank': '(True)', 'null': '(True)'}), "('users.Student', models.DO_NOTHING, db_column='student',\n blank=True, null=True)\n", (2062, 2146), False, 'from django.db import models\n'), ((2156, 2198), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2175, 2198), False, 'from django.db import models\n'), ((2214, 2253), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2230, 2253), False, 'from django.db import models\n'), ((2270, 2313), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2290, 2313), False, 'from django.db import models\n'), ((2576, 2644), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Subject"""', 'models.DO_NOTHING'], {'db_column': '"""subject"""'}), "('Subject', models.DO_NOTHING, db_column='subject')\n", (2593, 2644), False, 'from django.db import models\n'), ((2659, 2733), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""users.Student"""', 'models.DO_NOTHING'], {'db_column': '"""student"""'}), "('users.Student', models.DO_NOTHING, db_column='student')\n", (2676, 2733), False, 'from django.db import models\n'), ((2747, 2789), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2766, 2789), False, 'from django.db import models\n')]
|
from dbnd._core.tracking.schemas.base import ApiStrictSchema
from dbnd._vendor.marshmallow import fields, pre_load
class MLAlert(ApiStrictSchema):
sensitivity = fields.Float()
look_back = fields.Integer()
class AlertDefsSchema(ApiStrictSchema):
severity = fields.Str(required=True)
type = fields.Str(required=True)
user_metric = fields.Str()
operator = fields.Str()
is_str_value = fields.Bool()
created_at = fields.DateTime()
scheduled_job_name = fields.Str(attribute="scheduled_job.name")
source_instance_name = fields.Method("get_tracking_source_name")
env = fields.Method("get_tracking_source_env")
# TODO_CORE: API: Deprecate airflow_server_info
airflow_instance_name = fields.Method("get_tracking_source_name")
project_id = fields.Int(attribute="job.project_id")
project_name = fields.Str(attribute="job.project.name")
alert_on_historical_runs = fields.Bool()
alert_group_uid = fields.Str(allow_none=True)
uid = fields.Str(allow_none=True)
value = fields.Str(allow_none=True)
job_id = fields.Int(allow_none=True)
summary = fields.Str(allow_none=True)
job_name = fields.Str(attribute="job.name", allow_none=True)
task_repr = fields.Str(allow_none=True)
task_name = fields.Str(allow_none=True)
custom_name = fields.Str(allow_none=True)
original_uid = fields.Str(allow_none=True)
advanced_json = fields.Str(allow_none=True)
scheduled_job_uid = fields.Str(allow_none=True)
custom_description = fields.Str(allow_none=True)
ml_alert = fields.Nested(MLAlert, allow_none=True)
# Fields for DatasetSlaAlert/DatasetSlaAdvancedAlert alert
# --------------------------------------
seconds_delta = fields.Int(allow_none=True) # Converts to datetime.timedelta
dataset_partial_name = fields.Str(allow_none=True)
datasets_uids = fields.List(fields.Str(), allow_none=True)
# Fields for OperationColumnStatAdvancedAlert alert
# --------------------------------------
dataset_uid = fields.Str(allow_none=True)
# Operation type (e.g. "read", "write", None=any) to filter stats by
operation_type = fields.Str(allow_none=True)
# Type of MetricRule, found in dbnd_web. Used to build advanced_json
metrics_rules = fields.List(fields.Dict(), allow_none=True)
# Used only used by the UI
affected_datasets = fields.List(fields.Dict(), allow_none=True, dump_only=True)
is_system = fields.Function(
lambda alert_def: alert_def.owner == "system", dump_only=True,
)
def get_tracking_source_name(self, obj):
return self._get_tracking_source_instance(obj).name
def get_tracking_source_env(self, obj):
return self._get_tracking_source_instance(obj).env
def _get_tracking_source_instance(self, obj):
if obj.job:
return obj.job.tracking_source
return obj.tracking_source
@pre_load
def prepere(self, data: dict, **kwargs):
value = data.get("value", None)
if value is not None:
data["value"] = str(data["value"])
return data
class GroupAlertDefsSchema(ApiStrictSchema):
type = fields.Str(required=True)
tracking_source_uid = fields.UUID(required=True)
severity = fields.Str(required=True)
user_metric = fields.Str(required=True)
value = fields.Str(allow_none=True)
operator = fields.Str(allow_none=True)
ml_alert = fields.Nested(MLAlert, allow_none=True)
owner = fields.Str(allow_none=True)
jobs = fields.List(fields.Int(), allow_none=True)
custom_name = fields.Str(allow_none=True)
custom_description = fields.Str(allow_none=True)
@pre_load
def prepere(self, data: dict, **kwargs):
value = data.get("value", None)
if value is not None:
data["value"] = str(data["value"])
return data
|
[
"dbnd._vendor.marshmallow.fields.Function",
"dbnd._vendor.marshmallow.fields.DateTime",
"dbnd._vendor.marshmallow.fields.Integer",
"dbnd._vendor.marshmallow.fields.Method",
"dbnd._vendor.marshmallow.fields.Int",
"dbnd._vendor.marshmallow.fields.UUID",
"dbnd._vendor.marshmallow.fields.Dict",
"dbnd._vendor.marshmallow.fields.Bool",
"dbnd._vendor.marshmallow.fields.Float",
"dbnd._vendor.marshmallow.fields.Str",
"dbnd._vendor.marshmallow.fields.Nested"
] |
[((167, 181), 'dbnd._vendor.marshmallow.fields.Float', 'fields.Float', ([], {}), '()\n', (179, 181), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((198, 214), 'dbnd._vendor.marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (212, 214), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((272, 297), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'required': '(True)'}), '(required=True)\n', (282, 297), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((309, 334), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'required': '(True)'}), '(required=True)\n', (319, 334), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((353, 365), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (363, 365), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((381, 393), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (391, 393), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((413, 426), 'dbnd._vendor.marshmallow.fields.Bool', 'fields.Bool', ([], {}), '()\n', (424, 426), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((445, 462), 'dbnd._vendor.marshmallow.fields.DateTime', 'fields.DateTime', ([], {}), '()\n', (460, 462), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((488, 530), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'attribute': '"""scheduled_job.name"""'}), "(attribute='scheduled_job.name')\n", (498, 530), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((558, 599), 'dbnd._vendor.marshmallow.fields.Method', 'fields.Method', (['"""get_tracking_source_name"""'], {}), "('get_tracking_source_name')\n", (571, 599), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((610, 650), 'dbnd._vendor.marshmallow.fields.Method', 'fields.Method', (['"""get_tracking_source_env"""'], {}), "('get_tracking_source_env')\n", (623, 650), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((731, 772), 'dbnd._vendor.marshmallow.fields.Method', 'fields.Method', (['"""get_tracking_source_name"""'], {}), "('get_tracking_source_name')\n", (744, 772), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((790, 828), 'dbnd._vendor.marshmallow.fields.Int', 'fields.Int', ([], {'attribute': '"""job.project_id"""'}), "(attribute='job.project_id')\n", (800, 828), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((848, 888), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'attribute': '"""job.project.name"""'}), "(attribute='job.project.name')\n", (858, 888), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((920, 933), 'dbnd._vendor.marshmallow.fields.Bool', 'fields.Bool', ([], {}), '()\n', (931, 933), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((956, 983), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (966, 983), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((995, 1022), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1005, 1022), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((1035, 1062), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1045, 1062), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((1076, 1103), 'dbnd._vendor.marshmallow.fields.Int', 'fields.Int', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1086, 1103), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((1118, 1145), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1128, 1145), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((1161, 1210), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'attribute': '"""job.name"""', 'allow_none': '(True)'}), "(attribute='job.name', allow_none=True)\n", (1171, 1210), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((1227, 1254), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1237, 1254), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((1271, 1298), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1281, 1298), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((1317, 1344), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1327, 1344), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((1364, 1391), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1374, 1391), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((1412, 1439), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1422, 1439), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((1464, 1491), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1474, 1491), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((1517, 1544), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1527, 1544), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((1560, 1599), 'dbnd._vendor.marshmallow.fields.Nested', 'fields.Nested', (['MLAlert'], {'allow_none': '(True)'}), '(MLAlert, allow_none=True)\n', (1573, 1599), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((1729, 1756), 'dbnd._vendor.marshmallow.fields.Int', 'fields.Int', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1739, 1756), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((1818, 1845), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (1828, 1845), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((2029, 2056), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (2039, 2056), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((2151, 2178), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (2161, 2178), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((2450, 2528), 'dbnd._vendor.marshmallow.fields.Function', 'fields.Function', (["(lambda alert_def: alert_def.owner == 'system')"], {'dump_only': '(True)'}), "(lambda alert_def: alert_def.owner == 'system', dump_only=True)\n", (2465, 2528), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((3159, 3184), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'required': '(True)'}), '(required=True)\n', (3169, 3184), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((3211, 3237), 'dbnd._vendor.marshmallow.fields.UUID', 'fields.UUID', ([], {'required': '(True)'}), '(required=True)\n', (3222, 3237), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((3253, 3278), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'required': '(True)'}), '(required=True)\n', (3263, 3278), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((3297, 3322), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'required': '(True)'}), '(required=True)\n', (3307, 3322), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((3336, 3363), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (3346, 3363), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((3379, 3406), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (3389, 3406), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((3422, 3461), 'dbnd._vendor.marshmallow.fields.Nested', 'fields.Nested', (['MLAlert'], {'allow_none': '(True)'}), '(MLAlert, allow_none=True)\n', (3435, 3461), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((3474, 3501), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (3484, 3501), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((3575, 3602), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (3585, 3602), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((3628, 3655), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (3638, 3655), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((1878, 1890), 'dbnd._vendor.marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (1888, 1890), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((2285, 2298), 'dbnd._vendor.marshmallow.fields.Dict', 'fields.Dict', ([], {}), '()\n', (2296, 2298), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((2385, 2398), 'dbnd._vendor.marshmallow.fields.Dict', 'fields.Dict', ([], {}), '()\n', (2396, 2398), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n'), ((3525, 3537), 'dbnd._vendor.marshmallow.fields.Int', 'fields.Int', ([], {}), '()\n', (3535, 3537), False, 'from dbnd._vendor.marshmallow import fields, pre_load\n')]
|
import datetime as dt
import numpy as np
import pandas as pd
# START USER INPUT
lgs_filepath = 'U:/CIO/#Data/output/investment/checker/lgs_table.csv'
jpm_filepath = 'U:/CIO/#Data/input/jpm/report/investment/LGSS Preliminary Performance 202005.xlsx'
lgs_dictionary_filepath = 'U:/CIO/#Data/input/lgs/dictionary/2020/06/New Dictionary_v10.xlsx'
FYTD = 11
report_date = dt.datetime(2020, 5, 31)
# End USER INPUT
# Reads LGS table
df_lgs = pd.read_csv(lgs_filepath)
# Reads LGS dictionary
df_lgs_dict = pd.read_excel(
pd.ExcelFile(lgs_dictionary_filepath),
sheet_name='Sheet1',
header=0
)
# Reads JPM Performance Report
df_jpm = pd.DataFrame()
sheet_to_columns_dict = {
'Page 3 NOF': 'A:N',
'Page 5 NOF': 'B:O',
'Page 6 NOF': 'B:O',
'Page 7 NOF': 'B:O',
'Page 8': 'D:O'
}
for sheet, columns in sheet_to_columns_dict.items():
print('Accessing:', sheet)
df_sheet = pd.read_excel(
pd.ExcelFile(jpm_filepath),
sheet_name=sheet,
usecols=columns,
skiprows=[0, 1, 2]
)
df_sheet = df_sheet.rename(
columns={
'Unnamed: 0': 'ModelCode',
'Unnamed: 1': 'JPM ReportName',
'Unnamed: 2': 'JPM ReportName',
}
)
if sheet == 'Page 8':
df_sheet = df_sheet.rename(
columns={
'Unnamed: 0': 'ModelCode',
'Unnamed: 4': 'JPM ReportName',
}
)
df_jpm = pd.concat([df_jpm, df_sheet], sort=False)
df_jpm = df_jpm.reset_index(drop=True)
df_jpm = df_jpm.replace('-', np.nan)
df_jpm = df_jpm.drop(columns=['ModelCode'], axis=1)
df_jpm['Market Value'] = (df_jpm['Market Value']/1000000).round(2)
# Reads footers and removes them
df_footers = pd.read_excel('U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx')
remove_items = list(df_footers['Footers']) + [np.nan, 'Excess return']
df_jpm = df_jpm[~df_jpm['JPM ReportName'].isin(remove_items)].reset_index(drop=True)
df_lgs_jpm = pd.merge(
left=df_lgs,
right=df_jpm,
on=['JPM ReportName'],
how='outer'
)
df_later = df_lgs_jpm[df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True)
df_lgs_jpm = df_lgs_jpm[~df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True)
# Creates LGS to JPM column dictionary
lgscolumn_to_jpmcolumn_dict = {
'Market Value_x': 'Market Value_y',
'1_Return': '1 Month',
'3_Return': '3 Months',
'FYTD_Return': 'FYTD',
'12_Return': '1 Year',
'36_Return': '3 Years',
'60_Return': '5 Years',
'84_Return': '7 Years'
}
# Performs the deviant check
df_deviations = pd.DataFrame()
deviants = []
columns = []
deviations = []
jpm_missing = []
lgs_missing = []
total_count = 0
deviant_count = 0
for lgscolumn, jpmcolumn in lgscolumn_to_jpmcolumn_dict.items():
for i in range(0, len(df_lgs_jpm)):
deviation = df_lgs_jpm[lgscolumn][i] - df_lgs_jpm[jpmcolumn][i]
if deviation >= 0.01:
deviants.append(df_lgs_jpm['Manager'][i])
columns.append(jpmcolumn)
deviations.append(deviation)
deviant_count += 1
if (not pd.isna(df_lgs_jpm[jpmcolumn][i])) and (pd.isna(df_lgs_jpm[lgscolumn][i])):
lgs_missing.append((df_lgs_jpm['Manager'][i], lgscolumn))
if (pd.isna(df_lgs_jpm[jpmcolumn][i])) and (not pd.isna(df_lgs_jpm[lgscolumn][i])):
jpm_missing.append((df_lgs_jpm['JPM ReportName'][i], jpmcolumn))
total_count += 1
# Fixes the column names
columns_fix = []
for column in columns:
if column == 'Market Value_y':
columns_fix.append('Market Value')
else:
columns_fix.append(column)
df_deviations['Manager'] = deviants
df_deviations['Column'] = columns_fix
df_deviations['Deviations'] = deviations
df_lgs_missing = pd.DataFrame(lgs_missing, columns=['Manager', 'Column'])
df_jpm_missing = pd.DataFrame(jpm_missing, columns=['Manager', 'Column'])
# Calculates accuracy
accuracy = round(((total_count - deviant_count)/total_count)*100, 2)
# Prints accuracy results
print('\nMissing during check from LGS', lgs_missing)
print('\nMissing during check from JPM', jpm_missing)
print('\nThe deviants are:\n')
print(df_deviations, '\n')
print('Total Count: ', total_count, 'Deviant Count: ', deviant_count, 'Accuracy: ', accuracy, '%')
# Checks for managers that have been completely missed.
# Creates set containing fund managers that are currently open accounts.
df_lgs_open = df_lgs_dict[df_lgs_dict['LGS Open'].isin([1])].reset_index(drop=True)
df_lgs_open = df_lgs_open.rename(columns={'LGS Name': 'Manager'})
lgs_open_set = set(list(df_lgs_open['Manager']))
# Creates set containing strategies.
df_lgs_strategy = df_lgs_dict[df_lgs_dict['LGS Strategy Aggregate'].isin([1])].reset_index(drop=True)
df_lgs_strategy = df_lgs_strategy.rename(columns={'LGS Name': 'Manager'})
lgs_strategy_set = set(list(df_lgs_strategy['Manager']))
# Creates set containing liquidity accounts.
df_lgs_liquidity = df_lgs_dict[df_lgs_dict['LGS Liquidity'].isin([1])].reset_index(drop=True)
df_lgs_liquidity = df_lgs_liquidity.rename(columns={'LGS Name': 'Manager'})
lgs_liquidity_set = set(list(df_lgs_liquidity['Manager']))
# Creates set containing fund managers that have been checked.
lgs_check_set = set(list(df_lgs_jpm['Manager']))
# Creates set containing fund managers that are open accounts but are not checked.
df_lgs_missing_completely = lgs_open_set - lgs_check_set - lgs_strategy_set - lgs_liquidity_set - {np.nan}
# Prints open accounts that are missing from LGS.
print('\nMissing completely from LGS', df_lgs_missing_completely)
# Import JPM_IAP, Accounts; By ID; Include Closed Accounts; Select All; Mode: Portfolio Only
# jpm_iap_filepath = 'U:/CIO/#Investment_Report/Data/input/testing/jpm_iap/'
# jpm_iap_filenames = sorted(os.listdir(jpm_iap_filepath))
# df_jpm_iap = pd.DataFrame()
# for filename in jpm_iap_filenames:
# jpm_iap_xlsx = pd.ExcelFile(jpm_iap_filepath + filename)
# df_jpm_iap_temp = pd.read_excel(
# jpm_iap_xlsx,
# sheet_name='Sheet1',
# skiprows=[0, 1],
# header=0
# )
# df_jpm_iap_temp['Date'] = dt.datetime(int(filename[:4]), int(filename[4:6]), int(filename[6:8]))
# df_jpm_iap = pd.concat([df_jpm_iap, df_jpm_iap_temp], sort=False)
#
# df_jpm_iap = df_jpm_iap.rename(columns={'Account Id': 'Manager'}).reset_index(drop=True)
# df_jpm_iap = df_jpm_iap[['Manager', 'Date', 'Market Value']]
#
# # Merges the market values from JPM IAP with JPM HTS
# df_jpm_main = pd\
# .merge(
# left=df_jpm_iap,
# right=df_jpm,
# left_on=['Manager', 'Date'],
# right_on=['Manager', 'Date'],
# how='right'
# )\
# .sort_values(['Manager', 'Date'])\
# .reset_index(drop=True)
|
[
"pandas.DataFrame",
"pandas.read_csv",
"pandas.merge",
"pandas.ExcelFile",
"datetime.datetime",
"pandas.read_excel",
"pandas.isna",
"pandas.concat"
] |
[((368, 392), 'datetime.datetime', 'dt.datetime', (['(2020)', '(5)', '(31)'], {}), '(2020, 5, 31)\n', (379, 392), True, 'import datetime as dt\n'), ((438, 463), 'pandas.read_csv', 'pd.read_csv', (['lgs_filepath'], {}), '(lgs_filepath)\n', (449, 463), True, 'import pandas as pd\n'), ((641, 655), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (653, 655), True, 'import pandas as pd\n'), ((1730, 1818), 'pandas.read_excel', 'pd.read_excel', (['"""U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx"""'], {}), "(\n 'U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx')\n", (1743, 1818), True, 'import pandas as pd\n'), ((1985, 2056), 'pandas.merge', 'pd.merge', ([], {'left': 'df_lgs', 'right': 'df_jpm', 'on': "['JPM ReportName']", 'how': '"""outer"""'}), "(left=df_lgs, right=df_jpm, on=['JPM ReportName'], how='outer')\n", (1993, 2056), True, 'import pandas as pd\n'), ((2597, 2611), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2609, 2611), True, 'import pandas as pd\n'), ((3777, 3833), 'pandas.DataFrame', 'pd.DataFrame', (['lgs_missing'], {'columns': "['Manager', 'Column']"}), "(lgs_missing, columns=['Manager', 'Column'])\n", (3789, 3833), True, 'import pandas as pd\n'), ((3851, 3907), 'pandas.DataFrame', 'pd.DataFrame', (['jpm_missing'], {'columns': "['Manager', 'Column']"}), "(jpm_missing, columns=['Manager', 'Column'])\n", (3863, 3907), True, 'import pandas as pd\n'), ((521, 558), 'pandas.ExcelFile', 'pd.ExcelFile', (['lgs_dictionary_filepath'], {}), '(lgs_dictionary_filepath)\n', (533, 558), True, 'import pandas as pd\n'), ((1445, 1486), 'pandas.concat', 'pd.concat', (['[df_jpm, df_sheet]'], {'sort': '(False)'}), '([df_jpm, df_sheet], sort=False)\n', (1454, 1486), True, 'import pandas as pd\n'), ((926, 952), 'pandas.ExcelFile', 'pd.ExcelFile', (['jpm_filepath'], {}), '(jpm_filepath)\n', (938, 952), True, 'import pandas as pd\n'), ((3153, 3186), 'pandas.isna', 'pd.isna', (['df_lgs_jpm[lgscolumn][i]'], {}), '(df_lgs_jpm[lgscolumn][i])\n', (3160, 3186), True, 'import pandas as pd\n'), ((3272, 3305), 'pandas.isna', 'pd.isna', (['df_lgs_jpm[jpmcolumn][i]'], {}), '(df_lgs_jpm[jpmcolumn][i])\n', (3279, 3305), True, 'import pandas as pd\n'), ((3113, 3146), 'pandas.isna', 'pd.isna', (['df_lgs_jpm[jpmcolumn][i]'], {}), '(df_lgs_jpm[jpmcolumn][i])\n', (3120, 3146), True, 'import pandas as pd\n'), ((3316, 3349), 'pandas.isna', 'pd.isna', (['df_lgs_jpm[lgscolumn][i]'], {}), '(df_lgs_jpm[lgscolumn][i])\n', (3323, 3349), True, 'import pandas as pd\n')]
|
import os
import json
import calendar
import time
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from .cachingHelper import getCache
from .constants import LINKTAG_PUBTIME, FEEDTAG_DO_NOT_CLUSTER
from .dbhelper import parseConnectionString, getS3Connection
from .doc import Doc
def _getEpochSecs(t):
return calendar.timegm(time.strptime(t[:19], "%Y-%m-%dT%H:%M:%S"))
class DocManager:
"""
Manage documents stored in cloud.
Contains functions for CRUD operations on documents
"""
def __init__(self):
"""
Instantiates a new instance of DocManager class
'bucketConnString' : connection string of s3 bucket in which docs
are stored.
"""
self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING']
self.cache = getCache()
self.__cacheExpiry= 900
def __getBucket(self):
bucketConnParams = parseConnectionString(self.bucketConnString)
conn = getS3Connection(self.bucketConnString)
return conn.get_bucket(bucketConnParams['bucketName'], validate=False)
def __isDocNew(self, key, timeLimit):
if _getEpochSecs(key.last_modified) < timeLimit:
return False
doc = self.get(key.name)
return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and \
(FEEDTAG_DO_NOT_CLUSTER not in doc.tags)
def put(self, doc):
k = Key(self.__getBucket())
k.key = doc.key
# not storing tags directly in blob's metadata as the maximum size
# allowed there is only 2kb.
tags = dict(doc.tags)
tags['content'] = doc.content
keyContents = json.dumps(tags)
k.set_contents_from_string(keyContents)
self.cache.set(k.key, keyContents, self.__cacheExpiry)
def get(self, docKey):
keyContents = self.cache.get(docKey)
if not keyContents:
k = Key(self.__getBucket())
k.key = docKey
keyContents = k.get_contents_as_string()
self.cache.set(docKey, keyContents, self.__cacheExpiry)
storedTags = json.loads(keyContents)
content = storedTags.pop('content', None)
tags = storedTags
return Doc(docKey, content, tags)
def delete(self, docKey):
k = Key(self.__getBucket())
k.key = docKey
k.delete()
self.cache.delete(docKey)
|
[
"time.strptime",
"json.loads",
"json.dumps"
] |
[((355, 397), 'time.strptime', 'time.strptime', (['t[:19]', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(t[:19], '%Y-%m-%dT%H:%M:%S')\n", (368, 397), False, 'import time\n'), ((1663, 1679), 'json.dumps', 'json.dumps', (['tags'], {}), '(tags)\n', (1673, 1679), False, 'import json\n'), ((2102, 2125), 'json.loads', 'json.loads', (['keyContents'], {}), '(keyContents)\n', (2112, 2125), False, 'import json\n')]
|
from django.urls import path
from . import views
app_name = 'blog'
urlpatterns = [
path('', views.post_list, name='post_list'),
path('<slug:post>/', views.post_detail, name='post_detail'),
path('comment/reply/', views.reply_page, name='reply'),
path('about', views.about_page, name='about'),
]
|
[
"django.urls.path"
] |
[((88, 131), 'django.urls.path', 'path', (['""""""', 'views.post_list'], {'name': '"""post_list"""'}), "('', views.post_list, name='post_list')\n", (92, 131), False, 'from django.urls import path\n'), ((137, 196), 'django.urls.path', 'path', (['"""<slug:post>/"""', 'views.post_detail'], {'name': '"""post_detail"""'}), "('<slug:post>/', views.post_detail, name='post_detail')\n", (141, 196), False, 'from django.urls import path\n'), ((202, 256), 'django.urls.path', 'path', (['"""comment/reply/"""', 'views.reply_page'], {'name': '"""reply"""'}), "('comment/reply/', views.reply_page, name='reply')\n", (206, 256), False, 'from django.urls import path\n'), ((262, 307), 'django.urls.path', 'path', (['"""about"""', 'views.about_page'], {'name': '"""about"""'}), "('about', views.about_page, name='about')\n", (266, 307), False, 'from django.urls import path\n')]
|
from mycroft import MycroftSkill, intent_file_handler
class Midicontrol(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
@intent_file_handler('midicontrol.intent')
def handle_midicontrol(self, message):
self.speak_dialog('midicontrol')
def create_skill():
return Midicontrol()
|
[
"mycroft.MycroftSkill.__init__",
"mycroft.intent_file_handler"
] |
[((155, 196), 'mycroft.intent_file_handler', 'intent_file_handler', (['"""midicontrol.intent"""'], {}), "('midicontrol.intent')\n", (174, 196), False, 'from mycroft import MycroftSkill, intent_file_handler\n'), ((121, 148), 'mycroft.MycroftSkill.__init__', 'MycroftSkill.__init__', (['self'], {}), '(self)\n', (142, 148), False, 'from mycroft import MycroftSkill, intent_file_handler\n')]
|
from os.path import expanduser
from os import sep
from re import split
from functools import reduce
from xmtrace import xmtrace
@xmtrace
def xm_path_translate(lua, ph):
return expanduser(reduce(lambda a, b: a + sep + b, split(r"\\|/", ph)))
|
[
"re.split"
] |
[((225, 244), 're.split', 'split', (['"""\\\\\\\\|/"""', 'ph'], {}), "('\\\\\\\\|/', ph)\n", (230, 244), False, 'from re import split\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import logging
import logging.handlers
CRITICAL = 1
ERROR = 2
WARNING = 3
INFO = 4
DEBUG = 5
class Logger:
def __init__(self, fileName, level=DEBUG):
dictLevel = {
CRITICAL: logging.CRITICAL,
ERROR: logging.ERROR,
WARNING: logging.WARNING,
INFO: logging.INFO,
DEBUG: logging.DEBUG
}
if level < CRITICAL or level > DEBUG:
level = DEBUG
logLevel = dictLevel[level]
# mkdir
abspath = os.path.abspath(fileName)
dir = os.path.dirname(abspath)
if not os.path.exists(dir):
os.makedirs(dir)
self.logger = logging.getLogger(dir)
self.logger.setLevel(logLevel)
fmt = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s')
# 控制台日志
sh = logging.StreamHandler()
sh.setFormatter(fmt)
sh.setLevel(logLevel)
# 文件日志
fh = logging.handlers.RotatingFileHandler(fileName, maxBytes=50*1024*1024, backupCount=20)
fh.setFormatter(fmt)
fh.setLevel(logLevel)
self.logger.addHandler(fh)
def debug(self, message):
self.logger.debug(message)
def info(self, message):
self.logger.info(message)
def warn(self, message):
self.logger.warn(message)
def error(self, message):
self.logger.error(message)
def critical(self, message):
self.logger.critical(message)
if __name__ == "__main__":
logger = Logger('test.log', INFO)
logger.debug('this is a debug message')
logger.info('this is a info message')
logger.warn('this is a warn message')
|
[
"os.path.abspath",
"os.makedirs",
"os.path.dirname",
"logging.StreamHandler",
"os.path.exists",
"logging.Formatter",
"logging.handlers.RotatingFileHandler",
"logging.getLogger"
] |
[((588, 613), 'os.path.abspath', 'os.path.abspath', (['fileName'], {}), '(fileName)\n', (603, 613), False, 'import os\n'), ((628, 652), 'os.path.dirname', 'os.path.dirname', (['abspath'], {}), '(abspath)\n', (643, 652), False, 'import os\n'), ((740, 762), 'logging.getLogger', 'logging.getLogger', (['dir'], {}), '(dir)\n', (757, 762), False, 'import logging\n'), ((816, 878), 'logging.Formatter', 'logging.Formatter', (['"""[%(asctime)s] [%(levelname)s] %(message)s"""'], {}), "('[%(asctime)s] [%(levelname)s] %(message)s')\n", (833, 878), False, 'import logging\n'), ((908, 931), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (929, 931), False, 'import logging\n'), ((1019, 1112), 'logging.handlers.RotatingFileHandler', 'logging.handlers.RotatingFileHandler', (['fileName'], {'maxBytes': '(50 * 1024 * 1024)', 'backupCount': '(20)'}), '(fileName, maxBytes=50 * 1024 * 1024,\n backupCount=20)\n', (1055, 1112), False, 'import logging\n'), ((668, 687), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (682, 687), False, 'import os\n'), ((701, 717), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (712, 717), False, 'import os\n')]
|
import matplotlib.pyplot as plt
import numpy as np
nus_lpf,mu_lpf=np.load("clpf.npz",allow_pickle=True)["arr_0"]
nus_modit,mu_modit=np.load("cmodit4500.npz",allow_pickle=True)["arr_0"]
fig=plt.figure(figsize=(8,4))
plt.plot(nus_modit,mu_modit,label="MODIT",color="C1")
plt.plot(nus_lpf,mu_lpf,label="DIRECT",ls="dashed",color="C0")
plt.xlabel("wavenumber (cm-1)")
plt.ylabel("spectrum")
plt.legend()
plt.savefig("compspec_luhman16A.png")
plt.show()
|
[
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((192, 218), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (202, 218), True, 'import matplotlib.pyplot as plt\n'), ((218, 274), 'matplotlib.pyplot.plot', 'plt.plot', (['nus_modit', 'mu_modit'], {'label': '"""MODIT"""', 'color': '"""C1"""'}), "(nus_modit, mu_modit, label='MODIT', color='C1')\n", (226, 274), True, 'import matplotlib.pyplot as plt\n'), ((272, 338), 'matplotlib.pyplot.plot', 'plt.plot', (['nus_lpf', 'mu_lpf'], {'label': '"""DIRECT"""', 'ls': '"""dashed"""', 'color': '"""C0"""'}), "(nus_lpf, mu_lpf, label='DIRECT', ls='dashed', color='C0')\n", (280, 338), True, 'import matplotlib.pyplot as plt\n'), ((336, 367), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""wavenumber (cm-1)"""'], {}), "('wavenumber (cm-1)')\n", (346, 367), True, 'import matplotlib.pyplot as plt\n'), ((368, 390), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""spectrum"""'], {}), "('spectrum')\n", (378, 390), True, 'import matplotlib.pyplot as plt\n'), ((391, 403), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (401, 403), True, 'import matplotlib.pyplot as plt\n'), ((404, 441), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""compspec_luhman16A.png"""'], {}), "('compspec_luhman16A.png')\n", (415, 441), True, 'import matplotlib.pyplot as plt\n'), ((442, 452), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (450, 452), True, 'import matplotlib.pyplot as plt\n'), ((68, 106), 'numpy.load', 'np.load', (['"""clpf.npz"""'], {'allow_pickle': '(True)'}), "('clpf.npz', allow_pickle=True)\n", (75, 106), True, 'import numpy as np\n'), ((134, 178), 'numpy.load', 'np.load', (['"""cmodit4500.npz"""'], {'allow_pickle': '(True)'}), "('cmodit4500.npz', allow_pickle=True)\n", (141, 178), True, 'import numpy as np\n')]
|
import icon_get
import unittest
import mainutils
import iconmanager
test_ini = """
[Rainmeter]
Author=<EMAIL>.<EMAIL>.<EMAIL>
Name=Mid Dock
------------------------------------------------------------------------
;Metadata added by RainBrowser
;http://rainmeter.net/RainCMS/?q=Rainmeter101_AnatomyOfASkin
[Metadata]
Name=
Config=
Description=
Instructions=
Version=
Tags=
License=
Variant=
Preview=
;End of added Metadata
;-----------------------------------------------------------------------
; MEASURES MEASURES
[MeasurePower]
Measure=Plugin
Plugin=Plugins\PowerPlugin.dll
PowerState=PERCENT
[MeasureBin]
Measure=Plugin
Plugin=RecycleManager.dll
RecycleType=COUNT
Drives=ALL
[MeasureBin2]
Measure=Plugin
Plugin=RecycleManager.dll
RecycleType=SIZE
Drives=ALL
[BinAction]
Measure=Calc
Formula=MeasureBin
IfAboveAction=!execute [!RainmeterHideMeter IconEmpty][!RainmeterShowMeter IconFull]
IfAboveValue=0
IfEqualAction=!execute [!RainmeterHideMeter IconFull][!RainmeterShowMeter IconEmpty]
IfEqualValue=0
;---------------------------------------------------------------------------
; APPLICATIONS
[video editor]
Meter=Button
Y=2R
ButtonImage="E:\Desktop\Test junk\video editor icon.png"
ButtonCommand=!execute ["E:\Desktop\Test junk\video editor.exe"]
[stackoverflow help]
Meter=Button
Y=2R
ButtonImage="E:\Desktop\Test junk\stackoverflow help icon.png"
ButtonCommand=!execute ["E:\Desktop\Test junk\stackoverflow help.exe"]
[movie maker]
Meter=Button
Y=2R
ButtonImage="E:\Desktop\Test junk\movie maker icon.png"
ButtonCommand=!execute ["E:\Desktop\Test junk\movie maker.exe"]
[Terraria]
Meter=Button
Y=2R
ButtonImage="E:\Desktop\Test junk\Terraria icon.png"
ButtonCommand=!execute ["E:\Desktop\Test junk\Terraria.url"]"""
class SmokeTests(unittest.TestCase):
def test_get_urls(self):
T = icon_get.get_urls
assert T("minecraft")
assert T("Dota 2")
assert T("Photoshop")
def test_sorting_by_ini(self):
icon_names = ["Terraria", "movie maker", "video editor", "stackoverflow help", "new program"]
icons = [iconmanager.IconManager(name=icon_name, image_save_path=".", app_path=".") for icon_name in icon_names]
correctly_sorted_names = ["new program", "video editor", "stackoverflow help", "movie maker", "Terraria"]
icons = mainutils.sort_by_ini(icons, ini_str=test_ini)
for correct_name, actual_icon in zip(correctly_sorted_names, icons):
assert actual_icon.name == correct_name, "Incorrectly sorted icons"
|
[
"iconmanager.IconManager",
"mainutils.sort_by_ini"
] |
[((2355, 2401), 'mainutils.sort_by_ini', 'mainutils.sort_by_ini', (['icons'], {'ini_str': 'test_ini'}), '(icons, ini_str=test_ini)\n', (2376, 2401), False, 'import mainutils\n'), ((2120, 2194), 'iconmanager.IconManager', 'iconmanager.IconManager', ([], {'name': 'icon_name', 'image_save_path': '"""."""', 'app_path': '"""."""'}), "(name=icon_name, image_save_path='.', app_path='.')\n", (2143, 2194), False, 'import iconmanager\n')]
|
from transformers import pipeline
import wikipedia
import warnings
import streamlit as st
warnings.filterwarnings("ignore")
def get_context_from_wiki(query: str) -> str:
"Given a query, return the summary about the query from wikipedia"
results = wikipedia.search(query)
# There could be more than 1 due to Disambiguation issue
try:
summary = wikipedia.summary(results[0], sentences=10)
except wikipedia.DisambiguationError as e:
ambiguous_terms = e.options
# take the first one from the list of ambiguous terms and try again
return wikipedia.summary(ambiguous_terms[0], sentences=10)
return summary
def get_qa_pipeline():
qa_pipeline = pipeline("question-answering")
return qa_pipeline
def answer_question(pipeline, question, context):
result = pipeline(question=question, context=context)
#return f"Answer: {result['answer']}, score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}"
return result
if __name__ == '__main__':
st.title("Extractive Question Answering")
pipeline = get_qa_pipeline()
add_select_option = st.sidebar.selectbox(
"Exploration Options", ("Query Based", "Paragraph based")
)
if add_select_option == "Query Based":
paragraph_slot = st.empty()
query = st.text_area("WIKI SEARCH TERM", "")
if query:
context = get_context_from_wiki(query)
paragraph_slot.markdown(context)
elif add_select_option == "Paragraph based":
question = st.empty()
context = st.text_area("Enter the paragraph to explore", value="...")
question = st.text_input("QUESTION", "")
# print(f"Context: {context}\n")
# print(f"Question: {question}\n")
# print(answer_question(pipeline, question=question, context=context))
if question:
try:
answer = answer_question(pipeline, question=question, context=context)
st.write(answer['answer'])
except:
st.write("Provide a valid paragraph")
|
[
"wikipedia.search",
"streamlit.text_input",
"transformers.pipeline",
"warnings.filterwarnings",
"streamlit.title",
"streamlit.write",
"streamlit.text_area",
"streamlit.sidebar.selectbox",
"wikipedia.summary",
"streamlit.empty"
] |
[((92, 125), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (115, 125), False, 'import warnings\n'), ((255, 278), 'wikipedia.search', 'wikipedia.search', (['query'], {}), '(query)\n', (271, 278), False, 'import wikipedia\n'), ((675, 705), 'transformers.pipeline', 'pipeline', (['"""question-answering"""'], {}), "('question-answering')\n", (683, 705), False, 'from transformers import pipeline\n'), ((794, 838), 'transformers.pipeline', 'pipeline', ([], {'question': 'question', 'context': 'context'}), '(question=question, context=context)\n', (802, 838), False, 'from transformers import pipeline\n'), ((1017, 1058), 'streamlit.title', 'st.title', (['"""Extractive Question Answering"""'], {}), "('Extractive Question Answering')\n", (1025, 1058), True, 'import streamlit as st\n'), ((1117, 1196), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Exploration Options"""', "('Query Based', 'Paragraph based')"], {}), "('Exploration Options', ('Query Based', 'Paragraph based'))\n", (1137, 1196), True, 'import streamlit as st\n'), ((1631, 1660), 'streamlit.text_input', 'st.text_input', (['"""QUESTION"""', '""""""'], {}), "('QUESTION', '')\n", (1644, 1660), True, 'import streamlit as st\n'), ((359, 402), 'wikipedia.summary', 'wikipedia.summary', (['results[0]'], {'sentences': '(10)'}), '(results[0], sentences=10)\n', (376, 402), False, 'import wikipedia\n'), ((1276, 1286), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (1284, 1286), True, 'import streamlit as st\n'), ((1303, 1339), 'streamlit.text_area', 'st.text_area', (['"""WIKI SEARCH TERM"""', '""""""'], {}), "('WIKI SEARCH TERM', '')\n", (1315, 1339), True, 'import streamlit as st\n'), ((563, 614), 'wikipedia.summary', 'wikipedia.summary', (['ambiguous_terms[0]'], {'sentences': '(10)'}), '(ambiguous_terms[0], sentences=10)\n', (580, 614), False, 'import wikipedia\n'), ((1522, 1532), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (1530, 1532), True, 'import streamlit as st\n'), ((1551, 1610), 'streamlit.text_area', 'st.text_area', (['"""Enter the paragraph to explore"""'], {'value': '"""..."""'}), "('Enter the paragraph to explore', value='...')\n", (1563, 1610), True, 'import streamlit as st\n'), ((1938, 1964), 'streamlit.write', 'st.write', (["answer['answer']"], {}), "(answer['answer'])\n", (1946, 1964), True, 'import streamlit as st\n'), ((1993, 2030), 'streamlit.write', 'st.write', (['"""Provide a valid paragraph"""'], {}), "('Provide a valid paragraph')\n", (2001, 2030), True, 'import streamlit as st\n')]
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for defining the data used to train machine learning models.
Data to be used in training, prediction and evaluation is described in terms of
features. This module provides functionality to define those features, and data
transformations to apply to produce those features.
"""
from _features import CategoricalFeatureColumn
from _features import Feature
from _features import FeatureColumn
from _features import FeatureFormat
from _features import FeatureMetadata
from _features import ImageFeatureColumn
from _features import KeyFeatureColumn
from _features import NumericFeatureColumn
from _features import TargetFeatureColumn
from _features import TextFeatureColumn
from _predict import FeatureProducer
from _registries import register_analyzer
from _registries import register_transformer
from _transforms import ExampleProtoFormatter
from _transforms import FeatureVector
def key(name):
"""Creates a feature representing the key of the instance.
Args:
name: Name of feature column.
Returns:
An instance of KeyFeatureColumn.
"""
return KeyFeatureColumn(name)
def target(name='target'):
"""Creates a feature representing the target label or value of the instance.
Args:
name: Name of feature column.
Returns:
An instance of TargetFeatureColumn.
"""
return TargetFeatureColumn(name)
def numeric(name, default=None, log_base=0):
"""Creates a numeric column within a feature.
Args:
name: Name of feature column.
default: Default value for the column.
log_base: Base of logarithm to be applied.
Returns:
An instance of NumericFeatureColumn.
"""
return NumericFeatureColumn(name, default=default, log_base=log_base)
def categorical(name, default=None, frequency_threshold=5,
split_regex=None):
r"""Creates a categorical or discrete value column within a feature.
Args:
name: Name of feature column.
default: Default value for the column.
frequency_threshold: Frequency threshold below which words are not added to
the vocab.
split_regex: Regex rule to extract the column value. Defaults to None,
which means no splitting.
Examples:
- Use r'\w{1,}' to group alphanumerical characters of len 1.
- Use r'\w{3,}' to group alphanumerical characters of len 3.
- Use r'\S+' to group on non-whitespace.
Returns:
An instance of CategoricalFeatureColumn.
"""
return CategoricalFeatureColumn(name, default=default,
frequency_threshold=frequency_threshold,
split_regex=split_regex)
def text(name,
default=None,
sampling_percentage=100,
split_regex=r'\w{3,}',
stop_words='english',
use_stemmer=False,
ngrams=1,
use_tf_idf=False,
normalize=False,
strip_html=False,
removable_tags=None,
word2vec_dict=None,
frequency_threshold=0):
"""Creates a free-form text value column within a feature.
Args:
name: Name of feature column.
default: Default value for the column.
sampling_percentage: Percentage value (0-100) for the number of rows that
should be sampled for constructing the vocabulary/ngrams.
split_regex: Regex rule to split text
stop_words: Either list or set, specifying the stop words to be ignored or a
string representing the language of stopwords to be requested from nltk.
Use [] for no stopwords. For more info nltk.corpus.stopwords.readme()
use_stemmer: Boolean on whether text should be stemmed
ngrams: number of ngrams the tokenizer should generate (2 for bigrams etc)
use_tf_idf: Boolean on whether the BOW representation should be tf*idf
normalize: Boolean on whether sparse vector (BOW or tf*idf) should be
normalize (used with L2 norm)
strip_html: Boolean on whether html_markup should be removed before
processing
removable_tags: list of html tags whose text should be ignored
word2vec_dict: Dictionary of word -> word_vectors. If it is not empty, then
the words will be replaced with a matrix, one row for each word
frequency_threshold: Frequency threshold below which words/ngrams
are not added to the vocab.
Returns:
An instance of TextFeatureColumn.
"""
return TextFeatureColumn(
name,
default=default,
sampling_percentage=sampling_percentage,
split_regex=split_regex,
stop_words=stop_words,
use_stemmer=use_stemmer,
ngrams=ngrams,
use_tf_idf=use_tf_idf,
normalize=normalize,
strip_html=strip_html,
removable_tags=removable_tags,
word2vec_dict=word2vec_dict,
frequency_threshold=frequency_threshold)
def image(name, default=None):
"""Creates an image column within a feature..
Args:
name: name of image feature
default: Default value for the column.
Returns:
An instance of ImageFeatureColumn.
"""
return ImageFeatureColumn(name, default=default)
|
[
"_features.ImageFeatureColumn",
"_features.TextFeatureColumn",
"_features.CategoricalFeatureColumn",
"_features.KeyFeatureColumn",
"_features.NumericFeatureColumn",
"_features.TargetFeatureColumn"
] |
[((1671, 1693), '_features.KeyFeatureColumn', 'KeyFeatureColumn', (['name'], {}), '(name)\n', (1687, 1693), False, 'from _features import KeyFeatureColumn\n'), ((1912, 1937), '_features.TargetFeatureColumn', 'TargetFeatureColumn', (['name'], {}), '(name)\n', (1931, 1937), False, 'from _features import TargetFeatureColumn\n'), ((2234, 2296), '_features.NumericFeatureColumn', 'NumericFeatureColumn', (['name'], {'default': 'default', 'log_base': 'log_base'}), '(name, default=default, log_base=log_base)\n', (2254, 2296), False, 'from _features import NumericFeatureColumn\n'), ((3023, 3141), '_features.CategoricalFeatureColumn', 'CategoricalFeatureColumn', (['name'], {'default': 'default', 'frequency_threshold': 'frequency_threshold', 'split_regex': 'split_regex'}), '(name, default=default, frequency_threshold=\n frequency_threshold, split_regex=split_regex)\n', (3047, 3141), False, 'from _features import CategoricalFeatureColumn\n'), ((4926, 5286), '_features.TextFeatureColumn', 'TextFeatureColumn', (['name'], {'default': 'default', 'sampling_percentage': 'sampling_percentage', 'split_regex': 'split_regex', 'stop_words': 'stop_words', 'use_stemmer': 'use_stemmer', 'ngrams': 'ngrams', 'use_tf_idf': 'use_tf_idf', 'normalize': 'normalize', 'strip_html': 'strip_html', 'removable_tags': 'removable_tags', 'word2vec_dict': 'word2vec_dict', 'frequency_threshold': 'frequency_threshold'}), '(name, default=default, sampling_percentage=\n sampling_percentage, split_regex=split_regex, stop_words=stop_words,\n use_stemmer=use_stemmer, ngrams=ngrams, use_tf_idf=use_tf_idf,\n normalize=normalize, strip_html=strip_html, removable_tags=\n removable_tags, word2vec_dict=word2vec_dict, frequency_threshold=\n frequency_threshold)\n', (4943, 5286), False, 'from _features import TextFeatureColumn\n'), ((5574, 5615), '_features.ImageFeatureColumn', 'ImageFeatureColumn', (['name'], {'default': 'default'}), '(name, default=default)\n', (5592, 5615), False, 'from _features import ImageFeatureColumn\n')]
|
from aedes_server.core.clusters import compute_clusters
from django.core.management import BaseCommand
class Command(BaseCommand):
help = 'Calculate clusters for AedeSpot app.'
def handle(self, *args, **options):
'''
Computing clusters.
'''
compute_clusters()
|
[
"aedes_server.core.clusters.compute_clusters"
] |
[((284, 302), 'aedes_server.core.clusters.compute_clusters', 'compute_clusters', ([], {}), '()\n', (300, 302), False, 'from aedes_server.core.clusters import compute_clusters\n')]
|
import py_cui
from pymusicterm.music import SongFile
from pymusicterm.util.file import File, FileMetadata
class SongInfoBlockLabel:
_row:int=0
_column:int=2
_row_span:int=2
_column_span:int=3
_center:bool=False
window:py_cui.widget_set.WidgetSet
def __init__(self,window:py_cui.widget_set.WidgetSet):
self.window=window
self.block_label=self.window.add_block_label(self._initial_text(),self._row,self._column,
row_span=self._row_span,column_span=self._column_span,center=self._center)
self.__config()
def _initial_text(self):
file_path=File().get_file_path()
text=""" Actual path: {}
No Song Selected
""".format(file_path)
return text
def set_song_info(self,song_file:SongFile):
pass
def __config(self):
""" Function that configure the widget
"""
self.block_label._draw_border=True
|
[
"pymusicterm.util.file.File"
] |
[((615, 621), 'pymusicterm.util.file.File', 'File', ([], {}), '()\n', (619, 621), False, 'from pymusicterm.util.file import File, FileMetadata\n')]
|
# Generated by Django 3.2.7 on 2021-09-18 10:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("podcasts", "0042_podcast_hub_exception"),
]
operations = [
migrations.AlterField(
model_name="podcast",
name="hub_token",
field=models.UUIDField(blank=True, editable=False, null=True, unique=True),
),
]
|
[
"django.db.models.UUIDField"
] |
[((344, 412), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'blank': '(True)', 'editable': '(False)', 'null': '(True)', 'unique': '(True)'}), '(blank=True, editable=False, null=True, unique=True)\n', (360, 412), False, 'from django.db import migrations, models\n')]
|
"""
Admin access page settings
"""
from django.contrib import admin
from blog.models import get_model_factory
from .posts_admin import PostAdmin
# Register your models here.
admin.site.register(get_model_factory('PostsFactory').create(), PostAdmin)
|
[
"blog.models.get_model_factory"
] |
[((195, 228), 'blog.models.get_model_factory', 'get_model_factory', (['"""PostsFactory"""'], {}), "('PostsFactory')\n", (212, 228), False, 'from blog.models import get_model_factory\n')]
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
# # The following code can be used if you have private dependencies. Basically it requires the user to set an
# # environment variable `GH_PAT` to a Github Personal Access Token (with access to the private repository). If the env
# # var cannot be found, an error is raised. If it can be found, the private package is installed.
# import os
# try:
# gh_pat = os.environ["GH_PAT"]
# except KeyError as e:
# raise RuntimeError("You didn't set the environment variable `GH_PAT`. This is necessary because this package "
# "relies on private package(s), and you need to be authenticated to install these. Please set "
# "`GH_PAT` environment variable to your Personnal Access Token (from Github).") from e
# # Example of specifying private dependencies :
# reqs = [f"<package_name> @ git+https://{gh_pat}@github.com/<user>/<repo>@<tag>#egg=<package_name>"]
reqs = []
extras_require = {
"test": ["pytest~=7.0", "pytest-cov~=3.0", "coverage-badge~=1.0"],
"hook": ["pre-commit~=2.15"],
"lint": ["isort~=5.9", "black~=22.1", "flake518~=1.2", "darglint~=1.8"],
"docs": ["mkdocs-material~=8.1", "mkdocstrings[python]~=0.18", "mike~=1.1"],
}
extras_require["all"] = sum(extras_require.values(), [])
extras_require["dev"] = (
extras_require["test"] + extras_require["hook"] + extras_require["lint"] + extras_require["docs"]
)
setuptools.setup(
name="pytere",
version="1.0.0.dev0",
author="<NAME>",
author_email="<EMAIL>",
description="A Python Template Repository",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/astariul/pytere",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
install_requires=reqs,
extras_require=extras_require,
)
|
[
"setuptools.find_packages"
] |
[((1812, 1838), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (1836, 1838), False, 'import setuptools\n')]
|
import numpy as np
from ligo.skymap import kde
import matplotlib
matplotlib.use('Agg')
from matplotlib.colors import to_rgb
from matplotlib import pyplot as plt
from mpl_toolkits.basemap import Basemap
#matplotlib.rc('text', usetex=True)
def greedy(density):
i,j = np.shape(density)
idx = np.argsort(density.flatten())[::-1]
c = np.cumsum(density.flatten()[idx])
c = c/c[-1]
np.append(c,1.0)
p = np.zeros(i*j)
p[idx] = c[:]
return p.reshape(i,j)
def plot_sky(pts,contour=True,filled=False,ax=None,trueloc=None,cmap='Reds',col='red'):
cls = kde.Clustered2DSkyKDE
pts[:,0] = pts[:,0] - np.pi
skypost = cls(pts, trials=5, jobs=8)
# make up some data on a regular lat/lon grid.
# nlats = 145; nlons = 291; delta = 2.*np.pi/(nlons-1)
nlats = 145; nlons = 291; delta = 2.*np.pi/(nlons-1)
lats = (0.5*np.pi-delta*np.indices((nlats,nlons))[0,:,:])
# lons = (delta*np.indices((nlats,nlons))[1,:,:])
lons = (delta*np.indices((nlats,nlons))[1,:,:]-np.pi)
locs = np.column_stack((lons.flatten(),lats.flatten()))
prob = skypost(locs).reshape(nlats,nlons)
p1 = greedy(prob)
# compute mean location of samples
nx = np.cos(pts[:,1])*np.cos(pts[:,0])
ny = np.cos(pts[:,1])*np.sin(pts[:,0])
nz = np.sin(pts[:,1])
mean_n = [np.mean(nx),np.mean(ny),np.mean(nz)]
# bestloc = [np.remainder(np.arctan2(mean_n[1],mean_n[0]),2.0*np.pi),np.arctan2(mean_n[2],np.sqrt(mean_n[0]**2 + mean_n[1]**2))]
bestloc = [trueloc[0],trueloc[1]]
if ax is None:
# map = Basemap(projection='ortho',lon_0=-bestloc[0]*180/np.pi,lat_0=bestloc[1]*180/np.pi,resolution=None,celestial=True)
map = Basemap(projection='moll',lon_0=0,resolution=None,celestial=True)
map.drawmapboundary(fill_color='white')
# draw lat/lon grid lines every 30 degrees.
# map.drawmeridians(np.arange(0,360,30))
meridian = ["-180","-150","-120","-90","-60","-30","0","30","+60","+90","+120","+150"]
map.drawmeridians(np.arange(-180,180,30),labels=[1,1,1,1])
for i in np.arange(len(meridian)):
plt.annotate(r"$\textrm{%s}$" % meridian[i] + u"\u00b0",xy=map(np.arange(-180,180,30)[i],0),xycoords='data')
map.drawparallels(np.arange(-90,90,30),labels=[1,0,0,0])
else:
map = ax
# compute native map projection coordinates of lat/lon grid.
# x, y = map(lons*180./np.pi, lats*180./np.pi)
x, y = map(lons*180./np.pi, lats*180./np.pi)
# contour data over the map.
if filled:
base_color = np.array(to_rgb(col))
opp_color = 1.0 - base_color
cs1 = map.contourf(x,y,1.0-p1,levels=[0.0,0.1,0.5,1.0],colors=[base_color+opp_color,base_color+0.8*opp_color,base_color+0.6*opp_color,base_color])
cs2 = map.contour(x,y,p1,levels=[0.5,0.9],linewidths=2.0,colors=col)
if trueloc is not None:
xx, yy = map((trueloc[0]*180./np.pi)-180.0, trueloc[1]*180./np.pi)
map.plot(xx,yy,marker='+',markersize=20,linewidth=5,color='black')
return map
|
[
"numpy.zeros",
"matplotlib.colors.to_rgb",
"numpy.shape",
"numpy.append",
"numpy.indices",
"matplotlib.use",
"numpy.sin",
"numpy.mean",
"numpy.cos",
"numpy.arange",
"mpl_toolkits.basemap.Basemap"
] |
[((65, 86), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (79, 86), False, 'import matplotlib\n'), ((270, 287), 'numpy.shape', 'np.shape', (['density'], {}), '(density)\n', (278, 287), True, 'import numpy as np\n'), ((396, 413), 'numpy.append', 'np.append', (['c', '(1.0)'], {}), '(c, 1.0)\n', (405, 413), True, 'import numpy as np\n'), ((421, 436), 'numpy.zeros', 'np.zeros', (['(i * j)'], {}), '(i * j)\n', (429, 436), True, 'import numpy as np\n'), ((1278, 1295), 'numpy.sin', 'np.sin', (['pts[:, 1]'], {}), '(pts[:, 1])\n', (1284, 1295), True, 'import numpy as np\n'), ((1192, 1209), 'numpy.cos', 'np.cos', (['pts[:, 1]'], {}), '(pts[:, 1])\n', (1198, 1209), True, 'import numpy as np\n'), ((1209, 1226), 'numpy.cos', 'np.cos', (['pts[:, 0]'], {}), '(pts[:, 0])\n', (1215, 1226), True, 'import numpy as np\n'), ((1235, 1252), 'numpy.cos', 'np.cos', (['pts[:, 1]'], {}), '(pts[:, 1])\n', (1241, 1252), True, 'import numpy as np\n'), ((1252, 1269), 'numpy.sin', 'np.sin', (['pts[:, 0]'], {}), '(pts[:, 0])\n', (1258, 1269), True, 'import numpy as np\n'), ((1309, 1320), 'numpy.mean', 'np.mean', (['nx'], {}), '(nx)\n', (1316, 1320), True, 'import numpy as np\n'), ((1321, 1332), 'numpy.mean', 'np.mean', (['ny'], {}), '(ny)\n', (1328, 1332), True, 'import numpy as np\n'), ((1333, 1344), 'numpy.mean', 'np.mean', (['nz'], {}), '(nz)\n', (1340, 1344), True, 'import numpy as np\n'), ((1679, 1747), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'projection': '"""moll"""', 'lon_0': '(0)', 'resolution': 'None', 'celestial': '(True)'}), "(projection='moll', lon_0=0, resolution=None, celestial=True)\n", (1686, 1747), False, 'from mpl_toolkits.basemap import Basemap\n'), ((2014, 2038), 'numpy.arange', 'np.arange', (['(-180)', '(180)', '(30)'], {}), '(-180, 180, 30)\n', (2023, 2038), True, 'import numpy as np\n'), ((2245, 2267), 'numpy.arange', 'np.arange', (['(-90)', '(90)', '(30)'], {}), '(-90, 90, 30)\n', (2254, 2267), True, 'import numpy as np\n'), ((2560, 2571), 'matplotlib.colors.to_rgb', 'to_rgb', (['col'], {}), '(col)\n', (2566, 2571), False, 'from matplotlib.colors import to_rgb\n'), ((869, 895), 'numpy.indices', 'np.indices', (['(nlats, nlons)'], {}), '((nlats, nlons))\n', (879, 895), True, 'import numpy as np\n'), ((974, 1000), 'numpy.indices', 'np.indices', (['(nlats, nlons)'], {}), '((nlats, nlons))\n', (984, 1000), True, 'import numpy as np\n'), ((2173, 2197), 'numpy.arange', 'np.arange', (['(-180)', '(180)', '(30)'], {}), '(-180, 180, 30)\n', (2182, 2197), True, 'import numpy as np\n')]
|
# DLトレーニングで共通のロジック・モジュール
import tensorflow as tf
from tensorflow.python.data.ops.readers import TFRecordDatasetV2
from tensorflow.python.keras.callbacks import History
from google.cloud import storage
from typing import Callable, List
import os
def get_tfrecord_dataset(
dataset_path: str,
preprocessing: Callable,
global_batch_size: int,
split: str,
data_augmentation: Callable = lambda x, y: (x, y),
) -> TFRecordDatasetV2:
"""TFRecordからデータパイプラインを構築する.
Args:
dataset_path (str): 目的のTFRecordファイルが保存されているパス.
preprocessing (Callable): 適用する前処理関数.
global_batch_size (int): バッチサイズ(分散処理の場合は合計).
split (str): train or valid
data_augmentation (Callable, optional): データオーグメンテーション関数. Defaults to lambdax:x.
Raises:
FileNotFoundError: dataset_pathにファイルが存在しない場合.
Returns:
TFRecordDatasetV2: 定義済みのデータパイプライン.
"""
# Build a pipeline
file_names = tf.io.gfile.glob(
f"{dataset_path}/{split}-*.tfrec"
)
dataset = tf.data.TFRecordDataset(
file_names, num_parallel_reads=tf.data.AUTOTUNE)
if not file_names:
raise FileNotFoundError(f"Not found: {dataset}")
option = tf.data.Options()
if split == "train":
option.experimental_deterministic = False
dataset = dataset.with_options(option) \
.map(lambda example: preprocessing(example=example), num_parallel_calls=tf.data.AUTOTUNE) \
.map(lambda x, y: data_augmentation(x, y)) \
.shuffle(512, reshuffle_each_iteration=True) \
.batch(global_batch_size, drop_remainder=True) \
.prefetch(tf.data.AUTOTUNE)
else:
option.experimental_deterministic = True
dataset = dataset.with_options(option) \
.map(lambda example: preprocessing(example=example), num_parallel_calls=tf.data.AUTOTUNE) \
.batch(global_batch_size, drop_remainder=False) \
.prefetch(tf.data.AUTOTUNE)
return dataset
class Training:
def __init__(
self,
build_model_func: Callable,
job_dir: str,
artifacts_dir: str = "",
use_tpu: bool = True,
) -> None:
"""トレーニングの初期設定を行う.
TPUノードの管理、TPUStrategyの設定、モデルのロード、コンパイル、checkpointの復旧などを行う.
Arguments:
build_model_func (Callable): 実験に使うモデルのbuild関数を渡す.
job_dir (str): job管理用のGCSパス. checkpointやlogの保存をする.
artifacts_dir (str): 実験結果の保存先GCSパス.
use_tpu (bool): トレーニングにTPUを使うかどうか.
"""
# For job management
self.job_dir = job_dir
self.artifacts_dir = artifacts_dir
self.use_tpu = use_tpu
self.last_epoch = self._get_last_epoch()
if self.use_tpu:
# Tpu cluster setup
cluster = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(cluster)
tf.tpu.experimental.initialize_tpu_system(cluster)
self.distribute_strategy = tf.distribute.TPUStrategy(cluster)
# Load model in distribute_strategy scope
with self.distribute_strategy.scope():
self._setup_model(build_model=build_model_func)
else:
self._setup_model(build_model=build_model_func)
self.callbacks = [
tf.keras.callbacks.TensorBoard(log_dir=f"{self.job_dir}/logs", histogram_freq=1),
tf.keras.callbacks.TerminateOnNaN(),
tf.keras.callbacks.ModelCheckpoint(
filepath=os.path.join(self.job_dir, "checkpoints/{epoch:05d}"),
save_weights_only=True,
save_freq="epoch"
)
]
def _setup_model(self, build_model: Callable) -> None:
if self.last_epoch == 0:
self.model = build_model()
else:
checkpoint = f"{self.job_dir}/checkpoints/{self.last_epoch:0>5}"
self.model = build_model(checkpoint=checkpoint)
def _get_last_epoch(self) -> int:
client = storage.Client()
bucket_name = self.job_dir.split("/")[2]
dest = self.job_dir.replace(f"gs://{bucket_name}/", "")
blobs = client.list_blobs(bucket_name, prefix=f"{dest}/checkpoints")
checkpoints = [0]
for b in blobs:
epoch = b.name.replace(f"{dest}/checkpoints/", "").split(".")[0]
if epoch:
checkpoints.append(int(epoch))
last_epoch = max(checkpoints)
return last_epoch
def add_callbacks(self, callbacks: List) -> None:
self.callbacks.extend(callbacks)
def run_train(
self,
train_ds: TFRecordDatasetV2,
valid_ds: TFRecordDatasetV2,
epochs: int
) -> History:
"""トレーニングを実施し、ログや結果を保存する.
tf.keras.Model.fitでのトレーニングを行う.
複雑なトレーニングループが必要な場合もtf.keras.Model.train_stepをオーバーライドするなどして使う.
Arguments:
train_ds (TFRecordDatasetV2): tensorflowのデータセットパイプライン(学習用).
valid_ds (TFRecordDatasetV2): tensorflowのデータセットパイプライン(検証用).
epochs (int): トレーニングを回す合計エポック数.
"""
history = self.model.fit(
train_ds,
validation_data=valid_ds,
callbacks=self.callbacks,
initial_epoch=self.last_epoch,
epochs=epochs
)
if self.artifacts_dir:
self.model.save(f"{self.artifacts_dir}/saved_model", include_optimizer=False)
return history
|
[
"tensorflow.distribute.cluster_resolver.TPUClusterResolver",
"tensorflow.data.TFRecordDataset",
"tensorflow.distribute.TPUStrategy",
"tensorflow.keras.callbacks.TerminateOnNaN",
"tensorflow.data.Options",
"google.cloud.storage.Client",
"tensorflow.config.experimental_connect_to_cluster",
"tensorflow.keras.callbacks.TensorBoard",
"os.path.join",
"tensorflow.tpu.experimental.initialize_tpu_system",
"tensorflow.io.gfile.glob"
] |
[((942, 993), 'tensorflow.io.gfile.glob', 'tf.io.gfile.glob', (['f"""{dataset_path}/{split}-*.tfrec"""'], {}), "(f'{dataset_path}/{split}-*.tfrec')\n", (958, 993), True, 'import tensorflow as tf\n'), ((1022, 1094), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['file_names'], {'num_parallel_reads': 'tf.data.AUTOTUNE'}), '(file_names, num_parallel_reads=tf.data.AUTOTUNE)\n', (1045, 1094), True, 'import tensorflow as tf\n'), ((1198, 1215), 'tensorflow.data.Options', 'tf.data.Options', ([], {}), '()\n', (1213, 1215), True, 'import tensorflow as tf\n'), ((4022, 4038), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (4036, 4038), False, 'from google.cloud import storage\n'), ((2790, 2841), 'tensorflow.distribute.cluster_resolver.TPUClusterResolver', 'tf.distribute.cluster_resolver.TPUClusterResolver', ([], {}), '()\n', (2839, 2841), True, 'import tensorflow as tf\n'), ((2854, 2904), 'tensorflow.config.experimental_connect_to_cluster', 'tf.config.experimental_connect_to_cluster', (['cluster'], {}), '(cluster)\n', (2895, 2904), True, 'import tensorflow as tf\n'), ((2917, 2967), 'tensorflow.tpu.experimental.initialize_tpu_system', 'tf.tpu.experimental.initialize_tpu_system', (['cluster'], {}), '(cluster)\n', (2958, 2967), True, 'import tensorflow as tf\n'), ((3007, 3041), 'tensorflow.distribute.TPUStrategy', 'tf.distribute.TPUStrategy', (['cluster'], {}), '(cluster)\n', (3032, 3041), True, 'import tensorflow as tf\n'), ((3326, 3411), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'f"""{self.job_dir}/logs"""', 'histogram_freq': '(1)'}), "(log_dir=f'{self.job_dir}/logs', histogram_freq=1\n )\n", (3356, 3411), True, 'import tensorflow as tf\n'), ((3420, 3455), 'tensorflow.keras.callbacks.TerminateOnNaN', 'tf.keras.callbacks.TerminateOnNaN', ([], {}), '()\n', (3453, 3455), True, 'import tensorflow as tf\n'), ((3530, 3583), 'os.path.join', 'os.path.join', (['self.job_dir', '"""checkpoints/{epoch:05d}"""'], {}), "(self.job_dir, 'checkpoints/{epoch:05d}')\n", (3542, 3583), False, 'import os\n')]
|
import numpy as np
def align_depth_to_rgb(
depth,
bgr_cameramodel,
depth_cameramodel,
depth_to_rgb_transform):
"""Align depth image to color image.
Parameters
----------
depth : numpy.ndarray
depth image in meter order.
bgr_cameramodel : cameramodels.PinholeCameraModel
bgr cameramodel
depth_cameramodel : cameramodels.PinholeCameraModel
depth cameramodel
depth_to_rgb_transform : numpy.ndarray
4x4 transformation matrix.
Returns
-------
aligned_img : numpy.ndarray
aligned image.
"""
if depth.shape[0] != depth_cameramodel.height \
or depth.shape[1] != depth_cameramodel.width:
raise ValueError
depth = depth.copy()
aligned_img = np.zeros((bgr_cameramodel.height, bgr_cameramodel.width),
dtype=np.float32)
depth[np.isnan(depth)] = 0
v, u = np.array(np.where(depth))
uv = np.array([u, v]).T
rotation = depth_to_rgb_transform[:3, :3]
translation = depth_to_rgb_transform[:3, 3]
xyz_depth_frame = depth_cameramodel.batch_project_pixel_to_3d_ray(
uv, depth=depth[depth > 0])
xyz_rgb_frame = (np.matmul(
rotation.T, xyz_depth_frame.T)
- np.matmul(
rotation.T, translation).reshape(3, -1)).T
rgb_uv, indices = bgr_cameramodel.batch_project3d_to_pixel(
xyz_rgb_frame,
project_valid_depth_only=True,
return_indices=True)
aligned_img.reshape(-1)[bgr_cameramodel.flatten_uv(rgb_uv)] = \
depth[depth > 0][indices]
return aligned_img
|
[
"numpy.zeros",
"numpy.isnan",
"numpy.where",
"numpy.array",
"numpy.matmul"
] |
[((780, 855), 'numpy.zeros', 'np.zeros', (['(bgr_cameramodel.height, bgr_cameramodel.width)'], {'dtype': 'np.float32'}), '((bgr_cameramodel.height, bgr_cameramodel.width), dtype=np.float32)\n', (788, 855), True, 'import numpy as np\n'), ((893, 908), 'numpy.isnan', 'np.isnan', (['depth'], {}), '(depth)\n', (901, 908), True, 'import numpy as np\n'), ((934, 949), 'numpy.where', 'np.where', (['depth'], {}), '(depth)\n', (942, 949), True, 'import numpy as np\n'), ((960, 976), 'numpy.array', 'np.array', (['[u, v]'], {}), '([u, v])\n', (968, 976), True, 'import numpy as np\n'), ((1203, 1243), 'numpy.matmul', 'np.matmul', (['rotation.T', 'xyz_depth_frame.T'], {}), '(rotation.T, xyz_depth_frame.T)\n', (1212, 1243), True, 'import numpy as np\n'), ((1276, 1310), 'numpy.matmul', 'np.matmul', (['rotation.T', 'translation'], {}), '(rotation.T, translation)\n', (1285, 1310), True, 'import numpy as np\n')]
|
#!/usr/bin/python
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('AppDataPublisher')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import copy
import threading
from DustLinkData import DustLinkData
from EventBus import EventBusClient
class AppDataPublisher(EventBusClient.EventBusClient):
'''
\brief Publishes the data into the DustLinkData database.
One instance of this class is created for each application.
'''
def __init__(self,appName):
# store params
self._appName = appName
# log
log.info('creating instance')
# initialize parent class
EventBusClient.EventBusClient.__init__(self,
'parsedAppData_{0}'.format(self._appName),
self._publish,
)
self.name = 'DataConnector_AppDataPublisher_{0}'.format(self._appName)
# add stats
# local variables
#======================== public ==========================================
#======================== private =========================================
def _publish(self,sender,signal,data):
dld = DustLinkData.DustLinkData()
if not dld.getFastMode():
# add mote if needed
try:
dld.addMote(data['mac'])
except ValueError:
pass # happens when mote already exists
# in demo mode, add demo mode apps to mote
if dld.getDemoMode():
for appname in dld.DEMO_MODE_APPS.keys():
try:
dld.attachAppToMote(data['mac'],appname)
except ValueError:
pass # happens when app does not exist, or already attached
# attach app to mote if needed
try:
dld.attachAppToMote(data['mac'],self._appName)
except ValueError:
pass # happens when app not known, or app already attached to mote
# publish in DustLinkData
dld.indicateData(data['mac'],
self._appName,
data['fields'],
timestamp=data['timestamp'],
)
# log
if log.isEnabledFor(logging.DEBUG):
log.debug('published {0}'.format(data))
|
[
"DustLinkData.DustLinkData.DustLinkData",
"logging.getLogger"
] |
[((117, 154), 'logging.getLogger', 'logging.getLogger', (['"""AppDataPublisher"""'], {}), "('AppDataPublisher')\n", (134, 154), False, 'import logging\n'), ((1252, 1279), 'DustLinkData.DustLinkData.DustLinkData', 'DustLinkData.DustLinkData', ([], {}), '()\n', (1277, 1279), False, 'from DustLinkData import DustLinkData\n')]
|
import unittest
from Sastrawi.Morphology.Disambiguator.DisambiguatorPrefixRule1 import DisambiguatorPrefixRule1a, DisambiguatorPrefixRule1b
class Test_DisambiguatorPrefixRule1Test(unittest.TestCase):
def setUp(self):
self.subject1a = DisambiguatorPrefixRule1a()
self.subject1b = DisambiguatorPrefixRule1b()
return super(Test_DisambiguatorPrefixRule1Test, self).setUp()
def test_disambiguate1a(self):
self.assertEquals('ia-ia', self.subject1a.disambiguate('beria-ia'))
self.assertIsNone(self.subject1a.disambiguate('berlari'))
def test_disambiguate1b(self):
self.assertEquals('rakit', self.subject1b.disambiguate('berakit'))
self.assertIsNone(self.subject1b.disambiguate('bertabur'))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"Sastrawi.Morphology.Disambiguator.DisambiguatorPrefixRule1.DisambiguatorPrefixRule1a",
"Sastrawi.Morphology.Disambiguator.DisambiguatorPrefixRule1.DisambiguatorPrefixRule1b"
] |
[((786, 801), 'unittest.main', 'unittest.main', ([], {}), '()\n', (799, 801), False, 'import unittest\n'), ((247, 274), 'Sastrawi.Morphology.Disambiguator.DisambiguatorPrefixRule1.DisambiguatorPrefixRule1a', 'DisambiguatorPrefixRule1a', ([], {}), '()\n', (272, 274), False, 'from Sastrawi.Morphology.Disambiguator.DisambiguatorPrefixRule1 import DisambiguatorPrefixRule1a, DisambiguatorPrefixRule1b\n'), ((300, 327), 'Sastrawi.Morphology.Disambiguator.DisambiguatorPrefixRule1.DisambiguatorPrefixRule1b', 'DisambiguatorPrefixRule1b', ([], {}), '()\n', (325, 327), False, 'from Sastrawi.Morphology.Disambiguator.DisambiguatorPrefixRule1 import DisambiguatorPrefixRule1a, DisambiguatorPrefixRule1b\n')]
|
import itertools
# This snippet has been turned into a full repo:
# github.com/patrickleweryharris/anagram_solver
def anagram_solver(lst):
"""
Return all possible combinations of letters in lst
@type lst: [str]
@rtype: None
"""
for i in range(0, len(lst) + 1):
for subset in itertools.permutations(lst, i):
possible = ''
for letter in subset:
possible += letter
if len(possible) == len(lst):
# itertools.permutations returns smaller lists
print(possible)
if __name__ == '__main__':
lst = ['o', 'r', 'y', 'a', 'n']
anagram_solver(lst)
|
[
"itertools.permutations"
] |
[((310, 340), 'itertools.permutations', 'itertools.permutations', (['lst', 'i'], {}), '(lst, i)\n', (332, 340), False, 'import itertools\n')]
|
from BranchFilters.BranchFilterer import BranchFilterer
from Interoperability.ShellCommandExecuter import ShellCommandExecuter
from RepositoryWalkers.BranchToCommitWalker import BranchToCommitWalker
from Logger import Logger
class HeadToMasterBranchFilterer(BranchFilterer):
def __init__(self, repository):
self.logger = Logger(self)
self.repository = repository
self.repository_directory = repr(repository).split('\'')[1][:-4]
self.head_branch_name = self.repository.head.name[11:]
self.generate_log_from_head_to_merge_base()
def generate_log_from_head_to_merge_base(self):
self.logger.log("Determining commit ids between the current head and master:")
self.log_from_head_to_merge_base = []
self.logger.log("v head v")
for id in self.walk_log_from_head_to_merge_base():
self.log_from_head_to_merge_base.append(id)
self.logger.log(id)
self.logger.log("^ master ^")
def walk_log_from_head_to_merge_base(self):
head_master_merge_base = self.get_merge_base("master", self.head_branch_name)
walker = BranchToCommitWalker(self.repository, head_master_merge_base)
head_branch = self.repository.branches[self.head_branch_name]
for commit in walker.walk(head_branch):
yield commit.hex
def get_merge_base(self, branch_name, other_branch_name):
args = ['git', 'merge-base', branch_name, other_branch_name]
executer = ShellCommandExecuter(self.repository_directory, args)
return executer.execute_for_output()
def should_include_branch(self, branch_name):
merge_base = self.get_merge_base(self.head_branch_name, branch_name)
return merge_base in self.log_from_head_to_merge_base
|
[
"RepositoryWalkers.BranchToCommitWalker.BranchToCommitWalker",
"Logger.Logger",
"Interoperability.ShellCommandExecuter.ShellCommandExecuter"
] |
[((334, 346), 'Logger.Logger', 'Logger', (['self'], {}), '(self)\n', (340, 346), False, 'from Logger import Logger\n'), ((1139, 1200), 'RepositoryWalkers.BranchToCommitWalker.BranchToCommitWalker', 'BranchToCommitWalker', (['self.repository', 'head_master_merge_base'], {}), '(self.repository, head_master_merge_base)\n', (1159, 1200), False, 'from RepositoryWalkers.BranchToCommitWalker import BranchToCommitWalker\n'), ((1499, 1552), 'Interoperability.ShellCommandExecuter.ShellCommandExecuter', 'ShellCommandExecuter', (['self.repository_directory', 'args'], {}), '(self.repository_directory, args)\n', (1519, 1552), False, 'from Interoperability.ShellCommandExecuter import ShellCommandExecuter\n')]
|
'''
Colormapping
The final glyph customization we'll practice is using the CategoricalColorMapper to color each glyph by a categorical property.
Here, you're going to use the automobile dataset to plot miles-per-gallon vs weight and color each circle glyph by the region where the automobile was manufactured.
The origin column will be used in the ColorMapper to color automobiles manufactured in the US as blue, Europe as red and Asia as green.
The automobile data set is provided to you as a Pandas DataFrame called df. The figure is provided for you as p.
INSTRUCTIONS
100XP
Import CategoricalColorMapper from bokeh.models.
Convert the DataFrame df to a ColumnDataSource called source. This has already been done for you.
Make a CategoricalColorMapper object called color_mapper with the CategoricalColorMapper() function. It has two parameters here: factors and palette.
Add a circle glyph to the figure p to plot 'mpg' (on the y-axis) vs 'weight' (on the x-axis). Remember to pass in source and 'origin' as arguments to source and legend. For the color parameter, use dict(field='origin', transform=color_mapper).
'''
#Import CategoricalColorMapper from bokeh.models
from bokeh.models import CategoricalColorMapper
# Convert df to a ColumnDataSource: source
source = ColumnDataSource(df)
# Make a CategoricalColorMapper object: color_mapper
color_mapper = CategoricalColorMapper(factors=['Europe', 'Asia', 'US'],
palette=['red', 'green', 'blue'])
# Add a circle glyph to the figure p
p.circle('weight', 'mpg', source=source,
color=dict(field='origin', transform=color_mapper),
legend='origin')
# Specify the name of the output file and show the result
output_file('colormap.html')
show(p)
|
[
"bokeh.models.CategoricalColorMapper"
] |
[((1368, 1462), 'bokeh.models.CategoricalColorMapper', 'CategoricalColorMapper', ([], {'factors': "['Europe', 'Asia', 'US']", 'palette': "['red', 'green', 'blue']"}), "(factors=['Europe', 'Asia', 'US'], palette=['red',\n 'green', 'blue'])\n", (1390, 1462), False, 'from bokeh.models import CategoricalColorMapper\n')]
|
import asyncio
import aiohttp
import requests
import json
from .op import EsiOp
from .auth import EsiAuth
from .cache import EsiCache, DictCache
from .esisession import EsiSession
import logging
logger = logging.getLogger("EsiPysi")
class EsiPysi(object):
"""
The EsiPysi class creates "EsiOp" operations based on a provided swagger spec
"""
def __init__(self, swagger_url, **kwargs):
"""
Initialize the class
Arguments:
swagger_url -- Url to the swagger spec
Keyword arguments:
user_agent -- user agent to send with ESI calls
cache -- EsiCache object to use for caching
auth -- EsiAuth to use for authorized calls to ESI
retries -- Number of retries when ESI returns a retryable error, 0 disables, -1 is unlimited
loop -- Event loop to use for asyncio
session -- aiohttp session to use, note: loop will be useless if set with session, set the loop you want in the session instead
"""
self.args = kwargs
cache = kwargs.get("cache", DictCache())
if cache is not None:
if not issubclass(type(cache), EsiCache):
raise TypeError("cache should be of the type EsiCache")
session = self.args.get('session')
if session is not None:
if not isinstance(type(session), aiohttp.ClientSession):
raise TypeError("session must be a aiohttp ClientSession")
self.operations = {}
self.data = {}
r = requests.get(swagger_url)
try:
data = r.json()
except:
logger.exception("Parse error, spec written to file")
with open('esi-spec-error.json', 'w') as esifile:
esifile.write(r.text)
return
finally:
r.close()
self.data = data
self.__analyze_swagger()
def session(self):
session = EsiSession(self.base_url, self.operations, **self.args)
return session
def __analyze_swagger(self):
#Get base url
self.base_url = "https://" + self.data.get("host","") + self.data.get("basePath", "")
#Reformat json
paths = self.data.get("paths", {})
#each path
for route, verbs in paths.items():
#each http verb in a path
for verb, operation in verbs.items():
operation_id = operation.get("operationId")
if operation_id is None:
continue
new_op = operation.copy()
new_op["path"] = route
new_op["verb"] = verb
#Handle parameter refs
params = operation.get("parameters")
new_op["parameters"] = {}
for param in params:
path = param.get("$ref")
if path is None:
param_details = param.copy()
else:
param_details = self.__get_ref(path)
param_name = param_details.get("name")
new_op["parameters"][param_name] = param_details
self.operations[operation_id] = new_op
def __get_ref(self, path):
path_split = path.split("/")
if path_split[0] != "#":
#Unsupported
return None
ref = self.data
for i in range(1, len(path_split)):
ref = ref.get(path_split[i], {})
return ref
|
[
"requests.get",
"logging.getLogger"
] |
[((205, 233), 'logging.getLogger', 'logging.getLogger', (['"""EsiPysi"""'], {}), "('EsiPysi')\n", (222, 233), False, 'import logging\n'), ((1557, 1582), 'requests.get', 'requests.get', (['swagger_url'], {}), '(swagger_url)\n', (1569, 1582), False, 'import requests\n')]
|
import numpy as np
import torch
def compute_lid(x, x_train, k, exclude_self=False):
"""
Calculate LID using the estimation from [1]
[1] Ma et al., "Characterizing Adversarial Subspaces Using
Local Intrinsic Dimensionality," ICLR 2018.
"""
with torch.no_grad():
x = x.view((x.size(0), -1))
x_train = x_train.view((x_train.size(0), -1))
lid = torch.zeros((x.size(0), ))
for i, x_cur in enumerate(x):
dist = (x_cur.view(1, -1) - x_train).norm(2, 1)
# `largest` should be True when using cosine distance
if exclude_self:
topk_dist = dist.topk(k + 1, largest=False)[0][1:]
else:
topk_dist = dist.topk(k, largest=False)[0]
mean_log = torch.log(topk_dist / topk_dist[-1]).mean()
lid[i] = -1 / mean_log
return lid
# def cal_class_lid(x, x_train, k, exclude_self=False):
# """
# Calculate LID on sample using the estimation from [1]
# [1] Ma et al., "Characterizing Adversarial Subspaces Using
# Local Intrinsic Dimensionality," ICLR 2018.
# """
# x = x.view((x.size(0), -1))
# x_train = x_train.view((x_train.size(0), -1))
# lid = torch.zeros((x.size(0), ))
# for i, x_cur in enumerate(x):
# dist = (x_cur.view(1, -1) - x_train).norm(2, 1)
# # `largest` should be True when using cosine distance
# if exclude_self:
# topk_dist = dist.topk(k + 1, largest=False)[0][1:]
# else:
# topk_dist = dist.topk(k, largest=False)[0]
# mean_log = torch.log(topk_dist / topk_dist[-1]).mean()
# lid[i] = -1 / mean_log
# return lid
def compute_spnorm(inputs, dknn, layers, batch_size=200):
assert inputs.requires_grad
num_total = inputs.size(0)
norm = np.zeros((num_total, len(layers)))
num_batches = int(np.ceil(num_total / batch_size))
for i in range(num_batches):
begin, end = i * batch_size, (i + 1) * batch_size
x = inputs[begin:end]
reps = dknn.get_activations(x)
for l, layer in enumerate(layers):
y = reps[layer]
norm[begin:end, l] = compute_spnorm_batch(x, y)
return norm
def compute_spnorm_batch(inputs, output):
"""
:param inputs: (batch_size, input_size)
:param output: (batch_size, output_size)
:return: jacobian: (batch_size, output_size, input_size)
"""
batch_size, input_dim = inputs.view(inputs.size(0), -1).size()
output = output.view(batch_size, -1)
jacobian = torch.zeros((batch_size, output.size(1), input_dim))
for i in range(output.size(1)):
grad = torch.autograd.grad(
output[:, i].sum(), inputs, retain_graph=True)[0]
jacobian[:, i, :] = grad.view(batch_size, input_dim)
norm = np.zeros((batch_size, ))
for i in range(batch_size):
norm[i] = np.linalg.norm(jacobian[i].detach().cpu().numpy(), 2)
return norm
|
[
"torch.no_grad",
"numpy.zeros",
"numpy.ceil",
"torch.log"
] |
[((2835, 2858), 'numpy.zeros', 'np.zeros', (['(batch_size,)'], {}), '((batch_size,))\n', (2843, 2858), True, 'import numpy as np\n'), ((276, 291), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (289, 291), False, 'import torch\n'), ((1899, 1930), 'numpy.ceil', 'np.ceil', (['(num_total / batch_size)'], {}), '(num_total / batch_size)\n', (1906, 1930), True, 'import numpy as np\n'), ((785, 821), 'torch.log', 'torch.log', (['(topk_dist / topk_dist[-1])'], {}), '(topk_dist / topk_dist[-1])\n', (794, 821), False, 'import torch\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.