max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
src/sentry/api/bases/incident.py | overquota/sentry | 1 | 12788751 | from __future__ import absolute_import
from sentry.api.bases.organization import OrganizationPermission
class IncidentPermission(OrganizationPermission):
scope_map = {
'GET': ['org:read', 'org:write', 'org:admin'],
'POST': ['org:write', 'org:admin'],
'PUT': ['org:write', 'org:admin'],
}
| 1.71875 | 2 |
python/003.py | Evergreen200/projecteuler | 0 | 12788752 | <filename>python/003.py<gh_stars>0
def is_prime(n):
for i in range(2, int(n / 2) + 1):
if n % i == 0 and n != i:
return False
return True
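# Trial division: divisors i are tried in increasing order, so the first prime cofactor n // i is the largest prime factor of n.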
def solution(n):
for i in range(1, n):
if n % i == 0:
p = n // i
if is_prime(p):
return p
if __name__ == '__main__':
print(solution(600851475143))
| 3.78125 | 4 |
model_src/black_white/bw_network/backward_model_direction.py | imbyjuli/blackboard-tensorflow-nrp | 0 | 12788753 | <reponame>imbyjuli/blackboard-tensorflow-nrp<filename>model_src/black_white/bw_network/backward_model_direction.py<gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Imports
import numpy as np
import tensorflow as tf
import cv2
import csv
from backward_weights import init_locally_backward_weights ,initiate_backward_weights
from backward_layers import ait_backwards,pit_backwards,v4_backwards,v2_backwards, create_direction_for_eval
tf.logging.set_verbosity(tf.logging.INFO)
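# tf.estimator model_fn: reconstructs activity through the backward (feedback) path AIT -> PIT -> V4 -> V2 over 40x40 inputs, then classifies direction into 4 classes.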
def ventral_feed_backward(features, labels, mode):
#==========================================================================================
####### weights and biases described in v_layers_weights
#==========================================================================================
n_field_1 = 40 * 40 * 3
use_sparse = True
input_layer_size = {
#"classes": tf.argmax(input=logits, axis=1),
"v2_1": (40,40),
"v2_2": (40,40),
"v4": (40,40),
"PIT": (40,40),
"AIT": (5,1)
}
for degrees in [0,45,90,135]:
input_layer_size ["v1_"+str(degrees)] = (40,40)
weights, bias = initiate_backward_weights(input_layer_size)
packed_backward_weights = init_locally_backward_weights(input_layer_size)
#==========================================================================================
####### initiation
#==========================================================================================
input_layers = {}
for key in input_layer_size.keys():
img_x,img_y = input_layer_size [key]
features_float = tf.cast(features[key], tf.float32)
input_layers[key] = tf.reshape(features_float, [-1, img_x*img_y * 1])
#print(input_layers.keys())
#==========================================================================================
####### layers described in v_layers
#==========================================================================================
AIT_b = ait_backwards(input_layers)
PIT_b = pit_backwards(AIT_b,input_layers,packed_backward_weights, weights,bias, use_sparse = use_sparse)
v4_b = v4_backwards(PIT_b,input_layers,packed_backward_weights,weights,bias, use_sparse = use_sparse)
v2_b,_,_ = v2_backwards(v4_b,input_layers,packed_backward_weights,weights,bias, use_sparse = use_sparse)
# final_dense = tf.layers.dense(v2_backward)
logits = v2_b#tf.layers.dense(inputs = v2_b, units = 40 * 40)
tf.summary.image("AIT_b",tf.reshape(AIT_b,[-1,40,40,1]),1)
tf.summary.image("PIT_b",tf.reshape(PIT_b,[-1,40,40,1]),1)
tf.summary.image("v4_b",tf.reshape(v4_b,[-1,40,40,1]),1)
tf.summary.image("v2_b",tf.reshape(v2_b,[-1,40,40,1]),1)
# final_dense = tf.layers.dense(v2_backward)
logits = tf.layers.dense(inputs = v2_b, units = 4)
tf.summary.image("logits",tf.reshape(logits,[-1,2,2,1]),1)
#==========================================================================================
####### Prediction with Tensorflow
#==========================================================================================
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
"classes": tf.argmax(input=logits, axis=1),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
if mode == tf.estimator.ModeKeys.PREDICT:
#sess.run(print_activation_dict(return_tensors))
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate Loss (for both TRAIN and EVAL modes)
one_hot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=4)
tf.summary.image("labels",tf.reshape(one_hot_labels,[-1,2,2,1]),1)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=one_hot_labels , logits=logits)
    # Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
# change optimiser if wanted
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {
"accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"])}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
| 2.34375 | 2 |
tests/scrubber/sensitive_string_scrubber_test.py | cgruber/make-open-easy | 5 | 12788754 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
"""Tests for finding sensitive strings."""
__author__ = '<EMAIL> (<NAME>)'
from google.apputils import resources
from google.apputils import basetest
from moe import config_utils
from moe.scrubber import sensitive_string_scrubber
import test_util
STRINGS_JSON = config_utils.ReadConfigResource(
test_util.TestResourceName('sensitive_strings.json'))
class SensitiveWordsTest(basetest.TestCase):
"""Unittests for the sensitive word search."""
def setUp(self):
self.word_scrubber = sensitive_string_scrubber.SensitiveWordScrubber(
STRINGS_JSON[u'sensitive_words'])
def assertMatch(self, expected_word, line):
self.assertEquals([expected_word],
self.word_scrubber.FindSensitiveStrings(line))
def assertNoMatch(self, line):
self.assertEquals([], self.word_scrubber.FindSensitiveStrings(line))
def testObviousWords(self):
self.assertMatch(u'testy', u'testy.delegate()')
self.assertMatch(u'secrety', u'void fixForSecrety')
self.assertMatch(u'testy', u'http://foo.com/testy/1234')
self.assertMatch(u'http://secret.wiki/', u'http://secret.wiki/secret-url')
self.assertMatch(u'internal.website.com', u'foo.internal.website.com')
self.assertMatch(u'http://secret.wiki/',
u'here is one line\nhttp://secret.wiki/secret-url')
def testCapitalization(self):
self.assertMatch(u'abc', u'void fixForABC')
self.assertMatch(u'testy', u'check out the Testy')
self.assertMatch(u'secrety', u'notSECRETY')
self.assertNoMatch(u'NOTSECRETY')
self.assertNoMatch(u'latEsty') # does not match testy
self.assertNoMatch(u'notsecretY') # does not match secrety
def testNonMatches(self):
self.assertNoMatch(u'go to the next line')
def testWordExtraction(self):
self.assertMatch(u'testy', u'testy')
self.assertMatch(u'testy', u' testy ')
self.assertMatch(u'testy', u'ThisIsATestyString')
self.assertMatch(u'testy', u' public void buildTesty(')
self.assertMatch(u'testy', u'THIS_IS_TESTY_A_SECRET_PROJECT')
self.assertNoMatch(u'kittens attesty')
class SensitiveResTest(basetest.TestCase):
"""Unittests for the sensitive word search."""
def setUp(self):
self.re_scrubber = sensitive_string_scrubber.SensitiveReScrubber(
STRINGS_JSON[u'sensitive_res'])
def assertMatch(self, expected_string, line):
self.assertEquals([expected_string],
self.re_scrubber.FindSensitiveStrings(line))
def assertNoMatch(self, line):
self.assertEquals([], self.re_scrubber.FindSensitiveStrings(line))
def testSensitiveRes(self):
self.assertMatch(u'supersecret',
u'thisissosupersecretweneedtoscrubitevenwithinaword')
self.assertMatch(u'SUPERSECRET',
u'THISISSOSUPERSECRETWENEEDTOSCRUBITEVENWITHINAWORD')
self.assertMatch(u'SuPeRsEcReT',
u'ThIsIsSoSuPeRsEcReTwEnEeDtOsCrUbItEvEnWiThInAwOrD')
self.assertNoMatch(u'notasecret')
self.assertMatch(u'.secretcode1.', u'.secretcode1.')
self.assertMatch(u' secret_code123 ', u'the secret_code123 is secret')
self.assertNoMatch(u'SECRET_CODE_123')
self.assertNoMatch(u'THESECRETCODE123')
if __name__ == '__main__':
basetest.main()
| 2.75 | 3 |
hypertrophy_dataset.py | dozsam13/LHYP | 0 | 12788755 | from torch.utils.data import Dataset
from torchvision import transforms
import torch
class HypertrophyDataset(Dataset):
def __init__(self, images, targets, device):
self.images = images
self.targets = targets
self.device = device
self.augmenter = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomAffine([-90, 90]),
transforms.ToTensor()
])
self.preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor()
])
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image_ = self.images[index]
# image_ = self.augmenter(image_)
sample = {
'image': torch.tensor(image_, dtype=torch.float, device=self.device),
'target': torch.tensor(self.targets[index], dtype=torch.long, device=self.device)
}
        return sample
| 2.71875 | 3 |
constants.py | Kaoline/ShinobiTool | 0 | 12788756 | # file --constants.py--
waiting_message = "Opération en cours... Ça peut être long, et ça bloque la fenêtre."
default_search_file = "Shinobis.txt"
default_receivers_file = "Destinataires.txt"
default_moles_file = "Ennemis.txt"
default_message_file = "Message.txt"
default_config_file = "Config.txt"
| 1.148438 | 1 |
hawser/errors.py | 5elenay/hawser | 4 | 12788757 | class UserNotMonitoredError(Exception):
"""Raises when user not monitored."""
pass
class LanyardException(Exception):
"""Raises when lanyard gives success false but we don't have a support for the exception."""
pass
| 2.03125 | 2 |
oecp/main/category.py | openeuler-mirror/oecp | 0 | 12788758 | # -*- encoding=utf-8 -*-
"""
# **********************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# [oecp] is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
# Author:
# Create: 2021-09-06
# Description: package category
# **********************************************************************************
"""
import json
import logging
from enum import Enum, unique
from oecp.proxy.rpm_proxy import RPMProxy
logger = logging.getLogger("oecp")
@unique
class CategoryLevel(Enum):
    CATEGORY_LEVEL_ZERO = 0  # core package level
CATEGORY_LEVEL_ONE = 1
CATEGORY_LEVEL_TWO = 2
CATEGORY_LEVEL_THREE = 3
    CATEGORY_LEVEL_NOT_SPECIFIED = 4  # not specified
@classmethod
def level_name_2_enum(cls, name):
return {"level1": cls.CATEGORY_LEVEL_ONE, "level2": cls.CATEGORY_LEVEL_TWO,
"level3": cls.CATEGORY_LEVEL_THREE}.get(name, cls.CATEGORY_LEVEL_NOT_SPECIFIED)
class Category(object):
def __init__(self, path):
"""
        :param path: category file, in JSON format
"""
self._src_categories = {}
self._bin_categories = {}
self.CORE_PKG = {'gcc', 'glibc', 'qemu', 'libvirt', 'docker-engine', 'java-11-openjdk', 'java-1.8.0-openjdk',
'systemd', 'openssh', 'lvm2', 'busybox', 'initscripts'}
self._load(path)
def _load(self, path):
"""
:param path:
:return:
"""
try:
with open(path, "r") as f:
categories = json.load(f)
for category in categories:
level = CategoryLevel.level_name_2_enum(category["level"])
try:
if category["src"]:
name = RPMProxy.rpm_n_v_r_d_a(category["src"], dist="category")[0]
self._src_categories[name] = level
if category["bin"]:
name = RPMProxy.rpm_n_v_r_d_a(category["bin"], dist="category")[0]
self._bin_categories[name] = level
except AttributeError as e:
                        logger.exception(f"\"{category['src']}\" or \"{category['bin']}\" is an illegal rpm name")
raise
except FileNotFoundError:
logger.exception(f"{path} not exist")
raise
def category_of_src_package(self, name):
"""
:param name:
:return:
"""
return self._src_categories.get(name, CategoryLevel.CATEGORY_LEVEL_NOT_SPECIFIED)
def category_of_bin_package(self, name):
"""
:param name:
:return:
"""
if name in self.CORE_PKG:
return CategoryLevel.CATEGORY_LEVEL_ZERO
        return self._bin_categories.get(name, CategoryLevel.CATEGORY_LEVEL_NOT_SPECIFIED)
| 1.929688 | 2 |
src/bleanser/modules/firefox.py | karlicoss/bleanser | 13 | 12788759 | #!/usr/bin/env python3
from pathlib import Path
from sqlite3 import Connection
from bleanser.core import logger
from bleanser.core.utils import get_tables
from bleanser.core.sqlite import SqliteNormaliser, Tool
class Normaliser(SqliteNormaliser):
DELETE_DOMINATED = True
MULTIWAY = True
def __init__(self, db: Path) -> None:
# todo not sure about this?.. also makes sense to run checked for cleanup/extract?
with self.checked(db) as conn:
self.tables = get_tables(conn)
def check_table(name: str) -> None:
assert name in self.tables, (name, self.tables)
check_table('moz_bookmarks')
check_table('moz_historyvisits')
# moz_annos -- apparently, downloads?
def cleanup(self, c: Connection) -> None:
tool = Tool(c)
tool.drop_index('moz_places_guid_uniqueindex')
tool.drop_index('guid_uniqueindex') # on mobile only
[(visits_before,)] = c.execute('SELECT count(*) FROM moz_historyvisits')
tool.drop_cols(
table='moz_places',
cols=[
# aggregates, changing all the time
'frecency',
'last_visit_date',
'visit_count',
# ugh... sometimes changes because of notifications, e.g. twitter/youtube?, or during page load
'hidden',
'typed',
'title',
'description',
'preview_image_url',
'foreign_count', # jus some internal refcount thing... https://bugzilla.mozilla.org/show_bug.cgi?id=1017502
## mobile only
'visit_count_local',
'last_visit_date_local',
'last_visit_date_remote',
'sync_status',
'sync_change_counter',
##
]
)
# ugh. sometimes changes for no reason...
# and anyway, for history the historyvisits table refers place_id (this table's actual id)
# also use update instead delete because phone db used to have UNIQUE constraint...
c.execute('UPDATE moz_places SET guid=id')
tool.drop_cols(
table='moz_bookmarks',
cols=['lastModified'], # changing all the time for no reason?
# todo hmm dateAdded might change when e.g. firefox reinstalls and it adds default bookmarks
# probably not worth the trouble
)
tool.drop('moz_meta')
tool.drop('moz_origins') # prefix/host/frequency -- not interesting
# todo not sure...
tool.drop('moz_inputhistory')
# sanity check just in case... can remove after we get rid of triggers properly...
[(visits_after,)] = c.execute('SELECT count(*) FROM moz_historyvisits')
assert visits_before == visits_after, (visits_before, visits_after)
if __name__ == '__main__':
from bleanser.core import main
main(Normaliser=Normaliser)
| 2.21875 | 2 |
Problems/Two Pointers/easy/ReverseOnlyLetters/reverse_only_letters.py | dolong2110/Algorithm-By-Problems-Python | 1 | 12788760 | def reverseOnlyLetters(s: str) -> str:
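    # Two-pointer scan: skip non-letters on each side, swap letter pairs, and move both pointers inward.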
s = list(s)
l, r = 0, len(s) - 1
while l < r:
if not s[l].isalpha():
l += 1
elif not s[r].isalpha():
r -= 1
else:
s[l], s[r] = s[r], s[l]
l += 1
r -= 1
return ''.join(s)
| 3.5625 | 4 |
HarTex/cmds/censor.py | HTG-YT/hartex-discord.py | 4 | 12788761 | <filename>HarTex/cmds/censor.py
import discord
from discord.ext import commands
import re
import yaml
from better_profanity import profanity
from core.classes import *
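# The helpers below read per-guild censorship settings from configurations/<guild_id>_config.yaml.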
def is_zalgo_nickname_enabled(guild_id):
with open(f'configurations/{guild_id}_config.yaml', 'r') as zalgo_prevention_enabled:
accessor = yaml.safe_load(zalgo_prevention_enabled)
nickname_enabled = accessor['plugins']['censorship']['settings']['zalgo']['filter_nicknames']
return nickname_enabled
def is_invite_enabled(guild_id):
with open(f'configurations/{guild_id}_config.yaml', 'r') as invites_enabled:
accessor = yaml.safe_load(invites_enabled)
invite_enabled = accessor['plugins']['censorship']['settings']['invites']['filter']
return invite_enabled
def is_nickname_invite_enabled(guild_id):
with open(f'configurations/{guild_id}_config.yaml', 'r') as nickname_invites:
accessor = yaml.safe_load(nickname_invites)
invite_nick_enabled = accessor['plugins']['censorship']['settings']['invites']['filter_nicknames']
return invite_nick_enabled
def is_domains_enabled(guild_id):
with open(f'configurations/{guild_id}_config.yaml', 'r') as domains_enabled:
accessor = yaml.safe_load(domains_enabled)
domains = accessor['plugins']['censorship']['settings']['domains']['filter']
return domains
def domains_list(guild_id):
with open(f'configurations/{guild_id}_config.yaml', 'r') as domains_blacklist:
accessor = yaml.safe_load(domains_blacklist)
domain_list = accessor['plugins']['censorship']['settings']['domains']['filter_list']
return domain_list
def is_domains_nickname_enabled(guild_id):
with open(f'configurations/{guild_id}_config.yaml', 'r') as nickname_blacklist:
accessor = yaml.safe_load(nickname_blacklist)
nick_enabled = accessor['plugins']['censorship']['settings']['domains']['filter_nicknames']
return nick_enabled
def get_censored_words(guild_id):
with open(f'configurations/{guild_id}_config.yaml', 'r') as censored_words:
accessor = yaml.safe_load(censored_words)
list_of_censored_words = accessor['plugins']['censorship']['settings']['blocked']['words']
return list_of_censored_words
def get_ignored_users(guild_id):
with open(f'configurations/{guild_id}_config.yaml', 'r') as censored_words:
accessor = yaml.safe_load(censored_words)
list_of_ignored_users = accessor['plugins']['censorship']['settings']['blocked']['ignored_users']
return list_of_ignored_users
def is_zalgo_enabled(guild_id):
with open(f'configurations/{guild_id}_config.yaml', 'r') as zalgo_prevention_enabled:
accessor = yaml.safe_load(zalgo_prevention_enabled)
enabled = accessor['plugins']['censorship']['settings']['zalgo']['filter']
return enabled
class Censor(commands.Cog):
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if message.author.bot:
return
message_guild: discord.Guild = message.guild
list_to_censor = get_censored_words(message_guild.id)
profanity.load_censor_words(list_to_censor)
ignored_users = get_ignored_users(message_guild.id)
if profanity.contains_profanity(message.content):
for user in ignored_users:
if message.author.id == user:
pass
else:
await message.channel.send("Blacklisted word!")
await message.delete()
else:
pass
zalgo_enabled = is_zalgo_enabled(message_guild.id)
zalgo_re = re.compile(r"[\u0300-\u036F\u0489]")
if zalgo_enabled:
if zalgo_re.search(message.content):
await message.delete()
await message.channel.send("Zalgo detected!")
else:
pass
invites = is_invite_enabled(message_guild.id)
if invites:
if 'discord.gg' in message.content:
await message.delete()
await message.channel.send("Invites are not allowed!")
else:
pass
domains = is_domains_enabled(message_guild.id)
domain_blacklist = domains_list(message_guild.id)
if domains:
for domain in domain_blacklist:
if str(domain) in message.content:
await message.delete()
await message.channel.send("This domain is blacklisted!")
break
else:
continue
else:
pass
@commands.Cog.listener()
async def on_member_update(self, before: discord.Member, after: discord.Member):
member_guild = before.guild
def get_censored_nicknames():
with open(f'configurations/{member_guild.id}_config.yaml', 'r') as censored_nicknames:
accessor = yaml.safe_load(censored_nicknames)
list_of_censored_nicknames = accessor['plugins']['censorship']['settings']['blocked']['nicknames']
return list_of_censored_nicknames
censored_nickname = get_censored_nicknames()
zalgo_nick_enabled = is_zalgo_nickname_enabled(member_guild.id)
zalgo_nick_re = re.compile(r"[\u0300-\u036F\u0489]")
if censored_nickname == "True":
if profanity.contains_profanity(str(before.nick)):
last = before.nick
if last:
await after.edit(nick=last)
else:
await after.edit(nick="Nickname Censored")
else:
pass
if zalgo_nick_enabled:
if zalgo_nick_re.search(after.nick):
last = before.nick
if last:
await after.edit(nick=last)
else:
await after.edit(nick="Zalgo Censored")
else:
pass
nickname_invite = is_nickname_invite_enabled(member_guild.id)
if nickname_invite:
if 'discord.gg' in after.nick:
last = before.nick
if last:
await after.edit(nick=last)
else:
await after.edit(nick="Invite Censored")
else:
pass
domain_blacklist_enabled = is_domains_nickname_enabled(member_guild.id)
domain_list = domains_list(member_guild.id)
if domain_blacklist_enabled:
for domain in domain_list:
if str(domain) in after.nick:
last = before.nick
if last:
await after.edit(nick=last)
break
else:
await after.edit(nick="Domain Censored")
break
else:
continue
else:
pass
def setup(hartex):
hartex.add_cog(Censor(hartex))
| 2.296875 | 2 |
day2/passValidator.py | IvantheDugtrio/adventofcode2020 | 0 | 12788762 | <filename>day2/passValidator.py<gh_stars>0
#!/usr/bin/env python3
import sys
validCount = 0
lineCount = 0
with open(sys.argv[1],'r') as fp:
for line in fp:
lineCount+=1
criteria = line.split(": ")[0]
password = line.split(": ")[1].strip()
keyChar = criteria.split(" ")[1]
keyRange = criteria.split(" ")[0]
minKeyCnt = int(keyRange.split("-")[0])
maxKeyCnt = int(keyRange.split("-")[1])
# Part 1
#keyCharCount = password.count(keyChar)
#if keyCharCount >= minKeyCnt and keyCharCount <= maxKeyCnt:
# validCount+=1
# Part 2
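        # Valid when exactly one of the two 1-indexed positions holds the key character (XOR).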
if ((password[minKeyCnt-1] == keyChar or
password[maxKeyCnt-1] == keyChar) and
not (password[minKeyCnt-1] == keyChar and
password[maxKeyCnt-1] == keyChar)):
validCount+=1
print("There are ",lineCount," passwords given")
print("There are ",validCount," valid passwords")
| 3.703125 | 4 |
tests/utest/oxygen/test_oxygen_listener.py | vrchmvgx/robotframework-oxygen | 0 | 12788763 | <filename>tests/utest/oxygen/test_oxygen_listener.py
from unittest import TestCase
from unittest.mock import Mock, patch
from oxygen import listener
class OxygenListenerBasicTests(TestCase):
def setUp(self):
self.listener = listener()
def test_listener_api_version_is_not_changed_accidentally(self):
self.assertEqual(self.listener.ROBOT_LISTENER_API_VERSION, 2)
def mock_lib_instance(self, mock_builtin, return_value):
m = Mock()
m.get_library_instance.return_value = return_value
mock_builtin.return_value = m
return m
@patch('oxygen.oxygen.BuiltIn')
def test_end_test_when_library_was_not_used(self, mock_builtin):
m = self.mock_lib_instance(mock_builtin, None)
self.listener.end_test('foo', {})
m.get_library_instance.assert_called_once_with('oxygen.OxygenLibrary')
self.assertEqual(self.listener.run_time_data, {})
@patch('oxygen.oxygen.BuiltIn')
def test_end_test_when_library_was_used(self, mock_builtin):
o = lambda: None
o.data = 'I do not have a solution, but I do admire the problem'
m = self.mock_lib_instance(mock_builtin, o)
self.listener.end_test('oxygen.OxygenLibrary', {'longname': 'hello'})
m.get_library_instance.assert_called_once_with('oxygen.OxygenLibrary')
self.assertEqual(self.listener.run_time_data,
{'hello': ('I do not have a solution, but I do '
'admire the problem')})
| 2.578125 | 3 |
src/sendresults.py | ldolberg/the_port_ors_hdx | 0 | 12788764 | <reponame>ldolberg/the_port_ors_hdx<gh_stars>0
import pandas as pd
import json
def return_jsonlist(district,lat,lon,tag,json_list):
# -- open 'Nepal_F_Tagged.csv'
new_df = pd.read_csv('../data/Nepal/Nepal_F_Tagged.csv')
    total, percentage = total_percent_from_district_tag(district, tag, new_df)
# -- get district from order_districts
# -- use get_coordinates(district) to get lat, lon
# -- get tag from query
# -- add entry in json-compatible format:
json_list.append({"Name":district, "lat":lat, "long":lon, "tag": tag,"type": "feedback",
"d":{
"#1 Concern": total,
"#1 Concern percentage":percentage}
})
return json_list
def total_percent_from_district_tag(district, tag, df):
total = df[(df['District']==district)&(df['Tag']==tag)]['Count'].sum()
surveyed_ppl_from_district = df[df['District']==district]['Count'].sum()
percentage = total/surveyed_ppl_from_district
return total, percentage
def slice_df(df, query={}):
# -- approach two (builds slicing vector, *then* applies it)
# -- more efficient
# I'm assuming query is a dictionary
# query = {'Tag':tag, 'District':district, 'Gender':gender, 'Ethnicity':ethnicity}
# -- select fields s.t. the df has them
valid_query_args = list(df.columns)
ret = df.copy()
for requirement, value in query.iteritems():
if requirement in valid_query_args:
if requirement != 'Tag':
ret = ret[ret[requirement] == value] # slicing without Tag
denominator = ret['Count'].sum()
ret = ret[ret['Tag'] == query['Tag']] # slicing with Tag
numerator = ret['Count'].sum()
total = numerator
percentage = numerator/max(denominator,1)
return total, percentage
| 3.078125 | 3 |
examples/basic/mesh_threshold.py | Gjacquenot/vtkplotter | 7 | 12788765 | <filename>examples/basic/mesh_threshold.py
"""
Extracts the cells where scalar value
satisfies a threshold criterion.
"""
from vtkplotter import *
doc = Text(__doc__)
man = load(datadir+"man.vtk")
scals = man.coordinates()[:, 1] + 37 # pick y coords of vertices
man.pointColors(scals, cmap="cool")
man.addScalarBar(title="threshold", horizontal=True)
# make a copy and threshold the mesh
cutman = man.clone().threshold(scals, vmin=36.9, vmax=37.5)
printInfo(cutman)
# distribute the actors on 2 renderers
show([[man, doc], cutman], N=2, elevation=-30, axes=0)
| 2.875 | 3 |
coviddata.py | Paul567/MBCovidVaxTracker | 0 | 12788766 | import requests
import json
class CovidData:
__data = [{}]
__province = ''
__population = -1
def __init__(self, province):
self.__province = province.upper()
reports = json.loads(
requests.get(
'https://api.covid19tracker.ca/reports/province/' +
self.__province
).text
)
self.__data = reports['data']
provinces = json.loads(
requests.get('https://api.covid19tracker.ca/provinces').text
)
for prov in provinces:
if prov['code'].upper() == self.__province:
self.__population = prov['population']
break
@property
def data(self):
return self.__data
@property
def __latest_data(self):
return self.__data[-1]
@property
def latest_data(self):
return self.__latest_data;
@property
def __last_week_data(self):
return self.__data[-7:]
@property
def province(self):
return self.__province
@property
def date(self):
return self.__latest_data['date']
@property
def new_cases(self):
return self.__latest_data['change_cases']
@property
def total_active(self):
return (
self.__latest_data['total_cases'] -
self.__latest_data['total_recoveries'] -
self.__latest_data['total_fatalities']
)
@property
def new_deaths(self):
return self.__latest_data['change_fatalities']
@property
def total_deaths(self):
return self.__latest_data['total_fatalities']
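    # Positivity rate over the last 5 reported days: sum of new cases / sum of new tests.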
@property
def test_positivity(self):
cases = 0
tests = 0
for data in self.__data[-5:]:
cases += data['change_cases']
tests += data['change_tests']
return cases / tests
@property
def new_vaccinations(self):
return self.__latest_data['change_vaccinations']
@property
def total_vaccinations(self):
return self.__latest_data['total_vaccinations']
@property
def total_vaccines_recieved(self):
return self.__latest_data['total_vaccines_distributed']
@property
def population(self):
return self.__population
@property
def percent_vaccinated(self):
return self.total_vaccinations / self.population
@property
def percent_vaccines_recieved(self):
return self.total_vaccines_recieved / self.population
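    # Naive projection: doses still needed for one per person, divided by the average daily vaccination rate over the last week of data.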
@property
def days_until_one_dose_per_person(self):
vaccines = 0
for data in self.__last_week_data:
vaccines += data['change_vaccinations']
vaccine_rate = vaccines / len(self.__last_week_data)
return (
(self.population - self.total_vaccinations) /
vaccine_rate
)
if __name__ == '__main__':
data = CovidData('MB')
print(f'{data.province} {data.date}')
print(f'-----------------------------------------')
print(f'New Cases: {data.new_cases}')
print(f'Total Active: {data.total_active}')
print(f"Test Positivity: {data.test_positivity:.2%}")
print('')
print(f'New Deaths: {data.new_deaths}')
print(f'Total Deaths: {data.total_deaths}')
print('')
print(f'New Vaccinations: {data.new_vaccinations}')
print(f'Total Vaccinations: {data.total_vaccinations}')
print(f"Percent Vaccinated: {data.percent_vaccinated:.2%}")
print(f'Percent Vaccine Recieved: {data.percent_vaccines_recieved:.2%}')
print(f"Days Until One Dose Per Person: {data.days_until_one_dose_per_person:.0f}")
| 3.015625 | 3 |
helpscout/endpoints/endpoint.py | Gogen120/helpscout | 0 | 12788767 | from typing import Dict
import requests
import helpscout.exceptions as exc
class Endpoint:
"""Base endpoint class."""
def __init__(self, client, base_url: str):
"""
Params:
client: helpscout client with credentials
base_url: url for endpoint
"""
self.client = client
self.base_url = base_url
def process_get_result(self, response: requests.Response) -> Dict:
"""Process response with coresponding status code."""
if response.status_code == 400:
raise exc.BadRequestException(response.json())
elif response.status_code == 401:
raise exc.NotAuthorizedException
elif response.status_code == 404:
return {}
return response.json()
def process_result_with_status_code(self, response: requests.Response, status_code):
"""Process result with given status code.
        Raise an exception if the response status code doesn't match the provided one.
"""
if response.status_code != status_code:
print(status_code)
raise exc.BadRequestException(response.json())
return response.status_code
def base_get_request(self, base_url: str, **kwargs) -> requests.Response:
"""Base get request."""
return requests.get(
base_url,
headers={"Authorization": f"Bearer {self.client.access_token}"},
params={**kwargs},
)
def base_put_request(self, base_url: str, **kwargs) -> requests.Response:
"""Base put request."""
return requests.put(
base_url,
headers={
"Authorization": f"Bearer {self.client.access_token}",
"Content-Type": "application/json; charset=UTF-8",
},
json={**kwargs},
)
def base_patch_request(self, base_url: str, **kwargs) -> requests.Response:
"""Base patch request."""
return requests.patch(
base_url,
headers={
"Authorization": f"Bearer {self.client.access_token}",
"Content-Type": "application/json; charset=UTF-8",
},
json={**kwargs},
)
def base_post_request(self, base_url: str, **kwargs) -> requests.Response:
"""Base post request."""
return requests.post(
base_url,
headers={
"Authorization": f"Bearer {self.client.access_token}",
"Content-Type": "application/json; charset=UTF-8",
},
json={**kwargs},
)
def base_delete_request(self, base_url: str, **kwargs) -> requests.Response:
"""Base delete request."""
return requests.delete(
base_url, headers={"Authorization": f"Bearer {self.client.access_token}"}
)
| 2.703125 | 3 |
geocamUtil/Installer.py | geocam/geocamUtilWeb | 4 | 12788768 | #!/usr/bin/env python
# __BEGIN_LICENSE__
#Copyright (c) 2015, United States Government, as represented by the
#Administrator of the National Aeronautics and Space Administration.
#All rights reserved.
# __END_LICENSE__
import os
import logging
import stat
from glob import glob
import shutil
import itertools
from geocamUtil.Builder import Builder
from django.conf import settings
class Installer(object):
def __init__(self, builder=None, logger=None):
if builder is None:
builder = Builder()
if logger is None:
logger = logging
self.builder = builder
self.logger = logger
@staticmethod
def joinNoTrailingSlash(a, b):
if b == '':
return a
else:
return a + os.path.sep + b
def dosys(self, cmd):
        self.logger.info('running: %s', cmd)
ret = os.system(cmd)
if ret != 0:
self.logger.warning('[command exited with non-zero return value %d]' % ret)
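    # Recursively enumerate files and directories under src, returning their paths relative to src.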
def getFiles(self, src, suffix=''):
path = self.joinNoTrailingSlash(src, suffix)
try:
pathMode = os.stat(path)[stat.ST_MODE]
except OSError:
# couldn't stat file, e.g. broken symlink, ignore it
return []
if stat.S_ISREG(pathMode):
return [suffix]
elif stat.S_ISDIR(pathMode):
return itertools.chain([suffix],
*[self.getFiles(src, os.path.join(suffix, f))
for f in os.listdir(path)])
else:
return [] # not a dir or regular file, ignore
def installFile(self, src, dst):
if os.path.isdir(src):
if os.path.exists(dst):
if not os.path.isdir(dst):
# replace plain file with directory
os.unlink(dst)
os.makedirs(dst)
else:
# make directory
os.makedirs(dst)
else:
# install plain file
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
if settings.GEOCAM_UTIL_INSTALLER_USE_SYMLINKS:
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
if os.path.lexists(dst):
os.unlink(dst)
os.symlink(os.path.realpath(src), dst)
else:
shutil.copy(src, dst)
def installRecurse0(self, src, dst):
for f in self.getFiles(src):
dst1 = self.joinNoTrailingSlash(dst, f)
src1 = self.joinNoTrailingSlash(src, f)
self.builder.applyRule(dst1, [src1],
lambda: self.installFile(src1, dst1))
def installRecurse(self, src, dst):
logging.info('installRecurse %s %s', src, dst)
self.installRecurse0(src, dst)
def installRecurseGlob0(self, srcs, dst):
logging.debug('installRecurseGlob0 srcs=%s dst=%s', srcs, dst)
for src in srcs:
self.installRecurse0(src, os.path.join(dst, os.path.basename(src)))
def installRecurseGlob(self, pat, dst):
logging.info('installRecurseGlob %s %s', pat, dst)
self.installRecurseGlob0(glob(pat), dst)
| 2.015625 | 2 |
train_model.py | olaals/end-to-end-RGB-pose-estimation-baseline | 2 | 12788769 | #from models.baseline_net import BaseNet
import torch
from data_loaders import *
from image_dataloaders import get_dataloaders
from loss import compute_ADD_L1_loss, compute_disentangled_ADD_L1_loss, compute_scaled_disentl_ADD_L1_loss
from rotation_representation import calculate_T_CO_pred
#from models.efficient_net import
from models import fetch_network
import os
from parser_config import get_dict_from_cli
import pickle
import matplotlib.pyplot as plt
from visualization import visualize_examples
from test_model import evaluate_model, validate_model
from torch.utils.tensorboard import SummaryWriter
import time
import datetime
torch.autograd.set_detect_anomaly(True)
def pickle_log_dict(log_dict, logdir):
save_path = os.path.join(logdir, "log_dict.pkl")
with open(save_path, 'wb') as handle:
pickle.dump(log_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
def calculate_eta(start_time, perc_complete):
curr_time = time.time()
sec_since_start = curr_time - start_time
est_total_time = sec_since_start/perc_complete
est_remaining = est_total_time-sec_since_start
return str(datetime.timedelta(seconds=est_remaining))
def save_loss_plot(losses, training_examples, loss_name, logdir):
assert len(losses) == len(training_examples)
fig,ax = plt.subplots()
fig.set_size_inches(9.5, 5.5)
ax.set_title(loss_name)
ax.set_xlabel("Training examples")
ax.set_ylabel(loss_name)
ax.set_yscale('log')
plt.plot(training_examples, losses)
save_path = os.path.join(logdir, loss_name.replace(" ", "-")+".png")
plt.savefig(save_path)
plt.close()
def save_plot_validation_loss(val_data_struct,logdir, loss_name):
fig,ax = plt.subplots()
fig.set_size_inches(9.5, 5.5)
ax.set_title("Validation " + loss_name)
ax.set_xlabel("Training examples")
ax.set_ylabel(loss_name)
ax.set_yscale('log')
train_exs = []
val_losses_arr = []
for (train_ex, val_losses) in val_data_struct:
train_exs.append(train_ex)
val_losses_arr.append(val_losses)
val_losses = np.array(val_losses_arr)
train_exs = np.array(train_exs)
legends = []
for pred_iter in range(val_losses.shape[1]):
legends.append("Pred.iter"+str(pred_iter+1))
iter_val_losses = val_losses[:,pred_iter]
plt.plot(train_exs,iter_val_losses, label="Iter. "+str(pred_iter))
ax.legend(legends)
save_path = os.path.join(logdir, "validation-"+loss_name.replace(" ", "-")+".png")
plt.savefig(save_path)
plt.close()
def logging(model, config, writer, log_dict, logdir, batch_num, train_examples):
log_interval = config["logging"]["log_save_interval"]
if(batch_num%log_interval == 0):
current_loss = log_dict["loss"]["add_l1"][:batch_num]
current_train_ex =log_dict["loss"]["train_ex"][:batch_num]
save_loss_plot(current_loss, current_train_ex, "ADD L1 Loss", logdir)
pickle_log_dict(log_dict, logdir)
save_viz_batches = config["logging"]["save_visualization_at_batches"]
save_viz_every_n_batch = config["logging"]["save_viz_every_n_batch"]
if((batch_num in save_viz_batches) or (batch_num%save_viz_every_n_batch==0 and batch_num!=0)):
save_dir = os.path.join(logdir, "visualizations")
os.makedirs(save_dir, exist_ok=True)
visualize_examples(model, config, "train", show_fig=False, save_dir=save_dir, n_train_examples=train_examples)
visualize_examples(model, config, "val", show_fig=False, save_dir=save_dir, n_train_examples=train_examples)
validation_interval = config["logging"]["validation_interval"]
if(batch_num%validation_interval == 0 and batch_num != 0):
val_ex = config["logging"]["val_examples_from_each_class"]
#loss_dict, mean_losses = evaluate_model(model, config, "train", use_all_examples=False, max_examples_from_each_class=val_ex)
mean_losses = validate_model(model, config, "val")
#log_dict["val_loss_dicts"].append((train_examples, loss_dict))
log_dict["val_loss"].append((train_examples, mean_losses))
pickle_log_dict(log_dict, logdir)
save_plot_validation_loss(log_dict["val_loss"], logdir, "ADD L1 loss")
#tensorboard
iter_dict = {}
for i in range(len(mean_losses)):
writer.add_scalar(f'Validation_ADD_L1_loss/Iter{i}', mean_losses[i], train_examples)
iter_dict[f'Iter{i}'] = mean_losses[i]
writer.add_scalars('Validation_ADD_L1_loss_iters', iter_dict, train_examples)
model.train()
def train(config):
scene_config = config["scene_config"]
# dataset config
model3d_dataset = config["dataset_config"]["model3d_dataset"]
train_classes = config["dataset_config"]["train_classes"]
train_from_imgs = config["dataset_config"]["train_from_images"]
ds_conf = config["dataset_config"]
batch_size = config["train_params"]["batch_size"]
img_ds_name = ds_conf["img_dataset"]
if train_from_imgs:
train_loader, val_loader, test_loader = get_dataloaders(ds_conf, batch_size)
# model load parameters
model_name = config["network"]["backend_network"]
rotation_repr = config["network"]["rotation_representation"]
device = config["train_params"]["device"]
use_pretrained = config["model_io"]["use_pretrained_model"]
model_save_dir = config["model_io"]["model_save_dir"]
os.makedirs(model_save_dir, exist_ok=True)
pretrained_name = config["model_io"]["pretrained_model_name"]
pretrained_path = os.path.join(model_save_dir, pretrained_name)
use_norm_depth = config["advanced"]["use_normalized_depth"]
# model saving
save_every_n_batch = config["model_io"]["batch_model_save_interval"]
model_save_name = config["model_io"]["model_save_name"]
model_save_path = os.path.join(model_save_dir, model_save_name)
cam_intrinsics = config["camera_intrinsics"]
img_size = cam_intrinsics["image_resolution"]
model = fetch_network(model_name, rotation_repr, use_norm_depth, use_pretrained, pretrained_path)
model = model.to(device)
#train params
learning_rate = config["train_params"]["learning_rate"]
opt_name = config["train_params"]["optimizer"]
num_train_batches = config["train_params"]["num_batches_to_train"]
num_sample_verts = config["train_params"]["num_sample_vertices"]
device = config["train_params"]["device"]
loss_fn_name = config["train_params"]["loss"]
# train iteration policy, i.e. determine how many iterations per batch
train_iter_policy_name = config["advanced"]["train_iter_policy"]
policy_argument = config["advanced"]["train_iter_policy_argument"]
if train_iter_policy_name == 'constant':
train_iter_policy = train_iter_policy_constant
elif train_iter_policy_name == 'incremental':
train_iter_policy = train_iter_policy_incremental
else:
assert False
# parallel rendering
use_par_render = config["scene_config"]["use_parallel_rendering"]
if(opt_name == "adam"):
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
elif(opt_name == 'sgd'):
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
else:
assert False
# print training info
print("")
print(" ### TRAINING IS STARTING ### ")
print("Loading backend network", model_name.upper(), "with rotation representation", rotation_repr)
print("Batch size", batch_size, "Learning rate", learning_rate, "Optimizer", opt_name.upper())
print("Training on device", device)
if use_pretrained:
print("Pretrained model is loaded from", pretrained_path)
else:
print("No pretrained model used, training from scratch")
print("The model will be saved to", model_save_path)
if use_norm_depth:
print("The model is trained with the normalized depth from the CAD model (advanced)")
print("")
# logging
log_dict = {}
log_dict["loss"] = {}
log_dict["loss"]["add_l1"] = np.zeros((num_train_batches+1))
log_dict["loss"]["train_ex"] = np.zeros((num_train_batches+1))
log_dict["val_loss_dicts"] = []
log_dict["val_loss"] = []
logdir = config["logging"]["logdir"]
os.makedirs(logdir, exist_ok=True)
writer = SummaryWriter(log_dir=os.path.join("tensorboard", img_ds_name, config["config_name"]))
start_time = time.time()
"""
TRAINING LOOP
"""
train_examples=0
new_batch_num=0
batch_num=0
while(True):
start_time = time.time()
init_imgs, gt_imgs, T_CO_init, T_CO_gt, mesh_verts, mesh_paths, depths, cam_mats = next(iter(train_loader))
init_imgs = init_imgs.numpy()
gt_imgs = gt_imgs.numpy()
depths = depths.numpy()
T_CO_gt = T_CO_gt.to(device)
mesh_verts = mesh_verts.to(device)
#cam_mats = get_camera_mat_tensor(cam_intrinsics, batch_size).to(device)
T_CO_pred = T_CO_init # current pred is initial
train_iterations = train_iter_policy(batch_num, policy_argument)
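        # Iterative refinement: each iteration re-renders the object at the current pose estimate and predicts a pose update against the ground-truth image.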
for j in range(train_iterations):
optimizer.zero_grad()
if(j==0 and train_from_imgs):
pred_imgs = init_imgs
T_CO_pred = T_CO_pred.to(device)
else:
pred_imgs, depths = render_batch(T_CO_pred, mesh_paths, cam_mats, img_size, use_par_render)
T_CO_pred = torch.tensor(T_CO_pred).to(device)
model_input = prepare_model_input(pred_imgs, gt_imgs, depths, use_norm_depth).to(device)
model_output = model(model_input)
T_CO_pred_new = calculate_T_CO_pred(model_output, T_CO_pred, rotation_repr, cam_mats)
addl1_loss = compute_ADD_L1_loss(T_CO_gt, T_CO_pred_new, mesh_verts)
loss_handler(loss_fn_name, addl1_loss, T_CO_pred_new, T_CO_pred, T_CO_gt, mesh_verts)
optimizer.step()
T_CO_pred = T_CO_pred_new.detach().cpu().numpy()
# Printing and logging
elapsed = time.time() - start_time
print(f'ADD L1 loss for train batch {batch_num}, with {new_batch_num} new batches, train iter {j}: {addl1_loss.item():.4f}, batch time: {elapsed:.3f}')
log_dict["loss"]["add_l1"][batch_num] = addl1_loss.item()
log_dict["loss"]["train_ex"][batch_num] = train_examples
logging(model, config, writer, log_dict, logdir, batch_num, train_examples)
if batch_num != 0 and batch_num%save_every_n_batch == 0:
writer.add_scalar("ADD_L1_loss", addl1_loss.item(), train_examples)
perc_complete = (batch_num*1.0)/num_train_batches
print("Saving model to", model_save_path)
print(f'Trained {batch_num} of {num_train_batches}. Training {(perc_complete*100.0):.3f} % complete.')
print(f'Estimated remaining training time (hour,min,sec): {calculate_eta(start_time, perc_complete)}')
torch.save(model.state_dict(), model_save_path)
if batch_num >= num_train_batches:
break
train_examples=train_examples+batch_size
batch_num += 1
new_batch_num += 1
if batch_num >= num_train_batches:
break
"""
END TRAIN LOOP
"""
def loss_handler(loss_fn_name, addl1_loss, T_CO_pred_new, T_CO_pred, T_CO_gt, mesh_verts):
if loss_fn_name == "add_l1":
addl1_loss.backward()
elif loss_fn_name == "add_l1_disentangled":
disentl_loss = compute_disentangled_ADD_L1_loss(T_CO_gt, T_CO_pred_new, mesh_verts)
disentl_loss.backward()
elif loss_fn_name == "add_l1_disentl_scaled":
sc_disentl_loss = compute_scaled_disentl_ADD_L1_loss(T_CO_pred, T_CO_pred_new, T_CO_gt, mesh_verts)
sc_disentl_loss.backward()
def train_iter_policy_constant(current_batch, num):
return num
def train_iter_policy_incremental(current_batch, increments_tuple_list):
# input must have form [(300, 2), (1000,3), (3000,4)]
new_train_iters = 1
for (batch_num, train_iters) in increments_tuple_list:
if (current_batch>batch_num):
new_train_iters = train_iters
return new_train_iters
if __name__ == '__main__':
config = get_dict_from_cli()
train(config)
| 1.929688 | 2 |
stats.py | Vidar-Petersson/Gymnasiearbete | 0 | 12788770 | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
import numpy as np
from misc import set_size
from scipy import stats
from scipy.interpolate import interp1d
from pandas.plotting import table
import statsmodels.api as sm
df_knolls_grund = pd.read_csv("data-set\knolls_grund.csv", sep=";", parse_dates=["Datum Tid (UTC)"], index_col="Datum Tid (UTC)", usecols = ['Datum Tid (UTC)','Havstemperatur'])
df_huvudskar = pd.read_csv("data-set\huvudskar.csv", sep=";", parse_dates=["Datum Tid (UTC)"], index_col="Datum Tid (UTC)")
df_huvudskar = df_huvudskar.loc[df_huvudskar["Matdjup"]==1]
df_huvudskar = df_huvudskar.drop(columns=["Kvalitet", "Matdjup"])
df_finngrundet = pd.read_csv("data-set/finngrundet.csv", sep=";", parse_dates=["Datum Tid (UTC)"], index_col="Datum Tid (UTC)", usecols = ['Datum Tid (UTC)','Havstemperatur'])
start, end = '2020-09-28', '2020-11-29'
df_finngrundet = df_finngrundet.loc[start:end]
df_huvudskar = df_huvudskar.loc[start:end]
df_knolls_grund = df_knolls_grund.loc[start:end]
smhi_mean = pd.concat([df_knolls_grund, df_huvudskar, df_finngrundet]).groupby(level=0).mean()
smhi_mean = smhi_mean["Havstemperatur"].rolling(3, center=True).mean()
df1 = pd.read_csv("data-set/sst.csv", sep=",", parse_dates=["Datum Tid (UTC)"], index_col="Datum Tid (UTC)")
df1.sort_values(by=['Datum Tid (UTC)'], inplace=True)
df1 = df1.loc[start:end]
df1['month'] = [d.strftime('%b') for d in df1.index]
df1['week'] = [d.strftime('%U') for d in df1.index]
#print(smhi_mean)
#temp_bias = 3.35
#df1["Havstemperatur"] = df1["Havstemperatur"] + temp_bias
def bias(df):
df_1d = df["Havstemperatur"].resample('D').mean()
smhi_1d = smhi_mean["Havstemperatur"].resample('D').mean()
concatTemp = pd.concat([df_1d, smhi_1d]).groupby(level=0)
print(concatTemp.head(20))
print(concatTemp)
def data_comp(df):
pd.set_option("display.max_rows", None, "display.max_columns", None)
df_1d = df["Havstemperatur"].resample('D').mean()
smhi_1d = smhi_mean.resample('D').mean()
df_1d, smhi_1d = df_1d.align(smhi_1d)
print(df_1d)
#df_1d = df_1d.interpolate(method='time')
#diff = smhi_1d - df_1d
#slope = pd.Series(np.gradient(df_1d.values), df_1d.index, name='slope')
#print(slope.mean())
def smhi():
df_finngrundet.reset_index(inplace=True)
df_huvudskar.reset_index(inplace=True)
df_knolls_grund.reset_index(inplace=True)
#smhi_7d.reset_index(inplace=True)
fig, ax = plt.subplots()
ax.plot(df_finngrundet["Datum Tid (UTC)"], df_finngrundet["Havstemperatur"],linestyle='--', label='Finngrundet')
ax.plot(df_huvudskar["Datum Tid (UTC)"], df_huvudskar["Havstemperatur"],linestyle='--', label='Huvudskär')
ax.plot(df_knolls_grund["Datum Tid (UTC)"], df_knolls_grund["Havstemperatur"],linestyle='--', label='Knolls grund')
ax.plot(smhi_mean.loc[start:end], label='Medelvärde (Referensdata)')
ax.legend()
ax.set_ylabel('Temperatur [°C]', fontweight='demi')
ax.yaxis.set_label_position("right")
ax.set_xlabel("Vecka", fontweight='demi')
ax.set_title("Temperaturutveckling på 0,5 m - SMHIs bojar", fontweight='demi')
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=0))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%U'))
ax.set_ylim(ymin=4)
def seasonality(df):
end = "2020-11-28"
df = df.loc[:end]
sns.boxplot(data=df, x='week', y="Havstemperatur").set(ylabel= 'Temperatur [°C]', xlabel="Vecka")
plt.ylim(4)
def histogram(df):
df["Havstemperatur"].hist(bins=11, range=(0,11))
plt.xlabel("Temperatur [°C]")
def observations(df):
obs = df.groupby(df.index.date).count()
#print(obs["Havstemperatur"].std())
obs["Havstemperatur"].hist(bins=24, range=(0,12))
#df.groupby([df.index.date,]).count().plot(kind='bar')
plt.ylabel("Frekvens")
plt.xlabel("Observation/dag")
def average(df):
df_weekly_mean = df["Havstemperatur"].resample('W', label='left', loffset=pd.DateOffset(days=4.5)).mean()
smhi_weekly_mean = smhi_mean.resample('W', label='left', loffset=pd.DateOffset(days=4.5)).mean()
df_1d = df["Havstemperatur"].resample('D').mean()
df_5d = df["Havstemperatur"].rolling("5d").mean()
df_std = smhi_mean.resample("D").std().mean()
print(df_weekly_mean)
# Plot daily and weekly resampled time series together
fig, ax = plt.subplots()
ax.plot(df.loc[start:end, 'Havstemperatur'], marker='.', linestyle='None', alpha=0.5, label='Observation: $SST_{skin}$')
ax.plot(df_5d.loc[start:end], marker='.', linestyle='-', label='5-d rullande medelvärde')
#ax.plot(intdf.loc[start:end], marker='.', linestyle='-', label='Dagligt medelvärde')
ax.plot(df_weekly_mean.loc[start:end], marker='D', linestyle='--', markersize=7, label='Veckovis medelvärde')
ax.plot(smhi_mean.loc[start:end], label="Referensdata: 0,5 m (SMHI)")
#ax.fill_between(df_std.index, df_7d - 2 * df_std, df_7d + 2 * df_std, color='b', alpha=0.2)
ax.set_ylabel('Temperatur [°C]', fontweight='demi')
ax.yaxis.set_label_position("right")
ax.set_xlabel("Vecka", fontweight='demi')
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=0))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%U'))
ax.set_title('Havstemperaturutveckling i Östersjöområdet', fontweight='demi')
ax.set_ylim(ymin=4)
ax.legend()
def pixel_average(df):
px_std = df.std(axis=0)["Pixlar"]
px_mean = df.mean(axis=0)["Pixlar"]
df_px_std = df[df["Pixlar"] < (px_mean-px_std)]
df.reset_index(inplace=True)
df_px_std.reset_index(inplace=True)
# Plot daily and weekly resampled time series together
#fig, ax = plt.subplots()
df.plot.scatter("Datum Tid (UTC)", "Havstemperatur", c="Pixlar", colormap="inferno", label='Observation')
ax = df.plot.scatter("Datum Tid (UTC)", "Havstemperatur", color='Red', label='Observation')
df_px_std.plot.scatter("Datum Tid (UTC)", "Havstemperatur", label='Observation', ax=ax)
def satellites(df):
N15 = df.loc[df['Satellit'] == "NOAA 15"]
N18 = df.loc[df['Satellit'] == "NOAA 18"]
N19 = df.loc[df['Satellit'] == "NOAA 19"]
print(N15["Havstemperatur"].mean())
print(N18["Havstemperatur"].mean())
print(N19["Havstemperatur"].mean())
fig, ax = plt.subplots()
ax.plot(N15.loc[start:end, "Havstemperatur"].rolling("5d").mean(), marker=".", label=("NOAA 15"), linestyle="-")
ax.plot(N18.loc[start:end, "Havstemperatur"].rolling("5d").mean(), marker=".", label=("NOAA 18"), linestyle="-")
ax.plot(N19.loc[start:end, "Havstemperatur"].rolling("5d").mean(), marker=".", label=("NOAA 19"), linestyle="-")
#ax.plot(df.loc[start:end, "Havstemperatur"].rolling("5d").mean(), label=("Kombinerade observationer"), linestyle="-")
ax.set_ylabel('Temperatur [°C]')
ax.set_xlabel("Vecka")
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=0))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%U'))
ax.set_ylim(ymin=4)
ax.legend()
def dist(df):
#sns.displot(df, x="Pixlar", binwidth=1000, kde=True) #, kind="kde"
#sns.distplot(df["Pixlar"], rug=True, kde=True)
#sns.displot(df, x="Pixlar", y="Havstemperatur")
# Note the difference in argument order
model = sm.OLS(df["Pixlar"], df["Elevation"]).fit()
predictions = model.predict(df["Pixlar"]) # make the predictions by the model
# Print out the statistics
print(model.summary())
""" ax=sns.jointplot(x="Elevation", y='Pixlar', data=df, kind="reg")
ax.ax_joint.set_ylabel("Pixlar")
ax.ax_joint.set_xlabel("Elevation [°]")
ax.ax_marg_x.set_xlim(0, 90) """
tex_fonts = {
# Use LaTeX to write all text
#"text.usetex": False,
"font.family": "sans-serif",
"font.sans-serif": "Avenir Next LT Pro",
"font.weight": "demi",
# Use 10pt font in plots, to match 10pt font in document
"axes.labelsize": 12,
"font.size": 12,
# Make the legend/label fonts a little smaller
"legend.fontsize": 10,
"xtick.labelsize": 10,
"ytick.labelsize": 10
}
sns.set(rc={'figure.figsize':(set_size(600))})
sns.set_theme(style="whitegrid")
#plt.rcParams.update(tex_fonts)
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = 'Avenir Next LT Pro'
#plt.rcParams['font.weight'] = 'demi'
#plt.rcParams["figure.figsize"] = set_size(390)
#seasonality(df1)
#histogram(df1)
average(df1)
#satellites(df1)
#regression(df1)
#dist(df1)
#pixel_average(df1)
#smhi()
#observations(df1)
#calendar(df1)
#bias(df1)
#data_comp(df1)
#plt.tight_layout(pad=0.0,h_pad=0.0,w_pad=0.0)
plt.tight_layout()
#plt.show()
#plt.savefig("exported/bias.svg", format="svg")
plt.savefig("exported/6.png", dpi=300) | 2.46875 | 2 |
emotions.py | Omrigan/essay-writer | 33 | 12788771 | mat = [
'сука', "блять", "пиздец", "нахуй", "<NAME>", "епта"]
import random
import re
# strong_emotions = re.sub('[^а-я]', ' ', open('strong_emotions').read().lower()).split()
def process(txt, ch):
words = txt.split(" ")
nxt = words[0] + ' '
i = 1
while i < len(words) - 1:
if words[i - 1][-1] != '.' and random.random() < ch:
nxt += random.choice(mat) + " "
else:
nxt += words[i] + " "
i += 1
nxt += words[-1]
return nxt
| 3.15625 | 3 |
sdkAutomator/sdkAutomator.py | chebroluharika/SDK_Automation_Generator | 0 | 12788772 | import fnmatch
import executePythonResources
import writeEndPointsFile
import executeResources
import changeLogGenerator
import sys
import os
import shutil
from datetime import datetime
import json
import git # if git module is not found, use 'pip install gitpython'
resource_dict = {
'FC Networks': 'fc_networks',
'FCoE Networks': 'fcoe_networks',
'Ethernet Networks': 'ethernet_networks',
'Network Sets': 'network_sets',
'Connection Templates': 'connection_templates',
'Certificates Server': 'certificates_server',
'Enclosures': 'enclosures',
'Enclosure Groups': 'enclosure_groups',
'Firmware Drivers': 'firmware_drivers',
'Hypervisor Cluster Profiles': 'hypervisor_cluster_profiles',
'Hypervisor Managers': 'hypervisor_managers',
'Interconnects': 'interconnects',
'Interconnect Types': 'interconnect_types',
'Logical Enclosures': 'logical_enclosures',
'Logical Interconnects': 'logical_interconnects',
'Logical Interconnect Groups': 'logical_interconnect_groups',
'Scopes': 'scopes',
'Server Hardware': 'server_hardware',
'Server Hardware Types': 'server_hardware_types',
'Server Profiles': 'server_profiles',
'Server Profile Templates': 'server_profile_templates',
'Storage Pools': 'storage_pools',
'Storage Systems': 'storage_systems',
'Storage Volume Templates': 'storage_volume_templates',
'Storage Volume Attachments': 'storage_volume_attachments',
'Volumes': 'volumes',
'Tasks': 'tasks',
'Uplink Sets': 'uplink_sets'
}
class LogWriter(object):
"""
    Show logs on the console and flush the same output to the log file.
"""
def __init__(self, filename):
self.stdout = sys.stdout
self.file = filename
def write(self, obj):
self.file.write(obj)
self.stdout.write(obj)
self.file.flush()
def flush(self):
self.stdout.flush()
self.file.flush()
def clean_up_files():
print("---------Removing all log files---------------")
for rootDir, subdirs, filenames in os.walk(os.getcwd()):
for filename in fnmatch.filter(filenames, 'logfile*.log'):
try:
os.remove(os.path.join(rootDir, filename))
except OSError:
print("Error while deleting file")
print("---------Completed removing log files--------------")
try:
folder_names = ['oneview-python', 'oneview-ansible-collections','oneview-golang','oneview-terraform-provider']
        for folder in folder_names:
            shutil.rmtree(os.path.join(os.getcwd(), folder))
    except Exception as e:
        print("Error {} occurred while deleting folder {}".format(str(e), folder))
def createGitRepositories(selected_sdk):
git_url = 'https://github.com/HewlettPackard/oneview' + str(selected_sdk)
repo = git.Repo.clone_from(git_url,
os.getcwd() + '/' + str(selected_sdk) + '/')
return repo
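# Create and check out a local feature branch; if the name already exists on origin, append a numeric suffix and retry recursively.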
def createFeatureBranch(repo, branchName):
remote_branches = []
num = 0
for ref in repo.git.branch('-r').split('\n'):
remote_branches.append(ref.replace(" ", ""))
branch_present = True if 'origin/' + branchName in remote_branches else False
if branch_present:
branchName = branchName + '_' + str(num)
num = num + 1
createFeatureBranch(repo, branchName)
else:
new_branch = repo.create_head(branchName)
new_branch.checkout()
return
def updateJsonFile():
jsonFile = open("./auto_config.json", "r") # Open the JSON file for reading
data = json.load(jsonFile) # Read the JSON into the buffer
jsonFile.close() # Close the JSON file
ipAddressFile = open('ipaddress.txt', 'r')
oneview_ip = ipAddressFile.read()
## Working with buffered content
tmp = data["oneview_ip"]
data["oneview_ip"] = oneview_ip
## Save our changes to JSON file
jsonFile = open("auto_config.json", "w+")
jsonFile.write(json.dumps(data))
jsonFile.close()
if __name__ == '__main__':
updateJsonFile()
selected_sdk = sys.argv[1]
api_version = sys.argv[2]
#repo = createGitRepositories(selected_sdk)
#branchName = createFeatureBranch(repo, 'feature')
print("---------Started executing files---------")
# LOG_FILENAME = datetime.now().strftime('logfile_%H_%M_%d_%m_%Y.log')
# f = open(LOG_FILENAME, 'w')
# original = sys.stdout
# sys.stdout = LogWriter(f)
resources_executor = executeResources.executeResources(selected_sdk, api_version)
executed_files = resources_executor.execute(resource_dict)
# sys.stdout = original
if executed_files:
print("---------Started writing to CHANGELOG.md---------")
changelog_generator = changeLogGenerator.changeLogGenerator(resource_dict, api_version)
changelog_generator.write_data()
print("---------Completed writing to CHANGELOG.md---------")
endpointsfile_writer = writeEndPointsFile.writeEndpointsFile('## HPE OneView', resource_dict, api_version)
endpointsfile_writer.main()
repo.git.add(A=True)
        repo.git.commit('-m', 'PR for release changes #pr',
author='<EMAIL>') # to commit changes
repo.git.push('--set-upstream', 'origin', branchName)
repo.close()
os.chdir(path) # Navigate to parent directory
# Delete git cloned directory as cleanup
if os.path.exists(os.getcwd() + '/' + str(selected_sdk)):
shutil.rmtree(os.getcwd() + '/' + str(selected_sdk) + '/', ignore_errors=True)
# clean_up_files()
| 1.945313 | 2 |
src/openpersonen/contrib/demo/converters/ouder.py | maykinmedia/open-personen | 2 | 12788773 | from django.conf import settings
from openpersonen.features.country_code_and_omschrijving.models import (
CountryCodeAndOmschrijving,
)
from openpersonen.features.gemeente_code_and_omschrijving.models import (
GemeenteCodeAndOmschrijving,
)
from openpersonen.utils.helpers import is_valid_date_format
def convert_ouder_instance_to_dict(ouder):
ouder_dict = {
"burgerservicenummer": ouder.burgerservicenummer_ouder,
"geslachtsaanduiding": ouder.geslachtsaanduiding_ouder,
"ouderAanduiding": ouder.geslachtsaanduiding_ouder,
"datumIngangFamilierechtelijkeBetrekking": {
"dag": int(
ouder.datum_ingang_familierechtelijke_betrekking_ouder[
settings.OPENPERSONEN_DAY_START : settings.OPENPERSONEN_DAY_END
]
if is_valid_date_format(
ouder.datum_ingang_familierechtelijke_betrekking_ouder
)
else 0
),
"datum": ouder.datum_ingang_familierechtelijke_betrekking_ouder,
"jaar": int(
ouder.datum_ingang_familierechtelijke_betrekking_ouder[
settings.OPENPERSONEN_YEAR_START : settings.OPENPERSONEN_YEAR_END
]
if is_valid_date_format(
ouder.datum_ingang_familierechtelijke_betrekking_ouder
)
else 0
),
"maand": int(
ouder.datum_ingang_familierechtelijke_betrekking_ouder[
settings.OPENPERSONEN_MONTH_START : settings.OPENPERSONEN_MONTH_END
]
if is_valid_date_format(
ouder.datum_ingang_familierechtelijke_betrekking_ouder
)
else 0
),
},
"naam": {
"geslachtsnaam": ouder.geslachtsnaam_ouder,
"voorletters": "string",
"voornamen": ouder.voornamen_ouder,
"voorvoegsel": ouder.voorvoegsel_geslachtsnaam_ouder,
"inOnderzoek": {
"geslachtsnaam": bool(ouder.geslachtsnaam_ouder),
"voornamen": bool(ouder.voornamen_ouder),
"voorvoegsel": bool(ouder.voorvoegsel_geslachtsnaam_ouder),
"datumIngangOnderzoek": {
"dag": 0,
"datum": "string",
"jaar": 0,
"maand": 0,
},
},
},
"inOnderzoek": {
"burgerservicenummer": bool(ouder.burgerservicenummer_ouder),
"datumIngangFamilierechtelijkeBetrekking": bool(
ouder.datum_ingang_familierechtelijke_betrekking_ouder
),
"geslachtsaanduiding": bool(ouder.geslachtsaanduiding_ouder),
"datumIngangOnderzoek": {
"dag": int(
ouder.datum_ingang_onderzoek[
settings.OPENPERSONEN_DAY_START : settings.OPENPERSONEN_DAY_END
]
if is_valid_date_format(ouder.datum_ingang_onderzoek)
else 0
),
"datum": ouder.datum_ingang_onderzoek,
"jaar": int(
ouder.datum_ingang_onderzoek[
settings.OPENPERSONEN_YEAR_START : settings.OPENPERSONEN_YEAR_END
]
if is_valid_date_format(ouder.datum_ingang_onderzoek)
else 0
),
"maand": int(
ouder.datum_ingang_onderzoek[
settings.OPENPERSONEN_MONTH_START : settings.OPENPERSONEN_MONTH_END
]
if is_valid_date_format(ouder.datum_ingang_onderzoek)
else 0
),
},
},
"geboorte": {
"datum": {
"dag": int(
ouder.geboortedatum_ouder[
settings.OPENPERSONEN_DAY_START : settings.OPENPERSONEN_DAY_END
]
)
if is_valid_date_format(ouder.geboortedatum_ouder)
else 0,
"datum": ouder.geboortedatum_ouder,
"jaar": int(
ouder.geboortedatum_ouder[
settings.OPENPERSONEN_YEAR_START : settings.OPENPERSONEN_YEAR_END
]
)
if is_valid_date_format(ouder.geboortedatum_ouder)
else 0,
"maand": int(
ouder.geboortedatum_ouder[
settings.OPENPERSONEN_MONTH_START : settings.OPENPERSONEN_MONTH_END
]
)
if is_valid_date_format(ouder.geboortedatum_ouder)
else 0,
},
"land": {
"code": ouder.geboorteland_ouder,
"omschrijving": CountryCodeAndOmschrijving.get_omschrijving_from_code(
ouder.geboorteland_ouder
),
},
"plaats": {
"code": ouder.geboorteplaats_ouder,
"omschrijving": GemeenteCodeAndOmschrijving.get_omschrijving_from_code(
ouder.geboorteplaats_ouder
),
},
"inOnderzoek": {
"datum": bool(ouder.geboortedatum_ouder),
"land": bool(ouder.geboorteland_ouder),
"plaats": bool(ouder.geboorteplaats_ouder),
"datumIngangOnderzoek": {
"dag": 0,
"datum": "string",
"jaar": 0,
"maand": 0,
},
},
},
"geheimhoudingPersoonsgegevens": True,
}
return ouder_dict
| 2.078125 | 2 |
app/forms.py | samkreter/TigerBuilds-Website | 0 | 12788774 | from flask.ext.wtf import Form
from wtforms import BooleanField, TextField, PasswordField, validators
from wtforms.validators import DataRequired
from flask_wtf.file import FileField
class LoginForm(Form):
first_name = TextField('first_name', validators=[DataRequired()])
    last_name = TextField('last_name', validators=[DataRequired()])
email = TextField('Email Address', [validators.required(),validators.Length(min=6, max=35)])
resume = FileField()
remember_me = BooleanField('remember_me', default=False) | 2.765625 | 3 |
vhoops/modules/on_call/ui/routes.py | yigitbasalma/vhoops | 4 | 12788775 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template
from flask_login import login_required
from vhoops.modules.teams.api.controllers import get_all_teams_func
from vhoops.modules.on_call.forms.new_on_call import NewOnCallSchedule
on_call_router = Blueprint("on_call_router", __name__)
@on_call_router.route("/on-call", methods=["GET"])
@login_required
def on_call_page():
# Form config
teams = get_all_teams_func(as_object=True)
form = NewOnCallSchedule()
form.user.choices = [
(member.id, member.username)
for team in teams["data"]
for member in team.members
]
return render_template(
"on-call/on-call.html",
teams=teams["data"],
form=form
)
| 2.203125 | 2 |
codes/Arms.py | imadaouali/bootstrapping-multi-armed-bandits | 0 | 12788776 | <reponame>imadaouali/bootstrapping-multi-armed-bandits
"""different classes of arms, all of them have a sample() method which produce rewards"""
import numpy as np
from random import random
from math import sqrt,log,exp
class Bernoulli:
def __init__(self,p):
# create a Bernoulli arm with mean p
self.mean = p
self.variance = p*(1-p)
def sample(self):
# generate a reward from a Bernoulli arm
return float(random()<self.mean)
class Gaussian:
def __init__(self,mu,var=1):
# create a Gaussian arm with specified mean and variance
self.mean = mu
self.variance = var
def sample(self):
# generate a reward from a Gaussian arm
return self.mean + sqrt(self.variance)*np.random.normal()
class Exponential:
def __init__(self,p):
# create an Exponential arm with parameter p
self.mean = 1/p
self.variance = 1/(p*p)
def sample(self):
# generate a reward from an Exponential arm
return -(self.mean)*log(random())
class TruncatedExponential:
def __init__(self,p,trunc):
# create a truncated Exponential arm with parameter p
self.p = p
self.trunc = trunc
self.mean = (1.-exp(-p * trunc)) / p
self.variance=0
def sample(self):
# generate a reward from an Exponential arm
return min(-(1/self.p)*log(random()),self.trunc)
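if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): draw a few rewards from each
    # arm type to illustrate the shared sample() interface; the parameters are arbitrary.
    demo_arms = [Bernoulli(0.3), Gaussian(0.0, 2.0), Exponential(2.0), TruncatedExponential(2.0, 1.0)]
    for arm in demo_arms:
        print(type(arm).__name__, "mean =", arm.mean, "samples =", [round(arm.sample(), 3) for _ in range(5)])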
| 3.46875 | 3 |
categorical/plot_sweep.py | remilepriol/causal-adaptation-speed | 15 | 12788777 | import os
import pickle
from collections import defaultdict
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
def add_capitals(dico):
return {**dico, **{key[0].capitalize() + key[1:]: item for key, item in dico.items()}}
COLORS = {
'causal': 'blue',
'anti': 'red',
'joint': 'green',
'causal_average': 'darkblue',
'anti_average': 'darkred',
'joint_average': 'darkgreen',
'MAP_uniform': 'yellow',
'MAP_source': 'gold',
# guess
'CausalGuessX': 'skyblue',
'CausalGuessY': 'darkcyan',
'AntiGuessX': 'salmon',
'AntiGuessY': 'chocolate',
}
MARKERS = {key: 'o' for key in COLORS}
MARKERS['causal'] = '^'
MARKERS['anti'] = 'v'
COLORS = add_capitals(COLORS)
MARKERS = add_capitals(MARKERS)
def value_at_step(trajectory, nsteps=1000):
"""Return the KL and the integral KL up to nsteps."""
steps = trajectory['steps']
index = np.searchsorted(steps, nsteps) - 1
ans = {}
# ans['end_step'] = steps[index]
for key, item in trajectory.items():
if key.startswith('kl_'):
ans[key[3:]] = item[index].mean()
# ans['endkl_' + key[3:]] = item[index].mean()
# ans['intkl_' + key[3:]] = item[:index].mean()
return ans
def get_best(results, nsteps):
"""Store per model each parameter and kl values
then for each model return the argmax parameters and curves
for kl and integral kl
"""
by_model = {}
# dictionary where each key is a model,
# and each value is a list of this model's hyperparameter
# and outcome at step nsteps
for exp in results:
trajectory = exp['trajectory']
for model, metric in value_at_step(trajectory, nsteps).items():
if model not in by_model:
by_model[model] = []
toadd = {
'hyperparameters': exp['hyperparameters'],
**exp['hyperparameters'],
'value': metric,
'kl': trajectory['kl_' + model],
'steps': trajectory['steps']
}
if 'scoredist_' + model in trajectory:
toadd['scoredist'] = trajectory['scoredist_' + model]
by_model[model] += [toadd]
# select only the best hyperparameters for this model.
for model, metrics in by_model.items():
dalist = sorted(metrics, key=lambda x: x['value'])
# Ensure that the optimal configuration does not diverge as optimization goes on.
for duh in dalist:
if duh['kl'][0].mean() * 2 > duh['kl'][-1].mean():
break
by_model[model] = duh
# print the outcome
for model, item in by_model.items():
if 'MAP' in model:
print(model, ('\t n0={n0:.0f},'
'\t kl={value:.3f}').format(**item))
else:
print(model, ('\t alpha={scheduler_exponent},'
'\t lr={lr:.1e},'
'\t kl={value:.3f}').format(**item))
return by_model
def curve_plot(bestof, nsteps, figsize, logscale=False, endstep=400, confidence=(5, 95)):
"""Draw mean trajectory plot with percentiles"""
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
for model, item in sorted(bestof.items()):
xx = item['steps']
values = item['kl']
# truncate plot for k-invariance
end_id = np.searchsorted(xx, endstep) + 1
xx = xx[:end_id]
values = values[:end_id]
# plot mean and percentile statistics
ax.plot(xx, values.mean(axis=1), label=model,
marker=MARKERS[model], markevery=len(xx) // 6, markeredgewidth=0,
color=COLORS[model], alpha=.9)
ax.fill_between(
xx,
np.percentile(values, confidence[0], axis=1),
np.percentile(values, confidence[1], axis=1),
alpha=.4,
color=COLORS[model]
)
ax.axvline(nsteps, linestyle='--', color='black')
ax.grid(True)
if logscale:
ax.set_yscale('log')
ax.set_ylabel(r'$\mathrm{KL}(\mathbf{p}^*, \mathbf{p}^{(t)})$')
ax.set_xlabel('number of samples t')
ax.legend()
return fig, ax
def scatter_plot(bestof, nsteps, figsize, logscale=False):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
alldist = []
allkl = []
for model, item in sorted(bestof.items()):
if 'scoredist' not in item:
continue
index = min(np.searchsorted(item['steps'], nsteps), len(item['steps']) - 1)
initial_distances = item['scoredist'][0]
end_kl = item['kl'][index]
ax.scatter(
initial_distances,
end_kl,
alpha=.3,
color=COLORS[model],
marker=MARKERS[model],
linewidth=0,
label=model if False else None
)
alldist += list(initial_distances)
allkl += list(end_kl)
# linear regression
slope, intercept, rval, pval, _ = scipy.stats.linregress(alldist, allkl)
x_vals = np.array(ax.get_xlim())
y_vals = intercept + slope * x_vals
ax.plot(
x_vals, y_vals, '--', color='black', alpha=.8,
label=f'y=ax+b, r2={rval ** 2:.2f}'
f',\na={slope:.1e}, b={intercept:.2f}'
)
# look
ax.legend()
ax.grid(True)
if logscale:
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim(min(alldist), max(alldist))
else:
ax.ticklabel_format(axis='both', style='sci', scilimits=(0, 0), useMathText=True)
ax.set_ylabel(r'$\mathrm{KL}(\mathbf{p}^*, \mathbf{p}^{(t)}); T=$' + str(nsteps))
ax.set_xlabel(r'$||\theta^{(0)} - \theta^* ||^2$')
return fig, ax
def two_plots(results, nsteps, plotname, dirname, verbose=False, figsize=(6, 3)):
print(dirname, plotname)
bestof = get_best(results, nsteps)
# remove the models I don't want to compare
# eg remove SGD, MAP. Keep ASGD and rename them to remove average.
selected = {
key[0].capitalize() + key[1:-len('_average')].replace('A', 'X').replace('B', 'Y'): item
for key, item in bestof.items()
if key.endswith('_average')}
for key in ['MAP_uniform', 'MAP_source']:
# selected[key] = bestof[key]
pass
if dirname.startswith('guess'):
selected.pop('Joint', None)
curves, ax1 = curve_plot(selected, nsteps, figsize, logscale=False)
# initstring = 'denseinit' if results[0]["is_init_dense"] else 'sparseinit'
# curves.suptitle(f'Average KL tuned for {nsteps} samples with {confidence} percentiles, '
# f'{initstring}, k={results[0]["k"]}')
scatter, ax2 = scatter_plot(selected, nsteps, figsize,
logscale=(dirname == 'guess_sparseinit'))
if verbose:
for ax in [ax1, ax2]:
info = str(next(iter(selected.values()))['hyperparameters'])
txt = ax.text(0.5, 1, info, ha='center', va='top',
wrap=True, transform=ax.transAxes,
# bbox=dict(boxstyle='square')
)
            txt._get_wrap_line_width = lambda: 400.  # wrap to 400 screen pixels
# small adjustments for intervention guessing
if dirname.startswith('guess'):
curves.axes[0].set_ylim(0, 1.5)
for fig in [curves, scatter]:
fig.axes[0].set_xlabel('')
fig.axes[0].set_ylabel('')
for style, fig in {'curves': curves, 'scatter': scatter}.items():
for figpath in [
os.path.join('plots', dirname, f'{style}_{plotname}.pdf')]:
print("Saving ", figpath)
os.makedirs(os.path.dirname(figpath), exist_ok=True)
# os.path.join('plots/sweep/png', f'{style}_{plotname}.png')]:
fig.savefig(figpath, bbox_inches='tight')
plt.close(curves)
plt.close(scatter)
print()
def plot_marginal_likelihoods(results, intervention, k, dirname):
exp = results[0]
values = {}
for whom in ['A', 'B']:
values[whom] = exp['loglikelihood' + whom][:100].cumsum(0)
xx = np.arange(1, values[whom].shape[0] + 1)
values[whom] /= xx[:, np.newaxis]
if intervention == 'cause':
right, wrong = 'A', 'B'
else:
right, wrong = 'B', 'A'
plt.plot(values[wrong] - values[right], alpha=.2)
plt.hlines(0, 0, values['B'].shape[0])
plt.grid()
plt.ylim(-1, 1)
figpath = os.path.join('plots', dirname, 'guessing', f'guess_{intervention}_k={k}.pdf')
os.makedirs(os.path.dirname(figpath), exist_ok=True)
plt.savefig(figpath, bbox_inches='tight')
plt.close()
def merge_results(results1, results2, bs=5):
"""Combine results from intervention on cause and effect.
Also report statistics about pooled results.
Pooled records the average over 10 cause and 10 effect interventions
the goal is to have tighter percentile curves
which are representative of the algorithm's performance
"""
combined = []
pooled = []
for e1, e2 in zip(results1, results2):
h1, h2 = e1['hyperparameters'], e2['hyperparameters']
assert h1['lr'] == h2['lr']
t1, t2 = e1['trajectory'], e2['trajectory']
combined_trajs = {'steps': t1['steps']}
pooled_trajs = combined_trajs.copy()
for key in t1.keys():
if key.startswith(('scoredist', 'kl')):
combined_trajs[key] = np.concatenate((t1[key], t2[key]), axis=1)
meantraj = (t1[key] + t2[key]) / 2
pooled_trajs[key] = np.array([
meantraj[:, bs * i:bs * (i + 1)].mean(axis=1)
for i in range(meantraj.shape[1] // bs)
]).T
combined += [{'hyperparameters': h1, 'trajectory': combined_trajs}]
pooled += [{'hyperparameters': h2, 'trajectory': pooled_trajs}]
return combined, pooled
def all_plot(guess=False, dense=True,
input_dir='categorical_results', output_dir='camera_ready',
figsize=(3.6, 2.2)):
basefile = '_'.join(['guess' if guess else 'sweep2',
'denseinit' if dense else 'sparseinit'])
print(basefile, '\n---------------------')
prior_string = 'dense' if dense else 'sparse'
for k in [20]: # [10, 20, 50]:
# Optimize hyperparameters for nsteps such that curves are k-invariant
nsteps = k ** 2 // 4
allresults = defaultdict(list)
for intervention in ['cause', 'effect']:
# 'singlecond', 'gmechanism', 'independent', 'geometric', 'weightedgeo']:
plotname = f'{prior_string}_{intervention}_k={k}'
file = f'{basefile}_{intervention}_k={k}.pkl'
filepath = os.path.join(input_dir, file)
print(os.path.abspath(filepath))
if os.path.isfile(filepath):
with open(filepath, 'rb') as fin:
results = pickle.load(fin)
print(1)
two_plots(results, nsteps,
plotname=plotname,
dirname=output_dir,
figsize=figsize)
allresults[intervention] = results
# if guess:
# plot_marginal_likelihoods(results, intervention, k, basefile)
# if not guess and 'cause' in allresults and 'effect' in allresults:
# combined, pooled = merge_results(allresults['cause'], allresults['effect'])
# if len(combined) > 0:
# for key, item in {'combined': combined, 'pooled': pooled}.items():
# two_plots(item, nsteps,
# plotname=f'{prior_string}_{key}_k={k}',
# dirname=output_dir,
# figsize=figsize)
if __name__ == '__main__':
np.set_printoptions(precision=2)
matplotlib.use('pgf')
matplotlib.rcParams['mathtext.fontset'] = 'cm'
matplotlib.rcParams['pdf.fonttype'] = 42
# all_plot(guess=True, dense=True)
# all_plot(guess=True, dense=False)
all_plot(guess=False, dense=True)
all_plot(guess=False, dense=False)
| 2.640625 | 3 |
tests/v1tests/test_offices_view.py | Davidodari/POLITICO-API | 1 | 12788778 | from tests.v1tests import BaseTestCase
import json
class OfficeEndpointsTestCase(BaseTestCase):
def test_create_office(self):
"""Tests valid data POST Http method request on /offices endpoint"""
# Post, uses office specification model
response = self.client.post('api/v1/offices', data=json.dumps(self.office))
# Data section returned as per response specification
expected_response_json = {
'data': [{
'id': 1,
'type': 'Senior',
'name': 'Permanent Secretary'
}],
'status': 201
}
self.assertEqual(response.status_code, 201, "Should Return a 201 HTTP Status Code Response:Created")
self.assertEqual(expected_response_json, response.json)
def test_create_office_invalid_forbidden(self):
"""Tests invalid data on POST method request on /offices endpoint"""
response = self.client.post('api/v1/offices',
json={
'type': 'n',
'name': 'p'
})
self.assertEqual(response.status_code, 400, "Should Return a 400 HTTP Status Code Response:Bad Request")
# Should return error message
self.assertIn("Check Input Values", response.json['error'])
def test_create_office_bad_request(self):
"""Tests malformed POST Http method request on /offices endpoint"""
response = self.client.post('api/v1/offices',
json={
# Missing type key
'name': 'Permanent Secretary'
})
self.assertEqual(response.status_code, 400, "Should Return a 400 HTTP Status Code Response:Bad Request")
# Should return error message
self.assertIn("Missing Key value", response.json['error'])
def test_view_all_offices(self):
"""Tests GET Http method request on /offices endpoint"""
# Post, create an office first
self.client.post('api/v1/offices', data=json.dumps(self.office))
# Retrieve the office
response = self.client.get('api/v1/offices')
self.assertEqual(response.status_code, 200, "Should Return a 200 HTTP Status Code Response:Success")
expected_response_json = {
"data": [{
"id": 1,
'type': 'Senior',
'name': '<NAME>'
}],
"status": 200
}
# Converts to string
self.assertEqual(response.json, expected_response_json)
def test_view_all_offices_bad_request(self):
"""Tests malformed GET Http method request on /office endpoint"""
response = self.client.get('api/v1/ofices')
self.assertEqual(response.status_code, 404, "Should Return a 404 HTTP Status Code Response:Resource Not Found")
# Should return error message
self.assertEqual(response.json, self.error_default_not_found)
def test_view_specific_office(self):
"""Tests GET Http method request on /office/{:id} endpoint"""
# Post, add an office
self.client.post('api/v1/offices', data=json.dumps(self.office))
# Get data for specific office
response = self.client.get('api/v1/offices/1')
expected_response = {
"id": 1,
"name": "<NAME>",
"type": "Senior"
}
self.assertEqual(response.status_code, 200, "Should Return a 200 HTTP Status Code:Success")
# Returns Dict as string and compares if its in response
self.assertEqual(response.json['data'][0], expected_response)
def test_view_specific_office_invalid_id(self):
"""Tests malformed GET Http method request on /office/{:id} endpoint"""
response = self.client.get('api/v1/offices/{}'.format(4578))
self.assertEqual(response.status_code, 404, "Should Return a 404 HTTP Status Code Response:Bad Request")
# Should return error message
self.assertEqual(response.json['error'], "Invalid Id Not Found", "Should return resource not found response")
def test_view_specific_office_not_found(self):
"""Tests malformed GET Http method request on /office/{:id} endpoint"""
response = self.client.get('api/v1/offies/{}'.format(0))
self.assertEqual(response.status_code, 404, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json, self.error_default_not_found, "Should return resource not found response")
def test_view_specific_office_invalid_id_value_error(self):
"""Tests valid request but invalid data on DELETE request on /parties/{:id}/name endpoint"""
self.client.post('api/v1/offices', data=json.dumps(self.office))
response = self.client.get('api/v1/offices/e')
self.assertEqual(response.status_code, 400, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json['error'], 'Invalid Id',
'Should return not found response')
def test_edit_government_office(self):
"""Tests PATCH Http method request on /offices/{:id}/name endpoint"""
# Save Post First
self.client.post('api/v1/offices', data=json.dumps(self.office))
edit_request_json = {
"name": "<NAME>"
}
# Update Name
response = self.client.patch('api/v1/offices/{}/name'.format(1),
data=json.dumps(edit_request_json))
self.assertEqual(response.status_code, 200, "Should Return a 200 HTTP Status Code Response:Updated")
self.assertEqual(edit_request_json.get('name'), response.json[0]['data'][0]['name'])
def test_edit_office_invalid_id(self):
"""Tests invalid id on PATCH request on /offices/{:id}/name endpoint"""
edit_request_json = {
"name": "<NAME>"
}
response = self.client.patch('api/v1/offices/0/name', data=json.dumps(edit_request_json))
self.assertEqual(response.status_code, 404, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json['error'], 'Invalid Id Not Found',
'Should return invalid id response')
def test_edit_offices_not_found(self):
"""Tests valid but non existent id on PATCH request on /parties/{:id}/name endpoint"""
edit_request_json = {
"name": "Secretary"
}
response = self.client.patch('api/v1/offices/3/name', data=json.dumps(edit_request_json))
self.assertEqual(response.status_code, 404, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json['error'], 'Invalid Id Not Found',
'Should return not found response')
def test_edit_office_invalid_data(self):
"""Tests valid request but invalid data on PATCH request on /offices/{:id}/name endpoint"""
self.client.post('api/v1/offices', data=json.dumps(self.office))
edit_request_json = {
"name": "D"
}
response = self.client.patch('api/v1/offices/1/name', data=json.dumps(edit_request_json))
self.assertEqual(response.status_code, 400, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json['error'], 'Incorrect Data Received,Bad request',
'Should return not found response')
def test_edit_office_invalid_id_value_error(self):
"""Tests valid request but invalid data on PATCH request on /offices/{:id}/name endpoint"""
self.client.post('api/v1/offices', data=json.dumps(self.office))
edit_request_json = {
"name": "<NAME>"
}
response = self.client.patch('api/v1/offices/e/name', data=json.dumps(edit_request_json))
self.assertEqual(response.status_code, 400, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json['error'], 'Invalid Id',
'Should return not found response')
def test_delete_office(self):
"""Tests DELETE Http method request on /offices/{:id} endpoint"""
# Save Post First
self.client.post('api/v1/offices', data=json.dumps(self.office))
# Delete Party
response = self.client.delete('api/v1/offices/{0}'.format(1))
self.assertEqual(response.status_code, 200, "Should Return a 200 HTTP Status Code Response:Deleted")
self.assertEqual("Deleted Successfully", response.json['message'])
def test_delete_office_not_found(self):
""""Tests malformed DELETE Http method request on /offices/{:id} endpoint"""
# Save Post First
response = self.client.delete('api/v1/offices/{0}'.format(-1))
self.assertEqual(response.status_code, 404, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json['error'], 'Invalid Id Not Found', "Should return resource not found response")
def test_delete_office_invalid_id_value_error(self):
"""Tests valid request but invalid data on DELETE request on /offices/{:id}/name endpoint"""
self.client.post('api/v1/offices', data=json.dumps(self.office))
response = self.client.delete('api/v1/offices/e')
self.assertEqual(response.status_code, 400, "Should Return a 404 HTTP Status Code Response:Not Found")
# Should return error message
self.assertEqual(response.json['error'], 'Invalid Id')
def test_no_duplication(self):
# Create
self.client.post('api/v1/offices', data=json.dumps(self.office))
response = self.client.post('api/v1/offices', data=json.dumps(self.office))
        self.assertEqual(response.status_code, 409, "Should Return a 409 HTTP Status Code Response:Conflict")
self.assertEqual(response.json['error'], "Office Already Exists", "Should Create Non Duplicate Ids")
| 2.875 | 3 |
validate_signature/serializers.py | Arquitectura-de-Software-UFPS-2022-I/-validate-signature-api-documentation- | 0 | 12788779 | from rest_framework import serializers
class ValidateSerializer(serializers.Serializer):
class_label = serializers.IntegerField()
confidence = serializers.FloatField() | 1.890625 | 2 |
my_phonolammps/_lammps.py | araghukas/my-phonolammps | 1 | 12788780 | <reponame>araghukas/my-phonolammps<filename>my_phonolammps/_lammps.py
"""Here the original lammps class is wrapped to allow specifying the `modpath`"""
from lammps import *
from lammps import lammps
class MyLammps(lammps):
"""new version of the lammps class"""
# path to the directory containing liblammps.so or liblammps.dylib
MODPATH: str = None
def __init__(self, name="", cmdargs=None, ptr=None, comm=None):
        self.has_mpi4py = False  # default; only set True below for supported mpi4py versions
        try:
from mpi4py import MPI
from mpi4py import __version__ as mpi4py_version
if mpi4py_version.split('.')[0] in ['2', '3']:
self.has_mpi4py = True
except ModuleNotFoundError:
self.has_mpi4py = False
if self.MODPATH is None:
# instantiate the original class as usual
lammps.__init__(self, name, cmdargs, ptr, comm)
else:
# instantiate with a specific `modpath`
modpath = os.path.abspath(self.MODPATH)
try:
self._create_instance(modpath, name, cmdargs, ptr, comm)
except OSError:
raise FileNotFoundError(
f"LAMMPS shared library not found in directory {modpath}"
)
def _create_instance(self, modpath: str,
name="", cmdargs=None, ptr=None, comm=None) -> None:
"""
Run the original instance creation code, except `modpath` is now arbitrary.
Specifying `modpath` avoids the issue of not finding the LAMMPS shared library,
even though it may exist elsewhere.
:param modpath: path to the directory containing "liblammps.so" or "liblammps.dylib"
"""
self.comm = comm
self.opened = 0
# determine module location
self.lib = None
self.lmp = None
# if a pointer to a LAMMPS object is handed in,
# all symbols should already be available
try:
if ptr: self.lib = CDLL("", RTLD_GLOBAL)
except:
self.lib = None
# load liblammps.so unless name is given
# if name = "g++", load liblammps_g++.so
# try loading the LAMMPS shared object from the location
# of my_lammps.py with an absolute path,
# so that LD_LIBRARY_PATH does not need to be set for regular install
# fall back to loading with a relative path,
# typically requires LD_LIBRARY_PATH to be set appropriately
if any([f.startswith('liblammps') and f.endswith('.dylib') for f in os.listdir(modpath)]):
lib_ext = ".dylib"
else:
lib_ext = ".so"
if not self.lib:
try:
if not name:
self.lib = CDLL(join(modpath, "liblammps" + lib_ext), RTLD_GLOBAL)
else:
self.lib = CDLL(join(modpath, "liblammps_%s" % name + lib_ext),
RTLD_GLOBAL)
except:
if not name:
self.lib = CDLL("liblammps" + lib_ext, RTLD_GLOBAL)
else:
self.lib = CDLL("liblammps_%s" % name + lib_ext, RTLD_GLOBAL)
# define ctypes API for each library method
# NOTE: should add one of these for each lib function
self.lib.lammps_extract_box.argtypes = \
[c_void_p, POINTER(c_double), POINTER(c_double),
POINTER(c_double), POINTER(c_double), POINTER(c_double),
POINTER(c_int), POINTER(c_int)]
self.lib.lammps_extract_box.restype = None
self.lib.lammps_reset_box.argtypes = \
[c_void_p, POINTER(c_double), POINTER(c_double), c_double, c_double, c_double]
self.lib.lammps_reset_box.restype = None
self.lib.lammps_gather_atoms.argtypes = \
[c_void_p, c_char_p, c_int, c_int, c_void_p]
self.lib.lammps_gather_atoms.restype = None
self.lib.lammps_gather_atoms_concat.argtypes = \
[c_void_p, c_char_p, c_int, c_int, c_void_p]
self.lib.lammps_gather_atoms_concat.restype = None
self.lib.lammps_gather_atoms_subset.argtypes = \
[c_void_p, c_char_p, c_int, c_int, c_int, POINTER(c_int), c_void_p]
self.lib.lammps_gather_atoms_subset.restype = None
self.lib.lammps_scatter_atoms.argtypes = \
[c_void_p, c_char_p, c_int, c_int, c_void_p]
self.lib.lammps_scatter_atoms.restype = None
self.lib.lammps_scatter_atoms_subset.argtypes = \
[c_void_p, c_char_p, c_int, c_int, c_int, POINTER(c_int), c_void_p]
self.lib.lammps_scatter_atoms_subset.restype = None
self.lib.lammps_find_pair_neighlist.argtypes = [c_void_p, c_char_p, c_int, c_int, c_int]
self.lib.lammps_find_pair_neighlist.restype = c_int
self.lib.lammps_find_fix_neighlist.argtypes = [c_void_p, c_char_p, c_int]
self.lib.lammps_find_fix_neighlist.restype = c_int
self.lib.lammps_find_compute_neighlist.argtypes = [c_void_p, c_char_p, c_int]
self.lib.lammps_find_compute_neighlist.restype = c_int
self.lib.lammps_neighlist_num_elements.argtypes = [c_void_p, c_int]
self.lib.lammps_neighlist_num_elements.restype = c_int
self.lib.lammps_neighlist_element_neighbors.argtypes = [c_void_p, c_int, c_int,
POINTER(c_int), POINTER(c_int),
POINTER(POINTER(c_int))]
self.lib.lammps_neighlist_element_neighbors.restype = None
# if no ptr provided, create an instance of LAMMPS
# don't know how to pass an MPI communicator from PyPar
# but we can pass an MPI communicator from mpi4py v2.0.0 and later
# no_mpi call lets LAMMPS use MPI_COMM_WORLD
# cargs = array of C strings from args
# if ptr, then are embedding Python in LAMMPS input script
# ptr is the desired instance of LAMMPS
# just convert it to ctypes ptr and store in self.lmp
if not ptr:
# with mpi4py v2, can pass MPI communicator to LAMMPS
# need to adjust for type of MPI communicator object
# allow for int (like MPICH) or void* (like OpenMPI)
if comm:
if not self.has_mpi4py:
raise Exception('Python mpi4py version is not 2 or 3')
if MyLammps.MPI._sizeof(MyLammps.MPI.Comm) == sizeof(c_int):
MPI_Comm = c_int
else:
MPI_Comm = c_void_p
narg = 0
cargs = 0
if cmdargs:
cmdargs.insert(0, "my_lammps.py")
narg = len(cmdargs)
for i in range(narg):
if type(cmdargs[i]) is str:
cmdargs[i] = cmdargs[i].encode()
cargs = (c_char_p * narg)(*cmdargs)
self.lib.lammps_open.argtypes = [c_int, c_char_p * narg, \
MPI_Comm, c_void_p()]
else:
self.lib.lammps_open.argtypes = [c_int, c_int, \
MPI_Comm, c_void_p()]
self.lib.lammps_open.restype = None
self.opened = 1
self.lmp = c_void_p()
comm_ptr = MyLammps.MPI._addressof(comm)
comm_val = MPI_Comm.from_address(comm_ptr)
self.lib.lammps_open(narg, cargs, comm_val, byref(self.lmp))
else:
if self.has_mpi4py:
from mpi4py import MPI
self.comm = MPI.COMM_WORLD
self.opened = 1
if cmdargs:
cmdargs.insert(0, "my_lammps.py")
narg = len(cmdargs)
for i in range(narg):
if type(cmdargs[i]) is str:
cmdargs[i] = cmdargs[i].encode()
cargs = (c_char_p * narg)(*cmdargs)
self.lmp = c_void_p()
self.lib.lammps_open_no_mpi(narg, cargs, byref(self.lmp))
else:
self.lmp = c_void_p()
self.lib.lammps_open_no_mpi(0, None, byref(self.lmp))
# could use just this if LAMMPS lib interface supported it
# self.lmp = self.lib.lammps_open_no_mpi(0,None)
else:
# magic to convert ptr to ctypes ptr
if sys.version_info >= (3, 0):
# Python 3 (uses PyCapsule API)
pythonapi.PyCapsule_GetPointer.restype = c_void_p
pythonapi.PyCapsule_GetPointer.argtypes = [py_object, c_char_p]
self.lmp = c_void_p(pythonapi.PyCapsule_GetPointer(ptr, None))
else:
# Python 2 (uses PyCObject API)
pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object]
self.lmp = c_void_p(pythonapi.PyCObject_AsVoidPtr(ptr))
# optional numpy support (lazy loading)
self._numpy = None
# set default types
self.c_bigint = get_ctypes_int(self.extract_setting("bigint"))
self.c_tagint = get_ctypes_int(self.extract_setting("tagint"))
self.c_imageint = get_ctypes_int(self.extract_setting("imageint"))
self._installed_packages = None
# add way to insert Python callback for fix external
self.callback = {}
self.FIX_EXTERNAL_CALLBACK_FUNC = CFUNCTYPE(None, py_object, self.c_bigint, c_int,
POINTER(self.c_tagint),
POINTER(POINTER(c_double)),
POINTER(POINTER(c_double)))
self.lib.lammps_set_fix_external_callback.argtypes = [c_void_p, c_char_p,
self.FIX_EXTERNAL_CALLBACK_FUNC,
py_object]
self.lib.lammps_set_fix_external_callback.restype = None
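if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): point MODPATH at the directory
    # containing liblammps.so / liblammps.dylib (the path below is a placeholder), then instantiate.
    MyLammps.MODPATH = "/path/to/lammps/lib"  # adjust to the local LAMMPS installation
    lmp = MyLammps()
    print("LAMMPS wrapper created:", lmp)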
| 2.4375 | 2 |
jabledl/downloader.py | Yooootsuba/jabledl | 10 | 12788781 | <reponame>Yooootsuba/jabledl
import time
import requests
import threading
class Downloader:
def __init__(self, segments, request_headers, requests_callback):
self.threads = []
self.threads_count = 0
self.threads_limit = 50
self.segments = segments
self.segments_count = len(segments)
self.requests_headers = request_headers
self.requests_callback = requests_callback
def wait_threads(self):
for thread in self.threads:
thread.join()
self.threads_count = 0
self.threads.clear()
def save(self, filename, response):
with open(filename, 'wb') as f:
f.write(response.content)
def thread_job(self, filename, segment):
try:
response = requests.get(segment, headers = self.requests_headers)
if response.status_code != 200:
raise Exception
except Exception:
time.sleep(5)
self.thread_job(filename, segment)
else:
self.save(filename, response)
self.requests_callback()
def download(self):
for i in range(self.segments_count):
if self.threads_count > self.threads_limit:
self.wait_threads()
self.threads.append(threading.Thread(target = self.thread_job, args = (str(i) + '.ts', self.segments[i])))
self.threads[-1].start()
self.threads_count += 1
self.wait_threads()
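if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): the segment URLs, headers and the
    # callback are placeholders; download() saves each segment as "<index>.ts" in the CWD.
    example_segments = ["https://example.com/seg0.ts", "https://example.com/seg1.ts"]
    example_headers = {"User-Agent": "Mozilla/5.0"}
    downloader = Downloader(example_segments, example_headers, lambda: print("segment finished"))
    # downloader.download()  # uncomment to actually fetch the (placeholder) segments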
| 3.15625 | 3 |
02 - Curso Em Video/Aula 15/E - 069.py | GabrielTrentino/Python_Basico | 0 | 12788782 | maior18 = 0
homens = 0
mulheres = 0
# Note: prompts and outputs below are in Portuguese (beginner exercise). H/M = male/female, S/N = yes/no.
while True:
idade = int(input('Digite a idade: '))
sexo = str(input('Qual o sexo? [H/M] ')).strip().upper()[0]
if idade > 18:
maior18 += 1
if sexo == 'H':
homens += 1
if sexo == 'M' and idade < 20:
mulheres += 1
opcao = str(input('Deseja Continuar? [S/N] ')).strip().upper()[0]
if opcao == 'N':
break
print('A) {} pessoas acima de 18 anos'.format(maior18))
print('B) {} homens cadastrados'.format(homens))
print('C) {} mulheres com menos de 20 anos'.format(mulheres)) | 3.734375 | 4 |
src/inmanta/compiler/help/explainer.py | inmanta/inmanta-core | 6 | 12788783 | """
Copyright 2018 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: <EMAIL>
"""
import os
import re
from abc import ABC, abstractmethod
from typing import Generic, List, Mapping, Optional, Sequence, Set, Type, TypeVar
from jinja2 import Environment, PackageLoader
from inmanta.ast import CompilerException, ModifiedAfterFreezeException
from inmanta.ast.statements import AssignStatement
from inmanta.ast.statements.generator import Constructor
from inmanta.execute.runtime import OptionVariable
from inmanta.module import ModuleV2InV1PathException
def bold(content: Optional[str] = None) -> str:
if content is None:
return "\033[1m"
return "\033[1m{0}\033[0m".format(content)
def underline(content: Optional[str] = None) -> str:
if content is None:
return "\033[4m"
return "\033[4m{0}\033[0m".format(content)
def noformat(content: Optional[str] = None) -> str:
return "\033[0m"
CUSTOM_FILTERS = {"bold": bold, "underline": underline, "noformat": noformat}
class ExplainerABC(ABC):
"""
Abstract base class for explainers. This class is purposely kept non-Generic to present a public interface that is invariant
of the compiler exception type. This allows correct typing of sequences of explainers.
"""
@abstractmethod
def explain(self, problem: CompilerException) -> List[str]:
...
Explainable = TypeVar("Explainable", bound=CompilerException)
class Explainer(Generic[Explainable], ExplainerABC, ABC):
"""
Abstract explainer, Generic in the compiler exception subtype to allow correct typing of the exception for subtype-specific
explanation logic.
Concrete subclasses must not be generic in the exception type because this would break explainable checking.
"""
explainable_type: Type[Explainable]
def explain(self, problem: CompilerException) -> List[str]:
"""
Returns a list of explanations for this exception. If neither the exception or any of its causes (recursively)
is explainable by this explainer, returns an empty list.
"""
allcauses: Set[CompilerException] = set()
work: List[CompilerException] = [problem]
while work:
w = work.pop()
allcauses.add(w)
work.extend(w.get_causes())
return [self.do_explain(c) for c in allcauses if isinstance(c, self.explainable_type)]
@abstractmethod
def do_explain(self, problem: Explainable) -> str:
"""
Explain a single exception, explainable by this explainer. Does not recurse on its causes.
"""
...
class JinjaExplainer(Explainer[Explainable], ABC):
"""
Abstract explainer for explanations based on a Jinja template.
:param template: path to the Jinja template to use for the explanation.
"""
def __init__(self, template: str) -> None:
self.template: str = template
def get_template(self, problem: Explainable) -> str:
path = os.path.join(os.path.dirname(__file__), self.template)
with open(path, "r", encoding="utf-8") as fh:
return fh.read()
def do_explain(self, problem: Explainable) -> str:
env = Environment(loader=PackageLoader("inmanta.compiler.help"))
for name, filter in CUSTOM_FILTERS.items():
env.filters[name] = filter
template = env.get_template(self.template)
return template.render(**self.get_arguments(problem))
@abstractmethod
def get_arguments(self, problem: Explainable) -> Mapping[str, object]:
"""
Returns a mapping for names that are used in the Jinja template.
"""
...
class ModifiedAfterFreezeExplainer(JinjaExplainer[ModifiedAfterFreezeException]):
"""
Explainer for ModifiedAfterFreezeException.
"""
explainable_type: Type[ModifiedAfterFreezeException] = ModifiedAfterFreezeException
def __init__(self) -> None:
super().__init__("modified_after_freeze.j2")
def build_reverse_hint(self, problem: ModifiedAfterFreezeException) -> str:
if isinstance(problem.stmt, AssignStatement):
return "%s.%s = %s" % (
problem.stmt.rhs.pretty_print(),
problem.attribute.get_name(),
problem.stmt.lhs.pretty_print(),
)
if isinstance(problem.stmt, Constructor):
# find right parameter:
attr = problem.attribute.end.get_name()
if attr not in problem.stmt.get_attributes():
attr_rhs = "?"
else:
attr_rhs = problem.stmt.get_attributes()[attr].pretty_print()
return "%s.%s = %s" % (attr_rhs, problem.attribute.get_name(), problem.stmt.pretty_print())
def get_arguments(self, problem: ModifiedAfterFreezeException) -> Mapping[str, object]:
return {
"relation": problem.attribute.get_name(),
"instance": problem.instance,
"values": problem.resultvariable.value,
"value": problem.value,
"location": problem.location,
"reverse": problem.reverse,
"reverse_example": "" if not problem.reverse else self.build_reverse_hint(problem),
"optional": isinstance(problem.resultvariable, OptionVariable),
}
class ModuleV2InV1PathExplainer(JinjaExplainer[ModuleV2InV1PathException]):
"""
Explainer for ModuleV2InV1PathException
"""
explainable_type: Type[ModuleV2InV1PathException] = ModuleV2InV1PathException
def __init__(self) -> None:
super().__init__("module_v2_in_v1_path.j2")
def get_arguments(self, problem: ModuleV2InV1PathException) -> Mapping[str, object]:
v2_source_configured: bool = problem.project.module_v2_source_configured() if problem.project is not None else False
return {
"name": problem.module.name,
"path": problem.module.path,
"project": problem.project is not None,
"v2_source_configured": v2_source_configured,
}
def escape_ansi(line: str) -> str:
ansi_escape = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]")
return ansi_escape.sub("", line)
class ExplainerFactory:
def get_explainers(self) -> Sequence[ExplainerABC]:
return [ModifiedAfterFreezeExplainer(), ModuleV2InV1PathExplainer()]
def explain(self, problem: CompilerException) -> List[str]:
return [explanation for explainer in self.get_explainers() for explanation in explainer.explain(problem)]
def explain_and_format(self, problem: CompilerException, plain: bool = True) -> Optional[str]:
"""
:param plain: remove tty color codes, only return plain text
"""
raw = self.explain(problem)
if not raw:
return None
else:
pre = """
\033[1mException explanation
=====================\033[0m
"""
pre += "\n\n".join(raw)
if not plain:
return pre
else:
return escape_ansi(pre)
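# Hedged usage sketch (not part of the original module): a thin helper showing the intended
# entry point; the function name is illustrative, not an existing inmanta API.
def print_explanation_example(problem: CompilerException, plain: bool = True) -> None:
    """Format and print the explanation for a compiler exception, if any explainer matches."""
    explanation = ExplainerFactory().explain_and_format(problem, plain=plain)
    if explanation is not None:
        print(explanation)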
| 1.953125 | 2 |
pyorderedfuzzy/ofmodels/ofautoreg.py | amarszalek/PyOrderedFuzzyTools | 0 | 12788784 | <reponame>amarszalek/PyOrderedFuzzyTools
# -*- coding: utf-8 -*-
import numpy as np
from pyorderedfuzzy.ofnumbers.ofnumber import OFNumber
from pyorderedfuzzy.ofmodels.ofseries import OFSeries
from scipy.optimize import minimize
from pyorderedfuzzy.ofmodels.oflinreg import ofns2array, array2ofns
from pyorderedfuzzy.ofmodels._objective import obj_func_ar_ls, obj_func_ar_cml
__author__ = "amarszalek"
class OFAutoRegressive(object):
def __init__(self, order=1, intercept=True, coef=[], initial=[]):
super(OFAutoRegressive, self).__init__()
self.intercept = intercept
self.order = order
self.coef = OFSeries(coef)
self.initial = initial
self.residuals = OFSeries([])
def fit(self, ofseries, order, intercept=True, method='ls', solver='L-BFGS-B', options={}):
dim = ofseries[0].branch_f.dim
self.order = order
self.intercept = intercept
self.initial = ofseries[-order-1:]
n_coef = order
if self.intercept:
n_coef += 1
# initial coef
ran = np.random.random(n_coef)
coef = [OFNumber(np.ones(dim)*r, np.ones(dim)*r) for r in ran]
if solver == 'LU':
pass
elif solver == 'L-BFGS-B':
if options == {}:
options = {'disp': None, 'gtol': 1.0e-12, 'eps': 1e-08, 'maxiter': 1000, 'ftol': 2.22e-09}
p0 = ofns2array(coef)
ofns = ofns2array(ofseries)
args = (order, n_coef, dim, ofns, intercept)
if method == 'ls':
res = minimize(fun_obj_ols, p0, args=args, method='L-BFGS-B', jac=True, options=options)
coef = array2ofns(res.x, n_coef, dim)
elif method == 'cml':
p0 = np.concatenate((p0, np.ones(2*dim)*np.random.random()))
res = minimize(fun_obj_cmle, p0, args=args, method='L-BFGS-B', jac=True, options=options)
coef_s = array2ofns(res.x, n_coef+1, dim)
coef = coef_s[:-1]
self.sig2 = coef_s[-1]*coef_s[-1]
else:
raise ValueError('wrong method')
self.coef = OFSeries(coef)
elif solver == 'CL-BFGS-B':
if options == {}:
options = {'disp': None, 'gtol': 1.0e-12, 'eps': 1e-08, 'maxiter': 1000, 'ftol': 2.22e-09}
p0 = ofns2array(coef)
ofns = ofns2array(ofseries)
args = (n_coef, dim, ofns, self.intercept)
if method == 'ls':
res = minimize(fun_obj_ols_c, p0, args=args, method='L-BFGS-B', jac=True, options=options)
coef = array2ofns(res.x, n_coef, dim)
elif method == 'cml':
p0 = np.concatenate((p0, np.ones(2 * dim) * np.random.random()))
res = minimize(fun_obj_cmle_c, p0, args=args, method='L-BFGS-B', jac=True, options=options)
coef_s = array2ofns(res.x, n_coef + 1, dim)
coef = coef_s[:-1]
self.sig2 = coef_s[-1]*coef_s[-1]
else:
raise ValueError('wrong method')
self.coef = OFSeries(coef)
else:
raise ValueError('wrong solver')
residuals = []
for i in range(order, len(ofseries)):
if i < order:
residuals.append(OFNumber(np.zeros(dim), np.zeros(dim)))
else:
pred = self.predict(1, initial=ofseries[i-order:i])
residuals.append(ofseries[i]-pred[0])
self.residuals = OFSeries(residuals)
def predict(self, n, initial=None, mean=None):
if initial is None:
initial = self.initial
predicted = []
for t in range(1, n+1):
if self.intercept:
y = self.coef[0]
for p in range(1, self.order+1):
y = y + self.coef[p] * initial[-p]
else:
y = self.coef[0] * initial[-1]
for p in range(1, self.order):
y = y + self.coef[p] * initial[-p-1]
if mean is not None:
y = y + mean
predicted.append(y)
initial.append(y)
return OFSeries(predicted)
def autoreg_bias(coef, past):
y = coef[0].copy()
for p in range(1, len(coef)):
y = y + coef[p] * past[-p]
return y
def autoreg_unbias(coef, past):
y = coef[0] * past[-1]
for p in range(1, len(coef)):
y = y + coef[p] * past[-p-1]
return y
def fun_obj_ols(p, order, n_coef, dim, ofns, intercept):
e = 0.0
n_cans = int(len(ofns)/(2 * dim))
can = ofns.reshape((n_cans, 2 * dim))
coef = p.reshape((n_coef, 2 * dim))
grad = np.zeros(len(p))
if intercept:
for i in range(order, n_cans):
r = can[i] - autoreg_bias(coef, can[i-order:i])
e += np.sum(r * r)
grad[:2 * dim] -= 2.0 * r
for j in range(1, n_coef):
grad[2 * dim * j:2 * dim * (j + 1)] -= 2 * r * can[i - j]
else:
for i in range(n_coef, n_cans):
r = can[i] - autoreg_unbias(coef, can[i-n_coef:i])
e += np.sum(r * r)
for j in range(n_coef):
grad[2 * dim * j:2 * dim * (j + 1)] -= 2 * r * can[i - j-1]
return e, grad
def fun_obj_ols_c(p, n_coef, dim, ofns, intercept):
if intercept:
res = obj_func_ar_ls(p, ofns, n_coef, dim * 2, 1)
else:
res = obj_func_ar_ls(p, ofns, n_coef, dim * 2, 0)
return res[0], np.array(res[1])
def fun_obj_cmle(p, order, n_coef, dim, ofns, intercept):
pp = p[:-2*dim]
ps = p[-2*dim:]
n_cans = int(len(ofns) / (2 * dim))
can = ofns.reshape((n_cans, 2 * dim))
coef = pp.reshape((n_coef, 2 * dim))
e = np.sum(0.5*(n_cans - order)*np.log(ps*ps))
grad = np.zeros(len(p))
grad[-2*dim:] = (n_cans - order)/ps
if intercept:
for i in range(order, n_cans):
r = can[i] - autoreg_bias(coef, can[i - order:i])
grad[-2 * dim:] -= (r*r)/(ps*ps*ps)
e += np.sum((r * r)/(2.0*ps*ps))
grad[:2 * dim] -= r/(ps*ps)
for j in range(1, n_coef):
grad[2 * dim * j:2 * dim * (j + 1)] -= (r/(ps*ps)) * can[i - j]
else:
for i in range(n_coef, n_cans):
r = can[i] - autoreg_unbias(coef, can[i - n_coef:i])
grad[-2 * dim:] -= (r * r) / (ps * ps * ps)
e += np.sum((r * r) / (2.0 * ps * ps))
for j in range(n_coef):
grad[2 * dim * j:2 * dim * (j + 1)] -= (r/(ps*ps)) * can[i - j - 1]
return e, grad
def fun_obj_cmle_c(p, n_coef, dim, ofns, intercept):
if intercept:
res = obj_func_ar_cml(p, ofns, n_coef, dim * 2, 1)
else:
res = obj_func_ar_cml(p, ofns, n_coef, dim * 2, 0)
return res[0], np.array(res[1])
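if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): fit a first-order OFAutoRegressive
    # model on a toy series and forecast two steps ahead. The OFNumber(branch_f, branch_g)
    # construction mirrors the one used inside fit(); the dimension and values are arbitrary.
    dim = 11
    toy = OFSeries([OFNumber(np.ones(dim) * v, np.ones(dim) * v) for v in np.sin(np.arange(30))])
    model = OFAutoRegressive()
    model.fit(toy, order=1, intercept=True, method='ls', solver='L-BFGS-B')
    forecast = model.predict(2)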
| 2.28125 | 2 |
rts/game_MC/model_unit_cmd_lstm.py | BOBSTK/ELF | 0 | 12788785 | <reponame>BOBSTK/ELF
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from torch.autograd import Variable
from copy import deepcopy
from collections import Counter
import numpy as np
from rlpytorch import Model, GRUActorCritic
from trunk import MiniRTSNet
from trunk import ProcessSet, DataProcess,TargetAttention,TowerAttention
from td_sampler import TD_Sampler
def flattern(x):
return x.view(x.size(0), -1)
class Model_GRUActorCritic(Model):
def __init__(self, args):
super(Model_GRUActorCritic, self).__init__(args)
self._init(args)
def _init(self, args):
params = args.params
assert isinstance(params["num_action"], int), "num_action has to be a number. action = " + str(params["num_action"])
self.params = params
#self.net = MiniRTSNet(args, output1d=False)
self.na = params["num_action"]
self.num_unit = params["num_unit_type"]
self.num_planes = params["num_planes"]
self.num_cmd_type = params["num_cmd_type"]
self.mapx = params["map_x"]
self.mapy = params["map_y"]
out_dim = self.num_planes * self.mapx * self.mapy
self.value_test = nn.Linear(256,1)
# self.relu = nn.LeakyReLU(0.1)
self.softmax = nn.Softmax()
self.dataProcess = DataProcess(args,ProcessSet(args,1,3,30),ProcessSet(args,2, 9, 100),ProcessSet(args,20, 8, 100),ProcessSet(args,10, 10, 100))
self.round_select = nn.Linear(256,3)
self.enemy_select = TargetAttention(args,256,100)
self.tower_select = TowerAttention(args,256,100,100)
self.td_sampler = TD_Sampler(0.1) # epsilon
self.gru = nn.GRU(
            input_size=64, # input feature dimension
hidden_size=256, # rnn hidden unit
batch_first = True,
)
def get_define_args():
return MiniRTSNet.get_define_args()
def forward(self, x, hiddens):
batch_size = x["s_global"].size(0)
self.dataProcess.set_volatile(self.volatile)
base,radar,tower,tower_embedding,enemy,enemy_embedding,global_stat = self.dataProcess(x)
# import pdb
# pdb.set_trace()
hiddens = hiddens.view(1,batch_size,-1) # seq = 1
        output, hidden = self.gru(global_stat, hiddens) # passing None here would initialize h_0 to zeros
hidden = hidden.view(batch_size,-1)
global_stat = output[:, -1, :]
self.dataProcess.set_volatile(False)
value_test = self.value_test(global_stat)
round_prob = self.softmax(self.round_select(global_stat))
        round_choose = self.td_sampler.sample_with_eps(round_prob) # attack mode
# import pdb
# pdb.set_trace()
        enemy_attention = self.enemy_select(global_stat, enemy_embedding, x['s_enemy'], True) # attention over enemy units
        enemy_prob = self.softmax(enemy_attention)
        enemy_choose = self.td_sampler.sample_with_eps(enemy_prob) # attack target
        # gather the features of the selected targets
#batch_size = enemy_embedding.data.shape[0]
enemy_embedding_list = []
enemy_mask_list = []
for i in range(batch_size):
enemy_embedding_list.append(enemy_embedding[i][enemy_choose[i].data])
enemy_mask_list.append(x['s_enemy_mask'][i][enemy_choose[i].data])
enemy_embedding_select = Variable(torch.cat(enemy_embedding_list).data,volatile = self.volatile)
enemy_embedding_select = enemy_embedding_select.view(batch_size,-1)
# import pdb
# pdb.set_trace()
enemy_mask_select = torch.cat(enemy_mask_list)
enemy_mask_select = enemy_mask_select.view(batch_size,-1)
        # get the tower attention distribution and mask based on the selected target features
tower_attention = self.tower_select(global_stat,enemy_embedding_select,enemy_mask_select,tower_embedding,x['s_tower'],True)
tower_prob = self.softmax(tower_attention)
# tower_attention = Variable(tower_attention.data)
        tower_choose = self.td_sampler.sample_with_eps(tower_prob) # defense tower
        # import pdb
        # pdb.set_trace()
return dict(V=value_test, uloc_prob=tower_prob, tloc_prob=enemy_prob, ct_prob = round_prob,action_type=1),dict(uloc = tower_choose.data,uloc_prob=tower_prob.data,tloc=enemy_choose.data,tloc_prob=enemy_prob.data,ct=round_choose.data,ct_prob = round_prob.data),hidden
# return dict(V=value_test, uloc_prob=tower_attention, tloc_prob=enemy_attention, ct_prob = round_prob,action_type=1)
#return dict(V=value, uloc_prob=unit_locs, tloc_prob=target_locs, ct_prob = cmd_types,action_type=1)
# Format: key, [model, method]
# if method is None, fall back to default mapping from key to method
Models = {
"actor_critic": [Model_GRUActorCritic, GRUActorCritic],
}
Defaults = {
#"sample_nodes": "uloc_prob,uloc;tloc_prob,tloc;ct_prob,ct;bt_prob,bt",
#"policy_action_nodes": "uloc_prob,uloc;tloc_prob,tloc;ct_prob,ct;bt_prob,bt",
"sample_nodes": "uloc_prob,uloc;tloc_prob,tloc;ct_prob,ct",
"policy_action_nodes": "uloc_prob,uloc;tloc_prob,tloc;ct_prob,ct",
"arch" : "cccc;-,64,64,64,-"
}
| 1.929688 | 2 |
egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/test_emformer.py | zhu-han/icefall | 0 | 12788786 | <reponame>zhu-han/icefall<gh_stars>0
#!/usr/bin/env python3
# Copyright 2022 Xiaomi Corp. (authors: <NAME>)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
To run this file, do:
cd icefall/egs/librispeech/ASR
python ./pruned_stateless_emformer_rnnt/test_emformer.py
"""
import torch
from emformer import Emformer, stack_states, unstack_states
def test_emformer():
N = 3
T = 300
C = 80
output_dim = 500
encoder = Emformer(
num_features=C,
output_dim=output_dim,
d_model=512,
nhead=8,
dim_feedforward=2048,
num_encoder_layers=20,
segment_length=16,
left_context_length=120,
right_context_length=4,
vgg_frontend=False,
)
x = torch.rand(N, T, C)
x_lens = torch.randint(100, T, (N,))
x_lens[0] = T
y, y_lens = encoder(x, x_lens)
y_lens = (((x_lens - 1) >> 1) - 1) >> 1
    assert y.size(0) == x.size(0)
assert y.size(1) == max(y_lens)
assert y.size(2) == output_dim
num_param = sum([p.numel() for p in encoder.parameters()])
print(f"Number of encoder parameters: {num_param}")
def test_emformer_streaming_forward():
N = 3
C = 80
output_dim = 500
encoder = Emformer(
num_features=C,
output_dim=output_dim,
d_model=512,
nhead=8,
dim_feedforward=2048,
num_encoder_layers=20,
segment_length=16,
left_context_length=120,
right_context_length=4,
vgg_frontend=False,
)
x = torch.rand(N, 23, C)
x_lens = torch.full((N,), 23)
y, y_lens, states = encoder.streaming_forward(x=x, x_lens=x_lens)
state_list = unstack_states(states)
states2 = stack_states(state_list)
for ss, ss2 in zip(states, states2):
for s, s2 in zip(ss, ss2):
assert torch.allclose(s, s2), f"{s.sum()}, {s2.sum()}"
def test_emformer_init_state():
num_encoder_layers = 20
d_model = 512
encoder = Emformer(
num_features=80,
output_dim=500,
d_model=512,
nhead=8,
dim_feedforward=2048,
num_encoder_layers=num_encoder_layers,
segment_length=16,
left_context_length=120,
right_context_length=4,
vgg_frontend=False,
)
init_state = encoder.get_init_state()
assert len(init_state) == num_encoder_layers
layer0_state = init_state[0]
assert len(layer0_state) == 4
assert layer0_state[0].shape == (
0, # max_memory_size
1, # batch_size
d_model, # input_dim
)
assert layer0_state[1].shape == (
encoder.model.left_context_length,
1, # batch_size
d_model, # input_dim
)
assert layer0_state[2].shape == layer0_state[1].shape
assert layer0_state[3].shape == (
1, # always 1
1, # batch_size
)
@torch.no_grad()
def main():
test_emformer()
test_emformer_streaming_forward()
test_emformer_init_state()
if __name__ == "__main__":
torch.manual_seed(20220329)
main()
| 1.796875 | 2 |
cifar_imagenet/Attack_PGD_ResNet20.py | minhtannguyen/RAdam | 0 | 12788787 | # -*- coding: utf-8 -*-
"""
CW, FGSM, and I-FGSM (PGD-style) attacks on a trained CNN (ResNet-20 on CIFAR by default)
"""
import torch._utils
# Compatibility shim: lets checkpoints serialized by newer PyTorch versions
# (which use _rebuild_tensor_v2) be loaded under older installations.
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.autograd import Variable
import copy
import math
import numpy as np
import os
import argparse
import torch.utils.data as data
#from utils import *
import numpy.matlib
import matplotlib.pyplot as plt
import pickle
# import cPickle
from collections import OrderedDict
import models.cifar as models
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='Fool EnResNet')
ap = parser.add_argument
ap('--method', help='Attack Method', type=str, default="ifgsm") # fgsm, ifgsm, cwl2
ap('--epsilon', help='Attack Strength', type=float, default=0.031) # May 2
ap('--num-ensembles', '--ne', default=2, type=int, metavar='N')
ap('--noise-coef', '--nc', default=0.1, type=float, metavar='W', help='forward noise (default: 0.0)')
ap('--noise-coef-eval', '--nce', default=0.0, type=float, metavar='W', help='forward noise (default: 0.)')
ap('--arch', '-a', metavar='ARCH', default='resnet20',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
ap('--depth', type=int, default=29, help='Model depth.')
ap('--block-name', type=str, default='BasicBlock',
help='the building block for Resnet and Preresnet: BasicBlock, Bottleneck (default: Basicblock for cifar10/cifar100)')
ap('--cardinality', type=int, default=8, help='Model cardinality (group).')
ap('--widen-factor', type=int, default=4, help='Widen factor. 4 -> 64, 8 -> 128, ...')
ap('--growthRate', type=int, default=12, help='Growth rate for DenseNet.')
ap('--compressionRate', type=int, default=2, help='Compression Rate (theta) for DenseNet.')
ap('--drop', '--dropout', default=0, type=float, metavar='Dropout',
   help='Dropout ratio (added here because opt.drop is referenced by the resnext/densenet/wrn builders below)')
ap('--feature_vec', default='x', type=str)
ap('-c', '--checkpoint', default='checkpoint', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
ap('-d', '--dataset', default='cifar10', type=str)
ap('--eta', default=1.0, type=float, help='eta in HOResNet')
ap('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
opt = parser.parse_args()
def conv3x3(in_planes, out_planes, stride=1):
"""
3x3 convolution with padding
"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
if __name__ == '__main__':
"""
Load the trained DNN, and attack the DNN, finally save the adversarial images
"""
# Model
if opt.dataset == 'cifar10':
dataloader = dset.CIFAR10
num_classes = 10
else:
dataloader = dset.CIFAR100
num_classes = 100
print("==> creating model '{}'".format(opt.arch))
if opt.arch.startswith('resnext'):
net = models.__dict__[opt.arch](
cardinality=opt.cardinality,
num_classes=num_classes,
depth=opt.depth,
widen_factor=opt.widen_factor,
dropRate=opt.drop,
)
elif opt.arch.startswith('densenet'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
growthRate=opt.growthRate,
compressionRate=opt.compressionRate,
dropRate=opt.drop,
)
elif opt.arch.startswith('wrn'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
widen_factor=opt.widen_factor,
dropRate=opt.drop,
)
elif opt.arch.startswith('resnet'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
block_name=opt.block_name,
)
elif opt.arch.startswith('preresnet'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
block_name=opt.block_name,
)
elif opt.arch.startswith('horesnet'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
eta=opt.eta,
block_name=opt.block_name,
feature_vec=opt.feature_vec
)
elif opt.arch.startswith('hopreresnet'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
eta=opt.eta,
block_name=opt.block_name,
feature_vec=opt.feature_vec
)
else:
net = models.__dict__[opt.arch](num_classes=num_classes)
# Load the model
print('==> Resuming from checkpoint..')
assert os.path.isfile(opt.checkpoint), 'Error: no checkpoint directory found!'
opt.checkpoint_dir = os.path.dirname(opt.checkpoint)
checkpoint = torch.load(opt.checkpoint)
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
net.load_state_dict(checkpoint['state_dict'])
net = net.cuda()
epsilon = opt.epsilon
attack_type = opt.method
# Load the original test data
print('==> Load the clean image')
root = './data'
download = False
kwargs = {'num_workers':1, 'pin_memory':True}
batchsize_test = 1000
if attack_type == 'cw':
batchsize_test = 1
print('Batch size of the test set: ', batchsize_test)
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
test_set = dataloader(root='./data', train=False, download=False, transform=transform_test)
test_loader = data.DataLoader(test_set, batch_size=batchsize_test, shuffle=False, num_workers=1, pin_memory=True)
criterion = nn.CrossEntropyLoss()
#--------------------------------------------------------------------------
# Testing
# images: the original images
# labels: labels of the original images
# images_adv: adversarial image
# labels_pred: the predicted labels of the adversarial images
# noise: the added noise
#--------------------------------------------------------------------------
images, labels, images_adv, labels_pred, noise = [], [], [], [], []
total_fooled = 0; total_correct_classified = 0
if attack_type == 'fgsm':
for batch_idx, (x1, y1_true) in enumerate(test_loader):
#if batch_idx < 2:
x_Test = x1.numpy()
#print x_Test.min(), x_Test.max()
#x_Test = ((x_Test - x_Test.min())/(x_Test.max() - x_Test.min()) - 0.5)*2
#x_Test = (x_Test - x_Test.min() )/(x_Test.max() - x_Test.min())
y_Test = y1_true.numpy()
#x = Variable(torch.cuda.FloatTensor(x_Test.reshape(1, 1, 28, 28)), requires_grad=True)
x = Variable(torch.cuda.FloatTensor(x_Test.reshape(batchsize_test, 3, 32, 32)), requires_grad=True)
y = Variable(torch.cuda.LongTensor(y_Test), requires_grad=False)
# Classification before perturbation
pred_tmp = net(x)
y_pred = np.argmax(pred_tmp.cpu().data.numpy())
loss = criterion(pred_tmp, y)
# Attack
net.zero_grad()
if x.grad is not None:
x.grad.data.fill_(0)
loss.backward()
x_val_min = 0.0
x_val_max = 1.0
x.grad.sign_()
x_adversarial = x + epsilon*x.grad
x_adversarial = torch.clamp(x_adversarial, x_val_min, x_val_max)
x_adversarial = x_adversarial.data
# Classify the perturbed data
x_adversarial_tmp = Variable(x_adversarial)
pred_tmp = net(x_adversarial_tmp)
y_pred_adversarial = np.argmax(pred_tmp.cpu().data.numpy(), axis=1)
for i in range(len(x_Test)):
#print y_pred_adversarial
if y_Test[i] == y_pred_adversarial[i]:
#if y_Test == y_pred_adversarial:
total_correct_classified += 1
for i in range(len(x_Test)):
# Save the perturbed data
images.append(x_Test[i, :, :, :]) # Original image
images_adv.append(x_adversarial.cpu().numpy()[i, :, :, :]) # Perturbed image
noise.append(x_adversarial.cpu().numpy()[i, :, :, :]-x_Test[i, :, :, :]) # Noise
labels.append(y_Test[i])
labels_pred.append(y_pred_adversarial[i])
elif attack_type == 'ifgsm':
for batch_idx, (x1, y1_true) in enumerate(test_loader):
#if batch_idx < 100:
#x_Test = (x_Test - x_Test.min())/(x_Test.max()-x_Test.min())
x_Test = ((x1 - x1.min())/(x1.max() - x1.min()) - 0.5)*2
x_Test = x_Test.numpy()
y_Test = y1_true.numpy()
x = Variable(torch.cuda.FloatTensor(x_Test.reshape(batchsize_test, 3, 32, 32)), requires_grad=True)
y = Variable(torch.cuda.LongTensor(y_Test), requires_grad=False)
# Classification before perturbation
pred_tmp = net(x)
y_pred = np.argmax(pred_tmp.cpu().data.numpy())
loss = criterion(pred_tmp, y)
# Attack
alpha = epsilon
#iteration = 10
iteration = 20
x_val_min = 0.; x_val_max = 1.
epsilon1 = 0.031
# Helper function
def where(cond, x, y):
"""
code from :
https://discuss.pytorch.org/t/how-can-i-do-the-operation-the-same-as-np-where/1329/8
"""
cond = cond.float()
return (cond*x) + ((1-cond)*y)
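            # e.g. where(x_adv > x + epsilon1, x + epsilon1, x_adv) below projects
            # each pixel of the adversarial example back onto the L-infinity ball
            # of radius epsilon1 around the clean input x.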
# Random perturbation
#x = x + torch.zeros_like(x).uniform_(-epsilon1, epsilon1) # May 2
x_adv = Variable(x.data, requires_grad=True)
for i in range(iteration):
h_adv = net(x_adv)
loss = criterion(h_adv, y)
net.zero_grad()
if x_adv.grad is not None:
x_adv.grad.data.fill_(0)
loss.backward()
x_adv.grad.sign_()
x_adv = x_adv + alpha*x_adv.grad
x_adv = where(x_adv > x+epsilon1, x+epsilon1, x_adv)
x_adv = where(x_adv < x-epsilon1, x-epsilon1, x_adv)
x_adv = torch.clamp(x_adv, x_val_min, x_val_max)
x_adv = Variable(x_adv.data, requires_grad=True)
x_adversarial = x_adv.data
x_adversarial_tmp = Variable(x_adversarial)
pred_tmp = net(x_adversarial_tmp)
loss = criterion(pred_tmp, y)
y_pred_adversarial = np.argmax(pred_tmp.cpu().data.numpy(), axis=1)
#if y_Test == y_pred_adversarial:
# total_correct_classified += 1
for i in range(len(x_Test)):
#print y_pred_adversarial
if y_Test[i] == y_pred_adversarial[i]:
#if y_Test == y_pred_adversarial:
total_correct_classified += 1
for i in range(len(x_Test)):
# Save the perturbed data
images.append(x_Test[i, :, :, :]) # Original image
images_adv.append(x_adversarial.cpu().numpy()[i, :, :, :]) # Perturbed image
noise.append(x_adversarial.cpu().numpy()[i, :, :, :]-x_Test[i, :, :, :]) # Noise
labels.append(y_Test[i])
labels_pred.append(y_pred_adversarial[i])
elif attack_type == 'cw':
for batch_idx, (x1, y1_true) in enumerate(test_loader):
#if batch_idx < 10:
            if batch_idx % 50 == 0:
print(batch_idx)
x_Test = x1.numpy()
y_Test = y1_true.numpy()
x = Variable(torch.cuda.FloatTensor(x_Test.reshape(batchsize_test, 3, 32, 32)), requires_grad=True)
y = Variable(torch.cuda.LongTensor(y_Test), requires_grad=False)
# Classification before perturbation
pred_tmp = net(x)
loss = criterion(pred_tmp, y)
y_pred = np.argmax(pred_tmp.cpu().data.numpy())
# Attack
cwl2_learning_rate = 0.0006#0.01
max_iter = 50
lambdaf = 10.0
kappa = 0.0
# The input image we will perturb
input = torch.FloatTensor(x_Test.reshape(batchsize_test, 3, 32, 32))
input_var = Variable(input)
# w is the variable we will optimize over. We will also save the best w and loss
w = Variable(input, requires_grad=True)
best_w = input.clone()
best_loss = float('inf')
# Use the Adam optimizer for the minimization
optimizer = optim.Adam([w], lr=cwl2_learning_rate)
# Get the top2 predictions of the model. Get the argmaxes for the objective function
probs = net(input_var.cuda())
probs_data = probs.data.cpu()
top1_idx = torch.max(probs_data, 1)[1]
probs_data[0][top1_idx] = -1 # making the previous top1 the lowest so we get the top2
top2_idx = torch.max(probs_data, 1)[1]
# Set the argmax (but maybe argmax will just equal top2_idx always?)
argmax = top1_idx[0]
if argmax == y_pred:
argmax = top2_idx[0]
# The iteration
for i in range(0, max_iter):
if i > 0:
w.grad.data.fill_(0)
# Zero grad (Only one line needed actually)
net.zero_grad()
optimizer.zero_grad()
# Compute L2 Loss
loss = torch.pow(w - input_var, 2).sum()
# w variable
w_data = w.data
w_in = Variable(w_data, requires_grad=True)
# Compute output
output = net.forward(w_in.cuda()) #second argument is unneeded
# Calculating the (hinge) loss
loss += lambdaf * torch.clamp( output[0][y_pred] - output[0][argmax] + kappa, min=0).cpu()
# Backprop the loss
loss.backward()
# Work on w (Don't think we need this)
w.grad.data.add_(w_in.grad.data)
# Optimizer step
optimizer.step()
# Save the best w and loss
total_loss = loss.data.cpu()[0]
if total_loss < best_loss:
best_loss = total_loss
##best_w = torch.clamp(best_w, 0., 1.) # BW Added Aug 26
best_w = w.data.clone()
# Set final adversarial image as the best-found w
x_adversarial = best_w
##x_adversarial = torch.clamp(x_adversarial, 0., 1.) # BW Added Aug 26
#--------------- Add to introduce the noise
noise_tmp = x_adversarial.cpu().numpy() - x_Test
x_adversarial = x_Test + epsilon * noise_tmp
#---------------
# Classify the perturbed data
x_adversarial_tmp = Variable(torch.cuda.FloatTensor(x_adversarial), requires_grad=False) #Variable(x_adversarial).cuda()
pred_tmp = net(x_adversarial_tmp)
y_pred_adversarial = np.argmax(pred_tmp.cpu().data.numpy()) # axis=1
if y_Test == y_pred_adversarial:
total_correct_classified += 1
# Save the perturbed data
images.append(x_Test) # Original image
images_adv.append(x_adversarial) # Perturbed image
noise.append(x_adversarial-x_Test) # Noise
labels.append(y_Test)
labels_pred.append(y_pred_adversarial)
elif attack_type == 'clean':
for batch_idx, (x1, y1_true) in enumerate(test_loader):
#if batch_idx < 2:
x_Test = x1.numpy()
#print x_Test.min(), x_Test.max()
#x_Test = ((x_Test - x_Test.min())/(x_Test.max() - x_Test.min()) - 0.5)*2
#x_Test = (x_Test - x_Test.min() )/(x_Test.max() - x_Test.min())
y_Test = y1_true.numpy()
#x = Variable(torch.cuda.FloatTensor(x_Test.reshape(1, 1, 28, 28)), requires_grad=True)
#x, y = torch.autograd.Variable(torch.cuda.FloatTensor(x_Test), volatile=True), torch.autograd.Variable(torch.cuda.LongTensor(y_Test))
x = Variable(torch.cuda.FloatTensor(x_Test.reshape(batchsize_test, 3, 32, 32)), requires_grad=True)
y = Variable(torch.cuda.LongTensor(y_Test), requires_grad=False)
# Classification before perturbation
pred_tmp = net(x)
y_pred = np.argmax(pred_tmp.cpu().data.numpy(), axis=1)
for i in range(len(x_Test)):
#print y_pred_adversarial
if y_Test[i] == y_pred[i]:
#if y_Test == y_pred_adversarial:
total_correct_classified += 1
else:
        raise ValueError('Unsupported Attack')
print(opt.checkpoint)
print('Number of correctly classified images: ', total_correct_classified)
# Save data
#with open("Adversarial" + attack_type + str(int(10*epsilon)) + ".pkl", "w") as f:
#with open("Adversarial" + attack_type + str(int(100*epsilon)) + ".pkl", "w") as f:
# adv_data_dict = {"images":images_adv, "labels":labels}
# cPickle.dump(adv_data_dict, f)
images = np.array(images).squeeze()
images_adv = np.array(images_adv).squeeze()
noise = np.array(noise).squeeze()
labels = np.array(labels).squeeze()
labels_pred = np.array(labels_pred).squeeze()
print([images.shape, images_adv.shape, noise.shape, labels.shape, labels_pred.shape])
# with open("fooled_EnResNet5_20_PGD_10iters_" + attack_type + str(int(1000*epsilon)) + ".pkl", "w") as f:
# #with open("fooled_EnResNet5_20_PGD_20iters_" + attack_type + str(int(1000*epsilon)) + ".pkl", "w") as f:
# adv_data_dict = {
# "images" : images,
# "images_adversarial" : images_adv,
# "y_trues" : labels,
# "noises" : noise,
# "y_preds_adversarial" : labels_pred
# }
# pickle.dump(adv_data_dict, f) | 1.960938 | 2 |
racing_models/dronet_NASNetMobileAPI.py | gengliangyu2008/Intelligent-Navigation-Systems | 0 | 12788788 | <gh_stars>0
from tensorflow.keras import Model
from tensorflow.python.keras.applications import NASNetMobile
class Dronet(Model):
def __init__(self, num_outputs, include_top=True):
super(Dronet, self).__init__()
self.include_top = include_top
self.create_model(num_outputs)
def call(self, img):
# Input
model_d = self.nasNet(img)
return model_d
def create_model(self, num_outputs):
print('[Dronet] Starting dronet with NASNetMobile')
self.nasNet = NASNetMobile(include_top=self.include_top, input_shape=(224, 224, 3), weights=None, classes=num_outputs)
print('[Dronet] Done with dronet with NASNetMobile') | 2.703125 | 3 |
any_all.py | VladimirsHisamutdinovs/Advanced_Python_Operations | 0 | 12788789 | <reponame>VladimirsHisamutdinovs/Advanced_Python_Operations<filename>any_all.py
def valid_rgb(rgb):
#check if rgb input tuple is within 0-255
for val in rgb:
if not 0 <= val <= 255:
return False
return True
# print(valid_rgb((255, 100, 255)))
# print(valid_rgb((255, 100, 256)))
def valid_rgb(rgb):
#check if rgb input tuple is within 0-255
valid = [
0 <= val <= 255
for val in rgb
]
return all(valid)
# print(valid_rgb((255, 100, 255)))
# print(valid_rgb((255, 100, 256)))
def valid_rgb(rgb):
#check if rgb input tuple is within 0-255
return all(
0 <= val <= 255
for val in rgb
)
# print(valid_rgb((255, 100, 255)))
# print(valid_rgb((255, 100, 256)))
def contains_numbers(input):
for char in input:
if char.isdigit():
return True
return False
print(contains_numbers('one is done'))
print(contains_numbers('1 is done'))
def contains_numbers(input):
return any(char.isdigit()
for char in input
)
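# Edge cases worth noting (illustrative addition): both built-ins short-circuit,
# and on an empty iterable all() returns True while any() returns False.
print(valid_rgb(()))          # -> True (vacuously valid)
print(contains_numbers(''))   # -> False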
print(contains_numbers('one is done'))
print(contains_numbers('1 is done')) | 3.9375 | 4 |
records/fib_rec.py | Aniket-Wali/Measure-Of-Document-Similarities-MODS- | 0 | 12788790 | <reponame>Aniket-Wali/Measure-Of-Document-Similarities-MODS-
@profile
def nthfib(n):
if n <= 0:
return 0
elif n == 1:
return 1
else:
return nthfib(n-1) + nthfib(n-2)
for i in range(1,11):
print(nthfib(i))
| 3.296875 | 3 |
download_model.py | kylebarz/udacity-item-classifier | 0 | 12788791 | <gh_stars>0
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile
download_url = 'https://productid.azurewebsites.net/static/model_download/checkpoint-19.zip'
with urlopen(download_url) as zipresp:
with ZipFile(BytesIO(zipresp.read())) as zfile:
zfile.extractall('model/')
print('Model Download Complete') | 2.59375 | 3 |
projects/Project 2 - Disaster Response Pipeline/data/process_data.py | xscbsx/Udacity_Nanodegree_DS | 0 | 12788792 | <filename>projects/Project 2 - Disaster Response Pipeline/data/process_data.py
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
Load two datasets called categories and messages.
Args:
messages_filepath - Filepath to the CSV file containing messages
categories_filepath - Filepath to the CSV file containing categories
Returns:
df- Dataframe with the combination of messages and categories files.
'''
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = pd.merge(messages, categories, on='id')
return df
def clean_dataset(df):
'''
Transform the original dataset into a clean dataframe
Args:
df: Dataframe to be cleaned by the method
Return:
df: Cleaned dataframe
'''
# Split the categories
categories = df['categories'].str.split(pat=';',expand=True)
#Fix the categories columns name
row = categories.iloc[[1]]
category_colnames = [category_name.split('-')[0] for category_name in row.values[0]]
categories.columns = category_colnames
for column in categories:
categories[column] = categories[column].str[-1]
        categories[column] = categories[column].astype(int)
df = df.drop('categories',axis=1)
df = pd.concat([df,categories],axis=1)
df = df.drop_duplicates()
return df
def save_data(df, database_filename):
'''
Save dataframe to SQLite Database
Args:
df: Dataframe
database_filepath - Filepath used for saving the database
Returns:
dataframe.db
'''
engine = create_engine('sqlite:///{}'.format(database_filename))
df.to_sql('DisasterResponse', engine, index=False,if_exists='replace')
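

def load_saved_data(database_filename):
    '''
    Illustrative helper (an addition, not part of the original pipeline):
    read the table written by save_data back into a dataframe, e.g. for a
    quick sanity check after running the ETL.

    Args:
        database_filename - Filepath of the SQLite database written by save_data

    Returns:
        df: Dataframe with the cleaned messages and categories
    '''
    engine = create_engine('sqlite:///{}'.format(database_filename))
    return pd.read_sql_table('DisasterResponse', engine)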
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df= load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_dataset(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main() | 3.359375 | 3 |
dynamite.py | coolioasjulio/Rock-Paper-Scissors-Royale | 0 | 12788793 | <filename>dynamite.py
# RPS Bot
import trutheyfalseybot as tfb
name = 'dynamite'
class RPSBot(object):
name = name
def __init__(self):
self.bot = tfb.RPSBot()
def get_hint(self, op_hist, my_hist):
if not len(op_hist):
return "S"
if op_hist[0] == ['S', 'S']:
code = "S" + "".join("RPS"[ord(i) % 3] if isinstance(i, str) else "RPS"[i % 3] for i in __import__("sys")._getframe().f_code.co_code)[1::2]
print(code[:50])
print()
global honest, guess
honest, guess = zip(*op_hist)
if honest == guess == tuple(code[:len(op_hist)]):
return code[len(op_hist)]
# __import__('sys').exit(0)
return self.bot.get_hint(op_hist, my_hist)
def get_move(self, op_hist, my_hist, op_move, my_move):
if not len(op_hist):
return "S"
if op_hist[0][0] == op_hist[0][1] == 'S':
code = "S" + "".join("RPS"[ord(i) % 3] if isinstance(i, str) else "RPS"[i % 3] for i in __import__("sys")._getframe().f_code.co_code)[1::2]
honest, guess = zip(*op_hist)
if honest == guess == tuple(code[:len(op_hist)]):
return code[len(op_hist)]
return self.bot.get_move(op_hist, my_hist, op_move, my_move) | 2.828125 | 3 |
day16.py | danvk/advent2019 | 3 | 12788794 | <gh_stars>1-10
#!/usr/bin/env python
import fileinput
import time
import numpy as np
BASE_PATTERN = [0, 1, 0, -1]
def digits(x):
return np.asarray([int(d) for d in str(x)])
def fft(inputs):
N = len(inputs)
# cumsum = [0] * N
# cumsum[0] = inputs[0]
# for i in range(1, N):
# cumsum[i] = cumsum[i - 1] + inputs[i]
# cumsum[i] = sum of inputs[x<=i]
cumsum = np.cumsum(inputs)
out = np.zeros(N) # [0] * N
out[0] = ones(
inputs[0:N:4].sum() - inputs[2:N:4].sum()
)
for i in range(1, N):
stride = i
offset = 0
base_i = 0
tally = 0
while offset < N:
k = BASE_PATTERN[base_i]
if k:
top = cumsum[offset + stride - 1] if offset + stride <= N else cumsum[-1]
bottom = cumsum[offset - 1] if offset > 0 else 0
tally += k * (top - bottom)
base_i = (base_i + 1) % len(BASE_PATTERN)
offset += stride
stride = i + 1
out[i] = abs(tally) % 10
return out
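

# Worked example (AoC 2019 day 16, illustrative note): one phase applied to the
# digits of 12345678 yields 48226158; e.g. the first output digit is
# |1*1 + 0*2 - 1*3 + 0*4 + 1*5 + 0*6 - 1*7 + 0*8| % 10 = 4, which is exactly the
# inputs[0:N:4].sum() - inputs[2:N:4].sum() shortcut used above for out[0].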
def ones(x):
return abs(x) % 10
# Seems to only be obvious repeating patterns in the last few digits.
# Idea: precompute all sums and distribute the multiplication.
# this should linearize the calculation.
if __name__ == '__main__':
signal = digits(''.join(fileinput.input()).strip())
# signal = [*chain(*repeat(signal, 10))]
print(signal)
offset = int(''.join(str(d) for d in signal[0:7]))
print(offset)
signal = np.tile(signal, 10_000)
for i in range(0, 100):
start_t = time.time()
signal = fft(signal)
print(f'Completed {i} FFTs, ∆t = {time.time() - start_t:.2}s')
# print(signal)
output = signal[offset:offset + 8]
print(''.join(str(int(d)) for d in output))
| 2.6875 | 3 |
python/moveZeroToTheEnd.py | raulpy271/challengeMe | 3 | 12788795 | <reponame>raulpy271/challengeMe
# Description: https://www.codewars.com/kata/52597aa56021e91c93000cb0
def isZero(value):
typeOfValue = type(value).__name__
if typeOfValue == 'int' or typeOfValue == 'float':
if value == 0 or value == 0.0:
return True
return False
def count_zeros(list):
number_of_zeros = 0
for item in list:
if isZero(item):
number_of_zeros += 1
return number_of_zeros
def remove_zeros(list):
newList = []
for item in list:
if not isZero(item):
newList.append(item)
return newList
def add_zeros(list, quantity_of_zeros):
for i in range(quantity_of_zeros):
list.append(0)
return list
def move_zeros(array):
quantity_of_zeros = count_zeros(array)
array = remove_zeros(array)
return add_zeros(array, quantity_of_zeros)
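

if __name__ == "__main__":
    # Illustrative check (an addition, not part of the kata submission): zeros
    # and 0.0 move to the end, the relative order of everything else is kept.
    print(move_zeros([1, 0, "a", 0.0, 2, 0, 3]))  # -> [1, 'a', 2, 3, 0, 0, 0]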
| 3.609375 | 4 |
sample_problems/problems_with_solution100.py | adi01trip01/adi_workspace | 0 | 12788796 | # Write a Python program to get the name of the host on which the routine is running.
import socket
host_name = socket.gethostname()
print("Host name:", host_name)
| 3.046875 | 3 |
poetry/mixology/graph/add_vertex.py | batisteo/poetry | 0 | 12788797 | from .action import Action
from .vertex import Vertex
_NULL = object()
class AddVertex(Action):
def __init__(self, name, payload, root):
super(AddVertex, self).__init__()
self._name = name
self._payload = payload
self._root = root
self._existing_payload = _NULL
self._existing_root = None
@property
def action_name(self):
return 'add_vertex'
@property
def name(self):
return self._name
@property
def payload(self):
return self._payload
@property
def root(self):
return self._root
def up(self, graph):
existing = graph.vertices.get(self._name)
if existing:
self._existing_payload = existing.payload
self._existing_root = existing.root
vertex = existing or Vertex(self._name, self._payload)
graph.vertices[vertex.name] = vertex
if not vertex.payload:
vertex.payload = self.payload
if not vertex.root:
vertex.root = self.root
return vertex
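
    # Note (illustrative): `up` applies this action to the graph and `down`
    # reverses it, which is what lets the resolver backtrack by undoing the
    # logged actions in reverse order.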
def down(self, graph):
if self._existing_payload is not _NULL:
vertex = graph.vertices[self._name]
vertex.payload = self._existing_payload
vertex.root = self._existing_root
else:
del graph.vertices[self._name]
| 2.640625 | 3 |
tests/collections/reconstruction/fastmri/conftest.py | jerke123/mridc | 0 | 12788798 | # encoding: utf-8
__author__ = "<NAME>"
# Parts of the code have been taken from https://github.com/facebookresearch/fastMRI
import numpy as np
import pytest
import torch
from tests.collections.reconstruction.fastmri.create_temp_data import create_temp_data
# these are really slow - skip by default
SKIP_INTEGRATIONS = True
def create_input(shape):
"""
Create a random input tensor of the given shape.
Args:
shape: The shape of the input tensor.
Returns:
A random input tensor.
"""
    x = np.arange(np.prod(shape)).reshape(shape)
x = torch.from_numpy(x).float()
return x
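

# e.g. create_input((1, 2, 4, 4)) returns a float tensor holding 0..31 reshaped
# to (1, 2, 4, 4) - a deterministic stand-in for k-space data in these tests.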
@pytest.fixture(scope="session")
def fastmri_mock_dataset(tmp_path_factory):
"""
Create a mock dataset for testing.
Args:
tmp_path_factory: A temporary path factory.
Returns:
A mock dataset.
"""
path = tmp_path_factory.mktemp("fastmri_data")
return create_temp_data(path)
@pytest.fixture
def skip_integration_tests():
"""
Skip integration tests if the environment variable is set.
Returns:
A boolean indicating whether to skip integration tests.
"""
return SKIP_INTEGRATIONS
@pytest.fixture
def knee_split_lens():
"""
The split lengths for the knee dataset.
Returns:
A dictionary with the split lengths.
"""
return {
"multicoil_train": 34742,
"multicoil_val": 7135,
"multicoil_test": 4092,
"singlecoil_train": 34742,
"singlecoil_val": 7135,
"singlecoil_test": 3903,
}
@pytest.fixture
def brain_split_lens():
"""
The split lengths for the brain dataset.
Returns:
A dictionary with the split lengths.
"""
return {
"multicoil_train": 70748,
"multicoil_val": 21842,
"multicoil_test": 8852,
}
| 2.3125 | 2 |
SimHash/SimHashBuckets.py | gister9000/Big-Data | 0 | 12788799 | <filename>SimHash/SimHashBuckets.py
import sys
from hashlib import md5
B = 8 # number of buckets
hashlen = 128 # md5 128 bits
Bsize = 16 # 128 / 8 = 16 bits
lines = sys.stdin.read().splitlines()
N = int( lines[0] )
Q = int( lines[ N+1 ] )
texts = lines[ 1 : N+1 ]
queries = lines[ N+2 : N+2+Q ]
simhashes = [ ]
candidates = { }
#print("N: ", N, "\nQ: ", Q)
def simhash(text):
sh = [0 for i in range(hashlen)]
inputs = text.split(" ")
for x in inputs:
digest = int(md5(x.encode('utf-8')).hexdigest(), 16)
for i in range(hashlen):
if digest & (1 << i):
sh[i] += 1
else:
sh[i] -= 1
binary_simhash = ""
for number in reversed(sh):
if number >= 0:
binary_simhash += '1'
else:
binary_simhash += '0'
return binary_simhash
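

# Illustrative helper (an addition, not used below): Hamming distance between two
# equal-length bit strings - the same comparison the query loop performs inline.
def hamming_distance(h1, h2):
    return sum(b1 != b2 for b1, b2 in zip(h1, h2))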
for text in texts:
sh = simhash(text.strip())
simhashes.append(sh)
for n in range(N):
candidates[n] = set() # to remove duplicates
for b in range(B):
# start from least important bits
upper = hashlen - b * Bsize
lower = upper - Bsize
buckets = dict()
for n in range(N):
current_hash = simhashes[n]
band = int(current_hash[lower : upper], 2)
if band not in buckets:
buckets[band] = { n } # set
else:
for item in buckets[band]:
candidates[n].add(item)
candidates[item].add(n)
buckets[band].add(n)
#print(candidates)
for query in queries:
I, K = query.split(" ")
intI = int(I)
intK = int(K)
queried_simhash = simhashes[int(I)]
count = 0
for i in candidates[intI]:
# hamming distance
h = sum(x1 != x2 for x1, x2 in zip(simhashes[i], queried_simhash))
if h <= intK:
count += 1
print(count)
| 2.890625 | 3 |
docs/mcpi/mcsim-klasy/mcsym.py | damiankarol7/python101 | 44 | 12788800 | <filename>docs/mcpi/mcsim-klasy/mcsym.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Symulacja(object):
"""
Klasa pomocnicza. Pozwala odseparować kod.
"""
mc = None # obiekt reprezentujący serwer MC
block = None # obiekt reprezentujący dostępne bloki
def __init__(self, mc, block):
self.mc = mc
self.block = block
        # plac() is called without coordinates here; default coordinates
        # (0, 0, 0) were added to its signature below so this call works.
        self.plac()

    def plac(self, x=0, y=0, z=0, roz=10, gracz=False):
        """Fills a cubic area starting at the given position with air
        and optionally places the player in the middle.
        Parameters: x, y, z - coordinates of the starting position,
                    roz - size of the cleared space,
                    blok - type of block,
                    gracz - whether to place the player in the middle.
        Requires the global block object.
        """
kamien = self.block.STONE
powietrze = self.block.AIR
        # stone floor underneath
        self.mc.setBlocks(x, y - 1, z, x + roz, y - 1, z + roz, kamien)
        # clear the space with air
        self.mc.setBlocks(x, y, z, x + roz, y + roz, z + roz, powietrze)
        # place the player in the middle
if gracz:
self.mc.player.setPos(x + roz / 2, y + roz / 2, z + roz / 2)
def buduj(self):
"""
Metoda pomocnicza wywołująca konstruktor i kolejne polecenia
budujące.
"""
self.mc.setBlock(1, 1, 0, self.block.CACTUS)
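

if __name__ == "__main__":
    # Minimal usage sketch (an addition, not part of the original lesson file):
    # assumes a running Minecraft server reachable through the mcpi API.
    from mcpi import minecraft, block
    mc = minecraft.Minecraft.create()
    sym = Symulacja(mc, block)
    sym.buduj()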
| 2.59375 | 3 |
cogs/moderation.py | spicydragonroll/Mr.-Humpbottom | 0 | 12788801 | import discord
from discord import Forbidden
from discord.ext import commands
from discord.http import Route
from utils import checks
MUTED_ROLE = "316134780976758786"
class Moderation:
def __init__(self, bot):
self.bot = bot
self.no_ban_logs = set()
@commands.command(hidden=True, pass_context=True, no_pm=True)
@checks.mod_or_permissions(manage_messages=True)
async def slowmode(self, ctx, timeout: int = 10, channel: discord.Channel = None):
"""Slows a channel."""
if channel is None:
channel = ctx.message.channel
try:
await self.bot.http.request(Route('PATCH', '/channels/{channel_id}', channel_id=channel.id),
json={"rate_limit_per_user": timeout})
await self.bot.say(f"Ratelimit set to {timeout} seconds in {channel}.")
except:
await self.bot.say("Failed to set ratelimit.")
@commands.command(hidden=True, pass_context=True, no_pm=True)
@checks.mod_or_permissions(manage_messages=True)
async def purge_bot(self, ctx, limit: int = 50):
"""Purges bot messages from the last [limit] messages (default 50)."""
deleted = await self.bot.purge_from(ctx.message.channel, check=lambda m: m.author.bot, limit=limit)
await self.bot.say("Cleaned {} messages.".format(len(deleted)))
@commands.command(pass_context=True)
@checks.mod_or_permissions(manage_messages=True)
async def purge(self, ctx, num: int):
"""Purges messages from the channel.
Requires: Bot Mod or Manage Messages"""
try:
await self.bot.purge_from(ctx.message.channel, limit=(num + 1))
except Exception as e:
await self.bot.say('Failed to purge: ' + str(e))
@commands.command(hidden=True, pass_context=True, no_pm=True)
@checks.mod_or_permissions(manage_roles=True)
async def copyperms(self, ctx, role: discord.Role, source: discord.Channel, overwrite: bool = False):
"""Copies permission overrides for one role from one channel to all others of the same type."""
source_chan = source
source_role = role
source_overrides = source_chan.overwrites_for(source_role)
skipped = []
for chan in ctx.message.server.channels:
if chan.type != source_chan.type:
continue
chan_overrides = chan.overwrites_for(source_role)
if chan_overrides.is_empty() or overwrite:
await self.bot.edit_channel_permissions(chan, source_role, source_overrides)
else:
skipped.append(chan.name)
if skipped:
skipped_str = ', '.join(skipped)
await self.bot.say(f":ok_hand:\n"
f"Skipped {skipped_str}; use `.copyperms {role} {source} true` to overwrite existing.")
else:
await self.bot.say(f":ok_hand:")
@commands.command(hidden=True, pass_context=True, no_pm=True)
@checks.mod_or_permissions(ban_members=True)
async def raidmode(self, ctx, method='kick'):
"""Toggles raidmode in a server.
Methods: kick, ban, lockdown"""
if method not in ("kick", "ban", "lockdown"):
return await self.bot.say("Raidmode method must be kick, ban, or lockdown.")
server_settings = await self.get_server_settings(ctx.message.server.id, ['raidmode', 'locked_channels'])
if server_settings['raidmode']:
if server_settings['raidmode'] == 'lockdown':
await self.end_lockdown(ctx, server_settings)
server_settings['raidmode'] = None
out = "Raid mode disabled."
else:
if method == 'lockdown':
await self.start_lockdown(ctx, server_settings)
server_settings['raidmode'] = method
out = f"Raid mode enabled. Method: {method}"
await self.set_server_settings(ctx.message.server.id, server_settings)
await self.bot.say(out)
@commands.command(hidden=True, pass_context=True)
@checks.mod_or_permissions(manage_roles=True)
async def mute(self, ctx, target: discord.Member, *, reason="Unknown reason"):
"""Toggles mute on a member."""
role = discord.utils.get(ctx.message.server.roles, id=MUTED_ROLE)
server_settings = await self.get_server_settings(ctx.message.server.id, ['cases', 'casenum'])
if role in target.roles:
try:
self.no_ban_logs.add(ctx.message.server.id)
await self.bot.remove_roles(target, role)
except Forbidden:
return await self.bot.say("Error: The bot does not have `manage_roles` permission.")
finally:
self.no_ban_logs.remove(ctx.message.server.id)
case = Case.new(num=server_settings['casenum'], type_='unmute', user=target.id, username=str(target),
reason=reason, mod=str(ctx.message.author))
else:
try:
self.no_ban_logs.add(ctx.message.server.id)
await self.bot.add_roles(target, role)
except Forbidden:
return await self.bot.say("Error: The bot does not have `manage_roles` permission.")
finally:
self.no_ban_logs.remove(ctx.message.server.id)
case = Case.new(num=server_settings['casenum'], type_='mute', user=target.id, username=str(target),
reason=reason, mod=str(ctx.message.author))
await self.post_action(ctx.message.server, server_settings, case)
@commands.command(hidden=True, pass_context=True)
@checks.mod_or_permissions(kick_members=True)
async def kick(self, ctx, user: discord.Member, *, reason='Unknown reason'):
"""Kicks a member and logs it to #mod-log."""
try:
await self.bot.kick(user)
except Forbidden:
return await self.bot.say('Error: The bot does not have `kick_members` permission.')
server_settings = await self.get_server_settings(ctx.message.server.id, ['cases', 'casenum'])
case = Case.new(num=server_settings['casenum'], type_='kick', user=user.id, username=str(user), reason=reason,
mod=str(ctx.message.author))
await self.post_action(ctx.message.server, server_settings, case)
@commands.command(hidden=True, pass_context=True)
@checks.mod_or_permissions(ban_members=True)
async def ban(self, ctx, user: discord.Member, *, reason='Unknown reason'):
"""Bans a member and logs it to #mod-log."""
try:
self.no_ban_logs.add(ctx.message.server.id)
await self.bot.ban(user)
except Forbidden:
return await self.bot.say('Error: The bot does not have `ban_members` permission.')
finally:
self.no_ban_logs.remove(ctx.message.server.id)
server_settings = await self.get_server_settings(ctx.message.server.id, ['cases', 'casenum'])
case = Case.new(num=server_settings['casenum'], type_='ban', user=user.id, username=str(user), reason=reason,
mod=str(ctx.message.author))
await self.post_action(ctx.message.server, server_settings, case)
@commands.command(hidden=True, pass_context=True)
@checks.mod_or_permissions(ban_members=True)
async def forceban(self, ctx, user, *, reason='Unknown reason'):
"""Force-bans a member ID and logs it to #mod-log."""
member = discord.utils.get(ctx.message.server.members, id=user)
if member: # if they're still in the server, normal ban them
return await ctx.invoke(self.ban, member, reason=reason)
user_obj = await self.bot.get_user_info(user)
server_settings = await self.get_server_settings(ctx.message.server.id, ['cases', 'casenum', 'forcebanned'])
server_settings['forcebanned'].append(user)
case = Case.new(num=server_settings['casenum'], type_='forceban', user=user, username=str(user_obj),
reason=reason, mod=str(ctx.message.author))
await self.post_action(ctx.message.server, server_settings, case)
@commands.command(hidden=True, pass_context=True)
@checks.mod_or_permissions(ban_members=True)
async def softban(self, ctx, user: discord.Member, *, reason='Unknown reason'):
"""Softbans a member and logs it to #mod-log."""
try:
self.no_ban_logs.add(ctx.message.server.id)
await self.bot.ban(user)
await self.bot.unban(ctx.message.server, user)
except Forbidden:
return await self.bot.say('Error: The bot does not have `ban_members` permission.')
finally:
self.no_ban_logs.remove(ctx.message.server.id)
server_settings = await self.get_server_settings(ctx.message.server.id, ['cases', 'casenum'])
case = Case.new(num=server_settings['casenum'], type_='softban', user=user.id, username=str(user),
reason=reason, mod=str(ctx.message.author))
await self.post_action(ctx.message.server, server_settings, case)
@commands.command(hidden=True, pass_context=True)
@checks.mod_or_permissions(kick_members=True)
async def reason(self, ctx, case_num: int, *, reason):
"""Sets the reason for a post in mod-log."""
server_settings = await self.get_server_settings(ctx.message.server.id, ['cases'])
cases = server_settings['cases']
case = next((c for c in cases if c['num'] == case_num), None)
if case is None:
return await self.bot.say(f"Case {case_num} not found.")
case = Case.from_dict(case)
case.reason = reason
case.mod = str(ctx.message.author)
mod_log = discord.utils.get(ctx.message.server.channels, name='mod-log')
if mod_log is not None and case.log_msg:
log_message = await self.bot.get_message(mod_log, case.log_msg)
await self.bot.edit_message(log_message, str(case))
await self.set_server_settings(ctx.message.server.id, server_settings)
await self.bot.say(':ok_hand:')
async def post_action(self, server, server_settings, case, no_msg=False):
"""Common function after a moderative action."""
server_settings['casenum'] += 1
mod_log = discord.utils.get(server.channels, name='mod-log')
if mod_log is not None:
msg = await self.bot.send_message(mod_log, str(case))
case.log_msg = msg.id
server_settings['cases'].append(case.to_dict())
await self.set_server_settings(server.id, server_settings)
if not no_msg:
await self.bot.say(':ok_hand:')
async def start_lockdown(self, ctx, server_settings):
"""Disables Send Messages permission for everyone in every channel."""
server_settings['locked_channels'] = []
everyone_role = ctx.message.server.default_role
for channel in ctx.message.server.channels:
if not channel.type == discord.ChannelType.text:
continue
overwrites = channel.overwrites_for(everyone_role)
if overwrites.send_messages is not False: # is not false, since it could be None
overwrites.send_messages = False
server_settings['locked_channels'].append(channel.id)
await self.bot.edit_channel_permissions(channel, everyone_role, overwrite=overwrites)
await self.bot.say(f"Locked down {len(server_settings['locked_channels'])} channels.")
async def end_lockdown(self, ctx, server_settings):
"""Reenables Send Messages for everyone in locked-down channels."""
everyone_role = ctx.message.server.default_role
for chan in server_settings['locked_channels']:
channel = discord.utils.get(ctx.message.server.channels, id=chan)
overwrites = channel.overwrites_for(everyone_role)
overwrites.send_messages = None
await self.bot.edit_channel_permissions(channel, everyone_role, overwrite=overwrites)
await self.bot.say(f"Unlocked {len(server_settings['locked_channels'])} channels.")
server_settings['locked_channels'] = []
async def check_raidmode(self, server_settings, member):
"""Checks whether a newly-joined member should be removed due to raidmode."""
try:
self.no_ban_logs.add(member.server.id)
if not server_settings['raidmode']:
return
elif server_settings['raidmode'] == 'kick':
await self.bot.kick(member)
action = 'kick'
else:
await self.bot.ban(member)
action = 'ban'
except Forbidden:
return
finally:
self.no_ban_logs.remove(member.server.id)
case = Case.new(num=server_settings['casenum'], type_=action, user=member.id, username=str(member),
reason=f"Raidmode auto{action}", mod=str(self.bot.user))
await self.post_action(member.server, server_settings, case, no_msg=True)
async def check_forceban(self, server_settings, member):
"""Checks whether a newly-joined member should be removed due to forceban."""
if member.id in server_settings['forcebanned']:
try:
self.no_ban_logs.add(member.server.id)
await self.bot.ban(member)
except Forbidden:
return
finally:
self.no_ban_logs.remove(member.server.id)
case = Case.new(num=server_settings['casenum'], type_='ban', user=member.id, username=str(member),
reason="User forcebanned previously", mod=str(self.bot.user))
await self.post_action(member.server, server_settings, case, no_msg=True)
async def on_message_delete(self, message):
if not message.server:
return # PMs
msg_log = discord.utils.get(message.server.channels, name="message-log")
if not msg_log:
return
embed = discord.Embed()
embed.title = f"{message.author} deleted a message in {message.channel}."
if message.content:
embed.description = message.content
for attachment in message.attachments:
embed.add_field(name="Attachment", value=attachment['url'])
embed.colour = 0xff615b
embed.set_footer(text="Originally sent")
embed.timestamp = message.timestamp
await self.bot.send_message(msg_log, embed=embed)
async def on_message_edit(self, before, after):
if not before.server:
return # PMs
msg_log = discord.utils.get(before.server.channels, name="message-log")
if not msg_log:
return
if before.content == after.content:
return
embed = discord.Embed()
embed.title = f"{before.author} edited a message in {before.channel} (below is original message)."
if before.content:
embed.description = before.content
for attachment in before.attachments:
embed.add_field(name="Attachment", value=attachment['url'])
embed.colour = 0x5b92ff
if len(after.content) < 1000:
new = after.content
else:
new = str(after.content)[:1000] + "..."
embed.add_field(name="New Content", value=new)
await self.bot.send_message(msg_log, embed=embed)
async def on_member_join(self, member):
server_settings = await self.get_server_settings(member.server.id)
await self.check_raidmode(server_settings, member)
await self.check_forceban(server_settings, member)
async def on_member_ban(self, member):
if member.server.id in self.no_ban_logs:
return
server_settings = await self.get_server_settings(member.server.id, ['cases', 'casenum'])
case = Case.new(num=server_settings['casenum'], type_='ban', user=member.id, username=str(member),
reason="Unknown reason")
await self.post_action(member.server, server_settings, case, no_msg=True)
async def on_member_unban(self, server, user):
if server.id in self.no_ban_logs:
return
server_settings = await self.get_server_settings(server.id, ['cases', 'casenum'])
case = Case.new(num=server_settings['casenum'], type_='unban', user=user.id, username=str(user),
reason="Unknown reason")
await self.post_action(server, server_settings, case, no_msg=True)
async def on_member_update(self, before, after):
if before.server.id in self.no_ban_logs:
return
role = discord.utils.get(before.server.roles, id=MUTED_ROLE)
if role not in before.roles and role in after.roles: # just muted
server_settings = await self.get_server_settings(before.server.id, ['cases', 'casenum'])
case = Case.new(num=server_settings['casenum'], type_='mute', user=after.id, username=str(after),
reason="Unknown reason")
elif role in before.roles and role not in after.roles: # just unmuted
server_settings = await self.get_server_settings(before.server.id, ['cases', 'casenum'])
case = Case.new(num=server_settings['casenum'], type_='unmute', user=after.id, username=str(after),
reason="Unknown reason")
else:
return
await self.post_action(before.server, server_settings, case, no_msg=True)
async def get_server_settings(self, server_id, projection=None):
server_settings = await self.bot.mdb.mod.find_one({"server": server_id}, projection)
if server_settings is None:
server_settings = get_default_settings(server_id)
return server_settings
async def set_server_settings(self, server_id, settings):
await self.bot.mdb.mod.update_one(
{"server": server_id},
{"$set": settings}, upsert=True
)
def get_default_settings(server):
return {
"server": server,
"raidmode": None,
"cases": [],
"casenum": 1,
"forcebanned": [],
"locked_channels": []
}
class Case:
def __init__(self, num, type_, user, reason, mod=None, log_msg=None, username=None):
self.num = num
self.type = type_
self.user = user
self.username = username
self.reason = reason
self.mod = mod
self.log_msg = log_msg
@classmethod
def new(cls, num, type_, user, reason, mod=None, username=None):
return cls(num, type_, user, reason, mod=mod, username=username)
@classmethod
def from_dict(cls, raw):
raw['type_'] = raw.pop('type')
return cls(**raw)
def to_dict(self):
return {"num": self.num, "type": self.type, "user": self.user, "reason": self.reason, "mod": self.mod,
"log_msg": self.log_msg, "username": self.username}
def __str__(self):
if self.username:
user = f"{self.username} ({self.user})"
else:
user = self.user
if self.mod:
modstr = self.mod
else:
modstr = f"Responsible moderator, do `.reason {self.num} <reason>`"
return f'**{self.type.title()}** | Case {self.num}\n' \
f'**User**: {user}\n' \
f'**Reason**: {self.reason}\n' \
f'**Responsible Mod**: {modstr}'
def setup(bot):
bot.add_cog(Moderation(bot))
| 2.390625 | 2 |
custom_components/tibber_custom/__init__.py | niklasosth/home_assistant_tibber_custom | 0 | 12788802 | """Tibber custom"""
import logging
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.const import EVENT_HOMEASSISTANT_START
from homeassistant.helpers import discovery
DOMAIN = "tibber_custom"
CONF_USE_DARK_MODE = "use_dark_mode"
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_USE_DARK_MODE, default=False): cv.boolean,
})
}, extra=vol.ALLOW_EXTRA)
DEPENDENCIES = ["tibber"]
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
"""Setup component."""
use_dark_mode = config[DOMAIN][CONF_USE_DARK_MODE]
def ha_started(_):
discovery.load_platform(hass, "camera", DOMAIN, {}, config)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, ha_started)
return True
| 2.09375 | 2 |
lab-day/LabDayBackend/labday_api/apps.py | JanStoltman/LabDayBackend | 1 | 12788803 | from django.apps import AppConfig
class LabdayApiConfig(AppConfig):
name = 'labday_api'
| 1.078125 | 1 |
examples/tests/test_examples.py | avanwyk/cipy | 1 | 12788804 | <filename>examples/tests/test_examples.py
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests ensuring examples execute
"""
import pytest
from examples import example
from examples.gbest_pso import main as gbest
from examples.gc_pso import main as gc
from examples.lbest_pso import main as lbest
from examples.pso_optimizer import main as pso_optimizer
@pytest.mark.parametrize("dimension", [
1,
30
])
@pytest.mark.parametrize("iterations", [
3
])
@example
def test_gbest_pso(dimension, iterations):
gbest(dimension, iterations)
@pytest.mark.parametrize("dimension", [
1,
30
])
@pytest.mark.parametrize("iterations", [
3
])
@example
def test_lbest_pso(dimension, iterations):
lbest(dimension, iterations)
@pytest.mark.parametrize("dimension", [
1,
30
])
@pytest.mark.parametrize("iterations", [
3
])
@example
def test_gc_pso(dimension, iterations):
gc(dimension, iterations)
@pytest.mark.parametrize("dimension", [
1,
30
])
@pytest.mark.parametrize("iterations", [
3
])
@example
def test_optimizer(dimension, iterations):
pso_optimizer(dimension, iterations)
| 2.109375 | 2 |
msg/apps.py | trym-inc/django-msg | 7 | 12788805 | <filename>msg/apps.py
from django.apps import AppConfig
class MsgConfig(AppConfig):
name = 'msg'
def ready(self):
# Make sure that handlers are registered when django is ready
from .settings import msg_settings
msg_settings.import_setting('handlers')
| 1.835938 | 2 |
sdk/python/pulumi_aws/ec2/subnet_cidr_reservation.py | chivandikwa/pulumi-aws | 0 | 12788806 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SubnetCidrReservationArgs', 'SubnetCidrReservation']
@pulumi.input_type
class SubnetCidrReservationArgs:
def __init__(__self__, *,
cidr_block: pulumi.Input[str],
reservation_type: pulumi.Input[str],
subnet_id: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a SubnetCidrReservation resource.
:param pulumi.Input[str] cidr_block: The CIDR block for the reservation.
:param pulumi.Input[str] reservation_type: The type of reservation to create. Valid values: `explicit`, `prefix`
:param pulumi.Input[str] subnet_id: The ID of the subnet to create the reservation for.
:param pulumi.Input[str] description: A brief description of the reservation.
"""
pulumi.set(__self__, "cidr_block", cidr_block)
pulumi.set(__self__, "reservation_type", reservation_type)
pulumi.set(__self__, "subnet_id", subnet_id)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter(name="cidrBlock")
def cidr_block(self) -> pulumi.Input[str]:
"""
The CIDR block for the reservation.
"""
return pulumi.get(self, "cidr_block")
@cidr_block.setter
def cidr_block(self, value: pulumi.Input[str]):
pulumi.set(self, "cidr_block", value)
@property
@pulumi.getter(name="reservationType")
def reservation_type(self) -> pulumi.Input[str]:
"""
The type of reservation to create. Valid values: `explicit`, `prefix`
"""
return pulumi.get(self, "reservation_type")
@reservation_type.setter
def reservation_type(self, value: pulumi.Input[str]):
pulumi.set(self, "reservation_type", value)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> pulumi.Input[str]:
"""
The ID of the subnet to create the reservation for.
"""
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: pulumi.Input[str]):
pulumi.set(self, "subnet_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A brief description of the reservation.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@pulumi.input_type
class _SubnetCidrReservationState:
def __init__(__self__, *,
cidr_block: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
owner_id: Optional[pulumi.Input[str]] = None,
reservation_type: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering SubnetCidrReservation resources.
:param pulumi.Input[str] cidr_block: The CIDR block for the reservation.
:param pulumi.Input[str] description: A brief description of the reservation.
:param pulumi.Input[str] owner_id: ID of the AWS account that owns this CIDR reservation.
:param pulumi.Input[str] reservation_type: The type of reservation to create. Valid values: `explicit`, `prefix`
:param pulumi.Input[str] subnet_id: The ID of the subnet to create the reservation for.
"""
if cidr_block is not None:
pulumi.set(__self__, "cidr_block", cidr_block)
if description is not None:
pulumi.set(__self__, "description", description)
if owner_id is not None:
pulumi.set(__self__, "owner_id", owner_id)
if reservation_type is not None:
pulumi.set(__self__, "reservation_type", reservation_type)
if subnet_id is not None:
pulumi.set(__self__, "subnet_id", subnet_id)
@property
@pulumi.getter(name="cidrBlock")
def cidr_block(self) -> Optional[pulumi.Input[str]]:
"""
The CIDR block for the reservation.
"""
return pulumi.get(self, "cidr_block")
@cidr_block.setter
def cidr_block(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cidr_block", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A brief description of the reservation.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of the AWS account that owns this CIDR reservation.
"""
return pulumi.get(self, "owner_id")
@owner_id.setter
def owner_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "owner_id", value)
@property
@pulumi.getter(name="reservationType")
def reservation_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of reservation to create. Valid values: `explicit`, `prefix`
"""
return pulumi.get(self, "reservation_type")
@reservation_type.setter
def reservation_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reservation_type", value)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the subnet to create the reservation for.
"""
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subnet_id", value)
class SubnetCidrReservation(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cidr_block: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
reservation_type: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a subnet CIDR reservation resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.ec2.SubnetCidrReservation("example",
cidr_block="10.0.0.16/28",
reservation_type="prefix",
subnet_id=aws_subnet["example"]["id"])
```
## Import
Existing CIDR reservations can be imported using `SUBNET_ID:RESERVATION_ID`, e.g.,
```sh
$ pulumi import aws:ec2/subnetCidrReservation:SubnetCidrReservation example subnet-01llsxvsxabqiymcz:scr-4mnvz6wb7otksjcs9
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cidr_block: The CIDR block for the reservation.
:param pulumi.Input[str] description: A brief description of the reservation.
:param pulumi.Input[str] reservation_type: The type of reservation to create. Valid values: `explicit`, `prefix`
:param pulumi.Input[str] subnet_id: The ID of the subnet to create the reservation for.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SubnetCidrReservationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a subnet CIDR reservation resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.ec2.SubnetCidrReservation("example",
cidr_block="10.0.0.16/28",
reservation_type="prefix",
subnet_id=aws_subnet["example"]["id"])
```
## Import
Existing CIDR reservations can be imported using `SUBNET_ID:RESERVATION_ID`, e.g.,
```sh
$ pulumi import aws:ec2/subnetCidrReservation:SubnetCidrReservation example subnet-01llsxvsxabqiymcz:scr-4mnvz6wb7otksjcs9
```
:param str resource_name: The name of the resource.
:param SubnetCidrReservationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SubnetCidrReservationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cidr_block: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
reservation_type: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SubnetCidrReservationArgs.__new__(SubnetCidrReservationArgs)
if cidr_block is None and not opts.urn:
raise TypeError("Missing required property 'cidr_block'")
__props__.__dict__["cidr_block"] = cidr_block
__props__.__dict__["description"] = description
if reservation_type is None and not opts.urn:
raise TypeError("Missing required property 'reservation_type'")
__props__.__dict__["reservation_type"] = reservation_type
if subnet_id is None and not opts.urn:
raise TypeError("Missing required property 'subnet_id'")
__props__.__dict__["subnet_id"] = subnet_id
__props__.__dict__["owner_id"] = None
super(SubnetCidrReservation, __self__).__init__(
'aws:ec2/subnetCidrReservation:SubnetCidrReservation',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
cidr_block: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
owner_id: Optional[pulumi.Input[str]] = None,
reservation_type: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None) -> 'SubnetCidrReservation':
"""
Get an existing SubnetCidrReservation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cidr_block: The CIDR block for the reservation.
:param pulumi.Input[str] description: A brief description of the reservation.
:param pulumi.Input[str] owner_id: ID of the AWS account that owns this CIDR reservation.
:param pulumi.Input[str] reservation_type: The type of reservation to create. Valid values: `explicit`, `prefix`
:param pulumi.Input[str] subnet_id: The ID of the subnet to create the reservation for.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SubnetCidrReservationState.__new__(_SubnetCidrReservationState)
__props__.__dict__["cidr_block"] = cidr_block
__props__.__dict__["description"] = description
__props__.__dict__["owner_id"] = owner_id
__props__.__dict__["reservation_type"] = reservation_type
__props__.__dict__["subnet_id"] = subnet_id
return SubnetCidrReservation(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="cidrBlock")
def cidr_block(self) -> pulumi.Output[str]:
"""
The CIDR block for the reservation.
"""
return pulumi.get(self, "cidr_block")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A brief description of the reservation.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> pulumi.Output[str]:
"""
ID of the AWS account that owns this CIDR reservation.
"""
return pulumi.get(self, "owner_id")
@property
@pulumi.getter(name="reservationType")
def reservation_type(self) -> pulumi.Output[str]:
"""
The type of reservation to create. Valid values: `explicit`, `prefix`
"""
return pulumi.get(self, "reservation_type")
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> pulumi.Output[str]:
"""
The ID of the subnet to create the reservation for.
"""
return pulumi.get(self, "subnet_id")
| 2.203125 | 2 |
complier-for-retards.py | bossjaylen145/trojan | 1 | 12788807 | import os
def complier():
print("If ur using this ur so dumb on god just read the install instructions!\n"
"PLEASE HAVE THE requirements.txt FILE IN THE SAME DIRECTORY!!!!")
os.system("pip install -r requirements.txt")
def cleanup():
cmds = ["RD __pycache__ /Q /S",
"RD build /Q /S",
"DEL trojan.spec /Q"]
for commands in cmds:
os.system(commands)
logo = input("Use custom icon (Y/N) ")
args = "pyinstaller --onefile --windowed"
if logo.lower() == "y":
try:
logo_name = input("Input name of logo (must be .ico) ")
args += f" --icon={os.getcwd()}{logo_name} trojan.py"
print(f"Using args: {args}")
os.system(args)
cleanup()
except Exception as e:
print(e)
cleanup()
os.system("pause")
if logo.lower() == "n":
try:
os.system("pyinstaller --onefile --windowed trojan.py")
print(f"Using args: {args}")
cleanup()
except Exception as e:
print(e)
cleanup()
os.system("pause")
complier() | 2.984375 | 3 |
examples/LSTM_classification.py | xhpxiaohaipeng/xhp_flow_frame | 2 | 12788808 | <filename>examples/LSTM_classification.py
import os
from xhp_flow.nn.node import Placeholder,Linear,Sigmoid,ReLu,Leakrelu,Elu,Tanh,LSTM
from xhp_flow.optimize.optimize import toplogical_sort,run_steps,forward,save_model,load_model,Auto_update_lr,Visual_gradient,Grad_Clipping_Disappearance,SUW,\
SGD,\
Momentum,\
Adagrad,\
RMSProp,\
AdaDelta,\
Adam,\
AdaMax,\
Nadam,\
NadaMax
from xhp_flow.loss.loss import MSE,EntropyCrossLossWithSoftmax
import matplotlib.pyplot as plt
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
def labeltoint(label):
if label == 'left':
label = 0
if label == 'keep':
label = 1
if label == 'right':
label = 2
return label
import json
import numpy as np
with open('data1/train.json', 'r') as f:
j = json.load(f)
# print(j.keys())
X_train = j['states']
Y_train = j['labels']
for i in range(len(Y_train)):
Y_train[i] = labeltoint(Y_train[i])
# print(Y_train)
with open('data1/test.json', 'r') as f:
j = json.load(f)
X_test = j['states']
Y_test = j['labels']
for i in range(len(Y_test)):
Y_test[i] = labeltoint(Y_test[i])
split_frac = 0.8
X_train, Y_train, X_test, Y_test = np.array(X_train).astype(np.float32), np.array(Y_train).astype(np.long), np.array(
X_test).astype(np.float32), np.array(Y_test).astype(np.long)
## split data into training, validation, and test data (features and labels, x and y)
val_x, test_x = X_test[:len(X_test) // 2], X_test[len(X_test) // 2:]
val_y, test_y = Y_test[:len(Y_test) // 2], Y_test[len(Y_test) // 2:]
import torch
from torch.utils.data import TensorDataset, DataLoader
# create Tensor datasets
train_data = TensorDataset(torch.from_numpy((X_train)), torch.from_numpy(Y_train))
valid_data = TensorDataset(torch.from_numpy(val_x), torch.from_numpy(val_y))
test_data = TensorDataset(torch.from_numpy(test_x), torch.from_numpy(test_y))
# dataloaders
batch_size = 64
# make sure to SHUFFLE your data
train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)
valid_loader = DataLoader(valid_data, shuffle=True, batch_size=batch_size)
test_loader = DataLoader(test_data, shuffle=True, batch_size=batch_size)
x1, y = next(iter(train_loader))
input_x, y = x1.numpy(), y.numpy()
class LSTM_classfy():
def __init__(self, input_size=1, hidden_size=16, output_size=3):
self.x, self.y = Placeholder(name='x', is_trainable=False), Placeholder(name='y', is_trainable=False)
self.wf, self.bf = Placeholder(name='wf'), Placeholder(name='bf')
self.wi, self.bi = Placeholder(name='wi'), Placeholder(name='bi')
self.wc, self.bc = Placeholder(name='wc'), Placeholder(name='bc')
self.wo, self.bo = Placeholder(name='wo'), Placeholder(name='bo')
#self.w0, self.b0 = Placeholder(name='w0'), Placeholder(name='b0')
self.w1, self.b1 = Placeholder(name='w1'), Placeholder(name='b1')
self.w2, self.b2 = Placeholder(name='w2'), Placeholder(name='b2')
#self.linear0 = Linear(self.x, self.w0, self.b0, name='linear0')
self.lstm = LSTM(self.x, self.wf, self.wi, self.wc, self.wo, self.bf, self.bi, self.bc, self.bo,
input_size, hidden_size, name='LSTM')
self.linear1 = Linear(self.lstm, self.w1, self.b1, name='linear1')
#self.output = Tanh(self.linear1, name='Relu')
self.y_pre = Linear(self.linear1, self.w2, self.b2, name='output_pre')
self.cross_loss = EntropyCrossLossWithSoftmax(self.y_pre, self.y,0.01, name='Cross_Loss')
        # initialize the feed dict: input batch plus randomly initialized weights and zero biases
self.feed_dict = {
self.x: input_x,
self.y: y,
# self.w0: np.random.rand(4, input_size),
#self.b0: np.zeros(input_size),
self.wf: np.random.rand(input_size + hidden_size, hidden_size),
self.bf: np.zeros(hidden_size),
self.wi: np.random.rand(input_size + hidden_size, hidden_size),
self.bi: np.zeros(hidden_size),
self.wc: np.random.rand(input_size + hidden_size, hidden_size),
self.bc: np.zeros(hidden_size),
self.wo: np.random.rand(input_size + hidden_size, hidden_size),
self.bo: np.zeros(hidden_size),
self.w1: np.random.rand(hidden_size, hidden_size),
self.b1: np.zeros(hidden_size),
self.w2: np.random.rand(hidden_size, output_size),
self.b2: np.zeros(output_size),
}
#graph_mlp_class = convert_feed_dict_graph(mlp_class.feed_dict)
#print(graph_sort_class)
def train(model,train_data,epoch = 4000,learning_rate = 0.0128):
    # start training
accuracies = []
losses = []
losses_valid = []
accuracies_valid = []
loss_min = np.inf
    graph_sort_class = toplogical_sort(model.feed_dict)  # topological sort of the computation graph
optim = Adam(graph_sort_class)
update_lr = Auto_update_lr(lr=learning_rate,alpha=0.1, patiences=200, print_=True)
for e in range(epoch):
for X,Y in train_data:
X,Y = X.unsqueeze(1).numpy(),Y.numpy()
model.x.value = X
model.y.value = Y
run_steps(graph_sort_class)
learning_rate = update_lr.lr
optim.update(learning_rate=learning_rate)
Visual_gradient(model)
Grad_Clipping_Disappearance(model, 5)
loss = model.cross_loss.value
accuracy = model.cross_loss.accuracy
losses.append(loss)
accuracies.append(accuracy*100)
for x,y in valid_loader:
x,y = x.unsqueeze(1).numpy(),y.numpy()
model.x.value = x
model.y.value = y
run_steps(graph_sort_class,train=False,valid=True)
loss_valid = model.cross_loss.value
accuracy_valid = model.cross_loss.accuracy
losses_valid.append(loss_valid)
accuracies_valid.append(accuracy_valid*100)
update_lr.updata(np.mean(losses_valid))
print("epoch:{}/{},train loss:{:.8f},train accuracy:{:.6f}%,valid loss:{:.8f},valid accuracy:{:.6f}%".
format(e,epoch,np.mean(losses),np.mean(accuracies),np.mean(losses_valid),np.mean(accuracies_valid)))
if np.mean(losses_valid) < loss_min:
            print('loss is {:.6f}, still decreasing, saving model'.format(np.mean(losses_valid)))
save_model("model/lstm_class.xhp",model)
loss_min = np.mean(losses_valid)
#save_model("lstm_class.xhp",model)
plt.plot(losses)
plt.savefig("image/lstm_class_loss.png")
plt.show()
lstm_class = LSTM_classfy(4,16,3)
load_model('model/lstm_class.xhp',lstm_class)
train(lstm_class,train_loader,50000,0.00128)
def predict(x,model):
graph = toplogical_sort(model.feed_dict)
model.x.value = x
run_steps(graph,train=False,valid=False)
y = graph[-2].value
result = np.argmax(y,axis=1)
return result
x1,y = next(iter(train_loader))
input_x,y = x1.numpy(),y.numpy()
load_model('model/lstm_class.xhp',lstm_class)
classs = predict(input_x[0][None,None,:],lstm_class)
print(classs,y[0])
def evaluator(test_loader,model):
graph = toplogical_sort(model.feed_dict)
accuracies = []
losses = []
for x, y in test_loader:
x, y = x.unsqueeze(1).numpy(), y.numpy()
model.x.value = x
model.y.value = y
run_steps(graph, train=False, valid=True)
loss_test = model.cross_loss.value
accuracy_test = model.cross_loss.accuracy
losses.append(loss_test)
accuracies.append(accuracy_test)
print("test loss:{},test accuracy:{}".format(np.mean(losses),np.mean(accuracies)))
evaluator(test_loader,lstm_class) | 2.640625 | 3 |
astro_fixer.py | boada/scripts | 0 | 12788809 | import subprocess
import os
import math
import time
def hms2decimal(RAString, delimiter):
"""Converts a delimited string of Hours:Minutes:Seconds format into decimal
degrees.
@type RAString: string
@param RAString: coordinate string in H:M:S format
@type delimiter: string
@param delimiter: delimiter character in RAString
@rtype: float
@return: coordinate in decimal degrees
"""
# is it in HH:MM:SS format?
if delimiter == "":
RABits = str(RAString).split()
else:
RABits = str(RAString).split(delimiter)
if len(RABits) > 1:
RAHDecimal = float(RABits[0])
if len(RABits) > 1:
RAHDecimal = RAHDecimal+(float(RABits[1])/60.0)
if len(RABits) > 2:
RAHDecimal = RAHDecimal+(float(RABits[2])/3600.0)
RADeg = (RAHDecimal/24.0)*360.0
else:
RADeg = float(RAString)
return RADeg
def dms2decimal(decString, delimiter):
"""Converts a delimited string of Degrees:Minutes:Seconds format into
decimal degrees.
@type decString: string
@param decString: coordinate string in D:M:S format
@type delimiter: string
@param delimiter: delimiter character in decString
@rtype: float
@return: coordinate in decimal degrees
"""
# is it in DD:MM:SS format?
if delimiter == "":
decBits = str(decString).split()
else:
decBits = str(decString).split(delimiter)
if len(decBits) > 1:
decDeg = float(decBits[0])
if decBits[0].find("-") != -1:
if len(decBits) > 1:
decDeg = decDeg-(float(decBits[1])/60.0)
if len(decBits) > 2:
decDeg = decDeg-(float(decBits[2])/3600.0)
else:
if len(decBits) > 1:
decDeg = decDeg+(float(decBits[1])/60.0)
if len(decBits) > 2:
decDeg = decDeg+(float(decBits[2])/3600.0)
else:
decDeg = float(decString)
return decDeg
def decimal2hms(RADeg, delimiter):
"""Converts decimal degrees to string in Hours:Minutes:Seconds format with
user specified delimiter.
@type RADeg: float
@param RADeg: coordinate in decimal degrees
@type delimiter: string
@param delimiter: delimiter character in returned string
@rtype: string
@return: coordinate string in H:M:S format
"""
hours = (RADeg/360.0)*24
#if hours < 10 and hours >= 1:
if 1 <= hours < 10:
sHours = "0"+str(hours)[0]
elif hours >= 10:
sHours = str(hours)[:2]
elif hours < 1:
sHours = "00"
if str(hours).find(".") == -1:
mins = float(hours)*60.0
else:
mins = float(str(hours)[str(hours).index("."):])*60.0
#if mins<10 and mins>=1:
if 1 <= mins<10:
sMins = "0"+str(mins)[:1]
elif mins >= 10:
sMins = str(mins)[:2]
elif mins < 1:
sMins = "00"
secs = (hours-(float(sHours)+float(sMins)/60.0))*3600.0
#if secs < 10 and secs>0.001:
if 0.001 < secs < 10:
sSecs = "0"+str(secs)[:str(secs).find(".")+4]
elif secs < 0.0001:
sSecs = "00.001"
else:
sSecs = str(secs)[:str(secs).find(".")+4]
if len(sSecs) < 5:
sSecs = sSecs+"00" # So all to 3dp
if float(sSecs) == 60.000:
sSecs = "00.00"
sMins = str(int(sMins)+1)
if int(sMins) == 60:
sMins = "00"
        sHours = str(int(sHours)+1)
return sHours+delimiter+sMins+delimiter+sSecs
def decimal2dms(decDeg, delimiter):
"""Converts decimal degrees to string in Degrees:Minutes:Seconds format
with user specified delimiter.
@type decDeg: float
@param decDeg: coordinate in decimal degrees
@type delimiter: string
@param delimiter: delimiter character in returned string
@rtype: string
@return: coordinate string in D:M:S format
"""
# Positive
if decDeg > 0:
#if decDeg < 10 and decDeg>=1:
if 1 <= decDeg < 10:
sDeg = "0"+str(decDeg)[0]
elif decDeg >= 10:
sDeg = str(decDeg)[:2]
elif decDeg < 1:
sDeg = "00"
if str(decDeg).find(".") == -1:
mins = float(decDeg)*60.0
else:
mins = float(str(decDeg)[str(decDeg).index("."):])*60
#if mins<10 and mins>=1:
if 1 <= mins < 10:
sMins = "0"+str(mins)[:1]
elif mins >= 10:
sMins = str(mins)[:2]
elif mins < 1:
sMins = "00"
secs = (decDeg-(float(sDeg)+float(sMins)/60.0))*3600.0
#if secs<10 and secs>0:
if 0 < secs < 10:
sSecs = "0"+str(secs)[:str(secs).find(".")+3]
elif secs < 0.001:
sSecs = "00.00"
else:
sSecs = str(secs)[:str(secs).find(".")+3]
if len(sSecs) < 5:
sSecs = sSecs+"0" # So all to 2dp
if float(sSecs) == 60.00:
sSecs = "00.00"
sMins = str(int(sMins)+1)
if int(sMins) == 60:
sMins = "00"
sDeg = str(int(sDeg)+1)
return "+"+sDeg+delimiter+sMins+delimiter+sSecs
else:
#if decDeg>-10 and decDeg<=-1:
if -10 < decDeg <= -1:
sDeg = "-0"+str(decDeg)[1]
elif decDeg <= -10:
sDeg = str(decDeg)[:3]
elif decDeg > -1:
sDeg = "-00"
if str(decDeg).find(".") == -1:
mins = float(decDeg)*-60.0
else:
mins = float(str(decDeg)[str(decDeg).index("."):])*60
#if mins<10 and mins>=1:
if 1 <= mins < 10:
sMins = "0"+str(mins)[:1]
elif mins >= 10:
sMins = str(mins)[:2]
elif mins < 1:
sMins = "00"
secs = (decDeg-(float(sDeg)-float(sMins)/60.0))*3600.0
#if secs>-10 and secs<0:
# so don't get minus sign
if -10 < secs < 0:
sSecs = "0"+str(secs)[1:str(secs).find(".")+3]
elif secs > -0.001:
sSecs = "00.00"
else:
sSecs = str(secs)[1:str(secs).find(".")+3]
if len(sSecs) < 5:
sSecs = sSecs+"0" # So all to 2dp
if float(sSecs) == 60.00:
sSecs = "00.00"
sMins = str(int(sMins)+1)
if int(sMins) == 60:
sMins = "00"
sDeg = str(int(sDeg)-1)
return sDeg+delimiter+sMins+delimiter+sSecs
def shiftRADec(ra1, dec1, deltaRA, deltaDec):
"""Computes new right ascension and declination shifted from the original
by some delta RA and delta DEC. Input position is decimal degrees. Shifts
(deltaRA, deltaDec) are arcseconds, and output is decimal degrees. Based on
an IDL routine of the same name.
@param ra1: float
@type ra1: R.A. in decimal degrees
@param dec1: float
@type dec1: dec. in decimal degrees
@param deltaRA: float
@type deltaRA: shift in R.A. in arcseconds
@param deltaDec: float
@type deltaDec: shift in dec. in arcseconds
@rtype: float [newRA, newDec]
@return: shifted R.A. and dec.
"""
d2r = math.pi/180.
as2r = math.pi/648000.
# Convert everything to radians
#rara1 = ra1*d2r
dcrad1 = dec1*d2r
shiftRArad = deltaRA*as2r
shiftDCrad = deltaDec*as2r
# Shift!
#deldec2 = 0.0
sindis = math.sin(shiftRArad / 2.0)
sindelRA = sindis / math.cos(dcrad1)
delra = 2.0*math.asin(sindelRA) / d2r
# Make changes
ra2 = ra1+delra
dec2 = dec1 + deltaDec / 3600.0
return ra2, dec2
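# Usage sketch (hedged): the coordinate strings and the 5-arcsecond offsets below
# are illustrative only, not values taken from this script.
#   ra_deg = hms2decimal('10:45:03.6', ':')              # ~161.265 deg
#   dec_deg = dms2decimal('-59:41:04', ':')              # ~-59.684 deg
#   ra2, dec2 = shiftRADec(ra_deg, dec_deg, 5.0, -5.0)   # shift +5" in RA, -5" in dec
#   print(decimal2hms(ra2, ':'), decimal2dms(dec2, ':'))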
pixelsize = 0.523
tol = 0.5
print('Enter the name of the target: ')
name = str(raw_input())
print('Enter the RA of target (HH:MM:SS): ')
RA = str(raw_input())
print('Enter the DEC of target (DD:MM:SS): ')
DEC = str(raw_input())
print('Now enter x-fiducial coordinate: ')
xf = raw_input()
print('Now enter y-fiducial coordinate: ')
yf = raw_input()
print('Here we go!')
i =1
while True:
print('Try %d' % (i))
command = './find_gen.csh '+RA+' '+DEC+' '+name
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
process.wait()
print('Enter NEW x-fiducial coordinate: ')
    xfnew = raw_input()
    print('Enter NEW y-fiducial coordinate: ')
    yfnew = raw_input()
deltaxf = float(xf) - float(xfnew)
deltayf = float(yf) - float(yfnew)
deltaDEC = deltayf * pixelsize
deltaRA = deltaxf * pixelsize
ra = hms2decimal(RA, ':')
dec = dms2decimal(DEC, ':')
if abs(deltaxf) < tol and abs(deltayf) < tol:
break
if abs(deltaxf) > tol:
deltaRA = -1 * deltaRA
else:
deltaRA = 0
if abs(deltayf) > tol:
        deltaDEC = -1 * deltaDEC
else:
deltaDEC = 0
# New RA and DEC
RA, DEC = shiftRADec(ra, dec, deltaRA, deltaDEC)
    # Convert back to sexagesimal
    RA = decimal2hms(RA, ':')
    DEC = decimal2dms(DEC, ':')
    print('deltaRA: %s  deltaDEC: %s' % (deltaRA, deltaDEC))
print('New RA %s' %(RA))
print('New DEC %s' %(DEC))
i+=1
print('Now close the finder software')
time.sleep(10)
| 3.34375 | 3 |
BlogProject/urls.py | JinitSan/Blog-App | 0 | 12788810 | <reponame>JinitSan/Blog-App
"""BlogProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from users import views as users_views
# redirects from our main project to apps
urlpatterns = [
# make the blog app the default/home
path('', include('blog.urls')),
# second approach to redirect directly to the view instead of
# doing it via the urls.py of the app (as done above)
# so the alias is named here itself
path('register/', users_views.register, name='register'),
path('profile/', users_views.profile, name='profile'),
# use Django's built-in views, but they need templates
# as they only handle the backend logic
path('login/',
auth_views.LoginView.as_view(template_name='users/login.html'),
name='login'),
path('logout/',
auth_views.LogoutView.as_view(template_name='users/logout.html'),
name='logout'),
path('admin/', admin.site.urls),
]
if settings.DEBUG:
# tell Django to use specified url in case of using user uploaded media
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| 2.953125 | 3 |
basiccomfundamental/List2.py | KApilBD/python | 0 | 12788811 | <filename>basiccomfundamental/List2.py
aList = [0,1,2,3,4,5]
bList = aList
aList[2] = 'hello'
print(aList == bList)
print(aList is bList)
print(aList)
print(bList)
cList = [6,5,4,3,2]
dList = []
for num in cList:
dList.append(num)
print(cList == dList) # compares contents only
print(cList is dList) # compares identity (same object in memory)
cList[2] = 20
print(cList)
print(dList)
| 3.828125 | 4 |
python/backup.py | R-Varun/Sentimeter | 0 | 12788812 | <reponame>R-Varun/Sentimeter<gh_stars>0
import sys
import json
import parse
import contextsummary
import SentimentAnalysis
data = parse.parseInput(sys.argv[1])
#data = input.readBody()
input = data[0]
granularity = data[2]
begin = int(granularity[0])
end = int(granularity[1])
stride = data[3]
if begin == -1 or end == -1:
begin = 0
end = len(input)
elif begin < 0 or end > len(input):
print(json.dumps({"ERROR":"invalid granularity"}))
quit()
end = min(end, len(input))
contextList = []
sentimentList = []
topicList = {}
conversationSentiment = {}
if stride is None:
    stride = len(input)
if stride < 1:
    stride = 1
counter = 0
cur = begin
all_topics = {}
cumulative = []
# print("STRIDE:",stride)
for i in range( (end - begin) // stride):
for sentence in input[cur: cur + stride]:
if "utterance" not in sentence:
continue
taggedSentences = contextsummary.posTag(sentence["utterance"])
speaker = sentence["speaker"]
#context
topic = contextsummary.sentenctExtract(taggedSentences)
for top in topic:
if top in topicList:
topicList[top] += 1
# all_topics[top] += 1
else:
topicList[top] = 1
# all_topics[top] = 1
if top in all_topics:
all_topics[top] += 1
else:
all_topics[top] = 1
# begin += stride
cur += stride
cumulative.append(sorted(topicList, key = lambda x : -1 * topicList[x]))
topicList = {}
"""print(sortedTopics[:5])
for x in sortedTopics[:5]:
print(topicList[x])"""
print(json.dumps( {"timeline" : cumulative,
"total" : sorted(all_topics, key = lambda x : -1 * all_topics[x]) }))
| 2.40625 | 2 |
multitest_transport/ui2/main.py | maksonlee/multitest_transport | 0 | 12788813 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ATS UI flask server."""
import os
import flask
from multitest_transport.models import ndb_models
from multitest_transport.util import env
ROOT_PATH = os.path.dirname(__file__)
STATIC_PATH = os.path.join(ROOT_PATH, 'static')
APP = flask.Flask(
__name__,
root_path=ROOT_PATH,
static_folder=None,
template_folder=ROOT_PATH)
@APP.route('/static/<path:path>')
def Static(path):
"""Returns static files."""
return flask.send_from_directory(STATIC_PATH, path, conditional=False)
@APP.route('/app.js')
def App():
"""Returns application script."""
script = 'dev_sources.concat.js' if env.IS_DEV_MODE else 'app.js'
return flask.send_from_directory(ROOT_PATH, script, conditional=False)
@APP.route('/', defaults={'_': ''})
@APP.route('/<path:_>')
def Root(_):
"""Routes all other requests to index.html and angular."""
private_node_config = ndb_models.GetPrivateNodeConfig()
analytics_tracking_id = ''
if not env.IS_DEV_MODE and private_node_config.metrics_enabled:
analytics_tracking_id = 'UA-140187490-1'
return flask.render_template(
'index.html',
analytics_tracking_id=analytics_tracking_id,
env=env,
private_node_config=private_node_config)
| 1.804688 | 2 |
flags-dest/train_with_globals.py | lambdaofgod/examples | 9 | 12788814 | learning_rate = 0.01
epochs = 10
print("Training for %i epochs with a learning rate of %f"
% (epochs, learning_rate))
| 2.921875 | 3 |
app/migrations/0004_auto_20190110_1501.py | wusri66666/room_order | 0 | 12788815 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2019-01-10 07:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0003_auto_20190110_1037'),
]
operations = [
migrations.RenameModel(
old_name='Meeting_room',
new_name='MeetingRoom',
),
migrations.RenameModel(
old_name='Scheduled_record',
new_name='ReserveRecord',
),
]
| 1.609375 | 2 |
fridge/Constituent/Constituent.py | ryanstwrt/FRIDGE | 0 | 12788816 | <filename>fridge/Constituent/Constituent.py
import fridge.Material.Material as materialReader
import fridge.utilities.mcnpCreatorFunctions as mcnpCF
class Constituent(object):
"""Base class for creating an assembly constituent."""
def __init__(self, unit_info, void_percent=1.0):
"""Initializes the data for creating a constituent."""
self.universe = unit_info[0][0]
self.cellNum = unit_info[0][1]
self.surfaceNum = unit_info[0][2]
self.materialXCLibrary = unit_info[0][4]
self.position = unit_info[0][5]
self.materialNum = unit_info[0][6]
self.voidPercent = void_percent
self.surfaceCard = ''
self.cellCard = ''
self.materialCard = ''
self.material = None
def get_material_card(self, material_name):
"""Creates the material for the given constituent and creates the material card."""
self.material = materialReader.Material()
self.material.set_material(material_name)
if self.voidPercent != 1.0:
self.material.set_void(self.voidPercent)
self.materialCard = mcnpCF.build_material_card(self.material, self.materialXCLibrary, self.materialNum)
def make_component(self, unit_info):
"""Creates the component for the given constituent and creates the cell/surface cards."""
pass
| 2.8125 | 3 |
vsts/vsts/feed/v4_1/models/package_dependency.py | kenkuo/azure-devops-python-api | 0 | 12788817 | <reponame>kenkuo/azure-devops-python-api
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class PackageDependency(Model):
"""PackageDependency.
:param group:
:type group: str
:param package_name:
:type package_name: str
:param version_range:
:type version_range: str
"""
_attribute_map = {
'group': {'key': 'group', 'type': 'str'},
'package_name': {'key': 'packageName', 'type': 'str'},
'version_range': {'key': 'versionRange', 'type': 'str'}
}
def __init__(self, group=None, package_name=None, version_range=None):
super(PackageDependency, self).__init__()
self.group = group
self.package_name = package_name
self.version_range = version_range
| 1.945313 | 2 |
cocrawler/fetcher.py | machawk1/cocrawler | 0 | 12788818 | '''
async fetching of urls.
Assumes robots checks have already been done.
Supports server mocking; proxies are not yet implemented.
Success returns response object and response bytes (which were already
read in order to shake out all potential network-related exceptions.)
Failure returns enough details for the caller to do something smart:
503, other 5xx, DNS fail, connect timeout, error between connect and
full response, proxy failure. Plus an errorstring good enough for logging.
'''
import time
import traceback
from collections import namedtuple
import ssl
import urllib
import asyncio
import logging
import aiohttp
from . import stats
from . import config
from . import content
LOGGER = logging.getLogger(__name__)
# these errors get printed deep in aiohttp but they also bubble up
aiohttp_errors = {
'SSL handshake failed',
'SSL error errno:1 reason: CERTIFICATE_VERIFY_FAILED',
'SSL handshake failed on verifying the certificate',
'Fatal error on transport TCPTransport',
'Fatal error on SSL transport',
'SSL error errno:1 reason: UNKNOWN_PROTOCOL',
'Future exception was never retrieved',
'Unclosed connection',
'SSL error errno:1 reason: TLSV1_UNRECOGNIZED_NAME',
'SSL error errno:1 reason: SSLV3_ALERT_HANDSHAKE_FAILURE',
'SSL error errno:1 reason: TLSV1_ALERT_INTERNAL_ERROR',
}
class AsyncioSSLFilter(logging.Filter):
def filter(self, record):
if record.name == 'asyncio' and record.levelname == 'ERROR':
msg = record.getMessage()
for ae in aiohttp_errors:
if msg.startswith(ae):
return False
return True
def establish_filters():
f = AsyncioSSLFilter()
logging.getLogger('asyncio').addFilter(f)
# XXX should be a policy plugin
# XXX cookie handling -- no way to have a cookie jar other than at session level
# need to directly manipulate domain-level cookie jars to get cookies
def apply_url_policies(url, crawler):
headers = {}
proxy = None
mock_url = None
mock_robots = None
headers['User-Agent'] = crawler.ua
test_host = config.read('Testing', 'TestHostmapAll')
if test_host:
headers['Host'] = url.urlsplit.netloc
(scheme, netloc, path, query, fragment) = url.urlsplit
netloc = test_host
mock_url = urllib.parse.urlunsplit((scheme, netloc, path, query, fragment))
mock_robots = url.urlsplit.scheme + '://' + test_host + '/robots.txt'
if crawler.prevent_compression:
headers['Accept-Encoding'] = 'identity'
else:
headers['Accept-Encoding'] = content.get_accept_encoding()
if crawler.upgrade_insecure_requests:
headers['Upgrade-Insecure-Requests'] = '1'
return headers, proxy, mock_url, mock_robots
FetcherResponse = namedtuple('FetcherResponse', ['response', 'body_bytes', 'req_headers',
't_first_byte', 't_last_byte', 'is_truncated',
'last_exception'])
async def fetch(url, session, headers=None, proxy=None, mock_url=None,
allow_redirects=None, max_redirects=None,
stats_prefix='', max_page_size=-1):
if proxy: # pragma: no cover
proxy = aiohttp.ProxyConnector(proxy=proxy)
# XXX we need to preserve the existing connector config (see cocrawler.__init__ for conn_kwargs)
# XXX we should rotate proxies every fetch in case some are borked
# XXX use proxy history to decide not to use some
raise ValueError('not yet implemented')
last_exception = None
is_truncated = False
try:
t0 = time.time()
last_exception = None
body_bytes = b''
blocks = []
left = max_page_size
with stats.coroutine_state(stats_prefix+'fetcher fetching'):
with stats.record_latency(stats_prefix+'fetcher fetching', url=url.url):
response = await session.get(mock_url or url.url,
allow_redirects=allow_redirects,
max_redirects=max_redirects,
headers=headers)
# https://aiohttp.readthedocs.io/en/stable/tracing_reference.html
# XXX should use tracing events to get t_first_byte
t_first_byte = '{:.3f}'.format(time.time() - t0)
while left > 0:
block = await response.content.read(left)
if not block:
body_bytes = b''.join(blocks)
break
blocks.append(block)
left -= len(block)
else:
body_bytes = b''.join(blocks)
if not response.content.at_eof():
stats.stats_sum('fetch truncated length', 1)
response.close() # this does interrupt the network transfer
is_truncated = 'length' # testme WARC
t_last_byte = '{:.3f}'.format(time.time() - t0)
except asyncio.TimeoutError as e:
stats.stats_sum('fetch timeout', 1)
last_exception = 'TimeoutError'
body_bytes = b''.join(blocks)
if len(body_bytes):
is_truncated = 'time' # testme WARC
stats.stats_sum('fetch timeout body bytes found', 1)
stats.stats_sum('fetch timeout body bytes found bytes', len(body_bytes))
# except (aiohttp.ClientError.ClientResponseError.TooManyRedirects) as e:
# # XXX remove me when I stop using redirects for robots.txt fetching
# raise
except (aiohttp.ClientError) as e:
# ClientError is a catchall for a bunch of things
# e.g. DNS errors, '400' errors for http parser errors
# ClientConnectorCertificateError for an SSL cert that doesn't match hostname
# ClientConnectorError(None, None) caused by robots redir to DNS fail
# ServerDisconnectedError(None,) caused by servers that return 0 bytes for robots.txt fetches
# TooManyRedirects("0, message=''",) caused by too many robots.txt redirs
stats.stats_sum('fetch ClientError', 1)
detailed_name = str(type(e).__name__)
last_exception = 'ClientError: ' + detailed_name + ': ' + str(e)
body_bytes = b''.join(blocks)
if len(body_bytes):
is_truncated = 'disconnect' # testme WARC
stats.stats_sum('fetch ClientError body bytes found', 1)
stats.stats_sum('fetch ClientError body bytes found bytes', len(body_bytes))
except ssl.CertificateError as e:
# unfortunately many ssl errors raise and have tracebacks printed deep in aiohttp
# so this doesn't go off much
stats.stats_sum('fetch SSL error', 1)
last_exception = 'CertificateError: ' + str(e)
#except (ValueError, AttributeError, RuntimeError) as e:
# supposedly aiohttp 2.1 only fires these on programmer error, but here's what I've seen in the past:
# ValueError Location: https:/// 'Host could not be detected' -- robots fetch
# ValueError Location: http:// /URL should be absolute/ -- robots fetch
# ValueError 'Can redirect only to http or https' -- robots fetch -- looked OK to curl!
# AttributeError: ?
# RuntimeError: ?
except ValueError as e:
# no A records found -- raised by my dns code
stats.stats_sum('fetch other error - ValueError', 1)
        last_exception = 'ValueError: ' + str(e)
except AttributeError as e:
stats.stats_sum('fetch other error - AttributeError', 1)
last_exception = 'AttributeError: ' + str(e)
except RuntimeError as e:
stats.stats_sum('fetch other error - RuntimeError', 1)
last_exception = 'RuntimeError: ' + str(e)
except asyncio.CancelledError:
raise
except Exception as e:
last_exception = 'Exception: ' + str(e)
stats.stats_sum('fetch surprising error', 1)
LOGGER.info('Saw surprising exception in fetcher working on %s:\n%s', mock_url or url.url, last_exception)
traceback.print_exc()
if last_exception is not None:
LOGGER.info('we failed working on %s, the last exception is %s', mock_url or url.url, last_exception)
return FetcherResponse(None, None, None, None, None, False, last_exception)
fr = FetcherResponse(response, body_bytes, response.request_info.headers,
t_first_byte, t_last_byte, is_truncated, None)
if response.status >= 500:
LOGGER.debug('server returned http status %d', response.status)
stats.stats_sum('fetch bytes', len(body_bytes) + len(response.raw_headers))
stats.stats_sum(stats_prefix+'fetch URLs', 1)
stats.stats_sum(stats_prefix+'fetch http code=' + str(response.status), 1)
# checks after fetch:
# hsts header?
# if ssl, check strict-transport-security header, remember max-age=foo part., other stuff like includeSubDomains
# did we receive cookies? was the security bit set?
return fr
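# Minimal usage sketch (hedged): `crawl_url` is assumed to be a cocrawler URL
# object (exposing .url / .urlsplit) and `session` an aiohttp.ClientSession
# owned by the crawler; the names and values here are illustrative only.
#
#   async def fetch_once(crawl_url, session):
#       fr = await fetch(crawl_url, session,
#                        headers={'User-Agent': 'cocrawler-test'},
#                        allow_redirects=False, max_page_size=1000000)
#       if fr.last_exception is None:
#           return fr.response.status, len(fr.body_bytes)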
def upgrade_scheme(url):
'''
Upgrade crawled scheme to https, if reasonable. This helps to reduce MITM attacks against the crawler.
https://chromium.googlesource.com/chromium/src/net/+/master/http/transport_security_state_static.json
Alternately, the return headers from a site might have strict-transport-security set ... a bit more
dangerous as we'd have to respect the timeout to avoid permanently learning something that's broken
    TODO: use HTTPS Everywhere? would have to have a fallback if https failed, which it occasionally will
'''
return url
| 2.453125 | 2 |
spatialpandas/tools/__init__.py | isabella232/spatialpandas | 0 | 12788819 | <gh_stars>0
from .sjoin import sjoin
| 1.0625 | 1 |
brax/envs/doublehumanoid.py | carolinewang01/brax | 0 | 12788820 | # Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains a humanoid to run in the +x direction."""
from typing import Tuple, List
import functools
import dataclasses
import jax
import jax.numpy as jnp
import numpy as np
import brax
# from brax.envs import multiagent_env
from brax.envs import env
from brax.physics import bodies
from brax.physics.base import take
from google.protobuf import text_format
class DoubleHumanoid(env.Env):
"""Trains a humanoid to run in the +x direction."""
def __init__(self, **kwargs):
# TODO: define a function to copy the system config automatically based on num_agents
self.num_agents = 2
config = text_format.Parse(_SYSTEM_CONFIG, brax.Config())
super().__init__(config, **kwargs)
# TODO: define these as properties in multiagent env
self.agent_action_size = 17
self.agent_observation_size = 293
# body info
self.body_parts = ["torso", "lwaist", "pelvis",
"right_thigh", "right_shin",
"left_thigh", "left_shin",
"right_upper_arm", "right_lower_arm",
"left_upper_arm", "left_lower_arm"
]
self.world_parts = ["floor"]
# actuator info
self.agent_dof = 17
assert self.agent_dof * self.num_agents == self.sys.num_joint_dof
self.torque_1d_act_idx = jnp.array([2, 6, 10, 13, 16])
self.torque_2d_act_idx = jnp.array([[0, 1], [11, 12], [14, 15]])
self.torque_3d_act_idx = jnp.array([[3, 4, 5], [7, 8, 9]])
# joint info
self.agent_joints = 10
self.num_joints_1d = 5
self.num_joints_2d = 3
self.num_joints_3d = 2
# info to differentiate humanoids
all_bodies = bodies.Body.from_config(config) # body object only used to get object mass and inertia
all_bodies = take(all_bodies, all_bodies.idx[:-len(self.world_parts)]) # skip the world bodies
self.num_body_parts = len(self.body_parts)
for i in range(self.num_agents):
# get system body idx from self.sys
body_idxs = {f"{body_part}{i}": self.sys.body_idx[f"{body_part}{i}"] for body_part in self.body_parts}
setattr(self, f"agent{i}_idxs", body_idxs)
# get mass, inertia from Body object
body = take(all_bodies, all_bodies.idx[i * self.num_body_parts: i * self.num_body_parts + self.num_body_parts ])
assert len(body.idx) == self.num_body_parts
setattr(self, f"mass{i}", body.mass.reshape(-1, 1))
setattr(self, f"inertia{i}", body.inertia)
self.mass = jnp.array([getattr(self, f"mass{i}") for i in range(self.num_agents)])
self.inertia = jnp.array([getattr(self, f"inertia{i}") for i in range(self.num_agents)])
self.floor_idx = self.sys.body_idx["floor"]
# how far apart to initialize humanoids
self.field_distance = 20
def update_parts_xyz(self, carry, part_idx):
qp_pos, xyz_offset = carry
qp_pos = jax.ops.index_update(qp_pos, jax.ops.index[part_idx],
xyz_offset+qp_pos[jax.ops.index[part_idx]]
)
return (qp_pos, xyz_offset), ()
def set_agent_xyz(self, carry, part_idxs):
qp_pos, rng = carry
rng, xyz_offset = self._random_target(rng)
(qp_pos, xyz_offset), _ = jax.lax.scan(
self.update_parts_xyz, (qp_pos, xyz_offset), part_idxs
)
return (qp_pos, rng), ()
def reset(self, rng: jnp.ndarray) -> env.State:
"""Resets the environment to an initial state."""
qp = self.sys.default_qp()
# move the humanoids to different positions
pos = qp.pos
agents_parts_idxs = jnp.array([list(getattr(self, f"agent{i}_idxs").values()) for i in range(self.num_agents)])
(pos, rng), _ = jax.lax.scan(
self.set_agent_xyz, (pos, rng), agents_parts_idxs
)
qp = dataclasses.replace(qp, pos=pos)
info = self.sys.info(qp)
qp, info = self.sys.step(qp,
jax.random.uniform(rng, (self.action_size,)) * .5) # action size is for all agents
all_obs = self._get_obs(qp, info, jnp.zeros((self.num_agents, self.agent_dof)))
reward = jnp.zeros((self.num_agents,))
done = 0
steps = jnp.zeros(1)
metrics = {
'reward_linvel': jnp.zeros((self.num_agents,)),
'reward_quadctrl': jnp.zeros((self.num_agents,)),
'reward_alive': jnp.zeros((self.num_agents,)),
'reward_impact': jnp.zeros((self.num_agents,))
}
return env.State(rng, qp, info, all_obs, reward, done, steps, metrics)
def step(self, state: env.State, action: jnp.ndarray) -> env.State:
"""Run one timestep of the environment's dynamics."""
rng = state.rng
# note the minus sign. reverse torque improves performance over a range of
# hparams. as to why: ¯\_(ツ)_/¯
qp, info = self.sys.step(state.qp, -action.flatten())
all_obs = self._get_obs(qp, info, action) # should this be - action?
reward, lin_vel_cost, quad_ctrl_cost, alive_bonus, quad_impact_cost = self._compute_reward(state, action, qp)
metrics = {
'reward_linvel': lin_vel_cost,
'reward_quadctrl': -quad_ctrl_cost,
'reward_alive': alive_bonus,
'reward_impact': -quad_impact_cost
}
steps = state.steps + self.action_repeat
done = self._compute_done(qp, steps)
return env.State(rng, qp, info, all_obs, reward, done, steps, metrics)
def _get_obs(self, qp: brax.QP, info: brax.Info, action: jnp.ndarray):
all_obs = []
# TODO: figure out how to jit self._get_agent_obs
# (qp, info, action), all_obs = jax.lax.scan(
# self._get_agent_obs, (qp, info, action), jnp.arange(self.num_agents))
for agent_idx in range(self.num_agents):
(qp, info, action), obs = self._get_agent_obs((qp, info, action), agent_idx)
all_obs.append(obs)
all_obs = jnp.array(all_obs)
# humanoid: (128, 299)
# double humanoid: (128, 2, 293)
# TODO: Add world features! (floor loc)
return all_obs
def _compute_reward(self, state: env.State, action: jnp.ndarray, qp: brax.QP):
# TODO: how to ensure ordering of reshaping is correct??
pos_before = jnp.reshape(state.qp.pos[:-1], (self.num_agents, self.num_body_parts, 3)) # ignore floor at last index
pos_after = jnp.reshape(qp.pos[:-1], (self.num_agents, self.num_body_parts, 3)) # ignore floor at last index
com_before = jnp.sum(pos_before * self.mass, axis=1) / jnp.sum(self.mass, axis=1)
com_after = jnp.sum(pos_after * self.mass, axis=1) / jnp.sum(self.mass, axis=1)
lin_vel_cost = 1.25 * (com_after[:, 0] - com_before[:, 0]) / self.sys.config.dt
reshaped_actions = jnp.reshape(action, (self.num_agents, self.agent_dof))
quad_ctrl_cost = .01 * jnp.sum(jnp.square(reshaped_actions), axis=1)
# can ignore contact cost, see: https://github.com/openai/gym/issues/1541
quad_impact_cost = jnp.zeros(self.num_agents)
alive_bonus = 5.0 * jnp.ones(self.num_agents)
reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus
return reward, lin_vel_cost, quad_ctrl_cost, alive_bonus, quad_impact_cost
def _compute_done(self, qp: brax.QP, steps: int, done_thres=0.75):
"""Return done if the proportion of agents that are done surpasses
done_thres
"""
torsos_idxs = jnp.arange(self.num_agents) * self.num_body_parts
torsos_zdim = take(qp.pos[:, 2], torsos_idxs)
done_cond0 = jnp.where(steps >= self.episode_length, x=1.0, y=0.0)
done_cond1 = jnp.where(torsos_zdim < 0.6, x=1.0, y=0.0)
done_cond2 = jnp.where(torsos_zdim > 2.1, x=1.0, y=0.0)
done_vec = done_cond0 + done_cond1 + done_cond2
done_vec = jnp.where(done_vec > 0.0, x=1.0, y=0.0)
done_ratio = jnp.sum(done_vec) / self.num_agents
done = jnp.where(done_ratio > done_thres, x=1.0, y=0.0)
return done
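    # Worked example (hedged, illustrative numbers): with num_agents == 2 and
    # done_thres == 0.75, a done_vec of [1.0, 0.0] gives done_ratio == 0.5, so
    # done stays 0.0; only when both agents are done (ratio 1.0 > 0.75) does
    # the episode terminate.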
def _get_agent_obs(self, carry, agent_idx) -> jnp.ndarray:
"""Observe humanoid body position, velocities, and angles."""
qp, info, action = carry
qpos, qvel = self._get_agent_qpos_qvel(agent_idx, qp)
qfrc_actuator = self._get_agent_qfrc(agent_idx, action[agent_idx])
cfrc_ext = self._get_agent_cfrc_ext(agent_idx, info)
cinert, cvel = self._get_agent_com_obs(agent_idx, qp)
# obs = jnp.expand_dims(jnp.concatenate(qpos + qvel + cinert + cvel + qfrc_actuator + \
# cfrc_ext), axis=0)
obs = jnp.concatenate(qpos + qvel + cinert + cvel + qfrc_actuator + \
cfrc_ext)
return (qp, info, action), obs
def _get_agent_qpos_qvel(self, agent_idx: int, qp: brax.QP) -> Tuple[List[jnp.ndarray], List[jnp.ndarray]]:
"""
Some pre-processing to pull joint angles and velocities
"""
# TODO: move outside this function
joint_1d_angle, joint_1d_vel = self.sys.joint_revolute.angle_vel(qp)
joint_2d_angle, joint_2d_vel = self.sys.joint_universal.angle_vel(qp)
joint_3d_angle, joint_3d_vel = self.sys.joint_spherical.angle_vel(qp)
idx_offset = agent_idx * self.num_joints_1d
joint_1d_angle = take(joint_1d_angle, jnp.arange(idx_offset, idx_offset + self.num_joints_1d))
joint_1d_vel = take(joint_1d_vel, jnp.arange(idx_offset, idx_offset + self.num_joints_1d))
idx_offset = agent_idx * self.num_joints_2d
joint_2d_angle = take(joint_2d_angle, jnp.arange(idx_offset, idx_offset + self.num_joints_2d))
joint_2d_vel = take(joint_2d_vel, jnp.arange(idx_offset, idx_offset + self.num_joints_2d))
idx_offset = agent_idx * self.num_joints_3d
joint_3d_angle = take(joint_3d_angle, jnp.arange(idx_offset, idx_offset + self.num_joints_3d))
joint_3d_vel = take(joint_3d_vel, jnp.arange(idx_offset, idx_offset + self.num_joints_3d))
# qpos:
# Z of the torso of agent idx (1,)
# orientation of the torso as quaternion (4,)
# joint angles, all dofs (8,)
agent_torso_idx = agent_idx * self.num_body_parts
qpos = [
qp.pos[agent_torso_idx, 2:], qp.rot[agent_torso_idx],
*joint_1d_angle, *joint_2d_angle, *joint_3d_angle
]
# qvel:
# velocity of the torso (3,)
# angular velocity of the torso (3,)
# joint angle velocities, all dofs (8,)
qvel = [
qp.vel[agent_torso_idx], qp.ang[agent_torso_idx],
*joint_1d_vel, *joint_2d_vel, *joint_3d_vel
]
return qpos, qvel
def _get_agent_qfrc(self, agent_idx: int, agent_action: jnp.ndarray) -> List[jnp.ndarray]:
# actuator forces
idx_offset = agent_idx * self.num_joints_1d
torque_1d = take(agent_action, self.torque_1d_act_idx)
torque_1d *= take(self.sys.torque_1d.strength,
jnp.arange(idx_offset, idx_offset + self.num_joints_1d))
idx_offset = agent_idx * self.num_joints_2d
torque_2d = take(agent_action, self.torque_2d_act_idx)
torque_2d = torque_2d.reshape(torque_2d.shape[:-2] + (-1,))
torque_2d *= jnp.repeat(take(self.sys.torque_2d.strength,
jnp.arange(idx_offset, idx_offset + self.num_joints_2d)),
2)
idx_offset = agent_idx * self.num_joints_3d
torque_3d = take(agent_action, self.torque_3d_act_idx)
torque_3d = torque_3d.reshape(torque_3d.shape[:-2] + (-1,))
torque_3d *= jnp.repeat(take(self.sys.torque_3d.strength,
jnp.arange(idx_offset, idx_offset + self.num_joints_3d)),
3)
qfrc_actuator = [torque_1d, torque_2d, torque_3d]
return qfrc_actuator
def _get_agent_cfrc_ext(self, agent_idx: int, info: brax.Info) -> List[jnp.ndarray]:
agent_torso_idx = agent_idx * self.num_body_parts
# external contact forces:
# delta velocity (3,), delta ang (3,) * num bodies in the system
cfrc_ext = [info.contact.vel[agent_torso_idx:agent_torso_idx + self.num_body_parts],
info.contact.ang[agent_torso_idx:agent_torso_idx + self.num_body_parts]
]
# flatten bottom dimension
cfrc_ext = [x.reshape(x.shape[:-2] + (-1,)) for x in cfrc_ext]
return cfrc_ext
def _get_agent_com_obs(self, agent_idx: int, qp: brax.QP) -> Tuple[List[jnp.ndarray], List[jnp.ndarray]]:
"""Get center of mass observations for one agent"""
agent_torso_idx = agent_idx * self.num_body_parts
agent_mass = getattr(self, f"mass{agent_idx}")
agent_inertia = getattr(self, f"inertia{agent_idx}")
body_pos = qp.pos[agent_torso_idx:agent_torso_idx + self.num_body_parts] # ignore floor at last index
body_vel = qp.vel[agent_torso_idx:agent_torso_idx + self.num_body_parts] # ignore floor at last index
com_vec = jnp.sum(body_pos * agent_mass, axis=0) / jnp.sum(agent_mass)
com_vel = body_vel * agent_mass / jnp.sum(agent_mass)
def v_outer(a):
return jnp.outer(a, a)
def v_cross(a, b):
return jnp.cross(a, b)
v_outer = jax.vmap(v_outer, in_axes=[0])
v_cross = jax.vmap(v_cross, in_axes=[0, 0])
disp_vec = body_pos - com_vec
# there are 11 bodies for each humanoid
com_inert = agent_inertia + agent_mass.reshape(
(11, 1, 1)) * ((jnp.linalg.norm(disp_vec, axis=1)**2.).reshape(
(11, 1, 1)) * jnp.stack([jnp.eye(3)] * 11) - v_outer(disp_vec))
cinert = [com_inert.reshape(-1)]
square_disp = (1e-7 + (jnp.linalg.norm(disp_vec, axis=1)**2.)).reshape(
(11, 1))
com_angular_vel = (v_cross(disp_vec, body_vel) / square_disp)
cvel = [com_vel.reshape(-1), com_angular_vel.reshape(-1)]
return cinert, cvel
def _random_target(self, rng: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Returns a target location in a random circle on xz plane."""
rng, rng1, rng2 = jax.random.split(rng, 3)
dist = self.field_distance * jax.random.uniform(rng1)
ang = jnp.pi * 2. * jax.random.uniform(rng2)
target_x = dist * jnp.cos(ang)
target_y = dist * jnp.sin(ang)
target_z = 0
target = jnp.array([target_x, target_y, target_z]).transpose()
return rng, target
_HUMANOID0_CONFIG ="""
bodies {
name: "torso0"
colliders {
position {
}
rotation {
x: -90.0
}
capsule {
radius: 0.07
length: 0.28
}
}
colliders {
position {
z: 0.19
}
capsule {
radius: 0.09
length: 0.18
}
}
colliders {
position {
x: -0.01
z: -0.12
}
rotation {
x: -90.0
}
capsule {
radius: 0.06
length: 0.24
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 8.907463
}
bodies {
name: "lwaist0"
colliders {
position {
}
rotation {
x: -90.0
}
capsule {
radius: 0.06
length: 0.24
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 2.2619467
}
bodies {
name: "pelvis0"
colliders {
position {
x: -0.02
}
rotation {
x: -90.0
}
capsule {
radius: 0.09
length: 0.32
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 6.6161942
}
bodies {
name: "right_thigh0"
colliders {
position {
y: 0.005
z: -0.17
}
rotation {
x: -178.31532
}
capsule {
radius: 0.06
length: 0.46014702
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.751751
}
bodies {
name: "right_shin0"
colliders {
position {
z: -0.15
}
rotation {
x: -180.0
}
capsule {
radius: 0.049
length: 0.398
end: -1
}
}
colliders {
position {
z: -0.35
}
capsule {
radius: 0.075
length: 0.15
end: 1
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.5228419
}
bodies {
name: "left_thigh0"
colliders {
position {
y: -0.005
z: -0.17
}
rotation {
x: 178.31532
}
capsule {
radius: 0.06
length: 0.46014702
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.751751
}
bodies {
name: "left_shin0"
colliders {
position {
z: -0.15
}
rotation {
x: -180.0
}
capsule {
radius: 0.049
length: 0.398
end: -1
}
}
colliders {
position {
z: -0.35
}
capsule {
radius: 0.075
length: 0.15
end: 1
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.5228419
}
bodies {
name: "right_upper_arm0"
colliders {
position {
x: 0.08
y: -0.08
z: -0.08
}
rotation {
x: 135.0
y: 35.26439
z: -75.0
}
capsule {
radius: 0.04
length: 0.35712814
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.6610805
}
bodies {
name: "right_lower_arm0"
colliders {
position {
x: 0.09
y: 0.09
z: 0.09
}
rotation {
x: -45.0
y: 35.26439
z: 15.0
}
capsule {
radius: 0.031
length: 0.33912814
}
}
colliders {
position {
x: 0.18
y: 0.18
z: 0.18
}
capsule {
radius: 0.04
length: 0.08
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.2295402
}
bodies {
name: "left_upper_arm0"
colliders {
position {
x: 0.08
y: 0.08
z: -0.08
}
rotation {
x: -135.0
y: 35.26439
z: 75.0
}
capsule {
radius: 0.04
length: 0.35712814
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.6610805
}
bodies {
name: "left_lower_arm0"
colliders {
position {
x: 0.09
y: -0.09
z: 0.09
}
rotation {
x: 45.0
y: 35.26439
z: -15.0
}
capsule {
radius: 0.031
length: 0.33912814
}
}
colliders {
position {
x: 0.18
y: -0.18
z: 0.18
}
capsule {
radius: 0.04
length: 0.08
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.2295402
}
joints {
name: "abdomen_z0"
stiffness: 15000.0
parent: "torso0"
child: "lwaist0"
parent_offset {
x: -0.01
z: -0.195
}
child_offset {
z: 0.065
}
rotation {
y: -90.0
}
angular_damping: 20.0
angle_limit {
min: -45.0
max: 45.0
}
angle_limit {
min: -75.0
max: 30.0
}
}
joints {
name: "abdomen_x0"
stiffness: 15000.0
parent: "lwaist0"
child: "pelvis0"
parent_offset {
z: -0.065
}
child_offset {
z: 0.1
}
rotation {
x: 90.0
}
angular_damping: 20.0
angle_limit {
min: -35.0
max: 35.0
}
}
joints {
name: "right_hip_x0"
stiffness: 8000.0
parent: "pelvis0"
child: "right_thigh0"
parent_offset {
y: -0.1
z: -0.04
}
child_offset {
}
rotation {
}
angular_damping: 20.0
limit_strength: 2000.0
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -30.0
max: 70.0
}
}
joints {
name: "right_knee0"
stiffness: 15000.0
parent: "right_thigh0"
child: "right_shin0"
parent_offset {
y: 0.01
z: -0.383
}
child_offset {
z: 0.02
}
rotation {
z: -90.0
}
angular_damping: 20.0
angle_limit {
min: -160.0
max: -2.0
}
}
joints {
name: "left_hip_x0"
stiffness: 8000.0
parent: "pelvis0"
child: "left_thigh0"
parent_offset {
y: 0.1
z: -0.04
}
child_offset {
}
angular_damping: 20.0
limit_strength: 2000.0
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -30.0
max: 70.0
}
}
joints {
name: "left_knee0"
stiffness: 15000.0
parent: "left_thigh0"
child: "left_shin0"
parent_offset {
y: -0.01
z: -0.383
}
child_offset {
z: 0.02
}
rotation {
z: -90.0
}
angular_damping: 20.0
angle_limit {
min: -160.0
max: -2.0
}
}
joints {
name: "right_shoulder0"
stiffness: 15000.0
parent: "torso0"
child: "right_upper_arm0"
parent_offset {
y: -0.17
z: 0.06
}
child_offset {
}
rotation {
x: 135.0
y: 35.26439
}
angular_damping: 20.0
angle_limit {
min: -85.0
max: 60.0
}
angle_limit {
min: -85.0
max: 60.0
}
}
joints {
name: "right_elbow0"
stiffness: 15000.0
parent: "right_upper_arm0"
child: "right_lower_arm0"
parent_offset {
x: 0.18
y: -0.18
z: -0.18
}
child_offset {
}
rotation {
x: 135.0
z: 90.0
}
angular_damping: 20.0
angle_limit {
min: -90.0
max: 50.0
}
}
joints {
name: "left_shoulder0"
stiffness: 15000.0
parent: "torso0"
child: "left_upper_arm0"
parent_offset {
y: 0.17
z: 0.06
}
child_offset {
}
rotation {
x: 45.0
y: -35.26439
}
angular_damping: 20.0
angle_limit {
min: -60.0
max: 85.0
}
angle_limit {
min: -60.0
max: 85.0
}
}
joints {
name: "left_elbow0"
stiffness: 15000.0
parent: "left_upper_arm0"
child: "left_lower_arm0"
parent_offset {
x: 0.18
y: 0.18
z: -0.18
}
child_offset {
}
rotation {
x: 45.0
z: -90.0
}
angular_damping: 20.0
angle_limit {
min: -90.0
max: 50.0
}
}
actuators {
name: "abdomen_z0"
joint: "abdomen_z0"
strength: 300.0
torque {
}
}
actuators {
name: "abdomen_x0"
joint: "abdomen_x0"
strength: 300.0
torque {
}
}
actuators {
name: "right_hip_x0"
joint: "right_hip_x0"
strength: 300.0
torque {
}
}
actuators {
name: "right_knee0"
joint: "right_knee0"
strength: 300.0
torque {
}
}
actuators {
name: "left_hip_x0"
joint: "left_hip_x0"
strength: 300.0
torque {
}
}
actuators {
name: "left_knee0"
joint: "left_knee0"
strength: 300.0
torque {
}
}
actuators {
name: "right_shoulder0"
joint: "right_shoulder0"
strength: 75.0
torque {
}
}
actuators {
name: "right_elbow0"
joint: "right_elbow0"
strength: 75.0
torque {
}
}
actuators {
name: "left_shoulder0"
joint: "left_shoulder0"
strength: 75.0
torque {
}
}
actuators {
name: "left_elbow0"
joint: "left_elbow0"
strength: 75.0
torque {
}
}
collide_include {
first: "floor"
second: "left_shin0"
}
collide_include {
first: "floor"
second: "right_shin0"
}
"""
_HUMANOID1_CONFIG = """
bodies {
name: "torso1"
colliders {
position {
}
rotation {
x: -90.0
}
capsule {
radius: 0.07
length: 0.28
}
}
colliders {
position {
z: 0.19
}
capsule {
radius: 0.09
length: 0.18
}
}
colliders {
position {
x: -0.01
z: -0.12
}
rotation {
x: -90.0
}
capsule {
radius: 0.06
length: 0.24
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 8.907463
}
bodies {
name: "lwaist1"
colliders {
position {
}
rotation {
x: -90.0
}
capsule {
radius: 0.06
length: 0.24
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 2.2619467
}
bodies {
name: "pelvis1"
colliders {
position {
x: -0.02
}
rotation {
x: -90.0
}
capsule {
radius: 0.09
length: 0.32
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 6.6161942
}
bodies {
name: "right_thigh1"
colliders {
position {
y: 0.005
z: -0.17
}
rotation {
x: -178.31532
}
capsule {
radius: 0.06
length: 0.46014702
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.751751
}
bodies {
name: "right_shin1"
colliders {
position {
z: -0.15
}
rotation {
x: -180.0
}
capsule {
radius: 0.049
length: 0.398
end: -1
}
}
colliders {
position {
z: -0.35
}
capsule {
radius: 0.075
length: 0.15
end: 1
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.5228419
}
bodies {
name: "left_thigh1"
colliders {
position {
y: -0.005
z: -0.17
}
rotation {
x: 178.31532
}
capsule {
radius: 0.06
length: 0.46014702
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.751751
}
bodies {
name: "left_shin1"
colliders {
position {
z: -0.15
}
rotation {
x: -180.0
}
capsule {
radius: 0.049
length: 0.398
end: -1
}
}
colliders {
position {
z: -0.35
}
capsule {
radius: 0.075
length: 0.15
end: 1
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 4.5228419
}
bodies {
name: "right_upper_arm1"
colliders {
position {
x: 0.08
y: -0.08
z: -0.08
}
rotation {
x: 135.0
y: 35.26439
z: -75.0
}
capsule {
radius: 0.04
length: 0.35712814
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.6610805
}
bodies {
name: "right_lower_arm1"
colliders {
position {
x: 0.09
y: 0.09
z: 0.09
}
rotation {
x: -45.0
y: 35.26439
z: 15.0
}
capsule {
radius: 0.031
length: 0.33912814
}
}
colliders {
position {
x: 0.18
y: 0.18
z: 0.18
}
capsule {
radius: 0.04
length: 0.08
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.2295402
}
bodies {
name: "left_upper_arm1"
colliders {
position {
x: 0.08
y: 0.08
z: -0.08
}
rotation {
x: -135.0
y: 35.26439
z: 75.0
}
capsule {
radius: 0.04
length: 0.35712814
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.6610805
}
bodies {
name: "left_lower_arm1"
colliders {
position {
x: 0.09
y: -0.09
z: 0.09
}
rotation {
x: 45.0
y: 35.26439
z: -15.0
}
capsule {
radius: 0.031
length: 0.33912814
}
}
colliders {
position {
x: 0.18
y: -0.18
z: 0.18
}
capsule {
radius: 0.04
length: 0.08
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.2295402
}
joints {
name: "abdomen_z1"
stiffness: 15000.0
parent: "torso1"
child: "lwaist1"
parent_offset {
x: -0.01
z: -0.195
}
child_offset {
z: 0.065
}
rotation {
y: -90.0
}
angular_damping: 20.0
angle_limit {
min: -45.0
max: 45.0
}
angle_limit {
min: -75.0
max: 30.0
}
}
joints {
name: "abdomen_x1"
stiffness: 15000.0
parent: "lwaist1"
child: "pelvis1"
parent_offset {
z: -0.065
}
child_offset {
z: 0.1
}
rotation {
x: 90.0
}
angular_damping: 20.0
angle_limit {
min: -35.0
max: 35.0
}
}
joints {
name: "right_hip_x1"
stiffness: 8000.0
parent: "pelvis1"
child: "right_thigh1"
parent_offset {
y: -0.1
z: -0.04
}
child_offset {
}
rotation {
}
angular_damping: 20.0
limit_strength: 2000.0
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -30.0
max: 70.0
}
}
joints {
name: "right_knee1"
stiffness: 15000.0
parent: "right_thigh1"
child: "right_shin1"
parent_offset {
y: 0.01
z: -0.383
}
child_offset {
z: 0.02
}
rotation {
z: -90.0
}
angular_damping: 20.0
angle_limit {
min: -160.0
max: -2.0
}
}
joints {
name: "left_hip_x1"
stiffness: 8000.0
parent: "pelvis1"
child: "left_thigh1"
parent_offset {
y: 0.1
z: -0.04
}
child_offset {
}
angular_damping: 20.0
limit_strength: 2000.0
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -10.0
max: 10.0
}
angle_limit {
min: -30.0
max: 70.0
}
}
joints {
name: "left_knee1"
stiffness: 15000.0
parent: "left_thigh1"
child: "left_shin1"
parent_offset {
y: -0.01
z: -0.383
}
child_offset {
z: 0.02
}
rotation {
z: -90.0
}
angular_damping: 20.0
angle_limit {
min: -160.0
max: -2.0
}
}
joints {
name: "right_shoulder1"
stiffness: 15000.0
parent: "torso1"
child: "right_upper_arm1"
parent_offset {
y: -0.17
z: 0.06
}
child_offset {
}
rotation {
x: 135.0
y: 35.26439
}
angular_damping: 20.0
angle_limit {
min: -85.0
max: 60.0
}
angle_limit {
min: -85.0
max: 60.0
}
}
joints {
name: "right_elbow1"
stiffness: 15000.0
parent: "right_upper_arm1"
child: "right_lower_arm1"
parent_offset {
x: 0.18
y: -0.18
z: -0.18
}
child_offset {
}
rotation {
x: 135.0
z: 90.0
}
angular_damping: 20.0
angle_limit {
min: -90.0
max: 50.0
}
}
joints {
name: "left_shoulder1"
stiffness: 15000.0
parent: "torso1"
child: "left_upper_arm1"
parent_offset {
y: 0.17
z: 0.06
}
child_offset {
}
rotation {
x: 45.0
y: -35.26439
}
angular_damping: 20.0
angle_limit {
min: -60.0
max: 85.0
}
angle_limit {
min: -60.0
max: 85.0
}
}
joints {
name: "left_elbow1"
stiffness: 15000.0
parent: "left_upper_arm1"
child: "left_lower_arm1"
parent_offset {
x: 0.18
y: 0.18
z: -0.18
}
child_offset {
}
rotation {
x: 45.0
z: -90.0
}
angular_damping: 20.0
angle_limit {
min: -90.0
max: 50.0
}
}
actuators {
name: "abdomen_z1"
joint: "abdomen_z1"
strength: 300.0
torque {
}
}
actuators {
name: "abdomen_x1"
joint: "abdomen_x1"
strength: 300.0
torque {
}
}
actuators {
name: "right_hip_x1"
joint: "right_hip_x1"
strength: 300.0
torque {
}
}
actuators {
name: "right_knee1"
joint: "right_knee1"
strength: 300.0
torque {
}
}
actuators {
name: "left_hip_x1"
joint: "left_hip_x1"
strength: 300.0
torque {
}
}
actuators {
name: "left_knee1"
joint: "left_knee1"
strength: 300.0
torque {
}
}
actuators {
name: "right_shoulder1"
joint: "right_shoulder1"
strength: 75.0
torque {
}
}
actuators {
name: "right_elbow1"
joint: "right_elbow1"
strength: 75.0
torque {
}
}
actuators {
name: "left_shoulder1"
joint: "left_shoulder1"
strength: 75.0
torque {
}
}
actuators {
name: "left_elbow1"
joint: "left_elbow1"
strength: 75.0
torque {
}
}
collide_include {
first: "floor"
second: "left_shin1"
}
collide_include {
first: "floor"
second: "right_shin1"
}
"""
_ENV_CONFIG = """
bodies {
name: "floor"
colliders {
plane {
}
}
inertia {
x: 1.0
y: 1.0
z: 1.0
}
mass: 1.0
frozen { all: true }
}
friction: 1.0
gravity {
z: -9.81
}
angular_damping: -0.05
baumgarte_erp: 0.1
dt: 0.015
substeps: 8
"""
_SYSTEM_CONFIG = _HUMANOID0_CONFIG + _HUMANOID1_CONFIG + _ENV_CONFIG | 2.390625 | 2 |
app_iza.py | Sharmaxz/instabot.py | 2 | 12788821 | <reponame>Sharmaxz/instabot.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import dotenv
from src.location.extract_location import get_locations_id
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
dotenv.load_dotenv(os.path.join(BASE_DIR, '.env'))
from src import InstaBot
location_ids = get_locations_id('IZABELLA')
if __name__ == '__main__':
bot = InstaBot(
login=os.environ.get('IZALOGIN', ''),
        password=os.environ.get('<PASSWORD>', ''),
like_per_day=1400,
comments_per_day=0,
tag_list=[id['location_id'] for id in location_ids][::-2],
user_blacklist={},
max_like_for_one_tag=50,
follow_per_day=0,
follow_time=1 * 60,
unfollow_per_day=0,
unfollow_break_min=15,
unfollow_break_max=30,
log_mod=0,
proxy='',
# List of list of words, each of which will be used to generate comment
# For example: "This shot feels wow!"
comment_list=[["this", "the", "your"],
["photo", "picture", "pic", "shot", "snapshot"],
["is", "looks", "feels", "is really"],
["great", "super", "good", "very good", "good", "wow",
"WOW", "cool", "GREAT", "magnificent", "magical",
"very cool", "stylish", "beautiful", "so beautiful",
"so stylish", "so professional", "lovely",
"so lovely", "very lovely", "glorious","so glorious",
"very glorious", "adorable", "excellent", "amazing"],
[".", "..", "...", "!", "!!", "!!!"]],
# Use unwanted_username_list to block usernames containing a string
## Will do partial matches; i.e. 'mozart' will block 'legend_mozart'
### 'free_followers' will be blocked because it contains 'free'
unwanted_username_list=[
'second', 'stuff', 'art', 'project', 'love', 'life', 'food', 'blog',
'free', 'keren', 'photo', 'graphy', 'indo', 'travel', 'art', 'shop',
'store', 'sex', 'toko', 'jual', 'online', 'murah', 'jam', 'kaos',
'case', 'baju', 'fashion', 'corp', 'tas', 'butik', 'grosir', 'karpet',
'sosis', 'salon', 'skin', 'care', 'cloth', 'tech', 'rental', 'kamera',
'beauty', 'express', 'kredit', 'collection', 'impor', 'preloved',
'follow', 'follower', 'gain', '.id', '_id', 'bags'
],
unfollow_whitelist=['example_user_1', 'example_user_2'])
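    # Illustrative sketch (not part of the original script): one way the comment_list
    # passed above could be turned into a single comment, assuming one word is drawn
    # from each inner list:
    #   import random
    #   comment = " ".join(random.choice(words) for words in comment_list[:-1]) + random.choice(comment_list[-1])
    #   # e.g. "this picture looks great!"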
while True:
mode = 0
if mode == 0:
bot.auto_mod()
| 2.34375 | 2 |
exercises/exercise4/image-downloader/downloader.py | jackxxu/2020F-AC295 | 0 | 12788822 | <gh_stars>0
import sys
import os
import requests
import time
import shutil
from selenium import webdriver
def download_google_images(search_term_list = ["tomato", "bell pepper"], num_images_requested = 10):
print("download_google_images...")
start_time = time.time()
# Setup download folder
downloads = "dataset"
if os.path.exists(downloads):
shutil.rmtree(downloads)
os.mkdir(downloads)
    # Each scroll provides approximately 400 images
number_of_scrolls = int(num_images_requested / 400) + 1
# Firefox Options
options = webdriver.FirefoxOptions()
options.headless = True
browser = webdriver.Firefox(options=options)
for search_term in search_term_list:
print("Searching for :", search_term)
browser.get('https://www.google.com/search?q=' + search_term)
# Go to Google Images
images_links = browser.find_elements_by_xpath('//a[contains(@class, "q qs")]')
for link in images_links:
#print(link)
print(link.get_attribute("href"))
# Wait
time.sleep(5)
# Go to images
images_link = images_links[0]
images_link.click()
for _ in range(number_of_scrolls):
for __ in range(10):
# multiple scrolls needed to show all 400 images
browser.execute_script("window.scrollBy(0, 1000000)")
time.sleep(2)
# to load next 400 images
time.sleep(5)
            # try to find the "Show more results" button
try:
# if found click to load more image
browser.find_element_by_xpath("//input[@value='Show more results']").click()
except Exception as e:
# if not exit
print("End of page")
break
# Image link store
imgs_urls = set()
# Find the thumbnail images
thumbnails = browser.find_elements_by_xpath('//a[@class="wXeWr islib nfEiy mM5pbd"]')
        # loop over the thumbnails to retrieve the links
for thumbnail in thumbnails:
# check if reached the request number of links
if len(imgs_urls) >= num_images_requested:
break
try:
thumbnail.click()
time.sleep(2)
except Exception as error:
print("Error clicking one thumbnail : ", error)
# Find the image url
url_elements = browser.find_elements_by_xpath('//img[@class="n3VNCb"]')
# check for the correct url
for url_element in url_elements:
try:
url = url_element.get_attribute('src')
                except Exception as e:
                    print("Error getting url:", e)
                    continue
if url.startswith('http') and not url.startswith('https://encrypted-tbn0.gstatic.com'):
#print("Found image url:", url)
imgs_urls.add(url)
print('Number of image urls found:', len(imgs_urls))
# Wait 5 seconds
time.sleep(5)
# Save the images
img_dir = os.path.join(downloads, search_term.lower().replace(" ","_"))
if not os.path.exists(img_dir):
os.makedirs(img_dir)
count = 0
for url in imgs_urls:
try:
with requests.get(url, stream=True) as r:
r.raise_for_status()
file_path = os.path.join(img_dir, '{0}.jpg'.format(count))
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
count += 1
# sleep
if count % 10 == 0:
time.sleep(3)
except Exception as e:
print("Error in url:", url)
print(e)
continue
# Quit the browser
browser.quit()
execution_time = (time.time() - start_time) / 60.0
print("Download execution time (mins)", execution_time)
| 3.203125 | 3 |
atalaia/explore.py | vallantin/atalaia | 0 | 12788823 | # TODOS
#--------------------------------------
# imports
import matplotlib.pyplot as plt
from atalaia.atalaia import Atalaia
import numpy as np
import networkx as nx
class Explore:
"""Explore is used for text exploratory tasks.
"""
def __init__(self, language:str):
"""
Parameters
----------
language : str
The language of the corpus
"""
self.language = language
self.atalaia = self.__start_atalaia()
def __start_atalaia(self):
""" Starts an instance of Atalaia"""
return Atalaia(self.language)
def describe(self, corpus:list):
""" Gets the lengths of the sentences present in the corpus, based on the number of tokens.
Returns the lengths, the shortest value and the longest value and the average sentence size."""
# tokenize sentences
tokenized_sentences = [self.atalaia.tokenize(sentence) for sentence in corpus]
# get the lengths
lengths = [len(sentence) for sentence in tokenized_sentences]
# get the percentiles
a = np.array(lengths)
percentiles = (np.percentile(a,0), np.percentile(a,25), np.percentile(a,50), np.percentile(a,75), np.percentile(a,100))
# get shortest, longest and average sentence size using the percentiles values
shortest = percentiles[0] # 0%
longest = percentiles[4] # 100%
average = percentiles[2] # 50%
return lengths, shortest, longest, average, percentiles
def plot_sentences_size_histogram(self, corpus:list, bins = 30, xlabel = 'Number of tokens', ylabel = 'Frequency'):
""" Plots the tokens distribution """
# get sentences sizes
sentences_sizes, shortest, longest, average, percentiles = self.describe(corpus)
# plot
plt.hist(sentences_sizes, bins = bins)
plt.xlabel(xlabel)
        plt.ylabel(ylabel)
plt.show()
# return sizes, shortest and longest values and average
return sentences_sizes, shortest, longest, average, percentiles
def plot_sentences_size_boxplot(self, corpus:list):
# get sentences sizes
sentences_sizes, shortest, longest, average, percentiles = self.describe(corpus)
# plot boxplot
plt.boxplot(sentences_sizes)
plt.show()
# return sizes, shortest and longest values and average
return sentences_sizes, shortest, longest, average, percentiles
def plot_representative_tokens(self, corpus:list, percentage=0.3):
#create corpus
corpus = self.atalaia.create_corpus(corpus)
# let's lowercase everything first
texts_lower = self.atalaia.lower_remove_white(corpus)
# plot
token_data = self.atalaia.representative_tokens(percentage,
texts_lower,
reverse=False)
token_data = token_data.items()
token_data = list(token_data)[:10]
tokens, counts = zip(*token_data)
# plot
plt.figure(figsize=(20,10))
plt.bar(tokens,
counts,
color='b')
plt.xlabel('Tokens');
plt.ylabel('Counts');
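
# Minimal usage sketch (assumes 'en' is a language code accepted by Atalaia; the
# corpus values are illustrative):
# explorer = Explore('en')
# lengths, shortest, longest, average, percentiles = explorer.describe(
#     ["a short sentence", "another slightly longer sentence"])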
| 3.625 | 4 |
tests/test_io.py | gerritholl/sattools | 0 | 12788824 | """Test I/O related functionality."""
import tempfile
import os
import pathlib
def test_cache_dir():
"""Test getting cache directory."""
from sattools.io import get_cache_dir
with tempfile.TemporaryDirectory() as tmpdir:
d = get_cache_dir(tmpdir, "tofu")
assert str(d.parent) == tmpdir
assert d.name == "tofu"
try:
_environ = os.environ.copy()
os.environ.pop("XDG_CACHE_HOME", None)
d = get_cache_dir(subdir="raspberry")
assert d.parent.name == ".cache"
assert d.name == "raspberry"
finally:
try:
d.rmdir()
except OSError:
pass
os.environ.clear()
os.environ.update(_environ)
try:
_environ = os.environ.copy()
pt = pathlib.Path(os.environ.get("TMPDIR", "/tmp/"))
os.environ["XDG_CACHE_HOME"] = str(pt)
d = get_cache_dir(subdir="banana")
assert d.parent == pt
assert d.name == "banana"
finally:
try:
d.rmdir()
except OSError:
pass
os.environ.clear()
os.environ.update(_environ)
def test_plotdir(tmp_path, monkeypatch):
"""Test getting plotting directory."""
from sattools.io import plotdir
monkeypatch.delenv("PLOT_BASEDIR", raising=False)
pd = plotdir(create=False)
assert pd.parent.parent.parent == pathlib.Path(
"/media/nas/x21308/plots_and_maps")
pd = plotdir(create=False, basedir=tmp_path)
assert pd.parent.parent.parent == tmp_path
monkeypatch.setenv("PLOT_BASEDIR", str(tmp_path))
pd = plotdir(create=False)
assert pd.parent.parent.parent == tmp_path
assert not pd.exists()
pd = plotdir(create=True)
assert pd.exists()
def test_datadir(tmp_path, monkeypatch):
"""Test getting NAS data directory."""
from sattools.io import nas_data_out
monkeypatch.delenv("NAS_DATA", raising=False)
pd = nas_data_out(create=False)
assert pd == pathlib.Path("/media/nas/x21308/data_out")
monkeypatch.setenv("NAS_DATA", str(tmp_path))
pd = nas_data_out(create=False)
assert pd == tmp_path / "data_out"
assert not pd.exists()
pd = nas_data_out(create=True)
assert pd.exists()
pd = nas_data_out(tmp_path / "fionnay", subdir="datum", create=True)
assert pd == tmp_path / "fionnay" / "datum"
assert pd.exists()
| 2.40625 | 2 |
escalate/rest_api/tests/model_tests/organization/person.py | darkreactions/ESCALATE | 11 | 12788825 | <reponame>darkreactions/ESCALATE
from ..model_tests_utils import (
status_codes,
DELETE,
PUT,
POST,
GET,
ERROR,
random_model_dict,
check_status_code,
compare_data
)
from core.models import (
Organization,
Person,
Actor
)
person_test_data = {
'person_test_0':{
'org0': {
"description": "Test",
"full_name": "Test",
"short_name": "Test",
"address1": "Test",
"address2": "Test",
"city": "Test",
"state_province": "TT",
"zip": "21345",
"country": "Test",
"website_url": "www.test.com",
"phone": "1231231",
"parent": None
},
'org1': {
"description": "Test12",
"full_name": "Test12",
"short_name": "Test12",
"address1": "Test",
"address2": "Test",
"city": "Test",
"state_province": "TT",
"zip": "21345",
"country": "Test",
"website_url": "www.test.com",
"phone": "1231231",
"parent": None
},
'person': {
"first_name": "Test",
"last_name": "Test",
"middle_name": "Test",
"address1": "Test",
"address2": "Test",
"city": "Test",
"state_province": "TT",
"zip": "123124",
"country": "Test",
"phone": "123123123",
"email": "<EMAIL>",
"title": "Test",
"suffix": "",
},
'person_update0': {
"first_name":"updated_first_name",
"last_name":"updated_last_name",
"middle_name": "updated_middle_name",
"address1": "updated_address1",
"address2": "updated_address2",
"city": "updated_city",
"state_province": "bb",
"zip": "111111",
"country": "updated_country",
"phone": "1111111111",
"email": "<EMAIL>",
"title": "updated_title",
"suffix": "updated_suffix",
"added_organization": ['org0__url','org1__url']
},
'person_update1': {
"first_name":"updated_first_name",
"last_name":"updated_last_name",
"middle_name": "updated_middle_name",
"address1": "updated_address1",
"address2": "updated_address2",
"city": "updated_city",
"state_province": "bb",
"zip": "111111",
"country": "updated_country",
"phone": "1111111111",
"email": "<EMAIL>",
"title": "updated_title",
"suffix": "updated_suffix",
"added_organization": ['org0__url']
}
}
}
person_tests = [
##----TEST 0----##
#creates an organization
#creates a person
#creates an actor with the person and organization as foreign keys
#this will populate the added_organization field in person with
#the corresponding organization in the actor table
#deletes the actor
#empties the added_organization field
#gets the person
#updates person
#gets person
#deletes person
#gets person (should return error)
[
{
'name': 'org0',
'method': POST,
'endpoint': 'organization-list',
'body': random_model_dict(Organization),
'args': [],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': POST
}
}
},
{
'name': 'person0',
'method': POST,
'endpoint': 'person-list',
'body': (request_body := random_model_dict(Person)),
'args': [],
'query_params': [],
'is_valid_response': {
'function': compare_data,
'args': [],
'kwargs': {
'status_code': POST,
'request_body': request_body
}
}
},
{
'name': 'actor0',
'method': POST,
'endpoint': 'actor-list',
'body': random_model_dict(Actor, organization='org0__url', person='person0__url'),
'args': [],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': POST
}
}
},
{
'name': 'actor0_delete_0',
'method': DELETE,
'endpoint': 'actor-detail',
'body': {},
'args': [
'actor0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': DELETE
}
}
},
{
'name': 'person0_get_0',
'method': GET,
'endpoint': 'person-detail',
'body': {},
'args': [
'person0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': GET
}
}
},
{
'name': 'person0_update_0',
'method': PUT,
'endpoint': 'person-detail',
'body': (request_body := random_model_dict(Person)),
'args': [
'person0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': compare_data,
'args': [],
'kwargs': {
'status_code': PUT,
'request_body': request_body
}
}
},
{
'name': 'person0_get_1',
'method': GET,
'endpoint': 'person-detail',
'body': {},
'args': [
'person0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': GET
}
}
},
{
'name': 'person0_delete_0',
'method': DELETE,
'endpoint': 'person-detail',
'body': {},
'args': [
'person0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': DELETE
}
}
},
{
'name': 'person0_get_3',
'method': GET,
'endpoint': 'person-detail',
'body': {},
'args': [
'person0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': ERROR
}
}
},
],
]
| 2.09375 | 2 |
grr/server/grr_response_server/check_lib/__init__.py | khanhgithead/grr | 4,238 | 12788826 | <reponame>khanhgithead/grr
#!/usr/bin/env python
"""This is the check capabilities used to post-process host data."""
# pylint: disable=g-import-not-at-top,unused-import
from grr_response_server.check_lib import checks
from grr_response_server.check_lib import hints
from grr_response_server.check_lib import triggers
| 0.964844 | 1 |
Sandbox/gatherSV_zinfo.py | echaussidon/LSS | 8 | 12788827 | '''
gather redshift info across all observations for a given target type; for now from a single tile
'''
#test
#standard python
import sys
import os
import shutil
import unittest
from datetime import datetime
import json
import numpy as np
import fitsio
import glob
import argparse
from astropy.table import Table,join,unique,vstack
from matplotlib import pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("--type", help="tracer type to be selected")
parser.add_argument("--tile", help="observed tile to use") #eventually remove this and just gather everything
args = parser.parse_args()
type = args.type
tile = args.tile
if type == 'LRG':
tarbit = 0 #targeting bit
if type == 'QSO':
tarbit = 2
if type == 'ELG':
tarbit = 1
print('gathering type,tile')
print(type,tile)
tp = 'SV1_DESI_TARGET'
print('targeting bit, target program type; CHECK THEY ARE CORRECT!')
print(tarbit,tp)
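# e.g. for LRG tarbit=0, so the selection below keeps rows where
# (SV1_DESI_TARGET & 2**0) > 0; for ELG the mask is 2**1, for QSO 2**2.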
#location of inputs
coaddir = '/global/cfs/cdirs/desi/spectro/redux/blanc/tiles/'+tile
subsets = [x[0][len(coaddir):].strip('/') for x in os.walk(coaddir)] #something must work better than this, but for now...
#outputs
svdir = '/project/projectdirs/desi/users/ajross/catalogs/SV/'
version = 'test/'
dirout = svdir+'redshift_comps/'+version
outf = dirout +'/'+tile+'_'+type+'zinfo.fits'
if not os.path.exists(svdir+'redshift_comps'):
os.mkdir(svdir+'redshift_comps')
print('made '+svdir+'redshift_comps random directory')
if not os.path.exists(dirout):
os.mkdir(dirout)
print('made '+dirout)
ss = 0 #use to switch from creating to concatenating
for night in subsets:
if len(night) > 0:
print('going through subset '+night)
specs = []
#find out which spectrograph have data
for si in range(0,10):
try:
fl = coaddir+'/'+night+'/zbest-'+str(si)+'-'+str(tile)+'-'+night+'.fits'
#print(fl)
fitsio.read(fl)
specs.append(si)
except:
print('no spectrograph '+str(si)+ ' on subset '+night)
tspec = Table.read(coaddir+'/'+night+'/zbest-'+str(specs[0])+'-'+str(tile)+'-'+night+'.fits',hdu='ZBEST')
tf = Table.read(coaddir+'/'+night+'/coadd-'+str(specs[0])+'-'+str(tile)+'-'+night+'.fits',hdu='FIBERMAP')
for i in range(1,len(specs)):
tn = Table.read(coaddir+'/'+night+'/zbest-'+str(specs[i])+'-'+str(tile)+'-'+night+'.fits',hdu='ZBEST')
tnf = Table.read(coaddir+'/'+night+'/coadd-'+str(specs[i])+'-'+str(tile)+'-'+night+'.fits',hdu='FIBERMAP')
tspec = vstack([tspec,tn])
tf = vstack([tf,tnf])
tspec = join(tspec,tf,keys=['TARGETID'])
wtype = ((tspec[tp] & 2**tarbit) > 0)
print(str(len(tspec))+' total entries '+str(len(tspec[wtype]))+' that are '+type)
tspec = tspec[wtype]
tspec['subset'] = night
if ss == 0:
tspect = tspec
ss = 1
else:
tspect = vstack([tspect,tspec])
print('there are now '+str(len(tspect)) +' entries with '+str(len(np.unique(tspect['TARGETID'])))+' unique target IDs')
tspect.sort('TARGETID')
tspect.write(outf,format='fits', overwrite=True)
| 2.453125 | 2 |
module03_research_data_in_python/greengraph/map.py | marquesafonso/rse-course | 0 | 12788828 | <gh_stars>0
import numpy as np
from io import BytesIO
import imageio as img
import requests
class Map:
def __init__(
self, lat, long, satellite=True, zoom=10, size=(400, 400), sensor=False
):
base = "https://static-maps.yandex.ru/1.x/?"
params = dict(
z=zoom,
size=str(size[0]) + "," + str(size[1]),
ll=str(long) + "," + str(lat),
l="sat" if satellite else "map",
lang="en_US",
)
self.image = requests.get(
base, params=params
).content # Fetch our PNG image data
content = BytesIO(self.image)
self.pixels = img.imread(content) # Parse our PNG image as a numpy array
def green(self, threshold):
# Use NumPy to build an element-by-element logical array
greener_than_red = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 0]
greener_than_blue = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 2]
return np.logical_and(greener_than_red, greener_than_blue)
def count_green(self, threshold=1.1):
return np.sum(self.green(threshold))
def show_green(self, threshold=1.1):
green = self.green(threshold)
out = green[:, :, np.newaxis] * np.array([0, 1, 0])[np.newaxis, np.newaxis, :]
buffer = BytesIO()
result = img.imwrite(buffer, out, format="png")
return buffer.getvalue()
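
# Usage sketch (coordinates are illustrative; requires network access to the Yandex
# static-maps endpoint requested in __init__):
# m = Map(51.50, -0.12)
# print(m.count_green(threshold=1.1))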
| 3.171875 | 3 |
utils.py | yiluzhu/gomoku | 1 | 12788829 | from constants import P2P_PIXELS, PIECE_RADIUS_PIXELS, BOARD_SIZE
def index_to_pixel(x):
"""Given a point index, return the corresponding pixel. The Index can be either row or column"""
return P2P_PIXELS + P2P_PIXELS * x
def pixels_to_indices(pixel_x, pixel_y):
"""Given a point pixels, go through all points on the board to see which point is the intended selection.
:return: the selected point indices. If not found, return None"""
# TODO: optimize this method
square_distance_threshold = PIECE_RADIUS_PIXELS ** 2 # max distance allowed between the clicked position and the selected point
for i in range(BOARD_SIZE):
for j in range(BOARD_SIZE):
square_distance = (pixel_x - index_to_pixel(i)) ** 2 + (pixel_y - index_to_pixel(j)) ** 2
if square_distance <= square_distance_threshold:
return i, j
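
# Example (hypothetical round-trip check): a click exactly on the pixel centre of
# board point (3, 5) should map back to those indices, assuming points are spaced
# further apart than PIECE_RADIUS_PIXELS:
# assert pixels_to_indices(index_to_pixel(3), index_to_pixel(5)) == (3, 5)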
| 3.609375 | 4 |
OpenCVVideoFunctions/video4.py | zopagaduanjr/Microprocessor_Project | 0 | 12788830 | import cv2
import numpy as np
cap = cv2.VideoCapture('grace4.mp4')
def make_360p():
cap.set(3, 480)
cap.set(4, 360)
def rescale_frame(frame):
percent = 25;
width = int(frame.shape[1] * percent/100)
height = int(frame.shape[0] * percent/100)
dim = (width, height)
return cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)
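# e.g. with percent = 25, a 1920x1080 frame is resized to 480x270.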
subtractor = cv2.createBackgroundSubtractorMOG2()
fps = cap.get(cv2.CAP_PROP_FPS)
make_360p()
while True:
_, frame = cap.read()
frame38 = rescale_frame(frame)
frame38 = cv2.transpose(frame38,frame38)
frame38 = cv2.flip(frame38, 1)
mask = subtractor.apply(frame38)
(contours,_) = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
if cv2.contourArea(contour) < 190:
continue
(x,y,w,h) = cv2.boundingRect(contour)
cv2.rectangle(frame38, (x,y),(x+w,y+h),(240,32,160),3)
cv2.imshow("Zal", frame38)
key = cv2.waitKey(30)
if key == 27:
break
cap.release()
cv2.destroyAllWindows() | 2.75 | 3 |
bin/iamonds/hexiamonds-4x9.py | tiwo/puzzler | 0 | 12788831 | #!/usr/bin/env python
# $Id$
"""74 solutions"""
import puzzler
from puzzler.puzzles.hexiamonds import Hexiamonds4x9
puzzler.run(Hexiamonds4x9)
| 0.996094 | 1 |
run_novalid.py | zerebom/santander | 0 | 12788832 | <filename>run_novalid.py<gh_stars>0
from scripts.data_augumation import augmation
from sklearn.model_selection import KFold, StratifiedKFold
from models.lgbm import train_and_predict_novalid
from logs.logger import log_best
from utils import load_datasets, load_target
from sklearn.model_selection import train_test_split
import pandas as pd
import datetime
import logging
from sklearn.model_selection import KFold
import seaborn as sns
import matplotlib.pyplot as plt
import argparse
import json
import numpy as np
np.random.seed(seed=42)
# lgbm-related functions are imported from model.py
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='./configs/default.json')
options = parser.parse_args()
config = json.load(open(options.config))
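# The config JSON is expected to look roughly like this (keys inferred from the
# accesses below; values are purely illustrative):
# {
#     "features": ["base"],
#     "lgbm_params": {"objective": "binary"},
#     "target_name": "target",
#     "ID_name": "ID_code"
# }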
now = datetime.datetime.now()
logging.basicConfig(
filename='./logs/log_{0:%Y%m%d%H%M%S}.log'.format(now), level=logging.DEBUG
)
logging.debug('./logs/log_{0:%Y%m%d%H%M%S}.log'.format(now))
# feats holds feature file names (not column names, so multi-column files such as 'base' are allowed)
feats = config['features']
lgbm_params = config['lgbm_params']
logging.debug(feats)
target_name = config['target_name']
X_train_all, X_test, features = load_datasets(feats)
y_train_all = load_target(target_name)
logging.debug(X_train_all.shape)
# Run LightGBM
y_pred, model = train_and_predict_novalid(
X_train_all, y_train_all, X_test, lgbm_params
)
# Save the results
ID_name = config['ID_name']
sub = pd.DataFrame(pd.read_csv('./data/input/test.csv')[ID_name])
sub[target_name] = y_pred
sub.to_csv(
'./data/output/final_sub_{0:%Y%m%d%H%M%S}_.csv'.format(now),
index=False
)
| 2.40625 | 2 |
source/constructor.py | janzmazek/Wave-propagation | 1 | 12788833 | """
This module constructs network of streets.
"""
import numpy as np
import json
# Adobe flat UI colour scheme
DARK_BLUE = "#2C3E50"
MEDIUM_BLUE = "#2980B9"
LIGHT_BLUE = "#3498DB"
RED = "#E74C3C"
WHITE = "#ECF0F1"
# Colour parameters
STROKE_COLOUR = DARK_BLUE
STREET_COLOUR = DARK_BLUE
JUNCTION_COLOUR = MEDIUM_BLUE
JUNCTION_TEXT = DARK_BLUE
RESULTS_COLOUR = RED
RESULTS_TEXT = DARK_BLUE
# Dimensions
OFFSET = 50
STREET_WIDTH = 8
STROKE_WIDTH = 2
JUNCTION_WIDTH = 20
MAX_RADIUS = 25
INITIAL_DECIBELS = 120
# Max absorption
MAX_ABSORPTION = 0.1
# Don't plot absorption coefficients (option)
ABSORPTION = False
class Constructor(object):
"""
This class of methods initialises a network object of specified dimensions,
modifies the network using modifying methods, outputs the adjacency matrix
of the network and outputs the visualisation in the svg format.
"""
def __init__(self):
self.__horizontals = None
self.__verticals = None
self.__nodes = None
self.__adjacency = None
self.__modified_adjacency = None
self.__positions = None
self.__stage = 0
def set_grid(self, horizontals, verticals, length):
"""
This setter method sets stage 1 (setting and moving) of the construction.
"""
try:
horizontals = int(horizontals)
verticals = int(verticals)
except ValueError:
raise ValueError("Horizontals and verticals must be integers.")
try:
length = float(length)
except ValueError:
raise ValueError("Length must be a floating point number.")
for quantity in [horizontals, verticals, length]:
if quantity < 0:
raise ValueError(
"Horizontals, verticals and length must be positive numbers.")
self.__horizontals = horizontals
self.__verticals = verticals
self.__nodes = horizontals*verticals
self.__adjacency = self.__create_adjacency()
self.__modified_adjacency = None
self.__positions = self.__create_positions(length)
self.__stage = 1
def unset_grid(self):
"""
This method is used to set the network to the stage 0 (instantiation) of
the construction.
"""
self.__horizontals = None
self.__verticals = None
self.__nodes = None
self.__adjacency = None
self.__modified_adjacency = None
self.__positions = None
self.__stage = 0
def __create_adjacency(self):
"""
This private method returns initial adjacency matrix.
"""
adjacency = np.zeros((self.__nodes, self.__nodes), dtype=np.int)
# Normal adjacency matrix for grid network
for i in range(self.__nodes):
for j in range(self.__nodes):
if (j == i+1 and j%self.__verticals != 0) or \
(j == i-1 and i%self.__verticals != 0) or \
j == i+self.__verticals or \
j == i-self.__verticals:
adjacency[i][j] = 1
return adjacency
def __create_positions(self, length):
"""
This private method returns initial positions matrix.
"""
positions = np.zeros((self.__nodes, 2))
for i in range(self.__nodes):
positions[i][0] = i%self.__verticals*length
positions[i][1] = i//self.__verticals*length
return positions
def move_horizontal_line(self, i, length):
"""
This method moves the horizontal line i.
"""
assert self.__stage == 1
if i not in range(self.__horizontals):
raise ValueError("No such horizontal line.")
for node in range(self.__nodes):
if node//self.__verticals == i:
self.__positions[node][1] += length
def move_vertical_line(self, j, length):
"""
This method moves the vertical line j.
"""
assert self.__stage == 1
if j not in range(self.__verticals):
raise ValueError("No such vertical line.")
for node in range(self.__nodes):
if node%self.__verticals == j:
self.__positions[node][0] += length
def delete_connection(self, i, j):
"""
This method deletes the street (i, j).
"""
if self.__stage == 1:
            self.__stage = 2 # set stage to 2 so lines cannot be moved
assert self.__stage == 2
if i not in range(self.__nodes) or j not in range(self.__nodes):
raise ValueError("Nodes out of range.")
if self.__adjacency[i][j] == 0:
raise ValueError("Junctions are not neighbours.")
self.__adjacency[i][j] = 0
self.__adjacency[j][i] = 0
to_delete = []
if sum(self.__adjacency[i]) == 2:
connections = []
for k in range(self.__nodes):
if self.__adjacency[i][k] == 1:
connections.append(k)
if (self.__positions[i][0] == self.__positions[connections[0]][0] and \
self.__positions[i][0] == self.__positions[connections[1]][0]) or \
(self.__positions[i][1] == self.__positions[connections[0]][1] and \
self.__positions[i][1] == self.__positions[connections[1]][1]):
self.__adjacency[connections[0]][connections[1]] = 1
self.__adjacency[connections[1]][connections[0]] = 1
to_delete.append(i)
elif sum(self.__adjacency[i]) == 0:
to_delete.append(i)
if sum(self.__adjacency[j]) == 2:
connections = []
for k in range(self.__nodes):
if self.__adjacency[j][k] == 1:
connections.append(k)
if (self.__positions[j][0] == self.__positions[connections[0]][0] and \
self.__positions[j][0] == self.__positions[connections[1]][0]) or \
(self.__positions[j][1] == self.__positions[connections[0]][1] and \
self.__positions[j][1] == self.__positions[connections[1]][1]):
self.__adjacency[connections[0]][connections[1]] = 1
self.__adjacency[connections[1]][connections[0]] = 1
to_delete.append(j)
elif sum(self.__adjacency[j]) == 0:
to_delete.append(j)
if len(to_delete) != 0:
self.__adjacency = np.delete(self.__adjacency, to_delete, axis=0)
self.__adjacency = np.delete(self.__adjacency, to_delete, axis=1)
self.__positions = np.delete(self.__positions, to_delete, axis=0)
self.__nodes = int(self.__nodes - len(to_delete))
def modify_adjacency(self, width, alpha, beta):
"""
This method creates new adjacency matrix with dictionaries of keys
(alpha, beta, street width, street length, orientation) instead of 1s.
"""
if self.__stage == 1 or self.__stage == 2:
self.__stage = 3
assert self.__stage == 3
try:
width = float(width)
alpha = float(alpha)
beta = float(beta)
except ValueError:
raise ValueError("Width and absorption must be floating point numbers.")
if width <= 0:
raise ValueError("Width must be a positive number.")
if alpha < 0 or alpha > 1 or beta < 0 or beta > 1:
raise ValueError("Absorption must be a number between 0 and 1.")
self.__modified_adjacency = self.__adjacency.tolist() # To python structure
positions = self.__positions
for i in range(self.__nodes):
for j in range(i):
if self.__adjacency[i][j] == 1:
if positions[i][1] == positions[j][1]:
length = abs(positions[i][0] - positions[j][0]).tolist()
if positions[i][0] < positions[j][0]:
orientation = 0
elif positions[i][0] > positions[j][0]:
orientation = 2
else:
raise ValueError("Points are at the same position.")
elif positions[i][0] == positions[j][0]:
length = abs(positions[i][1] - positions[j][1]).tolist()
if positions[i][1] < positions[j][1]:
orientation = 1
elif positions[i][1] > positions[j][1]:
orientation = 3
else:
raise ValueError("Points are at the same position.")
else:
raise ValueError("Points are not colinear.")
self.__modified_adjacency[i][j] = {
"alpha": alpha,
"beta": beta,
"width": width,
"length": length,
"orientation": orientation}
self.__modified_adjacency[j][i] = {
"alpha": alpha,
"beta": beta,
"width": width,
"length": length,
"orientation": (orientation+2)%4}
def unmodify_adjacency(self):
"""
This method is used to set the stage to stage 2 (deleting) of the
construction.
"""
self.__stage = 2
self.__modified_adjacency = None
def change_width(self, i, j, width):
"""
This method changes the street width of street (i, j).
"""
assert self.__stage == 3
try:
width = float(width)
except ValueError:
raise ValueError("Width must be a floating point number.")
if width <= 0:
raise ValueError("Width must be a positive number.")
if i not in range(self.__nodes) or j not in range(self.__nodes):
raise ValueError("Nodes out of range")
if self.__modified_adjacency[i][j] == 0:
raise ValueError("Junctions are not neighbours.")
self.__modified_adjacency[i][j]["width"] = width
self.__modified_adjacency[j][i]["width"] = width
def change_alpha(self, i, j, alpha):
"""
This method changes the wall absorption of street (i, j).
"""
assert self.__stage == 3
try:
alpha = float(alpha)
except ValueError:
raise ValueError("Absorption must be a floating point number.")
if alpha < 0 or alpha > 1:
raise ValueError("Absorption must be a number between 0 and 1")
if i not in range(self.__nodes) or j not in range(self.__nodes):
raise ValueError("Nodes out of range.")
if self.__modified_adjacency[i][j] == 0:
raise ValueError("Junctions are not neighbours.")
self.__modified_adjacency[i][j]["alpha"] = alpha
self.__modified_adjacency[j][i]["alpha"] = alpha
def change_beta(self, i, j, beta):
"""
This method changes the air absorption of street (i, j).
"""
assert self.__stage == 3
try:
beta = float(beta)
except ValueError:
raise ValueError("Absorption must be a floating point number.")
if beta < 0 or beta > 1:
raise ValueError("Absorption must be a number between 0 and 1")
if i not in range(self.__nodes) or j not in range(self.__nodes):
raise ValueError("Nodes out of range.")
if self.__modified_adjacency[i][j] == 0:
raise ValueError("Junctions are not neighbours.")
self.__modified_adjacency[i][j]["beta"] = beta
self.__modified_adjacency[j][i]["beta"] = beta
def get_horizontals(self):
"""
This getter method returns the number of horizontal streets.
"""
return self.__horizontals
def get_verticals(self):
"""
This getter method returns the number of vertical streets.
"""
return self.__verticals
def get_adjacency(self):
"""
This getter method returns the normal adjacency matrix.
"""
return self.__adjacency
def get_modified_adjacency(self):
"""
This getter method returns the modified adjacency matrix.
"""
return self.__modified_adjacency
def get_positions(self):
"""
This getter method returns the positions matrix.
"""
return self.__positions
def get_stage(self):
"""
This getter method returns current stage index.
"""
return self.__stage
def import_network(self, invalues):
"""
This method is used to import existing network from the invalues
dictionary.
"""
self.__horizontals = invalues["horizontals"]
self.__verticals = invalues["verticals"]
self.__nodes = invalues["nodes"]
self.__adjacency = np.array(invalues["adjacency"])
self.__modified_adjacency = invalues["modified_adjacency"]
self.__positions = np.array(invalues["positions"])
self.__stage = invalues["stage"]
def export_network(self, filename):
"""
This method is used to export currently constructed network to json
format to some file.
"""
data = {
"horizontals": self.__horizontals,
"verticals": self.__verticals,
"nodes": self.__nodes,
"adjacency": self.__adjacency.tolist(),
"modified_adjacency": self.__modified_adjacency,
"positions": self.__positions.tolist(),
"stage": self.__stage
}
with open(filename, "w") as file:
json.dump(data, file)
def draw_network(self, filename, results=False):
"""
This method outputs file "output.html" with svg drawing of network and
        optionally plots the results.
"""
def get_hex_fill(coefficient, max_absorption):
red = hex(int(coefficient/max_absorption*255))
red = red[-2:] if len(red)==4 else "0{0}".format(red[-1])
blue = hex(int((1-coefficient/max_absorption)*255))
blue = blue[-2:] if len(blue)==4 else "0{0}".format(blue[-1])
fill = "#{0}00{1}".format(red, blue)
return fill
def svg_header(width, height):
return "<svg width='{0}' height='{1}'>\n".format(width, height)
def svg_line(x1, y1, x2, y2, fill=STREET_COLOUR, width=STREET_WIDTH):
return "<line x1='{0}' y1='{1}' x2='{2}' y2='{3}' \
style='stroke: {4}; stroke-width: {5}'/>\n".format(x1+OFFSET, y1+OFFSET,
x2+OFFSET, y2+OFFSET,
fill, width)
def svg_square(x, y):
return "<rect x='{0}' y='{1}' width='{2}' height='{2}' \
style='stroke: {3}; stroke-width: {4}; fill: {5}'/>\n".format(x-JUNCTION_WIDTH/2+OFFSET,
y-JUNCTION_WIDTH/2+OFFSET,
JUNCTION_WIDTH,
STROKE_COLOUR,
STROKE_WIDTH,
JUNCTION_COLOUR
)
def svg_circle(x, y, r, fill):
return "<circle cx='{0}' cy='{1}' r='{2}' style='stroke: {3}; \
stroke-width: {4}; fill: {5}'/>\n".format(x+OFFSET,
y+OFFSET,
r,
STROKE_COLOUR,
STROKE_WIDTH,
fill
)
def svg_text(x, y, colour, size, text):
move = (size-15)/4 # adjust text position
return "<text text-anchor='middle' x='{0}' y='{1}' \
style='fill: {2}; font-size: {3}'>{4}</text>\n".format(x+OFFSET,
y+OFFSET+JUNCTION_WIDTH/4 + move,
colour,
size,
text
)
positions = self.__positions
if self.__stage == 3:
adjacency = self.__modified_adjacency
modified = True
else:
adjacency = self.__adjacency
modified = False
with open(filename, "w") as file:
width = positions[self.__nodes-1][0]+2*OFFSET
height = positions[self.__nodes-1][1]+2*OFFSET
file.write(svg_header(width, height))
# Draw walls if modified (with absorption)
if modified and ABSORPTION:
for i in range(self.__nodes):
for j in range(i):
if adjacency[i][j] != 0:
[xi, yi] = positions[i]
[xj, yj] = positions[j]
alpha = adjacency[i][j]["alpha"]
alpha_fill = get_hex_fill(alpha, MAX_ABSORPTION)
width = adjacency[i][j]["width"]
translation = width/2
if xi == xj:
file.write(svg_line(xi-translation, yi,
xj-translation, yj,
alpha_fill, width
))
file.write(svg_line(xi+translation, yi,
xj+translation, yj,
alpha_fill, width
))
elif yi == yj:
file.write(svg_line(xi, yi-translation,
xj, yj-translation,
alpha_fill, width
))
file.write(svg_line(xi, yi+translation,
xj, yj+translation,
alpha_fill, width
))
# Draw streets (with absorption if modified)
for i in range(self.__nodes):
for j in range(i):
if adjacency[i][j] != 0:
[xi, yi] = positions[i]
[xj, yj] = positions[j]
if not modified or not ABSORPTION:
file.write(svg_line(xi, yi, xj, yj))
else:
beta = adjacency[i][j]["beta"]
beta_fill = get_hex_fill(beta, MAX_ABSORPTION)
width = adjacency[i][j]["width"]
file.write(svg_line(xi, yi, xj, yj,
beta_fill, width
))
# Draw junctions (rectangles with numbers)
counter = 0
for position in positions:
file.write(svg_square(position[0], position[1]))
file.write(svg_text(position[0], position[1], JUNCTION_TEXT, 15, counter))
counter += 1
# Draw results
if results:
(X, Y, Z) = results
for i in range(len(Z)):
decibels = 20*np.log10(Z[i]*10**(INITIAL_DECIBELS/20))
if decibels < 0:
continue
# Radius
radius = (decibels/INITIAL_DECIBELS)*MAX_RADIUS
file.write(svg_circle(X[i], Y[i], radius, RESULTS_COLOUR))
if decibels > 30:
file.write(svg_text(X[i], Y[i], RESULTS_TEXT, radius, int(round(decibels))))
file.write("</svg>")
| 3.6875 | 4 |
chunonline/apps/organization/foms.py | andanlove/chunonline | 1 | 12788834 | # _*_ coding: utf-8 _*_
import re
__author__ = "andan"
__data__ = "2018/9/22 12:44"
from django import forms
from operation.models import UserAsk
class UserAskForm(forms.ModelForm):
class Meta:
model = UserAsk
fields = ['name', 'moblie', 'course_name']
def clean_moblie(self):
moblie = self.cleaned_data['moblie']
        REGEX_MOBILE = r"^1[3567890]\d{9}$"
p = re.compile(REGEX_MOBILE)
if p.match(moblie):
return moblie
else:
            raise forms.ValidationError("Invalid mobile number", code="mobile_invalid")
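        # e.g. "13812345678" matches the pattern above and is returned, while
        # "12345" raises the ValidationError (both values are hypothetical).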
| 2.53125 | 3 |
angr/engines/soot/expressions/phi.py | vwvw/angr | 0 | 12788835 | <reponame>vwvw/angr
import logging
from .base import SimSootExpr
l = logging.getLogger('angr.engines.soot.expressions.phi')
class SimSootExpr_Phi(SimSootExpr):
def _execute(self):
try:
local = [self._translate_value(v) for v, idx in self.expr.values if idx == self.state.scratch.source.block_idx][0]
value = self.state.memory.load(local, none_if_missing=True)
self.expr = value
except IndexError:
# TODO is there a better way to do this?
local_options = [self._translate_value(v) for v, idx in self.expr.values[::-1]]
for local in local_options:
value = self.state.memory.load(local, none_if_missing=True)
if value is not None:
self.expr = value
return
| 2.03125 | 2 |
src/nextstep_plist/__init__.py | techdragon/python-nextstep-plist | 1 | 12788836 | <filename>src/nextstep_plist/__init__.py
from nextstep_plist.decoder import PListDecoder
__version__ = "0.1.0"
__all__ = [
'load', 'loads',
'PListDecoder',
]
_default_decoder = PListDecoder()
def load(fp):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a NeXTSTEP property list document) to a Python object.
"""
return loads(fp.read())
def loads(s):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a
NeXTSTEP property list document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
"""
return _default_decoder.decode(s)
| 2.25 | 2 |
src/models/main.py | bh1995/mnist_cookiecutter | 0 | 12788837 | <filename>src/models/main.py
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 4 13:01:26 2021
@author: bjorn
main script to call and run all functions from for mnist_cookiecutter
"""
# from google.colab import files
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
# sys.path
sys.path.append(
"./Machine Learning Operations 21/mnist_cookiecutter"
) # need to add path
# load data
from src.data.data1 import *
(
train_loader,
test_loader,
) = load_mnist() # this will download mnist data, or load it if already downloaded
# load model
device = torch.device("cpu")
from model import *
model = Net()
model = model.to(device)
# load trainer and return trained model
from train import train_function
# loss_function = F.nll_loss()
optimizer = optim.SGD(model.parameters(), lr=0.0001, momentum=0.9)
model, train_losses = train_function(model, train_loader, optimizer, n_epochs=30)
# evaluate model on test set
# not sure why, but test.py does not work, file must be named something else
from test1 import test_function
test_losses = test_function(model, test_loader)
# after 50 epochs -> Test set: Avg. loss: 0.1075, Accuracy: 9667/10000 (97%)
# after 30 epichs -> Test set: Avg. loss: 0.3469, Accuracy: 9082/10000 (91%)
# save test loss values
import pickle
name = "C:/Users/bjorn/OneDrive/Dokument/University/DTU/Machine Learning Operations 21/mnist_cookiecutter/reports/test_losses_1.csv"
open_file = open(name, "wb")
pickle.dump(test_losses, open_file)
open_file.close()
# save trained model weights for later evaluation
# 'C:/Users/bjorn/OneDrive/Dokument/University/DTU/Machine Learning Operations 21/mnist_cookiecutter/models/model_1.pth'
save_model_path = "./models/model_2.pth"
torch.save(model.state_dict(), save_model_path)
| 2.65625 | 3 |
ref/files/student.py | skrymets/python-core-and-advanced | 0 | 12788838 | class Student:
def __init__(self,id,name,testscore):
self.id = id
self.name = name
self.testscore = testscore
def display(self):
print(self.id,self.name,self.testscore) | 3.21875 | 3 |
run/sb3_NeedlePick_tester.py | rokas-bendikas/SurRoL | 0 | 12788839 | <gh_stars>0
import gym
import surrol
import numpy as np
from matplotlib import pyplot as plt
import time
from stable_baselines3 import DDPG,PPO,TD3, HerReplayBuffer, VecHerReplayBuffer
from stable_baselines3.common.noise import NormalActionNoise
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize, SubprocVecEnv
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.env_util import make_vec_env
if __name__ == '__main__':
############################################
############## PARAMETERS ##################
############################################
env_id = 'NeedlePickPointSpecific-v0'
log_dir = "./logs/TD3/"+env_id+'/'
seed = 1
env = make_vec_env(env_id,1,seed,monitor_dir=log_dir,env_kwargs={'render_mode':'human','seed':seed})
env = VecNormalize.load(log_dir+"TD3_HER_"+env_id+"_stats", env)
# do not update them at test time
env.training = False
# reward normalization is not needed at test time
env.norm_reward = False
model = TD3.load(log_dir+'TD3_HER_'+env_id+'', env=env)
for _ in range(30):
obs = env.reset()
done = [False]
while not done[0] or not bool(info[0]["is_success"]):
time.sleep(0.1)
env.render()
action, _ = model.predict(obs, deterministic=True)
obs, reward, done, info = env.step(action)
| 1.976563 | 2 |
core/recc/core/mixin/context_permission.py | bogonets/answer | 3 | 12788840 | <filename>core/recc/core/mixin/context_permission.py
# -*- coding: utf-8 -*-
from typing import List, Optional, Any, Union
from recc.core.mixin.context_base import ContextBase
from recc.database.struct.permission import Permission
from recc.packet.permission import RawPermission
from recc.packet.cvt.permission import permission_to_raw
from recc.session.session_ex import SessionEx
class ContextPermission(ContextBase):
async def create_permission(
self,
slug: str,
name: Optional[str] = None,
description: Optional[str] = None,
features: Optional[List[str]] = None,
extra: Optional[Any] = None,
r_layout=False,
w_layout=False,
r_storage=False,
w_storage=False,
r_manager=False,
w_manager=False,
r_graph=False,
w_graph=False,
r_member=False,
w_member=False,
r_setting=False,
w_setting=False,
hidden=False,
lock=False,
) -> int:
return await self.database.insert_permission(
slug=slug,
name=name,
description=description,
features=features,
extra=extra,
r_layout=r_layout,
w_layout=w_layout,
r_storage=r_storage,
w_storage=w_storage,
r_manager=r_manager,
w_manager=w_manager,
r_graph=r_graph,
w_graph=w_graph,
r_member=r_member,
w_member=w_member,
r_setting=r_setting,
w_setting=w_setting,
hidden=hidden,
lock=lock,
)
@staticmethod
def _is_permission_equals_for_update(
permission: Permission,
slug: Optional[str] = None,
name: Optional[str] = None,
description: Optional[str] = None,
features: Optional[List[str]] = None,
extra: Optional[Any] = None,
r_layout: Optional[bool] = None,
w_layout: Optional[bool] = None,
r_storage: Optional[bool] = None,
w_storage: Optional[bool] = None,
r_manager: Optional[bool] = None,
w_manager: Optional[bool] = None,
r_graph: Optional[bool] = None,
w_graph: Optional[bool] = None,
r_member: Optional[bool] = None,
w_member: Optional[bool] = None,
r_setting: Optional[bool] = None,
w_setting: Optional[bool] = None,
hidden: Optional[bool] = None,
) -> bool:
"""When permission is 'locked',
all attributes except `lock` must not be changed.
"""
if slug is not None:
if permission.slug != slug:
return False
if name is not None:
if permission.name != name:
return False
if description is not None:
if permission.description != description:
return False
if features is not None:
if permission.features != features:
return False
if extra is not None:
if permission.extra != extra:
return False
if r_layout is not None:
if permission.r_layout != r_layout:
return False
if w_layout is not None:
if permission.w_layout != w_layout:
return False
if r_storage is not None:
if permission.r_storage != r_storage:
return False
if w_storage is not None:
if permission.w_storage != w_storage:
return False
if r_manager is not None:
if permission.r_manager != r_manager:
return False
if w_manager is not None:
if permission.w_manager != w_manager:
return False
if r_graph is not None:
if permission.r_graph != r_graph:
return False
if w_graph is not None:
if permission.w_graph != w_graph:
return False
if r_member is not None:
if permission.r_member != r_member:
return False
if w_member is not None:
if permission.w_member != w_member:
return False
if r_setting is not None:
if permission.r_setting != r_setting:
return False
if w_setting is not None:
if permission.w_setting != w_setting:
return False
if hidden is not None:
if permission.hidden != hidden:
return False
return True
async def update_permission(
self,
uid: int,
slug: Optional[str] = None,
name: Optional[str] = None,
description: Optional[str] = None,
features: Optional[List[str]] = None,
extra: Optional[Any] = None,
r_layout: Optional[bool] = None,
w_layout: Optional[bool] = None,
r_storage: Optional[bool] = None,
w_storage: Optional[bool] = None,
r_manager: Optional[bool] = None,
w_manager: Optional[bool] = None,
r_graph: Optional[bool] = None,
w_graph: Optional[bool] = None,
r_member: Optional[bool] = None,
w_member: Optional[bool] = None,
r_setting: Optional[bool] = None,
w_setting: Optional[bool] = None,
hidden: Optional[bool] = None,
lock: Optional[bool] = None,
force=False,
) -> None:
if not force:
permission = await self.database.select_permission_by_uid(uid)
if permission.lock:
permission_equals = self._is_permission_equals_for_update(
permission,
slug,
name,
description,
features,
extra,
r_layout,
w_layout,
r_storage,
w_storage,
r_manager,
w_manager,
r_graph,
w_graph,
r_member,
w_member,
r_setting,
w_setting,
hidden,
)
if not permission_equals:
raise RuntimeError(f"Locked permission: {uid}")
await self.database.update_permission_by_uid(
uid=uid,
slug=slug,
name=name,
description=description,
features=features,
extra=extra,
r_layout=r_layout,
w_layout=w_layout,
r_storage=r_storage,
w_storage=w_storage,
r_manager=r_manager,
w_manager=w_manager,
r_graph=r_graph,
w_graph=w_graph,
r_member=r_member,
w_member=w_member,
r_setting=r_setting,
w_setting=w_setting,
hidden=hidden,
lock=lock,
)
async def delete_permission(self, uid: int, force=False) -> None:
if not force:
if await self.database.select_permission_lock_by_uid(uid):
raise RuntimeError(f"Locked permission: {uid}")
default_uids = self.database.get_default_permission_uids()
if uid in default_uids:
raise RuntimeError("Default permissions cannot be removed")
await self.database.delete_permission_by_uid(uid)
async def get_permission(self, uid: int) -> Permission:
return await self.database.select_permission_by_uid(uid)
async def get_permissions(self) -> List[Permission]:
return await self.database.select_permissions()
async def get_group_permission(self, user_uid: int, group_uid: int) -> Permission:
return await self.database.select_permission_by_user_uid_and_group_uid(
user_uid, group_uid
)
async def get_project_permission(
self, user_uid: int, project_uid: int
) -> Permission:
return await self.database.select_permission_by_user_uid_and_project_uid(
user_uid, project_uid
)
async def get_best_permission(
self, user_uid: int, group_uid: int, project_uid: int
) -> Permission:
try:
return await self.get_project_permission(user_uid, project_uid)
except: # noqa
return await self.get_group_permission(user_uid, group_uid)
async def get_group_raw_permission(
self, session: SessionEx, group: Union[str, int]
) -> RawPermission:
try:
if isinstance(group, int):
group_uid = group
elif isinstance(group, str):
group_uid = await self.get_group_uid(group)
else:
group_uid = await self.get_group_uid(str(group))
permission = await self.get_group_permission(session.uid, group_uid)
return permission_to_raw(permission, session.is_admin)
except: # noqa
if session.is_admin:
return RawPermission.all_true()
else:
return RawPermission.all_false()
async def get_project_raw_permission(
self, session: SessionEx, group: Union[str, int], project: Union[str, int]
) -> RawPermission:
try:
if isinstance(group, int):
group_uid = group
elif isinstance(group, str):
group_uid = await self.get_group_uid(group)
else:
group_uid = await self.get_group_uid(str(group))
if isinstance(project, int):
project_uid = project
elif isinstance(project, str):
project_uid = await self.get_project_uid(group_uid, project)
else:
project_uid = await self.get_project_uid(group_uid, str(project))
permission = await self.get_best_permission(
session.uid, group_uid, project_uid
)
return permission_to_raw(permission, session.is_admin)
except: # noqa
if session.is_admin:
return RawPermission.all_true()
else:
return RawPermission.all_false()
| 1.960938 | 2 |
tl_env/envs/double_goal.py | mhtb32/tl-env | 1 | 12788841 | <reponame>mhtb32/tl-env<filename>tl_env/envs/double_goal.py
from typing import Dict, Tuple
from highway_env.envs.common.abstract import Observation
from highway_env.envs.common.action import Action
from highway_env.road.objects import Landmark
from tl_env.envs.single_goal import SingleGoalIDMEnv
class DoubleGoalEnv(SingleGoalIDMEnv):
"""A continuous control environment with two goals and some adversary vehicles.
The vehicle must reach the goals while avoiding other vehicles.
"""
def default_config(self) -> Dict:
config = super().default_config()
config.update(
{
"duration": 50,
"goal2_position": [260, 4]
}
)
return config
def step(self, action: Action) -> Tuple[Observation, float, bool, Dict]:
obs, reward, terminal, info = super().step(action)
goal_achievement = self._goal_achievement()
# calculate mid-done flag
mid_done = self.vehicle.crashed or goal_achievement['g1'] or self.steps >= 25
# update info and terminal with new automaton state
info.update(dict(mid_done=mid_done))
terminal = self._is_terminal()
return obs, reward, terminal, info
def _create_road(self) -> None:
super()._create_road()
self.goal2 = Landmark(self.road, self.config["goal2_position"], heading=0)
self.road.objects.append(self.goal2)
def _goal_achievement(self) -> Dict:
# noinspection PyProtectedMember
return {
'g1': self.vehicle._is_colliding(self.goal),
'g2': self.vehicle._is_colliding(self.goal2)
}
def _is_terminal(self) -> bool:
"""Determines end of episode
Determine end of episode when:
- The vehicle crashes or,
- Reaches the second goal or,
            - Episode duration passes a specific amount of time
:return: a boolean indicating end of episode
"""
return self.vehicle.crashed or self._goal_achievement()['g2'] or self.steps >= self.config['duration']
def _reward(self, action: Action) -> float:
# noinspection PyProtectedMember
return super()._reward(action) + self.vehicle._is_colliding(self.goal2) * 1.0
| 2.8125 | 3 |
ecdc_status/upcoming_events/admin.py | ess-dmsc/ecdc-status | 0 | 12788842 | <reponame>ess-dmsc/ecdc-status<filename>ecdc_status/upcoming_events/admin.py
from django.contrib import admin
from django.utils import timezone
from .models import EventIcon, Event
from django.utils.translation import gettext_lazy as _
class FilterOld(admin.SimpleListFilter):
title = _('future events')
# Parameter for the filter that will be used in the URL query.
parameter_name = 'show'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return (
('false', _('Show old')),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
        # 'show=false' means "Show old": return the full queryset unfiltered;
        # otherwise keep only future events plus events that are currently ongoing.
if self.value() == 'false':
return queryset.filter()
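        # events that have already started (StartDate in the past) but not yet ended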
CurrentlyOngoing = queryset.exclude(EndDate__isnull = True).filter(EndDate__gte = timezone.now()).filter(StartDate__lt = timezone.now())
return queryset.filter(StartDate__gte=timezone.now()).union(CurrentlyOngoing)
@admin.register(Event)
class EventAdmin(admin.ModelAdmin):
list_display = ('Title', 'StartDate', 'Icon', 'isOngoing')
list_filter = (FilterOld, )
admin.site.register(EventIcon)
| 2.328125 | 2 |
run_participants.py | tbenne10/TopographicSurveyAnalysis | 0 | 12788843 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#This script scores results from each student
#Drawn images are downloaded from a .csv file, converted from string base64 encoding,
#and scored against machine learning models saved to disk
import csv
import os
#import file
import cv2
import re
import base64
import numpy as np
from keras.models import model_from_json
from sklearn.metrics import cohen_kappa_score
from tkinter import *
import tkinter as tk
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import askdirectory
from tkinter import simpledialog
import sys
import os.path
#Specify max size due to large size of Base64 images
#**~MAC/LINUX~**#
#csv.field_size_limit(sys.maxsize)
#**~WINDOWS 64 BIT~**#
csv.field_size_limit(2**30)
#Specify which questions are drawn images. Their associated value is the
#size of the image used in data preprocessing for the machine learning model.
drawn_images ={
"Q1": 64,
"Q2": 128,
"Q3": 64,
"Q4": 64,
"Q7": 128,
"Q8": 128,
"Q9": 128,
"Q17": 128,
"Q18": 64
}
#init variables
filename = ""
filedir = ""
modeldir = ""
prefix = ""
##Retrieve the CSV file to read image data
def getCSVfile():
global filename
global filedir
filename = askopenfilename()
filedir = os.path.abspath(os.path.join(filename, os.pardir))
filedir += "/"
print(filedir)
#Select the directory containing H5 and JSON model files.
def getModelDir():
global modeldir
modeldir = askdirectory()
modeldir += "/"
#Select a prefix to read only specific records starting with the prefix.
def getPrefix():
global prefix
prefix = simpledialog.askstring("input string", "Enter an ID prefix:")
#Run program and create two response CSV files.
def Start():
#for indexing
drawn_images_list = list(drawn_images)
#Load models:
models = []
print("Loading models... This may take a moment")
for key in drawn_images:
json_file_path = modeldir + key + ".json"
weight_file_path = modeldir + key + ".h5"
json_file = open(json_file_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights(weight_file_path)
loaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
models.append(loaded_model)
print(f"Loaded model {key}...")
print("Done loading models")
#Function to process each individual image
#Returns a prediction score of 1 or 0.
def process_image(Qnum, uri, partid):
print(f"Processing image: {Qnum}")
#Ensure value exists
if(uri == None): return 0
#Grab value to resize image
size = drawn_images[Qnum]
#create image file as temporary
path = modeldir + "temp.png"
        img_file = open(path, "wb")
        img_file.write(base64.b64decode(uri))
        img_file.close()  # flush to disk before cv2 reads the file and before it is removed later
        img = cv2.imread(path, 0)
#Test resizing image. If the URI is corrupted, return 'C'.
try:
img = cv2.resize(img, (size, size))
except:
return 'c'
img_reshape = np.array(img).reshape(-1,size,size,1)
#Run image against model
print("Acc: ")
print (models[drawn_images_list.index(Qnum)].predict(img_reshape))
pred = models[drawn_images_list.index(Qnum)].predict_classes(img_reshape)[0]
#This flips the class as the prediction score is on the opposite entry.
pred = ("1", "0")[pred == 0]
pred_array = models[drawn_images_list.index(Qnum)].predict(img_reshape)
#Remove the image to make room for another
os.remove(modeldir + "temp.png")
eps = .15 #Min. acceptable criterion
if(1-np.amax(pred_array) > eps):
return 'f'
return pred
#Open two files, one for response scores and the other for written
#question responses. Each file name is appended with a prefix if
#a prefix is give.
data = open(filename, 'r')
responses = open(filedir + 'responses_pref' + prefix + '.csv', 'w')
Wresponses = open(filedir + 'Wresponses_pref' + prefix + '.csv', 'w')
read_data = csv.reader(data, delimiter=',')
write_responses = csv.writer(responses, delimiter=',',
quotechar='"', quoting=csv.QUOTE_ALL)
write_Wresponses = csv.writer(Wresponses, delimiter=',',
quotechar='"', quoting=csv.QUOTE_ALL)
line_count = 0
for row in read_data:
if row[0].startswith(prefix, 0, len(prefix)):
print(row[0])
if line_count == 0:
line_count += 1
write_responses.writerow(['Number','Participant', 'Q1_drawn', 'Q2_drawn',
'Q3_drawn', 'Q4_drawn', 'Q7_drawn', 'Q8_drawn',
'Q9_drawn', 'Q17_drawn', 'Q18_drawn', 'Q5_response',
'Q5_correct_response', 'Q5_accuracy','Q6_response',
'Q6_correct_response', 'Q6_accuracy','Q10_1_response',
'Q10_1_correct_response','Q10_1_accuracy', 'Q10_2_response',
'Q10_2_correct_response', 'Q10_2_accuracy', 'Q11_response',
'Q11_correct_response', 'Q11_accuracy', 'Q12_response',
'Q12_correct_response','Q12_accuracy', 'Q13_response',
'Q13_correct_response', 'Q13_accuracy', 'Q14_1_response',
'Q14_1_correct_response', 'Q14_1_accuracy', 'Q14_2_response',
'Q14_2_correct_response','Q14_2_accuracy', 'Q15_AB_response',
'Q15_AB_correct_response','Q15_AB_accuracy', 'Q15_AD_response',
'Q15_AD_correct_response','Q15_AD_accuracy', 'Q15_BC_response',
'Q15_BC_correct_response','Q15_BC_accuracy', 'Q15_CD_response',
'Q15_CD_correct_response','Q15_CD_accuracy','Q15_BD_response',
'Q15_BD_correct_response','Q15_BD_accuracy', 'Total', 'Date Submitted'])
write_Wresponses.writerow(['Number','Participant','Q2_written', 'Q7_written', 'Q8_written',
'Q9_written', 'Q14_2_written', 'Q17_written', 'Q18_written', 'Date Submitted'])
else:
#Resp used for responses, respW for written reponses
resp = []
respW = []
count = 0
##logic here
#append number and name
resp.append(line_count)
resp.append(row[0])
respW.append(line_count)
respW.append(row[0])
#append drawn images
for x in drawn_images:
y = row[drawn_images_list.index(x) + 2].split(',')
if(len(y) > 1):
resp.append(process_image(x, y[1], row[0]))
else: resp.append("N/A")
#print(row[drawn_images_list.index(x) + 2])
##Q5
resp.append(row[23])
resp.append("A")
resp.append(("0", "1")[row[23] == "A"])
#Q6
resp.append(row[24])
resp.append("A")
resp.append(("0", "1")[row[24] == "A"])
#Q10_1
resp.append(row[15])
resp.append("Josh")
resp.append(("0", "1")["josh" in row[15].lower()])
#Q10_2
resp.append(row[18])
resp.append("josh")
resp.append(("0", "1")["josh" in row[18].lower()])
#Q11
resp.append(row[25])
resp.append("B")
resp.append(("0", "1")[row[25] == "B"])
#Q12
resp.append(row[26])
resp.append("B")
resp.append(("0", "1")[row[26] == "B"])
#Q13
resp.append(row[17])
resp.append("40")
resp.append(("0", "1")["40" in row[19]])
#Q14_1
resp.append(row[18])
resp.append("Josh")
resp.append(("0", "1")["josh" in row[18].lower()])
#Q15
##Refer to re library for digit extraction
resp.append(row[20])
resp.append("7040-7080")
                val = re.findall(r"\d+", row[20])
if(len(val) > 0):
resp.append(("0", "1")[int(val[0]) >= 7040 and int(val[0]) <= 7080])
else: resp.append("0")
#Q16:
resp.append(row[27])
resp.append("yes")
resp.append(("0", "1")[row[27] == "yes"])
resp.append(row[28])
resp.append("yes")
resp.append(("0", "1")[row[28] == "yes"])
resp.append(row[29])
resp.append("yes")
resp.append(("0", "1")[row[29] == "yes"])
resp.append(row[30])
resp.append("no")
resp.append(("0", "1")[row[30] == "no"])
resp.append(row[31])
resp.append("yes")
resp.append(("0", "1")[row[31] == "yes"])
##WRITE ALL THE WRITTEN RESPONSES HERE
respW.append(row[11])
respW.append(row[12])
respW.append(row[13])
respW.append(row[14])
respW.append(row[16])
respW.append(row[19])
respW.append(row[21])
respW.append(row[22])
#Total
sum = 0
for x in resp:
if x == "1":
sum += 1
resp.append(sum)
#Dates
resp.append(row[32])
respW.append(row[32])
#Write rows
write_responses.writerow(resp)
write_Wresponses.writerow(respW)
line_count += 1
print(f"Finished, {line_count} rows read: ")
data.close()
responses.close()
##Run GUI
root = tk.Tk()
root.wm_title("Run Participant Data")
selectCsv = tk.Button(root, text='Select CSV file', width=25, command=getCSVfile)
selectCsv.pack()
selectDirectory = tk.Button(root, text='Select model directory', width=25, command=getModelDir)
selectDirectory.pack()
selectPrefix = tk.Button(root, text='Select an ID prefix', width=25, command=getPrefix)
selectPrefix.pack()
startButton = tk.Button(root, text='Start', width=25, command=Start)
startButton.pack()
root.mainloop()
| 3.5 | 4 |
journalClub/polls/views.py | chabanr/journalClub | 0 | 12788844 | <filename>journalClub/polls/views.py
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from django.http import Http404
from django.urls import reverse
from .models import Question
# Create your views here.
#
# def index(request):
# return HttpResponse("Hello World! You're at the polls index")
# Old detail function (stub)
# def detail(request, question_id):
# return HttpResponse("You are looking at question {:d}".format(question_id))
def detail(request, question_id):
""" These two pieces are equivalent """
# try:
# question = Question.objects.get(pk=question_id)
# except Question.DoesNotExist:
# raise Http404("Question does not exist!")
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/detail.html', {'question': question})
def results(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/results.html', {'question': question})
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except KeyError:
# redisplay the question voting form
return render(request, 'polls/detail.html', {
'question': question,
'error_message': 'You need to make a choice',
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data This prevents data from being posted twice if a
# user hit the Back button
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
# older listing of the questions
# def index(request):
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# output = ', '.join([q.question_text for q in latest_question_list])
# return HttpResponse(output)
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
template = loader.get_template('polls/index.html')
# This context dictionary maps template variable names to python objects
context = {
'latest_question_list':latest_question_list,
}
# the following two return statements are equivalent
return render(request, 'polls/index.html', context)
#return HttpResponse(template.render(context, request))
| 2.609375 | 3 |
mandarin/parser.py | joegasewicz/amber | 2 | 12788845 | from typing import Tuple, Union, Dict, Optional, Any
import re
from mandarin.core import ELEMENTS
class NodeHasNoValueError(Exception):
pass
class Parser:
def __init__(self):
pass
@staticmethod
def remove_white_space(value: str) -> str:
cleaned_val = value.lstrip()
# TODO handle escaped strings
if cleaned_val[0] == "'":
cleaned_val = cleaned_val.split("'")[1]
else:
cleaned_val = cleaned_val.split('"')[1]
return cleaned_val
@staticmethod
def aggregate_into_tuple(*, elem_name: str, content: Any, attr_str: str):
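        # e.g. elem_name='p', content=' "hello"', attr_str="class='x'"
        #      -> ("<p class='x'>", 'hello', '</p>')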
if not content and attr_str:
return ("<%s %s>" % (elem_name, attr_str)), "</%s>" % elem_name
elif content and attr_str:
return ("<%s %s>" % (elem_name, attr_str)), Parser.remove_white_space(content), "</%s>" % elem_name
elif content and not attr_str:
return "<%s>" % elem_name, content, "</%s>" % elem_name
else:
return "<%s>" % elem_name, "</%s>" % elem_name
def parse(self, node: "Node") -> Union[Tuple[str, str, str], Tuple[str, str], str]:
el = node.elem_name
attr_str = ""
if node.elem_name:
if node.attr:
for k, v in node.attr.items():
if not attr_str:
# If this is the first attr then don't add white space
attr_str += f"{k}='{v}'"
else:
attr_str += f" {k}='{v}'"
return Parser.aggregate_into_tuple(elem_name=el, content=node.value, attr_str=attr_str)
elif node.value:
return Parser.remove_white_space(node.value)
else:
raise NodeHasNoValueError("Node did not have any values to parse.")
def add_attrs_to_elem(self):
pass
def parse_elem(self, element: str) -> Tuple[str, Optional[Dict[str, str]]]:
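        # e.g. parse_elem('div(class="box" id="main")') -> ('div', {'class': 'box', 'id': 'main'})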
        elem = re.split(r"\(|\)", element)
        if len(elem) == 1:
            return elem[0], None
attr_dict = {}
attr_str = elem[1]
attrs = attr_str.split(" ")
for attr in attrs:
attr_name, attr_val = attr.split("=")
attr_dict[attr_name] = attr_val.strip('""')
return elem[0], attr_dict
| 3.015625 | 3 |
tests/test_foo.py | AlexDev-py/vkquick | 1 | 12788846 | def test_run():
import vkquick
assert True
| 1.085938 | 1 |
api/letter_templates/tests/tests_view.py | django-doctor/lite-api | 3 | 12788847 | <gh_stars>1-10
from api.letter_templates.helpers import format_user_text
from rest_framework import status
from rest_framework.reverse import reverse
from api.cases.enums import AdviceType, CaseTypeReferenceEnum
from api.cases.enums import CaseTypeSubTypeEnum, CaseTypeEnum
from api.staticdata.decisions.models import Decision
from parameterized import parameterized
from test_helpers.clients import DataTestClient
class LetterTemplatesListTests(DataTestClient):
def setUp(self):
super().setUp()
self.letter_template = self.create_letter_template(
name="SIEL", case_types=[CaseTypeEnum.GOODS.id, CaseTypeEnum.EUA.id]
)
def test_get_letter_templates_success(self):
url = reverse("letter_templates:letter_templates")
response = self.client.get(url, **self.gov_headers)
response_data = response.json()["results"][0]
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response_data["id"], str(self.letter_template.id))
self.assertEqual(response_data["name"], self.letter_template.name)
self.assertEqual(response_data["layout"]["name"], self.letter_template.layout.name)
case_types = [item["reference"]["key"] for item in response_data["case_types"]]
self.assertIn(CaseTypeReferenceEnum.GQY, case_types)
self.assertIn(CaseTypeReferenceEnum.EUA, case_types)
def test_filter_letter_templates_success(self):
url = reverse("letter_templates:letter_templates") + "?name=" + self.letter_template.name
response = self.client.get(url, **self.gov_headers)
response_data = response.json()["results"]
self.assertTrue(self.letter_template.name in [template["name"] for template in response_data])
self.assertTrue(str(self.letter_template.id) in [template["id"] for template in response_data])
def test_get_letter_templates_for_case_success(self):
url = reverse("letter_templates:letter_templates")
self.letter_template.case_types.set([CaseTypeEnum.SIEL.id])
case = self.create_standard_application_case(self.organisation)
response = self.client.get(url + "?case=" + str(case.id), **self.gov_headers)
response_data = response.json()["results"][0]
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response_data["id"], str(self.letter_template.id))
self.assertEqual(response_data["name"], self.letter_template.name)
self.assertEqual(response_data["layout"]["name"], self.letter_template.layout.name)
self.assertEqual(CaseTypeReferenceEnum.SIEL, response_data["case_types"][0]["reference"]["key"])
def test_get_letter_templates_for_decision_success(self):
decision = AdviceType.APPROVE
url = reverse("letter_templates:letter_templates")
self.letter_template.decisions.set([Decision.objects.get(name=decision)])
self.letter_template.case_types.set([CaseTypeEnum.SIEL.id])
case = self.create_standard_application_case(self.organisation)
response = self.client.get(url + "?case=" + str(case.id) + "&decision=" + decision, **self.gov_headers)
response_data = response.json()["results"][0]
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response_data["id"], str(self.letter_template.id))
self.assertEqual(response_data["name"], self.letter_template.name)
self.assertEqual(response_data["layout"]["name"], self.letter_template.layout.name)
def test_get_letter_templates_for_case_doesnt_show_templates_with_decisions_success(self):
self.letter_template.case_types.set([CaseTypeEnum.SIEL.id])
self.letter_template_with_decisions = self.create_letter_template(
name="SIEL_2", case_types=[CaseTypeEnum.SIEL.id], decisions=[Decision.objects.get(name="approve")]
)
case = self.create_standard_application_case(self.organisation)
response = self.client.get(
reverse("letter_templates:letter_templates") + "?case=" + str(case.id), **self.gov_headers
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()["results"]), 1)
self.assertEqual((response.json()["results"][0]["id"]), str(self.letter_template.id))
def test_get_letter_template_success(self):
url = reverse("letter_templates:letter_template", kwargs={"pk": str(self.letter_template.id)})
response = self.client.get(url, **self.gov_headers)
response_data = response.json()
self.assertEqual(response.status_code, status.HTTP_200_OK)
template = response_data["template"]
self.assertEqual(template["id"], str(self.letter_template.id))
self.assertEqual(template["name"], self.letter_template.name)
self.assertEqual(template["layout"]["id"], str(self.letter_template.layout.id))
self.assertEqual(template["letter_paragraphs"], [str(self.letter_template.letter_paragraphs.first().id)])
self.assertIn(CaseTypeSubTypeEnum.GOODS, str(template["case_types"]))
self.assertIn(CaseTypeSubTypeEnum.EUA, str(template["case_types"]))
self.assertIsNotNone(template.get("created_at"))
self.assertIsNotNone(template.get("updated_at"))
def test_get_letter_template_with_preview_success(self):
url = reverse("letter_templates:letter_template", kwargs={"pk": str(self.letter_template.id)})
url += "?generate_preview=True"
response = self.client.get(url, **self.gov_headers)
response_data = response.json()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue("preview" in response_data)
preview = response_data["preview"]
for tag in ["<style>", "</style>"]:
self.assertTrue(tag in preview)
self.assertTrue(self.letter_template.letter_paragraphs.first().text in preview)
def test_get_letter_template_with_text_success(self):
url = reverse("letter_templates:letter_template", kwargs={"pk": str(self.letter_template.id)})
url += "?text=True"
response = self.client.get(url, **self.gov_headers)
response_data = response.json()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue("text" in response_data)
self.assertTrue(self.letter_template.letter_paragraphs.first().text in response_data["text"])
@parameterized.expand(
[
("**bold text**", "<p><strong>bold text</strong></p>"),
("_italic text_", "<p><em>italic text</em></p>"),
("<u>underlined text</u>", "<p><u>underlined text</u></p>"),
("<u>**bold** _italic_</u>", "<p><u><strong>bold</strong> <em>italic</em></u></p>"),
(
"# Heading1\r\n**{{organisation.name}}** {{address}} <u>NOTE:</u>",
"<h1>Heading1</h1>\n<p><strong>{{organisation.name}}</strong> {{address}} <u>NOTE:</u></p>",
),
("<script>malicious code</script>", "<p><script>malicious code</script></p>"),
]
)
def test_format_user_text_in_preview(self, raw_text, formatted):
self.assertEqual(format_user_text(raw_text), formatted)
| 2.3125 | 2 |
8723 Patyki.py | jangThang/Baekjoon-problem | 0 | 12788848 | <reponame>jangThang/Baekjoon-problem
# read input
a, b, c = map(int, input().split())
# classify and print
if a == b == c: # equilateral triangle
    print(2)
# right triangle (Pythagorean check)
elif a**2 == b**2 + c**2 or b**2 == a**2 + c**2 or c**2 == a**2 + b**2:
    print(1)
# neither of the above
else:
print(0)
| 3.234375 | 3 |
core/train.py | kianakiaei/TSGL-EEGNet | 0 | 12788849 | <reponame>kianakiaei/TSGL-EEGNet
# coding:utf-8
import os
import gc
import sys
import math
import copy
import time
import logging
import itertools
import numpy as np
from numpy.core.numeric import cross
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.python.keras.api._v2.keras import backend as K
from core.models import EEGNet, TSGLEEGNet, ShallowConvNet, DeepConvNet, MB3DCNN, EEGAttentionNet
from core.splits import StratifiedKFold
from core.callbacks import MyModelCheckpoint, EarlyStopping
from core.utils import standardization, computeKappa
_console = sys.stdout
def create_EEGAttentionNet(nClasses,
Samples,
Chans=22,
Colors=1,
F=9,
D=4,
kernLength=64,
optimizer=tf.keras.optimizers.Adam,
lrate=1e-3,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
summary=True):
model = EEGAttentionNet(nClasses,
Chans=Chans,
Colors=Colors,
Samples=Samples,
kernLength=kernLength,
F1=F,
D=D)
model.compile(optimizer=optimizer(lrate), loss=loss, metrics=metrics)
if summary:
model.summary()
# export graph of the model
# tf.keras.utils.plot_model(model, 'EEGNet.png', show_shapes=True)
return model
def create_MB3DCNN(nClasses,
H,
W,
Samples,
optimizer=tf.keras.optimizers.Adam,
lrate=1e-3,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
summary=True):
model = MB3DCNN(nClasses, H=H, W=W, Samples=Samples)
model.compile(optimizer=optimizer(lrate), loss=loss, metrics=metrics)
if summary:
model.summary()
# export graph of the model
# tf.keras.utils.plot_model(model, 'MB3DCNN.png', show_shapes=True)
return model
def create_EEGNet(nClasses,
Samples,
Chans=22,
F=9,
D=4,
Ns=4,
kernLength=64,
FSLength=16,
dropoutRate=0.5,
optimizer=tf.keras.optimizers.Adam,
lrate=1e-3,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
summary=True):
model = EEGNet(nClasses,
Chans=Chans,
Samples=Samples,
kernLength=kernLength,
FSLength=FSLength,
dropoutRate=dropoutRate,
F1=F,
D=D,
F2=nClasses * 2 * Ns)
model.compile(optimizer=optimizer(lrate), loss=loss, metrics=metrics)
if summary:
model.summary()
# export graph of the model
# tf.keras.utils.plot_model(model, 'EEGNet.png', show_shapes=True)
return model
def create_TSGLEEGNet(nClasses,
Samples,
Chans=22,
Colors=1,
F=9,
D=4,
Ns=4,
kernLength=64,
FSLength=16,
dropoutRate=0.5,
l1=1e-4,
l21=1e-4,
tl1=1e-5,
optimizer=tf.keras.optimizers.Adam,
lrate=1e-3,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
summary=True):
model = TSGLEEGNet(nClasses,
Chans=Chans,
Samples=Samples,
Colors=Colors,
kernLength=kernLength,
FSLength=FSLength,
dropoutRate=dropoutRate,
F1=F,
D=D,
F2=nClasses * 2 * Ns,
l1=l1,
l21=l21,
tl1=tl1)
model.compile(optimizer=optimizer(lrate), loss=loss, metrics=metrics)
if summary:
model.summary()
# export graph of the model
# tf.keras.utils.plot_model(model, 'rawEEGConvNet.png', show_shapes=True)
return model
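# A minimal usage sketch for the builders above (the numbers are illustrative only:
# 4 classes, 22 channels, 2 s of EEG at 250 Hz; x_train/y_train are placeholders):
#   model = create_TSGLEEGNet(4, Samples=500, Chans=22, l1=1e-4, l21=1e-4, tl1=1e-5)
#   model.fit(x_train, y_train, batch_size=10, epochs=300)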
class crossValidate(object):
'''
Class for K-fold Cross Validation.
This framework can collect `model`, `loss`, `acc` and `history` from each fold, and
save them into files.
    Data splitting methods from sklearn.model_selection are supported. You can pass the
    classes as `splitMethod`.
    This class implements the magic method `__call__()` wrapping `call()`, so an
    instance can be used like a function.
Parameters
----------
```txt
    built_fn        : function, Creates the training model to cross-validate.
                      Please use the prefix `create_` at the beginning of the function
                      name, like `create_modelname`.
    dataGent        : class, Generates data for @built_fn, shapes (n_trials, ...).
It should discriminate data and label.
More details see core.generators.
    splitMethod     : class, Split method; classes from sklearn.model_selection are supported.
    kFold           : int, Number of K-fold.
    shuffle         : bool, Optional. Whether to shuffle each class's samples before
splitting into batches, default = False.
random_state : int, RandomState instance or None, optional, default = None.
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by np.random. Used when shuffle == True.
subs : list, list of subjects' number, like `range(1, 10)`.
cropping : bool, Switch of cropped training. Default = False.
winLength : int, cropping window length, default = 2*srate.
    cpt             : float, cropping seconds, optional, only available when `winLength`
is not specified.
step : int, cropping step, default = 4.
standardizing : bool, Switch of standardizing data. Default = True.
batch_size : int, Batch size.
epochs : int, Training epochs.
patience : int, Early stopping patience.
verbose : int, One of 0, 1 and 2.
*a, *args : tuple, Parameters used by @dataGent and @built_fn respectively
**kw, **kwargs : dict, Parameters used by @dataGent and @built_fn respectively,
**kw should include parameters called `beg`, `end` and `srate`.
```
Returns
-------
```txt
    avg_acc         : list, Average accuracy for each subject with K-fold Cross Validation;
                      the overall average accuracy is the last element of the list.
    avg_kappa       : list, Average kappa for each subject with K-fold Cross Validation;
                      the overall average kappa is the last element of the list.
```
Example
-------
```python
from core.splits import StratifiedKFold
def create_model(Samples, *args, summary=True, **kwargs):
...
return keras_model
class dataGenerator:
def __init__(self, *a, beg=0, end=4, srate=250, **kw):
...
def __call__(self, filepath, label=False):
if label:
...
return label
else:
...
return data
...
...
avg_acc = crossValidate(
create_model,
dataGenerator,
beg=0,
end=4,
srate=250,
splitMethod=StratifiedKFold,
kFold=10,
subs=range(1, 10),
*a,
**kw)(*args, **kwargs)
```
Note
----
    See the code for more details.
'''
def __init__(self,
built_fn,
dataGent,
splitMethod=StratifiedKFold,
traindata_filepath=None,
testdata_filepath=None,
datadir=None,
beg=0.,
end=4.,
srate=250,
kFold=10,
shuffle=False,
random_state=None,
subs: list = range(1, 10),
cropping=False,
winLength=None,
cpt=None,
step=25,
standardizing=True,
batch_size=10,
epochs=300,
patience=100,
verbose=2,
preserve_initfile=False,
reinit=True,
*args,
**kwargs):
self.built_fn = built_fn
self.dataGent = dataGent(beg=beg,
end=end,
srate=srate,
*args,
**kwargs)
self.beg = beg
self.end = end
self.srate = srate
self.splitMethod = splitMethod
self.traindata_filepath = traindata_filepath
self.testdata_filepath = testdata_filepath
self.datadir = datadir
self.kFold = kFold
self.shuffle = shuffle
self.random_state = random_state
self.subs = subs
self.cropping = cropping
self.winLength = winLength
self.cpt = cpt
self.step = step
self.standardizing = standardizing
self.batch_size = batch_size
self.epochs = epochs
self.patience = patience
self.verbose = verbose
self.preserve_initfile = preserve_initfile
self.reinit = reinit
self.args = args
self.kwargs = kwargs
self.Samples = math.ceil(self.end * self.srate - self.beg * self.srate)
self._check_params()
if self.datadir:
for root, dirs, files in os.walk(self.datadir):
if files:
self.dn = files[0][0]
break
else:
self.dn = ''
self.modelstr = built_fn.__name__[7:]
if self.splitMethod.__name__ == 'AllTrain':
self.validation_name = 'Average Validation'
else:
self.validation_name = 'Cross Validation'
self._new_fold = True
self._last_batch = False
self._readed = False
self.X1 = None
self.Y1 = None
self.X2 = None
self.Y2 = None
if not os.path.exists('model'):
os.makedirs('model')
if not os.path.exists('result'):
os.makedirs('result')
# cropped training
if self.winLength:
if not isinstance(self.winLength, int):
raise TypeError('`winLength` must be passed as int.')
if self.winLength > (self.end - self.beg) * self.srate:
raise ValueError(
'`winLength` must less than or equal (`end` - '
'`beg`) * `srate`.')
if self.cpt and not self.winLength:
if (isinstance(self.cpt, float) or isinstance(self.cpt, int)):
if self.cpt <= self.end - self.beg:
self.winLength = self.cpt * self.srate
else:
raise ValueError(
'`cpt` must less than or equal `end` - `beg`.')
else:
raise TypeError('`cpt` must be passed as int or float.')
if not self.winLength:
self.winLength = 2 * self.srate
if self.step:
if not isinstance(self.step, int):
raise TypeError('`step` must be passed as int.')
else:
self.step = 4
def call(self, *args, **kwargs):
initfile = os.path.join('.', 'CV_initweight.h5')
tm = time.localtime()
dirname = (
'CV_{0:d}_{1:0>2d}_{2:0>2d}_{3:0>2d}_{4:0>2d}_{5:0>2d}_{6:s}'.
format(tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min,
tm.tm_sec, self.modelstr))
if not os.path.exists(os.path.join('model', dirname)):
os.mkdir(os.path.join('model', dirname))
if not os.path.exists(os.path.join('result', dirname)):
os.mkdir(os.path.join('result', dirname))
if self.cropping:
gent = self._gent_cropped_data
self.Samples -= self.winLength
else:
gent = self._gent_data
if not self.reinit:
model = self.built_fn(*args, **kwargs, Samples=self.Samples)
model.save_weights(initfile)
earlystopping = EarlyStopping(monitor='val_loss',
min_delta=0,
patience=self.patience,
verbose=0,
mode='auto')
filename = ''
for key in kwargs.keys():
if key in ['l1', 'l21', 'tl1']:
filename += '{0:s}({1:.8f})_'.format(key, kwargs[key])
else:
filename += '{0:s}({1:0>2d})_'.format(key, kwargs[key])
avg_acci = []
avg_kappai = []
win_subs_list = []
for i in self.subs:
accik = []
kappaik = []
k = 0 # count kFolds
# cropped training
t = 0 # record model's saving time
c = 0 # count windows
win = 0 # selected windows
win_list = [] # selected windows list
for data in gent(subject=i):
if self._new_fold: # new fold for cropped training
self._new_fold = False
t = 0
c = 0
if self.reinit:
model = self.built_fn(*args,
**kwargs,
Samples=self.Samples)
k += 1
filepath = os.path.join(
'result', dirname,
filename + '{:s}.txt'.format(self.modelstr))
with open(filepath, 'w+') as f:
sys.stdout = f
print(('{0:s} {1:d}-fold ' + self.validation_name +
' Accuracy').format(self.modelstr, self.kFold))
print('Subject {:0>2d} fold {:0>2d} in processing'.
format(i, k))
sys.stdout = _console
f.seek(0, 0)
for line in f.readlines():
print(line)
f.close()
filepath = os.path.join(
'model', dirname, filename + self.dn +
'0{0:d}T_{1:s}({2:d}).h5'.format(i, self.modelstr, k))
checkpointer = MyModelCheckpoint(filepath=filepath,
verbose=1,
save_best_only=True,
statistic_best=True,
p=0.05)
history = {}
else:
c += 1
# TODO: fit(), evaluate() with tf.data.Dataset, then `self._new_fold`
# and `self._last_batch` will be DEPRECATED.
history = dict(
list(history.items()) + list(
model.fit(x=data['x_train'],
y=data['y_train'],
batch_size=self.batch_size,
epochs=self.epochs,
callbacks=[checkpointer, earlystopping],
verbose=self.verbose,
validation_data=[
data['x_val'], data['y_val']
]).history.items()))
if self.cropping:
if not t == os.path.getmtime(checkpointer._filepath):
t = os.path.getmtime(checkpointer._filepath)
win = c
# tf.keras.models.Model.fit()
# tf.keras.models.Model.evaluate()
# tf.data.Dataset.from_generator()
# load the best model for cropped training or evaluating its accuracy
model.load_weights(filepath)
if self._last_batch: # the last batch for cropped training
self._last_batch = False
if self.cropping:
win_list.append(win)
x_test = data['x_test'][:, :, win *
self.step:win * self.step +
self.Samples, :]
pd = model.predict(x_test, verbose=0)
pred = np.argmax(pd, axis=1)
acc = np.mean(
np.squeeze(pred) == np.squeeze(data['y_test']))
kappa = computeKappa(pred, data['y_test'])
print(
'win: {:0>2d}\nacc: {:.2%}\nkappa: {:.4f}'.format(
win, acc, kappa))
else:
loss, acc = model.evaluate(data['x_test'],
data['y_test'],
batch_size=self.batch_size,
verbose=self.verbose)
_pred = model.predict(data['x_test'],
batch_size=self.batch_size,
verbose=self.verbose)
pred = np.argmax(_pred, axis=1)
kappa = computeKappa(pred, data['y_test'])
# save the train history
filepath = filepath[:-3] + '.npy'
np.save(filepath, history)
# reset model's weights to train a new one next fold
if os.path.exists(initfile) and not self.reinit:
model.reset_states()
model.load_weights(initfile)
if self.reinit:
K.clear_session()
gc.collect()
accik.append(acc)
kappaik.append(kappa)
avg_acci.append(np.average(np.array(accik)))
avg_kappai.append(np.average(np.array(kappaik)))
win_subs_list.append(win_list)
print(win_list)
self._readed = False
del model
avg_acc = np.average(np.array(avg_acci))
avg_kappa = np.average(np.array(avg_kappai))
filepath = os.path.join(
'result',
'CV_{0:d}_{1:0>2d}_{2:0>2d}_{3:0>2d}_{4:0>2d}_{5:0>2d}_' \
'{6:s}.txt'.format(tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
tm.tm_min, tm.tm_sec, self.modelstr))
with open(filepath, 'w+') as f:
sys.stdout = f
print(('{0:s} {1:d}-fold ' + self.validation_name +
' Accuracy (kappa)').format(self.modelstr, self.kFold))
for i in range(len(self.subs)):
print('Subject {0:0>2d}: {1:.2%} ({2:.4f})'.format(
self.subs[i], avg_acci[i], avg_kappai[i]),
end='')
if self.cropping:
print(', Window:{:0>2d}'.format(win_subs_list[i][np.argmax(
np.bincount(win_subs_list[i]))]))
else:
print()
print('Average : {0:.2%} ({1:.4f})'.format(avg_acc, avg_kappa))
sys.stdout = _console
f.seek(0, 0)
for line in f.readlines():
print(line)
f.close()
if os.path.exists(initfile) and not self.preserve_initfile:
os.remove(initfile)
avg_acci.append(avg_acc)
avg_kappai.append(avg_kappa)
return avg_acci, avg_kappai
def __call__(self, *args, **kwargs):
'''Wraps `call()`.'''
return self.call(*args, **kwargs)
def getConfig(self):
config = {
'built_fn': self.built_fn,
'dataGent': self.dataGent,
'splitMethod': self.splitMethod,
'traindata_filepath': self.traindata_filepath,
'testdata_filepath': self.testdata_filepath,
'datadir': self.datadir,
'beg': self.beg,
'end': self.end,
'srate': self.srate,
'kFold': self.kFold,
'shuffle': self.shuffle,
'random_state': self.random_state,
'subs': self.subs,
'cropping': self.cropping,
'winLength': self.winLength,
'step': self.step,
'standardizing': self.standardizing,
'batch_size': self.batch_size,
'epochs': self.epochs,
'patience': self.patience,
'verbose': self.verbose,
'preserve_initfile': self.preserve_initfile,
'reinit': self.reinit,
'args': self.args,
'kwargs': self.kwargs
}
return config
def setConfig(self,
built_fn,
dataGent,
splitMethod=StratifiedKFold,
traindata_filepath=None,
testdata_filepath=None,
datadir=None,
beg=0.,
end=4.,
srate=250,
kFold=10,
shuffle=False,
random_state=None,
subs: list = range(1, 10),
cropping=False,
winLength=None,
cpt=None,
step=25,
standardizing=True,
batch_size=10,
epochs=300,
patience=100,
verbose=2,
preserve_initfile=False,
reinit=True,
*args,
**kwargs):
self.built_fn = built_fn
self.dataGent = dataGent(beg=beg,
end=end,
srate=srate,
*args,
**kwargs)
self.beg = beg
self.end = end
self.srate = srate
self.splitMethod = splitMethod
self.traindata_filepath = traindata_filepath
self.testdata_filepath = testdata_filepath
self.datadir = datadir
self.kFold = kFold
self.shuffle = shuffle
self.random_state = random_state
self.subs = subs
self.cropping = cropping
self.winLength = winLength
self.cpt = cpt
self.step = step
self.standardizing = standardizing
self.batch_size = batch_size
self.epochs = epochs
self.patience = patience
self.verbose = verbose
self.preserve_initfile = preserve_initfile
self.reinit = reinit
self.args = args
self.kwargs = kwargs
self.Samples = math.ceil(self.end * self.srate - self.beg * self.srate)
self._check_params()
if self.datadir:
for root, dirs, files in os.walk(self.datadir):
if files:
self.dn = files[0][0]
break
self.modelstr = built_fn.__name__[7:]
if self.splitMethod.__name__ == 'AllTrain':
self.validation_name = 'Average Validation'
else:
self.validation_name = 'Cross Validation'
self._new_fold = True
self._last_batch = False
self._readed = False
self.X1 = None
self.Y1 = None
self.X2 = None
self.Y2 = None
if not os.path.exists('model'):
os.makedirs('model')
if not os.path.exists('result'):
os.makedirs('result')
# cropped training
if self.winLength:
if not isinstance(self.winLength, int):
raise TypeError('`winLength` must be passed as int.')
if self.winLength > (self.end - self.beg) * self.srate:
raise ValueError(
'`winLength` must less than or equal (`end` - '
'`beg`) * `srate`.')
if self.cpt and not self.winLength:
if (isinstance(self.cpt, float) or isinstance(self.cpt, int)):
if self.cpt <= self.end - self.beg:
self.winLength = self.cpt * self.srate
else:
raise ValueError(
'`cpt` must less than or equal `end` - `beg`.')
else:
raise TypeError('`cpt` must be passed as int or float.')
if not self.winLength:
self.winLength = 2 * self.srate
if self.step:
if not isinstance(self.step, int):
raise TypeError('`step` must be passed as int.')
else:
self.step = 4
@staticmethod
def _standardize(data: dict, trialaxis=0):
'''Standardizing (z-score) on each trial, supports np.nan numbers'''
        # assume all trials are independent of each other
meta = ['x_train', 'x_test', 'x_val']
# to prevent different objects be the same one
data = copy.deepcopy(data)
for s in meta:
if s in data and not data[s] is None:
_len = len(data[s].shape)
if _len > 1:
axis = list(range(_len))
axis.pop(trialaxis)
axis = tuple(axis)
else:
axis = -1
# z-score on trials
data[s] = standardization(data[s], axis=axis)
return data
def _read_data(self, subject, mode):
'''
Read data from dataGent.
Parameters
----------
```txt
subject : int, Identifier of subject.
mode : str, One of 'train' and 'test'.
```
Yields
------
```txt
data : tuple, (x, y).
```
'''
meta = ['train', 'test']
if not isinstance(mode, str):
raise TypeError('`mode` must be passed as string.')
if not mode in meta:
raise ValueError('`mode` must be one of \'train\' and \'test\'.')
if mode == 'test':
if not self.testdata_filepath:
self.testdata_filepath = os.path.join(
self.datadir, 'Test',
self.dn + '0' + str(subject) + 'E.mat')
yield self.dataGent(self.testdata_filepath)
self.testdata_filepath = None
else:
yield self.dataGent(self.testdata_filepath)
else:
if not self.traindata_filepath:
self.traindata_filepath = os.path.join(
self.datadir, 'Train',
self.dn + '0' + str(subject) + 'T.mat')
yield self.dataGent(self.traindata_filepath)
self.traindata_filepath = None
else:
yield self.dataGent(self.traindata_filepath)
def _gent_data(self, subject):
'''
Generate (data, label) from dataGent.
Parameters
----------
```txt
subject : int, Identifier of subject.
```
Yields
------
```txt
data : dict, Includes train, val and test data.
```
'''
data = {
'x_train': None,
'y_train': None,
'x_val': None,
'y_val': None,
'x_test': None,
'y_test': None
}
if not self._readed:
# for once
for (self.X1, self.Y1) in self._read_data(subject=subject,
mode='test'):
pass
for (self.X2, self.Y2) in self._read_data(subject=subject,
mode='train'):
self._readed = True
data['x_test'] = self.X1
data['y_test'] = self.Y1
# for multiple times
for (x1, y1), (x2, y2) in self._spilt(self.X2, self.Y2):
data['x_train'] = x1
data['y_train'] = y1
data['x_val'] = x2
data['y_val'] = y2
if self.standardizing:
data = self._standardize(data)
if data['x_val'] is None:
data['x_val'] = data['x_test']
data['y_val'] = data['y_test']
self._new_fold = True
self._last_batch = True
yield data
def _gent_cropped_data(self, subject):
'''
Generate cropped (data, label) from dataGent.
        Test data is not cropped here; it is cropped at evaluation time using the selected window.
Parameters
----------
```txt
subject : int, Identifier of subject.
```
Yields
------
```txt
data : dict, Includes train, val and test data.
```
'''
temp = {
'x_train': None,
'y_train': None,
'x_val': None,
'y_val': None,
'x_test': None,
'y_test': None
}
L = range(0, self.Samples + 1, self.step)
L = len(L)
print('len(L): {0:d}'.format(L))
if not self._readed:
# for once
for (self.X1, self.Y1) in self._read_data(subject=subject,
mode='test'):
pass
for (self.X2, self.Y2) in self._read_data(subject=subject,
mode='train'):
self._readed = True
temp['x_test'] = self.X1
temp['y_test'] = self.Y1
        data = {}  # train/val arrays (standardized or raw) that get cropped below
        for (x1, y1), (x2, y2) in self._spilt(self.X2, self.Y2):
temp['x_train'] = x1
temp['y_train'] = y1
temp['x_val'] = x2
temp['y_val'] = y2
if temp['x_val'] is None:
if self.standardizing:
data = self._standardize(temp)
temp['x_val'] = data['x_test']
temp['y_val'] = data['y_test']
temp['x_test'] = data['x_test']
temp['y_test'] = data['y_test']
else:
data['x_train'] = x1
data['x_val'] = temp['x_val']
else:
if self.standardizing:
data = self._standardize(temp)
temp['x_test'] = data['x_test']
temp['y_test'] = data['y_test']
else:
data['x_train'] = x1
data['x_val'] = x2
i = 0
for (temp['x_train'], temp['x_val']) in self._cropping_data(
(data['x_train'], data['x_val'])):
i += 1
if i == 1:
self._new_fold = True
if i == L:
self._last_batch = True
yield temp
def _cropping_data(self, datas: tuple):
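        # Slide a window of `winLength` samples over the time axis in steps of
        # `step`, yielding one cropped copy of every array in `datas` per position.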
L = range(0, self.Samples + 1, self.step)
for i in L:
temp = ()
for data in datas:
temp += (data[:, :, i:i + self.winLength, :], )
yield temp
def _spilt(self, X, y, groups=None):
"""
Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set. Action depends on the split method you choose.
Yields
------
train : ndarray
The training set indices for that split.
val : ndarray
The validating set indices for that split.
"""
sm = self.splitMethod(n_splits=self.kFold,
shuffle=self.shuffle,
random_state=self.random_state)
for train_index, val_index in sm.split(X, y, groups):
# (x_train, y_train), (x_val, y_val)
if not train_index.any():
raise ValueError('Training data shouldn\'t be empty.')
elif not val_index.any():
yield (X[train_index], y[train_index]), (None, None)
else:
yield (X[train_index], y[train_index]), (X[val_index],
y[val_index])
def _check_params(self):
'''
        Validate the parameters passed to crossValidate.
'''
# TODO: check parameters out.
pass
class gridSearch(crossValidate):
'''
Class for K-fold Cross Validation Grid Search.
    Grid search method, implemented as a subclass of `crossValidate`.
    This framework can collect `model`, `loss`, `acc` and `history` from each fold, and
    save them into files.
    Data splitting methods from sklearn.model_selection are supported. You can pass the
    classes as `splitMethod`.
    It can't use multiple GPUs to speed up yet. To grid-search a large parameter
    matrix, consider a greedy (coordinate-wise) search instead.
    This class implements the magic method `__call__()` wrapping `call()`, so an
    instance can be used like a function.
Parameters
----------
```txt
    built_fn        : function, Creates the training model to cross-validate.
                      Please use the prefix `create_` at the beginning of the function
                      name, like `create_modelname`.
    parameters      : dict, Parameters to grid-search. Keys are the parameter names,
                      and each value is the list of candidate values for that
                      parameter.
    dataGent        : class, Generates data for @built_fn, shapes (n_trials, ...).
It should discriminate data and label.
More details see core.generators.
    splitMethod     : class, Split method; classes from sklearn.model_selection are supported.
    kFold           : int, Number of K-fold.
    shuffle         : bool, Optional. Whether to shuffle each class's samples before
splitting into batches, default = False.
random_state : int, RandomState instance or None, optional, default = None.
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by np.random. Used when shuffle == True.
subs : list, list of subjects' number, like `range(1, 10)`.
cropping : bool, Switch of cropped training. Default = False.
winLength : int, cropping window length, default = 2*srate.
step : int, cropping step, default = 1.
standardizing : bool, Switch of standardizing data. Default = True.
batch_size : int, Batch size.
epochs : int, Training epochs.
patience : int, Early stopping patience.
verbose : int, One of 0, 1 and 2.
*a, *args : tuple, Parameters used by @dataGent and @built_fn respectively
**kw, **kwargs : dict, Parameters used by @dataGent and @built_fn respectively,
**kw should include parameters called `beg`, `end` and `srate`.
```
Returns
-------
```txt
    avg_acc         : list, Average accuracy for each subject with K-fold Cross Validation;
                      the overall average accuracy is the last element of the list.
    avg_kappa       : list, Average kappa for each subject with K-fold Cross Validation;
                      the overall average kappa is the last element of the list.
```
Example
-------
```python
from core.splits import StratifiedKFold
def create_model(Samples, *args, summary=True, **kwargs):
...
return keras_model
class dataGenerator:
def __init__(self, *a, beg=0, end=4, srate=250, **kw):
...
def __call__(self, filepath, label=False):
if label:
...
return label
else:
...
return data
...
...
parameters = {'para1':[...], 'para2':[...], ...}
avg_acc = gridSearch(
create_model,
parameters,
dataGenerator,
beg=0,
end=4,
srate=250,
splitMethod=StratifiedKFold,
kFold=10,
subs=range(1, 10),
*a,
**kw)(*args, **kwargs)
```
Note
----
    See the code for more details.
'''
def __init__(self,
built_fn,
parameters: dict,
dataGent,
splitMethod=StratifiedKFold,
traindata_filepath=None,
testdata_filepath=None,
datadir=None,
beg=0,
end=4,
srate=250,
kFold=10,
shuffle=False,
random_state=None,
subs=range(1, 10),
cropping=False,
winLength=None,
cpt=None,
step=25,
standardizing=True,
batch_size=10,
epochs=300,
patience=100,
verbose=2,
preserve_initfile=False,
reinit=False,
*args,
**kwargs):
super().__init__(built_fn=built_fn,
dataGent=dataGent,
splitMethod=splitMethod,
traindata_filepath=traindata_filepath,
testdata_filepath=testdata_filepath,
datadir=datadir,
beg=beg,
end=end,
srate=srate,
kFold=kFold,
shuffle=shuffle,
random_state=random_state,
subs=subs,
cropping=cropping,
winLength=winLength,
cpt=cpt,
step=step,
standardizing=standardizing,
batch_size=batch_size,
epochs=epochs,
patience=patience,
verbose=verbose,
preserve_initfile=preserve_initfile,
reinit=reinit,
*args,
**kwargs)
_subs_targeted = False
_subs_targeted_parameters = []
for parameter in parameters:
if not parameter in self.built_fn.__code__.co_varnames:
raise ValueError('`parameters` has unsupported parameter in'
' `built_fn`.')
if not isinstance(parameters[parameter], list) and not isinstance(
parameters[parameter], dict):
parameters[parameter] = list(parameters[parameter])
if isinstance(parameters[parameter], dict):
subs = list(self.subs)
for subject in parameters[parameter]:
if not int(subject) in self.subs:
raise ValueError('`parameters` has unsolved subject'
' numbers.')
if not isinstance(parameters[parameter][subject], list):
parameters[parameter][subject] = list(
parameters[parameter][subject])
subs.remove(int(subject))
if subs:
raise ValueError('`parameters` doesn\'t include all the'
' subject numbers.')
_subs_targeted = True
_subs_targeted_parameters.append(parameter)
temp = []
if _subs_targeted:
for subject in range(1, max(self.subs) + 1):
items = []
for parameter in parameters:
if subject in self.subs:
if parameter in _subs_targeted_parameters:
items += list({
parameter:
parameters[parameter][str(subject)]
}.items())
else:
items += list({parameter:
parameters[parameter]}.items())
temp.append(dict(items))
else:
for subject in range(1, max(self.subs) + 1):
if subject in self.subs:
temp.append(parameters)
else:
temp.append([])
self.parameters = temp
def call(self, *args, **kwargs):
'''
        `self.parameters` holds one parameter grid per subject; each subject's
        combinations are passed to the inner `cv()` one at a time.
'''
initfile = os.path.join('.', 'GSCV_initweight.h5')
tm = time.localtime()
dirname = (
'GS_{0:d}_{1:0>2d}_{2:0>2d}_{3:0>2d}_{4:0>2d}_{5:0>2d}_{6:s}'.
format(tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min,
tm.tm_sec, self.modelstr))
if not os.path.exists(os.path.join('model', dirname)):
os.mkdir(os.path.join('model', dirname))
if not os.path.exists(os.path.join('result', dirname)):
os.mkdir(os.path.join('result', dirname))
if self.cropping:
gent = self._gent_cropped_data
self.Samples -= self.winLength
else:
gent = self._gent_data
earlystopping = EarlyStopping(monitor='val_loss',
min_delta=0,
patience=self.patience,
verbose=0,
mode='auto')
def cv(*args, **kwargs):
'''one subject, one parameter'''
if not self.reinit:
if not os.path.exists(initfile):
model = self.built_fn(*args,
**kwargs,
Samples=self.Samples)
model.save_weights(initfile)
else:
model = self.built_fn(*args,
**kwargs,
Samples=self.Samples)
model.load_weights(initfile)
filename = ''
for key in kwargs.keys():
if key in ['l1', 'l21', 'tl1']:
filename += '{0:s}({1:.8f})_'.format(key, kwargs[key])
else:
filename += '{0:s}({1:0>2d})_'.format(key, kwargs[key])
acck = []
kappak = []
k = 0 # count kFolds
# cropped training
t = 0 # record model's saving time
c = 0 # count windows
win = 0 # selected windows
win_list = [] # selected windows list
for data in gent(subject=self.subs):
if self._new_fold: # new fold for cropped training
self._new_fold = False
t = 0
c = 0
if self.reinit:
model = self.built_fn(*args,
**kwargs,
Samples=self.Samples)
k += 1
filepath = os.path.join(
'model', dirname,
filename + self.dn + '0{0:d}T_{1:s}({2:d}).h5'.format(
self.subs, self.modelstr, k))
checkpointer = MyModelCheckpoint(filepath=filepath,
verbose=1,
save_best_only=True,
statistic_best=True,
p=0.05)
history = {}
# TODO: fit(), evaluate() with tf.data.Dataset, then `self._new_fold`
# and `self._last_batch` will be DEPRECATED.
history = dict(
list(history.items()) + list(
model.fit(x=data['x_train'],
y=data['y_train'],
batch_size=self.batch_size,
epochs=self.epochs,
callbacks=[checkpointer, earlystopping],
verbose=self.verbose,
validation_data=[
data['x_val'], data['y_val']
]).history.items()))
if self.cropping:
if not t == os.path.getmtime(checkpointer._filepath):
t = os.path.getmtime(checkpointer._filepath)
win = c
# load the best model for cropped training or evaluating its accuracy
model.load_weights(filepath)
if self._last_batch: # the last batch for cropped training
self._last_batch = False
if self.cropping:
win_list.append(win)
x_test = data['x_test'][:, :, win *
self.step:win * self.step +
self.Samples, :]
pd = model.predict(x_test, verbose=0)
pred = np.argmax(pd, axis=1)
acc = np.mean(
np.squeeze(pred) == np.squeeze(data['y_test']))
kappa = computeKappa(pred, data['y_test'])
else:
loss, acc = model.evaluate(data['x_test'],
data['y_test'],
batch_size=self.batch_size,
verbose=self.verbose)
_pred = model.predict(data['x_test'],
batch_size=self.batch_size,
verbose=self.verbose)
pred = np.argmax(_pred, axis=1)
kappa = computeKappa(pred, data['y_test'])
# save the train history
npy_filepath = filepath[:-3] + '.npy'
np.save(npy_filepath, history)
# reset model's weights to train a new one next fold
if os.path.exists(initfile) and not self.reinit:
model.reset_states()
model.load_weights(initfile)
if self.reinit:
K.clear_session()
gc.collect()
acck.append(acc)
kappak.append(kappa)
data.clear()
del data
K.clear_session()
del model
gc.collect()
avg_acc = np.average(np.array(acck))
avg_kappa = np.average(np.array(kappak))
            # `win_list` is only populated when cropped training is enabled
            win = win_list[np.argmax(np.bincount(win_list))] if win_list else 0
filepath = os.path.join(
'result', dirname, filename + self.dn +
'0{0:d}T_{1:s}.txt'.format(self.subs, self.modelstr))
with open(filepath, 'w+') as f:
sys.stdout = f
print(('{0:s} {1:d}-fold ' + self.validation_name +
' Accuracy').format(self.modelstr, self.kFold))
print('Subject {0:0>2d}'.format(self.subs))
for i in range(len(acck)):
print('Fold {0:0>2d}: {1:.2%} ({2:.4f})'.format(
i + 1, acck[i], kappak[i]))
if self.cropping:
print('Window:{:0>2d}'.format(win))
print('Average : {0:.2%} ({1:.4f})'.format(
avg_acc, avg_kappa))
sys.stdout = _console
f.seek(0, 0)
for line in f.readlines():
print(line)
f.close()
return avg_acc, avg_kappa
parameters = []
max_avg_acc = []
max_acc_kappa = []
indices = []
subs = copy.copy(self.subs)
filepath = os.path.join(
'result',
'GS_{0:d}_{1:0>2d}_{2:0>2d}_{3:0>2d}_{4:0>2d}_{5:0>2d}_' \
'{6:s}.txt'.format(tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
tm.tm_min, tm.tm_sec, self.modelstr))
for subject in subs:
parameters.append(self._combination(subject=subject))
count = 0
with open(filepath, 'w+') as f:
sys.stdout = f
print('Subject: {0:0>2d}/{1:0>2d}'.format(subject, len(subs)))
print(
'Grid Search progress: {0:0>4d}/{1:0>4d}' \
'\nThe No.{2:0>4d} is in processing'
.format(count, len(parameters[-1]), count + 1))
sys.stdout = _console
f.seek(0, 0)
for line in f.readlines():
print(line)
f.close()
avg_acc = []
avg_kappa = []
for parameter in parameters[-1]:
self.subs = subject
param = dict(parameter + list(kwargs.items()))
acc, kappa = cv(*args, **param)
avg_acc.append(acc)
avg_kappa.append(kappa)
count += 1
with open(filepath, 'w+') as f:
sys.stdout = f
print('Subject: {0:0>2d}/{1:0>2d}'.format(
subject, len(subs)))
if count < len(parameters[-1]):
print(
'Grid Search progress: {0:0>4d}/{1:0>4d}' \
'\nThe No.{2:0>4d} is in processing'
.format(count, len(parameters[-1]), count + 1))
else:
print('Grid Search progress: {0:0>4d}/{1:0>4d}'.format(
count, len(parameters[-1])))
sys.stdout = _console
f.seek(0, 0)
for line in f.readlines():
print(line)
f.close()
self._readed = False
max_avg_acc.append(np.max(avg_acc))
indices.append(np.argmax(avg_acc))
max_acc_kappa.append(avg_kappa[indices[-1]])
self.subs = subs
if os.path.exists(initfile) and not self.preserve_initfile:
os.remove(initfile)
with open(filepath, 'w+') as f:
sys.stdout = f
print(('{0:s} {1:d}-fold ' + self.validation_name +
'Grid Search Accuracy (kappa)').format(
self.modelstr, self.kFold))
for i in range(len(self.subs)):
print('Subject {0:0>2d}: {1:.2%} ({2:.4f})'.format(
self.subs[i], max_avg_acc[i], max_acc_kappa[i]))
print('Parameters', end='')
for n in range(len(parameters[i][indices[i]])):
if n == 0:
print(': {0:s} = {1:.8f}'.format(
parameters[i][indices[i]][n][0],
parameters[i][indices[i]][n][1]),
end='')
else:
print(', {0:s} = {1:.8f}'.format(
parameters[i][indices[i]][n][0],
parameters[i][indices[i]][n][1]),
end='')
print()
print('Average : {:.2%} ({:.4f})'.format(
np.average(max_avg_acc), np.average(max_acc_kappa)))
sys.stdout = _console
f.seek(0, 0)
for line in f.readlines():
print(line)
f.close()
avg_acc = max_avg_acc
avg_kappa = max_acc_kappa
avg_acc.append(np.average(max_avg_acc))
avg_kappa.append(np.average(max_acc_kappa))
return avg_acc, avg_kappa
def _combination(self, subject):
        '''Enumerate the combinations of parameters given to Grid Search'''
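        # e.g. {'l1': [1e-3, 1e-4], 'tl1': [1e-5]} ->
        #      [[('l1', 1e-3), ('tl1', 1e-5)], [('l1', 1e-4), ('tl1', 1e-5)]]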
parameters = []
parameter = []
keys = list(self.parameters[subject - 1].keys())
values = list(itertools.product(*self.parameters[subject -
1].values()))
for v in values:
for i in range(len(v)):
parameter.append((keys[i], v[i]))
parameters.append(parameter)
parameter = []
return parameters
def getConfig(self):
config = {'parameters': self.parameters}
        base_config = super().getConfig()
return dict(list(base_config.items()) + list(config.items()))
def getSuperConfig(self):
        return super().getConfig()
def setConfig(self,
built_fn,
parameters: dict,
dataGent,
splitMethod=StratifiedKFold,
traindata_filepath=None,
testdata_filepath=None,
datadir=None,
beg=0,
end=4,
srate=250,
kFold=10,
shuffle=False,
random_state=None,
subs=range(1, 10),
cropping=False,
winLength=None,
cpt=None,
step=25,
standardizing=True,
batch_size=10,
epochs=300,
patience=100,
verbose=2,
preserve_initfile=False,
reinit=False,
*args,
**kwargs):
super().setConfig(built_fn=built_fn,
dataGent=dataGent,
splitMethod=splitMethod,
traindata_filepath=traindata_filepath,
testdata_filepath=testdata_filepath,
datadir=datadir,
beg=beg,
end=end,
srate=srate,
kFold=kFold,
shuffle=shuffle,
random_state=random_state,
subs=subs,
cropping=cropping,
winLength=winLength,
cpt=cpt,
step=step,
standardizing=standardizing,
batch_size=batch_size,
epochs=epochs,
patience=patience,
verbose=verbose,
preserve_initfile=preserve_initfile,
reinit=reinit,
*args,
**kwargs)
_subs_targeted = False
_subs_targeted_parameters = []
for parameter in parameters:
if not parameter in self.built_fn.__code__.co_varnames:
raise ValueError('`parameters` has unsupported parameter in'
' `built_fn`.')
if not isinstance(parameters[parameter], list) and not isinstance(
parameters[parameter], dict):
parameters[parameter] = list(parameters[parameter])
if isinstance(parameters[parameter], dict):
subs = list(self.subs)
for subject in parameters[parameter]:
if not int(subject) in self.subs:
raise ValueError('`parameters` has unsolved subject'
' numbers.')
if not isinstance(parameters[parameter][subject], list):
parameters[parameter][subject] = list(
parameters[parameter][subject])
subs.remove(int(subject))
if subs:
raise ValueError('`parameters` doesn\'t include all the'
' subject numbers.')
_subs_targeted = True
_subs_targeted_parameters.append(parameter)
temp = []
if _subs_targeted:
for subject in range(1, max(self.subs) + 1):
items = []
for parameter in parameters:
if subject in self.subs:
if parameter in _subs_targeted_parameters:
items += list({
parameter:
parameters[parameter][str(subject)]
}.items())
else:
items += list({parameter:
parameters[parameter]}.items())
temp.append(dict(items))
else:
for subject in range(1, max(self.subs) + 1):
if subject in self.subs:
temp.append(parameters)
else:
temp.append([])
self.parameters = temp | 1.976563 | 2 |
anomaly_tuning/__init__.py | albertcthomas/anomaly_tuning | 40 | 12788850 | <filename>anomaly_tuning/__init__.py
from .tuning import anomaly_tuning # noqa
from . import estimators # noqa
| 1.132813 | 1 |