id (stringlengths, 1-7) | text (stringlengths, 6-1.03M) | dataset_id (stringclasses, 1 value)
---|---|---|
3384421
|
from django import forms
from django.forms import ModelForm
from django.forms.widgets import TextInput
from accounts.forms import LongCharField
from core.models import Message, Room
class MessageForm(ModelForm):
""" Form for chat messages """
message_id = forms.IntegerField(required=False)
class Meta:
model = Message
fields = [
'text', 'sender', 'room', 'message_id', 'seen'
]
class RoomForm(ModelForm):
""" Form for Chat Room """
class Meta:
model = Room
fields = [
'users'
]
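# --- Illustrative usage sketch (not part of the original module) ---
# How a view might consume MessageForm; the request handling below and the
# assumption that Message exposes the fields listed in Meta are hypothetical.
def save_message_from_post(request):
    """Validate posted chat data and persist it, returning the Message or None."""
    form = MessageForm(request.POST)
    if form.is_valid():
        return form.save()
    return None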
|
StarcoderdataPython
|
47499
|
<reponame>c4s4/mysql_commando
#!/usr/bin/env python
# encoding: UTF-8
from distutils.core import setup
setup(
name = 'mysql_commando',
version = 'VERSION',
author = '<NAME>',
author_email = '<EMAIL>',
packages = ['mysql_commando'],
url = 'http://pypi.python.org/pypi/mysql_commando/',
license = 'Apache Software License',
description = 'mysql_commando is a MySQL driver calling mysql',
long_description=open('README.rst').read(),
)
|
StarcoderdataPython
|
3334017
|
<reponame>eshbeata/open-paperless
from __future__ import unicode_literals
default_app_config = 'documents.apps.DocumentsApp'
|
StarcoderdataPython
|
1799285
|
<reponame>andrewtavis/poli-sci-k
"""
Appointment Method Checks
-------------------------
Functions to conditionally check appointment methods.
Contents:
quota_condition,
consistency_condition
"""
from math import ceil, floor
import pandas as pd
from poli_sci_kit.appointment.metrics import ideal_share
def quota_condition(shares, seats):
"""
Checks whether assignment method results fall within the range of the ideal share rounded down and up.
Notes
-----
https://en.wikipedia.org/wiki/Quota_rule
Parameters
----------
shares : list
The proportion of the population or votes for the regions or parties.
seats : list
The share of seats given to the regions or parties.
Returns
-------
check_pass or fail_report: bool or dict (int: tuple)
A value of True, or a dict keyed by index of the corresponding arguments where the check has failed.
"""
assert len(shares) == len(
seats
), "The number of share entries must equal the number of allocated seat entries."
check_list = [
ceil(ideal_share(s, sum(shares), sum(seats))) >= seats[i]
and floor(ideal_share(s, sum(shares), sum(seats))) <= seats[i]
for i, s in enumerate(shares)
]
fail_report = {
i: (shares[i], seats[i]) for i, c in enumerate(check_list) if c == False
}
check_pass = False not in check_list
print("Quota condition passed:", check_pass)
if check_pass:
return check_pass
print("Returning list of argument elements that failed the condition.")
return fail_report
def consistency_condition(df_shares=None, df_seats=None, check_type="seat_monotony"):
"""
Checks the consistency of assignment method results given dataframes of shares and allocations.
Notes
-----
Rows and columns of the df(s) will be marked and dropped if consistent, with the condition failing if the resulting df has size > 0 (some were inconsistent).
Parameters
----------
df_shares : pd.DataFrame (num_region_party, num_variation; contains ints, default=None)
Proportions of the population or votes for the regions or parties given variance.
df_seats : pd.DataFrame (num_region_party, num_variation; contains ints, default=None)
Shares of seats given to the regions or parties given variance.
check_type : str
Whether the consistency of a change in seats or a change in shares is checked.
Options:
The style of monotonicity used to derive consistency.
- seat_monotony : An increase in total seats does not decrease allotted seats
Note: uses sums of cols of df_seats, checking col element monotony given differences in sums.
- share_monotony : An increase in shares does not decrease allotted seats
Note: uses rows of df_shares and checks coinciding elements of df_seats for monotony.
Returns
-------
check_pass or df_fail_report: bool or pd.DataFrame (contains ints)
A value of True, or a df of corresponding arguments where the check has failed.
"""
if df_shares is not None and df_seats is not None:
assert (
df_shares.shape == df_seats.shape
), "The number of share variations must be equal to the number of seat allocation variations."
if check_type == "seat_monotony":
df_fail_report = df_seats.copy()
seat_sums = [df_seats[col].sum() for col in df_seats.columns]
seat_sums_sorted_indexes = [
tup[0] for tup in sorted(enumerate(seat_sums), key=lambda i: i[1])
]
# Order seat allocation columns by increasing total.
df_seats = df_seats[[df_seats.columns[i] for i in seat_sums_sorted_indexes]]
# Check that elements of each column are less than corresponding
# ones in later columns.
check_cols = [
[
df_seats.loc[:, df_seats.columns[j]]
<= df_seats.loc[:, df_seats.columns[i]]
for i in range(len(df_seats.columns))[j:]
]
for j in range(len(df_seats.columns))
]
# Return True if the column elements are always less than following
# ones, or the str of the later columns that break the condition.
# str() is used to ensure that 1 != True in the later sets.
check_cols = [
[True if c[j].all() == True else str(j) for j in range(len(c))]
for c in check_cols
]
# Return True if the column's total allotment passes the condition,
# or the index of columns with which the column fails.
check_cols = [
True
if list(set(c))[0] == True and len(set(c)) == 1
else [i + int(item) for item in list(set(c)) if item != True]
for i, c in enumerate(check_cols)
]
col_range = list(range(len(df_fail_report.columns))) # list to use .pop()
cols_dropped = 0
for i in col_range:
if check_cols[i] == True:
# Drop the column, and add to an indexer to maintain lengths.
df_fail_report.drop(
df_fail_report.columns[i - cols_dropped], axis=1, inplace=True
)
cols_dropped += 1
else:
# Keep the column, and remove the indexes of any columns that
# break the condition to keep them as well.
for later_col in check_cols[i]:
col_range.pop(later_col)
if len(df_fail_report.columns) != 0:
# Find elements in a row that are greater than following elements.
check_rows = [
[
[
df_fail_report.loc[row, df_fail_report.columns[col]]
<= df_fail_report.loc[row, df_fail_report.columns[col_after]]
for col_after in range(len(df_fail_report.columns))[col:]
]
for col in range(len(df_fail_report.columns))
]
for row in df_fail_report.index
]
check_rows = [
[
True
if list(set(comparison))[0] == True and len(set(comparison)) == 1
else False
for comparison in i
]
for i in check_rows
]
check_rows = [
True if list(set(i))[0] == True and len(set(i)) == 1 else False
for i in check_rows
]
rows_dropped = 0
for i in range(len(df_fail_report.index)):
if check_rows[i] == True:
# Drop the row if no elements are greater than following ones,
# and add to an indexer to maintain lengths.
df_fail_report.drop(
df_fail_report.index[i - rows_dropped], axis=0, inplace=True
)
rows_dropped += 1
check_pass = len(df_fail_report.columns) == 0
print(
f"Consistency condition based on {check_type.split('_')[0]} monotony passed:",
check_pass,
)
if not check_pass:
print("Returning df of argument elements that failed the condition.")
return df_fail_report
else:
return check_pass
elif check_type == "share_monotony":
# The fail report df has share and seat columns alternated.
df_fail_report = pd.DataFrame()
col = 0
for i in range(len(df_shares.columns)):
df_fail_report.loc[:, col] = pd.Series(
df_shares[df_shares.columns[i]], index=df_shares.index
)
col += 1
df_fail_report.loc[:, col] = pd.Series(
df_seats[df_seats.columns[i]], index=df_seats.index
)
col += 1
# Check which share and seat columns are less than one another.
check_share_rows = [
[
[
df_shares.loc[row, df_shares.columns[col]]
<= df_shares.loc[row, df_shares.columns[other_col]]
for other_col in range(len(df_shares.columns))
]
for col in range(len(df_shares.columns))
]
for row in df_shares.index
]
check_seat_rows = [
[
[
df_seats.loc[row, df_seats.columns[col]]
<= df_seats.loc[row, df_seats.columns[other_col]]
for other_col in range(len(df_seats.columns))
]
for col in range(len(df_seats.columns))
]
for row in df_seats.index
]
# Combine the above for indexes where the condition is met and not.
check_shares_seats = [
[
[
False
if check_share_rows[i][j][k] == True
and check_seat_rows[i][j][k] != True
else True
for k in range(len(check_share_rows[0][0]))
]
for j in range(len(check_share_rows[0]))
]
for i in range(len(check_share_rows))
]
rows_kept = []
for i in range(len(df_fail_report.index)):
row_element_checker = 0
for element_check in check_shares_seats[i]:
if list(set(element_check))[0] == True and len(set(element_check)) == 1:
row_element_checker += 1
if row_element_checker == len(check_shares_seats[i]):
df_fail_report.drop(i, axis=0, inplace=True)
else:
rows_kept.append(i)
# Column indexes, indexing over pairs as share and seat columns are
# dropped together.
col_pair_range = list(range(int(len(df_fail_report.columns) / 2)))
# Indexing which columns to keep.
col_pairs_to_keep = []
for r in rows_kept:
for c in col_pair_range:
if (
list(set(check_shares_seats[r][c]))[0] != True
or len(set(check_shares_seats[r][c])) != 1
):
col_pairs_to_keep.append(c)
for later_col in range(len(check_shares_seats[r][c])):
if check_shares_seats[r][c][later_col] == False:
col_pairs_to_keep.append(later_col)
col_pairs_to_keep = list(set(col_pairs_to_keep))
# Return those columns to be dropped.
cols_to_keep = [[2 * i, 2 * i + 1] for i in col_pairs_to_keep]
cols_to_keep = [item for sublist in cols_to_keep for item in sublist]
cols_dropped = 0
for col in range(len(df_fail_report.columns)):
if col not in cols_to_keep:
df_fail_report.drop(
df_fail_report.columns[col - cols_dropped], axis=1, inplace=True
)
cols_dropped += 1
else:
raise ValueError(
"The 'check_type' argument must be either seat_monotony or share_monotony"
)
check_pass = len(df_fail_report) == 0
print(
f"Consistency condition based on {check_type.split('_')[0]} monotony passed:",
check_pass,
)
if not check_pass:
print("Returning df of argument elements that failed the condition.")
return df_fail_report
else:
return check_pass
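# --- Illustrative usage sketch (not part of the original module) ---
# A minimal demonstration of quota_condition, assuming poli_sci_kit is
# installed; the share and seat values below are made up for illustration.
if __name__ == "__main__":
    example_shares = [100, 200, 300]  # e.g. votes per party
    example_seats = [1, 2, 3]  # a perfectly proportional allocation of 6 seats
    # The ideal shares here are exactly 1, 2 and 3, so the quota condition holds.
    print(quota_condition(example_shares, example_seats))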
|
StarcoderdataPython
|
139252
|
<gh_stars>10-100
import unittest
from unittest import mock
from tethys_apps.management.commands import pre_collectstatic
class ManagementCommandsPreCollectStaticTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_add_arguments(self):
mock_parser = mock.MagicMock()
cmd = pre_collectstatic.Command()
cmd.add_arguments(mock_parser)
add_arguments_calls = mock_parser.add_argument.call_args_list
self.assertEqual(1, len(add_arguments_calls))
self.assertIn('-l', add_arguments_calls[0][0])
self.assertIn('--link', add_arguments_calls[0][0])
@mock.patch('tethys_apps.management.commands.pre_collectstatic.print')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.exit')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.settings')
def test_handle_no_static_root(self, mock_settings, mock_exit, mock_print):
mock_settings.STATIC_ROOT = None
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
cmd = pre_collectstatic.Command()
self.assertRaises(SystemExit, cmd.handle)
print_args = mock_print.call_args_list
msg_warning = 'WARNING: Cannot find the STATIC_ROOT setting. Please provide the ' \
'path to the static directory using the STATIC_ROOT setting in the portal_config.yml ' \
'file and try again.'
self.assertEqual(msg_warning, print_args[0][0][0])
@mock.patch('tethys_apps.management.commands.pre_collectstatic.print')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.shutil.copytree')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.os.path.isdir')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.os.remove')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.get_installed_tethys_extensions')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.get_installed_tethys_apps')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.settings')
def test_handle__not_named_static_or_public(self, mock_settings, mock_get_apps, mock_get_extensions, mock_os_remove,
mock_os_path_isdir, mock_shutil_copytree, mock_print):
options = {'link': False} # Don't create symbolic link (copy instead)
static_root_dir = '/foo/static/root'
app_source_dir = '/foo/sources/foo_app'
ext_source_dir = '/foo/sources/foo_ext'
app_public_dir = app_source_dir + '/public'
ext_public_dir = ext_source_dir + '/public'
app_static_dir = app_source_dir + '/static'
ext_static_dir = ext_source_dir + '/static'
mock_settings.STATIC_ROOT = static_root_dir
mock_get_apps.return_value = {'foo_app': app_source_dir}
mock_get_extensions.return_value = {'foo_ext': ext_source_dir}
mock_os_remove.return_value = True # Successfully remove old link or dir with os.remove
mock_os_path_isdir.side_effect = (False, False, False, False) # "public" and "static" path don't exist
cmd = pre_collectstatic.Command()
cmd.handle(**options)
# Verify apps and extensions were gathered
mock_get_apps.assert_called_once()
mock_get_extensions.assert_called_once()
# Verify check for public dir was performed for app and extension
mock_os_path_isdir.assert_any_call(app_public_dir)
mock_os_path_isdir.assert_any_call(ext_public_dir)
mock_os_path_isdir.assert_any_call(app_static_dir)
mock_os_path_isdir.assert_any_call(ext_static_dir)
# Verify attempt to remove old dirs/links
mock_os_remove.assert_not_called()
# Verify attempt to copy public dir to static root location
mock_shutil_copytree.assert_not_called()
# Verify messages
print_args = mock_print.call_args_list
msg = 'INFO: Collecting static and public directories of apps and extensions to "{0}".' \
.format(mock_settings.STATIC_ROOT)
msg_info_first = 'WARNING: Cannot find a directory named "static" or "public" for app "foo_app". Skipping...'
msg_info_second = 'WARNING: Cannot find a directory named "static" or "public" for app "foo_ext". Skipping...'
check_list = []
for i in range(len(print_args)):
check_list.append(print_args[i][0][0])
self.assertIn(msg, check_list)
self.assertIn(msg_info_first, check_list)
self.assertIn(msg_info_second, check_list)
msg_warning_not_in = 'WARNING: Cannot find the STATIC_ROOT setting'
msg_not_in = 'Please provide the path to the static directory'
info_not_in_first = 'INFO: Successfully copied static directory to STATIC_ROOT for app "foo_app".'
info_not_in_second = 'INFO: Successfully copied static directory to STATIC_ROOT for app "foo_ext".'
for i in range(len(print_args)):
self.assertNotEqual(msg_warning_not_in, print_args[i][0][0])
self.assertNotEqual(msg_not_in, print_args[i][0][0])
self.assertNotEqual(info_not_in_first, print_args[i][0][0])
self.assertNotEqual(info_not_in_second, print_args[i][0][0])
@mock.patch('tethys_apps.management.commands.pre_collectstatic.print')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.shutil.copytree')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.os.path.isdir')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.shutil.rmtree')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.os.remove')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.get_installed_tethys_extensions')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.get_installed_tethys_apps')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.settings')
def test_handle__public__remove_fail__rmtree_fail(self, mock_settings, mock_get_apps, mock_get_extensions,
mock_os_remove, mock_shutil_rmtree, mock_os_path_isdir,
mock_shutil_copytree, mock_print):
options = {'link': False} # Don't create symbolic link (copy instead)
static_root_dir = '/foo/static/root'
app_source_dir = '/foo/sources/foo_app'
ext_source_dir = '/foo/sources/foo_ext'
app_public_dir = app_source_dir + '/public'
ext_public_dir = ext_source_dir + '/public'
app_static_root_dir = static_root_dir + '/foo_app'
ext_static_root_dir = static_root_dir + '/foo_ext'
mock_settings.STATIC_ROOT = static_root_dir
mock_get_apps.return_value = {'foo_app': app_source_dir}
mock_get_extensions.return_value = {'foo_ext': ext_source_dir}
mock_os_remove.side_effect = OSError # remove fails
mock_shutil_rmtree.side_effect = OSError # rmtree fails
mock_os_path_isdir.side_effect = (True, True) # "public" dir found
cmd = pre_collectstatic.Command()
cmd.handle(**options)
# Verify apps and extensions were gathered
mock_get_apps.assert_called_once()
mock_get_extensions.assert_called_once()
# Verify check for public dir was performed for app and extension
mock_os_path_isdir.assert_any_call(app_public_dir)
mock_os_path_isdir.assert_any_call(ext_public_dir)
# Verify attempt to remove old dirs/links
mock_os_remove.assert_any_call(app_static_root_dir)
mock_os_remove.assert_any_call(ext_static_root_dir)
mock_shutil_rmtree.assert_any_call(app_static_root_dir)
mock_shutil_rmtree.assert_any_call(ext_static_root_dir)
# Verify attempt to copy public dir to static root location
mock_shutil_copytree.assert_any_call(app_public_dir, app_static_root_dir)
mock_shutil_copytree.assert_any_call(ext_public_dir, ext_static_root_dir)
# Verify messages
print_args = mock_print.call_args_list
msg = 'INFO: Collecting static and public directories of apps and extensions to "{0}".' \
.format(mock_settings.STATIC_ROOT)
msg_info_first = 'INFO: Successfully copied public directory to STATIC_ROOT for app "foo_app".'
msg_info_second = 'INFO: Successfully copied public directory to STATIC_ROOT for app "foo_ext".'
check_list = []
for i in range(len(print_args)):
check_list.append(print_args[i][0][0])
self.assertIn(msg, check_list)
self.assertIn(msg_info_first, check_list)
self.assertIn(msg_info_second, check_list)
msg_warning_not_in = 'WARNING: Cannot find the STATIC_ROOT setting'
msg_not_in = 'Please provide the path to the static directory'
info_not_in_first = 'INFO: Successfully linked static directory to STATIC_ROOT for app "foo_app".'
info_not_in_second = 'INFO: Successfully linked static directory to STATIC_ROOT for app "foo_ext".'
for i in range(len(print_args)):
self.assertNotEqual(msg_warning_not_in, print_args[i][0][0])
self.assertNotEqual(msg_not_in, print_args[i][0][0])
self.assertNotEqual(info_not_in_first, print_args[i][0][0])
self.assertNotEqual(info_not_in_second, print_args[i][0][0])
@mock.patch('tethys_apps.management.commands.pre_collectstatic.print')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.shutil.copytree')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.os.path.isdir')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.os.remove')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.get_installed_tethys_extensions')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.get_installed_tethys_apps')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.settings')
def test_handle__named_public__copy(self, mock_settings, mock_get_apps, mock_get_extensions, mock_os_remove,
mock_os_path_isdir, mock_shutil_copytree, mock_print):
options = {'link': False} # Don't create symbolic link (copy instead)
static_root_dir = '/foo/static/root'
app_source_dir = '/foo/sources/foo_app'
app_public_dir = app_source_dir + '/public'
ext_source_dir = '/foo/sources/foo_ext'
ext_public_dir = ext_source_dir + '/public'
app_static_root_dir = static_root_dir + '/foo_app'
ext_static_root_dir = static_root_dir + '/foo_ext'
mock_settings.STATIC_ROOT = static_root_dir
mock_get_apps.return_value = {'foo_app': app_source_dir}
mock_get_extensions.return_value = {'foo_ext': ext_source_dir}
mock_os_remove.return_value = True # Successfully remove old link or dir with os.remove
mock_os_path_isdir.side_effect = (True, True) # "public" test path exists
cmd = pre_collectstatic.Command()
cmd.handle(**options)
# Verify apps and extensions were gathered
mock_get_apps.assert_called_once()
mock_get_extensions.assert_called_once()
# Verify check for public dir was performed for app and extension
mock_os_path_isdir.assert_any_call(app_public_dir)
mock_os_path_isdir.assert_any_call(ext_public_dir)
# Verify attempt to remove old dirs/links
mock_os_remove.assert_any_call(app_static_root_dir)
mock_os_remove.assert_any_call(ext_static_root_dir)
# Verify attempt to copy public dir to static root location
mock_shutil_copytree.assert_any_call(app_public_dir, app_static_root_dir)
mock_shutil_copytree.assert_any_call(ext_public_dir, ext_static_root_dir)
# Verify messages
print_args = mock_print.call_args_list
msg = 'INFO: Collecting static and public directories of apps and extensions to "{0}".'\
. format(mock_settings.STATIC_ROOT)
msg_info_first = 'INFO: Successfully copied public directory to STATIC_ROOT for app "foo_app".'
msg_info_second = 'INFO: Successfully copied public directory to STATIC_ROOT for app "foo_ext".'
check_list = []
for i in range(len(print_args)):
check_list.append(print_args[i][0][0])
self.assertIn(msg, check_list)
self.assertIn(msg_info_first, check_list)
self.assertIn(msg_info_second, check_list)
msg_warning_not_in = 'WARNING: Cannot find the STATIC_ROOT setting'
msg_not_in = 'Please provide the path to the static directory'
info_not_in_first = 'INFO: Successfully linked static directory to STATIC_ROOT for app "foo_app".'
info_not_in_second = 'INFO: Successfully linked static directory to STATIC_ROOT for app "foo_ext".'
for i in range(len(print_args)):
self.assertNotEqual(msg_warning_not_in, print_args[i][0][0])
self.assertNotEqual(msg_not_in, print_args[i][0][0])
self.assertNotEqual(info_not_in_first, print_args[i][0][0])
self.assertNotEqual(info_not_in_second, print_args[i][0][0])
@mock.patch('tethys_apps.management.commands.pre_collectstatic.print')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.os.symlink')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.os.path.isdir')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.os.remove')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.get_installed_tethys_extensions')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.get_installed_tethys_apps')
@mock.patch('tethys_apps.management.commands.pre_collectstatic.settings')
def test_handle__named_static__link(self, mock_settings, mock_get_apps, mock_get_extensions, mock_os_remove,
mock_os_path_isdir, mock_os_symlink, mock_print):
options = {'link': True} # Create symbolic link (instead of copy)
static_root_dir = '/foo/static/root'
app_source_dir = '/foo/sources/foo_app'
ext_source_dir = '/foo/sources/foo_ext'
app_static_root_dir = static_root_dir + '/foo_app'
ext_static_root_dir = static_root_dir + '/foo_ext'
app_public_dir = app_source_dir + '/public'
ext_public_dir = ext_source_dir + '/public'
app_static_dir = app_source_dir + '/static'
ext_static_dir = ext_source_dir + '/static'
mock_settings.STATIC_ROOT = static_root_dir
mock_get_apps.return_value = {'foo_app': app_source_dir}
mock_get_extensions.return_value = {'foo_ext': ext_source_dir}
mock_os_remove.return_value = True # Successfully remove old link or dir with os.remove
mock_os_path_isdir.side_effect = (False, True, False, True) # "public" path doesn't exist, "static" path does
cmd = pre_collectstatic.Command()
cmd.handle(**options)
# Verify apps and extensions were gathered
mock_get_apps.assert_called_once()
mock_get_extensions.assert_called_once()
# Verify check for public dir was performed for app and extension
mock_os_path_isdir.assert_any_call(app_public_dir)
mock_os_path_isdir.assert_any_call(ext_public_dir)
mock_os_path_isdir.assert_any_call(app_static_dir)
mock_os_path_isdir.assert_any_call(ext_static_dir)
# Verify attempt to remove old dirs/links
mock_os_remove.assert_any_call(app_static_root_dir)
mock_os_remove.assert_any_call(ext_static_root_dir)
# Verify attempt to copy public dir to static root location
mock_os_symlink.assert_any_call(app_static_dir, app_static_root_dir)
mock_os_symlink.assert_any_call(ext_static_dir, ext_static_root_dir)
# Verify messages
print_args = mock_print.call_args_list
msg = 'INFO: Collecting static and public directories of apps and extensions to "{0}".' \
.format(mock_settings.STATIC_ROOT)
msg_info_first = 'INFO: Successfully linked public directory to STATIC_ROOT for app "foo_app".'
msg_info_second = 'INFO: Successfully linked public directory to STATIC_ROOT for app "foo_ext".'
check_list = []
for i in range(len(print_args)):
check_list.append(print_args[i][0][0])
self.assertIn(msg, check_list)
self.assertIn(msg_info_first, check_list)
self.assertIn(msg_info_second, check_list)
msg_warning_not_in = 'WARNING: Cannot find the STATIC_ROOT setting'
msg_not_in = 'Please provide the path to the static directory'
info_not_in_first = 'INFO: Successfully copied static directory to STATIC_ROOT for app "foo_app".'
info_not_in_second = 'INFO: Successfully copied static directory to STATIC_ROOT for app "foo_ext".'
for i in range(len(print_args)):
self.assertNotEqual(msg_warning_not_in, print_args[i][0][0])
self.assertNotEqual(msg_not_in, print_args[i][0][0])
self.assertNotEqual(info_not_in_first, print_args[i][0][0])
self.assertNotEqual(info_not_in_second, print_args[i][0][0])
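# --- Illustrative sketch (not part of the original test suite) ---
# The exit-mocking pattern used throughout these tests, shown in isolation:
# patching exit() to raise SystemExit keeps the test process alive while still
# letting assertRaises verify that the command bailed out. The command below
# is a hypothetical stand-in, not Tethys code.
class ExitMockPatternExample(unittest.TestCase):
    def test_exit_raises_instead_of_terminating(self):
        def command():
            exit(1)  # would normally terminate the interpreter
        with mock.patch('builtins.exit', side_effect=SystemExit):
            self.assertRaises(SystemExit, command)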
|
StarcoderdataPython
|
3386473
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
import json
import pytest
from micropy.exceptions import StubError
from micropy.stubs import source
def test_get_source(shared_datadir, test_urls, test_repo):
"""should return correct subclass"""
test_path = shared_datadir / "esp8266_test_stub"
local_stub = source.get_source(test_path)
assert isinstance(local_stub, source.LocalStubSource)
remote_stub = source.get_source("esp8266-test-stub")
assert isinstance(remote_stub, source.RemoteStubSource)
stub_source = source.get_source(test_urls["valid"])
print(str(stub_source))
assert str(stub_source) == f"<RemoteStubSource@{stub_source.location}>"
def test_source_ready(shared_datadir, test_urls, tmp_path, mocker, test_archive, test_repo):
"""should prepare and resolve stub"""
# Test LocalStub ready
test_path = shared_datadir / "esp8266_test_stub"
local_stub = source.get_source(test_path)
expected_path = local_stub.location.resolve()
with local_stub.ready() as source_path:
assert source_path == expected_path
# Setup RemoteStub
test_parent = tmp_path / "tmpdir"
test_parent.mkdir()
expected_path = (test_parent / "archive_test_stub").resolve()
mocker.patch.object(source.tempfile, "mkdtemp", return_value=test_parent)
mocker.patch.object(source.utils, "stream_download", return_value=test_archive)
# Test Remote Stub
remote_stub = source.get_source(test_urls["download"])
with remote_stub.ready() as source_path:
print(list(source_path.parent.iterdir()))
assert (source_path / "info.json").exists()
assert len(list(source_path.iterdir())) == 3
def test_repo_from_json(shared_datadir, mocker):
mocker.patch.object(source.utils, "ensure_valid_url", return_value="https://testsource.com")
test_sources = shared_datadir / "test_sources.json"
test_repo = json.loads((shared_datadir / "test_repo.json").read_text())
mock_get = mocker.patch.object(source.requests, "get")
mock_get.return_value.json.return_value = test_repo
content = test_sources.read_text()
repos = list(source.StubRepo.from_json(content))
assert repos[0].name == "Test Repo"
assert len(repos) == 1
def test_repo_resolve_pkg(mocker, test_urls):
url = test_urls["valid"]
mocker.patch.object(source.utils, "ensure_valid_url", return_value=url)
mocker.patch.object(source.utils, "is_downloadable", return_value=False)
source.StubRepo("TestRepo", url, "packages")
with pytest.raises(StubError):
source.StubRepo.resolve_package("not-valid")
def test_repo_search(mocker, test_urls, test_repo):
url = test_urls["valid"]
mocker.patch.object(source.utils, "ensure_valid_url", return_value=url)
results = test_repo.search("esp32-micropython")
assert len(results) == 1
assert "esp32-micropython-1.11.0" in results
results = test_repo.search("esp32")
assert len(results) == 2
assert sorted(results) == sorted(["esp32-micropython-1.11.0", "esp32_LoBo-esp32_LoBo-3.2.24"])
|
StarcoderdataPython
|
1654330
|
from bs4 import BeautifulSoup
from re import search as re_search
from bgg_regex import clean_integer, clean_float
# get initial game data of all games on given rank page
def bgg_scrape_rank_page(soup):
partial_bgg_data = {}
rows = soup.find_all('tr', {'id': 'row_'})
for row in rows:
columns = row.find_all('td')
rank = clean_integer(columns[0].get_text())
game_name = columns[2].find('a').get_text()
game_page = columns[2].find('a')['href']
game_page = 'https://boardgamegeek.com' + game_page
geek_rating = clean_float(columns[3].get_text())
avg_rating = clean_float(columns[4].get_text())
num_votes = clean_integer(columns[5].get_text())
game_id = re_search(r'\d+', game_page).group(0)
game_data = {'game_id': game_id, 'rank': rank, 'name': game_name, 'page': game_page, 'geek_rating': geek_rating,
'avg_rating': avg_rating, 'num_votes': num_votes}
partial_bgg_data[game_id] = game_data
return partial_bgg_data
# first scrape of data
# start building bgg_data
def bgg_scrape_ranks(browser, search_page_format, bgg_data, start_page, end_page):
for page_number in range(start_page, end_page + 1):
url = search_page_format.format(page_number)
browser.get(url)
# make sure page has loaded
_ = browser.find_element_by_xpath('//*[@id="row_"]/td[1]')
soup = BeautifulSoup(browser.page_source, 'html.parser')
bgg_data.update(bgg_scrape_rank_page(soup))
number_gotten = len(bgg_data)
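# --- Illustrative driver sketch (not part of the original module) ---
# How bgg_scrape_ranks might be invoked; the Chrome driver and the browse URL
# pattern below are assumptions, and a local bgg_regex module plus a working
# chromedriver are required.
if __name__ == '__main__':
    from selenium import webdriver
    search_page_format = 'https://boardgamegeek.com/browse/boardgame/page/{}'
    bgg_data = {}
    browser = webdriver.Chrome()
    try:
        bgg_scrape_ranks(browser, search_page_format, bgg_data, start_page=1, end_page=1)
    finally:
        browser.quit()
    print('scraped {} games'.format(len(bgg_data)))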
|
StarcoderdataPython
|
3343303
|
<filename>catkin_ws/src/ligong_control/src/kinematic_model.py
##################################
#Project: The kinematic model for ligong legged robot's gait
#Author:<NAME>
#Time: 2019-11-29
##################################
import numpy as np
import math
step_trip = 0.075
offset = 0.15
step_angle = math.pi / 12
turn_para = 2
def forward_gait():
radio = 20
gait_data = np.zeros((radio, 8))
rate = 2
for t in range(gait_data.shape[0]):
yleg = step_angle * math.sin(2 * math.pi * t / radio)
yfoot = step_trip * (-math.sin(2 * math.pi * t / radio)) + offset
gait_data[t, 0] = -yleg
gait_data[t, 1] = -yleg
gait_data[t, 2] = yleg
gait_data[t, 3] = yleg
gait_data[t, 4] = -yfoot + 2 * offset
gait_data[t, 5] = yfoot
gait_data[t, 6] = yfoot
gait_data[t, 7] = -yfoot + 2 * offset
return rate, gait_data
def backward_gait():
radio = 20
gait_data = np.zeros((radio, 8))
rate = 2
for t in range(gait_data.shape[0]):
yleg = step_angle * math.sin(2 * math.pi * t / radio )
yfoot = step_trip * (-math.sin(2 * math.pi * t / radio)) + offset
gait_data[t, 0] = -yleg
gait_data[t, 1] = -yleg
gait_data[t, 2] = yleg
gait_data[t, 3] = yleg
gait_data[t, 4] = yfoot
gait_data[t, 5] = -yfoot + 2 * offset
gait_data[t, 6] = -yfoot + 2 * offset
gait_data[t, 7] = yfoot
return rate, gait_data
def turnleft_gait():
radio = 20
gait_data = np.zeros((radio, 8))
rate = 2.5
for t in range(gait_data.shape[0]):
yleg = step_angle * math.sin(2 * math.pi * t / radio)
yfoot = step_trip * (-math.sin(2 * math.pi * t / radio)) + offset
gait_data[t, 0] = -yleg
gait_data[t, 1] = -yleg * turn_para
gait_data[t, 2] = yleg
gait_data[t, 3] = yleg * turn_para
gait_data[t, 4] = -yfoot + 2 * offset
gait_data[t, 5] = yfoot
gait_data[t, 6] = yfoot
gait_data[t, 7] = -yfoot + 2 * offset
return rate, gait_data
def turnright_gait():
radio = 20
gait_data = np.zeros((radio, 8))
rate = 2.5
for t in range(gait_data.shape[0]):
yleg = step_angle * math.sin(2 * math.pi * t / radio )
yfoot = step_trip * (-math.sin(2 * math.pi * t / radio)) + offset
gait_data[t, 0] = -yleg
gait_data[t, 1] = -yleg /turn_para
gait_data[t, 2] = yleg
gait_data[t, 3] = yleg /turn_para
gait_data[t, 4] = -yfoot + 2 * offset
gait_data[t, 5] = yfoot
gait_data[t, 6] = yfoot
gait_data[t, 7] = -yfoot + 2 * offset
return rate, gait_data
def jump_gait():
radio = 20
gait_data = np.zeros((radio, 8))
rate = 2
for t in range(gait_data.shape[0]):
yleg = 2.5 * step_angle * math.sin(2 * math.pi * t / radio )
yfoot = 2.5 * step_trip * (-math.sin(2 * math.pi * t / radio)) + offset
gait_data[t, 0] = -yleg
gait_data[t, 1] = yleg
gait_data[t, 2] = yleg
gait_data[t, 3] = -yleg
gait_data[t, 4] = -yfoot + 2 * offset
gait_data[t, 5] = -yfoot + 2 * offset
gait_data[t, 6] = yfoot
gait_data[t, 7] = yfoot
return rate, gait_data
def keep_gait():
radio = 20
gait_data = np.zeros((radio, 8))
rate = 0.75
for t in range(gait_data.shape[0]):
gait_data[t, 0] = 0
gait_data[t, 1] = 0
gait_data[t, 2] = 0
gait_data[t, 3] = 0
gait_data[t, 4] = offset * t / radio
gait_data[t, 5] = offset * t / radio
gait_data[t, 6] = offset * t / radio
gait_data[t, 7] = offset * t / radio
return rate, gait_data
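# --- Illustrative usage sketch (not part of the original module) ---
# Every gait function returns a playback rate and a (radio x 8) array of joint
# targets; the interpretation of columns 0-3 as leg angles and 4-7 as foot
# positions is inferred from the code above, not documented by the author.
if __name__ == '__main__':
    rate, gait_data = forward_gait()
    print('rate:', rate, 'frames:', gait_data.shape[0])
    for t, frame in enumerate(gait_data):
        print(t, ' '.join('{:+.3f}'.format(v) for v in frame))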
|
StarcoderdataPython
|
3250922
|
<filename>Somalia-COVID-impact/Compare_projections.py
import matplotlib.pyplot as plt
import pandas as pd
import os
from pandas.core import base
dir_path = os.path.dirname(os.path.realpath(__file__))
for disease in ["Cholera","Malaria","Measles"]:
baseline=pd.read_excel(f'{dir_path}/Results_Baseline_{disease}_National.xlsx')
covid=pd.read_excel(f'{dir_path}/Results_COVID_{disease}_National.xlsx')
all=pd.merge(left=baseline,right=covid,how='left',left_on='time',right_on='time',\
suffixes=[' - Baseline',' - COVID scenario'])
all=all.filter(regex='Infected')
# print(all.columns)
all.plot(title=disease)
plt.legend()
plt.savefig(f"{dir_path}/Comparison_{disease}.png")
plt.show()
|
StarcoderdataPython
|
1789201
|
<filename>NippoKun/report/urls.py
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import login, logout
from . import views
from .views import (
CreateReport,
ListReport,
ListMyReport,
DetailReport,
UpdateReport,
DeleteReport,
CreateUser,
CreateScore,
UpdateScore,
DeleteScore,
ListScore,
)
urlpatterns = [
url(r'^search/', login_required(views.search), name='search'),
url(r'^index/', login_required(ListReport.as_view()), name='index'),
url(r'^mypage/', login_required(ListMyReport.as_view()), name='mypage'),
url(r'^report_entries/', login_required(CreateReport.as_view()), name='create_report'),
url(r'^(?P<pk>[0-9]+)/$', login_required(DetailReport.as_view()), name='detail_report'),
url(r'^(?P<pk>[0-9]+)/edition/$', login_required(UpdateReport.as_view()), name='update_report'),
url(r'^(?P<pk>[0-9]+)/delete/$', login_required(DeleteReport.as_view()), name='delete_report'),
url(r'^login/$', login, {'template_name': 'report/login.html'}, name='login'),
url(r'^logout/$', logout, {'template_name': 'report/logout.html'}, name='logout'),
url(r'^user_register/$', CreateUser.as_view(), name='create_user'),
url(r'^(?P<report>[0-9]+)/score/', login_required(CreateScore.as_view()), name='create_score'),
url(r'^edition/(?P<pk>[0-9]+)/$', login_required(UpdateScore.as_view()), name='update_score'),
url(r'^delete/(?P<pk>[0-9]+)/$', login_required(DeleteScore.as_view()), name='delete_score'),
url(r'^(?P<report>[0-9]+)/score_list/$', login_required(ListScore.as_view()), name='list_score'),
]
|
StarcoderdataPython
|
3232002
|
<reponame>cloudinary/pycloudinary<gh_stars>100-1000
import contextlib
import unittest
from cloudinary.utils import normalize_expression, generate_transformation_string
NORMALIZATION_EXAMPLES = {
'None is not affected': [None, None],
'number replaced with a string value': [10, '10'],
'empty string is not affected': ['', ''],
'single space is replaced with a single underscore': [' ', '_'],
'blank string is replaced with a single underscore': [' ', '_'],
'underscore is not affected': ['_', '_'],
'sequence of underscores and spaces is replaced with a single underscore': [' _ __ _', '_'],
'arbitrary text is not affected': ['foobar', 'foobar'],
'double ampersand replaced with and operator': ['foo && bar', 'foo_and_bar'],
'double ampersand with no space at the end is not affected': ['foo&&bar', 'foo&&bar'],
'width recognized as variable and replaced with w': ['width', 'w'],
'initial aspect ratio recognized as variable and replaced with iar': ['initial_aspect_ratio', 'iar'],
'duration is recognized as a variable and replaced with du': ['duration', 'du'],
'duration after : is not a variable and is not affected': ['preview:duration_2', 'preview:duration_2'],
'$width recognized as user variable and not affected': ['$width', '$width'],
'$initial_aspect_ratio recognized as user variable followed by aspect_ratio variable': [
'$initial_aspect_ratio',
'$initial_ar',
],
'$mywidth recognized as user variable and not affected': ['$mywidth', '$mywidth'],
'$widthwidth recognized as user variable and not affected': ['$widthwidth', '$widthwidth'],
'$_width recognized as user variable and not affected': ['$_width', '$_width'],
'$__width recognized as user variable and not affected': ['$__width', '$_width'],
'$$width recognized as user variable and not affected': ['$$width', '$$width'],
'$height recognized as user variable and not affected': ['$height_100', '$height_100'],
'$heightt_100 recognized as user variable and not affected': ['$heightt_100', '$heightt_100'],
'$$height_100 recognized as user variable and not affected': ['$$height_100', '$$height_100'],
'$heightmy_100 recognized as user variable and not affected': ['$heightmy_100', '$heightmy_100'],
'$myheight_100 recognized as user variable and not affected': ['$myheight_100', '$myheight_100'],
'$heightheight_100 recognized as user variable and not affected': [
'$heightheight_100',
'$heightheight_100',
],
'$theheight_100 recognized as user variable and not affected': ['$theheight_100', '$theheight_100'],
'$__height_100 recognized as user variable and not affected': ['$__height_100', '$_height_100']
}
class ExpressionNormalizationTest(unittest.TestCase):
def test_expression_normalization(self):
for description, (input_expression, expected_expression) in NORMALIZATION_EXAMPLES.items():
with self.subTest(description, input_expression=input_expression):
self.assertEqual(expected_expression, normalize_expression(input_expression))
if not hasattr(unittest.TestCase, "subTest"):
# Support Python before version 3.4
@contextlib.contextmanager
def subTest(self, msg="", **params):
yield
|
StarcoderdataPython
|
3220265
|
import pygame
import math
import numpy as np
from pygame.locals import *
class Wheel():
def __init__(self, window, colour, pos, size):
self.window = window
self.colour = colour
self.size = size
self.pos = np.array(pos)
self.ang = 0.0
self.mat = np.array([[math.cos(self.ang), -math.sin(self.ang)],
[math.sin(self.ang), math.cos(self.ang)]])
self.points_mat = np.array([[-self.size / 2, -self.size],
[self.size / 2, -self.size],
[self.size / 2, self.size],
[-self.size / 2, self.size]])
def render(self):
points = np.matmul(self.mat, np.transpose(self.points_mat))
points = np.transpose(points)
pygame.draw.polygon(self.window, self.colour, points + self.pos)
def set_pos(self, pos):
self.pos = pos
def set_ang(self, ang):
self.ang = ang
self.mat = np.array([[math.cos(self.ang), -math.sin(self.ang)],
[math.sin(self.ang), math.cos(self.ang)]])
class Car():
def __init__(self, window, colours, pos, size):
self.window = window
self.colours = colours
self.size = size
self.pos = np.array(pos)
self.speed = 0.0
self.vel = np.array([0.0,0.0])
self.acc = 0.0
self.term_speed = 400
self.ang = 0.0
self.ang_mat = np.array([[math.cos(self.ang), -math.sin(self.ang)],
[math.sin(self.ang), math.cos(self.ang)]])
self.wheel_vel = 0.0
self.wheel_ang = 0.0
self.max_wheel_ang = math.pi/6
self.points_mat = np.array([[self.pos[0] - self.size, self.pos[1] - self.size*2.5],
[self.pos[0] + self.size, self.pos[1] - self.size*2.5],
[self.pos[0] + self.size, self.pos[1] + self.size*2.5],
[self.pos[0] - self.size, self.pos[1] + self.size*2.5]])
self.wheel_pos = np.array([[-self.size,-self.size*1.6],
[ self.size, -self.size*1.6],
[ self.size, self.size*1.6],
[-self.size, self.size*1.6],
[0, self.size*1.6],
[0, -self.size*1.6]])
self.front_axel = np.array([self.pos[0], self.pos[1] + self.size * 1.6])
self.rear_axel = np.array([self.pos[0], self.pos[1] - self.size * 1.6])
self.turning_point = np.array([0.0,0.0])
self.wheels = []
self.wheels.append(Wheel(window, colours["grey"], [self.pos[0] - self.size, self.pos[1] - self.size * 1.6], size / 3))
self.wheels.append(Wheel(window, colours["grey"], [self.pos[0] + self.size, self.pos[1] - self.size * 1.6], size / 3))
self.wheels.append(Wheel(window, colours["grey"], [self.pos[0] + self.size, self.pos[1] + self.size * 1.6], size / 3))
self.wheels.append(Wheel(window, colours["grey"], [self.pos[0] - self.size, self.pos[1] + self.size * 1.6], size / 3))
def dynamics(self, frame_time):
pressed = pygame.key.get_pressed()
#Rotation inputs
if (pressed[pygame.K_a] and not pressed[pygame.K_d]) or (not pressed[pygame.K_a] and pressed[pygame.K_d]):
if pressed[pygame.K_a] and not pressed[pygame.K_d]:
self.wheel_vel = -2
elif not pressed[pygame.K_a] and pressed[pygame.K_d]:
self.wheel_vel = 2
else:
if self.wheel_ang > 0.01:
self.wheel_vel = -2
elif self.wheel_ang < -0.01:
self.wheel_vel = 2
else:
self.wheel_vel = 0
#Limit rotation angle to maximum
self.wheel_ang += self.wheel_vel * frame_time
if self.wheel_ang > self.max_wheel_ang:
self.wheel_ang = self.max_wheel_ang
elif self.wheel_ang < -self.max_wheel_ang:
self.wheel_ang = -self.max_wheel_ang
#Translation inputs
if pressed[pygame.K_w] and not pressed[pygame.K_s]:
self.acc = 100
elif not pressed[pygame.K_w] and pressed[pygame.K_s]:
self.acc = -100
else:
if self.speed > 0.0001:
self.acc = -50
elif self.speed < 0.0001:
self.acc = 50
else:
self.acc = 0
#Limit speed to terminal speed
if self.speed > self.term_speed:
self.speed = self.term_speed
elif self.speed < -self.term_speed/3:
self.speed = -self.term_speed/3
#Recalculate wheel positions
wheel_pos = np.matmul(self.ang_mat, np.transpose(self.wheel_pos))
wheel_pos = np.transpose(wheel_pos)
#Find axel pivot points
self.front_axel = wheel_pos[4]
self.rear_axel = wheel_pos[5]
#Recalculate wheel matrix
self.front_mat = np.array([[math.cos(self.wheel_ang + self.ang), -math.sin(self.wheel_ang + self.ang)],
[math.sin(self.wheel_ang + self.ang), math.cos(self.wheel_ang + self.ang)]])
#Calculate wheel normals
self.front_norm = np.matmul(self.front_mat, np.transpose(np.array([1.0, 0.0])))
self.front_norm = np.transpose(self.front_norm)
self.rear_norm = np.matmul(self.ang_mat, np.transpose(np.array([1.0, 0.0])))
self.rear_norm = np.transpose(self.rear_norm)
#Find turning point
if (self.rear_norm[0] * self.front_norm[1] - self.rear_norm[1] * self.front_norm[0]) != 0:
mu = ((self.rear_norm[0]*(self.rear_axel[1] - self.front_axel[1]) - self.rear_norm[1]*(self.rear_axel[0] - self.front_axel[0]))
/ (self.rear_norm[0] * self.front_norm[1] - self.rear_norm[1] * self.front_norm[0]))
self.turning_point = self.front_axel + mu * self.front_norm + self.pos
else:
mu = 100000
self.turning_point = self.rear_axel + mu * self.rear_norm + self.pos
#Move car geometry away from turning point
self.points_mat = np.array([[self.points_mat[0][0] - self.turning_point[0], self.points_mat[0][1] - self.turning_point[1]],
[self.points_mat[1][0] - self.turning_point[0], self.points_mat[1][1] - self.turning_point[1]],
[self.points_mat[2][0] - self.turning_point[0], self.points_mat[2][1] - self.turning_point[1]],
[self.points_mat[3][0] - self.turning_point[0], self.points_mat[3][1] - self.turning_point[1]]])
#Calculate rotation angle
radius = np.sqrt((self.pos - self.turning_point).dot(self.pos - self.turning_point))
self.speed += self.acc * frame_time
displacement = self.speed * frame_time
angle = displacement / radius
if self.wheel_ang < 0:
angle *= -1
self.ang += angle
self.ang_mat = np.array([[math.cos(self.ang), -math.sin(self.ang)],
[math.sin(self.ang), math.cos(self.ang)]])
translation_mat = np.array([[math.cos(angle), -math.sin(angle)],
[math.sin(angle), math.cos(angle)]])
#Apply translation matrix
self.points_mat = np.matmul(translation_mat, np.transpose(self.points_mat))
self.points_mat = np.transpose(self.points_mat)
#Move car geometry back from turning point
self.points_mat = np.array([[self.points_mat[0][0] + self.turning_point[0], self.points_mat[0][1] + self.turning_point[1]],
[self.points_mat[1][0] + self.turning_point[0], self.points_mat[1][1] + self.turning_point[1]],
[self.points_mat[2][0] + self.turning_point[0], self.points_mat[2][1] + self.turning_point[1]],
[self.points_mat[3][0] + self.turning_point[0], self.points_mat[3][1] + self.turning_point[1]]])
self.pos = np.array([(self.points_mat[0][0] + self.points_mat[1][0] + self.points_mat[2][0] + self.points_mat[3][0]) / 4,
(self.points_mat[0][1] + self.points_mat[1][1] + self.points_mat[2][1] + self.points_mat[3][1]) / 4])
#Recalculate wheel positions
wheel_pos = np.matmul(self.ang_mat, np.transpose(self.wheel_pos))
wheel_pos = np.transpose(wheel_pos)
#Apply new wheel_positions
self.wheels[0].set_pos([wheel_pos[0][0] + self.pos[0], wheel_pos[0][1] + self.pos[1]])
self.wheels[1].set_pos([wheel_pos[1][0] + self.pos[0], wheel_pos[1][1] + self.pos[1]])
self.wheels[2].set_pos([wheel_pos[2][0] + self.pos[0], wheel_pos[2][1] + self.pos[1]])
self.wheels[3].set_pos([wheel_pos[3][0] + self.pos[0], wheel_pos[3][1] + self.pos[1]])
#Apply new wheel rotations
self.wheels[0].set_ang(self.ang)
self.wheels[1].set_ang(self.ang)
self.wheels[2].set_ang(self.wheel_ang + self.ang)
self.wheels[3].set_ang(self.wheel_ang + self.ang)
def render(self):
for wheel in self.wheels:
wheel.render()
pygame.draw.polygon(self.window, self.colours["red"], self.points_mat)
def display_debug(self):
pygame.draw.line(self.window, self.colours["yellow"], self.pos + self.rear_axel + 10000 * self.rear_norm,
self.pos + self.rear_axel - 10000 * self.rear_norm)
pygame.draw.line(self.window, self.colours["yellow"], self.pos + self.front_axel + 10000 * self.front_norm,
self.pos + self.front_axel - 10000 * self.front_norm)
pygame.draw.circle(self.window, self.colours["blue"], [int(self.turning_point[0]), int(self.turning_point[1])], 3)
#----------------------------------------------------------------------------------------------------------------------------------
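# --- Illustrative game-loop sketch (not part of the original module) ---
# A minimal way to drive Car.dynamics() and Car.render(); the window size,
# colour values and 60 FPS cap are assumptions, not taken from the project.
def demo_loop():
    pygame.init()
    window = pygame.display.set_mode((800, 600))
    colours = {"red": (200, 40, 40), "grey": (90, 90, 90),
               "yellow": (230, 210, 60), "blue": (60, 90, 220)}
    car = Car(window, colours, [400.0, 300.0], 20)
    clock = pygame.time.Clock()
    running = True
    while running:
        frame_time = clock.tick(60) / 1000.0
        for event in pygame.event.get():
            if event.type == QUIT:
                running = False
        car.dynamics(frame_time)
        window.fill((30, 30, 30))
        car.render()
        pygame.display.flip()
    pygame.quit()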
def main():
pass
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1727319
|
import pandas as pd
import plotly as py
import plotly.graph_objs as go
#import csv file
failed = pd.read_csv('banklist.csv')
#list of states(unique)
failed_states = list(set(failed['ST']))
#list of years(unique). strip whitespace, take the last 2 characters of the closing date to get the year, then sort the list
failed_years = sorted(list(set(failed['Closing Date'].str.strip().str[-2:])))
#create dictionary for each year
d = {year: pd.DataFrame for year in failed_years}
#select rows for each year and assign to dictionary
for key in d.keys():
d[key] = failed[failed['Closing Date'].str.endswith(key)]
#nested loop so I can count the number of bank failures for each year by state
for key in d.keys():
#totals by state go in this list
listx = []
for i in failed_states:
x = len(d[key][d[key]['ST'].str.contains(i)])
listx.append(x)
#loop until total data for each year achieved
if len(listx) == len(failed_states):
#uses each loop to output a graph of a choropleth map for bank failures per year in the US for all keys in the dictionary
data = dict(
type = 'choropleth',
locations = failed_states,
locationmode = 'USA-states',
colorscale = 'Viridis',
z = listx,
zmin = 0,
zmax = 30,
title='test')
lyt = dict(geo=dict(scope='usa'))
map = go.Figure(data=[data], layout=lyt)
py.offline.plot(map)
else:
continue
|
StarcoderdataPython
|
171691
|
<reponame>mcmaxwell/frontier<gh_stars>0
from modeltranslation.translator import translator, TranslationOptions
from .models import News
from common.translation import CommonPostTranslationOptions
class NewsTranslationOptions(TranslationOptions):
fields = ('title', 'text_preview', 'text')
translator.register(News, NewsTranslationOptions)
|
StarcoderdataPython
|
1700425
|
from python_framework import Serializer, HttpStatus, JwtConstant
from MessageEmitterAnnotation import MessageEmitter
from MessageEmitterAnnotation import MessageEmitterMethod
from config import MessageConfig
import MessageDto
import Message
@MessageEmitter(
url = MessageConfig.EMITTER_URL,
timeout = MessageConfig.EMITTER_TIMEOUT,
headers = {
'Content-Type': 'application/json',
JwtConstant.DEFAULT_JWT_API_KEY_HEADER_NAME: f'Bearer {MessageConfig.MESSAGE_API_KEY}'
}
, muteLogs = False
, logRequest = True
, logResponse = True
)
class MessageEmitter:
@MessageEmitterMethod(
queueKey = MessageConfig.SOME_UNIQUE_QUEUE_KEY,
requestClass=[MessageDto.MessageRequestDto],
responseClass=[MessageDto.MessageRequestDto]
, logRequest = True
, logResponse = True
)
def send(self, dto):
return self.emit(body=dto)
|
StarcoderdataPython
|
1792298
|
<filename>tests/test_format.py
# pyflyby/test_format.py
# License for THIS FILE ONLY: CC0 Public Domain Dedication
# http://creativecommons.org/publicdomain/zero/1.0/
from __future__ import absolute_import, division, with_statement
from textwrap import dedent
from pyflyby._format import FormatParams, fill, pyfill
def test_fill_1():
result = fill(["'hello world'", "'hello two'"],
prefix=("print ", " "), suffix=(" \\", ""),
max_line_length=25)
expected = "print 'hello world', \\\n 'hello two'\n"
assert result == expected
def test_pyfill_1():
result = pyfill('print ', ["foo.bar", "baz", "quux", "quuuuux"])
expected = 'print foo.bar, baz, quux, quuuuux\n'
assert result == expected
def test_pyfill_2():
result = pyfill('print ', ["foo.bar", "baz", "quux", "quuuuux"],
FormatParams(max_line_length=15))
expected = dedent("""
print (foo.bar,
baz,
quux,
quuuuux)
""").lstrip()
assert result == expected
def test_pyfill_3():
result = pyfill('print ', ["foo.bar", "baz", "quux", "quuuuux"],
FormatParams(max_line_length=14, hanging_indent='always'))
expected = dedent("""
print (
foo.bar,
baz, quux,
quuuuux)
""").lstrip()
assert result == expected
def test_pyfill_4():
result = pyfill('print ', ["foo.bar", "baz", "quux", "quuuuux"],
FormatParams(max_line_length=14, hanging_indent='always'))
expected = dedent("""
print (
foo.bar,
baz, quux,
quuuuux)
""").lstrip()
assert result == expected
def test_pyfill_5():
result = pyfill('print ', ["foo.bar", "baz", "quux", "quuuuux"],
FormatParams(max_line_length=14, hanging_indent='auto'))
expected = dedent("""
print (
foo.bar,
baz, quux,
quuuuux)
""").lstrip()
assert result == expected
def test_pyfill_hanging_indent_never_1():
prefix = 'from foo import '
# <---------------39 chars-------------->
tokens = ['x23456789a123456789b123456789c123456789','z1','z2']
params = FormatParams(max_line_length=79, hanging_indent='never')
result = pyfill(prefix, tokens, params)
expected = dedent("""
from foo import (x23456789a123456789b123456789c123456789,
z1, z2)
""").lstrip()
assert result == expected
def test_pyfill_hanging_indent_always_1():
prefix = 'from foo import '
# <---------------39 chars-------------->
tokens = ['x23456789a123456789b123456789c123456789','z1','z2']
params = FormatParams(max_line_length=79, hanging_indent='always')
result = pyfill(prefix, tokens, params)
expected = dedent("""
from foo import (
x23456789a123456789b123456789c123456789, z1, z2)
""").lstrip()
assert result == expected
def test_pyfill_hanging_indent_auto_yes_1():
prefix = 'from foo import '
# <---------------39 chars-------------->
tokens = ['x23456789a123456789b123456789c123456789','z1','z2']
params = FormatParams(max_line_length=79, hanging_indent='auto')
result = pyfill(prefix, tokens, params)
expected = dedent("""
from foo import (
x23456789a123456789b123456789c123456789, z1, z2)
""").lstrip()
assert result == expected
def test_pyfill_hanging_indent_auto_no_1():
prefix = 'from foo import '
# <---------------38 chars-------------->
tokens = ['x23456789a123456789b123456789c12345678','z1','z2']
params = FormatParams(max_line_length=79, hanging_indent='auto')
result = pyfill(prefix, tokens, params)
expected = dedent("""
from foo import (x23456789a123456789b123456789c12345678,
z1, z2)
""").lstrip()
assert result == expected
|
StarcoderdataPython
|
1686192
|
import math
import pandas
import random
import os
import numpy as np
import tensorflow
from pandas import DataFrame
from tensorflow.keras.utils import Sequence
class DataSequence(Sequence):
"""
Keras Sequence object to train a model on a list of csv files
"""
def __init__(self, rootdir, batch_size, shuffle=False, class_format='categorical', classes=['Electronic', 'Experimental', 'Folk', 'Hip-Hop', 'Instrumental', 'International', 'Pop', 'Rock']):
"""
df = dataframe with two columns: the labels and a list of filenames
"""
df = DataFrame(columns=['file_names', 'label'])
for root, subdirs, files in os.walk(rootdir):
for subdir in subdirs:
for r, s, f in os.walk(os.path.join(root, subdir)):
paths = [os.path.join(r, name) for name in f]
temp = DataFrame(data=paths, columns=['file_names'])
temp['label'] = classes.index(subdir)
df = df.append(temp, ignore_index=True)
self.df = df
self.classes = classes
self.bsz = batch_size
self.shuffle = shuffle
self.n = self.round(len(df.index), batch_size)
# self.indexes = random.sample(range(self.n), k=self.n)
self.indexes = range(self.n)
# Take labels and a list of image locations in memory
self.labels = tensorflow.keras.utils.to_categorical(self.df['label'].values, num_classes=len(self.classes)) if class_format=='categorical' else self.df['label'].values
self.file_list = self.df['file_names']
def __len__(self):
return int(math.floor(self.n / float(self.bsz)))
def round(self, n, multiple):
# Smaller multiple
a = (n // multiple) * multiple
# Return of closest of two
return a
def on_epoch_end(self):
self.indexes = range(self.n)
if self.shuffle:
# Shuffles indexes after each epoch if in training mode
self.indexes = random.sample(self.indexes, k=len(self.indexes))
def get_batch_labels(self, idx, arr):
# Fetch a batch of labels
return arr[idx * self.bsz: (idx + 1) * self.bsz]
def get_batch_features(self, arr):
# Fetch a batch of inputs
feats = np.array([self.read_csv_data(f) for f in arr])
return feats
def __getitem__(self, idx):
indexes = self.indexes[idx*self.bsz:(idx+1)*self.bsz]
files_temp = np.array([self.file_list[k] for k in indexes])
y = np.array([self.labels[k] for k in indexes])
batch_x = self.get_batch_features(files_temp)
return batch_x, y
def read_csv_data(self, filename):
df = pandas.read_csv(filename, index_col=0).fillna(0.00000000000000001)
df = self.normalize(df)
return df.values
def normalize(self, df: DataFrame):
return (df - df.mean()) / (df.std())
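# --- Illustrative training sketch (not part of the original module) ---
# A keras Sequence can be handed straight to Model.fit; the model argument
# below is a hypothetical stand-in whose input shape matches the MFCC csv
# frames yielded by DataSequence.
def train_on_sequence(model, train_dir, batch_size=64, epochs=10):
    seq = DataSequence(train_dir, batch_size, shuffle=True)
    return model.fit(seq, epochs=epochs)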
if __name__=='__main__':
DATASET_DIR = "dataset/"
cwd = os.path.dirname(os.path.realpath(__file__))
base_dir = os.path.join(cwd, DATASET_DIR, 'mfcc_fma_small', 'train')
gen = DataSequence(base_dir, 64, True)
print(int(np.floor(gen.n / float(64))))
#
for i in range(gen.__len__() - 1, 0,-1):
x, y = gen.__getitem__(i)
print("{}: {}".format(i, x.shape))
|
StarcoderdataPython
|
1717578
|
"""
constants.py
"""
from binascii import unhexlify, hexlify
from base64 import b64encode as b64e
from base64 import b64decode as b64d
# NOTE: delay experiments
ARTIFICIAL_DELAY = 4.0
ARTIFICIAL_DELAY_INCREMENT = 0.5
# NOTE: estimated delays
NC_EKA_PERIOD = 5.0 # sec
NC_EKA_TIMEOUT = 31.0 # sec
# NOTE: ECDH
POINT_LENS_BYTES = [31, 32, 33]
# NOTE: AES256 CBC
AES_ROUNDS = 14
AES_KEY_BYTES = 32
AES_KEY_BITS = AES_KEY_BYTES * 8 ## 256
assert AES_KEY_BYTES % 8 == 0
assert AES_KEY_BITS % 8 == 0
AES_BLOCK_BYTES = 16 # Fixed
AES_BLOCK_BITS = AES_BLOCK_BYTES * 8 ## 128
assert AES_BLOCK_BYTES % 8 == 0
assert AES_BLOCK_BITS % 8 == 0
AES_IV_BYTES = AES_BLOCK_BYTES
AES_IV_BITS = AES_BLOCK_BITS
# NOTE: HMAC SHA256
HMAC_SHA256_KEY_BYTES = 32
HMAC_SHA256_KEY_BITS = HMAC_SHA256_KEY_BYTES * 8
assert HMAC_SHA256_KEY_BYTES % 8 == 0
assert HMAC_SHA256_KEY_BITS % 8 == 0
# NOTE: ECDH with secp256r1 aka NIST P-256 (secp256r1)
EC_P = 0x00ffffffff00000001000000000000000000000000ffffffffffffffffffffffff;
EC_A = 0x00ffffffff00000001000000000000000000000000fffffffffffffffffffffffc;
EC_B = 0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b;
EC_XG_INT = 0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296;
# EC_XG_BIN = unhexlify('6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296')
EC_XG_BIN = b'k\x17\xd1\xf2\xe1,BG\xf8\xbc\xe6\xe5c\xa4@\xf2w\x03}\x81-\xeb3\xa0\xf4\xa19E\xd8\x98\xc2\x96'
EC_YG_INT = 0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5;
# EC_YG_BIN = unhexlify('4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5')
EC_YG_BIN = b'O\xe3B\xe2\xfe\x1a\x7f\x9b\x8e\xe7\xebJ|\x0f\x9e\x16+\xce3Wk1^\xce\xcb\xb6@h7\xbfQ\xf5'
EC_N = 0x00ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551;
EC_X2G_INT = 0x7CF27B188D034F7E8A52380304B51AC3C08969E277F21B35A60B48FC47669978;
EC_Y2G_INT = 0x07775510DB8ED040293D9AC69F7430DBBA7DADE63CE982299E04B79D227873D1;
EC_X2G_BIN = b'|\xf2{\x18\x8d\x03O~\x8aR8\x03\x04\xb5\x1a\xc3\xc0\x89i\xe2w\xf2\x1b5\xa6\x0bH\xfcGf\x99x'
EC_Y2G_BIN = b'\x07wU\x10\xdb\x8e\xd0@)=\x9a\xc6\x9ft0\xdb\xba}\xad\xe6<\xe9\x82)\x9e\x04\xb7\x9d"xs\xd1'
# NOTE: invalid points for NIST P-256 (secp256r1)
# https://web-in-security.blogspot.com/2015/09/practical-invalid-curve-attacks.html
EC_IX5 = 82794344854243450371984501721340198645022926339504713863786955730156937886079
EC_IY5 = 33552521881581467670836617859178523407344471948513881718969729275859461829010
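# Illustrative helper (not from the original capture): a point (x, y) lies on
# P-256 iff y^2 == x^3 + a*x + b (mod p). The generator above satisfies this,
# while the invalid-curve point EC_IX5/EC_IY5 should not -- that mismatch is
# what invalid-curve attacks exploit.
def _is_on_p256(x, y):
    return (y * y - (x * x * x + EC_A * x + EC_B)) % EC_P == 0
# _is_on_p256(EC_XG_INT, EC_YG_INT) -> expected True
# _is_on_p256(EC_IX5, EC_IY5)       -> expected False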
X1_BT_ADDR = 'DC:53:60:04:93:35'
NEX_BTADDR = "48:59:29:01:AD:6F"
MOTO_BTADDR = "24:da:9b:66:9f:82"
PI2_BTADDR = "40:4E:36:A8:BF:5F"
SDP_UUID = {
'PNP': '1200',
'L2CAP': '0100',
}
P2P_STAR = '!' # 0x21
P2P_STAR_HEX = b'!'.hex()
P2P_CLUSTER = '"' # 0x22
P2P_CLUSTER_HEX = b'"'.hex()
P2P_POINT_TO_POINT = 'TODO',
RFCOMM_PORT = [5, 8, 10]
# NOTE: modified version of md5(sid)
UUID_SID = "b8c1a306-9167-347e-b503-f0daba6c5723"
# NOTE: man ascii
NEWLINE = b'\x0a'
SUBSTITUTE = b'\x1a'
SPACE = b"\x20"
QUOTE = b"\x27"
ETB = b"\x17"
DOUBLE_QUOTE = b"\x22"
DOUBLE_QUOTE_ETB = b"\x22\x17"
EID_LEN = 4
AUTH_LEN = 5
NC_ERR_HEAD = b'\x08\x68\x12'
# NOTE: used both for KEP and AL
NC_HEAD2 = b'\x08\x01\x12'
CAES = b64e(NC_HEAD2)
NC_HEAD3 = b'\x08\x03*'
NC_HEAD4 = b'\x08\x04*'
NC_HEAD5 = b'\x08\x04\x1a'
NC_SEP = b'\x12' # NOTE: aka DC2 (device control 2)
NC_SEP2 = b'\x20' # NOTE: aka space
NC_SEP3 = b'\x08' # NOTE: aka backspace
NC_SEP4 = b'\x10' # NOTE: aka dle
NC_KEP2_HEAD = b'\x08d'
NC_KEP3_HEAD = b'\x18d"'
NC_HEAD = b'\x1c\x08\x01\x10\x02*'
NC_HEAD_PAY = b'0'
NC_HEAD_EKA = b'2\x04\x08\r\x10\x01\x12\x10'
NC_EKA_HEAD = unhexlify('0a08080112040805320010')
NC_EKA_LEN = 88
NC_EKA_TEMPLATE = unhexlify("000000540a300a1c080110022a103aebf352c5c1465377098ab7a2004bce3204080d100112102ddac9b3ec6158e8bed5621522997739122073b317233dc130288771e22d9f9313d8c9775ca9f465c25528ff3e9063942a59")
assert(len(NC_EKA_TEMPLATE) == NC_EKA_LEN)
NC_PAY_LEN = 120
NC_PT2_LEN = 37 # Bytes decoded with Pt2
NC_PT2_HEX_LEN = 37*2 # Hexdigits
# NOTE: always first packet from the adv to tell WLAN
NC_EWL_LEN = 104
# NOTE: always third packet from the adv to tell softAP
NC_EHA_LEN = 152
NC_EWD_LEN = 136
# NOTE: I'm using k1[0] from test_dissector.py
NC_KEP1_V2_TEMPLATE = unhexlify("0000001c08011218080112140a043044725612046e616d6520b1fbb8fa022802")
# NOTE: I'm using k1s[0] from test_dissector.py
NC_KEP1_STAR_HA_TEMPLATE = unhexlify("000000210801121d080112190a043475644112046e616d652086faa5702805280328022804")
NC_KEP1_STAR_WD_TEMPLATE = unhexlify("00000025080112210801121d0a046579565912046e616d6520fcbfebdef9ffffffff01280328022804")
# NOTE: I'm using k1c[0] from test_dissector.py
NC_KEP1_CLUS_TEMPLATE = unhexlify("000000200801121c080112180a043475644112046e616d6520ab92a9f806280528022804")
# NOTE: I'm using k1[1] from test_dissector.py
NC_KEP2_TEMPLATE = unhexlify("0000008808021283010801122090073f06afa1fee90af5336d04b00af6a366493151f60851245f8e7e59f10c3f1a44086412403c616654b5e51e5714489586655ea083881b8070673172036c3522c0e40e0838f3eed88a3911a243ac468ed1a6c5e314ea117066857b1f82738f284a07994e7022174145535f3235365f4342432d484d41435f534841323536")
NC_KEP2_LEN = 140
assert(len(NC_KEP2_TEMPLATE) == NC_KEP2_LEN)
# NOTE: I'm always using the 120 Bytes template, no need to adjust the len fields
NC_KEP3_TEMPLATE = unhexlify("0000007408031270080112208d83b8a9cd5cb2183cbc50f7fde33b435fe30fe7a94a905892d35562f1bff83318642248080112440a204840d302b4208deac7bf2e37f4ddce5974bbd2561ca2bc1c86e10cccf8eaa5d712207a2789ab50d9c1817c9503654c382926098f8c19e148bed81ff743249df687ed")
NC_KEP3_LEN = 120
assert(len(NC_KEP3_TEMPLATE) == NC_KEP3_LEN)
# NOTE: I'm using k1[3] from test_dissector.py
NC_KEP4_TEMPLATE = unhexlify("0000004e0804124a0a48080112440a207691b1f6a57371d7dad3bf154f34f91075ccabe7bbfcfcfd8c574757f5f59249122071e5a2993e2074113bc85aa04e84894ba50f29378971b8902c9e388ece763ba9")
# NOTE: I'm using r_ewl[0] from test_dissector.py
NC_EWL_TEMPLATE = unhexlify("000000640a400a1c080110022a105050fe126507dc579de7c105a44b8e423204080d1001122026a22e97d17157b42626cd459557049fa4ec5fbe82bd630a57a63ca11bc3debe12205d26af2adf1465ab60dad860c1e657552dc8354d764ffb4e76390e265d209dc9")
# NOTE: I'm using r_wl[0] from test_dissector.py
NC_WL_TEMPLATE = unhexlify("0a1a0801121608042a120801120e08051a0a0a04c0a8016410bfa1021001")
NC_WL_LEN = 30
# NOTE: Only with star I'm using _eha from test_dissector.py
NC_EHA_TEMPLATE = unhexlify("000000940a700a1c080110022a108f8aa8deca54eeb67fc67b5faf86702d3204080d100112504243cfb47cd8afd1b44783125d582f2a199c0c41d566a15790a4c0edfcbf46417b54d435650acbd2dc8b3c7a3c2d396f2ddf3383702c3fa8986968f57027c99f013c14e2f8c84433d57e53b090a047dd1220163b92bf47027294c8b4495dbf3583544d0f16e72db7e74b3c9c6573817be5e9")
# NOTE: I'm using ha from test_dissector.py
NC_HA_TEMPLATE = unhexlify("0a400801123c08042a3808011234080312300a1c72724f5575454e484a4a5f6f725151475f4376397270356554654848120c694e506d3343477655654e4c18bbac031003")
NC_HA_TEMPLATE = unhexlify("0a400801123c08042a3808011234080312300a1c53676144536f6c30586e61314e305f6e5f61424a632d71315243624e120c6d4c314231684c6a54726d3318b1dc031003")
NC_HA_LEN = 68 # hostapd
# NOTE: I'm using r_wd from test_dissector.py
NC_EWD_TEMPLATE = unhexlify("000000840a600a1c080110022a103f4c96b14cf8c5fa7ba1b70325b7bd833204080d100112404a380fbe38f5339dd6e6f0ed711a34d30288c51c3ec414f32c6da2513f431861af0fd45f2c794585f0bc4959cc00178dd8cf04adc9314f0733b5fa9c29f307cb12205f8a0414fc13aecbbe258037ae7357c0d4a9ad90f69d190cd8b1d23129d6e698")
NC_WD_TEMPLATE = unhexlify("0a350801123108042a2d08011229080312250a154449524543542d4b772d4d6f746f47335f653635371208533054627633333918dbe8021001")
NC_WD_LEN = 57 # direct
# NOTE: I'm using r_sh[0] from test_dissector.py
NC_SH_TEMPLATE = unhexlify("0a0a0801120608042a0208021004")
NC_SH_LEN = 14
# NOTE: I'm using r_sh2[0] from test_dissector.py
NC_SH2_TEMPLATE = unhexlify("0a0a0801120608042a0208031004")
NC_SH2_LEN = 14
# NOTE: I'm using r_iw[0] from test_dissector.py
NC_IW_TEMPLATE = unhexlify("000000120801120e08042a0a08041a060a0444796831")
NC_IW_LEN = 22
# NOTE: I'm using pb[0] from test_dissector.py
NC_PAY_TEMPLATE = unhexlify("000000740a500a1c080110022a10354cba65f04068c8994b8bef204d37be3204080d100112302be4098b30f50d412a89b9399a816fca5f65759f9a21ed80930cd65a6ca7415fa354a9fc71ba2349e0df2fd50e3a74e912208438763733cbdef7730544cf8f162f7ba01b4a97f6e54d8c563cd2b8b5b518c8")
# NOTE: I'm using pb[1] from test_dissector.py
NC_PT_TEMPLATE = unhexlify("0a27080112230803221f0801120f0894aac89788fbd48a9001100118041a0a080010001a0473656e741002")
# NOTE: I'm using pb[2] from test_dissector.py
NC_PAY2_TEMPLATE = unhexlify("000000740a500a1c080110022a1000bce4bb44be88ec622445dd7ec27e503204080d10011230e13e2c8e9be9a79ee82c782e3c91eb92a0cef4805b056aac5f9e42d95437321f77ac93167b8213126915d419d2b8e7df122068ec513e3464ae1860de5b6d908162fbbfbe66d03907995c84cf12c62448c382")
# NOTE: I'm using pb[3] from test_dissector.py
NC_PT2_TEMPLATE = unhexlify("0a210801121d080322190801120f0894aac89788fbd48a9001100118041a04080110041003")
NC_KA_PRE_CON = unhexlify("000000080801120408053200")
NC_KA_LEN = 12
NC_KA_TEMPLATE = unhexlify("0a0808011204080532001001")
assert(len(NC_KA_TEMPLATE) == NC_KA_LEN)
NC_KA_TEMPLATE_STAR = unhexlify("0a0808011204080532001001")
NC_ACCEPT_CON = unhexlify("0000000a0801120608021a020800")
NC_REJECT_CON = unhexlify("0000000b0801120708021a0308c43e")
NC_HMAC_AES_I1 = unhexlify("cede08e7109e75b132839139a9e9c3d6478a1d347fa803742ba8aa558707becd")
NC_HMAC_AES_O1 = unhexlify("1aff4359a25958237e2dcd242bcb0a0110550616c58f432c8d2c9cb404a7d6c8")
NC_HMAC_AES_O2 = unhexlify("2bfcbd6fabf5a8725e33791a4420a6e5dcb7396780c8d104c457dde51dfa1dd5")
NC_HMAC_K1 = unhexlify("bf9d2a53c63616d75db0a7165b91c1ef73e537f2427405fa23610a4be657642e")
NC_HMAC_I1 = unhexlify("cede08e7109e75b132839139a9e9c3d6478a1d347fa803742ba8aa558707becd")
NC_HMAC_O1 = unhexlify("1aff4359a25958237e2dcd242bcb0a0110550616c58f432c8d2c9cb404a7d6c8")
# NOTE: HMAC_I3 = HEAD | iv | FOOT | ct
NC_HMAC_O2 = unhexlify("16458174defe48964ccdded8849392c0ee39143a8a8257a8246b0f3761cbfd30")
# NOTE: used for enc, dec and mac
NC_KEY = unhexlify("bf9d2a53c63616d75db0a7165b91c1ef73e537f2427405fa23610a4be657642e")
# NOTE: KDF
NC_KDF_KEY = unhexlify("82aa55a0d397f88346ca1cee8d3909b95f13fa7deb1d4ab38376b8256da85510")
NC_STR_SecureMessage = unhexlify("5365637572654d657373616765")
NC_STR_UKEY2v1auth = unhexlify("554B4559322076312061757468")
NC_STR_UKEY2v1next = unhexlify("554b455932207631206e657874")
# NOTE: see trace-nc.md
NC_KDF_INP_HEAD = '080212830108011220'
NC_KDF_SEP = '1a4408641240'
NC_KDF_AES_STR = '22174145535f3235365f4342432d484d41435f534841323536' # "AES_256_CBC-HMAC_SHA256"
NC_KDF_INP_FOOT = '0803127008011220768447b83e627078a8193ff80ae8d59e8f4b71fc6d978a67cac88844f70d31dc18642248080112440a206b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c29612204fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5'
NC_STR_CLIENT = unhexlify("636c69656e74")
NC_STR_SERVER = unhexlify("736572766572")
NC_MAC_PRE_IV = "0a1c080110022a10"
NC_PT_TYPES = [
# NOTE: encrypted
'eka',
'ewd',
'eha',
'esh', 'esh2',
'ewl',
'pay', 'pay2',
# NOTE: not encrypted
'kep1', 'kep2', 'kep3', 'kep4',
'iw',
]
NC_MAC_POST_IV = {
'eka': "3204080d10011210", # 88 Bytes
'esh': "3204080d10011210", # 88 Bytes
'esh2': "3204080d10011210", # 88 Bytes
'ewl': "3204080d10011220", # 104 Bytes
'pay': "3204080d10011230", # 120 Bytes
'pay2': "3204080d10011230", # 120 Bytes
'ewd': "3204080d10011240", # 136 Bytes
'eha': "3204080d10011250", # 152 Bytes
}
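# Illustrative sketch (assumption, not part of the original constants): based on
# the "HMAC_I3 = HEAD | iv | FOOT | ct" note above, the MAC of an encrypted
# packet appears to be computed over the pre-IV header, the IV, the per-type
# post-IV bytes and the ciphertext, keyed with NC_KEY.
def _example_mac(iv, ct, pkt_type='eka'):
    import hashlib
    import hmac
    mac_input = (unhexlify(NC_MAC_PRE_IV) + iv
                 + unhexlify(NC_MAC_POST_IV[pkt_type]) + ct)
    return hmac.new(NC_KEY, mac_input, hashlib.sha256).digest()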
# NOTE: CIPHER
NC_STR_SIG1 = unhexlify("5349473a31")
NC_STR_ENC2 = unhexlify("454e433a32")
BTNAME_SEP = "AAAAAAAAA"
WIFI_MODES = [ 'hostapd', 'direct' ]
|
StarcoderdataPython
|
126617
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GLib, Gio
from matplotlib.backends.backend_gtk3agg import (
    FigureCanvasGTK3Agg as FigureCanvas)
from matplotlib.figure import Figure
import numpy as np
import time
import threading
import serial
# from pyfirmata import Arduino, util
from stepper import StepperMotor
import glob
import sys
# TODO: Take constants into a separate file.
MAGNETOMETER = "Magnetometer"
ACCELEROMETER = "Accelerometer"
GYROSCOPE = "Gyroscope"
class AppWindow(Gtk.ApplicationWindow):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.timer = None
self.set_border_width(10)
hpaned = Gtk.Paned.new(Gtk.Orientation.HORIZONTAL)
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
# Label Serial Port
serial_port_label = Gtk.Label.new("Serial Port:")
vbox.pack_start(serial_port_label, False, True, 0)
# Combobox Serial Port
ports = self.getSerialPorts()
port_combobox = Gtk.ComboBoxText()
port_combobox.set_entry_text_column(0)
port_combobox.connect("changed", self.on_port_change)
for port in ports:
port_combobox.append_text(str(port))
port_combobox.set_active(0)
self.port = str(port_combobox.get_active_text())
vbox.pack_start(port_combobox, False, False, 0)
# Label Samples
samples_label = Gtk.Label.new("Samples: ")
vbox.pack_start(samples_label, False, False, 0)
# Spinbox samples
samples_spin = Gtk.SpinButton.new_with_range(1, 1000, 10)
samples_spin.set_digits(0)
samples_spin.connect("value-changed", self.on_samples_changed)
vbox.pack_start(samples_spin, False, False, 0)
# Label Sensor Reading
serial_port_label = Gtk.Label.new("MPU sensor to be read:")
vbox.pack_start(serial_port_label, False, True, 0)
# Combobox Serial Port
sensor_options = [ACCELEROMETER, GYROSCOPE, MAGNETOMETER] # MPU options
sensor_combobox = Gtk.ComboBoxText()
sensor_combobox.set_entry_text_column(0)
sensor_combobox.connect("changed", self.on_sensor_option_change)
for option in sensor_options:
sensor_combobox.append_text(str(option))
sensor_combobox.set_active(2)
vbox.pack_start(sensor_combobox, False, False, 0)
# Button Start
self.start_button = Gtk.Button.new_with_label("Start")
self.start_button.connect("clicked", self.on_button_start)
vbox.pack_start(self.start_button, False, False, 0)
# Button Stop
self.stop_button = Gtk.Button.new_with_label("Stop")
self.stop_button.connect("clicked", self.on_button_stop)
vbox.pack_start(self.stop_button, False, False, 0)
# Button Save
self.save_button = Gtk.Button.new_with_label("Save")
self.save_button.connect("clicked", self.on_button_save)
vbox.pack_start(self.save_button, False, False, 0)
# Button Calibration
self.stepper_motor_button = Gtk.Button.new_with_label("Stepper Routine")
self.stepper_motor_button.connect("clicked", self.on_button_calibrate)
vbox.pack_start(self.stepper_motor_button, False, False, 0)
# Button Calibration
self.calibrate_button = Gtk.Button.new_with_label("Calibrate")
self.calibrate_button.connect("clicked", self.on_button_calibrate)
vbox.pack_start(self.calibrate_button, False, False, 0)
hpaned.add1(vbox)
# App vars initialization
self.current_sensor = str(sensor_combobox.get_active_text())
self.logic_level = 5.0
# self.baud_rate = 9600
self.baud_rate = 115200
self.board_resolution = 1023
self.samples = 0
self.micro_board = None
self.time_interval = 0.050 # seconds (s)
self.values = []
# Example sine wave plot on init
self.fig = Figure(figsize=(5, 4), dpi=100)
self.ax = self.fig.add_subplot(111)
self.x = np.arange(0.0, 3.0, 0.015)
        self.y = (self.logic_level / 2) + (self.logic_level / 2) * \
            np.sin(2*np.pi*self.x)
self.ax.plot(self.x, self.y, 'C1o--')
self.ax.set_xlabel("Time (s)")
self.ax.set_ylabel("Voltage (V)")
self.ax.grid(visible=True)
self.ax.set_title(f"Sample Graph")
# Add Graph to Canvas
self.canvas = FigureCanvas(self.fig)
self.canvas.set_size_request(300, 250)
hpaned.add2(self.canvas)
self.add(hpaned)
self.set_size_request(800, 600)
self.show_all()
def draw(self, x, y):
self.ax.clear()
self.ax.plot(x, y, 'C1o--')
self.ax.set_xlabel("x")
self.ax.set_ylabel("y")
self.ax.grid(visible=True)
self.ax.set_title(f"{self.current_sensor} reading.")
self.canvas.draw()
""" draw_magnetometer()
    Receives numpy arrays X and Y to plot the ellipse read by the MPU magnetometer
on the X and Y axis.
"""
def draw_magnetometer(self, x, y):
self.ax.clear()
self.ax.plot(x, y, 'C1o--')
self.ax.set_xlabel("x")
self.ax.set_ylabel("y")
self.ax.grid(visible=True)
self.ax.set_title(f"Magnetometer reading.")
for i in range(x.size):
xitem = x[i]
yitem = y[i]
#etiqueta = "{:.1f}".format(xitem)
etiqueta = str(i)
self.ax.annotate(etiqueta, (xitem,yitem), textcoords="offset points",xytext=(0,10),ha="center")
# self.ax.set_xlim([-50, 50])
# self.ax.set_ylim([-50, 50])
self.canvas.draw()
""" draw_calibrated_magnetometer()
    Receives numpy arrays X and Y (and calibrated MX, MY) to plot the ellipses read by the MPU magnetometer
on the X and Y axis.
"""
def draw_calibrated_magnetometer(self, x, y, mx ,my):
self.ax.clear()
self.ax.plot(x, y, 'C1o--')
self.ax.plot(mx, my, 'C2o--')
self.ax.set_xlabel("x")
self.ax.set_ylabel("y")
self.ax.grid(visible=True)
self.ax.set_title("Magnetometer calibration")
self.canvas.draw()
""" getSerialPorts()
    Explore the available serial ports and return a list of port name strings.
    Works on Windows, Linux and macOS.
"""
def getSerialPorts(self) -> list:
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port, 9600)
s.close()
result.append(port)
except:
pass
return result
""" on_port_change()
Updates the serial port when the combobox changes.
"""
def on_port_change(self, combo):
available_port = str(combo.get_active_text())
if available_port != None:
self.port = available_port
else:
self.port = None
self.on_no_port_available(self)
""" on_no_port_available()
    Shows a pop-up window with an error message when no serial port is found.
"""
def on_no_port_available(self, widget):
port_dialog = Gtk.MessageDialog(transient_for=self,
flags=0,
message_type=Gtk.MessageType.ERROR,
buttons=Gtk.ButtonsType.OK,
text="No serial port available",
title="Serial Port")
port_dialog.run()
port_dialog.destroy()
""" on_samples_changed()
Updates the amount of samples.
"""
def on_samples_changed(self, samples_spin):
self.samples = samples_spin.get_value_as_int()
def on_sensor_option_change(self, combo):
self.current_sensor = str(combo.get_active_text())
"""on_button_start()
Start button starts a async thread that executes the get_time()
method to read from the arduino board.
"""
def on_button_start(self, widget):
print("Start")
self.stepper_routine_thread = threading.Thread(target=self.stepper_routine)
self.stepper_routine_thread.daemon = True
self.timer = threading.Thread(target=self.get_time)
self.event = threading.Event()
self.timer.daemon = True
self.stepper_routine_thread.start()
self.timer.start()
def stepper_routine(self):
stepper = StepperMotor()
stepper.routine()
"""get_time()
This method reads the serial port from the arduino. It stores data in a Numpy
array t for time, and v for value read.
"""
def get_time(self):
time_value = value = count = 0
self.x = np.array([])
self.y = np.array([])
self.start_button.hide()
self.save_button.hide()
self.stop_button.show()
self.calibrate_button.hide()
take_data = False
if self.micro_board != None:
print("Closing board before init")
self.micro_board.close()
        # Initialize the serial connection if there's a valid serial port selected
if self.port != None:
try:
print("Opening Serial Comm on port:", self.port)
# Serial initialization
self.micro_board = serial.Serial(
str(self.port), self.baud_rate, timeout=1)
time.sleep(1)
# Reset Buffer
self.micro_board.reset_input_buffer()
                # Reading flag set to true
take_data = True
except:
if not self.event.is_set():
print("Stop")
# Stop thread
self.event.set()
self.timer = None
GLib.idle_add(self.on_failed_connection)
take_data = False
else:
print("No serial port available. Restart.")
# Serial port reading when reading flag is true.
if take_data:
if time_value == 0:
if self.current_sensor == MAGNETOMETER:
# stepper = StepperMotor()
# stepper.routine()
print("X (mT) \t Y (mT) \t Magnetometer")
elif self.current_sensor == ACCELEROMETER:
print("X (mss) \t Y (mss) \t Accelerometer")
elif self.current_sensor == GYROSCOPE:
print("X (rad) \t Y (rad) \t Gyroscope")
else:
print("X () \t Y ()")
while not self.event.is_set():
# Stop when we get to the samples amount limit.
if count >= self.samples:
print("Sampling completed - Stoping...")
# Stop thread
self.event.set()
# Reset timer
self.timer = None
# Close Serial connection
if self.micro_board != None:
self.micro_board.reset_input_buffer()
self.micro_board.close()
break
try:
# Read serial port and decode.
temp = str(self.micro_board.readline().decode('cp437'))
temp = temp.replace("\n", "")
mpu_reading = temp.split(",")
# Append reading into app graph vars
if self.current_sensor == MAGNETOMETER:
# XY Plane
print(mpu_reading[6], mpu_reading[7])
self.x = np.append(self.x, float(mpu_reading[6]))
self.y = np.append(self.y, float(mpu_reading[7]))
elif self.current_sensor == GYROSCOPE:
# XY Plane
print(mpu_reading[3], mpu_reading[4])
self.x = np.append(self.x, float(mpu_reading[4]))
self.y = np.append(self.y, float(mpu_reading[3]))
elif self.current_sensor == ACCELEROMETER:
# XY Plane
print(mpu_reading[0], mpu_reading[1])
self.x = np.append(self.x, float(mpu_reading[0]))
self.y = np.append(self.y, float(mpu_reading[1]))
except Exception as e:
print("Cannot make reading. //", e)
pass
# Reading delay
time.sleep(self.time_interval)
# Current sample count increase
count += 1
# Update time by our time interval
time_value += self.time_interval
time.sleep(0.5)
# Draw reading after completed sampling.
if self.current_sensor == MAGNETOMETER:
self.draw_magnetometer(self.x, self.y)
elif self.current_sensor == ACCELEROMETER:
self.draw(self.x, self.y)
elif self.current_sensor == GYROSCOPE:
self.draw(self.x, self.y)
        # Show buttons after sampling is completed
self.start_button.show()
self.save_button.show()
self.stop_button.hide()
if self.current_sensor == MAGNETOMETER:
self.calibrate_button.show()
else:
self.calibrate_button.hide()
""" on_faild_connection()
Shows an pop up window with an error message when the initilization connection with the board failed.
"""
def on_faild_connection(self):
print("Failed Connection")
failed_connection_dialog = Gtk.MessageDialog(transient_for=self,
flags=0,
message_type=Gtk.MessageType.ERROR,
text="Board communication error. No data will be taken",
title="Serial Error")
failed_connection_dialog.run()
failed_connection_dialog.destroy()
def on_button_stop(self, widget):
print("Stop Button")
self.event.set()
self.timer = None
if self.micro_board != None:
self.micro_board.reset_input_buffer()
self.micro_board.close()
def on_button_save(self, widget):
print("Save Button")
self.save_button.hide()
self.start_button.hide()
save_dialog = Gtk.FileChooserDialog(
title="Save file as...", parent=self, action=Gtk.FileChooserAction.SAVE)
save_dialog.add_buttons(Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE,
Gtk.ResponseType.OK)
filter_csv = Gtk.FileFilter()
filter_csv.add_pattern("*.CSV")
filter_csv.set_name("CSV")
save_dialog.add_filter(filter_csv)
response = save_dialog.run()
# self.values.append(str(time_value) +
# "," + "{0:.4f}".format(value))
if response == Gtk.ResponseType.OK:
filename = save_dialog.get_filename()
if not filename.endswith(".csv"):
filename += ".csv"
new_file = open(filename, 'w')
new_file.write("Time(s),Voltage(V)" + "\n")
for i in range(self.x.size):
# Write Magnetometer reading from memory
new_file.write("{0:.4f}".format(self.x[i]) + "," + "{0:.4f}".format(self.y[i]) + "\n")
# new_file.write(self.values[i] + "\n")
new_file.close()
save_dialog.destroy()
self.start_button.show()
self.save_button.show()
def on_button_calibrate(self, widget):
print("Calibrate button")
        if self.x.size == 0 or self.y.size == 0:
print("Unable to make calibration. No data or data corrupted.")
return
mx,my = self.getMagnetometerCalibrationValues(self.x, self.y)
self.draw_calibrated_magnetometer(self.x, self.y, mx, my)
def getMagnetometerCalibrationValues(self, x, y):
x_sf, y_sf, x_off, y_off = self.getMagnetometerCalibrationParameters(x, y)
print(f"x_sf = {x_sf}, y_sf = {y_sf}, x_off = {x_off}, y_off = {y_off}")
mx = np.array([])
my = np.array([])
for x_i, y_i in np.nditer([x, y]):
mx_i = x_sf * x_i + x_off
my_i = y_sf * y_i + y_off
mx = np.append(mx, mx_i)
my = np.append(my, my_i)
return mx, my
def getMagnetometerCalibrationParameters(self, x, y):
x_min = x.min()
x_max = x.max()
y_min = y.min()
y_max = y.max()
# Scale Factor
x_sf = (y_max - y_min) / (x_max - x_min)
y_sf = (x_max - x_min) / (y_max - y_min)
if x_sf <= 1:
x_sf = 1
if y_sf <= 1:
y_sf = 1
# Offset
x_off = ((x_max - x_min) / 2 - x_max) * x_sf
y_off = ((y_max - y_min) / 2 - y_max) * y_sf
return x_sf, y_sf, x_off, y_off
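    # Worked example (illustrative, not part of the original app): for synthetic
    # readings with x in [2, 6] and y in [-1, 1]:
    #   x_sf = (1 - (-1)) / (6 - 2) = 0.5 -> clamped to 1
    #   y_sf = (6 - 2) / (1 - (-1)) = 2
    #   x_off = ((6 - 2) / 2 - 6) * 1 = -4
    #   y_off = ((1 - (-1)) / 2 - 1) * 2 = 0
    # so mx = x - 4 maps [2, 6] to [-2, 2] and my = 2 * y maps [-1, 1] to [-2, 2],
    # i.e. the hard-iron offset is removed and both axes get a comparable scale.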
class Application(Gtk.Application):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.window = None
def do_activate(self):
if not self.window:
self.window = AppWindow(
application=self, title="Single Point Measurement - PyGtk")
self.window.show_all()
self.window.save_button.hide()
self.window.stop_button.hide()
self.window.calibrate_button.hide()
self.window.present()
def do_shutdown(self):
if self.window.micro_board != None:
try:
                self.window.micro_board.close()
except:
pass
print("Byeee")
Gtk.Application.do_shutdown(self)
if self.window:
self.window.destroy()
if __name__ == "__main__":
app = Application()
app.run(sys.argv)
|
StarcoderdataPython
|
1634556
|
from warcsigner.warcsigner import RSASigner, sign_cli, verify_cli
from pytest import raises
import shutil
import os
import tempfile
from io import BytesIO
def abs_path(filename):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)
TEST_WARC = abs_path('test_warc.warc.gz')
TEMP_SIGNED_WARC = abs_path('test_warc.warc.gz.signed')
EMPTY_FILE = abs_path('empty.warc.gz')
PRIVATE_KEY = abs_path('test_private_key.pem')
PUBLIC_KEY = abs_path('test_public_key.pem')
PUBLIC_WRONG_KEY = abs_path('test_wrong_key.pem')
class TestWarcSigner(object):
def setup(self):
self.signer = RSASigner(private_key_file=PRIVATE_KEY,
public_key_file=PUBLIC_KEY)
self.wrong_signer = RSASigner(public_key_file=PUBLIC_WRONG_KEY)
def test_sign_verify_remove(self):
shutil.copyfile(TEST_WARC, TEMP_SIGNED_WARC)
# save size
orig_size = os.path.getsize(TEMP_SIGNED_WARC)
assert self.signer.sign(TEMP_SIGNED_WARC) == True
# verify signed
assert self.signer.verify(TEMP_SIGNED_WARC) == True
# verify against wrong key
assert self.wrong_signer.verify(TEMP_SIGNED_WARC) == False
# signature added to warc size
assert os.path.getsize(TEMP_SIGNED_WARC) > orig_size
# verify and remove sig
assert self.signer.verify(TEMP_SIGNED_WARC, remove=True) == True
# should no longer be signed
assert self.signer.verify(TEMP_SIGNED_WARC) == False
# should be back to original size
assert os.path.getsize(TEMP_SIGNED_WARC) == orig_size
os.remove(TEMP_SIGNED_WARC)
# original never signed
assert self.signer.verify(TEST_WARC) == False
def test_cli_sign(self):
shutil.copyfile(TEST_WARC, TEMP_SIGNED_WARC)
assert sign_cli([PRIVATE_KEY, TEMP_SIGNED_WARC]) == 0
assert verify_cli([PUBLIC_KEY, TEMP_SIGNED_WARC]) == 0
# wrong key
assert verify_cli([PUBLIC_WRONG_KEY, TEMP_SIGNED_WARC]) == 1
# not signed
assert verify_cli([PUBLIC_KEY, TEST_WARC]) == 1
os.remove(TEMP_SIGNED_WARC)
def test_empty_sign(self):
open(EMPTY_FILE, 'w').close()
# not signed
assert self.signer.verify(EMPTY_FILE) == False
# sign
assert self.signer.sign(EMPTY_FILE) == True
# verify signed
assert self.signer.verify(EMPTY_FILE) == True
os.remove(EMPTY_FILE)
# non-existent file
assert self.signer.sign(EMPTY_FILE) == False
assert self.signer.verify(EMPTY_FILE) == False
assert sign_cli([PRIVATE_KEY, EMPTY_FILE]) == 1
assert verify_cli([PUBLIC_KEY, EMPTY_FILE]) == 1
def test_stream(self):
with tempfile.TemporaryFile() as temp:
temp.write('ABC')
assert self.signer.sign(temp) == True
assert self.signer.verify(temp) == True
def test_stream_noseek(self):
with tempfile.TemporaryFile() as temp:
temp.write('ABCDEF')
assert self.signer.sign(temp) == True
# compute size and reset
temp.seek(0, 2)
total_len = temp.tell()
# read unsigned stream
temp.seek(0)
uns = self.signer.get_unsigned_stream(temp, total_len=total_len)
buff = BytesIO()
buff.write(uns.read())
buff.write(uns.read())
assert uns.read() == ''
assert buff.getvalue() == 'ABCDEF'
# no seeking in verify
temp.seek(0)
assert self.signer.verify(temp, size=total_len) == True
# unsigned portion
temp.seek(0)
assert self.signer.verify(temp, size=6) == False
# wrong key
temp.seek(0)
assert self.wrong_signer.verify(temp, size=total_len) == False
# incorrect name
temp.seek(0)
assert self.signer.verify(temp, size=total_len,
hash_type='SHA-256') == False
# modify stream
temp.seek(0)
temp.write('X')
temp.seek(0)
assert self.signer.verify(temp, size=total_len) == False
def test_unsigned_stream_noseek(self):
with tempfile.TemporaryFile() as temp:
temp.write('ABCDEF' * 30)
# compute size and reset
temp.seek(0, 2)
total_len = temp.tell()
# read unsigned stream
temp.seek(0)
uns = self.signer.get_unsigned_stream(temp, total_len=total_len)
buff = BytesIO()
buff.write(uns.read())
buff.write(uns.read())
assert uns.read() == ''
assert buff.getvalue() == ('ABCDEF' * 30)
|
StarcoderdataPython
|
3299505
|
#!/usr/bin/env python
"""ADC Demo
This demo initializes pin 0 as an analog input and illustrates using the
analog_voltage_changed event to notify user code (in this case, a lambda
expression) when the voltage at the pin exceeds its threshold.
This script runs until it is terminated or [ENTER] is pressed, at which point
board.disconnect() is called.
"""
from treehopper.api import find_boards, PinMode
board = find_boards()[0]
board.connect()
pin = board.pins[0]
pin.mode = PinMode.AnalogInput
pin.analog_voltage_changed += lambda sender, value: print(value)
input("Press [ENTER] to stop and disconnect\n")
board.disconnect()
|
StarcoderdataPython
|
3364810
|
# xpyBuild - eXtensible Python-based Build System
#
# This module holds definitions that are used throughout the build system, and
# typically all names from this module will be imported.
#
# Copyright (c) 2013 - 2017, 2019 Software AG, Darmstadt, Germany and/or its licensors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Id: buildcommon.py 301527 2017-02-06 15:31:43Z matj $
#
"""
Contains standard functionality for use in build files such as `xpybuild.buildcommon.include`, useful constants such as `xpybuild.buildcommon.IS_WINDOWS` and
functionality for adding prefixes/suffixes to paths such as `xpybuild.buildcommon.FilenameStringFormatter`.
"""
import traceback, os, sys, locale, inspect, io
import re
import platform
import logging
# do NOT define a 'log' variable here or targets will use it by mistake
def __getXpybuildVersion():
try:
with open(os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), "XPYBUILD_VERSION")) as f:
return f.read().strip()
except Exception:
raise
return "<unknown>"
XPYBUILD_VERSION: str = __getXpybuildVersion()
"""The current xpybuild version."""
def include(file):
""" Parse and register the targets and properties in in the specified
``XXX.xpybuild.py`` file.
Targets should only be defined in files included using this method,
not using python import statements.
@param file: a path relative to the directory containing this file.
"""
from xpybuild.buildcontext import getBuildInitializationContext
from xpybuild.utils.buildfilelocation import BuildFileLocation
file = getBuildInitializationContext().expandPropertyValues(file)
assert file.endswith('.xpybuild.py') # enforce recommended naming convention
filepath = getBuildInitializationContext().getFullPath(file, os.path.dirname(BuildFileLocation._currentBuildFile[-1]))
BuildFileLocation._currentBuildFile.append(filepath) # add to stack of files being parsed
namespace = {}
exec(compile(open(filepath, "rb").read(), filepath, 'exec'), namespace, namespace)
del BuildFileLocation._currentBuildFile[-1]
return namespace
IS_WINDOWS: bool = platform.system()=='Windows'
""" A boolean that specifies whether this is Windows or some other operating system. """
# (we won't want constants for every possible OS here, but since there is so much conditionalization between
# windows and unix-based systems, much of it on the critical path, it is worthwhile having a constant for this).
if IS_WINDOWS:
def isWindows():
""" Returns True if this is a windows platform.
@deprecated: Use the `IS_WINDOWS` constant instead.
"""
return True
else:
def isWindows():
""" Returns True if this is a windows platform.
@deprecated: Use the `IS_WINDOWS` constant instead.
"""
return False
def defineAtomicTargetGroup(*targets):
""" The given targets must all be built before anything which depends on any of those targets.
Returns the flattened list of targets.
"""
from xpybuild.buildcontext import getBuildInitializationContext
targets = flatten(targets)
getBuildInitializationContext().defineAtomicTargetGroup(targets)
return targets
def requireXpybuildVersion(version: str):
""" Checks that this xpybuild is at least a certain version number. """
from xpybuild.utils.stringutils import compareVersions
if compareVersions(XPYBUILD_VERSION, version) < 0: raise Exception("This build file requires xpyBuild at least version "+version+" but this is xpyBuild "+XPYBUILD_VERSION)
requireXpyBuildVersion = requireXpybuildVersion
"""
.. private:: Old name for compatibility.
Use requireXpybuildVersion instead.
"""
def registerPreBuildCheck(fn):
""" Defines a check which will be called after any clean but before any build actions take place.
fn should be a functor that takes a context and raises a BuildException if the check fails. """
from buildcontext import getBuildInitializationContext
getBuildInitializationContext().registerPreBuildCheck(fn)
class StringFormatter(object):
""" A simple named functor for applying a ``%s``-style string format, useful
in situations where a function is needed to add a suffix/prefix for the
value of an option.
"""
def __init__(self, formatstring):
self.fmt = formatstring
def __repr__(self):
return 'StringFormatter<"%s">'%self.fmt
def __call__(self, *args, **kwargs):
assert not kwargs
assert len(args)==1
return self.fmt % args[0]
class FilenameStringFormatter(object):
""" A simple named functor for applying a ``%s``-style string format.
Formatter is just applied to the basename part of the filename,
the dirname part is preserved as-is.
"""
def __init__(self, formatstring):
self.fmt = formatstring
def __repr__(self):
return 'FilenameStringFormatter<"%s">'%self.fmt
def __call__(self, *args, **kwargs):
assert not kwargs
assert len(args)==1
return os.path.join(os.path.dirname(args[0]), self.fmt % os.path.basename(args[0]))
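# Illustrative usage (not part of the original module):
#   StringFormatter('-I%s')('include')             -> '-Iinclude'
#   FilenameStringFormatter('lib%s.so')('out/foo') -> 'out/libfoo.so'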
import pkgutil
def enableLegacyXpybuildModuleNames():
"""
Adds aliases for pre-3.0 module names e.g. `buildcommon` instead of `xpybuild.buildcommon`, etc.
The old names are deprecated, so this should be used only as a temporary measure.
"""
# must manually import every single module from 'utils' and 'targets';
# although just importing 'utils' appears to work,
# we end up with duplicate packages for the modules underneath it
# (and so may attempt to import and hence define the same option twice)
import xpybuild.utils
import xpybuild.targets
__log = logging.getLogger('xpybuild.buildcommon')
for parentpackage in [xpybuild, xpybuild.utils, xpybuild.targets]:
for _, modulename, ispkg in pkgutil.iter_modules(
path=parentpackage.__path__, prefix=(parentpackage.__name__[len('xpybuild.'):]+'.').lstrip('.')):
__log.debug('enableLegacyXpybuildModuleNames: Importing legacy package name %s', modulename)
if modulename!='buildcommon': # first make sure the original one has been imported
exec(f'import xpybuild.{modulename}', {})
# then define an alias
exec(f'sys.modules["{modulename}"] = sys.modules["xpybuild.{modulename}"]')
assert 'utils.fileutils' in sys.modules, sys.modules # sanity check that it worked
assert 'targets.copy' in sys.modules, sys.modules # sanity check that it worked
# aliases for modules we folded into other modules in v3.0, or moved
exec(f'sys.modules["propertyfunctors"] = sys.modules["xpybuild.propertysupport"]')
exec(f'sys.modules["buildexceptions"] = sys.modules["xpybuild.utils.buildexceptions"]')
xpybuild.targets.touch = sys.modules["xpybuild.targets.writefile"]
exec(f'sys.modules["targets.touch"] = sys.modules["xpybuild.targets.writefile"]')
xpybuild.targets.unpack = sys.modules["xpybuild.targets.archive"]
exec(f'sys.modules["targets.unpack"] = sys.modules["xpybuild.targets.archive"]')
xpybuild.targets.zip = sys.modules["xpybuild.targets.archive"]
exec(f'sys.modules["targets.zip"] = sys.modules["xpybuild.targets.archive"]')
xpybuild.targets.tar = sys.modules["xpybuild.targets.archive"]
exec(f'sys.modules["targets.tar"] = sys.modules["xpybuild.targets.archive"]')
import xpybuild.utils.fileutils
from xpybuild.utils.flatten import flatten
isDirPath = xpybuild.utils.fileutils.isDirPath
"""Returns true if the path is a directory (ends with a slash, ``/`` or ``\\\\``). """
normpath = xpybuild.utils.fileutils.normPath
"""
.. private:: This is deprecated in favour of fileutils.normPath and hidden from documentation to avoid polluting the docs.
"""
|
StarcoderdataPython
|
1773408
|
<filename>youtube/migrations/0004_auto_20201202_1100.py
# Generated by Django 2.2 on 2020-12-02 05:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('youtube', '0003_auto_20181019_1947'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='text',
field=models.TextField(max_length=700),
),
migrations.AlterField(
model_name='video',
name='title',
field=models.CharField(max_length=300),
),
]
|
StarcoderdataPython
|
70196
|
#############################################################
## ##
## Copyright (c) 2003-2017 by The University of Queensland ##
## Centre for Geoscience Computing ##
## http://earth.uq.edu.au/centre-geoscience-computing ##
## ##
## Primary Business: Brisbane, Queensland, Australia ##
## Licensed under the Open Software License version 3.0 ##
## http://www.apache.org/licenses/LICENSE-2.0 ##
## ##
#############################################################
from . import gouge2dbench
|
StarcoderdataPython
|
4836619
|
<reponame>outloudvi/mw2fcitx
# This collation file is for moegirl.org.
# It MIGHT NOT be fit for other wikis.
from ..utils import normalize
def dont_have(string: str, array: list[str]):
for i in array:
if string.find(i) != -1:
return False
return True
def split_and_merge_single(group: list[str], spliter: str):
ret = []
for i in group:
for j in i.split(spliter):
ret.append(j)
return ret
def tweak_remove_char(char):
def cb(words):
return list(map(lambda x: x.replace(char, ""), words))
return cb
def tweak_len_more_than(length):
def cb(words):
return list(filter(lambda x: len(x) > length, words))
return cb
def tweak_remove_word_includes(items):
def cb(words):
return list(filter(lambda x: dont_have(x, items), words))
return cb
def tweak_split_word_with(spliters):
    def cb(items: list[str]):
ret = items
for i in spliters:
tmp = []
for j in split_and_merge_single(ret, i):
tmp.append(j)
ret = tmp
return ret
return cb
def tweak_trim_suffix(suffixes):
    def cb(items: list[str]):
ret = []
for i in items:
for j in suffixes:
i = i.removesuffix(j)
ret.append(i)
return ret
return cb
def tweak_remove_regex(regexes):
from re import compile
compiled_regexes = list(map(compile, regexes))
    def cb(items: list[str]):
ret = items
for rgx in compiled_regexes:
ret = filter(
lambda x, rgx=rgx: not rgx.match(x), ret
)
return list(ret)
return cb
def tweak_normalize(words):
ret = []
for i in words:
ret.append(normalize(i))
return ret
tweaks = [
tweak_remove_word_includes(["○", "〇"]),
tweak_split_word_with(
[":", "/", "(", ")", "(", ")", "【", "】", "『", "』", "/", " ", "!", "!"]),
tweak_len_more_than(1),
tweak_remove_char("·"),
tweak_trim_suffix(["系列", "列表", "对照表"]),
tweak_remove_regex([
"^第.*(次|话)$"
]),
tweak_normalize
]
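# Illustrative sketch (not part of the original collation): the tweaks above are
# applied in order to the raw title list, e.g. via a helper like this.
def _apply_tweaks(titles):
    words = titles
    for tweak in tweaks:
        words = tweak(words)
    return words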
|
StarcoderdataPython
|
3328046
|
import os, os.path
import csv
import math
import numpy
import gzip
from isodist.Isochrone import Isochrone, FEH2Z, Z2FEH, dict2recarray
from isodist.PadovaIsochrone import _DATADIR
_ANZSOLAR= 0.0176
_ZS= [-0.1,-0.2,-0.3,-0.5,-1.,-1.5,-2.,-3.,0.,0.1,0.2,0.4]
class AnIsochrone (Isochrone):
"""Class that represents a An+08 isochrone"""
def __init__(self,Z=None,filters=None,corrected=True):
"""
NAME:
__init__
PURPOSE:
initialize
INPUT:
corrected= if False, use un-corrected isochrones
Z= load only this metallicity (can be list)
OUTPUT:
HISTORY:
2011-08-05 - Written - Bovy (NYU)
BUGS:
Z determination needs to account for dY/dZ
"""
self._filters= ['u','g','r','i','z']
#Read the files
dicts= []
if Z is None: #Z here is actually FeH, we correct this later
ZS= _ZS
else:
if isinstance(Z,(list,numpy.ndarray)):
ZS= Z
else:
ZS= [Z]
for Zm in ZS:
if Zm >= 0.: signstr= 'p'
else: signstr= 'm'
if corrected: corrstr= 'corr'
else: corrstr= 'marcs'
dicts.append(read_an_isochrone(os.path.join(_DATADIR,
'an_isochrones',
signstr+'%03i_' % (int(numpy.fabs(100.*Zm)))
+corrstr+'.txt'),
filters=self._filters))
self._ZS= numpy.array([FEH2Z(z,zsolar=_ANZSOLAR) for z in ZS])
self._dicts= dicts
#Gather ages
self._logages= numpy.array(sorted(list(set(self._dicts[0]['logage']))))
return None
def __call__(self,logage,Z=None,feh=None,afe=None,maxm=None,
asrecarray=False,stage=None):
"""
NAME:
__call__
PURPOSE:
get a single isochrone from the library
INPUT:
logage - log_10 age
           Z= or feh= metallicity (use Z_\odot=0.0176)
afe= None (not supported for An; linear relation between afe and
feh is assumed)
maxm= maximum mass to consider (m_ini)
stage= if set, only show this evolutionary stage (NOT IMPLEMENTED FOR AN)
KEYWORDS:
asrecarray= if True, return recarray, otherwise dict
OUTPUT:
isochrone
HISTORY:
2011-08-04 - Written - Bovy (NYU)
"""
        if afe is not None:
            raise NotImplementedError("'afe=' not implemented for An isochrones")
        if feh is not None:
Z= 10.**(feh+math.log10(_ANZSOLAR))
indx= (self._ZS == Z)
ii= 0
while (ii < len(self._dicts) and not indx[ii]): ii+= 1
if ii == len(self._dicts):
raise IOError("No isochrone found that matches this metallicity")
thisDict= self._dicts[ii]
if maxm is None:
indx= (thisDict['logage'] == logage)
else:
indx= (thisDict['logage'] == logage)*(thisDict['Mass'] < maxm)
if numpy.sum(indx) == 0:
raise IOError("No isochrone found that matches this logage")
outDict= {}
for key in thisDict.keys():
outDict[key]= thisDict[key][indx]
if asrecarray:
return dict2recarray(outDict)
else:
return outDict
def read_an_isochrone(name,filters=None):
"""
NAME:
read_an_isochrone
PURPOSE:
read an An isochrone file
INPUT:
name- name of the file
filters= list of filters in the file
OUTPUT:
dictionary with the table
HISTORY:
2011-08-04 - Written - Bovy (NYU)
"""
dialect= csv.excel
dialect.skipinitialspace=True
if name[-2:] == 'gz':
file= gzip.open(name,'r')
else:
file= open(name,'r')
reader= csv.reader(file,delimiter=' ',
dialect=dialect)
logage=[]
Mass= []
logL= []
logTe= []
logg= []
mbol= []
mags= []
for row in reader:
try:
if row[0][0:4] == 'Mass': #Header line to skip
continue
except IndexError:
pass
try:
if row[0] == 'Cluster': #Header line to extract age from
thislogage= numpy.log10(float(row[4]))
continue
except IndexError:
pass
logage.append(thislogage) #from the header, see above
Mass.append(float(row[0]))
logTe.append(numpy.log10(float(row[1])))
logL.append(float(row[2]))
logg.append(float(row[3]))
mbol.append(float(row[4]))
r= float(row[5])
gr = float(row[6])
gi = float(row[7])
gz = float(row[8])
ug = float(row[9])
mags.append([r+gr+ug, #u
r+gr, #g
r,
-gi+gr+r, #i
-gz+gr+r]) #z
#Load everything into a dictionary
outDict= {}
outDict['logage']= numpy.array(logage)
outDict['Mass']= numpy.array(Mass)
outDict['logL']= numpy.array(logL)
outDict['logTe']= numpy.array(logTe)
outDict['logg']= numpy.array(logg)
outDict['mbol']= numpy.array(mbol)
for ii in range(len(filters)):
thismag= []
for jj in range(len(mags)):
thismag.append(mags[jj][ii])
outDict[filters[ii]]= numpy.array(thismag)
return outDict
|
StarcoderdataPython
|
1775315
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 3 09:34:48 2021
Objective: Yahoo stock price data analysis
@author: Ashish
"""
# load required libraries
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
import numpy as np
from pandas.plotting import scatter_matrix
# load data
start_date = "2010-01-01"
end_date = "2021-08-30"
tcs = yf.download('TCS', start_date)
infy = yf.download('INFY', start_date)
wipro = yf.download('WIPRO.NS', start_date)
# EDA
tcs['Open'].plot(label = 'TCS', figsize = (15,7))
infy['Open'].plot(label = "Infosys")
wipro['Open'].plot(label = 'Wipro')
plt.title('Stock Prices of TCS, Infosys and Wipro')
plt.legend()
plt.show()
tcs['Volume'].plot(label = 'TCS', figsize = (15,7))
infy['Volume'].plot(label = "Infosys")
wipro['Volume'].plot(label = 'Wipro')
plt.title('Volume of Stock traded')
plt.legend()
plt.show()
#Market Capitalisation
tcs['MarktCap'] = tcs['Open'] * tcs['Volume']
infy['MarktCap'] = infy['Open'] * infy['Volume']
wipro['MarktCap'] = wipro['Open'] * wipro['Volume']
tcs['MarktCap'].plot(label = 'TCS', figsize = (15,7))
infy['MarktCap'].plot(label = 'Infosys')
wipro['MarktCap'].plot(label = 'Wipro')
plt.title('Market Cap')
plt.legend()
plt.show()
# Moving Average
tcs['MA50'] = tcs['Open'].rolling(50).mean()
tcs['MA200'] = tcs['Open'].rolling(200).mean()
tcs['Open'].plot(figsize = (15,7))
tcs['MA50'].plot()
tcs['MA200'].plot()
plt.show()
#Volatility
tcs['returns'] = (tcs['Close']/tcs['Close'].shift(1)) -1
infy['returns'] = (infy['Close']/infy['Close'].shift(1))-1
wipro['returns'] = (wipro['Close']/wipro['Close'].shift(1)) - 1
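# Note: the shift-based formula above is equivalent (for gap-free price series)
# to pandas' built-in pct_change(), e.g. tcs['Close'].pct_change().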
tcs['returns'].hist(bins = 100, label = 'TCS', alpha = 0.5, figsize = (15,7))
infy['returns'].hist(bins = 100, label = 'Infosys', alpha = 0.5)
wipro['returns'].hist(bins = 100, label = 'Wipro', alpha = 0.5)
plt.legend()
# Show plots
plt.show()
|
StarcoderdataPython
|
156662
|
# Copyright (c) 2021 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
"""Reporter classes.
Classes implementing presentation, storage and retrieval of major geometric
characteristics of the spatial systems. Provides a unified set of
class methods intended to be used in a similar way.
Two operational modes are possible:
- initial analysis, visualization and export of the results:
R.create(tp, s).pipeline(sp)
- import and visualization of stored analysis results:
R.create(tp, s).restore(p)
where:
'R': the report class name,
'tp': type of the spatial system,
's': flag of interactive visualization,
'sp': list of spatial system instances to be analysed,
'p': path to the stored analysis results.
"""
from __future__ import annotations
import json
from pathlib import Path
from typing import Final, Optional
import numpy as np
import cytoskeleton_analyser.fitting as fit
from ..histograms import Experimental
from ..histograms import Histogram
from ..histograms import Simulated
from ..report import Report
from .spatial_systems import FullDepth
from .spatial_systems import ListOfSpatialSystems
class Features:
"""Classification of reported features
"""
#: Features applicable to both full and sliced cell representations.
common: Final[list[str]] = [
'Lengths3d',
'Lengths2d',
'Curvature3d',
'RadialMass',
'RadialEnds',
'AnglesToRad',
'SegmentNumbers',
]
#: Features applicable to full cell representation alone.
only_full: Final[list[str]] = [
'AgesByNode',
'AgesByFilament',
]
#: Features applicable to sliced cell representation alone.
only_slice: Final[list[str]] = [
'Curvature2dConv',
'Curvature2dMboc17',
]
#: Complete set of implemented features.
all: Final[list[str]] = common + only_full + only_slice
__all__ = all
@staticmethod
def is_common(f: str) -> bool:
"""True if ``f`` belongs to set common to both representations.
:param f: Feature class name.
"""
return any(f == g for g in Features.common)
@staticmethod
def is_full(f: str) -> bool:
"""True if feature ``f`` is applicable to full representation.
:param f: Feature class name.
"""
return any(f == g for g in Features.only_full)
@staticmethod
def is_slice(f: str) -> bool:
"""True if feature ``f`` is applicable to slice representation.
:param f: Feature class name.
"""
return any(f == g for g in Features.only_slice)
@staticmethod
def is_any(f: str) -> bool:
"""True if feature ``f`` is applicable to any representation.
:param f: Feature class name.
"""
return any(f == g for g in Features.all)
@staticmethod
def is_applicable(
f: str,
tp: type[FullDepth],
) -> bool:
"""True if feature ``f`` is applicable to representation ``tp``.
:param f: Feature class name.
:param tp: Feature class name.
"""
return \
Features.is_common(f) or \
tp.type == 'slice' and Features.is_slice(f) or \
tp.type == 'full' and Features.is_full(f)
@staticmethod
def reporter(f: str):
"""Convert feature name ``f`` to corresponding reporter type.
:param f: Feature name.
"""
if not Features.is_any(f):
raise ValueError(f"{f} is not a valid position_feature.")
return globals()[f]
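    # Illustrative note (not part of the original module): the lookup above maps a
    # feature name to the reporter class defined later in this module, e.g.
    # Features.reporter('Lengths3d') is Lengths3d.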
class _Report(Report):
"""Adaptation of report.Report class for spatial systems.
For subclassing specific to reported cytoskeleton attributes.
"""
tp: type[FullDepth] #: Type of the spatial system.
@classmethod
def _create(
cls,
tp: type[FullDepth],
name: str,
show: bool = True,
) -> None:
cls.tp = tp
cls.__create(
tp.logger,
tp.paths.data_out,
name + '_' + tp.type,
show
)
class Lengths3d(_Report):
"""Reports of microtubule lengths in .
"""
#: Part of figure and report titles.
LABEL: Final = '3d filament length in'
@classmethod
def create(
cls,
tp: type[FullDepth],
show: bool = True,
) -> type[Lengths3d]:
super()._create(tp, __class__.__name__, show)
cls.units = tp.len_units
return cls
@classmethod
def report(
cls,
sp: ListOfSpatialSystems,
) -> tuple[Histogram, list, list]:
data = [s.len_total3d for s in sp]
avg, std = cls.tp.print_avgstd(cls.LABEL, data, cls.units)
h = Histogram(
cls.name,
Simulated().initialise(data, cls.fits_sim, dx=0.4, density=True)
)
h.to_csv(cls.path_out)
cls.plot(h)
cls.logger.info('')
return h, [avg], [std]
@classmethod
def plot(
cls,
h: Histogram
) -> None:
h.plot(
cls.LABEL + ' ' + cls.tp.type,
xlabel=f'length ({cls.units})',
xlim=[0., 60.],
save_path=cls.path_out,
show=cls.show
)
@classmethod
def pipeline(
cls,
sp: ListOfSpatialSystems,
) -> None:
cls.fits_sim = [
# (fit.Gamma.loc0, [2., 0.1, 0.]),
# (fit.Weibull.full, [2., 1.]),
# (fit.Rayleigh.f, [1.]),
]
rep = cls.report(sp)
cls.summarize(rep, [0])
class Lengths2d(_Report):
"""Reports lengths of xy projections of the microtubules.
Examines apparent lengths of simulated microtubules and compares
them with experimental data obtained using optical microscopy.
For this purpose, implements experimental sets from microtubule
length measurements with superresolution methods
by Zhang et al. (MBoC 2017)
"""
#: Part of figure and report titles.
LABEL: Final = 'length of filament 2d projections in'
@classmethod
def create(
cls,
tp: type[FullDepth],
show: bool = True,
) -> type[Lengths2d]:
super()._create(tp, __class__.__name__, show)
cls.units = tp.len_units
return cls
@classmethod
def _experimental(
cls,
cell_type: str,
):
import cytoskeleton_analyser.position.empirical_data.mboc17 as mboc17
bc, (contr, ca_ras) = mboc17.length(density=True)
if cell_type == 'RW_Protr':
h = ca_ras
elif cell_type == 'SpreRou':
h = contr
avg = mboc17.avg(bc, h)
cls.logger.info('\nEmpirical length of filament 2d projections in ' +
f'{cls.tp.type}: {avg} {cls.units}')
return bc, h, avg
@classmethod
def report(
cls,
sp: ListOfSpatialSystems,
) -> tuple[Histogram, list, list]:
data = [s.len_total2d for s in sp]
avg, std = cls.tp.print_avgstd(cls.LABEL, data, cls.units)
ct = cls.tp.params['cell'].typename
if cls.tp.type == 'slice' and \
(ct == 'RW_Protr' or
ct == 'SpreRou'):
bc, l, l_avg = cls._experimental(ct)
e = Experimental().initialise((bc, l), cls.fits_exp)
h = Histogram(
cls.name,
Simulated().initialise(
data, cls.fits_sim, dx=0.4, exper_bc=e.bc, density=True),
experimental=e
)
avg, std = [avg, l_avg], [std, np.nan]
else:
h = Histogram(
cls.name,
Simulated().initialise(
data, cls.fits_sim, dx=0.4, density=True),
)
avg, std = [avg], [std]
h.to_csv(cls.path_out)
cls.plot(h)
cls.logger.info('')
return h, avg, std
@classmethod
def plot(
cls,
h: Histogram
) -> None:
h.plot(
cls.LABEL + ' ' + cls.tp.type,
xlabel=f'length ({cls.units})',
xlim=[0., 60.],
save_path=cls.path_out,
show=cls.show
)
@classmethod
def pipeline(
cls,
sp: ListOfSpatialSystems,
) -> None:
best = []
if cls.tp.type == 'full':
cls.fits_sim = [
# (fit.Gamma.loc0, [1., 1, 0.]),
# (fit.Weibull.full, [2., 3.]),
]
best = [0]
if cls.tp.type == 'slice':
# e = fit.Exponential.create()
# p = fit.Exponential.Pars
# tu = cls.units
cls.fits_exp = [
# (e.d_h, p(a=1., tau1=2.), tu),
# (fit.Gamma.loc0, [1., 1, 0.]),
# (fit.Weibull.full, [2., 3.]),
]
cls.fits_sim = [
# (e.d_h, p(a=1., tau1=2.), tu),
# (fit.Gamma.loc0, [1., 1, 0.]),
# (fit.Weibull.full, [2., 3.]),
]
best = [1, 1]
rep = cls.report(sp)
cls.summarize(rep, best)
class RadialMass(_Report):
"""Reports distribution of microtubule masss.
Microtubule mass is analysed as a function of distance to cell
center in xy plane.
"""
#: Part of figure and report titles.
LABEL: Final = 'mass vs distance to center '
@classmethod
def create(
cls,
tp: type[FullDepth],
show: bool = True,
) -> type[RadialMass]:
super()._create(tp, __class__.__name__, show)
cls.units = tp.len_units
return cls
@classmethod
def report(
cls,
sp: ListOfSpatialSystems
) -> tuple[Histogram, list, list]:
data = [np.concatenate(s.center_dist_2d) for s in sp]
avg, std = cls.tp.print_avgstd(cls.LABEL, data, cls.units)
h = Histogram(
cls.name,
Simulated().initialise(
data, fits=cls.fits_sim, dx=0.25, density=True
),
)
h.to_csv(cls.path_out)
cls.plot(h)
cls.logger.info('')
return h, [avg], [std]
@classmethod
def plot(
cls,
h: Histogram
):
h.plot(
cls.LABEL + cls.tp.type,
xlabel=f'length ({cls.units})',
xlim=[0., 30.],
save_path=cls.path_out,
show=cls.show,
)
@classmethod
def pipeline(
cls,
sp: ListOfSpatialSystems,
) -> None:
cls.report(sp)
class RadialEnds(_Report):
"""Reports positions of of microtubule plus ends.
Analyse the distribution of microtubule plus ends as a function
of distance to cell center in xy plane.
"""
#: Part of figure and report titles.
LABEL: Final = 'plus-ends vs distance to center '
@classmethod
def create(
cls,
tp: type[FullDepth],
show: bool = True,
) -> type[RadialEnds]:
super()._create(tp, __class__.__name__, show)
cls.units = tp.len_units
return cls
@classmethod
def report(
cls,
sp: ListOfSpatialSystems,
) -> tuple[Histogram, list, list]:
data = [np.concatenate(s.center_dist_2d_ends) for s in sp]
avg, std = cls.tp.print_avgstd(cls.LABEL, data, cls.units)
h = Histogram(
cls.name,
Simulated().initialise(
data, fits=cls.fits_sim, dx=0.25, density=True)
)
h.to_csv(cls.path_out)
cls.plot(h)
cls.logger.info('')
return h, [avg], [std]
@classmethod
def plot(
cls,
h: Histogram
):
h.plot(
cls.LABEL + cls.tp.type,
xlabel=f'length ({cls.units})',
xlim=[0., 30.],
save_path=cls.path_out,
show=cls.show,
)
@classmethod
def pipeline(
cls,
sp: ListOfSpatialSystems,
) -> None:
cls.report(sp)
class SegmentNumbers(_Report):
"""Reports apparent number of microtubules.
Measure statistics on apparent number of microtubules in full system
and as visible in TIRF microscopy observations.
"""
@classmethod
def create(
cls,
tp: type[FullDepth],
_,
) -> type[SegmentNumbers]:
super()._create(tp, __class__.__name__)
cls.units = 'segments'
return cls
@classmethod
def report(
cls,
sp: ListOfSpatialSystems,
) -> tuple[Optional[Histogram], list, list]:
cls.logger.info(f"Number of filaments in {cls.tp.type}:")
data = np.array([len(s.len_total3d) for s in sp])
[cls.logger.info(f'\t {n}') for n in data]
avg = np.mean(data)
std = np.std(data)
cls.logger.info(f"overall: {avg} ± {std} slices\n")
fname = cls.path_out / f"{cls.name}.json"
with open(fname, 'w') as f:
json.dump({'num': {'avg': avg, 'std': std}}, f)
cls.logger.info('')
return None, [avg], [std]
@classmethod
def restore(
cls,
path: Path
) -> dict:
cls.logger.info('Restoring ' + cls.name + ':')
summary = cls.log_stats(path)
# fname = f"{cls.path_out}{cls.name}.json"
# with open(fname, 'w') as f:
# json.dump({'num': {'avg': avg, 'std': std}}, f)
cls.logger.info('')
return {'summary': summary,
'h_sim_avg': None,
}
@classmethod
def pipeline(
cls,
sp: ListOfSpatialSystems,
) -> None:
cls.report(sp)
class Curvature3d(_Report):
"""Reports 3d curvature of microtubule fibers.
"""
#: Part of figure and report titles.
LABEL: Final = '3d curvature of filaments in'
@classmethod
def create(
cls,
tp: type[FullDepth],
show: bool = True,
) -> type[Curvature3d]:
super()._create(tp, __class__.__name__, show)
cls.units = '1/' + tp.len_units
return cls
@classmethod
def report(
cls,
sp: ListOfSpatialSystems,
) -> tuple[Histogram, list, list]:
data = [s.curv3d for s in sp]
avg, std = cls.tp.print_avgstd(cls.LABEL, data, cls.units)
h = Histogram(
cls.name,
Simulated().initialise(data, cls.fits_sim, dx=0.02, density=True)
)
h.to_csv(cls.path_out)
cls.plot(h)
return h, [avg], [std]
@classmethod
def plot(
cls,
h: Histogram
) -> None:
h.plot(
cls.LABEL + ' ' + cls.tp.type,
xlabel=f'curvature ({cls.units})',
xlim=[0., 1.5],
save_path=cls.path_out,
show=cls.show
)
@classmethod
def pipeline(
cls,
sp: ListOfSpatialSystems,
) -> None:
cls.fits_sim = [
(fit.Rayleigh.f, [1.]),
]
rep = cls.report(sp)
cls.summarize(rep, [0])
class Curvature2dConv(_Report):
"""Reports of apparent curvature of microtubule projections
to xy plane.
"""
#: Part of figure and report titles.
LABEL: Final = '2d curvature of projected filaments in'
@classmethod
def create(
cls,
tp: type[FullDepth],
show: bool = True,
) -> type[Curvature2dConv]:
super()._create(tp, __class__.__name__, show)
cls.units = '1/' + tp.len_units
return cls
@classmethod
def report(
cls,
sp: ListOfSpatialSystems,
) -> tuple[Histogram, list, list]:
data = [s.curv2d for s in sp]
avg, std = cls.tp.print_avgstd(cls.LABEL, data, cls.units)
h = Histogram(
cls.name,
Simulated().initialise(data, cls.fits_sim, dx=0.02, density=True)
)
h.to_csv(cls.path_out)
cls.plot(h)
return h, [avg], [std]
@classmethod
def plot(
cls,
h: Histogram
) -> None:
h.plot(
cls.LABEL + ' ' + cls.tp.type,
xlabel=f'curvature ({cls.units})',
xlim=[0., 1.5],
save_path=cls.path_out,
show=cls.show
)
@classmethod
def pipeline(
cls,
sp: ListOfSpatialSystems,
) -> None:
cls.fits_sim = [
# (fit.Gamma.loc0, [2., 0.1, 0.]),
# (fit.Weibull.full, [2., 1.]),
# (fit.Rayleigh.f, [.5]),
]
rep = cls.report(sp)
cls.summarize(rep, [0])
class Curvature2dMboc17(_Report):
"""Reports microtubule curvatures using formulas applied in the
processing of superresolution images by Zhang et al. MBoC 2017
"""
#: Part of figure and report titles.
LABEL: Final = 'empirical curvature of filament 2d projections in'
@classmethod
def create(
cls,
tp: type[FullDepth],
show: bool = True,
) -> type[Curvature2dMboc17]:
name = __class__.__name__
if tp.type != 'slice':
tp.logger.warning(
                f'WARNING: Analysis of {name} only makes '
                f'sense for Tirf slices.'
)
ct = tp.params['cell'].typename
if ct != 'RW_Protr' and ct != 'SpreRou':
tp.logger.warning(
                f'WARNING: Analysis of {name} only makes '
                f'sense for specific cell types.'
)
super()._create(tp, name, show)
cls.units = '1/' + tp.len_units
return cls
@classmethod
def experimental(
cls,
cell_type: str
) -> tuple[list, Histogram, float]:
import cytoskeleton_analyser.position.empirical_data.mboc17 as zh
bc, (contr, ca_ras) = zh.curvature(density=True)
if cell_type == 'RW_Protr':
h = ca_ras
elif cell_type == 'SpreRou':
h = contr
else:
assert False, 'Wrong Cell Type'
avg = zh.avg(bc, h)
cls.logger.info('\nEmpirical curvature of filament 2d projections in '
+ f'{cls.tp.type}: {avg} {cls.units}')
return bc, h, avg
@classmethod
def report(
cls,
sp: ListOfSpatialSystems,
) -> tuple[Histogram, list, list]:
data = [s.curv2d_mboc17 for s in sp]
avg, std = cls.tp.print_avgstd(cls.LABEL, data, cls.units)
ct = cls.tp.params['cell'].typename
if cls.tp.type == 'slice' and \
(ct == 'RW_Protr' or
ct == 'SpreRou'):
bc, c, c_avg = cls.experimental(ct)
e = Experimental().initialise((bc, c), cls.fits_exp)
h = Histogram(
cls.name,
Simulated().initialise(
data, cls.fits_sim, dx=0.02, exper_bc=e.bc, density=True),
e,
)
avg, std = [avg, c_avg], [std, np.nan]
else:
h = Histogram(
cls.name,
Simulated().initialise(
data, cls.fits_sim, dx=0.02, density=True),
)
avg, std = [avg], [std]
h.to_csv(cls.path_out)
cls.plot(h)
return h, avg, std
@classmethod
def plot(
cls,
h: Histogram
) -> None:
h.plot(
cls.LABEL + ' ' + cls.tp.type,
xlabel=f'curvature ({cls.units})',
xlim=[0., 1.5],
save_path=cls.path_out,
show=cls.show
)
@classmethod
def pipeline(
cls,
sp: ListOfSpatialSystems,
) -> None:
cls.fits_sim = [
# (fit.Gamma.loc0, [2., 0.1, 0.]),
# (fit.Weibull.full, [2., 1.]),
# (fit.Rayleigh.f, [1.]),
]
cls.fits_exp = [
# (fit.Gamma.loc0, [2., 0.1, 0.]),
# (fit.Weibull.full, [2., 1.]),
# (fit.Rayleigh.f, [1.]),
]
rep = cls.report(sp)
cls.summarize(rep, [0, 0])
class AnglesToRad(_Report):
"""Reports distribution of angles between points on the microtubule
and local radial direction.
"""
#: Part of figure and report titles.
LABEL: Final = 'angle to radial direction in'
@classmethod
def create(
cls,
tp: type[FullDepth],
show: bool = True,
) -> type[AnglesToRad]:
super()._create(tp, __class__.__name__, show)
cls.units = 'grad'
cls.is_polar = True
cls.is_halfpolar = True
return cls
@classmethod
def report(
cls,
sp: ListOfSpatialSystems,
) -> tuple[Histogram, list, list]:
data = [s.threshold_radial_dev(cls.tp.params['cell'].
regions.lamella.is_inside)
for s in sp]
data = [np.array([2.*np.pi - d if d >= np.pi else d for d in dd])
for dd in data]
avg, std = cls.tp.print_avgstd(cls.LABEL,
[d/np.pi*180. for d in data], cls.units)
h = Histogram(
cls.name,
Simulated()
.initialise(data, cls.fits_sim, dx=2.*np.pi/180., density=True,
polar=cls.is_polar, halfpolar=cls.is_halfpolar),
)
h.to_csv(cls.path_out)
cls.plot(h)
return h, [avg], [std]
@classmethod
def plot(
cls,
h: Histogram
):
h.plot(
cls.LABEL + ' ' + cls.tp.type,
xlabel=f'angle ({cls.units})',
xlim=[0., 2. * np.pi],
save_path=cls.path_out,
show=cls.show
)
@classmethod
def pipeline(
cls,
sp: ListOfSpatialSystems,
):
cls.fits_sim = [
(fit.VonMisesDouble.full, [0.6, 0.3, 32., 5., np.pi / 3.])
]
rep = cls.report(sp)
cls.summarize(rep, [0])
class AgesByNode(_Report):
"""Report ages (time after the polymerization event) of microtubules
by filament nodes: individual node ages.
"""
#: Part of figure and report titles.
LABEL: Final = 'node ages in'
@classmethod
def create(
cls,
tp: type[FullDepth],
show: bool = True,
) -> type[AgesByNode]:
super()._create(tp, __class__.__name__, show)
cls.units = 'sec'
return cls
@classmethod
def report(
cls,
sp: ListOfSpatialSystems,
) -> tuple[Histogram, list, list]:
data = [s.ages_cumulative for s in sp]
avg, std = cls.tp.print_avgstd(cls.LABEL, data, cls.units)
h = Histogram(
cls.name,
Simulated().initialise(data, cls.fits_sim, dx=10., density=True),
)
h.to_csv(cls.path_out)
cls.plot(h)
return h, [avg], [std]
@classmethod
def plot(
cls,
h: Histogram
) -> None:
h.plot(
cls.LABEL + ' ' + cls.tp.type,
xlabel=f'age ({cls.units})',
xlim=[0., 10000],
yscale='log',
save_path=cls.path_out,
show=cls.show
)
@classmethod
def pipeline(
cls,
sp: ListOfSpatialSystems,
) -> None:
e = fit.Exponential.create()
p = fit.Exponential.Pars
tu = cls.units
a = 1.
tau1 = 10
tau2 = 1000
cls.fits_sim = [
(e.d_h, p(a=a, tau1=tau1), tu),
(e.d_d_h, p(a=a, tau1=tau1, b=0.9, tau2=tau2), tu),
]
rep = cls.report(sp)
cls.summarize(rep, [1])
class AgesByFilament(_Report):
"""Report ages of microtubules.
Age is defined as time passed after the polymerization event.
    Here, node-age averages over all filament nodes are considered.
"""
#: Part of figure and report titles.
LABEL: Final = 'filament ages in'
@classmethod
def create(
cls,
tp: type[FullDepth],
show: bool = True,
) -> type[AgesByFilament]:
super()._create(tp, __class__.__name__, show)
cls.units = 'sec'
return cls
@classmethod
def report(
cls,
sp: ListOfSpatialSystems,
) -> tuple[Histogram, list, list]:
data = [s.ages_by_filament for s in sp]
avg, std = cls.tp.print_avgstd(cls.LABEL, data, cls.units)
h = Histogram(
cls.name,
Simulated().initialise(data, cls.fits_sim, dx=10., density=True),
)
h.to_csv(cls.path_out)
cls.plot(h)
return h, [avg], [std]
@classmethod
def plot(
cls,
h: Histogram
) -> None:
h.plot(
cls.LABEL + ' ' + cls.tp.type,
xlabel=f'age ({cls.units})',
xlim=[0., 10000],
yscale='log',
save_path=cls.path_out,
show=cls.show,
)
@classmethod
def pipeline(
cls,
sp: ListOfSpatialSystems,
) -> None:
e = fit.Exponential.create()
p = fit.Exponential.Pars
tu = cls.units
a = 1.
tau1 = 10
tau2 = 1000
cls.fits_sim = [
(e.d_h, p(a=a, tau1=tau1), tu),
(e.d_d_h, p(a=a, tau1=tau1, b=0.9, tau2=tau2), tu),
]
rep = cls.report(sp)
cls.summarize(rep, [1])
|
StarcoderdataPython
|
108848
|
<filename>common/logger_conf.py
# Copyright (c) 2017 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Logging configuration dictionary.
Using:
from logging.config import dictConfig
dictConfig(LOG_CONFIG)
"""
import sys
import pathlib
import logging
LOG_CONFIG = {
'version': 1,
'formatters': {
'custom': {
'format': '[%(asctime)s] %(levelname)s: %(message)s'
}
},
'handlers': {
'stream_handler': {'class': 'logging.StreamHandler',
'formatter': 'custom',
'stream': sys.stdout,
'level': logging.INFO},
'file_handler': {'class': 'logging.FileHandler',
'formatter': 'custom',
'filename': str(pathlib.Path.cwd() / '_all.log'),
'level': logging.DEBUG,
'delay': True}
},
'root': {
'handlers': ['stream_handler', 'file_handler'],
'level': logging.DEBUG
}
}
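# Minimal usage sketch (not part of the original module), following the module
# docstring: apply LOG_CONFIG via dictConfig and emit a couple of records.
if __name__ == '__main__':
    from logging.config import dictConfig
    dictConfig(LOG_CONFIG)
    log = logging.getLogger(__name__)
    log.info('reaches stdout and _all.log')
    log.debug('reaches _all.log only, since the stream handler is INFO level')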
|
StarcoderdataPython
|
133821
|
import uuid
from django.core.exceptions import ValidationError
from arches.app.functions.base import BaseFunction
from arches.app.models import models
from arches.app.models.tile import Tile
import json
import logging
from django.db.models import Q
logger = logging.getLogger(__name__)
details = {
"name": "Incrementor",
"type": "node",
"description": "Adds a tile to a newly created instance with an incrementing value",
"defaultconfig": {"selected_nodegroup": "", "target_node":"", "triggering_nodegroups":[], "starting_value": 0, "last_value":0, "prefix":"", "suffix":""},
"classname": "IncrementorFunction",
"component": "views/components/functions/incrementor-function",
"functionid": "2cc07b0a-adbd-4721-86ce-dad1699caa86"
}
class IncrementorFunction(BaseFunction):
def post_save(self, tile, request):
tile_already_exists = models.TileModel.objects.filter(resourceinstance_id=tile.resourceinstance_id).filter(nodegroup_id=self.config["selected_nodegroup"]).exists()
if not tile_already_exists:
try:
if int(self.config['last_value']) == 0:
new_number = str(int(self.config['starting_value']) + int(self.config['last_value']) + 1)
else:
new_number = str(int(self.config['last_value']) + 1)
                new_value = self.config['prefix'] + ((7 - len(new_number)) * '0') + new_number + self.config['suffix']  # EAMENA numbers are 7 digits long
fn = models.FunctionXGraph.objects.get(Q(function_id="2cc07b0a-adbd-4721-86ce-dad1699caa86"), Q(graph_id=tile.resourceinstance.graph_id))
fn.config['last_value'] = new_number
fn.save()
nodegroup_id = self.config["selected_nodegroup"]
target_node = self.config['target_node']
nodegroup = models.NodeGroup.objects.get(pk = nodegroup_id)
if tile.nodegroup.nodegroupid == nodegroup.nodegroupid:
tile.data[target_node] = new_value
return
if nodegroup.parentnodegroup_id == tile.nodegroup.nodegroupid:
return
else:
tile = Tile.update_node_value(target_node, new_value, nodegroupid=nodegroup_id, resourceinstanceid=tile.resourceinstance_id)
except Exception:
logger.exception("The incrementor function is unable to create incremented value")
|
StarcoderdataPython
|
3272094
|
from .base import SimulatorException, SimulationResult
from .scipyode import ScipyOdeSimulator
from .cupsoda import CupSodaSimulator
from .stochkit import StochKitSimulator
from .bng import BngSimulator, PopulationMap
__all__ = ['BngSimulator', 'CupSodaSimulator', 'ScipyOdeSimulator',
'StochKitSimulator', 'SimulationResult', 'PopulationMap']
|
StarcoderdataPython
|
3258197
|
<filename>djeff/djeff.py
import re
from django.template.backends.django import DjangoTemplates, Template
from django.template.engine import _dirs_undefined
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
class DjeffTemplates(DjangoTemplates):
def get_template(self, template_name, dirs=_dirs_undefined):
return DjeffTemplate(self.engine.get_template(template_name, dirs))
class DjeffTemplate(Template):
def render(self, context=None, request=None):
rendered_context = super().render(context, request)
return djeffify_html(rendered_context)
def djeffify_string(string_to_djeff):
"""
Djeffifies string_to_djeff
"""
string_to_djeff = re.sub(r'^(?=[jg])', 'd', string_to_djeff, flags=re.IGNORECASE) # first
string_to_djeff = re.sub(r'[ ](?=[jg])', ' d', string_to_djeff, flags=re.IGNORECASE) # spaces
string_to_djeff = re.sub(r'[\n](?=[jg])', '\nd', string_to_djeff, flags=re.IGNORECASE) # \n
return string_to_djeff
def djeffify_html(rendered_string):
"""
This function contains the core logic for a
middleware, template tag or Template engine approach
"""
parser = DjeffParser()
parser.feed(rendered_string)
return parser.djhtml
def reconstruct_attrs(attrs):
tag_string = ''
for attr in attrs:
tag_string += ('{}={} ').format(attr[0], attr[1])
return tag_string.strip()
class DjeffParser(HTMLParser):
def __init__(self, convert_charrefs=True, *args, **kwargs):
"""
Explicitly set convert_charrefs to keep deprecation warnings at bay.
See:
https://docs.python.org/3/library/html.parser.html#html.parser.HTMLParser
"""
# python 3
try:
HTMLParser.__init__(self, convert_charrefs=convert_charrefs)
# python 2
except TypeError:
HTMLParser.__init__(self)
self.djhtml = ''
def handle_starttag(self, tag, attrs):
self.djhtml += '<{} {}>'.format(tag, reconstruct_attrs(attrs))
def handle_endtag(self, tag):
self.djhtml += '</{}>'.format(tag)
def handle_data(self, data):
"""
Djeffify data between tags
"""
if data.strip():
data = djeffify_string(data)
self.djhtml += data
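# Minimal sketch (not in the original module), assuming the Django imports at the
# top of this file resolve: exercise the pure-string helpers directly.
if __name__ == '__main__':
    print(djeffify_string('jump george'))    # -> 'djump dgeorge'
    print(djeffify_html('<p>good job</p>'))  # -> '<p >dgood djob</p>'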
|
StarcoderdataPython
|
1786979
|
"""Config flow for Air Touch 3."""
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST
from .const import DOMAIN
from .s7comm import S7Comm
DATA_SCHEMA = vol.Schema({vol.Required(CONF_HOST): str})
class S7CommConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle an S7Comm config flow."""
VERSION = 1
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if user_input is None:
return self.async_show_form(step_id="user", data_schema=DATA_SCHEMA)
errors = {}
# Ask the user for the host name
host = user_input[CONF_HOST]
self._async_abort_entries_match({CONF_HOST: host})
# Connect to the S7 PLC and request an update
s7comm = S7Comm(host)
s7comm.get_cpu_state()
# Check if we connected, if not, error message
if not s7comm.comms_status:
errors["base"] = "cannot_connect"
# Show errors to user, exiting
if errors:
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
# All good and connected, create entry
return self.async_create_entry(
title="Step 7 PLC (" + user_input[CONF_HOST] + ")",
data={
CONF_HOST: user_input[CONF_HOST],
},
)
|
StarcoderdataPython
|
1631278
|
<filename>bin/vehicle-inspector.py<gh_stars>0
#! /usr/bin/env python
from smartcameras.subscriber import VehicleInspector
import threading
import time
def main():
print("########################################")
print ""
print("Welcome to the Vehicle Inspector!")
print ""
print("########################################")
vehicleInspector = VehicleInspector()
thread = threading.Thread(target=vehicleInspector.activate)
thread.daemon = True
thread.start()
while not vehicleInspector.isActive:
time.sleep(1)
print ""
print("The Vehicle Inspector has been activated!")
print ""
while True:
try:
raw_input("Press Ctrl+D to exit.")
except EOFError:
print ""
break
vehicleInspector.terminate()
thread.join()
print "Vehicle Inspector terminated."
print "Closing..."
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1788017
|
<filename>fairmodels/plot_ceteris_paribus_cutoff.py
import numpy as np
import pandas as pd
from .plotnine import *
def plot_ceteris_paribus_cutoff(cpc):
data = cpc.cutoff_data
models = cpc.label
n_models = len(models)
min_data = cpc.min_data
min_data["y"] = [np.min(data.parity_loss)] * n_models
min_data["tyend"] = [np.max(data.parity_loss)] * n_models
min_data["yend"] = min_data.tyend * 0.95
min_data.y -= min_data.tyend / 20
plt = ggplot(data, aes("cutoff", "parity_loss", color="metric")) + \
geom_line() + \
labs(color="parity loss metric") + \
facet_wrap("~model") + \
ggtitle("Ceteris paribus cutoff plot") + \
xlab("value of cutoff") + \
ylab("Metric's parity loss") + \
geom_segment(data=min_data, mapping=aes(x="mins", xend="mins", y="y", yend="yend"),
linetype="dashed", color="grey") + \
geom_text(data=min_data, mapping=aes(x="mins", y="tyend", label="mins"), size=10, color="grey")
return plt
|
StarcoderdataPython
|
3205205
|
<reponame>MartinSeeler/Advent-of-Code<filename>2020/day20/solution.py
from math import prod, sqrt
transpose = lambda tile: list(''.join(line) for line in zip(*tile))
invert = lambda tile: [''.join(reversed(line)) for line in tile]
rotate = lambda tile: invert(transpose(tile))
def parse(text: str):
tiles = dict()
tile_sides = dict()
for block in text.split("\n\n"):
lines = block.splitlines()
sides = [lines[1], lines[-1], "".join(l[0] for l in lines[1:]), "".join(l[-1] for l in lines[1:])]
sides += [s[::-1] for s in sides]
id = int(lines[0].split(" ")[1][:-1])
tile_sides[id] = sides
tiles[id] = lines[1:]
adjacents = dict()
for k, sides in tile_sides.items():
adjacents[k] = set.union(
*map(set, list([j for j in tile_sides.keys() if side in tile_sides[j] and j != k] for side in sides)))
return tiles, adjacents
def solve_part_1(text: str):
_, adjacents = parse(text)
return prod([k for k, sides in adjacents.items() if len(sides) == 2])
def solve_part_2(text: str):
tiles, adjacents = parse(text)
N = int(sqrt(len(tiles)))
board = [[None] * N for _ in range(N)]
print(rotate(tiles[3079]))
if __name__ == '__main__':
with open("input.txt", "r") as f:
quiz_input = f.read()
print("Part 1:", solve_part_1(quiz_input))
print("Part 2:", solve_part_2(quiz_input))
|
StarcoderdataPython
|
3310003
|
# Copyright (c) 2018-2020, <NAME>
# Copyright (c) 2021, Auburn University
# This file is part of the azplugins project, released under the Modified BSD License.
import numpy as np
import hoomd
hoomd.context.initialize()
from hoomd import md
try:
from hoomd import azplugins
except ImportError:
import azplugins
import unittest
# azplugins.restrain.plane
class restrain_plane_tests(unittest.TestCase):
"""Tests azplugins.restrain.plane"""
def setUp(self):
snap = hoomd.data.make_snapshot(N=3, box=hoomd.data.boxdim(L=5.0))
if hoomd.comm.get_rank() == 0:
snap.particles.position[0] = [ 1,0,0]
snap.particles.position[1] = [-1,0,2]
snap.particles.position[2] = [ 2,0,0]
snap.particles.image[2] = [-1,0,0]
hoomd.init.read_snapshot(snap)
# dummy integrator
all_ = hoomd.group.all()
md.integrate.mode_standard(dt=0)
md.integrate.nve(group=all_)
def test_create(self):
"""Test object creation and updating."""
f = azplugins.restrain.plane(group=hoomd.group.all(), point=(0,0,0), normal=(1,0,0), k=2.0)
f.set_params(k=5.0)
f.set_params(k=8)
f.set_params(point=(0,0,1))
f.set_params(point=[0,0,1])
f.set_params(point=np.array([0,0,1]))
f.set_params(normal=(0,0,1))
f.set_params(normal=[0,0,1])
f.set_params(normal=np.array([0,0,1]))
f.set_params(point=(0,0,0), normal=(1,0,0), k=10.0)
def test_force(self):
"""Test forces computed on particles."""
group = hoomd.group.all()
# compute forces
f = azplugins.restrain.plane(group=group, point=(0,0,0), normal=(1,0,0), k=2.0)
hoomd.run(1)
np.testing.assert_array_almost_equal(f.forces[0].force, (-2.,0,0))
np.testing.assert_array_almost_equal(f.forces[1].force, ( 2.,0,0))
np.testing.assert_array_almost_equal(f.forces[2].force, ( 6.,0,0))
self.assertAlmostEqual(f.forces[0].energy, 1.)
self.assertAlmostEqual(f.forces[1].energy, 1.)
self.assertAlmostEqual(f.forces[2].energy, 9.)
np.testing.assert_array_almost_equal(f.forces[0].virial, (-2.,0,0,0,0,0))
np.testing.assert_array_almost_equal(f.forces[1].virial, (-2.,0,4.,0,0,0))
np.testing.assert_array_almost_equal(f.forces[2].virial, (12.,0,0,0,0,0))
# change the spring constant
f.set_params(k=1.0)
hoomd.run(1)
np.testing.assert_array_almost_equal(f.forces[0].force, (-1.,0,0))
np.testing.assert_array_almost_equal(f.forces[1].force, ( 1.,0,0))
np.testing.assert_array_almost_equal(f.forces[2].force, ( 3.,0,0))
self.assertAlmostEqual(f.forces[0].energy, 0.5)
self.assertAlmostEqual(f.forces[1].energy, 0.5)
self.assertAlmostEqual(f.forces[2].energy, 4.5)
# shift the plane down
f.set_params(point=(-1,0,0))
hoomd.run(1)
np.testing.assert_array_almost_equal(f.forces[0].force, (-2.,0,0))
np.testing.assert_array_almost_equal(f.forces[1].force, ( 0.,0,0))
np.testing.assert_array_almost_equal(f.forces[2].force, ( 2.,0,0))
self.assertAlmostEqual(f.forces[0].energy, 2.0)
self.assertAlmostEqual(f.forces[1].energy, 0.0)
self.assertAlmostEqual(f.forces[2].energy, 2.0)
# rotate the plane so that only particle 1 is off the line
f.set_params(point=(0,0,0), normal=(0,0,1))
hoomd.run(1)
np.testing.assert_array_almost_equal(f.forces[0].force, (0,0,0))
np.testing.assert_array_almost_equal(f.forces[1].force, (0,0,-2))
np.testing.assert_array_almost_equal(f.forces[2].force, (0,0,0))
self.assertAlmostEqual(f.forces[0].energy, 0.0)
self.assertAlmostEqual(f.forces[1].energy, 2.0)
self.assertAlmostEqual(f.forces[2].energy, 0.0)
def test_group(self):
"""Test forces on subgroup of prticles."""
# leave out particle 0
group = hoomd.group.tags(1,2)
# compute forces
f = azplugins.restrain.plane(group=group, point=(0,0,0), normal=(1,0,0), k=2.0)
hoomd.run(1)
np.testing.assert_array_almost_equal(f.forces[0].force, ( 0.,0,0))
np.testing.assert_array_almost_equal(f.forces[1].force, ( 2.,0,0))
np.testing.assert_array_almost_equal(f.forces[2].force, ( 6.,0,0))
self.assertAlmostEqual(f.forces[0].energy, 0.)
self.assertAlmostEqual(f.forces[1].energy, 1.)
self.assertAlmostEqual(f.forces[2].energy, 9.)
np.testing.assert_array_almost_equal(f.forces[0].virial, (0,0,0,0,0,0))
np.testing.assert_array_almost_equal(f.forces[1].virial, (-2.,0,4.,0,0,0))
np.testing.assert_array_almost_equal(f.forces[2].virial, (12.,0,0,0,0,0))
def tearDown(self):
hoomd.context.initialize()
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
StarcoderdataPython
|
1693346
|
"""Check module for command."""
# Official Libraries
from argparse import Namespace
# My Modules
from stobu.types.command import CmdType
__all__ = (
'has_cmd_of',
)
# Define Constants
COMMAND_TABLE = {
CmdType.ADD: ('a', 'add'),
CmdType.BUILD: ('b', 'build'),
CmdType.COPY: ('c', 'copy'),
CmdType.DELETE: ('d', 'delete'),
CmdType.EDIT: ('e', 'edit'),
CmdType.INIT: ('i', 'init'),
CmdType.LIST: ('l', 'list'),
CmdType.PUSH: ('p', 'push'),
CmdType.REJECT: ('r', 'reject'),
CmdType.RENAME: ('n', 'rename'),
CmdType.SET: ('set',),
CmdType.NONE: ('none',),
}
# Main
def has_cmd_of(args: Namespace, cmd: CmdType) -> bool:
assert isinstance(args, Namespace)
assert isinstance(cmd, CmdType)
order_cmd = args.cmd
return order_cmd in COMMAND_TABLE[cmd]
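# Illustrative sketch (not part of the original module): a Namespace whose `cmd`
# attribute holds one of the aliases in COMMAND_TABLE matches that CmdType.
if __name__ == '__main__':
    assert has_cmd_of(Namespace(cmd='b'), CmdType.BUILD)
    assert not has_cmd_of(Namespace(cmd='b'), CmdType.COPY)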
|
StarcoderdataPython
|
1681172
|
<gh_stars>0
import torch.nn as nn
from collections import OrderedDict
from mmdet.models.plugins import ConcatFeatureMap, ScaleAwareLayer, SpatialAwareLayer, TaskAwareLayer
import torch
import torch.nn.functional as F
class DyHead_Block(nn.Module):
def __init__(self, L, S, C):
super(DyHead_Block, self).__init__()
# Saving all dimension sizes of F
self.L_size = L
self.S_size = S
self.C_size = C
# Inititalizing all attention layers
self.scale_attention = ScaleAwareLayer(s_size=self.S_size)
self.spatial_attention = SpatialAwareLayer(L_size=self.L_size)
self.task_attention = TaskAwareLayer(num_channels=self.C_size)
def forward(self, F_tensor):
scale_output = self.scale_attention(F_tensor)
spacial_output = self.spatial_attention(scale_output)
task_output = self.task_attention(spacial_output)
return task_output
# def DyHead(num_blocks, L, S, C):
# blocks = [('Block_{}'.format(i + 1), DyHead_Block(L, S, C)) for i in range(num_blocks)]
#
# return nn.Sequential(OrderedDict(blocks))
class DynamicHead(nn.Module):
def __init__(self, num_blocks, L, S, C):
super(DynamicHead, self).__init__()
blocks = [('Block_{}'.format(i + 1), DyHead_Block(L, S, C)) for i in range(num_blocks)]
self.blocks = nn.Sequential(OrderedDict(blocks))
self.concat_layer = ConcatFeatureMap()
def forward(self, fpn_output):
if len(fpn_output) > 4:
fpn_output = fpn_output[:4]
concat_levels, median_height = self.concat_layer(fpn_output)
dynamic_output = self.blocks(concat_levels)
B, L, _, C = dynamic_output.size()
output = dynamic_output.transpose(2, 3).reshape(B, L, C, median_height, median_height)
output = output.split(split_size=1, dim=1)
output = [o.squeeze(1).contiguous() for o in output]
output.append(F.max_pool2d(output[-1], 1, stride=2))
return output
if __name__ == '__main__':
z = [torch.randn(1, 256, 200, 200),
torch.randn(1, 256, 100, 100),
torch.randn(1, 256, 50, 50),
torch.randn(1, 256, 25, 25)]
head = DynamicHead(6, 4, 5625, 256)
y = head(z)
pass
|
StarcoderdataPython
|
3360621
|
<reponame>wavestate/wavestate-control<filename>src/wavestate/control/statespace/ace_electrical.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 <NAME> <<EMAIL>>
# NOTICE: authors should document their contributions in concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
State Space System
"""
import numpy as np
import copy
from wavestate import declarative
from collections import defaultdict
from collections.abc import Mapping
import functools
from numbers import Number
from wavestate.control.statespace import dense
from wavestate.control.statespace import ACE
from wavestate.control.statespace.dense.zpk_algorithms import zpk_cascade
from wavestate.control.statespace.dense.xfer_algorithms import ss2xfer
def zpkACE(Zr=(), Zc=(), Pr=(), Pc=(), k=1):
ABCDs = zpk_cascade(zr=Zr, zc=Zc, pr=Pr, pc=Pc, k=k)
syslist = []
for (A, B, C, D, E) in ABCDs:
syslist.append(ACE.ACE.from_ABCD(A, B, C, D, E))
ace = ACE.ACE()
for idx, sys in enumerate(syslist):
ace.insert(sys, cmn="sys{}".format(idx))
for idx in range(len(syslist) - 1):
ace.bind_equal(
{"sys{}.O".format(idx), "sys{}.I".format(idx + 1)},
constr="s{}{}".format(idx, idx + 1),
)
ace.io_add("I", {"sys0.I": None})
ace.io_add("O", {"sys{}.O".format(idx): None})
return ace
pi2 = np.pi * 2
def op_amp(Gbw=1e7):
ace = ACE.ACE()
ace.insert(
zpkACE(
Pr=[
-1 * pi2,
],
k=Gbw,
),
cmn="Vgain",
)
ace.insert(
zpkACE(
Pr=[
-1 * pi2,
],
Zr=[
-100 * pi2,
],
k=4e-9 * 100,
),
cmn="Vnoise",
)
ace.insert(
zpkACE(
Pr=[
-1 * pi2,
],
Zr=[
-30 * pi2,
],
k=10e-12 * 100,
),
cmn="Inoise",
)
ace.states_augment(N=1, st="posI", io=True)
ace.states_augment(N=1, st="posV", io=True)
ace.states_augment(N=1, st="negI", io=True)
ace.states_augment(N=1, st="negV", io=True)
ace.states_augment(N=1, st="outI", io=True)
ace.states_augment(N=1, st="outV", io=True)
ace.bind_equal({"outV", "Vgain.O"}, constr="Vgain.O")
ace.bind_sum({"posI"}, constr="posI")
ace.bind_equal({"negI", "Inoise.O"}, constr="negI")
ace.bind_sum({"posV": -1, "negV": 1, "Vnoise.O": -1, "Vgain.I": 1}, constr="amp")
ace.io_input("Vnoise.I")
ace.io_input("Inoise.I")
ace.noise_add("opamp", {"Vnoise.I", "Inoise.I"})
ace.port_add("inP", type="electrical", flow="posI", potential="posV")
ace.port_add("inN", type="electrical", flow="negI", potential="negV")
ace.port_add("out", type="electrical", flow="outI", potential="outV")
return ace
def electrical1port():
ace = ACE.ACE()
ace.states_augment(N=1, st="aI", io=True)
ace.states_augment(N=1, st="aV", io=True)
ace.port_add("a", type="electrical", flow="aI", potential="aV")
return ace
def short():
ace = electrical1port()
ace.bind_sum({"aV"}, constr="V")
return ace
def open():
ace = electrical1port()
ace.bind_sum({"aI"}, constr="V")
return ace
def electrical2port():
ace = ACE.ACE()
ace.states_augment(N=1, st="aI", io=True)
ace.states_augment(N=1, st="bI", io=True)
ace.states_augment(N=1, st="aV", io=True)
ace.states_augment(N=1, st="bV", io=True)
ace.port_add("a", type="electrical", flow="aI", potential="aV")
ace.port_add("b", type="electrical", flow="bI", potential="bV")
return ace
def voltage_source1():
ace = electrical1port()
ace.states_augment(N=1, st="V", io=True)
ace.bind_equal({"aV", "V"}, constr="aV")
return ace
def current_source1():
ace = electrical1port()
ace.states_augment(N=1, st="I", io=True)
ace.bind_sum({"aI": 1, "I": 1}, constr="aI")
return ace
def voltage_source2():
ace = electrical2port()
ace.states_augment(N=1, st="V", io=True)
ace.bind_sum({"aV": 1, "bV": -1, "V": 1}, constr="V")
ace.bind_sum({"aI", "bI"}, constr="I")
return ace
def current_source2():
ace = electrical2port()
ace.states_augment(N=1, st="I", io=True)
ace.bind_equal({"aV", "bV"}, constr="V")
ace.bind_sum({"aI": 1, "bI": 1, "I": 1}, constr="I")
return ace
|
StarcoderdataPython
|
1616300
|
from nice65.utils.addressing import AddressParser
class Disassembler:
def __init__(self, mpu, address_parser=None):
if address_parser is None:
address_parser = AddressParser()
self._mpu = mpu
self._address_parser = address_parser
self.addrWidth = mpu.ADDR_WIDTH
self.byteWidth = mpu.BYTE_WIDTH
self.addrFmt = mpu.ADDR_FORMAT
self.byteFmt = mpu.BYTE_FORMAT
self.addrMask = mpu.addrMask
self.byteMask = mpu.byteMask
def instruction_at(self, pc):
""" Disassemble the instruction at PC and return a tuple
containing (instruction byte count, human readable text)
"""
instruction = self._mpu.ByteAt(pc)
disasm, addressing = self._mpu.disassemble[instruction]
if addressing == 'acc':
disasm += ' A'
length = 1
elif addressing == 'abs':
address = self._mpu.WordAt(pc + 1)
address_or_label = self._address_parser.label_for(
address, '$' + self.addrFmt % address)
disasm += ' ' + address_or_label
length = 3
elif addressing == 'abx':
address = self._mpu.WordAt(pc + 1)
address_or_label = self._address_parser.label_for(
address, '$' + self.addrFmt % address)
disasm += ' %s,X' % address_or_label
length = 3
elif addressing == 'aby':
address = self._mpu.WordAt(pc + 1)
address_or_label = self._address_parser.label_for(
address, '$' + self.addrFmt % address)
disasm += ' %s,Y' % address_or_label
length = 3
elif addressing == 'imm':
byte = self._mpu.ByteAt(pc + 1)
disasm += ' #$' + self.byteFmt % byte
length = 2
elif addressing == 'imp':
length = 1
elif addressing == 'ind':
address = self._mpu.WordAt(pc + 1)
address_or_label = self._address_parser.label_for(
address, '$' + self.addrFmt % address)
disasm += ' (%s)' % address_or_label
length = 3
elif addressing == 'iny':
zp_address = self._mpu.ByteAt(pc + 1)
address_or_label = self._address_parser.label_for(
zp_address, '$' + self.byteFmt % zp_address)
disasm += ' (%s),Y' % address_or_label
length = 2
elif addressing == 'inx':
zp_address = self._mpu.ByteAt(pc + 1)
address_or_label = self._address_parser.label_for(
zp_address, '$' + self.byteFmt % zp_address)
disasm += ' (%s,X)' % address_or_label
length = 2
elif addressing == 'iax':
address = self._mpu.WordAt(pc + 1)
address_or_label = self._address_parser.label_for(
address, '$' + self.addrFmt % address)
disasm += ' (%s,X)' % address_or_label
length = 3
elif addressing == 'rel':
opv = self._mpu.ByteAt(pc + 1)
targ = pc + 2
if opv & (1 << (self.byteWidth - 1)):
targ -= (opv ^ self.byteMask) + 1
else:
targ += opv
targ &= self.addrMask
address_or_label = self._address_parser.label_for(
targ, '$' + self.addrFmt % targ)
disasm += ' ' + address_or_label
length = 2
elif addressing == 'zpi':
zp_address = self._mpu.ByteAt(pc + 1)
address_or_label = self._address_parser.label_for(
zp_address, '($' + self.byteFmt % zp_address + ')')
disasm += ' %s' % address_or_label
length = 2
elif addressing == 'zpg':
zp_address = self._mpu.ByteAt(pc + 1)
address_or_label = self._address_parser.label_for(
zp_address, '$' + self.byteFmt % zp_address)
disasm += ' %s' % address_or_label
length = 2
elif addressing == 'zpx':
zp_address = self._mpu.ByteAt(pc + 1)
address_or_label = self._address_parser.label_for(
zp_address, '$' + self.byteFmt % zp_address)
disasm += ' %s,X' % address_or_label
length = 2
elif addressing == 'zpy':
zp_address = self._mpu.ByteAt(pc + 1)
address_or_label = self._address_parser.label_for(
zp_address, '$' + self.byteFmt % zp_address)
disasm += ' %s,Y' % address_or_label
length = 2
else:
msg = "Addressing mode: %r" % addressing
raise NotImplementedError(msg)
return (length, disasm)
|
StarcoderdataPython
|
24717
|
import re
import sys
import pprint
pp = pprint.PrettyPrinter(indent=4)
from sys import version_info # py3, for checking type of input
def combine_messages(messages):
""" Combines messages that have one or more integers in them, such as
"trial001" "trial002", into a single message like "trial# (#=1-2)".
This is to reduce the number of messages required to be displayed.
Operates by creating the following structure, named "ti" for "template info":
{
't2tn': {} - maps each template (containing "#") to a template number (tn)
'tn2t': [] - list of templates, indexed by the template number
'm2tns': {} - maps each message number (index in messages) to
array of template numbers (tns)
'tn2dm': {} - maps each template number to a dictionary that has as keys the digits
used to make the template, and with value the message number used to make the template
with those digits. i.e.:
{ tn1: {d1: m1, d2: m2}, tn2: {d3: m3, d4: m4}, tn2: { ...}}
where:
tn - template number
d: m - digits used to make template from message number m
'tn2md': {} - maps each template number of a dictionary that has keys the message number
and value the digits used to make the message. These reverse the key-values in 'tn2dm', e.g.:
{ tn1: {m1: d1, m2: d2}, tn2: {m3: d3, m4: d4}, tn2: { ...}}
where:
tn - template number
d: m - digits used to make template from message number m
This array is used to dynamically remove entries in 'tn2dm' as each message in a
template is displayed so that structure always has an accurate list of remaining messages.
'mout': [] - messages to display (output), formed by combining messages
'mfin': [] - set of message numbers "finished" (already included in mout).
}
This function works by first creating everything except mout and mfin, then
going through each message, finding the template numbers that have the most
digits, and using those to make the combined message.
"""
ti = {}
ti['t2tn'] = {}
ti['tn2t'] = []
ti['m2tns'] = {}
ti['tn2dm'] = {}
ti['tn2md'] = {}
# debug_msg = "/acquisition/timeseries/fov_15002_17/data"
# debug_mn = -1
for mn in range(len(messages)):
msg = messages[mn]
if version_info[0] > 2:
assert isinstance(msg, str), "in Python 3, messages must be str (unicode) type"
# if msg.startswith(debug_msg):
# debug_mn = mn
found_nums = re.findall("\d+", msg)
if not found_nums:
# no numbers found, don't process
continue
# remove any duplicates
found_nums = list(set(found_nums))
for digits in found_nums:
pattern = "(?<!\d)%s(?!\d)" % digits # substitute only if digits not surrounded by other digits
template = re.sub(pattern, "#", msg) # make template for this message and digits
if template not in ti['t2tn']:
tn = len(ti['tn2t']) # template number
ti['tn2t'].append(template) # add template to list of templates
ti['t2tn'][template] = tn # add entry to map of template to template number
else:
tn = ti['t2tn'][template]
# save template number (tn) in 'm2tns'
if mn not in ti['m2tns']:
ti['m2tns'][mn] = [tn,]
else:
ti['m2tns'][mn].append(tn)
# save template number, digits and message number in 'tn2dm'
idigits = int(digits)
if tn not in ti['tn2dm']:
ti['tn2dm'][tn] = {idigits: mn}
ti['tn2md'][tn] = {mn: idigits}
else:
                if idigits in ti['tn2dm'][tn]:
print ("duplicate message found: %s" % msg)
break
ti['tn2dm'][tn][idigits] = mn
ti['tn2md'][tn][mn] = idigits
# done building needed structures. Now generate 'output' (i.e. ti['mfin'] and ti['mout']
ti['mout'] = []
ti['mfin'] = set([])
for mn in range(len(messages)):
# if mn == debug_mn:
# print ("found mn %i '%s'" % (debug_mn, debug_msg))
# import pdb; pdb.set_trace()
if mn in ti['mfin']:
# message has already been displayed (using a template)
continue
if mn not in ti['m2tns']:
# no digits found in this message, just display as is
ti['mout'].append(messages[mn])
ti['mfin'].add(mn)
continue
# this message has at least one pattern. Find template with largest number of other messages
# that have not been displayed yet
# build list of pairs, (a, b); a - template number, b - number of messages in template
tn_nm_pairs = [ (tn, len(ti['tn2dm'][tn])) for tn in ti['m2tns'][mn] ]
# get those pairs that have the largest number of messages
ltn_nm_pairs = largest_pairs(tn_nm_pairs)
# nmax = 0
# for tn in ti['m2tns'][mn]:
# dm = ti['tn2dm'][tn]
# num_messages = len(ti['tn2dm'][tn]) # num messages associated with this template
# if num_messages > nmax:
# max_tn = [tn]
# nmax = num_messages
# elif num_messages == nmax:
# # multiple templates have the same number of messages, will need to select
# # one in a deterministic way
# max_tn.append(tn)
# # if no other messages use pattern, just display as is
# if nmax == 1:
if ltn_nm_pairs[0][1] == 1:
# only one messages uses pattern, just display as is
ti['mout'].append(messages[mn])
ti['mfin'].add(mn)
continue
# if len(max_tn) > 1:
if len(ltn_nm_pairs) == 1:
# only one template found that has maximal number of messages. use it.
max_tn = ltn_nm_pairs[0][0]
else:
# multiple templates have the same maximal number of messages. Select the one
# with the rightmost position of '#' in the template
# build list of pairs, (a,b): a - template number, b - index of '#' in template
tn_ix_pairs = [ (ltn_nm_pairs[i][0], ti['tn2t'][ltn_nm_pairs[i][0]].index('#'))
for i in range(len(ltn_nm_pairs))]
tn_ix_pairs = largest_pairs(tn_ix_pairs)
if len(tn_ix_pairs) > 1:
# should never happen since templates made for the same message cannot have
# the same position for the '#'
sys.exit("found multiple templates with same maximal number of messages and same template")
# use the template found
max_tn = tn_ix_pairs[0][0]
# other messages use this template. Get list message numbers and digits that share this template
s_digits = list(ti['tn2dm'][max_tn].keys()) # shared digits
s_mns = list(ti['tn2dm'][max_tn].values()) # shared message numbers
# update tn2dm to remove messages that will be displayed shortly (in this template)
for mn in s_mns:
for tn in ti['m2tns'][mn]:
idigit = ti['tn2md'][tn][mn]
del ti['tn2dm'][tn][idigit]
# make new message by combining shared digits with template
template = ti['tn2t'][max_tn]
# convert digits from string to int
# i_digits = sorted([int(i) for i in s_digits])
i_digits = sorted(s_digits)
# make string representing ranges of digits
prevn = i_digits[0] # initialize previous number to first
sr = str(prevn) # string of ranges being generated
in_range = False
for i in range(1, len(i_digits)):
newn = i_digits[i]
if newn == prevn + 1:
# in a range
in_range = True
else:
# not in a range. But if was previously save end of previous range
if in_range:
sr = "%s-%i" % (sr, prevn)
in_range = False
# save new number
sr = "%s,%i" % (sr, newn)
prevn = newn
# append final number if in range
if in_range:
sr = "%s-%i" % (sr, newn)
new_message = template + " (#=%s)" % sr
ti['mout'].append(new_message)
# add all messages that share this template to ti['mfin'] so they are not displayed again
ti['mfin'].update(s_mns)
# return list of combined messages
return ti['mout']
def largest_pairs(pairs):
""""Input is a list of two-element tuples, e.g. [(5, 4), (2, 7), ...]
Output is list of those, which have the largest 2nd element, e.g. [(2,7)]"""
largest = -1
for pair in pairs:
a, b = pair
if b > largest:
largest = b
lpairs = [pair]
elif b == largest:
lpairs.append(pair)
return lpairs
def test_combine_messages():
""" tests combine_messages function"""
messages = [
"some prefix trial-none",
"some prefix trial23",
"some prefix trial23/timestamps",
"some prefix trial23 timestamps",
"some prefix trial23\ntimestamps",
"some prefix 32-bits, trial32",
"some prefix 32-bits, trial33",
"some prefix 32-bits, trial34",
"some prefix 32-bits, trial35",
"some prefix trial-11",
"some prefix trial23 and trial23 again",
"some prefix trial27",
"some prefix trial27/timestamps",
"some prefix trial27 timestamps",
"some prefix trial27\ntimestamps",
"some prefix 32-bits, trial27",
"some prefix trial27 and trial27 again"]
cm = combine_messages(messages)
pp.pprint(cm)
if __name__ == '__main__':
test_combine_messages()
|
StarcoderdataPython
|
4829350
|
<reponame>lhoupert/hvplot<gh_stars>100-1000
from unittest import TestCase, SkipTest
try:
import numpy as np
import networkx as nx
import hvplot.networkx as hvnx
except:
raise SkipTest('NetworkX not available')
class TestOptions(TestCase):
def setUp(self):
# Create nodes (1-10) in unsorted order
nodes = np.array([1, 4, 5, 10, 8, 9, 3, 7, 2, 6])
edges = list(zip(nodes[:-1], nodes[1:]))
g = nx.Graph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
self.nodes = nodes
self.g = g
def test_nodes_are_not_sorted(self):
plot = hvnx.draw(self.g)
assert all(self.nodes == plot.nodes.dimension_values(2))
|
StarcoderdataPython
|
1751881
|
<gh_stars>10-100
from django.contrib.auth.models import User
__all__ = ["User"]
|
StarcoderdataPython
|
1606396
|
from substrateinterface import Keypair
from substrateinterface import SubstrateInterface
from pathlib import Path
from brownie import *
import base58
from hashlib import blake2b
import json
import yaml
from pathlib import Path
from colorama import Fore, Back, Style, init
import os
init(autoreset=True)
NETWORK=os.getenv("NETWORK", "polkadot")
def get_derivative_account(root_account, index):
seed_bytes = b'modlpy/utilisuba'
root_account_bytes = bytes.fromhex(Keypair(root_account).public_key[2:])
index_bytes = int(index).to_bytes(2, 'little')
entropy = blake2b(seed_bytes + root_account_bytes + index_bytes, digest_size=32).digest()
input_bytes = bytes([42]) + entropy
checksum = blake2b(b'SS58PRE' + input_bytes).digest()
return base58.b58encode(input_bytes + checksum[:2]).decode()
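# Illustrative usage (hypothetical value, kept as a comment because this module runs
# deployment side effects at import time):
#   child_0 = get_derivative_account("<root SS58 address>", 0)
# main() below derives the sovereign account's stash children in the same way.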
project.load(Path.home() / ".brownie" / "packages" / config["dependencies"][0])
if hasattr(project, 'OpenzeppelinContracts410Project'):
OpenzeppelinContractsProject = project.OpenzeppelinContracts410Project
else:
OpenzeppelinContractsProject = project.OpenzeppelinContractsProject
def load_deployments(network):
path = './deployments/' + network + '.json'
if Path(path).is_file():
with open(path) as file:
return json.load(file)
else:
return {}
def save_deployments(deployments, network):
path = './deployments/' + network + '.json'
with open(path, 'w+') as file:
json.dump(deployments, file)
def load_deployment_config(network):
with open('./deployment-config.yml') as file:
return yaml.safe_load(file)['networks'][network]
CONFIG = load_deployment_config(NETWORK)
DEPLOYMENTS = load_deployments(NETWORK)
# global configs
CONFS = 1
GAS_PRICE = "100 gwei"
GAS_LIMIT = 10*10**6
# utils
def ss58decode(address):
return Keypair(ss58_address=address).public_key
def get_opts(sender, gas_price=GAS_PRICE, gas_limit=GAS_LIMIT):
return {'from': sender, 'gas_price': gas_price, 'gas_limit': gas_limit}
def get_deployment(container):
info = container.get_verification_info()
name = info['contract_name']
if name in DEPLOYMENTS:
return DEPLOYMENTS[name]
else:
return None
def add_new_deploy(container, address):
info = container.get_verification_info()
name = info['contract_name']
DEPLOYMENTS[name] = address
save_deployments(DEPLOYMENTS, NETWORK)
def yes_or_no(question):
reply = input(question+' (y/n): ').lower().strip()
if reply[0] == 'y':
return True
if reply[0] == 'n':
return False
else:
return yes_or_no(Fore.RED + "Uhhhh... please enter y/n ")
def check_and_get_deployment(container):
deployment = get_deployment(container)
name = container.get_verification_info()["contract_name"]
if deployment:
if yes_or_no(Fore.RED + f'Found old deployment for {name} at {deployment}, use it?'):
return container.at(deployment)
else:
print(Fore.RED + f'REDEPLOYING {name} contract to new address')
return None
def deploy_with_proxy(container, proxy_admin, deployer, *args):
print("")
deployment = check_and_get_deployment(container)
if deployment:
return deployment
name = container.get_verification_info()["contract_name"]
print(Fore.GREEN + f'DEPLOYING {name} ...')
owner = proxy_admin.owner()
_implementation = container.deploy(get_opts(deployer))
encoded_inputs = _implementation.initialize.encode_input(*args)
_instance = OpenzeppelinContractsProject.TransparentUpgradeableProxy.deploy(
_implementation,
proxy_admin,
encoded_inputs,
get_opts(deployer)
)
OpenzeppelinContractsProject.TransparentUpgradeableProxy.remove(_instance)
add_new_deploy(container, _instance.address)
print(Fore.GREEN + f'Contract {name} deployed at {Fore.YELLOW}{_instance.address} {Fore.GREEN}under {Fore.RED} proxy')
return container.at(_instance.address)
def deploy(container, deployer, *args):
print("")
deployment = check_and_get_deployment(container)
if deployment:
return deployment
name = container.get_verification_info()["contract_name"]
print(Fore.GREEN + f'DEPLOYING {name} ...')
inst = None
if args:
inst = container.deploy(*args, get_opts(deployer))
else:
inst = container.deploy(get_opts(deployer))
add_new_deploy(container, inst.address)
print(Fore.GREEN + f'Contract {name} deployed at {Fore.YELLOW}{inst.address}')
return inst
# deploy functions
def deploy_proxy_admin(deployer):
return deploy(OpenzeppelinContractsProject.ProxyAdmin, deployer)
def deploy_auth_manager(deployer, proxy_admin, auth_super_admin):
return deploy_with_proxy(AuthManager, proxy_admin, deployer, auth_super_admin)
def deploy_oracle_clone(deployer):
return deploy(Oracle, deployer)
def deploy_oracle_master(deployer, proxy_admin, oracle_clone, oracle_quorum):
return deploy_with_proxy(OracleMaster, proxy_admin, deployer, oracle_clone, oracle_quorum)
def deploy_withdrawal(deployer, proxy_admin, cap, xcKSM):
return deploy_with_proxy(Withdrawal, proxy_admin, deployer, cap, xcKSM)
def deploy_ledger_clone(deployer):
return deploy(Ledger, deployer)
def deploy_wstksm(deployer, lido, vksm, token_decimals):
return deploy(WstKSM, deployer, lido, vksm, token_decimals)
def deploy_controller(deployer, proxy_admin, root_derivative_index, vksm, relay_encoder, xcm_transactor, x_token, hex1, hex2, as_derevative_hex):
return deploy_with_proxy(Controller, proxy_admin, deployer, root_derivative_index, vksm, relay_encoder, xcm_transactor, x_token, hex1, hex2, as_derevative_hex)
def deploy_lido(deployer, proxy_admin, auth_manager, vksm, controller, treasury, developers, oracle_master, withdrawal, deposit_cap, max_difference):
return deploy_with_proxy(Lido, proxy_admin, deployer, auth_manager, vksm, controller, developers, treasury, oracle_master, withdrawal, deposit_cap, max_difference)
def deploy_ledger_beacon(deployer, _ledger_clone, _lido):
return deploy(LedgerBeacon, deployer, _ledger_clone, _lido)
def deploy_leger_factory(deployer, _lido, _ledger_beacon):
return deploy(LedgerFactory, deployer, _lido, _ledger_beacon)
# deployment
def main():
#deployer = accounts.at(CONFIG['deployer'])
deployer = accounts.load(CONFIG['deployer'])
auth_super_admin = CONFIG['auth_sudo']
treasury = CONFIG['treasury']
developers = CONFIG['developers']
roles = CONFIG['roles']
oracles = CONFIG['oracles']
oracle_quorum = CONFIG['quorum']
vksm = CONFIG['precompiles']['vksm']
xcm_transactor = CONFIG['precompiles']['xcm_transactor']
relay_encoder = CONFIG['precompiles']['relay_encoder']
x_token = CONFIG['precompiles']['x_token']
era_sec = CONFIG['relay_spec']['era_duratation']
max_validators_per_ledger = CONFIG['relay_spec']['max_validators_per_ledger']
min_nominator_bond = CONFIG['relay_spec']['min_nominator_bond']
min_active_balance = CONFIG['relay_spec']['min_active_balance']
reverse_transfer_fee = CONFIG['relay_spec']['reverse_transfer_fee']
transfer_fee = CONFIG['relay_spec']['transfer_fee']
max_unlocking_chunks = CONFIG['relay_spec']['max_unlocking_chunks']
withdrawal_cap = CONFIG['withdrawal_cap']
deposit_cap = CONFIG['deposit_cap']
token_name = CONFIG['token_name']
token_symbol = CONFIG['token_symbol']
token_decimals = CONFIG['token_decimals']
hex1 = CONFIG['hex1']
hex2 = CONFIG['hex2']
as_derevative_hex = CONFIG['as_derevative_hex']
max_difference = CONFIG['oracle_limit']
root_derivative_index = CONFIG['root_derivative_index']
root_derivative_account = ss58decode(get_derivative_account(CONFIG['sovereign_account'], root_derivative_index))
print(f'{Fore.GREEN}Root derivative account: {root_derivative_account}')
stash_idxs = CONFIG['stash_indexes']
stashes = [ss58decode(get_derivative_account(root_derivative_account, idx)) for idx in stash_idxs]
print(f'{Fore.GREEN}Stash accounts: {stashes}')
xcm_max_weight = CONFIG['xcm_max_weight']
xcm_weights = CONFIG['xcm_weights']
proxy_admin = deploy_proxy_admin(deployer)
controller = deploy_controller(deployer, proxy_admin, root_derivative_index, vksm, relay_encoder, xcm_transactor, x_token, hex1, hex2, as_derevative_hex)
auth_manager = deploy_auth_manager(deployer, proxy_admin, auth_super_admin)
for role in roles:
print(f"{Fore.GREEN}Setting role: {role}")
if auth_manager.has(web3.solidityKeccak(["string"], [role]), roles[role]):
print(f"{Fore.YELLOW}Role {role} already setted, skipping..")
else:
auth_manager.addByString(role, roles[role], get_opts(deployer))
oracle_clone = deploy_oracle_clone(deployer)
oracle_master = deploy_oracle_master(deployer, proxy_admin, oracle_clone, oracle_quorum)
withdrawal = deploy_withdrawal(deployer, proxy_admin, withdrawal_cap, vksm)
lido = deploy_lido(deployer, proxy_admin, auth_manager, vksm, controller, treasury, developers, oracle_master, withdrawal, deposit_cap, max_difference)
lido.setTokenInfo(token_name, token_symbol, token_decimals, get_opts(CONFIG['deployer']))
print(f"\n{Fore.GREEN}Configuring controller...")
controller.setLido(lido, get_opts(deployer))
controller.setMaxWeight(xcm_max_weight, get_opts(roles['ROLE_CONTROLLER_MANAGER']))
controller.setWeights([w | (1<<65) for w in xcm_weights], get_opts(roles['ROLE_CONTROLLER_MANAGER']))
controller.setReverseTransferFee(reverse_transfer_fee, get_opts(roles['ROLE_CONTROLLER_MANAGER']))
controller.setTransferFee(transfer_fee, get_opts(roles['ROLE_CONTROLLER_MANAGER']))
ledger_clone = deploy_ledger_clone(deployer)
ledger_beacon = deploy_ledger_beacon(deployer, ledger_clone, lido)
ledger_factory = deploy_leger_factory(deployer, lido, ledger_beacon)
print(f'\n{Fore.GREEN}Lido configuration...')
lido.setLedgerBeacon(ledger_beacon, get_opts(roles['ROLE_BEACON_MANAGER']))
lido.setLedgerFactory(ledger_factory, get_opts(roles['ROLE_BEACON_MANAGER']))
lido.setRelaySpec((max_validators_per_ledger, min_nominator_bond, min_active_balance, max_unlocking_chunks), get_opts(roles['ROLE_SPEC_MANAGER']))
oracle_master.setAnchorEra(0, 1, era_sec, get_opts(roles['ROLE_SPEC_MANAGER']))
print(f'\n{Fore.GREEN}Adding oracle members...')
for oracle in oracles:
print(f"{Fore.YELLOW}Adding oracle member: {oracle}")
oracle_master.addOracleMember(oracle, get_opts(roles['ROLE_ORACLE_MEMBERS_MANAGER']))
ledgers = []
print(f'\n{Fore.GREEN}Adding ledgers...')
for i in range(len(stashes)):
s_bytes = ss58decode(stashes[i])
print(f"{Fore.GREEN}Added ledger, idx: {stash_idxs[i]} stash: {stashes[i]}")
lido.addLedger(s_bytes, s_bytes, stash_idxs[i], get_opts(roles['ROLE_LEDGER_MANAGER']))
ledgers.append(lido.findLedger(s_bytes))
wStKSM = deploy_wstksm(deployer, lido, vksm, token_decimals)
for ledger in ledgers:
print("Refreshing allowances for ledger:", ledger)
Ledger.at(ledger).refreshAllowances(get_opts(roles['ROLE_LEDGER_MANAGER']))
print(f'\n{Fore.GREEN}Sending vKSM to Controller...')
vksm_contract = vKSM_mock.at(vksm)
vksm_contract.transfer(controller, CONFIG['controller_initial_balance'], get_opts(deployer))
def prompt():
pass
|
StarcoderdataPython
|
107367
|
<gh_stars>0
#
# Solution to Project Euler Problem 1
# by <NAME>
#
# Answer: 233168
#
NUM = 1000
# Compute sum of the naturals that are a multiple of 3 or 5 and less than [NUM]
def compute():
return sum(i for i in range(1, NUM) if i % 3 == 0 or i % 5 == 0)
if __name__ == "__main__":
print(compute())
|
StarcoderdataPython
|
3238849
|
<reponame>Alirezaja1384/MajazAmooz
"""
Django settings for core project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
from collections import OrderedDict
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config("DEBUG", default=False, cast=bool)
ALLOWED_HOSTS = (
config("ALLOWED_HOSTS", default="127.0.0.1, localhost")
.replace(" ", "")
.split(",")
)
INTERNAL_IPS = (
config("INTERNAL_IPS", default="127.0.0.1, localhost")
.replace(" ", "")
.split(",")
)
# Application definition
INSTALLED_APPS = [
# Django apps
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# Third-party apps
"django_bleach",
"debug_toolbar",
"django_filters",
"django_tables2",
"crispy_forms",
"tinymce",
"django_resized",
"constance",
# Project apps
"authentication.apps.AuthenticationConfig",
"learning.apps.LearningConfig",
"ajax.apps.AjaxConfig",
"user.apps.UserConfig",
"shared.apps.SharedConfig",
]
MIDDLEWARE = [
# Include the Debug Toolbar middleware as early as possible
# in the list. However, it must come after any other middleware
# that encodes the response’s content, such as GZipMiddleware.
"debug_toolbar.middleware.DebugToolbarMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
# Custom middlewares
"shared.middleware.LoginRequiredMiddleware",
"shared.middleware.TimezoneMiddleware",
]
ROOT_URLCONF = "core.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [BASE_DIR / "templates"],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "core.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": config(
"DEFAULT_DATABASE_ENGINE",
default="django.db.backends.postgresql_psycopg2",
),
"NAME": config("DEFAULT_DATABASE_NAME", default="MajazAmooz"),
"USER": config("DEFAULT_DATABASE_USER", default="postgres"),
"PASSWORD": config("DEFAULT_DATABASE_PASSWORD", default="postgres"),
"HOST": config("DEFAULT_DATABASE_HOST", default="127.0.0.1"),
"PORT": config("DEFAULT_DATABASE_PORT", default=5432),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation"
".UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation"
".MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation"
".CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation"
".NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "fa"
TIME_ZONE = "UTC"
# Used in TimezoneMiddleware
DEFAULT_USER_TZ = "Asia/Tehran"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
# Other configurations in production.py/development.py
STATIC_URL = "/static/"
MEDIA_URL = "/media/"
# Custom uSer model
AUTH_USER_MODEL = "authentication.User"
# Default auto-created primary keys type
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# Email settings
EMAIL_BACKEND = config(
"EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
DEFAULT_FROM_EMAIL = config("DEFAULT_FROM_EMAIL", default=None)
EMAIL_HOST = config("EMAIL_HOST")
EMAIL_PORT = config("EMAIL_PORT")
EMAIL_HOST_USER = config("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = config("EMAIL_HOST_PASSWORD")
EMAIL_USE_TLS = config("EMAIL_USE_TLS", cast=bool)
# django-bleach settings
# https://django-bleach.readthedocs.io/en/latest/settings.html
BLEACH_ALLOWED_TAGS = [
"p",
"b",
"i",
"u",
"em",
"strong",
"a",
"img",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"span",
"sup",
"sub",
"code",
"table",
"tbody",
"tr",
"th",
"td",
]
BLEACH_ALLOWED_ATTRIBUTES = ["href", "title", "style", "src"]
BLEACH_ALLOWED_STYLES = [
"font-family",
"font-weight",
"font-size",
"font-variant",
"text-decoration",
"color",
"background-color",
"direction",
"text-align",
]
BLEACH_ALLOWED_PROTOCOLS = ["http", "https"]
# Strip unknown tags if True, replace with HTML escaped characters if False
BLEACH_STRIP_TAGS = True
# Strip HTML comments, or leave them in.
BLEACH_STRIP_COMMENTS = False
# Default form widget
BLEACH_DEFAULT_WIDGET = "tinymce.widgets.TinyMCE"
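# A minimal usage sketch for the bleach settings above (this would live in an
# app's models.py, not here; the "Tutorial" model and "body" field are
# assumptions for illustration):
#
#     from django.db import models
#     from django_bleach.models import BleachField
#
#     class Tutorial(models.Model):
#         body = BleachField()  # sanitized according to the BLEACH_* settings above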
# Login url
# https://docs.djangoproject.com/en/3.2/ref/settings/#login-url
LOGIN_URL = "/auth/login"
# Logout required url for LogoutRequiredMixin
LOGOUT_REQUIRED_URL = "/auth/logout_required"
# Login required by url pattern
# Used for LoginRequiredMiddleware
LOGIN_REQUIRED_URLS = [r"^/user/(.*)$"]
# Logging configuration
# https://docs.djangoproject.com/en/3.2/topics/logging/
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"simple": {"format": "%(levelname)s %(message)s"},
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
},
"json": {
"()": "pythonjsonlogger.jsonlogger.JsonFormatter",
"format": "%(levelname)s %(asctime)s %(module)s "
"%(pathname)s %(process)d %(thread)d %(message)s",
},
},
"handlers": {
"console": {
"formatter": "simple",
"class": "logging.StreamHandler",
},
"verbose_console": {
"formatter": "verbose",
"class": "logging.StreamHandler",
},
"file_json": {
"level": "WARNING",
"formatter": "json",
"class": "logging.FileHandler",
"filename": config("LOGGING_FILE_NAME", "logs.log"),
},
},
"loggers": {
"": {
"handlers": ["console", "file_json"],
"level": "WARNING",
"propagate": False,
},
"django": {
"handlers": ["console", "file_json"],
"level": "INFO",
"propagate": False,
},
"emails": {
"handlers": ["verbose_console", "file_json"],
"level": "WARNING",
"propagate": False,
},
"database": {
"handlers": ["verbose_console", "file_json"],
"level": "ERROR",
"propagate": False,
},
},
}
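# A minimal usage sketch for the named loggers configured above (application
# code would typically do this; the message text is only an illustration):
#
#     import logging
#
#     email_logger = logging.getLogger("emails")
#     email_logger.warning("Failed to send activation email")
#     # -> handled by verbose_console and file_json, since the "emails" logger
#     #    is configured at WARNING level with those handlers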
# django-crispy-forms settings
# https://django-crispy-forms.readthedocs.io/en/latest/
CRISPY_TEMPLATE_PACK = "bootstrap4"
# django-tinymce settings
# https://django-tinymce.readthedocs.io/en/latest/
TINYMCE_JS_ROOT = "js/tinymce/"
TINYMCE_JS_URL = os.path.join(STATIC_URL, "js/tinymce/tinymce.min.js")
TINYMCE_DEFAULT_CONFIG = {
"theme": "silver",
"height": 500,
"menubar": False,
"plugins": "link print preview searchreplace code"
"fullscreen table wordcount",
"toolbar": "undo redo | forecolor | bold italic | alignleft"
"aligncenter alignright alignjustify | bold italic underline"
"strikethrough | fontselect fontsizeselect formatselect | "
"table code searchreplace | preview fullscreen",
}
# django_tables2 settings
# https://django-tables2.readthedocs.io/en/latest/
DJANGO_TABLES2_TEMPLATE = "django_tables2/bootstrap4.html"
# django-resized settings
# https://github.com/un1t/django-resized
DJANGORESIZED_DEFAULT_SIZE = [960, 540]
DJANGORESIZED_DEFAULT_QUALITY = 72
DJANGORESIZED_DEFAULT_KEEP_META = True
DJANGORESIZED_DEFAULT_FORCE_FORMAT = "JPEG"
DJANGORESIZED_DEFAULT_FORMAT_EXTENSIONS = {"JPEG": ".jpg"}
DJANGORESIZED_DEFAULT_NORMALIZE_ROTATION = True
# django-constance settings
# https://django-constance.readthedocs.io/en/latest/
CONSTANCE_BACKEND = "constance.backends.redisd.RedisBackend"
CONSTANCE_REDIS_PREFIX = "constance:majazamooz:"
CONSTANCE_REDIS_CONNECTION = config("CONSTANCE_REDIS_CONNECTION")
CONSTANCE_CONFIG = OrderedDict(
[
# Website settings
(
"WEBSITE_KEYWORDS",
(
"آموزش,درسنامه,آزمون,آموزش مجازی,آزمون آنلاین",
"وبسایت - کلمات کلیدی",
str,
),
),
(
"WEBSITE_DESCRIPTION",
(
(
"با هم مجازی می آموزیم! مجاز آموز، جامعه ای از دانش آموزان"
" سراسر ایران است که با استفاده از فضای آنلاین در هر مکان"
" و زمان به آموزش دسترسی دارند و به یکدیگر آموزش می دهند."
),
"وبسایت - توضیحات",
str,
),
),
# Learning
(
"LEARNING_HOME_CAROUSEL_ITEMS_COUNT",
(6, "صفحه اصلی - تعداد گزینه اسلایدرها", int),
),
(
"LEARNING_RECOMMENDATION_ITEMS_COUNT",
(5, "آموزش - تعداد موارد پیشنهادها", int),
),
(
"LEARNING_TUTORIAL_ARCHIVE_PAGINATE_BY",
(30, "آرشیو آموزش ها - تعداد آموزش هر صفحه", int),
),
# User panel
(
"USER_PANEL_PAGINATE_BY",
(15, "پنل کاربری - تعداد آیتم هر صفحه", int),
),
(
"USER_PANEL_STATISTICS_LAST_MONTH_COUNT",
(5, "پنل کاربری - تعداد ماه های اخیر آمار", int),
),
# Tutorial relations
(
"TUTORIAL_LIKE_SCORE",
(5, "آموزش - امتباز لایک", int),
),
(
"TUTORIAL_LIKE_COIN",
(5, "آموزش - سکه لایک", int),
),
(
"TUTORIAL_VIEW_SCORE",
(1, "آموزش - امتباز بازدید کاربران", int),
),
(
"TUTORIAL_VIEW_COIN",
(1, "آموزش - سکه بازدید کاربران", int),
),
(
"TUTORIAL_UPVOTE_SCORE",
(0, "آموزش - امتباز رای موافق", int),
),
(
"TUTORIAL_UPVOTE_COIN",
(0, "آموزش - سکه رای موافق", int),
),
(
"TUTORIAL_DOWNVOTE_SCORE",
(0, "آموزش - امتباز رای مخالف", int),
),
(
"TUTORIAL_DOWNVOTE_COIN",
(0, "آموزش - سکه رای مخالف", int),
),
# Tutorial comment relations
(
"TUTORIAL_COMMENT_LIKE_SCORE",
(3, "دیدگاه آموزش - امتباز لایک", int),
),
(
"TUTORIAL_COMMENT_LIKE_COIN",
(3, "دیدگاه آموزش - سکه لایک", int),
),
(
"TUTORIAL_COMMENT_UPVOTE_SCORE",
(0, "دیدگاه آموزش - امتباز رای موافق", int),
),
(
"TUTORIAL_COMMENT_UPVOTE_COIN",
(0, "دیدگاه آموزش - سکه رای موافق", int),
),
(
"TUTORIAL_COMMENT_DOWNVOTE_SCORE",
(0, "دیدگاه آموزش - امتباز رای مخالف", int),
),
(
"TUTORIAL_COMMENT_DOWNVOTE_COIN",
(0, "دیدگاه آموزش - سکه رای مخالف", int),
),
]
)
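# A minimal usage sketch for reading the Constance values above at runtime
# (this belongs in a view or service module, not in settings; the view name is
# an assumption for illustration):
#
#     from constance import config
#
#     def tutorial_archive(request):
#         per_page = config.LEARNING_TUTORIAL_ARCHIVE_PAGINATE_BY
#         ...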
|
StarcoderdataPython
|
1664999
|
<filename>ojos_ca/interface/view/django/core.py
import sys
from logging import basicConfig, getLogger, INFO
from typing import Any, List
from django.conf import settings
from django.http.request import HttpRequest
from django.http.response import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from django.utils.decorators import method_decorator
from ojos_ca.di.view import BaseViewFactory
from ojos_ca.usecase.interactor import BaseInteractor
from ojos_ca.usecase.interactor.exception import MethodNotAllowedException
from ojos_ca.usecase.serializer.django import HttpResponseSerializer
from .. import BaseView as _BaseView
basicConfig()
logger = getLogger(__name__)
logger.setLevel(INFO)
class BaseView(_BaseView):
def get(self, request: HttpRequest, *args, **kwargs) -> Any:
if self.get_interactor is None:
raise MethodNotAllowedException(name='Method', value='GET')
return self.get_interactor.exec(request, *args, **kwargs)
def post(self, request: HttpRequest, *args, **kwargs) -> Any:
if self.post_interactor is None:
raise MethodNotAllowedException(name='Method', value='POST')
return self.post_interactor.exec(request, *args, **kwargs)
def put(self, request: HttpRequest, *args, **kwargs) -> Any:
if self.put_interactor is None:
raise MethodNotAllowedException(name='Method', value='PUT')
return self.put_interactor.exec(request, *args, **kwargs)
def delete(self, request: HttpRequest, *args, **kwargs) -> Any:
if self.delete_interactor is None:
raise MethodNotAllowedException(name='Method', value='DELETE')
return self.delete_interactor.exec(request, *args, **kwargs)
def patch(self, request: HttpRequest, *args, **kwargs) -> Any:
if self.patch_interactor is None:
raise MethodNotAllowedException(name='Method', value='PATCH')
return self.patch_interactor.exec(request, *args, **kwargs)
def options(self, request: HttpRequest, *args, **kwargs) -> Any:
if self.options_interactor is None:
raise MethodNotAllowedException(name='Method', value='OPTIONS')
return self.options_interactor.exec(request, *args, **kwargs)
class ViewWrapper(View):
_view_factory = None
_serializer = None
_allow_method = ['get', 'post', 'put', 'delete', 'patch', 'options']
@property
def view_factory(self) -> BaseViewFactory:
return self._view_factory
@view_factory.setter
def view_factory(self, view_factory: BaseViewFactory):
self._view_factory = view_factory
@property
def serializer(self) -> HttpResponseSerializer:
return self._serializer
@serializer.setter
def serializer(self, serializer: HttpResponseSerializer):
self._serializer = serializer
@property
def allow_method(self) -> List[str]:
return self._allow_method
@allow_method.setter
def allow_method(self, allow_method: List[str]):
self._allow_method = allow_method
def http_method_not_allowed(self, request, *args, **kwargs):
raise MethodNotAllowedException(name='method', value=request.method)
def dispatch(self, request, *args, **kwargs) -> HttpResponse:
method = request.method.lower()
if method in self.http_method_names and method in self.allow_method:
handler = getattr(self.view_factory.create(), method, self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
# kwargs = handler(request, *args, **kwargs)
# return self.serializer.to_response(**kwargs)
try:
kwargs = handler(request, *args, **kwargs)
return self.serializer.to_response(**kwargs)
except Exception as e:
logger.error(e)
return self.serializer.error_to_response(e)
@method_decorator(csrf_exempt, name='dispatch')
class ApiViewWrapper(ViewWrapper):
ACCESS_CONTROL_ALLOW_ORIGIN = getattr(settings, 'ACCESS_CONTROL_ALLOW_ORIGIN', '*')
ACCESS_CONTROL_ALLOW_METHODS = getattr(settings, 'ACCESS_CONTROL_ALLOW_METHODS', 'GET,POST,PUT,DELETE,PATCH,OPTIONS')
ACCESS_CONTROL_ALLOW_HEADERS = getattr(settings, 'ACCESS_CONTROL_ALLOW_HEADERS', 'Origin,Authorization,Accept,Content-Type')
ACCESS_CONTROL_MAX_AGE = getattr(settings, 'ACCESS_CONTROL_MAX_AGE', 3600)
ACCESS_CONTROL_ALLOW_CREDENTIALS = True
def dispatch(self, request, *args, **kwargs) -> HttpResponse:
method = request.method.lower()
if method == 'options':
response = self.serializer.to_response(status_code=204)
else:
response = super(ApiViewWrapper, self).dispatch(request, *args, **kwargs)
response['Access-Control-Allow-Origin'] = self.ACCESS_CONTROL_ALLOW_ORIGIN
if self.ACCESS_CONTROL_ALLOW_ORIGIN != '*' and \
self.ACCESS_CONTROL_ALLOW_CREDENTIALS:
response['Access-Control-Allow-Credentials'] = True
if method == 'options':
response['Access-Control-Allow-Methods'] = self.ACCESS_CONTROL_ALLOW_METHODS
            response['Access-Control-Allow-Headers'] = self.ACCESS_CONTROL_ALLOW_HEADERS
response['Access-Control-Max-Age'] = self.ACCESS_CONTROL_MAX_AGE
return response
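# A minimal wiring sketch (an assumption for illustration -- "ItemViewFactory"
# and the serializer constructor arguments are hypothetical; ViewWrapper only
# requires that view_factory and serializer are set before dispatch):
#
#     from django.urls import path
#
#     urlpatterns = [
#         path(
#             'items/',
#             ApiViewWrapper.as_view(
#                 view_factory=ItemViewFactory(),
#                 serializer=HttpResponseSerializer(),
#                 allow_method=['get', 'post'],
#             ),
#         ),
#     ]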
|
StarcoderdataPython
|
1766750
|
#!/usr/bin/env python
#
# This code calculates LQR gains for a drone system whose reference signal is obtained
# by differential flatness. The input to the system is the body frame thrust and body
# frame angular velocities, and output is the world frame position, velocity and
# angular position given by euler angles.
#
# For reference please see:
# Differential Flatness Based Control of a Rotorcraft For Aggressive Maneuvers, (September), 2688-2693.
# Retrieved from https://scholarsarchive.byu.edu/facpub/1949
#
# For a reference of the differential flatness proof for the drone dynamics refer
# to: <NAME>., <NAME>. (2011). Minimum snap trajectory generation and control for quadrotors. Proceedings - IEEE International Conference on Robotics and
# Automation, 2520-2525. https://doi.org/10.1109/ICRA.2011.5980409
#
# And this paper for important corrections on the demonstration done in the previous paper:
# <NAME>., <NAME>., & <NAME>. (2017). Differential Flatness of Quadrotor Dynamics Subject to Rotor Drag for Accurate Tracking of High-Speed Trajectories.
# https://doi.org/10.1109/LRA.2017.2776353
#
#
# The system is x2_dot = A*x_2 + B*u_2
#
# x2 = [x,y,z,v_x,v_y,v_z,phi,theta,psi].T
#
# u_2 =[u_2a, u_2b].T
#
# A = [[0_3x3, I_3x3, 0_3x3],
# [0_3x3, 0_3x3, 0_3x3],
# [0_3x3, 0_3x3, 0_3x3]]
#
# B = [[0_3x3, 0_3x3],
# [I_3x3, 0_3x3],
# [0_3x3, I_3x3]]
#
# The dynamics of this system can be divided as follows, which eases computation and allows
# use of the python control library.
# __
# [x_dot ] = [0 1][ x ] + [0][u_2a_x] |
# [v_x_dot] [0 0][v_x] [1] |
# |
# [y_dot ] = [0 1][ y ] + [0][u_2a_y] |
# [v_y_dot] [0 0][v_y] [1] |---- translation dynamics
# |
# [z_dot ] = [0 1][ z ] + [0][u_2a_z] |
# [v_z_dot] [0 0][v_z] [1] |
# __|
#
# __
# [phi_dot ] = [0][phi] + [0][u_2b_x] |
# |
# [theta_dot ] = [0][theta] + [0][u_2b_y] |---- rotational dynamics
# |
# [psi_dot ] = [0][psi] + [0][u_2b_z] |
# __|
#
# System division that we use to compute constants in a simpler manner.
import rl_quad.conventional_control as ctl
print(ctl)
import numpy as np
# In general u = Nu*r - K(x -Nx*r) = -K*x + (Nu + K*Nx)*r
# where K are the LQR gains, Nu and Nx are reference input matrices.
# See more at p.493 "Feedback Control of Dynamic Systems, <NAME>,
# Powell, <NAME>, Abbas"
# This method calculates Nu and Nx matrices for given A,B,C,D matrices.
def getInputMatrices(A_,B_,C_,D_):
aug1 = np.concatenate((A_,B_), axis = 1)
aug2 = np.concatenate((C_,D_), axis = 1)
# create [[A,B],[C,D]] matrix
inp = np.concatenate((aug1,aug2), axis = 0)
#print(inp)
# create multiplying matrix
mult = np.zeros((inp.shape[0],1))
mult[mult.shape[0]-1] = 1.0
# compute inv(inp)*mult
inp = np.linalg.inv(inp)*mult
# nx is all values but last, nu is simply last value
n_x = inp[0:(inp.shape[0]-1)]
n_u = inp[-1]
return n_u, n_x
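# Worked example for getInputMatrices with the double integrator defined below
# (At, Bt, Ct, D_): solving [[A, B], [C, D]] @ [Nx; Nu] = [0; 1] gives
# Nx = [1, 0]^T and Nu = 0, so the reference is interpreted as a position
# reference and the input u = -K*x + (Nu + K*Nx)*r vanishes at steady state,
# once x = [r, 0]^T.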
# Q should be nxn diagonal np.array, where n is dim(A). Diagonal elements should be
# the output performance weights
# R should be mxm np.array where m is dim(B). The diagonal values should be input
# effort weights.
#
# Example:
# output performance matrix for translation variables
# Qt = np.diag([100.0,1.0])
# input effort matrix for translation variables
# Rt = np.array([1.0])
def calculate_LQR_gains(A,B,C,D,Q,R):
# calculate LQR gains
(K, X, E) = ctl.lqr(A, B, Q, R)
# calculate Nu, Nx matrices for including reference input
Nu, Nx = getInputMatrices(A,B,C,D)
return K, Nu, Nx
# Calculate the gain matrix K required for the poles of a system
# x_dot = Ax + Bu, with u = -Kx
# to be equal to the pole array passed in the argument dp.
# dp is an np.array 1xm where m is dim(A)
#
# For the system to be stable, all elements in dp should be < 0
def calculate_pp_gains(A,B,C,D,dp):
# pole placement via ackermann's formula
K = ctl.acker(A,B,dp)
# calculate Nu, Nx matrices for including reference input
Nu, Nx = getInputMatrices(A,B,C,D)
return K, Nu, Nx
# define system matrix for translation variables
# (It is the result of linearization with computer-torque method)
At = np.matrix(
[[0.0,1.0],
[0.0,0.0]])
# input matrices
Bt = np.matrix(
[[0.0],
[1.0]])
# output matrices
Ct = np.matrix([1.0,0.0]) #np.matrix()
# system matrix for rotational variables
Ar = np.matrix([0.0])
# input matrix
Br = np.matrix([1.0])
# output matrix
Cr = np.matrix([1.0])
# transmission matrix for all systems
D_ = np.matrix([0.0])
# PID Gains for 2nd order system with transfer function
# H(s) = 1 / s**2
Kpx2 = 3.0
Kix2 = 0.5
Kdx2 = 3.0
Kpy2 = 3.0
Kiy2 = 0.5
Kdy2 = 3.0
Kpz2 = 3.0
Kiz2 = 1.0
Kdz2 = 10.0
# PID Gains for 1st order system with transfer function
# H(s) = 1 / s
Kp1 = 3
Ki1 = 0.5
Kd1 = 4
# Helper code for visualizing the performance of the gains computed
# by both the LQR and pole placement control methods
if __name__ == '__main__':
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(20,10))
fig.suptitle(" LQR, PP and PID controller performance")
ax0 = fig.add_subplot(2,1,1)#, projection='3d')
ax1 = fig.add_subplot(2,1,2)
#ax2 = fig.add_subplot(3,1,3)#, projection='3d')
#ax3 = fig.add_subplot(3,2,4)
#ax4 = fig.add_subplot(3,2,5)
#ax5 = fig.add_subplot(3,2,6)
# define simulation time
t_max = 10
dt = 0.01
t = np.arange(0.0,t_max,dt)
# output performance matrix for translation variables
Qt = np.diag([100.0,1.0])
# input effort matrix for translation variables
Rt = np.array([1.0])
# output performance matrix for rotational variables
Qr = np.diag([5.0])
# input effort matrix for rotational variables
Rr = np.array([1.0])
# Desired pole locations for pole placement method
    # translation and rotation dynamics poles
dpt = np.array([-3.0+10j,-3.0-10j])
dpr = np.array([-8.0])
Kt_lqr, N_ut_lqr, N_xt_lqr = calculate_LQR_gains(At,Bt,Ct,D_,Qt,Rt)
Kr_lqr, N_ur_lqr, N_xr_lqr = calculate_LQR_gains(Ar,Br,Cr,D_,Qr,Rr)
#print(type(Kr_lqr))
Kt_pp, N_ut_pp, N_xt_pp = calculate_pp_gains(At,Bt,Ct,D_,dpt)
Kr_pp, N_ur_pp, N_xr_pp = calculate_pp_gains(Ar,Br,Cr,D_,dpr)
print("Translation dynamics LQR K: {}, Nu: {}, Nx: {}".format(Kt_lqr,N_ut_lqr,N_xt_lqr))
print("Rotation dynamics LQR K: {}, Nu: {}, Nx: {}".format(Kr_lqr,N_ur_lqr,N_xr_lqr))
print("Translation dynamics PP K: {}, Nu: {}, Nx: {}".format(Kt_pp, N_ut_pp, N_xt_pp))
print("Rotation dynamics PP K: {}, Nu: {}, Nx: {}".format(Kr_pp, N_ur_pp, N_xr_pp))
# define step input gain: u(t) = R*u(t)
# for each control variable
Rx = 10.0
refx = Rx*np.ones_like(t)
Rw = 11.0
refw = Rw*np.ones_like(t)
# Define 2nd order system transfer function
num2 = np.array([1.0])
den2 = np.array([1.0,0.0,0.0])
plant2 = ctl.tf(num2, den2)
# Define PID controller for 2nd order system
num_pid_2 = np.array([Kdz2, Kpz2, Kiz2])
den_pid_2 = np.array([1.0, 0.0])
pid2 = ctl.tf(num_pid_2, den_pid_2)
# Define 1st order system transfer function
num1 = np.array([1.0])
den1 = np.array([1.0,0.0])
plant1 = ctl.tf(num1,den1)
# Define PID Controller for 1st order system
num_pid_1 = np.array([Kd1, Kp1]) # np.array([Kd1, Kp1, Ki1])
den_pid_1 = np.array([1.0]) # np.array([1.0, 0.0])
pid1 = ctl.tf(num_pid_1, den_pid_1)
# closed loop dynamics system with LQR
x_cl_lqr = ctl.ss(At-Bt*Kt_lqr, Bt*(N_ut_lqr + Kt_lqr*N_xt_lqr)*1.0 , Ct, D_)
w_cl_lqr = ctl.ss(Ar-Br*Kr_lqr, Br*(N_ur_lqr + Kr_lqr*N_xr_lqr)*1.0 , Cr, D_)
# closed loop dynamics system with PP
x_cl_pp = ctl.ss(At-Bt*Kt_pp, Bt*(N_ut_pp + Kt_pp*N_xt_pp)*1.0 , Ct, D_)
w_cl_pp = ctl.ss(Ar-Br*Kr_pp, Br*(N_ur_pp + Kr_pp*N_xr_pp)*1.0 , Cr, D_)
# closed loop dynamics system with PID
# interconnect plant and pid with feedback block with H(s)= 1
fb = ctl.tf([1],[1])
dummy = ctl.series(pid2, plant2)
pid_controlled_sys2 = ctl.feedback(dummy, fb, sign = -1)
fb2 = ctl.tf([1],[1])
dummy2 = ctl.series(pid1, plant1)
pid_controlled_sys1 = ctl.feedback(dummy2, fb2, sign = -1)
# define an input signal shape and draw response
tx, x_lqr, s = ctl.forced_response(x_cl_lqr, T=t, U=refx)
tx, w_lqr, s = ctl.forced_response(w_cl_lqr, T=t, U=refw)
tx, x_pp, s = ctl.forced_response(x_cl_pp, T=t, U=refx)
tx, w_pp, s = ctl.forced_response(w_cl_pp, T=t, U=refw)
tx, x_pid, s = ctl.forced_response(pid_controlled_sys2, T=t, U=refx)
tx, w_pid, s = ctl.forced_response(pid_controlled_sys1, T=t, U=refw)
#tx, x_ol, s = ctl.forced_response(plant2, T=t, U=refx)
ax0.plot(t, x_lqr,linestyle = '-',color ='r', label = "x_lqr")
ax0.plot(t, x_pp,linestyle = '-',color ='g', label = "x_pp")
ax0.plot(t, x_pid, linestyle = '-',color ='b', label = "x_pid")
ax0.plot(t, refx,linestyle = '--', color = "k", label = 'x ref')
ax0.set_title("Step response for translation dynamics", fontsize='small')
ax0.legend(loc='center right', shadow=True, fontsize='small')
ax0.set_xlabel("time {s}")
ax0.set_ylabel("x {m}")
print("PID Overshoot: {}".format(max(x_pid) - Rx))
print("")
ax1.plot(t, w_lqr, linestyle = '-',color ='r', label = "w_lqr")
ax1.plot(t, w_pp, linestyle = '-',color ='g', label = "w_pp")
ax1.plot(t, w_pid, linestyle = '-',color ='b', label = "w_pid")
ax1.plot(t, refw,linestyle = '--', color = "k", label = 'w ref')
ax1.set_title("Step response of rotational dynamics", fontsize='small')
ax1.legend(loc='center right', shadow=True, fontsize='small')
ax1.set_xlabel("time {s}")
ax1.set_ylabel("w {rad}")
"""
ax2.plot(t, x_ol,linestyle = '-',color ='r', label = "x_ol")
#ax1.plot(t, w_pp,linestyle = '-',color ='g', label = "w_pp")
ax2.plot(t, refx,linestyle = '--', color = "k", label = 'x ref')
ax2.set_title("Step response of open loop translation dynamics", fontsize='small')
ax2.legend(loc='center right', shadow=True, fontsize='small')
ax2.set_xlabel("time {s}")
ax2.set_ylabel("x {m}")
"""
plt.show()
|
StarcoderdataPython
|
1663223
|
'''cont = 1
while cont < 10:
print(cont)
cont += 1'''
n = 1
par = impar = 0
while n != 0:
n = int(input('Digite um valor: '))
if n != 0:
if n % 2 == 0:
par += 1
else:
impar += 1
print('Você digitou {} números pares e {} números ímpares!'.format(par, impar))
|
StarcoderdataPython
|
3205730
|
import numpy as np
import random
import math
from scipy import signal
class RoadGenerator():
def __init__(self):
self.L = 300 # Length of Road Profile (m)
self.B = 0.1 # Sampling Interval (m)
self.dn = 1/self.L # Frequency Band (1/m)
self.n0 = 0.1 # Spatial Frequency (cycles/m)
def createGeneralRoad(self, Vel_from, Vel_to, k1):
# Vel_from : Initial Velocity (kph)
# Vel_to : Final Velocity (kph)
# k1: from ISO 8608, 3 is very rough road
# L: Length of Road Profile (m)
# B: Sampling Interval (m)
# dn: Frequency Band (1/m)
# n0: Spatial Frequency (cycles/m)
L = self.L
B = self.B
dn= self.dn
n0= self.n0
# N: Number of data points
N = L/B
# Spatial Frequency Band (1/m)
n = np.arange(dn, N*dn+dn, dn)
# Abscissa Variable from 0 to L (x-Coordinate, m)
x = np.arange(0, L-B+B, B)
# Amplitude for Road Class
Amp1 = math.sqrt(dn) * math.pow(2, k1) * (1e-3) * (n0/n)
def gen():
# Random Phase Angle
phi1 = 2 * math.pi * np.random.rand(n.shape[0])
# Road hight (m)
z = np.zeros(x.shape[0])
for i in range(x.shape[0]):
z[i] = np.sum(Amp1 * np.cos(2 * math.pi * n * x[i] + phi1))
# Removing linear trend
z = signal.detrend(z)
            # Apply a Tukey window so the profile tapers to zero at both ends
            z = z * signal.windows.tukey(z.shape[0], 0.1)
return z
z_LH = gen()
z_RH = gen()
# Random velocity
v = np.full(z_LH.shape, (Vel_from + (Vel_to - Vel_from) * random.random()))
# kph to mps
v_mps = v / 3.6
        # Time stamps along the profile (s)
t_end = x / v_mps[0]
return x, z_LH, z_RH, v, t_end
def createBumpRoad(self, Vel_from, Vel_to, width=None, height=None):
if width is None:
width = 3000 + 600 * random.random()
if height is None:
height = 80 + 20 * random.random()
B = self.B
# milimeter to meter
width = width/1000
height= height/1000
temp = np.arange(-width/2, width/2+B, B)
# Road Height (m)
z = 1 - (np.power(temp, 2) / (np.power(width/2, 2)))
z = z.clip(min=0)
z = np.sqrt(z) * height
add_zeros_before = 10
# Road Distance (m)
x = np.arange(0, (z.shape[0] + add_zeros_before) * B, B)
# Add zero range
add_zero_after = 10
z = np.concatenate([
np.zeros((add_zero_after,)),
z,
np.zeros((add_zeros_before-add_zero_after,))
])
# Random velocity
v = np.full(z.shape, (Vel_from + (Vel_to - Vel_from) * random.random()))
# kph to mps
v_mps = v / 3.6
        # Time stamps along the profile (s)
t_end = x / v_mps[0]
# Same roads for left/right tires
return x, z, z, v, t_end
if __name__ == "__main__":
rg = RoadGenerator()
# rg.createGeneralRoad(30, 50, 1.5)
rg.createBumpRoad(30, 40, 3000, 100)
|
StarcoderdataPython
|
4834867
|
<filename>lib/__init__.py
#!/usr/bin/env python3
"""Module Init"""
import lib.cfgparser, lib.utils
import lib.preprocess_era5inp, lib.air_parcel
|
StarcoderdataPython
|
4835485
|
<reponame>HireIQ/django-session-security
import time
import unittest
from django.test.client import Client
from django import test
from session_security.utils import set_last_activity, get_last_activity
from datetime import datetime, timedelta
from .test_base import SettingsMixin
class MiddlewareTestCase(SettingsMixin, test.TestCase):
fixtures = ['session_security_test_user']
def setUp(self):
super(MiddlewareTestCase, self).setUp()
self.client = Client()
def test_auto_logout(self):
self.client.login(username='test', password='<PASSWORD>')
response = self.client.get('/admin/')
self.assertTrue('_auth_user_id' in self.client.session)
time.sleep(self.max_expire_after)
response = self.client.get('/admin/')
self.assertFalse('_auth_user_id' in self.client.session)
def test_last_activity_in_future(self):
self.client.login(username='test', password='<PASSWORD>')
now = datetime.now()
future = now + timedelta(0, self.max_expire_after * 2)
set_last_activity(self.client.session, future)
response = self.client.get('/admin/')
self.assertTrue('_auth_user_id' in self.client.session)
def test_non_javascript_browse_no_logout(self):
self.client.login(username='test', password='<PASSWORD>')
response = self.client.get('/admin/')
time.sleep(self.max_warn_after)
response = self.client.get('/admin/')
self.assertTrue('_auth_user_id' in self.client.session)
time.sleep(self.min_warn_after)
response = self.client.get('/admin/')
self.assertTrue('_auth_user_id' in self.client.session)
def test_javascript_activity_no_logout(self):
self.client.login(username='test', password='<PASSWORD>')
response = self.client.get('/admin/')
time.sleep(self.max_warn_after)
self.client.get('/session_security/ping/?idleFor=1')
self.assertTrue('_auth_user_id' in self.client.session)
time.sleep(self.min_warn_after)
self.client.get('/admin/')
self.assertTrue('_auth_user_id' in self.client.session)
def test_url_names(self):
self.client.login(username='test', password='<PASSWORD>')
# Confirm activity is updating
response = self.client.get('/admin/')
activity1 = get_last_activity(self.client.session)
time.sleep(min(2, self.min_warn_after))
response = self.client.get('/admin/')
activity2 = get_last_activity(self.client.session)
self.assertTrue(activity2 > activity1)
# Confirm activity on ignored URL is NOT updated
time.sleep(min(2, self.min_warn_after))
response = self.client.get('/ignore/')
activity3 = get_last_activity(self.client.session)
self.assertEqual(activity2, activity3)
|
StarcoderdataPython
|
93517
|
"""
Provides anadroid version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update anadroid` to change this file.
from incremental import Version
__version__ = Version("anadroid", 0, 5, 26)
__all__ = ["__version__"]
|
StarcoderdataPython
|
1668477
|
"""Google Analytics Reporting API v4 Metrics."""
from easy_gar.base import Metric
class ReportingMetric(Metric):
"""Analytics Metric class."""
def __init__(self, expression, alias=None, formatting_type=None):
super().__init__(expression, alias, formatting_type)
def __call__(self):
"""Return dictionary to be used in API requests."""
obj = {"expression": self.expression}
if self.alias:
obj["alias"] = self.alias
if self.formatting_type:
obj["formattingType"] = self.formatting_type
return obj
def __add__(self, other):
"""Metric addition."""
m = ReportingMetric(expression=f"({self.expression})+{other.expression}")
m.alias = f"{self} + {other}"
return m
def __sub__(self, other):
"""Metric subtraction."""
m = ReportingMetric(expression=f"({self.expression})-{other.expression}")
m.alias = f"{self} - {other}"
return m
def __mul__(self, other):
"""Metric multiplication."""
m = ReportingMetric(expression=f"({self.expression})*{other.expression}")
m.alias = f"{self} * {other}"
return m
def __truediv__(self, other):
"""Metric division."""
m = ReportingMetric(
expression=f"({self.expression})/{other.expression}",
formatting_type="FLOAT",
)
m.alias = f"{self} / {other}"
return m
class Metrics:
"""Analytics Metrics for use with the API objects."""
# Users
@property
def users(self):
return ReportingMetric(
expression="ga:users", alias="Users", formatting_type="INTEGER"
)
@property
def new_users(self):
return ReportingMetric(
expression="ga:newUsers", alias="New Users", formatting_type="INTEGER"
)
@property
def users_1d(self):
return ReportingMetric(
expression="ga:1dayUsers",
alias="1 Day Active Users",
formatting_type="INTEGER",
)
@property
def users_7d(self):
return ReportingMetric(
expression="ga:7dayUsers",
alias="7 Day Active Users",
formatting_type="INTEGER",
)
@property
def users_14d(self):
return ReportingMetric(
expression="ga:14dayUsers",
alias="14 Day Active Users",
formatting_type="INTEGER",
)
@property
def users_28d(self):
return ReportingMetric(
expression="ga:28dayUsers",
alias="28 Day Active Users",
formatting_type="INTEGER",
)
@property
def users_30d(self):
return ReportingMetric(
expression="ga:30dayUsers",
alias="30 Day Active Users",
formatting_type="INTEGER",
)
@property
def sessions_per_user(self):
return ReportingMetric(
expression="ga:sessionsPerUser",
alias="Sessions per User",
formatting_type="FLOAT",
)
@property
def percent_new_sessions(self):
return ReportingMetric(
expression="ga:percentNewSessions",
alias="% New Sessions",
formatting_type="PERCENT",
)
# Sessions
@property
def sessions(self):
return ReportingMetric(
expression="ga:sessions", alias="Sessions", formatting_type="INTEGER"
)
@property
def bounces(self):
return ReportingMetric(
expression="ga:bounces", alias="Bounces", formatting_type="INTEGER"
)
@property
def bounce_rate(self):
return ReportingMetric(
expression="ga:bounceRate", alias="Bounce Rate", formatting_type="PERCENT"
)
@property
def session_duration(self):
return ReportingMetric(
expression="ga:sessionDuration",
alias="Session Duration",
formatting_type="TIME",
)
@property
def avg_session_duration(self):
return ReportingMetric(
expression="ga:avgSessionDuration",
alias="Avg. Session Duration",
formatting_type="TIME",
)
@property
def unique_dimensions_combination(self):
return ReportingMetric(
expression="ga:uniqueDimensionCombinations",
alias="Unique Dimension Combinations",
formatting_type="INTEGER",
)
@property
def hits(self):
return ReportingMetric(
expression="ga:hits", alias="Hits", formatting_type="INTEGER"
)
# Traffic Sources
@property
def organic_searches(self):
return ReportingMetric(
expression="ga:organicSearches",
alias="Organic Searches",
formatting_type="INTEGER",
)
# Adwords
@property
def impressions(self):
return ReportingMetric(
expression="ga:impressions", alias="Impressions", formatting_type="INTEGER"
)
@property
def ad_clicks(self):
return ReportingMetric(
expression="ga:adClicks", alias="Clicks", formatting_type="INTEGER"
)
@property
def ad_cost(self):
return ReportingMetric(
expression="ga:adCost", alias="Cost", formatting_type="CURRENCY"
)
@property
def cpm(self):
return ReportingMetric(
expression="ga:CPM", alias="CPM", formatting_type="CURRENCY"
)
@property
def cpc(self):
return ReportingMetric(
expression="ga:CPC", alias="CPC", formatting_type="CURRENCY"
)
@property
def ctr(self):
return ReportingMetric(
expression="ga:CTR", alias="CTR", formatting_type="PERCENT"
)
@property
def cost_per_transaction(self):
return ReportingMetric(
expression="ga:costPerTransaction",
alias="Cost per Transaction",
formatting_type="CURRENCY",
)
@property
def cost_per_conversion(self):
return ReportingMetric(
expression="ga:costPerConversion",
alias="Cost per Conversion",
formatting_type="CURRENCY",
)
@property
def rpc(self):
return ReportingMetric(
expression="ga:RPC", alias="RPC", formatting_type="CURRENCY"
)
@property
def roas(self):
return ReportingMetric(
expression="ga:ROAS", alias="ROAS", formatting_type="CURRENCY"
)
# Goal Conversions (ALL)
@property
    def goal_starts_all(self):
return ReportingMetric(
expression="ga:goalStartsAll",
alias="Goal Starts",
formatting_type="INTEGER",
)
@property
def goal_completions_all(self):
return ReportingMetric(
expression="ga:goalCompletionsAll",
alias="Goal Completions",
formatting_type="INTEGER",
)
@property
def goal_value_all(self):
return ReportingMetric(
expression="ga:goalValueAll", alias="Goal Value", formatting_type="CURRENCY"
)
@property
def goal_value_per_session(self):
return ReportingMetric(
expression="ga:goalValuePerSession",
alias="Per Session Goal Value",
formatting_type="CURRENCY",
)
@property
def goal_conversion_rate_all(self):
return ReportingMetric(
expression="ga:goalConversionRateAll",
alias="Goal Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal_abandons_all(self):
return ReportingMetric(
expression="ga:goalAbandonsAll",
alias="Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal_abandon_rate_all(self):
return ReportingMetric(
expression="ga:goalAbandonRateAll",
alias="Total Abondonment Rate",
formatting_type="PERCENT",
)
# Goal Conversions (Itemized)
# Goal 01
@property
def goal01_starts(self):
return ReportingMetric(
expression="ga:goal1Starts",
alias="Goal 01 Starts",
formatting_type="INTEGER",
)
@property
def goal01_completions(self):
return ReportingMetric(
expression="ga:goal1Completions",
alias="Goal 01 Completions",
formatting_type="INTEGER",
)
@property
def goal01_value(self):
return ReportingMetric(
expression="ga:goal1Value",
alias="Goal 01 Value",
formatting_type="CURRENCY",
)
@property
def goal01_conversion_rate(self):
return ReportingMetric(
expression="ga:goal1ConversionRate",
alias="Goal 01 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal01_abandons(self):
return ReportingMetric(
expression="ga:goal1Abandons",
alias="Goal 01 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal01_abandon_rate(self):
return ReportingMetric(
expression="ga:goal1AbandonRate",
alias="Goal 01 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 02
@property
def goal02_starts(self):
return ReportingMetric(
expression="ga:goal2Starts",
alias="Goal 02 Starts",
formatting_type="INTEGER",
)
@property
def goal02_completions(self):
return ReportingMetric(
expression="ga:goal2Completions",
alias="Goal 02 Completions",
formatting_type="INTEGER",
)
@property
def goal02_value(self):
return ReportingMetric(
expression="ga:goal2Value",
alias="Goal 02 Value",
formatting_type="CURRENCY",
)
@property
def goal02_conversion_rate(self):
return ReportingMetric(
expression="ga:goal2ConversionRate",
alias="Goal 02 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal02_abandons(self):
return ReportingMetric(
expression="ga:goal2Abandons",
alias="Goal 02 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal02_abandon_rate(self):
return ReportingMetric(
expression="ga:goal2AbandonRate",
alias="Goal 02 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 03
@property
def goal03_starts(self):
return ReportingMetric(
expression="ga:goal3Starts",
alias="Goal 03 Starts",
formatting_type="INTEGER",
)
@property
def goal03_completions(self):
return ReportingMetric(
expression="ga:goal3Completions",
alias="Goal 03 Completions",
formatting_type="INTEGER",
)
@property
def goal03_value(self):
return ReportingMetric(
expression="ga:goal3Value",
alias="Goal 03 Value",
formatting_type="CURRENCY",
)
@property
def goal03_conversion_rate(self):
return ReportingMetric(
expression="ga:goal3ConversionRate",
alias="Goal 03 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal03_abandons(self):
return ReportingMetric(
expression="ga:goal3Abandons",
alias="Goal 03 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal03_abandon_rate(self):
return ReportingMetric(
expression="ga:goal3AbandonRate",
alias="Goal 03 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 04
@property
def goal04_starts(self):
return ReportingMetric(
expression="ga:goal4Starts",
alias="Goal 04 Starts",
formatting_type="INTEGER",
)
@property
def goal04_completions(self):
return ReportingMetric(
expression="ga:goal4Completions",
alias="Goal 04 Completions",
formatting_type="INTEGER",
)
@property
def goal04_value(self):
return ReportingMetric(
expression="ga:goal4Value",
alias="Goal 04 Value",
formatting_type="CURRENCY",
)
@property
def goal04_conversion_rate(self):
return ReportingMetric(
expression="ga:goal4ConversionRate",
alias="Goal 04 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal04_abandons(self):
return ReportingMetric(
expression="ga:goal4Abandons",
alias="Goal 04 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal04_abandon_rate(self):
return ReportingMetric(
expression="ga:goal4AbandonRate",
alias="Goal 04 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 05
@property
def goal05_starts(self):
return ReportingMetric(
expression="ga:goal5Starts",
alias="Goal 05 Starts",
formatting_type="INTEGER",
)
@property
def goal05_completions(self):
return ReportingMetric(
expression="ga:goal5Completions",
alias="Goal 05 Completions",
formatting_type="INTEGER",
)
@property
def goal05_value(self):
return ReportingMetric(
expression="ga:goal5Value",
alias="Goal 05 Value",
formatting_type="CURRENCY",
)
@property
def goal05_conversion_rate(self):
return ReportingMetric(
expression="ga:goal5ConversionRate",
alias="Goal 05 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal05_abandons(self):
return ReportingMetric(
expression="ga:goal5Abandons",
alias="Goal 05 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal05_abandon_rate(self):
return ReportingMetric(
expression="ga:goal5AbandonRate",
alias="Goal 05 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 06
@property
def goal06_starts(self):
return ReportingMetric(
expression="ga:goal6Starts",
alias="Goal 06 Starts",
formatting_type="INTEGER",
)
@property
def goal06_completions(self):
return ReportingMetric(
expression="ga:goal6Completions",
alias="Goal 06 Completions",
formatting_type="INTEGER",
)
@property
def goal06_value(self):
return ReportingMetric(
expression="ga:goal6Value",
alias="Goal 06 Value",
formatting_type="CURRENCY",
)
@property
def goal06_conversion_rate(self):
return ReportingMetric(
expression="ga:goal6ConversionRate",
alias="Goal 06 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal06_abandons(self):
return ReportingMetric(
expression="ga:goal6Abandons",
alias="Goal 06 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal06_abandon_rate(self):
return ReportingMetric(
expression="ga:goal6AbandonRate",
alias="Goal 06 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 07
@property
def goal07_starts(self):
return ReportingMetric(
expression="ga:goal7Starts",
alias="Goal 07 Starts",
formatting_type="INTEGER",
)
@property
def goal07_completions(self):
return ReportingMetric(
expression="ga:goal7Completions",
alias="Goal 07 Completions",
formatting_type="INTEGER",
)
@property
def goal07_value(self):
return ReportingMetric(
expression="ga:goal7Value",
alias="Goal 07 Value",
formatting_type="CURRENCY",
)
@property
def goal07_conversion_rate(self):
return ReportingMetric(
expression="ga:goal7ConversionRate",
alias="Goal 07 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal07_abandons(self):
return ReportingMetric(
expression="ga:goal7Abandons",
alias="Goal 07 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal07_abandon_rate(self):
return ReportingMetric(
expression="ga:goal7AbandonRate",
alias="Goal 07 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 08
@property
def goal08_starts(self):
return ReportingMetric(
expression="ga:goal8Starts",
alias="Goal 08 Starts",
formatting_type="INTEGER",
)
@property
def goal08_completions(self):
return ReportingMetric(
expression="ga:goal8Completions",
alias="Goal 08 Completions",
formatting_type="INTEGER",
)
@property
def goal08_value(self):
return ReportingMetric(
expression="ga:goal8Value",
alias="Goal 08 Value",
formatting_type="CURRENCY",
)
@property
def goal08_conversion_rate(self):
return ReportingMetric(
expression="ga:goal8ConversionRate",
alias="Goal 08 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal08_abandons(self):
return ReportingMetric(
expression="ga:goal8Abandons",
alias="Goal 08 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal08_abandon_rate(self):
return ReportingMetric(
expression="ga:goal8AbandonRate",
alias="Goal 08 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 09
@property
def goal09_starts(self):
return ReportingMetric(
expression="ga:goal9Starts",
alias="Goal 09 Starts",
formatting_type="INTEGER",
)
@property
def goal09_completions(self):
return ReportingMetric(
expression="ga:goal9Completions",
alias="Goal 09 Completions",
formatting_type="INTEGER",
)
@property
def goal09_value(self):
return ReportingMetric(
expression="ga:goal9Value",
alias="Goal 09 Value",
formatting_type="CURRENCY",
)
@property
def goal09_conversion_rate(self):
return ReportingMetric(
expression="ga:goal9ConversionRate",
alias="Goal 09 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal09_abandons(self):
return ReportingMetric(
expression="ga:goal9Abandons",
alias="Goal 09 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal09_abandon_rate(self):
return ReportingMetric(
expression="ga:goal9AbandonRate",
alias="Goal 09 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 10
@property
def goal10_starts(self):
return ReportingMetric(
expression="ga:goal10Starts",
alias="Goal 10 Starts",
formatting_type="INTEGER",
)
@property
def goal10_completions(self):
return ReportingMetric(
expression="ga:goal10Completions",
alias="Goal 10 Completions",
formatting_type="INTEGER",
)
@property
def goal10_value(self):
return ReportingMetric(
expression="ga:goal10Value",
alias="Goal 10 Value",
formatting_type="CURRENCY",
)
@property
def goal10_conversion_rate(self):
return ReportingMetric(
expression="ga:goal10ConversionRate",
alias="Goal 10 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal10_abandons(self):
return ReportingMetric(
expression="ga:goal10Abandons",
alias="Goal 10 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal10_abandon_rate(self):
return ReportingMetric(
expression="ga:goal10AbandonRate",
alias="Goal 10 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 11
@property
def goal11_starts(self):
return ReportingMetric(
expression="ga:goal11Starts",
alias="Goal 11 Starts",
formatting_type="INTEGER",
)
@property
def goal11_completions(self):
return ReportingMetric(
expression="ga:goal11Completions",
alias="Goal 11 Completions",
formatting_type="INTEGER",
)
@property
def goal11_value(self):
return ReportingMetric(
expression="ga:goal11Value",
alias="Goal 11 Value",
formatting_type="CURRENCY",
)
@property
def goal11_conversion_rate(self):
return ReportingMetric(
expression="ga:goal11ConversionRate",
alias="Goal 11 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal11_abandons(self):
return ReportingMetric(
expression="ga:goal11Abandons",
alias="Goal 11 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal11_abandon_rate(self):
return ReportingMetric(
expression="ga:goal11AbandonRate",
alias="Goal 11 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 12
@property
def goal12_starts(self):
return ReportingMetric(
expression="ga:goal12Starts",
alias="Goal 12 Starts",
formatting_type="INTEGER",
)
@property
def goal12_completions(self):
return ReportingMetric(
expression="ga:goal12Completions",
alias="Goal 12 Completions",
formatting_type="INTEGER",
)
@property
def goal12_value(self):
return ReportingMetric(
expression="ga:goal12Value",
alias="Goal 12 Value",
formatting_type="CURRENCY",
)
@property
def goal12_conversion_rate(self):
return ReportingMetric(
expression="ga:goal12ConversionRate",
alias="Goal 12 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal12_abandons(self):
return ReportingMetric(
expression="ga:goal12Abandons",
alias="Goal 12 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal12_abandon_rate(self):
return ReportingMetric(
expression="ga:goal12AbandonRate",
alias="Goal 12 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 13
@property
def goal13_starts(self):
return ReportingMetric(
expression="ga:goal13Starts",
alias="Goal 13 Starts",
formatting_type="INTEGER",
)
@property
def goal13_completions(self):
return ReportingMetric(
expression="ga:goal13Completions",
alias="Goal 13 Completions",
formatting_type="INTEGER",
)
@property
def goal13_value(self):
return ReportingMetric(
expression="ga:goal13Value",
alias="Goal 13 Value",
formatting_type="CURRENCY",
)
@property
def goal13_conversion_rate(self):
return ReportingMetric(
expression="ga:goal13ConversionRate",
alias="Goal 13 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal13_abandons(self):
return ReportingMetric(
expression="ga:goal13Abandons",
alias="Goal 13 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal13_abandon_rate(self):
return ReportingMetric(
expression="ga:goal13AbandonRate",
alias="Goal 13 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 14
@property
def goal14_starts(self):
return ReportingMetric(
expression="ga:goal14Starts",
alias="Goal 14 Starts",
formatting_type="INTEGER",
)
@property
def goal14_completions(self):
return ReportingMetric(
expression="ga:goal14Completions",
alias="Goal 14 Completions",
formatting_type="INTEGER",
)
@property
def goal14_value(self):
return ReportingMetric(
expression="ga:goal14Value",
alias="Goal 14 Value",
formatting_type="CURRENCY",
)
@property
def goal14_conversion_rate(self):
return ReportingMetric(
expression="ga:goal14ConversionRate",
alias="Goal 14 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal14_abandons(self):
return ReportingMetric(
expression="ga:goal14Abandons",
alias="Goal 14 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal14_abandon_rate(self):
return ReportingMetric(
expression="ga:goal14AbandonRate",
alias="Goal 14 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 15
@property
def goal15_starts(self):
return ReportingMetric(
expression="ga:goal15Starts",
alias="Goal 15 Starts",
formatting_type="INTEGER",
)
@property
def goal15_completions(self):
return ReportingMetric(
expression="ga:goal15Completions",
alias="Goal 15 Completions",
formatting_type="INTEGER",
)
@property
def goal15_value(self):
return ReportingMetric(
expression="ga:goal15Value",
alias="Goal 15 Value",
formatting_type="CURRENCY",
)
@property
def goal15_conversion_rate(self):
return ReportingMetric(
expression="ga:goal15ConversionRate",
alias="Goal 15 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal15_abandons(self):
return ReportingMetric(
expression="ga:goal15Abandons",
alias="Goal 15 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal15_abandon_rate(self):
return ReportingMetric(
expression="ga:goal15AbandonRate",
alias="Goal 15 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 16
@property
def goal16_starts(self):
return ReportingMetric(
expression="ga:goal16Starts",
alias="Goal 16 Starts",
formatting_type="INTEGER",
)
@property
def goal16_completions(self):
return ReportingMetric(
expression="ga:goal16Completions",
alias="Goal 16 Completions",
formatting_type="INTEGER",
)
@property
def goal16_value(self):
return ReportingMetric(
expression="ga:goal16Value",
alias="Goal 16 Value",
formatting_type="CURRENCY",
)
@property
def goal16_conversion_rate(self):
return ReportingMetric(
expression="ga:goal16ConversionRate",
alias="Goal 16 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal16_abandons(self):
return ReportingMetric(
expression="ga:goal16Abandons",
alias="Goal 16 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal16_abandon_rate(self):
return ReportingMetric(
expression="ga:goal16AbandonRate",
alias="Goal 16 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 17
@property
def goal17_starts(self):
return ReportingMetric(
expression="ga:goal17Starts",
alias="Goal 17 Starts",
formatting_type="INTEGER",
)
@property
def goal17_completions(self):
return ReportingMetric(
expression="ga:goal17Completions",
alias="Goal 17 Completions",
formatting_type="INTEGER",
)
@property
def goal17_value(self):
return ReportingMetric(
expression="ga:goal17Value",
alias="Goal 17 Value",
formatting_type="CURRENCY",
)
@property
def goal17_conversion_rate(self):
return ReportingMetric(
expression="ga:goal17ConversionRate",
alias="Goal 17 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal17_abandons(self):
return ReportingMetric(
expression="ga:goal17Abandons",
alias="Goal 17 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal17_abandon_rate(self):
return ReportingMetric(
expression="ga:goal17AbandonRate",
alias="Goal 17 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 18
@property
def goal18_starts(self):
return ReportingMetric(
expression="ga:goal18Starts",
alias="Goal 18 Starts",
formatting_type="INTEGER",
)
@property
def goal18_completions(self):
return ReportingMetric(
expression="ga:goal18Completions",
alias="Goal 18 Completions",
formatting_type="INTEGER",
)
@property
def goal18_value(self):
return ReportingMetric(
expression="ga:goal18Value",
alias="Goal 18 Value",
formatting_type="CURRENCY",
)
@property
def goal18_conversion_rate(self):
return ReportingMetric(
expression="ga:goal18ConversionRate",
alias="Goal 18 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal18_abandons(self):
return ReportingMetric(
expression="ga:goal18Abandons",
alias="Goal 18 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal18_abandon_rate(self):
return ReportingMetric(
expression="ga:goal18AbandonRate",
alias="Goal 18 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 19
@property
def goal19_starts(self):
return ReportingMetric(
expression="ga:goal19Starts",
alias="Goal 19 Starts",
formatting_type="INTEGER",
)
@property
def goal19_completions(self):
return ReportingMetric(
expression="ga:goal19Completions",
alias="Goal 19 Completions",
formatting_type="INTEGER",
)
@property
def goal19_value(self):
return ReportingMetric(
expression="ga:goal19Value",
alias="Goal 19 Value",
formatting_type="CURRENCY",
)
@property
def goal19_conversion_rate(self):
return ReportingMetric(
expression="ga:goal19ConversionRate",
alias="Goal 19 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal19_abandons(self):
return ReportingMetric(
expression="ga:goal19Abandons",
alias="Goal 19 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal19_abandon_rate(self):
return ReportingMetric(
expression="ga:goal19AbandonRate",
alias="Goal 19 Abandonment Rate",
formatting_type="PERCENT",
)
# Goal 20
@property
def goal20_starts(self):
return ReportingMetric(
expression="ga:goal20Starts",
alias="Goal 20 Starts",
formatting_type="INTEGER",
)
@property
def goal20_completions(self):
return ReportingMetric(
expression="ga:goal20Completions",
alias="Goal 20 Completions",
formatting_type="INTEGER",
)
@property
def goal20_value(self):
return ReportingMetric(
expression="ga:goal20Value",
alias="Goal 20 Value",
formatting_type="CURRENCY",
)
@property
def goal20_conversion_rate(self):
return ReportingMetric(
expression="ga:goal20ConversionRate",
alias="Goal 20 Conversion Rate",
formatting_type="PERCENT",
)
@property
def goal20_abandons(self):
return ReportingMetric(
expression="ga:goal20Abandons",
alias="Goal 20 Abandoned Funnels",
formatting_type="INTEGER",
)
@property
def goal20_abandon_rate(self):
return ReportingMetric(
expression="ga:goal20AbandonRate",
alias="Goal 20 Abandonment Rate",
formatting_type="PERCENT",
)
# Page Tracking
@property
def page_value(self):
return ReportingMetric(
expression="ga:pageValue", alias="Page Value", formatting_type="CURRENCY"
)
@property
def entrances(self):
return ReportingMetric(
expression="ga:entrances", alias="Entrances", formatting_type="INTEGER"
)
@property
def entrance_rate(self):
return ReportingMetric(
expression="ga:entranceRate",
alias="Entrances / Pageviews",
formatting_type="PERCENT",
)
@property
def pageviews(self):
return ReportingMetric(
expression="ga:pageviews", alias="Pageviews", formatting_type="INTEGER"
)
@property
def pageviews_per_session(self):
return ReportingMetric(
expression="ga:pageviewsPerSession",
alias="Pages / Session",
formatting_type="FLOAT",
)
@property
def unique_pageviews(self):
return ReportingMetric(
expression="ga:uniquePageviews",
alias="Unique Page Views",
formatting_type="INTEGER",
)
@property
def time_on_page(self):
return ReportingMetric(
expression="ga:timeOnPage", alias="Time on Page", formatting_type="TIME"
)
@property
def exits(self):
return ReportingMetric(
expression="ga:exits", alias="Exits", formatting_type="INTEGER"
)
@property
def avg_time_on_page(self):
return ReportingMetric(
expression="ga:avgTimeOnPage",
alias="Avg. Time on Page",
formatting_type="TIME",
)
@property
def exit_rate(self):
return ReportingMetric(
expression="ga:exitRate", alias="% Exit", formatting_type="PERCENT"
)
metrics = Metrics()
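# A minimal usage sketch (the alias string is derived from each metric's string
# form in the base Metric class; the surrounding request-building API is
# assumed and not shown here):
#
#     pages_per_user = metrics.pageviews / metrics.users
#     pages_per_user()
#     # -> {"expression": "(ga:pageviews)/ga:users",
#     #     "alias": "... / ...", "formattingType": "FLOAT"}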
|
StarcoderdataPython
|
3247408
|
import numpy as np
import pytest
import nept
toy_array = np.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
def test_find_nearest_idx():
assert nept.find_nearest_idx(toy_array, 13) == 3
assert nept.find_nearest_idx(toy_array, 11.49) == 1
assert nept.find_nearest_idx(toy_array, 11.51) == 2
assert nept.find_nearest_idx(toy_array, 25) == 10
assert nept.find_nearest_idx(toy_array, 1) == 0
def test_find_nearest_indices():
assert np.allclose(nept.find_nearest_indices(toy_array, np.array([13.2])), np.array([3]))
assert np.allclose(nept.find_nearest_indices(toy_array, np.array([10, 20])), np.array([0, 10]))
def test_sort_idx():
linear = nept.Position(np.linspace(0, 10, 4), np.linspace(0, 3, 4))
spikes = [nept.SpikeTrain(np.array([1.5]), 'test'),
nept.SpikeTrain(np.array([0.5]), 'test'),
nept.SpikeTrain(np.array([2.5]), 'test')]
tuning = nept.tuning_curve_1d(linear, spikes, binsize=3, gaussian_std=None)
sort_idx = nept.get_sort_idx(tuning)
assert np.allclose(sort_idx, [1, 0, 2])
def test_sort_idx1():
linear = nept.Position(np.linspace(0, 9, 4), np.linspace(0, 3, 4))
spikes = [nept.SpikeTrain(np.array([2.5]), 'test'),
nept.SpikeTrain(np.array([0.0]), 'test'),
nept.SpikeTrain(np.array([2.0]), 'test'),
nept.SpikeTrain(np.array([1.0]), 'test')]
tuning = nept.tuning_curve_1d(linear, spikes, binsize=3, gaussian_std=None)
sort_idx = nept.get_sort_idx(tuning)
assert np.allclose(sort_idx, [1, 3, 0, 2])
def test_multi_in_epochs_one():
epochs = nept.Epoch([1.0, 4.0, 6.0], [2.0, 5.0, 7.0])
spikes = [nept.SpikeTrain(np.array([6.7])),
nept.SpikeTrain(np.array([1.1, 6.5])),
nept.SpikeTrain(np.array([1.3, 4.1])),
nept.SpikeTrain(np.array([1.7, 4.3]))]
min_involved = 3
multi_epochs = nept.find_multi_in_epochs(spikes, epochs, min_involved)
assert np.allclose(multi_epochs.starts, np.array([1.]))
assert np.allclose(multi_epochs.stops, np.array([2.]))
def test_multi_in_epochs_edge():
epochs = nept.Epoch([1.0, 4.0, 6.0], [2.0, 5.0, 7.0])
spikes = [nept.SpikeTrain(np.array([6.7])),
nept.SpikeTrain(np.array([2.0, 6.5])),
nept.SpikeTrain(np.array([2.0, 4.1])),
nept.SpikeTrain(np.array([2.0, 4.3]))]
min_involved = 3
multi_epochs = nept.find_multi_in_epochs(spikes, epochs, min_involved)
assert np.allclose(multi_epochs.starts, np.array([1.]))
assert np.allclose(multi_epochs.stops, np.array([2.]))
def test_multi_in_epochs_mult():
epochs = nept.Epoch([1.0, 4.0, 6.0], [2.0, 5.0, 7.0])
spikes = [nept.SpikeTrain(np.array([1.1, 6.5])),
nept.SpikeTrain(np.array([1.3, 4.1])),
nept.SpikeTrain(np.array([1.7, 4.3]))]
min_involved = 2
multi_epochs = nept.find_multi_in_epochs(spikes, epochs, min_involved)
assert np.allclose(multi_epochs.starts, np.array([1., 4.]))
assert np.allclose(multi_epochs.stops, np.array([2., 5.]))
def test_multi_in_epoch_none():
epochs = nept.Epoch([0.0], [1.0])
spikes = [nept.SpikeTrain(np.array([1.1, 6.5])),
nept.SpikeTrain(np.array([1.3, 4.1])),
nept.SpikeTrain(np.array([1.7, 4.3]))]
min_involved = 2
multi_epochs = nept.find_multi_in_epochs(spikes, epochs, min_involved)
assert np.allclose(multi_epochs.starts, np.array([]))
assert np.allclose(multi_epochs.stops, np.array([]))
def test_get_xyedges_mult():
times = np.array([1.0, 2.0, 3.0])
data = np.array([[1.0, 1.1],
[5.0, 5.1],
[10.0, 10.1]])
position = nept.Position(data, times)
xedges, yedges = nept.get_xyedges(position, binsize=3)
assert np.allclose(xedges, np.array([1., 4., 7., 10.]))
assert np.allclose(yedges, np.array([1.1, 4.1, 7.1, 10.1]))
def test_get_xyedges_one_full():
times = np.array([1.0, 2.0, 3.0])
data = np.array([[1.0, 1.1],
[5.0, 5.1],
[10.0, 10.1]])
    position = nept.Position(data, times)
xedges, yedges = nept.get_xyedges(position, binsize=10)
assert np.allclose(xedges, np.array([1., 11.]))
assert np.allclose(yedges, np.array([1.1, 11.1]))
def test_get_xyedges_1d_position():
times = np.array([1.0, 2.0, 3.0])
data = np.array([1.0, 5.0, 10.0])
position = nept.Position(data, times)
with pytest.raises(ValueError) as excinfo:
xedges, yedges = nept.get_xyedges(position, binsize=3)
assert str(excinfo.value) == "position must be 2-dimensional"
def test_bin_spikes():
spikes = np.hstack((np.arange(0, 10, 1.4), np.arange(0.2, 5, 0.3)))
spikes = [nept.SpikeTrain(np.sort(spikes), 'test')]
counts = nept.bin_spikes(spikes, 0.0, 10.0, dt=2.,
window=2., gaussian_std=None, normalized=False, lastbin=False)
assert np.allclose(counts.data, np.array([[9.], [7.], [5.], [1.]]))
def test_bin_spikes_normalized():
spikes = [nept.SpikeTrain([0.8, 1.1, 1.2, 1.2, 2.1, 3.1])]
counts = nept.bin_spikes(spikes, 0.0, 4.0, dt=0.5, window=2., gaussian_std=None, normalized=True)
assert np.allclose(counts.data, np.array([[0.25], [1.], [1.], [1.25], [1.], [0.5], [0.5]]))
def test_bin_spikes_actual():
spikes = [nept.SpikeTrain([0.8, 1.1, 1.2, 1.2, 2.1, 3.1])]
counts = nept.bin_spikes(spikes, 0.0, 4.0, dt=0.5,
window=2., gaussian_std=None, normalized=False)
assert np.allclose(counts.data, np.array([[1.], [4.], [4.], [5.], [4.], [2.], [2.]]))
def test_bin_spikes_gaussian():
spikes = [nept.SpikeTrain([0.8, 1.1, 1.2, 1.2, 2.1, 3.1])]
time = np.array([0., 10.])
counts = nept.bin_spikes(spikes, 0.0, 10.0, dt=0.5, window=2.,
gaussian_std=0.51, normalized=True)
assert np.allclose(counts.data, np.array([[0.40347865],
[0.77042907],
[1.00980573],
[1.06273102],
[0.90701256],
[0.65089043],
[0.45510984],
[0.31307944],
[0.18950878],
[0.07738638],
[0.01560105],
[0.00129411],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0]]))
def test_bin_spikes_gaussian_even():
spikes = [nept.SpikeTrain([0.8, 1.1, 1.2, 1.2, 2.1, 3.1])]
counts = nept.bin_spikes(spikes, 0.0, 10.0, dt=0.5, window=2.,
gaussian_std=0.5, normalized=True)
assert np.allclose(counts.data, np.array([[0.40134569],
[0.77353559],
[1.0133553 ],
[1.06721847],
[0.90916337],
[0.64912917],
[0.45410060],
[0.31272558],
[0.18949094],
[0.07622698],
[0.01460966],
[0.00110826],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0]]))
def test_bin_spikes_mult_neurons():
spikes = [nept.SpikeTrain([0.8, 1.1, 1.2, 1.2, 2.1, 3.1]),
nept.SpikeTrain([0.8, 1.1, 1.2, 1.2, 2.1, 3.1])]
counts = nept.bin_spikes(spikes, 0.0, 4.0, dt=0.5, window=2, gaussian_std=None)
assert np.allclose(counts.data, np.array([[0.25, 0.25],
[1.0, 1.0],
[1.0, 1.0],
[1.25, 1.25],
[1.0, 1.0],
[0.5, 0.5],
[0.5, 0.5]]))
def test_bin_spikes_mult_neurons_adjust_window():
spikes = [nept.SpikeTrain([0.8, 1.1, 1.2, 1.2, 2.1, 3.1]),
nept.SpikeTrain([0.8, 1.1, 1.2, 1.2, 2.1, 3.1])]
counts = nept.bin_spikes(spikes, 0.0, 4.0, dt=0.5, window=2.5, gaussian_std=None)
assert np.allclose(counts.data, np.array([[0.8, 0.8],
[0.8, 0.8],
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
[0.4, 0.4],
[0.4, 0.4]]))
def test_bin_spikes_no_window():
spikes = np.hstack((np.arange(0, 10, 1.4), np.arange(0.2, 5, 0.3)))
spikes = [nept.SpikeTrain(np.sort(spikes), 'test')]
counts = nept.bin_spikes(spikes, 0.0, 10.0, dt=4., gaussian_std=None, normalized=False)
assert np.allclose(counts.data, np.array([[16.], [6.]]))
def test_cartesian():
xcenters = np.array([0., 4., 8.])
ycenters = np.array([0., 2., 4.])
xy_centers = nept.cartesian(xcenters, ycenters)
assert np.allclose(xy_centers, np.array([[0., 0.], [4., 0.], [8., 0.],
[0., 2.], [4., 2.], [8., 2.],
[0., 4.], [4., 4.], [8., 4.]]))
def test_consecutive():
array = np.array([0, 3, 4, 5, 9, 12, 13, 14])
groups = nept.consecutive(array, stepsize=1)
assert len(groups) == 4
assert np.allclose(groups[0], [0])
assert np.allclose(groups[1], [3, 4, 5])
assert np.allclose(groups[2], [9])
assert np.allclose(groups[3], [12, 13, 14])
def test_consecutive_equal_stepsize():
array = np.arange(0, 10, 1)
groups = nept.consecutive(array, stepsize=1)
assert np.all(groups == np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]))
def test_consecutive_all_split():
array = np.arange(0, 10, 1)
groups = nept.consecutive(array, stepsize=0.9)
assert np.all(groups == np.array([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]]))
def test_get_edges_no_lastbin():
edges = nept.get_edges(0.0, 4.1, binsize=0.5, lastbin=False)
assert np.allclose(edges, np.array([0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4.]))
def test_get_edges_simple():
edges = nept.get_edges(0.0, 4.1, binsize=0.5)
assert np.allclose(edges, np.array([0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.1]))
def test_perievent_slice_simple():
data = np.array([9., 7., 5., 3., 1.])
time = np.array([0., 1., 2., 3., 4.])
analogsignal = nept.AnalogSignal(data, time)
events = np.array([1.])
perievent_lfp = nept.perievent_slice(analogsignal, events, t_before=1., t_after=1.)
assert np.allclose(perievent_lfp.data, np.array([[9.], [7.], [5.]]))
assert np.allclose(perievent_lfp.time, np.array([-1., 0., 1.]))
def test_perievent_slice_with_dt():
data = np.array([9., 7., 5., 3., 1.])
time = np.array([0., 1., 2., 3., 4.])
analogsignal = nept.AnalogSignal(data, time)
events = np.array([1.])
perievent_lfp = nept.perievent_slice(analogsignal, events, t_before=1., t_after=1., dt=0.5)
assert np.allclose(perievent_lfp.data, np.array([[9.], [8.], [7.], [6.], [5.]]))
assert np.allclose(perievent_lfp.time, np.array([-1., -0.5, 0., 0.5, 1.]))
def test_perievent_slice_2d():
x = np.array([9., 7., 5., 3., 1.])
y = np.array([9., 7., 5., 3., 1.])
time = np.array([0., 1., 2., 3., 4.])
data = np.hstack([np.array(x)[..., np.newaxis], np.array(y)[..., np.newaxis]])
analogsignal = nept.AnalogSignal(data, time)
events = np.array([1.])
with pytest.raises(ValueError) as excinfo:
perievent_lfp = nept.perievent_slice(analogsignal, events, t_before=1., t_after=1.)
assert str(excinfo.value) == "AnalogSignal must be 1D."
def test_rest_threshold_simple():
times = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
data = np.array([0.0, 0.5, 1.0, 0.7, 1.7, 2.0])
position = nept.Position(data, times)
run_epoch = nept.rest_threshold(position, thresh=0.4, t_smooth=None)
assert np.allclose(run_epoch.starts, np.array([0., 3.]))
assert np.allclose(run_epoch.stops, np.array([1., 4.]))
def test_run_threshold_simple():
times = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
data = np.array([0.0, 0.5, 1.0, 0.7, 1.7, 2.0])
position = nept.Position(data, times)
run_epoch = nept.run_threshold(position, thresh=0.4, t_smooth=None)
assert np.allclose(run_epoch.starts, np.array([1., 4.]))
assert np.allclose(run_epoch.stops, np.array([3., 5.]))
def test_gaussian_filter_unchanged():
signal = np.array([1., 3., 7.])
std = 0.1
filtered_signal = nept.gaussian_filter(signal, std, dt=1.0)
    assert np.allclose(filtered_signal, signal)
|
StarcoderdataPython
|
37122
|
<gh_stars>0
import time
import torch
from torch.distributions.dirichlet import Dirichlet
from detectron2.engine.train_loop import SimpleTrainer
class LinearScalarizationTrainer(SimpleTrainer):
"""
A simple trainer for the most common type of task:
single-cost single-optimizer single-data-source iterative optimization,
optionally using data-parallelism.
It assumes that every step, you:
1. Compute the loss with a data from the data_loader.
2. Compute the gradients with the above loss.
3. Update the model with the optimizer.
All other tasks during training (checkpointing, logging, evaluation, LR schedule)
are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.
If you want to do anything fancier than this,
either subclass TrainerBase and implement your own `run_step`,
or write your own training loop.
"""
    def __init__(self, model, data_loader, optimizer, preference_vector: torch.Tensor = torch.ones(2) / 2):
"""
Args:
model: a torch Module. Takes a data from data_loader and returns a
dict of losses.
data_loader: an iterable. Contains data to be used to call model.
optimizer: a torch optimizer.
preference_vector: Vector detailing the weight between losses
"""
super().__init__(model, data_loader, optimizer)
self.preference_vector = preference_vector
def run_step(self):
"""
Implement the standard training logic described above.
"""
assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
start = time.perf_counter()
"""
If you want to do something with the data, you can wrap the dataloader.
"""
data = next(self._data_loader_iter)
data_time = time.perf_counter() - start
"""
If you want to do something with the losses, you can wrap the model.
"""
loss_dict = self.model(data)
losses = torch.matmul(torch.stack(list(loss_dict.values())), self.preference_vector)
loss_dict['total_loss'] = losses
"""
If you need to accumulate gradients or do something similar, you can
wrap the optimizer with your custom `zero_grad()` method.
"""
self.optimizer.zero_grad()
losses.backward()
self._write_metrics(loss_dict, data_time)
"""
If you need gradient clipping/scaling or other processing, you can
wrap the optimizer with your custom `step()` method. But it is
suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4
"""
self.optimizer.step()
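# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The trainer above scalarizes a dict of losses into a single objective with a
# preference vector. The toy model, dataset and loss names below are assumptions
# made only for this example; whether train() runs exactly as written depends on
# the installed detectron2 version (TrainerBase.train(start_iter, max_iter)).
if __name__ == "__main__":
    import torch.nn as nn
    from torch.utils.data import DataLoader, TensorDataset

    class ToyTwoLossModel(nn.Module):
        """Minimal model returning a dict of two losses, as SimpleTrainer expects."""
        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(4, 1)

        def forward(self, batch):
            x, y = batch
            pred = self.linear(x)
            return {"loss_mse": nn.functional.mse_loss(pred, y),
                    "loss_l1": nn.functional.l1_loss(pred, y)}

    dataset = TensorDataset(torch.randn(64, 4), torch.randn(64, 1))
    loader = DataLoader(dataset, batch_size=8)
    model = ToyTwoLossModel()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    # Weight the two losses 70/30; the vector length must match the number of losses.
    trainer = LinearScalarizationTrainer(model, loader, optimizer,
                                         preference_vector=torch.tensor([0.7, 0.3]))
    trainer.train(0, 5)  # 5 iterations, fewer than the 8 batches the loader yields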
|
StarcoderdataPython
|
70496
|
#
# Copyright (c) 2016-2021 Deephaven Data Labs and Patent Pending
#
# add JDK to path (otherwise jnius gives DLL load error)
import os
os.environ['PATH'] = os.environ['PATH'] + ";C:\\Program Files\\Java\jdk1.8.0_72\\jre\\bin\\server"
os.environ['PATH'] = os.environ['PATH'] + ";C:\\Program Files\\Java\jdk1.8.0_60\\jre\\bin\\server"
print(os.environ['PATH'])
import jpyutil
jpyutil.init_jvm()
# jpyutil.init_jvm(jvm_maxmem='512M', jvm_classpath=['target/test-classes'])
import jpy
Stack = jpy.get_type('java.util.Stack')
stack = Stack()
stack.push('hello')
stack.push('world')
print(stack.pop()) # --> 'world'
print(stack.pop()) # --> 'hello'
print(stack.getClass().getName())
|
StarcoderdataPython
|
150234
|
<gh_stars>1-10
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model helper utilities for processing search query and search detail requests.
Search constants and helper functions.
"""
# flake8: noqa Q000,E122,E131
# Disable Q000: Allow query strings to be in double quotation marks that contain single quotation marks.
# Disable E122: allow query strings to be more human readable.
# Disable E131: allow query strings to be more human readable.
GET_DETAIL_DAYS_LIMIT = 7 # Number of days in the past a get details request is allowed.
# Maximum number of days in the past to filter when fetching account search history: set to <= 0 to disable.
GET_HISTORY_DAYS_LIMIT = -1
# Account search history max result set size.
ACCOUNT_SEARCH_HISTORY_MAX_SIZE = 1000
# Maximum number or results returned by search.
SEARCH_RESULTS_MAX_SIZE = 1000
# Result set size limit clause
RESULTS_SIZE_LIMIT_CLAUSE = 'FETCH FIRST :max_results_size ROWS ONLY'
# Serial number search base where clause
SERIAL_SEARCH_BASE = """
SELECT r.registration_type,r.registration_ts AS base_registration_ts,
sc.serial_type,sc.serial_number,sc.year,sc.make,sc.model,
r.registration_number AS base_registration_num,
CASE WHEN serial_number = :query_value THEN 'EXACT' ELSE 'SIMILAR' END match_type,
fs.expire_date,fs.state_type,sc.id AS vehicle_id, sc.mhr_number
FROM registrations r, financing_statements fs, serial_collateral sc
WHERE r.financing_id = fs.id
AND r.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.base_reg_number IS NULL
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
AND sc.financing_id = fs.id
AND sc.registration_id_end IS NULL
"""
# Equivalent logic as DB view search_by_reg_num_vw, but API determines the where clause.
REG_NUM_QUERY = """
SELECT r2.registration_type, r2.registration_ts AS base_registration_ts,
r2.registration_number AS base_registration_num,
'EXACT' AS match_type, fs.state_type, fs.expire_date
FROM registrations r, financing_statements fs, registrations r2
WHERE r.financing_id = fs.id
AND r2.financing_id = fs.id
AND r2.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.registration_number = :query_value
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
"""
# Equivalent logic as DB view search_by_mhr_num_vw, but API determines the where clause.
MHR_NUM_QUERY = SERIAL_SEARCH_BASE + \
" AND sc.serial_type = 'MH' " + \
"AND sc.mhr_number = (SELECT searchkey_mhr(:query_value)) " + \
"ORDER BY match_type, r.registration_ts ASC " + RESULTS_SIZE_LIMIT_CLAUSE
# Equivalent logic as DB view search_by_serial_num_vw, but API determines the where clause.
SERIAL_NUM_QUERY = SERIAL_SEARCH_BASE + \
" AND sc.serial_type NOT IN ('AC', 'AF', 'AP') " + \
"AND sc.srch_vin = (SELECT searchkey_vehicle(:query_value)) " + \
"ORDER BY match_type, sc.serial_number " + RESULTS_SIZE_LIMIT_CLAUSE
# Equivalent logic as DB view search_by_aircraft_dot_vw, but API determines the where clause.
AIRCRAFT_DOT_QUERY = SERIAL_SEARCH_BASE + \
" AND sc.serial_type IN ('AC', 'AF', 'AP') " + \
"AND sc.srch_vin = (SELECT searchkey_aircraft(:query_value)) " + \
"ORDER BY match_type, sc.serial_number " + RESULTS_SIZE_LIMIT_CLAUSE
BUSINESS_NAME_QUERY = """
SELECT r.registration_type,r.registration_ts AS base_registration_ts,
p.business_name,
r.registration_number AS base_registration_num,
CASE WHEN p.business_name = :query_bus_name THEN 'EXACT' ELSE 'SIMILAR' END match_type,
fs.expire_date,fs.state_type,p.id
FROM registrations r, financing_statements fs, parties p
WHERE r.financing_id = fs.id
AND r.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.base_reg_number IS NULL
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
AND p.financing_id = fs.id
AND p.registration_id_end IS NULL
AND p.party_type = 'DB'
AND (SELECT searchkey_business_name(:query_bus_name)) <% p.business_srch_key
AND word_similarity(p.business_srch_key, (SELECT searchkey_business_name(:query_bus_name))) >= .60
ORDER BY match_type, p.business_name
""" + RESULTS_SIZE_LIMIT_CLAUSE
INDIVIDUAL_NAME_QUERY = """
SELECT r.registration_type,r.registration_ts AS base_registration_ts,
p.last_name,p.first_name,p.middle_initial,p.id,
r.registration_number AS base_registration_num,
CASE WHEN p.last_name = :query_last AND p.first_name = :query_first THEN 'EXACT' ELSE 'SIMILAR' END match_type,
fs.expire_date,fs.state_type, p.birth_date
FROM registrations r, financing_statements fs, parties p
WHERE r.financing_id = fs.id
AND r.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.base_reg_number IS NULL
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
AND p.financing_id = fs.id
AND p.registration_id_end IS NULL
AND p.party_type = 'DI'
AND p.id IN (SELECT * FROM unnest(match_individual_name(:query_last, :query_first)))
ORDER BY match_type, p.last_name, p.first_name
""" + RESULTS_SIZE_LIMIT_CLAUSE
INDIVIDUAL_NAME_MIDDLE_QUERY = """
SELECT r.registration_type,r.registration_ts AS base_registration_ts,
p.last_name,p.first_name,p.middle_initial,p.id,
r.registration_number AS base_registration_num,
CASE WHEN p.last_name = :query_last AND
p.first_name = :query_first AND
p.middle_initial = :query_middle THEN 'EXACT' ELSE 'SIMILAR' END match_type,
fs.expire_date,fs.state_type, p.birth_date
FROM registrations r, financing_statements fs, parties p
WHERE r.financing_id = fs.id
AND r.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.base_reg_number IS NULL
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
AND p.financing_id = fs.id
AND p.registration_id_end IS NULL
AND p.party_type = 'DI'
AND p.id IN (SELECT * FROM unnest(match_individual_name(:query_last, :query_first)))
ORDER BY match_type, p.last_name, p.first_name
""" + RESULTS_SIZE_LIMIT_CLAUSE
# Total result count queries for serial number, debtor name searches:
BUSINESS_NAME_TOTAL_COUNT = """
SELECT COUNT(r.id) AS query_count
FROM registrations r, financing_statements fs, parties p
WHERE r.financing_id = fs.id
AND r.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.base_reg_number IS NULL
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
AND p.financing_id = fs.id
AND p.registration_id_end IS NULL
AND p.party_type = 'DB'
AND (SELECT searchkey_business_name(:query_bus_name)) <% p.business_srch_key
AND word_similarity(p.business_srch_key, (SELECT searchkey_business_name(:query_bus_name))) >= .60
"""
INDIVIDUAL_NAME_TOTAL_COUNT = """
SELECT COUNT(r.id) AS query_count
FROM registrations r, financing_statements fs, parties p
WHERE r.financing_id = fs.id
AND r.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.base_reg_number IS NULL
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
AND p.financing_id = fs.id
AND p.registration_id_end IS NULL
AND p.party_type = 'DI'
AND p.id IN (SELECT * FROM unnest(match_individual_name(:query_last, :query_first)))
"""
SERIAL_SEARCH_COUNT_BASE = """
SELECT COUNT(r.id) AS query_count
FROM registrations r, financing_statements fs, serial_collateral sc
WHERE r.financing_id = fs.id
AND r.registration_type_cl IN ('PPSALIEN', 'MISCLIEN', 'CROWNLIEN')
AND r.base_reg_number IS NULL
AND (fs.expire_date IS NULL OR fs.expire_date > ((now() at time zone 'utc') - interval '30 days'))
AND NOT EXISTS (SELECT r3.id
FROM registrations r3
WHERE r3.financing_id = fs.id
AND r3.registration_type_cl = 'DISCHARGE'
AND r3.registration_ts < ((now() at time zone 'utc') - interval '30 days'))
AND sc.financing_id = fs.id
AND sc.registration_id_end IS NULL
"""
MHR_NUM_TOTAL_COUNT = SERIAL_SEARCH_COUNT_BASE + \
" AND sc.serial_type = 'MH' " + \
"AND sc.mhr_number = searchkey_mhr(:query_value)"
SERIAL_NUM_TOTAL_COUNT = SERIAL_SEARCH_COUNT_BASE + \
" AND sc.serial_type NOT IN ('AC', 'AF') " + \
"AND sc.srch_vin = searchkey_vehicle(:query_value)"
AIRCRAFT_DOT_TOTAL_COUNT = SERIAL_SEARCH_COUNT_BASE + \
" AND sc.serial_type IN ('AC', 'AF') " + \
"AND sc.srch_vin = searchkey_aircraft(:query_value)"
COUNT_QUERY_FROM_SEARCH_TYPE = {
'AC': AIRCRAFT_DOT_TOTAL_COUNT,
'BS': BUSINESS_NAME_TOTAL_COUNT,
'IS': INDIVIDUAL_NAME_TOTAL_COUNT,
'MH': MHR_NUM_TOTAL_COUNT,
'SS': SERIAL_NUM_TOTAL_COUNT
}
ACCOUNT_SEARCH_HISTORY_DATE_QUERY = \
'SELECT sc.id, sc.search_ts, sc.api_criteria, sc.total_results_size, sc.returned_results_size,' + \
'sr.exact_match_count, sr.similar_match_count ' + \
'FROM search_requests sc, search_results sr ' + \
'WHERE sc.id = sr.search_id ' + \
"AND sc.account_id = '?' " + \
"AND sc.search_ts > ((now() at time zone 'utc') - interval '" + str(GET_HISTORY_DAYS_LIMIT) + " days') " + \
'ORDER BY sc.search_ts DESC ' + \
'FETCH FIRST ' + str(ACCOUNT_SEARCH_HISTORY_MAX_SIZE) + ' ROWS ONLY'
ACCOUNT_SEARCH_HISTORY_QUERY = \
'SELECT sc.id, sc.search_ts, sc.api_criteria, sc.total_results_size, sc.returned_results_size,' + \
'sr.exact_match_count, sr.similar_match_count ' + \
'FROM search_requests sc, search_results sr ' + \
'WHERE sc.id = sr.search_id ' + \
"AND sc.account_id = '?' " + \
'ORDER BY sc.search_ts DESC ' + \
'FETCH FIRST ' + str(ACCOUNT_SEARCH_HISTORY_MAX_SIZE) + ' ROWS ONLY'
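# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The statements above use named bind parameters (:query_value, :query_bus_name,
# :query_last/:query_first, :max_results_size). One way they could be executed with
# SQLAlchemy is sketched below; the engine URL is a made-up placeholder.
#
#   from sqlalchemy import create_engine, text
#
#   engine = create_engine("postgresql://user:pass@localhost/ppr")  # hypothetical DSN
#   with engine.connect() as conn:
#       rows = conn.execute(text(REG_NUM_QUERY), {"query_value": "123456B"}).fetchall()
#       count = conn.execute(text(MHR_NUM_TOTAL_COUNT), {"query_value": "001234"}).scalar()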
|
StarcoderdataPython
|
163261
|
import uuid
import imghdr
import os
from cStringIO import StringIO
from flask import Flask, request, redirect, render_template, url_for, flash, jsonify
from flask_s3 import FlaskS3
import boto3
import requests
from cat import CatThat
FINISHED_FOLDER = 'finished'
S3_BUCKET = 'cats.databeard.com'
ALLOWED_EXTENSIONS = ['png', 'jpg', 'jpeg', 'gif']
app = Flask(__name__)
app.config['SECRET_KEY'] = os.environ.get('FLASK_SECRET_KEY')
app.config['FLASKS3_BUCKET_NAME'] = S3_BUCKET
app.config['FLASKS3_URL_STYLE'] = 'path'
#app.config['FLASKS3_ACTIVE'] = False
s3 = FlaskS3(app)
def valid_image_file_old(file_obj):
res = imghdr.what('ignored.txt', h=file_obj.read())
return res in ALLOWED_EXTENSIONS
def valid_image_file(file_obj):
return '.' in file_obj.filename and \
file_obj.filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
def upload_to_s3(file_obj, folder):
s3 = boto3.client('s3')
picture_name = '{0!s}/{1!s}.jpg'.format(folder, uuid.uuid4())
s3.upload_fileobj(file_obj, S3_BUCKET, picture_name, ExtraArgs={'ContentType': 'image/jpeg'})
s3_url = 'https://s3.amazonaws.com/{0!s}/{1!s}'.format(S3_BUCKET, picture_name)
return s3_url
@app.route('/', methods=['GET'])
def index(event=None, context=None):
return render_template('index.html')
@app.route('/finished', methods=['GET', 'POST'])
def process(event=None, context=None):
if request.method == 'POST':
picture_url = request.form.get('url')
if picture_url:
# Download the pic into tmp
r = requests.get(picture_url, stream=True)
if r.status_code != 200:
flash("We did not get 200 response code when downloading the image")
return redirect(url_for('index'))
file_obj = StringIO(r.content)
result = 'redirect'
elif 'file' in request.files:
file_obj = request.files['file']
if not valid_image_file(file_obj):
flash("This is not a valid image file")
return redirect(url_for('index'))
result = 'json'
else:
flash("We did not get posted file or url in the POSt variables")
return redirect(url_for('index'))
cat_that = CatThat()
cat_faced = cat_that.add_cat_face(file_obj=file_obj)
if not cat_faced:
flash("Couldn't put cats on this face, sorry. This this might be because no faces could be found in the image.")
return redirect(url_for('index'))
cat_path = upload_to_s3(file_obj=cat_faced, folder=FINISHED_FOLDER)
print('Cat Image URL: {}'.format(cat_path))
if result == 'redirect':
return render_template('finished.html', data={'url': cat_path})
else:
return jsonify({'success': True, 'url': cat_path})
cat_path = request.args.get('url')
if not cat_path:
return redirect(url_for('index'))
return render_template('finished.html', data={'url': cat_path})
if __name__ == "__main__":
app.run(debug=True)
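# --- Hedged usage notes (added for illustration; not part of the original app) ---
# The /finished route accepts either a form field "url" pointing at an image or a
# multipart upload under "file". Example requests against a local dev server
# (hypothetical host/port):
#
#   curl -X POST -F "url=https://example.com/face.jpg" http://localhost:5000/finished
#   curl -X POST -F "file=@face.jpg" http://localhost:5000/finished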
|
StarcoderdataPython
|
47840
|
import numpy
import pandas as pd
from boo.columns import SHORT_COLUMNS
from boo.errors import UnclassifiableCodeError
QUOTE_CHAR = '"'
EMPTY = int(0)
NUMERIC_COLUMNS = SHORT_COLUMNS.numeric
def adjust_rub(df, cols=NUMERIC_COLUMNS):
rows = (df.unit == "385")
df.loc[rows, cols] = df.loc[rows, cols].multiply(1000)
df.loc[rows, "unit"] = "384"
rows = (df.unit == "383")
df.loc[rows, cols] = df.loc[rows, cols].divide(1000).round(0).astype(int)
df.loc[rows, "unit"] = "384"
return df
def dequote(name: str):
"""Split company *name* to organisation and title."""
# Warning: will not work well on company names with more than 4 quotechars
parts = name.split(QUOTE_CHAR)
org = parts[0].strip()
cnt = name.count(QUOTE_CHAR)
if cnt == 2:
title = parts[1].strip()
elif cnt > 2:
title = QUOTE_CHAR.join(parts[1:])
else:
title = name
return org, title.strip()
def replace_names(title: str):
    return (title
            .replace("ПУБЛИЧНОЕ АКЦИОНЕРНОЕ ОБЩЕСТВО", "ПАО")
            .replace("ОТКРЫТОЕ АКЦИОНЕРНОЕ ОБЩЕСТВО", "ОАО")
            .replace("АКЦИОНЕРНОЕ ОБЩЕСТВО ЭНЕРГЕТИКИ И ЭЛЕКТРИФИКАЦИИ", "AO энерго")
            .replace("НЕФТЕПЕРЕРАБАТЫВАЮЩИЙ ЗАВОД", "НПЗ")
            .replace("ГЕНЕРИРУЮЩАЯ КОМПАНИЯ ОПТОВОГО РЫНКА ЭЛЕКТРОЭНЕРГИИ", "ОГК")
            .replace("ГОРНО-ОБОГАТИТЕЛЬНЫЙ КОМБИНАТ", "ГОК"))
def add_title(df):
s_ = df.name.apply(dequote)
df['org'] = s_.apply(lambda x: x[0])
df['title'] = s_.apply(lambda x: replace_names(x[1]))
return df
def rename_rows(df):
RENAME_DICT = {
'2460066195': "РусГидро",
'4716016979': "ФСК ЕЭС",
'7702038150': "Московский метрополитен",
'7721632827': "Концерн Росэнергоатом",
'7706664260': "Атомэнергопром",
'7703683145': "Холдинг ВТБ Капитал АЙ БИ",
'9102048801': "Черноморнефтегаз",
'7736036626': "РИТЭК"
}
keys = RENAME_DICT.keys()
ix = df.index.isin(keys)
if not ix.any():
return df
sub = df.loc[ix, 'title']
for k, v in RENAME_DICT.items():
if k in sub.index:
sub.loc[k] = v
df.loc[ix, 'title'] = sub
return df
def split_okved(code_string: str):
"""Get 3 levels of OKVED codes from *code_string*."""
if code_string.count(".") > 2:
raise UnclassifiableCodeError(code_string)
try:
codes = [int(x) for x in code_string.split(".")]
except ValueError:
raise UnclassifiableCodeError(code_string)
return codes + [0] * (3 - len(codes))
def add_okved_subcode(df):
df['ok1'], df['ok2'], df['ok3'] = zip(*df.okved.apply(split_okved))
return df
def fst(x):
try:
return int(x[0:2])
except TypeError:
return 0
def add_region(df):
df['region'] = df.inn.apply(fst)
return df
def more_columns(df):
return add_okved_subcode(add_region(add_title(df)))
def canonic_df(df):
"""Преобразовать данные внтури датафрейма:
- Привести все строки к одинаковым единицам измерения (тыс. руб.)
- Убрать неиспользуемые колонки (date_revised, report_type)
- Новые колонки:
* короткое название компании
* три уровня кода ОКВЭД
* регион (по ИНН)
"""
df_ = add_okved_subcode(add_region(add_title(df)))
df_ = adjust_rub(df_)
df_ = df_.set_index('inn')
df_ = rename_rows(df_)
return df_[canonic_columns()]
def canonic_columns(numeric=SHORT_COLUMNS.numeric):
return (['title', 'org', 'okpo', 'okopf', 'okfs', 'okved'] +
['unit'] +
['ok1', 'ok2', 'ok3', 'region'] +
numeric)
def is_numeric_column(name, numeric=SHORT_COLUMNS.numeric):
return name in numeric
def columns_typed_as_integer(numeric=SHORT_COLUMNS.numeric):
return numeric + ['ok1', 'ok2', 'ok3', 'region']
def canonic_dtypes():
def switch(col):
int_columns = columns_typed_as_integer()
return numpy.int64 if (col in int_columns) else str
result = {col: switch(col) for col in canonic_columns()}
result['inn'] = str
return result
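# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Quick demonstration of the pure helpers above; the sample strings are made up.
if __name__ == "__main__":
    org, title = dequote('ОБЩЕСТВО С ОГРАНИЧЕННОЙ ОТВЕТСТВЕННОСТЬЮ "РОМАШКА"')
    print(org)                   # ОБЩЕСТВО С ОГРАНИЧЕННОЙ ОТВЕТСТВЕННОСТЬЮ
    print(title)                 # РОМАШКА
    print(split_okved("62.01"))  # [62, 1, 0]
    print(fst("7702038150"))     # 77 -- region code from the first two INN digits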
|
StarcoderdataPython
|
3210533
|
# -*- coding: utf-8 -*-
from allauth.account.adapter import get_adapter
from rest_auth.registration.serializers import RegisterSerializer
class UserSerializer(RegisterSerializer):
def save(self, request):
adapter = get_adapter()
user = adapter.new_user(request)
self.cleaned_data = self.get_cleaned_data()
adapter.save_user(request, user, self)
self.custom_signup(request, user)
return user
|
StarcoderdataPython
|
3291316
|
<reponame>lnfjpt/GraphScope<filename>python/graphscope/nx/algorithms/tests/forward/test_assortativity.py
import networkx.algorithms.assortativity.tests.base_test
import networkx.algorithms.assortativity.tests.test_connectivity
import networkx.algorithms.assortativity.tests.test_correlation
import networkx.algorithms.assortativity.tests.test_mixing
import networkx.algorithms.assortativity.tests.test_neighbor_degree
import networkx.algorithms.assortativity.tests.test_pairs
import pytest
from graphscope.nx.utils.compat import import_as_graphscope_nx
# N.B import base_test at begin
import_as_graphscope_nx(networkx.algorithms.assortativity.tests.base_test,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.assortativity.tests.test_connectivity,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.assortativity.tests.test_neighbor_degree,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.assortativity.tests.test_correlation,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.assortativity.tests.test_mixing,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.assortativity.tests.test_pairs,
decorators=pytest.mark.usefixtures("graphscope_session"))
|
StarcoderdataPython
|
3370050
|
import os
import sys
# Add all the python paths needed to execute when using Python 3.6
sys.path.append(os.path.join(os.path.dirname(__file__), "models"))
sys.path.append(os.path.join(os.path.dirname(__file__), "models/arc"))
sys.path.append(os.path.join(os.path.dirname(__file__), "skiprnn_pytorch"))
sys.path.append(os.path.join(os.path.dirname(__file__), "models/wrn"))
import time
import numpy as np
from datetime import datetime, timedelta
from logger import Logger
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score
import shutil
from models import models
from models.models import ArcBinaryClassifier
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from option import Options, tranform_options
# Omniglot dataset
from omniglotDataLoader import omniglotDataLoader
from dataset.omniglot import Omniglot
from dataset.omniglot import OmniglotPairs
from dataset.omniglot import OmniglotOneShot
# Mini-imagenet dataset
from miniimagenetDataLoader import miniImagenetDataLoader
from dataset.mini_imagenet import MiniImagenet
from dataset.mini_imagenet import MiniImagenetPairs
from dataset.mini_imagenet import MiniImagenetOneShot
# Banknote dataset
from banknoteDataLoader import banknoteDataLoader
from dataset.banknote_pytorch import FullBanknote
from dataset.banknote_pytorch import FullBanknotePairs
from dataset.banknote_pytorch import FullBanknoteOneShot
# FCN
from models.conv_cnn import ConvCNNFactory
# Attention module in ARC
from models.fullContext import FullContextARC
from models.naiveARC import NaiveARC
# Co-Attn module
from models.coAttn import CoAttn
from do_epoch_fns import do_epoch_ARC, do_epoch_ARC_unroll, do_epoch_naive_full
import arc_train
import arc_val
import arc_test
import context_train
import context_val
import context_test
import multiprocessing
import ntpath
import cv2
from torch.optim.lr_scheduler import ReduceLROnPlateau
# CUDA_VISIBLE_DEVICES == 1 (710) / CUDA_VISIBLE_DEVICES == 0 (1070)
#import os
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
def path_leaf(path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def data_generation(opt):
# use cuda?
opt.cuda = False
#cudnn.benchmark = True # set True to speedup
# Load mean/std if exists
train_mean = None
train_std = None
if os.path.exists(os.path.join(opt.save, 'mean.npy')):
train_mean = np.load(os.path.join(opt.save, 'mean.npy'))
train_std = np.load(os.path.join(opt.save, 'std.npy'))
# Load Dataset
opt.setType='set1'
dataLoader = banknoteDataLoader(type=FullBanknotePairs, opt=opt, fcn=None, train_mean=train_mean,
train_std=train_std)
# Use the same seed to split the train - val - test
if os.path.exists(os.path.join(opt.save, 'dataloader_rnd_seed_arc.npy')):
rnd_seed = np.load(os.path.join(opt.save, 'dataloader_rnd_seed_arc.npy'))
else:
rnd_seed = np.random.randint(0, 100000)
np.save(os.path.join(opt.save, 'dataloader_rnd_seed_arc.npy'), rnd_seed)
# Get the DataLoaders from train - val - test
train_loader, val_loader, test_loader = dataLoader.get(rnd_seed=rnd_seed)
train_mean = dataLoader.train_mean
train_std = dataLoader.train_std
if not os.path.exists(os.path.join(opt.save, 'mean.npy')):
np.save(os.path.join(opt.save, 'mean.npy'), train_mean)
np.save(os.path.join(opt.save, 'std.npy'), train_std)
epoch = 1
if opt.arc_resume == True or opt.arc_load is None:
try:
while epoch < opt.train_num_batches:
                # wait and check whether more data is needed
lst_epochs = train_loader.dataset.getFolderEpochList()
while len(lst_epochs) > 10:
time.sleep(1)
lst_epochs = train_loader.dataset.getFolderEpochList()
# In case there is more than one generator.
# get the last folder epoch executed and update the epoch accordingly
if len(lst_epochs)>0:
bContinueCheckingNextFolder = True
while bContinueCheckingNextFolder:
lst_epoch_selected = []
for i in range(10):
lst_epochs = train_loader.dataset.getFolderEpochList()
epoch = np.array([path_leaf(str).split('_')[-1] for str in lst_epochs if 'train' in str]).astype(np.int).max()
lst_epoch_selected.append(epoch)
time.sleep(0.05)
# if after 0.5s we only have one possible epoch folder then go on with that epoch.
if len(set(lst_epoch_selected))==1:
bContinueCheckingNextFolder = False
# do the next epoch
epoch += 1
## set information of the epoch in the dataloader
repetitions = 1
start_time = datetime.now()
for repetition in range(repetitions):
print('Initiating epoch: %d' % (epoch))
train_loader.dataset.set_path_tmp_epoch_iteration(epoch,repetition)
for batch_idx, (data, label) in enumerate(train_loader):
noop = 0
time_elapsed = datetime.now() - start_time
print ("[train]", "epoch: ", epoch, ", time: ", time_elapsed.seconds, "s:", time_elapsed.microseconds / 1000)
if epoch % opt.val_freq == 0:
repetitions = opt.val_num_batches
start_time = datetime.now()
for repetition in range(repetitions):
val_loader.dataset.set_path_tmp_epoch_iteration(epoch,repetition)
for batch_idx, (data, label) in enumerate(val_loader):
noop = 0
time_elapsed = datetime.now() - start_time
print ("[val]", "epoch: ", epoch, ", time: ", time_elapsed.seconds, "s:", time_elapsed.microseconds / 1000)
repetitions = opt.test_num_batches
start_time = datetime.now()
for repetition in range(repetitions):
test_loader.dataset.set_path_tmp_epoch_iteration(epoch,repetition)
for batch_idx, (data, label) in enumerate(test_loader):
noop = 0
time_elapsed = datetime.now() - start_time
print ("[test]", "epoch: ", epoch, ", time: ", time_elapsed.seconds, "s:", time_elapsed.microseconds / 1000)
epoch += 1
print ("[%s] ... generating data done" % multiprocessing.current_process().name)
except KeyboardInterrupt:
pass
print ('###########################################')
print ('.... Starting Context Data Generation ....')
print ('###########################################')
# Set the new batchSize as in the ARC code.
opt.__dict__['batchSize'] = opt.naive_batchSize
# Change the path_tmp_data to point to one_shot
opt.path_tmp_data = opt.path_tmp_data.replace('/data/','/data_one_shot/')
# Load the dataset
opt.setType='set1'
if opt.datasetName == 'miniImagenet':
dataLoader = miniImagenetDataLoader(type=MiniImagenetOneShot, opt=opt, fcn=None)
elif opt.datasetName == 'omniglot':
dataLoader = omniglotDataLoader(type=OmniglotOneShot, opt=opt, fcn=None, train_mean=train_mean,
train_std=train_std)
elif opt.datasetName == 'banknote':
dataLoader = banknoteDataLoader(type=FullBanknoteOneShot, opt=opt, fcn=None,
train_mean=train_mean, train_std=train_std)
else:
pass
# Get the DataLoaders from train - val - test
train_loader, val_loader, test_loader = dataLoader.get(rnd_seed=rnd_seed)
epoch = 0
try:
while epoch < opt.naive_full_epochs:
            # wait and check whether more data is needed
lst_epochs = train_loader.dataset.getFolderEpochList()
if len(lst_epochs) > 50:
time.sleep(10)
# In case there is more than one generator.
# get the last folder epoch executed and update the epoch accordingly
if len(lst_epochs)>0:
epoch = np.array([path_leaf(str).split('_')[-1] for str in lst_epochs if 'train' in str]).astype(np.int).max()
epoch += 1
## set information of the epoch in the dataloader
repetitions = 1
start_time = datetime.now()
for repetition in range(repetitions):
train_loader.dataset.set_path_tmp_epoch_iteration(epoch,repetition)
for batch_idx, (data, label) in enumerate(train_loader):
noop = 0
time_elapsed = datetime.now() - start_time
print ("[train]", "epoch: ", epoch, ", time: ", time_elapsed.seconds, "s:", time_elapsed.microseconds / 1000)
if epoch % opt.naive_full_val_freq == 0:
repetitions = opt.val_num_batches
start_time = datetime.now()
for repetition in range(repetitions):
val_loader.dataset.set_path_tmp_epoch_iteration(epoch,repetition)
for batch_idx, (data, label) in enumerate(val_loader):
noop = 0
time_elapsed = datetime.now() - start_time
print ("[val]", "epoch: ", epoch, ", time: ", time_elapsed.seconds, "s:", time_elapsed.microseconds / 1000)
repetitions = opt.test_num_batches
start_time = datetime.now()
for repetition in range(repetitions):
test_loader.dataset.set_path_tmp_epoch_iteration(epoch,repetition)
for batch_idx, (data, label) in enumerate(test_loader):
noop = 0
time_elapsed = datetime.now() - start_time
print ("[test]", "epoch: ", epoch, ", time: ", time_elapsed.seconds, "s:", time_elapsed.microseconds / 1000)
epoch += 1
print ("[%s] ... generating data done" % multiprocessing.current_process().name)
except KeyboardInterrupt:
pass
###################################
def server_processing(opt):
# use cuda?
opt.cuda = torch.cuda.is_available()
cudnn.benchmark = True # set True to speedup
# Load mean/std if exists
train_mean = None
train_std = None
if os.path.exists(os.path.join(opt.save, 'mean.npy')):
train_mean = np.load(os.path.join(opt.save, 'mean.npy'))
train_std = np.load(os.path.join(opt.save, 'std.npy'))
# Load FCN
fcn = None
if opt.apply_wrn:
# Convert the opt params to dict.
optDict = dict([(key, value) for key, value in opt._get_kwargs()])
fcn = ConvCNNFactory.createCNN(opt.wrn_name_type, optDict)
if opt.wrn_load and os.path.exists(opt.wrn_load):
if torch.cuda.is_available():
fcn.load_state_dict(torch.load(opt.wrn_load))
else:
fcn.load_state_dict(torch.load(opt.wrn_load, map_location=torch.device('cpu')))
if opt.cuda:
fcn.cuda()
# Load Dataset
opt.setType='set1'
if opt.datasetName == 'miniImagenet':
dataLoader = miniImagenetDataLoader(type=MiniImagenetPairs, opt=opt, fcn=fcn)
elif opt.datasetName == 'omniglot':
dataLoader = omniglotDataLoader(type=OmniglotPairs, opt=opt, fcn=fcn,train_mean=train_mean,
train_std=train_std)
elif opt.datasetName == 'banknote':
dataLoader = banknoteDataLoader(type=FullBanknotePairs, opt=opt, fcn=fcn, train_mean=train_mean,
train_std=train_std)
else:
pass
# Get the params
# opt = dataLoader.opt
# Use the same seed to split the train - val - test
if os.path.exists(os.path.join(opt.save, 'dataloader_rnd_seed_arc.npy')):
rnd_seed = np.load(os.path.join(opt.save, 'dataloader_rnd_seed_arc.npy'))
else:
rnd_seed = np.random.randint(0, 100000)
np.save(os.path.join(opt.save, 'dataloader_rnd_seed_arc.npy'), rnd_seed)
# Get the DataLoaders from train - val - test
train_loader, val_loader, test_loader = dataLoader.get(rnd_seed=rnd_seed)
train_mean = dataLoader.train_mean
train_std = dataLoader.train_std
if not os.path.exists(os.path.join(opt.save, 'mean.npy')):
np.save(os.path.join(opt.save, 'mean.npy'), train_mean)
np.save(os.path.join(opt.save, 'std.npy'), train_std)
# free memory
del dataLoader
print ('[%s] ... Loading Set2' % multiprocessing.current_process().name)
opt.setType='set2'
if opt.datasetName == 'miniImagenet':
dataLoader2 = miniImagenetDataLoader(type=MiniImagenetPairs, opt=opt, fcn=None)
elif opt.datasetName == 'omniglot':
dataLoader2 = omniglotDataLoader(type=OmniglotPairs, opt=opt, fcn=None,train_mean=train_mean,
train_std=train_std)
elif opt.datasetName == 'banknote':
dataLoader2 = banknoteDataLoader(type=FullBanknotePairs, opt=opt, fcn=None, train_mean=train_mean,
train_std=train_std)
else:
pass
_, _, test_loader2 = dataLoader2.get(rnd_seed=rnd_seed, dataPartition = [None,None,'train+val+test'])
del dataLoader2
if opt.cuda:
models.use_cuda = True
if opt.name is None:
# if no name is given, we generate a name from the parameters.
# only those parameters are taken, which if changed break torch.load compatibility.
#opt.name = "train_{}_{}_{}_{}_{}_wrn".format(str_model_fn, opt.numGlimpses, opt.glimpseSize, opt.numStates,
opt.name = "{}_{}_{}_{}_{}_{}_wrn".format(opt.naive_full_type,
"fcn" if opt.apply_wrn else "no_fcn",
opt.arc_numGlimpses,
opt.arc_glimpseSize, opt.arc_numStates,
"cuda" if opt.cuda else "cpu")
print("[{}]. Will start training {} with parameters:\n{}\n\n".format(multiprocessing.current_process().name,
opt.name, opt))
# make directory for storing models.
models_path = os.path.join(opt.save, opt.name)
if not os.path.isdir(models_path):
os.makedirs(models_path)
else:
shutil.rmtree(models_path)
# create logger
logger = Logger(models_path)
# initialise the model
discriminator = ArcBinaryClassifier(num_glimpses=opt.arc_numGlimpses,
glimpse_h=opt.arc_glimpseSize,
glimpse_w=opt.arc_glimpseSize,
channels=opt.arc_nchannels,
controller_out=opt.arc_numStates,
attn_type = opt.arc_attn_type,
attn_unroll = opt.arc_attn_unroll,
attn_dense=opt.arc_attn_dense)
# load from a previous checkpoint, if specified.
if opt.arc_load is not None and os.path.exists(opt.arc_load):
if torch.cuda.is_available():
discriminator.load_state_dict(torch.load(opt.arc_load))
else:
discriminator.load_state_dict(torch.load(opt.arc_load, map_location=torch.device('cpu')))
if opt.cuda:
discriminator.cuda()
# Load the Co-Attn module
coAttn = None
if opt.use_coAttn:
coAttn = CoAttn(size = opt.coAttn_size, num_filters=opt.arc_nchannels, typeActivation = opt.coAttn_type, p = opt.coAttn_p)
if opt.coattn_load is not None and os.path.exists(opt.coattn_load):
if torch.cuda.is_available():
coAttn.load_state_dict(torch.load(opt.coattn_load))
else:
coAttn.load_state_dict(torch.load(opt.coattn_load, map_location=torch.device('cpu')))
if opt.cuda:
coAttn.cuda()
loss_fn = torch.nn.BCELoss()
if opt.cuda:
loss_fn = loss_fn.cuda()
lstOptimizationParameters = []
lstOptimizationParameters.append(list(discriminator.parameters()))
if opt.apply_wrn:
lstOptimizationParameters.append(list(fcn.parameters()))
if opt.use_coAttn:
lstOptimizationParameters.append(list(coAttn.parameters()))
flatten_lstOptimizationParameters = [item for sublist in lstOptimizationParameters for item in sublist]
optimizer = torch.optim.Adam(params=flatten_lstOptimizationParameters, lr=opt.arc_lr)
scheduler = ReduceLROnPlateau(optimizer, mode='min', patience=opt.arc_lr_patience, verbose=True)
# load preexisting optimizer values if exists
if os.path.exists(opt.arc_optimizer_path):
if torch.cuda.is_available():
optimizer.load_state_dict(torch.load(opt.arc_optimizer_path))
else:
optimizer.load_state_dict(torch.load(opt.arc_optimizer_path, map_location=torch.device('cpu')))
# Select the epoch functions
do_epoch_fn = None
if opt.arc_attn_unroll == True:
do_epoch_fn = do_epoch_ARC_unroll
else:
do_epoch_fn = do_epoch_ARC
###################################
## TRAINING ARC/CONVARC
###################################
epoch = 1
if opt.arc_resume == True or opt.arc_load is None:
try:
while epoch < opt.train_num_batches:
train_auc_epoch, train_auc_std_epoch, train_loss_epoch = arc_train.arc_train(epoch, do_epoch_fn, opt, train_loader,
discriminator, logger, optimizer=optimizer,
loss_fn=loss_fn, fcn=fcn, coAttn=coAttn)
# Reduce learning rate when a metric has stopped improving
scheduler.step(train_loss_epoch)
if epoch % opt.val_freq == 0:
val_auc_epoch, val_auc_std_epoch, val_loss_epoch, is_model_saved = arc_val.arc_val(epoch, do_epoch_fn, opt, val_loader,
discriminator, logger,
optimizer=optimizer,
loss_fn=loss_fn, fcn=fcn, coAttn=coAttn)
if is_model_saved:
print('Testing SET1')
test_loader.dataset.mode = 'generator_processor'
test_loader.dataset.remove_path_tmp_epoch(epoch)
test_auc_epoch, test_auc_std_epoch = arc_test.arc_test(epoch, do_epoch_fn, opt, test_loader, discriminator, logger)
print('Testing SET2')
test_loader2.dataset.mode = 'generator_processor'
test_loader2.dataset.remove_path_tmp_epoch(epoch)
test_auc_epoch, test_auc_std_epoch = arc_test.arc_test(epoch, do_epoch_fn, opt, test_loader2, discriminator, logger)
logger.step()
epoch += 1
print ("[%s] ... training done" % multiprocessing.current_process().name)
print ("[%s], best validation accuracy: %.2f, best validation loss: %.5f" % (
multiprocessing.current_process().name, arc_val.best_auc, arc_val.best_validation_loss))
print ("[%s] ... exiting training regime " % multiprocessing.current_process().name)
except KeyboardInterrupt:
pass
###################################
#''' UNCOMMENT!!!! TESTING NAIVE - FULLCONTEXT
# LOAD AGAIN THE FCN AND ARC models. Freezing the weights.
print ('[%s] ... Testing SET1' % multiprocessing.current_process().name)
# for the final testing set the data loader to generator
test_loader.dataset.mode = 'generator_processor'
test_loader.dataset.remove_path_tmp_epoch(epoch)
test_acc_epoch = arc_test.arc_test(epoch, do_epoch_fn, opt, test_loader, discriminator, logger)
print ('[%s] ... FINISHED! ...' % multiprocessing.current_process().name)
#'''
## Get the set2 and try
test_loader2.dataset.mode = 'generator_processor'
test_loader2.dataset.remove_path_tmp_epoch(epoch)
print ('[%s] ... Testing Set2' % multiprocessing.current_process().name)
test_acc_epoch = arc_test.arc_test(epoch, do_epoch_fn, opt, test_loader2, discriminator, logger)
print ('[%s] ... FINISHED! ...' % multiprocessing.current_process().name)
###########################################
## Now Train the NAIVE of FULL CONTEXT model
###########################################
print ('###########################################')
print ('... Starting Context Classification')
print ('###########################################')
# Set the new batchSize as in the ARC code.
opt.__dict__['batchSize'] = opt.naive_batchSize
# Add the model_fn Naive / Full Context classification
context_fn = None
if opt.naive_full_type == 'Naive':
context_fn = NaiveARC(numStates = opt.arc_numStates)
elif opt.naive_full_type == 'FullContext':
layer_sizes = opt.naive_full_layer_sizes
vector_dim = opt.arc_numStates
num_layers = opt.naive_full_num_layers
context_fn = FullContextARC(hidden_size=layer_sizes, num_layers=num_layers, vector_dim=vector_dim)
# Load the Fcn
fcn = None
if opt.apply_wrn:
# Convert the opt params to dict.
optDict = dict([(key, value) for key, value in opt._get_kwargs()])
fcn = ConvCNNFactory.createCNN(opt.wrn_name_type, optDict)
if torch.cuda.is_available():
fcn.load_state_dict(torch.load(opt.wrn_load))
else:
fcn.load_state_dict(torch.load(opt.wrn_load, map_location=torch.device('cpu')))
if opt.cuda:
fcn.cuda()
# Load the discriminator
if opt.arc_load is not None and os.path.exists(opt.arc_load):
if torch.cuda.is_available():
discriminator.load_state_dict(torch.load(opt.arc_load))
else:
discriminator.load_state_dict(torch.load(opt.arc_load, map_location=torch.device('cpu')))
if opt.cuda and discriminator is not None:
discriminator = discriminator.cuda()
# Load the Co-Attn module
coAttn = None
if opt.use_coAttn:
coAttn = CoAttn(size = opt.coAttn_size, num_filters=opt.arc_nchannels, typeActivation = opt.coAttn_type, p = opt.coAttn_p)
if opt.coattn_load is not None and os.path.exists(opt.coattn_load):
if torch.cuda.is_available():
coAttn.load_state_dict(torch.load(opt.coattn_load))
else:
coAttn.load_state_dict(torch.load(opt.coattn_load, map_location=torch.device('cpu')))
if opt.cuda:
coAttn.cuda()
# Load the Naive / Full classifier
if opt.naive_full_load_path is not None and os.path.exists(opt.naive_full_load_path):
if torch.cuda.is_available():
context_fn.load_state_dict(torch.load(opt.naive_full_load_path))
else:
context_fn.load_state_dict(torch.load(opt.naive_full_load_path, map_location=torch.device('cpu')))
if opt.cuda and context_fn is not None:
context_fn = context_fn.cuda()
# Set the epoch function
do_epoch_fn = do_epoch_naive_full
# Load the dataset
opt.setType='set1'
if opt.datasetName == 'miniImagenet':
dataLoader = miniImagenetDataLoader(type=MiniImagenetOneShot, opt=opt, fcn=fcn)
elif opt.datasetName == 'omniglot':
dataLoader = omniglotDataLoader(type=OmniglotOneShot, opt=opt, fcn=fcn, train_mean=train_mean,
train_std=train_std)
elif opt.datasetName == 'banknote':
dataLoader = banknoteDataLoader(type=FullBanknoteOneShot, opt=opt, fcn=fcn,
train_mean=train_mean, train_std=train_std)
else:
pass
# Get the params
opt = dataLoader.opt
# Use the same seed to split the train - val - test
if os.path.exists(os.path.join(opt.save, 'dataloader_rnd_seed_naive_full.npy')):
rnd_seed = np.load(os.path.join(opt.save, 'dataloader_rnd_seed_naive_full.npy'))
else:
rnd_seed = np.random.randint(0, 100000)
np.save(os.path.join(opt.save, 'dataloader_rnd_seed_naive_full.npy'), rnd_seed)
# Get the DataLoaders from train - val - test
train_loader, val_loader, test_loader = dataLoader.get(rnd_seed=rnd_seed)
# Loss
#loss_fn = torch.nn.CrossEntropyLoss()
loss_fn = torch.nn.BCELoss()
if opt.cuda:
loss_fn = loss_fn.cuda()
optimizer = torch.optim.Adam(params=context_fn.parameters(), lr=opt.naive_full_lr)
scheduler = ReduceLROnPlateau(optimizer, mode='min',
patience=opt.arc_lr_patience, verbose=True,
cooldown=opt.arc_lr_patience)
# load preexisting optimizer values if exists
if os.path.exists(opt.naive_full_optimizer_path):
if torch.cuda.is_available():
optimizer.load_state_dict(torch.load(opt.naive_full_optimizer_path))
else:
optimizer.load_state_dict(torch.load(opt.naive_full_optimizer_path, map_location=torch.device('cpu')))
###################################
## TRAINING NAIVE/FULLCONTEXT
if opt.naive_full_resume == True or opt.naive_full_load_path is None:
try:
epoch = 0
while epoch < opt.naive_full_epochs:
epoch += 1
train_acc_epoch, train_loss_epoch = context_train.context_train(epoch, do_epoch_naive_full, opt,
train_loader, discriminator, context_fn,
logger, optimizer, loss_fn, fcn, coAttn)
# Reduce learning rate when a metric has stopped improving
scheduler.step(train_loss_epoch)
if epoch % opt.naive_full_val_freq == 0:
val_acc_epoch, val_loss_epoch, is_model_saved = context_val.context_val(epoch, do_epoch_naive_full,
opt, val_loader,
discriminator, context_fn,
logger, loss_fn, fcn, coAttn)
if is_model_saved:
# Save the optimizer
torch.save(optimizer.state_dict(), opt.naive_full_optimizer_path)
# Test the model
test_acc_epoch = context_test.context_test(epoch, do_epoch_naive_full, opt, test_loader,
discriminator, context_fn, logger, fcn, coAttn)
logger.step()
print ("[%s] ... training done" % multiprocessing.current_process().name)
print ("[%s] best validation accuracy: %.2f, best validation loss: %.5f" % (
multiprocessing.current_process().name, context_val.best_accuracy, context_val.best_validation_loss))
print ("[%s] ... exiting training regime" % multiprocessing.current_process().name)
except KeyboardInterrupt:
pass
###################################
# LOAD AGAIN THE FCN AND ARC models. Freezing the weights.
print ('[%s] ... Testing' % multiprocessing.current_process().name)
test_loader.dataset.mode = 'generator_processor'
test_acc_epoch = context_test.context_test(epoch, do_epoch_fn, opt, test_loader, discriminator, context_fn, logger, fcn=fcn, coAttn=coAttn)
print ('[%s] ... FINISHED! ...' % multiprocessing.current_process().name)
def train(index = None):
# change parameters
opt = Options().parse()
#opt = Options().parse() if opt is None else opt
opt = tranform_options(index, opt)
if opt.mode == 'generator':
print('Starting generator...')
data_generation(opt)
elif opt.mode == 'generator_processor':
print('Starting generator - processor no save images...')
server_processing(opt)
else:
print('Starting processor...')
server_processing(opt)
def main():
train()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3263684
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from plot_2d_from_json import plot_community
from tqdm import tqdm
import json
import os
if __name__ == '__main__':
os.makedirs('image_data', exist_ok=True)
canvas_size = 1000
with open('ReCo_json.json', encoding='utf-8') as f:
data = json.load(f)
for community in tqdm(data):
_id = community['_id']
plot_community(community, canvas_size, printing_title=False,
building_color='black', boundary_color='red', hide_spines=True)
plt.savefig('image_data/'+str(_id)+'.jpg', dpi=150)
plt.clf()
|
StarcoderdataPython
|
3375437
|
from validation.param_check import ParamCheck
class SmartRegisterParamCheck(ParamCheck):
def __init__(self, params):
self.barcode = params['barcode'] if 'barcode' in params else None
self.order_id = params['orderId'] if 'orderId' in params else None
self.items = params['items'] if 'items' in params else None # noqa:E501
self.transaction_id = params['transactionId'] if 'transactionId' in params else None # noqa:E501
self.error_msg = []
def check_api_get_item_info(self):
self.check_barcode()
return self.error_msg
def check_api_get_order_info(self):
return self.error_msg
def check_api_put_cart_data(self):
self.check_item()
return self.error_msg
def check_api_put_linepay_request(self):
self.check_order_id()
return self.error_msg
def check_api_put_linepay_confirm(self):
self.check_transaction_id()
self.check_order_id()
return self.error_msg
def check_api_get_coupons_info(self):
return self.error_msg
def check_barcode(self):
if error := self.check_required(self.barcode, 'barcode'):
self.error_msg.append(error)
return
def check_order_id(self):
if error := self.check_required(self.order_id, 'orderId'):
self.error_msg.append(error)
return
if error := self.check_length(self.order_id, 'orderId', 1, None): # noqa:E501
self.error_msg.append(error)
def check_transaction_id(self):
if error := self.check_required(self.transaction_id, 'transactionId'):
self.error_msg.append(error)
return
if error := self.check_length(self.transaction_id, 'transactionId', 1, None): # noqa:E501
self.error_msg.append(error)
def check_item(self):
def check_item_barcode(self, barcode):
if error := self.check_required(barcode, 'barcode'):
self.error_msg.append(error)
return
def check_order_quantity(self, quantity):
if error := self.check_required(quantity, 'quantity'):
self.error_msg.append(error)
return
        # Check that items exists; if it does not, skip checking its contents
if error := self.check_required(self.items, 'items'):
self.error_msg.append(error)
return
        # Check the contents of each item in a loop
for item_single in self.items:
check_item_barcode(self,
item_single['barcode']
if 'barcode' in item_single else None)
check_order_quantity(self,
item_single['quantity']
if 'quantity' in item_single else None)
|
StarcoderdataPython
|
3358118
|
import os
import requests
from pprint import pprint
import yaml
class Playlists():
def __init__(self):
self.data = self.read_file()
self.login(self.data)
self.playlists = self.data.get('playlists')
def login(self, data):
user_id = self.data.get('server_data').get('user_id')
username = self.data.get('server_data').get('user')
password = self.data.get('server_data').get('password')
api_url = self.data.get('server_data').get('server')
self.music_library = self.data.get('server_data').get(
'music_library', 'Music')
auth_data = {
'Username': username,
'Pw': password
}
headers = {}
authorization = (
f'MediaBrowser UserId="{user_id}", '
f'Client="other", '
f'Device="computer", '
f'DeviceId="auto-playlist", '
f'Version="0.0.0"'
)
headers['x-emby-authorization'] = authorization
auth_url = f'{api_url}/Users/AuthenticateByName'
r = requests.post(auth_url, headers=headers, data=auth_data)
token = r.json().get('AccessToken')
headers['x-mediabrowser-token'] = token
self.headers = headers
self.api_url = api_url
self.user_id = user_id
def read_file(self):
path = os.path.dirname(os.path.abspath(__file__))
with open(f'{path}/config.yaml', 'r') as f:
data = yaml.load(f, Loader=yaml.FullLoader)
return data
def get_library_id(self, library_name):
r = requests.get(f'{self.api_url}/Users/{self.user_id}/Views',
headers=self.headers)
r.raise_for_status()
libraries = r.json().get('Items')
library_id = [ x.get('Id') for x in libraries
if x.get('Name') == library_name]
if library_id:
library_id = library_id[0]
return library_id
def get_all_playlists(self):
library_id = self.get_library_id('Playlists')
r = requests.get(f'{self.api_url}/Items?ParentId={library_id}'
f'&UserId={self.user_id}&IncludeItemTypes=Playlist'
f'&Recursive=true', headers=self.headers)
r.raise_for_status()
existing_playlists = r.json().get('Items')
playlists = {i.get('Name'): i.get('Id') for i in existing_playlists }
return playlists
def create_playlist(self, playlist_name):
data = {'Name': playlist_name, 'MediaType': 'Audio'}
r = requests.post(f'{self.api_url}/Playlists?UserId={self.user_id}'
f'&Name={playlist_name}', headers=self.headers,
data=data)
r.raise_for_status()
def add_single_to_playlist(self, playlist_id, item_id):
r = requests.post(f'{self.api_url}/Playlists/{playlist_id}/Items?'
f'Ids={item_id}&UserId={self.user_id}',
headers=self.headers)
r.raise_for_status()
def add_bulk_to_playlist(self, playlist_id, items):
item_ids = [ item.get('Id') for item in items ]
        grouped_ids = list(self.split_list(item_ids, 15))
index = 1
for group in grouped_ids:
item_ids = ','.join(group)
print(f'Adding group {index} to playlist')
index += 1
r = requests.post(f'{self.api_url}/Playlists/{playlist_id}/Items?'
f'Ids={item_ids}&UserId={self.user_id}',
headers=self.headers)
r.raise_for_status()
def get_playlist_id(self, playlist_name):
playlists = self.get_all_playlists()
playlist_id = playlists.get(playlist_name)
return playlist_id
def get_playlist_contents(self, playlist_name):
playlist_id = self.get_playlist_id(playlist_name)
print('start')
r = requests.get(f'{self.api_url}/Playlists/{playlist_id}/Items?'
f'UserId={self.user_id}', headers=self.headers)
r.raise_for_status()
return r.json().get('Items')
def clear_playlist(self, playlist_name):
playlist_id = self.get_playlist_id(playlist_name)
tracks = self.get_playlist_contents(playlist_name)
tracklist = [track.get('Id') for track in tracks ]
grouped_ids = list(self.split_list(tracklist, 15))
for group in grouped_ids:
item_ids = ','.join(group)
#print(item_ids)
#return item_ids
print(f'{self.api_url}/Playlists/{playlist_id}/Items?'
f'UserId={self.user_id}&EntryIds={item_ids}')#,
r = requests.delete(f'{self.api_url}/Playlists/{playlist_id}/Items?'
f'UserId={self.user_id}&EntryIds={item_ids}',
headers=self.headers)
print(r.status_code)
r.raise_for_status()
def split_list(self, item_ids, size):
for i in range(0, len(item_ids), size):
yield item_ids[i:i+size]
def get_all_tracks(self):
library_id = self.get_library_id(self.music_library)
r = requests.get(f'{self.api_url}/Users/{self.user_id}/Items?'
f'ParentId={library_id}&Recursive=true&'
f'IncludeItemTypes=Audio&'
f'Fields=Genres', headers=self.headers)
r.raise_for_status()
tracks = r.json().get('Items')
return tracks
def get_recent_tracks(self, num):
library_id = self.get_library_id(self.music_library)
r = requests.get(f'{self.api_url}/Users/{self.user_id}/Items/Latest?'
f'IncludeItemTypes=Audio&Limit={num}&Recursive=true&'
f'ParentId={library_id}', headers=self.headers)
r.raise_for_status()
return r.json()
#def process_playlists(self):
if __name__ == '__main__':
generator = Playlists()
tracks = generator.get_all_tracks()
#contents = generator.get_playlist_contents('auto-rock')
for playlist, genre in generator.playlists.items():
generator.create_playlist(playlist)
playlist_id = generator.get_playlist_id(playlist)
#print(genres.get('genres'))
        # Filter the whole library once per playlist; the original inner
        # per-track loop re-added the same matches on every iteration.
        items = [track for track in tracks
                 if genre in (track.get('Genres') or [])]
        if items:
            generator.add_bulk_to_playlist(playlist_id, items)
# #print(item_ids)
# #print(len(item_ids))
# #print(self.playlists)
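# ---------------------------------------------------------------------------
# Expected config.yaml layout (a sketch reconstructed from the .get() calls
# above; field names are inferred and the values below are hypothetical):
#
#     server_data:
#       user_id: "example-user-id"
#       user: "alice"
#       password: "secret"
#       server: "http://localhost:8096"
#       music_library: "Music"       # optional, defaults to "Music"
#     playlists:
#       auto-rock: "Rock"            # playlist name -> genre to match
# ---------------------------------------------------------------------------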
|
StarcoderdataPython
|
1705441
|
<filename>parametersConfig.py
to = {''}
gmail_user = {''}
gmail_pwd = {''}
|
StarcoderdataPython
|
69699
|
<gh_stars>10-100
"""A kernel manager for in-process kernels."""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import Instance, DottedObjectName, default
from jupyter_client.managerabc import KernelManagerABC
from jupyter_client.manager import KernelManager
from jupyter_client.session import Session
from .constants import INPROCESS_KEY
class InProcessKernelManager(KernelManager):
"""A manager for an in-process kernel.
This class implements the interface of
`jupyter_client.kernelmanagerabc.KernelManagerABC` and allows
(asynchronous) frontends to be used seamlessly with an in-process kernel.
See `jupyter_client.kernelmanager.KernelManager` for docstrings.
"""
# The kernel process with which the KernelManager is communicating.
kernel = Instance('yap_kernel.inprocess.ipkernel.InProcessKernel',
allow_none=True)
# the client class for KM.client() shortcut
client_class = DottedObjectName('yap_kernel.inprocess.BlockingInProcessKernelClient')
@default('blocking_class')
def _default_blocking_class(self):
from .blocking import BlockingInProcessKernelClient
return BlockingInProcessKernelClient
@default('session')
def _default_session(self):
# don't sign in-process messages
return Session(key=INPROCESS_KEY, parent=self)
#--------------------------------------------------------------------------
# Kernel management methods
#--------------------------------------------------------------------------
def start_kernel(self, **kwds):
from yap_kernel.inprocess.ipkernel import InProcessKernel
self.kernel = InProcessKernel(parent=self, session=self.session)
def shutdown_kernel(self):
self.kernel.iopub_thread.stop()
self._kill_kernel()
def restart_kernel(self, now=False, **kwds):
self.shutdown_kernel()
self.start_kernel(**kwds)
@property
def has_kernel(self):
return self.kernel is not None
def _kill_kernel(self):
self.kernel = None
def interrupt_kernel(self):
raise NotImplementedError("Cannot interrupt in-process kernel.")
def signal_kernel(self, signum):
raise NotImplementedError("Cannot signal in-process kernel.")
def is_alive(self):
return self.kernel is not None
def client(self, **kwargs):
kwargs['kernel'] = self.kernel
return super(InProcessKernelManager, self).client(**kwargs)
#-----------------------------------------------------------------------------
# ABC Registration
#-----------------------------------------------------------------------------
KernelManagerABC.register(InProcessKernelManager)
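# -----------------------------------------------------------------------------
# Usage sketch (not part of the original module): the manager is normally
# driven by a frontend, but a minimal in-process round trip, assuming the
# blocking client shipped with this package, looks roughly like this:
#
#     km = InProcessKernelManager()
#     km.start_kernel()
#     client = km.client()          # a BlockingInProcessKernelClient
#     client.start_channels()
#     client.execute("1 + 1")       # executes in the current process
#     km.shutdown_kernel()
# -----------------------------------------------------------------------------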
|
StarcoderdataPython
|
19042
|
<reponame>kyungjaelee/robosuite<gh_stars>0
# utility functions for manipulating MJCF XML models
import xml.etree.ElementTree as ET
import os
import numpy as np
from collections.abc import Iterable
from PIL import Image
from pathlib import Path
import robosuite
RED = [1, 0, 0, 1]
GREEN = [0, 1, 0, 1]
BLUE = [0, 0, 1, 1]
TEXTURES = {
"WoodRed": "red-wood.png",
"WoodGreen": "green-wood.png",
"WoodBlue": "blue-wood.png",
"WoodLight": "light-wood.png",
"WoodDark": "dark-wood.png",
"WoodTiles": "wood-tiles.png",
"WoodPanels": "wood-varnished-panels.png",
"WoodgrainGray": "gray-woodgrain.png",
"PlasterCream": "cream-plaster.png",
"PlasterPink": "pink-plaster.png",
"PlasterYellow": "yellow-plaster.png",
"PlasterGray": "gray-plaster.png",
"PlasterWhite": "white-plaster.png",
"BricksWhite": "white-bricks.png",
"Metal": "metal.png",
"SteelBrushed": "steel-brushed.png",
"SteelScratched": "steel-scratched.png",
"Brass": "brass-ambra.png",
"Bread": "bread.png",
"Can": "can.png",
"Ceramic": "ceramic.png",
"Cereal": "cereal.png",
"Clay": "clay.png",
"Dirt": "dirt.png",
"Glass": "glass.png",
"FeltGray": "gray-felt.png",
"Lemon": "lemon.png",
}
ALL_TEXTURES = TEXTURES.keys()
def xml_path_completion(xml_path):
"""
Takes in a local xml path and returns a full path.
if @xml_path is absolute, do nothing
if @xml_path is not absolute, load xml that is shipped by the package
Args:
xml_path (str): local xml path
Returns:
str: Full (absolute) xml path
"""
if xml_path.startswith("/"):
full_path = xml_path
else:
full_path = os.path.join(robosuite.models.assets_root, xml_path)
return full_path
def array_to_string(array):
"""
Converts a numeric array into the string format in mujoco.
Examples:
[0, 1, 2] => "0 1 2"
"""
return " ".join(["{}".format(x) for x in array])
def string_to_array(string):
"""
Converts a array string in mujoco xml to np.array.
Examples:
"0 1 2" => [0, 1, 2]
Args:
string (str): String to convert to an array
Returns:
np.array: Numerical array equivalent of @string
"""
return np.array([float(x) for x in string.split(" ")])
def set_alpha(node, alpha=0.1):
"""
Sets all a(lpha) field of the rgba attribute to be @alpha
for @node and all subnodes
used for managing display
Args:
node (ET.Element): Specific node element within XML tree
alpha (float): Value to set alpha value of rgba tuple
"""
for child_node in node.findall(".//*[@rgba]"):
rgba_orig = string_to_array(child_node.get("rgba"))
child_node.set("rgba", array_to_string(list(rgba_orig[0:3]) + [alpha]))
def new_joint(**kwargs):
"""
Creates a joint tag with attributes specified by @**kwargs.
Args:
**kwargs (dict): Specified attributes for the new joint
Returns:
ET.Element: new joint xml element
"""
element = ET.Element("joint", attrib=kwargs)
return element
def new_actuator(joint, act_type="actuator", **kwargs):
"""
Creates an actuator tag with attributes specified by @**kwargs.
Args:
joint (str): type of actuator transmission.
see all types here: http://mujoco.org/book/modeling.html#actuator
act_type (str): actuator type. Defaults to "actuator"
**kwargs (dict): Any additional specified attributes for the new joint
Returns:
ET.Element: new actuator xml element
"""
element = ET.Element(act_type, attrib=kwargs)
element.set("joint", joint)
return element
def new_site(name, rgba=RED, pos=(0, 0, 0), size=(0.005,), **kwargs):
"""
Creates a site element with attributes specified by @**kwargs.
NOTE: With the exception of @name, @pos, and @size, if any arg is set to
None, the value will automatically be popped before passing the values
to create the appropriate XML
Args:
name (str): site name.
rgba (4-array): (r,g,b,a) color and transparency. Defaults to solid red.
pos (3-array): (x,y,z) 3d position of the site.
size (array of float): site size (sites are spherical by default).
**kwargs (dict): Any additional specified attributes for the new site
Returns:
ET.Element: new site xml element
"""
kwargs["name"] = name
kwargs["pos"] = array_to_string(pos)
kwargs["size"] = array_to_string(size)
kwargs["rgba"] = array_to_string(rgba) if rgba is not None else None
# Loop through all remaining attributes and pop any that are None
for k, v in kwargs.copy().items():
if v is None:
kwargs.pop(k)
element = ET.Element("site", attrib=kwargs)
return element
def new_geom(geom_type, size, pos=(0, 0, 0), rgba=RED, group=0, **kwargs):
"""
Creates a geom element with attributes specified by @**kwargs.
NOTE: With the exception of @geom_type, @size, and @pos, if any arg is set to
None, the value will automatically be popped before passing the values
to create the appropriate XML
Args:
geom_type (str): type of the geom.
see all types here: http://mujoco.org/book/modeling.html#geom
size (array of float): geom size parameters.
pos (3-array): (x,y,z) 3d position of the site.
rgba (4-array): (r,g,b,a) color and transparency. Defaults to solid red.
        group (int): the integer group that the geom belongs to. Useful for
separating visual and physical elements.
**kwargs (dict): Any additional specified attributes for the new geom
Returns:
ET.Element: new geom xml element
"""
kwargs["type"] = str(geom_type)
kwargs["size"] = array_to_string(size)
kwargs["pos"] = array_to_string(pos)
kwargs["rgba"] = array_to_string(rgba) if rgba is not None else None
kwargs["group"] = str(group) if group is not None else None
# Loop through all remaining attributes and pop any that are None
for k, v in kwargs.copy().items():
if v is None:
kwargs.pop(k)
element = ET.Element("geom", attrib=kwargs)
return element
def new_body(name=None, pos=None, **kwargs):
"""
Creates a body element with attributes specified by @**kwargs.
Args:
name (str): body name.
pos (3-array): (x,y,z) 3d position of the body frame.
**kwargs (dict): Any additional specified attributes for the new body
Returns:
ET.Element: new body xml element
"""
if name is not None:
kwargs["name"] = name
if pos is not None:
kwargs["pos"] = array_to_string(pos)
element = ET.Element("body", attrib=kwargs)
return element
def new_inertial(name=None, pos=(0, 0, 0), mass=None, **kwargs):
"""
Creates a inertial element with attributes specified by @**kwargs.
Args:
name (str): [NOT USED]
pos (3-array): (x,y,z) 3d position of the inertial frame.
mass (float): The mass of inertial
**kwargs (dict): Any additional specified attributes for the new inertial element
Returns:
ET.Element: new inertial xml element
"""
if mass is not None:
kwargs["mass"] = str(mass)
kwargs["pos"] = array_to_string(pos)
element = ET.Element("inertial", attrib=kwargs)
return element
def postprocess_model_xml(xml_str):
"""
This function postprocesses the model.xml collected from a MuJoCo demonstration
in order to make sure that the STL files can be found.
Args:
xml_str (str): Mujoco sim demonstration XML file as string
Returns:
str: Post-processed xml file as string
"""
path = os.path.split(robosuite.__file__)[0]
path_split = path.split("/")
# replace mesh and texture file paths
tree = ET.fromstring(xml_str)
root = tree
asset = root.find("asset")
meshes = asset.findall("mesh")
textures = asset.findall("texture")
all_elements = meshes + textures
for elem in all_elements:
old_path = elem.get("file")
if old_path is None:
continue
old_path_split = old_path.split("/")
ind = max(
loc for loc, val in enumerate(old_path_split) if val == "robosuite"
) # last occurrence index
new_path_split = path_split + old_path_split[ind + 1 :]
new_path = "/".join(new_path_split)
elem.set("file", new_path)
return ET.tostring(root, encoding="utf8").decode("utf8")
class CustomMaterial(object):
"""
Simple class to instantiate the necessary parameters to define an appropriate texture / material combo
Instantiates a nested dict holding necessary components for procedurally generating a texture / material combo
Please see http://www.mujoco.org/book/XMLreference.html#asset for specific details on
attributes expected for Mujoco texture / material tags, respectively
Note that the values in @tex_attrib and @mat_attrib can be in string or array / numerical form.
Args:
texture (str or 4-array): Name of texture file to be imported. If a string, should be part of ALL_TEXTURES
If texture is a 4-array, then this argument will be interpreted as an rgba tuple value and a template
png will be procedurally generated during object instantiation, with any additional
texture / material attributes specified.
Note the RGBA values are expected to be floats between 0 and 1
tex_name (str): Name to reference the imported texture
mat_name (str): Name to reference the imported material
tex_attrib (dict): Any other optional mujoco texture specifications.
mat_attrib (dict): Any other optional mujoco material specifications.
Raises:
AssertionError: [Invalid texture]
"""
def __init__(
self,
texture,
tex_name,
mat_name,
tex_attrib=None,
mat_attrib=None,
):
# Check if the desired texture is an rgba value
if type(texture) is str:
default = False
# Verify that requested texture is valid
assert texture in ALL_TEXTURES, "Error: Requested invalid texture. Got {}. Valid options are:\n{}".format(
texture, ALL_TEXTURES)
else:
default = True
# This is an rgba value and a default texture is desired; make sure length of rgba array is 4
assert len(texture) == 4, "Error: Requested default texture. Got array of length {}. Expected rgba array " \
"of length 4.".format(len(texture))
# Setup the texture and material attributes
self.tex_attrib = {} if tex_attrib is None else tex_attrib.copy()
self.mat_attrib = {} if mat_attrib is None else mat_attrib.copy()
# Add in name values
self.tex_attrib["name"] = tex_name
self.mat_attrib["name"] = mat_name
self.mat_attrib["texture"] = tex_name
# Loop through all attributes and convert all non-string values into strings
for attrib in (self.tex_attrib, self.mat_attrib):
for k, v in attrib.items():
if type(v) is not str:
if isinstance(v, Iterable):
attrib[k] = array_to_string(v)
else:
attrib[k] = str(v)
# Handle default and non-default cases separately for linking texture patch file locations
if not default:
# Add in the filepath to texture patch
self.tex_attrib["file"] = xml_path_completion("textures/" + TEXTURES[texture])
else:
# Create a texture patch
tex = Image.new('RGBA', (100, 100), tuple((np.array(texture)*255).astype('int')))
# Create temp directory if it does not exist
save_dir = "/tmp/robosuite_temp_tex"
Path(save_dir).mkdir(parents=True, exist_ok=True)
# Save this texture patch to the temp directory on disk (MacOS / Linux)
fpath = save_dir + "/{}.png".format(tex_name)
tex.save(fpath, "PNG")
# Link this texture file to the default texture dict
self.tex_attrib["file"] = fpath
|
StarcoderdataPython
|
3253323
|
"""Functions for the Error-replacing Portion
Functions that are responsible for the process of generating possible corrections, swapping out mask tokens
with the corrections, and returning the new sentence list.
"""
import error_detection_functions as edf
import torch
def initialize_suggestion_list(suggestion_num):
"""Function for Initializing suggestion_list
    Creates a list of "suggestion_num" empty strings and returns it.
"""
y = 0
suggestion_list = []
while y < suggestion_num:
suggestion_list.append("")
y += 1
return suggestion_list
def section_decision(indexer, sentence, offset_list, end_matches):
"""Decides what section mask token is in
Returns a specific section of the sentence given which error is being worked on by splitting the text
at the offset given in the offset_list.
"""
if indexer == 0:
# First section:
mask_str = sentence[:offset_list[indexer]]
elif indexer == (len(end_matches) - 1):
# The last section:
mask_str = sentence[offset_list[indexer - 1]:]
else:
# Any middle sections:
mask_str = sentence[offset_list[indexer - 1]:offset_list[indexer]]
return mask_str
def get_top_tokens(sequence_switched, indexer, tokenizer, model, suggestion_num):
"""Generates the suggestions for the error"""
token_input = tokenizer.encode(sequence_switched, return_tensors="pt")
mask_token_index = torch.where(token_input == tokenizer.mask_token_id)[1]
token_logits = model(token_input).logits
mask_token_logits = token_logits[0, mask_token_index, :]
top_tokens = torch.topk(mask_token_logits, suggestion_num, dim=1).indices[indexer].tolist()
return top_tokens
def replace_tokens(the_string, sequence_switched, suggestion_list, indexer, tokenizer, model, suggestion_num):
"""Replaces tokens with suggestions stored in the suggestion list"""
# Get top tokens
top_tokens = get_top_tokens(sequence_switched, indexer, tokenizer, model, suggestion_num)
iterator = 0
for token in top_tokens:
suggestion_list[iterator] += the_string.replace(tokenizer.mask_token, tokenizer.decode([token]))
iterator += 1
return suggestion_list
def replace_errors(suggestion_num, sequence_switched, end_matches, offset_list):
"""Main Function for Error-replacing Process
Loops through the process to replace all errors in the sentence. Returns string list containing the new
sentences with possible corrections.
"""
if len(end_matches) != 0:
# Initialize Variables
new_list = initialize_suggestion_list(suggestion_num)
tokenizer, model = edf.initialize_tokenizer_variables()
# Swapping masks with corrections section
if len(end_matches) > 1:
k = 0
j = 0
current = 0
keep_going = True
while keep_going:
# Determine what section to locate mask token in
mask_str = section_decision(j, sequence_switched, offset_list, end_matches)
new_list = replace_tokens(mask_str, sequence_switched, new_list, j, tokenizer, model, suggestion_num)
# If end of sentence, return j to 0, else continue iterations
if j == (len(end_matches) - 1):
# Update variables
j = 0
current += 1
keep_going = False
else:
j += 1
k += 1
else:
new_list = replace_tokens(sequence_switched, sequence_switched, new_list, 0, tokenizer, model,
suggestion_num)
return new_list
else:
return []
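# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): the detection step in
# error_detection_functions is expected to supply a sentence whose errors have
# already been swapped for the tokenizer's mask token, together with the
# matching end_matches / offset_list bookkeeping.  With those in hand:
#
#     suggestions = replace_errors(3, masked_sentence, matches, offsets)
#     # -> a list of 3 candidate corrected sentences ([] if nothing was masked)
# ---------------------------------------------------------------------------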
|
StarcoderdataPython
|
69035
|
<reponame>jozhang97/mmsegmentation
_base_ = [
'../_base_/models/upernet_swin.py', '../_base_/datasets/uvo_finetune.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
pretrained='PATH/TO/YOUR/swin_large_patch4_window12_384_22k.pth',
backbone=dict(
pretrain_img_size=384,
embed_dims=192,
depths=[2, 2, 18, 2],
num_heads=[6, 12, 24, 48],
drop_path_rate=0.2,
window_size=12),
decode_head=dict(
in_channels=[192, 384, 768, 1536],
num_classes=2,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
in_channels=768,
num_classes=2,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
)
# AdamW optimizer, no weight decay for position embedding & layer norm
# in backbone
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.00006 / 10,
betas=(0.9, 0.999),
weight_decay=0.01,
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}))
lr_config = dict(
_delete_=True,
policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=5e-7,
power=1.0,
min_lr=0.0,
by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
)
load_from = '/tmp-network/user/ydu/mmsegmentation/work_dirs/biggest_model_clean_w_jitter/iter_300000.pth'
runner = dict(type='IterBasedRunner', max_iters=100000)
checkpoint_config = dict(by_epoch=False, interval=5000)
evaluation = dict(interval=5000, metric='mIoU', pre_eval=True)
|
StarcoderdataPython
|
4804247
|
<gh_stars>10-100
from sc2maptool import main
from sc2maptool import functions as f
from sc2maptool import index
from sc2maptool import selectMap
from sc2maptool import standardizeMapName
from sc2maptool.mapRecord import MapRecord
def test_main():
import sys
sys.argv = ['', "--mapname=MechDepot", "--list", "--season=3"]
main()
sys.argv = ['', "--mapname=MechDepot", "--details"]
main()
sys.argv = ['', "--mapname=zx", "--details"]
main()
sys.argv = ['', "--mapname=zx"]
main()
sys.argv = ['', "--mapname=a", "--best"]
main()
sys.argv = ['', "--mapname=zz", "--list"]
main()
sys.argv = ['', "--mapname=a", "--best", "--path"]
main()
from sc2maptool import __main__
def test_index():
idx1 = index.getIndex()
idx2 = index.getIndex()
newCache = index.IndexCache()
assert len(index.c.INCLUDED_KEYS) == 9
x = index.c.InvalidMapSelection()
try:
raise index.c.InvalidMapSelection("test")
assert False
except:
assert True
assert isinstance(index.c.MAPS_FOLDER, str)
assert isinstance(index.c.SC2_MAP_EXT, str)
def test_simple():
for m in selectMap(name="oi", season=1, excludeName=True, closestMatch=False):
assert isinstance(m, MapRecord)
assert isinstance(m.rawData, bytes) # requires real paths
def test_filter_map():
r1 = f.filterMapAttrs(ladder=False)
r2 = f.filterMapAttrs(r1) # no tags means return param as is
assert r1 == r2
def test_match_attrs():
boardwalk = selectMap("boardwalk")
assert False == f.matchRecordAttrs(boardwalk, {"asdjfd":True})
assert True == f.matchRecordAttrs(boardwalk, {"asdjfd":None})
assert True == f.matchRecordAttrs(boardwalk, {"asdjfd":""})
assert True == f.matchRecordAttrs(boardwalk, {"asdjfd":0})
assert True == f.matchRecordAttrs(boardwalk, {"asdjfd":False})
assert False == f.matchRecordAttrs(boardwalk, {"year":2016})
assert True == f.matchRecordAttrs(boardwalk, {"year":2017})
assert True == f.matchRecordAttrs(boardwalk, {})
def test_filter_map_names():
r = f.filterMapNames("water", closestMatch=False)
assert len(r) == 1
assert r[0].name == "Backwater"
r = f.filterMapNames("w[ea][rt]", closestMatch=False)
rNames = {m.name for m in r}
display_test(rNames, len(rNames), 3)
assert "Backwater" in rNames
assert "Eastwatch" in rNames
assert "FlowerFields" in rNames
rNames = {m.name for m in f.filterMapNames("at", closestMatch=True)}
display_test(rNames, len(rNames), 4)
assert "Flat32" in rNames
assert "Flat48" in rNames
assert "Flat64" in rNames
assert "Flat96" in rNames
rNames = {m.name for m in f.filterMapNames("[rst]e", closestMatch=True)}
display_test(rNames, len(rNames), 3)
assert "Acolyte" in rNames
assert "Odyssey" in rNames
assert "RedCity" in rNames
rNames = {m.name for m in f.filterMapNames("bi", closestMatch=True)}
display_test(rNames, len(rNames), 1)
assert "16Bit" in rNames
rNames = {m.name for m in f.filterMapNames("[amoqy6]", excludeRegex=True, closestMatch=False)}
display_test(rNames, len(rNames), 4)
assert "BelShirVestige" in rNames
assert "Redshift" in rNames
assert "Blueshift" in rNames
assert "NewkirkPrecinct" in rNames
rNames = {m.name for m in f.filterMapNames("[toi]", excludeRegex=True, closestMatch=True)}
display_test(rNames, len(rNames), 1)
assert "Sequencer" in rNames
def test_map_record():
x = MapRecord("test", "testpath", ["ftrue", "fval123bc", "fint12"])
assert x.display() == None
assert type(str(x)) == str
assert type(repr(x)) == str
assert x.ftrue == True
assert x.fval == "123bc"
assert x.fint == 12
assert len(x.attrs) == 4
def test_map_selection():
"""verify the functionality of selectMap()"""
for i in range(100): # without any params, a single map should always selected
assert isinstance(selectMap(), MapRecord)
casesInclusion = [
# INPUT TEST CASE EXPECTED RESULT
("zerg", ["DefeatZerglingsAndBanelings", "FindAndDefeatZerglings"]),
("AndBane", ["DefeatZerglingsAndBanelings"]),
("e[ar].*r$", ["Dreamcatcher", "Interloper"]), #accepts regular expression
("^b.*a.*k$", ["BattleOnTheBoardwalk", "Blackpink", "Blackpink"]),
("q", ["Sequencer", "NeonVioletSquare", "NeonVioletSquare"]), # identifies multiple results for their unique paths
("^x", Exception), # none of the maps start with an 'x'
("^abi", ["Abiogenesis"]),
("[ui][rt]e$", ["Fracture", "Parasite"]),
(".{1}[^o]nt", ["AcidPlant", "AcidPlant", "AcidPlant", "LastRemnant", "LastRemnant"]),
]
casesExclusion = [
# INPUT TEST CASE EXPECTED RESULT
("[\w]", Exception), # if ignoring all valid chars, error!
("[aeiou]", Exception), # all maps have a vowel
("[diyt]", ["Sequencer", "CeruleanFall"]), # all maps without an a, i or y
("[qtyhdjm1649]", ["Abiogenesis", "Blackpink", "Blackpink", "CeruleanFall"]),
("^[^u]", ["Ujari", "Ujari"]),
("[acdeghjklmny]", ["Frost", "16Bit"]),
]
def iterCases(cases, exclusion):
for thisInput, thisExpect in cases:
try: mapResults = selectMap(name=thisInput, excludeName=exclusion, closestMatch=False)
except:
display_test(thisInput, Exception, thisExpect)
continue
print(">>", mapResults)
for m in mapResults:
display_test("%s in %s"%(m.name, thisExpect), m.name in thisExpect, True)
display_test(thisInput, len(mapResults), len(thisExpect))
iterCases(casesInclusion, False)
iterCases(casesExclusion, True)
newMap = selectMap(year=2018, season=1) # get exactly one map
assert not isinstance(newMap, list)
try: assert selectMap(year=1970) and False
except: assert True # bad attrs causes an exception
try: assert selectMap("z", mode="1v1", year=2018, season=2) and False
    except: assert True # force attribute selection AND bad name filtering resulting in no matches
try: assert selectMap("[\w]", excludeName="ujari", closestMatch=True, ladder=True) and False
except: assert True
def test_names():
"""verify the functionality of standardizeMapName()"""
cases = [
# INPUT TEST CASE EXPECTED RESULT WHAT IS TESTED
(None, Exception ), # bad input
("abcd/12", "12" ), # ignores path
("ksNmsQpLMdiMs", "ksNmsQpLMdiMs" ), # maintains case
("test123.SC2Map", "test123" ), # ignores extension
("AbiogenesisLE.SC2Map", "Abiogenesis" ), # ignores name descriptors
("/2017/s3/AcolyteLE.SC2Map", "Acolyte" ), # tests everything
]
for thisInput, thisExpect in cases:
try: thisResult = standardizeMapName(thisInput)
except: thisResult = Exception
display_test(thisInput, thisResult, thisExpect)
def display_test(testIn, testOut, testExpect):
"""display test condition and its result, then assert the result"""
print("%s%s => %s == %s"%(testIn, " "*max(0, 30-len(str(testIn))), testOut, testExpect))
assert testExpect == testOut
|
StarcoderdataPython
|
178205
|
<reponame>hodgka/super_res_demo
"""Settings configuration - Configuration for environment variables can go in here."""
import os
from keras.models import load_model
import tensorflow as tf
from dotenv import load_dotenv
load_dotenv()
ENV = os.getenv('FLASK_ENV', default='production')
DEBUG = ENV == 'development'
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL')
SECRET_KEY = os.getenv('SECRET_KEY', default='octocat')
GITHUB_CLIENT_ID = os.getenv('GITHUB_CLIENT_ID')
GITHUB_CLIENT_SECRET = os.getenv('GITHUB_CLIENT_SECRET')
SQLALCHEMY_TRACK_MODIFICATIONS = False
UPLOAD_FOLDER = os.getenv('UPLOAD_FOLDER', default=os.path.join(os.getcwd(), 'uploads'))
OUTPUT_FOLDER = os.getenv('OUTPUT_FOLDER', default=os.path.join(os.getcwd(), 'outputs'))
ALLOWED_EXTENSIONS = set(['png', 'bmp', 'jpg', 'jpeg', 'gif'])
CHAR_SET = list(set('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'))
IMAGE_LABELS = ['Headphone', 'Mouse', 'Camera', 'Smartphone', 'Glasses', 'Shoes', 'Watch', 'Laptop']
MODEL_PATH = os.getenv('MODEL_PATH', default="ml_modules/tutorial/SouqNet128v2_gpu.h5")
MODEL = load_model(MODEL_PATH)
global graph
graph = tf.get_default_graph()
|
StarcoderdataPython
|
3294914
|
<filename>settings/staging.py
MONGODB_DATABASES = {
'name': 'mongo',
'host': 'localhost',
'port': 27017,
'password': '<PASSWORD>',
'username': 'root',
'database': 'toplist',
'tz_aware': True
}
|
StarcoderdataPython
|
1663566
|
<reponame>deokershesh/IfxPy
#
#
#
import unittest, sys
import IfxPy
import config
from testfunctions import IfxPyTestFunctions
class IfxPyTestCase(unittest.TestCase):
def test_231_FieldTypeName(self):
obj = IfxPyTestFunctions()
obj.assert_expect(self.run_test_231)
def run_test_231(self):
conn = IfxPy.connect(config.ConnStr, config.user, config.password)
result = IfxPy.exec_immediate(conn, "select * from sales")
result2 = IfxPy.exec_immediate(conn, "select * from staff")
result3 = IfxPy.exec_immediate(conn, "select * from emp_photo")
for i in range(0, IfxPy.num_fields(result) + 1):
print str(i) + ":" + str(IfxPy.field_type(result,IfxPy.field_name(result,i)))
print "\n-----"
for i in range(0, IfxPy.num_fields(result2)):
print str(i) + ":" + IfxPy.field_type(result2,IfxPy.field_name(result2,i))
print "\n-----"
for i in range(0, 3):
print str(i) + ":" + IfxPy.field_type(result3,IfxPy.field_name(result3,i))
print "\n-----"
print "region:%s" % IfxPy.field_type(result,'region')
print "5:%s" % IfxPy.field_type(result2,5)
#__END__
#__LUW_EXPECTED__
#0:date
#1:string
#2:string
#3:int
#4:False
#
#-----
#0:int
#1:string
#2:int
#3:string
#4:int
#5:decimal
#6:decimal
#
#-----
#0:string
#1:string
#2:blob
#
#-----
#region:False
#5:decimal
#__ZOS_EXPECTED__
#0:date
#1:string
#2:string
#3:int
#4:False
#
#-----
#0:int
#1:string
#2:int
#3:string
#4:int
#5:decimal
#6:decimal
#
#-----
#0:string
#1:string
#2:blob
#
#-----
#region:False
#5:decimal
#__SYSTEMI_EXPECTED__
#0:date
#1:string
#2:string
#3:int
#4:False
#
#-----
#0:int
#1:string
#2:int
#3:string
#4:int
#5:decimal
#6:decimal
#
#-----
#0:string
#1:string
#2:blob
#
#-----
#region:False
#5:decimal
#__IDS_EXPECTED__
#0:date
#1:string
#2:string
#3:int
#4:False
#
#-----
#0:int
#1:string
#2:int
#3:string
#4:int
#5:decimal
#6:decimal
#
#-----
#0:string
#1:string
#2:string
#
#-----
#region:string
#5:decimal
|
StarcoderdataPython
|
110250
|
import logging
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from web.lib.base import BaseController, render
log = logging.getLogger(__name__)
class FormtestController(BaseController):
def index(self):
# Return a rendered template
#return render('/formtest.mako')
# or, return a string
return 'Hello World'
def form(self):
return render('/form.mako')
def email(self):
return 'Your email is: %s' % request.params['email']
|
StarcoderdataPython
|
186519
|
<filename>luffy/models/layers/__init__.py
from .activation import *
from .attention import *
from .mlp import *
from .transformer import *
|
StarcoderdataPython
|
3367671
|
<filename>light.py
from light_modes import *
import win32com.client
import config
import light_board
auraSdk = win32com.client.Dispatch("aura.sdk.1")
auraSdk.SwitchMode()
board = light_board.light_board(auraSdk)
def update_light():
if config.MODE == config.MODES.RANDOM:
light_random.light_random(board)
elif config.MODE == config.MODES.SCREEN:
light_screen.light_screen(board)
elif config.MODE == config.MODES.SCREEN_RAINBOW_HORIZONTAL:
light_screen_rainbow_horizontal.light_screen_rainbow_horizontal(board)
|
StarcoderdataPython
|
3309597
|
<filename>vies/models.py<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.db.models import CharField
from django.utils.translation import ugettext_lazy as _
from . import VATIN_MAX_LENGTH, forms
class VATINField(CharField):
"""
Database field for European VIES VAT Identification Number.
This field stores and validates VATINs.
Example::
class MyModel(models.Model):
vat = VATINField(_('EU VAT ID'))
"""
description = _("A VIES VAT field.")
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', VATIN_MAX_LENGTH)
super(VATINField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
kwargs.setdefault('form_class', forms.VATINField)
return super(VATINField, self).formfield(**kwargs)
|
StarcoderdataPython
|
44837
|
<filename>marketplace/migrations/0003_auto_20171106_1454.py
# Generated by Django 2.0b1 on 2017-11-06 14:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('marketplace', '0002_auto_20171106_1452'),
]
operations = [
migrations.AlterField(
model_name='purchase',
name='team',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='marketplace.Team'),
),
]
|
StarcoderdataPython
|
3267965
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Live Plotting of Received LSL Time Series.
#
# - <NAME>, 2016
###############################################################################
#
# license info here...
#
###############################################################################
# Lab Streaming Layer Import
#############################################
import sys; sys.path.append('liblsl-Python') # help python find pylsl relative to this example program
from pylsl import StreamInlet, resolve_stream, StreamInfo, StreamOutlet, local_clock
# Standard Anaconda:
#############################################
# import csv
import datetime
import matplotlib
import matplotlib.pyplot as plt
# import matplotlib.animation as anim
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
# from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
import matplotlib.dates as pldates
import matplotlib.ticker as plticker
import multiprocessing
import numpy as np
import os
# import os.path
import pandas as pd
import psutil
# import socket
# import struct
import sys
import threading
import time
import Tkinter as tk
# import tkFileDialog as filedialog
# import tkMessageBox
# Custom file dependencies:
#############################################
# import Decoding
# from ConfigGUI import ConfigGUI
# # NOTE: ConfigGUI() creates another file called Config.py, which it imports
# # and reloads a couple of times. Some python interpretters may therefore
# # throw errors the first couple of times this script is run, until
# # Config.py has all its variables settled, especially if the OS is
# # running slowly and takes a long time to write that file. Sometimes it
# # is necessary to restart the inerpretter, or to open and re-save
# # Config.py.
# from RPeakDetectLive import detect_beats
# For live plotting in Tkinter, ensure matplotlib plots in Tkinter:
#############################################
matplotlib.use('TkAgg')
"""
###############################################################################
# Functions ##### # # ## # ####
# # # # # # # #
# #### # # # # ###
# # # # # ## #
# # # # # # ####
###############################################################################
"""
# FUNCTION: Update lWindow
#############################################
def listCycle(x, list):
"""Append x to list and maintain length.
Returns updated list."""
list.append(x) # append new value
# list.pop(0) # get rid of first value (to maintain size)
del list[0] # get rid of first value (to maintain size)
return list
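# Example (illustrative): listCycle(5, [1, 2, 3]) -> [2, 3, 5]
# (the newest value is appended, the oldest dropped, so the length is constant)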
#############################################
"""
############################################################################
# Classes #### # # #### #### ##### ####
# # # # # # # # # #
# # # # # ### ### ##### ###
# # # # ##### # # # #
# #### ##### # # #### #### ##### ####
############################################################################
"""
# CLASS: Multiprocess plotting
#############################################
class livePlotting(threading.Thread):
"""Process in which to analyse and plot the incoming ECG.
args:
drawQ = the data queue to be drawn
plot_rate = rate (in Hz) at which to attempt to redraw.
rate = sampling rate
liveWindowTime = window width in seconds"""
def __init__(self, type, plot_rate, rate, liveWindowTime, internalTime,
thickness, col, negative, inverted, scale, offset, valueText):
self.type = type
self.rate = rate
self.internalTime = internalTime
self.INTERVAL = 1.0 / rate
self.liveWindowTime = liveWindowTime
self.thickness = thickness
self.col = col
self.negative = negative
self.inverted = inverted
self.scale = scale
self.offset = offset
self.valueText = valueText
self.refresh = int(1000 / plot_rate) # Hz to milliseconds
self.liveWindow = int(liveWindowTime * rate)
self.startTime = 0.0
self.internalT = 0.0
self.prevT = 0
# Persistent main data plots:
self.ECGsample = []
self.ECGtime = []
self.ECG = pd.Series()
# Start:
threading.Thread.__init__(self)
self.start()
def updateplot(self):
peak_i, peak_y, RRIntervals, HR = [],[],[],[]
peak_t = []
""" Pull LSL Data """
try:
LSLSample, LSLtimestamp = self.peakInlet.pull_chunk()
except WindowsError:
# If there's an access error, try again...
print "WindowsError. Retrying..."
self.window.after(self.refresh, self.updateplot)
return # just in case
# print "sample:", LSLSample, "\n time", LSLtimestamp
""" Convert list of lists to simple list and append to existing data """
sample=[]
# Datapoints come as one-item lists (since only one channel on this stream)
for item in LSLSample:
# For a multi-channel stream, use item[n] for the nth channel
if self.negative:
if self.inverted:
sample += [-self.scale/(item[0] + self.offset)]
else:
sample += [-self.scale*(item[0] + self.offset)]
else:
if self.inverted:
sample += [self.scale/(item[0] + self.offset)]
else:
sample += [self.scale*(item[0] + self.offset)]
# print 'sample:', sample
""" Set time internally """
if self.internalTime:
time = []
if LSLtimestamp:
for n in xrange(len(LSLtimestamp)):
# update 'clock'
self.prevT += self.INTERVAL
# add to index list
time += [self.prevT]
# print 'time:', time
else:
""" Set time by received """
# Set start time as first received time:
if self.startTime == 0.0:
# (i.e. Do this each time until a startTime has been assigned)
if not (LSLtimestamp == []): # wait for first timestamp
# set startTime as first timestamp
self.startTime = float(LSLtimestamp[0])
# Start from zero:
time = [(x - self.startTime) for x in LSLtimestamp]
""" Trim by time """
# if len(time) > 1:
# print [time[0], time[-1]]
# Cast new data into pandas Series and append to existing:
self.ECG = self.ECG.append(pd.Series(sample, time))
# Trim to the window duration back from the most recent point.
if self.ECG.size > 1:
if (self.ECG.index[-1] - self.ECG.index[0]) > self.liveWindowTime:
self.ECG = self.ECG[self.ECG.index[-1] - self.liveWindowTime:]
""" Plot """
# line:
self.line.set_ydata(self.ECG)
self.line.set_xdata(self.ECG.index)
# Ticks by Locator:
loc = plticker.MultipleLocator(np.ceil(self.liveWindowTime/5.0))
self.ax.xaxis.set_major_locator(loc)
self.ax.set_xticklabels([str(int(x)) for x in self.ax.get_xticks()])
# "{:10.4f}".format(x - self.startTime)
""" Rescale """
# Recompute the data limits and update the view:
self.ax.relim()
self.ax.autoscale()
if list(self.ECG.index):
self.ax.set_xlim([self.ECG.index[0], self.ECG.index[0] + self.liveWindowTime])
# self.ax.set_xlim([self.ECG.index[0], self.ECG.index[-1]])
# print [self.ECG.index[0], self.ECG.index[0] + self.liveWindowTime]
# Label
if (self.valueText != "") and list(self.ECG):
# If there *is* a value to be printed:
self.label.set_text("{lab}: {num:d}".format(lab = self.valueText,
num = int(self.ECG.iloc[-1])
)
)
""" Redraw: """
self.ax.draw_artist(self.ax)
self.ax.draw_artist(self.line)
# self.ax.draw_artist(self.peak)
# self.ax.set_xlim(loc_t[0], loc_t[-1])
self.canvas.draw()
self.canvas.flush_events() # Common practice...? :/
self.window.after(self.refresh, self.updateplot)
# else:
# # self.window.after(self.refresh, self.updateplot)
# pass
# self.window.after(self.refresh, self.updateplot)
def run(self):
# Create a window
self.window = tk.Tk(className='\n' + self.type + ' Plot')
self.window.protocol("WM_DELETE_WINDOW", self.terminate)
self.window.configure(bg='black')
self.window.tk_setPalette(background='#282820',
foreground='black',
activeBackground='black',
activeForeground='#282820')
self.fig, self.ax = plt.subplots(1, 1)
self.fig.set_facecolor('#282820')
self.canvas = FigureCanvasTkAgg(self.fig, master=self.window)
self.canvas.get_tk_widget().pack(side='top', fill='both', expand=1)
self.canvas._tkcanvas.pack(side='top', fill='both', expand=1)
# Beautify:
self.ax.spines["top"].set_visible(False)
self.ax.spines["bottom"].set_visible(False)
self.ax.spines["right"].set_visible(False)
self.ax.spines["left"].set_visible(False)
self.ax.set_axis_bgcolor('black')
self.ax.set_xlabel("Seconds", fontsize=14, color='white', alpha=0.3)
self.ax.set_xlim(0, self.liveWindowTime)
# self.ax.set_ylim(-1.5, 2.5)
xlist = range(0, int(self.liveWindowTime + 1))
self.ax.set_xticks(xlist)
self.ax.set_xticklabels([str(x) for x in xlist],
fontsize=14, color='white')
self.ax.set_autoscaley_on(True)
self.ax.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off",
labelleft="on", labelsize=14, labelcolor='white')
self.canvas.show()
# Initial Data:
#####################
self.t = np.linspace(0, self.liveWindow * self.INTERVAL,
self.liveWindow)
# # Centreline:
# self.ax.plot(self.t, [0, ] * len(self.t),
# color='white', alpha=0.3, linewidth=2.0)
# Set colour
colDict = {'gold':'#b0c050', 'red':'red', 'blue':'skyblue', 'gray':'dimgray',
'grey':'dimgray', 'silver':'lightgrey'}
try:
tempCol = colDict[self.col]
except:
tempCol = colDict['gold']
# Initialise line
self.line, = self.ax.plot(self.t, [0, ] * len(self.t),
color=tempCol,
linewidth=self.thickness, zorder=0)
# Text display (top right corner):
if self.valueText != "":
self.label = self.ax.text(0.98, 0.97, # Coordinates (percentage)
"", fontweight='bold',
verticalalignment='top',
horizontalalignment='right',
transform=self.ax.transAxes,
fontsize=14, color=tempCol)
# LSL:
# first resolve a stream on the lab network
print"looking for a(n)", self.type, "stream..."
peakStream = resolve_stream('type', self.type)
print"found a(n)", self.type, "stream..."
# create a new inlet to read from the stream
self.peakInlet = StreamInlet(peakStream[0])
# Run the animation:
self.updateplot()
# Maintain the window even when not updated:
self.window.mainloop()
self.terminate()
return
def terminate(self):
count = 1
hostProc = psutil.Process(os.getpid())
# print "hostProc:", hostProc
subProcs = hostProc.children(recursive=True)
for subProc in subProcs:
count += 1
# print "subProc:", subProc
subProc.kill()
psutil.wait_procs(subProcs, timeout=5)
print "Terminated %i window process(es)." % count
# print "Window terminated."
sys.stdout.flush()
hostProc.kill()
hostProc.wait(5)
pass
#############################################
"""
###########################################################################
# Main # # # ### ## #
# ## ## # # # # # #
# # # # # # # # # # #
# # # # ##### # # ##
# # # # # ### # #
###########################################################################
"""
if __name__ == "__main__":
# BPM:
plot_rate = 15
rate = 500 # ignored if no internalTime
windowTime = 60 # seconds
type = 'RR'
internalTime = False # use internally generated timestamps rather than received ones
thickness = 5
col = 'red' # gold, red, blue, gray/grey, or silver
negative = False
inverted = True # Take the inverse
scale = 60000.0 # <float> Multiplies y-values by this (after inverse)
offset = 32768 # add to reading (before inversion or scaling)
valueText = "BPM" # Text, if desired, to indicate the current value (for HR, for example)
# # Standard Deviation:
# plot_rate = 15
# rate = 500 # ignored if no internalTime
# windowTime = 180 # seconds
# type = 'HRV_STD'
# internalTime = False # use internally generated timestamps rather than received ones
# thickness = 5
# col = 'blue' # gold, red, blue, gray/grey, or silver
# negative = False
# inverted = False # Take the inverse
# scale = 1.0 # <float> Multiplies y-values by this (after inverse)
# offset = 0 # add to reading (before inversion or scaling)
# valueText = "" # Text, if desired, to indicate the current value (for HR, for example)
# # ECG
# plot_rate = 15
# rate = 500 # ignored if no internalTime
# windowTime = 5 # seconds
# type = 'ECG'
# internalTime = True # use internally generated timestamps rather than received ones
# thickness = 2
# col = 'gold' # gold, red, blue, gray/grey, or silver
# negative = True
# inverted = False # Take the inverse
# scale = 1.0 # <float> Multiplies y-values by this (after inverse)
# offset = 0 # add to reading (before inversion or scaling)
# valueText = "" # Text, if desired, to indicate the current value (for HR, for example)
# # ECG
# plot_rate = 15
# rate = 1000 # ignored if no internalTime
# windowTime = 0.5 # seconds
# type = 'Stream'
# internalTime = False # use internally generated timestamps rather than received ones
# thickness = 1
# col = 'gold' # gold, red, blue, gray/grey, or silver
# negative = False
# inverted = False # Take the inverse
# scale = 1.0 # <float> Multiplies y-values by this (after inverse)
# offset = 0 # add to reading (before inversion or scaling)
# valueText = "" # Text, if desired, to indicate the current value (for HR, for example)
Plot = livePlotting(type, plot_rate, rate, windowTime, internalTime,
thickness, col, negative, inverted, scale, offset, valueText)
# And they all lived happily ever after.
# THE END
|
StarcoderdataPython
|
87186
|
"""
Custom backend example
"""
import random
from frontera import FrontierManager, Settings, FrontierTester, graphs
from frontera.contrib.backends.memory import MemoryBaseBackend
SITE_LIST = [
[('http://google.com', [])],
[('http://scrapinghub.com', [])],
[('http://zynga.com', [])],
[('http://microsoft.com', [])],
[('http://apple.com', [])],
]
class AlphabeticSortBackend(MemoryBaseBackend):
"""
Custom backend that sort pages alphabetically from url
"""
name = 'Alphabetic domain name sort backend'
def _compare_pages(self, first, second):
return cmp(first.url, second.url)
class RandomSortBackend(MemoryBaseBackend):
"""
Custom backend that sort pages randomly
"""
name = 'Random sort backend'
def _compare_pages(self, first, second):
return random.choice([-1, 0, 1])
def test_backend(backend):
# Graph
graph = graphs.Manager()
graph.add_site_list(SITE_LIST)
# Frontier
settings = Settings()
settings.BACKEND = backend
settings.LOGGING_MANAGER_ENABLED = True
settings.LOGGING_BACKEND_ENABLED = True
settings.LOGGING_DEBUGGING_ENABLED = False
frontier = FrontierManager.from_settings(settings)
print '-'*80
print frontier.backend.name
print '-'*80
# Tester
tester = FrontierTester(frontier, graph)
tester.run()
# Show crawling sequence
for page in tester.sequence:
print page.url
if __name__ == '__main__':
test_backend('10_custom_backends.AlphabeticSortBackend')
test_backend('10_custom_backends.RandomSortBackend')
|
StarcoderdataPython
|
1750138
|
<filename>Smart_container/PaddleClas/deploy/python/test.py
# Image processing
import base64
import binascii
import hashlib
import json
import os
from typing import Container
import memcache
import pymysql
import requests
from django.http import JsonResponse
from django.shortcuts import HttpResponse, render
# Retrieval / fuzzy search
from fuzzywuzzy import fuzz, process
# Used for login
from pyDes import CBC, PAD_PKCS5, des
from xpinyin import Pinyin
# Database-related operations
from app01 import models
# Create your views here.
KEY='<KEY>' # secret key
PICTURE_ROOT = './PaddleClas/dataset/retail'
def des_encrypt(s):
"""
    DES encryption.
    :param s: plaintext string
    :return: encrypted string, hex encoded
"""
secret_key = KEY
iv = secret_key
k = des(secret_key, CBC, iv, pad=None, padmode=PAD_PKCS5)
en = k.encrypt(s, padmode=PAD_PKCS5)
return binascii.b2a_hex(en)
def des_descrypt(s):
"""
    DES decryption.
    :param s: encrypted string, hex encoded
    :return: the decrypted openid (the part before the first "_")
"""
secret_key = KEY
iv = secret_key
k = des(secret_key, CBC, iv, pad=None, padmode=PAD_PKCS5)
de = k.decrypt(binascii.a2b_hex(s), padmode=PAD_PKCS5)
sessionID = de.split('_')
openid = sessionID[0]
return openid
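# ---------------------------------------------------------------------------
# Round-trip sketch (illustrative; assumes KEY is a valid 8-byte DES key and
# uses made-up openid / session_key values):
#
#     token = des_encrypt("openid123_sessionkey456")   # hex-encoded ciphertext
#     openid = des_descrypt(token)                     # recovers the part before the first "_"
# ---------------------------------------------------------------------------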
def SKexpired(old_sessionID, code):
s_openid = des_descrypt(old_sessionID)
appid = "wx433732b2940b7d4c"
secret = "b4e95c5b998cd13ba9d09e077343f2e7"
code2SessionUrl = "https://api.weixin.qq.com/sns/jscode2session?appid={appid}&secret={secret}&js_code={code}&grant_type=authorization_code".format(
appid=appid, secret=secret, code=code)
resp = requests.get(code2SessionUrl)
respDict = resp.json()
s_session_key = respDict.get("session_key")
s = str(s_openid) + '_' +str(s_session_key)
sessionID = des_encrypt(s)
models.TUser.objects.filter(openid=s_openid).update(session_key=s_session_key)
return sessionID
def information():
container = models.TContainer.objects.all()
container_all = []
for i in container:
temp = []
temp.append(i.number)
temp.append(i.container_name)
temp.append(i.container_price)
temp.append(i.picture_address)
container_all.append(temp)
return container_all
def update():
container_all = information()
TXT_PATH='./PaddleClas/dataset/retail/data_update.txt'
with open(os.path.abspath(TXT_PATH),'w+',encoding='utf-8') as fh:
for container_single in container_all:
container_name = container_single[1]
container_address = container_single[3]
fh.write(container_address + '\t' + container_name + '\n')
fh.close()
    # FIXME: has problems and needs revising
os.system('python3 python/build_gallery.py -c configs/build_product.yaml -o IndexProcess.data_file="./PaddleClas/dataset/retail/data_update.txt" -o IndexProcess.index_dir="./PaddleClas/dataset/retail/index_update"')
# 识别模块
def reference(request):
if request.method == "POST":
sessionID = request.POST.get('sessionID')
isSKexpried = request.POST.get('isSKexpried')
code = request.POST.get('code')
value = request.POST.get('picture')
res_all = models.TContainer.objects.all()
if isSKexpried:
sessionID = SKexpired(sessionID, code)
image_name = base64.b64decode(value)
print(image_name)
image_file = './PaddleClas/dataset/retail/test1.jpg'
with open(image_file, "wb") as fh:
fh.write(image_name)
fh.close()
        ### Product recognition
rec_docs_list = []
rec_docs_price_all = []
price_all = 0.0
# self.picture_file = '/home/thomas/Smart_container/PaddleClas/dataset/retail/test.jpg'
#
# cv2.imwrite(self.picture_file, self.image)
os.system(
'python ./PaddleClas/deploy/python/predict_system.py -c ./PaddleClas/deploy/configs/inference_product.yaml -o Global.use_gpu=False')
print('3')
log_path = './PaddleClas/dataset/log.txt'
rec_docs_str = ''
rec_deplay_str = ''
with open(log_path, 'r', encoding='utf8') as F:
str_result_list = F.readlines()
print(str_result_list)
if str_result_list[0] == "Please connect root to upload container's name and it's price!":
rec_deplay_str_all = str_result_list[0]
else:
for str_result in str_result_list:
price_all = 0
rec_docs_price = []
dict_result = eval(str_result)
                    rec_docs = dict_result['rec_docs'] # recognition result
rec_docs_list.append(rec_docs)
print('2')
print(rec_docs_list)
for res in res_all:
for rec_docs_sig in rec_docs_list:
if rec_docs_sig == res.container_name:
rec_price = res.container_price
price_all += float(rec_price)
rec_docs_price.append(rec_docs)
rec_docs_price.append(rec_price)
rec_docs_price_all.append(rec_docs_price)
# print("1")
# print(rec_docs_price_all)
os.remove(log_path)
return JsonResponse({"state": 'true',"container": rec_docs_price_all,"price_all": price_all})
else:
return JsonResponse({"state": 'false'})
# Login
def login_in(request):
if request.method == "POST":
code = request.POST.get('code')
userinfo = request.POST.get('userinfo')
userinfo = json.loads(userinfo)
s_nickname = userinfo['nickName']
appid = "wx433732b2940b7d4c"
secret = "b4e95c5b998cd13ba9d09e077343f2e7"
code2SessionUrl = "https://api.weixin.qq.com/sns/jscode2session?appid={appid}&secret={secret}&js_code={code}&grant_type=authorization_code".format(
appid=appid, secret=secret, code=code)
resp = requests.get(code2SessionUrl)
respDict = resp.json()
        s_openid = respDict.get("openid") # openid to be stored
        s_session_key = respDict.get("session_key") # session_key to be stored
s = str(s_openid) + '_' +str(s_session_key)
sessionID = des_encrypt(s)
sessionID = str(sessionID)
        old_openid = models.TUser.objects.filter(openid=s_openid) # check whether s_openid already exists in the database; empty queryset if not
old_openid = old_openid.values()
        if not bool(old_openid): # check whether the table already contains this openid
s_user = models.TUser(openid = s_openid, nickname = s_nickname, session_key = s_session_key)
s_user.save()
update()
else:
            models.TUser.objects.filter(openid=s_openid).update(session_key=s_session_key) # replace the stored session_key
return JsonResponse({"sessionID": sessionID})
def record(request): # add-product module
if request.method == "POST":
sessionID = request.POST.get('sessionID')
isSKexpried = request.POST.get('isSKexpried')
code = request.POST.get('code')
        s_container_name = request.POST.get('container_name') # product name, str
        s_container_price = request.POST.get('container_price') # product unit price, float
        picture = request.FILES['productimage'] # product photo
if isSKexpried:
sessionID = SKexpired(sessionID, code)
value_name = s_container_name
p = Pinyin()
name = p.get_pinyin(value_name).replace('-','')
s_picture_address = 'gallery/'+ name + '.jpg'
with open(os.path.join(PICTURE_ROOT,s_picture_address), 'wb') as fh:
for chunk in picture.chunks():
fh.write(chunk)
fh.close()
        last_data = models.TContainer.objects.last() # fetch the last row of t_container so that newly added products are numbered in order
if not bool(last_data.number):
            s_number = 1 # sequence number
else:
s_number = last_data.number + 1
old_container = models.TContainer.objects.filter(container_name=s_container_name)
old_container = old_container.values()
if not bool(old_container):
s_container = models.TContainer(number = s_number, container_name = s_container_name, container_price = s_container_price,picture_address = s_picture_address)
s_container.save()
update()
return JsonResponse({"state": 'true', "sessionID": sessionID})
else:
return JsonResponse({"state": 'true', "sessionID": sessionID})
else:
return JsonResponse({"state": 'false'})
def delete(request): # delete-product module
if request.method == "POST":
sessionID = request.POST.get('sessionID')
isSKexpried = request.POST.get('isSKexpried')
code = request.POST.get('code')
d_number = request.POST.get('number')
d_container_name = request.POST.get('container_name')
if isSKexpried:
sessionID = SKexpired(sessionID, code)
d_number = int(d_number)
        old_container = models.TContainer.objects.filter(number = d_number) # query t_container to check whether the target product already exists in the table
old_container = old_container.values()
        if not bool(old_container): # the table does not contain the product to be deleted
return JsonResponse({"state": 'false', "sessionID": sessionID})
else:
models.TContainer.objects.filter(number = d_number).delete()
update()
return JsonResponse({"state": 'true', "sessionID": sessionID})
else:
return JsonResponse({"state": 'false'})
def replace(request): # update-product module
if request.method == "POST":
sessionID = request.POST.get('sessionID')
isSKexpried = request.POST.get('isSKexpried')
code = request.POST.get('code')
number = request.POST.get('number')
r_container_name = request.POST.get('container_name')
r_container_price = request.POST.get('container_price')
r_picture = request.FILES['productimage']
# print(r_container_name)
if isSKexpried:
sessionID = SKexpired(sessionID, code)
models.TContainer.objects.filter(number = number).update(container_name = r_container_name)
models.TContainer.objects.filter(number = number).update(container_price = r_container_price)
g = models.TContainer.objects.filter(number = number)
result = models.TContainer.objects.filter(number = number)
with open(os.path.join(PICTURE_ROOT,result[0].picture_address), 'wb') as fh:
for chunk in r_picture.chunks():
fh.write(chunk)
fh.close()
update()
return JsonResponse({"state": 'true', "sessionID": sessionID})
else:
return JsonResponse({"state": 'false'})
def search(request): # query module
if request.method == "POST":
sessionID = request.POST.get('sessionID')
isSKexpried = request.POST.get('isSKexpried')
code = request.POST.get('code')
if isSKexpried:
sessionID = SKexpired(sessionID, code)
container_all = information()
return JsonResponse({"state": 'true', "sessionID": sessionID, 'container_all': container_all})
else:
return JsonResponse({"state": 'false'})
def find(request): # retrieval module (fuzzy search)
if request.method== "POST":
sessionID = request.POST.get('sessionID')
isSKexpried = request.POST.get('isSKexpried')
code = request.POST.get('code')
searchtarget = request.POST.get('searchtarget')
container = models.TContainer.objects.all()
find_result = []
for i in container:
value = fuzz.partial_ratio("%s"%searchtarget,i.container_name)
if value>=80:
temp = []
temp.append(i.number)
temp.append(i.container_name)
temp.append(i.container_price)
temp.append(i.picture_address)
find_result.append(temp)
return JsonResponse({"state": 'true', "sessionID": sessionID,"container_all":find_result})
else:
return JsonResponse({"state": 'false'})
|
StarcoderdataPython
|
3388006
|
<filename>src/westpa/analysis/__init__.py
from westpa.analysis.core import Run
from westpa.analysis.statistics import time_average
from westpa.analysis.trajectories import Trajectory, BasicMDTrajectory
__all__ = ['Run', 'time_average', 'Trajectory', 'BasicMDTrajectory']
|
StarcoderdataPython
|
3391847
|
# -*- coding: utf-8 -*-
import time
from scrapy.contrib.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.spiders import Rule
from scrapy_redis.spiders import RedisCrawlSpider
from fish_core.scrapy.items import CommonItem
from fish_core.simhash import Simhash
class SimpleCrawler(RedisCrawlSpider):
"""
    A simple example of a distributed crawler;
    it extracts attributes from a page, such as the title, description, and keywords.
"""
name = 'simple_fish_crawler'
redis_key = 'simple_fish_crawler:start_urls'
rules = (
Rule(LxmlLinkExtractor(), callback='parse_page', follow=True),
)
def parse_page(self, response):
self.logger.debug('Parse function called on %s ' % response.url)
item = CommonItem()
item['title'] = ''.join(response.xpath('//title/text()').extract())
item['description'] = ''.join(response.xpath('//meta[contains(@name,"description")]/@content').extract())
item['keywords'] = ''.join(response.xpath('//meta[contains(@name,"keywords")]/@content').extract())
item['p_texts'] = response.xpath('//p/text()').extract()
item['url'] = response.url
item['crawled_timestamp'] = time.time()
item['links'], item['links_text'] = self.parse_links(response.xpath('//a[contains(@href,"http")]'))
item['simhash'] = self.generate_simhash(item)
self.logger.debug('Parse done...........')
return item
def parse_links(self, a_list):
links, links_text = [], []
for a in a_list:
links.append(''.join(a.xpath('@href').extract()))
links_text.append(''.join(a.xpath('text()').extract()))
return links, links_text
def generate_simhash(self, item):
"""
Generate simhash based on title, description, keywords, p_texts and links_text.
"""
        # Use a name that does not shadow the built-in ``list``.
        texts = item['p_texts'] + item['links_text']
        texts.append(item['title'])
        texts.append(item['description'])
        texts.append(item['keywords'])
        return Simhash(','.join(texts).strip()).hash
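# Usage sketch, assuming the standard scrapy-redis seeding flow (not shown in this file):
# start the spider with `scrapy crawl simple_fish_crawler`, then push seed URLs onto the
# Redis list named by `redis_key`, for example from redis-cli:
#
#   lpush simple_fish_crawler:start_urls http://example.com
#
# The spider pops URLs from that list, follows links extracted by the LxmlLinkExtractor
# rule, and yields one CommonItem per parsed page.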
|
StarcoderdataPython
|
3376040
|
from __future__ import absolute_import, division, print_function
from .index import *
from .core import *
try:
from .dask import *
except ImportError:
pass
|
StarcoderdataPython
|
180628
|
from django.contrib import admin
from .models import User, UserOutreach, UserStatus, Logging
admin.site.register(UserStatus)
admin.site.register(UserOutreach)
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
list_display = ('username', 'question_id', 'points', 'xp')
search_fields = ('email', 'username')
ordering = (('points'), )
@admin.register(Logging)
class LogginAdmin(admin.ModelAdmin):
list_display = ('__str__', 'answer', 'question_id', 'timestamp')
search_fields = ('user__username', 'answer')
ordering = (('timestamp'), )
|
StarcoderdataPython
|
1723733
|
def TP(a,b,c):
if(a**2+b**2==c**2):
return "YES"
else:
return "NO"
print("Enter a,b,c : ",end="")
try:
a,b,c=map(int,input().split())
print(TP(a,b,c))
except:
print("Invalid input")
|
StarcoderdataPython
|
1683485
|
<reponame>milkrong/Basic-Python-DS-Algs<filename>LeetCode/longest_palindrome.py
import collections
class Solution:
def longestPalindrome(self, s: str) -> int:
ans = 0
for v in collections.Counter(s).values():
ans += v // 2 * 2
if ans % 2 == 0 and v % 2 == 1:
ans += 1
return ans
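# Quick illustration of the counting logic: for "abccccdd" the pairs cc, cc and dd contribute 6,
# and one leftover odd-count character can sit in the middle, giving a longest palindrome of 7.
if __name__ == "__main__":
    print(Solution().longestPalindrome("abccccdd"))  # expected output: 7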
|
StarcoderdataPython
|
4801377
|
<filename>bert_train.py
import torch
from torch import nn
import json
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from pytorch_pretrained_bert import BertModel, BertAdam
from pytorch_pretrained_bert.modeling import BertPreTrainedModel
import train_args as parse
from m_io import create_output_name, create_valid_rouge, get_valid_evaluation, create_metric_figure
from tqdm import tqdm, trange
import numpy as np
import os
class CustomNetwork(BertPreTrainedModel):
def __init__(self, config, num_labels=2, use_positional=True, dropout=0.1):
super(CustomNetwork, self).__init__(config)
self.num_labels = num_labels
if use_positional:
config.type_vocab_size = config.max_position_embeddings
self.bert = BertModel(config)
self.apply(self.init_bert_weights)
self.dropout_qa = nn.Dropout(dropout)
self.dropout_s = nn.Dropout(dropout)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, start_positions=None,end_positions=None, weights=None, train=False):
sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout_s(pooled_output)
sequence_output = self.dropout_qa(sequence_output)
logits = self.classifier(pooled_output)
logits_qa = self.qa_outputs(sequence_output)
start_logits, end_logits = logits_qa.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if train:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
loss_sent = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
loss_qa = (start_loss + end_loss) / 10.0
total_loss = loss_qa + loss_sent
return total_loss, loss_sent, loss_qa
else:
ignored_index = start_logits.size(1)
loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
loss_sent = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
loss_qa = (start_loss + end_loss) / 10.0
total_loss = loss_qa + loss_sent
return torch.nn.functional.softmax(start_logits, dim=-1), torch.nn.functional.softmax(end_logits, dim=-1), torch.nn.functional.softmax(logits, dim=-1), total_loss
def create_iterator(data_split='train', max_len=45, max_size=-1, batch_size=32, balance=None, bert_model='bert-large-uncased', ofp_fname=''):
bal_str = ''
if balance is not None and data_split == 'train': # do not balance test or valid
bal_str = '_balance_' + str(balance).replace('.', '_') + '_'
ifp = open('data.nosync/' + data_split + '/' + bert_model + '_cnndm_labeled_tokenized' + bal_str + '.json', 'rb')
data = json.load(ifp)
ifp.close()
x_ls, y_ls, s_idx_ls, b_id_ls, rouge_dict, x_for_rouge, x_align = data['x'], data['y'], data['s_id'], data['b_id'], data[
'rouge'], data['x_orig'], data['x_align']
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_start_positions = []
all_end_positions = []
all_sent_labels = []
all_sent_align = []
batch_id_list = []
num_t = 0
for (x, _), (label, start, end), s_id, b_id, x_a in zip(x_ls, y_ls, s_idx_ls, b_id_ls, x_align):
if start >= max_len or label == 0:
label = 0
start = max_len
end = max_len
if end > max_len:
end = max_len - 1
all_sent_labels.append(label)
all_start_positions.append(start)
all_end_positions.append(end)
mask = [1] * len(x)
padding_mask = [0] * (max_len - len(x))
mask.extend(padding_mask)
x.extend(padding_mask)
all_input_ids.append(x[:max_len])
all_input_mask.append(mask[:max_len])
segment_id = [s_id] * max_len
all_segment_ids.append(segment_id[:max_len])
batch_id_list.append(b_id)
all_sent_align.append(x_a)
num_t += 1
if num_t == max_size:
break
tensor_data = TensorDataset(torch.tensor(all_input_ids, dtype=torch.long),
torch.tensor(all_input_mask, dtype=torch.long),
torch.tensor(all_start_positions, dtype=torch.long),
torch.tensor(all_end_positions, dtype=torch.long),
torch.tensor(all_sent_labels, dtype=torch.long),
torch.tensor(all_segment_ids, dtype=torch.long))
if data_split == 'train':
sampler = RandomSampler(tensor_data)
else:
sampler = None
data_loader = DataLoader(tensor_data, sampler=sampler, batch_size=batch_size)
return data_loader, num_t, batch_id_list, x_for_rouge, all_sent_align
def train(model, loader_train, loader_valid, num_train_epochs=70, rouge_dict=None, x_for_rouge=None, x_sent_align=None, optim='adam', learning_rate=3e-5, unchanged_limit=20, weights=None, ofp_fname='PLT', batch_ids=None):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
rouge_sys_sent_path = 'data.nosync/rouge_sent/' + ofp_fname + '/'
rouge_sys_segs_path = 'data.nosync/rouge_segs/' + ofp_fname + '/'
output_model_file = 'saved_models/' + ofp_fname
output_config_file = 'saved_configs/' + ofp_fname
if not os.path.exists(rouge_sys_sent_path):
os.mkdir(rouge_sys_sent_path)
if not os.path.exists(rouge_sys_segs_path):
os.mkdir(rouge_sys_segs_path)
if not os.path.exists('saved_models'):
os.mkdir('saved_models')
if not os.path.exists('saved_configs'):
os.mkdir('saved_configs')
if optim == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=0.01)
else:
optimizer = BertAdam(model.parameters(), lr=learning_rate)
model.train()
loss_ls, loss_ls_s, loss_ls_qa, loss_valid_ls = [], [], [], []
qa_acc, qa_f1, sent_acc, sent_f1 = [], [], [], []
acc_loss, acc_loss_s, acc_loss_qa = [], [], []
best_valid = 1e3
unchanged = 0
if weights is not None:
weights = torch.tensor([weights, 1.0], dtype=torch.float32).to(device)
cur_used_ls_mean, total_used, total_s, mean_seg_len = None, None, None, None
for _ in trange(num_train_epochs, desc="Epoch"):
for step, batch in enumerate(tqdm(loader_train, desc="Iteration")):
optimizer.zero_grad()
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, start_positions, end_position, sent_labels, seg_ids = batch
loss, loss_s, loss_q = model(input_ids, seg_ids, input_mask, sent_labels, start_positions, end_position, weights, train=True)
loss.backward()
optimizer.step()
acc_loss.append(loss.cpu().data.numpy())
acc_loss_s.append(loss_s.cpu().data.numpy())
acc_loss_qa.append(loss_q.cpu().data.numpy())
if (step + 1) % 10000 == 0:
loss_ls.append(np.mean(acc_loss))
loss_ls_s.append(np.mean(acc_loss_s))
loss_ls_qa.append(np.mean(acc_loss_qa))
acc_loss, acc_loss_s, acc_loss_qa = [], [], []
with torch.no_grad():
eval_gt_start, eval_gt_end, eval_gt_sent = [], [], []
eval_sys_start, eval_sys_end, eval_sys_sent = [], [], []
valid_ls = []
for _, batch_valid in enumerate(tqdm(loader_valid, desc="Validation")):
batch_valid = tuple(t2.to(device) for t2 in batch_valid)
input_ids, input_mask, start_positions, end_position, sent_labels, seg_ids = batch_valid
start_l, end_l, sent_l, valid_l = model(input_ids, seg_ids, input_mask, sent_labels, start_positions, end_position, None)
# sent_l = model(input_ids, seg_ids, input_mask, None, None, None)
eval_gt_start.extend(start_positions.cpu().data.numpy())
eval_gt_end.extend(end_position.cpu().data.numpy())
eval_gt_sent.extend(sent_labels.cpu().data.numpy())
eval_sys_start.extend(start_l.cpu().data.numpy())
eval_sys_end.extend(end_l.cpu().data.numpy())
eval_sys_sent.extend(sent_l.cpu().data.numpy())
valid_ls.append(valid_l.cpu().data.numpy())
qa_acc_val, qa_f1_val, sent_acc_val, sent_f1_val = get_valid_evaluation(eval_gt_start,
eval_gt_end,
eval_gt_sent,
eval_sys_start,
eval_sys_end,
eval_sys_sent)
avg_val_loss = np.mean(valid_ls)
qa_acc.append(qa_acc_val)
qa_f1.append(qa_f1_val)
sent_acc.append(sent_acc_val)
sent_f1.append(sent_f1_val)
loss_valid_ls.append(avg_val_loss)
if avg_val_loss < best_valid:
best_valid = avg_val_loss
unchanged = 0
cur_used_ls_mean, total_used, total_s, mean_seg_len = create_valid_rouge(rouge_dict,
x_for_rouge,
eval_sys_sent,
eval_sys_start,
eval_sys_end,
eval_gt_sent,
eval_gt_start,
eval_gt_end,
batch_ids,
x_sent_align,
rouge_sys_sent_path,
rouge_sys_segs_path,
ofp_fname)
model_to_save = model.module if hasattr(model, 'module') else model
torch.save(model_to_save.state_dict(), output_model_file)
with open(output_config_file, 'w') as f:
f.write(model_to_save.config.to_json_string())
elif unchanged > unchanged_limit:
create_metric_figure(ofp_fname, loss_ls, loss_ls_s, loss_ls_qa, loss_valid_ls, qa_f1, sent_f1, cur_used_ls_mean, total_used, total_s, mean_seg_len)
return
else:
unchanged += 1
create_metric_figure(ofp_fname, loss_ls, loss_ls_s, loss_ls_qa, loss_valid_ls, qa_f1, sent_f1, cur_used_ls_mean, total_used, total_s, mean_seg_len)
args = parse.get_args()
batch_size = args.batch_size
sent_len = args.sent_len
if args.train:
ofp_fname = create_output_name(args)
data_loader_valid, num_val, used_b_id, x_for_rouge, all_sent_align = create_iterator(data_split='valid',
max_len=sent_len,
max_size=-1,
batch_size=batch_size,
balance=None,
bert_model=args.bert_model,
ofp_fname=ofp_fname)
data_loader_train, num_train, _, _, _ = create_iterator(data_split='train',
max_len=sent_len,
max_size=-1,
batch_size=batch_size,
balance=args.balance,
bert_model=args.bert_model,
ofp_fname=ofp_fname)
model = CustomNetwork.from_pretrained(args.bert_model, use_positional=args.use_positional, dropout=args.dropout)
train(model=model,
loader_train=data_loader_train,
loader_valid=data_loader_valid,
num_train_epochs=args.epochs,
rouge_dict=used_b_id,
x_for_rouge=x_for_rouge,
x_sent_align=all_sent_align,
optim=args.optim,
learning_rate=args.lr,
unchanged_limit=args.unchanged_limit,
weights=args.weights,
ofp_fname=ofp_fname,
batch_ids=used_b_id)
else:
raise NotImplementedError
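# Invocation sketch: the script is driven entirely by train_args.get_args(). Based on the
# attributes accessed above (train, batch_size, sent_len, bert_model, balance, optim, lr,
# epochs, unchanged_limit, weights, dropout, use_positional), a run would look roughly like
# the line below; the exact flag spellings live in train_args.py and are assumptions here:
#
#   python bert_train.py --train --batch_size 32 --sent_len 45 --bert_model bert-large-uncased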
|
StarcoderdataPython
|
3217248
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from elasticsearch import Elasticsearch
class ElasticSearchClient(object):
def __init__(self):
self._es_client = None
def init_app(self, app):
if 'ES_HOST' in app.config and 'ES_PORT' in app.config:
self._es_client = Elasticsearch([
{
'host': app.config['ES_HOST'],
'port': app.config['ES_PORT']
}])
def search(self, *args, **kwargs):
return self._es_client.search(*args, **kwargs)
def query_log(self, index, keyword, pod_name, start_time, end_time,
match_phrase=None):
query_body = {
'version': True,
'size': 8000,
'sort': [
{'@timestamp': 'desc'},
{
'log.offset': {
'order': 'desc',
'unmapped_type': 'long'
}
}
],
'_source': ['message'],
'query': {
'bool': {
'must': []
}
}
}
keyword_list = [{
'query_string': {
'query': keyword,
'analyze_wildcard': True,
'default_operator': 'AND',
'default_field': '*'
}
}] if keyword else []
match_phrase_list = [
match_phrase if match_phrase else
{
'prefix': {
'kubernetes.pod.name': pod_name
}
},
{
'range': {
'@timestamp': {
'gte': start_time,
'lte': end_time,
'format': 'epoch_millis'
}
}
}
]
query_body['query']['bool']['must'] = keyword_list + match_phrase_list
response = self._es_client.search(index=index, body=query_body)
return [item['_source']['message'] for item in response['hits']['hits']]
def query_data_join_metrics(self, job_name, num_buckets):
STAT_AGG = {
"JOINED": {
"filter": {
"term": {
"joined": True
}
}
},
"FAKE": {
"filter": {
"term": {
"fake": True
}
}
},
"TOTAL": {
"filter": {
"term": {
"fake": False
}
}
},
"UNJOINED": {
"bucket_script": {
"buckets_path": {
"JOINED": "JOINED[_count]",
"TOTAL": "TOTAL[_count]"
},
"script": "params.TOTAL - params.JOINED"
}
},
"JOIN_RATE": {
"bucket_script": {
"buckets_path": {
"JOINED": "JOINED[_count]",
"TOTAL": "TOTAL[_count]",
"FAKE": "FAKE[_count]"
},
"script": "params.JOINED / (params.TOTAL + params.FAKE)"
}
}
}
query = {
"size": 0,
"query": {
"bool": {
"must": [
{"term": {"application_id": job_name}}
]
}
},
"aggs": {
"OVERALL": {
"terms": {
"field": "application_id"
},
"aggs": STAT_AGG
},
"EVENT_TIME": {
"auto_date_histogram": {
"field": "event_time",
"format": "strict_date_optional_time",
"buckets": num_buckets
},
"aggs": STAT_AGG
},
"PROCESS_TIME": {
"auto_date_histogram": {
"field": "process_time",
"format": "strict_date_optional_time",
"buckets": num_buckets
},
"aggs": {
"MAX_EVENT_TIME": {
"max": {
"field": "event_time",
"format": "strict_date_optional_time"
}
},
"MIN_EVENT_TIME": {
"min": {
"field": "event_time",
"format": "strict_date_optional_time"
}
}
}
}
}
}
        return self._es_client.search(index='data_join*', body=query)
def query_nn_metrics(self, job_name, num_buckets):
query = {
"size": 0,
"query": {
"bool": {
"must": [
{
"term": {
"tags.application_id.keyword": job_name
}
}
]
}
},
"aggs": {
"PROCESS_TIME": {
"auto_date_histogram": {
"field": "date_time",
"format": "strict_date_optional_time",
"buckets": num_buckets
},
"aggs": {
"AUC": {
"filter": {
"term": {"name": "auc"}
},
"aggs": {
"AUC": {
"avg": {
"field": "value"
}
}
}
},
}
}
}
}
        return self._es_client.search(index='metrics*', body=query)
def query_events(self, index, keyword, pod_name,
start_time, end_time):
query_body = {
'version': True,
'size': 8000,
'sort': [
{'@timestamp': 'desc'},
{
'log.offset': {
'order': 'desc',
'unmapped_type': 'long'
}
}
],
'_source': ['message'],
'query': {
'bool': {
'must': []
}
}
}
keyword_list = [
{
'query_string': {
'query': f'{keyword} AND Event',
'analyze_wildcard': True,
'default_operator': 'AND',
'default_field': '*'
}
}
] if keyword else []
match_phrase_list = [
{
'prefix': {
'kubernetes.pod.name': pod_name
}
},
{
'range': {
'@timestamp': {
'gte': start_time,
'lte': end_time,
'format': 'epoch_millis'
}
}
}
]
query_body['query']['bool']['must'] = keyword_list + match_phrase_list
response = self._es_client.search(index=index, body=query_body)
return [item['_source']['message'] for item in response['hits']['hits']]
es = ElasticSearchClient()
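# Minimal usage sketch, assuming a Flask-style app whose config carries the ES endpoint;
# the index pattern, keyword, pod prefix, and time range below are placeholders:
#
#   class FakeApp:
#       config = {'ES_HOST': 'localhost', 'ES_PORT': 9200}
#
#   es.init_app(FakeApp())
#   messages = es.query_log('filebeat-*', 'error', 'my-pod-',
#                           start_time=0, end_time=1600000000000)  # epoch_millis range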
|
StarcoderdataPython
|
3366990
|
<gh_stars>0
from nic.datapreparation.data import (
features_size,
load_captions,
load_data,
load_images,
load_tokenizer,
vocabulary_size,
)
from nic.datapreparation.downloading import (
download_mscoco,
split_out_test_data,
)
from nic.datapreparation.preprocessing import (
ImageOptions,
MetaTokens,
preprocess_captions,
preprocess_data,
preprocess_images,
)
|
StarcoderdataPython
|
150604
|
#!/usr/bin/env python3
theInput = """785 516 744
272 511 358
801 791 693
572 150 74
644 534 138
191 396 196
860 92 399
233 321 823
720 333 570
308 427 572
246 206 66
156 261 595
336 810 505
810 210 938
615 987 820
117 22 519
412 990 256
405 996 423
55 366 418
290 402 810
313 608 755
740 421 321
255 322 582
990 174 658
609 818 360
565 831 87
146 94 313
895 439 866
673 3 211
517 439 733
281 651 582
601 711 257
467 262 375
33 52 584
281 418 395
278 438 917
397 413 991
495 306 757
232 542 800
686 574 729
101 642 506
785 898 932
975 924 106
889 792 114
287 901 144
586 399 529
619 307 456
287 508 88
159 175 190
195 261 148
348 195 270
905 600 686
847 396 680
59 421 879
969 343 600
969 361 585
95 115 209
512 831 395
172 774 662
372 396 290
957 281 445
745 525 297
489 630 225
81 138 18
694 114 404
764 196 383
607 861 94
896 92 140
786 862 123
389 449 298
795 339 780
863 507 892
589 850 759
273 645 371
368 884 486
637 553 423
391 630 950
442 950 581
383 650 712
538 844 405
353 261 544
682 60 336
750 308 698
177 369 643
479 919 137
482 598 184
275 726 55
139 874 850
456 195 839
385 766 205
561 751 249
397 764 714
508 856 876
478 410 12
686 230 267
876 247 272
160 436 673
466 798 278
487 839 773
754 780 900
45 983 801
800 595 188
523 408 239
269 609 216
745 692 237
15 588 840
702 583 298
707 150 859
835 750 375
211 754 368
892 434 152
521 659 592
683 573 904
902 544 412
718 218 502
379 227 292
482 87 780
903 433 382
223 196 369
824 588 734
342 396 279
164 561 918
409 841 918
893 409 204
33 435 169
858 423 74
134 797 255
517 881 109
466 373 193
379 180 973
620 467 941
260 512 298
993 461 89
111 986 990
946 668 987
26 65 110
223 55 372
235 103 473
288 244 964
343 199 25
62 213 984
602 117 311
624 142 356
65 130 248
709 95 376
316 897 723
420 840 349
159 460 208
385 445 929
408 13 791
149 92 682
791 253 440
870 196 395
651 347 49
738 362 536
392 226 485
683 642 938
332 890 393
954 394 971
279 217 309
610 429 747
588 219 959
840 565 791
671 624 380
384 426 485
407 323 226
780 290 428
539 41 571
455 267 306
48 607 250
432 567 400
851 507 477
853 456 923
615 416 838
245 496 353
253 325 926
159 716 989
488 216 473
808 222 742
395 178 798
514 383 732
478 845 728
508 486 4
230 643 35
151 298 584
123 906 576
583 682 294
580 605 784
624 517 984
911 778 745
9 897 325
913 357 501
27 221 249
798 669 614
824 777 397
749 461 304
734 769 1
447 543 306
454 200 19
551 134 674
562 329 665
352 188 281
808 151 622
834 255 648
352 199 340
429 182 121
585 223 382
524 977 225
520 156 532
827 929 419
429 175 759
284 376 877
312 548 751
571 507 529
390 503 483
710 1 146
938 421 582
975 981 186
118 771 531
328 490 638
452 743 750
511 772 242
957 850 177
669 750 665
975 296 664
228 35 159
763 347 650
752 315 557
366 530 294
828 154 645
730 388 763
744 298 774
459 508 375
449 485 748
537 819 907
526 259 551
773 890 650
523 839 473
645 928 485
333 109 115
403 952 399
229 50 606
377 900 212
693 731 399
682 103 579
441 764 471
481 114 267
196 567 591
353 495 798
436 348 30
794 88 526
926 411 524
1 862 754
839 440 848
839 458 109
961 799 930
944 692 853
168 520 788
579 920 687
32 930 283
575 759 747
857 705 926
842 674 925
233 163 29
544 409 719
266 643 767
315 323 56
754 135 658
99 757 569
818 832 207
296 602 519
316 371 301
409 879 747
765 696 151
960 836 689
526 564 790
33 954 343
548 203 379
545 797 622
550 122 105
606 538 12
686 434 102
595 820 249
642 215 221
120 703 124
972 440 214
444 544 447
963 225 373
904 628 271
733 109 374
193 673 588
446 724 945
246 771 901
389 900 339
331 323 756
245 428 969
565 457 539
977 743 742
26 199 543
960 804 405
795 914 721
454 695 816
984 422 849
437 495 803
237 106 58
221 442 834
638 278 21
697 880 830
818 953 849
276 335 944
152 650 953
232 972 23
675 991 179
741 579 408
164 741 285
682 156 113
71 607 759
740 692 644
284 229 308
681 114 133
961 232 394
214 653 533
240 863 332
115 651 664
396 356 477
308 220 134
283 505 569
286 400 234
413 830 734
534 877 619
293 562 171
862 216 186
819 427 63
491 121 321
139 108 142
438 39 219
345 120 486
367 91 482
400 61 605
780 858 434
854 188 478
141 726 62
600 904 292
312 328 103
648 896 200
304 299 382
372 325 229
625 114 513
95 742 875
432 99 818
510 731 863
353 520 495
501 335 400
411 187 358
612 274 381
658 586 774
908 858 876
162 722 881
604 277 772
677 484 369
964 772 239
973 618 388
463 799 264
262 49 691
800 816 875
827 820 394
828 682 576
571 670 724
322 910 202
12 72 856
529 771 829
520 830 38
796 154 681
662 160 750
193 314 633
772 925 453
769 769 427
318 182 338
552 366 505
82 205 468
486 218 352
542 633 640
612 625 879
69 715 867
233 571 479
818 703 639
866 989 856
285 504 265
981 758 773
920 716 904
698 390 977
336 1 838
563 391 169
692 87 692
17 75 754
691 100 143
605 754 711
844 724 864
261 457 167
640 655 371
554 294 874
777 541 528
902 595 406
774 309 254
322 721 257
638 883 617
278 793 525
779 669 120
144 539 722
106 533 242
187 925 743
221 863 490
284 899 481
186 82 103
102 143 562
306 494 540
352 574 239
885 218 247
551 750 123
859 634 206
391 513 363
361 608 410
390 303 93
353 111 592
472 450 724
395 507 621
494 19 266
184 416 881
330 402 821
999 82 370
613 165 722
572 141 978
361 202 671
975 376 474
878 445 216
925 529 713
499 522 338
891 315 749
712 539 290
382 388 479
806 394 342
273 56 594
213 3 226
359 52 693
637 612 601
792 336 253
223 380 699
189 101 265
812 297 699
635 255 739
885 653 957
165 873 646
883 444 400
982 789 89
6 922 192
990 310 109
159 595 656
884 640 514
876 44 671
288 569 864
108 255 977
237 819 178
417 923 144
231 444 375
452 951 241
947 724 475
569 243 481
646 678 7
282 474 921
830 520 36
961 461 957
333 955 876
359 778 909
128 276 70
914 961 185
606 942 453
373 323 614
270 170 447
745 480 454
499 649 95
468 127 922
436 722 121
202 773 971
307 127 21
11 122 90
305 54 93
266 543 113
931 735 706
931 480 683
306 433 158
155 35 379
343 401 321
880 477 516
226 996 282
778 531 528
722 313 162
975 489 594
406 312 635
106 191 147
180 731 20
249 869 140
336 359 426
266 580 403
569 702 587
740 913 549
197 372 292
585 964 683
340 532 249
592 588 910
280 78 824
675 892 101
642 718 222
393 359 157
714 442 999
851 425 954
487 545 408
504 759 191
509 179 626
774 859 455
335 476 523
573 622 288
518 561 504
812 100 602
433 455 676
565 453 112
282 266 523
642 508 440
558 512 102
109 685 128
291 903 221
254 370 275
300 398 431
341 809 383
622 948 79
813 961 308
972 451 601
390 877 719
988 448 275
184 229 542
902 307 761
587 575 909
442 648 331
424 98 620
512 106 578
411 219 614
577 294 104
81 916 468
84 842 287
96 261 678
34 323 226
943 321 29
906 619 258
924 503 215
929 149 431
56 505 511
876 769 999
994 714 980
416 495 355
79 265 420
37 917 286
53 782 558
868 327 59
926 27 398
704 348 370
773 909 356
969 799 551
282 138 448
808 51 437
417 277 372
806 291 537
818 510 460
945 372 38
127 191 422
100 287 753
341 510 391
317 252 884
629 201 567
164 10 560
632 205 370
353 891 990
609 391 12
889 564 990
74 820 241
356 636 389
309 232 292
654 294 199
45 226 362
645 308 329
955 891 186
180 78 115
842 938 141
141 179 159
401 227 573
372 73 681
562 216 682
184 526 998
530 450 357
296 812 233
398 287 530
613 539 372
523 719 554
377 735 429
854 319 362
445 828 221
506 485 402
519 603 250
490 421 819
638 204 983
664 585 407
434 503 124
512 551 153
135 449 30
673 10 513
682 45 265
32 44 498
168 415 698
151 821 711
179 682 145
800 471 326
376 893 698
885 523 390
992 49 159
949 8 59
83 47 107
871 46 660
610 954 892
352 956 637
12 139 444
517 748 733
502 731 354
368 754 687
197 759 584
292 25 928
197 319 514
359 824 99
458 827 546
681 543 197
160 492 603
634 82 455
456 96 53
399 94 836
702 2 814
614 422 467
161 290 252
506 605 591
8 454 407
46 544 489
42 491 477
772 602 767
359 465 769
970 360 114
959 552 83
945 581 284
26 314 286
153 708 707
444 681 830
400 65 430
22 993 185
327 525 125
321 665 106
538 632 959
552 220 966
17 787 5
561 309 865
997 652 785
678 924 297
772 290 460
322 347 473
811 393 92
283 398 625
349 50 528
385 403 544
404 671 204
430 214 286
798 480 219
430 440 811
240 249 442
223 510 411
590 18 592
468 166 556
542 165 708
93 12 480
893 355 601
822 348 850
431 606 256
367 819 690
188 247 644
766 199 514
384 469 416
412 520 459
261 326 646
746 533 31
972 788 664
465 548 470
257 371 412
633 703 817
525 26 466
6 667 539
532 692 356
891 468 602
709 24 599
275 449 2
674 471 289
683 549 57
177 917 270
954 311 715
991 921 707
115 946 6
745 615 446
646 288 148
725 333 588
933 915 326
828 947 286
350 59 117
598 98 286
436 127 91
461 223 198
334 167 679
506 86 803
254 237 989
878 248 371
416 757 398
721 841 757
761 303 973
24 76 928
749 280 886
194 695 42
134 261 752
134 557 727
345 367 861
380 87 425
685 424 723
17 738 451
902 886 569
920 272 125
239 222 797
361 951 767
273 835 197
696 235 427
247 212 922
706 389 739
480 893 290
877 177 494
450 864 281
392 164 313
799 233 293
416 168 35
860 290 4
989 284 124
710 88 120
431 307 526
515 417 528
442 400 566
108 858 371
47 472 519
147 627 386
644 481 315
168 838 337
675 409 29
130 117 449
959 401 512
963 416 667
729 166 375
843 452 322
749 325 88
978 850 511
91 789 818
993 552 510
741 512 45
836 644 865
136 851 903
711 818 984
933 760 333
461 66 945
285 198 321
726 577 317
952 421 2
278 961 835
995 134 148
805 999 760
542 731 575
657 754 721
135 43 343
755 179 318
372 24 646
577 194 595
277 7 440
530 48 416
257 54 634
772 302 492
789 397 21
532 270 499
145 511 583
600 286 402
628 449 621
577 588 199
485 965 239
765 760 422
709 284 676
962 672 786
760 716 362
511 254 53
626 96 383
488 316 340
19 256 733
680 798 260
693 578 908
810 216 783
485 703 650
965 741 152
44 544 334
880 702 451
887 581 132
476 77 741
661 24 435
858 68 607
943 416 836
936 334 662
5 397 348
452 838 182
801 89 369
781 853 284
969 23 717
482 493 611
560 483 394
221 642 492
641 393 428
491 752 98
710 791 437
615 198 656
146 646 943
218 385 132
934 209 589
863 299 513
941 624 167
648 514 553
724 157 441
389 733 241
236 109 421
607 816 536
363 877 317
508 493 332
782 929 79
535 607 463
877 32 399
637 626 172
511 865 972
560 916 928
599 325 80
809 477 224
724 60 279
524 454 262
960 517 994
216 42 880
969 487 190
977 329 652
916 539 696
271 581 76
660 74 681
768 761 323
108 821 440
224 478 560
373 567 614
417 716 566
178 155 529
994 670 562
987 621 375
161 498 922
527 843 478
495 975 788
528 11 567
713 744 575
268 746 35
802 53 869
789 717 381
437 703 871
177 220 104
638 684 79
807 535 71
525 978 321
576 696 351
928 572 83
414 437 25
75 371 320
338 89 327
376 90 239
363 330 126
12 260 210
284 21 356
403 54 748
551 49 530
530 461 249
640 450 399
153 754 393
548 774 958
602 773 906
417 11 377
188 879 740
486 105 649
426 929 107
848 677 563
913 728 646
700 116 390
148 425 782
564 335 839
584 652 155
707 887 518
489 250 857
979 726 399
113 305 420
402 396 742
479 99 950
753 425 677
88 533 246
804 138 554
76 734 294
472 550 372
415 621 525
76 617 903
821 145 901
876 539 35
91 745 637
871 604 106
811 466 729
694 153 573
100 735 306
660 640 817
927 55 814
852 30 289
741 33 898
193 57 636
260 208 711
172 215 152
790 262 520
92 511 437
726 622 89
709 848 318
269 960 557
940 814 793
286 59 993
529 6 870
415 58 850
578 13 524
261 258 423
695 247 290
512 229 270
485 271 272
118 461 3
757 679 808
830 886 324
913 315 870
414 229 764
386 567 738
32 657 59
336 169 14
821 494 667
815 606 674
20 654 529
482 674 49
476 321 512
661 466 229
869 974 565
205 686 438
466 218 494
567 519 761
257 658 648
546 491 467
102 526 542
542 949 126
608 999 976
867 666 798
421 801 941
825 589 335
871 93 179
491 670 303
464 256 249
318 650 322
168 807 391
513 5 179
770 8 127
960 9 82
561 661 885
176 670 865
468 382 20
811 732 457
709 856 356
713 378 649
306 510 409
963 269 649
988 749 782
208 173 181
679 734 178
884 870 45
763 290 80
228 495 689
736 653 771
325 948 972
985 132 914
770 859 360
382 859 755
781 866 681
922 20 119
628 584 547
584 262 320
62 407 277
831 531 304
979 31 842
194 538 646
77 61 758
245 247 620
175 298 876
315 121 893
185 404 558
222 359 367
901 873 23
109 560 553
819 848 567
509 184 809
188 194 46
405 255 773
333 734 547
283 750 154
115 220 406
551 373 358
851 505 478
961 847 160
661 295 417
489 136 814
192 307 866
976 763 437
255 964 24
786 900 454
727 560 520
814 169 504
882 573 524
550 409 236
567 647 258
155 576 474
508 455 921
718 197 9
331 356 917
344 78 748
204 6 937
187 83 648
138 81 913
314 972 914
286 971 4
677 344 702
326 452 163
407 131 576
560 351 137
701 839 354
475 503 263
606 504 651
919 601 112
709 224 732
714 184 103
261 554 192
766 381 290
388 784 853
447 869 923
504 124 571
923 643 251
323 679 152
847 477 171
796 368 649
80 716 799
771 677 294
270 364 957
253 591 959
17 756 22
121 466 617
401 838 752
350 604 913
393 811 828
646 949 940
328 230 516
794 443 695
136 429 579
657 140 613
803 177 821
829 564 440
560 469 853
961 693 979
382 661 84
630 180 995
626 353 575
616 502 687
264 223 764
64 507 569
575 427 662
619 807 506
663 203 959
978 775 783
317 749 481
3 581 875
320 828 793
317 838 107
671 603 282
524 581 326
619 728 57
91 937 198
182 353 260
226 759 244
140 153 149
387 732 239
427 761 138
339 447 421
278 439 647
82 135 839
824 513 865
117 310 825
838 670 58
183 82 130
212 209 749
118 151 861
978 275 262
273 747 689
916 739 878
689 270 339
358 268 750
966 97 753
161 685 813
174 396 866
70 861 132
866 117 790
737 201 723
209 85 468
821 948 557
182 374 327
912 671 412
444 592 746
567 613 415
561 75 393
631 428 740
976 362 326
504 171 911
753 886 430
738 680 494
839 371 481
979 537 330
333 886 216
669 357 476
107 186 484
302 327 78
400 231 541
159 873 75
744 684 46
592 363 80
944 670 496
811 292 699
545 959 949
299 552 632
683 94 14
418 603 646
370 781 758
364 236 619
107 837 860
106 409 344
492 713 36
398 460 375
730 569 497
733 409 499
577 349 19
652 182 824
768 822 363
207 862 535
911 344 372
868 814 640
68 792 781
674 787 205
182 852 241
725 665 43
187 852 838
615 856 418
632 277 593
654 386 27
805 801 218
328 416 226
76 206 209
81 209 660
31 231 523
569 910 110
815 106 675
739 830 604
534 724 869
379 460 782
549 270 934
324 105 218
841 218 205
739 259 232
572 504 356
66 459 486
504 66 344
873 117 119
261 245 916
621 157 915
220 648 409
630 192 549
440 773 415
816 468 543
475 374 845
446 219 487
999 434 835
304 444 775
698 203 348
715 544 424
206 628 403
760 782 86
651 599 486
973 404 562
614 229 172
396 460 782
434 339 349
88 790 818
925 685 952
922 381 967
723 870 704
94 145 400
308 686 530
288 716 629
867 678 982
554 414 584
942 429 931
608 828 977
599 663 620
867 330 419
200 740 588
225 213 673
146 675 372
302 792 589
299 948 809
16 942 797
262 796 418
591 828 555
532 403 619
694 289 960
801 532 203
918 746 870
127 617 829
350 179 938
326 510 128
432 714 226
948 786 102
866 664 162
302 115 584
714 623 211
829 582 543
173 321 260
47 284 919
133 35 880
614 25 827
768 490 998
825 502 252
275 750 219
716 140 453
758 864 541
563 352 768
197 800 911
670 540 302
307 237 726
76 667 665
322 617 207
118 298 820
283 548 228
381 502 797
990 491 579
250 474 670
784 55 283
729 933 464
255 765 347
807 818 198
594 601 446
374 725 121
591 760 424
480 456 809
974 408 234
876 153 811
540 263 238
535 68 556
21 293 527
613 39 765
761 255 406
596 279 414
772 451 527
258 554 169
958 697 445
127 9 107
607 445 305
695 435 396
487 224 873
671 199 792
739 37 85
859 744 284
947 299 230
755 817 226
827 207 658
882 709 567
303 509 790
73 262 270
917 112 21
949 277 281
559 557 918
668 875 906
308 669 543
479 563 879
311 317 834
534 751 50
275 774 278
200 642 690
293 196 466
780 804 135
866 162 122
916 783 58
631 477 70
878 375 67
425 621 4
826 161 926
147 884 139
717 936 799
140 703 405
284 168 89
144 738 315
418 417 564
439 357 820
73 113 702
163 550 647
144 780 984
34 592 770
696 167 452
666 541 973
314 622 567
986 92 636
301 171 1
812 146 637
673 395 895
583 283 510
380 482 907
953 189 148
513 372 455
923 505 387
525 45 877
630 816 797
119 776 276
540 139 396
560 62 596
502 97 876
431 977 533
867 782 484
844 409 190
46 63 700
102 972 421
110 987 312
58 543 365
657 248 64
613 658 340
605 875 408
746 653 401
898 980 5
449 371 108
496 690 91
672 657 184
816 48 744
121 109 689
849 88 201
982 268 418
569 193 589
630 267 676
690 453 47
496 369 792
677 412 833
95 316 802
957 774 647
966 842 861
233 737 194
260 605 424
266 274 310
874 365 762
411 87 704
477 356 739
554 598 454
107 540 64
641 631 470
444 387 133
277 704 401
226 869 475
299 986 127
831 706 60
899 442 111
414 281 804
579 702 597
587 807 932
755 649 537
844 439 295
979 235 417
821 852 719
546 59 716
607 889 8
851 534 334
926 234 50
184 710 286
152 872 638
132 517 712
21 970 152
801 701 104
438 845 30
966 454 106
37 894 741
276 896 923
274 6 535
339 346 129
141 566 488
386 418 551
160 69 822
586 589 634
443 633 319
466 944 856
704 6 944
438 937 229
47 201 738
283 102 389
305 168 844
760 854 880
827 903 750
612 138 163
658 57 491
622 91 900
233 144 773
113 85 645
399 129 190
497 49 481
85 698 906
604 146 968
653 767 92
130 260 706
288 396 267
268 625 621
6 283 805
992 917 363
985 716 887
900 677 593
892 668 406
40 259 733
572 860 510
154 225 479
575 750 809
938 312 243
36 294 461
973 150 452
226 270 159
66 81 520
247 346 496
58 864 207
395 140 524
438 901 717
491 838 807
85 203 859
541 931 704
764 26 272
912 250 107
512 278 182
910 89 345
242 826 85
687 889 267
112 610 93
445 882 337
532 746 381
689 526 854
696 858 351
778 798 801
255 8 362
200 45 44
203 50 342
520 236 135
228 35 196
421 236 120
689 653 418
692 773 233
898 438 334
32 821 511
419 55 31
449 776 496
617 857 815
691 530 996
105 959 469
403 371 317
309 394 366
207 449 84
902 419 633
361 480 733
987 318 213
722 531 649
600 600 12
954 968 654
436 429 111
169 205 606
331 227 610
943 543 304
146 666 412
998 544 402
459 475 58
269 455 55
388 98 38
243 675 858
172 732 707
188 120 313
959 887 640
719 968 101
752 83 547
477 517 337
908 620 289
869 878 321
738 33 20
817 227 913
469 260 898
138 329 593
23 459 967
159 339 524
681 669 674
216 619 673
740 360 420
302 875 950
539 759 635
430 548 612
239 841 169
323 702 113
374 615 255
457 851 958
721 40 270
495 842 808
745 939 343
484 408 610
554 739 576
539 695 49
535 745 493
117 88 444
554 939 3
665 470 581
133 876 580
268 430 703
436 883 249
448 823 862
3 218 505
85 944 264
81 994 367
673 488 484
506 901 694
847 914 612
426 423 29
971 214 741
589 221 732
20 853 541
995 783 448
983 854 858
446 523 27
418 52 118
73 566 122
438 74 361
354 136 981
399 183 794
888 816 366
863 586 878
388 254 979
430 735 19
922 536 47
750 686 60
545 836 683
828 748 301
678 297 546
493 567 351
514 643 523
58 191 768
418 778 387
273 925 613
651 160 330
859 215 624
750 876 36
138 836 637
906 550 568
46 520 876
928 79 632
400 610 906
380 471 22
163 624 931
822 507 661
49 89 414
874 593 476
958 895 660
910 783 691
341 147 325
751 767 297
194 81 335
633 808 345
726 290 602
550 102 207
345 194 542
217 68 103
290 441 451
239 464 407
987 401 195
300 341 313
797 409 430
471 607 441
82 153 439
511 578 399
634 593 414
630 113 776
448 679 413
346 784 577
320 851 645
584 584 73
603 742 196
165 758 361
624 23 262
626 90 435
943 647 702
446 598 392
993 579 904
41 608 924
979 209 371
654 642 136
776 518 520
787 369 444
518 543 529
824 974 110
415 582 629
651 356 869
903 347 977
345 269 581
549 840 613
433 209 891
407 630 900
509 95 409
510 103 362
194 69 754"""
theInput = theInput.split('\n')
theInput2 = [i.split(' ') for i in theInput]
totalPossible = 0
for triangle in theInput2:
triangle.sort()
print(triangle)
if len(triangle) == 3:
side1, side2, side3 = triangle
else:
nullStr, side1, side2, side3 = triangle
if int(side1)+int(side2) > int(side3) and \
int(side1)+int(side3) > int(side2) and \
int(side2)+int(side3) > int(side1):
totalPossible += 1
print(totalPossible)
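# Worked example of the validity test: a hypothetical row "5 10 25" would be rejected because
# 5 + 10 <= 25, while the first row "785 516 744" passes since every pairwise sum exceeds the
# remaining side. Sorting each row first is harmless here because all three inequalities are checked.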
|
StarcoderdataPython
|
165559
|
<filename>DEN/run_fdc.py
import torch
from torch.utils import data
from torchvision.transforms import Compose
from torchvision.models import resnet152
import os
import fdc
import transforms_nyu
from dataset import NyuV2, KITTIdataset
from den import DEN
data_path = '/data/nyu_v2/'
seed = 2
torch.manual_seed(seed)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.cuda.set_device(4)
print(device)
depth_size = (128, 416)
model_input = 224
test_crop = (427, 561)
crop_ratios = [0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1]
transform = Compose([
transforms_nyu.Normalize(),
transforms_nyu.FDCPreprocess(crop_ratios)
])
transformKITTI = Compose([
transforms_nyu.NormalizeKITTI(),
transforms_nyu.FDCPreprocessKITTI(crop_ratios)
])
nyu = NyuV2(os.path.join(data_path, 'train'), transform=transform)
kitti = KITTIdataset(transform=transformKITTI)
# dataloader = data.DataLoader(nyu, batch_size=1, shuffle=True, num_workers=6)
dataloader = data.DataLoader(kitti, batch_size=1, shuffle=True, num_workers=0)
#wts = './models/temp_v3/042_model.pt'
resnet_wts = './models/resnet152-b121ed2d.pth'
resnet_152 = resnet152(pretrained=True)
den = DEN()
#den.load_state_dict(torch.load(wts))
den = den.to(device)
den.eval()
print('DEN has been loaded')
# fdc initialize with model den.
fdc_model = fdc.FDC(den)
f_m_hat, f = fdc_model.forward(dataloader)
fdc_model.fit(f_m_hat, f)
fdc_model.save_weights('./models/FDC/den_dbe/')
|
StarcoderdataPython
|
3386199
|
<reponame>JerryRain/rrt_ws
#!/usr/bin/env python
# rrt.py
# This program generates a simple rapidly
# exploring random tree (RRT) in a rectangular region.
#
# Written by <NAME>
# May 2011
import sys, random, math, pygame
from pygame.locals import *
from math import sqrt,cos,sin,atan2
#constants
XDIM = 640
YDIM = 480
WINSIZE = [XDIM, YDIM]
EPSILON = 7.0
NUMNODES = 1000
fpsClock = pygame.time.Clock()
def dist(p1,p2):
return sqrt((p1[0]-p2[0])*(p1[0]-p2[0])+(p1[1]-p2[1])*(p1[1]-p2[1]))
def step_from_to(p1,p2):
if dist(p1,p2) < EPSILON:
return p2
else:
theta = atan2(p2[1]-p1[1],p2[0]-p1[0])
return p1[0] + EPSILON*cos(theta), p1[1] + EPSILON*sin(theta)
def main():
#initialize and prepare screen
pygame.init()
screen = pygame.display.set_mode(WINSIZE)
pygame.display.set_caption('Original RRT')
#white = 255, 240, 200
#black = 20, 20, 40
white = 255, 255, 255
black = 0, 0, 0
screen.fill(black)
nodes = []
#nodes.append((XDIM/2.0,YDIM/2.0)) # Start in the center
nodes.append((0.0,0.0)) # Start in the corner
for i in range(NUMNODES):
rand = random.random()*640.0, random.random()*480.0
nn = nodes[0]
for p in nodes:
if dist(p,rand) < dist(nn,rand):
nn = p
newnode = step_from_to(nn,rand)
nodes.append(newnode)
pygame.draw.line(screen,white,nn,newnode)
pygame.display.update()
fpsClock.tick(100)
#print i, " ", nodes
for e in pygame.event.get():
if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE):
sys.exit("Leaving because you requested it.")
# if python says run, then we should run
if __name__ == '__main__':
main()
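# Worked example of the stepping rule with EPSILON = 7.0: step_from_to((0, 0), (10, 0)) returns
# (7.0, 0.0), because the target lies farther than EPSILON away, so the new node is placed EPSILON
# along the heading atan2(0, 10) = 0; a target within EPSILON of the nearest node is returned as-is.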
|
StarcoderdataPython
|
3209670
|
from .client import (Client, Tensor, BlobTensor, DType, Device, Backend)
|
StarcoderdataPython
|
3279334
|
"""Tools for decoding and executing a neural network from its genetic representation."""
from typing import Tuple
import numpy as np
from .executable import Executable
##############################
# Function sigmoid
##############################
def sigmoid(x):
"""A logistic sigmoid activation function. Accepts array-like inputs,
and uses NumPy for efficient computation.
"""
return 1/(1+np.exp(-x))
##############################
# Function ReLu
##############################
def relu(x):
"""A rectified linear unit (ReLu) activation function. Accept array-like
inputs, and uses NumPy for efficient computation."""
return np.maximum(0, x)
##############################
# Function softmax
##############################
def softmax(x):
"""A softmax activation function. Accepts array-like input and normalizes
each element relative to the others."""
return np.exp(x)/np.sum(np.exp(x))
##############################
# Class SimpleNeuralNetworkDecoder
##############################
class SimpleNeuralNetworkDecoder():
"""Decode a real-vector genome into a neural network by treating it
as a test_sequence of weight matrices.
For example, say we have a linear real-valued made up of 29 values:
>>> genome = list(range(0, 29))
We can decode this into a neural network with 4 inputs, two hidden layers
(of size 3 and 2), and 2 outputs like so:
>>> from leap_ec.executable_rep import neural_network
>>> dec = neural_network.SimpleNeuralNetworkDecoder([ 4, 3, 2, 2 ])
>>> nn = dec.decode(genome)
:param (int) shape: the size of each layer of the network, i.e. (inputs,
hidden nodes, outputs). The shape tuple must have at least two
elements (inputs + bias weight and outputs): each additional value is treated as a hidden layer.
Note also that we expect a bias weight to exist for the inputs of each layer,
so the number of weights at each layer will be set to 1 greater
than the number of inputs you specify for that layer.
"""
def __init__(self, shape: Tuple[int], activation=sigmoid):
assert(shape is not None)
assert(len(shape) > 1)
shape = [ x for x in shape if x != 0 ] # Ignore layers of size zero
# Pair the shapes into the dimensions of each weight matrix,
        # adding one row to each layer's input so they can accommodate
        # a bias unit.
        # ex. [a, b, c, d] -> [(a + 1, b), (b + 1, c), (c + 1, d)]
shape = np.array(shape)
self.dimensions = list(zip(1 + shape[:-1], shape[1:]))
matrix_lengths = list(map(lambda x: x[0]*x[1], self.dimensions))
self.length = sum(matrix_lengths)
self.activation = activation
def decode(self, genome, *args, **kwargs):
"""Decode a genome into a `SimpleNeuralNetworkExecutable`."""
if len(genome) != self.length:
raise ValueError(f"Expected a genome of length {self.length}, but received one of {len(genome)}.")
# Extract each layer's weight matrix from the linear genome
start = 0
weight_matrices = []
for num_inputs, num_outputs in self.dimensions:
end = start + num_inputs*num_outputs
layer_sequence = genome[start:end]
layer_matrix = np.reshape(layer_sequence, (num_inputs, num_outputs))
weight_matrices.append(layer_matrix)
start = end
return SimpleNeuralNetworkExecutable(weight_matrices, self.activation)
##############################
# Class SimpleNeuralNetworkExecutable
##############################
class SimpleNeuralNetworkExecutable(Executable):
"""A simple fixed-architecture neural network that can be executed on inputs.
Takes a list of weight matrices and an activation function as arguments. The
weight matrices each must have 1 row more than the previous layer's outputs,
to support a bias node that is implicitly connected to each layer.
For example, here we build a network with 10 inputs, two hidden layers (with
5 and 3 nodes, respectively), and 5 output nodes, and random weights:
>>> import numpy as np
>>> from leap_ec.executable_rep import neural_network
>>> n_inputs = 10
>>> n_hidden1, n_hidden2 = 5, 3
>>> n_outputs = 5
>>> weights = [ np.random.uniform((n_inputs + 1, n_hidden1)),
... np.random.uniform((n_hidden1 + 1, n_hidden2)),
... np.random.uniform((n_hidden2 + 1, n_outputs)) ]
>>> nn = neural_network.SimpleNeuralNetworkExecutable(weights, neural_network.sigmoid)
"""
def __init__(self, weight_matrices, activation):
assert(weight_matrices is not None)
assert(activation is not None)
self.weight_matrices = weight_matrices
self.activation = activation
def __call__(self, input_):
assert(input_ is not None)
signal = np.array(input_)
#print(f"\n\nINPUT\n{signal.tolist()}")
for W in self.weight_matrices:
signal = np.append(signal, 1.0) # Add a constant bias unit to the input
#print(f"\n\n\nWEIGHTS\n{W.tolist()}")
signal = self.activation(np.dot(signal, W))
assert(len(signal) > 0)
#print(f"\n\n\nOUTPUT\n{signal.tolist()}")
return signal
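# Usage sketch tying the decoder and the executable together (shapes chosen only for
# illustration): a [4, 3, 2] network needs (4 + 1) * 3 + (3 + 1) * 2 = 23 genes, and calling
# the decoded network on a 4-element input yields a 2-element activation vector.
#
#   dec = SimpleNeuralNetworkDecoder([4, 3, 2])
#   nn = dec.decode(list(range(dec.length)))
#   output = nn([0.1, 0.2, 0.3, 0.4])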
|
StarcoderdataPython
|
3304206
|
#"issubclass" es capaz de identificar una relación entre dos clases,
# y aunque su diagnóstico no es complejo, puede verificar si una clase particular
# es una subclase de cualquier otra clase.
#Cada clase se considera una subclase de sí misma.
class Vehiculo:
pass
class VehiculoTerrestre(Vehiculo):
pass
class VehiculoOruga(VehiculoTerrestre):
pass
for cls1 in [Vehiculo, VehiculoTerrestre, VehiculoOruga]:
for cls2 in [Vehiculo, VehiculoTerrestre, VehiculoOruga]:
print(issubclass(cls1, cls2), end="\t")
print()
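# For reference, the loop above prints the following matrix (rows follow the outer-loop order
# Vehiculo, VehiculoTerrestre, VehiculoOruga; every class counts as a subclass of itself):
#   True   False  False
#   True   True   False
#   True   True   True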
|
StarcoderdataPython
|
83716
|
# coding=utf-8
from __future__ import unicode_literals
from datetime import datetime, timedelta
from pub_site.constant import WithdrawState
from pub_site.withdraw import dba as withdraw_dba
from pub_site import pay_client
from pub_site.sms import sms
from tools.utils import to_bankcard_mask
def fetch_notify_withdraw_result(minutes):
now = datetime.utcnow()
d = timedelta(minutes=minutes)
t = now - d
withdraw_records = withdraw_dba.get_requested_withdraw_record_before(t)
for withdraw_record in withdraw_records:
user_id = withdraw_record.user_id
sn = withdraw_record.sn
data = pay_client.query_withdraw(user_id, sn)
if data is None:
continue
is_success = is_withdraw_result_success(data['code'])
if is_success is not None:
notify_user_withdraw_result(is_success, withdraw_record)
def is_withdraw_result_success(code):
if code not in [0, '0', 1, '1']:
return None
return code in [0, '0']
def notify_user_withdraw_result(is_success, withdraw_record):
msg = _build_msg(is_success, withdraw_record)
notified = sms.send(withdraw_record.phone_no, msg)
if not notified:
        # retry once on failure; TODO: use celery.
notified = sms.send(withdraw_record.phone_no, msg)
if notified:
new_state = WithdrawState.SUCCESS if is_success else WithdrawState.FAILED
withdraw_dba.update_withdraw_state(withdraw_record.sn, withdraw_record.user_id, new_state)
return True
def _build_msg(is_success, withdraw_record):
user_id = withdraw_record.user_id
bankcard_id = withdraw_record.bankcard_id
bc = pay_client.app_get_user_bankcard(user_id, bankcard_id)
params = {
'created_on': withdraw_record.created_on,
'amount': withdraw_record.amount,
'bank_name': bc['bank_name'],
'card_no': to_bankcard_mask(bc['card_no'])
}
if is_success:
params['actual_amount'] = withdraw_record.actual_amount
params['fee'] = withdraw_record.fee
msg = "您于{created_on}提现{amount}到{bank_name}({card_no})的请求已处理,实际金额: {actual_amount}, 手续费: {fee}; 正等待到账,请留意银行卡到账信息。"
else:
msg = "您于{created_on}提现{amount}到{bank_name}({card_no})的请求失败。"
return msg.format(**params)
|
StarcoderdataPython
|