max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
src/pretix/plugins/badges/__init__.py | snadal/pretix | 0 | 12786051 | from django.apps import AppConfig
from django.utils.translation import ugettext, ugettext_lazy as _
from pretix import __version__ as version
class BadgesApp(AppConfig):
name = 'pretix.plugins.badges'
verbose_name = _("Badges")
class PretixPluginMeta:
name = _("Badges")
author = _("the pretix team")
version = version
category = "FEATURE"
description = _("This plugin allows you to generate badges or name tags for your attendees.")
def ready(self):
from . import signals # NOQA
def installed(self, event):
if not event.badge_layouts.exists():
event.badge_layouts.create(
name=ugettext('Default'),
default=True,
)
default_app_config = 'pretix.plugins.badges.BadgesApp'
| 1.929688 | 2 |
SPICE/__init__.py | nfahlgren/SPICE_py | 6 | 12786052 | from SPICE.SPICE import *
from SPICE.QPP import *
from SPICE import util
| 1.101563 | 1 |
benchmarks/pydy_pendulum.py | Midnighter/symengine.py | 133 | 12786053 | import os
import time
import sys
sys.path = ["../sympy", "../pydy", "../symengine.py"] + sys.path
import sympy
import symengine
import pydy
from sympy.physics.mechanics.models import n_link_pendulum_on_cart
print(sympy.__file__)
print(symengine.__file__)
print(pydy.__file__)
if (len(sys.argv) > 1):
n = int(sys.argv[1])
else:
n = 4
start = time.time()
system = n_link_pendulum_on_cart(n, cart_force=False)  # use a distinct name so the sys module is not shadowed
end = time.time()
print("%s s" % (end-start))
#print(system.eom_method.mass_matrix)
| 2.109375 | 2 |
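The benchmark above times a single symbolic derivation of an n-link pendulum on a cart. As a quick illustration (not part of the original record), the same timing can be swept over several pendulum sizes; this sketch assumes sympy is installed and importable:

# Hypothetical sweep over pendulum sizes, timing each call to the sympy model builder.
import time
from sympy.physics.mechanics.models import n_link_pendulum_on_cart

for n in (1, 2, 4, 8):
    start = time.time()
    system = n_link_pendulum_on_cart(n, cart_force=False)
    print("n=%d: %.3f s" % (n, time.time() - start))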
shutterstock_api/models/license_request_metadata.py | Lumen5/shutterstock-api | 1 | 12786054 | # coding: utf-8
"""
Shutterstock API Reference
The Shutterstock API provides access to Shutterstock's library of media, as well as information about customers' accounts and the contributors that provide the media. # noqa: E501
OpenAPI spec version: 1.0.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class LicenseRequestMetadata(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'customer_id': 'str',
'geo_location': 'str',
'number_viewed': 'str',
'search_term': 'str'
}
attribute_map = {
'customer_id': 'customer_ID',
'geo_location': 'geo_location',
'number_viewed': 'number_viewed',
'search_term': 'search_term'
}
def __init__(self, customer_id=None, geo_location=None, number_viewed=None, search_term=None): # noqa: E501
"""LicenseRequestMetadata - a model defined in Swagger""" # noqa: E501
self._customer_id = None
self._geo_location = None
self._number_viewed = None
self._search_term = None
self.discriminator = None
if customer_id is not None:
self.customer_id = customer_id
if geo_location is not None:
self.geo_location = geo_location
if number_viewed is not None:
self.number_viewed = number_viewed
if search_term is not None:
self.search_term = search_term
@property
def customer_id(self):
"""Gets the customer_id of this LicenseRequestMetadata. # noqa: E501
The ID of a revenue-sharing partner's customer # noqa: E501
:return: The customer_id of this LicenseRequestMetadata. # noqa: E501
:rtype: str
"""
return self._customer_id
@customer_id.setter
def customer_id(self, customer_id):
"""Sets the customer_id of this LicenseRequestMetadata.
The ID of a revenue-sharing partner's customer # noqa: E501
:param customer_id: The customer_id of this LicenseRequestMetadata. # noqa: E501
:type: str
"""
self._customer_id = customer_id
@property
def geo_location(self):
"""Gets the geo_location of this LicenseRequestMetadata. # noqa: E501
The customer's location # noqa: E501
:return: The geo_location of this LicenseRequestMetadata. # noqa: E501
:rtype: str
"""
return self._geo_location
@geo_location.setter
def geo_location(self, geo_location):
"""Sets the geo_location of this LicenseRequestMetadata.
The customer's location # noqa: E501
:param geo_location: The geo_location of this LicenseRequestMetadata. # noqa: E501
:type: str
"""
self._geo_location = geo_location
@property
def number_viewed(self):
"""Gets the number_viewed of this LicenseRequestMetadata. # noqa: E501
How many pieces of media the customer viewed # noqa: E501
:return: The number_viewed of this LicenseRequestMetadata. # noqa: E501
:rtype: str
"""
return self._number_viewed
@number_viewed.setter
def number_viewed(self, number_viewed):
"""Sets the number_viewed of this LicenseRequestMetadata.
How many pieces of media the customer viewed # noqa: E501
:param number_viewed: The number_viewed of this LicenseRequestMetadata. # noqa: E501
:type: str
"""
self._number_viewed = number_viewed
@property
def search_term(self):
"""Gets the search_term of this LicenseRequestMetadata. # noqa: E501
The search term that the customer used # noqa: E501
:return: The search_term of this LicenseRequestMetadata. # noqa: E501
:rtype: str
"""
return self._search_term
@search_term.setter
def search_term(self, search_term):
"""Sets the search_term of this LicenseRequestMetadata.
The search term that the customer used # noqa: E501
:param search_term: The search_term of this LicenseRequestMetadata. # noqa: E501
:type: str
"""
self._search_term = search_term
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(LicenseRequestMetadata, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LicenseRequestMetadata):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 2.125 | 2 |
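The record above is a swagger-generated model: `swagger_types` declares attribute types, `attribute_map` maps attributes to JSON keys, and `to_dict`/`to_str` handle serialization. A minimal usage sketch follows (my illustration with made-up values; it assumes the module is importable under the path shown in the record):

# Hypothetical example: build a LicenseRequestMetadata and serialize it.
from shutterstock_api.models.license_request_metadata import LicenseRequestMetadata

meta = LicenseRequestMetadata(customer_id="12345",   # serialized under the JSON key "customer_ID" per attribute_map
                              geo_location="US",
                              number_viewed="25",
                              search_term="kites")
print(meta.to_dict())   # {'customer_id': '12345', 'geo_location': 'US', ...}
print(meta == LicenseRequestMetadata(customer_id="12345", geo_location="US",
                                     number_viewed="25", search_term="kites"))  # True; __eq__ compares __dict__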
anonympy/pandas/core_pandas.py | ArtLabss/open-data-anonimizer | 29 | 12786055 | import pandas as pd
import numpy as np
from texttable import Texttable
from cape_privacy.pandas import dtypes
from cape_privacy.pandas.transformations import NumericPerturbation
from cape_privacy.pandas.transformations import DatePerturbation
from cape_privacy.pandas.transformations import NumericRounding
from cape_privacy.pandas.transformations import Tokenizer
from faker import Faker
from anonympy.pandas import utils_pandas as _utils
from sklearn.decomposition import PCA
class dfAnonymizer(object):
"""
Initializes pandas DataFrame as a dfAnonymizer object.
Parameters:
----------
df: pandas DataFrame
Returns:
----------
dfAnonymizer object
Raises
----------
Exception:
* If ``df`` is not a DataFrame
See also
----------
dfAnonymizer.to_df : Return a DataFrame
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
Constructing a dfAnonymizer object:
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
>>> anonym.to_df()
name age ... email ssn
0 Bruce 33 ... <EMAIL> 343554334
1 Tony 48 ... <EMAIL> 656564664
"""
def __init__(self,
df: pd.DataFrame):
if df.__class__.__name__ != "DataFrame":
raise Exception(f"{df} is not a pandas DataFrame.")
# Private Attributes
self._df = df.copy()
self._df2 = df.copy()
self._methods_applied = {}
self._synthetic_data = 'Synthetic Data'
self._tokenization = 'Tokenization'
self._numeric_perturbation = 'Numeric Perturbation'
self._datetime_perturbation = 'Datetime Perturbation'
self._round = 'Generalization - Rounding'
self._bin = 'Generalization - Binning'
self._drop = 'Column Suppression'
self._sample = 'Resampling'
self._PCA = 'PCA Masking'
self._email = 'Partial Masking'
# Public Attributes
self.anonymized_columns = []
self.columns = self._df.columns.tolist()
self.unanonymized_columns = self.columns.copy()
self.numeric_columns = _utils.get_numeric_columns(self._df)
self.categorical_columns = _utils.get_categorical_columns(self._df)
self.datetime_columns = _utils.get_datetime_columns(self._df)
self._available_methods = _utils.av_methods
self._fake_methods = _utils.faker_methods
def __str__(self):
return self._info().draw()
def __repr__(self):
return self._info().draw()
def _dtype_checker(self, column: str):
'''
Returns the dtype of the column
Parameters
----------
column: str
Returns
----------
dtype: numpy dtype
'''
dtype = self._df[column].dtype
if dtype == np.float32:
return dtypes.Float
elif dtype == np.float64:
return dtypes.Double
elif dtype == np.byte:
return dtypes.Byte
elif dtype == np.short:
return dtypes.Short
elif dtype == np.int32:
return dtypes.Integer
elif dtype == np.int64:
return dtypes.Long
else:
return None
def anonymize(self,
methods=None,
locale=['en_US'],
seed=None,
inplace=True):
'''
Anonymize all columns using different methods for each dtype.
If a dictionary is not provided, ``numeric_rounding`` is applied to
numerical columns, ``categorical_fake`` and ``categorical_tokenization``
to categorical columns, and ``datetime_noise`` or ``datetime_fake`` to
datetime columns.
Parameters
----------
methods : Optional[Dict[str, str]], default None
{column_name: anonympy_method}. Call ``available_methods`` for list
of all methods.
locale : str or List[str], default ['en_US']
See https://faker.readthedocs.io/en/master/locales.html for all
faker's locales.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object, else
output is returned.
seed : Optional[int], default None
Pass an integer for reproducible output across multiple function
calls.
Returns
----------
If inplace is False, pandas Series or DataFrame is returned
See Also
--------
dfAnonymizer.categorical_fake_auto : Replace values with synthetically
generated ones
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset, \
available_methods
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
If methods None:
>>> anonym.anonymize(inplace = False)
name age ... email ssn
0 <NAME> 30 ... <EMAIL> 718-51-5290
1 <NAME> 50 ... <EMAIL> 684-81-8137
Passing a dict for specifying which methods to apply:
>>> available_methods('numeric')
numeric_noise numeric_binning numeric_masking numeric_rounding
>>> anonym.anonymize({'name':'categorical_fake',
... 'age':'numeric_noise',
... 'email':'categorical_email_masking',
... 'salary': 'numeric_rounding'}, inplace = False)
name age email salary
0 <NAME> 37 <EMAIL> 60000.0
1 <NAME> 52 <EMAIL> 50000.0
'''
if not methods:
if inplace:
# try synthetic data
self.categorical_fake_auto(locale=locale, seed=seed)
# if there are still columns left unanonymized
if self.unanonymized_columns:
for column in self.unanonymized_columns.copy():
if column in self.numeric_columns:
self.numeric_rounding(column)
elif column in self.categorical_columns:
self.categorical_tokenization(column,
key=str(seed))
elif column in self.datetime_columns:
self.datetime_noise(column, seed=seed)
else:
# try synthetic data
temp = self.categorical_fake_auto(locale=locale,
inplace=False,
seed=seed)
unanonymized = self.unanonymized_columns.copy()
if isinstance(temp, pd.DataFrame):
unanonymized = [column for column in unanonymized
if column not in temp.columns.to_list()]
elif isinstance(temp, pd.Series):
unanonymized.remove(temp.name)
temp = pd.DataFrame(temp)
else: # nothing was faked, so temp is None
temp = pd.DataFrame()
if unanonymized:
for column in unanonymized:
if column in self.numeric_columns:
temp[column] = self.numeric_rounding(column,
inplace=False)
elif column in self.categorical_columns:
temp[column] = self.categorical_tokenization(
column,
inplace=False,
key=str(seed))
elif column in self.datetime_columns:
temp[column] = self.datetime_noise(column,
inplace=False,
seed=seed)
return temp
# if dictionary with methods was passed
else:
if inplace:
for key, value in methods.items():
# numeric
if value == "numeric_noise":
self.numeric_noise(key, seed=seed)
elif value == "numeric_binning":
self.numeric_binning(key)
elif value == "numeric_masking":
self.numeric_masking(key)
elif value == "numeric_rounding":
self.numeric_rounding(key)
# categorical
elif value == "categorical_fake":
self.categorical_fake(key, seed=seed)
elif value == "categorical_resampling":
self.categorical_resampling(key, seed=seed)
elif value == "categorical_tokenization":
self.categorical_tokenization(key, key=str(seed))
elif value == "categorical_email_masking":
self.categorical_email_masking(key)
# datetime
elif value == "datetime_fake":
self.datetime_fake(key, seed=seed)
elif value == "datetime_noise":
self.datetime_noise(key, seed=seed)
# drop
elif value == "column_suppression":
self.column_suppression(key)
else:
temp = pd.DataFrame()
for key, value in methods.items():
# numeric
if value == "numeric_noise":
temp[key] = self.numeric_noise(key,
inplace=False,
seed=seed)
elif value == "numeric_binning":
temp[key] = self.numeric_binning(key, inplace=False)
elif value == "numeric_masking":
temp[key] = self.numeric_masking(key, inplace=False)
elif value == "numeric_rounding":
temp[key] = self.numeric_rounding(key, inplace=False)
# categorical
elif value == "categorical_fake":
temp[key] = self.categorical_fake(key,
inplace=False,
seed=seed)
elif value == "categorical_resampling":
temp[key] = self.categorical_resampling(key,
inplace=False,
seed=seed)
elif value == "categorical_tokenization":
temp[key] = self.categorical_tokenization(
key,
inplace=False,
key=str(seed))
elif value == 'categorical_email_masking':
temp[key] = self.categorical_email_masking(
key,
inplace=False)
# datetime
elif value == "datetime_fake":
temp[key] = self.datetime_fake(key,
inplace=False,
seed=seed)
elif value == "datetime_noise":
temp[key] = self.datetime_noise(key,
inplace=False,
seed=seed)
# drop
elif value == "column_suppression":
pass
if len(temp.columns) > 1:
return temp
elif len(temp.columns) == 1:
return pd.Series(temp[temp.columns[0]])
def _fake_column(self,
column,
method,
locale=['en_US'],
seed=None,
inplace=True):
'''
Anonymize pandas Series object using synthetic data generator
Based on faker.Faker.
Parameters
----------
column : str
Column name which data will be substituted.
method : str
Method name. List of all methods ``fake_methods``.
locale : str or List[str], default ['en_US']
See https://faker.readthedocs.io/en/master/locales.html for all
faker's locales.
seed : Optional[int], default None
Pass an integer for reproducible output across multiple function
calls.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object, else
output is returned.
Returns
----------
None if inplace is True, else pandas Series is returned
See also
----------
dfAnonymizer.categorical_fake : Replace values with synthetically
generated ones by specifying which methods to apply
'''
Faker.seed(seed)
fake = Faker(locale=locale)
method = getattr(fake, method)
faked = self._df[column].apply(lambda x: method())
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = faked
self.unanonymized_columns.remove(column)
self.anonymized_columns.append(column)
self._methods_applied[column] = self._synthetic_data
else:
return faked
def categorical_fake(self,
columns,
locale=['en_US'],
seed=None,
inplace=True):
'''
Replace data with synthetic data using faker's generator.
To see the list of all faker's methods, call ``fake_methods``.
If column name and faker's method are similar, then pass a string or a
list of strings for `columns` argument
Otherwise, pass a dictionary with column name as a key and faker's
method as a value `{col_name: fake_method}`.
Parameters
----------
columns : Union[str, List[str], Dict[str, str]]
If a string or list of strings is passed, the function will assume that
the method name is the same as the column name.
locale : str or List[str], default ['en_US']
See https://faker.readthedocs.io/en/master/locales.html for all
faker's locales.
seed : Optional[int], default None
Pass an integer for reproducible output across multiple function
calls.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object, else
output is returned.
Returns
----------
None if inplace is True, else pandas Series or pandas DataFrame is
returned
See Also
--------
dfAnonymizer.categorical_fake_auto : Replace values with synthetically
generated ones
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
If methods are not specified, locale Great Britain:
>>> anonym.categorical_fake(['name', 'email', 'ssn'],
... locale = 'en_GB',
... inplace = False)
name email ssn
0 <NAME> <EMAIL> ZZ 180372 T
1 <NAME> <EMAIL> ZZ780511T
Passing a specific method, locale Russia:
>>> fake_methods('n')
name, name_female, name_male, name_nonbinary, nic_handle,
nic_handles, null_boolean, numerify
>>> anonym.categorical_fake({'name': 'name_nonbinary', 'web': 'url'},
... locale = 'ru_RU',
... inplace = False)
name web
0 <NAME> https://shestakov.biz
1 <NAME> https://monetka.net
'''
# if a single column is passed (str)
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
if inplace:
self._fake_column(columns,
columns,
inplace=True,
seed=seed,
locale=locale)
else:
return self._fake_column(columns,
columns,
inplace=False,
seed=seed,
locale=locale)
# if a list of columns is passed
elif isinstance(columns, list):
temp = pd.DataFrame()
if inplace:
for column in columns:
self._fake_column(column,
column,
inplace=True,
seed=seed,
locale=locale)
else:
for column in columns:
faked = self._fake_column(column,
column,
inplace=False,
seed=seed,
locale=locale)
temp[column] = faked
return temp
# if a dictionary with column name and method name is passed
elif isinstance(columns, dict):
temp = pd.DataFrame()
if inplace:
for column, method in columns.items():
self._fake_column(column,
method,
inplace=True,
seed=seed,
locale=locale)
else:
for column, method in columns.items():
faked = self._fake_column(column,
method,
inplace=False,
seed=seed,
locale=locale)
temp[column] = faked
if len(columns) == 1:
return temp[column]
else:
return temp
def categorical_fake_auto(self,
locale=['en_US'],
seed=None,
inplace=True):
'''
Anonymize only those column which names are in ``fake_methods`` list.
Parameters
----------
locale : str or List[str], default ['en_US']
See https://faker.readthedocs.io/en/master/locales.html for all
faker's locales.
seed : Optional[int], default None
Pass an integer for reproducible output across multiple function
calls.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object, else
output is returned.
Returns
----------
None if inplace = True, else an anonymized pandas Series or pandas
DataFrame
See also
----------
dfAnonymizer.categorical_fake : Replace values with synthetically
generated ones by specifying which methods to apply
Notes
----------
In order to produce synthetic data, the column name should match a
faker method name.
The function will go over all columns and, if a column name matches any
faker method, its values will be replaced.
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset, fake_methods
Change column names so the function can understand which method to
apply:
>>> df = load_dataset()
>>> fake_methods('n')
name, name_female, name_male, name_nonbinary, nic_handle,
nic_handles, null_boolean, numerify
>>> df.rename(columns={'name': 'name_female'}, inplace = True)
>>> anonym = dfAnonymizer(df)
Calling the method without specifying which methods to apply, locale
Japan:
>>> anonym.categorical_fake_auto(locale = 'ja_JP',
... inplace = False)
name_female email ssn
0 西村 あすか <EMAIL> 783-28-2531
1 山口 直子 <EMAIL> 477-58-9577
'''
temp = pd.DataFrame()
for column in self.columns:
func = column.strip().lower()
if func in _utils._fake_methods:
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._fake_column(column,
func,
inplace=True,
seed=seed,
locale=locale)
else:
temp[column] = self._fake_column(column,
func,
inplace=False,
seed=seed,
locale=locale)
if not inplace:
if len(temp.columns) > 1:
return temp
elif len(temp.columns) == 1:
return pd.Series(temp[temp.columns[0]])
else:
return None
def numeric_noise(self,
columns,
MIN=-10,
MAX=10,
seed=None,
inplace=True):
'''
Add uniform random noise
Based on cape-privacy's NumericPerturbation function.
Mask a numeric pandas Series/DataFrame by adding uniform random
noise to each value. The amount of noise is drawn from
the interval [min, max).
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
MIN : (int, float), default -10
The values generated will be greater than or equal to min.
MAX : (int, float), default 10
The values generated will be less than max.
seed : int, default None
To initialize the random generator.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object,
else output is returned.
Returns
----------
ser: pandas Series or pandas DataFrame with uniform random noise added
See also
----------
dfAnonymizer.numeric_binning : Bin values into discrete intervals
dfAnonymizer.numeric_masking : Apply PCA masking to numeric values
dfAnonymizer.numeric_rounding : Round values to the given number
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Applying numeric perturbation:
>>> anonym.numeric_noise('age', inplace = False)
0 29
1 48
dtype: int64
'''
# If a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
dtype = self._dtype_checker(columns)
noise = NumericPerturbation(dtype=dtype,
min=MIN,
max=MAX,
seed=seed)
ser = noise(self._df[columns].copy()).astype(dtype)
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._numeric_perturbation
else:
return ser.astype(dtype)
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
dtype = self._dtype_checker(column)
noise = NumericPerturbation(dtype=dtype,
min=MIN,
max=MAX,
seed=seed)
ser = noise(self._df[column].copy()).astype(dtype)
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = ser
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._numeric_perturbation # noqa: E501
else:
temp[column] = ser
if not inplace:
return temp
def datetime_noise(self,
columns,
frequency=("MONTH", "DAY"),
MIN=(-10, -5, -5),
MAX=(10, 5, 5),
seed=None,
inplace=True):
'''
Add uniform random noise to a Pandas series of timestamps
Based on cape-privacy's DatePerturbation function
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
frequency : Union[str, Tuple[str]], default ("MONTH", "DAY")
One or more datetime components (e.g. "YEAR", "MONTH", "DAY") to perturb
MIN : Union[int, Tuple[int, ...]], default (-10, -5, -5)
The values generated will be greater than or equal to min.
MAX : Union[int, Tuple[int, ...]], default (10, 5, 5)
The values generated will be less than max.
seed : int, default None
To initialize the random generator.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object,
else output is returned.
Returns
----------
ser: pandas Series or pandas DataFrame
See also
----------
dfAnonymizer.datetime_fake : Replace values with synthetic dates
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Calling the method, specifying which frequencies to perturb:
>>> anonym.datetime_noise('birthdate',
frequency=('YEAR', 'MONTH', 'DAY'),
inplace = False)
0 1916-03-16
1 1971-04-24
Name: birthdate, dtype: datetime64[ns]
'''
# if a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
noise = DatePerturbation(frequency=frequency,
min=MIN,
max=MAX,
seed=seed)
ser = noise(self._df[columns].copy())
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._datetime_perturbation # noqa: E501
else:
return ser
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
noise = DatePerturbation(frequency=frequency,
min=MIN,
max=MAX,
seed=seed)
ser = noise(self._df[column].copy())
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = ser
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._datetime_perturbation # noqa: E501
else:
temp[column] = ser
if not inplace:
return temp
def numeric_rounding(self,
columns,
precision=None,
inplace=True):
'''
Round each value in the Pandas Series to the given number
Based on cape-privacy's NumericRounding.
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
precision : int, default None
The number of digits.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object,
else output is returned.
Returns
----------
pandas Series or pandas DataFrame if inplace = False, else None
See also
----------
dfAnonymizer.numeric_binning : Bin values into discrete intervals
dfAnonymizer.numeric_masking : Apply PCA masking
dfAnonymizer.numeric_noise : Add uniform random noise
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Apply Numeric Rounding:
>>> anonym.numeric_rounding(['age', 'salary'], inplace = False)
age salary
0 30 60000.0
1 50 50000.0
'''
# if a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
dtype = self._dtype_checker(columns)
if precision is None:
precision = len(str(int(self._df[columns].mean()))) - 1
rounding = NumericRounding(dtype=dtype, precision=-precision)
ser = rounding(self._df[columns].copy()).astype(dtype)
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._round
else:
return ser
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
dtype = self._dtype_checker(column)
precision = len(str(int(self._df[column].mean()))) - 1
rounding = NumericRounding(dtype=dtype, precision=-precision)
ser = rounding(self._df[column].copy())
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = ser
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._round
else:
temp[column] = ser.astype(dtype)
if not inplace:
return temp
def numeric_masking(self,
columns,
inplace=True):
'''
Apply PCA masking to a column/columns
Based on sklearn's PCA function
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object,
else output is returned.
Returns
----------
ser : pandas Series or pandas DataFrame
See also
----------
dfAnonymizer.numeric_binning : Bin values into discrete intervals
dfAnonymizer.numeric_rounding : Round values to the given number
dfAnonymizer.numeric_noise : Add uniform random noise
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Apply PCA Masking:
>>> num_cols = anonym.numeric_columns
>>> anonym.numeric_masking(num_cols, inplace = False)
age salary
0 -4954.900676 5.840671e-15
1 4954.900676 5.840671e-15
'''
# if a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
pca = PCA(n_components=1)
ser = pd.DataFrame(pca.fit_transform(self._df[[columns]]),
columns=[columns])
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser[columns]
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._PCA
else:
return ser[columns]
# if a list of columns is passed
else:
if not inplace:
pca = PCA(n_components=len(columns))
return pd.DataFrame(pca.fit_transform(self._df[columns]),
columns=columns)
else:
for column in columns:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._PCA
pca = PCA(n_components=len(columns))
self._df[columns] = pca.fit_transform(self._df[columns])
def categorical_tokenization(self,
columns,
max_token_len=10,
key=None,
inplace=True):
'''
Maps a string to a token (hexadecimal string) to obfuscate it.
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
max_token_len : int, default 10
Control the token length.
key : str, default None
String or Byte String. If not specified, key will be set to a
random byte string.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object, else
output is returned.
Returns
----------
ser : pandas Series or pandas DataFrame
See also
----------
dfAnonymizer.categorical_fake : Replace values with synthetically
generated ones by specifying which methods to apply
dfAnonymizer.categorical_resampling : Resample values from the same
distribution
dfAnonymizer.categorical_email_masking : Apply partial masking to
emails
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Passing only categorical columns:
>>> anonym.categorical_columns
['name', 'web', 'email', 'ssn']
>>> anonym.categorical_tokenization(['name', 'web', 'email', 'ssn'],
inplace = False)
name web email ssn
0 a6488532f8 f8516a7ce9 a07981a4d6 9285bc9cb7
1 f7231e5026 44dfa9af8e 25ca1a128b a7a16a7c7d
'''
# if a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
tokenize = Tokenizer(max_token_len=max_token_len, key=key)
ser = tokenize(self._df[columns])
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._tokenization
else:
return ser
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
tokenize = Tokenizer(max_token_len=max_token_len, key=key)
ser = tokenize(self._df[column])
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = ser
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._tokenization
else:
temp[column] = ser
if not inplace:
return temp
def _mask(self, s):
'''
Mask a single email
Parameters
----------
s : str
string to mask.
Returns
----------
masked : str
See also
----------
dfAnonymizer.categorical_email_masking : Apply partial masking to email
'''
lo = s.find('@')
if lo > 0:
masked = s[0] + '*****' + s[lo-1:]
return masked
else:
raise Exception('Invalid Email')
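# Illustrative note (added sketch, not part of the original class): for a
# hypothetical input such as 'jane.doe@example.com', _mask keeps the first
# character and the character just before the '@', producing
# 'j*****e@example.com'; strings with no '@' (or one at position 0) raise
# Exception('Invalid Email').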
def categorical_email_masking(self,
columns,
inplace=True):
'''
Apply Partial Masking to emails.
Parameters
----------
columns: Union[str, List[str]]
Column name or a list of column names.
inplace: Optional[bool] = True
If True the changes will be applied to the `dfAnonymizer` object, else
output is returned.
Returns
----------
ser : pandas Series or pandas DataFrame
See also
----------
dfAnonymizer.categorical_fake : Replace values with synthetically
generated ones by specifying which methods to apply
dfAnonymizer.categorical_resampling : Resample values from the same
distribution
dfAnonymizer.categorical_tokenization : Map a string to a token
Notes
----------
Applicable only to columns containing email strings.
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Calling the method on email column:
>>> anonym.categorical_email_masking('email', inplace=False)
0 <EMAIL>
1 <EMAIL>
Name: email, dtype: object
'''
# if a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
ser = self._df[columns].apply(lambda x: self._mask(x))
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._email
else:
return ser
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
ser = self._df[column].apply(lambda x: self._mask(x))
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = ser
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._email
else:
temp[column] = ser
if not inplace:
return temp
def datetime_fake(self,
columns,
pattern='%Y-%m-%d',
end_datetime=None,
seed=None,
locale=['en_US'],
inplace=True):
'''
Replace Column's values with synthetic dates between January 1, 1970
and now.
Based on faker `date()` method
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
pattern : str, default '%Y-%m-%d'
end_datetime : Union[datetime.date, datetime.datetime,
datetime.timedelta, str, int, None], default None
locale : str or List[str], default ['en_US']
See https://faker.readthedocs.io/en/master/locales.html for all
faker's locales.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object, else
output is returned.
Returns
----------
ser : pandas Series or pandas DataFrame
See also
----------
dfAnonymizer.datetime_noise : Add uniform random noise to the column
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Calling the method on the datetime column:
>>> anonym.datetime_fake('birthdate', inplace = False)
0 2018-04-09
1 2005-05-28
Name: birthdate, dtype: datetime64[ns]
'''
Faker.seed(seed)
fake = Faker(locale=locale)
# if a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
ser = self._df[columns].apply(lambda x: pd.to_datetime(fake.date(
pattern=pattern,
end_datetime=end_datetime)))
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._synthetic_data
else:
return ser
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
ser = self._df[column].apply(
lambda x: pd.to_datetime(fake.date(
pattern=pattern,
end_datetime=end_datetime)))
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = ser
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._synthetic_data
else:
temp[column] = ser
if not inplace:
return temp
def column_suppression(self,
columns,
inplace=True):
'''
Redact a column (drop)
Based on pandas `drop` method
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object, else
output is returned.
Returns
----------
ser : None if inplace = True, else pandas Series or pandas DataFrame
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
>>> anonym.to_df()
name age ... email ssn
0 Bruce 33 ... <EMAIL> 343554334
1 Tony 48 ... <EMAIL> 656564664
Dropping `ssn` column
>>> anonym.column_suppression('ssn', inplace = False)
name age ... web email # noqa: E501
0 Bruce 33 ... http://www.alandrosenburgcpapc.co.uk <EMAIL>
1 Tony 48 ... http://www.capgeminiamerica.co.uk <EMAIL>
'''
# if single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df.drop(columns, axis=1, inplace=True)
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._drop
else:
return self._df2.drop(columns, axis=1, inplace=False)
# if a list of columns is passed
else:
if inplace:
for column in columns:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df.drop(column, axis=1, inplace=True)
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._drop
else:
return self._df2.drop(columns, axis=1, inplace=False)
def numeric_binning(self,
columns,
bins=4,
inplace=True):
'''
Bin values into discrete intervals.
Based on pandas `cut` method
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
bins : int, default 4
The number of equal-width bins to cut the values into.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object,
else output is returned.
Returns
----------
ser : None if inplace = True, else pandas Series or pandas DataFrame
See also
----------
dfAnonymizer.numeric_noise : Add uniform random noise
dfAnonymizer.numeric_masking : Apply PCA masking to numeric values
dfAnonymizer.numeric_rounding : Round values to the given number
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Call the method, specifying the number of bins:
>>> anonym.numeric_binning('age', bins = 2, inplace = False)
0 (33.0, 40.0]
1 (40.0, 48.0]
Name: age, dtype: category
'''
# if a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
ser = pd.cut(self._df[columns], bins=bins, precision=0)
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._bin
else:
return ser
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
ser = pd.cut(self._df[column], bins=bins, precision=0)
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = ser
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._bin
else:
temp[column] = ser
if not inplace:
return temp
def categorical_resampling(self,
columns,
seed=None,
inplace=True):
'''
Sampling from the same distribution
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object, else
output is returned.
Returns
----------
ser : None if inplace = True, else pandas Series or pandas DataFrame
See also:
----------
dfAnonymizer.categorical_fake : Replace values with synthetically
generated ones by specifying which methods to apply
dfAnonymizer.categorical_email_masking : Apply partial masking to
email column
dfAnonymizer.categorical_tokenization : Map a string to a token
Notes
----------
This method should be used on categorical data with finite number of
unique elements.
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
>>> anonym.categorical_resampling('name', inplace =False)
0 Bruce
1 Bruce
dtype: object
'''
# if a single column is passed
np.random.seed(seed)
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
counts = self._df[columns].value_counts(normalize=True)
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = np.random.choice(counts.index,
p=counts.values,
size=len(self._df))
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._sample
else:
return pd.Series(np.random.choice(counts.index,
p=counts.values,
size=len(self._df)))
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
counts = self._df[column].value_counts(normalize=True)
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = np.random.choice(counts.index,
p=counts.values,
size=len(self._df))
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._sample
else:
temp[column] = np.random.choice(counts.index,
p=counts.values,
size=len(self._df))
if not inplace:
return temp
def _info(self):
'''
Print a summary of the DataFrame.
Which columns have been anonymized and which haven't.
Returns
----------
None
See also
----------
dfAnonymizer.info : Print a summary of the DataFrame
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
This method is called when the `dfAnonymizer` instance is displayed:
>>> anonym
+-------------------------------+
| Total number of columns: 7 |
+===============================+
| Anonymized Column -> Method: |
+-------------------------------+
| Unanonymized Columns: |
| - name |
| - age |
| - birthdate |
| - salary |
| - web |
| - email |
| - ssn |
+-------------------------------+
'''
t = Texttable(max_width=150)
header = f'Total number of columns: {self._df.shape[1]}'
row1 = 'Anonymized Column -> Method: '
for column in self.anonymized_columns:
row1 += '\n- ' + column + ' -> ' + \
self._methods_applied.get(column)
row2 = 'Unanonymized Columns: \n'
row2 += '\n'.join([f'- {i}' for i in self.unanonymized_columns])
t.add_rows([[header], [row1], [row2]])
return t
def info(self):
'''
Print a summary of the DataFrame.
Which columns have been anonymized using which methods.
`status = 1` means the column has been anonymized and `status = 0`
means it has not.
Returns
----------
None
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
>>> anonym.info()
+-----------+--------+--------+
| Column | Status | Method |
+===========+========+========+
| name | 0 | |
+-----------+--------+--------+
| age | 0 | |
+-----------+--------+--------+
| birthdate | 0 | |
+-----------+--------+--------+
| salary | 0 | |
+-----------+--------+--------+
| web | 0 | |
+-----------+--------+--------+
| email | 0 | |
+-----------+--------+--------+
| ssn | 0 | |
+-----------+--------+--------+
'''
t = Texttable(150)
t.header(['Column', 'Status', 'Type', 'Method'])
for i in range(len(self.columns)):
column = self.columns[i]
if column in self.anonymized_columns:
status = 1
method = self._methods_applied[column]
else:
status = 0
method = ''
if column in self.numeric_columns:
dtype = 'numeric'
elif column in self.categorical_columns:
dtype = 'categorical'
elif column in self.datetime_columns:
dtype = 'datetime'
else:
dtype = str(self._df[column].dtype)
row = [column, status, dtype, method]
t.add_row(row)
print(t.draw())
def to_df(self):
'''
Convert dfAnonymizer object back to pandas DataFrame
Returns
----------
DataFrame object
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
>>> anonym.to_df()
name age ... email ssn
0 Bruce 33 ... <EMAIL> 343554334
1 Tony 48 ... <EMAIL> 656564664
'''
return self._df.copy()
| 2.765625 | 3 |
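A compact end-to-end sketch of the `anonymize` dispatch documented above. The toy DataFrame and its values are my own illustration; the method mapping mirrors the docstring example and assumes anonympy and its dependencies are installed:

# Hypothetical walkthrough: anonymize a small DataFrame with an explicit method map.
import pandas as pd
from anonympy.pandas import dfAnonymizer

df = pd.DataFrame({"name": ["Bruce", "Tony"],
                   "age": [33, 48],
                   "email": ["bruce@mail.com", "tony@mail.com"],
                   "salary": [55300.0, 45000.0]})
anonym = dfAnonymizer(df)
out = anonym.anonymize({"name": "categorical_fake",
                        "age": "numeric_noise",
                        "email": "categorical_email_masking",
                        "salary": "numeric_rounding"},
                       seed=42, inplace=False)
print(out)      # anonymized copy; with inplace=False the original df is untouched
anonym.info()   # tabular summary of column status, type and method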
Containers/Kubernetes-examples/GCP/greetings/application.py | danavaziri/Cloud | 0 | 12786056 | import os
import time
from flask import request
from flask import Flask, render_template
import mysql.connector
from mysql.connector import errorcode
import json
application = Flask(__name__)
app = application
def get_db_creds():
db = os.environ.get("DB", None) or os.environ.get("database", None)
username = os.environ.get("USER", None) or os.environ.get("username", None)
password = os.environ.get("PASSWORD", None) or os.environ.get("password", None)
hostname = os.environ.get("HOST", None) or os.environ.get("dbhost", None)
return db, username, password, hostname
def create_table():
# Check if table exists or not. Create and populate it only if it does not exist.
db, username, password, hostname = get_db_creds()
table_ddl = 'CREATE TABLE movies(id INT UNSIGNED NOT NULL AUTO_INCREMENT, year INT, title TEXT, director TEXT, actor TEXT, release_date TEXT, rating FLOAT, PRIMARY KEY (id))'
cnx = ''
try:
cnx = mysql.connector.connect(user=username, password=password,
host=hostname,
database=db)
except Exception as exp:
print(exp)
import MySQLdb
#try:
cnx = MySQLdb.connect(unix_socket=hostname, user=username, passwd=password, db=db)
#except Exception as exp1:
# print(exp1)
cur = cnx.cursor()
try:
cur.execute(table_ddl)
cnx.commit()
populate_data()
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print("already exists.")
else:
print(err.msg)
def populate_data():
db, username, password, hostname = get_db_creds()
print("Inside populate_data")
print("DB: %s" % db)
print("Username: %s" % username)
print("Password: %s" % password)
print("Hostname: %s" % hostname)
cnx = ''
try:
cnx = mysql.connector.connect(user=username, password=password,
host=hostname,
database=db)
except Exception as exp:
print(exp)
import MySQLdb
cnx = MySQLdb.connect(unix_socket=hostname, user=username, passwd=password, db=db)
cur = cnx.cursor()
cur.execute("INSERT INTO message (greeting) values ('Hello, World!')")
cnx.commit()
print("Returning from populate_data")
def query_data():
db, username, password, hostname = get_db_creds()
print("Inside query_data")
print("DB: %s" % db)
print("Username: %s" % username)
print("Password: %s" % password)
print("Hostname: %s" % hostname)
cnx = ''
try:
cnx = mysql.connector.connect(user=username, password=password,
host=hostname,
database=db)
except Exception as exp:
print(exp)
import MySQLdb
cnx = MySQLdb.connect(unix_socket=hostname, user=username, passwd=password, db=db)
cur = cnx.cursor()
cur.execute("SELECT greeting FROM message")
entries = [dict(greeting=row[0]) for row in cur.fetchall()]
return entries
try:
print("---------" + time.strftime('%a %H:%M:%S'))
print("Before create_table global")
create_table()
print("After create_data global")
except Exception as exp:
print("Got exception %s" % exp)
conn = None
######### INSERT/UPDATE MOVIE #########
@app.route('/add_to_db', methods=['POST'])
def add_to_db():
print("Received request.")
print(request.form['year'])
year = request.form['year']
title = request.form['title']
actor = request.form['actor']
director = request.form['director']
release_date = request.form['release_date']
rating = request.form['rating']
actor = actor.title()
director = director.title()
db, username, password, hostname = get_db_creds()
cnx = ''
try:
cnx = mysql.connector.connect(user=username, password=password,
host=hostname,
database=db)
except Exception as exp:
print(exp)
import MySQLdb
cnx = MySQLdb.connect(unix_socket=hostname, user=username, passwd=password, db=db)
cur = cnx.cursor()
cur.execute("SELECT id FROM movies WHERE LOWER(title) = ('" + title.lower() + "')")
print("CHECK TO SEE IF UPDATEE!!!!!!")
entries = [dict(id=row[0]) for row in cur.fetchall()]
if not entries:
cur.execute("INSERT INTO movies (year, title, director, actor, release_date, rating) values ('" + year + "','" + title + "','" + director + "','" + actor + "','" + release_date + "','" + rating + "')")
try:
cnx.commit()
except Exception as exp:
return add_movie_print(exp)
return add_movie_print("Movie " + title + " successfully inserted")
######### DELETE MOVIE #########
@app.route('/delete_movie', methods=['POST'])
def delete_movie():
print("Received DELETE request.")
print(request.form['delete_movie_box'])
msg = request.form['delete_movie_box']
lower_title = msg.lower()
db, username, password, hostname = get_db_creds()
cnx = ''
try:
cnx = mysql.connector.connect(user=username, password=password,
host=hostname,
database=db)
except Exception as exp:
print(exp)
import MySQLdb
cnx = MySQLdb.connect(unix_socket=hostname, user=username, passwd=password, db=db)
cur = cnx.cursor()
cur.execute("SELECT title FROM movies WHERE LOWER(title) = ('" + lower_title + "')")
entries = [dict(title=row[0]) for row in cur.fetchall()]
if not entries:
return delete_movie_print("Movie " + msg + " could not be deleted - Movie with " + msg + " does not exist")
cur2 = cnx.cursor()
cur2.execute("DELETE FROM movies WHERE (title) = ('" + msg + "')")
try:
cnx.commit()
except Exception as exp:
return delete_movie_print(exp)
return delete_movie_print("Movie " + msg + " successfully deleted")
######### SEARCH MOVIE #########
@app.route('/search_movie', methods=['POST'])
def search_movie():
print("Received SEARCH request.")
print(request.form['search_actor_box'])
msg = request.form['search_actor_box']
msg = msg.title()
db, username, password, hostname = get_db_creds()
cnx = ''
try:
cnx = mysql.connector.connect(user=username, password=password,
host=hostname,
database=db)
except Exception as exp:
print(exp)
import MySQLdb
cnx = MySQLdb.connect(unix_socket=hostname, user=username, passwd=password, db=db)
cur = cnx.cursor()
cur.execute("SELECT title FROM movies WHERE actor = ('" + msg + "')")
entries = [dict(title=row[0]) for row in cur.fetchall()]
if not entries:
failed = "No movies found for actor " + msg
return print_search_movie(entries, failed)
cur.execute("SELECT year, title, actor FROM movies WHERE actor = ('" + msg + "')")
entries = [dict(year=row[0], title=row[1], actor=row[2]) for row in cur.fetchall()]
json_data = json.dumps(entries)
s = "" + str(json_data)
s = s[1:]
s = s[:-1]
s = s[1:]
s = s[:-1]
result = s.split("}, {")
return print_search_movie(result, "")
######### PRINT HIGHEST RATING MOVIES #########
@app.route('/print_movie_highest', methods=['POST'])
def print_movie_highest():
print("Received PRINT HIGH request.")
db, username, password, hostname = get_db_creds()
cnx = ''
try:
cnx = mysql.connector.connect(user=username, password=password,
host=hostname,
database=db)
except Exception as exp:
print(exp)
import MySQLdb
cnx = MySQLdb.connect(unix_socket=hostname, user=username, passwd=password, db=db)
cur = cnx.cursor()
cur.execute("SELECT year, title, director, actor, rating FROM movies WHERE rating=(SELECT MAX(rating) FROM movies)")
entries = [dict(rating=row[4], actor=row[3], director=row[2], title=row[1], year=row[0]) for row in cur.fetchall()]
json_data = json.dumps(entries)
s = "" + str(json_data)
s = s[1:]
s = s[:-1]
s = s[1:]
s = s[:-1]
result = s.split("}, {")
return print_highest_stat_movie(result)
######### PRINT LOWEST RATING MOVIES #########
@app.route('/print_movie_lowest', methods=['POST'])
def print_movie_lowest():
print("Received PRINT LOW request.")
db, username, password, hostname = get_db_creds()
cnx = ''
try:
cnx = mysql.connector.connect(user=username, password=password,
host=hostname,
database=db)
except Exception as exp:
print(exp)
import MySQLdb
cnx = MySQLdb.connect(unix_socket=hostname, user=username, passwd=password, db=db)
cur = cnx.cursor()
cur.execute("SELECT year, title, director, actor, rating FROM movies WHERE rating=(SELECT MIN(rating) FROM movies)")
entries = [dict(rating=row[4], actor=row[3], director=row[2], title=row[1], year=row[0]) for row in cur.fetchall()]
json_data = json.dumps(entries)
s = "" + str(json_data)
s = s[1:]
s = s[:-1]
s = s[1:]
s = s[:-1]
result = s.split("}, {")
return print_lowest_stat_movie(result)
@app.route("/")
def hello():
print("Inside hello")
print("Printing available environment variables")
print(os.environ)
print("Before displaying index.html")
entries = query_data()
print("Entries: %s" % entries)
return render_template('index.html', entries=entries)
def add_movie_print(message):
print("Printing searched movies")
print("Before displaying searched movies in index.html")
print("Searched Movies: %s" % message)
return render_template('index.html', insert_msg=message)
def delete_movie_print(message):
print("Printing searched movies")
print("Before displaying searched movies in index.html")
print("Searched Movies: %s" % message)
return render_template('index.html', delete_msg=message)
def print_search_movie(searched_movies, failed_msg):
print("Printing searched movies")
print("Before displaying searched movies in index.html")
print("Searched Movies: %s" % searched_movies)
if failed_msg != "":
return render_template('index.html', movies_searched_fail=failed_msg)
return render_template('index.html', movies_searched=searched_movies)
def print_highest_stat_movie(movie_stat):
print("Printing movies STATS")
print("Before displaying stats of movies in index.html")
print("Searched Movies: %s" % movie_stat)
return render_template('index.html', highest_rating_movies=movie_stat)
def print_lowest_stat_movie(movie_stat):
print("Printing movies STATS")
print("Before displaying stats of movies in index.html")
print("Searched Movies: %s" % movie_stat)
return render_template('index.html', lowest_rating_movies=movie_stat)
if __name__ == "__main__":
app.debug = True
app.run(host='0.0.0.0')
| 2.921875 | 3 |
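The Flask app above resolves its database settings in `get_db_creds()` from pairs of environment variables (`DB`/`database`, `USER`/`username`, `PASSWORD`/`password`, `HOST`/`dbhost`), falling back from mysql.connector to MySQLdb over a unix socket. A minimal sketch of supplying those variables for a local run; the values are placeholders, not real credentials:

# Hypothetical local launch: export the variables get_db_creds() looks for, then start the app.
import os
import subprocess

os.environ.update({
    "DB": "moviesdb",       # or "database"
    "USER": "appuser",      # or "username"
    "PASSWORD": "secret",   # or "password"
    "HOST": "127.0.0.1",    # or "dbhost"; a unix-socket path when the MySQLdb fallback is used
})
subprocess.run(["python", "application.py"], check=True)  # the child process inherits os.environ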
soundsig/spikes.py | theunissenlab/sounsig | 22 | 12786057 | from __future__ import division, print_function
import operator
import numpy as np
from scipy.stats import gamma
from scipy.ndimage import convolve1d
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from matplotlib.patches import Rectangle
def compute_joint_isi(spike_train1, spike_train2, window_size=0.500, bin_size=0.001):
half_window_size = window_size / 2.0
half_nbins = int(half_window_size / bin_size)
nbins = half_nbins*2 + 1 # ensure an odd number of bins, with zero lag in the middle
#construct sparse matrix of spike-to-spike distances
isi_hist = np.zeros([nbins], dtype='int')
lowest_j = 0
for i,ti in enumerate(spike_train1):
#print 'i=%d, ti=%0.3f, lowest_j=%d' % (i, ti, lowest_j)
if lowest_j > len(spike_train2)-1:
break
for j in range(lowest_j, len(spike_train2)):
tj = spike_train2[j]
dij = ti - tj
#print '\tj=%d, tj=%0.3f, dij=%0.3f' % (j, tj, dij)
if dij > half_window_size:
#there is no t{i+k}, k>0 such that t{i+k} - tj < half_window_size, so this is the lowest that
# j has to be for future iterations. we'll keep track of that to reduce the number of iterations
# of the inner loop for future outer loop iterations
lowest_j = j+1
continue
if dij < -half_window_size:
#at this point there is no tj such that ti - tj >= -half_window_size, so we should break
break
else:
#add to the histogram
bin_index = int(np.round(dij / bin_size)) + half_nbins
#print '\t added to bin, bin_index=%d' % bin_index
isi_hist[bin_index] += 1
sp = window_size / nbins
isi_vals = np.arange(-half_window_size, half_window_size, sp) #values of left hand edges of bins
return isi_vals,isi_hist
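# Illustrative sketch (added; not part of the original module): how compute_joint_isi
# might be called on two synthetic spike trains. All values below are made up.
def _example_compute_joint_isi():
    spike_train1 = np.sort(np.random.uniform(0.0, 5.0, 50))
    spike_train2 = np.sort(np.random.uniform(0.0, 5.0, 50))
    isi_vals, isi_hist = compute_joint_isi(spike_train1, spike_train2, window_size=0.5, bin_size=0.001)
    return isi_vals, isi_hist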
def simulate_poisson(psth, duration, num_trials=20):
dt = 0.001
trange = np.arange(0.0, duration, dt)
new_spike_trials = []
for k in range(num_trials):
next_spike_time = np.random.exponential(1.0)
last_spike_index = 0
spike_times = []
for k, t in enumerate(trange):
csum = np.cumsum(psth[last_spike_index:k])
if len(csum) < 1:
continue
if csum[-1] >= next_spike_time:
last_spike_index = k
spike_times.append(t)
new_spike_trials.append(np.array(spike_times))
return new_spike_trials
def simulate_gamma(psth, trials, duration, num_trials=20):
#rescale the ISIs
dt = 0.001
rs_isis = []
for trial in trials:
if len(trial) < 1:
continue
csum = np.cumsum(psth)*dt
for k,ti in enumerate(trial[1:]):
tj = trial[k]
if ti > duration or tj > duration or ti < 0.0 or tj < 0.0:
continue
ti_index = int((ti / duration) * len(psth))
tj_index = int((tj / duration) * len(psth))
#print 'k=%d, ti=%0.6f, tj=%0.6f, duration=%0.3f' % (k, ti, tj, duration)
#print ' ti_index=%d, tj_index=%d, len(psth)=%d, len(csum)=%d' % (ti_index, tj_index, len(psth), len(csum))
#get rescaled time as difference in cumulative intensity
ui = csum[ti_index] - csum[tj_index]
if ui < 0.0:
print('ui < 0! ui=%0.6f, csum[ti]=%0.6f, csum[tj]=%0.6f' % (ui, csum[ti_index], csum[tj_index]))
else:
rs_isis.append(ui)
rs_isis = np.array(rs_isis)
rs_isi_x = np.arange(rs_isis.min(), rs_isis.max(), 1e-5)
#fit a gamma distribution to the rescaled ISIs
gamma_alpha,gamma_loc,gamma_beta = gamma.fit(rs_isis)
gamma_pdf = gamma.pdf(rs_isi_x, gamma_alpha, loc=gamma_loc, scale=gamma_beta)
print('Rescaled ISI Gamma Fit Params: alpha=%0.3f, beta=%0.3f, loc=%0.3f' % (gamma_alpha, gamma_beta, gamma_loc))
#simulate new trials using rescaled ISIs
new_trials = []
for nt in range(num_trials):
ntrial = []
next_rs_time = gamma.rvs(gamma_alpha, loc=gamma_loc,scale=gamma_beta)
csum = 0.0
for t_index,pval in enumerate(psth):
csum += pval*dt
if csum >= next_rs_time:
#spike!
t = t_index*dt
ntrial.append(t)
#reset integral and generate new rescaled ISI
csum = 0.0
next_rs_time = gamma.rvs(gamma_alpha, loc=gamma_loc,scale=gamma_beta)
new_trials.append(ntrial)
#plt.figure()
#plt.hist(rs_isis, bins=20, normed=True)
#plt.plot(rs_isi_x, gamma_pdf, 'r-')
#plt.title('Rescaled ISIs')
return new_trials
def compute_psth(trials, duration, bin_size=0.001, time_offset=0.0):
"""
    Compute a peri-stimulus time histogram (PSTH), conditioned on an event such as a stimulus.
    trials: an array of arrays of spike times in seconds, relative to the onset of the stimulus.
        If a spike precedes the stimulus, its spike time should be negative. len(trials) = # of trials,
        and len(trials[0]) = number of spikes in the first trial.
    duration: the duration of the event.
    bin_size: the size in seconds of the bin to use in creating the PSTH (defaults to 0.001s = 1ms)
    Returns the average spike rate in kHz across trials in each time bin.
"""
nbins = int(np.ceil((duration) / bin_size))
spike_counts = np.zeros(nbins)
for stimes in trials:
if len(stimes) == 0:
continue
stimes = np.array(stimes)
if len(stimes.shape) > 0:
# get index of spike times valid for the conditioned event
vi = (stimes >= time_offset) & (stimes <= duration)
# convert spike times to indices in PSTH
sbins = np.floor((stimes[vi]-time_offset) / bin_size).astype('int')
# add spike to each bin
for k in sbins:
if k < nbins:
spike_counts[k] += 1
# compute rate in KHz by dividing by bin size
spike_counts /= bin_size*1000.0
# take mean across trials (spikes were already summed across trials)
spike_counts /= len(trials)
# construct time axis, represents time point at left hand of bin
t = (np.arange(nbins).astype('float') * bin_size) + time_offset
return t,spike_counts
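# Illustrative sketch (added; not part of the original module): computing a PSTH
# from a few synthetic trials. The trial contents and parameters are arbitrary.
def _example_compute_psth():
    trials = [np.sort(np.random.uniform(0.0, 1.0, 20)) for _ in range(10)]
    t, psth = compute_psth(trials, duration=1.0, bin_size=0.001)
    return t, psth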
def create_random_psth(duration, smooth_win_size=10, samp_rate=1000.0, thresh=0.5):
nsamps = duration * samp_rate
psth = np.random.randn(nsamps)
psth[psth < thresh] = 0.0
#smooth psth
kt = np.arange(-smooth_win_size, smooth_win_size+1, 1.0)
k = np.exp(-kt**2)
k /= k.sum()
psth = np.convolve(psth, k, mode='same')
return psth
def plot_raster(spike_trains, ax=None, duration=None, bin_size=0.001, time_offset=0.0, ylabel='Trial #', groups=None,
bgcolor=None, spike_color='k'):
"""
Make a raster plot of the trials of spike times.
spike_trains: an array of arrays of spike times in seconds.
time_offset: amount of time in seconds to offset the time axis for plotting
groups: a dictionary that groups spike trains together. the key is the group name, and
        the value is a list of spike train indices. The groups are
        differentiated visually using a background color, and labeled on the y-axis.
        The elements in the indices array must be contiguous!
"""
if ax is None:
ax = plt.gca()
if bgcolor is not None:
ax.set_axis_bgcolor(bgcolor)
if duration is None:
duration = -np.inf
for trial in spike_trains:
if len(trial) > 0:
duration = max(duration, np.max(trial))
nbins = (duration / bin_size)
#draw group backgrounds
if groups is not None:
        #first make sure indices are lists
        groups = dict([(kk, vv if type(vv) is list else [vv]) for kk, vv in groups.items()])
        #sort group names by min trial
        group_list = [(group_name,min(trial_indicies)) for group_name,trial_indicies in groups.items()]
        group_list.sort(key=operator.itemgetter(1))
        group_list = [x[0] for x in group_list]
        for k,(group_name,trial_indicies) in enumerate(groups.items()):
real_index = group_list.index(group_name)
if real_index % 2:
max_trial = max(trial_indicies)
y = len(spike_trains) - max_trial - 1
x = 0.0 + time_offset
h = len(trial_indicies)
w = nbins
rect = Rectangle( (x, y), width=w, height=h, fill=True, alpha=0.5, facecolor='#aaaaaa', linewidth=0.0)
ax.add_patch(rect)
#draw actual spikes
for k,trial in enumerate(spike_trains):
if len(trial) == 0:
continue
for st in trial:
y = len(spike_trains) - k - 1
x = st
rect = Rectangle( (x, y), width=bin_size, height=1, linewidth=1.0, facecolor=spike_color, edgecolor=spike_color)
ax.add_patch(rect)
#change x axis tick marks to reflect actual time
ax.autoscale_view()
ax.set_xlim(time_offset, time_offset+duration)
ax.figure.canvas.draw()
if groups is None:
#change y axis tick labels to reflect trial number
y_labels = [y.get_text() for y in ax.get_yticklabels()]
y_labels.reverse()
ax.set_yticklabels(y_labels)
else:
ax.set_yticklabels([])
#change y axis tick labels to reflect group, one tick per group
yticks = list()
        for k,(group_name,trial_indicies) in enumerate(groups.items()):
min_trial = min(trial_indicies)
ypos = len(spike_trains) - (min_trial + (len(trial_indicies) / 2.0))
yticks.append( (ypos, group_name) )
yticks.sort(key=operator.itemgetter(0))
ax.set_yticks([y[0] for y in yticks])
ax.set_yticklabels([y[1] for y in yticks])
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.set_xlabel('Time (s)')
def xcorr_hist(spike_train1, spike_train2, duration=None, window_size=0.001, sample_rate=1000.0, normalize=True):
"""
Make a cross-correlation histogram of coincident spike times between spike train 1 and 2. The cross-correlation
    histogram is a function of time. At each moment t in time, the value of the histogram is given as the number
of spike pairs from train 1 and 2 that are within the window specified by window_size.
Normalization means to divide by window_size*int(duration*sample_rate), which turns the returned quantity into
the probability of spikes from the two trains co-occurring.
Returns t,xhist,clow,chigh where t is the time vector, xhist is the cross-correlation histogram, and clow and chigh
    are the lower and upper 95% confidence intervals. When a normalized xhist falls between these bounds, the
    coincidence rate at that time is not significantly different from chance at the 95% level.
"""
if duration is None:
duration = -np.inf
        # infer the duration from the last spike in train 1
        if len(spike_train1) > 0:
            duration = max(np.max(spike_train1), duration)
#construct the histogram
nbins = int(np.ceil(duration*sample_rate))
xhist = np.zeros([nbins], dtype='int')
half_window_size = window_size / 2
#populate the histogram
for t in range(nbins):
tmin = t/sample_rate - half_window_size
tmax = t/sample_rate + half_window_size
#count the number of spikes that occur in this time window
ns1 = ((spike_train1 >= tmin) & (spike_train1 <= tmax)).sum()
ns2 = ((spike_train2 >= tmin) & (spike_train2 <= tmax)).sum()
#compute the count of all pairs, this is the value for the histogram
xhist[t] = ns1*ns2
R = int(duration*sample_rate)
if normalize:
xhist = xhist.astype('float') / (window_size * R)
#compute confidence intervals
clow = -1.96 / np.sqrt(4*window_size*R)
chigh = 1.96 / np.sqrt(4*window_size*R)
t = np.arange(nbins)*(1.0 / sample_rate)
return t,xhist,clow,chigh
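# Illustrative sketch (added; not part of the original module): building a coincidence
# histogram for two synthetic spike trains; the duration and spike counts are arbitrary.
def _example_xcorr_hist():
    st1 = np.sort(np.random.uniform(0.0, 1.0, 40))
    st2 = np.sort(np.random.uniform(0.0, 1.0, 40))
    return xcorr_hist(st1, st2, duration=1.0, window_size=0.001)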
def spike_envelope(spike_trains, start_time, duration, bin_size=1e-3, win_size=3.0, thresh_percentile=None, smooth=False):
#construct empty envelope
tlen = int(duration / bin_size)
env = np.zeros([tlen])
#sum spike trains across electrodes
for st in spike_trains:
#some basic checks
assert np.sum(st < start_time) == 0, "spike_envelope: %d spike times occurred before the start time of %0.6fs" % (np.sum(st < start_time), start_time)
assert np.sum(st > start_time+duration) == 0, "spike_envelope: %d spike times occurred after the end time of %0.6fs" % (np.sum(st > start_time+duration), start_time+duration)
#convert spike times to indices
sindex = ((st - start_time) / bin_size).astype('int')
#increment spike count vector
env[sindex] += 1
if smooth:
#smooth the spike count vector with a gaussian
sct = np.linspace(-50, 50, 30)
scwin = np.exp(-(sct**2 / win_size**2))
env = convolve1d(env, scwin)
#normalize the envelope
env /= env.max()
    assert np.sum(env < 0.0) == 0, "Why are there negative values in the spike envelope?"
if thresh_percentile is not None:
thresh = np.percentile(env, thresh_percentile)
print('spike_envelope threshold: %f' % thresh)
env[env < thresh] = 0.0
return env
def spike_trains_to_matrix(spike_trains, bin_size, start_time, duration):
""" Convert an array of spike time arrays to a matrix of counts.
:param spike_trains: An array of arrays of spike times.
:param bin_size: The bin size of each matrix pixel.
:param start_time: The start time of the matrix.
:param duration: The duration of the matrix.
:return: A matrix of spike counts, one row per each array in the spike_trains array.
"""
nt = int(duration / bin_size)
spike_count = np.zeros([len(spike_trains), nt])
for k, spikes in enumerate(spike_trains):
vi = (spikes >= start_time) & (spikes < start_time+duration)
# convert the spike times into integer indices in spike_count
spikes_index = ((spikes[vi] - start_time) / bin_size).astype('int')
#increment each bin by the number of spikes that lie in it
for si in spikes_index:
assert si <= spike_count.shape[1], "IndexError: nt=%d, si=%d, k=%d" % (nt, si, k)
spike_count[k, min(si, spike_count.shape[1]-1)] += 1.0
return spike_count
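# Illustrative sketch (added; not part of the original module): binning a handful of
# synthetic spike trains into a count matrix of shape (n_trains, duration/bin_size).
def _example_spike_trains_to_matrix():
    spike_trains = [np.sort(np.random.uniform(0.0, 2.0, 30)) for _ in range(5)]
    return spike_trains_to_matrix(spike_trains, bin_size=1e-3, start_time=0.0, duration=2.0)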
def psth_colormap(noise_level=0.1, ncolors=256):
cdata = list()
for x in np.linspace(0, 1, ncolors):
if x < noise_level:
cdata.append([1., 1., 1., 1])
else:
v = (x - noise_level) / (1. - noise_level)
c = (1. - v)**6
# cdata.append([0, v/2., v, (v/2. + 0.5)])
cdata.append([c, c, c])
return ListedColormap(cdata, name='psth')
def causal_smooth(spike_times, duration, bin_size=1e-3, tau=1e-3, winlen=5e-3, num_win_points=11):
""" Convolve a set of spike times (specified in seconds) with a causal exponential
filter with time constant tau.
"""
assert num_win_points % 2 == 1
# convert the spike times to a binary vector
nbins = int(duration / bin_size)
b = np.zeros(nbins)
sti = (spike_times / bin_size).astype('int')
sti[sti < 0] = 0
sti[sti > nbins-1] = nbins-1
b[sti] = 1
# create an causal exponential window
x = np.linspace(-winlen, winlen, num_win_points)
w = np.exp(-x / tau)
w[x < 0] = 0
w /= w.sum()
return convolve1d(b, w)
def simple_synchrony(spike_times1, spike_times2, duration, bin_size=1e-1):
""" Turn the two spike trains into binary vectors by binning, compute their normalized distance. Should
be bounded by 0 and 1. """
nbins = int(duration / bin_size)
b1 = np.zeros(nbins, dtype='bool')
b2 = np.zeros(nbins, dtype='bool')
sti1 = (spike_times1 / bin_size).astype('int')
sti1[sti1 < 0] = 0
sti1[sti1 > nbins - 1] = nbins - 1
b1[sti1] = True
sti2 = (spike_times2 / bin_size).astype('int')
sti2[sti2 < 0] = 0
sti2[sti2 > nbins - 1] = nbins - 1
b2[sti2] = True
n1 = b1.sum()
n2 = b2.sum()
return np.sum(b1 & b2) / np.sqrt(n1*n2)
def exp_conv(spike_times, duration, tau, bin_size, causal=True):
""" Convolve spike train with an exponential kernel.
:param spike_times: List of spike times in seconds.
:param tau: Exponential time constant in seconds.
:param duration: The duration of the time series.
:param bin_size: Bin size in seconds
:param causal: Whether to use a causal filter or not. If causal=False, then the spike times are convolved with a two-sided exponential
:return: An array time series.
"""
assert spike_times.min() >= 0, "No negative spike times for exp_conv!"
nt = int(duration / bin_size)
good_spikes = (spike_times > 0) & (spike_times < duration)
i = (spike_times[good_spikes] / bin_size).astype('int')
s = np.zeros([nt])
s[i] = 1.
# make sure the exponential window size is at least 4 times the time constant
winlen = 4*int(tau/bin_size) + 1
assert winlen < len(s), "Signal is too small to convolve with exponential that has tau=%0.3f" % tau
hwinlen = int(winlen / 2)
twin = np.arange(-hwinlen, hwinlen+1)*bin_size
win = np.zeros_like(twin)
win[hwinlen:] = np.exp(-twin[hwinlen:] / tau)
    if not causal:
        # mirror the causal half to build a two-sided (acausal) exponential window
        win[:hwinlen] = win[(hwinlen+1):][::-1]
sc = convolve1d(s, win)
return sc
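# Illustrative sketch (added; not part of the original module): smoothing a short spike
# train with the causal exponential kernel; tau and bin_size values are arbitrary.
def _example_exp_conv():
    spike_times = np.array([0.1, 0.25, 0.4])
    return exp_conv(spike_times, duration=1.0, tau=5e-3, bin_size=1e-3, causal=True)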
| 2.40625 | 2 |
study/pytorch_study/02.py | strawsyz/straw | 2 | 12786058 | import matplotlib.pyplot as plt
import torch
# Classification example: separate two Gaussian point clusters
data_shape = torch.ones(400, 2)
x0 = torch.normal(2 * data_shape, 1)
y0 = torch.zeros(data_shape.size()[0])
x1 = torch.normal(-2 * data_shape, 1)
y1 = torch.ones(data_shape.size()[0])
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)
y = torch.cat((y0, y1)).type(torch.LongTensor)
print(y.size())
print(y)
# plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=8, lw=0, cmap='RdYlGn')
# plt.show()
# create network
class Net(torch.nn.Module):
def __init__(self, n_input, n_hidden, n_output):
super(Net, self).__init__()
self.hidden = torch.nn.Linear(n_input, n_hidden)
self.output = torch.nn.Linear(n_hidden, n_output)
def forward(self, x):
x = torch.sigmoid(self.hidden(x))
return self.output(x)
net = Net(n_input=2, n_hidden=10, n_output=2)
print(net)
# train network
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
loss_func = torch.nn.CrossEntropyLoss()
plt.ion()
for i in range(1000):
out = net(x)
loss = loss_func(out, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 20 == 0:
plt.cla()
# temp = torch.softmax(out, 1)
prediction = torch.max(out, 1)[1]
# prediction = torch.max(out)
pred_y = prediction.data.numpy().squeeze()
target_y = y.data.numpy()
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=8, lw=0, cmap='RdYlGn')
accuracy = float((pred_y == target_y).astype(int).sum()) / float(target_y.size)
plt.text(1.5, -4, 'Accuracy=%.4f' % accuracy, fontdict={'size': 12, 'color': 'orange'})
plt.pause(0.1)
if accuracy == 1.0:
print('perfect')
break
print('end')
plt.ioff()
plt.show()
| 3.609375 | 4 |
backup.d/backup.py | davidhi7/backup | 0 | 12786059 | <filename>backup.d/backup.py<gh_stars>0
import sys
import os
import shutil
import subprocess
import datetime
import configparser
from pathlib import Path
def load_config(filepath: Path):
config = configparser.ConfigParser()
# stop converting key names to lower case, see https://stackoverflow.com/questions/19359556/configparser-reads-capital-keys-and-make-them-lower-case
config.optionxform = str
config.read(filepath)
# do some basic validation
if not config.has_section('General'):
print('Configuration file is missing "General" section')
exit(1)
required_options = ('SOURCE', 'REPOSITORY', 'PASSPHRASE_FILE')
for option in required_options:
if not config.has_option('General', option):
print('Missing required configuration file option ' + option)
exit(1)
return config
def config_to_env(config):
env = dict()
for section in config.sections():
for key, value in config.items(section):
env[key] = value
return env
def backup(argv):
if len(argv) < 2:
print('Missing argument: backup configuration file')
exit(1)
config_path = Path(argv[1])
if not config_path.is_file():
print('Given argument doesn\'t lead to a configuration file')
exit(1)
config = load_config(Path(argv[1]))
backup_name = str()
if config.has_option('General', 'LABEL'):
backup_name += config.get('General', 'LABEL') + '_'
    backup_name += datetime.datetime.now().strftime('%Y-%m-%dT%H:%M')
print('=> Starting backup ' + backup_name)
# Temporary directory to store files created for example by pre-hooks. Going to be deleted after the backup finished.
backup_tmp_dir = Path(config['General']['SOURCE']) / f'.backup_{backup_name}'
backup_tmp_dir.mkdir(exist_ok=True)
# Environmental variables used by Borg
borg_env = {'BORG_REPO': config['General']['REPOSITORY'], 'BORG_PASSCOMMAND': 'cat ' + config['General']['PASSPHRASE_FILE']}
hook_env = config_to_env(config)
exitcodes = dict()
if config.has_option('General', 'BACKUP_PRE_HOOK'):
print('=> Running prehook command')
exitcodes['prehook'] = exec(config['General']['BACKUP_PRE_HOOK'], hook_env)
print('=> Creating new archive')
exitcodes['borg create'] = borg_create(config, backup_name, borg_env)
print('=> Pruning repository')
exitcodes['borg prune'] = borg_prune(config, borg_env)
if config.has_option('General', 'BACKUP_SUCCESS_HOOK'):
if exitcodes['borg create'] == 0 and exitcodes['borg prune'] == 0:
print('=> Running success hook command')
exitcodes['success_hook'] = exec(config['General']['BACKUP_SUCCESS_HOOK'], hook_env)
else:
print('=> Skipping success hook, see status codes below')
if config.has_option('General', 'BACKUP_HOOK'):
print('=> Running hook command')
exitcodes['hook'] = exec(config['General']['BACKUP_HOOK'], hook_env)
shutil.rmtree(backup_tmp_dir)
print('\nFinished backup')
print('List of exit codes:')
print_table(exitcodes)
if max(exitcodes.values()) > 1:
exit(1)
else:
exit(0)
def borg_create(config, backup_name, env):
#cmd = ['borg', 'create', '--stats', '--exclude-from', f'"{borg_exclude}"', f'"::{backup_name}"', f'"{borg_source}"']
borg_source = config['General']['SOURCE']
borg_exclude_parameter = str()
if config.has_option('General', 'EXCLUDE_FILE'):
borg_exclude_parameter = '--exclude-from ' + config.get('General', 'EXCLUDE_FILE')
cmd = f'''
borg create --stats {borg_exclude_parameter} \
'::{backup_name}' \
'{borg_source}'
'''
return exec(cmd, env)
def borg_prune(config, env):
keep_daily = config.getint('Prune', 'KEEP_DAILY', fallback=7)
keep_weekly = config.getint('Prune', 'KEEP_WEEKLY', fallback=4)
keep_monthly = config.getint('Prune', 'KEEP_MONTHLY', fallback=12)
cmd = f'''
borg prune --stats \
--keep-daily={keep_daily} \
--keep-weekly={keep_weekly} \
--keep-monthly={keep_monthly} \
'''
return exec(cmd, env)
def exec(cmd, env):
out = subprocess.run(cmd, env=env, shell=True)
return out.returncode
# prints dict of data with string keys and integer values in a fancy table
def print_table(data):
longest_key = len(max(data.keys(), key=len))
longest_val = len(str(max(data.values())))
separator = ' | '
print('-' * (longest_key + longest_val + len(separator)))
for entry in data.items():
print(entry[0] + ((longest_key - len(entry[0])) * ' ') + separator + str(entry[1]))
print('-' * (longest_key + longest_val + len(separator)))
if __name__ == '__main__':
backup(sys.argv)
| 3.015625 | 3 |
2015/05/main.py | juanrgon/advent-of-code | 3 | 12786060 | <reponame>juanrgon/advent-of-code
TEST = [
(
"""
ugknbfddgicrmopn
aaa
jchzalrnumimnmhp
haegwjzuvuyypxyu
dvszwmarrgswjxmb
""",
1 + 1,
),
]
TEST2 = [
(
"""
qjhvhtzxzqqjkmpb
xxyxx
uurcxstgmygtbstg
ieodomkazucvgmuy
""",
1 + 1,
),
]
import aoc
from collections import Counter
from itertools import product
import string
@aoc.submit(part=1)
@aoc.get_input
@aoc.tests(TEST)
@aoc.parse_text
def part_1(raw: str, ints: list[int], strs: list[str]):
vowels = "aeiou"
nice = 0
for s in strs:
if sum(Counter(s)[v] for v in vowels) < 3:
continue
for l in string.ascii_lowercase:
if l + l in s:
break
else:
continue
if "ab" in s or "cd" in s or "pq" in s or "xy" in s:
continue
nice += 1
return nice
@aoc.submit(part=2)
@aoc.get_input
@aoc.tests(TEST2)
@aoc.parse_text
def part_2(raw: str, ints: list[int], strs: list[str]):
vowels = "aeiou"
nice = 0
for s in strs:
for l, k in product(string.ascii_lowercase, repeat=2):
t = s.replace(l + k, "")
if len(s) - len(t) >= 4:
break
else:
continue
for l, k in product(string.ascii_lowercase, repeat=2):
if l + k + l in s:
break
else:
continue
nice += 1
return nice
if __name__ == "__main__":
print("Part 1:", part_1(__file__))
print("Part 2:", part_2(__file__))
| 3.140625 | 3 |
autoscalingsim/scaling/policiesbuilder/adjustmentplacement/desired_adjustment_calculator/scoring/score/score_impl/__init__.py | Remit/autoscaling-simulator | 6 | 12786061 | from .price_score import PriceScore
| 0.984375 | 1 |
utils.py | learningequality/sushi-chef-readwritethink | 0 | 12786062 | <reponame>learningequality/sushi-chef-readwritethink
import json
import os
from pathlib import Path
import ntpath
from ricecooker.utils import downloader
import requests
from ricecooker.utils.caching import CacheForeverHeuristic, FileCache, CacheControlAdapter
#from le_utils.constants import licenses, content_kinds, file_formats
DATA_DIR = "chefdata"
BASE_URL = "http://www.readwritethink.org"
sess = requests.Session()
cache = FileCache('.webcache')
basic_adapter = CacheControlAdapter(cache=cache)
forever_adapter = CacheControlAdapter(heuristic=CacheForeverHeuristic(), cache=cache)
sess.mount('http://', basic_adapter)
sess.mount(BASE_URL, forever_adapter)
def save_thumbnail(url, save_as):
THUMB_DATA_DIR = build_path([DATA_DIR, 'thumbnail'])
filepath = os.path.join(THUMB_DATA_DIR, save_as)
try:
document = downloader.read(url, loadjs=False, session=sess)
except requests.exceptions.ConnectionError as e:
return None
else:
with open(filepath, 'wb') as f:
f.write(document)
return filepath
def if_file_exists(filepath):
file_ = Path(filepath)
return file_.is_file()
def if_dir_exists(filepath):
file_ = Path(filepath)
return file_.is_dir()
def get_name_from_url(url):
head, tail = ntpath.split(url)
params_index = tail.find("&")
if params_index != -1:
tail = tail[:params_index]
basename = ntpath.basename(url)
params_b_index = basename.find("&")
if params_b_index != -1:
basename = basename[:params_b_index]
return tail or basename
def get_name_from_url_no_ext(url):
path = get_name_from_url(url)
path_split = path.split(".")
if len(path_split) > 1:
name = ".".join(path_split[:-1])
else:
name = path_split[0]
return name
def build_path(levels):
path = os.path.join(*levels)
if not if_dir_exists(path):
os.makedirs(path)
return path
def remove_links(content):
if content is not None:
for link in content.find_all("a"):
link.replaceWithChildren()
def remove_iframes(content):
if content is not None:
for iframe in content.find_all("iframe"):
iframe.extract()
def check_shorter_url(url):
shorters_urls = set(["bitly.com", "goo.gl", "tinyurl.com", "ow.ly", "ls.gd",
"buff.ly", "adf.ly", "bit.do", "mcaf.ee"])
index_init = url.find("://")
index_end = url[index_init+3:].find("/")
if index_init != -1:
if index_end == -1:
index_end = len(url[index_init+3:])
domain = url[index_init+3:index_end+index_init+3]
check = len(domain) < 12 or domain in shorters_urls
return check
def get_level_map(tree, levels):
actual_node = levels[0]
r_levels = levels[1:]
for children in tree.get("children", []):
if children["source_id"] == actual_node:
if len(r_levels) >= 1:
return get_level_map(children, r_levels)
else:
return children
def load_tree(path):
with open(path, 'r') as f:
tree = json.load(f)
return tree
| 2.421875 | 2 |
fssim_rqt_plugins/rqt_fssim_track_editor/src/rqt_fssim_track_editor/cone_editor.py | AhmedOsamaAgha/fssim | 200 | 12786063 | # AMZ-Driverless
# Copyright (c) 2018 Authors:
# - <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import scipy.io as sio
from qt_gui.plugin import Plugin
from PyQt5.QtWidgets import QWidget, QGraphicsScene, QGraphicsView, QGraphicsLineItem
from PyQt5 import QtGui, QtCore
from PyQt5.QtGui import QColor, QPen, QBrush
from PyQt5.QtCore import *
from track import *
from snapshot_handler import *
class TrackViewScene(QGraphicsScene):
cur_scale = 1.0
_px_per_m = 10
enable_editing = False
def __init__(self, context, cone_view):
super(TrackViewScene, self).__init__()
self._context = context
self.cone_view = cone_view # type: QGraphicsView
self._map_height = cone_view.frameGeometry().height()
self._map_width = cone_view.frameGeometry().width()
self._landmarks = []
self._car_diameter = 5.0
self._cone_diameter = self.m_to_px(0.5)
self.cone_view.setDragMode(1)
self._mode = Mode.DRAW
self._grid_alpha = 255
self._grid_m = 5
self._draw_grid(self._grid_alpha, self._grid_m)
self.mousePressEvent = self.on_mouse_down
# self.mouseMoveEvent = self.on_mouse_move
self.track = Track()
self.snapshots = SnapshotHandler()
self.model_path = ""
self.tracks_path = ""
self.draw_rect([0, 0], 0.5, 0.5)
def set_cone_diameter(self, size):
self._cone_diameter = self.m_to_px(size)
def interpolate(self, circular = True):
self.track.interpolate(circular)
self.update_all()
def generate_skipdpad(self, widget):
self.track.generate_skidpad(widget)
self.update_all()
def generate_acceleration(self, widget):
self.track.generate_acceleration(widget)
self.update_all()
def draw_entire_track(self):
self.draw_track(self.track.middle)
self.draw_cones(self.track.cones_right, self.track.get_color(Type.RIGHT))
self.draw_cones(self.track.cones_orange, self.track.get_color(Type.ORANGE))
self.draw_cones(self.track.cones_left, self.track.get_color(Type.LEFT))
self.draw_big_cones(self.track.cones_orange_big, self.track.get_color(Type.ORANGE))
self.draw_tk_device(self.track.tk_device)
self.draw_cones(self.track.cones, self.track.get_color(Type.UNKNOWN))
self.draw_lines(self.track.control_points)
self.draw_axes(self.track.starting_pose_front_wing)
def draw_snapshot(self):
self.clear()
self.update_grid(self._grid_alpha, self._grid_m)
self.draw_cones(self.snapshots.cones)
def draw_snapshot_i(self, i):
self.snapshots.load_snap_from_list(i)
self.draw_snapshot()
def change_enabled(self, enabled):
if enabled:
self.enable_editing = True
self.cone_view.setDragMode(0)
else:
self.enable_editing = False
self.cone_view.setDragMode(1)
def update_grid(self, alpha=20, grid_size=5, draw_track=True):
self._map_height = self.cone_view.frameGeometry().height()
self._map_width = self.cone_view.frameGeometry().width()
self.clear()
self._draw_grid(alpha, grid_size)
if draw_track:
self.draw_entire_track()
def update_all(self):
self.clear()
self.update_grid(self._grid_alpha, self._grid_m)
self.draw_entire_track()
def show_event(self, event):
self.cone_view.fitInView(self.sceneRect(), Qt.KeepAspectRatio)
def change_view(self, i):
if i == 2:
self.draw_snapshot()
elif i == 0:
self.update_all()
def add_land_mark(self, x, y):
pen = QPen(QColor(100, 200, 0), 0.5, Qt.SolidLine, Qt.RoundCap)
def clearTrack(self):
self.track.clear()
self.update_grid(self._grid_alpha, self._grid_m, False)
#####################################
## GETTERS & SETTERS
#####################################
def set_px_per_m(self, val):
self._px_per_m = val
def set_mode(self, mode):
self._mode = mode
def set_cone_add_side(self, side):
self._side = side
#####################################
## EVENT HANDLERS
#####################################
def wheelEvent(self, event):
if event.delta() > 0:
factor = 1.2
if self.cur_scale < 100:
self.cur_scale = self.cur_scale * factor
else:
factor = 0.8
if self.cur_scale > 0.1:
self.cur_scale = self.cur_scale * factor
if self.cur_scale > 0.1 and self.cur_scale < 10:
self.cone_view.scale(factor, factor)
self.update_grid(self._grid_alpha, self._grid_m)
def on_mousewheel(self, event):
pass
def handle_btn_export(self, name, yaml, mat):
path = self.tracks_path + "/" + name
if yaml:
self.track.export_to_yaml(self.tracks_path, name)
if mat:
self.track.export_to_mat(self.tracks_path, name)
self.export_model(path, name)
def export_model(self, path, name):
root = etree.Element("model")
etree.SubElement(root, "name").text = "track"
etree.SubElement(root, "version").text = "1.0"
etree.SubElement(root, "sdf", version="1.4").text = name + ".sdf"
etree.SubElement(root, "description").text = "random track"
tree = etree.ElementTree(root)
tree.write(self.model_path + "/track/model.config", pretty_print=True, xml_declaration=True, encoding='UTF-8')
root = etree.Element("sdf", version="1.4")
model = etree.SubElement(root, "model", name="some track")
for i in range(0, self.track.get_size(Type.RIGHT)):
include = etree.SubElement(model, "include")
etree.SubElement(include, "uri").text = "model://fssim_gazebo/models/cone_blue"
etree.SubElement(include, "pose").text = self.track.get_cone_pos(Type.RIGHT, i)
etree.SubElement(include, "name").text = "cone_right"
for i in range(0, self.track.get_size(Type.LEFT)):
include = etree.SubElement(model, "include")
etree.SubElement(include, "uri").text = "model://fssim_gazebo/models/cone_yellow"
etree.SubElement(include, "pose").text = self.track.get_cone_pos(Type.LEFT, i)
etree.SubElement(include, "name").text = "cone_left"
for i in range(0, self.track.get_size(Type.ORANGE)):
include = etree.SubElement(model, "include")
etree.SubElement(include, "uri").text = "model://fssim_gazebo/models/cone_orange"
etree.SubElement(include, "pose").text = self.track.get_cone_pos(Type.ORANGE, i)
etree.SubElement(include, "name").text = "cone_orange"
for i in range(0, self.track.get_size(Type.ORANGE_BIG)):
include = etree.SubElement(model, "include")
etree.SubElement(include, "uri").text = "model://fssim_gazebo/models/cone_orange_big"
etree.SubElement(include, "pose").text = self.track.get_cone_pos(Type.ORANGE_BIG, i)
etree.SubElement(include, "name").text = "cone_orange_big"
for i in range(0, self.track.get_size(Type.TK_DEVICE)):
include = etree.SubElement(model, "include")
etree.SubElement(include, "uri").text = "model://fssim_gazebo/models/time_keeping"
etree.SubElement(include, "pose").text = self.track.get_cone_pos(Type.TK_DEVICE, i)
etree.SubElement(include, "name").text = "tk_device_" + str(i)
tree = etree.ElementTree(root)
gazebo_models = self.model_path + "/track/" + name
tree.write(gazebo_models + ".sdf", pretty_print=True, xml_declaration=True, encoding='UTF-8')
self.track.export_to_yaml(self.model_path + "/track/tracks_yaml", name,create_dir=False)
print "[INFO] Saving track to: ",gazebo_models + ".sdf"
def handle_btn_import(self, path,outside,inside,center):
if path.endswith('.bag'):
self.track.load_track_from_bag(path,outside,inside,center)
self.update_all()
else:
print "[ERROR] Wrong file extension. Only ROSBAG supported"
def on_mouse_up(self, event):
pass
def on_mouse_move(self, event):
print event
def on_mouse_down(self, event):
if not self.enable_editing:
return
scene_point = event.scenePos()
point = np.array([(scene_point.x()), (scene_point.y())])
point = self.get_m_from_px(point)
if self._mode == Mode.DRAW:
if self.track.add_point_on_middle_line(point):
point_from = (self.track.get_control_point(-2))
point_to = (self.track.get_control_point(-1))
self.draw_line(point_from, point_to)
self.draw_rect(point_to, 0.5, 0.5)
elif self._mode == Mode.EDIT and self.track.computed_cones:
if self._side == Type.RIGHT:
self.track.cones_right = np.vstack([self.track.cones_right, point])
elif self._side == Type.LEFT:
self.track.cones_left = np.vstack([self.track.cones_left, point])
self.update_all()
elif self._mode == Mode.ERASE:
counter = 0
dist_min = 100.0
index = 0
for p in self.track.cones_left:
dist = np.linalg.norm(p - point)
if dist < dist_min:
dist_min = dist
index = counter
counter = counter + 1
if dist_min < 0.5:
self.track.cones_left = np.delete(self.track.cones_left, index, 0)
counter = 0
dist_min = 100.0
index = 0
for p in self.track.cones_right:
dist = np.linalg.norm(p - point)
if dist < dist_min:
dist_min = dist
index = counter
counter = counter + 1
if dist_min < 0.5:
self.track.cones_right = np.delete(self.track.cones_right, index, 0)
self.update_all()
#####################################
## DRAWING FUNCTIONS
#####################################
def _draw_axes(self):
pos_from = self.get_px_pos_from_m([0, 0])
pos_to = self.get_px_pos_from_m([5, 0])
grid_lines = QPen(QColor(255, 0, 0))
grid_lines.setWidth(5)
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], grid_lines)
pos_to = self.get_px_pos_from_m([0, 5])
grid_lines = QPen(QColor(0, 0, 255))
grid_lines.setWidth(5)
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], grid_lines)
def draw_axes(self, pos):
heading = pos[2]
x = pos[0]
y = pos[1]
length = 2.5
xy = [x,y]
xy_to = [x + length * np.cos(heading), y + length * np.sin(heading)]
pos_from = self.get_px_pos_from_m(xy)
pos_to = self.get_px_pos_from_m(xy_to)
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], QPen(QColor(255, 0, 0)))
heading = heading + np.pi / 2.0
xy_to = [x + length * np.cos(heading), y + length * np.sin(heading)]
pos_from = self.get_px_pos_from_m(xy)
pos_to = self.get_px_pos_from_m(xy_to)
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], QPen(QColor(0, 0, 255)))
def _draw_grid(self, alpha, grid_size):
self._grid_alpha = alpha
self._grid_m = grid_size
self._draw_axes()
max_x = 200
max_y = 200
grid_lines = QPen(QColor(105, 105, 105, alpha))
for x in range(0, max_x, grid_size):
pos_from = self.get_px_pos_from_m([x, -max_y])
pos_to = self.get_px_pos_from_m([x, max_y])
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], grid_lines)
pos_from = self.get_px_pos_from_m([max_x, x])
pos_to = self.get_px_pos_from_m([-max_x, x])
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], grid_lines)
pos_from = self.get_px_pos_from_m([-x, -max_y])
pos_to = self.get_px_pos_from_m([-x, max_y])
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], grid_lines)
pos_from = self.get_px_pos_from_m([max_x, -x])
pos_to = self.get_px_pos_from_m([-max_x, -x])
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], grid_lines)
def _draw_cone(self, x, y, diameter=10, color=QColor(100, 200, 0)):
point = self.get_px_pos_from_m([x, y])
cone_pen = QPen(color, 2, Qt.SolidLine, Qt.RoundCap)
cone_ellipse = self.addEllipse(point[0] - diameter / 2,
point[1] - diameter / 2,
diameter,
diameter,
cone_pen)
def draw_line(self, start, end, color=QColor(0, 0, 100)):
cone_pen = QPen(color, 2, Qt.DashLine, Qt.RoundCap)
start = self.get_px_pos_from_m(start)
end = self.get_px_pos_from_m(end)
self.addLine(start[0], start[1], end[0], end[1], cone_pen)
def draw_rect(self, pos, width, height, color=QColor(0, 0, 100)):
cone_pen = QPen(color, 2, Qt.SolidLine, Qt.RoundCap)
width = self.m_to_px(width)
height = self.m_to_px(height)
start = self.get_px_pos_from_m(pos)
start[0] = start[0] - width / 2.0
start[1] = start[1] - height / 2.0
self.addRect(start[0], start[1], width, height, cone_pen)
def draw_track(self, track, color=QColor(100, 200, 0)):
for i, row in enumerate(track):
if i != 0:
self.draw_line(track[i - 1], track[i], color=color)
def draw_cones(self, track, color=QColor(100, 200, 0)):
for x, y in track:
self._draw_cone(x, y, diameter=self._cone_diameter, color=color)
def draw_big_cones(self, track, color=QColor(100, 200, 0)):
for x, y in track:
self._draw_cone(x, y, diameter=self._cone_diameter * 2.0, color=color)
def draw_tk_device(self, track, color=QColor(255, 0, 0)):
for x, y in track:
self._draw_cone(x, y, diameter=self._cone_diameter * 2.0, color=color)
def draw_lines(self, lines, color=QColor(0, 0, 100)):
size = len(lines)
if size < 3:
return
for i in range(1, size):
last = lines[i - 1, :]
pos = lines[i, :]
self.draw_line(last, pos, color)
self.draw_rect(pos, 0.5, 0.5)
#####################################
## CONVERTERS
#####################################
def m_to_px(self, x):
return x * self._px_per_m
def px_to_m(self, px, py):
return [self.px_to_m(px), self.px_to_m(py)]
def px_to_m(self, px):
return px / self._px_per_m
def get_px_pos_from_m(self, p):
p_augmented = np.array([p[0], -p[1], 1])
p_res = np.dot(self.get_transform_px_to_m(), p_augmented)
return np.array([p_res[0, 0], p_res[0, 1]])
def get_m_from_px(self, p):
p_augmented = np.array([p[0], p[1], 1])
p_res = np.dot(np.linalg.inv(self.get_transform_px_to_m()), p_augmented)
return np.array([p_res[0, 0], -p_res[0, 1]])
def get_transform_px_to_m(self):
# Inv = np.matrix([[1, 0], [0, -1]])
angle = 3.0 / 2.0 * np.pi
c = np.cos(angle)
s = np.sin(angle)
Rot = np.matrix([[c, -s], [s, c]])
Multip = np.matrix([[self._px_per_m, 0], [0, self._px_per_m]])
InvRot = Multip * Rot
trans = [self._map_width / 2.0, self._map_height / 2.0]
T = np.matrix([[InvRot[0, 0], InvRot[0, 1], trans[0]],
[InvRot[1, 0], InvRot[1, 1], trans[1]],
[0, 0, 1]])
return T
| 1.625 | 2 |
tests/test_base.py | youngsoul/PyBlynkRestApi | 0 | 12786064 | import unittest
from pyblynkrestapi.PyBlynkRestApi import PyBlynkRestApi
class TestBase(unittest.TestCase):
def __init__(self,*args, **kwargs):
super(TestBase, self).__init__(*args, **kwargs)
self.auth_token = ''
self.blynk = PyBlynkRestApi(auth_token=self.auth_token)
| 2.3125 | 2 |
Count.py | avishek19/allneo | 0 | 12786065 | import DbConnect
from DbConnect import Connection
import pandas as pd
class NodeCount:
def __init__(self,uri,uname,pwd):
self.self = self
self.uri = uri
self.uname = uname
self.pwd = <PASSWORD>
# print('NodeCount:',uri,' ',uname,' ',pwd)
self.conn = Connection(self.uri,self.pwd,self.uname)
def getNodeCount(self,n):
query_1 = "match("+n+") return count(n) as Count"
result = self.conn.query(query_1)
dat = [{"Count":val["Count"]} for val in result]
print(dat)
query_2 = "match("+n+") return n.id as id, n.name as name"
result = self.conn.query(query_2)
Val2 = [{"ID":dat["id"],"Name":dat["name"]} for dat in result]
print(pd.DataFrame(Val2))
def getLabelCount(self,n):
query = "match("+n+") return count(distinct(labels(n))) as UnqLblCnt"
result = self.conn.query(query)
LblCnt = [{"UniqueLabelCount":dat["UnqLblCnt"]} for dat in result]
print(LblCnt)
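# Illustrative usage (added; not part of the original script). The URI, credentials
# and label pattern below are placeholders, assuming a reachable Neo4j instance:
#   counter = NodeCount("bolt://localhost:7687", "neo4j", "secret")
#   counter.getNodeCount("n:Person")
#   counter.getLabelCount("n")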
| 3.046875 | 3 |
core/co_struct_to_xml.py | ykiveish/mks_dashnoard | 0 | 12786066 | <filename>core/co_struct_to_xml.py
import os
import struct
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import tostring
class StructParser:
def __init__(self, schema):
self.Schema = schema
def ConvertListToInteger(self, buffer):
buffer_size = len(buffer)
if buffer_size > 4:
return 0
value = 0
for index in range(buffer_size):
value |= buffer[index] << (index * 8)
# value |= buffer[index] << ( ((buffer_size - 1) - index) * 8)
return value
def ParseXML(self, buffer):
index = 0
with open(self.Schema, 'r') as xml_file:
tree = ET.parse(xml_file)
for elem in tree.iter():
if elem.get('item') == "value":
size = int(elem.get('size'))
# print(elem.tag)
if elem.get('type') == "int":
if size == 1:
elem.text = str(self.ConvertListToInteger(buffer[index:index+1]))
elif size == 2:
elem.text = str(self.ConvertListToInteger(buffer[index:index+2]))
elif size == 4:
elem.text = str(self.ConvertListToInteger(buffer[index:index+4]))
index += size
elif elem.get('type') == "string":
try:
str_data = ""
for item in buffer[index:index+size]:
if item > 0x20 and item < 0x7f:
str_data += chr(item)
elem.text = str_data
except Exception as e:
print("[ParseBINtoXML] - ERROR - {0}".format(e))
index += size
elif elem.get('item') == "array":
pass
else:
pass
return tostring(tree.getroot()) | 2.953125 | 3 |
python/merge_sort.py | v0rs4/algorithms | 0 | 12786067 | <gh_stars>0
from random import shuffle
def mergeSort(arr):
if len(arr) == 1:
return
m = len(arr) // 2
left = arr[:m]
right = arr[m:]
mergeSort(left)
mergeSort(right)
i = j = k = 0
while i < len(left) and j < len(right):
if left[i] < right[j]:
arr[k] = left[i]
i += 1
else:
arr[k] = right[j]
j += 1
k += 1
while i < len(left):
arr[k] = left[i]
i += 1
k += 1
while j < len(right):
arr[k] = right[j]
j += 1
k += 1
if __name__ == "__main__":
arr = list(range(11))
shuffle(arr)
print(f"Before: {arr}")
mergeSort(arr)
print(f"After: {arr}") | 3.96875 | 4 |
setup.py | samn33/qlazy | 15 | 12786068 | # -*- coding: utf-8 -*-
import os
import pathlib
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext as build_ext_orig
_VERSION = '0.2.2'
class CMakeExtension(Extension):
def __init__(self, name):
super().__init__(name, sources=[])
class build_ext(build_ext_orig):
def run(self):
for ext in self.extensions:
self.build_cmake(ext)
super().run()
def build_cmake(self, ext):
cwd = pathlib.Path().absolute()
build_temp = pathlib.Path(self.build_temp)
build_temp.mkdir(parents=True, exist_ok=True)
extdir = pathlib.Path(self.get_ext_fullpath(ext.name))
extdir.mkdir(parents=True, exist_ok=True)
config = 'Debug' if self.debug else 'Release'
cmake_args = [
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + str(extdir.parent.parent.absolute()),
'-DCMAKE_RUNTIME_OUTPUT_DIRECTORY=' + str(extdir.parent.parent.parent.absolute()),
'-DCMAKE_BUILD_TYPE=' + config
]
build_args = [
'--config', config,
'--', '-j4'
]
os.chdir(str(build_temp))
self.spawn(['cmake', str(cwd)] + cmake_args)
if not self.dry_run:
self.spawn(['cmake', '--build', '.'] + build_args)
os.chdir(str(cwd))
setup(
name='qlazy',
version=_VERSION,
url='https://github.com/samn33/qlazy',
author='Sam.N',
author_email='<EMAIL>',
description='Quantum Computing Simulator',
long_description='',
packages=find_packages(),
include_package_data=True,
install_requires=[
'numpy'
],
license='Apache Software License',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'License :: OSI Approved :: Apache Software License',
],
keywords=['quantum', 'simulator'],
ext_modules=[CMakeExtension('qlazy/lib/c/qlz')],
cmdclass={
'build_ext': build_ext,
},
entry_points="""
[console_scripts]
qlazy = qlazy.core:main
""",
)
| 1.929688 | 2 |
djaimes/flask/FLASK_REST/app.py | gthompsonku/predicting_covid_with_bloodtest | 1 | 12786069 | from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
import os
# Init app
app = Flask(__name__)
basedir = os.path.abspath(os.path.dirname(__file__))
# Database
app.config['SQLALCHEMY_DATABASE_URI'] = ('sqlite:///' +
os.path.join(basedir, 'db.sqlite'))
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Init db
db = SQLAlchemy(app)
# Init ma
ma = Marshmallow(app)
# Product Class/Model
class Product(db.Model):
id = db.Column(db.Integer, primary_key=True)
patient_age_quantile = db.Column(db.Float)
def __init__(self, patient_age_quantile):
self.patient_age_quantile = patient_age_quantile
#Product Schema
class ProductSchema(ma.Schema):
class Meta:
fields = ('id', 'patient_age_quantile')
product_schema = ProductSchema()
products_schema = ProductSchema(many=True)
# Create a Product
@app.route('/product', methods=['POST'])
def add_product():
patient_age_quantile = request.json['patient_age_quantile']
new_product = Product(patient_age_quantile)
db.session.add(new_product)
db.session.commit()
return product_schema.jsonify(new_product)
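# Illustrative usage (added; not part of the original app): with the server running,
# a product could be created with a hypothetical request such as:
#   curl -X POST http://127.0.0.1:5000/product \
#        -H "Content-Type: application/json" \
#        -d '{"patient_age_quantile": 7.0}'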
# Run Server
if __name__ == '__main__':
app.run(debug=True) | 2.6875 | 3 |
concepts/numberRangeDouble.py | sixtysecondrevit/dynamoPython | 114 | 12786070 | <gh_stars>100-1000
"""
PYTHON RANGE: DOUBLE APPROACH
"""
__author__ = '<NAME> - <EMAIL>'
__twitter__ = '@solamour'
__version__ = '1.0.0'
# DEFINITION:
# Custom definition to build a function similar to our DesignScript
# float range
def floatRange( start, end, step ):
for number in xrange( end ):
yield start
start += step
# SYNTAX: floatRange( [start], stop[, step ] )
# Start = Starting number of the sequence [Open]
# Stop = Generate numbers up to, but not including this number [Closed]
# Step = Difference between each number in the sequence. In order to pair
# with our DesignScript variant, we need to run this as: ( 1.0 / end)
# NOTES:
# If we wish to use floating values (Doubles) we have to specify, hence
# in our step value we use 1.0 instead of 1. IF we used 1 (An integer)
# we would simply return a list of zeroes
# The input ports
start = IN[0] # A number such as 0 (int)
stop = IN[1] # A number such as 10 (int)
# A divisor calculation that changes our ints to floats
step = ( 1.0 / stop )
# The output port - In this case a list comprehension
OUT = [ value for value in floatRange( start, stop + 1, step ) ]
| 3.234375 | 3 |
5_Functions/F_return.py | Oscar-Oliveira/Python3 | 0 | 12786071 | <filename>5_Functions/F_return.py
"""
return
"""
def my_max(*numbers):
max = None
for value in numbers:
if max is None or max < value:
max = value
return max
def is_greater_v1(value1, value2):
if value1 > value2:
return True
else:
return False
def is_greater_v2(value1, value2):
if value1 > value2:
return True
return False
def my_sqrt(value):
if value > 0:
result = value ** 0.5
print("Square root of {} is {}".format(value, result))
return
print("value must be > 0")
def adder(x):
def inside_adder(y):
return x + y
return inside_adder
print(my_max(1, 17, 5, 2, 50, -75, 1))
print()
print(is_greater_v1(10, 5))
print(is_greater_v1(10, 11))
print()
print(is_greater_v2(10, 5))
print(is_greater_v2(10, 11))
print()
my_sqrt(2)
my_sqrt(-2)
add = adder(5)
print(add(3))
print(add(5))
| 3.984375 | 4 |
sphenecoll/sphene/sphsearchboard/models.py | pigletto/sct-communitytools | 0 | 12786072 | <gh_stars>0
import logging
import os
from django.conf import settings
from sphene.community import sphsettings
from sphene.community.middleware import get_current_group, get_current_user
from sphene.sphboard.models import Post, get_all_viewable_categories
logger = logging.getLogger('sphene.sphsearchboard.models')
post_index = None
try:
import urls #ensure that load_indexes is called
post_index= Post.indexer
except:
from djapian import Indexer
searchboard_post_index = sphsettings.get_sph_setting('sphsearchboard_post_index', '/var/cache/sct/postindex/')
if not os.path.isdir(searchboard_post_index):
os.makedirs(searchboard_post_index)
Post.index_model = 'sphene.sphsearchboard.models.post_index'
post_index = Indexer(
path = searchboard_post_index,
model = Post,
fields = [('subject', 20), 'body'],
tags = [
('subject', 'subject', 20),
('date', 'postdate'),
('category', 'category.name'),
('post_id', 'id'),
('category_id', 'category.id'),
('group_id', 'category.group.id'),
])
post_index.boolean_fields = ('category_id', 'group_id',)
class PostFilter(object):
"""
    Very simplistic "filter" for the search result set
    - since this is only a security measure to ensure the
      search string got recognized, we simply filter out
      everything that is not in the viewable categories.
      This could lead to weird display behaviors, but
      only if the search query didn't match as expected.
"""
def __init__(self, resultset, viewable_category_ids):
self.resultset = resultset
self.viewable_category_ids = viewable_category_ids
def __len__(self):
return self.resultset.count()
count = __len__
def __iter__(self):
for hit in self.resultset:
if self.verify_hit(hit):
yield hit
def verify_hit(self, hit):
return hit.instance.category_id in self.viewable_category_ids
def __getslice__(self, start, end):
for hit in self.resultset[start:end]:
if self.verify_hit(hit):
yield hit
def search_posts(query, category = None):
group = get_current_group()
user = get_current_user()
#if group:
# query = u''.join((u'+', u'group_id:', unicode(group.id), ' ', query))
categories = get_all_viewable_categories(group, user)
if category is not None:
prefix = u'category_id:%d' % category.id
else:
prefix = u' OR '.join([u'category_id:%d' % category for category in categories])
query = u'(%s) AND (%s)' % (prefix, query)
logger.debug('Searching for: %s' % query)
ret = PostFilter(post_index.search(query=query), categories)
logger.debug('Searching done.')
return ret
def get_category_name(post):
return post.category.name
get_category_name.name = 'category'
def get_category_id(post):
return post.category.id
get_category_id.name = 'category_id'
def get_group_id(post):
return post.category.group.id
get_group_id.name = 'group_id'
| 1.96875 | 2 |
Python/Metodo Anova.py | filipeaguiarrod/Formacao-Cientista-de-Dados-com-Python-e-R | 0 | 12786073 | <reponame>filipeaguiarrod/Formacao-Cientista-de-Dados-com-Python-e-R<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 15 14:17:40 2020
@author: rodri
"""
import pandas as pd
from scipy import stats
import statsmodels.api as sm
from statsmodels.formula.api import ols
import matplotlib.pyplot as plt  # for generating the plots
from statsmodels.stats.multicomp import MultiComparison
tratamento = pd.read_csv('anova.csv', sep=';')  # open the csv using ';' as separator
tratamento.boxplot(by='Remedio', grid=False)  # box plot of the data grouped by Remedio; grid=False removes the grid lines
plt.scatter(tratamento.Remedio, tratamento.Horas)
modelo1 = ols("Horas ~ Remedio", data=tratamento).fit()  # build the model from the data; fit() runs the regression
resultados1 = sm.stats.anova_lm(modelo1)  # ANOVA test
modelo2 = ols("Horas ~ Remedio*Sexo", data=tratamento).fit()  # model with more than one factor (interaction); fit() runs the regression
resultados2 = sm.stats.anova_lm(modelo2)  # ANOVA test
# Is there significant variation? Tukey's method
ac = MultiComparison (tratamento['Horas'], tratamento['Remedio'])
resultado_teste=ac.tukeyhsd()
print(resultado_teste)
resultado_teste.plot_simultaneous()
| 2.8125 | 3 |
Example_Cases/FDS_Mass_Loss_Rate/Scripts/models.py | koverholt/bayes-fire | 6 | 12786074 | #!/usr/bin/env python
"""Module for setting up statistical models"""
from __future__ import division
from math import pi
import numpy as np
import pymc as mc
import graphics
import data_fds
import external_fds
def fds_mlr():
"""PyMC configuration with FDS as the model."""
# Priors
# FDS inputs: abs_coeff, A, E, emissivity, HoR, k, rho, c
theta = mc.Uniform(
'theta',
lower=[1, 7.5e12, 187e3, 0.75, 500, 0.01, 500, 0.5],
value=[2500, 8.5e12, 188e3, 0.85, 750, 0.25, 1000, 3.0],
upper=[5000, 9.5e12, 189e3, 1.00, 2000, 0.50, 2000, 6.0])
sigma = mc.Uniform('sigma', lower=0., upper=10., value=0.100)
# Model
@mc.deterministic
def y_mean(theta=theta):
casename = external_fds.gen_input(
abs_coeff=theta[0],
A=theta[1],
E=theta[2],
emissivity=theta[3],
HoR=theta[4],
k=theta[5],
rho=theta[6],
c=theta[7])
external_fds.run_fds(casename)
mlrs = external_fds.read_fds(casename)
mlr = mlrs[:, 2]
# Print MLR vs. time for each iteration
graphics.plot_fds_mlr(mlr)
return mlr
# Likelihood
# The likelihood is N(y_mean, sigma^2), where sigma
# is pulled from a uniform distribution.
y_obs = mc.Normal('y_obs',
value=data_fds.mlr,
mu=y_mean,
tau=sigma**-2,
observed=True)
return vars()
| 2.59375 | 3 |
Python3/test.py | 610yilingliu/leetcode | 0 | 12786075 | import collections
class Solution:
def knightProbability(self, N: int, K: int, r: int, c: int) -> float:
def valid(curr, curc):
if curr < 0 or curr > N - 1 or curc < 0 or curc > N - 1:
return False
return True
if valid(r, c) == False:
return 0
if K == 0:
return 1
bfs = collections.deque([(r, c, 0, True)])
dirs = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (-2, 1), (2, -1), (-2, -1)]
out_cnt = 0
in_cnt = 0
while bfs and bfs[0][2] < K:
curr, curc, curt, curvalid = bfs.popleft()
for dir in dirs:
if curvalid == False:
if curt + 1 == K:
out_cnt += 1
bfs.append((curr + dir[0], curc + dir[1], curt + 1, False))
else:
nxtr = curr + dir[0]
nxtc = curc + dir[1]
is_valid = valid(nxtr, nxtc)
if is_valid and curt + 1 == K:
in_cnt += 1
else:
if curt + 1 == K:
out_cnt += 1
bfs.append((nxtr, nxtc, curt + 1, is_valid))
return in_cnt/(in_cnt + out_cnt)
a = Solution()
b = a.knightProbability(3, 2, 0, 0)
print(b) | 2.90625 | 3 |
tracetools_read/tracetools_read/trace.py | paulbovbel/ros2_tracing | 4 | 12786076 | # Copyright 2019 <NAME> GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module with functions for reading traces."""
import os
from typing import List
from . import DictEvent
from .babeltrace import get_babeltrace_impl
impl = get_babeltrace_impl()
def is_trace_directory(path: str) -> bool:
"""
Check recursively if a path is a trace directory.
:param path: the path to check
:return: `True` if it is a trace directory, `False` otherwise
"""
path = os.path.expanduser(path)
if not os.path.isdir(path):
return False
return impl.is_trace_directory(path) # type: ignore
def get_trace_events(trace_directory: str) -> List[DictEvent]:
"""
Get the events of a trace.
:param trace_directory: the path to the main/top trace directory
:return: events
"""
return impl.get_trace_events(trace_directory) # type: ignore
| 1.960938 | 2 |
modules/devenvironments-21/python/Sobres/Sobrial.py | pomaretta/PROG2021 | 0 | 12786077 | <filename>modules/devenvironments-21/python/Sobres/Sobrial.py
"""
Create a pack (sobre) containing 5 created cards.
The program must be able to list the cards contained
in the pack through a method called
VeureCartesSobre() (for example, showing the card's name,
type and rarity).
"""
# AUTHOR <NAME>
# https://carlospomares.es
from Components.Carta import Carta, generateCard
from Components.Sobre import Sobre, generatePackage
from Components.Caja import Caja
import random
def main():
    # Box of 25 cards / 5 packs
caja = Caja("25 Card box")
caja.print()
    # Single pack
sobre = generatePackage("Magico Magic","Sobre mágico",9.99)
sobre.veureCartes()
if __name__ == '__main__':
main() | 3.140625 | 3 |
hasspad/main.py | kvchen/hasspad | 2 | 12786078 | #!/usr/bin/env python3
import asyncio
import logging
from typing import TextIO
import click
import yaml
from rich.logging import RichHandler
from hasspad.config import HasspadConfig
from hasspad.hasspad import Hasspad
logging.basicConfig(
level="INFO",
format="%(message)s",
datefmt="[%X]",
handlers=[RichHandler(rich_tracebacks=True)],
)
logger = logging.getLogger(__file__)
@click.command()
@click.argument("config", type=click.File("r"))
def main(config: TextIO) -> None:
hasspad = Hasspad(HasspadConfig(**yaml.safe_load(config)))
asyncio.run(hasspad.listen())
| 2.109375 | 2 |
adaptivefiltering/asprs.py | zaw-shinoto/adaptivefiltering | 0 | 12786079 | from adaptivefiltering.utils import AdaptiveFilteringError, is_iterable
# Mapping from human-readable name to class codes
_name_to_class = {
"unclassified": (0, 1),
"ground": (2,),
"low_vegetation": (3,),
"medium_vegetation": (4,),
"high_vegetation": (5,),
"building": (6,),
"low_point": (7,),
"water": (9,),
"road_surface": (11,),
}
# Inverse mapping from class codes to human readable names
_class_to_name = ["(not implemented)"] * 256
# Populate the inverse mapping
for name, classes in _name_to_class.items():
for c in classes:
_class_to_name[c] = name
def asprs_class_code(name):
"""Map ASPRS classification name to code"""
try:
return _name_to_class[name]
except KeyError:
raise AdaptiveFilteringError(
f"Classification identifier '{name}'' not known to adaptivefiltering"
)
def asprs_class_name(code):
"""Map ASPRS classification code to name"""
try:
return _class_to_name[code]
except IndexError:
raise AdaptiveFilteringError(
f"Classification code '{code}' not in range [0, 255]"
)
def asprs(vals):
"""Map a number of values to ASPRS classification codes
:param vals:
An arbitrary number of values that somehow describe an ASPRS
        code. Can be integers, which will be used directly, or strings,
        which will be split at commas and then turned into integers.
    :returns:
        A sorted tuple of integers with ASPRS codes
:rtype: tuple
"""
if is_iterable(vals):
return tuple(sorted(set(sum((_asprs(v) for v in vals), ()))))
else:
return asprs([vals])
def _asprs(val):
if isinstance(val, str):
# First, we split at commas and go into recursion
pieces = val.split(",")
if len(pieces) > 1:
return asprs(pieces)
# If this is a simple string token it must match a code
return asprs_class_code(pieces[0].strip())
elif isinstance(val, int):
if val < 0 or val > 255:
raise AdaptiveFilteringError(
"Classification values need to be in the interval [0, 255]"
)
return (val,)
elif isinstance(val, slice):
# If start is not given, it is zero
start = val.start
if start is None:
start = 0
# If stop is not given, it is the maximum possible classification value: 255
stop = val.stop
if stop is None:
stop = 255
# This adaptation is necessary to be able to use the range generator below
stop = stop + 1
# Collect the list of arguments to the range generator
args = [start, stop]
# Add a third parameter iff the slice step parameter was given
if val.step is not None:
args.append(val.step)
# Return the tuple of classification values
return tuple(range(*args))
else:
raise ValueError(f"Cannot handle type {type(val)} in ASPRS classification")
| 3.359375 | 3 |
problems/p009/solution.py | alar0330/ProjectEuler | 0 | 12786080 | """
Project Euler: Problem #009
"""
def solve(s):
"""After a short analytic transformation, the
algorithm searches for an integer value that solves the
derived equation by iterating through all possibilities.
:param s: Constraint on sum, i.e. s = a + b + c
:returns: Returns the product a*b*c.
"""
# Solve 2 eqs. for 'b' in terms of 'a'
b = lambda a: ( s*a - s*s/2 ) / ( a - s )
for a in range (1, s - 1):
if b(a).is_integer():
break
c = s - a - b(a)
return int(a*b(a)*c)
# Independent driver
if __name__ == '__main__':
print("Check: {}".format(solve(12)))
| 3.828125 | 4 |
quantumdl/__init__.py | dexterai-lab/quantumdl | 0 | 12786081 | from quantumdl.models.quantummodel import *
from quantumdl.core.engine import * | 1.0625 | 1 |
hatch_vcs/build_hook.py | ofek/hatch-vcs | 0 | 12786082 | # SPDX-FileCopyrightText: 2022-present <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: MIT
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class VCSBuildHook(BuildHookInterface):
PLUGIN_NAME = 'vcs'
def __init__(self, *args, **kwargs):
super(VCSBuildHook, self).__init__(*args, **kwargs)
self.__config_version_file = None
self.__config_template = None
@property
def config_version_file(self):
if self.__config_version_file is None:
version_file = self.config.get('version-file', '')
if not isinstance(version_file, str):
raise TypeError('Option `version-file` for build hook `{}` must be a string'.format(self.PLUGIN_NAME))
elif not version_file:
raise ValueError('Option `version-file` for build hook `{}` is required'.format(self.PLUGIN_NAME))
self.__config_version_file = version_file
return self.__config_version_file
@property
def config_template(self):
if self.__config_template is None:
template = self.config.get('template', '')
if not isinstance(template, str):
raise TypeError('Option `template` for build hook `{}` must be a string'.format(self.PLUGIN_NAME))
self.__config_template = template
return self.__config_template
def initialize(self, version, build_data):
from setuptools_scm import dump_version
dump_version(self.root, self.metadata.version, self.config_version_file, template=self.config_template)
build_data['artifacts'].append('/{}'.format(self.config_version_file))
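# Example configuration sketch (illustrative addition; paths are placeholders).
# The hook above would typically be enabled in pyproject.toml roughly like this:
#
#   [tool.hatch.build.hooks.vcs]
#   version-file = "my_package/_version.py"
#   template = "__version__ = '{version}'\n"
#
# `version-file` is required by config_version_file; `template` is optional.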
| 2.078125 | 2 |
speid/processors.py | OsvaldoRino/speid | 22 | 12786083 | <reponame>OsvaldoRino/speid
import os
from stpmex import Client
STP_PRIVATE_LOCATION = os.environ['STP_PRIVATE_LOCATION']
STP_EMPRESA = os.environ['STP_EMPRESA']
STP_KEY_PASSPHRASE = os.environ['STP_KEY_PASSPHRASE']
STP_BASE_URL = os.getenv('STP_BASE_URL', None)
SPEID_ENV = os.getenv('SPEID_ENV', '')
# Configure the STP client
with open(STP_PRIVATE_LOCATION) as fp:
private_key = fp.read()
stpmex_client = Client(
empresa=STP_EMPRESA,
priv_key=private_key,
priv_key_passphrase=STP_KEY_PASSPHRASE,
demo=SPEID_ENV != 'prod',
base_url=STP_BASE_URL,
)
| 2 | 2 |
mplStyle/test/test_MplTickStyle.py | khanfarhan10/mplStyle | 39 | 12786084 | <gh_stars>10-100
#===========================================================================
#
# Copyright (c) 2014, California Institute of Technology.
# U.S. Government Sponsorship under NASA Contract NAS7-03001 is
# acknowledged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#===========================================================================
"Unit test for the MplTickStyle class."
__version__ = "$Revision: #1 $"
#===========================================================================
# Required imports. Do not modify these.
import unittest
#===========================================================================
# Place all imports after here.
#
import matplotlib as mpl
import matplotlib.axes
import matplotlib.figure
from mplStyle import MplTickStyle
#
# Place all imports before here.
#===========================================================================
#===========================================================================
class TestMplTickStyle( unittest.TestCase ):
"""Test the MplTickStyle class."""
#-----------------------------------------------------------------------
def setUp( self ):
"""This method is called before any tests are run."""
pass
#-----------------------------------------------------------------------
def tearDown( self ):
"""This method is called after all tests are run."""
pass
#=======================================================================
# Add tests methods below.
# Any method whose name begins with 'test' will be run by the framework.
#-----------------------------------------------------------------------
def checkElement( self, testName, values, element ):
for property in values:
expected = values[ property ]
msg = "%s: Incorrect value for property: %s" % (testName, property)
getFunc = getattr( element, 'get_%s' % property )
self.assertEqual( expected, getFunc(), msg = msg )
#-----------------------------------------------------------------------
def testBasic( self ):
"""A basic test of MplTickStyle."""
tickVals = {
'pad' : 0.1,
}
markVals = {
'color' : '#F0F000',
'markersize' : 15,
'markeredgewidth' : 3.0,
'linewidth' : 2,
}
gridVals = {
'color' : '#B0B0B0',
'linestyle' : ':',
'linewidth' : 1,
}
fig = mpl.figure.Figure()
ax = mpl.axes.Axes( fig, [ 0.2, 0.2, 0.6, 0.6 ] )
tick = ax.get_xaxis().get_major_ticks()[0]
style = MplTickStyle(
pad = tickVals['pad'],
grid = {
'color' : gridVals['color'],
'style' : gridVals['linestyle'],
'width' : gridVals['linewidth'],
},
length = markVals['markersize'],
width = markVals['markeredgewidth'],
marks = {
'color' : markVals['color'],
'width' : markVals['linewidth'],
},
)
style.apply( tick )
self.checkElement( "Apply tick", tickVals, tick )
self.checkElement( "Apply mark", markVals, tick.tick1line )
self.checkElement( "Apply grid", gridVals, tick.gridline )
self.assertRaises( Exception, style.apply, 'invalid',
msg = "Failed to throw on invalid element." )
#-----------------------------------------------------------------------
| 1.164063 | 1 |
todolist/dashboard/models.py | Joneyviana/todolist-django-angular | 0 | 12786085 | <gh_stars>0
from django.db import models
from ..users.models import User
# Create your models here.
class Plan(models.Model):
name = models.CharField(max_length=50)
user = models.ForeignKey(User, related_name='plans')
class Task(models.Model):
name = models.CharField(max_length=50)
description = models.TextField(blank=True)
plan = models.ForeignKey(Plan, related_name='tasks')
class Annotation(models.Model):
description = models.TextField(default='')
task = models.ForeignKey(Task, related_name='Annotations',blank=True)
| 2.28125 | 2 |
College/HVCC/Python Programming/Ch 4/encrypt_and_decrypt.py | lwoluke/Projects | 0 | 12786086 | <reponame>lwoluke/Projects
"""
File: encrypt_and_decrypt.py
Encrypts an input string of lowercase letters and prints
the result. The other input is the distance value.
Next, the output of the encrypted string is decrypted
back to lowercase letters and the result is printed.
The other input is the distance value.
"""
plainText = input("Enter a one-word, lowercase message: ")
distance = int(input("Enter the distance value: "))
code = ""
for ch in plainText:
ordValue = ord(ch)
cipherValue = ordValue + distance
if cipherValue > ord('z'):
cipherValue = ord('a') + distance - \
(ord('z') - ordValue + 1)
code += chr(cipherValue)
print('\n' + 'encrypted message: ', code)
plainText = ''
for ch in code:
ordValue = ord(ch)
cipherValue = ordValue - distance
if cipherValue < ord('a'):
cipherValue = ord('z') - (distance -
(ordValue - ord('a')) - 1)
plainText += chr(cipherValue)
print('decrypted message: ', plainText)
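# Worked example (added for clarity): with distance 3, the letter 'y' (ord 121)
# encrypts past 'z' (ord 122), so the wrap-around branch gives
# ord('a') + 3 - (122 - 121 + 1) = 98, i.e. 'b'; decryption reverses the wrap.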
| 4.15625 | 4 |
src/pygaps/parsing/mic_excel.py | pauliacomi/adsutils | 0 | 12786087 | <gh_stars>0
"""Parse micromeritics xls output files.
@author <NAME>
@modified <NAME>
"""
import re
from itertools import product
import dateutil.parser
import xlrd
from pygaps import logger
from pygaps.utilities.exceptions import ParsingError
_NUMBER_REGEX = re.compile(r"^(-)?\d+(.|,)?\d+")
_BRACKET_REGEX = re.compile(r"(?<=\().+?(?=\))")
_META_DICT = {
'material': {
'text': ('sample:', 'echantillon:'),
'type': 'string',
"xl_ref": (0, 1),
},
'adsorbate': {
'text': ('analysis ads', ),
'type': 'string',
"xl_ref": (0, 1),
},
'temperature': {
'text': ('analysis bath', ),
'type': 'number',
"xl_ref": (0, 1),
},
'operator': {
'text': ('operator', 'analyste'),
'type': 'string',
"xl_ref": (0, 1),
},
'date': {
'text': ('started', ),
'type': 'string',
"xl_ref": (0, 1),
},
'material_mass': {
'text': ('sample mass', ),
'type': 'number',
"xl_ref": (0, 1),
},
'apparatus': {
'text': ('micromeritics instrument', ),
'type': 'string',
"xl_ref": (1, 0),
},
'comment': {
'text': ('comments', ),
'type': 'string',
"xl_ref": (0, 0),
},
'error': {
'text': ('primary data', ),
'type': 'error',
"xl_ref": (1, 0),
},
}
_DATA_DICT = {
'absolute': 'pressure',
'relative': 'pressure_relative',
'saturation': 'pressure_saturation',
'quantity': 'loading',
'elapsed': 'time',
}
_UNITS_DICT = {
"p": {
"torr": ('mmHg', 'torr'),
"kPa": ('kPa'),
"bar": ('bar'),
"mbar": ('mbar'),
},
"l": {
"mmol": ("mmol"),
"mol": ("mol"),
"cm3(STP)": ("ml(STP)", "cm3(STP)", "cm^3(STP)", "cm³"),
},
"m": {
"g": ("g", "g-1", "g STP", "kg STP", "g^-1"),
},
}
def parse(path):
"""
Parse an xls file generated by micromeritics software.
Parameters
----------
path: str
the location of an xls file generated by a micromeritics instrument.
Returns
-------
dict
A dictionary containing report information.
"""
meta = {}
data = {}
errors = []
workbook = xlrd.open_workbook(path, encoding_override='latin-1')
sheet = workbook.sheet_by_index(0)
for row, col in product(range(sheet.nrows), range(sheet.ncols)):
cell_value = str(sheet.cell(row, col).value).lower()
if cell_value not in ["isotherm tabular report"]:
try:
name = next(
k for k, v in _META_DICT.items()
if any(cell_value.startswith(n) for n in v.get('text', []))
)
except StopIteration:
continue
ref = _META_DICT[name]['xl_ref']
tp = _META_DICT[name]['type']
val = sheet.cell(row + ref[0], col + ref[1]).value
if tp == 'number':
meta[name] = _handle_numbers(val, name)
elif tp == 'string':
meta[name] = _handle_string(val)
elif tp == 'error':
errors += _get_errors(sheet, row, col)
else: # If "data" section
header_list = _get_data_labels(sheet, row, col)
head, units = _parse_header(header_list) # header
meta.update(units)
for i, h in enumerate(head[1:]):
points = _get_datapoints(sheet, row, col + i)
if h == 'time':
data[h] = _convert_time(points)[1:]
elif h == 'pressure_saturation':
data[h] = [float(x) for x in points[1:]]
elif h.startswith("pressure") or h.startswith("loading"):
data[h] = [float(x) for x in points]
else:
data[h] = points
if errors:
meta['errors'] = errors
_check(meta, data, path)
# Set extra metadata
try:
meta['date'] = dateutil.parser.parse(meta['date']).isoformat()
except BaseException:
logger.warning("Could not convert date.")
meta['pressure_mode'] = 'absolute'
meta['loading_basis'] = 'molar'
meta['material_basis'] = 'mass'
return meta, data
def _handle_numbers(val, name):
"""
Remove any extra information (such as units) to return only the number as a float.
Input is a cell of type 'number'.
"""
if val:
ret = float(_NUMBER_REGEX.search(val.replace(',', '')).group())
if name == 'temperature':
if '°C' in val:
ret = ret + 273.15
return ret
def _handle_string(val):
"""
Replace Comments: and any newline found.
Input is a cell of type 'string'.
"""
return val.replace('Comments: ', '').replace('\r\n', ' ')
def _convert_time(points):
"""Convert time points from HH:MM format to minutes."""
minutes = []
for point in points:
hours, mins = str(point).split(':')
minutes.append(int(hours) * 60 + int(mins))
return minutes
def _get_data_labels(sheet, row, col):
"""Locate all column labels for data collected during the experiment."""
final_column = col
header_row = 2
# Abstract this sort of thing
header = sheet.cell(row + header_row, final_column).value.lower()
while any(header.startswith(label) for label in _DATA_DICT.keys()):
final_column += 1
header = sheet.cell(row + header_row, final_column).value.lower()
if col == final_column:
# this means no header exists, can happen in some older files
# the units might not be standard! TODO should check
logger.warning("Default data headers supplied for file.")
return [
"Relative Pressure (P/Po)",
"Absolute Pressure (kPa)",
"Quantity Adsorbed (cm³/g STP)",
"Elapsed Time (h:min)",
"Saturation Pressure (kPa)",
]
return [sheet.cell(row + header_row, i).value for i in range(col, final_column)]
def _get_datapoints(sheet, row, col):
"""Return all collected data points for a given column."""
rowc = 3
# Data can start on two different rows. Try first option and then next row.
if sheet.cell(row + rowc, col).value:
start_row = row + rowc
final_row = row + rowc
else:
start_row = row + (rowc + 1)
final_row = row + (rowc + 1)
point = sheet.cell(final_row, col).value
while point:
final_row += 1
point = sheet.cell(final_row, col).value
# sometimes 1-row gaps are left for P0 measurement
if not point:
final_row += 1
point = sheet.cell(final_row, col).value
return [
sheet.cell(i, col).value for i in range(start_row, final_row) if sheet.cell(i, col).value
]
def _parse_header(header_split):
"""Parse an adsorption/desorption header to get columns and units."""
headers = ['branch']
units = {}
for h in header_split:
header = next((_DATA_DICT[a] for a in _DATA_DICT if h.lower().startswith(a)), h)
headers.append(header)
if header in 'loading':
unit = _BRACKET_REGEX.search(h).group().strip()
unit_l, unit_m = unit.split('/')
units['loading_basis'] = 'molar'
units['loading_unit'] = _parse_unit(unit_l, 'l')
units['material_basis'] = 'mass'
units['material_unit'] = _parse_unit(unit_m, 'm')
elif header == 'pressure':
unit = _BRACKET_REGEX.search(h).group().strip()
units['pressure_mode'] = 'absolute'
units['pressure_unit'] = _parse_unit(unit, 'p')
if 'pressure' not in headers:
if 'pressure_relative' in headers:
headers[headers.index('pressure_relative')] = 'pressure'
units['pressure_mode'] = 'relative'
return headers, units
def _parse_unit(unit, unit_type):
for (k, v) in _UNITS_DICT[unit_type].items():
if unit in v:
return k
raise ParsingError(f"Could not parse unit '{unit}'.")
def _get_errors(sheet, row, col):
"""
Look for all cells that contain errors.
(are below a cell labelled primary data).
"""
ref = _META_DICT['error']['xl_ref']
val = sheet.cell(row + ref[0], col + ref[1]).value
if not val:
return []
final_row = row + ref[0]
error = sheet.cell(final_row, col + ref[1]).value
while error:
final_row += 1
error = sheet.cell(final_row, col + ref[1]).value
return [sheet.cell(i, col + ref[1]).value for i in range(row + ref[0], final_row)]
def _check(meta, data, path):
"""
Check keys in data and logs a warning if a key is empty.
Also logs a warning for errors found in file.
"""
if 'loading' in data:
# Some files use an odd format
# We instead remove unreadable values
dels = []
for k, v in data.items():
if not v:
logger.info(f'No data collected for {k} in file {path}.')
if len(v) != len(data['pressure']):
dels.append(k)
for d in dels:
del data[d]
if 'errors' in meta:
logger.warning('Report file contains warnings:')
logger.warning('\n'.join(meta['errors']))
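# Minimal usage sketch (illustrative addition; "report.xls" is a placeholder path):
#
#   meta, data = parse("report.xls")
#   print(meta["material"], meta["temperature"])
#   print(data["pressure"][:5], data["loading"][:5])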
| 2.65625 | 3 |
tools/anki.py | vitkyrka/nordict | 1 | 12786088 | <reponame>vitkyrka/nordict<filename>tools/anki.py
#!/usr/bin/env python3
import argparse
import sys
import os
import re
import hashlib
import pprint
import json
import itertools
import genanki
import attr
class MinimalPairNote(genanki.Note):
@property
def guid(self):
return genanki.guid_for(self.fields[0])
@attr.s
class Feature(object):
slug = attr.ib()
title = attr.ib()
key = attr.ib()
def make_deck(model, feature, words):
deckid = int(hashlib.sha256(feature.title.encode('utf-8')).hexdigest(), 16) % 10**8
deck = genanki.Deck(deckid, feature.title)
for key, g in itertools.groupby(sorted(words, key=feature.key), key=feature.key):
similar = list(g)
if len(similar) == 1:
continue
print(key)
similar = sorted(similar, key=lambda w:w['text'])
print(similar)
extra = '<br>'.join([f'''
<div style="vertical-align: middle; font-size: 1em; font-family: Arial">
[{s["text"]}]
<audio style="vertical-align: middle;" src="{s["audio"]}" controls></audio>
</div>
''' for s in similar])
for s in similar:
note = MinimalPairNote(model=model, fields=[s['text'], s['audio'], extra])
deck.add_note(note)
return deck
def main():
parser = argparse.ArgumentParser()
args = parser.parse_args()
model = genanki.Model(
812794318729,
'Minimal pair',
fields=[
{'name': 'text'},
{'name': 'audio'},
{'name': 'extra'},
],
templates=[
{
'name': 'Card 1',
'qfmt': '''
<center><audio autoplay controls src="{{audio}}"></audio></center>
''',
'afmt': '''
<hr id="answer">
<center>
<audio autoplay controls src="{{audio}}"></audio>
<div style="font-size: 3em; font-family: Arial;">[{{text}}]</div>
<p>{{extra}}
</center>
''',
},
])
with open('words.json', 'r') as f:
words = json.load(f)
features = [
Feature(slug='stød', title='Stød', key=lambda w:w['text'].replace('ˀ', '')),
Feature(slug='front0', title='[i] / [e]', key=lambda w:re.sub('(i|e)ː?', 'X', w['text'])),
Feature(slug='front1', title='[e] / [ε]', key=lambda w:re.sub('(e|ε)ː?', 'X', w['text'])),
Feature(slug='front2', title='[ε] / [εj] / [æ]', key=lambda w:re.sub('(εj|æː?|εː?)', 'X', w['text'])),
Feature(slug='front3', title='[æ] / [a]', key=lambda w:re.sub('(æ|a)ː?', 'X', w['text'])),
Feature(slug='front', title='[i] / [e] / [ε] / [æ] / [a]', key=lambda w:re.sub('(i|e)ː?', 'X', w['text'])),
Feature(slug='fround0', title='[y] / [ø]', key=lambda w:re.sub('(y|ø)ː?', 'X', w['text'])),
Feature(slug='fround1', title='[ø] / [œ]', key=lambda w:re.sub('(ø|œ)ː?', 'X', w['text'])),
Feature(slug='fround2', title='[œ] / [ɶ]', key=lambda w:re.sub('(œ|ɶ)ː?', 'X', w['text'])),
Feature(slug='fround', title='[y] / [ø] / [œ] / [ɶ]', key=lambda w:re.sub('(y|ø|œ|ɶ)ː?', 'X', w['text'])),
Feature(slug='back0', title='[u] / [o]', key=lambda w:re.sub('(u|o)ː?', 'X', w['text'])),
Feature(slug='back1', title='[o] / [ɔ]', key=lambda w:re.sub('(o|ɔ)ː?', 'X', w['text'])),
Feature(slug='back2', title='[ɔ] / [ɒ]', key=lambda w:re.sub('(ɔ|ɒ)ː?', 'X', w['text'])),
Feature(slug='back3', title='[ɒ] / [ʌ]', key=lambda w:re.sub('(ɒ|ʌ)ː?', 'X', w['text'])),
Feature(slug='back', title='[u] / [o] / [ɔ] / / [ɒ] / [ʌ]', key=lambda w:re.sub('(u|o|ɔ|ɒ|ʌ)ː?', 'X', w['text'])),
]
withoutstod = lambda w:w['text'].replace('ˀ', '')
# withoutstod = lambda w:re.sub('(εj?|æː?)', 'X', w['text'])
withoutstod = lambda w:re.sub('(εj|æː|ε|æ|e)', 'X', w['text'])
for i, feature in enumerate(features):
deck = make_deck(model, feature, words)
genanki.Package(deck).write_to_file(feature.slug + '.apkg')
return
if __name__ == '__main__':
main()
| 2.578125 | 3 |
core/serializers/fields/__init__.py | xuhang57/atmosphere | 0 | 12786089 | from .base import ModelRelatedField
| 1.054688 | 1 |
yat-master/pymodule/yat/shell/ssh.py | opengauss-mirror/Yat | 0 | 12786090 | #!/usr/bin/env python
# encoding=utf-8
"""
Copyright (c) 2021 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
import io
from threading import Thread, Lock
import paramiko
import scp
from paramiko.client import MissingHostKeyPolicy
class _Buffer:
"""
Thread-safe Buffer using StringIO
"""
def __init__(self):
self._io = io.StringIO()
self._lock = Lock()
def write(self, data):
try:
self._lock.acquire()
self._io.write(data)
finally:
self._lock.release()
def getvalue(self):
return self._io.getvalue()
class _AllOkPolicy(MissingHostKeyPolicy):
"""
accept all missing host key policy for paramiko
"""
def missing_host_key(self, client, hostname, key):
pass
class SSH:
"""
Ssh client to execute remote shell command and scp files or directories
"""
@staticmethod
def _read_to(stream, buffer):
"""
Read stream to buffer in other thread
"""
def _read_handle():
line = stream.readline()
while line:
buffer.write(line)
line = stream.readline()
thread = Thread(target=_read_handle)
thread.start()
return thread
def __init__(self, user, password, host, port, **kwargs):
self.host = host
self.port = port
self.user = user
self.password = password
self.timeout = kwargs.get('timeout', None)
self._do_connect()
def _do_connect(self):
"""
do the ssh2 session connect with username and password
"""
self._session = paramiko.SSHClient()
self._session.set_missing_host_key_policy(_AllOkPolicy)
self._session.connect(self.host, self.port, self.user, self.password, timeout=self.timeout)
def sh(self, cmd, *params, **kwargs) -> (int, str):
"""
execute shell command in remote host with ssh2 protocol
:param cmd: command in text
:param params: arguments for command format
:param kwargs: named-arguments for command format
:return: (exit_code, output(include stderr and stdout))
"""
channel = self._session.get_transport().open_session()
if len(params) == 0 and len(kwargs) == 0:
real_cmd = cmd
else:
            real_cmd = cmd.format(*params, **kwargs)
        channel.exec_command(real_cmd)
stdout = channel.makefile('r', 40960)
stderr = channel.makefile_stderr('r', 40960)
buffer = _Buffer()
stdout_reader = self._read_to(stdout, buffer)
stderr_reader = self._read_to(stderr, buffer)
stdout_reader.join()
stderr_reader.join()
return_code = channel.recv_exit_status()
return return_code, buffer.getvalue()
def scp_get(self, _from, to, force=False):
"""
get remote directory or files to local
:param _from: the remote path to fetch
:param to: the local path
:param force: force override local exists files
:exception IOError if io error occur
"""
scp.get(self._session.get_transport(), _from, to, recursive=True)
def scp_put(self, _from, to, force=False):
"""
put local file or directory to remote host
:param _from: local file or directory
:param to: remote file or directory
:param force: force override exists files or not
"""
scp.put(self._session.get_transport(), _from, to, recursive=True)
def close(self):
"""
        close the ssh2 session connection; calling methods on a closed SSH instance will raise an error
"""
self._session.close()
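# Minimal usage sketch (illustrative addition; host and credentials are placeholders):
#
#   ssh = SSH("root", "secret", "192.0.2.10", 22, timeout=10)
#   code, output = ssh.sh("ls {0}", "/tmp")
#   ssh.scp_put("./local.txt", "/tmp/remote.txt")
#   ssh.close()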
| 2.078125 | 2 |
offer/53_03_IntegerIdenticalToIndex.py | DevRoss/python-offer-code | 1 | 12786091 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 19-1-24
def solve(array: list):
if not array:
return None
start = 0
end = len(array) - 1
while start <= end:
mid = (start + end) >> 1
if array[mid] == mid:
return mid
elif array[mid] < mid:
start = mid + 1
else:
end = mid - 1
return None
if __name__ == '__main__':
print(solve([-6, -3, -1, 3, 5, 6, 7]))
print(solve([-6, -3, -1, 88, 5, 6, 7]))
print(solve([0, 2, 5, 7, 83]))
print(solve([-1, 0, 2]))
| 3.734375 | 4 |
friteup/models/Post/PostUpdates.py | zero-shubham/friteup-graphql-api- | 14 | 12786092 | from pydantic import BaseModel
from bson import ObjectId
from typing import Any, List
from models.comment import CommentBase
from db.mongodb import get_database
class PostUpdates(BaseModel):
id: str
text: str
title: str
user_id: str
published: bool
up_vote: List[str]
down_vote: List[str]
comment_ids: List[str] = []
@classmethod
async def find_by_id(cls, _id: str):
db = await get_database()
post = await db.posts.find_one({"_id": ObjectId(_id)})
if post:
post["id"] = str(post["_id"])
return PostUpdates(**post)
return None
@classmethod
async def find_by_user_id(cls, user_id):
db = await get_database()
posts_count = await db.posts.count_documents({"user_id": user_id})
posts = await db.posts.find({"user_id": user_id}).to_list(posts_count)
if posts:
all_posts = []
for post in posts:
post["id"] = str(post["_id"])
post["comments"] = await CommentBase.find_by_post_id(
post["id"]
)
all_posts.append(PostUpdates(**post))
return all_posts
return []
async def add_comment(self, comment_id: str):
db = await get_database()
self.comment_ids.append(comment_id)
done = await db.posts.update_one({"_id": ObjectId(self.id)},
{"$set": {
"comment_ids": self.comment_ids
}})
return done
async def delete(self):
db = await get_database()
await CommentBase.delete_all_comments_for_post(self.id)
done = await db.posts.delete_one({"_id": self.id})
return done.acknowledged
async def vote(self, vote_type, user_id):
db = await get_database()
if vote_type == "UP_VOTE":
if user_id in self.up_vote:
self.up_vote.remove(user_id)
else:
if user_id in self.down_vote:
self.down_vote.remove(user_id)
self.up_vote.append(user_id)
elif vote_type == "DOWN_VOTE":
if user_id in self.down_vote:
self.down_vote.remove(user_id)
else:
if user_id in self.up_vote:
self.up_vote.remove(user_id)
self.down_vote.append(user_id)
done = await db.posts.update_one(
{"_id": ObjectId(self.id)},
{"$set": {"up_vote": self.up_vote, "down_vote": self.down_vote}}
)
return done.acknowledged
@classmethod
async def delete_all_posts_for_user(cls, user_id):
db = await get_database()
done = await db.posts.delete_many({"user_id": user_id})
return done.acknowledged
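# Minimal usage sketch (illustrative addition; ids are placeholders and the calls
# must run inside an asyncio event loop with a configured MongoDB connection):
#
#   post = await PostUpdates.find_by_id("5f1d7c...")
#   if post:
#       await post.vote("UP_VOTE", user_id="42")
#       await post.add_comment(comment_id="7abc...")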
| 2.5625 | 3 |
data_loader.py | PAI-SmallIsAllYourNeed/Mixup-AMP | 5 | 12786093 | import csv
import random
import re
import sys
import tqdm
import numpy as np
import torch
from torch.utils.data import TensorDataset
from transformers.tokenization_bert import BertTokenizer
def load_glove_txt(file_path="glove.840B.300d.txt"):
results = {}
num_file = sum([1 for i in open(file_path, "r", encoding='utf8')])
with open(file_path, 'r', encoding='utf8') as infile:
for line in tqdm.tqdm(infile, total=num_file):
data = line.strip().split(' ')
word = data[0]
results[word] = 1
return results
def clean_str(string):
# string = re.sub("[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub("\'s", " \'s", string)
string = re.sub("\'ve", " \'ve", string)
string = re.sub("n\'t", " n\'t", string)
string = re.sub("\'re", " \'re", string)
string = re.sub("\'d", " \'d", string)
string = re.sub("\'ll", " \'ll", string)
string = re.sub('"', " ", string)
string = re.sub("'", " ", string)
string = re.sub("`", " ", string)
string = re.sub(r"\\", " ", string)
string = re.sub(r"[\[\]<>/&#\^$%{}‘\.…*]", " ", string)
# string = re.sub(",", " , ", string)
# string = re.sub("!", " ! ", string)
# string = re.sub("\(", " \( ", string)
# string = re.sub("\)", " \) ", string)
# string = re.sub("\?", " \? ", string)
# string = re.sub("\\\?", "?", string)
# string = re.sub("\s{2,}", " ", string)
# string = re.sub("-", ' ', string)
return string.strip().split()
def shuffle_data(x, y):
idx = list(range(len(x)))
np.random.shuffle(idx)
new_x = []
new_y = []
for id_ in idx:
new_x.append(x[id_])
new_y.append(y[id_])
return new_x, new_y
def read_TREC(cv=None, scale_rate=1):
data = {}
def read(mode):
x, y = [], []
with open("data/TREC/" + mode + ".tsv", "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=None)
for line in reader:
x.append(clean_str(line[0]))
y.append(line[1])
if mode == "train":
label2data = {}
for x_, y_ in zip(x, y):
if y_ not in label2data:
label2data[y_] = [x_]
else:
label2data[y_].append(x_)
new_train_x = []
new_train_y = []
for y_ in label2data.keys():
train_idx = max(int(len(label2data[y_]) * scale_rate), 1)
for x_ in label2data[y_][:train_idx]:
new_train_x.append(x_)
new_train_y.append(y_)
x, y = shuffle_data(new_train_x, new_train_y)
data["train_x"], data["train_y"] = x, y
else:
data["test_x"], data["test_y"] = x, y
read("train")
read("test")
return data
def read_SST1(cv=None, scale_rate=1):
data = {}
def read(mode):
x, y = [], []
with open("data/SST1/" + mode + ".tsv", "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=None)
for line in reader:
y.append(line[1])
x.append(clean_str(line[0]))
# x.append(line[0])
if mode == "train":
with open("data/SST1/stsa.fine.phrases.train", "r", encoding="utf-8", errors='ignore') as f:
for line in f:
y.append(line[0])
x.append(clean_str(line[2:]))
label2data = {}
for x_, y_ in zip(x, y):
if y_ not in label2data:
label2data[y_] = [x_]
else:
label2data[y_].append(x_)
new_train_x = []
new_train_y = []
for y_ in label2data.keys():
train_idx = max(int(len(label2data[y_]) * scale_rate), 1)
for x_ in label2data[y_][:train_idx]:
new_train_x.append(x_)
new_train_y.append(y_)
x, y = shuffle_data(new_train_x, new_train_y)
data["train_x"], data["train_y"] = x, y
else:
data["test_x"], data["test_y"] = x, y
read("train")
read("test")
return data
def read_SST2(cv=None, scale_rate=1):
data = {}
def read(mode):
x, y = [], []
with open("data/SST2/" + mode + ".tsv", "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=None)
for line in reader:
y.append(line[1])
x.append(clean_str(line[0]))
# x.append(line[0])
if mode == "train":
with open("data/SST2/stsa.binary.phrases.train", "r", encoding="utf-8", errors='ignore') as f:
for line in f:
y.append(line[0])
x.append(clean_str(line[2:]))
label2data = {}
for x_, y_ in zip(x, y):
if y_ not in label2data:
label2data[y_] = [x_]
else:
label2data[y_].append(x_)
new_train_x = []
new_train_y = []
for y_ in label2data.keys():
train_idx = max(int(len(label2data[y_]) * scale_rate), 1)
for x_ in label2data[y_][:train_idx]:
new_train_x.append(x_)
new_train_y.append(y_)
x, y = shuffle_data(new_train_x, new_train_y)
data["train_x"], data["train_y"] = x, y
else:
data["test_x"], data["test_y"] = x, y
read("train")
read("test")
return data
def read_SUBJ(cv=0, scale_rate=1):
data = {}
x, y = [], []
with open("data/SUBJ/subj.all", "r", encoding="utf-8", errors='ignore') as f:
# reader = csv.reader(f, delimiter="\t", quotechar=None)
for line in f:
x.append(clean_str(line[2:]))
# x.append(line[0])
y.append(line[0])
idx = list(range(len(x)))
np.random.shuffle(idx)
test_index = cv # 0-9
train_x = []
train_y = []
test_x = []
test_y = []
for i, id_ in enumerate(idx):
index = i % 10
if index == test_index:
test_x.append(x[id_])
test_y.append(y[id_])
else:
train_x.append(x[id_])
train_y.append(y[id_])
label2data = {}
for x_, y_ in zip(train_x, train_y):
if y_ not in label2data:
label2data[y_] = [x_]
else:
label2data[y_].append(x_)
new_train_x = []
new_train_y = []
for y_ in label2data.keys():
train_idx = max(int(len(label2data[y_]) * scale_rate), 1)
for x_ in label2data[y_][:train_idx]:
new_train_x.append(x_)
new_train_y.append(y_)
train_x, train_y = shuffle_data(new_train_x, new_train_y)
data["train_x"], data["train_y"] = train_x, train_y
data["test_x"], data["test_y"] = test_x, test_y
return data
def read_MR(cv=0, scale_rate=1):
data = {}
x, y = [], []
with open("data/MR/rt-polarity.pos", "r", encoding="utf-8") as f:
for line in f:
if line[-1] == "\n":
line = line[:-1]
x.append(clean_str(line))
y.append(1)
with open("data/MR/rt-polarity.neg", "r", encoding="utf-8") as f:
for line in f:
if line[-1] == "\n":
line = line[:-1]
x.append(clean_str(line))
y.append(0)
idx = list(range(len(x)))
np.random.shuffle(idx)
test_index = cv # 0-9
# dev_index = (cv+1)%10
train_x = []
train_y = []
test_x = []
test_y = []
for i, id_ in enumerate(idx):
index = i % 10
if index == test_index:
test_x.append(x[id_])
test_y.append(y[id_])
else:
train_x.append(x[id_])
train_y.append(y[id_])
label2data = {}
for x_, y_ in zip(train_x, train_y):
if y_ not in label2data:
label2data[y_] = [x_]
else:
label2data[y_].append(x_)
new_train_x = []
new_train_y = []
for y_ in label2data.keys():
train_idx = max(int(len(label2data[y_]) * scale_rate), 1)
for x_ in label2data[y_][:train_idx]:
new_train_x.append(x_)
new_train_y.append(y_)
train_x, train_y = shuffle_data(new_train_x, new_train_y)
data["train_x"], data["train_y"] = train_x, train_y
data["test_x"], data["test_y"] = test_x, test_y
return data
def refind_sent(sent, g_dict):
new_sent = []
for word in sent:
if word in g_dict:
new_sent.append(word)
elif '-' in word:
for wd in word.split('-'):
new_sent.append(wd)
elif '\/' in word:
for wd in word.split('\/'):
new_sent.append(wd)
elif word.lower() in g_dict:
new_sent.append(word.lower())
else:
continue
return new_sent
def preprocess_data(data, VOCAB_SIZE, MAX_SENT_LEN, dtype='train'):
x = []
for sent in data[dtype + "_x"]:
sent_tmp = [data['word_to_idx']["<BOS>"]]
for word in sent:
if len(sent_tmp) < MAX_SENT_LEN - 1:
sent_tmp.append(data['word_to_idx'][word])
sent_tmp.append(data['word_to_idx']["<EOS>"])
if len(sent_tmp) < MAX_SENT_LEN:
sent_tmp += [VOCAB_SIZE + 1] * (MAX_SENT_LEN - len(sent_tmp))
x.append(sent_tmp)
y = [data["classes"].index(c) for c in data[dtype + "_y"]]
x = torch.LongTensor(x)
y = torch.LongTensor(y)
return x, y
def load_dataset(options):
mod = sys.modules[__name__]
if options.classifier != 'BERT':
data = getattr(mod, f"read_{options.dataset}")(cv=options.cv, scale_rate=options.scale_rate)
g_dict = load_glove_txt()
for i in range(len(data['train_x'])):
data['train_x'][i] = refind_sent(data['train_x'][i], g_dict)
for i in range(len(data['test_x'])):
data['test_x'][i] = refind_sent(data['test_x'][i], g_dict)
data["vocab"] = sorted(
list(set([w for sent in data["train_x"] + data["test_x"] for w in sent] + ["<BOS>", "<EOS>"])))
data["classes"] = sorted(list(set(data["train_y"])))
data["word_to_idx"] = {w: i for i, w in enumerate(data["vocab"])}
data["idx_to_word"] = {i: w for i, w in enumerate(data["vocab"])}
options.VOCAB_SIZE = len(data["vocab"])
if not hasattr(options, 'MAX_SENT_LEN'):
options.MAX_SENT_LEN = max([len(sent) for sent in data["train_x"] + data["test_x"]])
options.CLASS_SIZE = len(data["classes"])
train_x, train_y = preprocess_data(data, options.VOCAB_SIZE, options.MAX_SENT_LEN, 'train')
train_set = TensorDataset(train_x, train_y)
test_x, test_y = preprocess_data(data, options.VOCAB_SIZE, options.MAX_SENT_LEN, 'test')
test_set = TensorDataset(test_x, test_y)
return train_set, test_set, data
else:
data = {}
dset = getattr(mod, f"{options.dataset}_Processor")(cv=options.cv)
train_examples = dset.train_examples
test_examples = dset.test_examples
data['tokenizer'] = BertTokenizer(vocab_file='./bert-base-uncased/vocab.txt'
, do_basic_tokenize=True)
data["classes"] = sorted(list(set([z.label for z in train_examples])))
options.CLASS_SIZE = len(data["classes"])
options.VOCAB_SIZE = len(data['tokenizer'].vocab)
if not hasattr(options, 'MAX_SENT_LEN'):
setattr(options, 'MAX_SENT_LEN',
max([len(example.text_a.split(' ')) for example in train_examples + test_examples]) + 2)
# print("max",max([len(example.text_a.split(' ')) for example in train_examples + test_examples]))
train_set = _make_data_loader(train_examples, data["classes"], data['tokenizer'], options.MAX_SENT_LEN)
test_set = _make_data_loader(test_examples, data["classes"], data['tokenizer'], options.MAX_SENT_LEN)
return train_set, test_set, data
def _make_data_loader(examples, label_list, tokenizer, MAX_SEQ_LENGTH):
all_features = _convert_examples_to_features(
examples=examples,
label_list=label_list,
max_seq_length=MAX_SEQ_LENGTH,
tokenizer=tokenizer,
output_mode='classification')
all_input_ids = torch.tensor(
[f.input_ids for f in all_features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in all_features], dtype=torch.long)
all_segment_ids = torch.tensor(
[f.segment_ids for f in all_features], dtype=torch.long)
all_label_ids = torch.tensor(
[f.label_id for f in all_features], dtype=torch.long)
all_ids = torch.arange(len(examples))
dataset = TensorDataset(
all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_ids)
return dataset
def _convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
# print(len(input_ids),len(input_mask),len(segment_ids),max_seq_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal
# percent of tokens from each, since if one sequence is very short then each
# token that's truncated likely contains more information than a longer
# sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def csv_reader(filename):
print('read file:', filename)
f = open(filename, 'r', encoding='utf8')
reader = csv.reader(f, delimiter="\t", quotechar=None)
return reader
class InputExample:
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
def __getitem__(self, item):
return [self.input_ids, self.input_mask,
self.segment_ids, self.label_id][item]
class DatasetProcessor:
def get_train_examples(self):
raise NotImplementedError
def get_dev_examples(self):
raise NotImplementedError
def get_test_examples(self):
raise NotImplementedError
def get_labels(self):
raise NotImplementedError
class SST1_Processor(DatasetProcessor):
"""Processor for the SST-5 data set."""
def __init__(self, cv=0):
train_file = "./data/SST1/train.tsv"
test_file = "./data/SST1/test.tsv"
print("processing train_file{},test_file".format(train_file, test_file))
self._train_set, self._test_set = csv_reader(train_file), csv_reader(test_file)
self.train_examples, self.test_examples = self.get_train_examples(), self.get_test_examples()
x, y = [], []
with open("data/SST1/stsa.fine.phrases.train", "r", encoding="utf-8", errors='ignore') as f:
for line in f:
y.append(line[0])
x.append(line[2:])
self.train_examples_extra = self._create_examples(zip(x, y), "train")
self.train_examples = self.train_examples + self.train_examples_extra
def get_train_examples(self):
"""See base class."""
examples = self._create_examples(self._train_set, "train")
print('getting train examples,len = ', len(examples))
return examples
def get_test_examples(self):
"""See base class."""
examples = self._create_examples(self._test_set, "test")
print('getting test examples,len = ', len(examples))
return examples
def get_labels(self):
"""See base class."""
label_set = set()
for example in self.train_examples:
label_set.add(example.label)
return sorted(list(label_set))
def _create_examples(self, dataset, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, data) in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(
guid=guid,
text_a=data[0],
label=data[1]
))
# return examples
return examples
class SST2_Processor(DatasetProcessor):
"""Processor for the SST-5 data set."""
def __init__(self, cv=0):
train_file = "./data/SST2/train.tsv"
test_file = "./data/SST2/test.tsv"
x, y = [], []
with open("data/SST2/stsa.binary.phrases.train", "r", encoding="utf-8", errors='ignore') as f:
for line in f:
y.append(line[0])
x.append(line[2:])
self.train_examples_extra = self._create_examples(zip(x, y), "train")
print("processing train_file{},test_file".format(train_file, test_file))
self._train_set, self._test_set = csv_reader(train_file), csv_reader(test_file)
self.train_examples, self.test_examples = self.get_train_examples(), self.get_test_examples()
self.train_examples = self.train_examples + self.train_examples_extra
def get_train_examples(self):
"""See base class."""
examples = self._create_examples(self._train_set, "train")
print('getting train examples,len = ', len(examples))
return examples
def get_test_examples(self):
"""See base class."""
examples = self._create_examples(self._test_set, "test")
print('getting test examples,len = ', len(examples))
return examples
def get_labels(self):
"""See base class."""
label_set = set()
for example in self.train_examples:
label_set.add(example.label)
return sorted(list(label_set))
def _create_examples(self, dataset, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, data) in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(
guid=guid,
text_a=data[0],
label=data[1]
))
# return examples
return examples
class TREC_Processor(DatasetProcessor):
"""Processor for the SST-5 data set."""
def __init__(self, cv=0):
train_file = "./data/TREC/train.tsv"
test_file = "./data/TREC/test.tsv"
print("processing train_file{},test_file,{}".format(train_file, test_file))
self._train_set, self._test_set = csv_reader(train_file), csv_reader(test_file)
self.train_examples, self.test_examples = self.get_train_examples(), self.get_test_examples()
def get_train_examples(self):
"""See base class."""
examples = self._create_examples(self._train_set, "train")
print('getting train examples,len = ', len(examples))
return examples
def get_test_examples(self):
"""See base class."""
examples = self._create_examples(self._test_set, "test")
print('getting test examples,len = ', len(examples))
return examples
def get_labels(self):
"""See base class."""
label_set = set()
for example in self.train_examples:
label_set.add(example.label)
return sorted(list(label_set))
def _create_examples(self, dataset, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, data) in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(
guid=guid,
text_a=data[0],
label=data[1]
))
# return examples
return examples
class SUBJ_Processor(DatasetProcessor):
"""Processor for the SST-5 data set."""
def __init__(self, cv):
all_file = "./data/SUBJ/data_all.tsv"
print("processing all_file{}".format(all_file))
self._all_set = csv_reader(all_file)
self.train_examples, self.test_examples = self.get_train_examples(cv=cv)
def _read_examples(self):
examples = self._create_examples(self._all_set, "all")
return examples
def get_train_examples(self, cv=0):
"""See base class."""
examples = self._read_examples()
idx = list(range(len(examples)))
np.random.shuffle(idx)
test_index = cv
test_example = []
train_example = []
for i, id_ in enumerate(idx):
index = i % 10
if index == test_index:
test_example.append(examples[id_])
else:
train_example.append(examples[id_])
return train_example, test_example
def get_labels(self):
"""See base class."""
label_set = set()
for example in self.train_examples:
label_set.add(example.label)
return sorted(list(label_set))
def _create_examples(self, dataset, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, data) in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(
guid=guid,
text_a=data[0],
label=data[1]
))
return examples
# return shuffle_data(examples)
class MR_Processor(DatasetProcessor):
"""Processor for the SST-5 data set."""
def __init__(self, cv=0):
pos_file = "./data/MR/rt-polarity.pos"
neg_file = "./data/MR/rt-polarity.neg"
print("processing pos_file:{},neg_file:{}".format(pos_file, neg_file))
self._pos_set, self._neg_set = csv_reader(pos_file), csv_reader(neg_file)
self.train_examples, self.test_examples = self.get_train_examples(cv=cv)
def _read_examples(self):
pos_examples = self._create_examples(self._pos_set, "pos")
neg_examples = self._create_examples(self._neg_set, "neg")
examples = []
for ex in pos_examples:
examples.append(InputExample(
guid=ex.guid,
text_a=ex.text_a,
label=1
))
for ex in neg_examples:
examples.append(InputExample(
guid=ex.guid,
text_a=ex.text_a,
label=0
))
return examples
def get_train_examples(self, cv=0):
"""See base class."""
examples = self._read_examples()
idx = list(range(len(examples)))
np.random.shuffle(idx)
test_index = cv
test_example = []
train_example = []
for i, id_ in enumerate(idx):
index = i % 10
if index == test_index:
test_example.append(examples[id_])
else:
train_example.append(examples[id_])
return train_example, test_example
def get_labels(self):
"""See base class."""
label_set = set()
for example in self.train_examples:
label_set.add(example.label)
return sorted(list(label_set))
def _create_examples(self, dataset, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, data) in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(
guid=guid,
text_a=data[0],
))
return examples
if __name__ == "__main__":
processor = TREC_Processor(cv=2)
print(processor.get_labels())
train = processor.train_examples
for x in train:
print(x.text_a, x.label)
break
# class OPT:
# def __init__(self):
# self.dataset="SUBJ"
# self.cv = "0"
# self.scale_rate=1
# self.MAX_SENT_LEN=-1
# opt = OPT()
# dset = getattr(sys.modules[__name__],'load_dataset')(opt)
# for x in dset[0]:
# print(x)
# break
# from torch.utils.data import DataLoader
# train_loader = DataLoader(dset[0], batch_size=50, shuffle=True)
| 2.625 | 3 |
deep_thought/repl_cli.py | centralityai/repl-demo | 4 | 12786094 | <filename>deep_thought/repl_cli.py
import click
class DeepThought:
def __init__(self, host):
super().__init__()
self.host = host
@property
def answer(self):
print(f"Connecting to {self.host}...")
return 42
@click.command()
@click.option("--host", default="localhost", help="Host to connect to.")
def main(host):
header = "Deep Thought initialised as `cpu`. Type `help(cpu)` for assistance."
footer = ""
scope_vars = {"cpu": DeepThought(host)}
try:
import IPython
except ImportError:
from code import InteractiveConsole
InteractiveConsole(locals=scope_vars).interact(header, footer)
else:
print(header)
IPython.start_ipython(argv=[], user_ns=scope_vars)
print(footer)
if __name__ == "__main__":
main()
| 2.40625 | 2 |
isitup/util/__init__.py | Twi1ightSparkle/matrix | 2 | 12786095 | from . import content
from . import http
from . import matrix
from . import sql
| 1.054688 | 1 |
azurectl/instance/virtual_machine.py | SUSE/azurectl | 9 | 12786096 | # Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from azure.servicemanagement import ConfigurationSetInputEndpoint
from azure.servicemanagement import ConfigurationSet
from azure.servicemanagement import PublicKey
from azure.servicemanagement import LinuxConfigurationSet
from azure.servicemanagement import OSVirtualHardDisk
from azure.storage.blob.baseblobservice import BaseBlobService
# project
from azurectl.defaults import Defaults
from azurectl.azurectl_exceptions import (
AzureCustomDataTooLargeError,
AzureVmCreateError,
AzureVmDeleteError,
AzureVmRebootError,
AzureVmShutdownError,
AzureVmStartError,
AzureStorageNotReachableByCloudServiceError,
AzureImageNotReachableByCloudServiceError
)
class VirtualMachine(object):
"""
Implements creation/deletion and management of virtual
machine instances from a given image name
"""
def __init__(self, account):
self.account = account
self.service = self.account.get_management_service()
def create_linux_configuration(
self, username='azureuser', instance_name=None,
disable_ssh_password_authentication=True,
password=None, custom_data=None, fingerprint=''
):
"""
create a linux configuration
"""
self.__validate_custom_data_length(custom_data)
# The given instance name is used as the host name in linux
linux_config = LinuxConfigurationSet(
instance_name, username, password,
disable_ssh_password_authentication,
custom_data
)
if fingerprint:
ssh_key_file = '/home/' + username + '/.ssh/authorized_keys'
ssh_pub_key = PublicKey(
fingerprint, ssh_key_file
)
linux_config.ssh.public_keys = [ssh_pub_key]
return linux_config
def create_network_configuration(self, network_endpoints):
"""
create a network configuration
"""
network_config = ConfigurationSet()
for endpoint in network_endpoints:
network_config.input_endpoints.input_endpoints.append(endpoint)
network_config.configuration_set_type = 'NetworkConfiguration'
return network_config
def create_network_endpoint(
self, name, public_port, local_port, protocol
):
"""
create a network service endpoint
"""
return ConfigurationSetInputEndpoint(
name, protocol, public_port, local_port
)
def create_instance(
self, cloud_service_name, disk_name, system_config,
network_config=None, label=None, group='production',
machine_size='Small', reserved_ip_name=None
):
"""
create a virtual disk image instance
"""
if not self.__storage_reachable_by_cloud_service(cloud_service_name):
message = [
'The cloud service "%s" and the storage account "%s"',
'are not in the same region, cannot launch an instance.'
]
raise AzureStorageNotReachableByCloudServiceError(
' '.join(message) % (
cloud_service_name, self.account.storage_name()
)
)
if not self.__image_reachable_by_cloud_service(
cloud_service_name, disk_name
):
message = [
'The selected image "%s" is not available',
'in the region of the selected cloud service "%s",',
'cannot launch instance'
]
raise AzureImageNotReachableByCloudServiceError(
' '.join(message) % (
disk_name, cloud_service_name
)
)
deployment_exists = self.__get_deployment(
cloud_service_name
)
if label and deployment_exists:
message = [
'A deployment of the name: %s already exists.',
'Assignment of a label can only happen for the',
'initial deployment.'
]
raise AzureVmCreateError(
' '.join(message) % cloud_service_name
)
if reserved_ip_name and deployment_exists:
message = [
'A deployment of the name: %s already exists.',
'Assignment of a reserved IP name can only happen for the',
'initial deployment.'
]
raise AzureVmCreateError(
' '.join(message) % cloud_service_name
)
storage = BaseBlobService(
self.account.storage_name(),
self.account.storage_key(),
endpoint_suffix=self.account.get_blob_service_host_base()
)
media_link = storage.make_blob_url(
self.account.storage_container(), ''.join(
[
cloud_service_name,
'_instance_', system_config.host_name,
'_image_', disk_name
]
)
)
instance_disk = OSVirtualHardDisk(disk_name, media_link)
instance_record = {
'deployment_name': cloud_service_name,
'network_config': network_config,
'role_name': system_config.host_name,
'role_size': machine_size,
'service_name': cloud_service_name,
'system_config': system_config,
'os_virtual_hard_disk': instance_disk,
'provision_guest_agent': True
}
if network_config:
instance_record['network_config'] = network_config
try:
if deployment_exists:
result = self.service.add_role(
**instance_record
)
else:
instance_record['deployment_slot'] = group
if reserved_ip_name:
instance_record['reserved_ip_name'] = reserved_ip_name
if label:
instance_record['label'] = label
else:
instance_record['label'] = cloud_service_name
result = self.service.create_virtual_machine_deployment(
**instance_record
)
return {
'request_id': format(result.request_id),
'cloud_service_name': cloud_service_name,
'instance_name': system_config.host_name
}
except Exception as e:
raise AzureVmCreateError(
'%s: %s' % (type(e).__name__, format(e))
)
def delete_instance(
self, cloud_service_name, instance_name
):
"""
delete a virtual disk image instance
"""
try:
result = self.service.delete_role(
cloud_service_name, cloud_service_name, instance_name, True
)
return(Defaults.unify_id(result.request_id))
except Exception as e:
raise AzureVmDeleteError(
'%s: %s' % (type(e).__name__, format(e))
)
def shutdown_instance(
self, cloud_service_name, instance_name, deallocate_resources=False
):
"""
        Shuts down the specified virtual disk image instance.
        If deallocate_resources is set to true, the machine shuts down
and releases the compute resources. You are not billed for
the compute resources that this Virtual Machine uses in this case.
If a static Virtual Network IP address is assigned to the
Virtual Machine, it is reserved.
"""
post_shutdown_action = 'Stopped'
if deallocate_resources:
post_shutdown_action = 'StoppedDeallocated'
try:
result = self.service.shutdown_role(
cloud_service_name, cloud_service_name,
instance_name, post_shutdown_action
)
return(Defaults.unify_id(result.request_id))
except Exception as e:
raise AzureVmShutdownError(
'%s: %s' % (type(e).__name__, format(e))
)
def start_instance(
self, cloud_service_name, instance_name
):
"""
Start the specified virtual disk image instance.
"""
try:
result = self.service.start_role(
cloud_service_name, cloud_service_name,
instance_name
)
return(Defaults.unify_id(result.request_id))
except Exception as e:
raise AzureVmStartError(
'%s: %s' % (type(e).__name__, format(e))
)
def reboot_instance(
self, cloud_service_name, instance_name
):
"""
Requests reboot of a virtual disk image instance
"""
try:
result = self.service.reboot_role_instance(
cloud_service_name, cloud_service_name, instance_name
)
return(Defaults.unify_id(result.request_id))
except Exception as e:
raise AzureVmRebootError(
'%s: %s' % (type(e).__name__, format(e))
)
def instance_status(
self, cloud_service_name, instance_name=None
):
"""
Request instance status. An instance can be in different
states like Initializing, Running, Stopped. This method
returns the current state name.
"""
instance_state = 'Undefined'
if not instance_name:
instance_name = cloud_service_name
try:
properties = self.service.get_hosted_service_properties(
service_name=cloud_service_name,
embed_detail=True
)
for deployment in properties.deployments:
for instance in deployment.role_instance_list:
if instance.instance_name == instance_name:
instance_state = instance.instance_status
except Exception:
# if the properties can't be requested due to an error
# the default state value set to Undefined will be returned
pass
return instance_state
def __validate_custom_data_length(self, custom_data):
if (custom_data and (len(custom_data) > self.__max_custom_data_len())):
raise AzureCustomDataTooLargeError(
"The custom data specified is too large. Custom Data must" +
"be less than %d bytes" % self.__max_custom_data_len()
)
return True
def __get_deployment(self, cloud_service_name):
"""
check if the virtual machine deployment already exists.
        Any error other than ResourceNotFound will be treated
as an exception to stop processing
"""
try:
return self.service.get_deployment_by_name(
service_name=cloud_service_name,
deployment_name=cloud_service_name
)
except Exception as e:
if 'ResourceNotFound' in format(e):
return None
raise AzureVmCreateError(
'%s: %s' % (type(e).__name__, format(e))
)
def __cloud_service_location(self, cloud_service_name):
return self.service.get_hosted_service_properties(
cloud_service_name
).hosted_service_properties.location
def __storage_location(self):
return self.service.get_storage_account_properties(
self.account.storage_name()
).storage_service_properties.location
def __image_locations(self, disk_name):
try:
image_properties = self.service.get_os_image(disk_name)
return image_properties.location.split(';')
except Exception:
# if image does not exist return without an exception.
pass
def __storage_reachable_by_cloud_service(self, cloud_service_name):
service_location = self.__cloud_service_location(
cloud_service_name
)
storage_location = self.__storage_location()
        return service_location == storage_location
def __image_reachable_by_cloud_service(self, cloud_service_name, disk_name):
service_location = self.__cloud_service_location(
cloud_service_name
)
image_locations = self.__image_locations(disk_name)
if not image_locations:
return False
        return service_location in image_locations
def __max_custom_data_len(self):
"""
Custom Data is limited to 64K
https://msdn.microsoft.com/library/azure/jj157186.aspx
"""
return 65536
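# --- Hedged usage sketch (editorial addition) ---
# The service class name, its constructor and the system/network config
# helpers are defined earlier in this module (not shown in this excerpt),
# so `vm_service`, `system_config` and `network_config` are illustrative:
#
#   endpoint = vm_service.create_network_endpoint('SSH', 22, 22, 'TCP')
#   result = vm_service.create_instance(
#       cloud_service_name='my-cloud-service',
#       disk_name='my-os-image',
#       system_config=system_config,
#       network_config=network_config,
#       machine_size='Small'
#   )
#   print(vm_service.instance_status('my-cloud-service',
#                                    result['instance_name']))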
| 1.890625 | 2 |
tests/test_updater.py | upciti/debops | 0 | 12786097 | from typing import List, Optional
import pytest
from httpx import AsyncClient
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from ops2deb.exceptions import Ops2debUpdaterError
from ops2deb.logger import enable_debug
from ops2deb.updater import GenericUpdateStrategy, GithubUpdateStrategy
enable_debug(True)
@pytest.fixture
def app_factory():
def _app_response(request: Request):
return Response(status_code=200)
def _app_factory(versions: List[str]):
app = Starlette(debug=True)
for version in versions:
app.add_route(
f"/releases/{version}/some-app.tar.gz", _app_response, ["HEAD", "GET"]
)
return app
return _app_factory
@pytest.fixture
def github_app_factory():
def _github_app_factory(latest_release: str, versions: Optional[List[str]] = None):
versions = versions or []
app = Starlette(debug=True)
@app.route("/owner/name/releases/{version}/some-app.tar.gz")
def github_asset(request: Request):
version = request.path_params["version"]
status = 200 if version in versions or version == latest_release else 404
return Response(status_code=status)
@app.route("/repos/owner/name/releases/latest")
def github_release_api(request: Request):
return JSONResponse({"tag_name": latest_release})
return app
return _github_app_factory
@pytest.mark.parametrize(
"versions,expected_result",
[
(["1.0.0", "1.1.0"], "1.1.0"),
(["1.0.0", "1.1.3"], "1.1.3"),
(["1.0.0", "1.0.1", "1.1.0"], "1.1.0"),
(["1.0.0", "1.1.1", "2.0.0"], "1.1.1"),
(["1.0.0", "2.0.0"], "2.0.0"),
(["1.0.0", "2.0.3"], "2.0.3"),
(["1.0.0", "1.1.0", "2.0.0"], "1.1.0"),
(["1.0.0", "1.0.1", "1.0.2", "1.1.0", "1.1.1"], "1.1.1"),
],
)
async def test_generic_update_strategy_should_find_expected_blueprint_release(
blueprint_factory, app_factory, versions, expected_result
):
blueprint = blueprint_factory(
fetch={
"url": "http://test/releases/{{version}}/some-app.tar.gz",
"sha256": "deadbeef",
}
)
app = app_factory(versions)
async with AsyncClient(app=app) as client:
update_strategy = GenericUpdateStrategy(client)
assert await update_strategy(blueprint) == expected_result
@pytest.mark.parametrize(
"fetch_url,tag_name",
[
("https://github.com/owner/name/releases/{{version}}/some-app.tar.gz", "2.3.0"),
("https://github.com/owner/name/releases/v{{version}}/some-app.tar.gz", "v2.3.0"),
],
)
async def test_github_update_strategy_should_find_expected_blueprint_release(
blueprint_factory, github_app_factory, fetch_url, tag_name
):
app = github_app_factory(tag_name)
blueprint = blueprint_factory(fetch={"url": fetch_url, "sha256": "deadbeef"})
async with AsyncClient(app=app) as client:
update_strategy = GithubUpdateStrategy(client)
assert await update_strategy(blueprint) == "2.3.0"
async def test_github_update_strategy_should_not_return_an_older_version_than_current_one(
blueprint_factory, github_app_factory
):
app = github_app_factory("0.1.0", versions=["1.0.0"])
url = "https://github.com/owner/name/releases/{{version}}/some-app.tar.gz"
blueprint = blueprint_factory(fetch={"url": url, "sha256": "deadbeef"})
async with AsyncClient(app=app) as client:
update_strategy = GithubUpdateStrategy(client)
assert await update_strategy(blueprint) == "1.0.0"
async def test_github_update_strategy_should_fail_gracefully_when_asset_not_found(
blueprint_factory, github_app_factory
):
app = github_app_factory("someapp-v2.3.0")
url = "https://github.com/owner/name/releases/someapp-v{{version}}/some-app.tar.gz"
blueprint = blueprint_factory(fetch={"url": url, "sha256": "deadbeef"})
async with AsyncClient(app=app) as client:
with pytest.raises(Ops2debUpdaterError) as e:
await GithubUpdateStrategy(client)(blueprint)
assert "Failed to determine latest release URL" in str(e)
| 2.15625 | 2 |
fluent_python/function/shallow_copy_test.py | helloTC/LearnPython | 0 | 12786098 | <gh_stars>0
#!/usr/bin/env python
# coding=utf-8
l1 = [3, [24,55,33], (15,14,17)]
l2 = list(l1)
l1.append(100)
l1[1].remove(55)
print('l1:',l1)
print('l2:',l2)
l2[1] += [33,22]
l2[2] += (10,11)
print('l1:',l1)
print('l2:',l2)
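# Expected output (CPython), illustrating shallow-copy semantics:
# l1.append(100) only changes l1, while l1[1].remove(55) mutates the inner
# list shared by both copies:
#   l1: [3, [24, 33], (15, 14, 17), 100]
#   l2: [3, [24, 33], (15, 14, 17)]
# l2[1] += [33, 22] extends the shared list in place, but l2[2] += (10, 11)
# rebinds l2[2] to a brand-new tuple, leaving l1[2] untouched:
#   l1: [3, [24, 33, 33, 22], (15, 14, 17), 100]
#   l2: [3, [24, 33, 33, 22], (15, 14, 17, 10, 11)]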
| 3.203125 | 3 |
jenkins_perf_visualizer/fetch.py | Khan/jenkins-perf-visualizer | 0 | 12786099 | <filename>jenkins_perf_visualizer/fetch.py
"""Routines to fetch data from Jenkins and save it to a data file.
The output data file has all the information needed to make a
visualization graph, and can be given as input to the visualizer
script. It is mostly the html of the "pipeline steps" Jenkins page
for a build, with some additional metadata thrown in.
TODO(csilvers): save two data files, the raw datafile and the json.
"""
import errno
import json
import logging
import os
import re
from jenkins_perf_visualizer import jenkins
from jenkins_perf_visualizer import steps
class DataError(Exception):
"""An error reading or parsing Jenkins data for a build."""
def __init__(self, job, build_id, message):
super(DataError, self).__init__(message)
self.job = job
self.build_id = build_id
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def fetch_from_datafile(fname):
"""Fetch data from the cached data-file, instead of asking Jenkins.
The datafile is one that was saved via a previous call to fetch_build().
"""
with open(fname, 'rb') as f:
step_html = f.read().decode('utf-8')
m = re.search(r'<script>var parameters = (.*?)</script>',
step_html)
build_params = json.loads(m.group(1) if m else '{}')
    # We get the build start time from the file's mtime.
build_start_time = os.path.getmtime(fname)
return (step_html, build_params, build_start_time)
def _fetch_from_jenkins(job, build_id, jenkins_client):
"""Fetch data for the given build from Jenkins."""
try:
build_params = jenkins_client.fetch_build_parameters(job, build_id)
step_html = jenkins_client.fetch_pipeline_steps(job, build_id)
step_root = steps.parse_pipeline_steps(step_html)
if not step_root:
raise DataError(job, build_id, "invalid job? (no steps found)")
build_start_time = jenkins_client.fetch_build_start_time(
job, build_id, step_root.id)
return (step_html, build_params, build_start_time)
except jenkins.HTTPError as e:
raise DataError(job, build_id, "HTTP error: %s" % e)
def fetch_build(job, build_id, output_dir, jenkins_client, force=False):
"""Download, save, and return the data-needed-to-render for one build."""
mkdir_p(output_dir)
outfile = os.path.join(
output_dir, '%s:%s.data' % (job.replace('/', '--'), build_id))
if not force and os.path.exists(outfile):
(step_html, build_params, build_start_time) = fetch_from_datafile(
outfile)
else:
logging.info("Fetching %s:%s", job, build_id)
(step_html, build_params, build_start_time) = _fetch_from_jenkins(
job, build_id, jenkins_client)
with open(outfile, 'wb') as f:
f.write(step_html.encode('utf-8'))
params_text = ('\n\n<script>var parameters = %s</script>'
% json.dumps(build_params))
f.write(params_text.encode('utf-8'))
# Set the last-modified time of this file to its start-time.
os.utime(outfile, (build_start_time, build_start_time))
return (step_html, build_params, build_start_time, outfile)
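# --- Hedged usage sketch (editorial addition) ---
# How fetch_build might be driven; constructing the Jenkins client depends on
# the `jenkins` module of this package and is only assumed here:
#
#   client = jenkins.get_client(...)   # assumed helper, see jenkins.py
#   html, params, start_time, datafile = fetch_build(
#       'deploy/webapp', 1234, '/tmp/perf-data', client)
#   # A later run can reuse the cached datafile without hitting Jenkins:
#   html, params, start_time = fetch_from_datafile(datafile)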
| 3.03125 | 3 |
questions/serializers/section.py | Ivin0022/django-questions | 0 | 12786100 | from rest_framework import serializers
# djangorestframework-recursive
from rest_framework_recursive.fields import RecursiveField
# local
from .question import QuestionSerializer
from ..models import Section
class SectionSerializer(serializers.ModelSerializer):
children = RecursiveField(required=False, allow_null=True, many=True)
question_set = QuestionSerializer(many=True)
class Meta:
model = Section
fields = (
'id',
'url',
'title',
'parent',
'question_set',
'children',
)
| 2.1875 | 2 |
app/user/views.py | PythonDjangoJavascript/advanced_django_rest_api_with_tdd | 0 | 12786101 | from rest_framework import generics
from rest_framework import authentication
from rest_framework import permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerializser
from user.serializers import AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
"""Create a new user in the system"""
serializer_class = UserSerializser
class CreateTokenView(ObtainAuthToken):
"""Create a new auth token for the user"""
serializer_class = AuthTokenSerializer
    # renderer_classes enables the browsable API view
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
"""Mange the authenticatited user"""
serializer_class = UserSerializser
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
"""Related object (here user)"""
return self.request.user
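# --- Hedged wiring sketch (editorial addition) ---
# These views are typically exposed from the app's urls.py; the paths and
# names below are assumptions, not part of this module:
#
#   from django.urls import path
#   from user import views
#
#   app_name = 'user'
#   urlpatterns = [
#       path('create/', views.CreateUserView.as_view(), name='create'),
#       path('token/', views.CreateTokenView.as_view(), name='token'),
#       path('me/', views.ManageUserView.as_view(), name='me'),
#   ]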
| 2.234375 | 2 |
adspygoogle/dfp/DfpWebService.py | hockeyprincess/google-api-dfp-python | 0 | 12786102 | <filename>adspygoogle/dfp/DfpWebService.py
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for sending and recieving SOAP XML requests."""
__author__ = '<EMAIL> (<NAME>)'
import time
from adspygoogle.common import SOAPPY
from adspygoogle.common import Utils
from adspygoogle.common.Errors import Error
from adspygoogle.common.WebService import WebService
from adspygoogle.dfp import DfpSanityCheck
from adspygoogle.dfp import AUTH_TOKEN_EXPIRE
from adspygoogle.dfp import AUTH_TOKEN_SERVICE
from adspygoogle.dfp import LIB_SIG
from adspygoogle.dfp import LIB_URL
from adspygoogle.dfp.DfpErrors import ERRORS
from adspygoogle.dfp.DfpErrors import DfpApiError
from adspygoogle.dfp.DfpErrors import DfpError
from adspygoogle.dfp.DfpSoapBuffer import DfpSoapBuffer
class DfpWebService(WebService):
"""Implements DfpWebService.
  Responsible for sending and receiving SOAP XML requests.
"""
def __init__(self, headers, config, op_config, url, lock, logger=None):
"""Inits DfpWebService.
Args:
headers: dict Dictionary object with populated authentication
credentials.
config: dict Dictionary object with populated configuration values.
op_config: dict Dictionary object with additional configuration values for
this operation.
url: str URL of the web service to call.
lock: thread.lock Thread lock.
logger: Logger Instance of Logger
"""
self.__config = config
self.__op_config = op_config
super(DfpWebService, self).__init__(LIB_SIG, headers, config, op_config,
url, lock, logger)
def __ManageSoap(self, buf, start_time, stop_time, error={}):
"""Manage SOAP XML message.
Args:
buf: SoapBuffer SOAP buffer.
start_time: str Time before service call was invoked.
stop_time: str Time after service call was invoked.
[optional]
error: dict Error, if any.
"""
try:
# Set up log handlers.
handlers = [
{
'tag': 'xml_log',
'name': 'soap_xml',
'data': ''
},
{
'tag': 'request_log',
'name': 'request_info',
'data': str('host=%s service=%s method=%s responseTime=%s '
'requestId=%s'
% (Utils.GetNetLocFromUrl(self._url),
buf.GetServiceName(), buf.GetCallName(),
buf.GetCallResponseTime(), buf.GetCallRequestId()))
},
{
'tag': '',
'name': 'dfp_api_lib',
'data': ''
}
]
fault = super(DfpWebService, self)._ManageSoap(
buf, handlers, LIB_URL, ERRORS, start_time, stop_time, error)
if fault:
# Raise a specific error, subclass of DfpApiError.
if 'detail' in fault:
if 'code' in fault['detail']:
code = int(fault['detail']['code'])
if code in ERRORS: raise ERRORS[code](fault)
elif 'errors' in fault['detail']:
type = fault['detail']['errors'][0]['type']
if type in ERRORS: raise ERRORS[str(type)](fault)
if isinstance(fault, str):
raise DfpError(fault)
elif isinstance(fault, dict):
raise DfpApiError(fault)
except DfpApiError, e:
raise e
except DfpError, e:
raise e
except Error, e:
if error: e = error
raise Error(e)
def CallMethod(self, method_name, params, service_name=None, loc=None,
request=None):
"""Make an API call to specified method.
Args:
method_name: str API method name.
params: list List of parameters to send to the API method.
[optional]
service_name: str API service name.
loc: service Locator.
request: instance Holder of the SOAP request.
Returns:
tuple/str Response from the API method. If 'raw_response' flag enabled a
string is returned, tuple otherwise.
"""
# Acquire thread lock.
self._lock.acquire()
try:
headers = self._headers
config = self._config
config['data_injects'] = ()
error = {}
# Load/set authentication token. If authentication token has expired,
# regenerate it.
now = time.time()
if ((('authToken' not in headers and
'auth_token_epoch' not in config) or
int(now - config['auth_token_epoch']) >= AUTH_TOKEN_EXPIRE)):
headers['authToken'] = Utils.GetAuthToken(
headers['email'], headers['password'], AUTH_TOKEN_SERVICE,
LIB_SIG, config['proxy'])
config['auth_token_epoch'] = time.time()
self._headers = headers
self._config = config
headers = Utils.UnLoadDictKeys(Utils.CleanUpDict(headers),
['email', 'password'])
name_space = '/'.join(['https://www.google.com/apis/ads/publisher',
self._op_config['version']])
config['ns_target'] = (name_space, 'RequestHeader')
# Load new authentication headers, starting with version v201103.
data_injects = []
if self.__op_config['version'] > 'v201101':
new_headers = {}
for key in headers:
if key == 'authToken' and headers[key]:
if config['soap_lib'] == SOAPPY:
data_injects.append(
('<authentication>',
'<authentication xsi3:type="ClientLogin">'))
config['data_injects'] = tuple(data_injects)
else:
config['auth_type'] = 'ClientLogin'
new_headers['authentication'] = {'token': headers['authToken']}
elif key == 'oAuthToken' and headers[key]:
# TODO(api.sgrinberg): Add support for OAuth.
pass
else:
new_headers[key] = headers[key]
headers = new_headers
buf = DfpSoapBuffer(
xml_parser=self._config['xml_parser'],
pretty_xml=Utils.BoolTypeConvert(self._config['pretty_xml']))
start_time = time.strftime('%Y-%m-%d %H:%M:%S')
response = super(DfpWebService, self).CallMethod(
headers, config, method_name, params, buf,
DfpSanityCheck.IsJaxbApi(self._op_config['version']), LIB_SIG,
LIB_URL, service_name, loc, request)
stop_time = time.strftime('%Y-%m-%d %H:%M:%S')
# Restore list type which was overwritten by SOAPpy.
if config['soap_lib'] == SOAPPY and isinstance(response, tuple):
from adspygoogle.common.soappy import MessageHandler
holder = []
for element in response:
holder.append(MessageHandler.RestoreListType(
element, ('results', 'afcFormats', 'sizes', 'targetedAdUnitIds',
'excludedAdUnitIds', 'targetedPlacementIds',
'frequencyCaps', 'creativeSizes')))
response = tuple(holder)
if isinstance(response, dict) or isinstance(response, Error):
error = response
if not Utils.BoolTypeConvert(self.__config['raw_debug']):
self.__ManageSoap(buf, start_time, stop_time, error)
finally:
# Release thread lock.
if self._lock.locked():
self._lock.release()
if Utils.BoolTypeConvert(self._config['raw_response']):
return response
return response
def CallRawMethod(self, soap_message):
"""Make an API call by posting raw SOAP XML message.
Args:
soap_message: str SOAP XML message.
Returns:
tuple Response from the API method.
"""
# Acquire thread lock.
self._lock.acquire()
try:
buf = DfpSoapBuffer(
xml_parser=self._config['xml_parser'],
pretty_xml=Utils.BoolTypeConvert(self._config['pretty_xml']))
super(DfpWebService, self).CallRawMethod(
buf, Utils.GetNetLocFromUrl(self._op_config['server']), soap_message)
self.__ManageSoap(buf, self._start_time, self._stop_time,
{'data': buf.GetBufferAsStr()})
finally:
# Release thread lock.
if self._lock.locked():
self._lock.release()
return (self._response,)
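# --- Hedged usage sketch (editorial addition) ---
# Callers normally reach this class through the generated service wrappers;
# a direct call would look roughly like this, with the headers/config/
# op_config dictionaries and the service URL supplied by the surrounding
# client library:
#
#   import thread
#   service = DfpWebService(headers, config, op_config, url,
#                           thread.allocate_lock())
#   response = service.CallMethod('getUsersByStatement',
#                                 ({'query': 'LIMIT 10'},))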
| 1.976563 | 2 |
github/types/headers.py | appheap/PyGithub | 1 | 12786103 | <filename>github/types/headers.py
from dataclasses import dataclass
from typing import Optional
from .object import Object
@dataclass
class Headers(Object):
allow_control_allow_origin: Optional['str']
access_control_expose_headers: Optional['str']
cache_control: Optional['str']
content_encoding: Optional['str']
content_security_policy: Optional['str']
content_type: Optional['str']
date: Optional['str']
etag: Optional['str']
last_modified: Optional['str']
referrer_policy: Optional['str']
server: Optional['str']
strict_transport_security: Optional['str']
transfer_encoding: Optional['str']
vary: Optional['str']
x_accepted_oauth_scopes: Optional['str']
x_content_type_options: Optional['str']
x_frame_options: Optional['str']
x_github_media_type: Optional['str']
x_github_request_id: Optional['str']
x_oauth_scopes: Optional['str']
x_ratelimit_limit: Optional['int']
x_ratelimit_remaining: Optional['int']
x_ratelimit_reset: Optional['int']
x_ratelimit_resource: Optional['str']
x_ratelimit_used: Optional['int']
x_xss_protection: Optional['str']
@staticmethod
def _parse(headers: dict) -> Optional['Headers']:
if headers is None or not len(headers):
return None
return Headers(
allow_control_allow_origin=headers.get('Access-Control-Allow-Origin', None),
access_control_expose_headers=headers.get('Access-Control-Expose-Headers', None),
cache_control=headers.get('Cache-Control', None),
content_encoding=headers.get('Content-Encoding', None),
content_security_policy=headers.get('Content-Security-Policy', None),
content_type=headers.get('Content-Type', None),
date=headers.get('Date', None),
etag=headers.get('ETag', None),
last_modified=headers.get('Last-Modified', None),
referrer_policy=headers.get('Referrer-Policy', None),
server=headers.get('Server', None),
strict_transport_security=headers.get('Strict-Transport-Security', None),
transfer_encoding=headers.get('Transfer-Encoding', None),
vary=headers.get('Vary', None),
x_accepted_oauth_scopes=headers.get('X-Accepted-OAuth-Scopes', None),
x_content_type_options=headers.get('X-Content-Type-Options', None),
x_frame_options=headers.get('X-Frame-Options', None),
x_github_media_type=headers.get('X-GitHub-Media-Type', None),
x_github_request_id=headers.get('X-GitHub-Request-Id', None),
x_oauth_scopes=headers.get('X-OAuth-Scopes', None),
            # int() would raise on a missing header, so convert only when the header is present
            x_ratelimit_limit=int(headers['X-RateLimit-Limit']) if 'X-RateLimit-Limit' in headers else None,
            x_ratelimit_remaining=int(headers['X-RateLimit-Remaining']) if 'X-RateLimit-Remaining' in headers else None,
            x_ratelimit_reset=int(headers['X-RateLimit-Reset']) if 'X-RateLimit-Reset' in headers else None,
            x_ratelimit_resource=headers.get('X-RateLimit-Resource', None),
            x_ratelimit_used=int(headers['X-RateLimit-Used']) if 'X-RateLimit-Used' in headers else None,
x_xss_protection=headers.get('X-XSS-Protection', None),
)
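# --- Hedged usage sketch (editorial addition) ---
# Parsing a minimal response-header dict; any header missing from the dict
# simply comes back as None on the resulting Headers instance:
#
#   parsed = Headers._parse({
#       'Content-Type': 'application/json; charset=utf-8',
#       'X-RateLimit-Limit': '5000',
#       'X-RateLimit-Remaining': '4999',
#   })
#   # parsed.content_type       -> 'application/json; charset=utf-8'
#   # parsed.x_ratelimit_limit  -> 5000
#   # parsed.x_ratelimit_reset  -> None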
| 2.359375 | 2 |
utiles/__init__.py | steinvenic/escience | 1 | 12786104 | <reponame>steinvenic/escience<filename>utiles/__init__.py
#!/usr/bin/env python
# encoding: utf-8
"""
@version: python2.7
@author: eric
@license: Apache Licence
@contact: <EMAIL>
@site: 00123.ml:8000
@software: PyCharm
@file: __init__.py.py
@time: 18/4/24 1:20 PM
"""
| 0.957031 | 1 |
surrortg/inputs/input.py | SurrogateInc/surrortg-sdk | 21 | 12786105 | from abc import ABC, abstractmethod
from .input_config import assert_on_screen_position, convert_enums_to_values
class Input(ABC):
"""Base class for all user inputs
Read more about defaults from input_config.py
"""
def __init__(self, defaults=None):
if defaults:
self.validate_defaults(defaults)
self.defaults = defaults
@abstractmethod
async def _on_input(self, command, seat):
"""Implements the spesific Input functionality
:param command: Command from the game engine
:type command: dict
:param seat: Robot seat
:type seat: int
"""
pass
@abstractmethod
async def reset(self, seat):
"""Reset functionality for the Input
:param seat: Robot seat
:type seat: int
"""
pass
async def shutdown(self, seat):
"""Input shutdown method. Defaults to calling reset.
:param seat: Robot seat
:type seat: int
"""
await self.reset(seat)
@abstractmethod
def _get_default_keybinds(self):
"""Returns the default keybind(s) for the input
Returns the default inputs in the correct format that the protocol
expects. The format depends on the input type.
"""
def validate_defaults(self, defaults):
assert isinstance(defaults, dict), "defaults needs to be dictionary"
assert "humanReadableName" not in defaults or isinstance(
defaults["humanReadableName"], str
), "humanReadableName must be a string"
if "onScreenPosition" in defaults:
assert_on_screen_position(defaults["onScreenPosition"])
def get_defaults_dict(self):
"""Returns the default input config"""
if hasattr(self, "defaults") and self.defaults:
return convert_enums_to_values(self.defaults)
return self._get_default_keybinds()
@abstractmethod
def get_name(self):
"""Returns the name of the input
:return: name of the input
:rtype: str
"""
pass
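# --- Hedged usage sketch (editorial addition) ---
# A minimal concrete subclass; the class name, command handling and printout
# below are purely illustrative and not part of the SDK:
#
#   class HonkButton(Input):
#       async def _on_input(self, command, seat):
#           print(f"seat {seat} received {command}")
#
#       async def reset(self, seat):
#           print(f"seat {seat} reset")
#
#       def _get_default_keybinds(self):
#           return []
#
#       def get_name(self):
#           return "honkButton"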
| 3.875 | 4 |
geoportal/tests/functional/test_dbreflection.py | rbovard/c2cgeoportal | 43 | 12786106 | <reponame>rbovard/c2cgeoportal
# Copyright (c) 2013-2019, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
# pylint: disable=missing-docstring,attribute-defined-outside-init,protected-access
from unittest import TestCase
from tests.functional import setup_common as setup_module
from tests.functional import teardown_common as teardown_module # noqa
from c2cgeoportal_geoportal.lib.caching import init_region
class TestReflection(TestCase):
_tables = None
def setup_method(self, _):
setup_module()
# Always see the diff
# https://docs.python.org/2/library/unittest.html#unittest.TestCase.maxDiff
self.maxDiff = None
self.metadata = None
def teardown_method(self, _):
if self._tables is not None:
for table in self._tables[::-1]:
table.drop()
def _create_table(self, tablename):
"""
Test functions use this function to create a table object.
        Each test function should call this function only once, and no two test
        functions should call it with the same tablename value.
"""
from geoalchemy2 import Geometry
from sqlalchemy import Column, ForeignKey, Table, types
from c2cgeoportal_commons.models.main import Base
if self._tables is None:
self._tables = []
ctable = Table(
f"{tablename!s}_child",
Base.metadata,
Column("id", types.Integer, primary_key=True),
Column("name", types.Unicode),
schema="public",
)
ctable.create(checkfirst=True)
self._tables.append(ctable)
ptable = Table(
tablename,
Base.metadata,
Column("id", types.Integer, primary_key=True),
Column("child1_id", types.Integer, ForeignKey(f"public.{tablename!s}_child.id")),
Column(
"child2_id",
types.Integer,
ForeignKey(f"public.{tablename!s}_child.id"),
nullable=False,
),
Column("point", Geometry("POINT")),
Column("linestring", Geometry("LINESTRING")),
Column("polygon", Geometry("POLYGON")),
Column("multipoint", Geometry("MULTIPOINT")),
Column("multilinestring", Geometry("MULTILINESTRING")),
Column("multipolygon", Geometry("MULTIPOLYGON")),
schema="public",
)
ptable.create(checkfirst=True)
self._tables.append(ptable)
self.metadata = Base.metadata
def test_get_class_nonexisting_table(self):
from sqlalchemy.exc import NoSuchTableError
from c2cgeoportal_geoportal.lib.dbreflection import get_class
self.assertRaises(NoSuchTableError, get_class, "nonexisting")
def test_get_class(self):
from geoalchemy2 import Geometry
from c2cgeoportal_geoportal.lib.dbreflection import _AssociationProxy, get_class
init_region({"backend": "dogpile.cache.memory"}, "std")
init_region({"backend": "dogpile.cache.memory"}, "obj")
self._create_table("table_a")
modelclass = get_class("table_a")
# test the class
assert modelclass.__name__.startswith("Table_a_")
self.assertEqual(modelclass.__table__.name, "table_a")
self.assertEqual(modelclass.__table__.schema, "public")
self.assertTrue(isinstance(modelclass.point.type, Geometry))
self.assertTrue(isinstance(modelclass.linestring.type, Geometry))
self.assertTrue(isinstance(modelclass.polygon.type, Geometry))
self.assertTrue(isinstance(modelclass.multipoint.type, Geometry))
self.assertTrue(isinstance(modelclass.multilinestring.type, Geometry))
self.assertTrue(isinstance(modelclass.multipolygon.type, Geometry))
self.assertTrue(isinstance(modelclass.child1, _AssociationProxy))
self.assertTrue(modelclass.child1.nullable)
self.assertEqual(modelclass.child1_id.info.get("association_proxy"), "child1")
self.assertTrue(isinstance(modelclass.child2, _AssociationProxy))
self.assertFalse(modelclass.child2.nullable)
self.assertEqual(modelclass.child2_id.info.get("association_proxy"), "child2")
child1_asso_proxy = getattr(modelclass, modelclass.child1_id.info["association_proxy"])
self.assertEqual("name", child1_asso_proxy.value_attr)
self.assertEqual("name", child1_asso_proxy.order_by)
# test the Table object
table = modelclass.__table__
self.assertTrue("id" in table.c)
self.assertTrue("child1_id" in table.c)
self.assertTrue("child2_id" in table.c)
self.assertTrue("point" in table.c)
self.assertTrue("linestring" in table.c)
self.assertTrue("polygon" in table.c)
self.assertTrue("multipoint" in table.c)
self.assertTrue("multilinestring" in table.c)
self.assertTrue("multipolygon" in table.c)
col_child1_id = table.c["child1_id"]
self.assertEqual(col_child1_id.name, "child1_id")
col_child2_id = table.c["child2_id"]
self.assertEqual(col_child2_id.name, "child2_id")
col_point = table.c["point"]
self.assertEqual(col_point.name, "point")
self.assertEqual(col_point.type.geometry_type, "POINT")
col_linestring = table.c["linestring"]
self.assertEqual(col_linestring.name, "linestring")
self.assertEqual(col_linestring.type.geometry_type, "LINESTRING")
col_polygon = table.c["polygon"]
self.assertEqual(col_polygon.name, "polygon")
self.assertEqual(col_polygon.type.geometry_type, "POLYGON")
col_multipoint = table.c["multipoint"]
self.assertEqual(col_multipoint.name, "multipoint")
self.assertEqual(col_multipoint.type.geometry_type, "MULTIPOINT")
col_multilinestring = table.c["multilinestring"]
self.assertEqual(col_multilinestring.name, "multilinestring")
self.assertEqual(col_multilinestring.type.geometry_type, "MULTILINESTRING")
col_multipolygon = table.c["multipolygon"]
self.assertEqual(col_multipolygon.name, "multipolygon")
self.assertEqual(col_multipolygon.type.geometry_type, "MULTIPOLYGON")
assert get_class("table_a") is modelclass
def test_get_class_dotted_notation(self):
from c2cgeoportal_geoportal.lib.dbreflection import get_class
self._create_table("table_b")
modelclass = get_class("public.table_b")
assert modelclass.__name__.startswith("Table_b_")
self.assertEqual(modelclass.__table__.name, "table_b")
self.assertEqual(modelclass.__table__.schema, "public")
def test_mixing_get_class_and_queries(self):
"""
This test shows that we can mix the use of DBSession and the db reflection API.
"""
import transaction
from sqlalchemy import text
from c2cgeoportal_commons.models import DBSession
from c2cgeoportal_geoportal.lib.dbreflection import get_class
self._create_table("table_c")
DBSession.execute(text("SELECT id FROM table_c"))
modelclass = get_class("table_c")
assert modelclass.__name__.startswith("Table_c_")
# This commits the transaction created by DBSession.execute. This
# is required here in the test because tearDown does table.drop,
# which will block forever if the transaction is not committed.
transaction.commit()
def test_get_class_exclude_properties(self):
from c2cgeoportal_geoportal.lib.dbreflection import get_class
self._create_table("table_d")
assert get_class("table_d", exclude_properties=["foo", "bar"]) is not None
def test_get_class_attributes_order(self):
from c2cgeoportal_geoportal.lib.dbreflection import get_class
attributes_order = ["child1_id", "point", "child2_id"]
self._create_table("table_d")
cls = get_class("table_d", attributes_order=attributes_order)
self.assertEqual(attributes_order, cls.__attributes_order__)
def test_get_class_enumerations_config(self):
from c2cgeoportal_geoportal.lib.dbreflection import get_class
enumerations_config = {"child1_id": {"value": "id", "order_by": "name"}}
self._create_table("table_d")
cls = get_class("table_d", enumerations_config=enumerations_config)
self.assertEqual(enumerations_config, cls.__enumerations_config__)
association_proxy = getattr(cls, cls.child1_id.info["association_proxy"])
self.assertEqual("id", association_proxy.value_attr)
self.assertEqual("name", association_proxy.order_by)
# Without order_by.
enumerations_config = {"child1_id": {"value": "id"}}
cls = get_class("table_d", enumerations_config=enumerations_config)
association_proxy = getattr(cls, cls.child1_id.info["association_proxy"])
self.assertEqual("id", association_proxy.value_attr)
self.assertEqual("id", association_proxy.order_by)
def test_get_class_readonly_attributes(self):
from c2cgeoportal_geoportal.lib.dbreflection import get_class
readonly_attributes = ["child1_id", "point"]
self._create_table("table_d")
cls = get_class("table_d", readonly_attributes=readonly_attributes)
self.assertEqual(True, cls.child1_id.info.get("readonly"))
self.assertEqual(True, cls.point.info.get("readonly"))
| 1.234375 | 1 |
icebook/apps/users/forms.py | RohanJnr/IceBook-social_media-django- | 19 | 12786107 | from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import CustomUser, Profile
class CustomUserCreationForm(UserCreationForm):
class Meta:
model = CustomUser
fields = ("first_name", "last_name", "email")
class CustomUserChangeForm(UserChangeForm):
class Meta:
model = CustomUser
fields = ("first_name", "last_name", "email")
class CustomUserLoginForm(forms.Form):
email = forms.EmailField(widget=forms.EmailInput(attrs={"autofocus": True}))
password = forms.CharField(
strip=False,
widget=forms.PasswordInput(attrs={"autocomplete": "current-password"}),
)
def clean(self) -> None:
if self.is_valid():
email = self.cleaned_data["email"]
password = self.cleaned_data["password"]
user = authenticate(email=email, password=password)
if not user:
raise forms.ValidationError("Invalid login credentials!!", "invalid")
class ProfileCreationForm(forms.ModelForm):
class Meta:
model = Profile
exclude = ["user"]
widgets = {"bio": forms.Textarea(attrs={"cols": 80, "rows": 20})}
| 2.5625 | 3 |
src/project/urls.py | jmaslanka/movie-db | 0 | 12786108 | from django.contrib import admin
from django.urls import path, include
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions
schema_view = get_schema_view(
openapi.Info(
title='Movie DB API',
default_version='v1',
description='API to fetch movie data.',
),
public=True,
permission_classes=(permissions.AllowAny,),
authentication_classes=(),
)
docs_urlpatterns = [
path(
'docs/',
schema_view.with_ui('swagger', cache_timeout=0),
name='schema-swagger',
),
path(
        'docs-redoc/',
schema_view.with_ui('redoc', cache_timeout=0),
name='schema-redoc',
),
]
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('movie.urls', namespace='movie')),
path(
'api-auth/',
include('rest_framework.urls', namespace='rest_framework')
),
] + docs_urlpatterns
| 1.929688 | 2 |
WHI_long_term_detectable_notch_vs_VED_plotting.py | annahs/atmos_research | 2 | 12786109 | import sys
import os
import pickle
import math
import numpy as np
import matplotlib.pyplot as plt
from pprint import pprint
os.chdir('C:/Users/<NAME>/Documents/Data/WHI long term record/coatings/')
file = open('fraction of detectable notch positions by BC core size - aged.pickl', 'r')
fractions_detectable_aged = pickle.load(file)
file.close()
os.chdir('C:/Users/<NAME>/Documents/Data/WHI long term record/coatings/')
file = open('fraction of detectable notch positions by BC core size - fresh.pickl', 'r')
fractions_detectable_fresh = pickle.load(file)
file.close()
fractions_detectable_fresh.pop(0) #get rid of 65-70 bin, since no data really here
fractions_detectable_aged.pop(0) #get rid of 65-70 bin, since no data really here
pprint(fractions_detectable_aged)
pprint(fractions_detectable_fresh)
##plotting
bins_aged = [row[0] for row in fractions_detectable_aged]
fractions_aged = [row[1] for row in fractions_detectable_aged]
bins_fresh = [row[0] for row in fractions_detectable_fresh]
fractions_fresh = [row[1] for row in fractions_detectable_fresh]
#####plotting
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(bins_aged, fractions_aged, color = 'b', label = 'Background')
ax.scatter(bins_fresh, fractions_fresh, color = 'r', label = 'Fresh emissions')
ax.set_ylim(0,1.0)
ax.set_ylabel('fraction of particles with detectable notch position')
ax.set_xlabel('rBC core VED (nm)')
#ax.axvline(95, color='g', linestyle='-')
ax.axvline(155, color='r', linestyle='--')
ax.axvline(180, color='r', linestyle='--')
plt.legend(loc = 2)
os.chdir('C:/Users/<NAME>/Documents/Data/WHI long term record/coatings/')
plt.savefig('fraction of particles with detectable zero-crossing', bbox_inches='tight')
plt.show()
| 2.390625 | 2 |
flask1/flask1.py | fangxian/flask_exercise | 0 | 12786110 | <gh_stars>0
from flask import Flask, flash, render_template, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from forms import RegisterForm, LoginForm, EditArticleForm
from flask_login import login_user
from datetime import datetime
import pymysql
from flask_bootstrap import Bootstrap
app = Flask(__name__)
app.config['SECRET_KEY'] = 'register page'
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:[email protected]:8889/flask1'
db = SQLAlchemy(app)
bootstrap = Bootstrap(app)
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True)
email = db.Column(db.String(20), unique=True, index=True)
password = db.Column(db.String(20))
    add_time = db.Column(db.DateTime, default=datetime.utcnow)  # pass the callable, not a fixed import-time value
def __repr__(self):
return 'user table'
class Article(db.Model):
__tablename__ = 'articles'
id = db.Column(db.Integer, primary_key=True)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    author = db.relationship('User', backref='articles')
body = db.Column(db.Text)
    add_time = db.Column(db.DateTime, default=datetime.utcnow)  # pass the callable, not a fixed import-time value
def __repr__(self):
return "this is %s" % self.__tablename__
@app.route('/')
def hello_world():
return 'Hello World!'
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
user = User(
email=form.email.data,
username=form.username.data,
password=form.password.data
)
db.session.add(user)
db.session.commit()
            flash(u'Registration successful')
return redirect(url_for('hello_world'))
flash('email has been registered')
return render_template('register.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None:
if user.password == form.password.data:
login_user(user)
                flash(u'Login successful')
return redirect(url_for('hello_world'))
else:
                flash(u'Incorrect password')
else:
            flash(u'Account not found')
return render_template('login.html', form=form)
@app.route('/logout')
def logout():
pass
@app.route('/article_list')
def article_list():
# display all article
return render_template('article_list.html')
@app.route('/add_article', methods=['GET', 'POST'])
def add_article():
    form = EditArticleForm()
    if form.validate_on_submit():
        # assumes EditArticleForm exposes a `body` field; adjust to the real form definition
        article = Article(body=form.body.data)
        db.session.add(article)
        db.session.commit()
        flash(u'Article published')
        return redirect(url_for('article_list'))
    return render_template('add_article.html', form=form)
if __name__ == '__main__':
db.create_all()
app.run()
| 2.65625 | 3 |
app/vulnerability/routes.py | sbs2001/vulncode-db | 1 | 12786111 | <gh_stars>1-10
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import urllib.error
import urllib.parse
import urllib.request
import ssl
from flask import (
Blueprint,
redirect,
flash,
request,
render_template,
abort,
url_for,
Response,
send_file,
make_response,
g,
)
from sqlalchemy import or_
from sqlalchemy.orm import make_transient
from werkzeug.routing import RequestRedirect
from flask_bouncer import ensure
from bouncer.constants import EDIT, CREATE
from app import flash_error
from app.auth.acls import admin_required, requires, skip_authorization, bouncer
from app.exceptions import InvalidIdentifierException
from app.vulnerability.views.details import VulnerabilityDetails
import cfg
from app.vulnerability.views.vulnerability import VulnerabilityView
from data.models import RepositoryFilesSchema, Vulnerability
from data.forms import VulnerabilityDeleteForm, VulnerabilityDetailsForm, VulnerabilityProposalReject, \
VulnerabilityProposalApprove, VulnerabilityProposalAssign, VulnerabilityProposalPublish, \
VulnerabilityProposalUnassign
from data.database import DEFAULT_DATABASE
from data.models.vulnerability import VulnerabilityState, ASSIGN, APPROVE, REJECT
from lib.vcs_management import get_vcs_handler
from lib.utils import create_json_response
bp = Blueprint("vuln", __name__, url_prefix="/")
db = DEFAULT_DATABASE
def view_vuln(vcdb_id, use_template):
try:
vulnerability_details = VulnerabilityDetails(vcdb_id)
vulnerability_details.validate_and_simplify_id()
if not vulnerability_details.vulnerability_view:
abort(404)
except InvalidIdentifierException as err:
return flash_error(str(err), "frontend.serve_index")
return render_template(use_template,
vulnerability_details=vulnerability_details)
@bp.route("/vuln", methods=["POST"])
def vuln_view_post():
return view_vuln(None, "vulnerability/view_overview.html")
def _get_vulnerability_details(vcdb_id, vuln_id=None,
simplify_id: bool = True):
try:
vulnerability_details = VulnerabilityDetails(vcdb_id, vuln_id)
if simplify_id:
vulnerability_details.validate_and_simplify_id()
# Drop everything else.
if not vulnerability_details.vulnerability_view:
abort(404)
return vulnerability_details
except InvalidIdentifierException:
abort(404)
@bp.route("/<vcdb_id>/review/<vuln_id>", methods=["GET", "POST"])
@skip_authorization # authz is done inline
def vuln_review(vcdb_id, vuln_id):
vulnerability_details = _get_vulnerability_details(vcdb_id,
simplify_id=False)
view = vulnerability_details.vulnerability_view
vuln = vulnerability_details.get_or_create_vulnerability()
proposal_vulnerability_details = _get_vulnerability_details(
None, vuln_id=vuln_id, simplify_id=False)
proposal_view = proposal_vulnerability_details.vulnerability_view
proposal_vuln = proposal_vulnerability_details.get_or_create_vulnerability(
)
form_reject = VulnerabilityProposalReject()
form_approve = VulnerabilityProposalApprove()
form_assign = VulnerabilityProposalAssign()
form_unassign = VulnerabilityProposalUnassign()
form_publish = VulnerabilityProposalPublish()
if request.method == 'POST':
if request.form[
"review_response"] == "assign" and form_assign.validate_on_submit(
):
ensure(ASSIGN, proposal_vuln)
if proposal_vuln.is_reviewable():
proposal_vuln.accept_review(g.user)
db.session.add(proposal_vuln)
db.session.commit()
flash("The review was successfully assigned to you.",
"success")
return redirect(request.url)
else:
flash_error("This entry is not in a reviewable state.")
if request.form[
"review_response"] == "unassign" and form_unassign.validate_on_submit(
):
ensure(ASSIGN, proposal_vuln)
if proposal_vuln.is_reviewer(g.user):
proposal_vuln.deny_review()
db.session.add(proposal_vuln)
db.session.commit()
flash("You successfully unassigned yourself from this review.",
"success")
return redirect(request.url)
else:
flash_error("This entry is not assigned to you.")
if request.form[
"review_response"] == "approve" and form_approve.validate_on_submit(
):
ensure(APPROVE, proposal_vuln)
proposal_vuln.accept_change()
db.session.add(proposal_vuln)
db.session.commit()
flash(
"You approved the proposal. Waiting for the entry to be published by an admin.",
"success")
return redirect(request.url)
if request.form[
"review_response"] == "reject" and form_reject.validate_on_submit(
):
ensure(REJECT, proposal_vuln)
proposal_vuln.deny_change(form_reject.data["review_feedback"])
db.session.add(proposal_vuln)
db.session.commit()
flash("Waiting for the author to address your feedback.",
"success")
return redirect(request.url)
if request.form[
"review_response"] == "publish" and form_publish.validate_on_submit(
):
ensure('PUBLISH', proposal_vuln)
proposal_vuln.publish_change()
db.session.add(proposal_vuln)
db.session.commit()
# This might be the first entry of its kind so no archiving is necessary.
if vuln.state:
vuln.archive_entry()
db.session.add(vuln)
db.session.commit()
flash("Entry was successfully published.", "success")
return redirect(request.url)
# Published entries can't be reviewed.
# if view.state == VulnerabilityState.PUBLISHED:
# raise RequestRedirect("/" + str(vcdb_id))
return render_template(
"vulnerability/review/review.html",
proposal_vulnerability_details=proposal_vulnerability_details,
vulnerability_details=vulnerability_details,
form_assign=form_assign,
form_unassign=form_unassign,
form_reject=form_reject,
form_approve=form_approve,
form_publish=form_publish)
# Create a catch all route for vulnerability identifiers.
@bp.route("/<vcdb_id>")
@skip_authorization
def vuln_view(vcdb_id=None):
vulnerability_details = _get_vulnerability_details(vcdb_id)
view = vulnerability_details.vulnerability_view
use_template = "vulnerability/view_details.html"
if view.annotated:
use_template = "vulnerability/view_overview.html"
return render_template(use_template,
vulnerability_details=vulnerability_details)
@bp.route("/<vcdb_id>/details")
@skip_authorization
def vuln_view_details(vcdb_id):
return view_vuln(vcdb_id, "vulnerability/view_details.html")
@bp.route("/<vcdb_id>/editor")
@skip_authorization
def vuln_editor(vcdb_id):
return view_vuln(vcdb_id, "vulnerability/code_editor.html")
@bp.route("/<vcdb_id>/tree")
@skip_authorization
def vuln_file_tree(vcdb_id):
vulnerability_details = _get_vulnerability_details(vcdb_id)
view = vulnerability_details.vulnerability_view
master_commit = view.master_commit
if not master_commit:
abort(404)
status_code = 200
content_type = "text/json"
response_msg = master_commit.tree_cache
if not response_msg:
try:
vulnerability_details.fetch_tree_cache(skip_errors=False,
max_timeout=10)
response_msg = master_commit.tree_cache
except urllib.error.HTTPError as err:
status_code = err.code
response_msg = "".join([
"VCS proxy is unreachable (it might be down).",
"\r\nHTTPError\r\n",
err.read(),
])
content_type = "text/plain"
except urllib.error.URLError as err:
status_code = 400
response_msg = "".join([
"VCS proxy is unreachable (it might be down).",
"\r\nURLError\r\n",
str(err.reason),
])
content_type = "text/plain"
except Exception: # pylint: disable=broad-except
status_code = 400
content_type = "text/plain"
response_msg = "VCS proxy is unreachable (it might be down)."
return Response(response=response_msg,
status=status_code,
content_type=content_type)
@bp.route("/<vcdb_id>/annotation_data")
@skip_authorization
def annotation_data(vcdb_id):
vulnerability_details = _get_vulnerability_details(vcdb_id)
vulnerability_details.validate_and_simplify_id()
view = vulnerability_details.vulnerability_view
master_commit = view.master_commit
if not master_commit:
logging.error("Vuln (id: %r) has no linked Git commits!", view.id)
return create_json_response("Entry has no linked Git link!", 404)
master_commit = vulnerability_details.get_master_commit()
files_schema = RepositoryFilesSchema(many=True)
return files_schema.jsonify(master_commit.repository_files)
@bp.route("/<vcdb_id>/file_provider")
@skip_authorization
def file_provider(vcdb_id):
vulnerability_details = _get_vulnerability_details(vcdb_id)
vulnerability_details.validate_and_simplify_id()
item_hash = request.args.get("item_hash", 0, type=str)
item_path = request.args.get("item_path", None, type=str)
proxy_target = (cfg.GCE_VCS_PROXY_URL + url_for(
"vcs_proxy.main_api",
repo_url=vulnerability_details.repo_url,
item_path=item_path,
item_hash=item_hash,
)[1:])
try:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.load_verify_locations(cafile=cfg.APP_CERT_FILE)
ctx.verify_mode = ssl.CERT_REQUIRED
result = urllib.request.urlopen(proxy_target, context=ctx) # nosec
except urllib.error.HTTPError as err:
return Response(response=err.read(),
status=err.code,
content_type="text/plain")
return send_file(result, mimetype="application/octet-stream")
@bp.route("/<vcdb_id>/embed")
@skip_authorization
def embed(vcdb_id):
try:
section_id = int(request.args.get("sid", -1))
start_line = int(request.args.get("start_line", 1))
end_line = int(request.args.get("end_line", -1))
vulnerability_details = VulnerabilityDetails(vcdb_id)
vulnerability_details.validate_and_simplify_id()
view = vulnerability_details.vulnerability_view
if not view:
return make_response(("No vulnerability found", 404))
if not view.master_commit:
return make_response(
(f"Vuln (id: {view.id}) has no linked Git commits!", 404))
master_commit = vulnerability_details.get_master_commit()
files_schema = RepositoryFilesSchema(many=True)
# Hack to quickly retrieve the full data.
custom_data = json.loads(
files_schema.jsonify(master_commit.repository_files).data)
settings = {
"section_id": section_id,
"startLine": start_line,
"endLine": end_line,
"entry_data": custom_data,
}
return render_template(
"vulnerability/embedded.html",
vulnerability_details=vulnerability_details,
embed_settings=settings,
)
except (ValueError, InvalidIdentifierException):
return make_response(("No vulnerability found", 404))
@bp.route("/<vcdb_id>/create", methods=["GET", "POST"])
@bp.route("/create", methods=["GET", "POST"])
@requires(CREATE, Vulnerability)
def create_vuln(vcdb_id=None):
return _create_vuln_internal(vcdb_id)
def _create_vuln_internal(vcdb_id=None):
try:
vulnerability_details = VulnerabilityDetails(vcdb_id)
vulnerability = vulnerability_details.get_or_create_vulnerability()
except InvalidIdentifierException as err:
return flash_error(str(err), "frontend.serve_index")
if vulnerability.id:
logging.debug("Preexisting vulnerability entry found: %r",
vulnerability.id)
delete_form = VulnerabilityDeleteForm()
if delete_form.validate_on_submit():
db.session.delete(vulnerability)
# Remove the entry.
db.session.commit()
flash("The entry was deleted.", "success")
return redirect("/")
form = VulnerabilityDetailsForm(obj=vulnerability)
commit = form.data["commits"][0]
if not commit["repo_name"]:
logging.info("Empty repository name. %r", commit)
repo_url = commit["repo_url"]
vcs_handler = get_vcs_handler(None, repo_url)
if vcs_handler:
logging.info("Found name. %r", vcs_handler.repo_name)
form.commits[0].repo_name.process_data(vcs_handler.repo_name)
if form.validate_on_submit():
try:
form.populate_obj(vulnerability)
db.session.add(vulnerability)
db.session.commit()
# TODO: Improve this hack to assign a new vcdb_id here.
# Currently, we are just piggy backing on the auto increment of the primary key to ensure uniqueness.
# This will likely be prone to race conditions.
vulnerability.vcdb_id = vulnerability.id
db.session.add(vulnerability)
db.session.commit()
logging.debug("Successfully created/updated entry: %r",
vulnerability.id)
flash("Successfully created/updated entry.", "success")
return redirect(
url_for("vuln.vuln_view", vcdb_id=vulnerability.vcdb_id))
except InvalidIdentifierException as err:
flash_error(str(err))
return render_template("vulnerability/create.html",
vulnerability_details=vulnerability_details,
form=form)
def add_proposal(vuln: Vulnerability, form: VulnerabilityDetailsForm):
vuln_clone = vuln.copy()
form.populate_obj(vuln_clone)
vuln_clone.version = None
vuln_clone.prev_version = vuln.version
vuln_clone.state = VulnerabilityState.READY
vuln_clone.creator = g.user
# Reset any previous feedback data.
vuln_clone.reviewer_id = None
vuln_clone.review_feedback = None
db.session.add(vuln_clone)
db.session.commit()
if not vuln_clone.vcdb_id:
# TODO: Improve this hack to assign a new vcdb_id here.
# Currently, we are just piggy backing on the auto increment of the primary key to ensure uniqueness.
# This will likely be prone to race conditions.
vuln_clone.vcdb_id = vuln_clone.id
db.session.add(vuln_clone)
db.session.commit()
flash(
"Your proposal will be reviewed soon. You can monitor progress in your Proposals Section.",
"success")
@bp.route("/<vcdb_id>/edit", methods=["GET", "POST"])
@bp.route("/edit", methods=["GET", "POST"])
@requires(EDIT, Vulnerability)
def edit_vuln(vcdb_id=None):
return _edit_vuln_internal(vcdb_id)
def _can_add_proposal(vuln):
    """
    Conditions for creating a proposal:
    - No pending open proposals by the same user.
    - Proposals can only be made for currently PUBLISHED entries.
    """
# TODO: Simplify or move away the query below.
existing_user_proposals = Vulnerability.query.filter(
or_(Vulnerability.vcdb_id == vuln.vcdb_id, Vulnerability.cve_id == vuln.cve_id), Vulnerability.creator == g.user,
Vulnerability.state != VulnerabilityState.PUBLISHED,
Vulnerability.state != VulnerabilityState.ARCHIVED).first()
if existing_user_proposals:
flash_error(
"You already have a pending proposal for this entry. Please go to your proposals section."
)
return False
return True
def _edit_vuln_internal(vcdb_id: str = None):
vulnerability_details = _get_vulnerability_details(vcdb_id,
simplify_id=False)
view = vulnerability_details.vulnerability_view
vuln = vulnerability_details.get_or_create_vulnerability()
if not _can_add_proposal(vuln):
return redirect(
url_for("vuln.vuln_view", vcdb_id=vcdb_id))
form = VulnerabilityDetailsForm(obj=vuln)
# Populate the form data from the vulnerability view if necessary.
if form.comment.data == "":
form.comment.data = view.comment
form_submitted = form.validate_on_submit()
if form_submitted:
add_proposal(vuln, form)
return redirect(
url_for("vuln.vuln_view", vcdb_id=vcdb_id))
return render_template("vulnerability/edit.html",
vulnerability_details=vulnerability_details,
form=form)
| 1.757813 | 2 |
implicit_emulators/implem/data/data.py | m-dml/lil2021swe | 1 | 12786112 | import numpy as np
import torch
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from implem.utils import device
class SimpleDataset(torch.utils.data.Dataset):
def __init__(self, data, offset=1, start=None, end=None):
super(SimpleDataset, self).__init__()
assert len(data.shape) >= 2 #[T,*D], where D can be [C,W,H] etc.
self.T = len(data)
self.data = data
self.offset = offset
self.start = 0 if start is None else start
self.end = self.T-np.asarray(self.offset).max() if end is None else end
assert self.end > self.start
self.idx = torch.arange(self.start, self.end, requires_grad=False, device='cpu')
def __getitem__(self, index):
""" Generate one batch of data """
x = self.data[self.idx[index]].reshape(*self.data.shape[1:])
y = self.data[self.idx[index]+self.offset].reshape(len(self.offset), *self.data.shape[1:])
return x,y
def __len__(self):
return len(self.idx)
class MultiTrialDataset(torch.utils.data.Dataset):
def __init__(self, data, offset=1, start=None, end=None):
super(MultiTrialDataset, self).__init__()
assert len(data.shape) >= 3 #[N,T,*D], where D can be [C,W,H] etc.
self.N, self.T = data.shape[:2]
self.data = data.reshape(-1, *data.shape[2:]) #[NT,*D]
self.offset = offset
self.start = 0 if start is None else start
self.end = self.T-np.asarray(self.offset).max() if end is None else end
assert self.end > self.start
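        # The trials are flattened into one [N*T, *D] array above, so the same
        # (start, end) index window is repeated for every trial j with a j*T
        # offset added to address that trial's rows.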
idx = torch.arange(self.start, self.end, requires_grad=False, device='cpu')
idx = [idx for j in range(self.N)]
self.idx = torch.cat([j*self.T + idx[j] for j in range(len(idx))])
def __getitem__(self, index):
""" Generate one batch of data """
x = self.data[self.idx[index]].reshape(*self.data.shape[1:])
y = self.data[self.idx[index]+self.offset].reshape(*self.data.shape[1:])
return x,y
def __len__(self):
return len(self.idx)
class MultiStepMultiTrialDataset(MultiTrialDataset):
def __init__(self, data, offset=1, start=None, end=None):
super(MultiStepMultiTrialDataset, self).__init__(data=data, offset=offset, start=start, end=end)
        # np.int was removed from NumPy; the builtin int keeps the same behaviour.
        self.offset = torch.as_tensor(np.asarray(offset, dtype=int).reshape(1,-1), device='cpu')
def __getitem__(self, index):
""" Generate one batch of data """
io = (self.idx[index].reshape(-1,1) + self.offset.reshape(1,-1)).flatten()
x = self.data[self.idx[index]].reshape(*self.data.shape[1:])
y = self.data[io].reshape(np.prod(self.offset.shape), *self.data.shape[1:])
return x,y
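# A minimal usage sketch (not part of the original module), assuming a toy
# trajectory tensor of shape [T, D]:
#
#   data = torch.randn(100, 3)                        # T=100 steps, D=3 features
#   ds = SimpleDataset(data, offset=np.arange(1, 4))  # targets at t+1, t+2, t+3
#   x, y = ds[0]                                      # x: [3], y: [3, 3]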
class DataModule(pl.LightningDataModule):
def __init__(self, data, train_valid_split: int = 0.9,
batch_size: int = 2, offset: int = 1, Dataset=SimpleDataset,
**kwargs):
super().__init__()
self.data = data
self.Dataset = Dataset
self.batch_size = batch_size
self.offset = offset if isinstance(offset, np.ndarray) else np.arange(offset)
self.num_workers = 0
assert 0. < train_valid_split and train_valid_split <= 1.
self.train_valid_split = train_valid_split
def setup(self, stage=None):
if stage == 'fit' or stage is None:
split_index = int(len(self.data) * self.train_valid_split)
self.train_data = self.Dataset(data = self.data[:split_index], offset = self.offset)
self.valid_data = self.Dataset(data = self.data[split_index:], offset = self.offset)
def train_dataloader(self):
return DataLoader(self.train_data, batch_size=self.batch_size, num_workers=self.num_workers,
shuffle=True, generator=torch.Generator(device=device))
def val_dataloader(self):
return DataLoader(self.valid_data, batch_size=self.batch_size, num_workers=self.num_workers,
shuffle=False, generator=torch.Generator(device=device)) | 2.71875 | 3 |
test_project/forms.py | ebenh/django-flex-user | 1 | 12786113 | <gh_stars>1-10
from django import forms
from django.utils.text import capfirst
from django.contrib.auth import get_user_model, password_validation
from django.core.exceptions import ValidationError
from django_flex_user.models.otp import TimeoutError
UserModel = get_user_model()
class OTPTokensSearchForm(forms.Form):
user_identifier = forms.CharField(
label=capfirst(
'{username}, {email} or {phone}'.format(
username=UserModel._meta.get_field('username').verbose_name,
email=UserModel._meta.get_field('email').verbose_name,
phone=UserModel._meta.get_field('phone').verbose_name,
)
)
)
class VerifyOTPForm(forms.Form):
password = forms.CharField(label='Verification code')
def __init__(self, otp_token=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.otp_token = otp_token
def clean_password(self):
password = self.cleaned_data['password']
try:
success = self.otp_token.check_password(password)
except TimeoutError:
raise ValidationError("You're doing that too much. Please wait before trying again.")
else:
if not success:
raise ValidationError('The verification code you entered is incorrect or has expired.')
return password
class SignUpWithUsernameForm(forms.ModelForm):
class Meta:
model = UserModel
fields = ('username', 'password')
# required = ('username', 'password')
widgets = {
'password': forms.PasswordInput()
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        # Override blank = True set in our model
self.fields['username'].required = True
def clean(self):
# Validate unique
super().clean()
# Check password strength
username = self.cleaned_data['username']
password = self.cleaned_data['password']
# note eben: UserAttributeSimilarityValidator doesn't normalize email before comparing it
# note eben: Be aware that there may be issues if a password validator expects the user instance to have
# a valid id and/or have been persisted to the database. For example, issues may occur in a password
# validator that checks for password reuse.
temp_user = self.Meta.model(username=username)
temp_user.set_unusable_password()
password_validation.validate_password(password, temp_user)
return self.cleaned_data
def save(self):
password = self.cleaned_data.pop('password')
self.instance.set_password(password)
return super().save()
class SignUpWithEmailForm(forms.ModelForm):
class Meta:
model = UserModel
fields = ('email', 'password')
# required = ('username', 'password')
widgets = {
'password': forms.PasswordInput()
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        # Override blank = True set in our model
self.fields['email'].required = True
def clean(self):
# Validate unique
super().clean()
# Check password strength
email = self.cleaned_data['email']
password = self.cleaned_data['password']
# note eben: UserAttributeSimilarityValidator doesn't normalize email before comparing it
# note eben: Be aware that there may be issues if a password validator expects the user instance to have
# a valid id and/or have been persisted to the database. For example, issues may occur in a password
# validator that checks for password reuse.
temp_user = self.Meta.model(email=email)
temp_user.set_unusable_password()
password_validation.validate_password(password, temp_user)
return self.cleaned_data
def save(self):
password = self.cleaned_data.pop('password')
self.instance.set_password(password)
return super().save()
class SignUpWithPhoneForm(forms.ModelForm):
class Meta:
model = UserModel
fields = ('phone', 'password')
# required = ('username', 'password')
widgets = {
'password': forms.PasswordInput()
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        # Override blank = True set in our model
self.fields['phone'].required = True
def clean(self):
# Validate unique
super().clean()
# Check password strength
# If phonenumber_field.formfields.PhoneNumberField.to_python can't convert the user's text input into a valid
# phonenumbers.phonenumber.PhoneNumber object, an exception is raised and no "phone" key is added to
# self.cleaned_data (It's also worth mentioning that following this error the validator
# phonenumber_field.validators import validate_international_phonenumber will not run).
# This results in the following field error:
# "Enter a valid phone number (e.g. (201) 555-0123) or a number with an international call prefix."
# As well as the following (redundant) non-field error:
# "You must supply at least one of username, email address or phone number."
# To suppress the redundant non-field error, provide the user a widget for inputting phone numbers which
# constrains their input to only values which can form a valid phonenumbers.phonenumber.PhoneNumber object.
phone = self.cleaned_data.get('phone')
password = self.cleaned_data['password']
# note eben: UserAttributeSimilarityValidator doesn't normalize email before comparing it
# note eben: Be aware that there may be issues if a password validator expects the user instance to have
# a valid id and/or have been persisted to the database. For example, issues may occur in a password
# validator that checks for password reuse.
temp_user = self.Meta.model(phone=phone)
temp_user.set_unusable_password()
password_validation.validate_password(password, temp_user)
return self.cleaned_data
def save(self):
password = self.cleaned_data.pop('password')
self.instance.set_password(password)
return super().save()
class UserForm(forms.ModelForm):
class Meta:
model = UserModel
fields = ('username', 'email', 'phone')
| 2.4375 | 2 |
core/config/__init__.py | tangchengdong/tang_project | 1 | 12786114 | # -*- coding:utf8 -*-
"""读取配置文件信息"""
import os
import yaml
BASEPATH = os.path.dirname(__file__)
def dbConfig():
data = open(os.path.join(BASEPATH, 'db.default.yaml'), 'r')
    config = yaml.safe_load(data)  # safe_load avoids constructing arbitrary Python objects
    data.close()
    return config
if __name__ == '__main__':
print(dbConfig()) | 2.59375 | 3 |
vilya/models/elastic/searcher.py | mubashshirjamal/code | 1,582 | 12786115 | <filename>vilya/models/elastic/searcher.py
# -*- coding: utf-8 -*-
from vilya.libs.search import code_client
class SearchEngine(object):
c = code_client
if not c.head():
c.put('')
@classmethod
def check_result(cls, result):
if result and not result.get('error'):
return True
return False
@classmethod
def decode(cls, json_raw, parse_names):
dic = json_raw
if not cls.check_result(dic):
return []
decoded = []
for e in dic['hits']['hits']:
d = e['_source']
values = []
for parse_name in parse_names:
values.append(d.get(parse_name))
decoded.append(values)
return decoded
@classmethod
def get_count(cls, result):
if cls.check_result(result):
return result['hits']['total']
return 0
@classmethod
def query_all(cls, index_type, from_=0, size=0):
data = {
'from': from_,
'size': size,
'query': {
'match_all': {}
}
}
result = cls.c.get('%s/_search' % index_type, data=data)
return result
@classmethod
def query_by_field(cls, index_type, field_dict, from_=0, size=0):
data = {
'from': from_,
'size': size,
'query': {
"term": field_dict,
},
}
result = cls.c.get('%s/_search' % index_type, data=data)
return result
@classmethod
def search_a_phrase(cls, index_type, phrase, from_=0, size=20,
filter_data=None, sort_data=None, highlight_data=None,
facets_data=None):
data = {
'from': from_,
'size': size,
"query": {
"query_string": {
"query": phrase
}
},
}
if highlight_data:
data['highlight'] = highlight_data
if filter_data:
filtered_query_data = {
"filtered": {
"query": data['query'],
"filter": filter_data,
}
}
data['query'] = filtered_query_data
if sort_data:
data['sort'] = sort_data
if facets_data:
data['facets'] = facets_data
result = cls.c.get('%s/_search' % index_type, data=data)
return result
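    # Illustrative usage sketch (the index type and field names here are
    # hypothetical, not taken from this class's callers):
    #
    #   result = SearchEngine.search_a_phrase('repo', 'def main', size=10)
    #   total = SearchEngine.get_count(result)
    #   rows = SearchEngine.decode(result, ['name', 'path'])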
| 2.1875 | 2 |
src/inworldz/maestro/environment/CredentialCrypt.py | IslandzVW/maestro | 3 | 12786116 | '''
Created on Jan 3, 2014
@author: <NAME>
'''
from Crypto.Cipher import AES
import base64
# the block size for the cipher object; must be 16, 24, or 32 for AES
BLOCK_SIZE = 32
BLOCK_SZ = 14
# the character used for padding--with a block cipher such as AES, the value
# you encrypt must be a multiple of BLOCK_SIZE in length. This character is
# used to ensure that your value is always a multiple of BLOCK_SIZE
PADDING = '{'
SECRET = None
IV = None
# one-liner to sufficiently pad the text to be encrypted
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
# one-liners to encrypt/encode and decrypt/decode a string
# encrypt with AES, encode with base64
EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
def Encode(s):
cipher=AES.new(key=SECRET,mode=AES.MODE_CBC,IV=IV)
return EncodeAES(cipher, s)
def Decode(s):
cipher=AES.new(key=SECRET,mode=AES.MODE_CBC,IV=IV)
return DecodeAES(cipher, s)
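# Illustrative usage sketch (example key/IV values only, not part of the
# original module): SECRET and IV are left as None above and must be set by
# the caller before Encode/Decode are used. AES-CBC expects a 16/24/32-byte
# key and a 16-byte IV.
#
#   import CredentialCrypt
#   CredentialCrypt.SECRET = '0123456789abcdef0123456789abcdef'  # 32-byte key
#   CredentialCrypt.IV = '0123456789abcdef'                      # 16-byte IV
#   token = CredentialCrypt.Encode('secret value')
#   plain = CredentialCrypt.Decode(token)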
| 3.8125 | 4 |
getwpuser.py | galletitaoreo/PythonPentest | 5 | 12786117 | #coding=utf-8
import sys
import urllib
import httplib
import re
import urlparse
#import requests
count = 100
#httplib.HTTPConnection.debuglevel = 1
def savefile(text):
pass
def checktitle(target):
i = 1
for i in range(count):
url='http://' + target + '/?author=%s' % i
pattern='<title>(.*) \|'
try:
#r=requests.get(url)
r = urllib.urlopen(url).read()
except Exception,e:
print e
#text=r.text
text = r
regux=re.compile(pattern)
result=regux.findall(text)
print result
re_str=''.join(result)
        if re_str not in [u'\u672a\u627e\u5230\u9875\u9762']:  # u'未找到页面' ("page not found")
n_str=str(i)+':'+re_str
print n_str
#print "%d:%s" % (i,list)
## BUG: cannot get from Freebuf.com
def checkurl(target):
i = 1
for i in range(count):
conn = httplib.HTTPConnection(target)
headers={"User-Agent":"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"}
conn.request('GET','/?author=%s' % i,headers=headers)
r = conn.getresponse()
#print r.status
if r.status == 301:
url = 'http://' + target + '/?author=%s' % i
a = urllib.urlopen(url).geturl()
a = urlparse.urlparse(a)
#print 'path:',a.path
#astr = a.path[:-1]
#astr = astr[astr.rindex('/')+1:]
apos = a.path.rindex('/')
bpos = len(a.path.split('/'))
if apos == len(a.path)-1 :
print a.path.split('/')[bpos-2]
else:
print a.path.split('/')[bpos-1]
#print str(i) + ':' + astr
def usage():
print '<Usage>:'
print '\t GetWPUser <type> <url>'
print '<Type> :'
print '\t 1 - From Title'
print '\t 2 - From URL (Handler 302)'
print '<Eg.> :'
print '\t GetWPUser 2 www.blackh4t.org'
print ''
print '\t\t\t by DarkR4y.'
def main():
if len(sys.argv) == 3:
t = sys.argv[1]
target = sys.argv[2]
if t == '1':
checktitle(target)
if t == '2':
checkurl(target)
else:
usage()
pass
if __name__ == "__main__":
main()
| 2.71875 | 3 |
UVa 10424 love calculator/sample/main.py | tadvi/uva | 1 | 12786118 | import sys
import re
sys.stdin = open('input.txt')
while True:
try:
s1 = raw_input().strip()
s2 = raw_input().strip()
except:
break
t1 = sum(ord(c) - ord('a') + 1 for c in re.findall('[a-z]', s1.lower()))
while t1 >= 10:
t1 = sum(int(c) for c in str(t1))
t2 = sum(ord(c) - ord('a') + 1 for c in re.findall('[a-z]', s2.lower()))
while t2 >= 10:
t2 = sum(int(c) for c in str(t2))
print '%.2f %%' % (100.0 * t2 / t1)
| 3.109375 | 3 |
mobycity/news/models.py | LucienD/Mobct | 0 | 12786119 | #coding:utf-8
from django.db import models
from django.utils import timezone
from djangocms_text_ckeditor.fields import HTMLField
class News(models.Model):
creation_datetime = models.DateTimeField(default=timezone.now, verbose_name=u'Date de création')
update_datetime = models.DateTimeField(default=timezone.now, verbose_name=u'Date de mise à jour')
publication_datetime = models.DateTimeField(default=timezone.now, verbose_name=u'Date de publication')
title = models.CharField(max_length=255, verbose_name=u'Titre')
subtitle = models.CharField(blank=True, max_length=255, verbose_name=u'Sous-titre')
theme = models.CharField(max_length=100, verbose_name=u'Thème')
body = HTMLField(verbose_name=u'Contenu')
image = models.ImageField(upload_to='news_news', verbose_name=u'Image')
link1 = models.URLField(blank=True, verbose_name=u'Lien 1')
link2 = models.URLField(blank=True, verbose_name=u'Lien 2')
link3 = models.URLField(blank=True, verbose_name=u'Lien 3')
class Meta:
verbose_name = u'Actualité'
verbose_name_plural = u'Actualités' | 2.046875 | 2 |
dj_generatortools/stubbs/default/views.py | weholt/django-generation-tools | 0 | 12786120 | <gh_stars>0
from django.contrib.auth.decorators import login_required
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from {{app_name}}.models.{{snake_case_model_name}} import {{model_name}}
@method_decorator(login_required, name="dispatch")
class {{model_name}}ListView(ListView):
model = {{model_name}}
template_name = "{{app_name}}/{{snake_case_model_name}}/list.html"
context_object_name = "{{snake_case_model_name}}s"
paginate_by = 5
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
object_list = self.get_queryset()
page = self.request.GET.get("page")
paginator = Paginator(object_list, self.paginate_by)
try:
object_list = paginator.page(page)
except PageNotAnInteger:
object_list = paginator.page(1)
except EmptyPage:
object_list = paginator.page(paginator.num_pages)
context[self.context_object_name] = object_list
return context
@method_decorator(login_required, name='dispatch')
class {{model_name}}CreateView(CreateView):
model = {{model_name}}
template_name = '{{app_name}}/{{snake_case_model_name}}/create.html'
fields = ({% for field in fields %}{% if field.editable %}'{{field.name}}',{% endif %}{% endfor %} )
success_url = reverse_lazy('{{app_name}}:{{snake_case_model_name}}-list')
@method_decorator(login_required, name='dispatch')
class {{model_name}}DetailView(DetailView):
model = {{model_name}}
template_name = '{{app_name}}/{{snake_case_model_name}}/detail.html'
context_object_name = '{{snake_case_model_name}}'
@method_decorator(login_required, name='dispatch')
class {{model_name}}UpdateView(UpdateView):
model = {{model_name}}
template_name = '{{app_name}}/{{snake_case_model_name}}/update.html'
context_object_name = '{{snake_case_model_name}}'
fields = ({% for field in fields %}{% if field.editable %}'{{field.name}}',{% endif %}{% endfor %} )
def get_success_url(self):
return reverse_lazy('{{app_name}}:{{snake_case_model_name}}-list')
#return reverse_lazy('{{app_name}}:{{snake_case_model_name}}-detail', kwargs={'pk': self.object.id})
@method_decorator(login_required, name='dispatch')
class {{model_name}}DeleteView(DeleteView):
model = {{model_name}}
template_name = '{{app_name}}/{{snake_case_model_name}}/delete.html'
success_url = reverse_lazy('{{app_name}}:{{snake_case_model_name}}-list')
| 2.171875 | 2 |
boot.py | dlizotte-uwo/matrixportal-messageboard | 0 | 12786121 | <filename>boot.py
import board
from digitalio import DigitalInOut, Direction, Pull
up_button = DigitalInOut(board.BUTTON_UP)
up_button.direction = Direction.INPUT
up_button.pull = Pull.UP
if not up_button.value:
import storage
storage.remount("/", False)
led = DigitalInOut(board.L)
led.direction = Direction.OUTPUT
led.value = True | 3.015625 | 3 |
utils/label_broadcast.py | Jo-wang/LDBE | 7 | 12786122 | import torch
import torch.nn as nn
def label_broadcast(label_map,target):
# label_map is the prediction output through softmax operation
N, C, W, H = label_map.shape
# label_map = label_map.softmax(dim=1)
new_label = label_map.clone()
mask = (target.unsqueeze(1) != 255).detach()
new_mask = torch.zeros((N, 1, W, H)).cuda()
left = label_map[:, :, 0:W - 1, :] * mask[:, :, 0:W - 1, :]
right = label_map[:, :, 1:W, :] * mask[:, :, 1:W, :]
up = label_map[:, :, :, 0:H - 1] * mask[:, :, :, 0:H - 1]
down = label_map[:, :, :, 1:H] * mask[:, :, :, 1:H]
new_label[:, :, 1:W, :] = new_label[:, :, 1:W, :].clone() + left
new_label[:, :, 0:W - 1] = new_label[:, :, 0:W - 1].clone() + right
new_label[:, :, :, 1:H] = new_label[:, :, :, 1:H].clone() + down
new_label[:, :, :, 0:H - 1] = new_label[:, :, :, 0:H - 1].clone() + up
new_label = nn.Softmax(dim=1)(new_label)
new_mask[:, :, 1:W, :] += mask[:, :, 0:W - 1, :]
new_mask[:, :, 0:W - 1] += mask[:, :, 1:W, :]
new_mask[:, :, :,1:H] += mask[:, :, :, 0:H - 1]
new_mask[:, :, :, 0:H-1] += mask[:, :, :, 1:H]
new_mask = new_mask>=1
return new_label,new_mask.squeeze().detach()
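# Illustrative usage sketch (shapes only; assumes a CUDA device, since the
# function allocates the mask with .cuda()):
#
#   N, C, W, H = 2, 19, 64, 64
#   label_map = torch.randn(N, C, W, H).cuda().softmax(dim=1)
#   target = torch.randint(0, C, (N, W, H)).cuda()   # 255 would mark ignored pixels
#   new_label, new_mask = label_broadcast(label_map, target)  # [N,C,W,H], [N,W,H]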
| 2.734375 | 3 |
Find Digit.py | joel-razor1/HackerRank_Problem_Solving | 1 | 12786123 | import math
import os
import random
import re
import sys
# Complete the findDigits function below.
def findDigits(n):
cnt=0
a=[int(i) for i in str(n)]
print(a)
for i in range(0,len(a)):
if a[i]!=0:
if n%a[i]==0:
cnt=cnt+1
else:
cnt=cnt+0
return cnt
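# For example, findDigits(124) == 3 because each of the digits 1, 2 and 4 divides 124.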
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
result = findDigits(n)
fptr.write(str(result) + '\n')
fptr.close()
| 3.515625 | 4 |
scripts/generate_aruco_mesh.py | hubertbraszko/follow-marker | 3 | 12786124 | <reponame>hubertbraszko/follow-marker
#!/usr/bin/env python
# import the necessary packages
import rospy
import rospkg
import numpy as np
import cv2
import sys
import os
# import debugpy
# debugpy.listen(("localhost", 5678))
# debugpy.wait_for_client() # optional, blocks execution until client is attached
# Get package path
ROSPACK = rospkg.RosPack()
PKG_PATH = ROSPACK.get_path('aruco_description')
DEBUG_PLOT = False
# define names of each possible ArUco tag OpenCV supports
ARUCO_DICT = {
"DICT_4X4_50": cv2.aruco.DICT_4X4_50,
"DICT_4X4_100": cv2.aruco.DICT_4X4_100,
"DICT_4X4_250": cv2.aruco.DICT_4X4_250,
"DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
"DICT_5X5_50": cv2.aruco.DICT_5X5_50,
"DICT_5X5_100": cv2.aruco.DICT_5X5_100,
"DICT_5X5_250": cv2.aruco.DICT_5X5_250,
"DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
"DICT_6X6_50": cv2.aruco.DICT_6X6_50,
"DICT_6X6_100": cv2.aruco.DICT_6X6_100,
"DICT_6X6_250": cv2.aruco.DICT_6X6_250,
"DICT_6X6_1000": cv2.aruco.DICT_6X6_1000,
"DICT_7X7_50": cv2.aruco.DICT_7X7_50,
"DICT_7X7_100": cv2.aruco.DICT_7X7_100,
"DICT_7X7_250": cv2.aruco.DICT_7X7_250,
"DICT_7X7_1000": cv2.aruco.DICT_7X7_1000,
"DICT_ARUCO_ORIGINAL": cv2.aruco.DICT_ARUCO_ORIGINAL
}
def generate_dae(png_filename, out_path, dae_template):
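    # Fills the Collada (.dae) template with the marker texture's file name and
    # writes the result to out_path, presumably so the mesh can reference the
    # generated PNG as its texture.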
data = {"pngFilename": png_filename}
new_dae = dae_template.format(**data)
# write the dae template
with open(out_path, mode='w') as file:
file.write(new_dae)
def generate_tag(arucoDict, tag_id, out_path):
tag = np.zeros((300, 300, 1), dtype="uint8")
cv2.aruco.drawMarker(arucoDict, tag_id, 300, tag, 1)
    # write the generated ArUCo tag to disk and, when DEBUG_PLOT is set,
    # display it on screen
    cv2.imwrite(out_path, tag)
    if DEBUG_PLOT:
        cv2.imshow("ArUCo Tag", tag)
        cv2.waitKey(100)
def generate_aruco():
rospy.init_node('aruco_generator', anonymous=True)
# Get params
aruco_ids = rospy.get_param('~aruco_ids')
if type(aruco_ids) == str:
# If multiple element splitted by a comma are provided
aruco_ids = aruco_ids.split(',')
aruco_ids = [int(id) for id in aruco_ids]
else:
# if only one number is provided
aruco_ids = int(aruco_ids)
if aruco_ids == -1:
aruco_ids = []
else:
aruco_ids = [aruco_ids]
dict_type = rospy.get_param('~aruco_dictionary')
# Check inputs
if ARUCO_DICT.get(dict_type, None) is None:
rospy.logerr("ArUCo tag of '{}' is not supported".format(
dict_type))
return False
max_aruco_id = int(dict_type.split('_')[-1])
aruco_ids.sort()
if(len(aruco_ids) == 0):
aruco_ids = [i for i in range(max_aruco_id)]
else:
if(not aruco_ids[-1] < max_aruco_id):
rospy.logerr("The max id for {} is {}. Requested id was {} ".format(
dict_type, max_aruco_id, aruco_ids[-1]))
return False
# load the ArUCo dictionary
arucoDict = cv2.aruco.Dictionary_get(ARUCO_DICT[dict_type])
output_folder = os.path.join(PKG_PATH, 'meshes', dict_type)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# load the dae template
with open(os.path.join(PKG_PATH, 'meshes', 'template.dae'), mode='r') as file:
dae_template = file.read()
for aruco_id in aruco_ids:
rospy.loginfo("generating ArUCo tag type '{}' with ID '{}'".format(
dict_type, aruco_id))
png_filename = "{}_id{}.png".format(dict_type, aruco_id)
dae_filename = "{}_id{}.dae".format(dict_type, aruco_id)
png_path = os.path.join(output_folder, png_filename)
dae_path = os.path.join(output_folder, dae_filename)
generate_tag(arucoDict, aruco_id, png_path)
generate_dae(png_filename, dae_path, dae_template)
return True
if __name__ == '__main__':
try:
if(not generate_aruco()):
sys.exit(-1)
except rospy.ROSInterruptException:
pass
| 2.125 | 2 |
homogen.py | Gkdnz/SfePy | 0 | 12786125 | <reponame>Gkdnz/SfePy
#!/usr/bin/env python
from optparse import OptionParser
import sfepy
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.homogenization.homogen_app import HomogenizationApp
usage = """%prog [options] filename_in"""
help = {
'filename' :
'basename of output file(s) [default: <basename of input file>]',
}
def main():
parser = OptionParser(usage=usage, version="%prog " + sfepy.__version__)
parser.add_option("-o", "", metavar='filename', action="store",
dest="output_filename_trunk",
default=None, help=help['filename'])
(options, args) = parser.parse_args()
if (len(args) == 1):
filename_in = args[0]
else:
        parser.print_help()
return
required, other = get_standard_keywords()
required.remove('equations')
conf = ProblemConf.from_file(filename_in, required, other)
app = HomogenizationApp(conf, options, 'homogen:')
opts = conf.options
if hasattr(opts, 'parametric_hook'): # Parametric study.
parametric_hook = conf.get_function(opts.parametric_hook)
app.parametrize(parametric_hook)
app()
if __name__ == '__main__':
main()
| 2.40625 | 2 |
sklearn_fine_tuning.py | dropofwill/author-attr-experiments | 2 | 12786126 | <reponame>dropofwill/author-attr-experiments
import numpy as np
import os
import time as tm
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.pipeline import Pipeline
from sklearn.decomposition import RandomizedPCA
docs = datasets.load_files(container_path="../../sklearn_data/problemI")
X, y = docs.data, docs.target
baseline = 1/float(len(list(np.unique(y))))
# Split the dataset into testing and training sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=2)
# define a pipeline combining a text feature extractor/transformer with a classifier
pipeline = Pipeline([
('vect', CountVectorizer(decode_error='ignore', analyzer='char', ngram_range=(1,2))),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB(alpha=0.0001))
])
# features to cross-check
parameters = {
#'vect__max_df': (0.5, 0.75, 1),
#'vect__max_features': (None, 100, 5000),
#'vect__analyzer' : ('char', 'word'),
#'vect__ngram_range': ((1, 1), (1, 2), (2,2), (2,3), (1,3), (1,4), (3,4), (1,5), (4,5), (3,5)),
#'vect__ngram_range': ((1, 1), (1, 2), (1,3)), # unigrams or bigrams or ngrams
#'tfidf__use_idf': (True, False),
#'clf__alpha': (1, 0.5, 0.01, 0.001, 0.0001, 0.00001, 0.000001, 0.0000001),
#'clf__alpha': (0.001, 0.0001, 0.00001, 0.000001)
}
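# Note: with every entry above commented out the parameter grid is empty, so
# GridSearchCV effectively just cross-validates the fixed pipeline once per
# scoring metric below.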
scores = ['precision', 'recall']
sub_dir = "Results/"
location = "results" + tm.strftime("%Y%m%d-%H%M%S") + ".txt"
with open( os.path.join(sub_dir, location), 'w+') as f:
for score in scores:
f.write("%s \n" % score)
clf = GridSearchCV(pipeline, parameters, cv=2, scoring=score, verbose=0)
clf.fit(X_train, y_train)
improvement = (clf.best_score_ - baseline) / baseline
f.write("Best parameters from a %s stand point:\n" % score)
f.write("Best score: %0.3f \n" % clf.best_score_)
f.write("Baseline score: %0.3f \n" % baseline)
f.write("Improved: %0.3f over baseline \n" % improvement)
f.write("\n\nGrid scores from a %s stand point:\n" % score)
for params, mean_score, scores in clf.grid_scores_:
f.write("%0.3f (+/-%0.03f) for %r \n" % (mean_score, scores.std() / 2, params))
f.write("\n\n")
f.write("\n\nDetailed classification report:\n")
f.write("The model is trained on the full development set.\n")
f.write("The scores are computed on the full evaluation set.\n")
y_true, y_pred = y_test, clf.best_estimator_.predict(X_test)
f.write(classification_report(y_true, y_pred)) | 2.28125 | 2 |
Plugins/Classes/Ship.py | bvbohnen/X4_Customizer | 25 | 12786127 | <reponame>bvbohnen/X4_Customizer
from .Macro import Macro
from .Connection import Connection
from .Component import Component
from .Storage import Storage
from Framework import File_System
from .Shared import Physics_Properties
__all__ = [
'Ship',
]
class Ship(Macro, Physics_Properties):
'''
Ship macro. This will be filled in as needed; many basic ship edits
are done directly on the xml.
TODO: move more ship stuff over to here.
* engine_count
- Int, number of engines.
* engine_tags
- Set of tags related to engine connections, including 'engine'.
'''
def __init__(self, xml_node, *args, **kwargs):
Macro.__init__(self, xml_node, *args, **kwargs)
self.engine_count = None
self.engine_tags = None
self.engine_macro = None
self._race = None
return
def Get_Ship_Type(self):
return self.Get('./properties/ship', 'type')
def Get_Primary_Purpose(self):
return self.Get('./properties/purpose', 'primary')
#def Get_Race(self):
# '''
# Returns the expected race for this ship, based on wares group,
# defaulting to argon if not found.
# '''
# if self._race == None:
# race = 'argon'
# wares_file = File_System.Load_File('libraries/wares.xml')
# xml_root = wares_file.Get_Root_Readonly()
#
# # /wares/ware[./component/@ref="ship_arg_l_destroyer_01_a_macro"]
# ware_entries = xml_root.xpath(f'./ware[./component/@ref="{self.name}"]')
# if ware_entries:
# assert len(ware_entries) == 1
# group = ware_entries[0].get('group')
# if group and group.startswith('ships_'):
# # The race should be the term after "ships_".
# race = group.replace('ships_', '')
# self._race = race
# return self._race
def Get_Storage_Macros(self):
'''
Returns a list of cargo storage macros for this ship. Such macro names
are assumed to start with "storage_". Typical ship will have only
one macro.
'''
ret_list = []
# Look through connections; they aren't specific on what class each
# links to (unless macros were already filled in).
for conn in self.conns.values():
# If the conn has no macro, and the ref starts with storage,
# load the macro.
if conn.macro == None and conn.macro_ref.startswith('storage_'):
conn.Get_Macro()
# If it has a macro, check its type.
if conn.macro and isinstance(conn.macro, Storage):
ret_list.append(conn.Get_Macro())
return ret_list
def Load_Engine_Data(self):
'Helper function that loads engine count and tags.'
component = self.Get_Component()
# Search the connections.
self.engine_count = 0
self.engine_tags = []
for conn in component.conns.values():
if 'engine' in conn.tags:
self.engine_count += 1
self.engine_tags = conn.tags
return
def Get_Engine_Count(self):
'Returns the number of engine connections.'
self.Load_Engine_Data()
return self.engine_count
def Get_Engine_Tags(self):
'Returns the engine connection tags.'
self.Load_Engine_Data()
return self.engine_tags
# TODO: some function somewhere which links a ship with engines,
# picked based on connection tag matching and whatever other criteria,
# and annotated back to here for convenience.
# Maybe a Loadout class?
def Select_Engine(
self,
engine_macros,
mk = None,
match_owner = True,
owner = None,
):
'''
From the given engine macros, select a matching engine.
Returns the engine selected.
'''
# There might be a specified loadout in the ship macro.
loadouts = self.xml_node.xpath('.//loadout')
for loadout in loadouts:
# Just check the first one for an engine macro.
engine_node = loadout.find('./macros/engine')
if engine_node != None and engine_node.get('macro'):
engine_macro_name = engine_node.get('macro')
# Look up this engine.
macro = self.database.Get_Macro(engine_macro_name)
if macro:
self.engine_macro = macro
return
# If here, search for a matching engine.
matches = []
# Find factions that can make the ship. As set for lookups.
ship_factions = set(self.Get_Ware_Factions())
# Use owner if given, and there are factions involved.
if ship_factions and owner:
ship_factions = set([owner])
# The rules for component matching are unclear, but it is not
# simply a direct tag group match, but some sort of subgroup match.
# Eg. ship-side {'platformcollision', 'engine', 'medium'}
# should match engine-side {'medium', 'engine', 'component'},
# where only two terms are common.
# In some cases, the ship won't have a size specifier, nor does
# a generic_engine.
# Try gathering select tags, for use in an exact match.
# Start with component.
engine_tags = ['component']
valid_tags = ['small', 'medium', 'large', 'extralarge', 'engine', 'spacesuit', 'bomb']
for tag in self.Get_Engine_Tags():
if tag in valid_tags:
engine_tags.append(tag)
# Convert to set for match checks.
engine_tags = set(engine_tags)
for macro in engine_macros:
macro_tags = macro.Get_Component_Connection_Tags()
if not macro_tags == engine_tags:
continue
if mk and macro.Get_mk() != mk:
continue
if ship_factions and match_owner:
# Find factions that can make the engine.
engine_factions = macro.Get_Ware_Factions()
# Check for no overlap.
if not any(x in ship_factions for x in engine_factions):
continue
matches.append(macro)
# From matches, pick fastest engine.
self.engine_macro = None
this_thrust = None
for macro in matches:
macro_thrust = macro.Get_Forward_Thrust()
if not self.engine_macro or macro_thrust > this_thrust:
self.engine_macro = macro
this_thrust = self.engine_macro.Get_Forward_Thrust()
return self.engine_macro
def Get_Engine_Macro(self):
'Return the currently selected engine macro.'
return self.engine_macro
def Get_Speed(self):
'Return the ship speed with currently selected engine.'
if not self.engine_macro:
return 0
thrust = float(self.engine_macro.Get_Forward_Thrust()) * self.Get_Engine_Count()
drag = self.Get_Forward_Drag()
speed = thrust / drag
return speed
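    # For example (illustrative numbers only): three engines with a forward
    # thrust of 900 each against a forward drag of 54 give 3 * 900 / 54 = 50.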
'''
For reference, paths/attributes of interest.
'./properties/identification' , 'name'
'./properties/identification' , 'description'
'.' , 'name'
'.' , 'class'
'./component' , 'ref'
'./properties/ship' , 'type'
'./properties/purpose' , 'primary'
'./properties/hull' , 'max'
'./properties/explosiondamage' , 'value'
'./properties/people' , 'capacity'
'./properties/storage' , 'missile'
'./properties/thruster' , 'tags'
'./properties/secrecy' , 'level'
'./properties/sounds/shipdetail' , 'ref'
'./properties/sound_occlusion' , 'inside'
'./properties/software'
'./properties/physics' , 'mass'
'./properties/physics/inertia' , 'pitch'
'./properties/physics/inertia' , 'yaw'
'./properties/physics/inertia' , 'roll'
'./properties/physics/drag' , 'forward'
'./properties/physics/drag' , 'reverse'
'./properties/physics/drag' , 'horizontal'
'./properties/physics/drag' , 'vertical'
'./properties/physics/drag' , 'pitch'
'./properties/physics/drag' , 'yaw'
'./properties/physics/drag' , 'roll'
''' | 2.625 | 3 |
student_sys/student/migrations/0001_initial.py | timTianWSRF/student_management | 0 | 12786128 | # Generated by Django 2.2.3 on 2019-07-22 13:33
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name='name')),
('sex', models.IntegerField(choices=[(1, 'male'), (2, 'female'), (0, 'unknown')], verbose_name='sex')),
('profession', models.CharField(max_length=128, verbose_name='job')),
('email', models.EmailField(max_length=254, verbose_name='Email')),
('qq', models.CharField(max_length=128, verbose_name='qq')),
('phone', models.CharField(max_length=128, verbose_name='phone')),
('status', models.IntegerField(choices=[(0, 'applying'), (1, 'passed'), (2, 'refused')], default=0, verbose_name='checkStatus')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='createTime')),
],
options={
'verbose_name': 'StudentInf',
'verbose_name_plural': 'StudentInf',
},
),
]
| 2 | 2 |
api/api_endpoints/model/posts_system/posts_system_models.py | albcristi/TheHub | 0 | 12786129 | <filename>api/api_endpoints/model/posts_system/posts_system_models.py
from django.db import models
from ...models import AppUsers
class Posts(models.Model):
"""
A post is created by a user and
is used in order for a user to
share its thoughts and emotions
"""
post_id = models.AutoField(primary_key=True)
post_title = models.CharField(max_length=40)
post_text = models.TextField()
post_date = models.DateTimeField()
is_public = models.BooleanField(default=False)
user = models.ForeignKey(
AppUsers,
related_name='user_posts',
on_delete=models.CASCADE
)
no_likes = models.IntegerField(default=0)
def __str__(self):
return str(self.post_title)
class PostLikes(models.Model):
"""
Each Post may receive one or more
likes from users (friends in the
case of posts that are not public)
"""
post = models.ForeignKey(
Posts,
related_name='post_likes',
on_delete=models.CASCADE
)
app_user = models.ForeignKey(
AppUsers,
related_name='likes_post',
on_delete=models.CASCADE
)
def __str__(self):
return "Post "+str(self.post)+" liked by "+str(self.app_user)
class PostComments(models.Model):
"""
A PostComment appears when a user
that sees a post wants to add a
remark regarding that specific post
"""
post = models.ForeignKey(
Posts,
related_name='related_post',
on_delete=models.CASCADE
)
app_user = models.ForeignKey(
AppUsers,
related_name='user_post_comments',
on_delete=models.CASCADE
)
comment_text = models.CharField(max_length=300)
comment_date = models.DateTimeField()
no_likes = models.IntegerField(default=0)
def __str__(self):
return str(self.comment_text)
class CommentLikes(models.Model):
"""
Whenever a user sees a comment of a certain
post, he/she can give that comment a like
"""
post_comment = models.ForeignKey(
PostComments,
related_name='comment_likes',
on_delete=models.CASCADE
)
app_user = models.ForeignKey(
AppUsers,
related_name='user_comment_likes',
on_delete=models.CASCADE
)
def __str__(self):
return "Comment "+str(self.post_comment)+" liked by "+str(self.app_user)
| 2.796875 | 3 |
dev/tools/pqrne/wanderlust.py | gusugusu1018/simmobility-prod | 50 | 12786130 | #!/usr/bin/env python
# Copyright (2012) Singapore-MIT Alliance for Research and Technology
import math
from PyQt4 import QtGui, QtCore
from paper import Tracing_paper
from digital_map import Lane_marking, Kerb_line, Road_section
from network import Lane_edge
from point import Point, nearest_point, re_arrange_co_linear_points, simplify_polyline, \
intersection_point, is_between, cross_product
from error import show_error_message, show_info_message
class Wanderlust:
debug = True
def __init__(self, main_window):
self.main_window = main_window
self.tracing_paper = Tracing_paper()
def load_digital_map(self, digital_map):
self.load_roads(digital_map.roads)
self.load_lane_markings(digital_map.lane_markings)
self.load_kerb_lines(digital_map.kerb_lines)
self.load_arrow_markings(digital_map.arrow_markings)
self.load_bus_stops(digital_map.bus_stops)
self.load_traffic_signals(digital_map.traffic_signals)
def load_roads(self, roads):
for road_name in roads:
for road_section in roads[road_name]:
self.tracing_paper.mark_out_polyline(road_section)
def load_lane_markings(self, lane_markings):
for marking_type in lane_markings:
for lane_marking in lane_markings[marking_type]:
self.load_lane_marking(lane_marking)
def load_lane_marking(self, lane_marking):
self.tracing_paper.mark_out_polyline(lane_marking)
def load_kerb_lines(self, kerb_lines):
for kerb_line in kerb_lines:
self.tracing_paper.mark_out_polyline(kerb_line)
def load_arrow_markings(self, arrow_markings):
for arrow_marking in arrow_markings:
self.tracing_paper.mark_out_position(arrow_marking)
def load_bus_stops(self, bus_stops):
for bus_stop in bus_stops:
self.tracing_paper.mark_out_position(bus_stop)
def load_traffic_signals(self, traffic_signals):
for signal in traffic_signals:
self.tracing_paper.mark_out_position(signal)
def load_road_network(self, road_network):
self.load_lane_edges(road_network.lane_edges)
def load_lane_edges(self, lane_edges):
for marking_type in lane_edges:
for lane_edge in lane_edges[marking_type]:
self.load_lane_edge(lane_edge)
def load_lane_edge(self, lane_edge):
graphics_item = self.tracing_paper.mark_out_polyline(lane_edge)
graphics_item.setZValue(1)
################################################################################
# Extract crossings and stop-lines
################################################################################
def extract_crossings_and_stop_lines(self, path, marking_types):
lane_markings = list()
for graphics_item in self.tracing_paper.items(path):
road_item = graphics_item.road_item
if hasattr(road_item, "lane_edge"):
continue
if isinstance(road_item, Lane_marking) and road_item.marking_type in marking_types:
lane_markings.append(road_item)
if len(lane_markings) == 0:
show_info_message("Either there are no crossing and stop-line lane-marking in the "
"selection region or crossing and stop-line lane-edges have already been created "
"over the lane-markings.")
return list()
lane_edges = list()
while len(lane_markings):
group = set()
lane_marking = lane_markings[0]
self.transfer(lane_marking, lane_markings, group)
self.find_similar_nearby_lane_markings(lane_marking, lane_markings, group)
marking_type = "J" if marking_types[0] == "A4" else marking_types[0]
lane_edge = self.form_lane_edge(group, marking_type)
lane_edges.append(lane_edge)
return lane_edges
def transfer(self, lane_marking, lane_markings, group):
if lane_marking in lane_markings:
lane_markings.remove(lane_marking)
group.add(lane_marking)
self.tracing_paper.hide(lane_marking)
def find_similar_nearby_lane_markings(self, lane_marking, lane_markings, group):
polyline = lane_marking.polyline
marking_type = lane_marking.marking_type
w = h = 2
for road_item in self.tracing_paper.road_items_around_point(polyline[0], w, h):
if isinstance(road_item, Lane_marking) and road_item.marking_type == marking_type:
if road_item.polyline[0].distance(road_item.polyline[-1]) < 0.1:
continue
if self.is_co_linear(lane_marking, road_item):
self.transfer(road_item, lane_markings, group)
self.find_similar_nearby_lane_markings(road_item, lane_markings, group)
for road_item in self.tracing_paper.road_items_around_point(polyline[-1], w, h):
if isinstance(road_item, Lane_marking) and road_item.marking_type == marking_type:
if road_item.polyline[0].distance(road_item.polyline[-1]) < 0.1:
continue
if self.is_co_linear(lane_marking, road_item):
self.transfer(road_item, lane_markings, group)
self.find_similar_nearby_lane_markings(road_item, lane_markings, group)
def is_co_linear(self, road_item1, road_item2):
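        # Two lane-markings are treated as co-linear when both end-points of
        # road_item2 lie within 0.5 units of the line through road_item1's
        # end-points.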
p1 = road_item1.polyline[0]
p2 = road_item1.polyline[-1]
discard, distance1 = nearest_point(p1, road_item2.polyline[0], p2)
discard, distance2 = nearest_point(p1, road_item2.polyline[-1], p2)
return (distance1 < 0.5) and (distance2 < 0.5)
def form_lane_edge(self, group, marking_type):
points = list()
for road_item in group:
for p in road_item.polyline:
points.append(p)
points = re_arrange_co_linear_points(points)
polyline = simplify_polyline(points)
lane_edge = self.main_window.road_network.add_lane_edge(marking_type, polyline)
self.main_window.scene.draw_lane_edge(lane_edge)
self.load_lane_edge(lane_edge)
for road_item in group:
self.tracing_paper.show(road_item)
road_item.lane_edge = lane_edge
return lane_edge
def extend_stop_lines_to_kerb(self, stop_lines):
for stop_line in stop_lines:
kerb_lines = list()
p1 = stop_line.polyline[0]
p2 = stop_line.polyline[-1]
# There should be only 2 kerb-lines, but if there is a road-divider, which is usually
# only one meter wide, then the following search may be too wide and return the other
# side of the road-divider, thus adding a third kerb-line.
for road_item in self.tracing_paper.road_items_around_line(p1, p2, 2, 0.1):
if isinstance(road_item, Kerb_line):
kerb_lines.append(road_item)
if len(kerb_lines) < 2:
continue
if len(kerb_lines) > 3:
print "found a stop-line (%s, %s) that cuts more than 3 kerb-lines" % (p1, p2)
continue
stop_line_polyline = list()
for p in stop_line.polyline:
stop_line_polyline.append(p)
polyline = list()
for kerb_line in kerb_lines:
for i in range(len(kerb_line.polyline) - 1):
p3 = kerb_line.polyline[i]
p4 = kerb_line.polyline[i+1]
p = intersection_point(p3, p4, p1, p2)
if is_between(p, p3, p4):
# A long kerb-line may be folded like the letter 'U'. Just like a
# horizontal line may intersect 'U' at 2 places, the stop-line may also
# intersect the kerb-line at 2 places. We ignore the intersection that
# is too far away from the stop-line.
if p.distance(p1) < 2 or p.distance(p2) < 2:
polyline.append(p)
# The 'U'-shaped kerb-line may represent the road-divider, which is
# usually one meter wide, enough room for someone to stand on. It is
# possible that the latter part of the kerb-line is nearer to the
# stop-line (we don't know which side of the 'U'-shaped kerb-line is
# the stop-line. Therefore, we cannot break out of the for-loop; we
# have to check the other "stem" of the 'U'.
re_arrange_co_linear_points(polyline)
if polyline[0].distance(p1) > polyline[0].distance(p2):
# The first point is closer to the last point of the stop-line. We reverse the
# polyline so that it is in the same direction as the stop-line's polyline.
polyline.reverse()
if len(polyline) < 2:
continue
if len(polyline) == 2:
stop_line_polyline[0] = polyline[0]
stop_line_polyline[-1] = polyline[-1]
elif len(polyline) == 3:
if polyline[1].distance(p1) < polyline[1].distance(p2):
# The middle point is closer to the stop-line's first point.
stop_line_polyline[0] = polyline[1]
stop_line_polyline[-1] = polyline[2]
else:
stop_line_polyline[0] = polyline[0]
stop_line_polyline[-1] = polyline[1]
else:
print "found a stop-line (%s, %s) that cuts at more than 3 places" % (p1, p2)
continue
stop_line.polyline = stop_line_polyline
point = stop_line_polyline[0]
path = QtGui.QPainterPath(QtCore.QPointF(point.x, point.y))
for point in stop_line_polyline:
path.lineTo(point.x, point.y)
stop_line.visual_item.setPath(path)
def connect_nearby_crossings(self, crossings):
while len(crossings):
group = set()
crossing = crossings[0]
self.transfer(crossing, crossings, group)
road_section = None
for road_item in self.tracing_paper.road_items_intersecting_polyline(crossing.polyline):
if isinstance(road_item, Road_section):
road_section = road_item
break
for road_item in self.tracing_paper.road_items_intersecting_polyline(road_section.polyline):
if isinstance(road_item, Lane_edge) and road_item.marking_type == "J":
self.transfer(road_item, crossings, group)
next_road_section = None
p1 = road_section.polyline[0]
p2 = road_section.polyline[-1]
for road_item in self.tracing_paper.road_items_around_line(p1, p2, 0.5, 0.5):
if isinstance(road_item, Road_section) and road_item.road_type in ("I", "XJ", "TJ"):
next_road_section = road_item
break
p = next_road_section.polyline[0]
if p.distance(p1) < 0.5 or p.distance(p2) < 0.5:
# The first point of next_road_section is closer to the previous road-section;
# so its end-point is at the center of the intersection.
p = next_road_section.polyline[-1]
for road_item in self.tracing_paper.road_items_around_point(p, 0.5, 0.5):
if road_item == next_road_section:
continue
if isinstance(road_item, Road_section) and road_item.road_type in ("I", "XJ", "TJ"):
self.find_crossing_on_other_side(road_item, p, crossings, group)
else:
# The first point of next_road_section is at the center of the intersection.
for road_item in self.tracing_paper.road_items_around_point(p, 0.5, 0.5):
if road_item == next_road_section:
continue
if isinstance(road_item, Road_section) and road_item.road_type in ("I", "XJ", "TJ"):
self.find_crossing_on_other_side(road_item, p, crossings, group)
def find_crossing_on_other_side(self, road_section, point, crossings, group):
if point.distance(road_section.polyline[0]) < 0.5:
p = road_section.polyline[-1]
else:
p = road_section.polyline[0]
next_road_section = None
for road_item in self.tracing_paper.road_items_around_point(p, 0.5, 0.5):
if road_item == road_section:
continue
if isinstance(road_item, Road_section):
next_road_section = road_item
break
road_section = next_road_section
for road_item in self.tracing_paper.road_items_intersecting_polyline(road_section.polyline):
if isinstance(road_item, Lane_edge) and road_item.marking_type == "J":
self.transfer(road_item, crossings, group)
################################################################################
# Extract yellow boxes
################################################################################
def extract_yellow_boxes(self, path):
lane_markings = list()
for graphics_item in self.tracing_paper.items(path):
road_item = graphics_item.road_item
if hasattr(road_item, "lane_edge"):
continue
if isinstance(road_item, Lane_marking) and road_item.marking_type == "N":
lane_markings.append(road_item)
if len(lane_markings) == 0:
show_info_message("Either there are no yellow-box lane-marking in the selection "
"region or a yellow-box lane-edge has already been created over the lane-markings.")
while len(lane_markings):
group = set()
lane_marking = lane_markings[0]
self.transfer(lane_marking, lane_markings, group)
self.find_connected_yellow_box(lane_marking, lane_markings, group)
self.form_yellow_box(group)
def find_connected_yellow_box(self, lane_marking, lane_markings, group):
for point in lane_marking.polyline:
for road_item in self.tracing_paper.road_items_around_point(point, 0.2, 0.2):
if isinstance(road_item, Lane_marking) and road_item.marking_type == "N":
self.transfer(road_item, lane_markings, group)
self.find_connected_yellow_box(road_item, lane_markings, group)
def form_yellow_box(self, group):
scene_rect = self.main_window.scene.sceneRect()
left = scene_rect.right()
right = scene_rect.left()
top = scene_rect.top()
bottom = scene_rect.bottom()
north = None
east = None
west = None
south = None
for road_item in group:
for point in road_item.polyline:
if left > point.x:
left = point.x
west = point
if right < point.x:
right = point.x
east = point
if bottom > point.y:
bottom = point.y
south = point
if top < point.y:
top = point.y
north = point
polyline = list()
polyline.append(north)
polyline.append(east)
polyline.append(south)
polyline.append(west)
polyline.append(north)
polyline.append(south)
polyline.append(east)
polyline.append(west)
lane_edge = self.main_window.road_network.add_lane_edge("N", polyline)
self.main_window.scene.draw_lane_edge(lane_edge)
self.load_lane_edge(lane_edge)
for road_item in group:
self.tracing_paper.show(road_item)
road_item.lane_edge = lane_edge
################################################################################
# Create missing stop-line
################################################################################
def create_missing_stop_line(self, path):
crossings, lane_markings, kerb_lines = self.items_in_selection_region(path)
if len(kerb_lines) < 2:
show_error_message("The selection region must include at least 2 kerb-lines.")
return
important_marking_types = ("B", "C", "A", "L", "S", "S1", "A1")
for lane_marking in lane_markings:
if lane_marking.marking_type in important_marking_types:
break
else:
msg = "The selection region must contain at least one lane-marking of the types: "
msg = msg + "'%s'" % important_marking_types[0]
for type in important_marking_types[1:]:
msg = msg + ", '%s'" % type
show_error_message(msg)
return
crossing = crossings[0]
ref_point = self.get_reference_point(lane_marking, crossing)
stop_line = self.calc_stop_line(ref_point, crossing, kerb_lines)
for lane_marking in lane_markings:
polyline = self.get_polyline(lane_marking, crossing)
p1 = polyline[-1]
p2 = polyline[-2]
p = intersection_point(p1, p2, stop_line[0], stop_line[-1])
if is_between(p, stop_line[0], stop_line[-1]) and is_between(p, p1, p2):
polyline[-1] = p
self.adjust_lane_marking(lane_marking, polyline)
lane_marking = self.main_window.digital_map.add_lane_marking("M", "discard", stop_line)
self.main_window.scene.draw_lane_marking(lane_marking)
self.load_lane_marking(lane_marking)
def items_in_selection_region(self, path):
crossings = list()
lane_markings = list()
kerb_lines = list()
for graphics_item in self.tracing_paper.items(path):
road_item = graphics_item.road_item
if isinstance(road_item, Lane_marking):
if road_item.marking_type in ("J", "A4"):
crossings.append(road_item)
else:
lane_markings.append(road_item)
elif isinstance(road_item, Kerb_line):
kerb_lines.append(road_item)
return crossings, lane_markings, kerb_lines
def get_reference_point(self, lane_marking, crossing):
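        # Returns a point 2.5 units back from the crossing along the
        # lane-marking's last segment; the reconstructed stop-line will pass
        # through this point, just before the crossing.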
polyline = self.get_polyline(lane_marking, crossing)
p1 = polyline[-1]
p2 = polyline[-2]
p3 = crossing.polyline[0]
p4 = crossing.polyline[-1]
p = intersection_point(p1, p2, p3, p4)
vec = p2 - p
p = p + (2.5 / abs(vec)) * vec
return p
def get_polyline(self, lane_marking, crossing):
polyline = simplify_polyline(lane_marking.polyline)
point = crossing.polyline[0]
if point.distance(polyline[0]) < point.distance(polyline[-1]):
# The first point of the lane-marking polyline is closer to the crossing. This is not
# correct; the polyline should be moving towards the stop-line/crossing.
polyline.reverse()
return polyline
def calc_stop_line(self, ref_point, crossing, kerb_lines):
stop_line = list()
vec = crossing.polyline[-1] - crossing.polyline[0]
p3 = ref_point
p4 = p3 + vec
for kerb_line in kerb_lines:
for i in range(len(kerb_line.polyline) - 1):
p1 = kerb_line.polyline[i]
p2 = kerb_line.polyline[i+1]
p = intersection_point(p1, p2, p3, p4)
if is_between(p, p1, p2):
stop_line.append(p)
break
if len(stop_line) < 2:
            raise RuntimeError("stop-line has only %d point(s)" % len(stop_line))
if len(stop_line) == 2:
return stop_line
stop_line.sort(key = lambda point : point.x)
if is_between(ref_point, stop_line[0], stop_line[1]):
return (stop_line[0], stop_line[1])
return (stop_line[1], stop_line[2])
def adjust_lane_marking(self, lane_marking, polyline):
lane_marking.polyline = polyline
point = polyline[0]
path = QtGui.QPainterPath(QtCore.QPointF(point.x, point.y))
for point in polyline[1:]:
path.lineTo(point.x, point.y)
lane_marking.visual_item.setPath(path)
################################################################################
# Extract road segments
################################################################################
def extract_road_segments(self, path):
lane_edges = list()
bounding_box = QtCore.QRectF()
for graphics_item in self.tracing_paper.items(path):
road_item = graphics_item.road_item
if isinstance(road_item, Lane_edge) and road_item.marking_type == "e":
for point in road_item.polyline:
if not path.contains(QtCore.QPointF(point.x, point.y)):
break
else:
lane_edges.append(road_item)
point1 = QtCore.QPointF(road_item.polyline[0].x, road_item.polyline[0].y)
point2 = QtCore.QPointF(road_item.polyline[-1].x, road_item.polyline[-1].y)
bounding_box = bounding_box.united(QtCore.QRectF(point1, point2))
if len(lane_edges) < 4:
show_error_message("Sorry, this feature is not supported when there are 3 or less "
"lane edges.")
return
crossings = list()
stop_lines = list()
for graphics_item in self.tracing_paper.items(bounding_box):
road_item = graphics_item.road_item
if isinstance(road_item, Lane_edge) and road_item.marking_type == "J":
crossings.append(road_item)
elif isinstance(road_item, Lane_edge) and road_item.marking_type == "M":
stop_lines.append(road_item)
for stop_line in stop_lines:
self.get_lane_edges_ending_at_stop_line(lane_edges, stop_line)
self.order_lane_edges(stop_line.lane_edges, stop_line)
for stop_line in stop_lines:
self.get_crossing_at_start_of_lane_edges(stop_line.lane_edges, crossings)
self.create_side_walk_edges(stop_lines)
self.create_lanes(stop_lines)
for stop_line in stop_lines:
self.redraw(stop_line)
for lane_edge in stop_line.lane_edges:
self.redraw(lane_edge)
self.redraw(stop_line.lane_edges[-1].crossing)
bus_zones = list()
yellow_boxes = list()
#if len(stop_lines) == 1:
# point = stop_lines[0].polyline[0]
# outer_path = QtGui.QPainterPath(QtCore.QPointF(point.x, point.y))
# for point in stop_lines[0].polyline[1:]:
# outer_path.lineTo(point.x, point.y)
# for point in stop_lines[0].lane_edges[-1].
#else:
# outer_path
#for graphics_item in self.tracing_paper.items(outer_path):
# if isinstance(road_item, Lane_edge) and road_item.marking_type == "R":
# bus_zones.append(road_item)
# elif isinstance(road_item, Lane_edge) and road_item.marking_type == "N":
# yellow_boxes.append(road_item)
return lane_edges
def get_lane_edges_ending_at_stop_line(self, lane_edges, stop_line):
stop_line.lane_edges = list()
path = self.make_rectangle_around_line(stop_line.polyline[0], stop_line.polyline[-1])
for graphics_item in self.tracing_paper.items(path):
road_item = graphics_item.road_item
if isinstance(road_item, Lane_edge) and road_item.marking_type == 'e':
if road_item in lane_edges:
stop_line.lane_edges.append(road_item)
#if self.debug:
# print "stop-line={"
# print " polyline={"
# for p in stop_line.polyline:
# print " %s" % p
# print " }"
# for l in stop_line.lane_edges:
# print " lane-edge={"
# for p in l.polyline:
# print " %s" % p
# print " }"
# print "}"
def make_rectangle_around_line(self, point1, point2, extension=0.5, width=3.0):
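        # Builds a closed rectangular selection path around the segment
        # point1-point2, extended by `extension` beyond both end-points and by
        # `width` on either side of the line.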
dx = point2.x - point1.x
dy = point2.y - point1.y
mag = math.hypot(dx, dy)
x = dx * extension / mag
y = dy * extension / mag
dx = dx * width / mag
dy = dy * width / mag
path = QtGui.QPainterPath(QtCore.QPointF(point1.x - x - dy, point1.y - y + dx))
path.lineTo(point1.x - x + dy, point1.y - y - dx)
path.lineTo(point2.x + x + dy, point2.y + y - dx)
path.lineTo(point2.x + x - dy, point2.y + y + dx)
path.closeSubpath()
return path
def order_lane_edges(self, lane_edges, stop_line):
p1 = stop_line.polyline[0]
p2 = stop_line.polyline[-1]
for lane_edge in lane_edges:
if p1.distance(lane_edge.polyline[0]) < p1.distance(lane_edge.polyline[-1]):
# The lane-edge's first point is closer to the closer to the stop-line than its
# last point. Reverse its polyline so that it points towards the stop-line.
lane_edge.polyline.reverse()
lane_edge.stop_line = stop_line
p3 = lane_edge.polyline[-1]
p4 = lane_edge.polyline[-2]
# Extends the lane-edge so that it touches the stop-line.
lane_edge.polyline[-1] = intersection_point(p1, p2, p3, p4)
# Re-arrange the order of the lane-edges, although at this point we do not know if the
# ordering is from left to right or vice versa.
stop_line.lane_edges.sort(key = lambda lane_edge : lane_edge.polyline[-1].x)
# Make sure that the order of the lane-edges is from right to left when looking at the
# stop-line while standing before the lane-edges' last points. Also make sure that the
# stop-line's polyline moves from right to left.
if p1.distance(lane_edges[0].polyline[-1]) < p1.distance(lane_edges[-1].polyline[-1]):
# The stop-line's first point is closer to lane_edges[0] than to the last lane_edge.
p2 = stop_line.polyline[1]
vec1 = p2 - p1
p4 = lane_edges[0].polyline[-1]
p3 = lane_edges[0].polyline[-2]
vec2 = p4 - p3
if cross_product(vec1, vec2) > 0:
lane_edges.reverse()
stop_line.polyline.reverse()
else:
p2 = stop_line.polyline[1]
vec1 = p2 - p1
p4 = lane_edges[-1].polyline[-1]
p3 = lane_edges[-1].polyline[-2]
vec2 = p4 - p3
if cross_product(vec1, vec2) < 0:
lane_edges.reverse()
else:
stop_line.polyline.reverse()
# Extends the stop-line so that the stop-line touches the first and last lane-edges.
stop_line.polyline[0] = lane_edges[0].polyline[-1]
stop_line.polyline[-1] = lane_edges[-1].polyline[-1]
def get_crossing_at_start_of_lane_edges(self, lane_edges, crossings):
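        # Pick the crossing whose two end points are, on average, closest to the
        # start points of the given lane edges, snap the nearby lane edges onto
        # that crossing, and orient its polyline to run across all the lane edges.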
distances = list()
for i, crossing in enumerate(crossings):
d = 0.0
n = 0
p = crossing.polyline[0]
for lane_edge in lane_edges:
d = d + p.distance(lane_edge.polyline[0])
n = n + 1
p = crossing.polyline[-1]
for lane_edge in lane_edges:
d = d + p.distance(lane_edge.polyline[0])
n = n + 1
distances.append(d / n)
the_crossing = None
big_number = 999999
for i, d in enumerate(distances):
if big_number > d:
big_number = d
the_crossing = crossings[i]
p1 = the_crossing.polyline[0]
p2 = the_crossing.polyline[-1]
for lane_edge in lane_edges:
discard, distance = nearest_point(p1, lane_edge.polyline[0], p2)
if distance < 3.0:
lane_edge.crossing = the_crossing
p3 = lane_edge.polyline[0]
p4 = lane_edge.polyline[1]
lane_edge.polyline[0] = intersection_point(p1, p2, p3, p4)
if p2.distance(lane_edges[0].polyline[0]) < p2.distance(lane_edges[-1].polyline[0]):
the_crossing.polyline.reverse()
the_crossing.polyline[-1] = lane_edges[-1].polyline[0]
the_crossing.lane_edges = lane_edges
def create_side_walk_edges(self, stop_lines):
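        # For every stop line, create two side-walk edges offset from the
        # outermost lane edge; when there is only one stop line, also create
        # two more on the opposite side of the first lane edge.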
for stop_line in stop_lines:
stop_line.side_walk_edges = list()
lane_edge = stop_line.lane_edges[-1]
edge = self.create_side_walk_edge(lane_edge.polyline, 2.0)
stop_line.side_walk_edges.append(edge)
edge = self.create_side_walk_edge(lane_edge.polyline, 4.0)
stop_line.side_walk_edges.append(edge)
if len(stop_lines) == 1:
lane_edge = stop_line.lane_edges[0]
edge = self.create_side_walk_edge(lane_edge.polyline, -2.0)
stop_line.side_walk_edges.insert(0, edge)
edge = self.create_side_walk_edge(lane_edge.polyline, -4.0)
stop_line.side_walk_edges.insert(0, edge)
def create_side_walk_edge(self, polyline, distance):
line = self.calc_parallel_line(polyline, distance)
lane_edge = self.main_window.road_network.add_lane_edge("se", line)
self.main_window.scene.draw_lane_edge(lane_edge)
self.load_lane_edge(lane_edge)
return lane_edge
def calc_parallel_line(self, polyline, distance):
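        # Offset every segment of the polyline by `distance` along its
        # (normalised) normal and simplify the result into a new polyline.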
line = list()
for i in range(len(polyline) - 1):
p1 = polyline[i]
p2 = polyline[i + 1]
dx = p2.x - p1.x
dy = p2.y - p1.y
hypot = math.hypot(dx, dy)
normal = Point(-dy, dx)
line.append(p1 + (distance / hypot) * normal)
line.append(p2 + (distance / hypot) * normal)
return simplify_polyline(line)
def create_lanes(self, stop_lines):
for stop_line in stop_lines:
stop_line.lanes = list()
lane = self.create_lane(stop_line.side_walk_edges[0], stop_line.side_walk_edges[1])
stop_line.lanes.append(lane)
for i in range(len(stop_line.lane_edges) - 1):
if i == 0:
lane = self.create_lane(stop_line.lane_edges[i+1], stop_line.lane_edges[i])
else:
lane = self.create_lane(stop_line.lane_edges[i], stop_line.lane_edges[i+1])
stop_line.lanes.append(lane)
if len(stop_line.side_walk_edges) == 4:
lane = self.create_lane(stop_line.side_walk_edges[2], stop_line.side_walk_edges[3])
stop_line.lanes.append(lane)
def create_lane(self, edge1, edge2):
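        # Build a lane centre-line from the midpoints between edge1 and the
        # nearest points on edge2, and draw it as a red, non-selectable path.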
line = list()
for p in edge1.polyline:
j = 0
d = 999999
for i, p2 in enumerate(edge2.polyline):
dd = p.distance(p2)
if d > dd:
d = dd
j = i
if j == 0:
p1 = edge2.polyline[0]
p2 = edge2.polyline[1]
else:
p1 = edge2.polyline[j]
p2 = edge2.polyline[j - 1]
n, d = nearest_point(p1, p, p2)
line.append(Point((n.x + p.x) / 2.0, (n.y + p.y) / 2.0))
p = line[0]
path = QtGui.QPainterPath(QtCore.QPointF(p.x, p.y))
for p in line[1:]:
path.lineTo(p.x, p.y)
item = QtGui.QGraphicsPathItem(path)
item.is_selectable = False
item.setPen(QtCore.Qt.red)
self.main_window.scene.addItem(item)
def redraw(self, lane_edge):
point = lane_edge.polyline[0]
path = QtGui.QPainterPath(QtCore.QPointF(point.x, point.y))
for point in lane_edge.polyline[1:]:
path.lineTo(point.x, point.y)
lane_edge.visual_item.setPath(path)
# vim:columns=100:smartindent:shiftwidth=4:expandtab:softtabstop=4:
| 2.703125 | 3 |
icekit/content_collections/page_type_plugins.py | ic-labs/django-icekit | 52 | 12786131 | <filename>icekit/content_collections/page_type_plugins.py
from django.conf.urls import patterns, url
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from icekit.page_types.layout_page.admin import LayoutPageAdmin
from icekit.plugins import ICEkitFluentContentsPagePlugin
class ListingPagePlugin(ICEkitFluentContentsPagePlugin):
# render_template = 'icekit_content_collections/layouts/collection.html'
model_admin = LayoutPageAdmin
def get_context(self, request, page, **kwargs):
""" Include in context items to be visible on listing page """
context = super(ListingPagePlugin, self).get_context(
request, page, **kwargs)
context['items_to_list'] = page.get_items_to_list(request)
return context
def get_view_response(self, request, page, view_func, view_args, view_kwargs):
"""
Render the custom view that was exposed by the extra plugin URL patterns.
This gives the ability to add extra middleware logic.
"""
return view_func(request, page, *view_args, **view_kwargs)
def collected_content_view(request, parent, slug):
try:
# using .visible() here to acknowledge IS_DRAFT context.
page = parent.get_items_to_mount(request).get(slug=slug)
except ObjectDoesNotExist:
raise Http404
# If the item defines its own response, use that.
if hasattr(page, 'get_response'):
return page.get_response(request, parent=parent)
else:
raise AttributeError("You need to define `%s.get_response(request, parent)`, or override `collected_content_view` in your `ListingPagePlugin` class" % (type(page).__name__))
urls = patterns('',
url(
'^(?P<slug>[-\w]+)/$',
collected_content_view,
),
)
| 1.882813 | 2 |
hood/views.py | Ianodad/Hoodwatch | 0 | 12786132 | from django.shortcuts import redirect, render, get_object_or_404
from .forms import ProfileForm, Hoodform, BusinessForm, PostForm
from django.contrib.auth.decorators import login_required
from .models import Hood, Profile, Business, Post
from urllib import request
from django.db.models import Q
# Create your views here.
@login_required(login_url='/accounts/login/')
def home(request):
current_user = request.user
# print(current_user)
if request.method == 'POST':
formhood = Hoodform(request.POST, request.FILES)
if formhood.is_valid():
upload = formhood.save(commit=False)
upload.admin = request.user.profile
# request.user.profile.save()
upload.save()
return redirect('home')
else:
formhood = Hoodform()
welcome = "welcome to the home page"
hoods = Hood.objects.all()
return render(request, 'hood/home.html', {"welcome": welcome, "formhood": formhood, "hoods": hoods})
@login_required(login_url='/accounts/login/')
def add_profile(request):
current_user = request.user
if request.method == 'POST':
formpro = ProfileForm(request.POST, request.FILES)
if formpro.is_valid():
upload = formpro.save(commit=False)
upload.user = current_user
upload.save()
return redirect('profile')
else:
formpro = ProfileForm()
return render(request, 'hood/add_profile.html', {"formpro": formpro})
@login_required(login_url='/accounts/login/')
def profile(request):
return render(request, 'hood/profile.html')
@login_required(login_url='/accounts/login/')
def neighborhood(request, hood_id):
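    # Handle both the business form and the post form submitted for this hood,
    # then render the hood detail page.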
current_user = request.user
# hood = get_object_or_404(Hood, pk=hood_id)
if request.method == 'POST':
formbiz = BusinessForm(request.POST, request.FILES)
if formbiz.is_valid():
addbiz = formbiz.save(commit=False)
addbiz.hood = hood_id
# upload.admin = current_user
# request.user.profile.save()
addbiz.save()
return redirect('hood')
else:
formbiz = BusinessForm()
if request.method == 'POST':
formpost = PostForm(request.POST, request.FILES)
if formpost.is_valid():
addpost = formpost.save(commit=False)
addpost.hoodwatch = hood_id
addpost.user = current_user
addpost.save()
return redirect('hood')
else:
formpost = PostForm()
# post = get_object_or_404(Post, hoodwatch=hood_id)
hood = get_object_or_404(Hood, pk=hood_id)
# business = get_object_or_404(Business, hood=hood_id)
return render(request, 'hood/hood.html', {"formbiz": formbiz, "formpost": formpost, "hood": hood})
@login_required(login_url='/accounts/login/')
def search(request):
query = request.GET.get('q')
print(query)
if query:
results = Hood.objects.filter(
Q(name__icontains=query))
else:
results = Hood.objects.all()
return render(request, 'pages/search.html', {'results': results})
| 2.15625 | 2 |
xpmig_migrate.py | kschets/XP_migrator | 1 | 12786133 | <filename>xpmig_migrate.py
#!/usr/bin/python
"""
####################################################################################################
TITLE : HPE XP7 Migration, Migrate
DESCRIPTION : Migrate the data to the new server
AUTHOR : <NAME> / StorageTeam
VERSION : Based on previous ODR framework
1.0 Initial version
CONFIG : xpmig.ini
LOG : xpmig_migrate.log
TODO :
Check CaJ replication is > 90%
Wait for the source host to logout
Remove hba_wwns to prevent re-login
Wait for the syncrate to be 100%
Request operator confirmation before split
Stop CaJ replication
Detach external storage raidgroups
Show the overview of the migration in a foot-window
####################################################################################################
"""
import curses
| 1.734375 | 2 |
Beginner/AdultAge.py | man21/IOSD-UIETKUK-HacktoberFest-Meetup-2019 | 22 | 12786134 | # Code for Adult age
# By Ebuka
import re
age = input("Enter your age: ")
try:
    int_age = int(age)
except ValueError:
    raise ValueError("Age is not valid")
if int_age >= 18:
print("Access granted")
else:
print("Access Denied") | 4.03125 | 4 |
rexR-V1.0.5-release/x86_attack/tools.py | t3ls/rex-r | 1 | 12786135 | <gh_stars>1-10
import os
from pwn import *
class tools():
def __init__(self, binary, crash):
self.binary = binary
self.crash = crash
self.core_list = filter(lambda x:"core" in x, os.listdir('.'))
self.core = self.core_list[0]
def gdb(self, command):
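        # Run gdb in batch mode against the binary and its core dump and
        # return whatever the given command prints.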
popen=os.popen('gdb '+self.binary+' '+self.core+' --batch -ex "'+command+'"')
return popen.read()
def ROPsearch(self, register):
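        # Use ROPgadget to find the address of a "call <register>" gadget,
        # falling back to "jmp <register>"; returns the packed address or -1.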
popen = os.popen('ROPgadget --binary '+self.binary+' |grep ": call '+register+'"|cut -d \' \' -f1')
s = popen.read()
if (s != ''):
rop = p32(int(s,16))
else:
popen = os.popen('ROPgadget --binary '+self.binary+' |grep ": jmp '+register+'"|cut -d \' \' -f1')
s = popen.read()
if (s != ''):
rop = p32(int(s,16))
else:
log.info('Can\'t find jmp|call '+register+'')
rop = -1
return rop
def get_data(self, size, addr):
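        # Read `size` giant words (8 bytes each) from the core dump starting at
        # `addr` via gdb's "x /Ngx" command and return them packed little-endian.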
data = str()
s = self.gdb('x /'+str(size)+'gx '+hex(addr))
i = size
j = 1
while(i):
aline = s.split(':\t')[j].split('\n')[0]
if aline == '':
break
if(i>1):
data += p64(int(aline.split('\t')[0],16))
data += p64(int(aline.split('\t')[1],16))
i -= 2
if(j <= size/2):
j += 1
else:
data += p64(int(aline,16))
i -= 1
return data
| 2.15625 | 2 |
Source/AItetris.py | BSAA0203/AI_Tetris | 0 | 12786136 | import pygame as pg
import random, time, sys
import copy,numpy,pyautogui,math
# Define settings and constants
pyautogui.PAUSE = 0.03
pyautogui.FAILSAFE = True
# Game window width and height
WINDOWWIDTH = 800
WINDOWHEIGHT = 640
# Block template size: box size in pixels, template width and height in cells
BOXSIZE = 30
BOXWIDTH = 5
BOXHEIGHT = 5
# Board width and height (in boxes)
BOARDWIDTH = 10
BOARDHEIGHT = 20
BLANK = '0' # empty cell
XMARGIN = int((WINDOWWIDTH - BOARDWIDTH * BOXSIZE) / 2)
YMARGIN = WINDOWHEIGHT - (BOARDHEIGHT * BOXSIZE) - 5
WHITE = (255, 255, 255) # text font colour
BLACK = ( 0, 0, 0) # background colour
GRAY =(177,177,177) # colour of the grid lines inside the board
# Block colours
RED = (155, 0, 0)
GREEN = ( 0, 155, 0)
BLUE = ( 0, 0, 155)
YELLOW = (155, 155, 0)
# Lighter colours used to give the blocks a gradient effect
LIGHTRED = (175, 20, 20)
LIGHTGREEN = ( 20, 175, 20)
LIGHTBLUE = ( 20, 20, 175)
LIGHTYELLOW = (175, 175, 20)
# Board and text colours
BORDERCOLOR = BLUE
BGCOLOR = BLACK
TEXTCOLOR = WHITE
TEXTSHADOWCOLOR = GRAY
# Colour tuples so each piece can be assigned a random colour index
COLORS =(BLUE, GREEN, RED, YELLOW)
LIGHTCOLORS = (LIGHTBLUE, LIGHTGREEN, LIGHTRED, LIGHTYELLOW)
# Block (tetromino) shape templates
S_SHAPE_TEMPLATE = [['00000', '00000', '00110', '01100', '00000'],
['00000', '00100', '00110', '00010', '00000']]
Z_SHAPE_TEMPLATE = [['00000', '00000', '01100', '00110', '00000'],
['00000', '00100', '01100', '01000', '00000']]
I_SHAPE_TEMPLATE = [['00100', '00100', '00100', '00100', '00000'],
['00000', '00000', '11110', '00000', '00000']]
O_SHAPE_TEMPLATE = [['00000', '00000', '01100', '01100', '00000']]
J_SHAPE_TEMPLATE = [['00000', '01000', '01110', '00000',
'00000'], ['00000', '00110', '00100', '00100', '00000'],
['00000', '00000', '01110', '00010',
'00000'], ['00000', '00100', '00100', '01100', '00000']]
L_SHAPE_TEMPLATE = [['00000', '00010', '01110', '00000',
'00000'], ['00000', '00100', '00100', '00110', '00000'],
['00000', '00000', '01110', '01000',
'00000'], ['00000', '01100', '00100', '00100', '00000']]
T_SHAPE_TEMPLATE = [['00000', '00100', '01110', '00000',
'00000'], ['00000', '00100', '00110', '00100', '00000'],
['00000', '00000', '01110', '00100',
'00000'], ['00000', '00100', '01100', '00100', '00000']]
PIECES = {
'S': S_SHAPE_TEMPLATE,
'Z': Z_SHAPE_TEMPLATE,
'J': J_SHAPE_TEMPLATE,
'L': L_SHAPE_TEMPLATE,
'I': I_SHAPE_TEMPLATE,
'O': O_SHAPE_TEMPLATE,
'T': T_SHAPE_TEMPLATE
}
# Define learning parameters
alpha = 0.01
gamma = 0.9
MAX_GAMES = 20
explore_change = 0.5
weights = [-1, -1, -1, -30] # Initial weight vector
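# The four weights score, in order: the summed column heights, the summed
# height differences between neighbouring columns, the maximum column height,
# and the number of holes (see get_parameters and get_expected_score).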
def Run_game(weights, explore_change):
    board = get_blank_board() # create an empty board
    score = 0 # reset the score
    level, fall_freq = get_level_and_fall_freq(score) # initialise the level and the fall interval
    current_move = [0, 0] # best move chosen for the current piece
    falling_piece = get_new_piece() # the piece that is currently falling
    next_piece = get_new_piece() # the piece that will fall next
    last_fall_time = time.time() # the piece drops one row every fall_freq seconds
    while True:
        if falling_piece is None:
            # no piece is falling, so promote the next piece and draw a new one
            falling_piece = next_piece
            next_piece = get_new_piece()
            last_fall_time = time.time() # reset last_fall_time
        if not is_valid_position(board, falling_piece): # the stack has reached the top of the board
            # can't fit a new piece on the board, so game over
            return score, weights, explore_change # return the score and the learning state
current_move, weights = gradient_descent(board, falling_piece, weights,
explore_change)
if explore_change > 0.001:
explore_change = explore_change * 0.99
else:
explore_change = 0
current_move = make_move(current_move)
for event in pg.event.get(): # event handling loop
if event.type == pg.QUIT:
check = False
sys.exit()
if event.type == pg.KEYDOWN:
if (event.key == pg.K_LEFT or event.key == pg.K_a) and is_valid_position(
                        board, falling_piece, adj_x=-1): # left arrow key
falling_piece['x'] -= 1
elif (event.key == pg.K_RIGHT or event.key == pg.K_d) and is_valid_position(
                        board, falling_piece, adj_x=1): # right arrow key
falling_piece['x'] += 1
                elif (event.key == pg.K_UP or event.key == pg.K_w): # up arrow: rotate the piece
falling_piece['rotation'] = (falling_piece['rotation'] + 1) % len(PIECES[falling_piece['shape']])
if not is_valid_position(board, falling_piece):
falling_piece['rotation'] = (falling_piece['rotation'] - 1) % len(PIECES[falling_piece['shape']])
                elif (event.key == pg.K_DOWN or event.key == pg.K_s): # down arrow: move the piece down one row
if is_valid_position(board, falling_piece, adj_y=1):
falling_piece['y'] += 1
                elif event.key == pg.K_SPACE: # space bar: drop the piece to the bottom
for i in range(1, BOARDHEIGHT):
if not is_valid_position(board, falling_piece, adj_y=i):
break
falling_piece['y'] += i - 1
        if time.time() - last_fall_time > fall_freq: # time for the piece to drop one row
            if not is_valid_position(board, falling_piece, adj_y=1):
                add_to_board(board, falling_piece) # lock the piece into the board
                lines, board = remove_complete_lines(board) # number of cleared lines
                score += lines * lines # add to the score
                level, fall_freq = get_level_and_fall_freq(score) # update the level and fall interval
                falling_piece = None # no piece is currently falling
            else:
                # move the piece down one row at the current fall interval
                falling_piece['y'] += 1
                last_fall_time = time.time()
GAME.fill(BGCOLOR)
draw_board(board)
draw_status(score, level, current_move,games_completed)
draw_next_piece(next_piece)
if falling_piece is not None:
draw_piece(falling_piece)
pg.display.update()
        FPS.tick(30) # run the game loop at 30 frames per second
def make_text_objs(text, font, color):
    surf = font.render(text, True, color) # render the text with the given font
return surf, surf.get_rect()
def show_text_screen(text): # display the given text in the centre of the screen
title_surf, title_rect = make_text_objs(text, SubFont, TEXTSHADOWCOLOR)
title_rect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2))
GAME.blit(title_surf, title_rect)
title_surf, title_rect = make_text_objs(text, SubFont, TEXTCOLOR)
title_rect.center = (int(WINDOWWIDTH / 2) - 3, int(WINDOWHEIGHT / 2) - 3)
GAME.blit(title_surf, title_rect)
press_key_surf, press_key_rect = make_text_objs('Please wait to continue.',
SubFont, TEXTCOLOR)
press_key_rect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2) + 100)
GAME.blit(press_key_surf, press_key_rect)
pg.display.update()
FPS.tick()
time.sleep(0.5)
def get_level_and_fall_freq(score):
    level = int(score / 3) # the level goes up every 3 points
    if level < 6: # below level 6 the fall interval keeps shrinking
        fallsp = 0.6 - (level * 0.1) + 0.1
    else: # from level 6 onwards the speed stays constant
        fallsp = 0.2
    return level, fallsp # return the level and the fall interval in seconds
def get_new_piece():
    # pick a random shape, rotation and colour for the new piece
shape = random.choice(list(PIECES.keys()))
new_piece = {
'shape': shape,
'rotation': random.randint(0,
len(PIECES[shape]) - 1),
'x': int(BOARDWIDTH / 2) - int(BOXWIDTH / 2),
'y': -2, # start it above the board (i.e. less than 0)
'color': random.randint(1,
len(COLORS) - 1)
}
return new_piece
def add_to_board(board, piece):
for x in range(BOXWIDTH):
for y in range(BOXHEIGHT):
            if PIECES[piece['shape']][piece['rotation']][y][x] != BLANK and x + piece['x'] < 10 and y + piece['y'] < 20: # filled template cell that lands inside the board
                board[x + piece['x']][y + piece['y']] = piece['color'] # fill the board cell with the piece's colour
def get_blank_board():
    # create an empty board array of BOARDWIDTH x BOARDHEIGHT cells
board = []
for _ in range(BOARDWIDTH):
board.append(['0'] * BOARDHEIGHT)
return board
def is_on_board(x, y):
    return x >= 0 and x < BOARDWIDTH and y < BOARDHEIGHT # True when the cell lies on the board
def is_valid_position(board, piece, adj_x=0, adj_y=0):
for x in range(BOXWIDTH):
for y in range(BOXHEIGHT):
is_above_board = y + piece['y'] + adj_y < 0
            if is_above_board or PIECES[piece['shape']][piece['rotation']][y][x] == BLANK: # empty template cell, or still above the board
                continue # keep checking the remaining cells
            if not is_on_board(x + piece['x'] + adj_x, y + piece['y'] + adj_y):
                return False # the piece would leave the board
            if board[x + piece['x'] + adj_x][y + piece['y'] + adj_y] != BLANK:
                return False # the target board cell is already occupied
return True
def is_complete_line(board, y):
for x in range(BOARDWIDTH):
        if board[x][y] == BLANK: # the row still has an empty cell
            return False # so it is not complete
    return True # every cell in the row is filled
def remove_complete_lines(board):
lines_removed = 0
y = BOARDHEIGHT - 1
while y >= 0:
if is_complete_line(board, y):
for pull_down_y in range(y, 0, -1):
for x in range(BOARDWIDTH):
                    board[x][pull_down_y] = board[x][pull_down_y - 1] # pull every row above the cleared line down by one
            for x in range(BOARDWIDTH):
                board[x][0] = BLANK # the top row becomes empty
            lines_removed += 1 # count the cleared line
else:
y -= 1
return lines_removed, board
def convert_to_pixel_coords(boxx, boxy):
# Convert the given xy coordinates of the board to xy
# coordinates of the location on the screen.
return (XMARGIN + (boxx * BOXSIZE)), (YMARGIN + (boxy * BOXSIZE))
def draw_box(boxx, boxy, color, pixelx=None, pixely=None): # draw a single box and the board grid lines
    for i in range(BOARDWIDTH):
        pg.draw.line(GAME, GRAY, ((XMARGIN + 10) + (i * BOXSIZE - 10), YMARGIN - 3),
                     ((XMARGIN + 10) + (i * BOXSIZE - 10), YMARGIN + 600), 2) # vertical grid lines of the board
    for j in range(BOARDHEIGHT):
        pg.draw.line(GAME, GRAY, (XMARGIN, (YMARGIN - 3) + (j * BOXSIZE)),
                     (XMARGIN + 300, (YMARGIN - 3) + (j * BOXSIZE)), 2) # horizontal grid lines of the board
if color == BLANK:
return
if pixelx is None and pixely is None:
pixelx, pixely = convert_to_pixel_coords(boxx, boxy)
pg.draw.rect(GAME, COLORS[color],
(pixelx + 1, pixely + 1, BOXSIZE - 1, BOXSIZE - 1))
pg.draw.rect(GAME, LIGHTCOLORS[color],
(pixelx + 1, pixely + 1, BOXSIZE - 4, BOXSIZE - 4))
def draw_board(board):
    # render the board array maintained in code onto the screen
pg.draw.rect(GAME, BORDERCOLOR,
(XMARGIN - 3, YMARGIN - 7, (BOARDWIDTH * BOXSIZE) + 8,
(BOARDHEIGHT * BOXSIZE) + 8), 5)
# fill the background of the board
pg.draw.rect(
GAME, BGCOLOR,
(XMARGIN, YMARGIN, BOXSIZE * BOARDWIDTH, BOXSIZE * BOARDHEIGHT))
# draw the individual boxes on the board
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT):
draw_box(x, y, board[x][y])
def draw_status(score, level, best_move, games_completed): # draw the score, level, learning progress and the current best move
# draw the score text
score_surf = SubFont.render('Score: %s' % score, True, TEXTCOLOR)
score_rect = score_surf.get_rect()
score_rect.topleft = (WINDOWWIDTH - 200, 20)
GAME.blit(score_surf, score_rect)
# draw the level text
level_surf = SubFont.render('Level: %s' % level, True, TEXTCOLOR)
level_rect = level_surf.get_rect()
level_rect.topleft = (WINDOWWIDTH - 200, 50)
GAME.blit(level_surf, level_rect)
# draw the best_move text
move_surf = SubFont.render('Current Move: %s' % best_move, True, TEXTCOLOR)
move_rect = move_surf.get_rect()
move_rect.topleft = (WINDOWWIDTH - 230, 300)
GAME.blit(move_surf, move_rect)
# draw the best_move text
move_surf = SubFont.render('Learing level : %s' % games_completed, True, TEXTCOLOR)
move_rect = move_surf.get_rect()
move_rect.topleft = (20, 150)
GAME.blit(move_surf, move_rect)
def draw_piece(piece, pixelx=None, pixely=None):
shape_to_draw = PIECES[piece['shape']][piece['rotation']]
if pixelx is None and pixely is None:
        # convert the piece's board x,y coordinates into pixel coordinates for rendering
pixelx, pixely = convert_to_pixel_coords(piece['x'], piece['y'])
for x in range(BOXWIDTH):
for y in range(BOXHEIGHT):
if shape_to_draw[y][x] != BLANK:
draw_box(None, None, piece['color'], pixelx + (x * BOXSIZE), pixely + (y * BOXSIZE))
def draw_next_piece(piece): # render the preview of the next piece
# draw the "next" text
next_surf = SubFont.render('Next:', True, TEXTCOLOR)
next_rect = next_surf.get_rect()
next_rect.topleft = (WINDOWWIDTH - 200, 80)
GAME.blit(next_surf, next_rect)
# draw the "next" piece
draw_piece(piece, pixelx=WINDOWWIDTH - 180, pixely=100)
def get_parameters(board):
# This function will calculate different parameters of the current board
# Initialize some stuff
heights = [0]*BOARDWIDTH
diffs = [0]*(BOARDWIDTH-1)
holes = 0
diff_sum = 0
# Calculate the maximum height of each column
for i in range(0, BOARDWIDTH): # Select a column
for j in range(0, BOARDHEIGHT): # Search down starting from the top of the board
if int(board[i][j]) > 0: # Is the cell occupied?
heights[i] = BOARDHEIGHT - j # Store the height value
break
# Calculate the difference in heights
for i in range(0, len(diffs)):
diffs[i] = heights[i + 1] - heights[i]
# Calculate the maximum height
max_height = max(heights)
# Count the number of holes
for i in range(0, BOARDWIDTH):
occupied = 0 # Set the 'Occupied' flag to 0 for each new column
for j in range(0, BOARDHEIGHT): # Scan from top to bottom
if int(board[i][j]) > 0:
occupied = 1 # If a block is found, set the 'Occupied' flag to 1
if int(board[i][j]) == 0 and occupied == 1:
holes += 1 # If a hole is found, add one to the count
height_sum = sum(heights)
for i in diffs:
diff_sum += abs(i)
return height_sum, diff_sum, max_height, holes
def get_expected_score(test_board, weights):
# This function calculates the score of a given board state, given weights and the number
# of lines previously cleared.
height_sum, diff_sum, max_height, holes = get_parameters(test_board)
A = weights[0]
B = weights[1]
C = weights[2]
D = weights[3]
test_score = float(A * height_sum + B * diff_sum + C * max_height + D * holes)
return test_score
def simulate_board(test_board, test_piece, move):
# This function simulates placing the current falling piece onto the
# board, specified by 'move,' an array with two elements, 'rot' and 'sideways'.
# 'rot' gives the number of times the piece is to be rotated ranging in [0:3]
# 'sideways' gives the horizontal movement from the piece's current position, in [-9:9]
# It removes complete lines and gives returns the next board state as well as the number
# of lines cleared.
rot = move[0]
sideways = move[1]
test_lines_removed = 0
reference_height = get_parameters(test_board)[0]
if test_piece is None:
return None
# Rotate test_piece to match the desired move
for i in range(0, rot):
test_piece['rotation'] = (test_piece['rotation'] + 1) % len(PIECES[test_piece['shape']])
# Test for move validity!
if not is_valid_position(test_board, test_piece, adj_x=sideways, adj_y=0):
# The move itself is not valid!
return None
# Move the test_piece to collide on the board
test_piece['x'] += sideways
for i in range(0, BOARDHEIGHT):
if is_valid_position(test_board, test_piece, adj_x=0, adj_y=1):
test_piece['y'] = i
# Place the piece on the virtual board
if is_valid_position(test_board, test_piece, adj_x=0, adj_y=0):
add_to_board(test_board, test_piece)
test_lines_removed, test_board = remove_complete_lines(test_board)
height_sum, diff_sum, max_height, holes = get_parameters(test_board)
one_step_reward = 5 * (test_lines_removed * test_lines_removed) - (height_sum - reference_height)
return test_board, one_step_reward
def find_best_move(board, piece, weights, explore_change):
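    # Epsilon-greedy move selection: simulate every rotation and horizontal
    # shift of the piece, score the resulting boards with the weight vector,
    # and return the best move (or a random one with probability explore_change).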
move_list = []
score_list = []
for rot in range(0, len(PIECES[piece['shape']])):
for sideways in range(-5, 6):
move = [rot, sideways]
test_board = copy.deepcopy(board)
test_piece = copy.deepcopy(piece)
test_board = simulate_board(test_board, test_piece, move)
if test_board is not None:
move_list.append(move)
test_score = get_expected_score(test_board[0], weights)
score_list.append(test_score)
best_score = max(score_list)
best_move = move_list[score_list.index(best_score)]
if random.random() < explore_change:
move = move_list[random.randint(0, len(move_list) - 1)]
else:
move = best_move
return move
def make_move(move):
# This function will make the indicated move, with the first digit
# representing the number of rotations to be made and the seconds
# representing the column to place the piece in.
rot = move[0]
sideways = move[1]
if rot != 0:
pyautogui.press('up')
rot -= 1
else:
if sideways == 0:
pyautogui.press('space')
if sideways < 0:
pyautogui.press('left')
sideways += 1
if sideways > 0:
pyautogui.press('right')
sideways -= 1
return [rot, sideways]
def gradient_descent(board, piece, weights, explore_change):
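    # Choose a move, simulate it, and nudge each weight using the one-step
    # reward and the discounted new feature value (a TD-style update scaled by
    # the current weight), then rescale the weights so their sum has magnitude 100.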
move = find_best_move(board, piece, weights, explore_change)
old_params = get_parameters(board)
test_board = copy.deepcopy(board)
test_piece = copy.deepcopy(piece)
test_board = simulate_board(test_board, test_piece, move)
if test_board is not None:
new_params = get_parameters(test_board[0])
one_step_reward = test_board[1]
for i in range(0, len(weights)):
weights[i] = weights[i] + alpha * weights[i] * (
one_step_reward - old_params[i] + gamma * new_params[i])
regularization_term = abs(sum(weights))
for i in range(0, len(weights)):
weights[i] = 100 * weights[i] / regularization_term
weights[i] = math.floor(1e4 * weights[i]) / 1e4 # Rounds the weights
return move, weights
def Run(g,f,s):
global GAME, FPS, SubFont
global weights,explore_change, games_completed
GAME = g
FPS = f
SubFont = s
games_completed = 0
while True: # game loop
games_completed += 1
newScore, weights, explore_change = Run_game(weights, explore_change)
print("Game Number ", games_completed, " achieved a score of: ", newScore )
        if games_completed == MAX_GAMES: # stop after MAX_GAMES games in total
show_text_screen('Game Finish')
time.sleep(3)
return
else:
            show_text_screen('Game Over') # otherwise keep playing
setup.py | margulies/brainsvg | 0 | 12786137 | <filename>setup.py
from distutils.core import setup
setup(name='brainsvg',
version='0.0.1',
install_requires = ['numpy',
'gdist',
'nibabel',
'scipy',
'pandas',
'matplotlib',
'colormap',
'networkx',
'surfdist'],
description='Package for creating brain-related svg illustrations based on data',
url='http://github.com/margulies/brainsvg',
author='margulies',
author_email='<EMAIL>',
license='MIT',
packages=['brainsvg'],
zip_safe=False)
| 1.203125 | 1 |
tpd_analyse/tpd.py | CatTheoryDTU/tpd-analyse | 0 | 12786138 | <gh_stars>0
"""
Main file for TPD analysis.
"""
import numpy as np
from glob import glob
from pprint import pprint
import os, sys
from scipy.optimize import curve_fit, least_squares, minimize
from tpd_analyse.tools.parser_class import experimentalTPD
import mpmath as mp
from mpmath import mpf
from ase.thermochemistry import HarmonicThermo, IdealGasThermo
from ase.io import read
from ase.db import connect
import csv
from ase.units import kB
from scipy import optimize
from scipy.optimize import newton
class PlotTPD():
def __init__(self, exp_data, order, T_switch, T_max, T_rate_min, beta,
thermo_gas, thermo_ads=None, correct_background=True, bounds=[], plot_temperature=np.linspace(100,400),
p=101325, initial_guess_theta=0.5, guess_b=0.1, calculate_eq_coverage=True, theta_range=None, fit_quad_ads_ads=False):
"""Perform the temperature programmed desorption analysis for a surface
        based on a configurational entropy term, an interaction parameter, and a
        zero-coverage energy term.
Args:
exp_data (list): globbed files with csv
order (int): Order of the reaction
T_switch (list): Temperatures at which the reaction rate switches (K)
T_max (float): Temperature at which the TPD is cutoff (K)
T_rate_min (float): Temperature range where minimum rate of the TPD is expected (K)
beta (float): Rate of heating (K/s)
constants (list): Parameters to parse TPD
thermo_ads (obj): HarmonicThermo for the adsorbate
thermo_gas (obj): IdealGasThermo for the gas
bounds (list, optional): Bounds within to fit the coverage of the TPD. Defaults to [].
plot_temperature (array, optional): Temperature range to plot the equilbirum coverage. Defaults to np.linspace(100,400).
p (float, optional): Pressure of gas molecule. Defaults to 101325.
initial_guess_theta (float, optional): Initial guess for theta. Defaults to 0.5.
guess_b (float, optional): Initial guess for b. Defaults to 0.1.
theta_range (list, optional): Range of theta to fit. Defaults to None.
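
        Example (the file names and numeric values below are only illustrative):

            tpd = PlotTPD(exp_data=glob('exposure_*.csv'), order=1,
                          T_switch=[170], T_max=270, T_rate_min=[250, 300],
                          beta=3, thermo_gas=thermo_gas, thermo_ads=thermo_ads)
            tpd.get_results()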
"""
# Define all the __init__ variables
self.exp_data = exp_data # globbed files with csv
self.order = order
self.thermo_ads = thermo_ads # adsorbate HarmonicThermo
self.thermo_gas = thermo_gas # gas phase IdealGasThermo
self.plot_temperature = plot_temperature # temperature for plotting equilibrium coverages
self.p = p # partial pressure of gas
self.bounds = bounds
self.correct_background = correct_background
self.initial_guess_theta = initial_guess_theta
self.guess_b = guess_b
self.T_switch = T_switch
self.T_max = T_max
self.T_rate_min = T_rate_min
self.beta = beta
self.calculate_eq_coverage = calculate_eq_coverage
self.theta_range = theta_range
self.fit_quad_ads_ads = fit_quad_ads_ads
# Results
self.norm_results = {} # Normalised results
self.results = {} # Final results
self.theta_eq = {} # Equilibrium coverages
self.theta_eq_p = {} # Equilibrium coverages positive error
self.theta_eq_n = {} # Equilibrium coverages negative error
self.dG = {} # dG for adsorption
self.temperature_range = {} # temperature range to plot
def _exponential_fit(self, temperature, a, k):
"""Exponential fit to the tail of the TPD to remove pumping
related rates
Args:
temperature (list): temperature list based on TPD
a (float): amplitude of exponent
k (float): argument of exponent
Returns:
list: rates for each temperature
"""
rate = a * np.exp(-k * temperature)
return rate
def get_results(self):
"""Perform the TPD analysis
"""
# T_switch, T_max, T_rate_min, beta = self.constants
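        # Outline: (1) split each TPD trace into the temperature ranges defined
        # by T_switch, (2) background-correct and normalise the rate,
        # (3) convert rate and coverage into a desorption energy, (4) fit E0,
        # the interaction parameter b and the saturation coverage, and
        # (5) optionally solve for the equilibrium coverage on each surface.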
# Do some checks on the temperatures
if isinstance(self.T_switch, (int,float)):
self.T_switch = [self.T_switch]
T_max = self.T_max
T_rate_min = self.T_rate_min
beta = self.beta
T_switch = self.T_switch
assert T_max >= np.max(T_switch); 'The maximum temperature must be greater than when the switch occurs'
assert np.max(T_rate_min) >= T_max; 'Maximum temperature of the TPD must be lower than that of the flat region'
# Create the temperature range based on the switch data
temperature_ranges = []
for i in range(len(T_switch)+1):
if i == 0:
temperature_ranges.append([0, T_switch[i]])
elif i == len(T_switch):
if T_switch[i-1] != T_max:
temperature_ranges.append([T_switch[i-1], T_max])
# range of temperatures for different TPD values
self.temperature_range = temperature_ranges
# Get the TPD results which includes background subtraction
# for each exposure
for index, f in enumerate(sorted(self.exp_data)):
exposure = float(f.split('/')[-1].split('.')[0].split('_')[1].replace('p', '.'))
self.norm_results[exposure] = experimentalTPD(tpd_filename=f,
temprange=temperature_ranges,
tempmin=T_rate_min,
beta=beta,
order=self.order,
correct_background=self.correct_background,
)
# Iterate over the different facets in temperature ranges
for surface_index in range(len(temperature_ranges)):
T_range = temperature_ranges[surface_index]
# Operate only on the right temperature range
indices = [ a for a in range(len(self.norm_results[exposure].temperature)) \
if T_range[0] < self.norm_results[exposure].temperature[a] < T_range[1] ]
# Normalise the data
self.results.setdefault(surface_index, {})[exposure] = {}
self.results[surface_index][exposure]['temperature'] = self.norm_results[exposure].temperature[indices]
self.results[surface_index][exposure]['normalized_rate'] = self.norm_results[exposure].normalized_rate[indices]
# some variables to make it easy to run
temperatures = self.results[surface_index][exposure]['temperature']
rates = self.results[surface_index][exposure]['normalized_rate']
# For each point get the energy of desorption as a function of the coverage
data = self._Ed_temp_dependent(
temperature=temperatures,
rate=rates,
beta=beta,)
# correct for any nans that could be in place
args_accept = [i for i in range(len(data[0])) \
if np.isfinite(data[0][i]) and \
data[1][i] > 0]
# Check if there is a coverage range that is chosen
if self.theta_range is not None:
# If there is a range of theta values to fit then
# only use the data that is within that range
args_accept = [i for i in args_accept \
if self.theta_range[0] < data[1][i] < self.theta_range[1]]
self.results[surface_index][exposure]['Ed'] = data[0][args_accept]
self.results[surface_index][exposure]['theta_rel'] = data[1][args_accept]
temperature_fit = self.norm_results[exposure].temperature[indices][args_accept]
self.results[surface_index][exposure]['temperature_fit'] = temperature_fit
# Fit the Desorption energy curve to the desorption energy equation
# First get good initial guesses for parameters
# For E0 we just take the mean of all the values
guess_E0 = np.mean(self.results[surface_index][exposure]['Ed'])
guess_b = self.guess_b
if not self.fit_quad_ads_ads:
popt, pcov = curve_fit(\
lambda temp, E0, b, theta_sat: self._fit_Ed_theta(temp, E0, b, theta_sat,
self.results[surface_index][exposure]['theta_rel']),
xdata = temperature_fit,
ydata = self.results[surface_index][exposure]['Ed'],
p0 = [guess_E0, guess_b, self.initial_guess_theta],
)
else:
popt, pcov = curve_fit(\
lambda temp, E0, b1, b2, theta_sat: self._fit_quad_Ed_theta(temp, E0, b1, b2, theta_sat,
self.results[surface_index][exposure]['theta_rel']),
xdata = temperature_fit,
ydata = self.results[surface_index][exposure]['Ed'],
p0 = [guess_E0] + guess_b + [self.initial_guess_theta],
)
if not self.fit_quad_ads_ads:
residual = self._least_sq_Ed_theta(popt, temperature=temperature_fit,
theta_rel = self.results[surface_index][exposure]['theta_rel'],
Ed_real = self.results[surface_index][exposure]['Ed'],)
else:
residual = self._least_sq_quad_Ed_theta(popt, temperature=temperature_fit,
theta_rel = self.results[surface_index][exposure]['theta_rel'],
Ed_real = self.results[surface_index][exposure]['Ed'],)
self.results[surface_index][exposure]['E0'] = popt[0]
if not self.fit_quad_ads_ads:
self.results[surface_index][exposure]['b'] = popt[1]
self.results[surface_index][exposure]['theta_sat'] = popt[2]
else:
self.results[surface_index][exposure]['b'] = popt[1:3]
self.results[surface_index][exposure]['theta_sat'] = popt[3]
self.results[surface_index][exposure]['error'] = residual
if not self.fit_quad_ads_ads:
self.results[surface_index][exposure]['Ed_fitted'] = self._fit_Ed_theta(temperature_fit, \
*popt, self.results[surface_index][exposure]['theta_rel'])
else:
self.results[surface_index][exposure]['Ed_fitted'] = self._fit_quad_Ed_theta(temperature_fit, \
*popt, self.results[surface_index][exposure]['theta_rel'])
self.results[surface_index][exposure]['configurational_entropy'] = \
self._get_configuration_entropy_contribution(temperature_fit, \
self.results[surface_index][exposure]['theta_sat'], \
self.results[surface_index][exposure]['theta_rel'], \
)
self.results[surface_index][exposure]['ads_ads_interaction'] = \
self._get_ads_ads_interaction(temperature_fit, \
self.results[surface_index][exposure]['theta_sat'], \
self.results[surface_index][exposure]['theta_rel'], \
self.results[surface_index][exposure]['b'], \
)
if not self.calculate_eq_coverage:
continue
                # Calculate the coverage at equilibrium
self.theta_eq.setdefault(surface_index, {})[exposure] = {}
self.theta_eq_p.setdefault(surface_index, {})[exposure] = {}
self.theta_eq_n.setdefault(surface_index, {})[exposure] = {}
self.dG.setdefault(surface_index, {})[exposure] = {}
self.theta_eq[surface_index][exposure], self.dG[surface_index][exposure]\
= self._get_equilibirum_coverage(
E0 = self.results[surface_index][exposure]['E0'],
b = self.results[surface_index][exposure]['b'][0] if self.fit_quad_ads_ads else self.results[surface_index][exposure]['b'],
)
self.theta_eq_p[surface_index][exposure], _ \
= self._get_equilibirum_coverage(
E0 = self.results[surface_index][exposure]['E0'] + self.results[surface_index][exposure]['error'],
b = self.results[surface_index][exposure]['b'][0] if self.fit_quad_ads_ads else self.results[surface_index][exposure]['b'],
)
self.theta_eq_n[surface_index][exposure], _ \
= self._get_equilibirum_coverage(
E0 = self.results[surface_index][exposure]['E0'] - self.results[surface_index][exposure]['error'],
b = self.results[surface_index][exposure]['b'][0] if self.fit_quad_ads_ads else self.results[surface_index][exposure]['b'],
)
def _Ed_temp_dependent(self, temperature, rate, beta):
"""Gets the desorption energy as a function of the temperature
1. Do trapezoidal integration to get the coverage by integrating over the
rate and temperature
2. Get the desorption energy by fitting to the form
Ed = -kBT log(-dtheta/dt / mu / theta)
3. Normalise theta by dividing my maximum coverage
Args:
temperature (list): temperatures corresponding to the TPD
rate (list): rate from the TPD
beta (float): Rate of heating
Returns:
list: Desorption energy and coverage
"""
h = 4.135e-15 # eV.s
theta = []
for i in range(len(temperature)):
cov = np.trapz(rate[i:], temperature[i:])
theta.append(cov)
theta = np.array(theta)
rate = np.array(rate)
dtheta_dT = np.diff(theta) / np.diff(temperature)
dtheta_dt = beta * dtheta_dT #rate[0:-1]
temperature = np.array(temperature)
nu = kB * temperature[0:-1] / h
Ed = -kB * temperature[0:-1] * np.log( -1 * dtheta_dt / (nu * theta[0:-1]))
return [Ed, theta[0:-1]/max(theta[0:-1])]
def _least_sq_Ed_theta(self, x, temperature, theta_rel, Ed_real):
E_0, b, theta_sat = x
Ed_fit = []
for i in range(len(temperature)):
Ed = E_0 \
- kB * temperature[i] * np.log(theta_sat * theta_rel[i] / ( 1 - theta_sat * theta_rel[i] ) ) \
- b * theta_rel[i] * theta_sat
Ed_fit.append(Ed)
residual = Ed_real - Ed_fit
mea = np.mean([np.abs(a) for a in residual])
return mea
def _least_sq_quad_Ed_theta(self, x, temperature, theta_rel, Ed_real):
E_0, b1, b2, theta_sat = x
Ed_fit = []
for i in range(len(temperature)):
Ed = E_0 \
- kB * temperature[i] * np.log(theta_sat*theta_rel[i] / ( 1 - theta_sat*theta_rel[i]))
- b1 * theta_rel[i] * theta_sat
- b2 * (theta_rel[i] * theta_sat)**2
Ed_fit.append(Ed)
residual = Ed_real - Ed_fit
mea = np.mean([np.abs(a) for a in residual])
return mea
def _get_configuration_entropy_contribution(self, temperature, theta_sat, theta_rel):
"""Get the contribution of the configuration entropy to the total energy of desorption.
Args:
temperature (list): temperature range
theta_sat (float): saturation coverage of TPD
theta_rel (list): relative coverage of TPD
Returns:
E_config (list): contribution of the configuration entropy to the total entropy
"""
E_config = []
for i in range(len(temperature)):
Ec = - kB * temperature[i] * np.log(theta_sat*theta_rel[i] / ( 1 - theta_sat*theta_rel[i]))
E_config.append(Ec)
return E_config
def _get_ads_ads_interaction(self, temperature, theta_sat, theta_rel, b):
"""Get the contribution of the saturation coverage to the total energy of desorption.
Args:
temperature (list): temperature range
theta_sat (float): saturation coverage of TPD
theta_rel (list): relative coverage of TPD
b (float): slope of the saturation coverage
Returns:
E_b (list) : contribution of the saturation coverage to the total entropy
"""
E_b = []
for i in range(len(temperature)):
E_b.append(-b * theta_rel[i] * theta_sat)
return E_b
def _fit_Ed_theta(self, temperature, E_0, b, theta_sat, theta_rel):
"""Fit the desorption energy to the relative coverage
Fed into scipy curve fit
Args:
temperature (list): temperature range
E_0 (float): energy at zero coverage
b (float): interaction parameter
theta_sat (float): saturation coverage of TPD
theta_rel (float): relative coverage
Returns:
list: Desorption energy based on fit
"""
Ed_all = []
for i in range(len(temperature)):
Ed = E_0 \
- kB * temperature[i] * np.log(theta_sat*theta_rel[i] / ( 1 - theta_sat*theta_rel[i]))
- b * theta_rel[i] * theta_sat
Ed_all.append(Ed)
return Ed_all
def _fit_quad_Ed_theta(self, temperature, E_0, b1, b2, theta_sat, theta_rel):
""" Fit the Ed vs theta relationship using the configurational entropy
and the ads-ads interaction which has a quadratic dependence.
Args:
temperature (list): temperature range
E_0 (float): energy at zero coverage
b (float): interaction parameter
theta_sat (float): saturation coverage of TPD
theta_rel (float): relative coverage
Returns:
list: Desorption energy based on fit
"""
Ed_all = []
for i in range(len(temperature)):
Ed = E_0 \
- kB * temperature[i] * np.log(theta_sat*theta_rel[i] / ( 1 - theta_sat*theta_rel[i]))
- b1 * theta_rel[i] * theta_sat
- b2 * (theta_rel[i] * theta_sat)**2
Ed_all.append(Ed)
return Ed_all
def _eq_coverage_function(self, theta, T, G0, b, p):
"""Function to implicitly solve the equilibrium coverage
Args:
theta (float): Guessed coverage
T (float) : temperature
G0 (float): Free energy at the half a mono-layer coverage
b (float): Interaction parameter
p (float): partial pressure of CO
"""
kBT = kB * T
        ## start by calculating the equilibrium constant
K = np.exp( -1 * ( G0 + b * ( theta - 1./2. ) ) / kBT )
return theta - ( K / ( 1 + K ) )
def _jacobian(self, theta, T, G0, b, p):
"""Jacobian function for finding the root
Args:
theta (list): Guessed coverage
T ([type]): [description]
G0 (float): Free energy at the half a mono-layer coverage
b (float): Interaction parameter
p (float): partial pressure of CO
"""
kBT = kB * T
        ## start by calculating the equilibrium constant
K = np.exp( -1 * ( G0 + b * ( theta - 1./2. ) ) / kBT )
return 1 + K / (1+K)**2 * b / kBT
    def _get_equilibirum_coverage(self, E0, b):
        """Equilibrium coverage based on an equilibrium constant that is coverage dependent
Args:
E0 (float): Desorption energy at zero coverage
b (float): Interaction parameter
Returns:
list: equilibrium coverage and free energy of CO adsorption
"""
theta_eq = []
dG_eq = []
for index, T in enumerate(self.plot_temperature):
entropy_ads = self.thermo_ads.get_entropy(temperature=T, verbose=False)
entropy_gas = self.thermo_gas.get_entropy(temperature=T, \
pressure=self.p, verbose=False)
# converting from energies to free energies
entropy_difference = entropy_ads - entropy_gas
partial_co = self.p / 101325.
# convert desorption energy into adsorption energy
dG0 = -1 * E0 -1 * T * entropy_difference
K_guess = np.exp( -1 * dG0 / kB / T )
theta_guess = K_guess / ( 1 + K_guess )
try:
theta = newton(
func = lambda x: self._eq_coverage_function(x, T, dG0, b, partial_co ),
fprime = lambda x: self._jacobian(x, T, dG0, b, partial_co ),
x0=theta_guess,
)
except RuntimeError:
theta = 0
dG = ( -dG0 + b * ( theta - 1./2. ) )
theta_eq.append(theta)
dG_eq.append(dG)
return theta_eq, dG_eq
| 2.4375 | 2 |
Script/PostProcessMain.py | hkujy/LinJuanJuan | 0 | 12786139 | """
main entry of post process results data and plot
"""
import PlotHeatMap
import PostProcessDataFuns as psf
import PlotOperatorConverge
import para
import PlotGantt
# root_folder = r"C:\Users\phdji\OneDrive - Danmarks Tekniske Universitet\JuanJuanLin\Tests2022/"
# root_folder = r'C:/GitCodes/Res/'
# root_folder = r'C:/GitCodes/LearnByCompare/'
# root_folder = r'C:/GitCodes/RandomDemand/'
# root_folder = r'M:/LinJuan/500Iter/'
# plot 1 plot the heatmap for the optimal pattern
plot_opt_case_folder = r"M:/LinJuan/0.001ConvergeNoLearning/Operator/1_TestOp_8/"
def EffectOfOperators():
# convergence the algorithm
## each operator
OperatorFolder = root_folder + "/Operator/"
psf.effect_of_operators(OperatorFolder)
def CompareThree():
"""
    Compare three cases:
    1: single operator  2: uniform probabilities  3: adaptive probabilities
"""
OperatorFolder = root_folder + "/CompareThree/"
psf.CompareOneFolder(OperatorFolder,"CompareThree")
def PlotFinalRelation(test_folder:str):
    """Visualise the dominance relationship.
    """
# test_folder = "M:/LinJuan/0_ALNS/"
# test_folder = "C:/GitCodes/0_ALNS/"
bs = psf.getBestSeed(test_folder)
print("Best Seed = {0}".format(bs))
psf.plotRelation(test_folder)
if __name__ == "__main__":
# EffectOfOperators()
# CompareThree()
# test_folder = r'C:/GitCodes/1_TestOp_8/'
# PlotFinalRelation(test_folder)
# testfolder = root_folder + "/RemoveOperator/"
# psf.CompareOneFolder(testfolder,"RemoveOp")
    # remark: I may need to adjust the heatmap to the dominance score map??
# op_folder = r"M:\LinJuan\0.001ConvergeNoLearning\Operator\1_TestOp_8"
# PlotHeatMap.plot_Patten_heat_map(para.FailureLinks,num_of_seed=para.NumOfTestSeed,_folder=op_folder)
# PlotOperatorConverge.change_operator_prob_over_iterations(op_folder,_num_operators=9)
# plot gantt chart
best_seed = psf.getBestSeed(plot_opt_case_folder)
# psf.print_best_seed_sol(plot_opt_case_folder,best_seed)
# psf.print_best_seed_period(plot_opt_case_folder,best_seed)
psf.plot_best_seed_period(plot_opt_case_folder)
PlotGantt.plot_general_Gant_chart("Gantt_SiouxFall",plot_opt_case_folder,best_seed)
pass
exit()
## unified prob
### Just copy the plot from unfolder
## adaptive probability
### Just copy the plot from ALNS folder
## change of the prob rate over all iterations
OperatorCovergeFolder = root_folder +"/9_ALNS/"
PlotOperatorConverge.change_operator_prob_over_iterations(root_folder)
# remark: need to set the nodes read
PlotHeatMap.plot_Patten_heat_map(set_fail_links=[])
# project schedule
## TODO: plot the gant chart for the general case
# compare with the gentic algorithm
| 2.171875 | 2 |
italian_csv_type_prediction/simple_types/plate_type.py | LucaCappelletti94/italian_csv_type_prediction | 0 | 12786140 | <reponame>LucaCappelletti94/italian_csv_type_prediction<filename>italian_csv_type_prediction/simple_types/plate_type.py
from ..utils import normalize
from .regex_type_predictor import RegexTypePredictor
from .string_type import StringType
class PlateType(StringType):
def __init__(self):
"""Create new Plate type predictor based on regex."""
super().__init__()
self._predictor = RegexTypePredictor([
r"^[a-z]{2}\s?[0-9]{3}\s?[a-z]{1}$",
r"^[a-z]{2}\s?[0-9]{3}\s?[a-z]{2}$",
r"^[a-z]{2}\s?[0-9]{4}\s?[a-z]{1}$",
r"^[a-z]{2}\s?[0-9]{4}\s?[a-z]{2}$",
r"^[a-z]{2}\s?[0-9]{2}\s?[a-z]{3}$",
r"^[a-z]{2}\s?[0-9]{5}\s?[a-z]{1}$",
r"^[a-z]{3}\s?[0-9]{2}\s?[a-z]{1}$",
r"^[a-z]{3}\s?[0-9]{3}\s?[a-z]{1}$",
r"^[a-z]{3}\s?[0-9]{4}\s?[a-z]{1}$",
r"^[a-z]{3}\s?[0-9]{1}\s?[a-z]{3}$",
r"^[a-z]{1}\s?[0-9]{1}\s?[a-z]{4}$",
r"^[a-z]{1}\s?[0-9]{2}\s?[a-z]{3}$",
r"^[a-z]{1}\s?[0-9]{4}\s?[a-z]{2}$",
r"^[a-z]{1}\s?[0-9]{4,5}\s?[a-z]{1}$",
r"^[a-z]{1}\s?[0-9]{3}\s?[a-z]{1,3}$",
r"^[a-z]{1}\s?[0-9]{4,6}$",
r"^[a-z]{2}\s?[0-9]{3,6}$",
r"^[a-z]{3}\s?[0-9]{2,5}$",
r"^[a-z]{4}\s?[0-9]{1,4}$",
r"^[a-z]{5}\s?[0-9]{1,3}$",
r"^[0-9]{2}\s?[a-z]{4}$",
r"^[0-9]{4}\s?[a-z]{2,3}$",
r"^[0-9]{1}\s?[a-z]{1}\s?[0-9]{5}$",
r"^[0-9]{1}\s?[a-z]{2}\s?[0-9]{4}$",
r"^[0-9]{1}\s?[a-z]{3}\s?[0-9]{2,3}$",
r"^[0-9]{3}\s?[a-z]{1}\s?[0-9]{3}$",
r"^[0-9]{3}\s?[a-z]{3}\s?[0-9]{2}$",
r"^[0-9]{2}\s?[a-z]{2}\s?[0-9]{2}$",
r"^[0-9]{2}\s?[a-z]{3}\s?[0-9]{1}$",
])
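        # The patterns above are meant to cover Italian number-plate layouts
        # (the current letters-digits-letters format plus older provincial and
        # special formats), with an optional space between the groups.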
def convert(self, candidate):
return str(candidate).upper()
def validate(self, candidate, **kwargs) -> bool:
"""Return boolean representing if given candidate matches regex for ."""
if not super().validate(candidate, **kwargs):
return False
count = len(candidate)
if count > 8 or count < 5:
return False
return self._predictor.validate(candidate)
| 2.625 | 3 |
src/tests/test_bacon_with_eggs.py | lipegomes/rp-tdd-pytest | 0 | 12786141 | """
TDD - Test Driven Development
"""
import unittest
from src.base.bacon_with_eggs import bacon_with_eggs
class TestBaconWithEggs(unittest.TestCase):
def test_bacon_with_eggs_assertion_error_do_not_receive_int(self):
with self.assertRaises(AssertionError):
bacon_with_eggs('')
def test_bacon_with_eggs_return_bacon_with_eggs_if_the_input_is_a_multiple_of_3_and_5(self):
inputs = (15, 30, 90, 120)
output = "Bacon with Eggs"
for input in inputs:
with self.subTest(input=input, output=output):
self.assertEqual(
bacon_with_eggs(input),
output,
                    msg=f"'{input}' did not return the '{output}'"
)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 4 | 4 |
appengine_config.py | lemmings-io/facebook-messenger-google-app-engine | 4 | 12786142 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
`appengine_config.py` is automatically loaded when Google App Engine
starts a new instance of your application. This runs before any
WSGI applications specified in app.yaml are loaded.
"""
from google.appengine.ext import vendor
# Third-party libraries are stored in "lib", vendoring will make
# sure that they are importable by the application.
vendor.add('lib')
# disable warnings when using requests library with sockets on app engine
import requests
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
from requests.packages.urllib3.exceptions import SNIMissingWarning
requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)
requests.packages.urllib3.disable_warnings(SNIMissingWarning)
| 1.804688 | 2 |
urls.py | fightingfish008/tornado-extensions | 5 | 12786143 | <reponame>fightingfish008/tornado-extensions<filename>urls.py
# -*- coding:utf-8 -*-
from libs.handler import ErrorHandler
from libs.decorators import route
from handlers import extensionCheck, systemInfo, celerySendSms
handlers = []
# add xxxx handlers
handlers.extend(extensionCheck.handlers) # app version-related features
handlers.extend(systemInfo.handlers) # calls the registry-centre API
handlers.extend(celerySendSms.handlers) # celery SMS features
#
# # add @route handlers
handlers.extend(route.get_routes())
#
# # Append default 404 handler, and make sure it is the last one.
# handlers.append((r".*", ErrorHandler))
| 1.984375 | 2 |
DCT/Noisy.py | Satjpatel/Digital-Image-Watermarking-and-Its-FPGA-Implementation | 0 | 12786144 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 6 23:59:38 2021
@author: <NAME>
"""
# Adding Salt and Pepper Noise to image
import cv2 as cv
import numpy as np
import random
# Adding salt and pepper noise
def gaussian_noise(image):
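    # Additive Gaussian noise with mean 0 and variance 0.1, sized to match the
    # input image.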
    row, col = image.shape
mean = 0
var = 0.1
sigma = var**0.5
gauss = np.random.normal(mean,sigma,(row,col))
gauss = gauss.reshape(row,col)
gauss_noisy = image + gauss
return gauss_noisy
def salt_and_pepper_noise(image):
# Getting the dimensions of the image
    row, col = image.shape
# Randomly pick some pixels in the
# image for coloring them white
# Pick a random number between 300 and 10000
number_of_pixels = random.randint(300, 10000)
for i in range(number_of_pixels):
# Pick a random y coordinate
y_coord=random.randint(0, row - 1)
# Pick a random x coordinate
x_coord=random.randint(0, col - 1)
# Color that pixel to white
        image[y_coord][x_coord] = 255
# Randomly pick some pixels in
# the image for coloring them black
# Pick a random number between 300 and 10000
number_of_pixels = random.randint(300 , 10000)
for i in range(number_of_pixels):
# Pick a random y coordinate
y_coord=random.randint(0, row - 1)
# Pick a random x coordinate
x_coord=random.randint(0, col - 1)
# Color that pixel to black
        image[y_coord][x_coord] = 0
    return image
img = cv.imread('Lenna.jpg', 0)
gn = gaussian_noise(img)
snp = salt_and_pepper_noise(img)
| 3.390625 | 3 |
setup.py | gillenbrown/prettyplot | 3 | 12786145 | from setuptools import setup, find_packages
setup(
name="betterplotlib",
version="1.5.0",
description="Some wrappers for matplotlib to make plotting easier and nicer.",
long_description="This module contains wrapper functions for matplotlib. A lot of the matplotlib plots are ugly and not easy to make, so I wrote some functions that do a lot of the stuff that should be easy, as well as wrappers for common plots that make them look nicer. ",
url="http://betterplotlib.readthedocs.io/en/master/",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
keywords="plotting matplotlib",
packages=find_packages(exclude=["docs"]),
install_requires=["matplotlib", "numpy", "palettable"]
)
| 1.609375 | 2 |
setup.py | neuro-inc/neuro-extras | 2 | 12786146 | <reponame>neuro-inc/neuro-extras<filename>setup.py<gh_stars>1-10
import re
from setuptools import find_packages, setup
with open("neuro_extras/version.py") as f:
txt = f.read()
try:
version = re.findall(r'^__version__ = "([^"]+)"\r?$', txt, re.M)[0]
except IndexError:
raise RuntimeError("Unable to determine version.")
setup(
name="neuro-extras",
version=version,
python_requires=">=3.6.0",
url="https://github.com/neuro-inc/neuro-extras",
packages=find_packages(),
install_requires=[
"neuro-cli>=21.1.13",
'dataclasses>=0.7; python_version<"3.7"',
"click>=7.0",
"toml>=0.10.0",
"pyyaml>=3.0",
],
entry_points={
"console_scripts": ["neuro-extras=neuro_extras:main"],
"neuro_api": ["neuro-extras=neuro_extras:setup_plugin"],
},
zip_safe=False,
include_package_data=True,
)
| 1.914063 | 2 |
gs/group/messages/post/text/postbody.py | groupserver/gs.group.messages.post.text | 0 | 12786147 | # -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2012, 2013, 2014, 2015 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
from __future__ import absolute_import, unicode_literals, print_function
from operator import attrgetter
from gs.cache import cache
from gs.group.privacy import get_visibility, PERM_ANN
from gs.group.messages.text import (
split_message, SplitMessage, HTMLBody, boldMatcher, emailMatcher, wwwMatcher, uriMatcher,
wrap_message)
from Products.GSGroup.interfaces import IGSMailingListInfo
from .matcher import (youTubeMatcher, vimeoMatcher, PublicEmailMatcher, )
# this is currently the hard limit on the number of words we will process.
# after this we insert a message. TODO: make this more flexible by using
# AJAX to incrementally fetch large emails
EMAIL_WORD_LIMIT = 5000
class OnlineHTMLBody(HTMLBody):
'''The HTML form of a plain-text email body.
:param str originalText: The original (plain) text
    :param str perm: The visibility permission for the group's messages
    :param okAddresses: The email addresses that may be shown even when the posts are visible to Anonymous'''
def __init__(self, originalText, perm, okAddresses):
super(OnlineHTMLBody, self).__init__(originalText)
self.matchers = [youTubeMatcher, vimeoMatcher, boldMatcher, wwwMatcher, uriMatcher]
if perm == PERM_ANN: # The messages are visible to Anon
p = PublicEmailMatcher(okAddresses)
self.matchers.append(p)
else:
self.matchers.append(emailMatcher)
        # Apply the matchers in weight order; sort the list in place
        self.matchers.sort(key=attrgetter('weight'))
@cache('gs.group.messages.post.postintroremainder',
lambda contentProvider, text: ':'.join(
(str(contentProvider.post['post_id']),
str((get_visibility(contentProvider.groupInfo.groupObj))))
), 3600)
def get_post_intro_and_remainder(contentProvider, text):
"""Get the introduction and remainder text of the formatted post
    :param object contentProvider: The content provider rendering the message, providing access to
        the context, groupInfo and other useful tidbits.
    :param str text: The text to split into an introduction and remainder
:returns: A 2-tuple of the strings that represent the email intro and the remainder."""
if not contentProvider.groupInfo.groupObj:
raise ValueError("The groupInfo object should always have a groupObj")
if not text:
# Sorry, Dijkstra
return SplitMessage('', '')
mailBody = wrap_message(text)
plain = split_message(mailBody)
messages = contentProvider.groupInfo.groupObj.messages
perm = get_visibility(messages)
ml = IGSMailingListInfo(contentProvider.groupInfo.groupObj)
okAddresses = (contentProvider.siteInfo.get_support_email(),
ml.get_property('mailto'))
markedUpIntro = ''
if plain.intro:
markedUpIntro = unicode(OnlineHTMLBody(plain.intro, perm, okAddresses))
markedUpRemainder = ''
if plain.remainder:
markedUpRemainder = unicode(OnlineHTMLBody(plain.remainder, perm, okAddresses))
retval = SplitMessage(markedUpIntro, markedUpRemainder)
return retval
| 1.585938 | 2 |
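A rough, self-contained sketch of the intro/remainder split that split_message (imported from gs.group.messages.text and not shown in this file) performs; the line limit and quote detection below are illustrative assumptions only:

from collections import namedtuple

SimpleSplit = namedtuple('SimpleSplit', ['intro', 'remainder'])

def simple_split_message(text, max_intro_lines=20):
    # Treat quoted reply lines (or anything past the limit) as the remainder.
    lines = text.splitlines()
    intro, remainder = [], []
    for i, line in enumerate(lines):
        if i >= max_intro_lines or line.lstrip().startswith('>'):
            remainder = lines[i:]
            break
        intro.append(line)
    return SimpleSplit('\n'.join(intro), '\n'.join(remainder))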
supersqlite/third_party/_apsw/tools/docmissing.py | plasticity-admin/supersqlite | 1,520 | 12786148 | <reponame>plasticity-admin/supersqlite<filename>supersqlite/third_party/_apsw/tools/docmissing.py
# python
#
# See the accompanying LICENSE file.
#
# Find things that haven't been documented and should be or have been
# but don't exist.
import glob, sys
import apsw
retval=0
classes={}
for filename in glob.glob("doc/*.rst"):
for line in open(filename, "rtU"):
line=line.strip().split()
if len(line)>=2:
if line[0]==".." and line[1] in ("method::", "automethod::", "attribute::"):
funcname=line[2].split("(")[0].strip()
if "." in funcname:
klass, funcname=funcname.split(".",1)
else:
klass="apsw"
if klass not in classes:
classes[klass]=[]
classes[klass].append(funcname)
# ok, so we know what was documented. Now lets see what exists
con=apsw.Connection(":memory:")
cur=con.cursor()
cur.execute("create table x(y); insert into x values(x'<PASSWORD>');select * from x")
blob=con.blobopen("main", "x", "y", con.last_insert_rowid(), 0)
vfs=apsw.VFS("aname", "")
vfsfile=apsw.VFSFile("", ":memory:", [apsw.SQLITE_OPEN_MAIN_DB|apsw.SQLITE_OPEN_CREATE|apsw.SQLITE_OPEN_READWRITE, 0])
# virtual tables aren't real - just check their size hasn't changed
assert len(classes['VTModule'])==2
del classes['VTModule']
assert len(classes['VTTable'])==13
del classes['VTTable']
assert len(classes['VTCursor'])==6
del classes['VTCursor']
for name, obj in ( ('Connection', con),
('Cursor', cur),
('blob', blob),
('VFS', vfs),
('VFSFile', vfsfile),
('apsw', apsw),
):
if name not in classes:
retval=1
print "class", name,"not found"
continue
for c in classes[name]:
if not hasattr(obj, c):
# it is legit for these to be missing from code (currently because code is broken)
if (name+"."+c) in ("apsw.async_control", "apsw.async_initialize", "apsw.async_run", "apsw.async_shutdown"):
continue
retval=1
print "%s.%s in documentation but not object" % (name, c)
for c in dir(obj):
if c.startswith("__"): continue
if name=="apsw":
# ignore constants and modules
if type(getattr(apsw, c)) in (type(3), type(sys)):
continue
# ignore debugging thingies
if c.startswith("test_") or c in ("faultdict", "_fini"):
continue
# ignore the exceptions
if isinstance(getattr(apsw, c), type) and issubclass(getattr(apsw,c), Exception):
continue
# ignore classes !!!
if c in ("Connection", "VFS", "VFSFile", "zeroblob", "Shell", "URIFilename"):
continue
# ignore mappings !!!
if c.startswith("mapping_"):
continue
if c not in classes[name]:
if "%s.%s" % (name, c) not in ("Cursor.next",):
retval=1
print "%s.%s on object but not in documentation" % (name, c)
sys.exit(retval)
| 2 | 2 |
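The same documented-versus-actual cross-check, condensed into a small Python 3 sketch; the documented mapping stands in for the names parsed out of the .rst files and is not part of the original tool:

def cross_check(documented, objects):
    problems = 0
    for name, attrs in documented.items():
        obj = objects.get(name)
        if obj is None:
            print("class {} not found".format(name))
            problems += 1
            continue
        # Documented names that the live object does not actually expose.
        for attr in attrs:
            if not hasattr(obj, attr):
                print("{}.{} in documentation but not object".format(name, attr))
                problems += 1
        # Public attributes on the object that the docs never mention.
        for attr in dir(obj):
            if not attr.startswith("__") and attr not in attrs:
                print("{}.{} on object but not in documentation".format(name, attr))
                problems += 1
    return problems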
configs/conf_exp.py | baimengwei/Traffic-Flow-Test-Platform | 2 | 12786149 | <reponame>baimengwei/Traffic-Flow-Test-Platform<gh_stars>1-10
class ConfExp:
def __init__(self, args):
self.__model_name = args.algorithm
self.__train_round = args.train_round
self.__num_generators = args.num_generator
self.__num_pipeline = args.num_pipeline
@property
def NUM_GENERATORS(self):
return self.__num_generators
@property
def TRAIN_ROUND(self):
return self.__train_round
@property
def MODEL_NAME(self):
        return self.__model_name
| 2.375 | 2
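A hypothetical way to drive ConfExp from argparse; the option names mirror the attributes read in __init__, but the project's real command-line definition is not shown here:

import argparse
from configs.conf_exp import ConfExp  # import path assumed from the file location above

parser = argparse.ArgumentParser()
parser.add_argument('--algorithm', default='DQN')
parser.add_argument('--train_round', type=int, default=100)
parser.add_argument('--num_generator', type=int, default=4)
parser.add_argument('--num_pipeline', type=int, default=2)
args = parser.parse_args([])  # empty list -> use the defaults above

conf = ConfExp(args)
print(conf.MODEL_NAME, conf.TRAIN_ROUND, conf.NUM_GENERATORS)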
target/apps.py | groundupnews/gu | 19 | 12786150 | from django.apps import AppConfig
class TargetConfig(AppConfig):
name = 'target'
| 1.15625 | 1 |
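For completeness, such an app is typically activated by listing it in the project's settings module (illustrative only; the real settings for this project are not shown):

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'target',  # resolved to TargetConfig via default_app_config on older Django versions
]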