id (stringlengths 1 to 7) | text (stringlengths 6 to 1.03M) | dataset_id (stringclasses 1)
---|---|---
1650641
|
<gh_stars>0
class IntegrationRouter:
"""
    A router to control all database operations on models in the
    call_center application.
"""
call_center_models = {
'agent',
'audit',
'break',
'callattribute',
'callentry',
'callprogresslog',
'callrecording',
'calls',
'campaign',
'campaignentry',
'campaignexternalurl',
'campaignform',
'campaignformentry',
'contact',
'currentcallentry',
'currentcalls',
'dontcall',
'eccpauthorizedclients',
'form',
'formdatarecolected',
'formdatarecolectedentry',
'formfield',
'queuecallentry',
'valorconfig',
'cedulallamada'
}
call_center_tables = {
'agent',
'audit',
'break',
'call_attribute',
'call_entry',
'call_progress_log',
'call_recording',
'calls',
'campaign',
'campaign_entry',
'campaign_external_url',
'campaign_form',
'campaign_form_entry',
'contact',
'current_call_entry',
'current_calls',
'dont_call',
'eccp_authorized_clients',
'form',
'form_data_recolected',
'form_data_recolected_entry',
'form_field',
'queue_call_entry',
'valor_config',
'cedula_llamada'
}
def db_for_read(self, model, **hints):
"""
Attempts to read call_center models go to call_center.
"""
if model._meta.db_table in self.call_center_tables:
return 'call_center'
return None
def db_for_write(self, model, **hints):
"""
Attempts to write call_center models go to call_center.
"""
if model._meta.db_table in self.call_center_tables:
return 'call_center'
return None
def allow_relation(self, obj1, obj2, **hints):
"""
        Allow relations if a model mapped to a call_center table is
        involved.
"""
if (
obj1._meta.db_table in self.call_center_tables or
obj2._meta.db_table in self.call_center_tables
):
return True
return None
def allow_migrate(self, db, app_label, model_name=None, **hints):
"""
Make sure the call_center tables only appear in the
'call_center' database.
"""
        if model_name in self.call_center_models:
            # call_center models may only migrate to the 'call_center' database
            return db == 'call_center'
        return None
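# Example (not part of the original module): a minimal sketch of how this
# router might be registered in Django settings, assuming a second database
# alias named 'call_center' exists alongside 'default'. The module path
# 'myproject.routers' is hypothetical.
#
# DATABASES = {
#     'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'default.db'},
#     'call_center': {'ENGINE': 'django.db.backends.mysql', 'NAME': 'call_center'},
# }
# DATABASE_ROUTERS = ['myproject.routers.IntegrationRouter']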
|
StarcoderdataPython
|
3249182
|
# Hysteresis model
# http://eprints.lancs.ac.uk/1375/1/MFI_10c.pdf
# Identification of Hysteresis Functions Using a Multiple Model Approach
# Mihaylova, Lampaert et al
import numpy as npy
from scipy.optimize import root
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import copy
#%%
plt.close('all')
class HysteresisModel:
"""
Hysteresis model comprising a number of elementary Maxwell-slip models
refer http://eprints.lancs.ac.uk/1375/1/MFI_10c.pdf
"""
def __init__(self,N,K,W=None,delta=None):
self.N = N
"""
Integer number of elementary models
"""
self.K = npy.ravel(npy.abs(K))
"""
Array of stiffness values for each elementary model
"""
if delta is None :
# K and W specified
if W is None:
raise ValueError("Error: either delta or W arguments "+
"must be provided!")
else:
W = npy.ravel(npy.abs(W)) # limiting friction values
else:
# K and delta specified
# W to be inferred, given this input
delta = npy.abs(npy.ravel(delta))
W = self.K * delta
self.W = W
"""
Array of limiting friction values for each elementary model
"""
# Initialise matrices F and C, which do not vary with input
self.F = npy.asmatrix(npy.identity(self.N))
self.C = npy.asmatrix(npy.diag(-self.K))
# Initialise matrices G and D, as empty
self.G = npy.asmatrix(npy.empty((self.N,1)))
self.D = npy.asmatrix(npy.empty((self.N,1)))
        # Initialise array to contain case indices
self.case = npy.zeros((self.N,),dtype=int)
@property
def x0(self):
return self._x0
@x0.setter
def x0(self,x0):
"""
Set initial states
"""
self._x0 = npy.asmatrix(npy.ravel(x0)).T
self.x =self.x0
if self.x.shape[0] != self.N:
raise ValueError("Error: x0 wrong shape!")
@property
def x(self):
return self._x
@x.setter
def x(self,val):
#print("States updated")
self._x = npy.asmatrix(val)
def update(self,u,save_states=True):
"""
Function to advance state-space description of model dynamics
by a single time step, returning next state and output vectors
"""
x = copy.deepcopy(self.x)
# Define G and D matrix entries
for i in range(self.N): # loop over all elementary models
Wi = self.W[i]
Ki = self.K[i]
# Evaluate switching parameter
fi = Ki * (u - x[i])
if fi > Wi:
# Case 2
self.case[i] = 2
self.G[i] = 1
self.D[i] = 0
x[i] = -Wi/Ki
elif fi < -Wi:
# Case 3
self.case[i] = 3
self.G[i] = 1
self.D[i] = 0
x[i] = +Wi/Ki
else:
# Case 1
self.case[i] = 1
self.G[i] = 0
self.D[i] = Ki
# Compute next states and output
# using eqns (10) and (11) in Mihaylova's paper
x_next = self.F * x + self.G * u
y_k = self.C * x + self.D * u
Fh_k = y_k.sum() # total hysteresis force
# Update states
if save_states:
self.x = x_next
return x_next, y_k, Fh_k
def run(self,x0,uVals):
"""
Run simulation from initial conditions, given inputs u
x0 : column vector [Nx1]
u : list or vector of length (nSteps,1)
"""
# Convert and check shape of u
uVals = npy.ravel(uVals)
nSteps = uVals.shape[0]
# Initialise state space eqns
self.x0 = x0
# Step through state space eqns
xVals = npy.zeros((nSteps,self.N))
yVals = npy.zeros((nSteps,self.N))
Fh_vals = npy.zeros((nSteps,))
for k, u_k in enumerate(uVals):
# Get next states and output
x_k, y_k, Fh_k = self.update(u_k)
# Store
xVals[k,:] = npy.ravel(x_k)
yVals[k,:] = y_k.T
Fh_vals[k] = Fh_k
# Store results
self.uVals = uVals
self.xVals = xVals
self.yVals = yVals
self.FhVals = Fh_vals
# Return states and output for each step
return xVals, yVals, Fh_vals
def write_results(self,
fname='results.csv',
delimiter=','):
arr = npy.asmatrix(self.uVals).T
titles = ["u"]
N = self.N
arr = npy.hstack((arr,self.xVals))
titles += ["x%d" % (i+1) for i in range(N)]
arr = npy.hstack((arr,self.yVals))
titles += ["y%d" % (i+1) for i in range(N)]
arr = npy.hstack((arr,npy.asmatrix(self.FhVals).T))
titles += ["Fh"]
npy.savetxt(fname=fname,
X=arr,
delimiter=delimiter,
header=delimiter.join(str(x) for x in titles))
def PlotResults_timeSeries(self,tVals):
"""
Plot results as time series
[t,u], [t,x], [t,y], [t,Fh]
"""
fig, axarr = plt.subplots(4,sharex=True)
fig.set_size_inches(16,9,forward=True)
ax1 = axarr[0]
ax1.plot(tVals,self.uVals)
ax1.xaxis.set_visible(False)
ax1.set_ylabel("u")
        ax1.set_title("Input displacement, u(t)")
ax2 = axarr[1]
ax2.plot(tVals,self.xVals)
ax2.xaxis.set_visible(False)
ax2.set_ylabel("x")
ax2.set_title("States of\nelementary models, x(t)")
ax3 = axarr[2]
ax3.plot(tVals,self.yVals)
ax3.xaxis.set_visible(False)
ax3.set_ylabel("y")
ax3.set_title("Outputs from\nelementary models, y(t)")
ax4 = axarr[3]
ax4.plot(tVals,self.FhVals)
ax4.set_xlabel("Time (seconds)")
ax4.set_ylabel("F$_h$")
ax4.set_title("Net output F$_h$")
def PlotResults(self):
"""
Plot results as [u,x], [u,y], [u,Fh] plots
"""
fig, axarr = plt.subplots(1,3,sharex=True)
fig.set_size_inches(16,9,forward=True)
ax1 = axarr[0]
ax1.plot(self.uVals,self.xVals)
ax1.set_xlabel("Input u")
ax1.set_title("States of\nelementary models, x")
ax2 = axarr[1]
ax2.plot(self.uVals,self.yVals)
        ax2.set_xlabel("Input u")
ax2.set_title("Outputs from\nelementary models, y")
ax3 = axarr[2]
ax3.plot(self.uVals,self.FhVals)
ax3.set_xlabel("Input u")
ax3.set_title("Net output F$_h$")
class static_response():
"""
Class used to compute response to forcing input
"""
def __init__(self,hys_obj,K1, K2):
self.hys_obj = hys_obj
self.K1 = K1
self.K2 = K2
def net_force(self,d,F_ext,verbose=False):
"""
Function which defines net force
given position 'u' and external force 'F_ext'
"""
u = d[0] - d[1] # relative displacement at friction interface
F_hys = self.hys_obj.update(u=u,save_states=False)[2]
F_net_1 = self.K1 * d[0] + F_hys - F_ext
F_net_2 = self.K2 * d[1] - F_hys
F_net = npy.array([F_net_1,F_net_2])
if verbose:
print("u = %.3e" % u)
print("x = {0}".format(self.hys_obj.x))
print("F_hys = {0}".format(F_hys))
print("F_net = {0}".format(F_net))
return F_net
def run(self,F_vals,x0=None,d0=None):
# Define function to solve for next u
def solve(d_last,F_k,hys_obj):
            # Determine next u to satisfy equilibrium - i.e. zero net force
sol = root(fun=self.net_force,x0=d_last,args=(F_k,))
d_k = sol.x
u_k = d_k[0]-d_k[1]
F_net = self.net_force(d_k,F_k)
if not sol.success:
pass#print(sol.message)
x_k, y_k, F_hys_k = hys_obj.update(u=u_k,save_states=True)
return F_hys_k, d_k, u_k, x_k, y_k, F_net
# Set initial conditions
if x0 is None:
x0 = npy.zeros((self.hys_obj.N,))
self.hys_obj.x0 = x0
if d0 is None:
d0 = npy.array([0.0,0.0])
d_j = d0 # initial guess
# Run step by step
F_hys_vals = []
x_vals = []
u_vals = []
y_vals = []
F_net_vals = []
for j, F_j in enumerate(F_vals):
#print("--- Step #%d ---" % j)
F_hys_j, d_j, u_j, x_j, y_j, F_net = solve(d_j,F_j,self.hys_obj)
F_hys_vals.append(F_hys_j)
x_vals.append(npy.ravel(x_j))
y_vals.append(npy.ravel(y_j))
u_vals.append(u_j)
F_net_vals.append(F_net)
self.x_vals = x_vals
self.y_vals = y_vals
self.u_vals = u_vals
self.F_hys_vals = F_hys_vals
self.F_vals = F_vals
self.F_net_vals = F_net_vals
def plot(self):
fig, axarr = plt.subplots(3,2,sharex='col')
fig.set_size_inches(14,8)
ax = axarr[0,0]
ax.plot(self.F_vals,label='$F_{external}$')
ax.plot(self.F_hys_vals,label='$F_{hysteresis}$')
ax.legend()
ax.set_ylabel("Forces")
ax = axarr[1,0]
ax.plot(self.u_vals)
ax.set_ylabel("Displacement, u")
ax = axarr[2,0]
ax.plot(self.x_vals)
ax.set_xlabel("Step index")
ax.set_ylabel("States, x")
ax = axarr[0,1]
ax.plot(self.u_vals,self.y_vals)
ax.set_ylabel("Outputs, y")
ax = axarr[1,1]
ax.plot(self.u_vals,self.F_hys_vals)
ax.set_ylabel("$F_{hysteresis}$")
ax = axarr[2,1]
ax.plot(self.u_vals,self.F_vals)
ax.set_xlabel("Displacement, u")
ax.set_ylabel("$F_{external}$")
return fig
# -------- TEST ROUTINE ----------
if __name__ == "__main__":
test_routine = 1
if test_routine == 0:
# Define hysteresis model
K = [1000,2000,3000]
delta = [1,2,3]
Ne = len(K)
hys = HysteresisModel(Ne,K,delta=delta)
# Define displacement inputs
dt = 0.02
tmax = 10
u0 = 10
import random
def randomWalk(N,normalise=True):
x= [0]
for j in range(N-1):
step_x = random.randint(0,1)
if step_x == 1:
x.append(x[j] + 1 + 0.05*npy.random.normal())
else:
x.append(x[j] - 1 + 0.05*npy.random.normal())
x = npy.asarray(x)
if normalise:
absmaxVal = npy.max([npy.max(x),-npy.min(x)])
x = x / absmaxVal
return x
tVals = npy.arange(0,tmax,dt)
uVals = u0*randomWalk(tVals.shape[0])
#uVals = 4.5*npy.sin(2*npy.pi*0.5*tVals)
# Obtain states and outputs by state space stepping
hys.run(npy.zeros((Ne,)),uVals)
# Plot results
hys.PlotResults()
hys.PlotResults_timeSeries(tVals)
#hys.write_results()
elif test_routine==1:
# Define hysteresis model
K = [1000,2000,3000]
W = [1000,1000,1000]
Ne = len(K)
hys = HysteresisModel(Ne,K,W=W)
# Define force function
# Define displacement inputs
dt = 0.02
tmax = 10
u0 = 10
F0 = 3000
t_vals = npy.arange(0,tmax,dt)
F_vals = F0 * (npy.sin(2*npy.pi*t_vals) + npy.sin(2*npy.pi*3.2*t_vals))
# Define spring
K_spring = 1500
# Define and run analysis
        # static_response expects two spring stiffnesses (K1, K2); use K_spring for both here
        analysis = static_response(hys_obj=hys, K1=K_spring, K2=K_spring)
analysis.run(F_vals=F_vals)
analysis.plot()
else:
raise ValueError("No test selected!")
#%%
#
|
StarcoderdataPython
|
1759116
|
<reponame>NicGobbi/age-of-empires-II-api
from numpy import genfromtxt
import os
from db import db
from api.models.factory import get_model
def populate_db():
for filename in os.listdir(os.path.abspath('./data')):
if not filename.endswith('.csv'):
continue
data = load_data('data/{}'.format(filename))
filename = filename.split(".")[0]
for row in data:
item = get_model(filename, row)
db.session.add(item)
db.session.commit()
def load_data(file_name):
data = genfromtxt(file_name, delimiter=',', skip_header=1,
dtype='unicode', autostrip=True)
return data.tolist()
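# Usage sketch (assumptions: `db` is the Flask-SQLAlchemy handle imported above
# and each CSV in ./data is named after the model it populates, e.g.
# civilizations.csv). The application module `app` and its import path are
# hypothetical.
#
#     from app import app
#     with app.app_context():
#         db.create_all()
#         populate_db()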
|
StarcoderdataPython
|
1454
|
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Uploader module that handles batch jobs sent from Task Queue.
This module receives batch jobs from TaskQueue. For each job, the module loads
data from BigQuery and sends it to Merchant Center.
"""
import http
import json
import logging
import socket
from typing import List, Tuple
import flask
from google.cloud import bigquery
from google.cloud import logging as cloud_logging
from googleapiclient import errors
import batch_creator
import bigquery_client
import constants
import content_api_client
import result_recorder
import shoptimizer_client
from models import failure
from models import process_result
from models import upload_task
app = flask.Flask(__name__)
_logging_client = cloud_logging.Client()
_logging_client.setup_logging(log_level=logging.DEBUG)
_SHOPTIMIZER_CONFIG_FILE_PATH = 'config/shoptimizer_config.json'
OPERATION_TO_METHOD = {
constants.Operation.UPSERT: constants.Method.INSERT,
constants.Operation.DELETE: constants.Method.DELETE,
constants.Operation.PREVENT_EXPIRING: constants.Method.INSERT
}
# Used to check if this is the last retry for alerting purposes.
# Should match task_retry_limit in appengine/initiator/queue.yaml.
TASK_RETRY_LIMIT = 5
@app.route('/insert_items', methods=['POST'])
def run_insert_process() -> Tuple[str, http.HTTPStatus]:
"""Handles uploading tasks pushed from Task Queue."""
return _run_process(constants.Operation.UPSERT)
@app.route('/delete_items', methods=['POST'])
def run_delete_process() -> Tuple[str, http.HTTPStatus]:
"""Handles deleting tasks pushed from Task Queue."""
return _run_process(constants.Operation.DELETE)
@app.route('/prevent_expiring_items', methods=['POST'])
def run_prevent_expiring_process() -> Tuple[str, http.HTTPStatus]:
"""Handles prevent expiring tasks pushed from Task Queue."""
return _run_process(constants.Operation.PREVENT_EXPIRING)
def _run_process(operation: constants.Operation) -> Tuple[str, http.HTTPStatus]:
"""Handles tasks pushed from Task Queue.
When tasks are enqueued to Task Queue by initiator, this method will be
called. It extracts necessary information from a Task Queue message. The
following processes are executed in this function:
- Loading items to process from BigQuery.
  - Converting items into a batch that can be sent to Content API for Shopping.
- Sending items to Content API for Shopping (Merchant Center).
  - Recording the results of the Content API for Shopping call.
Args:
operation: Type of operation to perform on the items.
Returns:
The result of HTTP request.
"""
request_body = json.loads(flask.request.data.decode('utf-8'))
task = upload_task.UploadTask.from_json(request_body)
if task.batch_size == 0:
return 'OK', http.HTTPStatus.OK
batch_number = int(task.start_index / task.batch_size) + 1
logging.info(
      '%s started. Batch #%d info: start_index: %d, batch_size: %d, '
      'initiation timestamp: %s', operation.value, batch_number,
task.start_index, task.batch_size, task.timestamp)
try:
items = _load_items_from_bigquery(operation, task)
except errors.HttpError:
return 'Error loading items from BigQuery', http.HTTPStatus.INTERNAL_SERVER_ERROR
result = process_result.ProcessResult([], [], [])
try:
if not items:
logging.error(
          'Batch #%d, operation %s: 0 items loaded from BigQuery so batch not '
          'sent to Content API. Start_index: %d, batch_size: %d, '
          'initiation timestamp: %s', batch_number, operation.value,
task.start_index, task.batch_size, task.timestamp)
return 'No items to process', http.HTTPStatus.OK
method = OPERATION_TO_METHOD.get(operation)
# Creates batch from items loaded from BigQuery
original_batch, skipped_item_ids, batch_id_to_item_id = batch_creator.create_batch(
batch_number, items, method)
# Optimizes batch via Shoptimizer for upsert/prevent_expiring operations
if operation != constants.Operation.DELETE and constants.SHOPTIMIZER_API_INTEGRATION_ON:
batch_to_send_to_content_api = _create_optimized_batch(
original_batch, batch_number, operation)
else:
batch_to_send_to_content_api = original_batch
# Sends batch of items to Content API for Shopping
api_client = content_api_client.ContentApiClient()
successful_item_ids, item_failures = api_client.process_items(
batch_to_send_to_content_api, batch_number, batch_id_to_item_id, method)
result = process_result.ProcessResult(
successfully_processed_item_ids=successful_item_ids,
content_api_failures=item_failures,
skipped_item_ids=skipped_item_ids)
except errors.HttpError as http_error:
error_status_code = http_error.resp.status
error_reason = http_error.resp.reason
result = _handle_content_api_error(error_status_code, error_reason,
batch_number, http_error, items,
operation, task)
return error_reason, error_status_code
except socket.timeout as timeout_error:
error_status_code = http.HTTPStatus.REQUEST_TIMEOUT
error_reason = 'Socket timeout'
result = _handle_content_api_error(error_status_code, error_reason,
batch_number, timeout_error, items,
operation, task)
return error_reason, error_status_code
else:
logging.info(
'Batch #%d with operation %s and initiation timestamp %s successfully processed %s items, failed to process %s items and skipped %s items.',
batch_number, operation.value, task.timestamp,
result.get_success_count(), result.get_failure_count(),
result.get_skipped_count())
finally:
recorder = result_recorder.ResultRecorder.from_service_account_json(
constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING,
constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING,
constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING)
recorder.insert_result(operation.value, result, task.timestamp,
batch_number)
return 'OK', http.HTTPStatus.OK
def _load_items_from_bigquery(
operation: constants.Operation,
task: upload_task.UploadTask) -> List[bigquery.Row]:
"""Loads items from BigQuery.
Args:
operation: The operation to be performed on this batch of items.
task: The Cloud Task object that initiated this request.
Returns:
The list of items loaded from BigQuery.
"""
table_id = f'process_items_to_{operation.value}_{task.timestamp}'
bq_client = bigquery_client.BigQueryClient.from_service_account_json(
constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING,
table_id)
try:
items_iterator = bq_client.load_items(task.start_index, task.batch_size)
except errors.HttpError as http_error:
logging.exception(
'Error loading items from %s.%s. HTTP status: %s. Error: %s',
constants.DATASET_ID_FOR_PROCESSING, table_id, http_error.resp.status,
http_error.resp.reason)
raise
return list(items_iterator)
def _create_optimized_batch(batch: constants.Batch, batch_number: int,
operation: constants.Operation) -> constants.Batch:
"""Creates an optimized batch by calling the Shoptimizer API.
Args:
batch: The batch of product data to be optimized.
batch_number: The number that identifies this batch.
operation: The operation to be performed on this batch (upsert, delete,
prevent_expiring).
Returns:
The batch returned from the Shoptimizer API Client.
"""
try:
optimization_client = shoptimizer_client.ShoptimizerClient(
batch_number, operation)
except (OSError, ValueError):
return batch
return optimization_client.shoptimize(batch)
def _handle_content_api_error(
error_status_code: int, error_reason: str, batch_num: int, error: Exception,
item_rows: List[bigquery.Row], operation: constants.Operation,
task: upload_task.UploadTask) -> process_result.ProcessResult:
"""Logs network related errors returned from Content API and returns a list of item failures.
Args:
error_status_code: HTTP status code from Content API.
error_reason: The reason for the error.
batch_num: The batch number.
error: The error thrown by Content API.
item_rows: The items being processed in this batch.
operation: The operation to be performed on this batch of items.
task: The Cloud Task object that initiated this request.
Returns:
The list of items that failed due to the error, wrapped in a
process_result.
"""
logging.warning(
'Batch #%d with operation %s and initiation timestamp %s failed. HTTP status: %s. Error: %s',
batch_num, operation.value, task.timestamp, error_status_code,
error_reason)
# If the batch API call received an HttpError, mark every id as failed.
item_failures = [
failure.Failure(str(item_row.get('item_id', 'Missing ID')), error_reason)
for item_row in item_rows
]
api_result = process_result.ProcessResult([], item_failures, [])
if content_api_client.suggest_retry(
error_status_code) and _get_execution_attempt() < TASK_RETRY_LIMIT:
logging.warning(
'Batch #%d with operation %s and initiation timestamp %s will be requeued for retry',
batch_num, operation.value, task.timestamp)
else:
logging.error(
'Batch #%d with operation %s and initiation timestamp %s failed and will not be retried. Error: %s',
batch_num, operation.value, task.timestamp, error)
return api_result
def _get_execution_attempt() -> int:
"""Returns the number of times this task has previously been executed.
If the execution count header does not exist, it means the request did not
come from Cloud Tasks.
In this case, there will be no retry, so set execution attempt to the retry
limit.
Returns:
int, the number of times this task has previously been executed.
"""
execution_attempt = flask.request.headers.get(
'X-AppEngine-TaskExecutionCount', '')
if execution_attempt:
return int(execution_attempt)
else:
return TASK_RETRY_LIMIT
if __name__ == '__main__':
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
|
StarcoderdataPython
|
118252
|
from tkinter import *
from loadSettings import loadSettings
root = Tk()
root.geometry("500x500")
#Menu code
my_menu = Menu(root)
root.config(menu=my_menu)
root.title('Solmodoro Timer')
# root.iconbitmap('C:\Users\Diana\Desktop\mywebsite\images\solmi.png')
#File Menu
file_menu = Menu(my_menu, tearoff=0)
my_menu.add_cascade(label="File", menu=file_menu)
file_menu.add_command(label="Set New Pomodoro Timer")
file_menu.add_command(label="Quit", command=root.quit)
#Edit Menu
edit_menu = Menu(my_menu, tearoff=0)
my_menu.add_cascade(label="Edit", menu=edit_menu)
edit_menu.add_command(label="BGM Settings", command=loadSettings)
edit_menu.add_command(label="GUI Settings")
#Help Menu
help_menu = Menu(my_menu, tearoff=0)
my_menu.add_cascade(label="Help", menu=help_menu)
help_menu.add_command(label="Guide")
#Code
# Create the widgets first and grid them separately; grid() returns None, so chaining it would leave these variables set to None.
studyLabel = Label(root, text="Study duration (mins): ")
studyLabel.grid(row=0, column=1)
breakLabel = Label(root, text="Break duration (mins): ")
breakLabel.grid(row=2, column=1)
studyEntry = Entry(root)
studyEntry.grid(row=1, column=1)
breakEntry = Entry(root)
breakEntry.grid(row=3, column=1)
startButton = Button(root, text="Begin session")
startButton.grid(row=4, column=0, columnspan=5)
root.mainloop()
|
StarcoderdataPython
|
1637208
|
import lx, modo, lxifc, lxu.command, tagger
CMD_NAME = tagger.CMD_SHADERTREE_CONSOLIDATE_BY_COLOR
def color_convert(color):
return [i*256 for i in color]
class CommandClass(tagger.CommanderClass):
#_commander_default_values = []
def commander_execute(self, msg, flags):
all_masks = modo.Scene().items('mask')
target_masks = []
unique_colors = set()
consolidation_masks = []
for mask in all_masks:
if mask.parent.id != modo.Scene().renderItem.id:
continue
if mask.channel(lx.symbol.sICHAN_MASK_PTYP).get() not in ('Material', ''):
continue
if len(mask.children()) != 1:
continue
material = mask.children()[0]
if material.type != 'advancedMaterial':
continue
target_masks.append({"mask_item": mask})
target_masks[-1]["material_item"] = material
target_masks[-1]["color"] = material.channel('diffCol').get()
target_masks[-1]["pTag"] = target_masks[-1]["mask_item"].channel(lx.symbol.sICHAN_MASK_PTAG).get()
unique_colors.add(target_masks[-1]["color"])
for c in unique_colors:
consolidation_masks.append({"color": c})
consolidation_masks[-1]["colorname"] = tagger.colors.ColorNames.findNearestWebColorName(color_convert(c))
consolidation_masks[-1]["hitlist"] = [m for m in target_masks if m["color"] == c]
for c in consolidation_masks:
c["pTag"] = c["colorname"]
all_existing_tags = tagger.scene.all_tags_by_type(lx.symbol.i_POLYTAG_MATERIAL)
n = 0
while c["pTag"] in all_existing_tags:
n += 1
c["pTag"] = "_".join((c["colorname"], str(n)))
c["consolidation_mask"] = tagger.shadertree.build_material(pTag = c["pTag"])
c["consolidation_mask"].children()[0].channel('diffCol').set(c["color"])
# The material.reassign command expects no textureLayers to be selected.
to_restore = [i for i in modo.Scene().selected if i.superType == 'textureLayer']
for textureLayer in to_restore:
textureLayer.deselect()
for hit in c["hitlist"]:
tagger.safe_removeItems([hit["mask_item"]], True)
lx.eval('!material.reassign {%s} %s' % (hit["pTag"], c["pTag"]))
for textureLayer in to_restore:
try:
textureLayer.select()
except:
pass
lx.bless(CommandClass, CMD_NAME)
|
StarcoderdataPython
|
4839125
|
from wikipedia2vec import Wikipedia2Vec
import pickle
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='robust04', help='dataset name: robust04/clueweb09')
args = parser.parse_args()
def save_obj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
wiki2vec = Wikipedia2Vec.load('./enwiki_20180420_100d.pkl')
# wiki2vec = Wikipedia2Vec.load('./enwiki_20180420_300d.pkl')
ent2id = load_obj('./{}/ent2id'.format(args.dataset))
ent2vec = []
no_pretrain_emd_cnt = 0
for e in ent2id:
try:
ent2vec.append(wiki2vec.get_entity_vector(e))
    except KeyError:
no_pretrain_emd_cnt += 1
ent2vec.append(np.random.randn(100))
# ent2vec.append(np.random.randn(300))
print(no_pretrain_emd_cnt) # clueweb09:22820, robust04:8423
print(len(ent2vec)) # clueweb09:226363, robust04:108627
np.save('./{}/ent_embedding_100d.npy'.format(args.dataset), ent2vec)
# np.save('./{}/ent_embedding_300d.npy'.format(args.dataset), ent2vec)
# que_ent = load_obj('./{}/que_entity'.format(args.dataset))
# with open('./{}/que_entity_list_unique.txt'.format(args.dataset), 'w') as f:
# for i in que_ent:
# f.writelines(str(i)+'\t')
# for word in que_ent[i]:
# f.writelines(str(ent2id[word])+' ')
# f.writelines('\n')
|
StarcoderdataPython
|
3222327
|
from functools import partial
import yaml
from galaxy import model
from galaxy.model import mapping
from galaxy.security.idencoding import IdEncodingHelper
from galaxy.util import bunch
class MockTrans(object):
def __init__(self):
self.app = TestApp()
self.sa_session = self.app.model.context
self._user = None
def save_workflow(self, workflow):
stored_workflow = model.StoredWorkflow()
stored_workflow.latest_workflow = workflow
workflow.stored_workflow = stored_workflow
stored_workflow.user = self.user
self.sa_session.add(stored_workflow)
self.sa_session.flush()
return stored_workflow
@property
def user(self):
if self._user is None:
self._user = model.User(
email="<EMAIL>",
password="password"
)
return self._user
class TestApp(object):
def __init__(self):
self.config = bunch.Bunch(
tool_secret="awesome_secret",
)
self.model = mapping.init(
"/tmp",
"sqlite:///:memory:",
create_tables=True
)
self.toolbox = TestToolbox()
self.datatypes_registry = TestDatatypesRegistry()
self.security = IdEncodingHelper(id_secret="testing")
class TestDatatypesRegistry(object):
def __init__(self):
pass
def get_datatype_by_extension(self, ext):
return ext
class TestToolbox(object):
def __init__(self):
self.tools = {}
def get_tool(self, tool_id, tool_version=None, exact=False, tool_uuid=None):
        # The real toolbox also returns None for a missing tool
return self.tools.get(tool_id, None)
def get_tool_id(self, tool_id):
tool = self.get_tool(tool_id)
return tool and tool.id
def yaml_to_model(has_dict, id_offset=100):
if isinstance(has_dict, str):
has_dict = yaml.safe_load(has_dict)
workflow = model.Workflow()
workflow.steps = []
for i, step in enumerate(has_dict.get("steps", [])):
workflow_step = model.WorkflowStep()
if "order_index" not in step:
step["order_index"] = i
if "id" not in step:
            # Fixed offset ids just to test against the assumption order_index != id
step["id"] = id_offset
id_offset += 1
step_type = step.get("type", None)
assert step_type is not None
if step_type == "subworkflow":
subworkflow_dict = step["subworkflow"]
del step["subworkflow"]
subworkflow = yaml_to_model(subworkflow_dict, id_offset=id_offset)
step["subworkflow"] = subworkflow
id_offset += len(subworkflow.steps)
for key, value in step.items():
if key == "input_connections":
raise NotImplementedError()
if key == "inputs":
inputs = []
for input_name, input_def in value.items():
step_input = model.WorkflowStepInput(workflow_step)
step_input.name = input_name
connections = []
for conn_dict in input_def.get("connections", []):
conn = model.WorkflowStepConnection()
for conn_key, conn_value in conn_dict.items():
if conn_key == "@output_step":
target_step = workflow.steps[conn_value]
conn_value = target_step
conn_key = "output_step"
if conn_key == "@input_subworkflow_step":
conn_value = step["subworkflow"].step_by_index(conn_value)
conn_key = "input_subworkflow_step"
setattr(conn, conn_key, conn_value)
connections.append(conn)
step_input.connections = connections
inputs.append(step_input)
value = inputs
if key == "workflow_outputs":
value = [partial(_dict_to_workflow_output, workflow_step)(_) for _ in value]
if key == 'collection_type':
key = 'tool_inputs'
value = {'collection_type': value}
setattr(workflow_step, key, value)
workflow.steps.append(workflow_step)
return workflow
def _dict_to_workflow_output(workflow_step, as_dict):
output = model.WorkflowOutput(workflow_step)
for key, value in as_dict.items():
setattr(output, key, value)
return output
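# Example (a sketch, not from the original module): a minimal workflow
# description exercising yaml_to_model. The step types and the input name
# 'input1' are made up for illustration; the "@output_step": 0 entry wires the
# second step's input to the first step's output.
EXAMPLE_WORKFLOW_YAML = """
steps:
  - type: data_input
  - type: tool
    inputs:
      input1:
        connections:
          - "@output_step": 0
"""

if __name__ == "__main__":
    example_workflow = yaml_to_model(EXAMPLE_WORKFLOW_YAML)
    assert len(example_workflow.steps) == 2
    assert example_workflow.steps[1].order_index == 1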
|
StarcoderdataPython
|
1763126
|
<filename>vbx/events/__init__.py<gh_stars>1-10
from .call import Call
from .message import Message
__all__ = ['Call', 'Message']
|
StarcoderdataPython
|
1689308
|
import numpy as np
import scipy.special as ss
from scipy.optimize import root_scalar
from scipy import integrate
from csr2d.core2 import psi_x0, psi_s, Es_case_B, Fx_case_B_Chris, Es_case_A, Fx_case_A, Es_case_C, Fx_case_C, Es_case_D, Fx_case_D, psi_s_case_E, Es_case_E
from csr2d.core2 import alpha_exact_case_B_brentq, alpha_exact_case_D_brentq
from numba import njit
from quantecon.optimize.root_finding import newton
from scipy import optimize
from scipy.signal import find_peaks
def symmetric_vec(n, d):
"""
Returns a symmetric vector about 0 of length 2*n with spacing d.
The center = 0 is at [n-1]
"""
return np.arange(-n+1,n+1,1)*d
def green_mesh(density_shape, deltas, rho=None, gamma=None, offset=(0,0,0),
component='psi_s', map_f=map, phi=None, phi_m=None, lamb=None,
include_break_points=True, debug=False):
"""
    Computes Green function meshes for a particular component.
These meshes are in real space (not scaled space).
Parameters
----------
shape : tuple(int, int)
Shape of the charge mesh (nz, nx)
deltas : tuple(float, float)
mesh spacing corresonding to dz, dx
gamma : float
relativistic gamma
map_f : map function for creating potential grids.
Examples:
map (default)
executor.map
Returns:
        Double-sized array for the Green function with the specified component
"""
nz, nx = tuple(density_shape)
dz, dx = tuple(deltas) # Convenience
if debug:
print('component:', component)
# Change to internal coordinates
    if component not in ('psi_s_case_E', 'Es_case_E_IGF'):
if debug:
print('Change to internal coordinates...')
# handle negative rho
#rho_sign = np.sign(rho)
#rho = abs(rho)
dx = dx/rho
dz = dz/(2*rho)
# Make an offset grid
vecs = [symmetric_vec(n, delta) for n, delta, o in zip(density_shape, [dz,dx], offset)]
#vecs[0] = rho_sign*vecs[0] # Flip sign of x
meshes = np.meshgrid(*vecs, indexing='ij') # this gives zm2 and xm2
# Only case B has a potential form of psi_s
if component == 'psi_s':
green = psi_s(*meshes, gamma)
# psi_x is incorrect
#elif component == 'psi_x':
# green = rho_sign*psi_x0(*meshes, gamma, dz, dx)
elif component == 'psi_s_case_E':
green = psi_s_case_E(*meshes, gamma)
#elif component == 'Es_case_E':
# green = Es_case_E(*meshes, gamma)
elif component == 'Es_case_D':
assert lamb>=0 , "lamb (exit distance over rho) must be positive for case D !"
green = Es_case_D(*meshes, gamma, lamb)
# Case A fields
elif component =='Es_case_A':
assert phi>=0 , "phi (entrance angle) must be positive for case A !"
green = Es_case_A(*meshes, gamma, phi/2)
elif component =='Fx_case_A':
assert phi>=0 , "phi (entrance angle) must be positive for case A !"
green = Fx_case_A(*meshes, gamma, phi/2)
# Case C fields
elif component =='Es_case_C':
assert phi_m>=0 , "phi_m must be positive for case C !"
assert lamb>=0 , "lamb (exit distance over rho) must be positive for case C !"
        green = Es_case_C(*meshes, gamma, phi_m/2, lamb)
elif component =='Fx_case_C':
assert phi_m>=0 , "phi_m must be positive for case C !"
assert lamb>=0 , "lamb (exit distance over rho) must be positive for case C !"
        green = Fx_case_C(*meshes, gamma, phi_m/2, lamb)
# ===================================================
# Case B fields IGF
elif component in ['Fx_case_B_IGF', 'Es_case_B_IGF','Es_case_E_IGF']:
if component == 'Es_case_B_IGF':
F = Es_case_B
elif component == 'Fx_case_B_IGF':
F = Fx_case_B_Chris
else:
F = Es_case_E
# Flat meshes
Z = meshes[0].flatten()
X = meshes[1].flatten()
# Select special points for IGF
ix_for_IGF = np.where(abs(Z) < dz*2.5)
# ix_for_IGF = np.where(np.logical_and( abs(Z)<dz*2, abs(X)<dx*2 ))
if debug:
print(f'Finding IGF for {len(ix_for_IGF[0])} points...')
Z_special = Z[ix_for_IGF]
X_special = X[ix_for_IGF]
if include_break_points == True:
xvec2 = vecs[1]
# The spike_list can not be an numpy array since its elements have potentially different sizes
def find_case_B_spike_x(x):
return find_Es_or_Fx_case_B_spike(F, x, gamma)
spike_list = list(map(find_case_B_spike_x, xvec2))
fzx = lambda z, x: IGF_z_case_B(F, z, x, dz, dx, gamma, xvec2=xvec2, spike_list=spike_list)/dz # evaluate special
else:
fzx = lambda z, x: IGF_z_case_B(F, z, x, dz, dx, gamma)/dz # evaluate special
res = map(fzx, Z_special, X_special)
G_short = np.array(list(res))
if debug:
print(f'Done. Starting midpoint method...')
G = F(Z, X, gamma) # Simple midpoint evaluation
G[ix_for_IGF] = G_short # Replace at special points with calculated IGF
green = G.reshape(meshes[0].shape) # reshape
# ===================================================
# Case D fields IGF
elif component in ['Fx_case_D_IGF', 'Es_case_D_IGF']:
assert lamb>=0 , "lamb (exit distance over rho) must be positive for case D !"
if component == 'Es_case_D_IGF':
F = Es_case_D
else:
F = Fx_case_D
# Flat meshes
Z = meshes[0].flatten()
X = meshes[1].flatten()
# Select special points for IGF
ix_for_IGF = np.where(abs(Z) < dz*3.5)
# ix_for_IGF = np.where(np.logical_and( abs(Z)<dz*2, abs(X)<dx*2 ))
if debug:
print(f'Finding IGF for {len(ix_for_IGF[0])} points...')
Z_special = Z[ix_for_IGF]
X_special = X[ix_for_IGF]
if include_break_points == True:
xvec2 = vecs[1]
# The spike_list can not be an numpy array since its elements have potentially different sizes
def find_case_D_spike_x(x):
return find_Es_or_Fx_case_D_spike(F, x, gamma, lamb)
spike_list = list(map(find_case_D_spike_x, xvec2))
fzx = lambda z, x: IGF_z_case_D(F, z, x, dz, dx, gamma, lamb, xvec2=xvec2, spike_list=spike_list)/dz # evaluate special
else:
fzx = lambda z, x: IGF_z_case_D(F, z, x, dz, dx, gamma, lamb)/dz # evaluate special
res = map(fzx, Z_special, X_special)
G_short = np.array(list(res))
        if debug:
            print(f'Done. Starting midpoint method...')
G = F(Z, X, gamma, lamb) # Simple midpoint evaluation
G[ix_for_IGF] = G_short # Replace at special points with calculated IGF
green = G.reshape(meshes[0].shape) # reshape
else:
raise ValueError(f'Unknown component: {component}')
return green
def IGF_z_case_B(func, z, x, dz, dx, gamma, xvec2=None, spike_list=None):
"""
Special Integrated Green Function (IGF) in the z direction only
"""
#func_x = lambda x: func(z, x, gamma)
func_z = lambda z: func(z, x, gamma)
#if abs(z) < 1e-14:
# if (abs(x) < 1e-14):
# return 0
points = [z]
    if spike_list is not None:
x_index = np.argmin(np.abs(xvec2 - x))
        spikes = spike_list[x_index] # a list of z positions of the spikes at xvec2[x_index]
spikes_in_dz = [zp for zp in spikes if zp < z+dz/2 and zp > z-dz/2]
# A rare situation in which too many break points are found (oscillatory curve)
# The integrator cannot take more than 100(?) of them
# This seems to happen for x = 0
# When this happens, neglect these points
if len(spikes_in_dz) > 10:
points = [z]
else:
points = [z] + spikes_in_dz
return integrate.quad(func_z, z-dz/2, z+dz/2, points = points, epsrel=1e-6, limit=100)[0]
def IGF_z_case_D(func, z, x, dz, dx, gamma, lamb, xvec2=None, spike_list=None):
"""
Special Integrated Green Function (IGF) in the z direction only
"""
#func_x = lambda x: func(z, x, gamma)
func_z = lambda z: func(z, x, gamma, lamb)
#if abs(z) < 1e-14:
# if (abs(x) < 1e-14):
# return 0
points = [z]
    if spike_list is not None:
x_index = np.argmin(np.abs(xvec2 - x))
        spikes = spike_list[x_index] # a list of z positions of the spikes at xvec2[x_index]
spikes_in_dz = [zp for zp in spikes if zp < z+dz/2 and zp > z-dz/2]
# A rare situation in which too many break points are found (oscillatory curve)
# The integrator cannot take more than 100(?) of them
# This seems to happen for x = 0
# When this happens, neglect these points
if len(spikes_in_dz) > 10:
points = [z]
else:
points = [z] + spikes_in_dz
return integrate.quad(func_z, z-dz/2, z+dz/2, points = points, epsrel=1e-6, limit=100)[0]
def IGF_z_case_E(func, z, x, dz, dx, gamma):
"""
Special Integrated Green Function (IGF) in the z direction only
"""
#func_x = lambda x: func(z, x, gamma)
func_z = lambda z: func(z, x, gamma)
if abs(z) < 1e-14:
if (abs(x) < 1e-14):
return 0
return integrate.quad(func_z, z-dz/2, z+dz/2,
points = [z],
epsrel=1e-6, # Coarse
limit=100)[0]
def case_B_denom(z,x,gamma):
"""
    The denominator of Es_case_B and Fx_case_B
"""
beta2 = 1-1/gamma**2
beta = np.sqrt(beta2)
alp = alpha_exact_case_B_brentq(z, x, beta)
sin2a = np.sin(2*alp)
kap = (2*(alp - z))/beta # kappa for case B
return kap - beta*(1+x)*sin2a
def find_Es_or_Fx_case_B_spike(func, xval, gamma):
"""
Return a list of z values at which Es_case_B(z,xval) has spikes.
func has to be either "Es_case_B" or "Fx_case_B"
"""
def case_B_denom_z(z):
return case_B_denom(z,xval,gamma)
# First find where denom ~ 0, a good reference point close to spike(s)
op = optimize.root(case_B_denom_z, 0, tol=1E-6)
if op.success == False:
#print('no root found for denom!! Might be due to small gamma')
return [0]
root = op.x[0]
def func_z(z):
return func(z, xval, gamma)
# The range and resolution are subjected to changes...
zv = np.linspace( root - 2E-11, root + 2E-11, 2001 )
peak_ix = np.union1d(find_peaks( func_z(zv))[0], find_peaks( -func_z(zv))[0])
return list(zv[peak_ix])
def case_D_denom(z, x, gamma, lamb):
beta2 = 1-1/gamma**2
beta = np.sqrt(beta2)
alp = alpha_exact_case_D_brentq(z, x, beta, lamb)
sin2a = np.sin(2*alp)
cos2a = np.cos(2*alp)
kap = (2*(alp - z) + lamb)/beta # kappa for case D
return kap - beta*(lamb*cos2a + (1+x)*sin2a)
def find_Es_or_Fx_case_D_spike(func, xval, gamma, lamb):
"""
Return a list of z values at which Es_case_D(z,xval) has spikes
func has to be either "Es_case_D" or "Fx_case_D"
"""
def case_D_denom_z(z):
return case_D_denom(z, xval, gamma, lamb)
# First find where denom ~ 0, and we are close to spike
op = optimize.root(case_D_denom_z, 0, tol=1E-6)
if op.success == False:
#print('no root found for denom!! Might be due to small gamma')
return np.array([0])
root = op.x[0]
def func_z(z):
return func(z, xval, gamma, lamb)
zv = np.linspace( root - 2E-11, root + 2E-11, 2001 )
peak_ix = np.union1d(find_peaks( func_z(zv))[0], find_peaks( -func_z(zv))[0])
return list(zv[peak_ix])
## ============== Below are higher level functions ===================================
@njit
def my_2d_convolve2(g1, g2, ix1, ix2):
"""
Convolution for a specific observation point only, at (ix1, ix2)
Assumption: g2 is a double-sized grid of g1.
Parameters
----------
g1 : 2D array of size (nz, nx)
g2 : 2D array of size (2*nz, 2*nx)
ix1, ix2 : int
Returns:
A single value, the convolution result at (ix1, ix2)
"""
d1, d2 = g1.shape
g2_flip = np.flip(g2)
g2_cut = g2_flip[d1-ix1:2*d1-ix1, d2-ix2:2*d2-ix2]
sums = 0
for i in range(d1):
for j in range(d2):
sums+= g1[i,j]*g2_cut[i,j]
return sums
@njit
def boundary_convolve(case, z_observe, x_observe, zvec, xvec, dz, dx, lambda_grid_filtered, Green, gamma=None, rho=None, phi=None):
beta2 = 1-1/gamma**2
beta = np.sqrt(beta2)
x_observe_index = np.argmin(np.abs(xvec - x_observe))
z_observe_index = np.argmin(np.abs(zvec - z_observe))
nz = len(zvec)
nx = len(xvec)
cond = np.zeros( (nz,nx) ) # To be filled with True and Flase
# Boundary condition
temp = (x_observe - xvec)/rho
if case == 1:
zi_vec = rho*( phi - beta*np.sqrt(temp**2 + 4*(1 + temp)*np.sin(phi/2)**2))
for i in range(nx):
cond[:,i] = (zvec > z_observe - zi_vec[i])
elif case == 2:
zi_vec = rho*( phi - beta*np.sqrt(temp**2 + 4*(1 + temp)*np.sin(phi/2)**2))
zo_vec = -beta*np.abs(x_observe - xvec)
for i in range(nx):
cond[:,i] = (zvec > z_observe - zo_vec[i]) | (zvec < z_observe - zi_vec[i])
else:
print('Unknown case !!!')
#raise ValueError(f'Unknown case: {case} !!!')
lambda_grid_filtered_bounded = np.where(cond, 0, lambda_grid_filtered)
conv = my_2d_convolve2(lambda_grid_filtered_bounded, Green, z_observe_index, x_observe_index)
return conv
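# Usage sketch (hypothetical numbers): build the case-B 'psi_s' Green function
# mesh for a 64 x 32 charge grid with 1 um spacing, bending radius rho = 1 m
# and gamma = 500. The returned array is double-sized, i.e. (2*nz, 2*nx).
if __name__ == '__main__':
    G_psi_s = green_mesh((64, 32), (1e-6, 1e-6), rho=1.0, gamma=500.0,
                         component='psi_s', debug=True)
    print(G_psi_s.shape)  # (128, 64)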
|
StarcoderdataPython
|
1616100
|
#108
def metade(preco=0):
res = preco/2
return res
def dobro(preco=0):
res = preco * 2
return res
def aumentar(preco=0, taxa=0):
res = preco + (preco*taxa/100)
return res
def diminuir(preco=0, taxa=0):
res = preco - (preco*taxa/100)
return res
def moeda(preco = 0, moeda = 'R$'):
return f'{moeda}{preco:.2f}'.replace('.',',')
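# Example usage (a quick sketch chaining the helpers above):
if __name__ == '__main__':
    p = 100.0
    print(moeda(aumentar(p, 10)))  # R$110,00
    print(moeda(diminuir(p, 13)))  # R$87,00
    print(moeda(dobro(p)))         # R$200,00
    print(moeda(metade(p)))        # R$50,00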
|
StarcoderdataPython
|
3379615
|
"""
Validation of PacBio dataset XML (and referenced files)
"""
import xml.etree.ElementTree as ET
from cStringIO import StringIO
from urlparse import urlparse
import xml.parsers.expat
import traceback
import itertools
import argparse
import logging
import os.path
import sys
try:
from pyxb import exceptions_ as pyxbexceptions
except ImportError:
class pyxbexceptions(object):
class PyXBException(Exception):
pass
class ValidationError(Exception):
pass
class StructuralBadDocumentError(Exception):
pass
from pbcore.io.dataset.DataSetReader import xmlRootType
from pbcore.io.dataset.DataSetIO import _dsIdToName
from pbcore.io.dataset import DataSet, DataSetValidator
from pbcore.io import IndexedBamReader, IndexedFastaReader
import pbcore.io
from pbcoretools.pbvalidate.core import (get_context_class, run_validators,
ValidatorError, ValidateFile, ValidateFileObject)
from pbcoretools.pbvalidate import fasta
from pbcoretools.pbvalidate import bam
log = logging.getLogger(__name__)
class Constants(object):
XML_NAMESPACE = "http://pacificbiosciences.com/PacBioBaseDataModel.xsd"
class DatasetTypes(object):
BAM_DATASET = ["AlignmentSet", "ConsensusSet", "ConsensusAlignmentSet",
"SubreadSet", "TranscriptSet"]
FASTA_DATASET = ["BarcodeSet", "ContigSet", "ReferenceSet",
"GmapReferenceSet"]
HDF5_DATASET = ["HdfSubreadSet"]
ALL = BAM_DATASET + FASTA_DATASET + HDF5_DATASET
def _validate_read_groups(ctx, validators, reader):
"""
Extra loop for validating just the read groups in .bam file headers.
"""
if not DatasetReader.get_dataset_type(reader) in DatasetTypes.BAM_DATASET:
return None
try:
bam_readers = reader.resourceReaders()
for bam_reader in reader.resourceReaders():
if bam_reader is None:
log.warn("Skipping unopenable file")
continue
log.debug("Opened file: " + str(bam_reader))
bam.validate_read_groups(ctx, validators, bam_reader)
except IOError as e:
# missing file, will be caught by ValidateResources
return
class MissingIndexError(ValidatorError):
MESSAGE_FORMAT = "Missing corresponding index files for the underlying " +\
"raw data files."
class ReaderError(ValidatorError):
MESSAGE_FORMAT = "Unexpected error reading dataset: %s. This prevents " +\
"any further validation functions from being run."
class MissingEncodingError(ValidatorError):
MESSAGE_FORMAT = "This XML document is either missing the header " +\
"or the encoding type is missing or wrong; all DataSet XMLs should " +\
"explicitly specify UTF-8 encoding."
class XMLError(ValidatorError):
MESSAGE_FORMAT = "XML schema error: %s"
# XXX currently redundant with underlying dataset API, untested
class MissingResourceIdError (ValidatorError):
MESSAGE_FORMAT = "Found ExternalResource but no ResourceId is specified"
class MissingResourceError (ValidatorError):
MESSAGE_FORMAT = "The external resource %s referenced by this dataset " +\
"could not be located."
class ResourceOpenError (ValidatorError):
MESSAGE_FORMAT = "The external resource %s referenced by this dataset " +\
"is present, but could not be opened."
class DatasetTypeError(ValidatorError):
MESSAGE_FORMAT = "The expected type was %s, but the loaded dataset has " +\
"type %s. (Note that this may render any additional validation " +\
"errors irrelevant.)"
class FileNameError(ValidatorError):
MESSAGE_FORMAT = "The dataset file %s is named incorrectly - datasets " +\
"of type '%s' should have the extension '%s'."
class TimeStampedNameError(ValidatorError):
MESSAGE_FORMAT = "This dataset does not contain the TimeStampedName " +\
"attribute, which is a mandatory component of the current schema."
class NamespaceError(ValidatorError):
MESSAGE_FORMAT = "The XML namespace '%s' for externalResources is " +\
"different than the expected namespace '%s'; this may indicate " +\
"that it is using obsolete schema."
class RootTagError(ValidatorError):
MESSAGE_FORMAT = "The XML root tag '%s' does not match the declared " +\
"dataset MetaType '%s'."
class NumRecordsError(ValidatorError):
MESSAGE_FORMAT = "The number of records specified in the metadata (%s) " +\
"is greater than the number of records in the data file(s) (%s)."
class ValidateXML(ValidateFile):
def _get_errors(self, path):
emsg = None
try:
DataSetValidator.validateFile(path, skipResources=True)
except pyxbexceptions.StructuralBadDocumentError as e:
emsg = "{t} ('<{n}>')".format(t=type(e).__name__,
n=e.node.tagName) # pylint: disable=no-member
except pyxbexceptions.ValidationError as e:
emsg = "{t}: {m}".format(
t=type(e).__name__, m=e.details()) # pylint: disable=no-member
except pyxbexceptions.PyXBException as e:
emsg = "{t}: {m})".format(t=type(e).__name__, m=str(e.message))
except Exception as e:
emsg = str(e)
if emsg is not None:
return [XMLError.from_args(path, emsg)]
return []
def validate(self, path):
return len(self._get_errors(path)) == 0
def to_errors(self, path):
return self._get_errors(path)
class ValidateRootTag(ValidateXML):
def _get_errors(self, path):
first = DataSet(path, strict=False)
ds_id = first.objMetadata.get('MetaType')
xml_rt = xmlRootType(path)
ds_name = _dsIdToName(ds_id)
if ds_name != xml_rt:
if ds_name == "SubreadSet" and xml_rt == "HdfSubreadSet":
return []
return [RootTagError.from_args(path, xml_rt, _dsIdToName(ds_id))]
return []
class ValidateEncoding(ValidateXML):
def __init__(self, *args, **kwds):
self._has_xml_declaration = False
super(ValidateEncoding, self).__init__(*args, **kwds)
def _get_errors(self, path):
self._has_xml_declaration = False
e = []
with open(path, 'r') as xmlfile:
p = xml.parsers.expat.ParserCreate()
def handle_xml_decl(version, encoding, standalone):
if (not self._has_xml_declaration and
(encoding is None or encoding.lower() != "utf-8")):
e.append(MissingEncodingError.from_args(path))
self._has_xml_declaration = True
p.XmlDeclHandler = handle_xml_decl
p.Parse(xmlfile.read())
if not self._has_xml_declaration:
e.append(MissingEncodingError.from_args(path))
return e
class ValidateResources (ValidateFileObject):
"""
Verify that the external resources specified in the XML file actually
exist on the local filesystem
"""
def _get_errors(self, file_obj):
e = []
for item in file_obj.externalResources:
# XXX this is redundant
if (not hasattr(item, "resourceId") or
item.resourceId is None):
e.append(MissingResourceIdError.from_args(file_obj))
else:
continue
return e
def validate(self, file_obj):
return len(self._get_errors(file_obj)) == 0
def to_errors(self, file_obj):
return self._get_errors(file_obj)
class ValidateResourcesOpen (ValidateResources):
"""
Verify that the dataset object is capable of supplying open resource files.
Note that since we assume ValidateResources is being run first, we can
ignore any errors that result from the file(s) being absent entirely.
"""
def _get_errors(self, file_obj):
errors = []
try:
for r, f in itertools.izip(file_obj.externalResources,
file_obj.resourceReaders()):
if f is None:
errors.append(ResourceOpenError.from_args(file_obj,
urlparse(r.resourceId).path))
except IOError as e:
if e.filename is None or not os.path.exists(e.filename):
log.info("File %s doesn't exist, skipping" % e.filename)
return []
log.warn("Encountered IOError opening %s" % e.filename)
return [ResourceOpenError.from_args(file_obj, e.filename)]
else:
return errors
class ValidateIndex (ValidateResources):
def _get_errors(self, file_obj):
if not file_obj.isIndexed:
return [MissingIndexError.from_args(file_obj)]
return []
# TODO write test
class ValidateRandomAccess (ValidateResources):
def _get_errors(self, file_obj):
if len(file_obj.resourceReaders()) == 0 or not file_obj.isIndexed:
return []
errors = []
for rr in file_obj.resourceReaders():
if isinstance(rr, IndexedBamReader):
errors.extend(bam.ValidateRandomAccess()._get_errors(rr))
elif isinstance(rr, IndexedFastaReader):
errors.extend(fasta.ValidateRandomAccess()._get_errors(rr))
else:
# logging.warn("Can't check indices for %s" % rr.filename)
pass
return errors
def _dataset_type(ds):
return ds.objMetadata.get('MetaType').split(".")[-1]
class ValidateDatasetType (ValidateResources):
"""
Verify that the opened dataset class name is the same as the user-supplied
expected class name (if given).
"""
def __init__(self, dataset_type=None):
self.dataset_type = dataset_type
def _get_errors(self, file_obj):
ds_type = _dataset_type(file_obj)
if self.dataset_type is None or self.dataset_type == "any":
return []
elif self.dataset_type != ds_type:
# XXX see pbcore.io.dataset.DataSetIO:HdfSubreadSet - not sure
# I understand what's going on here but I think it is a patch for
# bug 27976
if self.dataset_type == "HdfSubreadSet" and ds_type == "SubreadSet":
return []
return [DatasetTypeError.from_args(
DatasetReader.get_dataset_object(file_obj),
self.dataset_type, ds_type)]
return []
class ValidateFileName (ValidateResources):
"""
Check for consistency with file name conventions enforced when writing
dataset files from the pbcore API in strict mode.
"""
def __init__(self, file_name=None):
self.file_name = file_name
def _get_errors(self, file_obj):
if self.file_name is not None:
dataset_type = DatasetReader.get_dataset_type(file_obj)
extension = ".%s.xml" % dataset_type.lower()
if not self.file_name.endswith(extension):
return [FileNameError.from_args(
DatasetReader.get_dataset_object(file_obj),
os.path.basename(self.file_name),
dataset_type,
extension)]
return []
class ValidateMetadata(ValidateResources):
"""
Check that the metadata in the XML file contains tags expected by the
current schema.
"""
def _get_errors(self, file_obj):
ds = DatasetReader.get_dataset_object(file_obj)
if not "TimeStampedName" in ds.objMetadata:
return [TimeStampedNameError.from_args(ds)]
return []
class ValidateNamespace(ValidateResources):
def _get_errors(self, file_obj):
ds = DatasetReader.get_dataset_object(file_obj)
ns = ds.externalResources.namespace
if ns != Constants.XML_NAMESPACE:
return [NamespaceError.from_args(ds, ns, Constants.XML_NAMESPACE)]
return []
class ValidateFileProxy (ValidateFileObject):
"""
Wrapper for calling a file validator repeatedly on all files in the
dataset. Since it is assumed that ValidateResourcesOpen will be run first,
failure to open a resource will be ignored with a log warning.
"""
validator_class = None
def __init__(self, **kwds):
self._validator = self.validator_class( # pylint: disable=not-callable
**kwds)
self._errors = set([])
def validate(self, file_obj):
try:
for _reader in file_obj.resourceReaders():
if _reader is None:
# XXX if this happens, the file simply isn't present -
# which is handled separately by ValidateResourcesOpen
return True
log.debug("Opened file: " + str(_reader))
if not self._validator.validate(_reader):
errors_ = self._validator.to_errors(_reader)
self._errors.update(set(errors_))
except IOError as e:
#log.warn("Can't open file %s" % e.filename)
return True
else:
return len(self._errors) == 0
def to_errors(self, file_obj):
return list(self._errors)
class ValidateContents (ValidateFileProxy):
"""Wrapper for pbvalidate.bam.ValidateContents"""
validator_class = bam.ValidateContents
class ValidateSorting (ValidateFileProxy):
"""Wrapper for pbvalidate.bam.ValidateSorting"""
validator_class = bam.ValidateSorting
class ValidateFastaRaw (ValidateFileObject):
"""Wrapper for pbvalidate.fasta.ValidateFastaRaw"""
def __init__(self, **kwds):
self._validator = fasta.ValidateFastaRaw(**kwds)
self._errors = set([])
def validate(self, file_obj):
for ext_res in file_obj.externalResources:
path = urlparse(ext_res.resourceId).path
log.debug("Validating %s" % path)
if not self._validator.validate(path):
errors_ = self._validator.to_errors(path)
self._errors.update(set(errors_))
return len(self._errors) == 0
def to_errors(self, file_obj):
return list(self._errors)
class ValidateNumRecords(ValidateResources):
def _get_errors(self, file_obj):
ds = DatasetReader.get_dataset_object(file_obj)
nr_metadata = ds.numRecords
nr_actual = 0
if ds.isIndexed:
for rr in ds.resourceReaders():
nr_actual += len(rr)
else:
for rr in ds.resourceReaders():
nr_actual += len([rec for rec in rr])
if nr_metadata > nr_actual:
return [NumRecordsError.from_args(ds, nr_metadata, nr_actual)]
return []
class DatasetReader (object):
"""
Proxy for opening a dataset and iterating over records while avoiding an
IOError if the resources can't be opened (since we already validate this
separately).
"""
def __init__(self, reader_class, file_name):
self.reader_class = reader_class
self.file_name = file_name
self._reader = self.reader_class(self.file_name)
@property
def reader_name(self):
return type(self._reader).__name__
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._reader.close()
return False
def __getattr__(self, name):
return getattr(self._reader, name)
def __iter__(self):
try:
for rec in self._reader:
yield rec
except IOError as e:
pass
def __str__(self):
return str(self._reader)
def __repr__(self):
return repr(self._reader)
@staticmethod
def get_dataset_type(file_obj):
"""
Return the class name of the actual DataSet-derived object, which
could be either file_obj or its _reader attribute. This allows the
relevant validator classes to be called with either a DatasetReader or
the underlying DataSet.
"""
if isinstance(file_obj, DatasetReader):
return file_obj.reader_name
return type(file_obj).__name__
@staticmethod
def get_dataset_object(file_obj):
if isinstance(file_obj, DatasetReader):
return file_obj._reader
return file_obj
def validate_dataset(
file_name,
dataset_type=None,
reference=None,
quick=False,
max_errors=None,
max_records=None,
contents=None,
aligned=None,
validate_index=False,
strict=False):
assert os.path.isfile(os.path.realpath(file_name))
ds = None
ReaderClass = getattr(pbcore.io, str(dataset_type), pbcore.io.openDataSet)
log.debug("ReaderClass: %s" % ReaderClass.__name__)
try:
# XXX suppressing logging errors temporarily
# logging.disable(logging.CRITICAL)
try:
ds = ReaderClass(file_name, strict=True)
finally:
pass # logging.disable(logging.NOTSET)
except Exception as e:
# XXX in strict mode the reader will cough up an IOError if the
# requested dataset type does not agree with the XML. if this happens
# there's no point doing any additional validation.
if False: # True:
# XXX actually, it can cough up other errors too if there is
# something wrong with the underlying files and it tries to read
# them immediately. Still treating this as a validation error, but
# it may indicate bugs.
_, _, ex_traceback = sys.exc_info()
tb_lines = traceback.format_exception(e.__class__, e, ex_traceback)
log.error("\n".join(tb_lines))
errors = [ReaderError.from_args(file_name, str(e))]
return errors, {}
log.debug("Dataset type: %s" % ds.__class__.__name__)
actual_dataset_type = _dataset_type(ds)
log.debug("Actual type: %s" % actual_dataset_type)
subread_types = (pbcore.io.SubreadSet, pbcore.io.AlignmentSet)
ccs_types = (pbcore.io.ConsensusReadSet, pbcore.io.ConsensusAlignmentSet)
transcript_types = (pbcore.io.TranscriptSet)
if isinstance(ds, ccs_types) and contents is None:
contents = "CCS"
elif isinstance(ds, subread_types) and contents is None:
contents = "SUBREAD"
elif isinstance(ds, transcript_types) and contents is None:
contents = "TRANSCRIPT"
validators = [
ValidateEncoding(),
ValidateRootTag(),
ValidateResources(),
ValidateDatasetType(dataset_type),
ValidateMetadata(),
ValidateNamespace(),
ValidateRandomAccess(),
]
    if actual_dataset_type not in DatasetTypes.HDF5_DATASET:
validators.extend([
ValidateResourcesOpen(),
ValidateNumRecords(),
])
if validate_index:
validators.append(ValidateIndex())
if strict:
validators.extend([
ValidateXML(),
ValidateFileName(file_name),
])
additional_validation_function = None
opened_class_name = ds.__class__.__name__
# XXX not sure this is ideal - what if it opens as a ReferenceSet but we
# asked for an AlignmentSet? This is caught by ValidateDatasetType, but
# we'd still check for Fasta file errors.
if opened_class_name in DatasetTypes.FASTA_DATASET:
validators_ = fasta.get_validators(validate_raw_format=False)
validators_.insert(0, ValidateFastaRaw())
validators.extend(validators_)
elif opened_class_name in DatasetTypes.BAM_DATASET:
validators_ = bam.get_validators(aligned=aligned,
contents=contents,
include_file_validators=False)
validators_.insert(0, ValidateSorting())
validators_.insert(0, ValidateContents(aligned=aligned,
content_type=contents))
validators.extend(validators_)
additional_validation_function = _validate_read_groups
def ReaderClass_wrapper(*args, **kwds):
logging.disable(logging.CRITICAL)
try:
return DatasetReader(ReaderClass, *args, **kwds)
finally:
logging.disable(logging.NOTSET)
context_class = get_context_class(
quick=quick,
max_errors=max_errors,
max_records=max_records)
errors, metrics = run_validators(
context_class=context_class,
path=file_name,
reader_class=ReaderClass_wrapper,
validators=validators,
additional_validation_function=additional_validation_function)
return errors, metrics
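# Illustrative usage sketch of validate_dataset (the file name and dataset type below
# are hypothetical; a real call needs an existing dataset XML on disk):
#
#     errors, metrics = validate_dataset(
#         "movie.subreadset.xml",
#         dataset_type="SubreadSet",
#         quick=True,
#         max_errors=10)
#     for err in errors:
#         print(err)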
def get_parser():
parser = argparse.ArgumentParser()
return parser
def get_format_specific_args(parser):
pass
|
StarcoderdataPython
|
3283614
|
<reponame>SnowMasaya/python-viasualize
import dataset
db = dataset.connect('sqlite:///nobel_prize.db')
wtable = db['winners']
winners = wtable.find()
winners = list(winners)
print(winners)
# wtable.drop()
wtable = db['winners']
winners = list(wtable.find())
print(winners)
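# Illustrative only: rows could be inserted before querying, e.g. (hypothetical data)
#
#     wtable.insert(dict(name='Marie Curie', category='Chemistry', year=1911))
#     print(list(wtable.find(category='Chemistry')))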
|
StarcoderdataPython
|
3265197
|
from typing import Optional
from gym.envs import register as gym_register
_ENTRY_POINT_PREFIX = "airl_envs"
# _ENTRY_POINT_PREFIX = ""
def _register(env_name: str, entry_point: str, kwargs: Optional[dict] = None):
entry_point = f"{_ENTRY_POINT_PREFIX}.{entry_point}"
# entry_point = f"airl_envs.{entry_point}"
gym_register(id=env_name, max_episode_steps=1e6, entry_point=entry_point, kwargs=kwargs)
def _point_maze_register():
for dname, dval in {"": 0, "Wall": 1}.items():
for hname, hval in {"": 0, "Harder": 1}.items():
for rname, rval in {"": 0, "Reach": 1}.items():
for vname, vval in {"": False, "Vel": True}.items():
_register(
f"PointMaze{dname}{hname}{rname}{vname}-v0",
entry_point="envs:PointMazeEnv",
kwargs={"with_wall": dval, "harder":hval, "done_when_reach":rval, "include_vel": vval},
)
_point_maze_register()
# _register(
# "ObjPusher-v0",
# entry_point="pusher_env:PusherEnv",
# kwargs={"sparse_reward": False},
# )
# _register("TwoDMaze-v0", entry_point="envs:TwoDMaze")
# A modified ant which flips over less and learns faster via TRPO
_register(
"CustomAnt-v0",
entry_point="envs:CustomAntEnv",
kwargs={"gear": 30, "disabled": False},
)
_register(
"DisabledAnt-v0",
entry_point="envs:CustomAntEnv",
kwargs={"gear": 30, "disabled": True},
)
# register(
# id='TwoDMaze-v0',
# max_episode_steps=200,
# entry_point='airl_envs.envs:TwoDMaze',
# )
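# Illustrative usage sketch (assumes the airl_envs package providing the entry points
# above is importable; the environment id comes from the registrations above):
#
#     import gym
#     env = gym.make("CustomAnt-v0")
#     obs = env.reset()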
|
StarcoderdataPython
|
3298795
|
<gh_stars>1-10
import execjs
import time
import math
import hashlib
from functools import partial
class JSTool:
def __init__(self, file_path, func):
self.js_file_path = file_path
self.signature_js_func = func
def get_js(self, js_file_path, mode='r'):
"""
:param data:
@js_file_path: js脚本路径
@mode : IO操作js脚本文件的模式,默认为只读
:return:
@result : js脚本文本
"""
f = open(js_file_path, mode)
line = f.readline()
result = ''
while line:
result += line
line = f.readline()
return result
def py_to_js(self, js_file_path, js_func, *params):
"""
:usage:
python传值给js脚本执行后获取其结果
:param data:
@js_file_path : js脚本路径
@js_func : 要执行的js脚本的函数
@*params : python传给js函数的参数,可传多个
:return:
@result : js脚本执行后的结果
"""
js_script = self.get_js(js_file_path)
JsContext = execjs.compile(js_script)
result = JsContext.call(js_func, *params)
return result
def get_ascp(self):
"""
:usage:
获取今日头条伪造请求链接的参数AS、CP
:return:
@result : 元组(AS、CP)
"""
t = int(math.floor(time.time()))
e = hex(t).upper()[2:]
m = hashlib.md5()
m.update(str(t).encode(encoding='utf-8'))
i = m.hexdigest().upper()
if len(e) != 8:
AS = '479BB4B7254C150'
CP = '7E0AC8874BB0985'
return AS, CP
n = i[0:5]
a = i[-5:]
s = ''
r = ''
for o in range(5):
s += n[o] + e[o]
r += e[o + 3] + a[o]
AS = 'A1' + s + e[-3:]
CP = e[0:3] + r + 'E1'
return AS, CP
def payload_for_get(self, id, mode, max_behot_time):
"""
:usage:
根据参数,爬取模式:文章、视频、微头条等来生成今日头条爬取
用户首页内容的伪造请求参数
:param data:
@id : 用户id
@mode : 爬取模式,0:视频,1:文章,2:微头条
@max_bahot_time : 区分爬取下拉获取条目的时间戳标识,起始为0
:return:
@result : 生成的伪造请求参数
"""
        _signature = self.py_to_js(self.js_file_path, self.signature_js_func, id, max_behot_time)  # use the JS path stored in __init__
# ascp = py_to_js(ascp_js_path,ascp_js_func)
_as, _cp = self.get_ascp()
return {
'page_type': mode,
'user_id': id,
'max_behot_time': max_behot_time,
'count': '20',
'as': _as,
'cp': _cp,
'_signature': _signature
}
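# Illustrative usage sketch (the script path, JS function name and user id below are
# hypothetical; a real signature JS file implementing the named function is required):
#
#     tool = JSTool('signature.js', 'getSignature')
#     payload = tool.payload_for_get(id='1234567890', mode=1, max_behot_time=0)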
|
StarcoderdataPython
|
34990
|
# import discord
# import asyncio
# import json
# from discord.ext import commands
# from discord.utils import get
# # from cogs.personalPoint import PersonalPoint
# from main import client
# from discord_ui import UI,Button
# from functions.userClass import User,experiences,levelNames
# from cogs.rank import getSortedMembers
# ui = UI(client)
# class Information(commands.Cog):
# def __init__(self,client):
# self.client = client
# @commands.command()
# async def bilgi(self,ctx):
# embed = discord.Embed(title="Üye Bilgi Ekranı",description="Üye bilgi ekranına hoş geldin.\nAşağıdaki butonlara basarak\nbilgisini almak istediğin içeriği görebilirsin.",color = 0x8d42f5,)
# embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.avatar_url)
# message = await ctx.channel.send(
# embed=embed,
# components = [
# Button(
# label = "Mevcut Seviye",
# custom_id = "seviye",
# color = ButtonStyle.Green,
# emoji = "📰",
# ),
# Button(
# label = "Liderlik Tablosu",
# custom_id = "liderliktablosu",
# color = ButtonStyle.Green,
# emoji = "📋",
# ),
# Button(
# label = "Detaylı Bilgi",
# custom_id = "detaylıbilgi",
# color = ButtonStyle.Green,
# emoji = "📜",
# new_line=True
# ),
# Button(
# label="Görevler",
# custom_id = "görevler",
# color = ButtonStyle.Green,
# emoji = "🪧",
# ),
# Button(
# label="Seviyeler",
# custom_id = "seviyeler",
# color = ButtonStyle.Green,
# emoji = "🚩",
# new_line=True
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# ),
# ]
# )
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# info[ctx.author.id] = message.id
# with open("files/infoMessage.json","w") as file:
# json.dump(info,file,indent=4)
# @ui.components.listening_component('seviye')
# async def listening_component(component):
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# try:
# if component.message.id != info[f"{component.author.id}"]:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# else:
# await component.message.edit(components=[
# Button(
# label = "Mevcut Seviye",
# custom_id = "seviye",
# color = ButtonStyle.Green,
# emoji = "📰",
# disabled=True
# ),
# Button(
# label = "Liderlik Tablosu",
# custom_id = "liderliktablosu",
# color = ButtonStyle.Green,
# emoji = "📋",
# disabled=True
# ),
# Button(
# label = "Detaylı Bilgi",
# custom_id = "detaylıbilgi",
# color = ButtonStyle.Green,
# emoji = "📜",
# new_line=True,
# disabled=True
# ),
# Button(
# label="Görevler",
# custom_id = "görevler",
# color = ButtonStyle.Green,
# emoji = "🪧",
# disabled=True
# ),
# Button(
# label="Seviyeler",
# custom_id = "seviyeler",
# color = ButtonStyle.Green,
# emoji = "🚩",
# new_line=True,
# disabled=True
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# disabled=True
# ),
# ])
# try:
# await component.respond()
# except:
# pass
# member = component.author
# user = User(member.id)
# if not member.bot:
# embed = discord.Embed(title=f"{member.name}#{member.discriminator} adlı kullanıcının değerleri",description="",color=0x8d42f5)
# embed.add_field(name="Mevcut değerler - 🏆 ",value="Seviyesi = **{}**\n Puanı = **{}**\n Rütbesi = **{}**\n".format(user.level,user.XP,user.levelName,inline=False))
# if user.isMaxLevel():
# embed.add_field(name="Bir sonraki rütbe - 🚀 ",value=f"**Maksimum seviyeye ulaştınız!**",inline=False)
# elif not user.isMaxLevel():
# if experiences[user.level] - user.XP <= 0:
# embed.add_field(name="Bir sonraki rütbe - 🚀 ",value=f"**{levelNames[user.getLevel(user.XP)]}** rütbesine ulaştın! Seviye atlamak için ses kanalına girebilirsin.",inline=False)
# else:
# embed.add_field(name="Bir sonraki rütbe - 🚀 ",value=f"**{levelNames[user.level]}** rütbesi için kalan puan = **{(experiences[user.level-2])-user.XP}**",inline=False)
# embed.set_author(name=component.author.display_name, icon_url=component.author.avatar_url)
# await component.message.edit(embed=embed,components=[
# Button(
# label="Geri",
# custom_id="geri",
# color=ButtonStyle.Grey,
# emoji="⬅️"
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# )
# ])
# except KeyError:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# return
# try:
# await component.respond()
# except:
# pass
# @ui.components.listening_component('liderliktablosu')
# async def listening_component(component):
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# try:
# if component.message.id != info[f"{component.author.id}"]:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# else:
# await component.message.edit(components=[
# Button(
# label = "Mevcut Seviye",
# custom_id = "seviye",
# color = ButtonStyle.Green,
# emoji = "📰",
# disabled=True
# ),
# Button(
# label = "Liderlik Tablosu",
# custom_id = "liderliktablosu",
# color = ButtonStyle.Green,
# emoji = "📋",
# disabled=True
# ),
# Button(
# label = "Detaylı Bilgi",
# custom_id = "detaylıbilgi",
# color = ButtonStyle.Green,
# emoji = "📜",
# new_line=True,
# disabled=True
# ),
# Button(
# label="Görevler",
# custom_id = "görevler",
# color = ButtonStyle.Green,
# emoji = "🪧",
# disabled=True
# ),
# Button(
# label="Seviyeler",
# custom_id = "seviyeler",
# color = ButtonStyle.Green,
# emoji = "🚩",
# new_line=True,
# disabled=True
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# disabled=True
# ),
# ])
# try:
# await component.respond()
# except:
# pass
# sortedMembers = getSortedMembers(component)
# embed=discord.Embed(title="Sıralama",inline=False,color=0x8d42f5)
# embed.set_author(name=component.author.display_name, icon_url=component.author.avatar_url)
# count = 1
# for key,value in sortedMembers.items():
# embed.add_field(name="{} - {}".format(count,key),value="**Puan**: {}\n**Rütbe**: {}".format(value[0],value[1]),inline=False)
# count += 1
# if count == 11:break
# await component.message.edit(embed=embed,components=[
# Button(
# label="Geri",
# custom_id="geri",
# color=ButtonStyle.Grey,
# emoji="⬅️"
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# )
# ])
# except KeyError:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# @ui.components.listening_component('detaylıbilgi')
# async def listening_component(component):
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# try:
# if component.message.id != info[f"{component.author.id}"]:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# else:
# await component.message.edit(components=[
# Button(
# label = "Mevcut Seviye",
# custom_id = "seviye",
# color = ButtonStyle.Green,
# emoji = "📰",
# disabled=True
# ),
# Button(
# label = "Liderlik Tablosu",
# custom_id = "liderliktablosu",
# color = ButtonStyle.Green,
# emoji = "📋",
# disabled=True
# ),
# Button(
# label = "Detaylı Bilgi",
# custom_id = "detaylıbilgi",
# color = ButtonStyle.Green,
# emoji = "📜",
# new_line=True,
# disabled=True
# ),
# Button(
# label="Görevler",
# custom_id = "görevler",
# color = ButtonStyle.Green,
# emoji = "🪧",
# disabled=True
# ),
# Button(
# label="Seviyeler",
# custom_id = "seviyeler",
# color = ButtonStyle.Green,
# emoji = "🚩",
# new_line=True,
# disabled=True
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# disabled=True
# ),
# ])
# liste = {}
# XP = {}
# for i in range(1,11):
# liste[f'level{i}'] = 0
# XP[f'xp{i}'] = ""
# if i == 1:
# XP[f"xp{i}"] += f"{levelNames[i-1]}"
# else:
# XP[f'xp{i}'] += f"{levelNames[i-1]} - {experiences[i-2]}"
# try:
# await component.respond()
# except:
# pass
# for member in client.get_all_members():
# if not member.bot:
# user = User(member.id)
# liste[f'level{user.level}'] += 1
# message = discord.Embed(title = "Detaylı Bilgi",description="**Aşağıda, hangi seviyede kaç kullanıcının bulunduğunu öğrenebilirsin**",color = 0x8d42f5)
# for level in range(1,11):
# XPs = XP[f'xp{level}']
# levels = liste[f'level{level}']
# if levels == 0:
# if XP[f'xp{level}'] == "Guest":
# message.add_field(name=f"*Seviye {level}* / {XPs}:",value=f"Bu seviyede herhangi biri yok.",inline=False)
# else:
# message.add_field(name=f"*Seviye {level}* / {XPs} XP:",value=f"Bu seviyede herhangi biri yok.",inline=False)
# else:
# if XP[f'xp{level}'] == "Guest":
# message.add_field(name=f"*Seviye {level}* / {XPs}:",value=f"**{levels}** kişi bu seviyede.",inline=False)
# else:
# message.add_field(name=f"*Seviye {level}* / {XPs} XP:",value=f"**{levels}** kişi bu seviyede.",inline=False)
# message.set_author(name=component.author.display_name, icon_url=component.author.avatar_url)
# await component.message.edit(embed=message,components=[
# Button(
# label="Geri",
# custom_id="geri",
# color=ButtonStyle.Grey,
# emoji="⬅️"
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# )
# ])
# except KeyError:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# @ui.components.listening_component('görevler')
# async def listening_component(component):
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# try:
# if component.message.id != info[f"{component.author.id}"]:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# else:
# await component.message.edit(components=[
# Button(
# label = "Mevcut Seviye",
# custom_id = "seviye",
# color = ButtonStyle.Green,
# emoji = "📰",
# disabled=True
# ),
# Button(
# label = "Liderlik Tablosu",
# custom_id = "liderliktablosu",
# color = ButtonStyle.Green,
# emoji = "📋",
# disabled=True
# ),
# Button(
# label = "Detaylı Bilgi",
# custom_id = "detaylıbilgi",
# color = ButtonStyle.Green,
# emoji = "📜",
# new_line=True,
# disabled=True
# ),
# Button(
# label="Görevler",
# custom_id = "görevler",
# color = ButtonStyle.Green,
# emoji = "🪧",
# disabled=True
# ),
# Button(
# label="Seviyeler",
# custom_id = "seviyeler",
# color = ButtonStyle.Green,
# emoji = "🚩",
# new_line=True,
# disabled=True
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# disabled=True
# ),
# ])
# try:
# await component.respond()
# except:
# pass
# embed = discord.Embed(
# title = "Görevler",
# description = "**Bir gemiye atla ve bir oyun üret**;\nPC/Platform .............................. 10.0000 XP\nMobil ............................................... 5.000 XP\nHyperCasual................................... 2.000 XP\nGameJam.......................................... 1.000XP\n*Oyun yayınlanırsa kazanılan deneyim puanı iki katına çıkar*",
# color = 0x8d42f5
# )
# embed.add_field(
# name = "\n\nSunucu Takviyesi",
# value = "Her sunucu takviyesi başına **250 XP**",
# inline=False
# )
# embed.add_field(
# name = "\n\nSes Kanallarına Aktif Ol",
# value = "Dakika başına 1 XP\n*Not: Kazanılan XP, yayın ve kamera açma durumuna göre değişiklik gösterir.*",
# inline=False
# )
# embed.set_author(name=component.author.display_name, icon_url=component.author.avatar_url)
# await component.message.edit(embed=embed,components=[
# Button(
# label="Geri",
# custom_id="geri",
# color=ButtonStyle.Grey,
# emoji="⬅️"
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# )
# ])
# except KeyError:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# @ui.components.listening_component('seviyeler')
# async def listening_component(component):
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# try:
# if component.message.id != info[f"{component.author.id}"]:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# else:
# await component.message.edit(components=[
# Button(
# label = "Mevcut Seviye",
# custom_id = "seviye",
# color = ButtonStyle.Green,
# emoji = "📰",
# disabled=True
# ),
# Button(
# label = "Liderlik Tablosu",
# custom_id = "liderliktablosu",
# color = ButtonStyle.Green,
# emoji = "📋",
# disabled=True
# ),
# Button(
# label = "Detaylı Bilgi",
# custom_id = "detaylıbilgi",
# color = ButtonStyle.Green,
# emoji = "📜",
# new_line=True,
# disabled=True
# ),
# Button(
# label="Görevler",
# custom_id = "görevler",
# color = ButtonStyle.Green,
# emoji = "🪧",
# disabled=True
# ),
# Button(
# label="Seviyeler",
# custom_id = "seviyeler",
# color = ButtonStyle.Green,
# emoji = "🚩",
# new_line=True,
# disabled=True
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# disabled=True
# ),
# ])
# try:
# await component.respond()
# except:
# pass
# embed = discord.Embed(
# title = "Seviyeler",
# description = "Aşağıda, sunucuda bulunan mevcut seviyeleri görebilirsin.",
# color = 0x8d42f5
# )
# embed.add_field(
# name = "Guest:",
# value = "Misafir statüsünde üye",
# inline = False,
# )
# embed.add_field(
# name = "Colony Member / 250 XP:",
# value = "Koloni üyesi",
# inline = False,
# )
# embed.add_field(
# name = "Open Crew / 1.987 XP:",
# value = "Açık gemilerde mürettebat olma hakkına sahip üye",
# inline = False,
# )
# embed.add_field(
# name = "Crew / 6.666 XP:",
# value = "Bütün gemilerde mürettebat olma hakkına sahip üye",
# inline = False,
# )
# embed.add_field(
# name = "Captain / 9.999 XP:",
# value = "Gemilere kaptanlık yapma hakkına sahip üye",
# inline = False,
# )
# embed.add_field(
# name = "Judge / 30.000 XP:",
# value = "Oy kullanma hakkına sahip üye",
# inline = False,
# )
# embed.add_field(
# name = "Colony Manager / 90.000 XP:",
# value = "Tasarlanacak oyunlara karar veren üye",
# inline = False,
# )
# embed.add_field(
# name = "Mars Lover / 300.000 XP:",
# value = "Yayınlanan bütün oyunlarda adına teşekkür edilen üye",
# inline = False,
# )
# embed.add_field(
# name = "Chief of the Colony / 900.000 XP:",
# value = "Kolonideki kamu yönetiminde, herhangi bir rolü alabilen üye, A.K.A Chief",
# inline = False,
# )
# embed.add_field(
# name = "Partner / 10.000.001 XP:",
# value = "Koloninin fahri ortağı",
# inline = False,
# )
# embed.set_author(name=component.author.display_name, icon_url=component.author.avatar_url)
# await component.message.edit(embed=embed,components = [
# Button(
# label="Geri",
# custom_id="geri",
# color=ButtonStyle.Grey,
# emoji="⬅️"
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# )
# ])
# except KeyError:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# @ui.components.listening_component('geri')
# async def listening_component(component):
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# try:
# if component.message.id != info[f"{component.author.id}"]:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# else:
# embed = discord.Embed(title="Üye Bilgi Ekranı",description="Üye bilgi ekranına hoş geldin.\nAşağıdaki butonlara basarak\nbilgisini almak istediğin içeriği görebilirsin.",color = 0x8d42f5)
# embed.set_author(name=component.author.display_name, icon_url=component.author.avatar_url)
# try:
# await component.respond()
# except:
# pass
# await component.message.edit(
# embed=embed,
# components = [
# Button(
# label = "Mevcut Seviye",
# custom_id = "seviye",
# color = ButtonStyle.Green,
# emoji = "📰",
# ),
# Button(
# label = "Liderlik Tablosu",
# custom_id = "liderliktablosu",
# color = ButtonStyle.Green,
# emoji = "📋",
# ),
# Button(
# label = "Detaylı Bilgi",
# custom_id = "detaylıbilgi",
# color = ButtonStyle.Green,
# emoji = "📜",
# new_line=True,
# ),
# Button(
# label="Görevler",
# custom_id = "görevler",
# color = ButtonStyle.Green,
# emoji = "🪧",
# ),
# Button(
# label="Seviyeler",
# custom_id = "seviyeler",
# color = ButtonStyle.Green,
# emoji = "🚩",
# new_line=True,
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# ),
# ]
# )
# except KeyError:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# @ui.components.listening_component('sil')
# async def listening_component(component):
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# try:
# if component.message.id != info[f"{component.author.id}"]:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# await component.message.delete()
# else:
# try:
# await component.respond()
# except:
# pass
# await component.message.delete()
# del info[component.author.id]
# with open("files/infoMessage.py","w",encoding="utf-8") as dosya:
# dosya.write("info = ")
# dosya.write(str(info))
# except KeyError:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# def setup(client):
# client.add_cog(Information(client))
|
StarcoderdataPython
|
3366781
|
<filename>DataPreprocessing/data_segmentation.py
# coding=utf-8
import gc
import wordsegment
# import sys
# sys.setrecursionlimit(10000)
def word_segment(text, limit=250):
next_text = wordsegment.clean(text)
word_list = []
while len(next_text) > limit:
current_text = next_text[:limit]
next_text = next_text[limit:]
word_list.extend(wordsegment.segment(current_text))
        next_text = ''.join(word_list[-5:]) + next_text  # prepend the last 5 segmented words as context for the next chunk
word_list = word_list[:-5]
gc.collect()
word_list.extend(wordsegment.segment(next_text))
text = ' '.join(w for w in word_list)
return text
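# Illustrative usage (hypothetical input; depending on the installed wordsegment
# version, wordsegment.load() may need to be called once before segmenting):
#
#     print(word_segment('thequickbrownfoxjumpsoverthelazydog'))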
|
StarcoderdataPython
|
103311
|
<gh_stars>0
"""Moduł zawierający bazowe wartości dotyczące rozgrywki.
Grafika:
* www.flaticon.com
* www.pexels.com
"""
import pygame
WIN_WIDTH = 1200 #: Window width
WIN_HEIGHT = 780 #: Window height
# GAME VARIABLES
RUN = True #: Condition for running the main pygame loop.
FPS = 60 #: Frames per second.
MID = WIN_WIDTH/2 #: Half of the screen width.
BOTTOM = WIN_HEIGHT-100 #: Bottom part of the screen.
MARGIN = 60 #: Margin for objects.
PLAYER_SPEED = 5 #: Player speed.
BULLET_SPEED = 6 #: Bullet speed.
PANDAS_ENTRY_SPEED = 6 #: Speed of the level entry animation.
ANIM_SPEED = 2
BULLET_FREQUENCY = 6 #: Bullet frequency.
GRAVITY = 0.1 #: Gravity for the bananas (points).
# WINDOW SETTINGS
WIN = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT)) #: Game window
clock = pygame.time.Clock()
pygame.display.set_caption("Trash Pandas") #: Window title
pygame.font.init()
pygame.mixer.init()
pygame.mixer.music.load("assets/bananaeat.mp3")
pygame.mixer.music.set_volume(0.4)
# STRINGS
text_color = (255, 255, 255)
title = pygame.font.SysFont('comfortaaregular', 80)
myfont = pygame.font.SysFont('comfortaaregular', 50)
scoreboard_font = pygame.font.SysFont('comfortaaregular', 30)
TRASHPANDAS = title.render('TRASH PANDAS', False, text_color)
YOU_WIN = title.render('YOU WIN', False, text_color)
NICK = myfont.render('NICK', False, text_color)
# IMAGES
BG_LVL1 = pygame.image.load('assets/background_lvl1_blur.jpg') #: Level 1 background.
BG_LVL2 = pygame.image.load('assets/background_lvl2_blur.jpg') #: Level 2 background.
SHIP = pygame.image.load('assets/spaceshipv2.png') #: Spaceship sprite.
RACCOON = pygame.image.load('assets/racoon64.png') #: Raccoon sprite.
HP = pygame.image.load('assets/heart_32.png') #: Heart sprite.
BULLET = pygame.image.load('assets/bullet_fire.png') #: Bullet sprite.
POINT = pygame.image.load('assets/banana.png') #: Banana (point) sprite.
|
StarcoderdataPython
|
1614459
|
from pydeation.document import Document
from pydeation.animation.animation import VectorAnimation, AnimationGroup
from pydeation.animation.object_animators import Show, Hide
from abc import ABC, abstractmethod
from collections import defaultdict
import c4d
class Scene(ABC):
"""abstract class acting as blueprint for scenes"""
def __init__(self, resolution="default"):
doc = Document()
self.document = doc.document
self.set_scene_name()
self.insert_document()
self.construct()
# render settings
self.render_settings = RenderSettings()
self.render_settings.set_resolution(resolution)
@abstractmethod
def construct(self):
"""here the actual scene consisting out of objects and animations is constructed
this method should be overwritten by the inheriting scene classes"""
pass
@property
def scene_name(self):
"""holds the scene name"""
return self._scene_name
@scene_name.setter
def scene_name(self, name):
self._scene_name = name
def set_scene_name(self):
"""sets the scene name and the document name"""
self.scene_name = self.__class__.__name__
self.document.SetDocumentName(self.scene_name)
def insert_document(self):
"""inserts the document into cinema"""
c4d.documents.InsertBaseDocument(self.document)
def group_animations_by_obj(self, animations):
"""sorts the animations by their target"""
animations_grouped_by_obj = defaultdict(list)
for animation in animations:
animations_grouped_by_obj[animation.target].append(animation)
return animations_grouped_by_obj
def group_obj_animations_by_desc_id(self, obj_animations):
"""sorts the animations by their description id"""
obj_animations_grouped_by_desc_id = defaultdict(list)
for obj_animation in obj_animations:
obj_animations_grouped_by_desc_id[obj_animation.param_id].append(
obj_animation)
return obj_animations_grouped_by_desc_id
def sort_desc_id_animations_chronologically(self, desc_id_animations):
"""sorts animations chronologically by the relative run times"""
desc_id_animations_chronological = sorted(
desc_id_animations, key=lambda x: x.rel_start)
return desc_id_animations_chronological
def link_animation_chains(self, animations):
"""sorts the animation by target and description id to identify and link animation chains
(sets initial value of following animation equal to final value of preceding one)"""
linked_animations = []
animations_grouped_by_obj = self.group_animations_by_obj(
animations) # group animations by object
for obj_animations in animations_grouped_by_obj.values():
obj_animations_grouped_by_desc_id = self.group_obj_animations_by_desc_id(
obj_animations) # group animations by desc id
for desc_id_animations in obj_animations_grouped_by_desc_id.values():
desc_id_animations_chronological = self.sort_desc_id_animations_chronologically(
desc_id_animations) # sort animations chronologically
for i, desc_id_animation in enumerate(desc_id_animations_chronological):
                    # only link vector animations
if type(desc_id_animation) is VectorAnimation:
# link chain according to type relative/absolute
previous_animations = desc_id_animations_chronological[:i]
# shift vector by all previous vectors
if desc_id_animation.relative:
for previous_animation in previous_animations:
desc_id_animation += previous_animation
# shift initial value by all previous vectors
else:
vectors = []
for previous_animation in previous_animations:
vector = previous_animation.get_vector() # get vector
vectors.append(vector) # collect vector
value_ini = sum(vectors) + \
desc_id_animation.value_ini
desc_id_animation.set_value_ini(
value_ini) # set new value
linked_animations += desc_id_animations_chronological
return linked_animations
def feed_run_time(self, animations, run_time):
"""feeds the run time to animations"""
for animation in animations:
animation.abs_run_time = run_time
def execute_animations(self, animations):
"""passes the run time to animations and executes them"""
for animation in animations:
animation.execute()
def add_time(self, run_time):
"""passes the run time in the document timeline"""
time_ini = self.document.GetTime()
time_fin = time_ini + c4d.BaseTime(run_time)
self.document.SetTime(time_fin)
c4d.EventAdd() # update cinema
def flatten(self, animations):
"""flattens animations by wrapping them inside animation group"""
animation_group = AnimationGroup(*animations)
flattened_animations = animation_group.animations
return flattened_animations
def add_show_animation(self, animation_group):
"""adds a show animator in the beginning of the animation group"""
objs = animation_group.get_objs()
min_rel_start = animation_group.get_min_rel_start()
animation_group_with_show = AnimationGroup(
(Show(*objs), (min_rel_start, min_rel_start)), animation_group) # we use a zero length tuple to keep compatibility with vector animations
return animation_group_with_show
def add_hide_animation(self, animation_group):
"""adds a show animator in the beginning of the animation group"""
objs = animation_group.get_objs()
max_rel_stop = animation_group.get_max_rel_stop()
animation_group_with_hide = AnimationGroup(
(Hide(*objs), (max_rel_stop, max_rel_stop)), animation_group) # we use a zero length tuple to keep compatibility with vector animations
return animation_group_with_hide
def handle_visibility(self, animations):
"""adds visibility animators depending on the category of the animation group"""
animation_groups_with_visibility = []
for animation in animations:
if type(animation) is AnimationGroup:
animation_group = animation # is animation group
if animation_group.category == "constructive":
animation_group_with_visibility = self.add_show_animation(
animation_group)
elif animation_group.category == "destructive":
animation_group_with_visibility = self.add_hide_animation(
animation_group)
else:
animation_group_with_visibility = animation_group
animation_groups_with_visibility.append(
animation_group_with_visibility)
else:
animation_groups_with_visibility.append(animation)
return animation_groups_with_visibility
def play(self, *animations, run_time=1):
"""handles several tasks for the animations:
- handles visibility
- flattens animations
- links animation chains
- feeds them the run time
- executes the animations"""
animations_with_visibility = self.handle_visibility(animations)
flattened_animations = self.flatten(animations_with_visibility)
linked_animations = self.link_animation_chains(flattened_animations)
self.feed_run_time(linked_animations, run_time)
self.execute_animations(linked_animations)
self.add_time(run_time)
def wait(self, seconds=1):
"""adds time without any animations"""
self.add_time(seconds)
class RenderSettings():
"""holds and writes the render settings to cinema"""
def __init__(self):
self.document = c4d.documents.GetActiveDocument() # get document
self.set_base_settings()
self.set_sketch_settings()
def set_base_settings(self):
"""sets the base settings"""
self.settings = self.document.GetActiveRenderData()
# set parameters
self.settings[c4d.RDATA_FRAMESEQUENCE] = 3 # set range to preview
self.settings[c4d.RDATA_FORMAT] = 1125 # set to MP4
def set_resolution(self, resolution):
"""sets the resolution for the render"""
if resolution == "verylow":
self.settings[c4d.RDATA_XRES] = 320
self.settings[c4d.RDATA_YRES] = 180
elif resolution == "low":
self.settings[c4d.RDATA_XRES] = 480
self.settings[c4d.RDATA_YRES] = 270
elif resolution == "default":
self.settings[c4d.RDATA_XRES] = 1280
self.settings[c4d.RDATA_YRES] = 720
elif resolution == "high":
self.settings[c4d.RDATA_XRES] = 2560
self.settings[c4d.RDATA_YRES] = 1440
elif resolution == "veryhigh":
self.settings[c4d.RDATA_XRES] = 3840
self.settings[c4d.RDATA_YRES] = 2160
def set_sketch_settings(self):
"""sets the sketch and toon settings"""
sketch_vp = c4d.documents.BaseVideoPost(
1011015) # add sketch render settings
# set parameters
sketch_vp[c4d.OUTLINEMAT_SHADING_BACK_COL] = c4d.Vector(
0, 0, 0) # set background to black
sketch_vp[c4d.OUTLINEMAT_SHADING_OBJECT] = False # disable shading
# set independent of pixel units
sketch_vp[c4d.OUTLINEMAT_PIXELUNITS_INDEPENDENT] = True
# show lines in editor view
sketch_vp[c4d.OUTLINEMAT_EDLINES_SHOWLINES] = True
sketch_vp[c4d.OUTLINEMAT_EDLINES_LINE_DRAW] = 1 # 3D lines in editor
# set to custom mode
sketch_vp[c4d.OUTLINEMAT_PIXELUNITS_INDEPENDENT_MODE] = 1
sketch_vp[c4d.OUTLINEMAT_PIXELUNITS_BASEW] = 1280 # set custom width
sketch_vp[c4d.OUTLINEMAT_PIXELUNITS_BASEH] = 700 # set custom height
sketch_vp[c4d.OUTLINEMAT_EDLINES_REDRAW_FULL] = True # redraw lines
sketch_vp[c4d.OUTLINEMAT_LINE_SPLINES] = True # enable splines
self.settings.InsertVideoPost(
sketch_vp) # insert sketch settings
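# Illustrative sketch of a concrete scene (the class name and the objects/animations
# used inside construct are hypothetical and depend on the rest of the pydeation package):
#
#     class MyScene(Scene):
#         def construct(self):
#             # create pydeation objects here, then animate them, e.g.
#             # self.play(<some animations>, run_time=2)
#             # self.wait(1)
#             ...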
|
StarcoderdataPython
|
4592
|
<reponame>TeoZosa/pytudes
"""https://www.educative.io/courses/grokking-the-coding-interview/N7rwVyAZl6D
Categories:
- Binary
- Bit Manipulation
- Blind 75
See Also:
- pytudes/_2021/leetcode/blind_75/linked_list/_141__linked_list_cycle__easy.py
"""
from pytudes._2021.utils.linked_list import (
ListNode,
NodeType,
convert_list_to_linked_list,
)
def has_cycle(head: NodeType) -> bool:
"""
Args:
head: head of a singly-linked list of nodes
Returns:
whether or not the linked list has a cycle
Examples:
>>> has_cycle(None)
False
>>> head = ListNode("self-edge")
>>> head.next = head
>>> has_cycle(head)
True
>>> head = convert_list_to_linked_list([1,2,3,4,5,6])
>>> has_cycle(head)
False
>>> head.next.next.next.next.next.next = head.next.next
>>> has_cycle(head)
True
>>> head.next.next.next.next.next.next = head.next.next.next
>>> has_cycle(head)
True
"""
slow = fast = head
while fast is not None and fast.next is not None: # since fast ≥ slow
slow = slow.next
fast = fast.next.next
if slow == fast:
return True # found the cycle
else:
return False
def main():
head = convert_list_to_linked_list([1, 2, 3, 4, 5, 6])
print("LinkedList has cycle: " + str(has_cycle(head)))
head.next.next.next.next.next.next = head.next.next
print("LinkedList has cycle: " + str(has_cycle(head)))
head.next.next.next.next.next.next = head.next.next.next
print("LinkedList has cycle: " + str(has_cycle(head)))
main()
|
StarcoderdataPython
|
1774927
|
<gh_stars>1-10
from fastapi import FastAPI, status
app = FastAPI()
# Sample endpoint to get a successful response
@app.get("/success", status_code=status.HTTP_200_OK)
def success():
return {"msg": "Success"}
# Sample endpoint to get an error response
@app.get("/error", status_code=status.HTTP_403_FORBIDDEN)
def error():
return {"msg": "Error"}
|
StarcoderdataPython
|
161650
|
<gh_stars>0
import pyautogui
import time
from pynput.mouse import Listener
""" This scripts clicks forward surveys or courses that make you wait between pages for some seconds or till a video is
finished
"""
idx, idy = 0, 0
def main():
print("Please fulscreen or don't move that window. Click on the posiion where you want the mouse to keep clicking")
print("Waiting for mouse click position")
def on_click(x, y, button, pressed):
print('{0} at {1}'.format('Pressed' if pressed else 'Released',(x, y)))
global idx, idy
idx, idy = x,y
if not pressed:
# Stop listener
return False
# Collect events until released
with Listener(on_click=on_click) as listener:
listener.join()
print("Enter time to wait between next between clicks. Example 30 if 30 secs between pages: ")
time_duration = int(input())
print("Enter how many pages to skip. 20 more pages to complete the course: ")
pages = int(input())
for i in range(pages):
current_x, current_y = pyautogui.position()
pyautogui.click(x=round(idx), y=round(idy)) # this works better even when you are using the mouse
pyautogui.moveTo(current_x, current_y) # move back mouse
print("page {} skipped".format(i+1))
time.sleep(time_duration)
print("Process completed")
if __name__== "__main__":
main()
|
StarcoderdataPython
|
1738620
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Common Python library imports
# Pip package imports
import pytest
from flask import url_for
from flask_login import current_user
# Internal package imports
@pytest.mark.usefixtures('user')
class TestLogin:
def test_html_get_login(self, client, templates):
r = client.get(url_for('security.login'))
assert r.status_code == 200
assert templates[0].template.name == 'security/login_user.html'
def test_html_login_errors(self, client, templates):
r = client.post(url_for('security.login'),
data=dict(email=None, password=None))
assert templates[0].template.name == 'security/login_user.html'
assert b'Email not provided' in r.data
assert b'Password not provided' in r.data
def test_html_login_with_email(self, client, user):
r = client.post(url_for('security.login'),
data=dict(email=user.email, password='password'))
assert r.status_code == 302
assert r.path == '/'
assert current_user == user
def test_html_login_with_username(self, client, user):
r = client.post(url_for('security.login'),
data=dict(email=user.username, password='password'))
assert r.status_code == 302
assert r.path == '/'
assert current_user == user
def test_json_login_errors(self, api_client):
r = api_client.post(url_for('api.login'),
data=dict(email=None, password=None))
assert 'error' in r.json
def test_json_login_with_email(self, api_client, user):
r = api_client.post(url_for('api.login'),
data=dict(email=user.email, password='password'))
assert r.status_code == 200
assert 'user' in r.json
assert 'token' in r.json
assert r.json['user']['id'] == user.id
assert current_user == user
def test_json_login_with_username(self, api_client, user):
r = api_client.post(url_for('api.login'),
data=dict(email=user.username, password='password'))
assert r.status_code == 200
assert 'user' in r.json
assert 'token' in r.json
assert r.json['user']['id'] == user.id
assert current_user == user
def test_active_user_required(self, api_client, user):
user.active = False
user.save(commit=True)
r = api_client.post(url_for('api.login'),
data=dict(email=user.email, password='password'))
assert r.status_code == 401
@pytest.mark.options(SECURITY_CONFIRMABLE=True)
def test_confirmed_user_required(self, api_client):
from backend.security.models import User
from backend.security.views.user_resource import register_user
user = User(username='test',
email='<EMAIL>',
password='password',
first_name='the',
last_name='user')
register_user(user)
r = api_client.post(url_for('api.login'),
data=dict(email=user.email, password='password'))
assert r.status_code == 401
assert 'Email requires confirmation.' == r.json['error']
|
StarcoderdataPython
|
3214963
|
# wifi_controller.py/Open GoPro, Version 1.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Tue May 18 22:08:50 UTC 2021
"""Manage a WiFI connection using native OS commands."""
# TODO This file needs to be cleaned up.
import os
import re
import time
import logging
import tempfile
from enum import Enum, auto
from typing import List, Optional, Tuple, Any
from open_gopro.util import cmd
from open_gopro.interfaces import WifiController
logger = logging.getLogger(__name__)
def cmp(a: Any, b: Any) -> int:
"""Define this since it is not implemented in Python 3.
Args:
a (Any): compare to
b (Any): compare against
Returns:
int: compare result
"""
if a < b:
return -1
if a == b:
return 0
return 1
def ensure_sudo() -> None:
"""Verify that we are running as root
Raises:
Exception: Program is running as a user other than root
"""
user = cmd("whoami")
if "root" not in user:
logger.error(f"You need to be the root user to run this program but you are running as {user}")
raise Exception("Program needs to be run as root user.")
class Wireless(WifiController):
"""Top level abstraction of different WiFi drivers.
    If interface is not specified (i.e. it is None), we will attempt to
    automatically discover a suitable interface.
Args:
interface (str, optional): Interface. Defaults to None.
Raises:
Exception: Failed to find a suitable driver or auto-detect the network interface.
"""
_driver_name = "NOT INITIALIZED"
_driver: WifiController
# init
def __init__(self, interface: Optional[str] = None):
# detect and init appropriate driver
self._driver_name = self._detectDriver()
if self._driver_name == "nmcli":
self._driver = NmcliWireless(interface=interface)
elif self._driver_name == "nmcli0990":
self._driver = Nmcli0990Wireless(interface=interface)
elif self._driver_name == "wpa_supplicant":
self._driver = WpasupplicantWireless(interface=interface)
elif self._driver_name == "networksetup":
self._driver = NetworksetupWireless(interface=interface)
elif self._driver_name == "netsh":
self._driver = NetshWireless(interface=interface)
# attempt to auto detect the interface if none was provided
if self.interface() is None:
interfaces = self.interfaces()
if len(interfaces) > 0:
self.interface(interfaces[0])
# raise an error if there is still no interface defined
if self.interface() is None:
raise Exception("Unable to auto-detect the network interface.")
logger.debug(f"Using WiFi driver: {self._driver_name} with interface {self.interface()}")
def _detectDriver(self) -> str:
"""Try to find a Wifi driver that can be used.
Raises:
Exception: We weren't able to find a suitable driver
Returns:
str: Name of discovered driver
"""
# try netsh (Windows).
# NOTE! This must be first as the other checks break on Windows
response = cmd("which netsh")
if len(response) > 0 and "not found" not in response and "not recognized" not in response:
return "netsh"
response = cmd("get-command netsh")
if len(response) > 0 and "not found" not in response and "not recognized" not in response:
return "netsh"
# try nmcli (Ubuntu 14.04)
response = cmd("which nmcli")
if len(response) > 0 and "not found" not in response:
response = cmd("nmcli --version")
parts = response.split()
ver = parts[-1]
compare = self.vercmp(ver, "0.9.9.0")
if compare >= 0:
return "nmcli0990"
return "nmcli"
# try nmcli (Ubuntu w/o network-manager)
response = cmd("which wpa_supplicant")
if len(response) > 0 and "not found" not in response:
return "wpa_supplicant"
# try networksetup (Mac OS 10.10)
response = cmd("which networksetup")
if len(response) > 0 and "not found" not in response:
return "networksetup"
raise Exception("Unable to find compatible wireless driver.")
@staticmethod
def vercmp(actual: Any, test: Any) -> int:
"""Compare two versions.
Args:
actual (str): Version being compared
test (str): Thing that version is being compared to
Returns:
-1: a is less than b
0: a is equal to b
1: a is greater than b
"""
def normalize(v: str) -> List[int]:
"""Normalize a string vresion
Args:
v (str): input string
Returns:
List[int]: output int list
"""
return [int(x) for x in re.sub(r"(\.0+)*$", "", v).split(".")]
return cmp(normalize(actual), normalize(test))
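    # e.g. (illustrative): Wireless.vercmp("0.9.10", "0.9.9.0") returns 1, and
    # vercmp("0.9.9.0", "0.9.9") returns 0 once the trailing ".0" is stripped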
def connect(self, ssid: str, password: str, timeout: float = 15) -> bool:
"""Wrapper to call the OS-specific driver method.
Args:
ssid (str): network SSID
password (str): network password
timeout (float, optional): Time before considering connection failed (in seconds). Defaults to 15.
Returns:
bool: True if the connect was successful, False otherwise
"""
return self._driver.connect(ssid, password)
def disconnect(self) -> bool:
"""Wrapper to call the OS-specific driver method.
Returns:
bool: True if the disconnect was successful, False otherwise
"""
return self._driver.disconnect()
def current(self) -> Tuple[Optional[str], Optional[str]]:
"""Wrapper to call the OS-specific driver method.
Returns:
Tuple[Optional[str], Optional[str]]: (ssid, network state)
"""
return self._driver.current()
def interfaces(self) -> List[str]:
"""Wrapper to call the OS-specific driver method.
Returns:
List[str]: list of discovered interfaces
"""
return self._driver.interfaces()
def interface(self, interface: Optional[str] = None) -> Optional[str]:
"""Wrapper to call the OS-specific driver method.
Use a str as interface to set it, otherwise use None to get it.
Args:
interface (Optional[str], optional): get or set interface. Defaults to None.
Returns:
Optional[str]: Str if getting
"""
return self._driver.interface(interface)
@property
def is_on(self) -> bool:
"""Wrapper to call the OS-specific driver method.
Returns:
bool: True if on, False if off
"""
return self._driver.is_on
def power(self, power: bool) -> None:
"""Wrapper to call the OS-specific driver method.
Args:
power (bool): [description]
Returns:
[type]: [description]
"""
return self._driver.power(power)
def driver(self) -> str:
"""Get the name of the driver currently being used.
Returns:
str: Driver name.
"""
return self._driver_name
class NmcliWireless(WifiController):
"""Linux nmcli Driver < 0.9.9.0."""
_interface = None
def __init__(self, interface: str = None) -> None:
ensure_sudo()
self.interface(interface)
@staticmethod
def _clean(partial: str) -> None:
"""Clean up connections.
This is needed to prevent the following error after extended use:
'maximum number of pending replies per connection has been reached'
Args:
partial (str): part of the connection name
"""
# list matching connections
response = cmd(f"nmcli --fields UUID,NAME con list | grep {partial}")
# delete all of the matching connections
for line in response.splitlines():
if len(line) > 0:
cmd(f"nmcli con delete uuid {line.split()[0]}")
@staticmethod
def _errorInResponse(response: str) -> bool:
"""Ignore warnings in nmcli output.
Sometimes there are warnings but we connected just fine
Args:
response (str): output to parse
Returns:
bool: True if errors found. False if not.
"""
# no error if no response
if len(response) == 0:
return False
# loop through each line
for line in response.splitlines():
# all error lines start with 'Error'
if line.startswith("Error"):
return True
# if we didn't find an error then we are in the clear
return False
def connect(self, ssid: str, password: str, timeout: float = 15) -> bool:
"""[summary].
Args:
ssid (str): network SSID
password (str): network password
timeout (float, optional): Time before considering connection failed (in seconds). Defaults to 15.
Returns:
bool: [description]
"""
# clean up previous connection
current, _ = self.current()
if current is not None:
self._clean(current)
# attempt to connect
response = cmd(
"nmcli dev wifi connect {} password {} iface {}".format(ssid, password, self._interface)
)
# parse response
return not self._errorInResponse(response)
def disconnect(self) -> bool:
"""[summary].
Returns:
bool: [description]
"""
return False
def current(self) -> Tuple[Optional[str], Optional[str]]:
"""[summary].
Returns:
Tuple[Optional[str], Optional[str]]: [description]
"""
# list active connections for all interfaces
response = cmd("nmcli con status | grep {}".format(self.interface()))
# the current network is in the first column
for line in response.splitlines():
if len(line) > 0:
return (line.split()[0], None)
# return none if there was not an active connection
return (None, None)
def interfaces(self) -> List[str]:
"""[summary].
Returns:
List[str]: [description]
"""
# grab list of interfaces
response = cmd("nmcli dev")
# parse response
interfaces = []
for line in response.splitlines():
if "wireless" in line:
# this line has our interface name in the first column
interfaces.append(line.split()[0])
# return list
return interfaces
def interface(self, interface: Optional[str] = None) -> Optional[str]:
"""[summary].
Args:
interface (Optional[str], optional): [description]. Defaults to None.
Returns:
Optional[str]: [description]
"""
if interface is not None:
self._interface = interface
return None
return self._interface
@property
def is_on(self) -> bool:
"""[summary].
Returns:
bool: [description]
"""
return "enabled" in cmd("nmcli nm wifi")
def power(self, power: bool) -> None:
"""[summary].
Args:
power (bool): [description]
"""
if power:
cmd("nmcli nm wifi on")
else:
cmd("nmcli nm wifi off")
class Nmcli0990Wireless(WifiController):
"""Linux nmcli Driver >= 0.9.9.0."""
_interface = None
def __init__(self, interface: str = None):
ensure_sudo()
self.interface(interface)
# TODO Is this needed?
@staticmethod
def _clean(partial: str) -> None:
"""Clean up connections.
This is needed to prevent the following error after extended use:
'maximum number of pending replies per connection has been reached'
Args:
partial (str): part of the connection name
"""
# list matching connections
response = cmd("nmcli --fields UUID,NAME con show | grep {}".format(partial))
# delete all of the matching connections
for line in response.splitlines():
if len(line) > 0:
uuid = line.split()[0]
cmd("nmcli con delete uuid {}".format(uuid))
@staticmethod
def _errorInResponse(response: str) -> bool:
"""Ignore warnings in nmcli output.
Sometimes there are warnings but we connected just fine
Args:
response (str): output to parse
Returns:
bool: True if errors found. False if not.
"""
# no error if no response
if len(response) == 0:
return False
# loop through each line
for line in response.splitlines():
# all error lines start with 'Error'
if line.startswith("Error"):
return True
# if we didn't find an error then we are in the clear
return False
def connect(self, ssid: str, password: str, timeout: float = 15) -> bool:
"""[summary].
Args:
ssid (str): network SSID
            password (str): network password
timeout (float, optional): Time before considering connection failed (in seconds). Defaults to 15.
Returns:
bool: [description]
"""
# Scan for networks. Don't bother checking: we'll allow the error to be passed from the connect.
cmd("nmcli dev wifi list --rescan yes")
# attempt to connect
response = cmd(f"nmcli dev wifi connect {ssid} password {password} ifname {self._interface}")
# TODO verify that we're connected (and use timeout)
# parse response
return not self._errorInResponse(response)
def disconnect(self) -> bool:
"""[summary].
Returns:
bool: [description]
"""
return False
def current(self) -> Tuple[Optional[str], Optional[str]]:
"""[summary].
Returns:
Tuple[Optional[str], Optional[str]]: [description]
"""
# list active connections for all interfaces
response = cmd("nmcli con | grep {}".format(self.interface()))
# the current network is in the first column
for line in response.splitlines():
if len(line) > 0:
return (line.split()[0], None)
# return none if there was not an active connection
return (None, None)
def interfaces(self) -> List[str]:
"""[summary].
Returns:
List[str]: [description]
"""
# grab list of interfaces
response = cmd("nmcli dev")
# parse response
interfaces = []
for line in response.splitlines():
if "wifi" in line:
# this line has our interface name in the first column
interfaces.append(line.split()[0])
# return list
return interfaces
def interface(self, interface: Optional[str] = None) -> Optional[str]:
"""[summary].
Args:
interface (Optional[str], optional): [description]. Defaults to None.
Returns:
Optional[str]: [description]
"""
if interface is not None:
self._interface = interface
return None
return self._interface
@property
def is_on(self) -> bool:
"""[summary].
Returns:
bool: [description]
"""
return "enabled" in cmd("nmcli r wifi")
def power(self, power: bool) -> None:
"""[summary].
Args:
power (bool): [description]
"""
if power:
cmd("nmcli r wifi on")
else:
cmd("nmcli r wifi off")
class WpasupplicantWireless(WifiController):
"""Linux wpa_supplicant Driver."""
_file = "/tmp/wpa_supplicant.conf"
_interface = None
def __init__(self, interface: str = None):
self.interface(interface)
def connect(self, ssid: str, password: str, timeout: float = 15) -> bool:
"""[summary].
Args:
ssid (str): network SSID
password (str): network password
timeout (float, optional): Time before considering connection failed (in seconds). Defaults to 15.
Returns:
bool: [description]
"""
# attempt to stop any active wpa_supplicant instances
# ideally we do this just for the interface we care about
cmd("sudo killall wpa_supplicant")
# don't do DHCP for GoPros; can cause dropouts with the server
cmd("sudo ifconfig {} 10.5.5.10/24 up".format(self._interface))
        # create configuration file using a context manager so it is always closed
        with open(self._file, "w") as f:
            f.write('network={{\n ssid="{}"\n psk="{}"\n}}\n'.format(ssid, password))
# attempt to connect
cmd("sudo wpa_supplicant -i{} -c{} -B".format(self._interface, self._file))
# check that the connection was successful
# i've never seen it take more than 3 seconds for the link to establish
time.sleep(5)
current_ssid, _ = self.current()
if current_ssid != ssid:
return False
# attempt to grab an IP
# better hope we are connected because the timeout here is really long
# cmd('sudo dhclient {}'.format(self._interface))
# parse response
return True
def disconnect(self) -> bool:
"""[summary].
Returns:
bool: [description]
"""
return False
def current(self) -> Tuple[Optional[str], Optional[str]]:
"""[summary].
Returns:
Tuple[Optional[str], Optional[str]]: [description]
"""
# get interface status
response = cmd("iwconfig {}".format(self.interface()))
# the current network is on the first line like ESSID:"network"
line = response.splitlines()[0]
line = line.replace('"', "")
parts = line.split("ESSID:")
if len(parts) > 1:
network = parts[1].strip()
if network != "off/any":
return (network, None)
# return none if there was not an active connection
return (None, None)
def interfaces(self) -> List[str]:
"""[summary].
Returns:
List[str]: [description]
"""
# grab list of interfaces
response = cmd("iwconfig")
# parse response
interfaces = []
for line in response.splitlines():
if len(line) > 0 and not line.startswith(" "):
# this line contains an interface name!
if "no wireless extensions" not in line:
# this is a wireless interface
interfaces.append(line.split()[0])
return interfaces
def interface(self, interface: Optional[str] = None) -> Optional[str]:
"""[summary].
Args:
interface (Optional[str], optional): [description]. Defaults to None.
Returns:
Optional[str]: [description]
"""
if interface is not None:
self._interface = interface
return None
return self._interface
@property
def is_on(self) -> bool:
"""[summary].
Returns:
bool: [description]
"""
# TODO
return True
def power(self, power: bool) -> None:
"""[summary].
Args:
power (bool): [description]
"""
# TODO
return
class NetworksetupWireless(WifiController):
"""OS X networksetup Driver."""
_interface = None
def __init__(self, interface: str = None):
self.interface(interface)
def connect(self, ssid: str, password: str, timeout: float = 15) -> bool:
"""[summary].
Args:
ssid (str): network SSID
            password (str): network password
timeout (float, optional): Time before considering connection failed (in seconds). Defaults to 15.
Raises:
Exception: [description]
Returns:
bool: [description]
"""
# Escape single quotes
ssid = ssid.replace(r"'", '''"'"''')
response = cmd(
"networksetup -setairportnetwork '{}' '{}' '{}'".format(self._interface, ssid, password)
)
if "not find" in response.lower():
return False
# Now wait for network to actually establish
current = self.current()[0]
logger.debug(f"current wifi: {current}")
        while current is not None and ssid not in current and timeout > 0:
time.sleep(1)
current = self.current()[0]
logger.debug(f"current wifi: {current}")
timeout -= 1
if timeout == 0:
raise Exception("Wi-Fi connection timeout.")
# TODO There is some delay required here, presumably because the network is not ready.
time.sleep(5)
return True
    def disconnect(self) -> bool:
        """Disconnect from the current network.
        Not implemented for this driver; the connection is left in place.
        Returns:
            bool: always False
        """
        return False
def current(self) -> Tuple[Optional[str], Optional[str]]:
"""[summary].
Returns:
Tuple[Optional[str], Optional[str]]: [description]
"""
# attempt to get current network
response = cmd("networksetup -getairportnetwork {}".format(self._interface))
# parse response
phrase = "Current Wi-Fi Network: "
if phrase in response:
return (response.replace("Current Wi-Fi Network: ", "").strip(), None)
return (None, None)
def interfaces(self) -> List[str]:
"""[summary].
Returns:
List[str]: [description]
"""
# grab list of interfaces
response = cmd("networksetup -listallhardwareports")
# parse response
interfaces = []
detectedWifi = False
for line in response.splitlines():
if detectedWifi:
# this line has our interface name in it
interfaces.append(line.replace("Device: ", ""))
detectedWifi = False
else:
# search for the line that has 'Wi-Fi' in it
if "Wi-Fi" in line:
detectedWifi = True
# return list
return interfaces
def interface(self, interface: Optional[str] = None) -> Optional[str]:
"""[summary].
Args:
interface (Optional[str], optional): [description]. Defaults to None.
Returns:
Optional[str]: [description]
"""
if interface is not None:
self._interface = interface
return None
return self._interface
@property
def is_on(self) -> bool:
"""[summary].
Returns:
bool: [description]
"""
return "On" in cmd("networksetup -getairportpower {}".format(self._interface))
def power(self, power: bool) -> None:
"""[summary].
Args:
power (bool): [description]
"""
if power:
cmd("networksetup -setairportpower {} on".format(self._interface))
else:
cmd("networksetup -setairportpower {} off".format(self._interface))
class NetshWireless(WifiController):
"""Windows Driver."""
# Used to build profile
template = r"""<?xml version="1.0"?>
<WLANProfile xmlns="http://www.microsoft.com/networking/WLAN/profile/v1">
<name>{ssid}</name>
<SSIDConfig>
<SSID>
<name>{ssid}</name>
</SSID>
</SSIDConfig>
<connectionType>ESS</connectionType>
<connectionMode>manual</connectionMode>
<MSM>
<security>
<authEncryption>
<authentication>{auth}</authentication>
<encryption>{encrypt}</encryption>
<useOneX>false</useOneX>
</authEncryption>
<sharedKey>
<keyType>passPhrase</keyType>
<protected>false</protected>
<keyMaterial>{passwd}</keyMaterial>
</sharedKey>
</security>
</MSM>
<MacRandomization xmlns="http://www.microsoft.com/networking/WLAN/profile/v3">
<enableRandomization>false</enableRandomization>
</MacRandomization>
</WLANProfile>"""
def __init__(self, interface: str = None) -> None:
self._interface: Optional[str] = None
self.interface(interface)
self.ssid: Optional[str] = None
def __del__(self) -> None:
# TODO Do we want this?
# self._clean(self.ssid)
pass
def connect(self, ssid: str, password: str, timeout: float = 15) -> bool:
"""Establish a connection.
This is blocking and won't return until either a connection is established or
a 10 second timeout
Args:
ssid (str): SSID of network to connect to
password (str): password of network to connect to
timeout (float, optional): Time before considering connection failed (in seconds). Defaults to 15.
Returns:
bool: True if connected, False otherwise
"""
        # Escape ampersands, which otherwise break the XML profile below
        password = password.replace("&", "&amp;")
logger.info(f"Attempting to establish WiFi connection to {ssid}...")
# Start fresh each time.
self._clean(ssid)
# Create new profile
output = NetshWireless.template.format(ssid=ssid, auth="WPA2PSK", encrypt="AES", passwd=password)
logger.debug(output)
# Need ugly low level mkstemp and os here because standard tempfile can't be accessed by a subprocess in Windows :(
fd, filename = tempfile.mkstemp()
os.write(fd, output.encode("utf-8"))
os.close(fd)
response = cmd(f"netsh wlan add profile filename={filename}")
if "is added on interface" not in response:
raise Exception(response)
os.remove(filename)
# Try to connect
ssid_quotes = f'"{ssid}"'
response = cmd(f"netsh wlan connect ssid={ssid_quotes} name={ssid_quotes} interface={self._interface}")
if "was completed successfully" not in response:
raise Exception(response)
while self.current() != (ssid, "connected"):
logger.debug("Waiting 1 second for Wi-Fi connection to establish...")
time.sleep(1)
timeout -= 1
if timeout == 0:
raise Exception("Wi-Fi connection timeout.")
logger.info("Wifi connection established!")
self.ssid = ssid
return True
def disconnect(self) -> bool:
"""Terminate the WiFi connection.
Returns:
bool: True if the disconnect was successful, False otherwise.
"""
response = cmd(f"netsh wlan disconnect interface={self.interface()}")
return bool("completed successfully" in response.lower())
def current(self) -> Tuple[Optional[str], Optional[str]]:
"""Get the current network SSID and state.
# Here is an example of what we are parsing (i.e. to find FunHouse SSID):
# Name : Wi-Fi
# Description : TP-Link Wireless USB Adapter
# GUID : 093d8022-33cb-4400-8362-275eaf24cb86
# Physical address : 98:48:27:88:cb:18
# State : connected
# SSID : FunHouse
Raises:
Exception: Unexpected error.
Returns:
Tuple[Optional[str], Optional[str]]: Tuple of (ssid, network_state)
"""
class ParseState(Enum):
"""Current state of interface parsing"""
PARSE_INTERFACE = auto()
PARSE_SSID = auto()
PARSE_STATE = auto()
        if self._interface is None:
self._interface = self.interfaces()[0]
if self._interface is None:
raise Exception("Can't auto-assign interface. None found.")
response = cmd("netsh wlan show interfaces")
parse_state = ParseState.PARSE_INTERFACE
ssid: Optional[str] = None
network_state: Optional[str] = None
for field in response.split("\r\n"):
if parse_state is ParseState.PARSE_INTERFACE:
if "Name" in field and self._interface in field:
parse_state = ParseState.PARSE_STATE
elif parse_state is ParseState.PARSE_STATE:
if "State" in field:
network_state = field.split(":")[1].strip()
parse_state = ParseState.PARSE_SSID
elif parse_state is ParseState.PARSE_SSID:
if "SSID" in field:
ssid = field.split(":")[1].strip()
break
return (ssid, network_state)
def interfaces(self) -> List[str]:
"""Discover all available interfaces.
# We're parsing, for example, the following line to find "Wi-Fi":
# Name : Wi-Fi
Returns:
List[str]: List of interfaces
"""
response = cmd("netsh wlan show interfaces")
interfaces = []
# Look behind to find field, then match (non-greedy) any chars until CRLF
match = "(?<={}).+?(?=\\r\\n)"
for interface in re.findall(match.format("Name"), response):
# Strip leading whitespace and then the first two chars of remaining (i.e. " :")
interfaces.append(interface.strip()[2:])
return interfaces
def interface(self, interface: str = None) -> Optional[str]:
"""Get or set the current interface.
Args:
interface (str, optional): String to set or None to get. Defaults to None.
Returns:
Optional[str]: If interface argument is None, this will be a string if there is a valid interface; otherwise None
"""
if interface is not None:
self._interface = interface
return None
return self._interface
@property
def is_on(self) -> bool:
"""Is Wifi enabled?
Returns:
bool: True if yes, False if no.
"""
# TODO
return True
def power(self, power: bool) -> None:
"""Enable / Disable WiFi.
Args:
power (bool): True to enable, False to disable.
"""
arg = "enable" if power is True else "disable"
cmd(f"netsh interface set interface {self._interface} {arg}")
@staticmethod
def _clean(ssid: Optional[str]) -> None:
"""Disconnect and delete SSID profile.
Args:
ssid (Optional[str]): name of SSID
"""
cmd("netsh wlan disconnect")
if ssid is not None:
cmd(f'netsh wlan delete profile name="{ssid}"')
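# Hedged usage sketch (added for illustration; not part of the original module).
# It picks one of the drivers above based on the host OS and connects to a
# network. The SSID and password are placeholders, and the first reported
# wireless interface is assumed to be the one to use.
if __name__ == "__main__":
    import platform

    system = platform.system()
    if system == "Windows":
        wifi = NetshWireless()
    elif system == "Darwin":
        wifi = NetworksetupWireless()
    else:
        wifi = NmcliWireless()
    # Auto-select the first wireless interface the driver reports
    wifi.interface(wifi.interfaces()[0])
    if not wifi.is_on:
        wifi.power(True)
    print("Connected:", wifi.connect("ExampleSSID", "ExamplePassword", timeout=15))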
|
StarcoderdataPython
|
3367400
|
<filename>tools/text_to_speech.py
# Adapted from:
# https://pythonprogramminglanguage.com/text-to-speech/
import pyttsx3
if __name__ == '__main__':
# Initializes the engine
engine = pyttsx3.init()
    # Say something
engine.say('I like coconut.')
# Produce audio
engine.runAndWait()
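    # Hedged extension (added for illustration): pyttsx3 also exposes engine
    # properties for speech rate, volume and voice selection. Available voices
    # differ per platform, so using index 0 is only an example.
    engine.setProperty('rate', 150)      # words per minute
    engine.setProperty('volume', 0.8)    # 0.0 to 1.0
    voices = engine.getProperty('voices')
    if voices:
        engine.setProperty('voice', voices[0].id)
    engine.say('Speaking again with adjusted settings.')
    engine.runAndWait()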
|
StarcoderdataPython
|
11501
|
<reponame>brownaa/wagtail<gh_stars>1000+
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.translation import gettext as _
from wagtail.admin import messages
from wagtail.admin.views.pages.utils import get_valid_next_url_from_request
from wagtail.core import hooks
from wagtail.core.models import Page, UserPagePermissionsProxy
def unpublish(request, page_id):
page = get_object_or_404(Page, id=page_id).specific
user_perms = UserPagePermissionsProxy(request.user)
if not user_perms.for_page(page).can_unpublish():
raise PermissionDenied
next_url = get_valid_next_url_from_request(request)
if request.method == 'POST':
include_descendants = request.POST.get("include_descendants", False)
for fn in hooks.get_hooks('before_unpublish_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
page.unpublish(user=request.user)
if include_descendants:
for live_descendant_page in page.get_descendants().live().defer_streamfields().specific():
if user_perms.for_page(live_descendant_page).can_unpublish():
live_descendant_page.unpublish()
for fn in hooks.get_hooks('after_unpublish_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
messages.success(request, _("Page '{0}' unpublished.").format(page.get_admin_display_title()), buttons=[
messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
])
if next_url:
return redirect(next_url)
return redirect('wagtailadmin_explore', page.get_parent().id)
return TemplateResponse(request, 'wagtailadmin/pages/confirm_unpublish.html', {
'page': page,
'next': next_url,
'live_descendant_count': page.get_descendants().live().count(),
})
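# Hedged illustration (added; not part of the original views module): the two
# hook names used above are normally implemented in a project's
# wagtail_hooks.py. Returning an object with a status_code from
# 'before_unpublish_page' short-circuits the view, as the checks above show.
# The logging body below is only an example.
@hooks.register('after_unpublish_page')
def log_unpublish_example(request, page):
    import logging
    logging.getLogger(__name__).info(
        "Page %s unpublished by %s", page.pk, request.user)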
|
StarcoderdataPython
|
3309267
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 25 10:04:06 2019
@author: <NAME>
"""
import cv2
import numpy as np
import pyzbar.pyzbar as pyzbar
image = cv2.imread("25.png")
decodedObjects = pyzbar.decode(image)
for obj in decodedObjects:
print("Type:", obj.type)
print("Data: ", obj.data, "\n")
cv2.imshow("Frame", image)
cv2.waitKey(0)
print(decodedObjects)
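# Hedged extension (added for illustration): each decoded object also carries a
# bounding rectangle that can be drawn back onto the image with OpenCV. The
# window name and colour are arbitrary.
annotated = image.copy()
for obj in decodedObjects:
    (x, y, w, h) = obj.rect
    cv2.rectangle(annotated, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow("Annotated", annotated)
cv2.waitKey(0)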
|
StarcoderdataPython
|
25500
|
<reponame>torresxb1/aws-sam-cli<filename>tests/unit/local/lambdafn/test_config.py
from unittest import TestCase
from unittest.mock import Mock
from parameterized import parameterized
from samcli.lib.utils.packagetype import ZIP
from samcli.local.lambdafn.config import FunctionConfig
from samcli.commands.local.cli_common.user_exceptions import InvalidSamTemplateException
class TestFunctionConfig(TestCase):
DEFAULT_MEMORY = 128
DEFAULT_TIMEOUT = 3
def setUp(self):
self.name = "name"
self.runtime = "runtime"
self.handler = "handler"
self.imageuri = None
self.imageconfig = None
self.packagetype = ZIP
self.code_path = "codepath"
self.memory = 1234
self.timeout = 34
self.env_vars_mock = Mock()
self.layers = ["layer1"]
self.architecture = "arm64"
def test_init_with_env_vars(self):
config = FunctionConfig(
self.name,
self.runtime,
self.handler,
self.imageuri,
self.imageconfig,
self.packagetype,
self.code_path,
self.layers,
self.architecture,
memory=self.memory,
timeout=self.timeout,
env_vars=self.env_vars_mock,
)
self.assertEqual(config.name, self.name)
self.assertEqual(config.runtime, self.runtime)
self.assertEqual(config.handler, self.handler)
self.assertEqual(config.imageuri, self.imageuri)
self.assertEqual(config.imageconfig, self.imageconfig)
self.assertEqual(config.packagetype, self.packagetype)
self.assertEqual(config.code_abs_path, self.code_path)
self.assertEqual(config.layers, self.layers)
self.assertEqual(config.memory, self.memory)
self.assertEqual(config.timeout, self.timeout)
self.assertEqual(config.env_vars, self.env_vars_mock)
self.assertEqual(self.env_vars_mock.handler, self.handler)
self.assertEqual(self.env_vars_mock.memory, self.memory)
self.assertEqual(self.env_vars_mock.timeout, self.timeout)
def test_init_without_optional_values(self):
config = FunctionConfig(
self.name,
self.runtime,
self.handler,
self.imageuri,
self.imageconfig,
self.packagetype,
self.code_path,
self.layers,
self.architecture,
)
self.assertEqual(config.name, self.name)
self.assertEqual(config.runtime, self.runtime)
self.assertEqual(config.handler, self.handler)
self.assertEqual(config.packagetype, self.packagetype)
self.assertEqual(config.imageuri, self.imageuri)
self.assertEqual(config.imageconfig, self.imageconfig)
self.assertEqual(config.code_abs_path, self.code_path)
self.assertEqual(config.layers, self.layers)
self.assertEqual(config.memory, self.DEFAULT_MEMORY)
self.assertEqual(config.timeout, self.DEFAULT_TIMEOUT)
self.assertIsNotNone(config.env_vars)
self.assertEqual(config.env_vars.handler, self.handler)
self.assertEqual(config.env_vars.memory, self.DEFAULT_MEMORY)
self.assertEqual(config.env_vars.timeout, self.DEFAULT_TIMEOUT)
def test_init_with_timeout_of_int_string(self):
config = FunctionConfig(
self.name,
self.runtime,
self.handler,
self.imageuri,
self.imageconfig,
self.packagetype,
self.code_path,
self.layers,
self.architecture,
memory=self.memory,
timeout="34",
env_vars=self.env_vars_mock,
)
self.assertEqual(config.name, self.name)
self.assertEqual(config.runtime, self.runtime)
self.assertEqual(config.handler, self.handler)
self.assertEqual(config.packagetype, self.packagetype)
self.assertEqual(config.imageuri, self.imageuri)
self.assertEqual(config.imageconfig, self.imageconfig)
self.assertEqual(config.code_abs_path, self.code_path)
self.assertEqual(config.layers, self.layers)
self.assertEqual(config.memory, self.memory)
self.assertEqual(config.timeout, 34)
self.assertEqual(config.env_vars, self.env_vars_mock)
self.assertEqual(self.env_vars_mock.handler, self.handler)
self.assertEqual(self.env_vars_mock.memory, self.memory)
self.assertEqual(self.env_vars_mock.timeout, 34)
class TestFunctionConfigInvalidTimeouts(TestCase):
def setUp(self):
self.name = "name"
self.runtime = "runtime"
self.handler = "handler"
self.imageuri = None
self.imageconfig = None
self.packagetype = ZIP
self.code_path = "codepath"
self.memory = 1234
self.env_vars_mock = Mock()
self.layers = ["layer1"]
self.architecture = "x86_64"
@parameterized.expand(
[
("none int string",),
({"dictionary": "is not a string either"},),
("/local/lambda/timeout",),
("3.2",),
("4.2",),
("0.123",),
]
)
def test_init_with_invalid_timeout_values(self, timeout):
with self.assertRaises(InvalidSamTemplateException):
FunctionConfig(
self.name,
self.runtime,
self.imageuri,
self.handler,
self.packagetype,
self.imageconfig,
self.code_path,
self.layers,
self.architecture,
memory=self.memory,
timeout=timeout,
env_vars=self.env_vars_mock,
)
class TestFunctionConfig_equals(TestCase):
DEFAULT_MEMORY = 128
DEFAULT_TIMEOUT = 3
def setUp(self):
self.name = "name"
self.name2 = "name2"
self.runtime = "runtime"
self.handler = "handler"
self.imageuri = None
self.imageconfig = None
self.packagetype = ZIP
self.code_path = "codepath"
self.memory = 1234
self.timeout = 34
self.env_vars_mock = Mock()
self.layers = ["layer1"]
self.architecture = "arm64"
def test_equals_function_config(self):
config1 = FunctionConfig(
self.name,
self.runtime,
self.handler,
self.imageuri,
self.imageconfig,
self.packagetype,
self.code_path,
self.layers,
self.architecture,
memory=self.memory,
timeout=self.timeout,
env_vars=self.env_vars_mock,
)
config2 = FunctionConfig(
self.name,
self.runtime,
self.handler,
self.imageuri,
self.imageconfig,
self.packagetype,
self.code_path,
self.layers,
self.architecture,
memory=self.memory,
timeout=self.timeout,
env_vars=self.env_vars_mock,
)
self.assertTrue(config1 == config2)
def test_not_equals_function_config(self):
config1 = FunctionConfig(
self.name,
self.runtime,
self.handler,
self.imageuri,
self.imageconfig,
self.packagetype,
self.code_path,
self.layers,
self.architecture,
memory=self.memory,
timeout=self.timeout,
env_vars=self.env_vars_mock,
)
config2 = FunctionConfig(
self.name2,
self.runtime,
self.handler,
self.imageuri,
self.imageconfig,
self.packagetype,
self.code_path,
self.layers,
self.architecture,
memory=self.memory,
timeout=self.timeout,
env_vars=self.env_vars_mock,
)
self.assertTrue(config1 != config2)
|
StarcoderdataPython
|
3271761
|
<filename>Kaggle Fisheries/fisheries_create_dataset.py
## Kaggle Project
from __future__ import print_function
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from six.moves import cPickle as pickle
import glob
from scipy import ndimage
import numpy as np
import os
from PIL import Image
import scipy as sp
import matplotlib.pyplot as plt
# TODO: Add path where the train images are unzipped and stored
paths = "/home/animesh/Documents/Kaggle/Fisheries"
os.chdir(paths)
alb = os.path.join(paths,'ALB')
print(alb)
img_alb = os.path.join(alb,'*.jpg')
width = []
height = []
for infile in glob.glob(img_alb):
#file, ext = os.path.splitext(infile)
im = Image.open(infile)
width.append(im.size[0])
height.append(im.size[1])
print(np.average(width), np.average(height))
# 1272, 730
plt.scatter(width,height)
plt.show()
## Lets reduce to 128 x 128 images
img_alb = os.path.join(alb,'*.jpg')
size = 128, 128
for infile in glob.glob(img_alb):
outfile = os.path.splitext(infile)[0] + ".small"
file, ext = os.path.splitext(infile)
im = Image.open(infile).convert('L')
out = im.resize((size))
out.save(outfile, "JPEG")
# Display reduced images
os.chdir(alb)
im = Image.open("img_00015.small")
print(im.format, im.size, im.mode)
# Works
##Flatten to dataset - grayscale
alb_small = os.path.join(alb, "*.small")
image_size = 128
pixel_depth = 255
image_files = 1719
num_channels = 1
dataset_alb = np.ndarray(shape=(image_files, image_size, image_size), dtype=np.float32)
target_alb = np.ndarray(shape=(image_files), dtype=np.int_) # set to 0
num_images = 0
for filename in glob.glob(alb_small):
if num_images % 500 == 0: print(num_images)
try:
image_data = (ndimage.imread(filename, flatten=True).astype(float)) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset_alb[num_images, :, :] = image_data
name = os.path.basename(filename)
target_alb[num_images] = 0
num_images = num_images + 1
except IOError as e:
print('Could not read:', filename, ':', e, '- it\'s ok, skipping.')
print('Dataset shape:', dataset_alb.shape)
print('Target shape:', target_alb.shape)
print('Dataset Mean:', np.mean(dataset_alb))
print('Target Mean:', np.mean(target_alb))
# Repeat for BET
newloc = os.path.join(paths,'BET') #Update
image_files = 200 #Update
dataset_bet = np.ndarray(shape=(image_files, image_size, image_size), dtype=np.float32) #Update
target_bet = np.ndarray(shape=(image_files), dtype=np.int_) # set to 1 - Update
newpath1 = os.path.join(newloc,'*.jpg')
for infile in glob.glob(newpath1):
outfile = os.path.splitext(infile)[0] + ".small"
file, ext = os.path.splitext(infile)
im = Image.open(infile).convert('L')
out = im.resize((size))
out.save(outfile, "JPEG")
newpath2 = os.path.join(newloc, "*.small")
image_size = 128
pixel_depth = 255
num_images = 0
for filename in glob.glob(newpath2):
if num_images % 500 == 0: print(num_images)
try:
image_data = (ndimage.imread(filename, flatten=True).astype(float)) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset_bet[num_images, :, :] = image_data #Update
name = os.path.basename(filename)
target_bet[num_images] = 1 #Update
num_images = num_images + 1
except IOError as e:
print('Could not read:', filename, ':', e, '- it\'s ok, skipping.')
print('Dataset shape:', dataset_bet.shape)
print('Target shape:', target_bet.shape)
print('Dataset Mean:', np.mean(dataset_bet))
print('Target Mean:', np.mean(target_bet))
# Repeat for DOL
newloc = os.path.join(paths,'DOL') #Update
image_files = 117 #Update
dataset_dol = np.ndarray(shape=(image_files, image_size, image_size), dtype=np.float32) #Update
target_dol = np.ndarray(shape=(image_files), dtype=np.int_) # set to 2 - Update
newpath1 = os.path.join(newloc,'*.jpg')
for infile in glob.glob(newpath1):
outfile = os.path.splitext(infile)[0] + ".small"
file, ext = os.path.splitext(infile)
im = Image.open(infile).convert('L')
out = im.resize((size))
out.save(outfile, "JPEG")
newpath2 = os.path.join(newloc, "*.small")
image_size = 128
pixel_depth = 255
num_images = 0
for filename in glob.glob(newpath2):
if num_images % 500 == 0: print(num_images)
try:
image_data = (ndimage.imread(filename, flatten=True).astype(float)) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset_dol[num_images, :, :] = image_data #Update
name = os.path.basename(filename)
target_dol[num_images] = 2 #Update
num_images = num_images + 1
except IOError as e:
print('Could not read:', filename, ':', e, '- it\'s ok, skipping.')
print('Dataset shape:', dataset_dol.shape)
print('Target shape:', target_dol.shape)
print('Dataset Mean:', np.mean(dataset_dol))
print('Target Mean:', np.mean(target_dol))
# Repeat for LAG
newloc = os.path.join(paths,'LAG') #Update
image_files = 67 #Update
dataset_lag = np.ndarray(shape=(image_files, image_size, image_size), dtype=np.float32) #Update
target_lag = np.ndarray(shape=(image_files), dtype=np.int_) # set to 3 - Update
newpath1 = os.path.join(newloc,'*.jpg')
for infile in glob.glob(newpath1):
outfile = os.path.splitext(infile)[0] + ".small"
file, ext = os.path.splitext(infile)
im = Image.open(infile).convert('L')
out = im.resize((size))
out.save(outfile, "JPEG")
newpath2 = os.path.join(newloc, "*.small")
image_size = 128
pixel_depth = 255
num_images = 0
for filename in glob.glob(newpath2):
if num_images % 500 == 0: print(num_images)
try:
image_data = (ndimage.imread(filename, flatten=True).astype(float)) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset_lag[num_images, :, :] = image_data #Update
name = os.path.basename(filename)
target_lag[num_images] = 3 #Update
num_images = num_images + 1
except IOError as e:
print('Could not read:', filename, ':', e, '- it\'s ok, skipping.')
print('Dataset shape:', dataset_lag.shape)
print('Target shape:', target_lag.shape)
print('Dataset Mean:', np.mean(dataset_lag))
print('Target Mean:', np.mean(target_lag))
# Repeat for NoF
newloc = os.path.join(paths,'NoF') #Update
image_files = 465 #Update
dataset_nof = np.ndarray(shape=(image_files, image_size, image_size), dtype=np.float32) #Update
target_nof = np.ndarray(shape=(image_files), dtype=np.int_) # set to 4 - Update
newpath1 = os.path.join(newloc,'*.jpg')
for infile in glob.glob(newpath1):
outfile = os.path.splitext(infile)[0] + ".small"
file, ext = os.path.splitext(infile)
im = Image.open(infile).convert('L')
out = im.resize((size))
out.save(outfile, "JPEG")
newpath2 = os.path.join(newloc, "*.small")
image_size = 128
pixel_depth = 255
num_images = 0
for filename in glob.glob(newpath2):
if num_images % 500 == 0: print(num_images)
try:
image_data = (ndimage.imread(filename, flatten=True).astype(float)) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset_nof[num_images, :, :] = image_data #Update
name = os.path.basename(filename)
target_nof[num_images] = 4 #Update
num_images = num_images + 1
except IOError as e:
print('Could not read:', filename, ':', e, '- it\'s ok, skipping.')
print('Dataset shape:', dataset_nof.shape)
print('Target shape:', target_nof.shape)
print('Dataset Mean:', np.mean(dataset_nof))
print('Target Mean:', np.mean(target_nof))
# Repeat for Other
newloc = os.path.join(paths,'OTHER') #Update
image_files = 299 #Update
dataset_other = np.ndarray(shape=(image_files, image_size, image_size), dtype=np.float32) #Update
target_other = np.ndarray(shape=(image_files), dtype=np.int_) # set to 5 - Update
newpath1 = os.path.join(newloc,'*.jpg')
for infile in glob.glob(newpath1):
outfile = os.path.splitext(infile)[0] + ".small"
file, ext = os.path.splitext(infile)
im = Image.open(infile).convert('L')
out = im.resize((size))
out.save(outfile, "JPEG")
newpath2 = os.path.join(newloc, "*.small")
image_size = 128
pixel_depth = 255
num_images = 0
for filename in glob.glob(newpath2):
if num_images % 500 == 0: print(num_images)
try:
image_data = (ndimage.imread(filename, flatten=True).astype(float)) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset_other[num_images, :, :] = image_data #Update
name = os.path.basename(filename)
target_other[num_images] = 5 #Update
num_images = num_images + 1
except IOError as e:
print('Could not read:', filename, ':', e, '- it\'s ok, skipping.')
print('Dataset shape:', dataset_other.shape)
print('Target shape:', target_other.shape)
print('Dataset Mean:', np.mean(dataset_other))
print('Target Mean:', np.mean(target_other))
# Repeat for Shark
newloc = os.path.join(paths,'SHARK') #Update
image_files = 176 #Update
dataset_sh = np.ndarray(shape=(image_files, image_size, image_size), dtype=np.float32) #Update
target_sh = np.ndarray(shape=(image_files), dtype=np.int_) # set to 6 - Update
newpath1 = os.path.join(newloc,'*.jpg')
for infile in glob.glob(newpath1):
outfile = os.path.splitext(infile)[0] + ".small"
file, ext = os.path.splitext(infile)
im = Image.open(infile).convert('L')
out = im.resize((size))
out.save(outfile, "JPEG")
newpath2 = os.path.join(newloc, "*.small")
image_size = 128
pixel_depth = 255
num_images = 0
for filename in glob.glob(newpath2):
if num_images % 500 == 0: print(num_images)
try:
image_data = (ndimage.imread(filename, flatten=True).astype(float)) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset_sh[num_images, :, :] = image_data #Update
name = os.path.basename(filename)
target_sh[num_images] = 6 #Update
num_images = num_images + 1
except IOError as e:
print('Could not read:', filename, ':', e, '- it\'s ok, skipping.')
print('Dataset shape:', dataset_sh.shape)
print('Target shape:', target_sh.shape)
print('Dataset Mean:', np.mean(dataset_sh))
print('Target Mean:', np.mean(target_sh))
# Repeat for YFT
newloc = os.path.join(paths,'YFT') #Update
image_files = 734 #Update
dataset_yft = np.ndarray(shape=(image_files, image_size, image_size), dtype=np.float32) #Update
target_yft = np.ndarray(shape=(image_files), dtype=np.int_) # set to 7 - Update
newpath1 = os.path.join(newloc,'*.jpg')
for infile in glob.glob(newpath1):
outfile = os.path.splitext(infile)[0] + ".small"
file, ext = os.path.splitext(infile)
im = Image.open(infile).convert('L')
out = im.resize((size))
out.save(outfile, "JPEG")
newpath2 = os.path.join(newloc, "*.small")
image_size = 128
pixel_depth = 255
num_images = 0
for filename in glob.glob(newpath2):
if num_images % 500 == 0: print(num_images)
try:
image_data = (ndimage.imread(filename, flatten=True).astype(float)) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset_yft[num_images, :, :] = image_data #Update
name = os.path.basename(filename)
target_yft[num_images] = 7 #Update
num_images = num_images + 1
except IOError as e:
print('Could not read:', filename, ':', e, '- it\'s ok, skipping.')
print('Dataset shape:', dataset_yft.shape)
print('Target shape:', target_yft.shape)
print('Dataset Mean:', np.mean(dataset_yft))
print('Target Mean:', np.mean(target_yft))
## Concatenate into one master dataset
dataset = np.concatenate((dataset_alb,dataset_bet,dataset_dol,dataset_lag,dataset_nof,dataset_other,dataset_sh,dataset_yft))
target = np.concatenate((target_alb,target_bet,target_dol,target_lag,target_nof,target_other,target_sh,target_yft))
# Check Stats on the dataset
print('Dataset shape:', dataset.shape)
print('Target shape:', target.shape)
print('Dataset Mean:', np.mean(dataset))
print('Dataset Standard deviation:', np.std(dataset))
print('Dataset Max:', np.amax(dataset))
print('Dataset Min:', np.amin(dataset))
print('Target shape:', target.shape)
print('Target Mean:', np.mean(target))
print('Target Standard deviation:', np.std(target))
print('Target Max:', np.amax(target))
print('Target Min:', np.amin(target))
# Randomize dataset and target
def randomize(dataset, labels):
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation,:,:]
shuffled_labels = labels[permutation]
return shuffled_dataset, shuffled_labels
all_dataset, all_labels = randomize(dataset, target)
print(all_dataset.shape, all_labels.shape)
print(all_labels[:10])
# split the full dataset into 80% test and 20% validation
from sklearn import cross_validation
X_train, X_valid, y_train, y_valid = cross_validation.train_test_split(
all_dataset, all_labels, test_size=0.2, random_state=2275)
print("train dataset", X_train.shape, y_train.shape)
print("Validation dataset", X_valid.shape, y_valid.shape)
# Pickle this dataset for future use if required. This step can be skipped
os.chdir(paths)
pickle_file = 'all_fish_gray.pickle'
try:
f = open(pickle_file, 'wb')
save = {
'X_valid': X_valid,
'y_valid': y_valid,
'X_train': X_train,
'y_train': y_train,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
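# Hedged companion snippet (added for illustration): reading the pickle back
# mirrors the save above and restores the train/validation splits.
with open(pickle_file, 'rb') as f:
    save = pickle.load(f)
X_train, y_train = save['X_train'], save['y_train']
X_valid, y_valid = save['X_valid'], save['y_valid']
print('Reloaded:', X_train.shape, X_valid.shape)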
|
StarcoderdataPython
|
3344177
|
<gh_stars>0
a = int(input())
b = int(input())
x = a + b
print("X =", x)
|
StarcoderdataPython
|
106936
|
#! /opt/conda/bin/python3
""" File containing keras callback class to collect runstats of the training process """
# Copyright 2018 FAU-iPAT (http://ipat.uni-erlangen.de/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Any, Dict, List
from keras.callbacks import Callback # type: ignore
import keras.backend as backend # type: ignore
_TRunStats = Dict[str, List[float]] # pylint: disable=invalid-name
class RunStatsCallback(Callback):
"""
Callback class to log runstats of the keras model
This class stores the default history of the optimizer plus some
additional values. Those include value for each epoch for:
- Time required
- Base learning rate of the optimizer
At the end of the training those values call be accessed via the
runstats property.
"""
def __init__(self) -> None:
"""
Class initialization
"""
super(RunStatsCallback, self).__init__()
self._runstats = {} # type: _TRunStats
self._epoch_time_start = 0.0
# Allocate epoch and progress info tensors
self._epoch = backend.variable(0.0, dtype='float32', name='RunStatsCallbackEpoch')
self._max_epochs = backend.variable(1.0, dtype='float32', name='RunStatsCallbackMaxEpochs')
self._progress = self._epoch / self._max_epochs
def on_train_begin(self, logs: Dict[str, Any] = None) -> None:
"""
Callback method to setup at beginning of training
:param logs: Log data from keras
"""
self._runstats = {'time': [], 'lr': []}
epochs = self.params['epochs'] if self.params['epochs'] else 1.0
backend.set_value(self._max_epochs, epochs)
backend.set_value(self._epoch, 0.0)
def on_epoch_begin(self, epoch: int, logs: Dict[str, Any] = None) -> None:
"""
Callback method called at beginning of each epoch
:param epoch: Epoch to be started
:param logs: Log data from keras
"""
self._epoch_time_start = time.time()
backend.set_value(self._epoch, epoch)
def on_epoch_end(self, epoch: int, logs: Dict[str, Any] = None) -> None:
"""
Callback method called at the end of each epoch
:param epoch: Epoch to be ended
:param logs: Log data from keras
"""
backend.set_value(self._epoch, epoch+1)
# Store default history data
if logs:
for name in logs:
if name not in self._runstats:
self._runstats[name] = []
self._runstats[name].append(logs[name])
# Additionally store time required
self._runstats['time'].append(time.time() - self._epoch_time_start)
# Additionally store base learning rate of the optimizer
try:
learning_rate = self.model.optimizer.lr
self._runstats['lr'].append(backend.get_value(learning_rate))
except AttributeError:
pass
@property
def runstats(self) -> _TRunStats:
"""
runstats property
:return: runstats dictionary
"""
return self._runstats
@property
def progress(self):
"""
Progress tensor property
:return: progress tensor
"""
return self._progress
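if __name__ == '__main__':
    # Hedged usage sketch (added for illustration; not part of the original
    # module): the callback is passed to fit() like any other keras callback
    # and the collected statistics are read back afterwards. The toy model and
    # random data are only placeholders.
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense

    model = Sequential([Dense(1, input_shape=(4,))])
    model.compile(optimizer='adam', loss='mse')
    stats = RunStatsCallback()
    model.fit(np.random.rand(32, 4), np.random.rand(32, 1),
              epochs=2, verbose=0, callbacks=[stats])
    print(stats.runstats['time'], stats.runstats['lr'])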
|
StarcoderdataPython
|
151410
|
from django.apps import AppConfig
class BookoneConfig(AppConfig):
name = 'bookone'
|
StarcoderdataPython
|
3285583
|
<filename>scripts/mprime_tradeoff/generate_mprime_data.py
import numpy as np
import csv
from itertools import product
import pandas as pd
import xarray as xr
from graal_utils import Timer
from hypergeo import hypinv_upperbound
import os
path = os.path.dirname(__file__) + '/data/'
def compute_bound_data(k, m, delta=0.05, d=10, max_mprime=300):
growth_function = lambda M: (np.e*M/d)**d
bounds = np.ones(max_mprime)
best_bound = 1
for mprime in range(1, max_mprime+1):
bound = hypinv_upperbound(k, m, growth_function, delta, mprime)
bounds[mprime-1] = bound
if bound <= best_bound:
best_bound = bound
return bounds
if __name__ == "__main__":
import matplotlib.pyplot as plt
max_mprime = 10_000
risks = np.linspace(0, .5, 11)
ms = [100, 200, 300, 500, 1000]
ds = [5, 10, 20, 35, 50]
deltas = [0.0001, 0.0025, 0.05, 0.1]
os.makedirs(path, exist_ok=True)
# Generates all the data and saves it
for m, risk, d, delta in product(ms, risks, ds, deltas):
k = int(m*risk)
filename = f'mprime_tradeoff-{m=}_{k=}_{d=}_{delta=}'
if os.path.exists(path + filename + '.csv'):
continue
with open(path + filename + '.csv', 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(['mprime', 'bound'])
with Timer(f'm={m}, k={k}, d={d}, delta={delta}'):
bounds = compute_bound_data(k, m, delta=delta, d=d, max_mprime=max_mprime)
for mp, bound in enumerate(bounds, start=1):
writer.writerow([mp, bound])
# Computes the optimal value of mprime and saves it
best_mprimes = np.zeros((len(ms), len(risks), len(ds), len(deltas)))
best_bounds = np.zeros((len(ms), len(risks), len(ds), len(deltas)))
bounds_at_mprime_equals_m = np.zeros((len(ms), len(risks), len(ds), len(deltas)))
for m_idx, risk_idx, d_idx, delta_idx in product(range(len(ms)),
range(len(risks)),
range(len(ds)),
range(len(deltas))
):
m, risk, d, delta = ms[m_idx], risks[risk_idx], ds[d_idx], deltas[delta_idx]
k = int(m*risk)
filepath = path + f'mprime_tradeoff-{m=}_{k=}_{d=}_{delta=}.csv'
df = pd.read_csv(filepath, sep=',', header=0)
mprimes, bounds = df['mprime'], df['bound']
min_idx = np.argmin(bounds)
best_mprimes[m_idx, risk_idx, d_idx, delta_idx] = mprimes[min_idx]
best_bounds[m_idx, risk_idx, d_idx, delta_idx] = bounds[min_idx]
bounds_at_mprime_equals_m[m_idx, risk_idx, d_idx, delta_idx] = bounds[m-1]
data = xr.Dataset(
data_vars={
'bound': (['m', 'risk', 'd', 'delta'], best_bounds),
'bound_at_mprime=m': (['m', 'risk', 'd', 'delta'], bounds_at_mprime_equals_m),
'mprime': (['m', 'risk', 'd', 'delta'], best_mprimes)
},
coords={
'm': ms,
'risk': risks,
'd': ds,
'delta': deltas
}
)
data.to_netcdf(path + 'optimal_bound.nc')
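    # Hedged follow-up (added for illustration): the saved dataset can be
    # reopened with xarray and sliced along its coordinates, e.g. to inspect
    # the optimal m' values for one value of d and delta.
    reloaded = xr.open_dataset(path + 'optimal_bound.nc')
    print(reloaded['mprime'].sel(d=10, delta=0.05))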
|
StarcoderdataPython
|
1661033
|
import paddle.fluid.dataloader as data
import paddle
from PIL import Image
import os
import os.path
import numpy as np
import random
from numpy.random import randint
from opts import parser
args = parser.parse_args()
class VideoRecord(object):
def __init__(self, row):
self._data = row
@property
def path(self):
return self._data[0]
@property
def num_frames(self):
return int(self._data[1])
@property
def label(self):
return int(self._data[2])
class TSNDataSet(object):
def __init__(self, root_path, list_file,
num_segments=3, new_length=1, modality='RGB',
image_tmpl='img_{:05d}.jpg', transform=None,
force_grayscale=False, random_shift=True, test_mode=False):
self.root_path = root_path
self.list_file = list_file
self.num_segments = num_segments
self.new_length = new_length
self.modality = modality
self.image_tmpl = image_tmpl
self.transform = transform
self.random_shift = random_shift
self.test_mode = test_mode
self.batch_size=args.batch_size
if self.modality == 'RGBDiff':
self.new_length += 1# Diff needs one more image to calculate diff
self._parse_list()
def _load_image(self, directory, idx):
if self.modality == 'RGB' or self.modality == 'RGBDiff':
return [Image.open(os.path.join(directory, self.image_tmpl.format(idx))).convert('RGB')]
elif self.modality == 'Flow':
x_img = Image.open(os.path.join(directory, self.image_tmpl.format('x', idx))).convert('L')
y_img = Image.open(os.path.join(directory, self.image_tmpl.format('y', idx))).convert('L')
return [x_img, y_img]
def _parse_list(self):
self.video_list = [VideoRecord(x.strip().split(' ')) for x in open(self.list_file)]
def _sample_indices(self, record):
"""
:param record: VideoRecord
:return: list
"""
average_duration = (record.num_frames - self.new_length + 1) // self.num_segments
if average_duration > 0:
offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration, size=self.num_segments)
elif record.num_frames > self.num_segments:
offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_val_indices(self, record):
#print(record.num_frames > self.num_segments + self.new_length - 1)
if record.num_frames > self.num_segments + self.new_length - 1:
tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])
else:
offsets = np.zeros((self.num_segments,))
#print(offsets)
#print(offsets+1)
return offsets + 1
def _get_test_indices(self, record):
tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])
return offsets + 1
def __getitem__(self,index):
batch = 0
imgs=[]
labels=[]
i=index*self.batch_size
if self.random_shift:
random.shuffle(self.video_list)
while i < (len(self.video_list)):
record = self.video_list[i]
if not self.test_mode:
segment_indices = self._sample_indices(record) if self.random_shift else self._get_val_indices(record)
else:
segment_indices = self._get_test_indices(record)
img, label = self.get(record, segment_indices)
img=np.array(img).astype('float32')
label=np.array(label).astype('int64')
imgs.append(img)
labels.append(label)
batch += 1
i+=1
if batch == self.batch_size:
bimgs=np.array(imgs).reshape(-1,3,224,224)
blabels=np.array(labels).reshape(-1,1)
break
if batch == self.batch_size:
return bimgs,blabels
def get(self, record, indices):
images = list()
for seg_ind in indices:
p = int(seg_ind)
for i in range(self.new_length):
seg_imgs = self._load_image(record.path, p)
images.extend(seg_imgs)
if p < record.num_frames:
p += 1
process_data = self.transform(images)
return process_data, record.label
def __len__(self):
return len(self.video_list)
if __name__ == '__main__':
from transforms import *
from models import *
import paddle.fluid as fluid
fset = TSNDataSet("", 'data/ucf101_rgb_train_t.txt', num_segments=24,
new_length=1,
modality='RGB',
random_shift=False,
test_mode=False,
image_tmpl='img_'+'{:05d}.jpg' if args.modality in ["RGB", "RGBDiff"] else 'img_'+'{:05d}.jpg',
transform=Compose([
GroupScale(int(224)),
Stack(roll=True),
ToTorchFormatTensor(div=False),
IdentityTransform(),
]))
def batch_generator_creator():
def __reader__():
batch =0
img=[]
batch_data=[]
label=[]
for i in range(len(fset)):
record = fset.video_list[i]
if not fset.test_mode:
segment_indices = fset._sample_indices(record) if fset.random_shift else fset._get_val_indices(record)
else:
segment_indices = fset._get_test_indices(record)
print(record.path)
img, label = fset.get(record, segment_indices)
img=np.array(img).astype('float32')
label=np.array(label).astype('int64')
batch_data.append([img,label])
batch += 1
if batch == fset.batch_size:
yield batch_data
batch =0
img=[]
batch_data=[]
label=[]
return __reader__
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
with fluid.dygraph.guard(place):
t_loader= fluid.io.DataLoader.from_generator(capacity=10,return_list=True, iterable=True, drop_last=True)
t_loader.set_sample_list_generator(batch_generator_creator(), places=place)
#i=0
batch=len(fset)//fset.batch_size
for i in range (batch):
print(i)
img,lab =fset.__getitem__(i)
img = np.array(img).astype('float32').reshape(-1,3,224,224)
lab = np.array(lab).astype('int64').reshape(-1,1)
#print('\n 8888888888888888888888888')
if i==1:
break
i=0
for image, label in t_loader():
print(i)
i+=1
image=paddle.fluid.layers.reshape(image, shape=[-1,3,224,224])
label=paddle.fluid.layers.reshape(label, shape=[-1,1])
#print(i,image.shape,img.shape,label,lab)
if i==2:
break
|
StarcoderdataPython
|
1736866
|
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.spectral_norm as SpectralNorm
class Nothing(nn.Module):
def __init__(self):
super(Nothing, self).__init__()
def forward(self, x):
return x
def get_norm(norm_type, size):
if norm_type == 'batchnorm':
return nn.BatchNorm2d(size)
elif norm_type == 'instancenorm':
return nn.InstanceNorm2d(size)
def get_activation(activation_type):
if activation_type == 'relu':
return nn.ReLU(inplace=True)
elif activation_type == 'leakyrelu':
return nn.LeakyReLU(0.2, inplace=True)
elif activation_type == 'elu':
return nn.ELU(inplace=True)
elif activation_type == 'selu':
return nn.SELU(inplace=True)
elif activation_type == 'prelu':
return nn.PReLU()
elif activation_type == 'tanh':
return nn.Tanh()
elif activation_type is None:
return Nothing()
class ConvBlock(nn.Module):
def __init__(self, ni, no, ks, stride, pad=None, use_bn=True, use_sn=False, use_pixelshuffle=False,
norm_type='batchnorm', activation_type='leakyrelu', pad_type='Zero'):
super(ConvBlock, self).__init__()
self.use_bn = use_bn
self.use_sn = use_sn
self.use_pixelshuffle = use_pixelshuffle
self.norm_type = norm_type
self.pad_type = pad_type
        if pad is None:
pad = ks // 2 // stride
ni_ = ni
if use_pixelshuffle:
self.pixelshuffle = nn.PixelShuffle(2)
ni_ = ni // 4
if pad_type == 'Zero':
self.conv = nn.Conv2d(ni_, no, ks, stride, pad, bias=False)
else:
self.conv = nn.Sequential(*[
nn.ReflectionPad2d(pad),
nn.Conv2d(ni_, no, ks, stride, 0, bias=False)
])
if self.use_bn:
self.bn = get_norm(norm_type, no)
if self.use_sn:
self.conv = SpectralNorm(self.conv)
self.act = get_activation(activation_type)
def forward(self, x):
out = x
if self.use_pixelshuffle:
out = self.pixelshuffle(out)
out = self.conv(out)
if self.use_bn:
out = self.bn(out)
out = self.act(out)
return out
class DeConvBlock(nn.Module):
def __init__(self, ni, no, ks, stride, pad=None, output_pad=0, use_bn=True, use_sn=False, norm_type='batchnorm',
activation_type='leakyrelu', pad_type='Zero'):
super(DeConvBlock, self).__init__()
self.use_bn = use_bn
self.use_sn = use_sn
self.norm_type = norm_type
self.pad_type = pad_type
if pad is None:
pad = ks // 2 // stride
if pad_type == 'Zero':
self.deconv = nn.ConvTranspose2d(ni, no, ks, stride, pad, output_padding=output_pad, bias=False)
else:
self.deconv = nn.Sequential(*[
nn.ReflectionPad2d(pad),
nn.ConvTranspose2d(ni, no, ks, stride, 0, output_padding=output_pad, bias=False)
])
if self.use_bn:
self.bn = get_norm(norm_type, no)
if self.use_sn:
self.deconv = SpectralNorm(self.deconv)
self.act = get_activation(activation_type)
def forward(self, x):
out = self.deconv(x)
if self.use_bn:
out = self.bn(out)
out = self.act(out)
return out
class UpSample(nn.Module):
def __init__(self):
super(UpSample, self).__init__()
self.scale_factor = 2
def forward(self, x):
return F.interpolate(x, None, self.scale_factor, 'bilinear', align_corners=True)
class DCGAN_D(nn.Module):
def __init__(self, sz, nc, ndf=64, use_sigmoid=True, use_bn=True, use_sn=False, norm_type='batchnorm',
activation_type='leakyrelu'):
super(DCGAN_D, self).__init__()
assert sz > 4, "Image size should be bigger than 4"
assert sz & (sz - 1) == 0, "Image size should be a power of 2"
self.sz = sz
self.nc = nc
self.ndf = ndf
self.use_bn = use_bn
self.use_sn = use_sn
self.norm_type = norm_type
cur_ndf = ndf
layers = [
ConvBlock(self.nc, self.ndf, 4, 2, 1, use_bn=False, use_sn=self.use_sn, activation_type=activation_type)]
for i in range(int(math.log2(self.sz)) - 3):
layers.append(ConvBlock(cur_ndf, cur_ndf * 2, 4, 2, 1, use_bn=self.use_bn, use_sn=self.use_sn,
norm_type=self.norm_type, activation_type=activation_type))
cur_ndf *= 2
layers.append(ConvBlock(cur_ndf, 1, 4, 1, 0, use_bn=False, use_sn=self.use_sn, activation_type=None))
self.main = nn.Sequential(*layers)
self.sigmoid = nn.Sigmoid()
self.use_sigmoid = use_sigmoid
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
out = self.main(x)
        if self.use_sigmoid:
out = self.sigmoid(out)
return out
class DCGAN_G(nn.Module):
def __init__(self, sz, nz, nc, ngf=64, use_bn=True, use_sn=False, norm_type='batchnorm',
activation_type='leakyrelu'):
super(DCGAN_G, self).__init__()
self.sz = sz
self.nz = nz
self.nc = nc
self.ngf = ngf
self.use_bn = use_bn
self.use_sn = use_sn
self.norm_type = norm_type
cur_ngf = ngf * self.sz // 8
layers = [
DeConvBlock(self.nz, cur_ngf, 4, 1, 0, use_bn=self.use_bn, use_sn=self.use_sn, norm_type=self.norm_type,
activation_type=activation_type)]
for i in range(int(math.log2(self.sz)) - 3):
layers.append(DeConvBlock(cur_ngf, cur_ngf // 2, 4, 2, 1, use_bn=self.use_bn, use_sn=self.use_sn,
norm_type=self.norm_type, activation_type=activation_type))
cur_ngf = cur_ngf // 2
layers.append(DeConvBlock(self.ngf, self.nc, 4, 2, 1, use_bn=False, use_sn=self.use_sn, activation_type='tanh'))
self.main = nn.Sequential(*layers)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
out = self.main(x)
return out
class DCGAN_G_ResizedConv(nn.Module):
def __init__(self, sz, nz, nc, ngf=64, use_bn=True, use_sn=False, norm_type='batchnorm',
activation_type='leakyrelu'):
super(DCGAN_G_ResizedConv, self).__init__()
self.sz = sz
self.nz = nz
self.nc = nc
self.ngf = ngf
self.use_bn = use_bn
self.use_sn = use_sn
self.norm_type = norm_type
cur_ngf = ngf * self.sz // 8
layers = [ConvBlock(self.nz, cur_ngf, 4, 1, 3, use_bn=self.use_bn, use_sn=self.use_sn, norm_type=norm_type,
activation_type=activation_type), UpSample()]
for i in range(int(math.log2(self.sz)) - 3):
layers.extend([ConvBlock(cur_ngf, cur_ngf // 2, 3, 1, 1, use_bn=self.use_bn, use_sn=self.use_sn,
norm_type=norm_type, activation_type=activation_type), UpSample()])
cur_ngf = cur_ngf // 2
layers.append(ConvBlock(self.ngf, self.nc, 3, 1, 1, use_bn=False, use_sn=self.use_sn, activation_type='tanh'))
self.main = nn.Sequential(*layers)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
out = self.main(x)
return out
class DCGAN_G_PixelShuffle(nn.Module):
def __init__(self, sz, nz, nc, ngf=64, use_bn=True, use_sn=False, norm_type='batchnorm',
activation_type='leakyrelu'):
super(DCGAN_G_PixelShuffle, self).__init__()
self.sz = sz
self.nz = nz
self.nc = nc
self.ngf = ngf
self.use_bn = use_bn
self.use_sn = use_sn
self.norm_type = norm_type
cur_ngf = ngf * self.sz // 8
layers = [ConvBlock(self.nz, cur_ngf, 4, 1, 3, use_bn=self.use_bn, use_sn=self.use_sn, use_pixelshuffle=False,
norm_type=norm_type, activation_type=activation_type)]
for i in range(int(math.log2(self.sz)) - 3):
layers.extend([ConvBlock(cur_ngf, cur_ngf // 2, 3, 1, 1, use_bn=self.use_bn, use_sn=self.use_sn,
use_pixelshuffle=True, norm_type=norm_type, activation_type=activation_type)])
cur_ngf = cur_ngf // 2
layers.append(ConvBlock(self.ngf, self.nc, 3, 1, 1, use_bn=False, use_sn=self.use_sn, use_pixelshuffle=True,
activation_type='tanh'))
self.main = nn.Sequential(*layers)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
out = self.main(x)
return out
class Reshape(nn.Module):
def __init__(self, shape):
super(Reshape, self).__init__()
self.shape = shape
def forward(self, x):
out = x.view(*self.shape)
return out
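# Hedged usage sketch (added for illustration; not part of the original file):
# building a 64x64 RGB generator/discriminator pair and running one forward
# pass on random noise. The image size, channel counts and batch size are only
# examples.
if __name__ == '__main__':
    nz = 100
    netG = DCGAN_G(sz=64, nz=nz, nc=3, ngf=64)
    netD = DCGAN_D(sz=64, nc=3, ndf=64, use_sigmoid=True)
    noise = torch.randn(8, nz, 1, 1)
    fake = netG(noise)      # expected shape: (8, 3, 64, 64)
    score = netD(fake)      # expected shape: (8, 1, 1, 1)
    print(fake.shape, score.shape)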
|
StarcoderdataPython
|
3211613
|
<filename>nrm_analysis/fringefitting/subpix.py
#! /usr/bin/env python
import numpy as np
def rotatevectors(vectors, thetarad):
"""
vectors is a list of vectors - e.g. nrm hole centers
positive x decreases under slight rotation
positive y increases under slight rotation
"""
c, s = (np.cos(thetarad), np.sin(thetarad))
ctrs_rotated = []
for vector in vectors:
ctrs_rotated.append([c*vector[0] - s*vector[1],
s*vector[0] + c*vector[1]])
return np.array(ctrs_rotated)
def weightpixels(array, weightarray):
if np.shape(weightarray)[0] !=np.shape(weightarray)[1]:
raise ValueError("Pixel Weight Array Is Not Square")
oversample = np.shape(weightarray)[0]
shapex = np.shape(array)[0]
shapey = np.shape(array)[1]
b = array.copy()
b = b.reshape(shapex//oversample, oversample, shapey//oversample, oversample)
d = b.copy()
for i in range(np.shape(weightarray)[0]):
for j in range(np.shape(weightarray)[1]):
d[:,i,:,j] = d[:,i,:,j]* weightarray[i,j]
"""
# e.g 1 in the center, 0.8 on the edges:
d[:,1, :, 1] = d[:,1, :, 1]
d[:,0, :, :] = 0.8 * d[:,0, :, :]
d[:,2, :, :] = 0.8 * d[:,2, :, :]
d[:,:, :, 0] = 0.8 * d[:, :, :, 0]
d[:,:, :, 2] = 0.8 * d[:, :, :, 2]
for i in range(shapex):
for j in range(shapey):
d[i,:,j,:] = b[i,:,j,:] * weightarray
"""
return d.sum(-1).sum(1)
def pixelpowerprof(s = np.array([3,3]), power = 4, ctr = None ):
shape = np.array(s)
center = shape / 2
print(center)
y,x = np.indices(s)
pix = (shape[0] /2.0)*np.ones(shape) - np.abs( (x - center[0])**power + (y - center[1])**power )**(1/float(power))
return pix
if __name__ == "__main__":
pixw = np.array([ [0.8, 0.8, 0.8],
[0.8, 1.0, 0.8],
[0.8, 0.8, 0.8] ])
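    # Hedged demonstration (added for illustration): applying the 3x3 pixel
    # weights to a small 3x-oversampled array; the array contents are arbitrary.
    oversampled = np.arange(36.0).reshape(6, 6)
    binned = weightpixels(oversampled, pixw)
    print(binned)  # 2x2 result; each entry is a weighted sum of a 3x3 block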
|
StarcoderdataPython
|
4819601
|
# -*- coding:utf-8 -*-
def cmdUrlMaker():
rServer = "192.168.0.19"
dServer = "192.168.0.70:7575"
serverSelect = input('Choose the number of the server. (1)192.168.0.19 (2)192.168.0.70:7575 : ')
if serverSelect == 1:
server = rServer
else:
server = dServer
cmd = raw_input('cmd : ')
paramCnt = input('paramCnt : ')
param = []
for i in range(1, paramCnt+1):
param.append(raw_input('param%s : ' % i))
urlHead = "http://%s/Python/ServerRequest?cmd=%s" % (server, cmd)
urlBody = ''
for i in range(0, paramCnt):
paramNum = i + 1
        urlBody += '&param%s=' % paramNum + param[i]
url = urlHead + urlBody
print url
if __name__ == '__main__':
cmdUrlMaker()
|
StarcoderdataPython
|
3281371
|
from PyInstaller.utils.hooks import collect_data_files, collect_submodules, \
copy_metadata
datas = copy_metadata('chaostoolkit-humio', recursive=True)
hiddenimports = (
collect_submodules('chaoshumio')
)
|
StarcoderdataPython
|
1756514
|
import json
import logging
from django.conf import settings
from django.utils.encoding import filepath_to_uri
from rest_framework import viewsets
from rest_framework.serializers import HyperlinkedModelSerializer, \
ReadOnlyField, Serializer
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import DjangoModelPermissions, \
DjangoModelPermissionsOrAnonReadOnly
from rest_framework.response import Response
from rest_framework.decorators import detail_route
from rest_framework.reverse import reverse
from rest_framework_extras.serializers import FormMixin
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from photologue.models import PhotoSize
from jmbo.models import ModelBase, Image
from jmbo.admin import ModelBaseAdmin
logger = logging.getLogger("django")
if settings.REST_FRAMEWORK.get("DEFAULT_VERSIONING_CLASS") != \
"rest_framework.versioning.URLPathVersioning":
logger.warning("""Jmbo: URLPathVersioning is not set as \
DEFAULT_VERSIONING_CLASS. It is strongly recommended to update your \
settings.""")
class PropertiesMixin(Serializer):
image_detail_url = ReadOnlyField()
class Meta:
fields = ("image_detail_url",)
class ModelBaseSerializer(
FormMixin, PropertiesMixin, HyperlinkedModelSerializer
):
class Meta:
model = ModelBase
admin = ModelBaseAdmin
fields = "__all__"
def get_extra_kwargs(self):
# We specify a base_name at router registration and this is a way to
# sneak in view_name so it resolves properly.
di = super(ModelBaseSerializer, self).get_extra_kwargs()
meta = self.Meta.model._meta
prefix = ("%s-%s" % (meta.app_label, meta.object_name)).lower()
if isinstance(self.context["view"], ModelBasePermittedViewSet):
di["url"] = {"view_name": "%s-permitted-detail" % prefix}
else:
di["url"] = {"view_name": "%s-detail" % prefix}
return di
class ModelBaseObjectsViewSet(viewsets.ModelViewSet):
queryset = ModelBase.objects.all().prefetch_related(
"categories", "sites", "layers", "tags", "images"
).select_related("owner", "content_type", "primary_category")
serializer_class = ModelBaseSerializer
authentication_classes = (
SessionAuthentication, BasicAuthentication, JSONWebTokenAuthentication
)
permission_classes = (DjangoModelPermissions,)
@detail_route(methods=["post"])
def publish(self, request, pk, **kwargs):
self.get_object().publish()
return Response({"status": "success"})
@detail_route(methods=["post"])
def unpublish(self, request, pk, **kwargs):
self.get_object().unpublish()
return Response({"status": "success"})
class ModelBasePermittedViewSet(viewsets.ReadOnlyModelViewSet):
queryset = ModelBase.permitted.all().prefetch_related(
"categories", "sites", "layers", "tags", "images"
).select_related("owner", "content_type", "primary_category")
serializer_class = ModelBaseSerializer
permission_classes = (DjangoModelPermissionsOrAnonReadOnly,)
class ImageSerializer(HyperlinkedModelSerializer):
class Meta:
model = Image
fields = "__all__"
class ImageViewSet(viewsets.ModelViewSet):
queryset = Image.objects.all()
serializer_class = ImageSerializer
authentication_classes = (
SessionAuthentication, BasicAuthentication, JSONWebTokenAuthentication
)
permission_classes = (DjangoModelPermissionsOrAnonReadOnly,)
@detail_route(methods=["get"])
def scales(self, request, pk, **kwargs):
"""Return link to a view that will redirect to the scaled image. This
intermediary view is required because we usually create the scaled
images lazily."""
li = []
obj = self.get_object()
for photosize in PhotoSize.objects.all():
url = request.build_absolute_uri(reverse(
"jmbo:image-scale-url",
(obj.pk, photosize.name),
))
li.append(url)
return Response(li)
def register(router, mapping=None):
"""Register all viewsets known to app, overriding any items already
registered with the same name."""
if mapping is None:
mapping = (
("jmbo-modelbase-permitted", ModelBasePermittedViewSet),
("jmbo-modelbase", ModelBaseObjectsViewSet),
("jmbo-image", ImageViewSet)
)
for pth, klass in mapping:
keys = [tu[0] for tu in router.registry]
try:
i = keys.index(pth)
del router.registry[i]
except ValueError:
pass
# Leave default handling intact until view_name issues are resolved
router.register(
r"%s" % pth,
klass
)
# Provide a base_name to consider app_label as well
router.register(
r"%s" % pth,
klass,
base_name=pth
)
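# Minimal usage sketch (illustrative only; the router variable and URL prefix
# below are assumptions, not part of this module):
#
#     from rest_framework import routers
#     router = routers.DefaultRouter()
#     register(router)   # wires up the three jmbo viewsets listed above
#     # urlpatterns += [url(r"^api/", include(router.urls))]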
|
StarcoderdataPython
|
1683233
|
<gh_stars>1-10
#!/usr/bin/env python
"""
Classes representing parameters for 1D GeoClaw runs
:Classes:
- GeoClawData1D
- GaugeData1D
:Constants:
- Rearth - Radius of earth in meters
- DEG2RAD factor to convert degrees to radians
- RAD2DEG factor to convert radians to degrees
"""
import os
import numpy
import clawpack.clawutil.data
# Radius of earth in meters.
# For consistency, should always use this value when needed, e.g.
# in setrun.py or topotools:
Rearth = 6367.5e3 # average of polar and equatorial radii
DEG2RAD = numpy.pi / 180.0
RAD2DEG = 180.0 / numpy.pi
LAT2METER = Rearth * DEG2RAD
class GeoClawData1D(clawpack.clawutil.data.ClawData):
r"""
1D Geoclaw data object
"""
def __init__(self):
super(GeoClawData1D,self).__init__()
# GeoClaw physics parameters
self.add_attribute('gravity',9.8)
self.add_attribute('earth_radius',Rearth)
self.add_attribute('coordinate_system',1)
self.add_attribute('friction_forcing',True)
self.add_attribute('friction_coefficient',0.025)
# GeoClaw algorithm parameters
self.add_attribute('dry_tolerance',1e-3)
self.add_attribute('friction_depth',1.0e6)
self.add_attribute('sea_level',0.0)
def write(self,data_source='setrun.py'):
self.open_data_file('geoclaw.data',data_source)
self.data_write('gravity')
self.data_write('earth_radius')
self.data_write('coordinate_system')
self.data_write('sea_level')
friction = self.friction_forcing
if isinstance(self.friction_forcing,bool):
if self.friction_forcing:
friction = 1
else:
friction = 0
elif isinstance(self.friction_forcing,str):
if self.friction_forcing in ['Manning','manning','MANNING']:
friction = 1
elif self.friction_forcing in ['Coulomb','coulomb','COULOMB']:
friction = 2
else:
friction = 0
self.friction_forcing = friction
self.data_write('friction_forcing')
self.data_write('friction_coefficient')
self.data_write('friction_depth')
self.data_write('dry_tolerance')
self.close_data_file()
# Gauge data object
class GaugeData1D(clawpack.clawutil.data.ClawData):
r"""
Gauge data object for 1d.
input specs for gauges are in 1d in setrun.py...output is like that of
2d amr (with level=1 and y=0) so that same reading/plotting tools can be used.
"""
@property
def gauge_numbers(self):
if len(self.gauges) == 1:
return [self.gauges[0][0]]
else:
return [gauge[0] for gauge in self.gauges]
def __init__(self, num_dim=2):
super(GaugeData1D,self).__init__()
self.add_attribute('num_dim',num_dim)
self.add_attribute('gauges',[])
def __str__(self):
output = "Gauges: %s\n" % len(self.gauges)
for gauge in self.gauges:
output = "\t".join((output,"%4i:" % gauge[0]))
output = " ".join((output,"%19.10e" % gauge[1]))
output = " ".join((output,"%17.10e" % gauge[2]))
output = " ".join((output,"%13.6e\n" % gauge[3]))
return output
def write(self,out_file='gauges.data',data_source='setrun.py'):
r"""Write out gague information data file."""
        # Check to make sure we have only unique gauge numbers
if len(self.gauges) > 0:
if len(self.gauge_numbers) != len(set(self.gauge_numbers)):
raise Exception("Non unique gauge numbers specified.")
# Write out gauge data file
self.open_data_file(out_file,data_source)
self.data_write(name='ngauges',value=len(self.gauges))
for gauge in self.gauges:
self._out_file.write("%4i %19.10e %19.10e %13.6e %13.6e\n" % tuple(gauge))
self.close_data_file()
def read(self,data_path="./",file_name='gauges.data'):
r"""Read gauge data file"""
path = os.path.join(data_path, file_name)
gauge_file = open(path,'r')
# Read past comments and blank lines
header_lines = 0
ignore_lines = True
while ignore_lines:
line = gauge_file.readline()
if line[0] == "#" or len(line.strip()) == 0:
header_lines += 1
else:
break
# Read number of gauges, should be line that was last read in
num_gauges = int(line.split()[0])
# Read in each gauge line
        for n in range(num_gauges):
line = gauge_file.readline().split()
self.gauges.append([int(line[0]),float(line[1]),float(line[2]),
float(line[3]),float(line[4])])
gauge_file.close()
|
StarcoderdataPython
|
1723504
|
<reponame>binderwang/Implements-of-Reinforcement-Learning-Algorithms
# coding=utf-8
import pandas as pd
import numpy as np
from base.maze import Maze
class QLearning(object):
def __init__(self, actions, env, alpha=0.01, gamma=0.9, epsilon=0.9):
self.actions = actions
self.env = env
self.alpha = alpha
self.gamma = gamma
self.epsilon = epsilon
self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
def get_action(self, state):
self.check_if_state_exist(state)
if np.random.uniform() < self.epsilon:
target_actions = self.q_table.loc[state, :]
target_actions = target_actions.reindex(np.random.permutation(target_actions.index))
target_action = target_actions.idxmax()
else:
target_action = np.random.choice(self.actions)
return target_action
def update_q_value(self, state, action, reward, state_next):
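        # Standard Q-learning update:
        #   Q(s, a) <- Q(s, a) + alpha * (reward + gamma * max_a' Q(s', a') - Q(s, a))
        # The bootstrap term is dropped once the episode has terminated.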
self.check_if_state_exist(state_next)
q_value_predict = self.q_table.loc[state, action]
if state_next != 'done':
q_value_real = reward + self.gamma * self.q_table.loc[state_next, :].max()
else:
q_value_real = reward
self.q_table.loc[state, action] += self.alpha * (q_value_real - q_value_predict)
def check_if_state_exist(self, state):
if state not in self.q_table.index:
self.q_table = self.q_table.append(
pd.Series(
[0] * len(self.actions),
index=self.q_table.columns,
name=state
)
)
def train(self):
for episode in range(100):
print('Episode: {}'.format(episode))
state = self.env.reset()
while True:
self.env.render()
# Get next action.
action = self.get_action(str(state))
# Get next state.
state_next, reward, done = self.env.step(action)
# Update Q table.
self.update_q_value(str(state), action, reward, str(state_next))
state = state_next
if done:
break
self.env.destroy()
if __name__ == '__main__':
env = Maze()
model = QLearning(list(range(env.n_actions)), env)
env.after(100, model.train)
env.mainloop()
|
StarcoderdataPython
|
3255626
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# test_maxsum.py
# algorithms
#
# Created by <NAME> on 06/19/21
# Copyright © 2021 <NAME>. All rights reserved.
#
import pytest
@pytest.mark.parametrize(
"input,output",
[
([4, 4, 9, -5, -6, -1, 5, -6, -8, 9], (17, 0, 2)),
([8, -10, 10, -9, -6, 9, -7, -4, -10, -8], (10, 2, 2)),
([10, 1, -10, -8, 6, 10, -10, 6, -3, 10], (19, 4, 9)),
],
)
def test_max_sum(input, output):
from jcvi.algorithms.maxsum import max_sum
assert max_sum(input) == output
|
StarcoderdataPython
|
1678085
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import argparse
import json
from sdf_timing import sdfparse
def merge(timings_list, site):
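    # Walk every parsed SDF file and keep only the timing entries whose cell
    # instance path contains `site`; entries seen more than once must match,
    # otherwise the assert below flags an attempt to merge differing cells.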
merged_timings = dict()
for timings in timings_list:
divider = '/'
if 'divider' in timings['header']:
divider = timings['header']['divider']
for cell in timings['cells']:
for cell_instance in timings['cells'][cell]:
if site in cell_instance.split(divider):
if 'cells' not in merged_timings:
merged_timings['cells'] = dict()
if cell not in merged_timings['cells']:
merged_timings['cells'][cell] = dict()
                    if cell_instance in merged_timings['cells'][cell]:
                        assert merged_timings['cells'][cell][cell_instance] == \
                            timings['cells'][cell][cell_instance], \
                            "Attempting to merge differing cells"
                    merged_timings['cells'][cell][cell_instance] = timings[
                        'cells'][cell][cell_instance]
return merged_timings
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--sdfs', nargs='+', type=str, help="List of sdf files to merge")
parser.add_argument('--site', type=str, help="Site we want to merge")
parser.add_argument('--json', type=str, help="Debug JSON")
parser.add_argument('--out', type=str, help="Merged sdf name")
args = parser.parse_args()
timings_list = list()
for sdf in args.sdfs:
with open(sdf, 'r') as fp:
timing = sdfparse.parse(fp.read())
timings_list.append(timing)
merged_sdf = merge(timings_list, args.site)
    with open(args.out, 'w') as out_fp:
        out_fp.write(sdfparse.emit(merged_sdf, timescale='1ns'))
if args.json is not None:
with open(args.json, 'w') as fp:
json.dump(merged_sdf, fp, indent=4, sort_keys=True)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
180278
|
import functools
import flask_login
import signals
import inspect
from mocha import (utils,
abort,
request,
)
from mocha.core import apply_function_to_members
from . import (is_authenticated,
not_authenticated,
ROLES_ADMIN,
ROLES_MANAGER,
ROLES_CONTRIBUTOR,
ROLES_MODERATOR,
__options__,
login_manager)
def login_required(func):
"""
A wrapper around the flask_login.login_required.
But it also checks the presence of the decorator: @login_not_required
On a "@login_required" class, method containing "@login_not_required" will
still be able to access without authentication
:param func:
:return:
"""
if inspect.isclass(func):
apply_function_to_members(func, login_required)
return func
else:
@functools.wraps(func)
def decorated_view(*args, **kwargs):
if "login_not_required" not in utils.get_decorators_list(func) \
and not_authenticated():
return login_manager.unauthorized()
return func(*args, **kwargs)
return decorated_view
def login_not_required(func):
"""
Dummy decorator. @login_required will inspect the method
to look for this decorator
    Use this decorator when you do not want to require login in a "@login_required" class/method
:param func:
:return:
"""
@functools.wraps(func)
def decorated_view(*args, **kwargs):
return func(*args, **kwargs)
return decorated_view
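# Illustrative sketch (the class and method names below are assumptions, not
# part of this module): on a class decorated with @login_required, a single
# method can still opt out with @login_not_required.
#
#     @login_required
#     class Account(Mocha):
#         def index(self):
#             ...                     # requires an authenticated user
#
#         @login_not_required
#         def password_reset(self):
#             ...                     # reachable without authentication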
def logout_user(f):
"""
Decorator to logout user
:param f:
:return:
"""
@functools.wraps(f)
def deco(*a, **kw):
signals.user_logout(lambda: flask_login.current_user)
flask_login.logout_user()
return f(*a, **kw)
return deco
def require_verified_email(f):
pass
def require_login_allowed(f):
"""
Decorator to abort if login is not allowed
:param f:
:return:
"""
@functools.wraps(f)
def deco(*a, **kw):
if not __options__.get("allow_login"):
abort(403, "Login not allowed. Contact admin if it's a mistake")
return f(*a, **kw)
return deco
def require_register_allowed(f):
"""
Decorator to abort if register is not allowed
:param f:
:return:
"""
@functools.wraps(f)
def deco(*a, **kw):
if not __options__.get("allow_register"):
abort(403, "Signup not allowed. Contact admin if it's a mistake")
return f(*a, **kw)
return deco
def require_social_login_allowed(f):
"""
Decorator to abort if social login is not allowed
:param f:
:return:
"""
@functools.wraps(f)
def deco(*a, **kw):
if not __options__.get("allow_social_login"):
abort(403,
"Social login not allowed. Contact admin if it's a mistake")
return f(*a, **kw)
return deco
def accepts_roles(*roles):
"""
A decorator to check if user has any of the roles specified
    @accepts_roles('superadmin', 'admin')
def fn():
pass
"""
def wrapper(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
if is_authenticated():
if not flask_login.current_user.has_any_roles(*roles):
return abort(403)
else:
return abort(401)
return f(*args, **kwargs)
return wrapped
return wrapper
def accepts_admin_roles(func):
"""
Decorator that accepts only admin roles
:param func:
:return:
"""
if inspect.isclass(func):
apply_function_to_members(func, accepts_admin_roles)
return func
else:
@functools.wraps(func)
def decorator(*args, **kwargs):
return accepts_roles(*ROLES_ADMIN)(func)(*args, **kwargs)
return decorator
def accepts_manager_roles(func):
"""
Decorator that accepts only manager roles
:param func:
:return:
"""
if inspect.isclass(func):
apply_function_to_members(func, accepts_manager_roles)
return func
else:
@functools.wraps(func)
def decorator(*args, **kwargs):
return accepts_roles(*ROLES_MANAGER)(func)(*args, **kwargs)
return decorator
def accepts_contributor_roles(func):
"""
Decorator that accepts only contributor roles
:param func:
:return:
"""
if inspect.isclass(func):
apply_function_to_members(func, accepts_contributor_roles)
return func
else:
@functools.wraps(func)
def decorator(*args, **kwargs):
return accepts_roles(*ROLES_CONTRIBUTOR)(func)(*args, **kwargs)
return decorator
def accepts_moderator_roles(func):
"""
Decorator that accepts only moderator roles
:param func:
:return:
"""
if inspect.isclass(func):
apply_function_to_members(func, accepts_moderator_roles)
return func
else:
@functools.wraps(func)
def decorator(*args, **kwargs):
return accepts_roles(*ROLES_MODERATOR)(func)(*args, **kwargs)
return decorator
def jwt_required(func):
"""
Checks if the Authorization barer exists. Otherwise throw 401
:param func:
:return:
"""
if inspect.isclass(func):
apply_function_to_members(func, jwt_required)
return func
else:
@functools.wraps(func)
def deco(*a, **kw):
if not "Authorization" in request.headers:
abort(401, "Not Authorized")
return func(*a, **kw)
return deco
|
StarcoderdataPython
|
3318348
|
<filename>imgsteg/__main__.py
import argparse
import sys
from . import ui
from .imgsteg import Imgsteg
def blank_builder(parser):
pass
def extract_bits(args):
if args.infile is None:
raise Exception()
imgsteg = Imgsteg(args.infile)
channel_map = {
'r': imgsteg.RED,
'g': imgsteg.GREEN,
'b': imgsteg.BLUE,
}
pairs = list(map(lambda x: (channel_map[x[0]], int(x[1:])), args.bits.split(',')))
res = imgsteg.extract_bits(pairs)
sys.stdout.buffer.write(res)
def extract_bits_builder(parser):
parser.add_argument('--bits', dest='bits', required=True)
def extract_bit_plane(args):
if args.infile is None:
raise Exception()
imgsteg = Imgsteg(args.infile)
channel_map = {
'r': imgsteg.RED,
'g': imgsteg.GREEN,
'b': imgsteg.BLUE,
}
channel, nth = (channel_map[args.bit[0]], int(args.bit[1:]))
new_im = imgsteg.extract_bit_plane(channel, nth)
new_im.save(args.outfile, quality=100)
def extract_bit_plane_builder(parser):
parser.add_argument('--bit', dest='bit', required=True)
def gray_bits(args):
if args.infile is None:
raise Exception()
imgsteg = Imgsteg(args.infile)
new_im = imgsteg.gray_bits()
new_im.save(args.outfile, quality=100)
def inversion(args):
if args.infile is None:
raise Exception()
imgsteg = Imgsteg(args.infile)
new_im = imgsteg.invert()
new_im.save(args.outfile, quality=100)
def _ui(args):
if args.infile is None:
raise Exception()
ui.server.serve(args.infile)
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
modules = [
('ui', _ui, blank_builder),
('inversion', inversion, blank_builder),
('gray_bits', gray_bits, blank_builder),
('extract_bits', extract_bits, extract_bits_builder),
('extract_bit_plane', extract_bit_plane, extract_bit_plane_builder),
]
for module_name, handler, builder in modules:
subparser = subparsers.add_parser(module_name)
subparser.set_defaults(handler=handler)
builder(subparser)
subparser.add_argument('infile', nargs='?', type=argparse.FileType('rb'), help='Input file.')
    subparser.add_argument('outfile', nargs='?', type=argparse.FileType('wb'), help='Output file (required by commands that write an image; extract_bits writes to stdout instead).')
def main():
args = parser.parse_args()
if hasattr(args, 'handler'):
args.handler(args)
if __name__ == '__main__':
main()
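# Example invocations (illustrative; assuming the package is executed as
# `python -m imgsteg`, which is what this __main__ module enables):
#   python -m imgsteg extract_bits --bits r0,g0,b0 stego.png > payload.bin
#   python -m imgsteg extract_bit_plane --bit g0 stego.png plane.png
#   python -m imgsteg inversion in.png out.png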
|
StarcoderdataPython
|
3310139
|
<reponame>groboclown/petronia<gh_stars>10-100
"""
OS Process control.
"""
|
StarcoderdataPython
|
3254674
|
<reponame>brunodccarvalho/competitive
from decimal import *
getcontext().prec = 30
for tc in range(int(input())):
[N, R, G] = map(int, input().split())
win = map(Decimal, input().split())
win = sorted(win, reverse=True)
zero = Decimal(0)
first = [zero]
for topK in range(1, N + 1):
first.append(sum(win[0:topK]))
M = R * G
a = [zero] * (M + 1)
count = [zero] * (M + 1)
Ewin = sum(win) / N
for g in range(0, G):
a[g] = Decimal(Ewin)
count[g] = Decimal(1)
for g in range(G, M + 1):
X, C = 0, 0
for i in range(g - G, g):
X += count[i] * a[i]
C += count[i]
for K in range(1, N):
Y = first[K] / K
r = Decimal(1.0 * N / K - 1)
T = r * C + 1
gain = (Y + r * X) / T
if a[g] < gain:
a[g] = gain
count[g] = T
print("Case #%d: %.15f" % (tc + 1, a[M]))
|
StarcoderdataPython
|
3205195
|
<reponame>PolPsychCam/Twitter-NLP-SNA<filename>data_collection_12_WordCounts.py
"""
data_collection_12_WordCounts.py
6 - preparation for word frequency analysis
7 - word frequency analysis
8 - calculations
9 - trying to understand why centrality is related to noun/propernoun use
@author: lizakarmannaya
"""
########################################################################
######## 6 - preparation for word frequency analysis ###################
########################################################################
import pandas as pd
import os
import glob
import csv
import collections
import matplotlib.pyplot as plt
import re
#import spacy
#everything below has been repeated for RIGHT too
os.chdir(os.path.expanduser("~"))
os.chdir('ark-tweet-nlp-0.3.2/outputs_conll/RIGHT') #do this once
errors = []
Propernoun_tags = ['^', 'Z', 'M']
Noun_tags = ['N', 'S', 'L']
Propernouns_LEFT = []
Nouns_LEFT = []
Propernouns_RIGHT = []
Nouns_RIGHT = []
counter = 0
for txt_file in glob.glob("*.txt"):
counter+=1
#extract user_id from file name
user_id = txt_file.split("tweets_")[1]
user_id = user_id.split(".txt")[0]
with open(txt_file, 'r') as f:
try:
for tweet in f.read().split('\n\n'): #for every tweet from this user
                lines = tweet.split('\n') #create iterable with every triple of tab-separated tags
lines_split = [x.split('\t') for x in lines] #this is now a list of 3 items
for triple in lines_split:
if triple[1] in Propernoun_tags:
Propernouns_RIGHT.append(triple[0])
elif triple[1] in Noun_tags:
Nouns_RIGHT.append(triple[0])
except IndexError as e:
errors.append({user_id: {tweet: e}})
print(f'finished file {counter} out of 17789 LEFT/16496 RIGHT')
len(Propernouns_LEFT) #1,564,753
len(errors) #17788 - 1 for each tweet - this is the blank line at the end
len(Nouns_LEFT) #5,669,614
Propernouns_LEFT[0] #print out the first item in Propernouns_LEFT
len(Propernouns_RIGHT) #1,199,460
len(errors) #16496 - 1 for each tweet - this is the blank line at the end
len(Nouns_RIGHT) #3,787,619
os.chdir(os.path.expanduser("~"))
with open ('RESULTS_LEFT_Propernoun_frequency_list.txt', 'w') as f:
[f.write(str(val) + '\n') for val in Propernouns_LEFT]
with open ('RESULTS_LEFT_Noun_frequency_list.txt', 'w') as f:
[f.write(str(val) + '\n') for val in Nouns_LEFT]
with open ('RESULTS_RIGHT_Propernoun_frequency_list.txt', 'w') as f:
[f.write(str(val) + '\n') for val in Propernouns_RIGHT]
with open ('RESULTS_RIGHT_Noun_frequency_list.txt', 'w') as f:
[f.write(str(val) + '\n') for val in Nouns_RIGHT]
#with open("RESULTS_LEFT_Noun_frequency_list.txt", "w") as f:
# for s in score:
# f.write(str(s) +"\n")
## read files in again
os.chdir(os.path.expanduser("~"))
Propernouns_LEFT = []
with open("RESULTS_LEFT_Propernoun_frequency_list.txt", "r") as f:
for line in f:
Propernouns_LEFT.append(str(line.strip()))
Nouns_LEFT = []
with open("RESULTS_LEFT_Noun_frequency_list.txt", "r") as f:
for line in f:
Nouns_LEFT.append(str(line.strip()))
Propernouns_RIGHT = []
with open("RESULTS_RIGHT_Propernoun_frequency_list.txt", "r") as f:
for line in f:
Propernouns_RIGHT.append(str(line.strip()))
Nouns_RIGHT = []
with open("RESULTS_RIGHT_Noun_frequency_list.txt", "r") as f:
for line in f:
Nouns_RIGHT.append(str(line.strip()))
len(Nouns_LEFT) #e.g. check that they loaded in correctly - 5669614
len(set(Propernouns_LEFT)) #224612
len(set(Nouns_LEFT)) #165551
########################################################################
######## 7 - WORD FREQUENCY ANALYSIS ###################################
########################################################################
#tf-idf - to find words especially important for
#n-grams - e.g. models like fasttext - will create similar vectors for misspelt words
#formula from Sylwester & Purver
#first lowercase, remove stopwords, lemmatise/extract word stems
#formula from Bryden et al. (2013)
#see log odds ratio here https://www.tidytextmining.com/twitter.html
type(Propernouns_LEFT[0]) #str
#### 7.1 - analyse word frequencies without any cleaning
## ignore #tweets per user or #tags per tweet
## --> put all tags into one list
Propernouns_LEFT
Nouns_LEFT
Propernouns_RIGHT
Nouns_RIGHT
#### 1. Propernouns_LEFT ####
counts_Propernouns_LEFT = collections.Counter(Propernouns_LEFT)
counts_Propernouns_LEFT.most_common(30)
#create df
counts_Propernouns_LEFT_30 = pd.DataFrame(counts_Propernouns_LEFT.most_common(30), columns=['words', 'count'])
counts_Propernouns_LEFT_30.head()
#create graph
fig, ax = plt.subplots(figsize=(8, 8))
# Plot horizontal bar graph
counts_Propernouns_LEFT_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="red")
ax.set_title("Common Propernouns Found in LEFT Tweets (Including All Words)")
plt.savefig('RESULTS/WordCounts/Propernouns_LEFT_mostcommon.png')
#save df of most common words
#'RESULTS/WordCounts/Propernouns_LEFT_mostcommon.csv'
#### 2. Propernouns_RIGHT ####
counts_Propernouns_RIGHT = collections.Counter(Propernouns_RIGHT)
counts_Propernouns_RIGHT_30 = pd.DataFrame(counts_Propernouns_RIGHT.most_common(30), columns=['words', 'count'])
counts_Propernouns_RIGHT_30.head()
fig, ax = plt.subplots(figsize=(8, 8))
counts_Propernouns_RIGHT_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="blue")
ax.set_title("Common Propernouns Found in RIGHT Tweets (Including All Words)")
plt.savefig('RESULTS/WordCounts/Propernouns_RIGHT_mostcommon.png')
#### 3. Nouns_LEFT ####
counts_Nouns_LEFT = collections.Counter(Nouns_LEFT)
counts_Nouns_LEFT_30 = pd.DataFrame(counts_Nouns_LEFT.most_common(30), columns=['words', 'count'])
counts_Nouns_LEFT_30.head()
fig, ax = plt.subplots(figsize=(8, 8))
counts_Nouns_LEFT_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="red")
ax.set_title("Common Nouns Found in LEFT Tweets (Including All Words)")
plt.savefig('RESULTS/WordCounts/Nouns_LEFT_mostcommon.png')
#### 4. Nouns_RIGHT ####
counts_Nouns_RIGHT = collections.Counter(Nouns_RIGHT)
counts_Nouns_RIGHT_30 = pd.DataFrame(counts_Nouns_RIGHT.most_common(30), columns=['words', 'count'])
counts_Nouns_RIGHT_30.head()
fig, ax = plt.subplots(figsize=(8, 8))
counts_Nouns_RIGHT_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="blue")
ax.set_title("Common Nouns Found in RIGHT Tweets (Including All Words)")
plt.savefig('RESULTS/WordCounts/Nouns_RIGHT_mostcommon.png')
#### 7.2 all to lowercase
## remember, RTs have been removed entirely already
Propernouns_LEFT_lower = [word.lower() for word in Propernouns_LEFT]
Propernouns_RIGHT_lower = [word.lower() for word in Propernouns_RIGHT]
Nouns_LEFT_lower = [word.lower() for word in Nouns_LEFT]
Nouns_RIGHT_lower = [word.lower() for word in Nouns_RIGHT]
#repeat all graphs from above
counts_Propernouns_LEFT_lower = collections.Counter(Propernouns_LEFT_lower)
counts_Propernouns_LEFT_lower_30 = pd.DataFrame(counts_Propernouns_LEFT_lower.most_common(30), columns=['words', 'count'])
counts_Propernouns_LEFT_lower_30.head()
fig, ax = plt.subplots(figsize=(8, 8))
counts_Propernouns_LEFT_lower_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="red")
ax.set_title("Common Propernouns Found in LEFT Tweets (Including All Words lowercased)")
plt.savefig('RESULTS/WordCounts/Propernouns_LEFT_mostcommon_lowercase.png')
#### 7.3 - lemmatise
#### 7.3.1 - lemmatise using NLTK WordNet
## NB drop all emojis from tokens
## NB drop # from tokens
#example
## Nb need to define POS for which I am lemmatising this
import nltk
#nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
type(Propernouns_LEFT[0])
for word in Propernouns_LEFT:
word = word.lower()
print ("{0:20}{1:20}".format(word,wordnet_lemmatizer.lemmatize(word, pos='n')))
#this doesn't drop 's
for word in Nouns_LEFT:
print ("{0:20}{1:20}".format(word,wordnet_lemmatizer.lemmatize(word, pos="n")))
#this doesn't drop 's
#### 7.3.2 - lemmatise using SpaCy
import spacy
# Initialize spacy 'en' model, keeping only tagger component needed for lemmatization
nlp = spacy.load('en', disable=['parser', 'ner'])
sentence = "The striped bats are hanging on their feet for best"
# Parse the sentence using the loaded 'en' model object `nlp`
doc = nlp(sentence)
# Extract the lemma for each token and join
" ".join([token.lemma_ for token in doc])
#### 7.3.3 - clean (pseudo-lemmatise) using RegExpression
## manually drop emojis
#function to prepare for dropping emojis
def remove_emoji(string):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002500-\U00002BEF" # chinese char
u"\U00002702-\U000027B0"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
u"\U0001f926-\U0001f937"
u"\U00010000-\U0010ffff"
u"\u2640-\u2642"
u"\u2600-\u2B55"
u"\u200d"
u"\u23cf"
u"\u23e9"
u"\u231a"
u"\ufe0f" # dingbats
u"\u3030"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', string)
## manually drop 's, 'll, 'd, 've, 'm, 're - #NB apostrophe has to be both ’ and '
## manually lowercase, strip whitespace, drop '#' (hashtags here are used as parts of speech - see POS tagger description)
## manually remove emoji
Propernouns_LEFT_clean = [string.lower().strip().replace("#", "") for string in Propernouns_LEFT]
Propernouns_LEFT_clean = [re.sub(r"((’|')(s|ll|d|ve|m|re))", "", string) for string in Propernouns_LEFT_clean]
Propernouns_LEFT_clean = [remove_emoji(string) for string in Propernouns_LEFT_clean]
Propernouns_LEFT_clean = list(filter(None, Propernouns_LEFT_clean)) #drop empty strings which are the result of dropping emoji
#now plot the most common 30
counts_Propernouns_LEFT_clean = collections.Counter(Propernouns_LEFT_clean)
len(counts_Propernouns_LEFT_clean) #166338
#counts_Propernouns_LEFT_clean.most_common(30)
#create df
counts_Propernouns_LEFT_clean_30 = pd.DataFrame(counts_Propernouns_LEFT_clean.most_common(30), columns=['words', 'count'])
counts_Propernouns_LEFT_clean_30.head()
#create graph
fig, ax = plt.subplots(figsize=(8, 8))
# Plot horizontal bar graph
counts_Propernouns_LEFT_clean_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="red")
ax.set_title("Common Propernouns Found in LEFT Tweets (Including All Words without 's|'ll|'d|'ve|'m|'re, '#' & emojis, lowercased, no emoji)")
plt.savefig('RESULTS/WordCounts/Propernouns_LEFT_clean_3_mostcommon.png')
#repeat for Propernouns RIGHT
Propernouns_RIGHT_clean = [string.lower().strip().replace("#", "") for string in Propernouns_RIGHT]
Propernouns_RIGHT_clean = [re.sub(r"((’|')(s|ll|d|ve|m|re))", "", string) for string in Propernouns_RIGHT_clean]
Propernouns_RIGHT_clean = [remove_emoji(string) for string in Propernouns_RIGHT_clean]
Propernouns_RIGHT_clean = list(filter(None, Propernouns_RIGHT_clean)) #drop empty strings which are the result of dropping emoji
counts_Propernouns_RIGHT_clean = collections.Counter(Propernouns_RIGHT_clean)
len(counts_Propernouns_RIGHT_clean) #146319
counts_Propernouns_RIGHT_clean_30 = pd.DataFrame(counts_Propernouns_RIGHT_clean.most_common(30), columns=['words', 'count'])
counts_Propernouns_RIGHT_clean_30.head()
fig, ax = plt.subplots(figsize=(8, 8))
counts_Propernouns_RIGHT_clean_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="blue")
ax.set_title("Common Propernouns Found in RIGHT Tweets (Including All Words without 's|'ll|'d|'ve|'m|'re, '#' & emojis, lowercased, no emoji)")
plt.savefig('RESULTS/WordCounts/Propernouns_RIGHT_clean_3_mostcommon.png')
#repeat for Nouns LEFT
Nouns_LEFT_clean = [string.lower().strip().replace("#", "") for string in Nouns_LEFT]
Nouns_LEFT_clean = [re.sub(r"((’|')(s|ll|d|ve|m|re))", "", string) for string in Nouns_LEFT_clean]
Nouns_LEFT_clean = [remove_emoji(string) for string in Nouns_LEFT_clean]
Nouns_LEFT_clean = list(filter(None, Nouns_LEFT_clean)) #drop empty strings which are the result of dropping emoji
counts_Nouns_LEFT_clean = collections.Counter(Nouns_LEFT_clean)
len(counts_Nouns_LEFT_clean) #116856
counts_Nouns_LEFT_clean_30 = pd.DataFrame(counts_Nouns_LEFT_clean.most_common(30), columns=['words', 'count'])
counts_Nouns_LEFT_clean_30.head()
fig, ax = plt.subplots(figsize=(8, 8))
counts_Nouns_LEFT_clean_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="red")
ax.set_title("Common Nouns Found in LEFT Tweets (Including All Words without 's|'ll|'d|'ve|'m|'re, '#' & emojis, lowercased)")
plt.savefig('RESULTS/WordCounts/Nouns_LEFT_clean_3_mostcommon.png')
#repeat for Nouns RIGHT
Nouns_RIGHT_clean = [string.lower().strip().replace("#", "") for string in Nouns_RIGHT]
Nouns_RIGHT_clean = [re.sub(r"((’|')(s|ll|d|ve|m|re))", "", string) for string in Nouns_RIGHT_clean]
Nouns_RIGHT_clean = [remove_emoji(string) for string in Nouns_RIGHT_clean]
Nouns_RIGHT_clean = list(filter(None, Nouns_RIGHT_clean)) #drop empty strings which are the result of dropping emoji
counts_Nouns_RIGHT_clean = collections.Counter(Nouns_RIGHT_clean)
len(counts_Nouns_RIGHT_clean) #93293
counts_Nouns_RIGHT_clean_30 = pd.DataFrame(counts_Nouns_RIGHT_clean.most_common(30), columns=['words', 'count'])
counts_Nouns_RIGHT_clean_30.head()
fig, ax = plt.subplots(figsize=(8, 8))
counts_Nouns_RIGHT_clean_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="blue")
ax.set_title("Common Nouns Found in RIGHT Tweets (Including All Words without 's|'ll|'d|'ve|'m|'re, '#' & emojis, lowecased)")
plt.savefig('RESULTS/WordCounts/Nouns_RIGHT_clean_3_mostcommon.png')
#### 7.4 - stem? - NO, doesn't work
from nltk.stem import PorterStemmer
from nltk.stem import LancasterStemmer
porter = PorterStemmer()
lancaster=LancasterStemmer()
for word in Propernouns_LEFT:
print ("{0:20}{1:20}".format(word,porter.stem(word)))
#this returns words as lowercase, without 's' at the end, without the 's' after apostrophe, but still including the apostrophe itself
#'Stalinism' --> 'Stalin'
#'Christmas' --> 'christma'
#'Coronavirus' --> 'coronaviru'
for word in Propernouns_LEFT:
print ("{0:20}{1:20}".format(word,lancaster.stem(word)))
#Energy’s --> energy’s
#Aluminium --> alumin
#Australia --> austral
#Union --> un
#### 7.4 - more cleaning?
#### 7.4.1 - do I need to exclude 'coronavirus, covid etc.'?
len(Propernouns_LEFT) #1564753
len(Propernouns_RIGHT) #1199460
len(Nouns_LEFT) #5669614
len(Nouns_RIGHT) #3787619
coronavirus = ['coronavirus', 'sarscov2', 'covid', 'covid19', 'covid_19', 'covid-19', 'covid2019', 'covid_2019', 'covid-2019', 'cov19', 'cov_19', 'cov-19', 'cov2019', 'cov_2019', 'cov-2019', 'cv19', 'cv_19', 'cv-19', 'cv2019', 'cv_2019', 'cv-2019', 'covid19uk', 'covid2019uk']
#already lowercased
#already dropped hashtags & emojis &'s etc.
len(Propernouns_LEFT_clean) #1546104
len(Propernouns_RIGHT_clean) #1182685
len(Nouns_LEFT_clean) #5665362
len(Nouns_RIGHT_clean) #3783808
my_collection_Propernouns = Propernouns_LEFT_clean+Propernouns_RIGHT_clean
len(my_collection_Propernouns) #2728789
my_collection_Nouns = Nouns_LEFT_clean + Nouns_RIGHT_clean
len(my_collection_Nouns) #9449170
#now create a list of all words in these 2 collections that match tags in 'coronavirus'
coronavirus_in_my_collection_Nouns = []
for word in my_collection_Nouns:
if word in coronavirus:
coronavirus_in_my_collection_Nouns.append(word)
len(coronavirus_in_my_collection_Nouns) #13349
len(coronavirus_in_my_collection_Nouns)/len(my_collection_Nouns) #0.0014127166724696456 --> =0.1% --> don't need to multiverse Nouns
coronavirus_in_my_collection_Propernouns = []
for word in my_collection_Propernouns:
if word in coronavirus:
coronavirus_in_my_collection_Propernouns.append(word)
len(coronavirus_in_my_collection_Propernouns) #24478
len(coronavirus_in_my_collection_Propernouns)/len(my_collection_Propernouns) #0.008970279490279388 --> don't need to multiverse Propernouns
########
## now also calculate proportions 'coronavirus' by side
coronavirus_in_my_collection_Nouns_Left = []
for word in Nouns_LEFT_clean:
if word in coronavirus:
coronavirus_in_my_collection_Nouns_Left.append(word)
len(coronavirus_in_my_collection_Nouns_Left)/len(Nouns_LEFT_clean) #0.0012387557935397597
coronavirus_in_my_collection_Nouns_Right = []
for word in Nouns_RIGHT_clean:
if word in coronavirus:
coronavirus_in_my_collection_Nouns_Right.append(word)
len(coronavirus_in_my_collection_Nouns_Right)/len(Nouns_RIGHT_clean) #0.001673182148777105
coronavirus_in_my_collection_Propernouns_Left = []
for word in Propernouns_LEFT_clean:
if word in coronavirus:
coronavirus_in_my_collection_Propernouns_Left.append(word)
len(coronavirus_in_my_collection_Propernouns_Left)/len(Propernouns_LEFT_clean) #0.00806220021421586
coronavirus_in_my_collection_Propernouns_Right = []
for word in Propernouns_RIGHT_clean:
if word in coronavirus:
coronavirus_in_my_collection_Propernouns_Right.append(word)
len(coronavirus_in_my_collection_Propernouns_Right)/len(Propernouns_RIGHT_clean) #0.010157396094479933
#### 7.4.2 - do I need to exclude pronouns from Nouns lists?
first_pers_pronouns = ['i', 'we', 'me','us','mine','ours','my','our','myself','ourselves']
second_pers_pronouns = ['you','yours','your','yourself','yourselves']
third_pers_pronouns = ['he', 'she', 'it', 'they', 'her','him','them','hers','his','its','theirs','his','their','herself','himself','itself','themselves']
other_pronouns = ['all','another','any','anybody','anyone','anything','both','each','either','everybody','everyone','everything','few','many','most','neither','nobody','none','noone','nothing','one','other','others','several','some','somebody','someone','something','such','that','these','this','those','what','whatrever','which','whichever','who','whoever','whom','whomever','whose','as','that','what','whatever','thou','thee','thy','thine','ye','eachother','everybody','naught','nought','somewhat','thyself','whatsoever','whence','where','whereby','wherever']
pronouns = first_pers_pronouns + second_pers_pronouns + third_pers_pronouns + other_pronouns
len(pronouns) #94
#now create a list of all words in these 2 collections that match tags in 'coronavirus'
pronouns_in_my_collection_Nouns = []
for word in my_collection_Nouns:
if word in pronouns:
pronouns_in_my_collection_Nouns.append(word)
len(pronouns_in_my_collection_Nouns) #929401
len(pronouns_in_my_collection_Nouns)/len(my_collection_Nouns) #0.09835795101580351 --> need to multiverse Nouns?
pronouns_in_my_collection_Propernouns = []
for word in my_collection_Propernouns:
if word in pronouns:
pronouns_in_my_collection_Propernouns.append(word)
len(pronouns_in_my_collection_Propernouns) #15902
len(pronouns_in_my_collection_Propernouns)/len(my_collection_Propernouns) #0.005827493441229791 --> don't need to multiverse Propernouns
## --> re-plot Nouns without pronouns:
def remove_pronouns(wordlist):
wordlist_clean = [word for word in wordlist if word not in pronouns]
return wordlist_clean
len(Nouns_LEFT_clean) #5665362
len(Nouns_RIGHT_clean) #3783808
Nouns_LEFT_clean_nopronouns = remove_pronouns(Nouns_LEFT_clean)
len(Nouns_LEFT_clean_nopronouns) #5074947
Nouns_RIGHT_clean_nopronouns = remove_pronouns(Nouns_RIGHT_clean)
len(Nouns_RIGHT_clean_nopronouns) #3444822
counts_Nouns_LEFT_clean_nopronouns = collections.Counter(Nouns_LEFT_clean_nopronouns)
len(counts_Nouns_LEFT_clean_nopronouns) #116784
counts_Nouns_LEFT_clean_nopronouns_30 = pd.DataFrame(counts_Nouns_LEFT_clean_nopronouns.most_common(30), columns=['words', 'count'])
counts_Nouns_LEFT_clean_nopronouns_30.head()
fig, ax = plt.subplots(figsize=(8, 8))
counts_Nouns_LEFT_clean_nopronouns_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="red")
ax.set_title("Common Nouns Found in LEFT Tweets (Including All non-pronoun Words without 's|'ll|'d|'ve|'m|'re, '#' & emojis, lowecased)")
plt.savefig('RESULTS/WordCounts/Nouns_LEFT_clean_3_mostcommon_nopronouns.png')
counts_Nouns_RIGHT_clean_nopronouns = collections.Counter(Nouns_RIGHT_clean_nopronouns)
len(counts_Nouns_RIGHT_clean_nopronouns) #93220
counts_Nouns_RIGHT_clean_nopronouns_30 = pd.DataFrame(counts_Nouns_RIGHT_clean_nopronouns.most_common(30), columns=['words', 'count'])
counts_Nouns_RIGHT_clean_nopronouns_30.head()
fig, ax = plt.subplots(figsize=(8, 8))
counts_Nouns_RIGHT_clean_nopronouns_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="blue")
ax.set_title("Common Nouns Found in RIGHT Tweets (Including All non-pronoun Words without 's|'ll|'d|'ve|'m|'re, '#' & emojis, lowecased)")
plt.savefig('RESULTS/WordCounts/Nouns_RIGHT_clean_3_mostcommon_nopronouns.png')
#### 7.5 - see how big a proportion of my Propernouns/Nouns emojis constitute
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002500-\U00002BEF" # chinese char
u"\U00002702-\U000027B0"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
u"\U0001f926-\U0001f937"
u"\U00010000-\U0010ffff"
u"\u2640-\u2642"
u"\u2600-\u2B55"
u"\u200d"
u"\u23cf"
u"\u23e9"
u"\u231a"
u"\ufe0f" # dingbats
u"\u3030"
"]+", flags=re.UNICODE)
my_collection_Propernouns_uncleaned = Propernouns_LEFT + Propernouns_RIGHT
#need to use the uncleaned collection, because in the cleaned version I remove these emojis
emojis_in_Propernouns = []
for word in my_collection_Propernouns_uncleaned:
match_tag = emoji_pattern.match(word)
if match_tag:
emojis_in_Propernouns.append(word)
len(emojis_in_Propernouns) #46959
len(emojis_in_Propernouns)/len(my_collection_Propernouns_uncleaned) #0.01698819881101782
## --> emojis constitute only 1.7% of this collection
## --> maybe should still try to drop the entire tag if it contains an emoji - as the words followed by them are rarely propernouns
my_collection_Nouns_uncleaned = Nouns_LEFT + Nouns_RIGHT_clean
emojis_in_Nouns = []
for word in my_collection_Nouns_uncleaned:
match_tag = emoji_pattern.match(word)
if match_tag:
emojis_in_Nouns.append(word)
len(emojis_in_Nouns) #6229
len(emojis_in_Nouns)/len(my_collection_Nouns_uncleaned) #0.000658914835283985
#######################################################
########### 8 - calculations ##########################
#######################################################
#P(word use | political affiliation) = #times word occurs in tweets of followers of this side / count of all words used in tweets of followers of this side
df_LEFT = pd.read_csv('RESULTS_LEFT_noun_frequency_2.csv', index_col=0)
df_LEFT.head()
total_tags_LEFT = df_LEFT['total_tags'].sum()
total_tags_LEFT #33017889
df_RIGHT = pd.read_csv('RESULTS_RIGHT_noun_frequency_2.csv', index_col=0)
df_LEFT.head()
total_tags_RIGHT = df_RIGHT['total_tags'].sum()
total_tags_RIGHT #22513236
#Nouns, LEFT
counts_Nouns_LEFT_clean = collections.Counter(Nouns_LEFT_clean) #dictionary
len(counts_Nouns_LEFT_clean) #116856 - same as len(set(Nouns_LEFT_clean))
data=[]
for word in set(Nouns_LEFT_clean): #only loop through each word once - no repetitions
count = counts_Nouns_LEFT_clean[word]
proportion = counts_Nouns_LEFT_clean[word]/total_tags_LEFT
data.append([word, count, proportion])
df_Nouns_proportions_LEFT = pd.DataFrame(columns=['word', 'count', 'proportion'], data=data)
df_Nouns_proportions_LEFT.to_csv('df_Nouns_proportions_LEFT.csv')
#Nouns, RIGHT
counts_Nouns_RIGHT_clean = collections.Counter(Nouns_RIGHT_clean)
len(counts_Nouns_RIGHT_clean) #93293
data=[]
for word in set(Nouns_RIGHT_clean):
count = counts_Nouns_RIGHT_clean[word]
proportion = counts_Nouns_RIGHT_clean[word]/total_tags_RIGHT
data.append([word, count, proportion])
df_Nouns_proportions_RIGHT = pd.DataFrame(columns=['word', 'count', 'proportion'], data=data)
df_Nouns_proportions_RIGHT.to_csv('df_Nouns_proportions_RIGHT.csv')
#Propernouns, LEFT
counts_Propernouns_LEFT_clean = collections.Counter(Propernouns_LEFT_clean) #dictionary
len(counts_Propernouns_LEFT_clean) #166338 - same as len(set(Propernouns_LEFT_clean))
data=[]
for word in set(Propernouns_LEFT_clean):
count = counts_Propernouns_LEFT_clean[word]
proportion = counts_Propernouns_LEFT_clean[word]/total_tags_LEFT
data.append([word, count, proportion])
df_Propernouns_proportions_LEFT = pd.DataFrame(columns=['word', 'count', 'proportion'], data=data)
df_Propernouns_proportions_LEFT.shape
df_Propernouns_proportions_LEFT.to_csv('df_Propernouns_proportions_LEFT.csv')
#Propernouns, RIGHT
counts_Propernouns_RIGHT_clean = collections.Counter(Propernouns_RIGHT_clean) #dictionary
len(counts_Propernouns_RIGHT_clean) #146319
data=[]
for word in set(Propernouns_RIGHT_clean):
count = counts_Propernouns_RIGHT_clean[word]
proportion = counts_Propernouns_RIGHT_clean[word]/total_tags_RIGHT
data.append([word, count, proportion])
df_Propernouns_proportions_RIGHT = pd.DataFrame(columns=['word', 'count', 'proportion'], data=data)
df_Propernouns_proportions_RIGHT.to_csv('df_Propernouns_proportions_RIGHT.csv')
#### re-import data
df_Propernouns_proportions_LEFT = pd.read_csv('df_Propernouns_proportions_LEFT.csv', index_col=0)
df_Propernouns_proportions_RIGHT = pd.read_csv('df_Propernouns_proportions_RIGHT.csv', index_col=0)
df_Nouns_proportions_LEFT = pd.read_csv('df_Nouns_proportions_LEFT.csv', index_col=0)
df_Nouns_proportions_RIGHT = pd.read_csv('df_Nouns_proportions_RIGHT.csv', index_col=0)
df_Propernouns_proportions_LEFT.shape #(166338, 3)
df_Propernouns_proportions_RIGHT.shape #(146319, 3)
df_Nouns_proportions_LEFT.shape #(116856, 3)
df_Nouns_proportions_RIGHT.shape #(93293, 3)
#drop words with counts<=10 from each df
df_Propernouns_proportions_LEFT = df_Propernouns_proportions_LEFT[df_Propernouns_proportions_LEFT['count']>10]
df_Propernouns_proportions_LEFT.shape #(13378, 3)
df_Propernouns_proportions_RIGHT = df_Propernouns_proportions_RIGHT[df_Propernouns_proportions_RIGHT['count']>10]
df_Propernouns_proportions_RIGHT.shape #(10943, 3)
df_Nouns_proportions_LEFT = df_Nouns_proportions_LEFT[df_Nouns_proportions_LEFT['count']>10]
df_Nouns_proportions_LEFT.shape #(17368, 3)
df_Nouns_proportions_RIGHT = df_Nouns_proportions_RIGHT[df_Nouns_proportions_RIGHT['count']>10]
df_Nouns_proportions_RIGHT.shape #(14468, 3)
#### NOW display them by highest proportion first
df_Propernouns_proportions_LEFT.head()
df_Propernouns_proportions_LEFT_sorted = df_Propernouns_proportions_LEFT.sort_values(by = 'count', ascending=False)
df_Propernouns_proportions_LEFT_sorted.head()
df_Propernouns_proportions_RIGHT.head()
df_Propernouns_proportions_RIGHT_sorted = df_Propernouns_proportions_RIGHT.sort_values(by = 'count', ascending=False)
df_Propernouns_proportions_LEFT_sorted.head()
#build a df of per-word LEFT vs RIGHT proportions and their difference
df_Propernouns_LEFT_RIGHT = df_Propernouns_proportions_LEFT.merge(
    df_Propernouns_proportions_RIGHT, on='word', how='outer',
    suffixes=('_LEFT', '_RIGHT')).fillna(0)
df_Propernouns_LEFT_RIGHT['LEFT-RIGHT'] = df_Propernouns_LEFT_RIGHT['proportion_LEFT'] \
    - df_Propernouns_LEFT_RIGHT['proportion_RIGHT']
################################################################################
#### 9 - trying to understand why centrality is related to noun/propernoun use :
################################################################################
## --> analyse words used by 10 MOST hubs-central users
#find 10 most hubs-central users in df
df = pd.read_csv('RESULTS_df_multiverse_DIRECTED.csv', index_col=0)
df.head()
df.shape
df_LEFT = df[df['side']=='LEFT']
df_LEFT.shape
df_LEFT.head()
df_RIGHT = df[df['side']=='RIGHT']
df_RIGHT.shape
df_LEFT_sorted = df_LEFT.sort_values(by='hubs', ascending=False) #most central at the top
df_LEFT_sorted = df_LEFT_sorted.head(10)
LEFT_central_ids = list(df_LEFT_sorted['user_id_str'])
LEFT_central_ids
#now manually save these ids into 'ark-tweet-nlp-0.3.2/outputs_conll/LEFT/most_central'
df_RIGHT_sorted = df_RIGHT.sort_values(by='hubs', ascending=False) #most central at the top
df_RIGHT_sorted = df_RIGHT_sorted.head(10)
RIGHT_central_ids = list(df_RIGHT_sorted['user_id_str'])
RIGHT_central_ids
#now manually save these ids into 'ark-tweet-nlp-0.3.2/outputs_conll/RIGHT/most_central'
## 1. LEFT
os.chdir(os.path.expanduser("~"))
os.chdir('ark-tweet-nlp-0.3.2/outputs_conll/LEFT/most_central') #do this once
errors = []
Propernoun_tags = ['^', 'Z', 'M']
Noun_tags = ['N', 'S', 'L']
Propernouns_LEFT_central = [] #skip this at the second run
Nouns_LEFT_central = [] #skip this at the second run
Propernouns_RIGHT_central = []
Nouns_RIGHT_central = []
counter = 0
for txt_file in glob.glob("*.txt"):
counter+=1
#extract user_id from file name
user_id = txt_file.split("tweets_")[1]
user_id = user_id.split(".txt")[0]
with open(txt_file, 'r') as f:
try:
for tweet in f.read().split('\n\n'): #for every tweet from this user
                lines = tweet.split('\n') #create iterable with every triple of tab-separated tags
lines_split = [x.split('\t') for x in lines] #this is now a list of 3 items
for triple in lines_split:
if triple[1] in Propernoun_tags:
Propernouns_LEFT_central.append(triple[0]) #CHANGE to LEFT/RIGHT
elif triple[1] in Noun_tags:
Nouns_LEFT_central.append(triple[0]) #CHANGE to LEFT/RIGHT
except IndexError as e:
errors.append({user_id: {tweet: e}})
print(f'finished file {counter}')
len(Propernouns_LEFT_central) #363
len(errors) #10 - 1 for each tweet - this is the blank line at the end
len(Nouns_LEFT_central) #1668
Propernouns_LEFT_central[0]
##NOW re-run this with RIGHT
len(Propernouns_RIGHT_central) #431
len(errors) #10 - 1 for each tweet - this is the blank line at the end
len(Nouns_RIGHT_central) #1546
Nouns_RIGHT_central[0]
#now clean these
def clean_wordlist(wordlist):
wordlist_clean = [string.lower().strip().replace("#", "") for string in wordlist]
wordlist_clean = [re.sub(r"((’|')(s|ll|d|ve|m|re))", "", string) for string in wordlist_clean]
wordlist_clean = [remove_emoji(string) for string in wordlist_clean] ##NB define this function earlier
    wordlist_clean = list(filter(None, wordlist_clean)) #drop empty strings which are the result of dropping emoji
return wordlist_clean
Propernouns_LEFT_central_clean = clean_wordlist(Propernouns_LEFT_central)
Nouns_LEFT_central_clean = clean_wordlist(Nouns_LEFT_central)
Propernouns_RIGHT_central_clean = clean_wordlist(Propernouns_RIGHT_central)
Nouns_RIGHT_central_clean = clean_wordlist(Nouns_RIGHT_central)
len(Propernouns_LEFT_central_clean) #363 --> 362
len(Nouns_LEFT_central) #1668 --> 1668
len(Propernouns_RIGHT_central) #431 --> 431
len(Nouns_RIGHT_central) #1546 --> 1546
###visualise & save most common words for these 10 users
os.chdir(os.path.expanduser("~"))
#Propernouns
counts_Propernouns_LEFT_c = collections.Counter(Propernouns_LEFT_central_clean)
counts_Propernouns_LEFT_c_30 = pd.DataFrame(counts_Propernouns_LEFT_c.most_common(30), columns=['words', 'count'])
fig, ax = plt.subplots(figsize=(8, 8))
counts_Propernouns_LEFT_c_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="red")
ax.set_title("Common Propernouns Found in Tweets of 10 most LEFT-central users (Including All Words cleaned)")
plt.savefig('RESULTS/WordCounts/Propernouns_LEFT_10mostcentral_mostcommon_clean.png')
counts_Propernouns_RIGHT_c = collections.Counter(Propernouns_RIGHT_central_clean)
counts_Propernouns_RIGHT_c_30 = pd.DataFrame(counts_Propernouns_RIGHT_c.most_common(30), columns=['words', 'count'])
fig, ax = plt.subplots(figsize=(8, 8))
counts_Propernouns_RIGHT_c_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="blue")
ax.set_title("Common Propernouns Found in Tweets of 10 most RIGHT-central users (Including All Words cleaned)")
plt.savefig('RESULTS/WordCounts/Propernouns_RIGHT_10mostcentral_mostcommon_clean.png')
#Nouns
counts_Nouns_LEFT_c = collections.Counter(Nouns_LEFT_central_clean)
counts_Nouns_LEFT_c_30 = pd.DataFrame(counts_Nouns_LEFT_c.most_common(30), columns=['words', 'count'])
fig, ax = plt.subplots(figsize=(8, 8))
counts_Nouns_LEFT_c_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="red")
ax.set_title("Common Nouns Found in Tweets of 10 most LEFT-central users (Including All Words cleaned)")
plt.savefig('RESULTS/WordCounts/Nouns_LEFT_10mostcentral_mostcommon_clean.png')
counts_Nouns_RIGHT_c = collections.Counter(Nouns_RIGHT_central_clean)
counts_Nouns_RIGHT_c_30 = pd.DataFrame(counts_Nouns_RIGHT_c.most_common(30), columns=['words', 'count'])
fig, ax = plt.subplots(figsize=(8, 8))
counts_Nouns_RIGHT_c_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="blue")
ax.set_title("Common Nouns Found in Tweets of 10 most RIGHT-central users (Including All Words cleaned)")
plt.savefig('RESULTS/WordCounts/Nouns_RIGHT_10mostcentral_mostcommon_clean.png')
## --> analyse words used by 10 LEAST hubs-central users
#find 10 most hubs-central users in df
#1. re-import df
df = pd.read_csv('RESULTS_df_multiverse_DIRECTED.csv', index_col=0)
df.shape
df_LEFT = df[df['side']=='LEFT']
df_RIGHT = df[df['side']=='RIGHT']
df_LEFT_sorted = df_LEFT.sort_values(by='hubs', ascending=False) #most central at the top
df_RIGHT_sorted = df_RIGHT.sort_values(by='hubs', ascending=False) #most central at the top
df_LEFT_sorted = df_LEFT_sorted.tail(10)
LEFT_leastcentral_ids = list(df_LEFT_sorted['user_id_str'])
LEFT_leastcentral_ids
#now manually save these ids into 'ark-tweet-nlp-0.3.2/outputs_conll/LEFT/least_central'
df_RIGHT_sorted = df_RIGHT_sorted.tail(10)
RIGHT_leastcentral_ids = list(df_RIGHT_sorted['user_id_str'])
RIGHT_leastcentral_ids
#now manually save these ids into 'ark-tweet-nlp-0.3.2/outputs_conll/RIGHT/least_central'
## 1. LEFT; 2. RIGHT
os.chdir(os.path.expanduser("~"))
os.chdir('ark-tweet-nlp-0.3.2/outputs_conll/LEFT/least_central') #do this once
errors = []
Propernoun_tags = ['^', 'Z', 'M']
Noun_tags = ['N', 'S', 'L']
Propernouns_LEFT_leastcentral = []
Nouns_LEFT_leastcentral = []
Propernouns_RIGHT_leastcentral = []
Nouns_RIGHT_leastcentral = []
counter = 0
for txt_file in glob.glob("*.txt"):
counter+=1
#extract user_id from file name
user_id = txt_file.split("tweets_")[1]
user_id = user_id.split(".txt")[0]
with open(txt_file, 'r') as f:
try:
for tweet in f.read().split('\n\n'): #for every tweet from this user
                lines = tweet.split('\n') #create iterable with every triple of tab-separated tags
lines_split = [x.split('\t') for x in lines] #this is now a list of 3 items
for triple in lines_split:
if triple[1] in Propernoun_tags:
Propernouns_LEFT_leastcentral.append(triple[0])#CHANGE to LEFT/RIGHT
elif triple[1] in Noun_tags:
Nouns_LEFT_leastcentral.append(triple[0]) #CHANGE to LEFT/RIGHT
except IndexError as e:
errors.append({user_id: {tweet: e}})
print(f'finished file {counter}')
len(Propernouns_LEFT_leastcentral) #1139
len(errors) #10 - 1 for each tweet - this is the blank line at the end
len(Nouns_LEFT_leastcentral) #3375
Propernouns_LEFT_leastcentral[0] #'Newhaven'
len(Propernouns_RIGHT_leastcentral) #894
len(errors) #10 - 1 for each tweet - this is the blank line at the end
len(Nouns_RIGHT_leastcentral) #3424
#now clean these using functions defined above
Propernouns_LEFT_leastcentral_clean = clean_wordlist(Propernouns_LEFT_leastcentral)
Nouns_LEFT_leastcentral_clean = clean_wordlist(Nouns_LEFT_leastcentral)
Propernouns_RIGHT_leastcentral_clean = clean_wordlist(Propernouns_RIGHT_leastcentral)
Nouns_RIGHT_leastcentral_clean = clean_wordlist(Nouns_RIGHT_leastcentral)
len(Propernouns_LEFT_leastcentral_clean) #1139 --> 1112
len(Nouns_LEFT_leastcentral_clean) #3375 --> 3370
len(Propernouns_RIGHT_leastcentral_clean) # 894 --> 683
len(Nouns_RIGHT_leastcentral_clean) #3424 --> 3090
###visualise & save most common words for these 10 users
os.chdir(os.path.expanduser("~"))
#Propernouns
counts_Propernouns_LEFT_lc = collections.Counter(Propernouns_LEFT_leastcentral_clean)
counts_Propernouns_LEFT_lc_30 = pd.DataFrame(counts_Propernouns_LEFT_lc.most_common(30), columns=['words', 'count'])
fig, ax = plt.subplots(figsize=(8, 8))
counts_Propernouns_LEFT_lc_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="red")
ax.set_title("Common Propernouns Found in Tweets of 10 least LEFT-central users (Including All Words cleaned)")
plt.savefig('RESULTS/WordCounts/Propernouns_LEFT_10leasttcentral_mostcommon_clean.png')
counts_Propernouns_RIGHT_lc = collections.Counter(Propernouns_RIGHT_leastcentral_clean)
counts_Propernouns_RIGHT_lc_30 = pd.DataFrame(counts_Propernouns_RIGHT_lc.most_common(30), columns=['words', 'count'])
fig, ax = plt.subplots(figsize=(8, 8))
counts_Propernouns_RIGHT_lc_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="blue")
ax.set_title("Common Propernouns Found in Tweets of 10 least RIGHT-central users (Including All Words)")
plt.savefig('RESULTS/WordCounts/Propernouns_RIGHT_10leastcentral_mostcommon_clean.png')
#Nouns
counts_Nouns_LEFT_lc = collections.Counter(Nouns_LEFT_leastcentral_clean)
counts_Nouns_LEFT_lc_30 = pd.DataFrame(counts_Nouns_LEFT_lc.most_common(30), columns=['words', 'count'])
fig, ax = plt.subplots(figsize=(8, 8))
counts_Nouns_LEFT_lc_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="red")
ax.set_title("Common Nouns Found in Tweets of 10 least LEFT-central users (Including All Words cleaned)")
plt.savefig('RESULTS/WordCounts/Nouns_LEFT_10leastcentral_mostcommon.png')
counts_Nouns_RIGHT_lc = collections.Counter(Nouns_RIGHT_leastcentral_clean)
counts_Nouns_RIGHT_lc_30 = pd.DataFrame(counts_Nouns_RIGHT_lc.most_common(30), columns=['words', 'count'])
fig, ax = plt.subplots(figsize=(8, 8))
counts_Nouns_RIGHT_lc_30.sort_values(by='count').plot.barh(x='words',
y='count',
ax=ax,
color="blue")
ax.set_title("Common Nouns Found in Tweets of 10 least RIGHT-central users (Including All Words cleaned)")
plt.savefig('RESULTS/WordCounts/Nouns_RIGHT_10leastcentral_mostcommon.png')
|
StarcoderdataPython
|
94525
|
<reponame>cavayangtao/rmtt_ros
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import RegionOfInterest as ROI
from sensor_msgs.msg import Range
from std_msgs.msg import UInt8
import smach
import smach_ros
import datetime
import numpy as np
# define state Tag_track
class Tag_track(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['next', 'stay'])
def execute(self, userdata):
global pad_id, mission
rospy.loginfo('Executing state Tag_track')
if pad_id == 1:
zero_twist = Twist()
pub.publish(zero_twist)
mission = 2
return 'next'
else:
return 'stay'
# define state Rise
class Rise(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['next', 'stay'])
def execute(self, userdata):
global height, mission
rospy.loginfo('Executing state Rise')
if height > 1.5:
zero_twist = Twist()
pub.publish(zero_twist)
mission = 3
return 'next'
else:
vel_rise = Twist()
vel_rise.linear.z = 0.2
pub.publish(vel_rise)
return 'stay'
# define state Turn
class Turn(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['stay'])
def execute(self, userdata):
global mission, rad
rospy.loginfo('Executing state Turn')
vel_rise = Twist()
vel_rise.angular.z = 0.2
pub.publish(vel_rise)
return 'stay'
def callback_cmd_tag(msg):
if mission == 1:
vel_tag = msg
pub.publish(vel_tag)
def callback_mission_pad(msg):
global pad_id
pad_id = msg.data
def callback_tof_btm(msg):
global height
height = msg.range
if __name__ == '__main__':
mission = 1
pad_id = 0
vel_tag = Twist()
rospy.init_node('state_machine')
rospy.Subscriber('/mission_pad_id', UInt8, callback_mission_pad)
rospy.Subscriber('/cmd_vel_tag', Twist, callback_cmd_tag)
rospy.Subscriber('/tof_btm', Range, callback_tof_btm)
pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
# rate
# rate = rospy.Rate(10.0)
# Create a SMACH state machine
sm = smach.StateMachine(outcomes=['end'])
# Open the container
with sm:
# Add states to the container
smach.StateMachine.add('TAG_TRACK', Tag_track(),
transitions={'next':'RISE', 'stay':'TAG_TRACK'})
smach.StateMachine.add('RISE', Rise(),
transitions={'next':'TURN', 'stay':'RISE'})
smach.StateMachine.add('TURN', Turn(),
transitions={'stay':'TURN'})
# Create and start the introspection server
# sis = smach_ros.IntrospectionServer('my_smach_introspection_server', sm, '/SM_ROOT')
# sis.start()
while not rospy.is_shutdown():
outcome = sm.execute()
# rate.sleep()
# Wait for ctrl-c to stop the application
rospy.spin()
# sis.stop()
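    # Quick manual test sketch (assumptions: a running roscore and this node; the topic
    # and message type match the subscriber above, everything else is illustrative):
    #   import rospy
    #   from std_msgs.msg import UInt8
    #   rospy.init_node('pad_id_test_pub')
    #   pad_pub = rospy.Publisher('/mission_pad_id', UInt8, queue_size=1)
    #   rospy.sleep(1.0)            # give the subscriber time to connect
    #   pad_pub.publish(UInt8(1))   # pad_id == 1 makes TAG_TRACK return 'next'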
|
StarcoderdataPython
|
160349
|
#!/usr/bin/env python
"""Write out the KL distance between two kmer models
"""
from __future__ import print_function
import os, sys
import numpy as np
from vis_kmer_distributions import *
from scipy.stats import entropy
from scipy.spatial.distance import euclidean
from itertools import product
from argparse import ArgumentParser
def parse_args():
parser = ArgumentParser (description=__doc__)
parser.add_argument('--pk_dir', action='store', default=None, required=True, type=str, dest='pk_dir',
help="Path to experimental kmer distriutions")
parser.add_argument('--out', action='store', default=None, required=True, type=str, dest='out',
help="place to put result files")
args = parser.parse_args()
return args
_SQRT2 = np.sqrt(2)
def hellinger2(p, q):
return euclidean(np.sqrt(p), np.sqrt(q)) / _SQRT2
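# Usage sketch (illustrative values): hellinger2 and scipy's entropy both take plain
# probability vectors, which is how they are used in main() below.
#   p_toy = np.array([0.1, 0.4, 0.5])
#   q_toy = np.array([0.2, 0.3, 0.5])
#   entropy(pk=p_toy, qk=q_toy, base=2)   # KL divergence in bits
#   hellinger2(p_toy, q_toy)              # Hellinger distance in [0, 1]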
def main(args):
args = parse_args()
file_with_ont_model = "../../tests/minion_test_reads/C/" \
"makeson_PC_MA_286_R7.3_ZYMO_C_1_09_11_15_1714_1_ch1_file1_strand.fast5"
assert os.path.exists(file_with_ont_model), "Didn't find ONT model containing file"
kl_out_file_path = args.out + "kl_distance.txt"
hd_out_file_path = args.out + "hellinger_distance.txt"
assert os.path.exists(kl_out_file_path) is not True, "Out file {} already exists".format(kl_out_file_path)
assert os.path.exists(hd_out_file_path) is not True, "Out file {} already exists".format(hd_out_file_path)
kl_out = open(kl_out_file_path, 'w')
hd_out = open(hd_out_file_path, 'w')
x_vals = np.linspace(30, 90, 600)
print("Collecting distances for {pk} against ONT table\n".format(pk=args.pk_dir), file=sys.stdout)
for kmer in product("ACGT", repeat=6):
kmer = ''.join(kmer)
template_pdf, complement_pdf = plot_ont_distribution(kmer=kmer, fast5=file_with_ont_model, x_vals=x_vals)
hdp_distribution = KmerHdpDistribution(data_directory=args.pk_dir, kmer=kmer)
ent = entropy(pk=hdp_distribution.density, qk=template_pdf, base=2)
h_distance = hellinger2(p=hdp_distribution.density, q=template_pdf)
print("finished with kmer {kmer} entropy {ent} hellinger distance {hd}"
"".format(kmer=kmer, ent=ent, hd=h_distance), file=sys.stderr)
kl_out.write("{ent}\n".format(ent=ent))
hd_out.write("{hd}\n".format(hd=h_distance))
kl_out.close()
hd_out.close()
print("\nFinished collecting distances", file=sys.stdout)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
StarcoderdataPython
|
3344520
|
<reponame>bekou/evidence_aware_nlp4if
import os
import torch
import numpy as np
import json
import re
from torch.autograd import Variable
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def tok2int_sent(sentence, tokenizer, max_seq_length):
"""Loads a data file into a list of `InputBatch`s."""
sent_a, sent_b = sentence
tokens_a = tokenizer.tokenize(sent_a)
tokens_b = None
if sent_b:
#print ("set")
tokens_b = tokenizer.tokenize(sent_b)
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens = tokens + tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return input_ids, input_mask, segment_ids
def tok2int_list(src_list, tokenizer, max_seq_length, max_seq_size=-1):
inp_padding = list()
msk_padding = list()
seg_padding = list()
for step, sent in enumerate(src_list):
input_ids, input_mask, input_seg = tok2int_sent(sent, tokenizer, max_seq_length)
inp_padding.append(input_ids)
msk_padding.append(input_mask)
seg_padding.append(input_seg)
#if max_seq_size != -1:
# inp_padding = inp_padding[:max_seq_size]
# msk_padding = msk_padding[:max_seq_size]
# seg_padding = seg_padding[:max_seq_size]
# inp_padding += ([[0] * max_seq_length] * (max_seq_size - len(inp_padding)))
# msk_padding += ([[0] * max_seq_length] * (max_seq_size - len(msk_padding)))
# seg_padding += ([[0] * max_seq_length] * (max_seq_size - len(seg_padding)))
return inp_padding, msk_padding, seg_padding
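# Usage sketch for tok2int_list (illustrative; the tokenizer import is an assumption --
# any object with tokenize() and convert_tokens_to_ids() of this shape works):
#   from pytorch_pretrained_bert import BertTokenizer
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   pairs = [("claim text", "an evidence sentence"), ("another claim", "")]
#   inp, msk, seg = tok2int_list(pairs, tokenizer, max_seq_length=32)
#   # inp/msk/seg are lists of equal-length id / mask / segment-id vectors, one per pair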
class DataLoader(object):
''' For data iteration '''
def __init__(self, data_path, tokenizer, args, test=False, cuda=True, batch_size=64):
self.cuda = cuda
self.batch_size = batch_size
self.tokenizer = tokenizer
self.max_len = args.max_len
self.evi_num = args.evi_num
self.threshold = args.threshold
self.data_path = data_path
self.test = test
examples = self.read_file(data_path)
self.examples = examples
self.total_num = len(examples)
if self.test:
self.total_num = 100000
self.total_step = np.ceil(self.total_num * 1.0 / batch_size)
self.shuffle()
else:
self.total_step = self.total_num / batch_size
self.shuffle()
self.step = 0
def process_sent(self, sentence):
sentence = re.sub(" \-LSB\-.*?\-RSB\-", "", sentence)
sentence = re.sub("\-LRB\- \-RRB\- ", "", sentence)
sentence = re.sub(" -LRB-", " ( ", sentence)
sentence = re.sub("-RRB-", " )", sentence)
sentence = re.sub("--", "-", sentence)
sentence = re.sub("``", '"', sentence)
sentence = re.sub("''", '"', sentence)
return sentence
def process_wiki_title(self, title):
title = re.sub("_", " ", title)
title = re.sub(" -LRB-", " ( ", title)
title = re.sub("-RRB-", " )", title)
title = re.sub("-COLON-", ":", title)
return title
def read_file(self, data_path):
examples = list()
with open(data_path) as fin:
for step, line in enumerate(fin):
sublines = line.strip().split("\t")
examples.append([self.process_sent(sublines[0]), self.process_sent(sublines[2]),self.process_sent(sublines[4])])
return examples
def shuffle(self):
np.random.shuffle(self.examples)
def __iter__(self):
return self
def __next__(self):
return self.next()
    def __len__(self):
        return int(self.total_step)  # number of batches per epoch
def next(self):
''' Get the next batch '''
if self.step < self.total_step:
examples = self.examples[self.step * self.batch_size : (self.step+1)*self.batch_size]
anc_inputs=list()
pos_inputs = list()
neg_inputs = list()
for example in examples:
anc_inputs.append([example[0],''])
pos_inputs.append([example[0], example[1]])
neg_inputs.append([example[0], example[2]])
inp_anc, msk_anc, seg_anc = tok2int_list(anc_inputs, self.tokenizer, self.max_len)
inp_pos, msk_pos, seg_pos = tok2int_list(pos_inputs, self.tokenizer, self.max_len)
inp_neg, msk_neg, seg_neg = tok2int_list(neg_inputs, self.tokenizer, self.max_len)
inp_tensor_pos = Variable(
torch.LongTensor(inp_pos))
msk_tensor_pos = Variable(
torch.LongTensor(msk_pos))
seg_tensor_pos = Variable(
torch.LongTensor(seg_pos))
inp_tensor_neg = Variable(
torch.LongTensor(inp_neg))
msk_tensor_neg = Variable(
torch.LongTensor(msk_neg))
seg_tensor_neg = Variable(
torch.LongTensor(seg_neg))
inp_tensor_anc = Variable(
torch.LongTensor(inp_anc))
msk_tensor_anc = Variable(
torch.LongTensor(msk_anc))
seg_tensor_anc = Variable(
torch.LongTensor(seg_anc))
if self.cuda:
inp_tensor_pos = inp_tensor_pos.cuda()
msk_tensor_pos = msk_tensor_pos.cuda()
seg_tensor_pos = seg_tensor_pos.cuda()
inp_tensor_neg = inp_tensor_neg.cuda()
msk_tensor_neg = msk_tensor_neg.cuda()
seg_tensor_neg = seg_tensor_neg.cuda()
inp_tensor_anc = inp_tensor_anc.cuda()
msk_tensor_anc = msk_tensor_anc.cuda()
seg_tensor_anc = seg_tensor_anc.cuda()
self.step += 1
return inp_tensor_pos, msk_tensor_pos, seg_tensor_pos, inp_tensor_neg, msk_tensor_neg, seg_tensor_neg,inp_tensor_anc, msk_tensor_anc, seg_tensor_anc
else:
self.step = 0
if not self.test:
#examples = self.read_file(self.data_path)
#self.examples = examples
self.shuffle()
raise StopIteration()
class DataLoaderTest(object):
''' For data iteration '''
def __init__(self, data_path, tokenizer, args, cuda=True, batch_size=64):
self.cuda = cuda
self.batch_size = batch_size
self.tokenizer = tokenizer
self.max_len = args.max_len
self.evi_num = args.evi_num
self.threshold = args.threshold
self.data_path = data_path
inputs, ids, evi_list = self.read_file(data_path)
self.inputs = inputs
self.ids = ids
self.evi_list = evi_list
self.total_num = len(inputs)
self.total_step = np.ceil(self.total_num * 1.0 / batch_size)
self.step = 0
def process_sent(self, sentence):
sentence = re.sub(" \-LSB\-.*?\-RSB\-", "", sentence)
sentence = re.sub("\-LRB\- \-RRB\- ", "", sentence)
sentence = re.sub(" -LRB-", " ( ", sentence)
sentence = re.sub("-RRB-", " )", sentence)
sentence = re.sub("--", "-", sentence)
sentence = re.sub("``", '"', sentence)
sentence = re.sub("''", '"', sentence)
return sentence
def process_wiki_title(self, title):
title = re.sub("_", " ", title)
title = re.sub(" -LRB-", " ( ", title)
title = re.sub("-RRB-", " )", title)
title = re.sub("-COLON-", ":", title)
return title
def read_file(self, data_path):
inputs = list()
ids = list()
evi_list = list()
with open(data_path) as fin:
for step, line in enumerate(fin):
instance = json.loads(line.strip())
claim = instance['claim']
id = instance['id']
for evidence in instance['evidence']:
ids.append(id)
inputs.append([self.process_sent(claim), self.process_sent(evidence[2])])
evi_list.append(evidence)
return inputs, ids, evi_list
def shuffle(self):
np.random.shuffle(self.examples)
def __iter__(self):
return self
def __next__(self):
return self.next()
    def __len__(self):
        return int(self.total_step)  # number of batches per epoch
def next(self):
''' Get the next batch '''
if self.step < self.total_step:
anc_inputs=list()
inputs = self.inputs[self.step * self.batch_size : (self.step+1)*self.batch_size]
ids = self.ids[self.step * self.batch_size: (self.step + 1) * self.batch_size]
evi_list = self.evi_list[self.step * self.batch_size: (self.step + 1) * self.batch_size]
#print (len(inputs[0][0]))
#print (inputs[0][0])
for example in inputs:
anc_inputs.append([example[0],''])
#print (len(inputs))
#print (len(anc_inputs))
#print (inputs)
#print (anc_inputs)
inp_anc, msk_anc, seg_anc = tok2int_list(anc_inputs, self.tokenizer, self.max_len, -1)
inp, msk, seg = tok2int_list(inputs, self.tokenizer, self.max_len, -1)
inp_tensor_input = Variable(
torch.LongTensor(inp))
msk_tensor_input = Variable(
torch.LongTensor(msk))
seg_tensor_input = Variable(
torch.LongTensor(seg))
inp_anc_tensor_input = Variable(
torch.LongTensor(inp_anc))
msk_anc_tensor_input = Variable(
torch.LongTensor(msk_anc))
seg_anc_tensor_input = Variable(
torch.LongTensor(seg_anc))
if self.cuda:
inp_anc_tensor_input = inp_anc_tensor_input.cuda()
msk_anc_tensor_input = msk_anc_tensor_input.cuda()
seg_anc_tensor_input = seg_anc_tensor_input.cuda()
inp_tensor_input = inp_tensor_input.cuda()
msk_tensor_input = msk_tensor_input.cuda()
seg_tensor_input = seg_tensor_input.cuda()
self.step += 1
return inp_tensor_input, msk_tensor_input, seg_tensor_input, ids, evi_list,inp_anc_tensor_input,msk_anc_tensor_input,seg_anc_tensor_input
else:
self.step = 0
raise StopIteration()
|
StarcoderdataPython
|
172622
|
# -*- coding: utf-8 -*-
# Author: XuMing <<EMAIL>>
# Brief:
import time
from multiprocessing import Pool
def function(index):
print('Start process: ', index)
time.sleep(3)
print('End process', index)
if __name__ == '__main__':
pool = Pool(processes=3)
for i in range(14):
pool.apply_async(function, (i,))
print("Started processes")
pool.close()
pool.join()
print("Subprocess done.")
|
StarcoderdataPython
|
154980
|
from flask import Flask, render_template, request
from recipe_scrapers import scrape_me
import sqlite3
app = Flask(__name__) # create app instance
@app.route("/")
def index(): # Home page of the KitchenCompanion app
return render_template('index.html', title = 'Home')
@app.route("/view") # Connects to database, fetches all records, and returns view.html to display list
def view():
con = sqlite3.connect("test.db") #Open Connection to DB
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("select * from recipes")
rows = cur.fetchall()
return render_template('view.html', rows = rows, title = 'View Recipes')
@app.route("/add",methods = ["POST","GET"]) # Form page to input recipe URL to be added to DB
def add():
con = sqlite3.connect("test.db") #Open Connection to DB
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("select * from sources")
webrows = cur.fetchall()
return render_template('add.html', webrows = webrows, title = 'Add Recipes')
@app.route("/save",methods = ["POST","GET"]) # Accepts add.html form URL, uses recipe_scrapers package, returns recipe strings. Adds each to DB
def save():
msg = "msg" # For displaying status message if recipe was added
if request.method == "POST":
try:
recipe = request.form["recipe"]
scraper = scrape_me(recipe) # url as a string, it can be url from any site listed in the README
title = scraper.title() #returned as str
totalTime = scraper.total_time() #returned as int
yields = scraper.yields() #returned as str
ingredientsList = scraper.ingredients() #returned as list
seperator = ', ' #For ingredients returned as list
ingredientsString = seperator.join(ingredientsList) # Ingredients list to string
instructions = scraper.instructions() #returned as str
with sqlite3.connect("test.db") as con: #Open Connection to DB, inserts above recipe strings
cur = con.cursor()
cur.execute("INSERT into recipes (title, totaltime,yields,ingredients,instructions) values (?,?,?,?,?)",(title,totalTime,yields,ingredientsString,instructions,))
con.commit()
msg = "Recipe successfully added!"
pagetitle = 'Success!'
except:
con.rollback()
msg = "Unable to add recipe :("
pagetitle = 'Error'
finally:
con.close()
return render_template("save.html",title = pagetitle, msg = msg)
@app.route("/delete",methods = ["POST","GET"]) # Presents delete.html form, user inputs recipe ID to delete from DB. Not really needed....
def delete(): # call method & return html template
return render_template('delete.html', title = 'Delete Recipe')
@app.route("/deletestatus",methods = ["POST","GET"]) # Delete recipe from DB with input from /delete method input
def deletestatus():
id = request.form["id"] # Unique recipe ID from VIEW to be used for deletion
with sqlite3.connect("test.db") as con:
try:
cur = con.cursor()
cur.execute("delete from recipes where id = ??",id)
msg = "Recipe successfully deleted"
pagetitle = 'Success!'
return render_template("deletestatus.html",title = pagetitle, msg = msg)
except:
msg = "Unable to delete recipe :("
pagetitle = 'Error'
finally:
return render_template("deletestatus.html",title = pagetitle, msg = msg)
@app.route("/recipe",methods = ["POST","GET"]) # Page to view single recipe chosen from view.html page
def recipe():
if request.method == "POST":
try:
id = request.form["recipeid"] #
with sqlite3.connect("test.db") as con:
cur = con.cursor()
sqlite_select_query = """SELECT * from recipes where id = ?"""
cur.execute(sqlite_select_query, (id, ))
                singlerow = cur.fetchall()
                title = singlerow[0][1]  # recipe title is the second column of the matching row
except:
title = 'Recipe'
finally:
return render_template('recipe.html', singlerow = singlerow, title = title)
if __name__ == "__main__": # on running python app.py
app.run(debug=True) # run the flask app
#TODO Actual CSS styling, bug fixes in app.py, refactoring, code indentation, recipe presentation, grid & flexbox layouts, search, toasts for add/deletions, unit conversions, much much more
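# Parameter-binding sketch (illustrative values): sqlite3 uses one '?' per bound value
# and a tuple of parameters, the same pattern the insert/delete/select routes rely on.
#   with sqlite3.connect("test.db") as con:
#       con.execute("delete from recipes where id = ?", (recipe_id,))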
|
StarcoderdataPython
|
3286945
|
<gh_stars>0
import types
from collections import OrderedDict
class TsvRecord(object):
__slots__ = ('__keys', '__vals')
def __init__(self, vals = None, keys = None):
"""
r = TsvRecord([1,2,3,4,5])
r.__vals == [1,2,3,4,5]
r.__keys == None
"""
self.__vals = vals or []
if keys:
assert len(keys) == len(vals)
self.__keys = dict(zip(keys, range(len(keys))))
else:
self.__keys = None
def attachKeys(self, keys):
assert len(keys) == len(self.__vals)
self.__keys = dict(zip(keys, range(len(keys))))
def keys(self):
if not self.__keys:
return range(len(self.__vals))
return sorted(self.__keys, key=self.__keys.get)
def values(self):
return self.__vals
def items(self):
if not self.__keys:
return enumerate(self.__vals)
return zip(list(self.keys()), list(self.__vals))
def __repr__(self):
return '<TsvRecord: {!r}>'.format(dict(self.items()))
def __getitem__(self, key):
if isinstance(key, (slice, int)):
return self.__vals[key]
if key in self.__keys:
return self.__vals[self.__keys[key]]
raise KeyError("Record contains no '{}' field.".format(key))
def __getattr__(self, key):
if str(key).startswith('__') or str(key).startswith('_TsvRecord'):
return super(TsvRecord, self).__getattr__(key)
return self[key]
def __setitem__(self, key, value):
if isinstance(key, int):
if key > len(self) or key < 0:
raise IndexError('Index out of range: {}'.format(key))
elif self.__keys and key in self.__keys:
self.__vals[self.__keys[key]] = value
else:
self.__keys = self.__keys or {}
self.__keys[key] = len(self)
self.__vals.append(value)
def __len__(self):
return len(self.__vals)
def __setattr__(self, key, value):
if str(key).startswith('__') or str(key).startswith('_TsvRecord'):
super(TsvRecord, self).__setattr__(key, value)
else:
self[key] = value
def get(self, key, default=None):
"""Returns the value for a given key, or default."""
try:
return self[key]
except KeyError:
return default
def __eq__(self, other):
return self.keys() == other.keys() and self.values() == other.values()
def __ne__(self, other):
return not self.__eq__(other)
def asDict(self, ordered=False):
"""Returns the row as a dictionary, as ordered."""
        return OrderedDict(self.items()) if ordered else dict(self.items())
def __contains__(self, key):
return self.__keys and key in self.__keys
def __delitem__(self, key):
if not self.__keys:
del self.__vals[key]
elif key in self.__keys:
del self.__vals[self.__keys[key]]
del self.__keys[key]
else:
del self.__keys[list(self.keys())[key]]
del self.__vals[key]
class TsvReader(object):
def __init__(self,
infile,
delimit = '\t',
comment = '#',
skip = 0,
cnames = True, # "False": no head; "None"/"True": split first line with delimit, "Callback": get head for first line in your way
attach = True,
row = None, # row factory
cname0 = "ROWNAME"):
openfunc = open
if infile.endswith('.gz'):
import gzip
openfunc = gzip.open
self.file = openfunc(infile)
self.delimit = delimit
self.comment = comment
self.attach = attach
self.row = row
self.tell = 0
if skip > 0:
for _ in range(skip):
self.file.readline()
while True:
tell = self.file.tell()
line = self.file.readline()
if comment and line.startswith(comment):
continue
self.file.seek(tell)
break
headline = self.file.readline() if cnames is not False else ''
if callable(cnames):
self.cnames = cnames(headline)
elif headline:
if comment and headline.startswith(comment):
headline = headline[1:].lstrip()
self.cnames = headline.rstrip('\n').split(delimit)
else:
self.cnames = []
# try to add "cname0" as column name
tell = self.file.tell()
firstline = self.file.readline().rstrip('\n')
ncols = len(firstline.split(delimit))
if firstline and self.cnames and len(self.cnames) == ncols - 1:
self.cnames.insert(0, cname0)
if firstline and self.cnames and len(self.cnames) != ncols:
raise ValueError('Not a valid tsv file. Head has %s columns, while first line has %s.' % (len(self.cnames), ncols))
self.file.seek(tell)
self.tell = tell
self.meta = self.cnames
def next(self):
line = self.file.readline()
line = line.rstrip('\n')
# empty lines not allowed
if not line: raise StopIteration()
record = TsvRecord(line.split(self.delimit))
if self.attach and self.cnames:
record.attachKeys(self.cnames)
if callable(self.row):
return self.row(record)
return record
def dump(self, col = None):
if col is None:
return list(self)
if not isinstance(col, list):
return [r[col] for r in self]
return [tuple(r[c] for c in col) for r in self]
def __next__(self):
return self.next()
def rewind(self):
self.file.seek(self.tell)
def __iter__(self):
return self
def __del__(self):
self.close()
def close(self):
if self.file:
self.file.close()
class TsvWriter(object):
def __init__(self, outfile, delimit = '\t', append = False):
openfunc = open
if outfile.endswith('.gz'):
import gzip
openfunc = gzip.open
self.delimit = delimit
self.cnames = []
self.file = openfunc(outfile, 'w' if not append else 'a')
def writeHead(self, callback = True):
if not self.cnames:
return
if callback and callable(callback):
head = callback(self.cnames)
self.file.write(head + "\n")
elif callback:
head = self.delimit.join(self.cnames)
self.file.write(head + "\n")
def write(self, record):
if isinstance(record, list) or isinstance(record, types.GeneratorType):
self.file.write(self.delimit.join(str(v) for v in record) + '\n')
elif isinstance(record, TsvRecord):
if not self.cnames:
self.write(record.values())
else:
self.write(record[n] for n in self.cnames)
else:
self.file.write(str(record))
def __del__(self):
self.close()
def close(self):
if self.file:
self.file.close()
class TsvJoin(object):
@staticmethod
def compare(a, b, reverse = False):
if not reverse:
return 0 if a < b else 1 if a > b else -1
else:
return 0 if a > b else 1 if a < b else -1
def __init__(self, *files, **inopts):
inopts_default = dict(
delimit = '\t',
comment = '#',
skip = 0,
cnames = True,
attach = False,
row = None,
cname0 = "ROWNAME"
)
inopts_multi = {}
self.length = len(files)
for key, val in inopts.items():
if not isinstance(val, list):
inopts_multi[key] = [val] * self.length
elif len(val) < self.length:
inopts_multi[key] = val + [inopts_default[key]] * (self.length - len(val))
else:
inopts_multi[key] = val
inopts = []
for i in range(self.length):
inopts.append({k:v[i] for k,v in inopts_multi.items()})
self.readers = [TsvReader(f, **inopts[i]) for i,f in enumerate(files)]
def _defaultMatch(self, *rows):
data = [row[0] for row in rows]
mind = min(data)
return -1 if data.count(mind) == self.length else data.index(mind)
def join(self, do, outfile, match = None, outopts = None):
outopts = outopts or {}
outopts_default = dict(
delimit = "\t",
append = False
)
outopts_default.update(outopts)
outopts = outopts_default
cnames = False
if 'cnames' in outopts:
cnames = outopts['cnames']
del outopts['cnames']
out = TsvWriter(outfile, **outopts)
out.cnames = sum((reader.cnames for reader in self.readers if reader.cnames), [])
out.writeHead(cnames)
match = match or self._defaultMatch
rows = [None] * self.length
while True:
try:
for i, row in enumerate(rows):
rows[i] = row or next(self.readers[i])
m = match(*rows)
if m < 0: # matched
do(out, *rows)
m = 0
rows[m] = None
except StopIteration:
break
except Exception:
from sys import stderr
from traceback import format_exc
info = format_exc().splitlines()
info.append("With rows:")
info.extend(["- {}".format(r) for r in rows])
stderr.write("\n".join(info) + "\n\n")
rows = [None] * self.length
continue
out.close()
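# Usage sketch (illustrative file names): copy a TSV file while keeping its header,
# using the reader/writer classes above.
#   reader = TsvReader('input.tsv')      # first non-comment line becomes reader.cnames
#   writer = TsvWriter('output.tsv')
#   writer.cnames = reader.cnames
#   writer.writeHead()
#   for record in reader:                # each row is a TsvRecord with attached keys
#       writer.write(record)
#   writer.close()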
|
StarcoderdataPython
|
140506
|
#!/usr/bin/env python
##############################################################
# $Id$
# Project: WGS pipeline for Nephele project
# Language: Python 2.7
# Authors: <NAME>, <NAME>, <NAME>
# History: July 2015 Start of development
##############################################################
__author__ = "<NAME>"
__copyright__ = ""
__credits__ = ["<NAME>"]
__license__ = ""
__version__ = "1.0.1-dev"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import sys, os, random, time, glob
syscall = lambda cmd: (os.popen(cmd).read()).rstrip("\n")
def read_config( file_name, config ): #########################
config_file=open( file_name, 'r')
l=[]
for line in config_file:
if("" == line): # check for end of file
break
s=line.rstrip("\n")
s.strip()
if("" == s): # ignore empty lines
continue
if("#"==s[:1]): # ignore comments
continue
del l[:] # clear list
l=s.split(',')
config[l[0]]=l[1]
config_file.close()
### read_config ###
def send2log( message, log_file ): #######################
date = syscall("TZ='America/New_York' date")
os.system( "echo >>"+log_file)
if 0!=os.system( "echo '"+date+' '+message+"' >>"+log_file):
sys.exit(777)
### send2log ###
def exec_sys(cmd): #######################
#print >> sys.stderr, "Executing:",cmd
if 0!=os.system(cmd):
print >> sys.stderr, "ERROR when executing:",cmd
sys.exit(777)
### exec_sys ###
########### main ##############################
def main():
if len( sys.argv ) < 2:
print >> sys.stderr, "\n\n\nUsage: " + sys.argv[0] + " <configuration file>\n\n\n"
sys.exit(551)
# Read config file
conf_file = sys.argv[1]
if not os.path.isfile( conf_file ):
print >> sys.stderr, "ERROR: no config file:" + conf_file
sys.exit(555)
config = {}
read_config( conf_file,config )
work_dir=os.getcwd()
config['LOG_FILE']='logfile.txt'
log_file=work_dir+'/'+config['LOG_FILE']
##### Define optional and default parameters
for key in ['ZIP_FILE', 'MAP_FILE']:
if(key not in config.keys()):
config[key]=''
send2log( 'WGS pipeline started', log_file )
# get env.json if available
if os.path.isfile('./env.json'):
send2log( 'env.json=', log_file )
syscall( 'cat ./env.json >> '+log_file)
w="WGS pipeline configuration\n"
for k in sorted(config.keys()):
if 'UseCode'==k:
continue
config[k]=config[k].replace("\"", "_")
config[k]=config[k].replace("\'", "_")
w=w+k+','+config[k]+"\n"
# print configuration to log file
send2log( w, log_file )
####################################################
os.chdir(work_dir)
cmd='unzip -oqj ./'+config['READS_ZIP']
exec_sys(cmd)
cmd='rm -rf ./'+config['READS_ZIP']
exec_sys(cmd)
send2log( "reads unarchived", log_file )
############ execute wgsp.sh ###########################
ana_file = open("wgsp_exe.sh", 'w')
command="anadama pipeline anadama_workflows.pipelines:WGSPipeline -f 'raw_seq_files: glob:*.fastq' -o 'decontaminate.threads: 32' -o 'metaphlan2.nproc: 8' -A anadama_workflows.pipelines:VisualizationPipeline -f 'sample_metadata: " + config['MAP_FILE'] + "'"
send2log(command, log_file)
ana_file.write(command)
ana_file.write("\n")
ana_file.close()
#exec_sys(cmd)
######### end main ####################################
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
68154
|
<filename>python/image.py
# -*- coding: utf-8 -*-
"""
image.py
Converts an image into matrix.drawPixel commands for a 32x32 LED display
"""
from skimage import io, transform
image = io.imread("q.png")
image = transform.resize(image, [32,32])
io.imsave("q-sm.png", image) #uncomment to save 32x32 img
print image[0][0]
f = open("matrix.txt", "w")
for i in range(32):
for j in range(32):
px = image[i][j]
if (px[3] == 0):
px = ["0", "0", "0"] #transparent = black
else:
px = [str(int(px[k]*7)) for k in range(3)] #scale colors to 0-7
colorString = "matrix.Color333(" + ",".join(px) + ")"
f.write("matrix.drawPixel(" + str(i) + "," + str(j) + "," + colorString + ");\n")
f.close()
|
StarcoderdataPython
|
1609999
|
<reponame>icpac-igad/wagtail-leaflet-widget
#!/usr/bin/env python
import sys
from django.conf import settings
from django.core.management import execute_from_command_line
if not settings.configured:
params = dict(
LOGGING={
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'wagtailgeowidget': {
'handlers': ['console'],
'level': 'ERROR',
'propagate': True,
},
},
},
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
INSTALLED_APPS=[
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'wagtail.wagtailcore',
'wagtail.wagtailsites',
'wagtail.wagtailusers',
'wagtail.wagtailimages',
'taggit',
'wagtailgeowidget',
"tests",
],
MIDDLEWARE_CLASSES=[],
ROOT_URLCONF='tests.urls',
)
settings.configure(**params)
def runtests():
argv = sys.argv[:1] + ["test"] + sys.argv[1:]
execute_from_command_line(argv)
if __name__ == "__main__":
runtests()
|
StarcoderdataPython
|
85661
|
<reponame>Saumitra-Shukla/keras-bert
from tensorflow.python.ops.math_ops import erf, sqrt
__all__ = ['gelu']
def gelu(x):
return 0.5 * x * (1.0 + erf(x / sqrt(2.0)))
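# Reference check sketch (illustrative; the numpy/scipy imports are assumptions, not part
# of this module): the same GELU can be evaluated outside a TensorFlow graph to spot-check values.
#   import numpy as np
#   from scipy.special import erf as np_erf
#   x = np.array([-1.0, 0.0, 1.0])
#   0.5 * x * (1.0 + np_erf(x / np.sqrt(2.0)))   # ~ [-0.159, 0.0, 0.841]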
|
StarcoderdataPython
|
1665459
|
<gh_stars>10-100
#-------------------------------------------------------------------------------
# Name: servertoken
# Purpose: Demo to show how to get a services list from a federated server
# using a portal username and password
#
# Author: EsriNL DevTeam (MVH)
#
# Created: 20210709
# Copyright: (c) <NAME> 2021
# Licence: MIT License
#-------------------------------------------------------------------------------
import requests
from logUtils import *
# Log file location
_logFilePath = r"D:/Temp/Logging/servertoken_[date].log"
# ArcGIS Enterprise settings
_portalUrl = "[YOUR-PORTAL-URL]"
_serverUrl = "[YOUR-SERVER-ADMIN-URL]"
_username = "[YOUR-USERNAME]"
_password = "[<PASSWORD>]"
#Generate token from Portal
generateTokenUrl = f"{_portalUrl}/sharing/rest/generateToken"
params = {}
params["username"] = _username
params["password"] = <PASSWORD>
params["client"] = "referer"
params["referer"] = _serverUrl
params["f"] = "json"
LogInfo(f"Generating token on {generateTokenUrl} for {_username}")
r = requests.post(generateTokenUrl,params)
portalTokenObj = r.json()
portalToken = portalTokenObj["token"]
#Exchange the portal token for server token
params = {}
params["token"] = portalToken
params["serverURL"] = _serverUrl
params["f"] = "json"
LogInfo(f"changing token on {_serverUrl}")
r = requests.post(generateTokenUrl,params)
serverTokenObj = r.json()
serverToken = serverTokenObj["token"]
#Get Server contents
servicesUrl = f"{_serverUrl}/services"
params = {}
params["token"] = serverToken
params["f"] = "json"
LogInfo(f"Getting root services list on {servicesUrl}")
r = requests.get(servicesUrl,params)
servicesList = r.json()
#getting the services list
services = servicesList["services"]
LogInfo(f"Got {len(services)} root services and {len(servicesList['folders'])} folders")
#looping through all the folders to get all services
for folder in servicesList["folders"]:
folderUrl = f"{servicesUrl}/{folder}"
LogInfo(f"Getting services for folder: {folderUrl}")
r = requests.post(folderUrl,params)
folderServicesList = r.json()
services += folderServicesList["services"]
LogInfo(f"Got {len(folderServicesList['services'])} services in folder {folder}")
#print the summary
LogInfo("="*80)
for service in services:
LogInfo(f"{service['folderName']:<10} | {service['type']:<15} | {service['serviceName']}")
LogInfo(f"Found {len(services)} services in {len(servicesList['folders'])} folders and root")
|
StarcoderdataPython
|
113673
|
import unittest
from deckbuilder.Gloss import Gloss
class TestGloss(unittest.TestCase):
def test_gloss(self):
gloss = Gloss()
result = gloss.fetch_glosses('よく晴れた夜空')
self.assertEqual(len(result), 3)
def test_clean_gloss_front(self):
gloss = Gloss()
text1 = ' 夜空 【よぞら】 (n) night sky; (P); ED; Name(s): 【やくう】 (f) Yakuu 【よぞら】 (f) Yozora SrcHNA '
clean1 = gloss.clean_front(text1)
self.assertEqual('夜空 - night sky; (P); ED; Name(s): 【やくう】 (f) Yakuu 【よぞら】 (f) Yozora SrcHNA', clean1)
text2 = ' 晴れ : 晴れ(P); 晴; 霽れ 【はれ】 (n,adj-no) (1) (See 快晴・かいせい) clear weather; fine weather;'
clean2 = gloss.clean_front(text2)
self.assertEqual('晴れ - clear weather; fine weather;', clean2)
def test_clean_front_particle(self):
gloss = Gloss()
text = "帝国の from 帝国 【ていこく】 (n) (1) empire; (adj-no) (2) imperial; (P); ED"
text = gloss.remove_dict_annotations(text)
text = gloss.clean_front(text)
text = gloss.clean_verb_stem(text)
text = gloss.clean_back(text)
text = gloss.remove_furigana(text)
self.assertEqual("帝国 - empire; (2) imperial", text)
def test_clean_front_particle2(self):
gloss = Gloss()
text = "異形の from : 異形; 異型 【いけい】 (n,adj-no) atypical appearance; atypicality; heteromorphy; 【いぎょう】 ; (adj-no,adj-na,n) fantastic; grotesque; strange-looking; suspicious-looking; ED "
text = gloss.remove_dict_annotations(text)
text = gloss.clean_front(text)
text = gloss.clean_verb_stem(text)
text = gloss.clean_back(text)
text = gloss.remove_furigana(text)
self.assertEqual("異形 - atypical appearance; atypicality; heteromorphy; ; fantastic; grotesque; strange-looking; suspicious-looking", text)
def test_clean_front_retain_brace(self):
gloss = Gloss()
text = "改ページ : 改ページ; 改頁 【かいページ】 {comp} repagination; new page; form feed; page break;"
text = gloss.remove_dict_annotations(text)
text = gloss.clean_front(text)
text = gloss.clean_verb_stem(text)
text = gloss.clean_back(text)
text = gloss.remove_furigana(text)
self.assertEqual("改ページ - {comp} repagination; new page; form feed; page break", text)
def test_clean_verb_stem_withnumbers(self):
gloss = Gloss()
result = gloss.clean_verb_stem('《verb stem》 晴れる : 晴れる(P); 霽れる 【はれる】 ; (v1,vi) (1) to clear up')
self.assertEqual('(1) to clear up', result)
def test_clean_verb_stem_withoutnumbers(self):
gloss = Gloss()
inputText = '《verb stem》 晴れる : 晴れる(P); 霽れる 【はれる】 ; (v1,vi) to clear up'
#need to clear junk like '(P)' so we find the actual text start
inputText = gloss.remove_dict_annotations(inputText)
result = gloss.clean_verb_stem(inputText)
self.assertEqual('to clear up', result)
def test_clean_gloss_back(self):
gloss = Gloss()
text1 = ' thank you for ...; KD '
clean1 = gloss.clean_back(text1)
self.assertEqual(' thank you for ...', clean1)
text2 = '(4) to be dispelled; to be banished; (P); ED '
text2 = gloss.remove_dict_annotations(text2)
clean2 = gloss.clean_back(text2)
self.assertEqual('(4) to be dispelled; to be banished', clean2)
text3 = ' 夜空 【よぞら】 (n) night sky; (P); ED; Name(s): 【やくう】 (f) Yakuu 【よぞら】 (f) Yozora SrcHNA '
text3 = gloss.remove_dict_annotations(text3)
clean3 = gloss.clean_back(text3)
self.assertEqual(' 夜空 【よぞら】 night sky', clean3)
def test_cleanall_1(self):
gloss = Gloss()
text = ' よく (adv) (1) nicely; properly; well; skillfully; skilfully; (2) (uk) frequently; often; (3) (uk) I\'m glad that you ...; thank you for ...; KD '
clean = gloss.remove_dict_annotations(text)
clean = gloss.clean_front(clean)
clean = gloss.clean_verb_stem(clean)
clean = gloss.clean_back(clean)
self.assertEqual('よく - nicely; properly; well; skillfully; skilfully; (2) frequently; often; (3) I\'m glad that you ...; thank you for ...', clean)
def test_cleanall_2(self):
gloss = Gloss()
text = ' 晴れ : 晴れ(P); 晴; 霽れ 【はれ】 (n,adj-no) (1) (See 快晴・かいせい) clear weather; fine weather; (adj-no,n) (2) (ant: 褻) formal; ceremonial; public; (3) cleared of suspicion; (P); 《verb stem》 晴れる : 晴れる(P); 霽れる 【はれる】 ; (v1,vi) (1) to clear up; to clear away; to be sunny; to stop raining; (2) to refresh (e.g. spirits); (3) (See 疑いが晴れる) to be cleared (e.g. of a suspicion); (4) to be dispelled; to be banished; (P); ED '
clean = gloss.remove_dict_annotations(text)
clean = gloss.clean_front(clean)
clean = gloss.clean_verb_stem(clean)
clean = gloss.clean_back(clean)
self.assertEqual('晴れ - clear weather; fine weather; (2) (ant: 褻) formal; ceremonial; public; (3) cleared of suspicion; (1) to clear up; to clear away; to be sunny; to stop raining; (2) to refresh (e.g. spirits); (3) (See 疑いが晴れる) to be cleared (e.g. of a suspicion); (4) to be dispelled; to be banished', clean)
def test_cleanall_3(self):
gloss = Gloss()
text = ' 夜空 【よぞら】 (n) night sky; (P); ED; Name(s): 【やくう】 (f) Yakuu 【よぞら】 (f) Yozora SrcHNA '
clean = gloss.remove_dict_annotations(text)
clean = gloss.clean_front(clean)
clean = gloss.clean_verb_stem(clean)
clean = gloss.clean_back(clean)
self.assertEqual('夜空 - night sky', clean)
def test_get_readings(self):
gloss = Gloss()
text = "頭 【あたま(P); かしら(P)】 (n) (1) head; (2) hair (on one's head); (3) (あたま only) mind; brains; intellect; (4) leader; chief; boss; captain; (5) top; tip; (6) beginning; start; (7) (あたま only) head; person; (8) (かしら only) top structural component of a kanji; (9) (あたま only) (col) {mahj} (See 雀頭・ジャントー) pair; (P); 【とう】 ; (ctr) counter for large animals (e.g. head of cattle); counter for insects in a collection; counter for helmets, masks, etc.; (P); : 頭; 首 【こうべ; かぶり(頭); ず(頭); つむり(頭); つむ(頭); つぶり(頭)(ok); かぶ(頭)(ok)】 ; (n) head; : ど頭; 頭 【どたま】 ; (n) (uk) (derog) head; dome; bean; nob; noggin; 【かぶし】 ; (n) (arch) (uk) shape of one's head; 【がしら】 ; (suf) (1) (after a noun) top of ...; head of ...; (2) (after the -masu stem of a verb) the moment that ...;"
readings = gloss.get_readings(text)
self.assertEqual(6, len(readings))
self.assertEqual('頭 【あたま(P); かしら(P)】', readings[0])
self.assertEqual('【とう】', readings[1])
self.assertEqual('頭; 首 【こうべ; かぶり(頭); ず(頭); つむり(頭); つむ(頭); つぶり(頭)(ok); かぶ(頭)(ok)】', readings[2])
self.assertEqual('ど頭; 頭 【どたま】', readings[3])
self.assertEqual('【かぶし】', readings[4])
self.assertEqual('【がしら】', readings[5])
def test_generate_alt_readings(self):
gloss = Gloss()
text = "頭 【あたま(P); かしら(P)】 (n) (1) head; (2) hair (on one's head); (3) (あたま only) mind; brains; intellect; (4) leader; chief; boss; captain; (5) top; tip; (6) beginning; start; (7) (あたま only) head; person; (8) (かしら only) top structural component of a kanji; (9) (あたま only) (col) {mahj} (See 雀頭・ジャントー) pair; (P); 【とう】 ; (ctr) counter for large animals (e.g. head of cattle); counter for insects in a collection; counter for helmets, masks, etc.; (P); : 頭; 首 【こうべ; かぶり(頭); ず(頭); つむり(頭); つむ(頭); つぶり(頭)(ok); かぶ(頭)(ok)】 ; (n) head; : ど頭; 頭 【どたま】 ; (n) (uk) (derog) head; dome; bean; nob; noggin; 【かぶし】 ; (n) (arch) (uk) shape of one's head; 【がしら】 ; (suf) (1) (after a noun) top of ...; head of ...; (2) (after the -masu stem of a verb) the moment that ...;"
text = gloss.remove_dict_annotations(text)
readings = gloss.get_readings(text)
altreadings = gloss.generate_alt_readings(readings)
self.assertEqual('頭 【あたま かしら】 | 【とう】 | 頭; 首 【こうべ; かぶり(頭); ず(頭); つむり(頭); つむ(頭); つぶり(頭)(ok); かぶ(頭)(ok)】 | ど頭; 頭 【どたま】 | 【かぶし】 | 【がしら】</br>', altreadings)
def test_generate_alt_readings_withparticle(self):
gloss = Gloss()
text = '帝国の from 帝国 【ていこく】 (n) (1) empire; (adj-no) (2) imperial; (P); ED'
text = gloss.remove_dict_annotations(text)
readings = gloss.get_readings(text)
altreadings = gloss.generate_alt_readings(readings)
self.assertEqual('帝国 【ていこく】</br>', altreadings)
def test_generate_alt_readings_noreading(self):
gloss = Gloss()
text = "ここから (exp) from here; KD"
readings = gloss.get_readings(text)
altreadings = gloss.generate_alt_readings(readings)
self.assertEqual(0, len(altreadings))
@unittest.skip("this would look nicer but I don't actually care")
def test_get_readings_cleaned(self):
gloss = Gloss()
text = gloss.remove_dict_annotations("頭 【あたま(P); かしら(P)】 (n) (1) head;")
readings = gloss.get_readings(text)
self.assertEqual('頭 【あたま; かしら】', readings[0])
def test_remove_furigana(self):
gloss = Gloss()
text = "頭 【あたま(P); かしら(P)】 (n) (1) head; (2) hair (on one's head); (3) (あたま only) mind; brains; intellect; (4) leader; chief; boss; captain; (5) top; tip; (6) beginning; start; (7) (あたま only) head; person; (8) (かしら only) top structural component of a kanji; (9) (あたま only) (col) {mahj} (See 雀頭・ジャントー) pair; (P); 【とう】 ; (ctr) counter for large animals (e.g. head of cattle); counter for insects in a collection; counter for helmets, masks, etc.; (P); : 頭; 首 【こうべ; かぶり(頭); ず(頭); つむり(頭); つむ(頭); つぶり(頭)(ok); かぶ(頭)(ok)】 ; (n) head; : ど頭; 頭 【どたま】 ; (n) (uk) (derog) head; dome; bean; nob; noggin; 【かぶし】 ; (n) (arch) (uk) shape of one's head; 【がしら】 ; (suf) (1) (after a noun) top of ...; head of ...; (2) (after the -masu stem of a verb) the moment that ...;"
clean = gloss.remove_furigana(text)
self.assertEqual("頭 (n) (1) head; (2) hair (on one's head); (3) (あたま only) mind; brains; intellect; (4) leader; chief; boss; captain; (5) top; tip; (6) beginning; start; (7) (あたま only) head; person; (8) (かしら only) top structural component of a kanji; (9) (あたま only) (col) {mahj} (See 雀頭・ジャントー) pair; (P); ; (ctr) counter for large animals (e.g. head of cattle); counter for insects in a collection; counter for helmets, masks, etc.; (P); : 頭; 首 ; (n) head; : ど頭; 頭 ; (n) (uk) (derog) head; dome; bean; nob; noggin; ; (n) (arch) (uk) shape of one's head; ; (suf) (1) (after a noun) top of ...; head of ...; (2) (after the -masu stem of a verb) the moment that ...;", clean)
def test_clean_front_no_tabs(self):
gloss = Gloss()
text = '衛星 【えいせい】 (n) (1) {astron} (natural) satellite; moon; (2) (See 人工衛星) (artificial) satellite; (P); ED Name(s): 【えいせい】 (u) Eisei '
clean = self.clean_all(gloss, text)
self.assertEqual('衛星 - {astron} (natural) satellite; moon; (2) (See 人工衛星) (artificial) satellite', clean)
def clean_all(self, gloss, text):
clean = gloss.remove_dict_annotations(text)
clean = gloss.clean_front(clean)
clean = gloss.clean_verb_stem(clean)
clean = gloss.clean_back(clean)
clean = gloss.remove_furigana(clean)
return clean
def test_leading_text(self):
gloss = Gloss()
text = " Possible inflected verb or adjective: (passive)<br>描く : 描く(P); 画く 【えがく(P); かく】 (v5k,vt) (1) (See 書く・2) to draw; to paint; to sketch; (2) (えがく only) to depict; to describe; (3) to picture in one's mind; to imagine; (4) to form a certain shape (e.g. path of an action, appearance of an object, etc.); (P); ED "
clean = self.clean_all(gloss, text)
self.assertEqual("描く - to draw; to paint; to sketch; (2) (えがく only) to depict; to describe; (3) to picture in one's mind; to imagine; (4) to form a certain shape (e.g. path of an action, appearance of an object, etc.)", clean)
def test_should_ignore_gloss(self):
gloss = Gloss()
gloss.ignore_set.add("生活")
text = "生活 【せいかつ】 (n,vs) living; life (one's daily existence); livelihood; (P); ED "
shouldIgnore = gloss.is_known_word(text)
self.assertTrue(shouldIgnore)
def test_populate_ignore_set_jlpt(self):
gloss = Gloss()
frequency = [{'word': 'tst', 'jlpt': 'N2', 'frequency': 1},
{'word': 'tst2', 'jlpt': 'N4', 'frequency': 1}]
gloss.populate_ignore_set(frequency, 3, 100)
self.assertEqual('tst', gloss.ignore_set.pop())
self.assertEqual(len(gloss.ignore_set), 0)
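# Convenience entry point (assumes this module is run directly, e.g. `python test_gloss.py`;
# the file name is illustrative):
if __name__ == '__main__':
    unittest.main()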
|
StarcoderdataPython
|
3385075
|
# -*- coding: utf-8 -*-
print(abs(-1))
# convert other collections to list
print(list((1, 2, 3)))
print(list({1, 2, 3}))
print(list({'a': 1, 'b': 2}))
print(list(range(100)))
# data type of an object
print(type({1, 2, 3}))
print(type((1)))
print(type((1,)))
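# Same-spirit extension (illustrative): the other built-in constructors convert between
# collection types as well.
print(tuple([1, 2, 3]))
print(set([1, 1, 2, 3]))
print(dict([('a', 1), ('b', 2)]))
print(sorted({3, 1, 2}))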
|
StarcoderdataPython
|
4815069
|
<gh_stars>1-10
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
################################################################################
# GPU nvdia GTX970m
df = pd.read_csv('results/results_gpu.csv')
df['ActivFunc'] = 0
df.loc[range(12),['ActivFunc']] = 'ReLU'
df.loc[range(12,19),['ActivFunc']] = 'Tanh'
df.loc[range(19,len(df)),['ActivFunc']] = 'LeakyReLU'
fig = plt.figure(figsize=(10,5))
# plt.title('Model accuracy on test set')
plt.grid(axis='y')
model_names = df.ModelName[:12]
# width of the bars
barWidth = 0.3
# Accuracy bars
bars_relu = df.meanAccuracy_te[df['ActivFunc'] == 'ReLU']
bars_tanh = df.meanAccuracy_te[df['ActivFunc'] == 'Tanh']
bars_Lrelu = df.meanAccuracy_te[df['ActivFunc'] == 'LeakyReLU']
x_relu = pd.array(range(12), dtype='float')
x_Lrelu = x_relu + 2*barWidth
x_tanh = x_relu + barWidth
x_relu[-5:] += barWidth
# Height of the error bars
yer_relu = df.stdAccuracy_te[df['ActivFunc'] == 'ReLU']
yer_tanh = df.stdAccuracy_te[df['ActivFunc'] == 'Tanh']
yer_Lrelu = df.stdAccuracy_te[df['ActivFunc'] == 'LeakyReLU']
transparent = (0, 0, 0, 0)
plt.bar(x_relu, bars_relu, width= barWidth, yerr=yer_relu, capsize=3, label='ReLU')
plt.bar(x_Lrelu[:len(bars_Lrelu)], bars_Lrelu, width = barWidth, yerr=yer_Lrelu, capsize=3, label='LeakyReLU')
plt.bar(x_tanh[:len(bars_tanh)], bars_tanh, width = barWidth, yerr=yer_tanh, capsize=3, label='Tanh')
# general layout
plt.ylim(0.4,1)
plt.xticks([r + barWidth for r in range(len(bars_relu))], model_names, rotation=25, ha = 'right')
plt.ylabel('Accuracy')
# left, right = plt.xlim()
# plt.xlim(left, right)
# plt.hlines(CV_per_class_results['Logistic Regression'].mean(), left, right, color='seagreen', linestyles='dashed', label='Best per-class accuracy')
# plt.hlines(CV_results['Logistic Regression'].mean(), left, right, color='steelblue', linestyles='dotted', label='Best overall accuracy')
plt.legend()
plt.savefig('results/plots/BarPlotResults-gpu.png', bbox_inches='tight', pad_inches=0.2)
################################################################################
fig = plt.figure(figsize=(10,5))
# plt.title('Training Time (seconds)')
plt.grid(axis='y')
model_names = df.ModelName[:12]
# width of the bars
barWidth = 0.5
# Time bars
bars_relu = df.drop(index=range(7))[df.drop(index=range(7))['ActivFunc'] == 'ReLU']['meanTime_tr']
x_relu = pd.array(range(len(bars_relu)), dtype='float')
# Height of the error bars
yer_relu = df.drop(index=range(7))[df.drop(index=range(7))['ActivFunc'] == 'ReLU']['stdTime_tr']
transparent = (0, 0, 0, 0)
plt.bar(x_relu, bars_relu, width= barWidth, yerr=yer_relu, capsize=3, label='ReLU')
# general layout
bottom, top = plt.ylim()
plt.yscale('log')
plt.ylim(0.75,top)
plt.xticks([r for r in range(len(bars_relu))], model_names[7:], rotation=25, ha = 'right')
plt.ylabel('Time')
# plt.xlim(left, right)
# plt.hlines(CV_per_class_results['Logistic Regression'].mean(), left, right, color='seagreen', linestyles='dashed', label='Best per-class accuracy')
# plt.hlines(CV_results['Logistic Regression'].mean(), left, right, color='steelblue', linestyles='dotted', label='Best overall accuracy')
plt.legend()
plt.savefig('results/plots/BarPlotTime-slow-gpu.png', bbox_inches='tight', pad_inches=0.2)
################################################################################
fig = plt.figure(figsize=(10,5))
# plt.title('Model accuracy on test set')
plt.grid(axis='y')
model_names = df.ModelName[:12]
# width of the bars
barWidth = 0.3
# Accuracy bars
bars_relu = df.meanTime_tr[df['ActivFunc'] == 'ReLU']
bars_tanh = df.meanTime_tr[df['ActivFunc'] == 'Tanh']
bars_Lrelu = df.meanTime_tr[df['ActivFunc'] == 'LeakyReLU']
x_relu = pd.array(range(12), dtype='float')
x_Lrelu = x_relu + 2*barWidth
x_tanh = x_relu + barWidth
x_relu[-5:] += barWidth
# Height of the error bars
yer_relu = df.stdTime_tr[df['ActivFunc'] == 'ReLU']
yer_tanh = df.stdTime_tr[df['ActivFunc'] == 'Tanh']
yer_Lrelu = df.stdTime_tr[df['ActivFunc'] == 'LeakyReLU']
transparent = (0, 0, 0, 0)
plt.bar(x_relu, bars_relu, width= barWidth, yerr=yer_relu, capsize=3, label='ReLU')
plt.bar(x_Lrelu[:len(bars_Lrelu)], bars_Lrelu, width = barWidth, yerr=yer_Lrelu, capsize=3, label='LeakyReLU')
plt.bar(x_tanh[:len(bars_tanh)], bars_tanh, width = barWidth, yerr=yer_tanh, capsize=3, label='Tanh')
# general layout
plt.yscale('log')
# plt.ylim(0.4,1)
plt.xticks([r + barWidth for r in range(len(bars_relu))], model_names, rotation=25, ha = 'right')
plt.ylabel('Training time (sec)')
# left, right = plt.xlim()
# plt.xlim(left, right)
# plt.hlines(CV_per_class_results['Logistic Regression'].mean(), left, right, color='seagreen', linestyles='dashed', label='Best per-class accuracy')
# plt.hlines(CV_results['Logistic Regression'].mean(), left, right, color='steelblue', linestyles='dotted', label='Best overall accuracy')
plt.legend()
plt.savefig('results/plots/BarPlotTime-all-gpu.png', bbox_inches='tight', pad_inches=0.2)
################################################################################
# CPU on virtual machine results
df2 = pd.read_csv('results/results_cpu.csv')
df2['ActivFunc'] = 0
df2.loc[range(7),['ActivFunc']] = 'ReLU'
df2.loc[range(7,14),['ActivFunc']] = 'Tanh'
df2.loc[range(14,len(df2)),['ActivFunc']] = 'LeakyReLU'
df2.loc[df2.ModelName.eq('DropoutFullyConnectedBatchNorm'), 'stdTime_tr'] = 0
fig = plt.figure(figsize=(10,5))
# plt.title('Model accuracy on test set')
plt.grid(axis='y')
model_names = df2.ModelName[:7]
# width of the bars
barWidth = 0.3
# Accuracy bars
bars_relu = df2.meanAccuracy_te[df2['ActivFunc'] == 'ReLU']
bars_tanh = df2.meanAccuracy_te[df2['ActivFunc'] == 'Tanh']
bars_Lrelu = df2.meanAccuracy_te[df2['ActivFunc'] == 'LeakyReLU']
x_relu = pd.array(range(7), dtype='float')
x_Lrelu = x_relu + 2*barWidth
x_tanh = x_relu + barWidth
# Height of the error bars
yer_relu = df2.stdAccuracy_te[df2['ActivFunc'] == 'ReLU']
yer_tanh = df2.stdAccuracy_te[df2['ActivFunc'] == 'Tanh']
yer_Lrelu = df2.stdAccuracy_te[df2['ActivFunc'] == 'LeakyReLU']
transparent = (0, 0, 0, 0)
plt.bar(x_relu, bars_relu, width= barWidth, yerr=yer_relu, capsize=3, label='ReLU')
plt.bar(x_Lrelu[:len(bars_Lrelu)], bars_Lrelu, width = barWidth, yerr=yer_Lrelu, capsize=3, label='LeakyReLU')
plt.bar(x_tanh[:len(bars_tanh)], bars_tanh, width = barWidth, yerr=yer_tanh, capsize=3, label='Tanh')
# general layout
plt.ylim(0.4,1)
plt.xticks([r + barWidth for r in range(len(bars_relu))], model_names, rotation=25, ha = 'right')
plt.ylabel('Accuracy')
# left, right = plt.xlim()
# plt.xlim(left, right)
# plt.hlines(CV_per_class_results['Logistic Regression'].mean(), left, right, color='seagreen', linestyles='dashed', label='Best per-class accuracy')
# plt.hlines(CV_results['Logistic Regression'].mean(), left, right, color='steelblue', linestyles='dotted', label='Best overall accuracy')
plt.legend()
plt.savefig('results/plots/BarPlotResults-cpu.png', bbox_inches='tight', pad_inches=0.2)
################################################################################
fig = plt.figure(figsize=(10,5))
# plt.title('Model accuracy on test set')
plt.grid(axis='y')
model_names = df2.ModelName[:7]
# width of the bars
barWidth = 0.3
# Time bars
bars_relu = df2.meanTime_tr[df2['ActivFunc'] == 'ReLU']
bars_tanh = df2.meanTime_tr[df2['ActivFunc'] == 'Tanh']
bars_Lrelu = df2.meanTime_tr[df2['ActivFunc'] == 'LeakyReLU']
x_relu = pd.array(range(7), dtype='float')
x_Lrelu = x_relu + 2*barWidth
x_tanh = x_relu + barWidth
# Height of the error bars
yer_relu = df2.stdTime_tr[df2['ActivFunc'] == 'ReLU']
yer_tanh = df2.stdTime_tr[df2['ActivFunc'] == 'Tanh']
yer_Lrelu = df2.stdTime_tr[df2['ActivFunc'] == 'LeakyReLU']
transparent = (0, 0, 0, 0)
plt.bar(x_relu, bars_relu, width= barWidth, yerr=yer_relu, capsize=3, label='ReLU')
plt.bar(x_Lrelu[:len(bars_Lrelu)], bars_Lrelu, width = barWidth, yerr=yer_Lrelu, capsize=3, label='LeakyReLU')
plt.bar(x_tanh[:len(bars_tanh)], bars_tanh, width = barWidth, yerr=yer_tanh, capsize=3, label='Tanh')
# general layout
plt.ylim()
plt.xticks([r + barWidth for r in range(len(bars_relu))], model_names, rotation=25, ha = 'right')
plt.ylabel('Time')
# left, right = plt.xlim()
# plt.xlim(left, right)
# plt.hlines(CV_per_class_results['Logistic Regression'].mean(), left, right, color='seagreen', linestyles='dashed', label='Best per-class accuracy')
# plt.hlines(CV_results['Logistic Regression'].mean(), left, right, color='steelblue', linestyles='dotted', label='Best overall accuracy')
plt.legend()
plt.savefig('results/plots/BarPlotTimes-cpu.png', bbox_inches='tight', pad_inches=0.2)
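################################################################################
# Refactor sketch (hypothetical helper, not used above): the grouped accuracy/time
# bar plots all follow the same pattern; column names match the CSVs loaded above.
def grouped_bars(frame, value_col, err_col, labels, out_path, ylabel, log_y=False):
    bar_width = 0.3
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.grid(axis='y')
    for k, act in enumerate(['ReLU', 'LeakyReLU', 'Tanh']):
        sub = frame[frame['ActivFunc'] == act]
        xs = pd.array(range(len(sub)), dtype='float') + k * bar_width
        ax.bar(xs, sub[value_col], width=bar_width, yerr=sub[err_col], capsize=3, label=act)
    if log_y:
        ax.set_yscale('log')
    ax.set_xticks([r + bar_width for r in range(len(labels))])
    ax.set_xticklabels(labels, rotation=25, ha='right')
    ax.set_ylabel(ylabel)
    ax.legend()
    fig.savefig(out_path, bbox_inches='tight', pad_inches=0.2)
# Example (hypothetical call):
#   grouped_bars(df2, 'meanAccuracy_te', 'stdAccuracy_te', df2.ModelName[:7],
#                'results/plots/BarPlotResults-cpu.png', 'Accuracy')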
|
StarcoderdataPython
|
1678119
|
<gh_stars>0
from ..Qt import QtGui, QtCore, QtWidgets, USE_PYSIDE
if not USE_PYSIDE:
import sip
from .GraphicsItem import GraphicsItem
__all__ = ['GraphicsObject']
class GraphicsObject(GraphicsItem, QtWidgets.QGraphicsObject):
"""
**Bases:** :class:`GraphicsItem <pyqtgraph.graphicsItems.GraphicsItem>`, :class:`QtGui.QGraphicsObject`
Extension of QGraphicsObject with some useful methods (provided by :class:`GraphicsItem <pyqtgraph.graphicsItems.GraphicsItem>`)
"""
_qtBaseClass = QtWidgets.QGraphicsObject
def __init__(self, *args):
self.__inform_view_on_changes = True
QtWidgets.QGraphicsObject.__init__(self, *args)
self.setFlag(self.ItemSendsGeometryChanges)
GraphicsItem.__init__(self)
def itemChange(self, change, value):
ret = QtWidgets.QGraphicsObject.itemChange(self, change, value)
if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:
self.parentChanged()
try:
inform_view_on_change = self.__inform_view_on_changes
except AttributeError:
# It's possible that the attribute was already collected when the itemChange happened
# (if it was triggered during the gc of the object).
pass
else:
if inform_view_on_change and change in [self.ItemPositionHasChanged, self.ItemTransformHasChanged]:
self.informViewBoundsChanged()
## workaround for pyqt bug:
## http://www.riverbankcomputing.com/pipermail/pyqt/2012-August/031818.html
if not USE_PYSIDE and change == self.ItemParentChange and isinstance(ret, QtWidgets.QGraphicsItem):
ret = sip.cast(ret, QtWidgets.QGraphicsItem)
return ret
|
StarcoderdataPython
|
1715057
|
<gh_stars>1000+
import pygtk,math,string
pygtk.require('2.0')
import gtk
class mainwin():
def __init__(self): #This function autorun at assign object to class >> "win=mainwin()"
self.mwin=gtk.Window()
self.mwin.set_size_request(300,270)
self.mwin.set_resizable(False)
self.mwin.set_title('Calculator')
#=============== Create menu bar and popups ===============
self.menus=(
("/_Calculator",None,None,0,"<Branch>"),
("/Calculator/_CE","<Control>C",self.ce,0,"<StockItem>",gtk.STOCK_CANCEL),
("/Calculator/C_lear","<Control>L",self.clear,0,"<StockItem>",gtk.STOCK_CLEAR),
("/Calculator/_Bksp","<Backspace>",self.bksp,0,"<StockItem>",gtk.STOCK_GO_BACK ),
("/Calculator/sep1",None,None,0,"<Separator>"),
("/Calculator/_Quit","<Control>Q",gtk.main_quit,0,"<StockItem>",gtk.STOCK_QUIT),
)
#======== Create map of buttons and clicked event =========
ButtonsProp=(
(("Bksp",self.bksp) ,("Clear",self.clear) ,("CE",self.ce)),
(("7",self.calc_numbers) ,("8",self.calc_numbers) ,(" 9 ",self.calc_numbers) ,(" / " ,self.calc_operators),("sqrt",self.sqrt)),
(("4" ,self.calc_numbers) ,("5",self.calc_numbers) ,(" 6 ",self.calc_numbers) ,(" * ",self.calc_operators) ,("%" ,self.percent)),
(("1",self.calc_numbers) ,("2" ,self.calc_numbers),(" 3 " ,self.calc_numbers),(" - ",self.calc_operators) ,("1/x",self.one_div_x)),
(("0",self.calc_numbers) ,("+/-",self.change_sign),(" . ",self.dot) ,(" + " ,self.calc_operators),("=",self.do_equal))
)
#================ Create entry and buttons ================
self.accelg=gtk.AccelGroup()
itemfac=gtk.ItemFactory(gtk.MenuBar,"<main>")
itemfac.create_items(self.menus)
self.mwin.add_accel_group(self.accelg)
self.itemfac=itemfac
self.menubar=itemfac.get_widget("<main>")
self.vb1=gtk.VBox(0,0)
self.vb1.pack_start(self.menubar,0,0,0)
self.mwin.add(self.vb1)
self.mtext=gtk.Entry()
self.mtext.set_text("0")
self.mtext.set_editable(False)
self.hb1=gtk.HBox()
self.hb1.pack_start(self.mtext,1,1,4)
self.vb1.pack_start(self.hb1,0,0,3)
self.mtable=gtk.Table(4,3)
self.mtable.set_row_spacings(3)
self.mtable.set_col_spacings(3)
x=y=0
for i in ButtonsProp:
for j in i:
btn=gtk.Button(j[0])
btn.connect("clicked",j[1])
self.mtable.attach(btn,x,x+1,y,y+1)
x+=1
print j[0] ," ",
x=0
y+=1
print ""
self.hb2=gtk.HBox()
self.hb2.pack_start(self.mtable,1,1,4)
self.vb1.pack_start(self.hb2,1,1,2)
#============= set flags =============
self.zero=True
self.equal=False
self.oldnum=0
self.operator=""
# { The End }
def do_equal(self,widget):
if self.oldnum != 0:
if not self.equal: self.currentnum=string.atof(self.mtext.get_text())
if self.operator==" / ":
if self.currentnum==0: return 1
self.mtext.set_text(str(self.oldnum/self.currentnum));self.oldnum=string.atof(self.mtext.get_text())
elif self.operator==" * ":
self.mtext.set_text(str(self.oldnum*self.currentnum));self.oldnum=string.atof(self.mtext.get_text())
elif self.operator==" - ":
self.mtext.set_text(str(self.oldnum-self.currentnum));self.oldnum=string.atof(self.mtext.get_text())
elif self.operator==" + ":
self.mtext.set_text(str(self.oldnum+self.currentnum));self.oldnum=string.atof(self.mtext.get_text())
self.clear_dot_zero()
self.equal=True
def calc_numbers(self,widget):
if self.zero==True :
self.mtext.set_text("")
self.zero=False
if self.mtext.get_text()=="0": self.mtext.set_text("")
self.mtext.set_text(self.mtext.get_text()+ str(string.atoi( widget.get_label())))
def calc_operators(self,widget):
self.oldnum=string.atof(self.mtext.get_text())
self.operator=widget.get_label()
self.zero=True
self.equal=False
def change_sign(self,widget):
self.mtext.set_text(str(string.atof(self.mtext.get_text())* -1))
self.clear_dot_zero()
def dot(self,widget):
if self.zero==True :
self.oldnum=string.atof(self.mtext.get_text())
self.mtext.set_text("0")
self.zero=False
if string.find(self.mtext.get_text(),".")<=0:
self.mtext.set_text(self.mtext.get_text()+".")
self.zero=False
def sqrt(self,widget):
self.mtext.set_text(str(math.sqrt(string.atof(self.mtext.get_text()))))
self.clear_dot_zero()
self.zero=True
def percent(self,widget):
if (self.oldnum != 0)and(string.atof(self.mtext.get_text()) != 0):
self.mtext.set_text(str(string.atof(self.mtext.get_text())*(self.oldnum*0.01)))
self.clear_dot_zero()
def one_div_x(self,widget):
if string.atof(self.mtext.get_text()) != 0:
self.mtext.set_text(str(1/string.atof(self.mtext.get_text())))
self.clear_dot_zero()
self.zero=True
def bksp(self,widget,e=0):
self.mtext.set_text(self.mtext.get_text()[0:-1])
if self.mtext.get_text()=="":
self.mtext.set_text("0")
self.zero=True
def clear(self,widget,e=0):
self.mtext.set_text("0")
self.zero=True
def ce(self,widget,e=0):
self.mtext.set_text("0")
self.operator=""
self.oldnum=0
self.zero=True
def clear_dot_zero(self):
if self.mtext.get_text()[-2:]==".0":
self.mtext.set_text(self.mtext.get_text()[0:-2])
def main(self):
self.mwin.connect("destroy",gtk.main_quit)
self.mwin.show_all()
gtk.main()
if __name__=='__main__':
win=mainwin()
win.main()
|
StarcoderdataPython
|
3270934
|
<filename>Ano_1/LabI/Projeto Final - Moura/repositoryContent/ImageEditor/simpleImageEditor.py
#encoding=utf-8
import sys
from PIL import Image
from ImageEditor.imageMenu import *
from ImageEditor.effects import *
from ImageEditor.filters import *
menu = """
----------------------------------------
Select the effect you want to apply:
SIMPLE EDITS # while under development, use 1 to 20
1 - (For server only) Returns std format image and dict of edited thumbs
2 - Rotate
3 - More contrast
4 - Less contrast
5 - More saturation
6 - Less saturation
7 - More luminosity
8 - Less luminosity
9 - Reduce palette (bpp)
10 - Blur (2 px radius)
11 - Negative
12 - Black and White
13 - Sepia
14 - Edge detection (black edge, white background)
FILTERS # while under development, use 21 to 30
21 - Old photo
22 - Pencil drawing
23 - Color drawing
24 - Circles
MEMES
31 - Top text meme
32 - Center text meme
33 - Bottom text meme
34 - Mix text meme
35 - Top text shaded snap
36 - Center text shaded snap
37 - Bottom text shaded snap
38 - Mix text shaded snap
39 - Top text solid snap
40 - Center text solid snap
41 - Bottom text solid snap
42 - Mix text solid snap
CONTROLS # for now only zero is active; letter keys do not work
30 - UNDO LAST EDIT
0 - SAVE IMAGE # and CLOSE
----------------------------------------
"""
def get_file():
"""Creates image object from sys.argv or from console. Returns image and filename"""
try:
if (len(sys.argv) >= 2):
filename = sys.argv[1]
im = Image.open(filename)
return (im, filename)
else:
filename = input("\nWhat file do you want to edit? : ")
im = Image.open(filename)
return (im, filename)
except (IOError):
print ("\nFile " + filename + " does not exist")
exit(0)
def get_menu_option():
"""Reads menu option from sys.argv or console. Returns choosen option"""
try:
if (len(sys.argv) >= 3):
op = sys.argv[2]
return op
else:
op = int(input("Option: "))
return op
except (ValueError):
print ('\nInvalid option. Option should be a number')
def main():
print ("------------------------------")
print ("100BRAINS - SIMPLE IMAGE EDITOR")
option = 0
im, filename = get_file()
new_im = prepare_image(filename)
while option != -1: # runs indefinitely. Saving the final edited image exits the program directly.
print(menu)
option = get_menu_option()
new_im = run_option(None, option, new_im)
new_im.show()
#Run the program
main()
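# Hedged usage note (editor addition): the editor can be driven from the
# command line as well as interactively. Assuming the ImageEditor package is
# importable, an invocation such as
#
#     python simpleImageEditor.py holiday.jpg 2
#
# would open holiday.jpg and feed option 2 (Rotate) to the menu; with no
# arguments the filename and option are read from the console instead. The
# file name and option number above are illustrative only.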
|
StarcoderdataPython
|
123345
|
<reponame>LawAlias/gisflask
#coding:utf-8
import urllib,urllib2
from flask import flash,render_template,request,redirect,url_for
from flask.views import MethodView
from apis import app
from flask_login import login_required, current_user
from geomodule.utils import shp2geo_nowriter,geofunc,shp2wkt,geojson2wkt
from main.utils import getUid,getCurrOClock,log,getCurrTime
from main.models import Point,Line,Role,Layer
from main.proModels import NO_FLYREGION
from main.extensions import db
from main.proforms import POIForm
import json
import uuid
import datetime
@app.route('/apis',methods=['GET','POST'])
def apis():
return ('apis load success')
#mvc
class LineTest(MethodView):
def get(self,uid):
return uid
class LayerFeature(MethodView):
def get(self,uid):
try:
layer=db.session.query(Layer).filter(Layer.uid==uid).first()
if(layer):
return layer.toGeoJson()
except Exception, e:
log.info("用户{0}查询图层出错:{1},uid参数:{2}".format(current_user.name,str(e),uid))
return 'False'
def delete(self,uid):
try:
layer=db.session.query(Layer).filter(Layer.uid==uid).first()
db.session.delete(layer)
db.session.commit()
return 'True'
except Exception, e:
log.info("用户{0}删除图层出错:{1},uid参数:{2}".format(current_user.name,str(e),uid))
return 'False'
class LayerFeatures(MethodView):
decorators=[login_required]
# query all layers belonging to this role
def get(self):
try:
role=current_user.role
layers=db.session.query(Layer).filter(Layer.roles.contains(role)).all()
return json.dumps(layers)
except Exception, e:
log.info("用户{0}查询图层出错:{1}".format(current_user.name,str(e)))
return 'False'
def post(self):
name=request.form["name"]
uid=request.form["uid"]
try:
layer=db.session.query(Layer).filter(Layer.uid==uid).first()
if(layer):# if it already exists, just update it
layer.name=name
db.session.commit()
return "True"
else:
roles=[current_user.role]
new_layer=Layer(name=name,uid=uid,create_user=current_user.name,create_time=getCurrTime(),roles=roles)
db.session.add(new_layer)
db.session.commit()
return "True"
except Exception, e:
log.info("用户{0}更新图层出错:{1},uid参数:{2}".format(current_user.name,str(e),uid))
return 'False'
# elif:# otherwise it is a new feature or a geometry change
app.add_url_rule('/api/layerfeature/<string:uid>',view_func=LayerFeature.as_view('layerfeature_api'),methods=['GET','DELETE'])
app.add_url_rule('/api/layerfeatures',view_func=LayerFeatures.as_view('layerfeatures_api'),methods=['POST'])
class LineFeature(MethodView):
def get(self,uid):
try:
line=db.session.query(Line).filter(Line.uid==uid).first()
return json.dumps(line)
except Exception, e:
log.info("用户{0}查询要素出错:{1},uid参数:{2}".format(current_user.name,str(e),uid))
return 'False'
def delete(self,uid):
try:
line=db.session.query(Line).filter(Line.uid==uid).first()
db.session.delete(line)
db.session.commit()
return 'True'
except Exception, e:
log.info("用户{0}删除要素出错:{1},uid参数:{2}".format(current_user.name,str(e),uid))
return 'False'
class LineFeatures(MethodView):
decorators=[login_required]
def post(self):
try:
geojson=request.form["geojson"]
layerId=request.form["layerId"]
geojson=json.loads(geojson)
if(geojson.has_key('type') and geojson['type']=='FeatureCollection'):
if geojson.has_key('properties') and geojson['properties'].has_key('uid'):# style and attributes can only be changed for a single feature, so a uid is required
properties=geojson['properties']
uid=properties['uid']
line=db.session.query(Line).filter(Line.uid==uid).first()
for key in properties.keys():# update the plain attributes
if key not in ('style','uid'):
setattr(line,key,properties[key])
elif key=='style':
styles=json.dumps(properties['style'])
print("styles")
print(styles)
line.style=styles
db.session.commit()
else:
for feature in geojson['features']:
feaImport(feature,layerId)
return 'True'
else:# otherwise it is a single feature
feaImport(geojson,layerId)
return 'True'
except Exception, e:
log.info("用户{0}更新要素出错:{1},json参数:{2}".format(current_user.name,str(e),geojson))
return 'False'
# elif:# otherwise it is a new feature or a geometry change
app.add_url_rule('/api/linefeature/<string:uid>',view_func=LineFeature.as_view('linefeature_api'),methods=['GET','DELETE'])
app.add_url_rule('/api/linefeatures',view_func=LineFeatures.as_view('linefeatures_api'),methods=['POST'])
# write a single feature to the database
def feaImport(feature,layerId):
geometry=feature['geometry']
styles=feature['properties']
fields=feature['properties']['field'] if feature['properties'].has_key('field') else {'name':'name'}
wkt,geoType=geojson2wkt(geometry)
uid=styles['uid']
currentime=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if geoType=='Point':
pt=Point(name=fields['name'],uid=uid,create_time=currentime,create_user=current_user.name,style=json.dumps(styles),roles=[current_user.role],geo=wkt)# TODO: how best to convert geojson to geom
db.session.add(pt)
db.session.commit()
elif geoType=='LINESTRING':
line=db.session.query(Line).filter(Line.uid==uid).first()
if(line):
line.style=json.dumps(styles)
line.name=fields['name']
line.geo=wkt
db.session.commit()
else:
new_line=Line(layer_id=layerId,name=fields['name'],uid=uid,create_time=currentime,create_user=current_user.name,style=json.dumps(styles),roles=[current_user.role],geo=wkt)# TODO: how best to convert geojson to geom
db.session.add(new_line)
db.session.commit()
elif geoType=='Polygon':
uid=getUid()
is_enable="1"
polygon=NO_FLYREGION(guid=uid,is_enable=is_enable,geo=wkt)
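# Hedged illustration (editor addition): feaImport expects a single GeoJSON
# feature roughly shaped like the dict below; the coordinate, uid and field
# values are placeholders, not data from the original project.
#
#     example_feature = {
#         "type": "Feature",
#         "geometry": {"type": "Point", "coordinates": [116.39, 39.91]},
#         "properties": {
#             "uid": "some-uuid",
#             "style": {"color": "#ff0000"},
#             "field": {"name": "demo point"},
#         },
#     }
#     # feaImport(example_feature, layerId=1)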
@app.route('/api/saveGeo',methods=['POST'])
@login_required
def saveGeo():
geojson=request.form["geojson"]#FeatureCollection
geojson=json.loads(geojson)
for feature in geojson['features']:
geometry=feature['geometry']
styles=feature['properties']
wkt,geoType=geojson2wkt(geometry)
if geoType=='Point':
pt=Point(name='name',create_user=current_user.name,style=json.dumps(styles),roles=[current_user.role],geo=wkt)# TODO: how best to convert geojson to geom
db.session.add(pt)
db.session.commit()
elif geoType=='LINESTRING':
pt=Line(name='name',create_user=current_user.name,style=json.dumps(styles),roles=[current_user.role],geo=wkt)# TODO: how best to convert geojson to geom
db.session.add(pt)
db.session.commit()
elif geoType=='Polygon':
uid=getUid()
is_enable="1"
polygon=NO_FLYREGION(guid=uid,is_enable=is_enable,geo=geojson)
flash("保存成功")
return "success"
@app.route('/api/getGeos',methods=['POST'])
@login_required
# query the vector data managed by this role
def getGeos():
try:
geos=[]
role_id=request.form["role_id"]
role=db.session.query(Role).filter(Role.id==role_id).first()
lines=db.session.query(Line).filter(Line.roles.contains(role)).all()
pts=db.session.query(Point).filter(Point.roles.contains(role)).all()
geos.extend(lines)
geos.extend(pts)
features=[]
for geo in geos:
geojson=db.session.execute(geo.geo.ST_AsGeoJSON()).scalar()
feature={
"type": "Feature",
"geometry": json.loads(geojson),
"properties": {
"name": geo.name,
"create_user":geo.create_user,
"create_time":str(geo.create_time),
"uid":geo.uid,
"style":geo.style
}
}
features.append(feature)
response={
"code":200,
"data":features
}
return json.dumps(response)
except Exception, e:
log.info("用户{0}获取矢量图层出错:{1},role_id参数:{2}".format(current_user.name,str(e),role_id))
return 'False'
@app.route('/api/queryAllLayers',methods=['POST'])
@login_required
# query all layers and convert them to geojson
def queryAllLayers():
try:
role=current_user.role
layers=db.session.query(Layer).filter(Layer.roles.contains(role)).all()
feaCollections=[]
for layer in layers:
layer_id=layer.id
print("layer_id")
print(layer_id)
lines=db.session.query(Line).filter(Line.layer_id==layer_id,Line.isDel==0).all()
pts=db.session.query(Point).filter(Point.layer_id==layer_id,Point.isDel==0).all()
geos=[]
geos.extend(lines)
geos.extend(pts)
features=[]
for geo in geos:
feature=geo.toGeoJson()
features.append(feature)
feaCollection={
"type": "FeatureCollection",
"features":features,
"properties": {
"name": layer.name,
"uid": layer.id,
"create_time":str(layer.create_time),
"create_user":current_user.name
}
}
feaCollections.append(feaCollection)
response={
"code":200,
"data":feaCollections
}
return json.dumps(response)
except Exception, e:
log.info("用户{0}获取图层出错:{1}".format(current_user.name,str(e)))
return 'False'
@app.route('/api/queryAllLayerNames',methods=['POST'])
@login_required
# query the set of layer names managed by this role
def queryAllLayerNames():
try:
layers=current_user.role.getLayers()
response={
"code":200,
"data":layers
}
return json.dumps(response)
except Exception, e:
log.info("用户{0}获取图层名称集合出错:{1}".format(current_user.name,str(e)))
return 'False'
@app.route('/api/getGaodes',methods=['POST'])
def getGaodes():
key=request.form['key']
keywords=request.form['keywords']
city=request.form['city']
offset=request.form['offset']
# fetch POI data from the AMap place-search API
# headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0'}
textmod={'key':key,'keywords':keywords,'city':city,'children':'1','offset':offset,'page':'1','extensions': 'all'}
textmod = urllib.urlencode(textmod)
url1 = "http://restapi.amap.com/v3/place/text"
req = urllib2.Request(url = '%s%s%s' % (url1,'?',textmod))
res = urllib2.urlopen(req)
res = res.read()
# parse the JSON response
datajson = json.loads(res)
# name, location, address, telephone, district
# name location address tel adname
pois = datajson['pois']
features=[]
for i in range(0,len(pois)):
location=pois[i]['location'].split(',')
fea={
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
float(location[0]),
float(location[1])
]
},
"properties": {
"name": pois[i]['name'],
"address":pois[i]['address'],
"tel":pois[i]['tel'],
"adname":pois[i]['adname']
}
}
features.append(fea)
featureCollection={
"type":"FeatureCollection",
"features":features
}
featureCollection=json.dumps(featureCollection)
featureCollection=featureCollection.replace('u\'','\'')
featureCollection=featureCollection.decode("unicode-escape")
return featureCollection
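# Hedged example (editor addition) of the form fields /api/getGaodes expects;
# the key value is a placeholder, not a real AMap API key:
#
#     {"key": "<amap-key>", "keywords": "coffee", "city": "beijing", "offset": "20"}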
# query a feature by keyword
@app.route('/api/searchFeature',methods=['POST'])
def searchFeature():
key=request.form['key']
print(key)
if(key==''):
flash('请输入关键字','warning')
return redirect(url_for('map'))
geo=""
line=db.session.query(Line).filter(Line.name==key).first()
if(line):
geo=line.toGeoJson()
else:
pt=db.session.query(Point).filter(Point.name==key).first()
if(pt):
geo=pt.toGeoJson()
response={
"code":200,
"data":geo
}
return json.dumps(response)
|
StarcoderdataPython
|
53300
|
# raider.io api configuration
RIO_MAX_PAGE = 5
# need to update in templates/stats_table.html
# need to update in templates/compositions.html
# need to update in templates/navbar.html
RIO_SEASON = "season-sl-3"
WCL_SEASON = 3
WCL_PARTITION = 1
# config
RAID_NAME = "<NAME>"
# for heroic week, set this to 10
# after that in the season, set this at 16
MIN_KEY_LEVEL = 16
# to generate a tier list based on heroic week data
# have to manually toggle this
MAX_RAID_DIFFICULTY = "Mythic"
#MAX_RAID_DIFFICULTY = "Heroic"
|
StarcoderdataPython
|
41786
|
<reponame>Ricyteach/candemachine<filename>candemachine/exceptions.py<gh_stars>0
class CandeError(Exception):
pass
class CandeSerializationError(CandeError):
pass
class CandeDeserializationError(CandeError):
pass
class CandeReadError(CandeError):
pass
class CandePartError(CandeError):
pass
class CandeFormatError(CandePartError):
pass
|
StarcoderdataPython
|
3302113
|
<reponame>AstunTechnology/featureserver
'''
Created on Oct 16, 2011
@author: michel
'''
import os
import sys
from lxml import etree
from lxml import objectify
from copy import deepcopy
from FeatureServer.WebFeatureService.Transaction.TransactionAction import TransactionAction
class Transaction(object):
tree = None
namespaces = {'gml' : 'http://www.opengis.net/gml',
'fs' : 'http://featureserver.org/fs'}
def getActions(self):
return self.tree
def parse(self, xml):
self.parser = objectify.makeparser(remove_blank_text=True, ns_clean=True)
self.dom = etree.XML(xml, parser=self.parser)
self.parseDOM()
def parseDOM(self, node = None, transaction = None):
if node == None:
node = self.dom
if transaction == None:
transaction = TransactionAction(node)
transaction_class = None
for trans in node.iterchildren():
if str(trans.xpath('local-name()')) == 'Insert':
for child in trans.iterchildren():
transaction_class = self.getTransactionInstance(str(trans.xpath('local-name()')), deepcopy(child))
transaction.appendChild(transaction_class)
elif str(trans.xpath('local-name()')) == 'Update' or str(trans.xpath('local-name()')) == 'Delete':
transaction_class = self.getTransactionInstance(str(trans.xpath('local-name()')), deepcopy(trans))
transaction.appendChild(transaction_class)
self.tree = transaction
def getTransactionInstance(self, transaction, node):
try:
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
transaction_module = __import__(transaction, globals(), locals())
except ImportError:
raise Exception("Could not find transaction for %s" % transaction)
transaction_func = getattr(transaction_module, transaction)
return transaction_func(node)
def render(self, datasource, node = None):
if node == None:
node = self.tree
self.create(datasource, node)
def create(self, datasource, node):
for child in node:
self.create(datasource, child)
node.createStatement(datasource)
def assemble(self, datasource, node, sql = ''):
for child in node:
sql += self.assemble(datasource, child, sql)
return sql
def __str__(self, *args, **kwargs):
return etree.tostring(self.dom, pretty_print = True)
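# Hedged usage sketch (editor addition): the payload and datasource below are
# placeholders, not part of the FeatureServer test suite.
#
#     txn = Transaction()
#     txn.parse(wfs_transaction_xml)   # an OGC WFS-T <Transaction> document
#     txn.render(datasource)           # creates a statement for every parsed action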
|
StarcoderdataPython
|
1713634
|
import inspect
import re
import itertools
def _empty_func():
pass
def set_signature(signature):
def decorator(func):
return wraps(_empty_func, expected=signature)(func)
return decorator
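# Hedged usage sketch (editor addition): set_signature relies on the wraps()
# helper defined further down in this module, and the names below are
# illustrative only.
#
#     @set_signature(['x', 'y'])
#     def add(*args, **kwargs):
#         return sum(args) + sum(kwargs.values())
#
#     # inspect.signature(add) now reports (x, y), while the body still
#     # receives whatever arguments are passed in.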
def get_function_body(func):
source_lines = inspect.getsourcelines(func)[0]
source_lines = itertools.dropwhile(lambda x: x.startswith('@'), source_lines)
line = next(source_lines).strip()
if not line.startswith('def ') and not line.startswith('class'):
return line.rsplit(':')[-1].strip()
elif not line.endswith(':'):
for line in source_lines:
line = line.strip()
if line.endswith(':'):
break
# Handle functions that are not one-liners
first_line = next(source_lines)
# Find the indentation of the first line
indentation = len(first_line) - len(first_line.lstrip())
return ''.join([first_line[indentation:]] + [line[indentation:] for line in source_lines])
class ExistingArgument(ValueError):
pass
class MissingArgument(ValueError):
pass
def make_sentinel(name='_MISSING', var_name=None):
"""Creates and returns a new **instance** of a new class, suitable for
usage as a "sentinel", a kind of singleton often used to indicate
a value is missing when ``None`` is a valid input.
Args:
name (str): Name of the Sentinel
var_name (str): Set this name to the name of the variable in
its respective module enable pickleability.
>>> make_sentinel(var_name='_MISSING')
_MISSING
The most common use cases here in boltons are as default values
for optional function arguments, partly because of its
less-confusing appearance in automatically generated
documentation. Sentinels also function well as placeholders in queues
and linked lists.
.. note::
By design, additional calls to ``make_sentinel`` with the same
values will not produce equivalent objects.
>>> make_sentinel('TEST') == make_sentinel('TEST')
False
>>> type(make_sentinel('TEST')) == type(make_sentinel('TEST'))
False
"""
class Sentinel(object):
def __init__(self):
self.name = name
self.var_name = var_name
def __repr__(self):
if self.var_name:
return self.var_name
return '%s(%r)' % (self.__class__.__name__, self.name)
if var_name:
def __reduce__(self):
return self.var_name
def __nonzero__(self):
return False
__bool__ = __nonzero__
return Sentinel()
def _indent(text, margin, newline='\n', key=bool):
"based on boltons.strutils.indent"
indented_lines = [(margin + line if key(line) else line)
for line in text.splitlines()]
return newline.join(indented_lines)
NO_DEFAULT = make_sentinel(var_name='NO_DEFAULT')
class FunctionBuilder(object):
"""The FunctionBuilder type provides an interface for programmatically
creating new functions, either based on existing functions or from
scratch.
Note: Based on https://boltons.readthedocs.io
Values are passed in at construction or set as attributes on the
instance. For creating a new function based of an existing one,
see the :meth:`~FunctionBuilder.from_func` classmethod. At any
point, :meth:`~FunctionBuilder.get_func` can be called to get a
newly compiled function, based on the values configured.
>>> fb = FunctionBuilder('return_five', doc='returns the integer 5',
... body='return 5')
>>> f = fb.get_func()
>>> f()
5
>>> fb.varkw = 'kw'
>>> f_kw = fb.get_func()
>>> f_kw(ignored_arg='ignored_val')
5
Note that function signatures themselves changed quite a bit in
Python 3, so several arguments are only applicable to
FunctionBuilder in Python 3. Except for *name*, all arguments to
the constructor are keyword arguments.
Args:
name (str): Name of the function.
doc (str): `Docstring`_ for the function, defaults to empty.
module (str): Name of the module from which this function was
imported. Defaults to None.
body (str): String version of the code representing the body
of the function. Defaults to ``'pass'``, which will result
in a function which does nothing and returns ``None``.
args (list): List of argument names, defaults to empty list,
denoting no arguments.
varargs (str): Name of the catch-all variable for positional
arguments. E.g., "args" if the resultant function is to have
``*args`` in the signature. Defaults to None.
varkw (str): Name of the catch-all variable for keyword
arguments. E.g., "kwargs" if the resultant function is to have
``**kwargs`` in the signature. Defaults to None.
defaults (dict): A mapping of argument names to default values.
kwonlyargs (list): Argument names which are only valid as
keyword arguments. **Python 3 only.**
kwonlydefaults (dict): A mapping, same as normal *defaults*,
but only for the *kwonlyargs*. **Python 3 only.**
annotations (dict): Mapping of type hints and so
forth. **Python 3 only.**
filename (str): The filename that will appear in
tracebacks. Defaults to "boltons.funcutils.FunctionBuilder".
indent (int): Number of spaces with which to indent the
function *body*. Values less than 1 will result in an error.
dict (dict): Any other attributes which should be added to the
functions compiled with this FunctionBuilder.
All of these arguments are also made available as attributes which
can be mutated as necessary.
.. _Docstring: https://en.wikipedia.org/wiki/Docstring#Python
"""
_argspec_defaults = {'args': list,
'varargs': lambda: None,
'varkw': lambda: None,
'defaults': lambda: None,
'kwonlyargs': list,
'kwonlydefaults': dict,
'annotations': dict}
@classmethod
def _argspec_to_dict(cls, f):
argspec = inspect.getfullargspec(f)
return dict((attr, getattr(argspec, attr))
for attr in cls._argspec_defaults)
_defaults = {'doc': str,
'dict': dict,
'is_async': lambda: False,
'module': lambda: None,
'body': lambda: 'pass',
'indent': lambda: 4,
'annotations': dict,
'filename': lambda: 'py2mint.utils.FunctionBuilder'}
_defaults.update(_argspec_defaults)
_compile_count = itertools.count()
def __init__(self, name, **kw):
self.name = name
for a, default_factory in self._defaults.items():
val = kw.pop(a, None)
if val is None:
val = default_factory()
setattr(self, a, val)
if kw:
raise TypeError('unexpected kwargs: %r' % kw.keys())
return
# def get_argspec(self): # TODO
def get_sig_str(self, with_annotations=True):
"""Return function signature as a string.
with_annotations is ignored on Python 2. On Python 3 signature
will omit annotations if it is set to False.
"""
if with_annotations:
annotations = self.annotations
else:
annotations = {}
return inspect.formatargspec(self.args,
self.varargs,
self.varkw,
[],
self.kwonlyargs,
{},
annotations)
_KWONLY_MARKER = re.compile(r"""
\* # a star
\s* # followed by any amount of whitespace
, # followed by a comma
\s* # followed by any amount of whitespace
""", re.VERBOSE)
def get_invocation_str(self):
kwonly_pairs = None
formatters = {}
if self.kwonlyargs:
kwonly_pairs = dict((arg, arg)
for arg in self.kwonlyargs)
formatters['formatvalue'] = lambda value: '=' + value
# TODO: Replace with inspect.signature
sig = inspect.formatargspec(self.args,
self.varargs,
self.varkw,
[],
kwonly_pairs,
kwonly_pairs,
{},
**formatters)
sig = self._KWONLY_MARKER.sub('', sig)
return sig[1:-1]
@classmethod
def from_func(cls, func):
"""Create a new FunctionBuilder instance based on an existing
function. The original function will not be stored or
modified.
"""
# TODO: copy_body? gonna need a good signature regex.
# TODO: might worry about __closure__?
if not callable(func):
raise TypeError('expected callable object, not %r' % (func,))
kwargs = {'name': func.__name__,
'doc': func.__doc__,
'module': func.__module__,
'annotations': getattr(func, "__annotations__", {}),
'dict': getattr(func, '__dict__', {})}
kwargs.update(cls._argspec_to_dict(func))
# _inspect_iscoroutinefunction always False in Py3?
# _inspect_iscoroutinefunction = lambda func: False
# if _inspect_iscoroutinefunction(func):
# kwargs['is_async'] = True
return cls(**kwargs)
def get_func(self, execdict=None, add_source=True, with_dict=True):
"""Compile and return a new function based on the current values of
the FunctionBuilder.
Args:
execdict (dict): The dictionary representing the scope in
which the compilation should take place. Defaults to an empty
dict.
add_source (bool): Whether to add the source used to a
special ``__source__`` attribute on the resulting
function. Defaults to True.
with_dict (bool): Add any custom attributes, if
applicable. Defaults to True.
To see an example of usage, see the implementation of
:func:`~boltons.funcutils.wraps`.
"""
execdict = execdict or {}
body = self.body or self._default_body
tmpl = 'def {name}{sig_str}:'
tmpl += '\n{body}'
if self.is_async:
tmpl = 'async ' + tmpl
body = _indent(self.body, ' ' * self.indent)
name = self.name.replace('<', '_').replace('>', '_') # lambdas
src = tmpl.format(name=name, sig_str=self.get_sig_str(with_annotations=False),
doc=self.doc, body=body)
self._compile(src, execdict)
func = execdict[name]
func.__name__ = self.name
func.__doc__ = self.doc
func.__defaults__ = self.defaults
func.__kwdefaults__ = self.kwonlydefaults
func.__annotations__ = self.annotations
if with_dict:
func.__dict__.update(self.dict)
func.__module__ = self.module
# TODO: caller module fallback?
if add_source:
func.__source__ = src
return func
def get_defaults_dict(self):
"""Get a dictionary of function arguments with defaults and the
respective values.
"""
ret = dict(reversed(list(zip(reversed(self.args),
reversed(self.defaults or [])))))
kwonlydefaults = getattr(self, 'kwonlydefaults', None)
if kwonlydefaults:
ret.update(kwonlydefaults)
return ret
def get_arg_names(self, only_required=False):
arg_names = tuple(self.args) + tuple(getattr(self, 'kwonlyargs', ()))
if only_required:
defaults_dict = self.get_defaults_dict()
arg_names = tuple([an for an in arg_names if an not in defaults_dict])
return arg_names
def add_arg(self, arg_name, default=NO_DEFAULT, kwonly=False):
"""Add an argument with optional *default* (defaults to
``funcutils.NO_DEFAULT``). Pass *kwonly=True* to add a
keyword-only argument
"""
if arg_name in self.args:
raise ExistingArgument('arg %r already in func %s arg list' % (arg_name, self.name))
if arg_name in self.kwonlyargs:
raise ExistingArgument('arg %r already in func %s kwonly arg list' % (arg_name, self.name))
if not kwonly:
self.args.append(arg_name)
if default is not NO_DEFAULT:
self.defaults = (self.defaults or ()) + (default,)
else:
self.kwonlyargs.append(arg_name)
if default is not NO_DEFAULT:
self.kwonlydefaults[arg_name] = default
return
def remove_arg(self, arg_name):
"""Remove an argument from this FunctionBuilder's argument list. The
resulting function will have one less argument per call to
this function.
Args:
arg_name (str): The name of the argument to remove.
Raises a :exc:`ValueError` if the argument is not present.
"""
args = self.args
d_dict = self.get_defaults_dict()
try:
args.remove(arg_name)
except ValueError:
try:
self.kwonlyargs.remove(arg_name)
except (AttributeError, ValueError):
# py2, or py3 and missing from both
exc = MissingArgument('arg %r not found in %s argument list:'
' %r' % (arg_name, self.name, args))
exc.arg_name = arg_name
raise exc
else:
self.kwonlydefaults.pop(arg_name, None)
else:
d_dict.pop(arg_name, None)
self.defaults = tuple([d_dict[a] for a in args if a in d_dict])
return
def _compile(self, src, execdict):
filename = ('<%s-%d>'
% (self.filename, next(self._compile_count),))
try:
code = compile(src, filename, 'single')
exec(code, execdict)
except Exception:
raise
return execdict
def _parse_wraps_expected(expected):
# expected takes a pretty powerful argument, it's processed
# here. admittedly this would be less trouble if I relied on
# OrderedDict (there's an impl of that in the commit history if
# you look
if expected is None:
expected = []
elif isinstance(expected, str):
expected = [(expected, NO_DEFAULT)]
expected_items = []
try:
expected_iter = iter(expected)
except TypeError as e:
raise ValueError('"expected" takes string name, sequence of string names,'
' iterable of (name, default) pairs, or a mapping of '
' {name: default}, not %r (got: %r)' % (expected, e))
for argname in expected_iter:
if isinstance(argname, str):
# dict keys and bare strings
try:
default = expected[argname]
except TypeError:
default = NO_DEFAULT
else:
# pairs
try:
argname, default = argname
except (TypeError, ValueError):
raise ValueError('"expected" takes string name, sequence of string names,'
' iterable of (name, default) pairs, or a mapping of '
' {name: default}, not %r')
if not isinstance(argname, str):
raise ValueError('all "expected" argnames must be strings, not %r' % (argname,))
expected_items.append((argname, default))
return expected_items
def wraps(func, injected=None, expected=None, **kw):
"""Modeled after the built-in :func:`functools.wraps`, this function is
used to make your decorator's wrapper functions reflect the
wrapped function's:
* Name
* Documentation
* Module
* Signature
The built-in :func:`functools.wraps` copies the first three, but
does not copy the signature. This version of ``wraps`` can copy
the inner function's signature exactly, allowing seamless usage
and :mod:`introspection <inspect>`. Usage is identical to the
built-in version::
>>> from py2misc.py2store.mint import wraps
>>>
>>> def print_return(func):
... @wraps(func)
... def wrapper(*args, **kwargs):
... ret = func(*args, **kwargs)
... print(ret)
... return ret
... return wrapper
...
>>> @print_return
... def example():
... '''docstring'''
... return 'example return value'
>>>
>>> val = example()
example return value
>>> example.__name__
'example'
>>> example.__doc__
'docstring'
In addition, the boltons version of wraps supports modifying the
outer signature based on the inner signature. By passing a list of
*injected* argument names, those arguments will be removed from
the outer wrapper's signature, allowing your decorator to provide
arguments that aren't passed in.
Args:
func (function): The callable whose attributes are to be copied.
injected (list): An optional list of argument names which
should not appear in the new wrapper's signature.
expected (list): An optional list of argument names (or (name,
default) pairs) representing new arguments introduced by
the wrapper (the opposite of *injected*). See
:meth:`FunctionBuilder.add_arg()` for more details.
update_dict (bool): Whether to copy other, non-standard
attributes of *func* over to the wrapper. Defaults to True.
inject_to_varkw (bool): Ignore missing arguments when a
``**kwargs``-type catch-all is present. Defaults to True.
For more in-depth wrapping of functions, see the
:class:`FunctionBuilder` type, on which wraps was built.
"""
if injected is None:
injected = []
elif isinstance(injected, str):
injected = [injected]
else:
injected = list(injected)
expected_items = _parse_wraps_expected(expected)
if isinstance(func, (classmethod, staticmethod)):
raise TypeError('wraps does not support wrapping classmethods and'
' staticmethods, change the order of wrapping to'
' wrap the underlying function: %r'
% (getattr(func, '__func__', None),))
update_dict = kw.pop('update_dict', True)
inject_to_varkw = kw.pop('inject_to_varkw', True)
if kw:
raise TypeError('unexpected kwargs: %r' % kw.keys())
fb = FunctionBuilder.from_func(func)
for arg in injected:
try:
fb.remove_arg(arg)
except MissingArgument:
if inject_to_varkw and fb.varkw is not None:
continue # keyword arg will be caught by the varkw
raise
for arg, default in expected_items:
fb.add_arg(arg, default) # may raise ExistingArgument
if fb.is_async:
fb.body = 'return await _call(%s)' % fb.get_invocation_str()
else:
fb.body = 'return _call(%s)' % fb.get_invocation_str()
def wrapper_wrapper(wrapper_func):
execdict = dict(_call=wrapper_func, _func=func)
fully_wrapped = fb.get_func(execdict, with_dict=update_dict)
fully_wrapped.__wrapped__ = func # ref to the original function (#115)
return fully_wrapped
return wrapper_wrapper
|
StarcoderdataPython
|
100141
|
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from sklearn.utils import shuffle
def wide_net(x):
with slim.arg_scope([slim.fully_connected],
activation_fn=tf.nn.relu,
weights_initializer=tf.orthogonal_initializer(),
biases_initializer=tf.constant_initializer(0.0)):
h = slim.fully_connected(x, 1000)
return slim.fully_connected(h, 10, activation_fn=None)
def deep_net(x):
with slim.arg_scope([slim.fully_connected],
activation_fn=tf.nn.relu,
weights_initializer=tf.orthogonal_initializer(),
biases_initializer=tf.constant_initializer(0.0)):
h = slim.stack(x, slim.fully_connected, [200] * 17)
return slim.fully_connected(h, 10, activation_fn=None)
################################################################################
def batch(ims, labels, batchsize):
ims, labels = shuffle(ims, labels)
shape = ims.shape
for i in range(len(labels)//batchsize):
yield (i, ims[i*batchsize:(i+1)*batchsize, ...],
labels[i*batchsize:(i+1)*batchsize, ...])
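# Hedged usage sketch (editor addition); the session, train_op, placeholders
# and shapes below are illustrative, not defined in this module:
#
#     for step, batch_ims, batch_labels in batch(train_ims, train_labels, batchsize=128):
#         sess.run(train_op, {x: batch_ims, T: batch_labels})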
def validate(sess, writer, step, x, T, valid_ims, valid_labels, batchsize, name=''):
### Validate classifier
metrics = tf.get_collection('METRICS')
updates = tf.get_collection('METRIC_UPDATES')
variables = tf.get_collection('LOCAL_VARIABLES', scope='metrics')
sess.run(tf.variables_initializer(variables))
# eval and aggregate
for _, batch_ims, batch_labels in batch(valid_ims, valid_labels, batchsize):
sess.run(updates, {x: batch_ims, T: batch_labels})
values = sess.run(metrics, {x: batch_ims, T: batch_labels})
# write summary
for k, v in zip(metrics, values):
add_summary(writer, step, 'valid/'+name, float(v))
def add_summary(writer, step, name, val):
summ = tf.Summary(value=[tf.Summary.Value(tag=name, simple_value=val)])
writer.add_summary(summ, step)
def get_loss_fn(name, logits):
return
|
StarcoderdataPython
|
1781737
|
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django.contrib.auth.forms import AuthenticationForm
class AuthForm(AuthenticationForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.add_input(Submit('submit', 'Submit'))
|
StarcoderdataPython
|
3253389
|
import logging
import re
# from discord.commands import Option
import discord
from alttprbot import models
from alttprbot.alttprgen.smz3multi import generate_multiworld
from discord.ext import commands
from slugify import slugify
PRESET_OPTIONS = {
'sm': [
discord.SelectOption(label="casual_full"),
discord.SelectOption(label="casual_split"),
discord.SelectOption(label="tournament_full"),
discord.SelectOption(label="tournament_split"),
],
'smz3': [
discord.SelectOption(label="casual"),
discord.SelectOption(label="casualkeys"),
discord.SelectOption(label="hard"),
discord.SelectOption(label="hardkeys"),
discord.SelectOption(label="normal"),
discord.SelectOption(label="normalkeys"),
]
}
# class MultiworldPresetDropdown(discord.ui.Select):
# def __init__(self, randomizer, mwcreateview):
# self.mwcreateview = mwcreateview
# super().__init__(
# placeholder="Choose a preset...",
# min_values=1,
# max_values=1,
# options=PRESET_OPTIONS[randomizer],
# row=1
# )
# async def callback(self, interaction: discord.Interaction):
# self.mwcreateview.preset_name = self.values[0]
class MultiworldSignupView(discord.ui.View):
def __init__(self):
super().__init__(timeout=None)
@discord.ui.select(
placeholder="Choose a randomizer",
min_values=1,
max_values=1,
options=[
discord.SelectOption(label="smz3", description="SM + ALTTP Combo Randomizer"),
discord.SelectOption(label="sm", description="Super Metroid Randomizer")
],
custom_id="sahabot:multiworld:randomizer",
row=1
)
async def randomizer(self, select: discord.ui.Select, interaction: discord.Interaction):
embed = interaction.message.embeds[0]
multiworld = await self.create_or_update_multiworld(interaction)
if not multiworld.owner_id == interaction.user.id:
await interaction.response.send_message("You are not authorized to set the randomizer.", ephemeral=True)
return
multiworld.randomizer = select.values[0]
multiworld.preset = None
embed = interaction.message.embeds[0]
embed = set_embed_field("Randomizer", multiworld.randomizer, embed)
embed = set_embed_field("Preset", "Not yet chosen", embed)
await multiworld.save()
preset_select: discord.ui.Select = discord.utils.get(self.children, custom_id='sahabot:multiworld:preset')
preset_select.disabled = False
preset_select.options = PRESET_OPTIONS[multiworld.randomizer]
await interaction.response.edit_message(embed=embed, view=self)
@discord.ui.select(
placeholder="Choose a preset",
min_values=1,
max_values=1,
options=[
discord.SelectOption(label="none", description="Choose a randomizer first!")
],
custom_id="sahabot:multiworld:preset",
row=2,
disabled=True
)
async def preset(self, select: discord.ui.Select, interaction: discord.Interaction):
embed = interaction.message.embeds[0]
multiworld = await self.create_or_update_multiworld(interaction)
if not multiworld.owner_id == interaction.user.id:
await interaction.response.send_message("You are not authorized set the preset.", ephemeral=True)
return
multiworld.preset = select.values[0]
embed = interaction.message.embeds[0]
embed = set_embed_field("Preset", multiworld.preset, embed)
await multiworld.save(update_fields=['preset'])
await interaction.response.edit_message(embed=embed, view=self)
@discord.ui.button(label="Join", style=discord.ButtonStyle.blurple, custom_id="sahabot:multiworld:join", row=3)
async def join(self, button: discord.ui.Button, interaction: discord.Interaction):
multiworld = await self.create_or_update_multiworld(interaction)
entrant = await models.MultiworldEntrant.get_or_none(discord_user_id=interaction.user.id, multiworld=multiworld)
if entrant:
await interaction.response.pong()
return
await models.MultiworldEntrant.create(discord_user_id=interaction.user.id, multiworld=multiworld)
await self.update_player_list(interaction.message)
await interaction.response.pong()
@discord.ui.button(label="Leave", style=discord.ButtonStyle.secondary, custom_id="sahabot:multiworld:leave", row=3)
async def leave(self, button: discord.ui.Button, interaction: discord.Interaction):
multiworld = await self.create_or_update_multiworld(interaction)
entrant = await models.MultiworldEntrant.get_or_none(discord_user_id=interaction.user.id, multiworld=multiworld)
if entrant:
await entrant.delete()
await self.update_player_list(interaction.message)
await interaction.response.pong()
@discord.ui.button(label="Start", style=discord.ButtonStyle.green, custom_id="sahabot:multiworld:start", row=4)
async def start(self, button: discord.ui.Button, interaction: discord.Interaction):
message = interaction.message
embed = message.embeds[0]
multiworld = await self.create_or_update_multiworld(interaction)
if not multiworld.owner_id == interaction.user.id:
await interaction.response.send_message("You are not authorized to start this game.", ephemeral=True)
return
if not multiworld.randomizer or not multiworld.preset:
await interaction.response.send_message("Please ensure you choose both a randomizer and preset before starting.", ephemeral=True)
return
embed = set_embed_field("Status", "⌚ Game closed for entry. Rolling...", embed)
await message.edit(embed=embed)
await self.update_player_list(message)
players = await self.get_player_members(message)
if len(players) < 2:
embed = set_embed_field("Status", "👍 Open for entry", embed)
await interaction.message.edit(embed=embed, view=self)
await interaction.response.send_message("You must have at least two players to create a multiworld.", ephemeral=True)
return
await interaction.response.defer()
player_names = [slugify(p.display_name, lowercase=False, max_length=19, separator=" ") for p in players]
seed = await generate_multiworld(multiworld.preset, player_names, tournament=False, randomizer=multiworld.randomizer)
dm_embed = discord.Embed(
title=f"{multiworld.randomizer.upper()} Multiworld Game"
)
dm_embed.add_field(name="Players", value='\n'.join([p.name for p in players]), inline=False)
dm_embed.add_field(name="Game Room", value=seed.url, inline=False)
for player in players:
try:
await player.send(embed=dm_embed)
except Exception:
logging.exception(f"Unable to send DM to {player.mention}!")
embed = set_embed_field("Status", "✅ Game started! Check your DMs.", embed)
multiworld.status = "CLOSED"
await multiworld.save()
for item in self.children:
item.disabled = True
await interaction.message.edit(embed=embed, view=self)
@discord.ui.button(label="Cancel", style=discord.ButtonStyle.red, custom_id="sahabot:multiworld:cancel", row=4)
async def cancel(self, button: discord.ui.Button, interaction: discord.Interaction):
message = interaction.message
embed = message.embeds[0]
multiworld = await self.create_or_update_multiworld(interaction)
if not multiworld.owner_id == interaction.user.id:
await interaction.response.send_message("You are not authorized to cancel this game.", ephemeral=True)
return
embed = interaction.message.embeds[0]
embed = set_embed_field("Status", "❌ Cancelled.", embed)
multiworld.status = "CANCELLED"
await multiworld.save()
for item in self.children:
item.disabled = True
await interaction.response.edit_message(embed=embed, view=self)
async def update_player_list(self, message: discord.Message):
embed = message.embeds[0]
player_list_resp = await models.MultiworldEntrant.filter(multiworld__message_id=message.id)
mentions = [f"<@{p.discord_user_id}>" for p in player_list_resp]
if mentions:
embed = set_embed_field("Players", '\n'.join(mentions), embed)
else:
embed = set_embed_field("Players", 'No players yet.', embed)
await message.edit(embed=embed, view=self)
return player_list_resp
def allow_start(self, multiworld: models.Multiworld, entrants: models.MultiworldEntrant):
return entrants > 2 and multiworld.preset is not None and multiworld.randomizer is not None
async def get_player_members(self, message: discord.Message):
guild = message.guild
embed = message.embeds[0]
player_list_resp = await models.MultiworldEntrant.filter(multiworld__message_id=message.id)
entrant_discords = [await guild.fetch_member(p.discord_user_id) for p in player_list_resp]
mentions = [p.mention for p in entrant_discords]
if mentions:
embed = set_embed_field("Players", '\n'.join(mentions), embed)
else:
embed = set_embed_field("Players", 'No players yet.', embed)
return entrant_discords
async def create_or_update_multiworld(self, interaction: discord.Interaction):
embed = interaction.message.embeds[0]
multiworld, _ = await models.Multiworld.update_or_create(message_id=interaction.message.id, defaults={'owner_id': get_owner(embed, interaction.guild).id, 'status': "STARTED"})
return multiworld
# class MultiworldCreateView(discord.ui.View):
# def __init__(self, randomizer):
# super().__init__(timeout=300)
# self.randomizer = randomizer
# self.preset_name = None
# self.add_item(MultiworldPresetDropdown(self.randomizer, self))
# @discord.ui.button(label="Create", style=discord.ButtonStyle.green, row=2)
# async def create(self, button: discord.ui.Button, interaction: discord.Interaction):
# embed = discord.Embed(
# title=f'{self.randomizer.upper()} Multiworld Game',
# description=(
# 'A new multiworld game has been initiated, click "Join" to join. Click "Leave" to leave.\n'
# f'When everyone is ready the game creator, {interaction.user.mention}, can click "Start" to create a session.\n'
# f'The game creator can click "Cancel" to cancel this game.'
# ),
# color=discord.Color.dark_blue()
# )
# embed.add_field(name="Status", value="👍 Open for entry", inline=False)
# embed.add_field(name="Preset", value=f"[{self.preset_name.lower()}](https://github.com/tcprescott/sahasrahbot/blob/master/presets/{self.randomizer.lower()}/{self.preset_name.lower()}.yaml)", inline=False)
# embed.add_field(name="Players", value="No players yet.", inline=False)
# message = await interaction.channel.send(embed=embed)
# await models.Multiworld.create(message_id=message.id, owner_id=interaction.user.id, randomizer=self.randomizer, preset=self.preset_name, status="STARTED")
# await message.edit(view=MultiworldSignupView())
class Multiworld(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.persistent_views_added = False
@ commands.Cog.listener()
async def on_ready(self):
if not self.persistent_views_added:
self.bot.add_view(MultiworldSignupView())
self.persistent_views_added = True
@commands.slash_command(name='multiworld')
async def multiworld(self, ctx: discord.commands.ApplicationContext):
"""
Creates a multiworld session
"""
embed = discord.Embed(
title=f'Multiworld Game',
description=(
'A new multiworld game has been initiated, click "Join" to join. Click "Leave" to leave.\n'
f'When everyone is ready the game creator, {ctx.author.mention}, can click "Start" to create a session.\n'
f'The game creator can click "Cancel" to cancel this game.'
),
color=discord.Color.dark_blue()
)
embed.add_field(name="Owner", value=ctx.author.mention, inline=False)
embed.add_field(name="Status", value="👍 Open for entry", inline=False)
embed.add_field(name="Randomizer", value="Not yet chosen", inline=False)
embed.add_field(name="Preset", value="Not yet chosen", inline=False)
embed.add_field(name="Players", value="No players yet.", inline=False)
await ctx.respond(embed=embed, view=MultiworldSignupView())
# await models.Multiworld.create(message_id=ctx.interaction, owner_id=ctx.author.id, status="STARTED")
def set_embed_field(name: str, value: str, embed: discord.Embed) -> discord.Embed:
for idx, field in enumerate(embed.fields):
if field.name == name:
embed.set_field_at(idx, name=name, value=value, inline=field.inline)
return embed
def get_embed_field(name: str, embed: discord.Embed) -> str:
for field in embed.fields:
if field.name == name:
return field.value
return None
def get_owner(embed: discord.Embed, guild: discord.Guild) -> discord.Member:
value = get_embed_field("Owner", embed)
if value is None:
return
user_id = int(re.search('<@([0-9]*)>', value).groups()[0])
return guild.get_member(user_id)
def setup(bot):
bot.add_cog(Multiworld(bot))
|
StarcoderdataPython
|
3312278
|
<reponame>fabiommendes/capacidade_hospitalar
from django.apps import AppConfig as DjangoAppConfig
from django.utils.translation import gettext_lazy as _
class AppConfig(DjangoAppConfig):
name = "hcap_geo"
verbose_name = _("Geography")
|
StarcoderdataPython
|
115990
|
from . import DutchDraw
from .DutchDraw import *
|
StarcoderdataPython
|
3292512
|
# coding=utf-8
"""A little helper allowing to mock requests very effectively for tests"""
from __future__ import unicode_literals
import functools
import json
from contextlib import contextmanager
import logging
import requests
from mock import patch, MagicMock
@contextmanager
def patch_requests(mapping=None, allowed_domains=None, allowed_methods=None): # pylint: disable=too-complex
"""
mapping is a dict of str => data
so that "toto" => {"response" => {"success" : 1}, "json" => True/False} means that
any url called with *toto* will return {"success" : 1}
json part is optional
allowed_domains can be used in place of a mapping if you don't care about specifying specific return values
but is required so as to ensure that you are only patching the specific domains that your test expects to hit.
allowed_methods limits the methods that can be called on requests
"""
if mapping is None:
if allowed_domains is None:
raise ValueError('patch_requests(): you must specify a mapping or a list of allowed_domains')
mapping = {domain: {} for domain in allowed_domains}
def _request_response_from_query(_, url, **kwargs): # pylint: disable=C0111,W0613
return _response(url)
def _other_response_from_query(url, **kwargs): # pylint: disable=C0111,W0613
return _response(url)
def _response(url):
"""
If the requested URL is found in the mapping, returns the mocked response as configured
"""
logging.debug("mocking %s", url)
for (token, config) in mapping.iteritems():
if token in url:
resp = requests.Response()
resp.url = config.get('url', url)
resp.status_code = config.get('http_code', 200)
if config.get("json", True) and 'response' in config:
resp._content = json.dumps(config["response"]) # pylint: disable=W0212
elif config.get("stream", False):
resp.raw = MagicMock(
stream=MagicMock(return_value=config["response"])
)
else:
# str: Requests uses str as bytes internally, at least on Python 2
resp._content = str(config.get("response", '')) # pylint: disable=W0212
if config.get('headers'):
resp.headers = config.get('headers')
return resp
raise Exception("Requests mock called with unexpected URL, nothing in the mapping for %s" % url)
if allowed_methods is None:
allowed_methods = ['get', 'post', 'put', 'head', 'patch', 'options', 'delete']
methods_map = {method: MagicMock(side_effect=_other_response_from_query) for method in allowed_methods}
methods_map['request'] = MagicMock(side_effect=_request_response_from_query)
with patch.multiple('requests', **methods_map):
with patch.multiple('requests.Session', **methods_map):
yield {k: getattr(requests, k) for k in methods_map}
def patch_requests_decorator(mapping):
"""
Use patch_requests as decorator.
"""
def decorator(func): # pylint: disable=C0111
@functools.wraps(func)
def inner(*args, **kwargs): # pylint: disable=C0111
with patch_requests(mapping):
return func(*args, **kwargs)
return inner
return decorator
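# Hedged usage sketch (editor addition); the domain and payloads are examples:
#
#     with patch_requests({"api.example.com": {"response": {"success": 1}}}):
#         resp = requests.get("http://api.example.com/v1/ping")
#         assert resp.json() == {"success": 1}
#
#     @patch_requests_decorator({"api.example.com": {"response": {"ok": True}}})
#     def test_ping():
#         assert requests.get("http://api.example.com/v1/ping").json() == {"ok": True}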
|
StarcoderdataPython
|
3209042
|
import os
from django.conf import settings as project_settings
from django.test.client import RequestFactory
from django.utils.text import slugify
from rest_framework.renderers import JSONRenderer
from slackchatbakery.utils.aws import defaults, get_bucket
from slackchatbakery.conf import settings
class StaticsPublishingMixin(object):
"""
Handles publishing serialized context to S3.
"""
def get_request(self, production=False, subpath=""):
"""Construct a request we can use to render the view.
Send environment variable in querystring to determine whether
we're using development static file URLs or production."""
if production:
env = {"env": "prod"}
else:
env = {"env": "dev"}
kwargs = {**{"subpath": subpath}, **env}
return RequestFactory().get("", kwargs)
def get_serialized_context(self):
"""OVERWRITE this method to return serialized context data.
Use the serializer for the page you would hit.
Used to bake out serialized context data.
"""
return {}
def publish_serialized_data(self, subpath="", **kwargs):
"""Publishes serialized data."""
data = self.get_serialized_data(**kwargs)
json_string = JSONRenderer().render(data) # noqa
key = os.path.join(
settings.S3_UPLOAD_ROOT,
self.get_publish_path(),
os.path.join(subpath, "data.json"),
)
print(">>> Publish data to: ", key)
bucket = get_bucket()
bucket.put_object(
Key=key,
ACL=defaults.ACL,
Body=json_string,
CacheControl=defaults.CACHE_HEADER,
ContentType="application/json",
)
return data
|
StarcoderdataPython
|
1799810
|
# Copyright (c) 2021 <NAME>
# This code is part of the pymscrape project
import copy
import tkinter as tk
from tkinter import ttk
from PIL import Image, ImageTk
import numpy as np
import cv2 as cv
import random
# Base tkinter scroll/zoom class based on
# https://stackoverflow.com/questions/41656176/tkinter-canvas-zoom-move-pan
class AutoScrollbar(ttk.Scrollbar):
''' A scrollbar that hides itself if it's not needed.
Works only if you use the grid geometry manager '''
def set(self, lo, hi):
if float(lo) <= 0.0 and float(hi) >= 1.0:
self.grid_remove()
else:
self.grid()
ttk.Scrollbar.set(self, lo, hi)
def pack(self, **kw):
raise tk.TclError('Cannot use pack with this widget')
def place(self, **kw):
raise tk.TclError('Cannot use place with this widget')
class Zoom_Scroll(ttk.Frame):
def __init__(self, mainframe, image, title='Zoom and Scroll'):
ttk.Frame.__init__(self, master=mainframe)
self.master.title(title)
# Buttons
b_done = tk.Button(
self.master, text="Done (Enter)", command=self.quit)
b_done.grid(row=0, column=0, sticky='w')
c = 10
# Vertical and horizontal scrollbars for canvas
vbar = AutoScrollbar(self.master, orient='vertical')
hbar = AutoScrollbar(self.master, orient='horizontal')
vbar.grid(row=1, column=c, sticky='ns')
hbar.grid(row=2, column=0, columnspan=c, sticky='we')
# Create canvas and put image on it
self.canvas = tk.Canvas(
self.master, highlightthickness=0,
xscrollcommand=hbar.set, yscrollcommand=vbar.set
)
self.canvas.grid(row=1, column=0, columnspan=c, sticky='nswe')
self.canvas.update() # wait till canvas is created
vbar.configure(command=self.scroll_y) # bind scrollbars to the canvas
hbar.configure(command=self.scroll_x)
# Make the canvas expandable
self.master.rowconfigure(1, weight=1)
[self.master.columnconfigure(i, weight=1) for i in range(c)]
# Bind events to the Canvas
self.canvas.bind('<Configure>', self.show_image) # canvas is resized
self.canvas.bind('<ButtonPress-1>', self.move_from)
self.canvas.bind('<B1-Motion>', self.move_to)
self.canvas.bind('<MouseWheel>', self.wheel) # with Windows and MacOS
self.canvas.bind('<Button-5>', self.wheel) # only with Linux
self.canvas.bind('<Button-4>', self.wheel) # only with Linux
self.canvas.bind('<Return>', self.quit)
self.canvas.focus_set()
self.image = Image.fromarray(image)
self.width, self.height = self.image.size
self.imscale = 1.0 # scale for the canvas image
self.delta = 1.3 # zoom magnitude
# Put image into rectangle, use to set proper coordinates to the image
self.container = self.canvas.create_rectangle(
0, 0, self.width, self.height, width=0)
self.show_image()
def quit(self, event=None):
self.master.destroy()
def scroll_y(self, *args, **kwargs):
''' Scroll canvas vertically and redraw the image '''
self.canvas.yview(*args, **kwargs) # scroll vertically
self.show_image() # redraw the image
def scroll_x(self, *args, **kwargs):
''' Scroll canvas horizontally and redraw the image '''
self.canvas.xview(*args, **kwargs) # scroll horizontally
self.show_image() # redraw the image
def move_from(self, event):
''' Remember previous coordinates for scrolling with the mouse '''
self.canvas.scan_mark(event.x, event.y)
def move_to(self, event):
''' Drag (move) canvas to the new position '''
self.canvas.scan_dragto(event.x, event.y, gain=1)
self.show_image() # redraw the image
def wheel(self, event):
''' Zoom with mouse wheel '''
x = self.canvas.canvasx(event.x)
y = self.canvas.canvasy(event.y)
bbox = self.canvas.bbox(self.container) # get image area
if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:
pass
else:
return # zoom only inside image area
scale = 1.0
# Respond to Linux (event.num) or Windows (event.delta) wheel event
if event.num == 5 or event.delta == -120: # scroll down
i = min(self.width, self.height)
if int(i * self.imscale) < 30:
return # image is less than 30 pixels
self.imscale /= self.delta
scale /= self.delta
if event.num == 4 or event.delta == 120: # scroll up
i = min(self.canvas.winfo_width(), self.canvas.winfo_height())
if i < self.imscale:
return # 1 pixel is bigger than the visible area
self.imscale *= self.delta
scale *= self.delta
self.canvas.scale('all', x, y, scale, scale) # rescale all objects
self.show_image()
def get_coords(self, x, y):
bbox = self.canvas.bbox(self.container) # get image area
x_plot = self.canvas.canvasx(0) + x
y_plot = self.canvas.canvasy(0) + y
x_true = (x_plot - bbox[0])/self.imscale
y_true = (y_plot - bbox[1])/self.imscale
return x_true, y_true, x_plot, y_plot
def get_plot_coords(self, x_true, y_true):
bbox = self.canvas.bbox(self.container)
x_plot = self.imscale*x_true + bbox[0]
y_plot = self.imscale*y_true + bbox[1]
return x_plot, y_plot
def show_image(self, event=None):
''' Show image on the Canvas '''
bbox1 = self.canvas.bbox(self.container) # get image area
# Remove 1 pixel shift at the sides of the bbox1
bbox1 = (bbox1[0] + 1, bbox1[1] + 1, bbox1[2] - 1, bbox1[3] - 1)
bbox2 = (self.canvas.canvasx(0), # get visible area of the canvas
self.canvas.canvasy(0),
self.canvas.canvasx(self.canvas.winfo_width()),
self.canvas.canvasy(self.canvas.winfo_height()))
bbox = [min(bbox1[0], bbox2[0]), min(bbox1[1], bbox2[1]),
max(bbox1[2], bbox2[2]), max(bbox1[3], bbox2[3])]
if bbox[0] == bbox2[0] and bbox[2] == bbox2[2]:
bbox[0] = bbox1[0]
bbox[2] = bbox1[2]
if bbox[1] == bbox2[1] and bbox[3] == bbox2[3]:
bbox[1] = bbox1[1]
bbox[3] = bbox1[3]
self.canvas.configure(scrollregion=bbox) # set scroll region
x1 = max(bbox2[0] - bbox1[0], 0)
y1 = max(bbox2[1] - bbox1[1], 0)
x2 = min(bbox2[2], bbox1[2]) - bbox1[0]
y2 = min(bbox2[3], bbox1[3]) - bbox1[1]
if int(x2 - x1) > 0 and int(y2 - y1) > 0:
x = min(int(x2 / self.imscale), self.width)
y = min(int(y2 / self.imscale), self.height)
image = self.image.crop(
(int(x1 / self.imscale), int(y1 / self.imscale), x, y))
imagetk = ImageTk.PhotoImage(
image.resize((int(x2 - x1), int(y2 - y1))))
imageid = self.canvas.create_image(
max(bbox2[0], bbox1[0]), max(bbox2[1], bbox1[1]),
anchor='nw', image=imagetk)
self.canvas.lower(imageid)
self.canvas.imagetk = imagetk
class Get_Legend_Box(Zoom_Scroll):
def __init__(
self, mainframe, image,
        title='Right click to select top left and bottom '
+ 'right corners of legend box.'
):
Zoom_Scroll.__init__(self, mainframe, image, title=title)
b_delete = tk.Button(
self.master, text="Delete Box (d)", command=self.delete_box
)
b_delete.grid(row=0, column=1)
self.p1 = None
self.p2 = None
self.box_r = None
self.p1_r = None
self.p2_r = None
self.canvas.bind("<Button 3>", self.draw_box)
self.canvas.bind('<Return>', self.quit)
self.canvas.bind('<d>', self.delete_box)
self.canvas.focus_set()
def delete_box(self, event=None):
        for obj in (self.box_r, self.p1_r, self.p2_r):
            self.canvas.delete(obj)
self.p1 = None
self.p2 = None
self.box_r = None
def draw_box(self, event):
x, y, x_plot, y_plot = self.get_coords(event.x, event.y)
if not self.p1:
self.canvas.delete(self.p1_r)
self.p1 = [x, y]
self.p1_r = self.canvas.create_rectangle(
x_plot-2*self.imscale, y_plot-2*self.imscale,
x_plot+2*self.imscale, y_plot+2*self.imscale,
width=1, fill='red', outline='red'
)
elif not self.p2:
if (x > self.p1[0]) and (y > self.p1[1]):
self.canvas.delete(self.p2_r)
self.p2 = [x, y]
self.p2_r = self.canvas.create_rectangle(
x_plot-2*self.imscale, y_plot-2*self.imscale,
x_plot+2*self.imscale, y_plot+2*self.imscale,
width=1, fill='red', outline='red'
)
p1_plot = self.get_plot_coords(self.p1[0], self.p1[1])
self.box_r = self.canvas.create_rectangle(
p1_plot[0], p1_plot[1], x_plot, y_plot,
width=1, outline='red'
)
else:
            for obj in (self.box_r, self.p1_r, self.p2_r):
                self.canvas.delete(obj)
self.p1 = None
self.p2 = None
self.box_r = None
class Choose_Points(Zoom_Scroll):
def __init__(
self, mainframe, image, text_list, title='Right click to record point.'
):
Zoom_Scroll.__init__(self, mainframe, image, title=title)
self.points = []
self.names = []
self.points_r = []
self.names_r = []
self.text_list = copy.deepcopy(text_list)
b_delete = tk.Button(
self.master, text="Delete Last Point (d)",
command=self.delete_last_point)
b_delete.grid(row=0, column=1)
self.canvas.bind('<ButtonPress-3>', self.record_point)
self.canvas.bind('<d>', self.delete_last_point)
self.canvas.focus_set()
def delete_last_point(self, event=None):
if len(self.points) > 0:
self.points.pop()
self.names.pop()
self.canvas.delete(self.names_r[-1])
self.canvas.delete(self.points_r[-1])
self.names_r.pop()
self.points_r.pop()
def record_point(self, event):
        ''' Record the clicked point and prompt for a label '''
x, y, x_plot, y_plot = self.get_coords(event.x, event.y)
on_image = (
0 <= x <= self.image.size[0]) * (0 <= y <= self.image.size[1])
if on_image:
r = self.canvas.create_rectangle(
x_plot-2*self.imscale, y_plot-2*self.imscale,
x_plot+2*self.imscale, y_plot+2*self.imscale,
width=1, fill='red', outline='red')
self.points_r.append(r)
self.new_window = tk.Toplevel(self.master)
self.new_window.lift()
self.app = Name_Polygons_Popup(
self.new_window, self.text_list)
self.master.wait_window(self.new_window)
self.canvas.focus_set()
if 0 <= self.app.v.get() < len(self.text_list):
name = self.text_list[self.app.v.get()]
elif self.app.v.get() == -1:
name = self.app.n.get()
r = self.canvas.create_text(
x_plot+10*self.imscale, y_plot, anchor='w',
text='({}, {}) '.format(round(x, 4), round(y, 4)) + name,
fill='red', font=('Arial', 14, 'bold'))
self.names_r.append(r)
self.points.append((x, y))
self.names.append(name)
class Name_Polygons(Zoom_Scroll):
def __init__(
self, mainframe, image, coords, text_list,
names=None, title='Confirm Object Names'):
Zoom_Scroll.__init__(self, mainframe, image, title=title)
self.text_list = copy.deepcopy(text_list)
# Convert line coords into thin poly coords
self.coords = copy.deepcopy(coords)
for i in range(len(self.coords)):
if not np.all(self.coords[i][0] == self.coords[i][-1]):
thick_im = np.zeros(image.shape[:2]).astype(np.uint8)
for j in range(len(self.coords[i])-1):
thick_im += cv.line(
thick_im, tuple(np.squeeze(self.coords[i][j])),
tuple(np.squeeze(self.coords[i][j+1])), 1, 2)
thick_im = (thick_im > 0).astype(np.uint8)
thick_line = cv.findContours(
thick_im, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_L1)[0][0]
self.coords[i] = thick_line
self.com = []
self.raw_image = copy.deepcopy(image)
for i in range(len(self.coords)):
M = cv.moments(self.coords[i])
try:
self.com.append(
[int(M['m10']/M['m00']), int(M['m01']/M['m00'])]
)
            except ZeroDivisionError:
self.com.append(self.coords[i][0].flatten().tolist())
self.highlighted = np.array([True]*len(self.coords))
if not names:
names = ['No label']*len(self.coords)
self.names = names
self.names_offset = [
random.uniform(0,2*np.pi) for i in range(len(self.names))
]
self.names_r = [None]*len(self.coords)
self.label_set = np.array([[set([])]*image.shape[1]]*image.shape[0])
for i in range(len(self.coords)):
contour = cv.drawContours(
np.zeros(image.shape[:2]), self.coords, i, 1, -1
)
sets = self.label_set[contour > 0]
sets = [s.union({i}) for s in sets]
self.label_set[contour > 0] = sets
self.contour_image = copy.deepcopy(self.raw_image).astype(np.uint8)
for i in np.argwhere(self.highlighted == True).flatten():
self.contour_image = cv.drawContours(
self.contour_image, self.coords, i, (255, 0, 0), 2)
self.image = Image.fromarray(self.contour_image)
for i in range(len(self.coords)):
if self.com[i]:
self.names_r[i] = self.canvas.create_text(
self.com[i][0] + 5 * np.cos(self.names_offset[i]),
self.com[i][1] + 5 * np.sin(self.names_offset[i]),
anchor='w', text=self.names[i], fill='red',
font=('Arial', 14, 'bold'))
b_all = tk.Button(
self.master, text="Highlight All (a)",
command=self.highlight_all)
b_all.grid(row=0, column=2)
b_none = tk.Button(
self.master, text="Highlight None (n)",
command=self.highlight_none)
b_none.grid(row=0, column=1)
self.canvas.bind("<Button 3>", self.highlight_poly)
self.canvas.bind('<q>', self.quit)
self.canvas.bind('<a>', self.highlight_all)
self.canvas.bind('<n>', self.highlight_none)
self.canvas.focus_set()
self.show_image()
def highlight_all(self, event=None):
self.highlighted = np.array([True]*len(self.coords))
self.contour_image = copy.deepcopy(self.raw_image).astype(np.uint8)
for i in np.argwhere(self.highlighted == True).flatten():
self.contour_image = cv.drawContours(
self.contour_image, self.coords, i, (255, 0, 0), 2)
self.canvas.delete(self.names_r[i])
com_x_plot, com_y_plot = self.get_plot_coords(
self.com[i][0], self.com[i][1]
)
fill = 'red'
font = ('Arial', 14, 'bold')
self.names_r[i] = self.canvas.create_text(
com_x_plot+5*np.cos(self.names_offset[i])*self.imscale,
com_y_plot+5*np.sin(self.names_offset[i])*self.imscale,
anchor='w', text=self.names[i],
fill=fill, font=font
)
self.image = Image.fromarray(self.contour_image.astype(np.uint8))
self.show_image()
def highlight_none(self, event=None):
self.highlighted = np.array([False]*len(self.coords))
self.contour_image = copy.deepcopy(self.raw_image).astype(np.uint8)
for i in np.argwhere(self.highlighted == False).flatten():
self.contour_image = cv.drawContours(
self.contour_image, self.coords, i, (0, 255, 0), 1)
self.canvas.delete(self.names_r[i])
com_x_plot, com_y_plot = self.get_plot_coords(
self.com[i][0], self.com[i][1])
fill = '#0f0'
font = ('Arial', 14)
self.names_r[i] = self.canvas.create_text(
com_x_plot+5*np.cos(self.names_offset[i])*self.imscale,
com_y_plot+5*np.sin(self.names_offset[i])*self.imscale,
anchor='w', text=self.names[i],
fill=fill, font=font)
self.image = Image.fromarray(self.contour_image.astype(np.uint8))
self.show_image()
def highlight_poly(self, event):
x, y, x_plot, y_plot = self.get_coords(event.x, event.y)
inds = self.label_set[round(y), round(x)]
if len(inds) > 0:
if len(inds) == 1:
ind = list(inds)[0]
else:
self.new_window = tk.Toplevel(self.master)
self.new_window.lift()
self.app = Name_Polygons_Popup(
self.new_window,
[
str(i+1) + ' ' + self.names[i]
+ ' (' + (not self.highlighted[i])*'Not '
+ 'Highlighted)' for i in inds],
title='Choose object.')
self.app.e.destroy()
self.app.rb[-1].destroy()
self.master.wait_window(self.new_window)
self.canvas.focus_set()
ind = list(inds)[self.app.v.get()]
self.highlighted[ind] = not self.highlighted[ind]
self.contour_image = copy.deepcopy(self.raw_image).astype(np.uint8)
for i in np.argwhere(self.highlighted == False).flatten():
self.contour_image = cv.drawContours(
self.contour_image, self.coords, i, (0, 255, 0), 1)
for i in np.argwhere(self.highlighted == True).flatten():
self.contour_image = cv.drawContours(
self.contour_image, self.coords, i, (255, 0, 0), 2)
self.image = Image.fromarray(self.contour_image.astype(np.uint8))
self.show_image()
if self.highlighted[ind]:
self.new_window = tk.Toplevel(self.master)
self.new_window.lift()
self.app = Name_Polygons_Popup(
self.new_window, self.text_list)
self.master.wait_window(self.new_window)
self.canvas.focus_set()
if 0 <= self.app.v.get() < len(self.text_list):
self.names[ind] = self.text_list[self.app.v.get()]
elif self.app.v.get() == -1:
self.names[ind] = self.app.n.get()
self.text_list.append(self.app.n.get())
for i in range(len(self.coords)):
if self.com[i]:
self.canvas.delete(self.names_r[i])
com_x_plot, com_y_plot = self.get_plot_coords(
self.com[i][0], self.com[i][1])
if self.highlighted[i]:
fill = 'red'
font = ('Arial', 14, 'bold')
else:
fill = '#0f0'
font = ('Arial', 14)
text_x = com_x_plot
text_x += 5 * np.cos(self.names_offset[i]) * self.imscale
text_y = com_y_plot
text_y += 5 * np.sin(self.names_offset[i]) * self.imscale
self.names_r[i] = self.canvas.create_text(
text_x, text_y, anchor='w', text=self.names[i],
fill=fill, font=font)
class Name_Polygons_Popup():
def __init__(
self, master, text_list,
title='Choose a legend entry for this polygon.'):
self.master = master
self.frame = tk.Frame(self.master)
self.master.title(title)
self.v = tk.IntVar()
self.v.set(999)
self.n = tk.StringVar()
self.rb = []
button = tk.Button(
self.frame, text="Done (Enter)", state=tk.DISABLED,
command=self.master.destroy)
def activate_button():
button['state'] = tk.NORMAL
for i in range(len(text_list)):
rb = tk.Radiobutton(
self.frame,
text=str(i+1) + '. ' + text_list[i],
padx=20,
variable=self.v,
value=i,
justify=tk.LEFT,
command=activate_button)
self.rb.append(rb)
rb.grid(row=i)
rb = tk.Radiobutton(
self.frame,
text=str(len(text_list)+1) + '. Add New',
padx=20,
variable=self.v,
value=-1,
justify=tk.LEFT,
command=activate_button)
self.rb.append(rb)
rb.grid(row=len(text_list))
# for i in range(len(self.rb)):
# if (i+1) < 10:
# self.master.bind(
# str(i+1), lambda e, bn=i: self.rb[bn].invoke()
# )
self.master.bind('<Return>', lambda e: button.invoke())
e = tk.Entry(
self.frame, textvariable=self.n)
self.e = e
e.grid(row=len(text_list)+1)
button.grid(row=len(text_list)+2)
self.frame.pack()
class Confirm_Names(ttk.Frame):
def __init__(
self, master, text_list,
title='Confirm Legend Entries'):
self.master = master
self.master.title(title)
self.frame = tk.Frame(self.master)
self.text_list = copy.deepcopy(text_list)
self.n = [tk.StringVar() for i in range(len(self.text_list))]
for i in range(len(self.n)):
self.n[i].set(self.text_list[i])
self.e = []
for i in range(len(text_list)):
label_text = tk.StringVar()
label_text.set(str(i+1) + '.')
label = tk.Label(self.master, textvariable=label_text)
label.grid(row=i, column=0)
e = tk.Entry(self.master, textvariable=self.n[i], width=50)
self.e.append(e)
e.grid(row=i, column=1)
button = tk.Button(
self.master, text="Done (Enter)", command=self.master.destroy
)
button.grid(row=len(text_list), column=1, columnspan=2)
self.master.bind('<Return>', lambda e: button.invoke())
class Define_Training_Regions(Zoom_Scroll):
def __init__(
self, mainframe, image, text_list,
legend=None, title='Choose training regions.'
):
Zoom_Scroll.__init__(self, mainframe, image, title=title)
self.label = 0
        self.names = ['Background']
self.master.title(
'Choose training regions for {}'.format(self.names[self.label])
)
self.text_list = text_list + ['Map background']
self.p1 = None
self.p2 = None
self.boxes = [np.array([[]]).reshape([0, 5]).astype(int)]
self.p1_r = None
self.p2_r = None
self.canvas.bind("<Button 3>", self.draw_box)
self.canvas.bind('<Left>', self.previous_label)
self.canvas.bind('<p>', self.previous_label)
self.canvas.bind('<Right>', self.next_label)
self.canvas.bind('<n>', self.next_label)
self.canvas.focus_set()
try:
shape = legend.shape
        except AttributeError:
shape = [0]
if len(shape) in [2, 3]:
self.new_window = tk.Toplevel(self.master)
self.new_window.title('Legend Provided for Reference')
self.new_window.lift()
self.new_window.canvas = tk.Canvas(
self.new_window, width=legend.shape[1],
height=legend.shape[0], cursor='tcross')
self.new_window.canvas.update() # wait till canvas is created
self.new_window.canvas.pack(expand='yes', fill='both')
im = Image.fromarray(legend)
ph = ImageTk.PhotoImage(image=im)
self.new_window.canvas.ph = ph
self.new_window.canvas.create_image(0, 0, image=ph, anchor='nw')
self.new_window.canvas.ph = ph
b_next = tk.Button(
self.master, text="Next Category (Right Arrow)",
command=self.next_label)
b_next.grid(row=0, column=2)
b_previous = tk.Button(
self.master, text="Previous Category (Left Arrow)",
command=self.previous_label)
b_previous.grid(row=0, column=1)
self.canvas.focus_set()
def draw_box(self, event):
boxes = self.boxes[self.label]
x, y, x_plot, y_plot = self.get_coords(event.x, event.y)
if boxes.size > 0:
x_cond = np.logical_and(boxes[:, 0] <= x, x <= boxes[:, 2])
y_cond = np.logical_and(boxes[:, 1] <= y, y <= boxes[:, 3])
in_boxes = np.argwhere(
np.logical_and(x_cond, y_cond)).flatten()
else:
in_boxes = np.array([])
if in_boxes.size > 0:
for box_num in in_boxes:
self.canvas.delete(boxes[box_num, 4])
self.boxes[self.label] = np.delete(
self.boxes[self.label], in_boxes.tolist(), 0)
self.p1 = None
self.p2 = None
self.canvas.delete(self.p1_r)
self.canvas.delete(self.p2_r)
else:
if not self.p1:
self.canvas.delete(self.p1_r)
self.p1 = [x, y]
self.p1_r = self.canvas.create_rectangle(
x_plot-2*self.imscale, y_plot-2*self.imscale,
x_plot+2*self.imscale, y_plot+2*self.imscale,
width=1, fill='red', outline='red')
elif not self.p2:
if (x > self.p1[0]) and (y > self.p1[1]):
self.canvas.delete(self.p2_r)
self.p2 = [x, y]
self.p2_r = self.canvas.create_rectangle(
x_plot-2*self.imscale, y_plot-2*self.imscale,
x_plot+2*self.imscale, y_plot+2*self.imscale,
width=1, fill='red', outline='red')
p1_x_plot, p1_y_plot = self.get_plot_coords(
self.p1[0], self.p1[1])
r = self.canvas.create_rectangle(
p1_x_plot, p1_y_plot, x_plot, y_plot,
width=2, outline='red')
self.boxes[self.label] = np.append(
self.boxes[self.label], [
[
self.p1[0], self.p1[1],
self.p2[0], self.p2[1], r]],
axis=0).astype(int)
else:
self.canvas.delete(self.p1_r)
self.canvas.delete(self.p2_r)
self.p1 = [x, y]
self.p1_r = self.canvas.create_rectangle(
x_plot-2*self.imscale, y_plot-2*self.imscale,
x_plot+2*self.imscale, y_plot+2*self.imscale,
width=2, outline='red', fill='red')
self.p2 = None
def next_label(self, event=None):
if len(self.boxes) < 20:
self.canvas.delete(self.p1_r)
self.canvas.delete(self.p2_r)
self.p1 = None
self.p2 = None
for box in self.boxes[self.label]:
self.canvas.delete(box[4])
if self.label == len(self.boxes)-1:
self.boxes.append(np.array([[]]).reshape([0, 5]).astype(int))
self.new_window = tk.Toplevel(self.master)
self.new_window.lift()
self.app = Name_Polygons_Popup(
self.new_window, self.text_list,
'Select name for new group of training boxes.')
self.master.wait_window(self.new_window)
self.canvas.focus_set()
if 0 <= self.app.v.get() < len(self.text_list):
self.names.append(self.text_list[self.app.v.get()])
elif self.app.v.get() == -1:
self.names.append(self.app.n.get())
self.text_list.append(self.app.n.get())
self.label += 1
self.master.title(
'Choose training regions for {}'.format(
self.names[self.label]))
for box in self.boxes[self.label]:
p1_x_plot, p1_y_plot = self.get_plot_coords(box[0], box[1])
p2_x_plot, p2_y_plot = self.get_plot_coords(box[2], box[3])
r = self.canvas.create_rectangle(
p1_x_plot, p1_y_plot, p2_x_plot, p2_y_plot,
width=2, outline='red')
box[4] = r
def previous_label(self, event=None):
if self.label > 0:
self.canvas.delete(self.p1_r)
self.canvas.delete(self.p2_r)
self.p1 = None
self.p2 = None
for box in self.boxes[self.label]:
self.canvas.delete(box[4])
self.label -= 1
for box in self.boxes[self.label]:
p1_x_plot, p1_y_plot = self.get_plot_coords(box[0], box[1])
p2_x_plot, p2_y_plot = self.get_plot_coords(box[2], box[3])
r = self.canvas.create_rectangle(
p1_x_plot, p1_y_plot, p2_x_plot, p2_y_plot,
width=2, outline='red'
)
box[4] = r
self.master.title(
'Choose training regions for {}'.format(
self.names[self.label]))
class Choose_Kept_Categories():
def __init__(
self, master, text_list,
title='Choose the recovered polygon classes to keep.'):
self.master = master
self.frame = tk.Frame(self.master)
self.master.title(title)
self.v = [tk.IntVar() for i in range(len(text_list))]
button = tk.Button(
self.frame, text="Done", state=tk.DISABLED,
command=self.master.destroy)
def activate_button():
button['state'] = tk.NORMAL
for i in range(len(text_list)):
tk.Checkbutton(
self.frame, text=text_list[i], padx=20,
variable=self.v[i], justify=tk.LEFT,
command=activate_button).grid(row=i)
button.grid(row=len(text_list))
self.frame.pack()
class Choose_Map():
def __init__(
self, master, page_nums, dir,
title='Choose a map to scrape.'):
self.master = master
self.master.title(title)
self.container = ttk.Frame(self.master)
self.canvas = tk.Canvas(self.container)
scrollbar = ttk.Scrollbar(
self.container, orient="vertical", command=self.canvas.yview)
self.scrollable_frame = ttk.Frame(self.canvas)
self.scrollable_frame.bind(
"<Configure>",
lambda e: self.canvas.configure(
scrollregion=self.canvas.bbox("all")))
self.canvas.create_window(
(0, 0), window=self.scrollable_frame, anchor="nw")
self.canvas.configure(yscrollcommand=scrollbar.set)
self.rb = []
self.v = tk.IntVar()
self.v.set(-999)
self.ph = []
self.fn = []
self.n = [tk.StringVar() for i in range(len(page_nums))]
self.button = tk.Button(
self.container, text="Done (Enter)", state=tk.DISABLED,
command=self.master.destroy)
def activate_button():
self.button['state'] = tk.NORMAL
for i in range(len(page_nums)):
file_name = 'page-' + str(page_nums[i]) + '.png'
self.fn.append(file_name)
img = Image.open(dir + file_name)
img.thumbnail([256, 256], Image.ANTIALIAS)
self.ph.append(ImageTk.PhotoImage(img))
for i in range(len(page_nums)):
self.n[i].set('Page ' + str(page_nums[i]))
page_label = tk.Label(
self.scrollable_frame, textvariable=self.n[i],)
row = i // 3
col = i-3*row
page_label.grid(row=row, column=2*col+1)
rb = tk.Radiobutton(
self.scrollable_frame, variable=self.v,
value=i, image=self.ph[i],
command=activate_button, height=220)
self.rb.append(rb)
rb.grid(row=row, column=2*col)
self.button.pack()
self.master.bind('<MouseWheel>', self.on_mousewheel)
self.master.bind('<Button-5>', self.scroll_up)
self.master.bind('<Button-4>', self.scroll_down)
self.master.bind('<Return>', lambda e: self.button.invoke())
self.container.pack(fill='both', expand=True)
self.canvas.pack(side="left", fill="both", expand=True)
scrollbar.pack(side="right", fill="y")
def scroll_up(self, event):
self.canvas.yview_scroll(1, "units")
def scroll_down(self, event):
self.canvas.yview_scroll(-1, "units")
def on_mousewheel(self, event):
self.canvas.yview_scroll(event.delta, "units")
class Choose_Map_Template(Choose_Map):
def __init__(
self, master, page_nums, current_page, dir,
title='Choose a map template.'
):
Choose_Map.__init__(self, master, page_nums, dir, title=title)
def activate_button():
self.button['state'] = tk.NORMAL
self.n_new = tk.StringVar()
self.n_new.set(
'Create new template from current map (page ' + str(current_page)
+ ')')
label = tk.Label(
self.scrollable_frame, textvariable=self.n_new)
row = (len(page_nums)+1)//3
col = len(page_nums)+1-3*row
label.grid(row=row, column=2*col+1)
file_name = 'page-' + str(current_page) + '.png'
self.fn.append(file_name)
img = Image.open(dir + file_name)
img.thumbnail([256, 256], Image.ANTIALIAS)
self.ph_new = ImageTk.PhotoImage(img)
rb = tk.Radiobutton(
self.scrollable_frame, variable=self.v, value=-1,
image=self.ph_new, command=activate_button, height=220)
self.rb.append(rb)
rb.grid(row=row, column=2*col)
|
StarcoderdataPython
|
1623034
|
<filename>dassl/modeling/ops/mixup.py
import torch
def mixup(x1, x2, y1, y2, beta, preserve_order=False):
"""Mixup.
Args:
x1 (torch.Tensor): data with shape of (b, c, h, w).
x2 (torch.Tensor): data with shape of (b, c, h, w).
y1 (torch.Tensor): label with shape of (b, n).
y2 (torch.Tensor): label with shape of (b, n).
beta (float): hyper-parameter for Beta sampling.
preserve_order (bool): apply lmda=max(lmda, 1-lmda).
Default is False.
"""
lmda = torch.distributions.Beta(beta, beta).sample([x1.shape[0], 1, 1, 1])
if preserve_order:
lmda = torch.max(lmda, 1 - lmda)
lmda = lmda.to(x1.device)
xmix = x1*lmda + x2 * (1-lmda)
lmda = lmda[:, :, 0, 0]
ymix = y1*lmda + y2 * (1-lmda)
return xmix, ymix
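# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes a batch of 8 RGB 32x32 images with one-hot labels over 10 classes and
# beta=1.0; these values are assumptions, not requirements of mixup() itself.
if __name__ == '__main__':
    x1 = torch.randn(8, 3, 32, 32)
    x2 = torch.randn(8, 3, 32, 32)
    y1 = torch.eye(10)[torch.randint(0, 10, (8,))]
    y2 = torch.eye(10)[torch.randint(0, 10, (8,))]
    xmix, ymix = mixup(x1, x2, y1, y2, beta=1.0, preserve_order=True)
    print(xmix.shape, ymix.shape)  # torch.Size([8, 3, 32, 32]) torch.Size([8, 10])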
|
StarcoderdataPython
|
1633912
|
# -*- coding: utf-8 -*-
import numpy
import time
import os
import magdynlab.instruments
import magdynlab.controllers
import magdynlab.data_types
import threading_decorators as ThD
import matplotlib.pyplot as plt
def Plot_IxV(Data):
f = plt.figure('IxV Semi', (5, 4))
if not(f.axes):
plt.subplot()
ax = f.axes[0]
#check Y scale
ymax = numpy.nanmax(Data['V'])
ymin = numpy.nanmin(Data['V'])
dy = numpy.max([ymax - ymin, 1E-6])
if not(ax.lines):
ax.plot([],[],'b.-')
ax.set_xlim([Data['I'].min(), Data['I'].max()])
ax.set_ylim([ymax+dy, ymin-dy])
line = ax.lines[-1]
line.set_data(Data['I'], Data['V'])
ax.set_xlabel('Current (A)')
ax.set_ylabel('Voltage (V)')
ax.grid(True)
yc = (ymax + ymin)/2
ymin, ymax = ax.get_ylim()
ymax = numpy.max([yc + dy*1.1/2, ymax])
ymin = numpy.min([yc - dy*1.1/2, ymin])
ax.set_ylim([ymin, ymax])
f.tight_layout()
f.canvas.draw()
def resistance(Data):
I = Data['I']
V = Data['V']
R = numpy.polyfit(I, V, 1)[0]
return R
class IxV(object):
def __init__(self, ResouceNames={}):
logFile = os.path.expanduser('~/MagDynLab.log')
defaultRN = dict(RN_SCA = 'TCPIP::192.168.13.7::5025::SOCKET')
defaultRN.update(ResouceNames)
RN_SCA = defaultRN['RN_SCA']
self.SCA = magdynlab.instruments.KEYSIGHT_B1500A(ResourceName=RN_SCA,
logFile=logFile)
#Experimental/plot data
self.Data = magdynlab.data_types.DataContainer()
self.Data.file_id = '.IxV_Semi'
self.Info = ''
def Measure(self, file_name=None):
self.Data.info = self.Info
print('Measuring : %s' %file_name)
# Get one measurement to get the data shape and dictionaries
m_data = self.SCA.getResultDictionary(new=True, delete=True)
for key in m_data.keys():
self.Data[key] = m_data[key]
if file_name is not None:
self.Data.save(file_name)
self.Data.savetxt(file_name + '.IxV',
keys=[k for k in self.Data.keys()])
print('DONE')
print('Resistance : %0.3E Ohms' % resistance(self.Data))
Plot_IxV(self.Data)
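# --- Offline sketch (added for illustration; no instrument needed) ---
# resistance() fits V = R*I with numpy.polyfit; the synthetic 47-ohm device
# below is a hypothetical example, not data from the B1500A analyser.
if __name__ == '__main__':
    _I = numpy.linspace(-1E-3, 1E-3, 11)
    _V = 47.0 * _I
    print('Fitted resistance: %0.3E Ohms' % resistance({'I': _I, 'V': _V}))  # 4.700E+01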
|
StarcoderdataPython
|
149705
|
<gh_stars>0
import os
import bentoml
def test_requirement_txt_env(tmpdir):
req_txt_file = tmpdir.join("requirements.txt")
with open(str(req_txt_file), 'wb') as f:
f.write(b"numpy\npandas\ntorch")
@bentoml.env(requirements_txt=str(req_txt_file))
class ServiceWithFile(bentoml.BentoService):
@bentoml.api(bentoml.handlers.DataframeHandler)
def predict(self, df):
return df
service_with_file = ServiceWithFile()
assert 'numpy' in service_with_file.env._pip_dependencies
assert 'pandas' in service_with_file.env._pip_dependencies
assert 'torch' in service_with_file.env._pip_dependencies
saved_path = service_with_file.save('/tmp')
with open(os.path.join(saved_path, 'requirements.txt'), 'rb') as f:
content = f.read().decode('utf-8')
assert 'numpy' in content
assert 'pandas' in content
assert 'torch' in content
def test_pip_dependencies_env():
@bentoml.env(pip_dependencies="numpy")
class ServiceWithString(bentoml.BentoService):
@bentoml.api(bentoml.handlers.DataframeHandler)
def predict(self, df):
return df
service_with_string = ServiceWithString()
assert 'numpy' in service_with_string.env._pip_dependencies
@bentoml.env(pip_dependencies=['numpy', 'pandas', 'torch'])
class ServiceWithList(bentoml.BentoService):
@bentoml.api(bentoml.handlers.DataframeHandler)
def predict(self, df):
return df
service_with_list = ServiceWithList()
assert 'numpy' in service_with_list.env._pip_dependencies
assert 'pandas' in service_with_list.env._pip_dependencies
assert 'torch' in service_with_list.env._pip_dependencies
def test_pip_dependencies_with_archive(tmpdir):
@bentoml.env(pip_dependencies=['numpy', 'pandas', 'torch'])
class ServiceWithList(bentoml.BentoService):
@bentoml.api(bentoml.handlers.DataframeHandler)
def predict(self, df):
return df
service_with_list = ServiceWithList()
saved_path = service_with_list.save(str(tmpdir))
requirements_txt_path = os.path.join(saved_path, 'requirements.txt')
with open(requirements_txt_path, 'rb') as f:
saved_requirements = f.read()
module_list = saved_requirements.decode('utf-8').split('\n')
assert 'numpy' in module_list
assert 'pandas' in module_list
assert 'torch' in module_list
|
StarcoderdataPython
|
55791
|
<gh_stars>0
import asyncio
import json
import re
from itertools import cycle
from threading import Thread
from time import sleep
import serial
from aiohttp import web
from scipy import signal
class Sensor:
# Serial message patterns.
re_patterns = [
r'(RPY) - Roll: (-?\d+) \| Pitch: (-?\d+) \| Yaw: (-?\d+)',
r'(ACC) - x: (-?\d+) \| y: (-?\d+) \| z: (-?\d+)',
r'(GYR) - x: (-?\d+) \| y: (-?\d+) \| z: (-?\d+)',
r'(MAG) - x: (-?\d+) \| y: (-?\d+) \| z: (-?\d+)',
]
def __init__(self, port='COM4', history=None):
self._port = port
self._close = False
self._indexes = {
'RPY': 0,
'ACC': 0,
'GYR': 0,
'MAG': 0,
}
if history is not None:
self._history = history
self._thread = None
else:
self._history = {
'RPY': [],
'ACC': [],
'GYR': [],
'MAG': [],
}
self._thread = Thread(target=self._update)
self._thread.start()
def _update(self):
self._ser = serial.Serial(self._port, 115200, timeout=0.1)
        self._ser.readline()  # priming reads (x2) to discard any partial line
self._ser.readline()
temp = {}
while not self._close:
while True:
try:
line = self._ser.readline().decode()
except UnicodeDecodeError:
# Truncated unicode may appear when the program just starts.
continue
else:
break
if line.startswith('END') and len(temp) == 4:
for k, v in temp.items():
self._history[k].append(v)
temp = {}
else:
for pattern in self.re_patterns:
match = re.search(pattern, line)
t = []
if match:
if match.group(1) == 'RPY':
for i in range(2, 5):
v = float(match.group(i)) / 100
t.append(v)
t[1] = -t[1] # pitch is reversed.
elif match.group(1) == 'ACC':
for i in range(2, 5):
v = -float(match.group(i)) / (65536 / 2) * 2 # So do ACC
t.append(v)
elif match.group(1) == 'GYR':
for i in range(2, 5):
v = float(match.group(i)) / (65536 / 2) * 2000
t.append(v)
elif match.group(1) == 'MAG':
for i in range(2, 5):
v = float(match.group(i)) * 0.15
t.append(v)
temp[match.group(1)] = t
break
def next(self, key):
'''Get the next data point, and block until data are ready.'''
key = key.upper()
index = self._indexes[key]
seq = self._history[key]
while index >= len(seq):
sleep(0.05)
self._indexes[key] = index + 1
return seq[index]
def save(self, path):
with open(path, encoding='utf-8', mode='w') as file:
json.dump(self._history, file)
def close(self):
self._close = True
while self._thread and self._thread.is_alive():
sleep(0.1)
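# Illustrative note (added): a raw serial line such as
#   'RPY - Roll: 2989 | Pitch: 5537 | Yaw: 1097'
# matches the first pattern above; each value is divided by 100 and the pitch is
# negated, giving roll 29.89, pitch -55.37, yaw 10.97 (units and values assumed).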
async def mock_display_handler(request):
'''For testing when the board is not connected.'''
roll = cycle(range(10, 100, 10))
pitch = cycle(range(10, 100, 10))
yaw = cycle(range(10, 100, 10))
ws = web.WebSocketResponse()
await ws.prepare(request)
while True:
try:
await ws.receive(timeout=0.01)
except asyncio.TimeoutError:
sleep(0.5)
# await ws.send_json([next(roll), next(pitch), next(yaw)])
await ws.send_json([29.89, 55.37, 10.97])
else:
break
return ws
async def display_handler(request):
    '''WebSocket handler streaming roll/pitch/yaw (RPY) data to the AH display.'''
ws = web.WebSocketResponse()
await ws.prepare(request)
sensor = Sensor()
while True:
try:
await ws.receive(timeout=0.01)
except asyncio.TimeoutError:
await ws.send_json(sensor.next('RPY'))
else:
break
if request.app['files']['save_file'] != 'None':
sensor.save(request.app['files']['save_file'])
sensor.close()
return ws
async def analyse_handler(request):
'''Handler for charts'''
param = await request.json()
name = param['name']
order = param['order']
freq = param['freq']
with open(request.app['files']['history_file'], encoding='utf-8') as file:
history = json.load(file)
data = history[name]
# Filter
if order != 'None':
b, a = signal.butter(int(order), freq, 'lowpass')
data_x = [v[0] for v in data]
data_y = [v[1] for v in data]
data_z = [v[2] for v in data]
data_x = signal.filtfilt(b, a, data_x)
data_y = signal.filtfilt(b, a, data_y)
data_z = signal.filtfilt(b, a, data_z)
data = [[v_x, v_y, v_z] for v_x, v_y, v_z in zip(data_x, data_y, data_z)]
return web.json_response(data)
async def path_handler(request):
'''Receive paths to history json.'''
path_obj = await request.json()
if 'display' in path_obj:
if path_obj['display'] == 'None':
app['files']['save_file'] = path_obj['display']
text = 'ACK'
else:
try:
with open(path_obj['display'], encoding='utf-8', mode='w') as _:
app['files']['save_file'] = path_obj['display']
except Exception as exc:
text = repr(exc)
else:
text = 'ACK'
elif 'analyse' in path_obj:
try:
with open(path_obj['analyse'], encoding='utf-8') as _:
app['files']['history_file'] = path_obj['analyse']
except Exception as exc:
text = repr(exc)
else:
text = 'ACK'
else:
text = 'Invalid path type!'
return web.Response(text=text)
app = web.Application()
app['files'] = {
'save_file': None,
'history_file': None,
}
app.add_routes([
web.get('/display', mock_display_handler),
# web.get('/display', display_handler),
web.post('/analyse', analyse_handler),
web.post('/path', path_handler),
web.static('/', './static'),
])
web.run_app(app)
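# Illustrative note (added): the /analyse endpoint expects a JSON body such as
#   {"name": "ACC", "order": 4, "freq": 0.1}
# where order/freq configure the optional Butterworth low-pass filter applied with
# filtfilt; sending "order": "None" returns the stored history unfiltered.
# (The example field values are assumptions, not values required by the handler.)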
|
StarcoderdataPython
|
3381490
|
<reponame>bensternthal/lumbergh
from funfactory.urlresolvers import reverse
from nose.tools import eq_
from careers.base.tests import TestCase
from careers.careers.tests import PositionFactory
class CareersTest(TestCase):
"""Tests static pages for careers"""
def test_position_case_sensitive_match(self):
"""
Validate that a position match is returned from a case-sensitive job id and it doesn't
raise a multiple records error.
"""
job_id_1 = 'oflWVfwb'
job_id_2 = 'oFlWVfwB'
PositionFactory.create(job_id=job_id_1)
PositionFactory.create(job_id=job_id_2)
url = reverse('careers.position', kwargs={'job_id': job_id_1})
response = self.client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['position'].job_id, job_id_1)
url = reverse('careers.position', kwargs={'job_id': job_id_2})
response = self.client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['position'].job_id, job_id_2)
|
StarcoderdataPython
|
1752648
|
<filename>setup.py
from distutils.core import setup
setup(
name='juramote',
version='0.1.0',
author='<NAME>',
author_email='<EMAIL>',
packages=['juramote'],
url='https://6xq.net/juramote/',
license='LICENSE.txt',
description='Remote control for Jura coffee maker.',
long_description=open('README.rst').read(),
install_requires=[
'pyserial>=3',
'Flask',
'wtforms',
],
entry_points={
'console_scripts': [
'juramotecli = juramote.cli:main',
'juramotehttpd = juramote.server:main'],
},
)
|
StarcoderdataPython
|
3314817
|
<filename>vesper/mpg_ranch/nfc_bounding_interval_annotator_1_0/annotator.py
"""
Module containing NFC bounding interval annotator, version 1.0.
An NFC bounding interval annotator sets values for the `Call Start Index`
and `Call End Index` annotations for a clip containing a nocturnal flight
call (NFC). If the annotations already exist their values are overwritten,
and if they do not already exist they are created. The clip is assumed to
contain an NFC.
"""
from collections import defaultdict
import logging
import resampy
from vesper.command.annotator import Annotator as AnnotatorBase
from vesper.django.app.models import AnnotationInfo
from vesper.mpg_ranch.nfc_bounding_interval_annotator_1_0.inferrer \
import Inferrer
from vesper.singleton.clip_manager import clip_manager
import vesper.django.app.model_utils as model_utils
import vesper.mpg_ranch.nfc_bounding_interval_annotator_1_0.dataset_utils \
as dataset_utils
import vesper.util.open_mp_utils as open_mp_utils
_CLASSIFICATION_ANNOTATION_NAME = 'Classification'
_START_INDEX_ANNOTATION_NAME = 'Call Start Index'
_END_INDEX_ANNOTATION_NAME = 'Call End Index'
_MODEL_INFOS = {
# Tseep 14k
'Tseep':
(('Tseep_Start_2020-08-07_14.02.08', 30),
('Tseep_End_2020-08-07_15.10.03', 10)),
# Tseep 9.5k
# 'Tseep':
# ('Tseep_Start_2020-07-10_17.17.48', 'Tseep_End_2020-07-10_18.02.04'),
# Tseep 10k
# 'Tseep':
# ('Tseep_Start_2020-07-10_11.53.54', 'Tseep_End_2020-07-10_12.27.40'),
# Tseep 5k without dropout
# 'Tseep':
# ('Tseep_Start_2020-07-08_19.11.45', 'Tseep_End_2020-07-08_19.37.02'),
# Tseep 5k with dropout of .25 : performance worse than without
# 'Tseep':
# ('Tseep_Start_2020-07-08_20.36.20', 'Tseep_End_2020-07-09_11.00.19'),
}
class Annotator(AnnotatorBase):
extension_name = 'MPG Ranch NFC Bounding Interval Annotator 1.0'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
open_mp_utils.work_around_multiple_copies_issue()
# Suppress TensorFlow INFO and DEBUG log messages.
logging.getLogger('tensorflow').setLevel(logging.WARN)
self._inferrers = dict(
(t, _create_inferrer(t))
for t in ('Tseep',))
self._annotation_infos = _get_annotation_infos()
def annotate_clips(self, clips):
clip_lists = self._get_call_clip_lists(clips)
annotated_clip_count = 0
for clip_type, clips in clip_lists.items():
inferrer = self._inferrers.get(clip_type)
if inferrer is not None:
# have inferrer for this clip type
inference_sample_rate = inferrer.sample_rate
clips, waveform_dataset = \
self._get_clip_waveforms(clips, inference_sample_rate)
bounds = inferrer.get_call_bounds(waveform_dataset)
for clip, (start_index, end_index) in zip(clips, bounds):
self._annotate_clip(
clip, _START_INDEX_ANNOTATION_NAME, start_index,
inference_sample_rate)
self._annotate_clip(
clip, _END_INDEX_ANNOTATION_NAME, end_index,
inference_sample_rate)
annotated_clip_count += len(clips)
return annotated_clip_count
def _get_call_clip_lists(self, clips):
"""Gets a mapping from clip types to lists of call clips."""
# Get mapping from clip types to call clip lists.
clip_lists = defaultdict(list)
for clip in clips:
if _is_call_clip(clip):
clip_type = model_utils.get_clip_type(clip)
clip_lists[clip_type].append(clip)
return clip_lists
def _get_clip_waveforms(self, clips, inference_sample_rate):
result_clips = []
waveforms = []
for clip in clips:
try:
waveform = self._get_clip_samples(clip, inference_sample_rate)
except Exception as e:
logging.warning(
f'Could not annotate clip "{clip}", since its samples '
f'could not be obtained. Error message was: {str(e)}')
else:
# got clip samples
result_clips.append(clip)
waveforms.append(waveform)
waveforms = \
dataset_utils.create_waveform_dataset_from_tensors(waveforms)
return result_clips, waveforms
def _get_clip_samples(self, clip, inference_sample_rate):
# Get clip samples.
samples = clip_manager.get_samples(clip)
if clip.sample_rate != inference_sample_rate:
# need to resample
samples = resampy.resample(
samples, clip.sample_rate, inference_sample_rate)
return samples
def _annotate_clip(
self, clip, annotation_name, index, inference_sample_rate):
# If needed, modify index to account for difference between
# clip and inference sample rates.
if clip.sample_rate != inference_sample_rate:
factor = clip.sample_rate / inference_sample_rate
index = int(round(index * factor))
# Make index a recording index rather than a clip index.
index += clip.start_index
annotation_info = self._annotation_infos[annotation_name]
annotation_value = str(index)
model_utils.annotate_clip(
clip, annotation_info, annotation_value,
creating_user=self._creating_user,
creating_job=self._creating_job,
creating_processor=self._creating_processor)
def _create_inferrer(clip_type):
model_infos = _MODEL_INFOS[clip_type]
return Inferrer(*model_infos)
def _get_annotation_infos():
return dict(
(name, _get_annotation_info(name))
for name in (_START_INDEX_ANNOTATION_NAME, _END_INDEX_ANNOTATION_NAME))
def _get_annotation_info(name):
try:
return AnnotationInfo.objects.get(name=name)
except AnnotationInfo.DoesNotExist:
raise ValueError(f'Unrecognized annotation "{name}".')
def _is_call_clip(clip):
annotations = model_utils.get_clip_annotations(clip)
classification = annotations.get(_CLASSIFICATION_ANNOTATION_NAME)
return classification is not None and classification.startswith('Call')
def _convert_clip_index_to_recording_index(
clip, clip_index, sample_rate):
if sample_rate != clip.sample_rate:
clip_index = int(round(clip_index * clip.sample_rate / sample_rate))
return clip.start_index + clip_index
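# Worked example (added for illustration): for a clip recorded at 24 kHz whose
# samples were resampled to a 32 kHz inference rate, an inferred index of 8000
# maps back to round(8000 * 24000 / 32000) = 6000 clip samples, and adding a clip
# start index of 1_000_000 gives recording index 1_006_000. (Rates are hypothetical.)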
|
StarcoderdataPython
|
101923
|
<reponame>howaboutudance/pyloggerkinesis<gh_stars>0
from . import stand_dist
import logging
from pathlib import Path
import argparse
import os
import time
from aws_logging_handlers.Kinesis import KinesisHandler
# Logging Configuratiion
LOGGING_FORMATTER = ("%(asctime)s %(process)s:%(thread)d " +
"%(levelname)s %(module)s:%(funcName)s:%(lineno)d -- %(message)s")
logger = logging.getLogger("pystandlogger")
logger.setLevel(logging.DEBUG)
cl = logging.StreamHandler()
cl.setLevel(logging.INFO)
cl.setFormatter(logging.Formatter(LOGGING_FORMATTER))
logger.addHandler(cl)
kl = KinesisHandler("logger_test", "us-west-2")
kl.setLevel(logging.INFO)
kl.setFormatter(logging.Formatter(LOGGING_FORMATTER))
logger.addHandler(kl)
logger.info({"event": "starting code"})
if (env_n := os.environ.get("N_RECORDS")):
n_default = int(env_n)
else:
n_default = 200
# argparse setup
parser = argparse.ArgumentParser()
parser.add_argument("-n", default=n_default, type=int)
args = parser.parse_args()
n_records: int = args.n
for x in range(n_records):
stand_dist.summary()
time.sleep(0.1)
|
StarcoderdataPython
|
95753
|
def append_suppliers_list():
suppliers = []
counter = 1
supply = ""
while supply != "stop":
        supply = input(f'Enter first name and last name of supplier {counter} (or "stop" to finish)\n')
suppliers.append(supply)
counter += 1
suppliers.pop()
return suppliers
append_suppliers_list()
|
StarcoderdataPython
|
107732
|
<reponame>elainehoml/Savu<gh_stars>10-100
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: scikitimage_sart
:platform: Unix
:synopsis: Wrapper for scikitimage SART function
"""
import logging
from savu.plugins.reconstructions.base_recon import BaseRecon
from savu.plugins.driver.cpu_plugin import CpuPlugin
import skimage.transform as transform
import numpy as np
from scipy import ndimage
from savu.plugins.utils import register_plugin
@register_plugin
class ScikitimageSart(BaseRecon, CpuPlugin):
def __init__(self):
logging.debug("initialising Scikitimage SART")
logging.debug("Calling super to make sure that all superclasses are " +
" initialised")
super(ScikitimageSart, self).__init__("ScikitimageSart")
def _shift(self, sinogram, centre_of_rotation):
centre_of_rotation_shift = \
(sinogram.shape[0] // 2) - float(centre_of_rotation)
return ndimage.interpolation.shift(sinogram, centre_of_rotation_shift)
def process_frames(self, data):
sino = data[0]
centre_of_rotations, angles, vol_shape, init = self.get_frame_params()
in_pData = self.get_plugin_in_datasets()[0]
sinogram = np.swapaxes(sino, 0, 1)
sinogram = self._shift(sinogram, centre_of_rotations)
sino = sinogram.astype(np.float64)
dim_detX = in_pData.get_data_dimension_by_axis_label('x', contains=True)
size = self.parameters['output_size']
size = in_pData.get_shape()[dim_detX] if size == 'auto' or \
size is None else size
result = \
transform.iradon(sino, theta=angles,
output_size=size,
filter_name=self.parameters['filter'],
interpolation=self.parameters['interpolation'],
circle=self.parameters['circle'])
for i in range(self.parameters["iterations"]):
print("Iteration %i" % i)
result = transform.iradon_sart(sino, theta=angles, image=result,
projection_shifts=None,
clip=self.parameters['clip'],
relaxation=\
self.parameters['relaxation'])
return result
def get_max_frames(self):
return 'single'
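# Worked example (added for illustration): for a hypothetical detector with 2048
# pixels and a measured centre of rotation of 1010.5, _shift moves the sinogram by
# (2048 // 2) - 1010.5 = 13.5 pixels, so the rotation axis sits at the array centre
# before iradon/iradon_sart are applied.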
|
StarcoderdataPython
|
165899
|
<gh_stars>0
#!/usr/bin/env python3
#
# Copyright 2018 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import asyncio
import time
import typing
import unittest
import unittest.mock
from . import AsyncBase
from .. import events
from .. import irc
class ManagerTests(AsyncBase):
def setUp(self) -> None:
super().setUp()
self.irc_c = irc.Context()
self.em = events.Manager(self.irc_c)
def test_context_register_and_fire(self):
called = 0
async def aobserver(*args, **kws) -> None:
nonlocal called
called += 1
def observer(*args, **kws) -> None:
nonlocal called
called += 1
async def inner_test():
async with self.em as em:
em('TEST_EVENT').observe(aobserver).observe(observer)
await em['TEST_EVENT'](self.irc_c)
self.assertEqual(called, 2)
self.assertIsInstance(self.em['OTHER_EVENT'], events.NullEvent)
self.assertFalse('OTHER_EVENT' in em)
self.assertTrue('TEST_EVENT' in em)
self.assertEqual(['test_event'], list(self.em))
self.loop.run_until_complete(inner_test())
|
StarcoderdataPython
|
1786934
|
n, k = map(int, input().split())
height = list(map(int, input().rstrip().split()))
a, m = 0, max(height)
if m > k:
a = m - k
print(a)
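# Example (added for illustration): with n=5, k=10 and heights '3 7 12 9 6', the
# tallest value is 12, so the script prints 12 - 10 = 2; if no height exceeds k
# it prints 0.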
|
StarcoderdataPython
|
3224157
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Ampel-core/ampel/demo/DemoPlainT3Unit.py
# License: BSD-3-Clause
# Author: <NAME> <<EMAIL>>
# Date: 17.12.2021
# Last Modified Date: 17.12.2021
# Last Modified By: <NAME> <<EMAIL>>
from typing import Union
from ampel.types import UBson
from ampel.struct.UnitResult import UnitResult
from ampel.view.T3Store import T3Store
from ampel.abstract.AbsT3PlainUnit import AbsT3PlainUnit
class DemoPlainT3Unit(AbsT3PlainUnit):
a_parameter: int = 9000
my_t3_doc_tag: str = "A_TAG"
def post_init(self) -> None:
self.logger.info("post_init was called")
def process(self, t3s: T3Store) -> UBson | UnitResult:
self.logger.info("Running DemoPlainT3Unit")
if not t3s.units:
self.logger.info(f"T3 store contains t3 views for units: {t3s.units}")
else:
self.logger.info("T3 store contains no t3 views")
return UnitResult(
body = {'a_parameter': self.a_parameter},
tag = self.my_t3_doc_tag
)
|
StarcoderdataPython
|
144092
|
<reponame>calebmarcus/awacs<filename>awacs/kinesisanalytics.py
# Copyright (c) 2012-2013, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
service_name = 'Amazon Kinesis Analytics'
prefix = 'kinesisanalytics'
class Action(BaseAction):
def __init__(self, action=None):
sup = super(Action, self)
sup.__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource='', region='', account=''):
sup = super(ARN, self)
sup.__init__(service=prefix, resource=resource, region=region,
account=account)
AddApplicationInput = Action('AddApplicationInput')
AddApplicationOutput = Action('AddApplicationOutput')
AddApplicationReferenceDataSource = \
Action('AddApplicationReferenceDataSource')
CreateApplication = Action('CreateApplication')
DeleteApplication = Action('DeleteApplication')
DeleteApplicationOutput = Action('DeleteApplicationOutput')
DeleteApplicationReferenceDataSource = \
Action('DeleteApplicationReferenceDataSource')
DescribeApplication = Action('DescribeApplication')
DiscoverInputSchema = Action('DiscoverInputSchema')
GetApplicationState = Action('GetApplicationState')
ListApplications = Action('ListApplications')
StartApplication = Action('StartApplication')
StopApplication = Action('StopApplication')
UpdateApplication = Action('UpdateApplication')
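# --- Usage sketch (added for illustration) ---
# Pairs an action with an ARN for use in an IAM policy statement. The resource,
# region and account values below are placeholders, and JSONrepr() is assumed to
# be provided by the awacs base Action/ARN classes.
if __name__ == '__main__':
    print(DescribeApplication.JSONrepr())
    print(ARN(resource='application/demo-app',
              region='us-east-1', account='123456789012').JSONrepr())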
|
StarcoderdataPython
|
4842166
|
# Determine the common prefix of an array of strings.
# The problem is split into two parts to make it easier to understand.
# The first function compares two strings.
# Function to find the common prefix between two strings.
# The result is the common prefix.
import profile
def commonPrefix(str1, str2):
result = "";
n1 = len(str1)
n2 = len(str2)
a = 0
b = 0
    # Start looping from the beginning, a, b = 0, incrementing by 1
while a <= n1 - 1 and b <= n2 - 1:
# Compare string (one letter)
if (str1[a] != str2[b]):
break
result += str1[a]
#Incrementing loop counter
a += 1
b += 1
return (result)
# Iterate through the array of strings.
# This is a horizontal search: first compare the first two elements of the list to get a common prefix.
# In each following loop cycle, compare that prefix with the next element (2, 3 ... -> end of list).
def commonPrefixArr (list, n):
prefix = list[0]
for i in range (1, n):
# In first iteration we compare index 0 and 1, in second prefix and index 2
prefix = commonPrefix(prefix, list[i])
return (prefix)
# This is second (more pythonic) version using python functions (zip, sort)
def longestCommonPrefixPython(list):
# Some tests at beginning (array with zero or one member)
    if not list: return ''  # empty input -> empty common prefix
if len(list) == 1:
return list[0]
# First sorting list
    s = sorted(list)
#print(list)
#print(s)
# Result -> common prefix
prefix = ""
    # Loop, searching for the common prefix
    # The list is sorted, so it is enough to compare the first and last elements
    # Zip returns pairs of characters: print(tuple(zip(s[0], s[-1]))) -> (('f', 'f'), ('l', 'l'), ('o', 'o'), ('p', 'w'), ('p', 'p'), ('e', 'i'))
for x, y in zip(s[0], s[-1]):
# If key, value is same we build prefix concating string
if x == y:
prefix += x
else:
break
return prefix
# # First solution (not using python builtin functions)
# Test array
l1 = ["flower","flowpi","florest", "flopper", "flotto"]
n = len(l1)
# Common prefix
#profile.run("commonPrefixArr(l1, n)")
c = commonPrefixArr(l1, n)
#Display prefix
if (len(c)):
print ("Prefix is -",c)
else:
print("No common prefix")
# Second solution using python zip, sort
print("Second solution: " + longestCommonPrefixPython(l1))
#profile.run("longestCommonPrefixPython(l1)")
# Results of profiler, second solutions is faster.
# python 007.CommonPrefix.py"
# 17 function calls in 0.016 seconds
# Ordered by: standard name
# ncalls tottime percall cumtime percall filename:lineno(function)
# 4 0.000 0.000 0.000 0.000 007.CommonPrefix.py:11(commonPrefix)
# 1 0.000 0.000 0.000 0.000 007.CommonPrefix.py:38(commonPrefixArr)
# 1 0.000 0.000 0.000 0.000 :0(exec)
# 8 0.000 0.000 0.000 0.000 :0(len)
# 1 0.016 0.016 0.016 0.016 :0(setprofile)
# 1 0.000 0.000 0.000 0.000 <string>:1(<module>)
# 1 0.000 0.000 0.016 0.016 profile:0(commonPrefixArr(l1, n))
# 0 0.000 0.000 profile:0(profiler)
# 7 function calls in 0.000 seconds
# Ordered by: standard name
# ncalls tottime percall cumtime percall filename:lineno(function)
# 1 0.000 0.000 0.000 0.000 007.CommonPrefix.py:48(longestCommonPrefixPython)
# 1 0.000 0.000 0.000 0.000 :0(exec)
# 1 0.000 0.000 0.000 0.000 :0(len)
# 1 0.000 0.000 0.000 0.000 :0(setprofile)
# 1 0.000 0.000 0.000 0.000 :0(sorted)
# 1 0.000 0.000 0.000 0.000 <string>:1(<module>)
# 1 0.000 0.000 0.000 0.000 profile:0(longestCommonPrefixPython(l1))
# 0 0.000 0.000 profile:0(profiler)
|
StarcoderdataPython
|
1746625
|
<reponame>tor-councilmatic/scrapers-ca
from __future__ import unicode_literals
from utils import CanadianScraper, CanadianPerson as Person
import re
COUNCIL_PAGE = 'http://www.lambtononline.ca/home/government/accessingcountycouncil/countycouncillors/Pages/default.aspx'
class LambtonPersonScraper(CanadianScraper):
def scrape(self):
councillor_seat_number = 1
page = self.lxmlize(COUNCIL_PAGE)
# Tableception here, first tr is left column, second the right column
councillors_left = page.xpath('//div[@id="content"]/table/tr/td[1]/table/tr')
councillors_right = page.xpath('//div[@id="content"]/table/tr/td[2]/table/tr')
councillors = councillors_left + councillors_right
for councillor in councillors:
node = councillor.xpath('.//tr[1]')
text = node[0].text_content()
if 'Deputy Warden' in text:
role = 'Deputy Warden'
name = text.replace('Deputy Warden', '')
district = 'Lambton'
elif 'Warden' in text:
role = 'Warden'
name = text.replace('Warden', '')
district = 'Lambton'
else:
role = 'Councillor'
name = text
district = 'Lambton (seat {})'.format(councillor_seat_number)
councillor_seat_number += 1
p = Person(primary_org='legislature', name=name, district=district, role=role)
p.add_source(COUNCIL_PAGE)
p.image = councillor.xpath('.//img/@src')[0]
info = councillor.xpath('./td/table/tr[2]/td')[0].text_content()
residential_info = re.search('(?<=Residence:)(.*(?=Business Phone)|.*(?=Municipal Office))', info, flags=re.DOTALL).group(0)
self.get_contacts(residential_info, 'residence', p)
municipal_info = re.findall(r'(?<=Municipal Office:)(.*(?=Bio)|.*)', info, flags=re.DOTALL)[0]
self.get_contacts(municipal_info, 'legislature', p)
yield p
def get_contacts(self, text, note, councillor):
address = text.split('Telephone')[0].split('Phone')[0]
councillor.add_contact('address', address, note)
text = text.replace(address, '').split(':')
for i, contact in enumerate(text):
if i == 0:
continue
contact_type = next(x.strip() for x in re.findall(r'[A-Za-z ]+', text[i - 1]) if x.strip() and x.strip() != 'ext')
if '@' in contact:
contact = contact.strip()
else:
contact = re.findall(r'[0-9]{3}[- ][0-9]{3}-[0-9]{4}(?: ext\. [0-9]+)?', contact)[0].replace(' ', '-')
if 'Fax' in contact_type:
councillor.add_contact('fax', contact, note)
elif 'Tel' in contact_type:
councillor.add_contact('voice', contact, note)
elif 'email' in contact_type:
councillor.add_contact('email', contact)
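# Illustrative note (added): the phone pattern above accepts values such as
# '555 123-4567' or '555-123-4567 ext. 89' (hypothetical numbers); every space in
# the matched text is then replaced with '-' before the contact is stored.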
|
StarcoderdataPython
|
1722916
|
<reponame>materialsvirtuallab/m3gnet
import unittest
import numpy as np
import tensorflow as tf
from pymatgen.core import Lattice, Structure
from m3gnet.graph import Index, MaterialGraph, RadiusCutoffGraphConverter
class TestConverter(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.s1 = Structure(Lattice.cubic(3.17), ["Mo", "Mo"], [[0, 0, 0], [0.5, 0.5, 0.5]])
cls.g1 = RadiusCutoffGraphConverter(cutoff=5.0, threebody_cutoff=4.0).convert(cls.s1)
def test_graph(self):
self.assertTrue(isinstance(self.g1, MaterialGraph))
glist = self.g1.as_list()
np.testing.assert_array_almost_equal(glist[Index.ATOMS].ravel(), [42, 42])
gstr = str(self.g1)
self.assertTrue(gstr.startswith("<Material"))
self.assertTrue(isinstance(self.g1.atoms, np.ndarray))
gtf = self.g1.as_tf()
self.assertTrue(isinstance(gtf.atoms, tf.Tensor))
self.assertTrue(self.g1.n_atom == 2)
self.assertTrue(self.g1.n_bond == self.g1.n_bonds[0])
self.assertTrue(self.g1.n_struct == 1)
self.assertTrue(self.g1.has_threebody)
g2 = MaterialGraph.from_list(self.g1.as_list())
self.assertTrue(self.g1 == g2)
g3 = self.g1.copy()
self.assertTrue(self.g1 == g3)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
1649193
|
def data_range(x):
return max(x)-min(x)
x = [12,23,22,43,57,84,23,11,66,24]
print(data_range(x))
|
StarcoderdataPython
|
3288196
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import mock
from bravado.fido_client import FidoResponseAdapter
def test_header_conversion():
fido_response = mock.Mock(
name='fido_response',
headers={
b'Content-Type': [b'application/json'],
'x-weird-ä'.encode('latin1'): ['ümläüt'.encode('utf8')],
b'X-Multiple': [b'donotuse', b'usethis'],
},
)
response_adapter = FidoResponseAdapter(fido_response)
assert response_adapter.headers == {
'content-type': 'application/json',
'X-WEIRD-ä': 'ümläüt',
'X-Multiple': 'usethis',
}
|
StarcoderdataPython
|
1724968
|
from rest_framework import serializers
from rest_framework.exceptions import PermissionDenied
from api.profiles.serializers import ProfileUsernamePictureSerializer
from apps.comments.models import Comment
from apps.parties.models import Party
class CommentSerializer(serializers.ModelSerializer):
author = ProfileUsernamePictureSerializer(read_only=True)
class Meta:
model = Comment
exclude = ['party', 'is_active']
class CommentWriteSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = ['text']
def validate(self, attrs):
slug = self.context['request'].path_info.split('/')[3]
party = Party.objects.get(slug=slug)
author = self.context['request'].user.profile
if author not in party.participants.all():
raise PermissionDenied('파티 참여자만 댓글을 달 수 있습니다.')
else:
attrs['party'] = party
return attrs
def create(self, validated_data):
party = validated_data.pop('party')
author = self.context['request'].user.profile
return Comment.objects.create_comment(
party=party,
author=author,
**validated_data
)
def update(self, instance, validated_data):
return Comment.objects.update_comment(instance, validated_data['text'])
|
StarcoderdataPython
|
110348
|
"""Unit test package for sentry_onboarding."""
|
StarcoderdataPython
|
4838016
|
import os
import django
# Configure Django settings before importing any project code.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portfolio.settings')
django.setup()
from django.test import TestCase
from django.urls import reverse, resolve
from portfolio.base.views import HomeView, ResumeView
class TestUrls(TestCase):
def test_home_url_is_resolved(self):
url = reverse('hero')
self.assertEqual(resolve(url).func.view_class, HomeView)
def test_resume_url_is_resolved(self):
url = reverse('resume')
self.assertEqual(resolve(url).func.view_class, ResumeView)
|
StarcoderdataPython
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.