id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses, 1 value)
---|---|---
180376
|
<filename>music.py
from itertools import cycle, repeat, chain, islice
from instruments import default_tone, kick, silence
def play_sequence(sequence, instrument=default_tone):
for freq, duration in sequence:
yield instrument(freq, duration)
def play_drumbase(beats, duration, drum=kick):
for x in beats:
if x:
yield drum(duration)
else:
yield silence(duration)
def tone(n, base_freq=440.0):
"""Return the frequency of the nth interval from base_freq (in 12-TET)."""
# -2 -1 0 1 2 3 4 5 6 7 8 9 10 11 12
# G G# A A# B C C# D D# E F F# G G# A
# G Ab A Bb B C Db D Eb E F Gb G Ab A
return base_freq * 2 ** (n/12)
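# A couple of worked values as a sanity check (using the A4 = 440 Hz reference above):
# tone(0) == 440.0 (A4), tone(3) ~= 523.25 (C5), tone(-9) ~= 261.63 (C4).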
# The block below builds a dict mapping note names from C0 (included) to C8
# (excluded) to the corresponding frequency (in 12-TET).
tones = [tone(i) for i in range(-57, 39)]
names_sharp = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
names_flat = ['C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab', 'A', 'Bb', 'B']
notes = {}
octaves = chain.from_iterable(repeat(o, 12) for o in range(8))
for t, ns, nf, o in zip(tones, cycle(names_sharp), cycle(names_flat), octaves):
notes[f'{ns}{o}'] = notes[f'{nf}{o}'] = t
# This dict maps each note name to the name of the following note,
# depending on the number of semitones in the interval. This ensures
# that e.g. a C will always be followed by a D, and that, if you go
# up 1 semitone from an A, you get a B flat and not an A sharp.
next_notes = {
'C': {1: 'Db', 2: 'D', 3: 'D#'},
'C#': {1: 'D', 2: 'D#'},
'Db': {2: 'Eb', 3: 'E'},
'D': {1: 'Eb', 2: 'E', 3: 'E#'},
'D#': {1: 'E', 2: 'E#'},
'Eb': {1: 'Fb', 2: 'F'},
'E': {1: 'F', 2: 'F#'},
'E#': {1: 'F#'},
'Fb': {2: 'Gb', 3: 'G'},
'F': {1: 'Gb', 2: 'G', 3: 'G#'},
'F#': {1: 'G', 2: 'G#'},
'Gb': {2: 'Ab', 3: 'A'},
'G': {1: 'Ab', 2: 'A', 3: 'A#'},
'G#': {1: 'A', 2: 'A#'},
'Ab': {2: 'Bb', 3: 'B'},
'A': {1: 'Bb', 2: 'B', 3: 'B#'},
'A#': {1: 'B', 2: 'B#'},
'Bb': {1: 'Cb', 2: 'C'},
'B': {1: 'C', 2: 'C#'},
'B#': {1: 'C#'},
'Cb': {2: 'Db', 3: 'D'},
}
class Note:
"""A note with a name and no frequency/duration."""
def __init__(self, note):
self.note = note.note if isinstance(note, Note) else note
def __eq__(self, other):
return self.note == other.note
def __hash__(self):
return hash(self.note)
def __repr__(self):
return self.note
def __str__(self):
return self.note
def next_note(self, interval):
# this assumes heptatonic scales
return Note(next_notes[self.note][interval])
def get_freq(self, octave):
"""Return the frequency of the note at the given octave."""
return notes[f'{self.note}{octave}']
# intervals for some common scales
intervals = {
'major': [2, 2, 1, 2, 2, 2, 1],
'melodic minor': [2, 1, 2, 2, 2, 2, 1],
'harmonic minor': [2, 1, 2, 2, 1, 3, 1],
'harmonic major': [2, 2, 1, 2, 1, 3, 1],
'diminished': [2, 1, 2, 1, 2, 1, 2, 1],
'augmented': [3, 1, 3, 1, 3, 1],
'whole-tone': [2, 2, 2, 2, 2, 2],
'pentatonic major': [2, 2, 3, 2, 3],
}
# currently only works with heptatonic scales
class Scale:
def __init__(self, key, scale, mode=1):
self.key, self.scale, self.mode = key, scale, mode
mode -= 1 # start from 0
scale_intervals = intervals[scale]
self.intervals = list(islice(cycle(scale_intervals), mode,
mode+len(scale_intervals)))
notes = [Note(key)]
for i in self.intervals:
notes.append(notes[-1].next_note(i))
self.notes = notes
#def __init__(self, key, scale, mode):
#self.key, self.scale, self.mode = key, scale, mode
#mode -= 1 # start from 0
#scale_intervals = intervals[scale]
#self.intervals = list(islice(cycle(scale_intervals), mode,
#mode+len(scale_intervals)))
#offsets = [0, *self.intervals]
#sharps = names_sharp * 2
#flats = names_flat * 2
#key_offset = sharps.index(key) if key in sharps else flats.index(key)
#self.notes = [sharps[key_offset+k] for k in accumulate(offsets)]
#print(self.notes)
#if any(note[0] in self.notes for note in self.notes if '#' in note):
#self.notes = [flats[key_offset+k] for k in accumulate(offsets)]
def __repr__(self):
modes = {1: 'I', 2: 'II', 3: 'III', 4: 'IV', 5: 'V', 6: 'VI', 7: 'VII'}
notes = " ".join(map(str, self.notes))
return (f'<Scale key={self.key!r} scale={self.scale!r} '
f'mode={modes[self.mode]!r} notes={notes!r}>')
def __str__(self):
return ' '.join(map(str, self.notes))
def __iter__(self):
return iter(self.notes)
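# A minimal usage sketch (illustrative only; assumes the instruments module
# imported above is available):
#   c_major = Scale('C', 'major')           # C D E F G A B C
#   d_dorian = Scale('D', 'major', mode=2)  # intervals rotate to [2, 1, 2, 2, 2, 1, 2]
#   melody = [(note.get_freq(4), 0.25) for note in c_major]
#   samples = play_sequence(melody)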
|
StarcoderdataPython
|
3245291
|
"""
For the ``future`` package.
Turns any print statements into functions and adds this import line:
from __future__ import print_function
at the top to retain compatibility with Python 2.6+.
"""
from libfuturize.fixes.fix_print import FixPrint
from libfuturize.fixer_util import future_import
class FixPrintWithImport(FixPrint):
run_order = 7
def transform(self, node, results):
n_stmt = super(FixPrintWithImport, self).transform(node, results)
future_import(u'print_function', node)
return n_stmt
|
StarcoderdataPython
|
120440
|
from typing import List
from inspect import signature as create_inspect_signature
from injecta.dtype.DType import DType
from injecta.service.class_.InspectedArgument import InspectedArgument
from injecta.service.class_.InspectedArgumentResolver import InspectedArgumentResolver
from injecta.module import attribute_loader
class InspectedArgumentsResolver:
def __init__(self):
self.__inspected_argument_resolver = InspectedArgumentResolver()
def resolve_constructor(self, dtype: DType) -> List[InspectedArgument]:
class_definition = attribute_loader.load(dtype.module_name, dtype.class_name)
while "__init__" not in class_definition.__dict__:
first_parent_class = class_definition.__bases__[0]
# no constructor found in base class or parents
if first_parent_class.__module__ == "builtins" and first_parent_class.__name__ == "object":
return []
class_definition = attribute_loader.load(first_parent_class.__module__, first_parent_class.__name__)
return self.__resolve(getattr(class_definition, "__init__"))
def resolve_method(self, dtype: DType, method_name: str) -> List[InspectedArgument]:
class_definition = attribute_loader.load(dtype.module_name, dtype.class_name)
return self.__resolve(getattr(class_definition, method_name))
def __resolve(self, obj):
signature = create_inspect_signature(obj)
def is_real_argument(argument):
argument_name, _ = argument
return argument_name != "self"
inspected_arguments = list(filter(is_real_argument, signature.parameters.items()))
return list(map(lambda argument: self.__inspected_argument_resolver.resolve(argument[0], argument[1]), inspected_arguments))
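# Rough usage sketch (the DType constructor arguments shown are an assumption and
# 'mypackage.MyService' is a hypothetical class, not part of this module):
#   resolver = InspectedArgumentsResolver()
#   constructor_args = resolver.resolve_constructor(DType('mypackage.MyService', 'MyService'))
#   run_args = resolver.resolve_method(DType('mypackage.MyService', 'MyService'), 'run')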
|
StarcoderdataPython
|
4801903
|
#!/usr/bin/env python3
tests = [
# RMW Hz, runs
('f', 80, 10),
('c', 80, 10),
('f', 100, 10),
('c', 100, 10),
('f', 120, 10),
('c', 120, 10),
]
rmw_names = {'f': 'FastRTPS', 'c': 'CycloneDDS'}
rmw_colors = {
'f': [
'#0000ff',
'#0000ef',
'#0000df',
'#0000cf',
'#0000bf',
'#0000af',
'#00009f',
'#00008f',
'#00007f',
'#00006f',
],
'c': [
'#00ff00',
'#00ef00',
'#00df00',
'#00cf00',
'#00bf00',
'#00af00',
'#009f00',
'#008f00',
'#007f00',
'#006f00',
]}
# data_by_frequency = {}
for rmw, frequency, runs in tests:
rmw_name = rmw_names[rmw]
print(rmw_name, 'with', frequency, 'hz', f'({runs} runs)')
all_runs_sent = []
all_runs_received = []
for run in range(1, runs + 1):
print(' run', run, '-', 'sent', 'vs', 'received')
pub_file = f'{rmw}-p-{frequency}-{run}.txt'
sub_file = f'{rmw}-s-{frequency}-{run}.txt'
with open(pub_file, 'r') as h:
pub_lines = h.read().splitlines()
assert 'Topic name: Array60k' in pub_lines
assert f'Publishing rate: {frequency}' in pub_lines
assert 'Maximum runtime (sec): 30' in pub_lines
assert 'Number of publishers: 1' in pub_lines
assert 'Number of subscribers: 0' in pub_lines
with open(sub_file, 'r') as h:
sub_lines = h.read().splitlines()
assert 'Topic name: Array60k' in sub_lines
assert f'Publishing rate: {frequency}' in sub_lines
assert 'Maximum runtime (sec): 30' in sub_lines
assert 'Number of publishers: 0' in sub_lines
assert 'Number of subscribers: 1' in sub_lines
assert pub_lines[19].startswith('T_experiment,')
assert pub_lines[49] == 'Maximum runtime reached. Exiting.'
assert sub_lines[19].startswith('T_experiment,')
assert sub_lines[49] == 'Maximum runtime reached. Exiting.'
run_sent = []
run_received = []
for i in range(20, 49):
pub_cols = pub_lines[i].split(',\t')
sub_cols = sub_lines[i].split(',\t')
# print(pub_cols[0], sub_cols[0])
assert pub_cols[0].startswith('%d.' % (i - 18))
assert sub_cols[0].startswith('%d.' % (i - 18))
sent = pub_cols[3].strip()
received = sub_cols[2].strip()
print(' ', sent, received)
run_sent.append(int(sent))
run_received.append(int(received))
all_runs_sent.append(run_sent)
all_runs_received.append(run_received)
import pandas
data = {}
for run in range(1, runs + 1):
# data[f'Sent by Publisher #{run}'] = all_runs_sent[run - 1]
# data[f'Received by Subscription #{run}'] = all_runs_received[run - 1]
data[f'Run #{run}'] = all_runs_received[run - 1]
tdf = pandas.DataFrame(data)
tdf.index += 1 # Index from 1, since the index is really time in seconds
ax = tdf.plot(kind='line', color=rmw_colors[rmw])  # 'color' is the keyword pandas/matplotlib expect
ax.set_title(
f'Array60k @ {frequency} Hz - 1 to 1 Pub/Sub across wifi\n'
f'{rmw_name}, reliable, volatile, keep_last@10')
ax.set_xlabel('Time in Seconds')
ax.set_ylabel('Number of Messages')
ax.get_figure().savefig(f'{rmw}-{frequency}.png', bbox_inches='tight')
print()
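# Net effect of the loop above: one line plot per (rmw, frequency) pair, saved as
# '{rmw}-{frequency}.png' (e.g. 'f-80.png' for FastRTPS at 80 Hz).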
|
StarcoderdataPython
|
3219745
|
<gh_stars>1-10
import cv2
import mediapipe as mp
import math
from imutils.video import VideoStream
from imutils.video import FileVideoStream
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
import collections
class PoseEstimator:
def __init__(self, window_size=8, smoothing_function=None):
"""
Window size specifies how many frames are considered for smoothing.
"""
if(smoothing_function == 'savgol') and ((window_size % 2) == 0):
print('Is Here')
print(window_size)
self.window_size = window_size - 1
print(self.window_size)
else:
self.window_size = window_size
self.smoothing_function = smoothing_function
self.mp_drawing = mp.solutions.drawing_utils
self.mp_pose = mp.solutions.pose
self.pose = self.mp_pose.Pose(static_image_mode=False, min_detection_confidence=0.1)
self.writer = None
self.coords_array = []
def get_pose_coords(self, image):
"""
Returns the coordinates of the wrists, elbows, shoulders, hips, knees, ankles and nose for a given image.
"""
try:
image_height, image_width, _ = image.shape
results = self.pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
if not results.pose_landmarks:
raise ValueError('No poses detected')
get_pose = results.pose_landmarks.landmark
lm = self.mp_pose.PoseLandmark
left_wrist_x = get_pose[lm.LEFT_WRIST].x*image_width
left_wrist_y = get_pose[lm.LEFT_WRIST].y*image_height
left_elbow_x = get_pose[lm.LEFT_ELBOW].x*image_width
left_elbow_y = get_pose[lm.LEFT_ELBOW].y*image_height
left_shoulder_x = get_pose[lm.LEFT_SHOULDER].x*image_width
left_shoulder_y = get_pose[lm.LEFT_SHOULDER].y*image_height
left_hip_x = get_pose[lm.LEFT_HIP].x*image_width
left_hip_y = get_pose[lm.LEFT_HIP].y*image_height
left_knee_x = get_pose[lm.LEFT_KNEE].x*image_width
left_knee_y = get_pose[lm.LEFT_KNEE].y*image_height
left_ankle_x = get_pose[lm.LEFT_ANKLE].x*image_width
left_ankle_y = get_pose[lm.LEFT_ANKLE].y*image_height
right_wrist_x = get_pose[lm.RIGHT_WRIST].x*image_width
right_wrist_y = get_pose[lm.RIGHT_WRIST].y*image_height
right_elbow_x = get_pose[lm.RIGHT_ELBOW].x*image_width
right_elbow_y = get_pose[lm.RIGHT_ELBOW].y*image_height
right_shoulder_x = get_pose[lm.RIGHT_SHOULDER].x*image_width
right_shoulder_y = get_pose[lm.RIGHT_SHOULDER].y*image_height
right_hip_x = get_pose[lm.RIGHT_HIP].x*image_width
right_hip_y = get_pose[lm.RIGHT_HIP].y*image_height
right_knee_x = get_pose[lm.RIGHT_KNEE].x*image_width
right_knee_y = get_pose[lm.RIGHT_KNEE].y*image_height
right_ankle_x = get_pose[lm.RIGHT_ANKLE].x*image_width
right_ankle_y = get_pose[lm.RIGHT_ANKLE].y*image_height
nose_x = get_pose[lm.NOSE].x*image_width
nose_y = get_pose[lm.NOSE].y*image_height
return (left_wrist_x, left_wrist_y, left_elbow_x, left_elbow_y, left_shoulder_x, left_shoulder_y, left_hip_x, left_hip_y, left_knee_x, left_knee_y, left_ankle_x, left_ankle_y,
right_wrist_x, right_wrist_y, right_elbow_x, right_elbow_y, right_shoulder_x, right_shoulder_y, right_hip_x, right_hip_y, right_knee_x, right_knee_y, right_ankle_x, right_ankle_y,
nose_x,nose_y)
except Exception as e:
print(e)
return None
def smoothen_coords(self, pose_coords):
"""
Smooth the coordinates over the last n frames, where n is the window size.
Input is a tuple of coordinates for the current frame.
"""
if len(self.coords_array) == self.window_size:
self.coords_array.pop(0)
self.coords_array.append(pose_coords)
if self.smoothing_function == 'mean':
smoothened_coords = np.array(self.coords_array).mean(axis=0)
elif self.smoothing_function == 'savgol':
try:
savgol = lambda arr: savgol_filter(arr, self.window_size, 1)[-1]
coords_np_arr = np.array(self.coords_array)
smoothened_coords = np.apply_along_axis(savgol, 0,
coords_np_arr)
self.coords_array.pop()
self.coords_array.append(smoothened_coords)
except ValueError as ve:
print(ve)
return pose_coords
else:
return pose_coords
return tuple(smoothened_coords)
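# Illustration of the 'mean' path with hypothetical 2-value coordinates and
# window_size=3: after feeding (0, 0), (3, 3) and (6, 6), the rolling mean returned
# on the third call is (3.0, 3.0). The 'savgol' path instead fits a degree-1
# Savitzky-Golay filter across the window and keeps its last sample.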
def get_annotated_image(self, image, pose_coords):
"""
Function to draw and visualize the coordinates in the image.
"""
left_wrist_x, left_wrist_y, left_elbow_x, left_elbow_y, left_shoulder_x, left_shoulder_y, left_hip_x, left_hip_y, left_knee_x, left_knee_y, left_ankle_x, left_ankle_y, right_wrist_x, right_wrist_y, right_elbow_x, right_elbow_y, right_shoulder_x, right_shoulder_y, right_hip_x, right_hip_y, right_knee_x, right_knee_y, right_ankle_x, right_ankle_y, nose_x, nose_y = pose_coords
annotated_image = image.copy()
## Drawing Circles
#Nose
cv2.circle(annotated_image,
(int(nose_x), int(nose_y)),
10,(0,0,255),-1)
#Shoulders
cv2.circle(annotated_image,
(int(left_shoulder_x), int(left_shoulder_y)),
10,(0,0,255),-1)
cv2.circle(annotated_image,
(int(right_shoulder_x), int(right_shoulder_y)),
10,(0,0,255),-1)
#Elbows
cv2.circle(annotated_image,
(int(left_elbow_x), int(left_elbow_y)),
10,(0,0,255),-1)
cv2.circle(annotated_image,
(int(right_elbow_x), int(right_elbow_y)),
10,(0,0,255),-1)
#Wrists
cv2.circle(annotated_image,
(int(left_wrist_x), int(left_wrist_y)),
10,(0,0,255),-1)
cv2.circle(annotated_image,
(int(right_wrist_x), int(right_wrist_y)),
10,(0,0,255),-1)
#Hips
cv2.circle(annotated_image,
(int(left_hip_x), int(left_hip_y)),
10,(0,0,255),-1)
cv2.circle(annotated_image,
(int(right_hip_x), int(right_hip_y)),
10,(0,0,255),-1)
#Knees
cv2.circle(annotated_image,
(int(left_knee_x), int(left_knee_y)),
10,(0,0,255),-1)
cv2.circle(annotated_image,
(int(right_knee_x), int(right_knee_y)),
10,(0,0,255),-1)
#Ankles
cv2.circle(annotated_image,
(int(left_ankle_x), int(left_ankle_y)),
10,(0,0,255),-1)
cv2.circle(annotated_image,
(int(right_ankle_x), int(right_ankle_y)),
10,(0,0,255),-1)
##Drawing Lines
#Nose-Shoulder
cv2.line(annotated_image,
(int(nose_x), int(nose_y)),
(int((left_shoulder_x+right_shoulder_x)/2), int((left_shoulder_y+right_shoulder_y)/2)),
(0,0,255),3)
#Shoulder
cv2.line(annotated_image,
(int(left_shoulder_x), int(left_shoulder_y)),
(int(right_shoulder_x), int(right_shoulder_y)),
(0,0,255),3)
#Shoulder-Elbow
cv2.line(annotated_image,
(int(left_shoulder_x), int(left_shoulder_y)),
(int(left_elbow_x), int(left_elbow_y)),
(0,0,255),3)
cv2.line(annotated_image,
(int(right_shoulder_x), int(right_shoulder_y)),
(int(right_elbow_x), int(right_elbow_y)),
(0,0,255),3)
#Elbow-Wrist
cv2.line(annotated_image,
(int(left_elbow_x), int(left_elbow_y)),
(int(left_wrist_x), int(left_wrist_y)),
(0,0,255),3)
cv2.line(annotated_image,
(int(right_elbow_x), int(right_elbow_y)),
(int(right_wrist_x), int(right_wrist_y)),
(0,0,255),3)
#Shoulder-Hip
cv2.line(annotated_image,
(int(left_shoulder_x), int(left_shoulder_y)),
(int(left_hip_x), int(left_hip_y)),
(0,0,255),3)
cv2.line(annotated_image,
(int(right_shoulder_x), int(right_shoulder_y)),
(int(right_hip_x), int(right_hip_y)),
(0,0,255),3)
#Hip
cv2.line(annotated_image,
(int(left_hip_x), int(left_hip_y)),
(int(right_hip_x), int(right_hip_y)),
(0,0,255),3)
#Hip-Knee
cv2.line(annotated_image,
(int(left_hip_x), int(left_hip_y)),
(int(left_knee_x), int(left_knee_y)),
(0,0,255),3)
cv2.line(annotated_image,
(int(right_hip_x), int(right_hip_y)),
(int(right_knee_x), int(right_knee_y)),
(0,0,255),3)
#Knee-Ankle
cv2.line(annotated_image,
(int(left_knee_x), int(left_knee_y)),
(int(left_ankle_x), int(left_ankle_y)),
(0,0,255),3)
cv2.line(annotated_image,
(int(right_knee_x), int(right_knee_y)),
(int(right_ankle_x), int(right_ankle_y)),
(0,0,255),3)
return annotated_image
def write_image(self, image):
"""
Function for writing the image to the output video and displaying it.
"""
if self.writer is None:
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
self.writer = cv2.VideoWriter("test6.mp4", fourcc, 25,
(image.shape[1], image.shape[0]), True)
self.writer.write(image)
show = cv2.resize(image, None,
fx=1, fy =1)
show = cv2.flip(image, 1)
cv2.imshow("Frame", show)
key = cv2.waitKey(1) & 0xFF
return key
def run_estimator(self):
"""
Main Function to run the Pose Estimator.
"""
capture = cv2.VideoCapture(0)
while (capture.isOpened()):
# Read a frame
ret, image = capture.read(0)
if ret:
try:
# Get the pose coordinates in a tuple
pose_coords = self.get_pose_coords(image)
if pose_coords:
# If poses are detected then apply the smoothing filter
# And annotate the image
pose_coords = self.smoothen_coords(pose_coords)
annotated_image = self.get_annotated_image(image, pose_coords)
else:
# If no poses are detected, then just display the frame
pose_coords = None
self.write_image(image)
continue
# Write the annotated image
key = self.write_image(annotated_image)
except ValueError as ve:
print(ve)
key = self.write_image(image)
if key == ord("q"):
break
cv2.destroyAllWindows()
capture.release()
if self.writer is not None:
self.writer.release()
self.pose.close()
s = PoseEstimator(window_size=8)
s.run_estimator()
|
StarcoderdataPython
|
1685076
|
<filename>helheim-tseries_decomp.py
## Time series decomposition on Helheim velocity
## 6 May 2020 EHU
import numpy as np
import matplotlib.pyplot as plt
import iceutils as ice
import sys
## Set up combined hdf5 stack
#fpath='/Users/lizz/Documents/Research/Gld-timeseries/Stack/'
hel_stack = ice.MagStack(files=['vx.h5', 'vy.h5'])
data_key = 'igram' # <NAME> convention for access to datasets in hdf5 stack
## Extract time series at selected points
xy_1 = (308103., -2577200.) #polar stereo coordinates of a point near Helheim 2009 terminus, in m
xy_2 = (302026., -2566770.) # point up on North branch
xy_3 = (297341., -2571490.) # point upstream on main branch
xy_4 = (294809., -2577580.) # point on southern tributary
xys = (xy_1, xy_2, xy_3, xy_4)
labels = ('Near terminus', 'North branch', 'Main branch', 'South branch')
series = [hel_stack.timeseries(xy=xyi, key=data_key) for xyi in xys]
## Set up design matrix and perform lasso regression, according to Bryan's documentation
def build_collection(dates):
"""
Function that creates a list of basis functions for a given datetime vector dates.
"""
# Get date bounds
tstart, tend = dates[0], dates[-1]
# Initialize a collection and relevant basis functions
collection = ice.tseries.timefn.TimefnCollection()
periodic = ice.tseries.timefn.fnmap['periodic']
ispl = ice.tseries.timefn.fnmap['isplineset']
poly = ice.tseries.timefn.fnmap['poly']
# Add polynomial first for secular components
collection.append(poly(tref=tstart, order=1, units='years'))
# Add seasonal terms
collection.append(periodic(tref=tstart, units='years', period=0.5,
tmin=tstart, tmax=tend))
collection.append(periodic(tref=tstart, units='years', period=1.0,
tmin=tstart, tmax=tend))
# Integrated B-splines for transient signals
# In general, we don't know the timescales of transients a priori
# Therefore, we add integrated B-splines of various timescales where the
# timescale is controlled by the 'nspl' value (this means to divide the time
# vector into 'nspl' equally spaced spline center locations)
for nspl in [128, 64, 32, 16, 8, 4]:
collection.append(ispl(order=3, num=nspl, units='years', tmin=tstart, tmax=tend))
# Done
return collection
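# For reference, with the settings above the collection combines a first-order
# polynomial (secular trend), two periodic terms (6-month and 1-year periods), and
# six integrated B-spline sets with nspl in {128, 64, 32, 16, 8, 4}.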
# Create an evenly spaced time array for time series predictions
t_grid = np.linspace(hel_stack.tdec[0], hel_stack.tdec[-1], 1000)
# First convert the time vectors to a list of datetime
dates = ice.tdec2datestr(hel_stack.tdec, returndate=True)
dates_grid = ice.tdec2datestr(t_grid, returndate=True)
# Build the collection
collection = build_collection(dates)
# Instantiate a model for inversion
model = ice.tseries.Model(dates, collection=collection)
# Instantiate a model for prediction
model_pred = ice.tseries.Model(dates_grid, collection=collection)
## Access the design matrix for plotting
G = model.G
# plt.plot(hel_stack.tdec, G)
# plt.xlabel('Year')
# plt.ylabel('Amplitude')
# plt.show()
# First create a solver with a minimum data threshold of 200 (i.e., we require at least
# 200 valid data points to perform an inversion). This should fail for all four time series
ridge = ice.tseries.select_solver('ridge', reg_indices=model.itransient, penalty=2, n_min=200)
# Un-comment the following two lines to ensure warnings are printed out every time
#import warnings
#warnings.simplefilter('always', UserWarning)
# Loop over time series
for i in range(len(series)):
# This should raise a warning and return a FAIL status (=0)
status, m, Cm = ridge.invert(model.G, series[i])
if status == ice.FAIL:
print('Unsuccessful inversion')
# Create ridge regression solver that damps out the transient spline coefficients
#solver = ice.tseries.select_solver('ridge', reg_indices=model.itransient, penalty=2)
solver = ice.tseries.select_solver('lasso', reg_indices=model.itransient, penalty=1.5, rw_iter=1)
# Loop over time series
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12,6))
for i in range(len(series)):
# Perform inversion to get coefficient vector and coefficient covariance matrix
status, m, Cm = solver.invert(model.G, series[i]) # fit near-terminus (series[0]) first
assert status == ice.SUCCESS, 'Failed inversion'
# Model will perform predictions
pred = model_pred.predict(m)
# Separate out seasonal (short-term) and secular + transient (long-term) signals
short_term = pred['seasonal']
long_term = pred['secular'] + pred['transient']
# Remove long-term signals from data
series_short_term = series[i] - np.interp(hel_stack.tdec, t_grid, long_term)
#print(len(pred['full']), len(series[0]))
#print(sum(np.isnan(pred['full'])), sum(np.isnan(series[0])))
#print(np.nanmean(pred['full']), np.nanmean(series[0]))
# Plot long-term
ax1.plot(hel_stack.tdec, series[i], '.')
ax1.plot(t_grid, long_term, label=labels[i])
# Plot short-term (add estimated bias (m[0]) for visual clarity)
ax2.plot(hel_stack.tdec, series_short_term + m[0], '.')
ax2.plot(t_grid, short_term + m[0], label=labels[i])
for ax in (ax1, ax2):
ax.set_xlabel('Year')
ax.set_ylabel('Velocity')
ax1.set_title('Multi-annual fit')
ax2.set_title('Seasonal fit')
plt.show()
|
StarcoderdataPython
|
1775676
|
<filename>code/method/settings.py<gh_stars>0
from enum import Enum
class DistanceMetric(Enum):
EUCLIDEAN = 'euclidean'
COSINE = 'cosine'
MANHATTAN = 'manhattan'
class CommunityMethod(Enum):
LOUVAIN = 'LOUVAIN'
GREEDY = 'GREEDY'
GIRVAN = 'GIRVAN'
LABEL_PROPAGATION = 'LABEL_PROPAGATION'
class Settings:
def __init__(self, k:int, metric:DistanceMetric, community_method: CommunityMethod, scoring_metric: DistanceMetric=DistanceMetric.EUCLIDEAN):
self.k = k
self.metric = metric
self.community_method = community_method
self.scoring_metric = scoring_metric
|
StarcoderdataPython
|
3300188
|
<filename>src/Application/PythonScriptModule/pymodules_old/circuits/core/events.py
# Package: events
# Date: 11th April 2010
# Author: <NAME>, prologic at shortcircuit dot net dot au
"""Events
This module defines the basic Event object and common events.
"""
class Event(object):
"""Create a new Event Object
Create a new Event Object populating it with the given list of arguments
and keyword arguments.
:ivar name: The name of the Event
:ivar channel: The channel this Event is bound for
:ivar target: The target Component this Event is bound for
:ivar success: An optional channel to use for Event Handler success
:ivar failure: An optional channel to use for Event Handler failure
:ivar filter: An optional channel to use if an Event is filtered
:ivar start: An optional channel to use before an Event starts
:ivar end: An optional channel to use when an Event ends
:ivar value: The future Value object used to store the result of an event
:param args: list of arguments
:type args: tuple
:param kwargs: dict of keyword arguments
:type kwargs: dict
"""
channel = None
target = None
handler = None
success = None
failure = None
filter = None
start = None
end = None
value = None
def __init__(self, *args, **kwargs):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
self.args = list(args)
self.kwargs = kwargs
def __getstate__(self):
keys = ("args", "kwargs", "channel", "target", "success", "failure",
"filter", "start", "end", "value", "source")
return dict([(k, getattr(self, k, None)) for k in keys])
@property
def name(self):
return self.__class__.__name__
def __eq__(self, other):
""" x.__eq__(other) <==> x==other
Tests the equality of Event self against Event other.
Two Events are considered "equal" iff the name,
channel and target are identical as well as their
args and kwargs passed.
"""
return (self.__class__ is other.__class__
and self.channel == other.channel
and self.args == other.args
and self.kwargs == other.kwargs)
def __repr__(self):
"x.__repr__() <==> repr(x)"
if type(self.channel) is tuple:
channel = "%s:%s" % self.channel
else:
channel = self.channel or ""
return "<%s[%s] %s %s>" % (self.name, channel, self.args, self.kwargs)
def __getitem__(self, x):
"""x.__getitem__(y) <==> x[y]
Get and return data from the Event object requested by "x".
If an int is passed to x, the requested argument from self.args
is returned index by x. If a str is passed to x, the requested
keyword argument from self.kwargs is returned keyed by x.
Otherwise a TypeError is raised as nothing else is valid.
"""
if type(x) is int:
return self.args[x]
elif type(x) is str:
return self.kwargs[x]
else:
raise TypeError("Expected int or str, got %r" % type(x))
def __setitem__(self, i, y):
"""x.__setitem__(i, y) <==> x[i] = y
Modify the data in the Event object requested by "x".
If i is an int, the ith requested argument from self.args
shall be changed to y. If i is a str, the requested value
keyed by i from self.kwargs, shall by changed to y.
Otherwise a TypeError is raised as nothing else is valid.
"""
if type(i) is int:
self.args[i] = y
elif type(i) is str:
self.kwargs[i] = y
else:
raise TypeError("Expected int or str, got %r" % type(i))
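# A short illustration (hypothetical values): given e = Event(1, 2, foo="bar"),
# e[0] == 1 and e["foo"] == "bar"; e[0] = 9 rewrites the first positional argument,
# and any other index type raises TypeError, as documented above.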
class Error(Event):
"""Error Event
This Event is sent for any exceptions that occur during the execution
of an Event Handler that is not SystemExit or KeyboardInterrupt.
:param type: type of exception
:type type: type
:param value: exception object
:type value: exceptions.TypeError
:param traceback: traceback of exception
:type traceback: traceback
:param kwargs: (Optional) Additional Information
:type kwargs: dict
"""
channel = "exception"
def __init__(self, type, value, traceback, handler=None):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
super(Error, self).__init__(type, value, traceback, handler)
class Success(Event):
"""Success Event
This Event is sent when an Event Handler's execution has completed
successfully.
:param evt: The event that succeeded
:type evt: Event
:param handler: The handler that executed this event
:type handler: @handler
:param retval: The returned value of the handler
:type retval: object
"""
def __init__(self, event, handler, retval):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
super(Success, self).__init__(event, handler, retval)
class Failure(Event):
"""Failure Event
This Event is sent when an error has occurred during the execution of an
Event Handler.
:param evt: The event that failed
:type evt: Event
:param handler: The handler that failed
:type handler: @handler
:param error: A tuple containing the exception that occurred
:type error: (etype, evalue, traceback)
"""
def __init__(self, event, handler, error):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
super(Failure, self).__init__(event, handler, error)
class Filter(Event):
"""Filter Event
This Event is sent when an Event is filtered by some Event Handler.
:param evt: The event that was filtered
:type evt: Event
:param handler: The handler that filtered this event
:type handler: @handler
:param retval: The returned value of the handler
:type retval: object
"""
def __init__(self, event, handler, retval):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
super(Filter, self).__init__(event, handler, retval)
class Start(Event):
"""Start Event
This Event is sent just before an Event is started
:param evt: The event about to start
:type evt: Event
"""
def __init__(self, event):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
super(Start, self).__init__(event)
class End(Event):
"""End Event
This Event is sent just after an Event has ended
:param evt: The event that has finished
:type evt: Event
:param handler: The last handler that executed this event
:type handler: @handler
:param retval: The returned value of the last handler
:type retval: object
"""
def __init__(self, event, handler, retval):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
super(End, self).__init__(event, handler, retval)
class Started(Event):
"""Started Event
This Event is sent when a Component has started running.
:param component: The component that was started
:type component: Component or Manager
:param mode: The mode in which the Component was started,
P (Process), T (Thread) or None (Main Thread / Main Process).
:type mode: str or None
"""
def __init__(self, component, mode):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
super(Started, self).__init__(component, mode)
class Stopped(Event):
"""Stopped Event
This Event is sent when a Component has stopped running.
:param component: The component that has stopped
:type component: Component or Manager
"""
def __init__(self, component):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
super(Stopped, self).__init__(component)
class Signal(Event):
"""Signal Event
This Event is sent when a Component receives a signal.
:param signal: The signal number received.
:type signal: int
:param stack: The interrupted stack frame.
:type stack: frame
"""
def __init__(self, signal, stack):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
super(Signal, self).__init__(signal, stack)
class Registered(Event):
"""Registered Event
This Event is sent when a Component has registered with another Component
or Manager. This Event is only sent if the Component or Manager being
registered with is not itself.
:param component: The Component being registered
:type component: Component
:param manager: The Component or Manager being registered with
:type manager: Component or Manager
"""
def __init__(self, component, manager):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
super(Registered, self).__init__(component, manager)
class Unregistered(Event):
"""Unregistered Event
This Event is sent when a Component has been unregistered from its
Component or Manager.
"""
def __init__(self, component, manager):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
super(Unregistered, self).__init__(component, manager)
|
StarcoderdataPython
|
3231873
|
<reponame>sem6-nu/CAPSTONE-I
import requests
import matplotlib.pyplot as plt
from PIL import Image
from matplotlib import patches
from io import BytesIO
import os
import ObjectAPIConfig as cnfg
image_path = os.path.join('images.jpg')
image_data = open(image_path, "rb").read()
subscription_key, object_api_url = cnfg.config()
headers = {'Content-Type': 'application/octet-stream',
'Ocp-Apim-Subscription-Key': subscription_key}
params = {'visualFeatures': 'Description'}
response = requests.post(object_api_url, params=params, headers=headers, data=image_data)
response.raise_for_status()
objects = response.json()
print(objects)
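# With 'visualFeatures': 'Description', the returned JSON typically carries a
# 'description' block with tags and captions; an illustrative (unverified) way to
# read the top caption:
#   captions = objects.get('description', {}).get('captions', [])
#   if captions:
#       print(captions[0]['text'], captions[0]['confidence'])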
|
StarcoderdataPython
|
1798664
|
<reponame>zayne-siew/AutoGFormBot
#!/usr/bin/env python3
"""
Abstract base classes (ABCs) for Google Form questions.
This script serves as an interface for documenting function implementation.
Usage:
This script should not be used directly, other than its ABC functionalities.
"""
from abc import ABC, abstractmethod
from selenium.webdriver.remote.webelement import WebElement
from typing import Any, Optional, Tuple
class AbstractQuestion(ABC):
"""AbstractQuestion class as ABC for custom Google Form question classes."""
# region Getter methods
@abstractmethod
def _get_question_element(self) -> Optional[WebElement]:
"""Gets the web element which represents the entire question."""
pass
@abstractmethod
def get_header(self) -> Optional[str]:
"""Gets the question header."""
pass
@abstractmethod
def get_description(self) -> Optional[str]:
"""Gets the question description."""
pass
@abstractmethod
def is_required(self) -> Optional[bool]:
"""Checks if the question is required."""
pass
@abstractmethod
def get_answer_elements(self) -> Any:
"""Gets the web elements related to answering of the question."""
pass
# endregion Getter methods
# region Setter methods
@abstractmethod
def _set_header(self, header: str) -> None:
"""Sets the question header."""
pass
@abstractmethod
def set_question_element(self, element: WebElement) -> None:
"""Sets the web element representing the entire question if it has changed."""
pass
@abstractmethod
def set_answer_elements(self, *args, **kwargs) -> None:
"""Sets the web elements required for answering the question if it has changed."""
pass
@abstractmethod
def set_description(self, description: str) -> None:
"""Sets the question description if it has changed."""
pass
@abstractmethod
def set_required(self, required: bool) -> None:
"""Toggles the required flag if it has changed."""
pass
# endregion Setter methods
@abstractmethod
def _is_valid(self, *elements: WebElement) -> bool:
"""Check if the web element(s) is/are still valid."""
pass
@abstractmethod
def get_info(self) -> Optional[bool]:
"""Obtains question metadata from Google Form."""
pass
@abstractmethod
def answer(self, *args, **kwargs) -> Optional[bool]:
"""Provide instruction to answer the question."""
pass
class AbstractOptionQuestion(ABC):
"""AbstractOptionQuestion class as ABC for custom Google Form question classes which offer options."""
# region Getter methods
@abstractmethod
def get_options(self) -> Optional[Tuple[str, ...]]:
"""Gets a list of all possible options."""
pass
@abstractmethod
def get_other_option_element(self) -> Optional[WebElement]:
"""Gets the web element for the other option input field."""
pass
# endregion Getter methods
# region Setter methods
@abstractmethod
def _set_options(self, *options: str) -> None:
"""Sets the list of options provided if it has changed."""
pass
@abstractmethod
def set_other_option_element(self, element: WebElement) -> None:
"""Sets the other option element if it has changed."""
pass
# endregion Setter methods
@abstractmethod
def _is_option(self, option: str) -> bool:
"""Check if the option is specified."""
pass
@abstractmethod
def _has_other_option(self) -> bool:
"""Check if there is an 'Other' option specified."""
pass
if __name__ == '__main__':
pass
|
StarcoderdataPython
|
3216994
|
<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Interactive Windows Registry analysis tool.
preg is an interactive Windows Registry analysis tool that utilizes
plaso Windows Registry parser plugins, dfwinreg Windows Registry and
dfvfs storage media image capabilities.
"""
from __future__ import print_function
from __future__ import unicode_literals
import locale
import sys
import IPython
from dfvfs.lib import definitions as dfvfs_definitions
# pylint: disable=import-error
# pylint: disable=no-name-in-module
try:
# Support version 1.x of IPython.
from IPython.terminal.embed import InteractiveShellEmbed
except ImportError:
from IPython.frontend.terminal.embed import InteractiveShellEmbed
from IPython.config.loader import Config
from IPython.core import magic
from plaso.cli import tools as cli_tools
from plaso.cli import views as cli_views
from plaso.lib import errors
from plaso.lib import timelib
from l2tpreg import helper
from l2tpreg import hexdump
from l2tpreg import plugin_list
from l2tpreg import preg_tool
# Older versions of IPython don't have a version_info attribute.
if getattr(IPython, 'version_info', (0, 0, 0)) < (1, 2, 1):
raise ImportWarning(
'Preg requires at least IPython version 1.2.1.')
@magic.magics_class
class PregMagics(magic.Magics):
"""Preg iPython magics."""
# Needed to give the magic class access to the front end tool
# for processing and formatting.
console = None
REGISTRY_KEY_PATH_SEPARATOR = '\\'
# TODO: move into helper.
REGISTRY_FILE_BASE_PATH = '\\'
# TODO: Use the output writer from the tool.
output_writer = cli_tools.StdoutOutputWriter()
def _HiveActionList(self, unused_line):
"""Handles the hive list action.
Args:
line (str): command line provided via the console.
"""
self.console.PrintRegistryFileList()
self.output_writer.Write('\n')
self.output_writer.Write(
'To open a Registry file, use: hive open INDEX\n')
def _HiveActionOpen(self, line):
"""Handles the hive open action.
Args:
line (str): command line provided via the console.
"""
try:
registry_file_index = int(line[5:], 10)
except ValueError:
self.output_writer.Write(
'Unable to open Registry file, invalid index number.\n')
return
try:
self.console.LoadRegistryFile(registry_file_index)
except errors.UnableToLoadRegistryHelper as exception:
self.output_writer.Write(
'Unable to load hive, with error: {0:s}.\n'.format(exception))
return
registry_helper = self.console.current_helper
self.output_writer.Write('Opening hive: {0:s} [{1:s}]\n'.format(
registry_helper.path, registry_helper.collector_name))
self.console.SetPrompt(registry_file_path=registry_helper.path)
def _HiveActionScan(self, line):
"""Handles the hive scan action.
Args:
line (str): command line provided via the console.
"""
# Line contains: "scan REGISTRY_TYPES" where REGISTRY_TYPES is a comma
# separated list.
registry_file_type_string = line[5:]
if not registry_file_type_string:
registry_file_types = self.console.preg_tool.GetRegistryTypes()
else:
registry_file_types = [
string.strip() for string in registry_file_type_string.split(',')]
registry_helpers = self.console.preg_tool.GetRegistryHelpers(
self.console.preg_tool.artifacts_registry,
registry_file_types=registry_file_types)
for registry_helper in registry_helpers:
self.console.AddRegistryHelper(registry_helper)
self.console.PrintRegistryFileList()
def _PrintPluginHelp(self, plugin_object):
"""Prints the help information of a plugin.
Args:
plugin_object (WindowsRegistryPlugin): a Windows Registry plugin.
"""
table_view = cli_views.CLITableView(title=plugin_object.NAME)
# TODO: replace __doc__ by DESCRIPTION.
description = plugin_object.__doc__
table_view.AddRow(['Description', description])
self.output_writer.Write('\n')
for registry_key in plugin_object.expanded_keys:
table_view.AddRow(['Registry Key', registry_key])
table_view.Write(self.output_writer)
def _SanitizeKeyPath(self, key_path):
"""Sanitizes a Windows Registry key path.
Args:
key_path (str): Windows Registry key path.
Returns:
str: sanitized Windows Registry key path.
"""
key_path = key_path.replace('}', '}}')
key_path = key_path.replace('{', '{{')
return key_path.replace('\\', '\\\\')
@magic.line_magic('cd')
def ChangeDirectory(self, key_path):
"""Change between Registry keys, like a directory tree.
The key path can either be an absolute path or a relative one.
Absolute paths can use '.' and '..' to denote current and parent
directory/key path. If no key path is set the current key is changed
to point to the root key.
Args:
key_path (str): Windows Registry key path to change to.
"""
if not self.console and not self.console.IsLoaded():
return
registry_helper = self.console.current_helper
if not registry_helper:
return
registry_key = registry_helper.ChangeKeyByPath(key_path)
if not registry_key:
self.output_writer.Write(
'Unable to change to key: {0:s}\n'.format(key_path))
return
sanitized_path = self._SanitizeKeyPath(registry_key.path)
self.console.SetPrompt(
registry_file_path=registry_helper.path,
prepend_string=sanitized_path)
@magic.line_magic('hive')
def HiveActions(self, line):
"""Handles the hive actions.
Args:
line (str): command line provided via the console.
"""
if line.startswith('list'):
self._HiveActionList(line)
elif line.startswith('open ') or line.startswith('load '):
self._HiveActionOpen(line)
elif line.startswith('scan'):
self._HiveActionScan(line)
@magic.line_magic('ls')
def ListDirectoryContent(self, line):
"""List all subkeys and values of the current key.
Args:
line (str): command line provided via the console.
"""
if not self.console and not self.console.IsLoaded():
return
if 'true' in line.lower():
verbose = True
elif '-v' in line.lower():
verbose = True
else:
verbose = False
sub = []
current_file = self.console.current_helper
if not current_file:
return
current_key = current_file.GetCurrentRegistryKey()
for key in current_key.GetSubkeys():
# TODO: move this construction into a separate function in OutputWriter.
time_string = timelib.Timestamp.CopyToIsoFormat(
key.last_written_time.GetPlasoTimestamp())
time_string, _, _ = time_string.partition('.')
sub.append(('{0:>19s} {1:>15s} {2:s}'.format(
time_string.replace('T', ' '), '[KEY]',
key.name), True))
for value in current_key.GetValues():
if not verbose:
sub.append(('{0:>19s} {1:>14s}] {2:s}'.format(
'', '[' + value.data_type_string, value.name), False))
else:
if value.DataIsString():
value_string = value.GetDataAsObject()
elif value.DataIsInteger():
value_string = '{0:d}'.format(value.GetDataAsObject())
elif value.DataIsMultiString():
value_string = '{0:s}'.format(''.join(value.GetDataAsObject()))
elif value.DataIsBinaryData():
value_string = hexdump.Hexdump.FormatData(
value.data, maximum_data_size=16)
else:
value_string = ''
sub.append((
'{0:>19s} {1:>14s}] {2:<25s} {3:s}'.format(
'', '[' + value.data_type_string, value.name, value_string),
False))
for entry, subkey in sorted(sub):
if subkey:
self.output_writer.Write('dr-xr-xr-x {0:s}\n'.format(entry))
else:
self.output_writer.Write('-r-xr-xr-x {0:s}\n'.format(entry))
@magic.line_magic('parse')
def ParseCurrentKey(self, line):
"""Parse the current key.
Args:
line (str): command line provided via the console.
"""
if not self.console and not self.console.IsLoaded():
return
if 'true' in line.lower():
verbose = True
elif '-v' in line.lower():
verbose = True
else:
verbose = False
current_helper = self.console.current_helper
if not current_helper:
return
current_key = current_helper.GetCurrentRegistryKey()
parsed_data = self.console.preg_tool.ParseRegistryKey(
current_key, current_helper)
self.console.preg_tool.PrintParsedRegistryKey(
parsed_data, file_entry=current_helper.file_entry, show_hex=verbose)
# Print a hexadecimal representation of all binary values.
if verbose:
header_shown = False
current_key = current_helper.GetCurrentRegistryKey()
for value in current_key.GetValues():
if not value.DataIsBinaryData():
continue
if not header_shown:
table_view = cli_views.CLITableView(
title='Hexadecimal representation')
header_shown = True
else:
table_view = cli_views.CLITableView()
table_view.AddRow(['Attribute', value.name])
table_view.Write(self.output_writer)
self.console.preg_tool.PrintSeparatorLine()
self.console.preg_tool.PrintSeparatorLine()
value_string = hexdump.Hexdump.FormatData(value.data)
self.output_writer.Write(value_string)
self.output_writer.Write('\n')
self.output_writer.Write('+-'*40)
self.output_writer.Write('\n')
@magic.line_magic('plugin')
def ParseWithPlugin(self, line):
"""Parses a Windows Registry key using a specific plugin.
Args:
line (str): command line provided via the console.
"""
if not self.console and not self.console.IsLoaded():
self.output_writer.Write('No hive loaded, unable to parse.\n')
return
current_helper = self.console.current_helper
if not current_helper:
return
if not line:
self.output_writer.Write('No plugin name added.\n')
return
plugin_name = line
if '-h' in line:
items = line.split()
if len(items) != 2:
self.output_writer.Write('Wrong usage: plugin [-h] PluginName\n')
return
if items[0] == '-h':
plugin_name = items[1]
else:
plugin_name = items[0]
registry_file_type = current_helper.file_type
registry_plugin_list = self.console.preg_tool.GetWindowsRegistryPlugins()
plugin_object = registry_plugin_list.GetPluginObjectByName(
registry_file_type, plugin_name)
if not plugin_object:
self.output_writer.Write(
'No plugin named: {0:s} available for Registry type {1:s}\n'.format(
plugin_name, registry_file_type))
return
key_paths = plugin_list.PluginList.GetKeyPathsFromPlugin(plugin_object)
if not key_paths:
self.output_writer.Write(
'Plugin: {0:s} has no key information.\n'.format(line))
return
if '-h' in line:
self._PrintPluginHelp(plugin_object)
return
for key_path in key_paths:
registry_key = current_helper.GetKeyByPath(key_path)
if not registry_key:
self.output_writer.Write('Key: {0:s} not found\n'.format(key_path))
continue
# Move the current location to the key to be parsed.
self.ChangeDirectory(key_path)
# Parse the key.
current_key = current_helper.GetCurrentRegistryKey()
parsed_data = self.console.preg_tool.ParseRegistryKey(
current_key, current_helper, use_plugins=[plugin_name])
self.console.preg_tool.PrintParsedRegistryKey(
parsed_data, file_entry=current_helper.file_entry)
@magic.line_magic('pwd')
def PrintCurrentWorkingDirectory(self, unused_line):
"""Print the current path.
Args:
line (str): command line provided via the console.
"""
if not self.console and not self.console.IsLoaded():
return
current_helper = self.console.current_helper
if not current_helper:
return
self.output_writer.Write('{0:s}\n'.format(
current_helper.GetCurrentRegistryPath()))
class PregConsole(object):
"""Preg iPython console."""
_BASE_FUNCTIONS = [
('cd key', 'Navigate the Registry like a directory structure.'),
('ls [-v]', (
'List all subkeys and values of a Registry key. If called as ls '
'True then values of keys will be included in the output.')),
('parse -[v]', 'Parse the current key using all plugins.'),
('plugin [-h] plugin_name', (
'Run a particular key-based plugin on the loaded hive. The correct '
'Registry key will be loaded, opened and then parsed.')),
('get_value value_name', (
'Get a value from the currently loaded Registry key.')),
('get_value_data value_name', (
'Get the data of a value stored in the currently loaded '
'Registry key.')),
('get_key', 'Return the currently loaded Registry key.')]
@property
def current_helper(self):
"""The currently loaded Registry helper."""
return self._currently_registry_helper
def __init__(self, tool):
"""Initialize a console.
Args:
tool (PregTool): preg tool.
"""
super(PregConsole, self).__init__()
self._currently_registry_helper = None
self._currently_loaded_helper_path = ''
self._registry_helpers = {}
preferred_encoding = locale.getpreferredencoding()
if not preferred_encoding:
preferred_encoding = 'utf-8'
# TODO: Make this configurable, or derive it from the tool.
self._output_writer = cli_tools.StdoutOutputWriter(
encoding=preferred_encoding)
self.preg_tool = tool
def _CommandGetCurrentKey(self):
"""Retreives the currently loaded Registry key.
Returns:
dfwinreg.WinRegistryKey: currently loaded Registry key or None if
not available.
"""
return self._currently_registry_helper.GetCurrentRegistryKey()
def _CommandGetValue(self, value_name):
"""Retrieves a value from the currently loaded Windows Registry key.
Args:
value_name (str): name of the value to be retrieved.
Returns:
dfwinreg.WinRegistryValue: a Windows Registry value, or None if not
available.
"""
current_key = self._currently_registry_helper.GetCurrentRegistryKey()
if current_key:
return current_key.GetValueByName(value_name)
def _CommandGetValueData(self, value_name):
"""Retrieves a value data from the currently loaded Windows Registry key.
Args:
value_name (str): name of the value to be retrieved.
Returns:
object: Windows Registry value data, or None if not available.
"""
registry_value = self._CommandGetValue(value_name)
if registry_value:
return registry_value.GetDataAsObject()
def AddRegistryHelper(self, registry_helper):
"""Add a Registry helper to the console object.
Args:
registry_helper (PregRegistryHelper): registry helper.
Raises:
ValueError: if no Registry helper is supplied or the Registry helper is not
the correct object (instance of PregRegistryHelper).
"""
if not registry_helper:
raise ValueError('No Registry helper supplied.')
if not isinstance(registry_helper, helper.PregRegistryHelper):
raise ValueError(
'Object passed in is not an instance of PregRegistryHelper.')
if registry_helper.path not in self._registry_helpers:
self._registry_helpers[registry_helper.path] = registry_helper
def GetConfig(self):
"""Retrieves the iPython configuration.
Returns:
IPython.terminal.embed.InteractiveShellEmbed: iPython configuration.
"""
try:
# The "get_ipython" function does not exist except within an IPython
# session.
return get_ipython() # pylint: disable=undefined-variable
except NameError:
return Config()
def IsLoaded(self):
"""Checks if a Windows Registry file is loaded.
Returns:
bool: True if a Registry helper is currently loaded, False otherwise.
"""
registry_helper = self._currently_registry_helper
if not registry_helper:
return False
current_key = registry_helper.GetCurrentRegistryKey()
if hasattr(current_key, 'path'):
return True
if registry_helper.name != 'N/A':
return True
self._output_writer.Write(
'No hive loaded, cannot complete action. Use "hive list" '
'and "hive open" to load a hive.\n')
return False
def PrintBanner(self):
"""Writes a banner to the output writer."""
self._output_writer.Write('\n')
self._output_writer.Write(
'Welcome to PREG - home of the Plaso Windows Registry Parsing.\n')
table_view = cli_views.CLITableView(
column_names=['Function', 'Description'], title='Available commands')
for function_name, description in self._BASE_FUNCTIONS:
table_view.AddRow([function_name, description])
table_view.Write(self._output_writer)
if len(self._registry_helpers) == 1:
self.LoadRegistryFile(0)
registry_helper = self._currently_registry_helper
self._output_writer.Write(
'Opening hive: {0:s} [{1:s}]\n'.format(
registry_helper.path, registry_helper.collector_name))
self.SetPrompt(registry_file_path=registry_helper.path)
# TODO: make sure to limit number of characters per line of output.
registry_helper = self._currently_registry_helper
if registry_helper and registry_helper.name != 'N/A':
self._output_writer.Write(
'Registry file: {0:s} [{1:s}] is available and loaded.\n'.format(
registry_helper.name, registry_helper.path))
else:
self._output_writer.Write('More than one Registry file ready for use.\n')
self._output_writer.Write('\n')
self.PrintRegistryFileList()
self._output_writer.Write('\n')
self._output_writer.Write((
'Use "hive open INDEX" to load a Registry file and "hive list" to '
'see a list of available Registry files.\n'))
self._output_writer.Write('\nHappy command line console fu-ing.')
def LoadRegistryFile(self, index):
"""Loads a Registry file helper from the list of Registry file helpers.
Args:
index (int): index of the Registry helper.
Raises:
UnableToLoadRegistryHelper: if the index attempts to load an entry
that does not exist or if there are no Registry helpers loaded.
"""
helper_keys = list(self._registry_helpers.keys())  # list() so it can be indexed below on Python 3
if not helper_keys:
raise errors.UnableToLoadRegistryHelper('No Registry helpers loaded.')
if index < 0 or index >= len(helper_keys):
raise errors.UnableToLoadRegistryHelper('Index out of bounds.')
if self._currently_registry_helper:
self._currently_registry_helper.Close()
registry_helper_path = helper_keys[index]
self._currently_registry_helper = (
self._registry_helpers[registry_helper_path])
self._currently_loaded_helper_path = registry_helper_path
self._currently_registry_helper.Open()
def PrintRegistryFileList(self):
"""Prints a list of all available registry helpers."""
if not self._registry_helpers:
return
self._output_writer.Write('Index Hive [collector]\n')
for index, registry_helper in enumerate(self._registry_helpers.values()):
collector_name = registry_helper.collector_name
if not collector_name:
collector_name = 'Currently Allocated'
if self._currently_loaded_helper_path == registry_helper.path:
star = '*'
else:
star = ''
self._output_writer.Write('{0:<5d} {1:s}{2:s} [{3:s}]\n'.format(
index, star, registry_helper.path, collector_name))
def SetPrompt(
self, registry_file_path=None, config=None, prepend_string=None):
"""Sets the prompt string on the console.
Args:
registry_file_path (Optional[str]): name or path of the Windows Registry
file.
config (Optional[IPython.terminal.embed.InteractiveShellEmbed]): iPython
configuration, where None will attempt to automatically derive
the configuration.
prepend_string (Optional[str]): text to prepend in the command prompt.
"""
if registry_file_path is None:
path_string = 'Unknown Registry file loaded'
else:
path_string = registry_file_path
prompt_strings = [
r'[{color.LightBlue}\T{color.Normal}]',
r'{color.LightPurple} ',
path_string,
r'\n{color.Normal}']
if prepend_string is not None:
prompt_strings.append('{0:s} '.format(prepend_string))
prompt_strings.append(r'[{color.Red}\#{color.Normal}] \$ ')
if config is None:
ipython_config = self.GetConfig()
else:
ipython_config = config
try:
ipython_config.PromptManager.in_template = r''.join(prompt_strings)
except AttributeError:
ipython_config.prompt_manager.in_template = r''.join(prompt_strings)
def Run(self):
"""Runs the interactive console."""
source_type = self.preg_tool.source_type
if source_type == dfvfs_definitions.SOURCE_TYPE_FILE:
registry_file_types = []
elif self.preg_tool.registry_file:
registry_file_types = [self.preg_tool.registry_file]
else:
# No Registry type specified use all available types instead.
registry_file_types = self.preg_tool.GetRegistryTypes()
registry_helpers = self.preg_tool._GetRegistryHelpers(
self.preg_tool.artifacts_registry,
plugin_names=self.preg_tool.plugin_names,
registry_file_types=registry_file_types)
for registry_helper in registry_helpers:
self.AddRegistryHelper(registry_helper)
# Adding variables in scope.
namespace = {}
namespace.update(globals())
namespace.update({
'console': self,
'get_current_key': self._CommandGetCurrentKey,
'get_key': self._CommandGetCurrentKey,
'get_value': self._CommandGetValue,
'get_value_data': self._CommandGetValueData,
'tool': self.preg_tool})
ipshell_config = self.GetConfig()
if len(self._registry_helpers) == 1:
self.LoadRegistryFile(0)
registry_helper = self._currently_registry_helper
if registry_helper:
registry_file_path = registry_helper.name
else:
registry_file_path = 'NO HIVE LOADED'
self.SetPrompt(registry_file_path=registry_file_path, config=ipshell_config)
# Starting the shell.
ipshell = InteractiveShellEmbed(
user_ns=namespace, config=ipshell_config, banner1='', exit_msg='')
ipshell.confirm_exit = False
self.PrintBanner()
# Adding "magic" functions.
ipshell.register_magics(PregMagics)
PregMagics.console = self
# Set autocall to two, making parenthesis not necessary when calling
# function names (although they can be used and are necessary sometimes,
# like in variable assignments, etc).
ipshell.autocall = 2
# Registering command completion for the magic commands.
ipshell.set_hook(
'complete_command', CommandCompleterCd, str_key='%cd')
ipshell.set_hook(
'complete_command', CommandCompleterVerbose, str_key='%ls')
ipshell.set_hook(
'complete_command', CommandCompleterVerbose, str_key='%parse')
ipshell.set_hook(
'complete_command', CommandCompleterPlugins, str_key='%plugin')
ipshell()
# Completer commands need to be top level methods or directly callable
# and cannot be part of a class that needs to be initialized.
def CommandCompleterCd(console, unused_core_completer):
"""Command completer function for cd.
Args:
console: IPython shell object (instance of InteractiveShellEmbed).
"""
return_list = []
namespace = getattr(console, 'user_ns', {})
magic_class = namespace.get('PregMagics', None)
if not magic_class:
return return_list
if not magic_class.console.IsLoaded():
return return_list
registry_helper = magic_class.console.current_helper
current_key = registry_helper.GetCurrentRegistryKey()
for key in current_key.GetSubkeys():
return_list.append(key.name)
return return_list
# Completer commands need to be top level methods or directly callable
# and cannot be part of a class that needs to be initialized.
def CommandCompleterPlugins(console, core_completer):
"""Command completer function for plugins.
Args:
console: IPython shell object (instance of InteractiveShellEmbed).
core_completer: IPython completer object (instance of completer.Bunch).
Returns:
A list of command options.
"""
namespace = getattr(console, 'user_ns', {})
magic_class = namespace.get('PregMagics', None)
if not magic_class:
return []
if not magic_class.console.IsLoaded():
return []
command_options = []
  if '-h' not in core_completer.line:
command_options.append('-h')
registry_helper = magic_class.console.current_helper
registry_file_type = registry_helper.file_type
registry_plugin_list = console.preg_tool.GetWindowsRegistryPlugins()
# TODO: refactor this into PluginsList.
for plugin_cls in registry_plugin_list.GetKeyPlugins(registry_file_type):
if plugin_cls.NAME == 'winreg_default':
continue
command_options.append(plugin_cls.NAME)
return command_options
# Completer commands need to be top level methods or directly callable
# and cannot be part of a class that needs to be initialized.
def CommandCompleterVerbose(unused_console, core_completer):
"""Command completer function for verbose output.
Args:
core_completer: IPython completer object (instance of completer.Bunch).
Returns:
A list of command options.
"""
if '-v' in core_completer.line:
return []
return ['-v']
def Main():
"""Run the tool."""
tool = preg_tool.PregTool()
if not tool.ParseArguments():
return False
if tool.run_mode == tool.RUN_MODE_LIST_PLUGINS:
tool.ListPluginInformation()
elif tool.run_mode == tool.RUN_MODE_REG_KEY:
tool.RunModeRegistryKey()
elif tool.run_mode == tool.RUN_MODE_REG_PLUGIN:
tool.RunModeRegistryPlugin()
elif tool.run_mode == tool.RUN_MODE_REG_FILE:
tool.RunModeRegistryFile()
elif tool.run_mode == tool.RUN_MODE_CONSOLE:
preg_console = PregConsole(tool)
preg_console.Run()
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
|
StarcoderdataPython
|
14866
|
from assets.lambdas.transform_findings.index import TransformFindings
import boto3
from moto import mock_s3
def __make_bucket(bucket_name: str):
bucket = boto3.resource('s3').Bucket(bucket_name)
bucket.create()
return bucket
@mock_s3
def test_fix_dictionary():
bucket = __make_bucket('tester')
transform_findings = TransformFindings(bucket.name)
finding = {
'first/level/test': 'test',
'ProductArn': 'arn:aws:securityhub:us-east-1::product/aws/securityhub',
'Types': ['Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark'],
'Description': 'Security groups provide stateful filtering of ingress/egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to port 22.',
'SchemaVersion': '2018-10-08',
'Compliance': {'Status': 'PASSED'},
'GeneratorId': 'arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0/rule/4.1',
'FirstObservedAt': '2021-01-31T04:52:30.123Z',
'CreatedAt': '2021-01-31T04:52:30.123Z',
'RecordState': 'ACTIVE',
'Title': '4.1 Ensure no security groups allow ingress from 0.0.0.0/0 to port 22',
'Workflow': {'Status': 'RESOLVED'},
'LastObservedAt': '2021-05-07T11:05:27.353Z',
'Severity': {'Normalized': 0, 'Label': 'INFORMATIONAL', 'Product': 0, 'Original': 'INFORMATIONAL'},
'UpdatedAt': '2021-05-07T11:05:25.775Z',
'FindingProviderFields': {
'Types': [
'Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark'],
'Severity': {'Normalized': 0, 'Label': 'INFORMATIONAL', 'Product': 0, 'Original': 'INFORMATIONAL'}
},
'WorkflowState': 'NEW',
'ProductFields': {
'StandardsGuideArn': 'arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0',
'StandardsGuideSubscriptionArn': 'arn:aws:securityhub:us-east-1:0123456789:subscription/cis-aws-foundations-benchmark/v/1.2.0',
'RuleId': '4.1',
'RecommendationUrl': 'https://docs.aws.amazon.com/console/securityhub/standards-cis-4.1/remediation',
'RelatedAWSResources:0/name': 'securityhub-restricted-ssh-38a80c22',
'RelatedAWSResources:0/type': 'AWS::Config::ConfigRule',
'StandardsControlArn': 'arn:aws:securityhub:us-east-1:0123456789:control/cis-aws-foundations-benchmark/v/1.2.0/4.1',
'aws/securityhub/ProductName': 'Security Hub',
'aws/securityhub/CompanyName': 'AWS',
'aws/securityhub/FindingId': 'arn:aws:securityhub:us-east-1::product/aws/securityhub/arn:aws:securityhub:us-east-1:0123456789:subscription/cis-aws-foundations-benchmark/v/1.2.0/4.1/finding/2a55570b-74e9-4aa3-9f4e-66f515c7ff03'
},
'AwsAccountId': '0123456789',
'Id': 'arn:aws:securityhub:us-east-1:0123456789:subscription/cis-aws-foundations-benchmark/v/1.2.0/4.1/finding/2a55570b-74e9-4aa3-9f4e-66f515c7ff03',
'Remediation': {
'Recommendation': {
'Text': 'For directions on how to fix this issue, please consult the AWS Security Hub CIS documentation.',
'Url': 'https://docs.aws.amazon.com/console/securityhub/standards-cis-4.1/remediation'}
},
'Resources': [{
'Partition': 'aws',
'Type': 'AwsEc2SecurityGroup',
'Details': {
'AwsEc2SecurityGroup': {
'GroupName': 'default',
'OwnerId': '0123456789',
'VpcId': 'vpc-0123456789',
'IpPermissions': [{'IpProtocol': '-1', 'UserIdGroupPairs': [
{'UserId': '0123456789', 'GroupId': 'sg-0123456789'}]}],
'IpPermissionsEgress': [{'IpProtocol': '-1', 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}],
'GroupId': 'sg-0123456789'}
},
'Region': 'us-east-1', 'Id': 'arn:aws:ec2:us-east-1:0123456789:security-group/sg-0123456789'
}]
}
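    # Editor's note (an assumption about the code under test, not asserted
    # directly): the expectations below amount to fix_dictionary recursively
    # replacing '/' and ':' in keys with '_', e.g.
    # 'RelatedAWSResources:0/name' -> 'RelatedAWSResources_0_name'.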
result = transform_findings.fix_dictionary(finding)
assert isinstance(result, dict)
assert 'first/level/test' not in result
assert 'first_level_test' in result
assert 'ProductFields' in result
assert 'aws/securityhub/ProductName' not in result['ProductFields']
assert 'aws_securityhub_ProductName' in result['ProductFields']
assert 'aws/securityhub/CompanyName' not in result['ProductFields']
assert 'aws_securityhub_CompanyName' in result['ProductFields']
assert 'aws/securityhub/FindingId' not in result['ProductFields']
assert 'aws_securityhub_FindingId' in result['ProductFields']
assert 'RelatedAWSResources:0/name' not in result['ProductFields']
assert 'RelatedAWSResources_0_name' in result['ProductFields']
|
StarcoderdataPython
|
1702635
|
<gh_stars>0
# python imports
import datetime
import os
# django imports
from django.conf import settings
from django.core.cache import cache
from django.db import models
from django.utils.translation import ugettext_lazy as _
# resources imports
from resources.config import CSS
from resources.config import RESOURCE_CHOICES
from resources.config import RESOURCES_DIRECTORY
class MergedResource(models.Model):
"""A merged resource out of one or more registered Resources.
    **Attributes:**
    last_updated
        The date the merged resource was last updated.
    type
        The type of resources which are merged.
    group
        The group of resources which are merged.
filename
The current filename of the merged resource.
position
The position of the merged resource.
"""
last_updated = models.DateTimeField(_("Last update"), blank=True, null=True)
type = models.PositiveSmallIntegerField(_("Type"), choices=RESOURCE_CHOICES)
group = models.CharField(_("Group"), blank=True, max_length=255)
filename = models.CharField(_("Filename"), blank=True, max_length=40)
position = models.PositiveIntegerField(default=10)
def get_path(self):
"""Returns the full path to the resource.
"""
return os.path.join(RESOURCES_DIRECTORY, self.filename)
def update(self, text, position):
"""Creates a new resources file and sets text and timestamp.
"""
import resources.utils
resources.utils.create_directory()
filename = self._create_filename()
new = open(os.path.join(settings.MEDIA_ROOT, RESOURCES_DIRECTORY, filename), "w")
new.write(text)
new.close()
try:
old_filename = os.path.join(settings.MEDIA_ROOT, self.filename)
os.unlink(old_filename)
except OSError:
pass
self.last_updated = datetime.datetime.now()
self.filename = filename
self.position = position
self.save()
cache.set("resourcesmanager-%s" % self.type, self)
def _create_filename(self):
suffix = "css" if self.type == CSS else "js"
if self.group:
return "%s_%s.%s" % (self.group, datetime.datetime.now().strftime("%Y%m%d%H%M%S"), suffix)
else:
return "%s.%s" % (datetime.datetime.now().strftime("%Y%m%d%H%M%S"), suffix)
class Resource(models.Model):
"""A registered resource like CSS or Javascript.
**Attributes**:
path
The path to the resource (without MEDIA_URL).
position
        The position of the resource within the merged file (important if one
        resource is dependent on another).
    type
        The type of the resource; currently either JavaScript or CSS.
minify
If True the resource is minified.
media
The media type of the resource. Only used for CSS resources.
"""
path = models.CharField(_(u"Script"), max_length=255)
position = models.PositiveIntegerField(default=10)
type = models.PositiveSmallIntegerField(choices=RESOURCE_CHOICES)
merge = models.PositiveSmallIntegerField(default=1)
minify = models.PositiveSmallIntegerField(default=1)
group = models.CharField(_(u"Group"), blank=True, max_length=255)
media = models.CharField(blank=True, max_length=30)
def __unicode__(self):
return self.path
class Meta:
ordering = ("position", )
def save(self, force_insert=False, force_update=False):
"""
"""
super(Resource, self).save(force_insert, force_update)
import resources.utils
resources.utils.reset()
resources.utils.create_resources()
def get_path(self):
"""Returns the full path to the resource.
"""
return os.path.join(RESOURCES_DIRECTORY, self.path)
|
StarcoderdataPython
|
38406
|
# BIP39
from bip_utils.bip.bip39_ex import Bip39InvalidFileError, Bip39ChecksumError
from bip_utils.bip.bip39 import (
Bip39WordsNum, Bip39EntropyBitLen,
Bip39EntropyGenerator, Bip39MnemonicGenerator, Bip39MnemonicValidator, Bip39SeedGenerator
)
# BIP32
from bip_utils.bip.bip32_ex import Bip32KeyError, Bip32PathError
from bip_utils.bip.bip32_utils import Bip32Utils
from bip_utils.bip.bip32_path import Bip32PathParser
from bip_utils.bip.bip32 import Bip32
# BIP44/49/84
from bip_utils.bip.bip44_base_ex import Bip44DepthError, Bip44CoinNotAllowedError
from bip_utils.bip.bip44_base import Bip44Changes, Bip44Coins, Bip44Levels
from bip_utils.bip.bip44 import Bip44
from bip_utils.bip.bip49 import Bip49
from bip_utils.bip.bip84 import Bip84
|
StarcoderdataPython
|
64205
|
import urllib.request
import json
import pytest
# import uuid
# import decimal
#from server.models.partner import Partner
base_url = "http://localhost:5000/v1/users/11<PASSWORD>/cart"
headers = {'Content-Type': 'application/json;charset=UTF-8'}
class TestCart(object):
def test_get_list_200(self):
res = urllib.request.urlopen(base_url)
data = str(res.read())
assert "200" in data
#assert "products" in data
def test_add_200(self):
data = '{ \
"product_id": "TEST_PRODUCT_A" \
}'.encode('utf8')
req = urllib.request.Request(base_url,
data=data,
headers=headers,
method='POST')
res = urllib.request.urlopen(req)
data = res.read().decode('utf8')
#partner_id = json.loads(data).get('partner').get('partner_id')
assert "200" in data
assert "products" in data
def test_update_200(self):
data = '{ \
"product_quantity": 2 \
}'.encode('utf8')
req = urllib.request.Request(base_url + '/TEST_PRODUCT_A',
data=data,
headers=headers,
method='PUT')
res = urllib.request.urlopen(req)
data = res.read().decode('utf8')
assert "200" in data
assert "products" in data
def test_delete_200(self):
req = urllib.request.Request(base_url + '/TEST_PRODUCT_A',
headers=headers,
method='DELETE')
res = urllib.request.urlopen(req)
data = res.read().decode('utf8')
assert "200" in data
|
StarcoderdataPython
|
1674424
|
<reponame>Matej-Chmel/KVContest-data-test-suite
from random import randint
from src.common import storage
from src.dataset_generator import data
class Implementation:
cyc_cmd = None
I = Implementation
def add_line() -> str:
key, val = None, None
while True:
cmd_tuple = next(I.cyc_cmd)
if cmd_tuple[0] == 'H':
h_len = next(data.cyc_H_len)
key = ' '.join([
next(data.new_key_generator())
if next(data.cyc_key_is_new)
else data.existing_key_else_new()
for i in range(h_len)
])
else:
try:
key = next(data.new_key_generator()) if cmd_tuple[1] else data.existing_key()
except KeyError:
continue
if cmd_tuple[0] == 'S':
val = data.rnd_value(next(data.cyc_val_len))
elif cmd_tuple[0] == 'A':
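            # For append commands, only emit a value if the appended chunk still
            # fits within the maximum value length; otherwise skip and draw a new
            # command on the next loop iteration.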
current_len = len(storage[key])
available_len = data.value_len.max - current_len
if available_len < data.append_len.min:
continue
val = data.rnd_value(randint(data.append_len.min, min([available_len, data.append_len.max])))
return f"{cmd_tuple[0]} {key}{' ' + val if val else ''}"
|
StarcoderdataPython
|
1653176
|
<reponame>CyberFlameGO/peeringdb
# Generated by Django 2.2.12 on 2020-05-13 10:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("peeringdb_server", "0035_traffic_levels"),
]
operations = [
migrations.AlterModelOptions(
name="deskproticket",
options={
"verbose_name": "DeskPRO Ticket",
"verbose_name_plural": "DeskPRO Tickets",
},
),
migrations.AddField(
model_name="ixlanprefix",
name="in_dfz",
field=models.BooleanField(default=False),
),
]
|
StarcoderdataPython
|
1779325
|
<reponame>paregorios/tfc-campa-epigraphy
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Parse Campā Inventory row into places
"""
from campa.geography.place import CampaPlace
from campa.geography.logger import SelfLogger
from campa.geography.norm import norm
from colorama import Fore, Style
from copy import deepcopy
import inspect
import json
from pathlib import Path
from pprint import pformat
import pycountry
import re
import sys
from wikidata_suggest import suggest
class PlaceParser(SelfLogger):
def __init__(self, districts, communes, villages, gazetteer):
self.cache = {}
self.districts_path = str(districts)
self.communes_path = str(communes)
self.villages_path = str(villages)
logger = self._get_logger()
with districts.open('r', encoding='utf-8') as f:
self.districts = json.load(f)
del f
logger.debug(
'read {} districts from {}'
''.format(len(self.districts), districts))
with communes.open('r', encoding='utf-8') as f:
self.communes = json.load(f)
del f
logger.debug(
'read {} communes from {}'
''.format(len(self.communes), communes))
with villages.open('r', encoding='utf-8') as f:
self.villages = json.load(f)
del f
logger.debug(
'read {} villages from {}'
''.format(len(self.villages), villages))
self.gazetteer = gazetteer
def parse(self, **kwargs):
logger = self._get_logger()
logger.debug('kwargs:\n%s', pformat(kwargs, indent=4))
keys = ['country', 'province', 'district', 'commune', 'village', 'position']
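        # Work through the administrative hierarchy from country down to village,
        # ending with the position field; a failed lookup falls back to a bare
        # place record built from the raw value.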
for k in keys:
v = kwargs[k]
try:
place = getattr(self, '_parse_{}'.format(k))(**kwargs)
except LookupError:
msg = (
'LOOKUP FAILED for "{}" as a "{}"'
''.format(v, k))
if k != 'country':
msg += ' in {}'.format(kwargs['country'])
if k != 'cnumber':
msg += ' (C{})'.format(kwargs['cnumber'])
logger.warning(msg)
place = self._make_place(name=v, ptype=k)
if place is not None:
self.gazetteer.set_place(place)
def _make_place(self, pid='slug', **kwargs):
slug = None
try:
kwargs['ptype']
except KeyError:
types = []
else:
t = kwargs['ptype']
types = [t]
if t == 'country':
types.append('ADM1')
elif t == 'province':
types.append('ADM2')
elif t == 'district':
types.append('ADM3')
elif t == 'commune':
types.append('ADM4')
elif t == 'village':
types.append('PPA')
try:
kwargs['repository']
except KeyError:
pass
else:
if kwargs['repository'] == 'wikidata':
try:
lookup = self.gazetteer.lookup(kwargs['id'])
except LookupError:
pass
else:
if kwargs['name'] not in lookup.names:
lookup.set_name(kwargs['name'])
return lookup
slug = kwargs['id']
if pid == 'slug':
if slug is None:
try:
name = kwargs['project_name']
except KeyError:
name = kwargs['name']
slug = '-'.join(
re.sub(r'[()-_]+', '', norm(name).lower()).split())
p = CampaPlace(pid=slug, types=types, gazetteer=self.gazetteer, **kwargs)
else:
p = CampaPlace(pid=pid, types=types, gazetteer=self.gazetteer, **kwargs)
return p
def _parse_cnumber(self, **kwargs):
pass
def _parse_commune(self, **kwargs):
commune_name = kwargs['commune']
logger = self._get_logger()
commune = None
if not self._present('commune', commune_name):
return
try:
commune = self.communes[commune_name]
except KeyError:
commune = self._suggest_wikidata(commune_name, 'commune')
if commune is not None:
self.communes[commune_name] = commune
self._save_communes()
else:
logger.debug('using stored wikidata commune information')
if commune is None:
commune = kwargs
else:
commune = deepcopy(commune)
for k, v in kwargs.items():
if k not in ['country', 'province', 'district']:
continue
try:
commune[k]
except KeyError:
commune[k] = v
try:
commune['name']
except KeyError:
commune['name'] = commune_name
else:
commune['project_name'] = commune_name
p = self._make_place(pid='slug', ptype='commune', **commune)
logger.debug('CampaPlace:\n%s', pformat(p.__dict__, indent=4))
return p
def _parse_country(self, **kwargs):
country_name = kwargs['country']
logger = self._get_logger()
country = None
lookup = None
if not self._present('country', country_name):
return
try:
country = self.cache[country_name]
except KeyError:
if country_name == 'Cambodge':
lookup = 'Cambodia'
else:
lookup = country_name
country = self._suggest_pycountry(lookup, 'country')
if country is None:
raise NotImplementedError(country_name)
else:
self.cache[country_name] = country
else:
logger.debug('using stored pycountry country information')
if country is None:
country = kwargs
else:
country = country.__dict__['_fields']
try:
country['name']
except KeyError:
country['name'] = country_name
else:
country['project_name'] = country_name
p = self._make_place(pid='slug', ptype='country', **country)
logger.debug('CampaPlace:\n%s', pformat(p.__dict__, indent=4))
return p
def _parse_district(self, **kwargs):
district_name = kwargs['district']
logger = self._get_logger()
if not self._present('district', district_name):
return
district = None
try:
district = self.districts[district_name]
except KeyError:
district = self._suggest_wikidata(district_name, 'district')
if district is not None:
self.districts[district_name] = district
self._save_districts()
else:
logger.debug('using stored wikidata district information')
if district is None:
district = kwargs
else:
district = deepcopy(district)
for k, v in kwargs.items():
if k not in ['country', 'province']:
continue
try:
district[k]
except KeyError:
district[k] = v
try:
district['name']
except KeyError:
district['name'] = district_name
else:
district['project_name'] = district_name
p = self._make_place(pid='slug', ptype='district', **district)
logger.debug('CampaPlace:\n%s', pformat(p.__dict__, indent=4))
return p
def _parse_position(self, **kwargs):
position_name = kwargs['position']
if not self._present('position', position_name):
return
raise NotImplementedError(inspect.currentframe().f_code.co_name)
def _parse_province(self, **kwargs):
province_name = kwargs['province']
logger = self._get_logger()
province = None
if not self._present('province', province_name):
return
try:
province = self.cache[province_name]
except KeyError:
province = self._suggest_pycountry(province_name, 'province')
if province is None:
raise NotImplementedError(province_name)
else:
self.cache[province_name] = province
else:
logger.debug('using stored pycountry province information')
if province is None:
province = kwargs
else:
province = deepcopy(province.__dict__['_fields'])
for k, v in kwargs.items():
if k not in ['country']:
continue
try:
province[k]
except KeyError:
province[k] = v
try:
province['name']
except KeyError:
province['name'] = province_name
else:
province['project_name'] = province_name
p = self._make_place(pid='slug', ptype='province', **province)
logger.debug('CampaPlace:\n%s', pformat(p.__dict__, indent=4))
return p
def _parse_village(self, **kwargs):
village_name = kwargs['village']
if not self._present('village', village_name):
return
logger = self._get_logger()
village = None
try:
village = self.villages[village_name]
except KeyError:
village = self._suggest_wikidata(village_name, 'village')
if village is not None:
self.villages[village_name] = village
self._save_villages()
else:
logger.debug('using stored wikidata village information')
if village is None:
village = deepcopy(kwargs)
del village['village']
else:
village = deepcopy(village)
for k, v in kwargs.items():
if k not in ['country', 'province', 'district', 'commune']:
continue
try:
village[k]
except KeyError:
village[k] = v
try:
village['name']
except KeyError:
village['name'] = village_name
else:
village['project_name'] = village_name
p = self._make_place(pid='slug', ptype='village', **village)
logger.debug('CampaPlace:\n%s', pformat(p.__dict__, indent=4))
return p
def _present(self, field_name, value):
if value == '':
logger = self._get_logger()
logger.debug('IGNORED: %s (%s)', field_name, 'empty string')
return False
return value
def _save_communes(self):
communes = Path(self.communes_path)
communes.rename(self.communes_path + '.bak')
communes = Path(self.communes_path)
with communes.open('w', encoding='utf-8') as fp:
json.dump(self.communes, fp, indent=4, ensure_ascii=False)
del fp
def _save_districts(self):
districts = Path(self.districts_path)
districts.rename(self.districts_path + '.bak')
districts = Path(self.districts_path)
with districts.open('w', encoding='utf-8') as fp:
json.dump(self.districts, fp, indent=4, ensure_ascii=False)
del fp
def _save_villages(self):
villages = Path(self.villages_path)
villages.rename(self.villages_path + '.bak')
villages = Path(self.villages_path)
with villages.open('w', encoding='utf-8') as fp:
json.dump(self.villages, fp, indent=4, ensure_ascii=False)
del fp
def _suggest_pycountry(self, term, ptype):
logger = self._get_logger()
if ptype == 'country':
suggestion = pycountry.countries.lookup(term)
elif ptype == 'province':
suggestion = pycountry.subdivisions.lookup(term)
else:
raise NotImplementedError(ptype, term)
msg = (
'{}:\n{}'
''.format(
ptype,
pformat(suggestion.__dict__['_fields'], indent=4)))
logger.debug(msg)
return suggestion
def _suggest_wikidata(self, term, ptype):
logger = self._get_logger()
print(
Fore.CYAN + Style.BRIGHT + 'WIKIDATA LOOKUP: "{}" ({})'
''.format(term, ptype) + Style.RESET_ALL)
suggestion = suggest(term)
if suggestion is not None:
msg = (
'{}:\n{}'
''.format(
ptype,
pformat(suggestion, indent=4)))
logger.debug(msg)
else:
logger.debug('No wikidata suggestion was accepted by the user')
return suggestion
|
StarcoderdataPython
|
3266524
|
<reponame>pashakondratyev/ParseBook<filename>parse.py<gh_stars>1-10
import sys
from threads.threads import Threads, Thread, Message
import json, html
THREAD_TAG = '<div class="thread">'
MESSAGE_TAG = '<div class="message">'
def main(messages_path, out_path):
with open(messages_path) as fp:
messages_html = fp.read()
threads = parse_html(messages_html)
with open(out_path, "w") as fp:
json.dump(threads, fp, indent=2)
def parse_html(msgs_html):
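    # The export is scanned with plain string searches for the thread/message div
    # markers rather than a full HTML parser, keeping the next tag offsets as
    # cursors into the document.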
threads = Threads()
h1_open = msgs_html.find("<h1>") + len("<h1>")
h1_close = msgs_html.find("</h1>", h1_open)
threads.owner = html.unescape( msgs_html[h1_open:h1_close].strip() )
next_thread = msgs_html.find(THREAD_TAG, h1_close) + len(THREAD_TAG)
while (next_thread < len(msgs_html) ):
thread_index = next_thread
next_thread = msgs_html.find(THREAD_TAG, thread_index)
next_thread = next_thread + len(THREAD_TAG) if next_thread != -1 else len(msgs_html)
thread = get_thread_for_people(msgs_html, thread_index, threads)
next_msg = msgs_html.find(MESSAGE_TAG, thread_index) + len(MESSAGE_TAG)
while (next_msg < next_thread):
msg_index = next_msg
next_msg = get_message(msgs_html, msg_index, thread)
next_msg = next_msg + len(MESSAGE_TAG) if next_msg != -1 else next_thread
# --- end of thread ---
messages_to_list(thread)
# --- end of all threads ---
for people in threads.threads:
thread = threads[people]
threads[people] = {
'people': thread.people,
'messages': thread.message_list,
'size': len(thread.message_list)
}
return threads.__dict__
def messages_to_list(thread):
message = thread.messages
start = len(thread.message_list)
thread.message_list.extend( [None] * thread.size )
for i in range(start, start + thread.size):
thread.message_list[i] = {
'sender': message.sender,
'created_at': message.created_at,
'content': message.content
}
message = message.next_msg
thread.size = 0
thread.messages = Message()
def get_thread_for_people(msgs_html, start, threads):
end = msgs_html.find(MESSAGE_TAG, start)
people = html.unescape( msgs_html[start:end].strip() ).split(', ')
people = [p.split('@')[0] for p in people]
people.sort()
people = " ".join(people)
if not threads[people]: threads[people] = Thread(people)
return threads[people]
def get_message(msgs_html, start, thread):
next_msg = msgs_html.find(MESSAGE_TAG, start)
msg_html = msgs_html[start:next_msg].strip().strip("\n")
sender = get_tag(msg_html, '<span class="user">', '</span>')
created_at = get_tag(msg_html, '<span class="meta">', '</span>')
content = get_tag(msg_html, '<p>', '</p>')
thread.prepend_message( Message(sender, created_at, content) )
thread.size += 1
return next_msg
def get_tag(string, tag_open, tag_close):
start = string.find(tag_open) + len(tag_open)
close = string.find(tag_close, start)
return html.unescape( string[start:close].strip() )
if len(sys.argv) != 3:
    print("ERROR: an input messages path and an output path are required")
sys.exit()
main(sys.argv[1], sys.argv[2])
|
StarcoderdataPython
|
1765867
|
from __future__ import (absolute_import, division, print_function)
import cProfile
import os
import time
from existing_code import myinterface
from mycode import validator
from mycode.cpp_equal_share import *
import traceback
from mycode.utilf import println
import util
def test_ga(cn):
ces = CppEqualShare(cn)
ces.run()
best_individual = ces._ga_runner.get_best_individual()
best_individual.revaluate()
#println(ces.cn.to_string(verbose=True))
result_str = "dt: {}, fit: {}, ind: {}, info: {}".format(ces._ga_runner.elapsed_time, best_individual.fitness, best_individual,
ces.cn.to_string(verbose=False).replace("\n", "|"))
println(result_str)
#validator.validate_cpp_network(ces.cn, check_redundancy=False)
def test_greedy(cn, mesh_filename,ldf=False):
start_of_computation = time.clock()
    cpg = myinterface.run_greedy(filename=mesh_filename, use_least_demanding=ldf)
elapsed_time = time.clock() - start_of_computation
# println(util.print_cpg(cpg))
cn.copy_fcpg_solution(cpg.cn)
# println(cn.to_string(verbose=True))
T.assertEqual(cn.V, set(cpg.cn.V))
T.assertIsNotNone(cn.G.graph["l_CLC"])
ind = Individual2(cn)
ind.set_fitness_values(cn.compute_fitness_values())
result_str = "dt: {}, fit: {}, ind: {}, info: {}".format(elapsed_time, ind.fitness, ind,
cn.to_string(verbose=False).replace("\n", "|"))
println(result_str)
validator.validate_cpp_network(cn, check_redundancy=False)
def f():
#input = "representation=greedy,flowOrder=random,numFlows=100,mu=5,fitness4th=min,vcFactor=1.2"
#sys.argv = ["./run", "8", input]
assert len(sys.argv) == 3
#print(sys.argv)
#exit(1)
config_dic = util.set_settings()
cn, mesh_filename = util.create_cn(config_dic)
if config.REPRESENTATION == "greedy":
test_greedy(cn, mesh_filename,ldf=True)
else:
test_ga(cn)
if __name__ == "__main__":
f()
|
StarcoderdataPython
|
1662156
|
<filename>dokx-search/dokx-build-search-index.py
"""
Create and populate a minimal SQLite full-text search index
"""
import sqlite3
import glob
import os
import re
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--output", type=str, help="Path to write SQLite3 search index")
parser.add_argument("--debug", type=bool, help="Debug mode")
parser.add_argument('input', type=str, help="Path to input directory of Markdown files")
args = parser.parse_args()
DB = sqlite3.connect(database=args.output)
DB.text_factory = str
path = args.input
def debug(msg):
if args.debug:
print(msg)
srcLinkPattern = re.compile('<a class="entityLink".*</a>')
def makeSearchText(section):
return buffer(re.sub(srcLinkPattern, "", section))
def sections(path):
pattern = re.compile('<a (name|id)="(.*)"></a>')
for packageName in os.listdir(path):
for filePath in glob.glob(os.path.join(path, packageName, "*.md")):
debug("Indexing " + filePath)
with open(filePath, 'r') as f:
section = ""
tag = packageName + "." + os.path.basename(filePath) + ".dok"
for line in f.readlines():
result = pattern.match(line)
if result:
section = makeSearchText(section)
yield packageName, tag, section
tag = result.group(2)
section = ""
else:
section += line
section = makeSearchText(section)
yield packageName, tag, section
def load_db():
"""Add sample data to the database"""
ins = """INSERT INTO fulltext_search(package, tag, doc) VALUES(?, ?, ?);"""
for (packageName, tag, section) in sections(path):
DB.execute(ins, (packageName, tag, section))
DB.commit()
def init_db():
"""Initialize our database"""
DB.execute("DROP TABLE IF EXISTS fulltext_search")
DB.execute("""CREATE VIRTUAL TABLE fulltext_search USING fts4(
id SERIAL,
package TEXT,
tag TEXT,
doc TEXT,
tokenize=porter
);""")
if __name__ == "__main__":
init_db()
load_db()
DB.close()
|
StarcoderdataPython
|
3377243
|
<reponame>fsmosca/python-ataxx
import ataxx
import ataxx.players
import ataxx.pgn
import random
import string
import copy
import unittest
class TestMethods(unittest.TestCase):
def test_fen(self):
fens = [
"x5o/7/7/7/7/7/o5x x 0 1",
"x5o/7/2-1-2/7/2-1-2/7/o5x o 0 1",
"x5o/7/2-1-2/3-3/2-1-2/7/o5x x 0 1",
"x5o/7/3-3/2-1-2/3-3/7/o5x o 0 1"
]
for fen in fens:
board = ataxx.Board(fen)
self.assertTrue(board.get_fen() == fen)
fens = [
"",
"a x 0 1",
"x5o/7/7/7/7/7/o5x a 0 1",
"x5o/7/7/7/7/7/o5x x a 1",
"x5o/7/7/7/7/7/o5x x 0 a",
"x5o/7/7/7/7/7/o5x x 0 1 a",
"x5o/7/7/7/7/7/o5x x -5 1",
"x5o/7/7/7/7/7/o5x x 0 -5"
]
for fen in fens:
board = ataxx.Board()
self.assertTrue(board.set_fen(fen) != True)
def test_perft(self):
positions = [
{"fen": "7/7/7/7/7/7/7 x", "nodes": [1, 0, 0, 0, 0]},
{"fen": "x5o/7/7/7/7/7/o5x x", "nodes": [1, 16, 256, 6460, 155888]},
{"fen": "x5o/7/2-1-2/7/2-1-2/7/o5x o", "nodes": [1, 14, 196, 4184, 86528]},
{"fen": "x5o/7/2-1-2/3-3/2-1-2/7/o5x x", "nodes": [1, 14, 196, 4100, 83104]},
{"fen": "x5o/7/3-3/2-1-2/3-3/7/o5x o", "nodes": [1, 16, 256, 5948, 133264]},
{"fen": "7/7/7/7/2-----/2-----/2--x1o x", "nodes": [1, 1, 0, 0, 0]},
{"fen": "7/7/7/7/2-----/2-----/2--x1o o", "nodes": [1, 1, 0, 0, 0]},
]
depth = 4
for position in positions:
fen = position["fen"]
board = ataxx.Board(fen)
for idx, nodes in enumerate(position["nodes"]):
if idx > depth:
break
self.assertTrue(board.perft(idx) == nodes)
def test_single_double(self):
fens = [
"x5o/7/7/7/7/7/o5x x",
"x5o/7/2-1-2/7/2-1-2/7/o5x o",
"x5o/7/2-1-2/3-3/2-1-2/7/o5x x",
"x5o/7/3-3/2-1-2/3-3/7/o5x o"
]
for fen in fens:
board = ataxx.Board(fen)
for move in board.legal_moves():
self.assertTrue(move.is_single() != move.is_double())
def test_from_san(self):
fens = [
"x5o/7/7/7/7/7/o5x x",
"x5o/7/2-1-2/7/2-1-2/7/o5x o",
"x5o/7/2-1-2/3-3/2-1-2/7/o5x x",
"x5o/7/3-3/2-1-2/3-3/7/o5x o"
]
for fen in fens:
board = ataxx.Board(fen)
for move in board.legal_moves():
self.assertTrue(ataxx.Move.from_san(str(move)) == move)
def test_null_move(self):
nullmove = ataxx.Move.null()
self.assertTrue(nullmove == ataxx.Move(-1, -1, -1, -1))
self.assertTrue(nullmove == ataxx.Move.null())
self.assertTrue(nullmove != ataxx.Move(0, 0, 0, 0))
self.assertTrue(str(nullmove) == "0000")
board1 = ataxx.Board()
board2 = ataxx.Board()
# Make the null move
board2.makemove(nullmove)
pieces1, turn1, halfmoves1, _ = board1.get_fen().split(" ")
pieces2, turn2, halfmoves2, _ = board2.get_fen().split(" ")
# Check changes made
self.assertTrue(pieces1 == pieces2)
self.assertTrue(turn1 != turn2)
self.assertTrue(int(halfmoves1)+1 == int(halfmoves2))
def test_single_equality(self):
nums = [0,1,2,3,4,5,6]
squares = [[f,r] for f in nums for r in nums]
for sq_to in squares:
a, b, c, d = sq_to + sq_to
move1 = ataxx.Move(a, b, c, d)
for sq_from in squares:
a, b, c, d = sq_from + sq_to
move2 = ataxx.Move(a, b, c, d)
if move2.is_single():
self.assertTrue(move1 == move2)
elif move2.is_double():
self.assertTrue(move1 != move2)
def test_set_get(self):
nums = [0,1,2,3,4,5,6]
squares = [[f,r] for f in nums for r in nums]
board = ataxx.Board("empty")
for x, y in squares:
for piece in [ataxx.BLACK, ataxx.WHITE, ataxx.GAP, ataxx.EMPTY]:
board.set(x, y, piece)
self.assertTrue(piece == board.get(x, y))
def test_main_line(self):
for _ in range(10):
history = []
# Play random moves on the board
board1 = ataxx.Board("startpos")
while not board1.gameover() and len(history) < 50:
moves = board1.legal_moves()
move = random.choice(moves)
board1.makemove(move)
history.append(move)
# Replay the moves on a new board
board2 = ataxx.Board("startpos")
for move in board1.main_line():
board2.makemove(move)
self.assertTrue(board1.main_line() == history)
self.assertTrue(board1.get_fen() == board2.get_fen())
def test_players(self):
positions = [
{"fen": "x5o/7/7/7/7/7/o5x x", "moves": ["f1", "f2", "g2", "a6", "b6", "b7"]},
{"fen": "x5o/7/2-1-2/7/2-1-2/7/o5x o", "moves": ["a2", "b1", "b2", "g6", "f6", "f7"]},
{"fen": "x5o/7/2-1-2/3-3/2-1-2/7/o5x x", "moves": ["f1", "f2", "g2", "a6", "b6", "b7"]},
{"fen": "x5o/7/3-3/2-1-2/3-3/7/o5x o", "moves": ["a2", "b1", "b2", "g6", "f6", "f7"]},
{"fen": "7/3o3/7/3x3/7/7/7 x", "moves": ["c5", "d5", "e5"]},
{"fen": "3o3/7/7/3x3/7/7/7 x", "moves": ["d4c6", "d4d6", "d4e6"]},
{"fen": "3o3/7/3x3/3x3/7/7/7 x", "moves": ["c6", "d6", "e6"]},
{"fen": "o4oo/7/x5x/7/7/7/7 x", "moves": ["f6", "g6"]},
{"fen": "7/3o3/7/3x3/7/7/3oo2 x", "moves": ["d4d2", "d4e2"]},
{"fen": "7/7/7/7/7/7/7 x", "moves": ["0000"]}
]
for position in positions:
fen = position["fen"]
moves = position["moves"]
# Greedy player
board = ataxx.Board(fen)
for _ in range(100):
move = ataxx.players.greedy(board)
self.assertTrue(str(move) in moves)
def test_make_undo(self):
fens = [
"x5o/7/7/7/7/7/o5x x 0 1",
"x5o/7/2-1-2/7/2-1-2/7/o5x o 0 1",
"x5o/7/2-1-2/3-3/2-1-2/7/o5x x 0 1",
"x5o/7/3-3/2-1-2/3-3/7/o5x o 0 1",
"7/3o3/7/3x3/7/7/3oo2 x 0 1"
]
for fen in fens:
board = ataxx.Board(fen)
while not board.gameover() and board.halfmove_clock < 500:
current_fen = board.get_fen()
# Test all legal moves
for move in board.legal_moves():
board.makemove(move)
board.undo()
self.assertTrue(board.get_fen() == current_fen)
# Test null move
board.makemove(ataxx.Move.null())
board.undo()
self.assertTrue(board.get_fen() == current_fen)
# Pick a random move and keep going
move = random.choice(board.legal_moves())
board.makemove(move)
# Undo every move in the game
while board.main_line():
board.undo()
# Make sure we're back where we started
self.assertTrue(board.get_fen() == fen)
def test_pgn(self):
def random_phrase(n):
return ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.punctuation + string.digits + " ", k=n))
pgns = [
"[Event \"Example 1\"]\n[Black \"Player 1\"]\n[White \"Player 2\"]\n[UTCDate \"1970.01.01\"]\n[UTCTime \"00:00:00\"]\n[FEN \"x5o/7/7/7/7/7/o5x x\"]\n[Result \"*\"]\n\n1. a7c5 a2 2. g2 *",
"[Event \"Example 2\"]\n[Black \"Player 1\"]\n[White \"Player 2\"]\n[UTCDate \"1970.01.01\"]\n[UTCTime \"00:00:00\"]\n[FEN \"x5o/7/7/7/7/7/o5x x\"]\n[Result \"*\"]\n\n1. a7c5 { Test 123 } 1... a2 { Test } 2. g2 *",
"[Event \"Example 3\"]\n[Black \"Player 1\"]\n[White \"Player 2\"]\n[UTCDate \"1970.01.01\"]\n[UTCTime \"00:00:00\"]\n[FEN \"x5o/7/7/7/7/7/o5x x\"]\n[Result \"*\"]\n\n1. a7c7 (1. a7c5 { Test }) 1... g7f5 (1... a2 { Test } 2. g2 (2. f2 { Test })) 2. g1f3 a1b3 { Test 123 } *",
"[Event \"Example 4\"]\n[Black \"Player 1\"]\n[White \"Player 2\"]\n[UTCDate \"1970.01.01\"]\n[UTCTime \"00:00:00\"]\n[FEN \"x5o/7/7/7/7/7/o5x x\"]\n[Result \"*\"]\n\n1. a7c7 { Test } (1. a7c5 { Test }) 1... g7f5 (1... a2 { Test } 2. g2 (2. f2 { Test } 2... a1c2)) 2. g1f3 a1b3 { Test 123 } *"
]
# Test some known pgn strings
for pgn in pgns:
self.assertTrue(str(ataxx.pgn.parse(pgn)) == pgn)
# Try parse some random games
# These won't have variations or comments in them
for _ in range(10):
board = ataxx.Board()
while not board.gameover() and board.halfmove_clock < 500:
move = ataxx.players.random_move(board)
board.makemove(move)
pgn = ataxx.pgn.Game()
pgn.headers["Event"] = random_phrase(12)
pgn.headers["Black"] = random_phrase(12)
pgn.headers["White"] = random_phrase(12)
pgn.headers["FEN"] = ataxx.FEN_STARTPOS
pgn.headers["Result"] = board.result()
pgn.from_board(board)
# Human readable pgn string
pgn_string = str(pgn)
# Test: pgn string ---> pgn ---> pgn string
self.assertTrue(str(ataxx.pgn.parse(pgn_string)) == pgn_string)
# Check the pgn main line matches the board
moves = [n.move for n in pgn.main_line()]
self.assertTrue(moves == board.main_line())
# Create a pgn ourselves
game = ataxx.pgn.Game()
game.headers["FEN"] = ataxx.FEN_STARTPOS
game.headers["Result"] = "*"
node = game.add_variation(ataxx.Move.from_san("g2"), comment="First move")
node = node.add_variation(ataxx.Move.from_san("a1a3"), comment="Second move")
self.assertTrue(str(game) == "[Event \"Example\"]\n[FEN \"x5o/7/7/7/7/7/o5x x 0 1\"]\n[Result \"*\"]\n\n1. g2 { First move } a1a3 { Second move } *")
def test_result(self):
positions = [
{"fen": "x5o/7/7/7/7/7/o5x x", "result": "*"},
{"fen": "x5o/7/7/7/7/7/o5x o", "result": "*"},
{"fen": "x5o/7/2-1-2/7/2-1-2/7/o5x x", "result": "*"},
{"fen": "x5o/7/2-1-2/7/2-1-2/7/o5x o", "result": "*"},
{"fen": "x6/7/7/7/7/7/7 x", "result": "1-0"},
{"fen": "x6/7/7/7/7/7/7 o", "result": "1-0"},
{"fen": "o6/7/7/7/7/7/7 x", "result": "0-1"},
{"fen": "o6/7/7/7/7/7/7 o", "result": "0-1"},
{"fen": "1xxxxxx/xxxxxxx/xxxxxxx/xxxxooo/ooooooo/ooooooo/ooooooo x", "result": "*"},
{"fen": "1xxxxxx/xxxxxxx/xxxxxxx/xxxxooo/ooooooo/ooooooo/ooooooo o", "result": "*"},
{"fen": "1oooooo/ooooooo/ooooooo/ooooxxx/xxxxxxx/xxxxxxx/xxxxxxx x", "result": "*"},
{"fen": "1oooooo/ooooooo/ooooooo/ooooxxx/xxxxxxx/xxxxxxx/xxxxxxx o", "result": "*"},
{"fen": "xxxxxxx/xxxxxxx/xxxxxxx/xxxxooo/ooooooo/ooooooo/ooooooo x", "result": "1-0"},
{"fen": "xxxxxxx/xxxxxxx/xxxxxxx/xxxxooo/ooooooo/ooooooo/ooooooo o", "result": "1-0"},
{"fen": "ooooooo/ooooooo/ooooooo/ooooxxx/xxxxxxx/xxxxxxx/xxxxxxx x", "result": "0-1"},
{"fen": "ooooooo/ooooooo/ooooooo/ooooxxx/xxxxxxx/xxxxxxx/xxxxxxx o", "result": "0-1"},
{"fen": "7/7/7/7/7/7/7 o", "result": "1/2-1/2"},
{"fen": "x5o/7/7/7/7/7/o5x x 99 0", "result": "*"},
{"fen": "x5o/7/7/7/7/7/o5x x 100 0", "result": "1/2-1/2"},
{"fen": "x5o/7/7/7/7/7/o5x x 0 400", "result": "*"},
{"fen": "x5o/7/7/7/7/7/o5x x 0 401", "result": "1/2-1/2"},
]
for position in positions:
fen = position["fen"]
result = position["result"]
board = ataxx.Board(fen)
# Check the result is right
self.assertTrue(board.result() == result)
# Check that if we double pass (null move) we get a decisive result
if result == "*":
board.makemove(ataxx.Move.null())
board.makemove(ataxx.Move.null())
self.assertTrue(board.result() != "*")
def test_counters(self):
positions = [
{"move": "g1f3", "fen": "x5o/7/7/7/5x1/7/o6 o 1 1"},
{"move": "a1c1", "fen": "x5o/7/7/7/5x1/7/2o4 x 2 2"},
{"move": "b6", "fen": "x5o/1x5/7/7/5x1/7/2o4 o 0 2"},
{"move": "c1e3", "fen": "x5o/1x5/7/7/4oo1/7/7 x 0 3"},
{"move": "0000", "fen": "x5o/1x5/7/7/4oo1/7/7 o 1 3"},
]
        board = ataxx.Board()
for position in positions:
move = position["move"]
fen = position["fen"]
board.makemove(ataxx.Move.from_san(move))
self.assertTrue(board.get_fen() == fen)
def test_draws(self):
# Check nullmove draw conditions
board = ataxx.Board()
board.makemove(ataxx.Move.null())
board.makemove(ataxx.Move.null())
self.assertTrue(board.gameover())
self.assertFalse(board.fifty_move_draw())
self.assertFalse(board.max_length_draw())
# Check double move draw conditions
board = ataxx.Board()
for i in range(500):
if i < 50:
self.assertFalse(board.gameover())
self.assertFalse(board.fifty_move_draw())
self.assertFalse(board.max_length_draw())
elif i < 400:
self.assertTrue(board.gameover())
self.assertTrue(board.fifty_move_draw())
self.assertFalse(board.max_length_draw())
else:
self.assertTrue(board.gameover())
self.assertTrue(board.fifty_move_draw())
self.assertTrue(board.max_length_draw())
if i % 2 == 0:
board.makemove(ataxx.Move.from_san("g1g3"))
board.makemove(ataxx.Move.from_san("a1a3"))
else:
board.makemove(ataxx.Move.from_san("g3g1"))
board.makemove(ataxx.Move.from_san("a3a1"))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3292533
|
import argparse
import os
import sys
import pandas as pd
from keras import backend as K
from keras_radam import RAdam
from augmentations import *
from losses import *
from model import *
from siim_data_loader import *
from utils import *
from segmentation_models import Unet
parser = argparse.ArgumentParser()
parser.add_argument("--network", default="UEfficientNetV2")
parser.add_argument("--fine_size", default=512, type=int, help="Resized image size")
parser.add_argument("--batch_size", default=5, type=int, help="Batch size for training")
parser.add_argument("--train_path", default="pneumotorax512/train/", help="train path")
parser.add_argument("--masks_path", default="pneumotorax512/masks/", help="mask path")
parser.add_argument("--test_path", default="pneumotorax512/test/", help="test path")
parser.add_argument("--pretrain_weights", help="pretrain weights")
parser.add_argument("--epoch", default=30, type=int, help="Number of training epochs")
parser.add_argument("--swa_epoch", default=15, type=int, help="Number of swa epochs")
parser.add_argument("--debug", default=False, type=bool, help="Debug")
args = parser.parse_args()
def train(
list_train,
list_valid,
train_path,
masks_path,
model,
epoch,
batch_size,
fold,
imh_size,
swa_epoch,
):
swa = SWA(f"models/keras_swa_{fold}.model", swa_epoch)
snapshot = SnapshotCallbackBuilder(swa, epoch, 1, fold)
training_generator = DataGenerator(
list_train, train_path, masks_path, AUGMENTATIONS_TRAIN, batch_size, imh_size
)
validation_generator = DataGenerator(
list_valid, train_path, masks_path, AUGMENTATIONS_TEST, batch_size, imh_size
)
history = model.fit_generator(
generator=training_generator,
validation_data=validation_generator,
use_multiprocessing=False,
epochs=epoch,
verbose=2,
callbacks=snapshot.get_callbacks(),
)
if __name__ == "__main__":
debug = args.debug
df = pd.read_csv("train_proc_v2_gr.csv")
test_files = [
os.path.splitext(filename)[0]
for filename in os.listdir(os.path.join(os.getcwd(), args.test_path))
]
df_test = pd.DataFrame(test_files, columns=["id"])
epoch = args.epoch
list_thresh = []
for fold in [0]:
print("-----fold-----")
df_train = df[df.fold != fold].copy().reset_index(drop=True)
df_valid = df[df.fold == fold].copy().reset_index(drop=True)
if debug:
df_train = df[df.fold != 0].copy().reset_index(drop=True)
df_valid = df[df.fold == 0].copy().reset_index(drop=True)
df_train = df_train.iloc[:60]
df_valid = df_train.iloc[:60]
df_test = df_test.iloc[:60]
epoch = 3
K.clear_session()
model = get_network(
args.network, input_shape=(args.fine_size, args.fine_size, 3), drop_out=0.5
)
model.compile(loss=bce_dice_loss, optimizer="adam", metrics=[my_iou_metric])
train(
df_train["id"].values,
df_valid["id"].values,
args.train_path,
args.masks_path,
model,
epoch,
args.batch_size,
fold,
args.fine_size,
args.swa_epoch,
)
try:
print("using swa weight model")
model.load_weights(f"models/keras_swa_{fold}.model")
except Exception as e:
print(e)
model.load_weights(f"models/keras_{fold}.model")
val_predict = predict_validation_result(
model,
args.train_path,
args.masks_path,
df_valid["id"].values,
args.batch_size,
args.fine_size,
)
best_threshhold = prderict_best_threshhold(
df_valid["id"].values, args.masks_path, val_predict, args.fine_size
)
list_thresh.append(best_threshhold)
predict = predict_result(
model,
df_test["id"].values,
args.test_path,
args.fine_size,
best_threshhold,
args.batch_size,
fold,
)
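        # Accumulate predictions across folds; since only fold 0 is trained above,
        # this is simply the fold-0 prediction.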
if fold == 0:
preds_test = predict
else:
preds_test += predict
submit(preds_test, df_test["id"].values, args.network, max(list_thresh))
|
StarcoderdataPython
|
80066
|
import coreapi
import json
import requests
from rest_framework import status, renderers
from rest_framework.response import Response
from rest_framework.views import APIView
from config.settings.base import FAIRSHAKE_TOKEN
from presqt.api_v1.utilities import (
fairshake_request_validator, fairshake_assessment_validator, fairshare_results)
from presqt.utilities import PresQTValidationError, read_file
class FairshakeAssessment(APIView):
"""
"""
renderer_classes = [renderers.JSONRenderer]
def get(self, request, rubric_id):
"""
Get details of the provided rubric.
This includes the metrics expected to be answered by the end user.
Returns
-------
200: OK
{
"metrics": {
"30": "The structure of the repository permits efficient discovery of data and metadata by end users.",
"31": "The repository uses a standardized protocol to permit access by users.",
"32": "The repository provides contact information for staff to enable users with questions or suggestions to interact with repository experts.",
"33": "Tools that can be used to analyze each dataset are listed on the corresponding dataset pages.",
"34": "The repository maintains licenses to manage data access and use.",
"35": "The repository hosts data and metadata according to a set of defined criteria to ensure that the resources provided are consistent with the intent of the repository.",
"36": "The repository provides documentation for each resource to permit its complete and accurate citation.",
"37": "A description of the methods used to acquire the data is provided.",
"38": "Version information is provided for each resource, where available."
},
"answer_options": {
"0.0": "no",
"0.25": "nobut",
"0.5": "maybe",
"0.75": "yesbut",
"1.0": "yes"
}
}
400: Bad Request
{
"error": "PresQT Error: 'egg' is not a valid rubric id. Choices are: ['93', '94', '95', '96']"
}
"""
rubrics = ['93', '94', '95', '96']
if rubric_id not in rubrics:
return Response(data={
'error': f"PresQT Error: '{rubric_id}' is not a valid rubric id. Choices are: {rubrics}"},
status=status.HTTP_400_BAD_REQUEST)
metrics = read_file(
'presqt/specs/services/fairshake/fairshake_test_fetch.json', True)[rubric_id]
answer_options = read_file(
'presqt/specs/services/fairshake/fairshake_score_translator.json', True)
# Make these dicts for FE ease
new_metrics = []
for key, value in metrics.items():
new_metrics.append({
'id': key,
'metric_value': value
})
new_answer_options = []
for key, value in answer_options.items():
new_answer_options.append({
'value': key,
'value_text': value
})
payload = {
"metrics": new_metrics,
"answer_options": new_answer_options
}
return Response(data=payload, status=status.HTTP_200_OK)
def post(self, request, rubric_id):
"""
Returns assessment results to the user.
Returns
-------
200: OK
{
"digital_object_id": 166055,
"rubric_responses": [
{
"metric": "The structure of the repository permits efficient discovery of data and metadata by end users.",
"score": "0.0",
"score_explanation": "no"
},
...
]
}
400: Bad Request
{
"error": "PresQT Error: 'eggs' is not a valid rubric id. Options are: ['93', '94', '95', '96']"
}
or
{
"error": "PresQT Error: 'project_url' missing in POST body."
}
or
{
"error": "PresQT Error: 'project_title' missing in POST body."
}
or
{
"error": "PresQT Error: 'rubric_answers' missing in POST body."
}
or
{
"error": "PresQT Error: 'rubric_answers' must be an object with the metric id's as the keys and answer values as the values."
}
or
{
"error": "Missing response for metric '30'. Required metrics are: ['30', '31', '32']"
}
or
{
"error": "'egg' is not a valid answer. Options are: ['0.0', '0.25', '0.5', '0.75', '1.0']"
}
or
{
"error": "'egg' is not a valid metric. Required metrics are: ['30', '31', '32']"
}
"""
try:
rubric_id, digital_object_type, project_url, project_title = fairshake_request_validator(
request, rubric_id)
rubric_answers = fairshake_assessment_validator(request, rubric_id)
except PresQTValidationError as e:
return Response(data={'error': e.data}, status=e.status_code)
# First we need to register our new `digital_object` using this information
# The PresQT project id on FAIRshake is 116
project_id = 116
try:
client = coreapi.Client(auth=coreapi.auth.TokenAuthentication(
token=FAIRSHAKE_TOKEN, scheme='token'))
schema = client.get('https://fairshake.cloud/coreapi/')
except coreapi.exceptions.ErrorMessage:
return Response(data={'error': "FAIRshake Error: Invalid token provided in code."},
status=status.HTTP_400_BAD_REQUEST)
try:
digital_object = client.action(schema, ['digital_object', 'create'], params=dict(
url=project_url,
title=project_title,
projects=[project_id],
type=digital_object_type,
rubrics=[int(rubric_id)]
))
digital_object_id = digital_object['id']
except coreapi.exceptions.ErrorMessage:
return Response(data={'error': "FAIRshake Error: Returned an error trying to register digital object."},
status=status.HTTP_400_BAD_REQUEST)
if rubric_id != '96':
# Do the manual assessment here
assessment_answers = []
# Need to translate the JSON strings to ints and floats
for key, value in rubric_answers.items():
assessment_answers.append({
'metric': int(key),
'answer': float(value)
})
else:
# We gonna try and do automatic stuff
data = {
'resource': project_url,
'executor': 'PresQT',
'title': f"{project_title} FAIR Evaluation"
}
# Send the data to FAIRshare to run automatic tests
response = requests.post(
'https://w3id.org/FAIR_Evaluator/collections/16/evaluate',
headers={"Content-Type": "application/json", "Accept": "application/json"},
data=json.dumps(data))
if response.status_code != 200:
return Response(data={'error': "FAIRshare returned a {} error trying to process the request".format(response.status_code)},
status=status.HTTP_503_SERVICE_UNAVAILABLE)
response_json = response.json()
results = fairshare_results(response_json, [1, 5, 8, 10, 17, 22])
rubric_translator = read_file(
'presqt/specs/services/fairshake/fairshare_fairshake_helper.json', True)
assessment_answers = []
for result in results:
# We need to build a dict for the assessment.
# Translate test to rubric number
metric = rubric_translator[result['metric_link']]
# Translate successes and failures to yes's and no's
if result['successes']:
answer = 1.0
elif result['failures']:
answer = 0.0
else:
answer = 0.5
assessment_answers.append({
'metric': metric,
'answer': answer
})
try:
# Do the assessment
assessment = client.action(schema, ['assessment', 'create'], params=dict(
project=project_id,
target=digital_object_id,
rubric=int(rubric_id),
methodology="self",
answers=assessment_answers,
published=True))
except coreapi.exceptions.ErrorMessage:
return Response(data={'error': "FAIRshake Error: Returned an error trying to post manual assessment."},
status=status.HTTP_400_BAD_REQUEST)
# Bring in our translation files...
test_translator = read_file(
'presqt/specs/services/fairshake/fairshake_test_fetch.json', True)[rubric_id]
score_translator = read_file(
'presqt/specs/services/fairshake/fairshake_score_translator.json', True)
results = []
for score in assessment['answers']:
metric = test_translator[str(score['metric'])]
score_number = str(score['answer'])
            score_words = score_translator.get(score_number, 'not applicable')
results.append({
"metric": metric,
"score": score_number,
"score_explanation": score_words
})
payload = {
"digital_object_id": digital_object_id,
"rubric_responses": results
}
return Response(status=status.HTTP_200_OK,
data=payload)
|
StarcoderdataPython
|
71084
|
<reponame>jason-zl190/sisr
import tensorflow as tf
import subprocess
import atexit
class StartTensorBoard(tf.keras.callbacks.Callback):
def __init__(self, log_dir):
        super().__init__()
self.log_dir = log_dir
    def start_tensorboard(self, log_dir):
        try:
            p = subprocess.Popen(
                ['tensorboard', '--host', '0.0.0.0', '--logdir', self.log_dir])
        except (OSError, subprocess.CalledProcessError) as err:
            print('ERROR:', err)
            return
        atexit.register(lambda: p.kill())
        print('\n\n Starting Tensorboard at: {}\n\n'.format(self.log_dir))
def on_train_begin(self, logs={}):
self.start_tensorboard(self.log_dir)
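# Editor's note (usage sketch, not in the original module): the callback is meant
# to be passed to Keras training like any other callback, e.g.
# model.fit(x, y, callbacks=[StartTensorBoard(log_dir='/tmp/logs')]),
# so TensorBoard is launched when training begins; the log_dir path is a placeholder.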
|
StarcoderdataPython
|
40719
|
<reponame>rafaelcorazzi/game-scraper<filename>src/infrastructure/services/scraper_services.py
import bs4
import requests
from bs4 import BeautifulSoup
import base64
import hashlib
from typing import List
import re
from src.helpers.utils import Utils
from src.domain.console_domain import ConsolePlataform
from src.domain.console_games_domain import ConsoleGames
from src.domain.game_domain import Game
import maya
import uuid
class ScraperServices:
@staticmethod
def __html_result(url: str = None) -> bs4.BeautifulSoup:
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
return soup
@staticmethod
def game_details(link: str = None, reference_id: str = None, console_code: str = None) -> Game:
page = ScraperServices.__html_result(f'https://jogorama.com.br/{link}/')
data_sheet = page.select('.ficha')
title = re.sub('[^A-Za-z0-9]+', ' ', str(page.findAll('span', attrs={"itemprop": "name"})[0].text))
owner = re.sub('[^A-Za-z0-9]+', ' ', str(page.findAll('span', attrs={"itemprop": "author"})[0].text))
publisher = re.sub('[^A-Za-z0-9]+', ' ', str(page.findAll('span', attrs={"itemprop": "publisher"})[0].text))
genre = '' if len(page.findAll('span', attrs={"itemprop": "genre"})) == 0 else str(page.findAll('span', attrs={"itemprop": "genre"})[0].text)
game_detail: Game = Game()
release_year = 0
release_month = 0
release_day = 0
release_date = ''
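        # Parse the release date from the data sheet; the source appears to list
        # dates in the Portuguese form "<day> de <month> de <year>", where the day
        # or the month may be missing.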
if re.search('<b>Lançamento:</b>([^,]+)<br/>', str(data_sheet[0])) is not None:
released = re.search('<b>Lançamento:</b>([^,]+)<br/>', str(data_sheet[0])).group(1)
            result = re.findall(r'\d+', released)[0]
release_day = int(result) if len(result) == 2 else 0
release_year = result if len(result) == 4 else re.findall(r'\d+', released)[1]
if re.search(r'(?<=de)([\S\s]*)(?=de)', released) is not None:
release_month = Utils.month_converter(re.search(r'(?<=de)([\S\s]*)(?=de)', released).group(1).replace(' ', ''))
if release_month > 0 and release_day > 0:
release_date = maya.parse(f'{release_year}-{release_month}-{release_day}').datetime()
game_uuid = f'{reference_id} - {title} - {owner} - {publisher} - {release_year}'
game_detail.game_id = str(uuid.uuid5(uuid.NAMESPACE_URL, game_uuid))
game_detail.reference_id = reference_id
game_detail.title = title
game_detail.console_code = str(uuid.uuid5(uuid.NAMESPACE_URL, console_code))
game_detail.release_date = release_date
game_detail.release_year = release_year
game_detail.cover_image = base64.b64encode(requests.get(f"https://jogorama.com.br/thumbr.php?l=180&a=400&img=capas/{reference_id}.jpg").content)
game_detail.owner = owner
game_detail.publisher = publisher
game_detail.genre = genre
#print(f'{reference_id} - {title} - {owner} - {publisher} - {genre} - {release_date} - {release_year}')
return game_detail
@staticmethod
def list_of_games_by_plataform(console_code: str = None) -> List[ConsoleGames]:
page = ScraperServices.__html_result(f'https://jogorama.com.br/jogos/{console_code}/lista-de-jogos/')
rows_games = page.select('.lista')
glst = []
for r in rows_games:
consoles_games = r.select('li')
for c in consoles_games:
for z in c.find_all('a', href=True):
game_list: ConsoleGames = ConsoleGames()
game_list.console_code = console_code
game_list.reference_id = z['href'].split("/")[3]
game_list.title = z['title'].strip().replace("'", " ")
game_list.link = z['href']
#print(game_list.to_json())
glst.append(game_list.to_json())
return glst
@staticmethod
def list_of_console() -> List[ConsolePlataform]:
        # TODO: move this base URL into configuration
page = ScraperServices.__html_result('https://jogorama.com.br/')
rows = page.select('.menu')
plt = []
clt = []
i = 0
for a in rows:
i += 1
if i == 2:
consoles = a.select('li') # a.select('')
for b in consoles:
console: ConsolePlataform = ConsolePlataform()
console.console_plataform_name = b.select_one('a').text.strip()
console.console_plataform_code = b.select_one('a').text.strip().replace(' ', '-').lower()
console.console_uuid = str(uuid.uuid5(uuid.NAMESPACE_URL, b.select_one('a').text.strip().replace(' ', '-').lower()))
clt.append(console)
plt.append(console.to_json())
return clt, plt
|
StarcoderdataPython
|
3278144
|
<reponame>martinfleis/seashore-streets<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# # Measure contextual morphometric characters
#
# Computational notebook 03 for Climate adaptation plans in the context of coastal settlements: the case of Portugal.
#
# Date: 27/06/2020
#
# ---
#
# This notebook measures contextual (the code uses the older term summative) characters. It requires data from `02_Measure_morphometric_characters.ipynb` and additional manually assigned attributes:
#
# - Attribute `part` in `name_blg` for cases which were divided into parts. Each part should be marked by unique `int`.
# - Attribute `case` in `name_str` capturing which LineStrings form the seashore street itself. (1 - True)
#
# Structure of GeoPackages:
#
# ```
# ./data/
# atlantic.gpkg
# name_blg - Polygon layers
# name_str - LineString layers
# name_case - Polygon layers
# name_tess - Polygon layers
# name_blocks - Polygon layers
# ...
# preatl.gpkg
# name_blg
# name_str
# name_case
# ...
# premed.gpkg
# name_blg
# name_str
# name_case
# ...
# med.gpkg
# name_blg
# name_str
# name_case
# ...
# ```
#
# CRS of the original data is EPSG:3763.
#
# ```
# <Projected CRS: EPSG:3763>
# Name: ETRS89 / Portugal TM06
# Axis Info [cartesian]:
# - X[east]: Easting (metre)
# - Y[north]: Northing (metre)
# Area of Use:
# - name: Portugal - mainland - onshore
# - bounds: (-9.56, 36.95, -6.19, 42.16)
# Coordinate Operation:
# - name: Portugual TM06
# - method: Transverse Mercator
# Datum: European Terrestrial Reference System 1989
# - Ellipsoid: GRS 1980
# - Prime Meridian: Greenwich
# ```
# In[3]:
import geopandas as gpd
import numpy as np
import scipy as sp
import momepy as mm
import pandas as pd
import fiona
import inequality
from inequality.theil import Theil
# In[4]:
fiona.__version__, gpd.__version__, mm.__version__, sp.__version__, np.__version__, pd.__version__, inequality.__version__
# In[ ]:
folder = 'data/'
# In[ ]:
summative = pd.DataFrame()
# In[ ]:
parts = ['atlantic', 'preatl', 'premed', 'med']
for part in parts:
path = folder + part + '.gpkg'
layers = [x[:-4] for x in fiona.listlayers(path) if 'blg' in x]
for l in layers:
buildings = gpd.read_file(path, layer=l + '_blg')
edges = gpd.read_file(path, layer=l + '_str')
tessellation = gpd.read_file(path, layer=l + '_tess')
blocks = gpd.read_file(path, layer=l + '_blocks')
buildings = buildings.merge(edges.drop(columns='geometry'), on='nID', how='left')
buildings = buildings.merge(tessellation.drop(columns=['bID', 'geometry', 'nID']), on='uID', how='left')
data = buildings.merge(blocks.drop(columns='geometry'), on='bID', how='left')
to_summ = ['sdbAre', 'sdbPer', 'ssbCCo', 'ssbCor', 'ssbSqu', 'ssbERI',
'ssbElo', 'ssbCCD', 'stbCeA', 'mtbSWR', 'mtbAli', 'mtbNDi', 'ldbPWL',
'stbSAl', 'ltcBuA', 'sssLin', 'sdsSPW', 'stsOpe', 'svsSDe', 'sdsAre', 'sdsBAr', 'sisBpM',
'sdcLAL', 'sdcAre', 'sscERI', 'sicCAR', 'stcSAl', 'ldkAre', 'lskElo', 'likGra', 'meshedness',
]
spec = ['sdsLen']
if 'part' in data.columns:
for part in set(data.part):
subset = data.loc[data.part == part]
for col in to_summ:
values = subset[col]
values_IQ = mm.limit_range(values, rng=(25, 75))
values_ID = mm.limit_range(values, rng=(10, 90))
summative.loc[l + str(part), col + '_meanIQ'] = np.mean(values_IQ)
summative.loc[l + str(part), col + '_rangeIQ'] = sp.stats.iqr(values)
summative.loc[l + str(part), col + '_TheilID'] = Theil(values_ID).T
for col in spec:
values = subset.loc[subset.case == 1][col]
values_IQ = mm.limit_range(values, rng=(25, 75))
values_ID = mm.limit_range(values, rng=(10, 90))
summative.loc[l + str(part), col + '_meanIQ'] = np.mean(values_IQ)
summative.loc[l + str(part), col + '_rangeIQ'] = sp.stats.iqr(values)
summative.loc[l + str(part), col + '_TheilID'] = Theil(values_ID).T
else:
for col in to_summ:
values = data[col]
values_IQ = mm.limit_range(values, rng=(25, 75))
values_ID = mm.limit_range(values, rng=(10, 90))
summative.loc[l, col + '_meanIQ'] = np.mean(values_IQ)
summative.loc[l, col + '_rangeIQ'] = sp.stats.iqr(values)
summative.loc[l, col + '_TheilID'] = Theil(values_ID).T
for col in spec:
values = data.loc[data.case == 1][col]
values_IQ = mm.limit_range(values, rng=(25, 75))
values_ID = mm.limit_range(values, rng=(10, 90))
summative.loc[l, col + '_meanIQ'] = np.mean(values_IQ)
summative.loc[l, col + '_rangeIQ'] = sp.stats.iqr(values)
summative.loc[l, col + '_TheilID'] = Theil(values_ID).T
# In[ ]:
summative.to_csv('data/summative_data.csv')
|
StarcoderdataPython
|
3394810
|
import argparse
import json
import datetime
from convert_json_line_to_point import convert_json_line_to_point
from influxdb.line_protocol import make_lines
def main(k6json_location, output_location):
with open(k6json_location, 'r') as f:
lines = f.readlines()
output_file = open(output_location, 'w+')
for line in lines:
point = convert_json_line_to_point(line)
if point:
output_file.write(to_line_protocol(point))
output_file.close()
def to_line_protocol(point):
data = {
'points': [point]
}
return make_lines(data)
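# Illustrative usage of to_line_protocol (a sketch; the point layout follows the
# influxdb-python "points" convention and the concrete measurement/field names
# below are assumptions, not taken from this project):
#   sample_point = {
#       'measurement': 'http_req_duration',
#       'tags': {'status': '200'},
#       'fields': {'value': 105.3},
#       'time': '2019-07-05T10:24:04Z',
#   }
#   to_line_protocol(sample_point)
#   # -> a single line such as 'http_req_duration,status=200 value=105.3 <timestamp>\n'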
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'json', help='Location of the k6 JSON results file')
parser.add_argument(
'output', help='File where to save the converted result')
args = parser.parse_args()
main(args.json, args.output)
|
StarcoderdataPython
|
198112
|
<filename>setup.py
#!/usr/bin/env python
from setuptools import setup
ver_dic = {}
version_file = open("logpyle/version.py")
try:
version_file_contents = version_file.read()
finally:
version_file.close()
exec(compile(version_file_contents, "logpyle/version.py", 'exec'), ver_dic)
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name="logpyle",
version=ver_dic["VERSION_TEXT"],
description="Time series logging for Python",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
],
python_requires='~=3.6',
install_requires=[
"six>=1.8.0",
"pytools>=2011.1",
"matplotlib",
"pymbolic",
],
package_data={"logpyle": ["py.typed"]},
scripts=[
"bin/logtool",
"bin/runalyzer-gather",
"bin/runalyzer",
],
author="<NAME>",
url="https://github.com/illinois-ceesd/logpyle",
author_email="<EMAIL>",
license="MIT",
packages=["logpyle"])
|
StarcoderdataPython
|
1685019
|
<gh_stars>0
"""
get words which are both semantically and phonetically associated with the given word
"""
from .candidate.datamuse import meanslike
from .candidate.wordnet import senselike
from .measurement.measure import measure
def get_candidate(word):
"""get candidate word set
@word -- the given word
@return -- a set of candidate words
"""
candidates = set()
candidates |= meanslike(word)
candidates |= senselike(word)
# remove '_' and '-' between words --> candidates is a LIST now
candidates = [w.replace('_', ' ') for w in candidates]
candidates = [w.replace('-', ' ') for w in candidates]
# remove words which contains special characters (e.g. Ann's book)
candidates = [w for w in candidates if ''.join(w.split()).isalpha()]
# remove phrase has more than two words
candidates = [w for w in candidates if len(w.split()) < 3]
# turn all words into lowercase
candidates = [w.lower() for w in candidates]
# remove words contain word itself
candidates = [w for w in candidates if not (word in w)]
return candidates
def semanphone(word):
"""finish all works here
@word -- the given word
@return -- a list of (word, score) pairs
"""
# word should be lower case
word = word.lower()
candidates = get_candidate(word)
performance = [(w, measure(word, w)) for w in candidates]
winners = sorted(performance, key=lambda x:x[1], reverse=True)
QUOTA = 5
# change format for the database
return [w[0] for w in winners[:min(QUOTA, len(winners))]]
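# Illustrative call (a sketch; results depend on the external Datamuse/WordNet
# services, so the exact output is not reproducible):
#   semanphone('ocean')  # -> up to QUOTA (5) candidate words, ranked by the
#                        #    combined semantic/phonetic score from measure()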
def main(word):
"""for testing functions in this module
"""
output_file = word + '_semanphone'
with open(output_file, 'w+') as output:
for w in semanphone(word):
# semanphone() returns plain words, not (word, score) pairs
output.write(w)
output.write('\n')
print(output_file + " was generated successfully!")
if __name__ == '__main__':
import plac
plac.call(main)
|
StarcoderdataPython
|
1678569
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by <NAME> <<EMAIL>>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Takes care of adapting pyDAL to web2py's needs
-----------------------------------------------
"""
from pydal import DAL as DAL
from pydal import Field
from pydal.objects import Row, Rows, Table, Query, Expression
from pydal import SQLCustomType, geoPoint, geoLine, geoPolygon
def _default_validators(db, field):
"""
Field type validation, using web2py's validators mechanism.
makes sure the content of a field is in line with the declared
fieldtype
"""
from gluon import validators
field_type, field_length = field.type, field.length
requires = []
if field_type in (('string', 'text', 'password')):
requires.append(validators.IS_LENGTH(field_length))
elif field_type == 'json':
requires.append(validators.IS_EMPTY_OR(validators.IS_JSON()))
elif field_type == 'double' or field_type == 'float':
requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
elif field_type == 'integer':
requires.append(validators.IS_INT_IN_RANGE(-2**31, 2**31))
elif field_type == 'bigint':
requires.append(validators.IS_INT_IN_RANGE(-2**63, 2**63))
elif field_type.startswith('decimal'):
requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
elif field_type == 'date':
requires.append(validators.IS_DATE())
elif field_type == 'time':
requires.append(validators.IS_TIME())
elif field_type == 'datetime':
requires.append(validators.IS_DATETIME())
elif db and field_type.startswith('reference') and \
field_type.find('.') < 0 and \
field_type[10:] in db.tables:
referenced = db[field_type[10:]]
if hasattr(referenced, '_format') and referenced._format:
requires = validators.IS_IN_DB(db, referenced._id,
referenced._format)
if field.unique:
requires._and = validators.IS_NOT_IN_DB(db, field)
if field.tablename == field_type[10:]:
return validators.IS_EMPTY_OR(requires)
return requires
elif db and field_type.startswith('list:reference') and \
field_type.find('.') < 0 and \
field_type[15:] in db.tables:
referenced = db[field_type[15:]]
if hasattr(referenced, '_format') and referenced._format:
requires = validators.IS_IN_DB(db, referenced._id,
referenced._format, multiple=True)
else:
requires = validators.IS_IN_DB(db, referenced._id,
multiple=True)
if field.unique:
requires._and = validators.IS_NOT_IN_DB(db, field)
if not field.notnull:
requires = validators.IS_EMPTY_OR(requires)
return requires
if field.unique:
requires.append(validators.IS_NOT_IN_DB(db, field))
sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
if field.notnull and not field_type[:2] in sff:
requires.append(validators.IS_NOT_EMPTY())
elif not field.notnull and field_type[:2] in sff and requires:
requires[0] = validators.IS_EMPTY_OR(requires[0])
return requires
from gluon.serializers import custom_json, xml
from gluon.utils import web2py_uuid
from gluon import sqlhtml
DAL.serializers = {'json': custom_json, 'xml': xml}
DAL.validators_method = _default_validators
DAL.uuid = lambda x: web2py_uuid()
DAL.representers = {
'rows_render': sqlhtml.represent,
'rows_xml': sqlhtml.SQLTABLE
}
DAL.Field = Field
DAL.Table = Table
#: add web2py contrib drivers to pyDAL
from pydal.drivers import DRIVERS
if not DRIVERS.get('pymysql'):
try:
from .contrib import pymysql
DRIVERS['pymysql'] = pymysql
except:
pass
if not DRIVERS.get('pyodbc'):
try:
from .contrib import pypyodbc as pyodbc
DRIVERS['pyodbc'] = pyodbc
except:
pass
if not DRIVERS.get('pg8000'):
try:
from .contrib import pg8000
DRIVERS['pg8000'] = pg8000
except:
pass
|
StarcoderdataPython
|
3396558
|
n = int(input('Enter the number whose multiplication table you want: '))
if n > 0:
for i in range(1, n + 1):
for j in range(i, i * n + 1, i):
print(j,end=' ')
print()
else:
print('ERROR. Enter a number greater than zero.')
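# Example: for n = 3 the nested loops print
#   1 2 3
#   2 4 6
#   3 6 9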
|
StarcoderdataPython
|
3350191
|
"""Execute all known QJs, run the query portion of a QJ, remediate a QJ and prune results according to Job config
settings"""
import os.path
from typing import Any, Dict
from altimeter.qj.config import QJHandlerConfig
from altimeter.qj.lambdas.executor import executor
from altimeter.qj.lambdas.pruner import pruner
from altimeter.qj.lambdas.query import query
from altimeter.qj.lambdas.remediator import remediator
class InvalidLambdaModeException(Exception):
"""Indicates the mode associated with the queryjob lambda is invalid"""
def lambda_handler(event: Dict[str, Any], _: Any) -> None:
"""Lambda entrypoint"""
handler = QJHandlerConfig()
if handler.mode == "executor":
executor(event)
elif handler.mode == "query":
query(event)
elif handler.mode == "pruner":
pruner()
elif handler.mode == "remediator":
remediator(event)
else:
raise InvalidLambdaModeException(
f"Invalid lambda MODE value.\nENV: {os.environ}\nEvent: {event}"
)
|
StarcoderdataPython
|
3376861
|
<filename>code/robotling/main.py
# ----------------------------------------------------------------------------
# main.py
# Main program; is automatically executed after reboot.
#
# For decription, see `hexbug.py`
#
# The MIT License (MIT)
# Copyright (c) 2019 <NAME>
# 2018-12-22, reorganised into a module with the class `Hexbug` and a simpler
# main program file (`main.py`). All hardware-related settings
# moved to separate file (`hexbug_config-py`)
# 2018-12-28, adapted to using the new `spin_ms()` function to keep the
# robotling board updated; does not require a Timer anymore.
# For details, see `robotling.py`.
# 2019-04-07, added new "behaviour" (take a nap)
# 2019-07-13, added new "behaviour" (find light)
# `hexbug_config.py` reorganised and cleaned up
# 2019-07-24, added "memory" to turn direction (experimental)
# 2019-08-19, in case of an uncaught exception, it tries to send a last
# MQTT message containing the traceback to the broker
# 2021-04-29, Some refactoring; configuration parameters now clearly marked
# as such (`cfg.xxx`)
#
# ----------------------------------------------------------------------------
from hexbug import *
# ----------------------------------------------------------------------------
def main():
# Setup the robot's spin function
r.spin_ms(period_ms=cfg.TM_PERIOD, callback=r.housekeeper)
# Angle the IR sensor towards floor in front
r.ServoRangingSensor.angle = cfg.SCAN_DIST_SERVO
r.spin_ms(100)
# Loop ...
print("Entering loop ...")
try:
try:
lastTurnDir = 0
round = 0
while True:
try:
r.onLoopStart()
if r.onHold:
# Some problem was detected (e.g. robot tilted etc.), skip all
# the following code
lastTurnDir = 0
continue
r._t.spin()
finally:
# Make sure the robotling board get updated at least once per loop
r.spin_ms()
round += 1
except KeyboardInterrupt:
print("Loop stopped.")
finally:
# Make sure that robot is powered down
r.ServoRangingSensor.off()
r.powerDown()
r.printReport()
# ----------------------------------------------------------------------------
# Create instance of HexBug, derived from the Robotling class
r = HexBug(cfg.MORE_DEVICES)
# Call main
if __name__ == "__main__":
try:
main()
except Exception as e:
# Create an in-memory file-like string object (`sIO`) to accept the
# exception's traceback (`sys.print_exception` requires a file-like
# object). This traceback string is then converted into a dictionary
# to be sent as an MQTT message (if telemetry is activate)
if cfg.SEND_TELEMETRY and r._t._isReady:
import sys, uio
sIO = uio.StringIO()
sys.print_exception(e, sIO)
r._t.publishDict(KEY_RAW, {KEY_DEBUG: str(sIO.getvalue())})
# Re-raise the exception such that it becomes also visible in the REPL
raise e
# ----------------------------------------------------------------------------
|
StarcoderdataPython
|
184004
|
from bs4 import BeautifulSoup
import requests
import json
# instagram URL
URL = "https://www.instagram.com/{}/"
# parse function
def parse_data(s):
# creating a dictionary
data = {}
# splitting the content
# then taking the first part
s = s.split("-")[0]
# again splitting the content
s = s.split(" ")
# assigning the values
data['UserName'] = username
data['Followers'] = s[0]
data['Following'] = s[2]
data['Posts'] = s[4]
# returning the dictionary
return data
# scrape function
def scrape_data(username):
# getting the request from url
r = requests.get(URL.format(username))
# converting the text
s= BeautifulSoup(r.text, "html.parser")
# finding meta info
meta = s.find("meta", property="og:description")
# calling parse method
return parse_data(meta.attrs['content'])
def write_json(data, filename='data.json'):
with open(filename,"w") as f:
json.dump(data, f, indent=4)
#main function
if __name__ == "__main__":
username="satyu.26"
# calling scrape function
data = scrape_data(username)
#storing data into JSON
fetch = write_json(data)
# printing the info
print(data)
|
StarcoderdataPython
|
1728865
|
<reponame>y2kconnect/utilities
# -*- encoding: utf-8 -*-
# python apps
import chardet
import email
import magic
import mimetypes
import os
def analysis_filename(f_name):
'Parse a file name into (directory, name, base name, extension)'
s_dir = s_name = base_name = ext_name = None
s_dir, s_name = os.path.split(f_name)
if s_name:
base_name, ext_name = os.path.splitext(s_name)
return (s_dir, s_name, base_name, ext_name)
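# Illustrative behaviour:
#   analysis_filename('/tmp/report.tar.gz')  ->  ('/tmp', 'report.tar.gz', 'report.tar', '.gz')
#   analysis_filename('/tmp/dir/')           ->  ('/tmp/dir', '', None, None)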
def bytes_decode(msg_b, arr_code=None):
""" bytes解码
msg_b 字符串的二进制格式
arr_code 编码的名称
"""
if not isinstance(msg_b, bytes) or len(msg_b) == 0:
return msg_b
if arr_code is None:
info_chardet = chardet.detect(msg_b)
arr_code = (info_chardet['encoding'],)
elif not hasattr(arr_code, '__iter__'):
arr_code = [arr_code]
for code in arr_code:
try:
s = msg_b.decode(code)
break
except UnicodeDecodeError:
pass
else:
raise UnicodeDecodeError('unknown', msg_b, 0, len(msg_b), 'none of the given encodings could decode the data')
return s
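# Illustrative behaviour (the sample bytes are made up for the example):
#   bytes_decode('编码'.encode('gbk'), ['utf-8', 'gbk'])  ->  '编码'   (utf-8 fails, gbk succeeds)
#   bytes_decode(b'hello')                                ->  'hello'  (encoding auto-detected via chardet)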
def check_filename_length(filename):
""" 检查文件名超长 """
if isinstance(f_name, str):
n = len(f_name.encode())
elif isinstance(f_name, bytes):
n = len(f_name)
else:
msg = "文件名类型错误!filename: {}".format(filename)
raise ValueError(msg)
if 220 < n:
f_name = "{}...{}".format(f_name[:20], f_name[-20:])
else:
f_name = f_name
return f_name
def check_filename_repeat(f_name):
'Avoid duplicate file names by appending a numeric suffix'
s_dir, s_name, base_name, ext_name = analysis_filename(f_name)
filename = f_name
i = 0
while os.path.exists(filename):
# the file already exists, try the next numeric suffix
i = i + 1
s_name = "{}_{}{}".format(base_name, i, ext_name)
filename = os.path.join(s_dir, s_name)
return filename
def extract_email(f_name, msg_b=None, output_dir=None):
''' Unpack the contents of a file (news or email format)
f_name      file name
msg_b       file contents (binary)
output_dir  output directory
'''
arr_filename = []
arr_error = []
try:
s_dir, s_name, base_name, ext_name = analysis_filename(f_name)
if output_dir:
path_root = os.path.join(output_dir, base_name.strip())
else:
path_root = os.path.join(s_dir, base_name.strip())
if not os.path.exists(path_root):
os.mkdir(path_root)
if msg_b is None:
with open(f_name, 'rb') as f:
msg_b = f.read()
info_chardet = chardet.detect(msg_b)
msg = msg_b.decode(info_chardet['encoding'])
obj_message = email.message_from_string(msg)
for i, part in enumerate(obj_message.walk()):
try:
if part.get_content_maintype() == 'multipart':
continue
data = part.get_payload(decode=True)
s_name = part.get_filename()
if not s_name:
ext = mimetypes.guess_extension(part.get_content_type())
if not ext:
s = magic.from_buffer(data)
arr = s.lower().split()
if arr:
ext = '.{}'.format(arr[0])
else:
ext = '.bin'
s_name = 'part-{}{}'.format(i, ext)
filename = os.path.join(path_root, s_name)
filename = check_filename_repeat(filename)
with open(filename, 'wb') as f:
f.write(data)
arr_filename.append(filename)
except Exception as e:
arr_error.append({'e': str(e), 'f_name': f_name, 'i': i})
except Exception as e:
arr_error.append({'e': str(e), 'f_name': f_name})
return (arr_filename, arr_error)
|
StarcoderdataPython
|
3213881
|
<filename>chrome/test/data/android/manage_render_test_goldens.py
#!/usr/bin/env python
#
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Simple script to automatically download all current golden images for Android
# render tests or upload any newly generated ones.
import argparse
import hashlib
import multiprocessing
import os
import subprocess
STORAGE_BUCKET = 'chromium-android-render-test-goldens'
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
CHROMIUM_SRC = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..', '..'))
GOLDEN_DIRECTORIES = [
os.path.join(THIS_DIR, 'render_tests'),
os.path.join(
CHROMIUM_SRC, 'components', 'test', 'data', 'js_dialogs', 'render_tests'),
os.path.join(
CHROMIUM_SRC, 'components', 'test', 'data', 'payments', 'render_tests'),
os.path.join(
CHROMIUM_SRC, 'components', 'test', 'data', 'permission_dialogs',
'render_tests'),
os.path.join(
CHROMIUM_SRC, 'components', 'test', 'data', 'vr_browser_ui',
'render_tests'),
os.path.join(
CHROMIUM_SRC, 'components', 'test', 'data', 'vr_browser_video',
'render_tests'),
]
# This is to prevent accidentally uploading random, non-golden images that
# might be in the directory.
ALLOWED_DEVICE_SDK_COMBINATIONS = [
# From RenderTestRule.java
'Nexus_5-19',
'Nexus_5X-23',
# For VR tests.
'Pixel_XL-25',
'Pixel_XL-26',
]
# Assume a quad core if we can't get the actual core count.
try:
THREAD_COUNT = multiprocessing.cpu_count()
# cpu_count only gets the physical core count. There doesn't appear to be a
# simple way of determining whether a CPU supports simultaneous multithreading
# in Python, so assume that anything with 6 or more cores supports it.
if THREAD_COUNT >= 6:
THREAD_COUNT *= 2
except NotImplementedError:
THREAD_COUNT = 4
def is_file_of_interest(f):
if not f.endswith('.png'):
return False
for combo in ALLOWED_DEVICE_SDK_COMBINATIONS:
if combo in f:
return True
return False
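# Illustrative behaviour (file names are made up for the example):
#   is_file_of_interest('RenderTest.Foo.Nexus_5-19.png')  -> True   (.png with an allowed device-SDK combo)
#   is_file_of_interest('RenderTest.Foo.Pixel_3-28.png')  -> False  (combo not in ALLOWED_DEVICE_SDK_COMBINATIONS)
#   is_file_of_interest('README.md')                      -> False  (not a .png)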
def download(directory):
# If someone removes a SHA1 file, we want to remove the associated PNG file
# the next time images are updated.
images_to_delete = []
for f in os.listdir(directory):
if not is_file_of_interest(f):
continue
sha1_path = os.path.join(directory, f + '.sha1')
if not os.path.exists(sha1_path):
images_to_delete.append(os.path.join(directory, f))
for image_path in images_to_delete:
os.remove(image_path)
# Downloading the files can be very spammy, so only show the output if
# something actually goes wrong.
try:
subprocess.check_output([
'download_from_google_storage',
'--bucket', STORAGE_BUCKET,
'-d', directory,
'-t', str(THREAD_COUNT),
], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print ('Downloading RenderTest goldens in directory %s failed with error '
'%d: %s') % (directory, e.returncode, e.output)
def upload(directory):
files_to_upload = []
for f in os.listdir(directory):
# Skip any files that we don't care about.
if not is_file_of_interest(f):
continue
png_path = os.path.join(directory, f)
# upload_to_google_storage will upload a file even if it already exists
# in the bucket. As an optimization, hash locally and only pass files to
# the upload script if they don't have a matching .sha1 file already.
sha_path = png_path + '.sha1'
if os.path.isfile(sha_path):
with open(sha_path) as sha_file:
with open(png_path, 'rb') as png_file:
h = hashlib.sha1()
h.update(png_file.read())
if sha_file.read() == h.hexdigest():
continue
files_to_upload.append(png_path)
if len(files_to_upload):
subprocess.check_call([
'upload_to_google_storage.py',
'--bucket', STORAGE_BUCKET,
'-t', str(THREAD_COUNT),
] + files_to_upload)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('action', choices=['download', 'upload'],
help='Which action to perform')
args = parser.parse_args()
if args.action == 'download':
for d in GOLDEN_DIRECTORIES:
download(d)
else:
for d in GOLDEN_DIRECTORIES:
upload(d)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1636251
|
<reponame>am1tyadava/amityadav_blog
from __future__ import unicode_literals
from django.contrib import admin
from django.core.urlresolvers import NoReverseMatch
from django.core.urlresolvers import reverse
from django.utils.html import conditional_escape
from django.utils.html import format_html_join
from django.utils.translation import ugettext_lazy as _
from .models import Posts
class PostAdmin(admin.ModelAdmin):
date_hierarchy = 'publication_date'
fieldsets = (
(_('Content'), {
'fields': (('title', 'status'), 'content',)}),
(_('Publication'), {
'fields': ('publication_date',),
'classes': ('collapse', 'collapse-closed')}),
(_('Discussions'), {
'fields': ('comment_enabled',),
'classes': ('collapse', 'collapse-closed')}),
(_('Metadatas'), {
'fields': ('authors',),
'classes': ('collapse', 'collapse-closed')}),
(None, {'fields': ('categories', 'tags', 'slug')}))
filter_horizontal = ('categories',)
prepopulated_fields = {'slug': ('title',)}
search_fields = ('title', 'content', 'tags')
actions = ['make_published', 'close_comments', ]
actions_on_top = True
actions_on_bottom = True
def __init__(self, model, admin_site):
self.form.admin_site = admin_site
super(PostAdmin, self).__init__(model, admin_site)
def get_categories(self, post):
"""
Return the categories linked in HTML.
"""
try:
return format_html_join(
', ', '<a href="{}" target="blank">{}</a>',
[(category.get_absolute_url(), category.title)
for category in post.categories.all()])
except NoReverseMatch:
return ', '.join([conditional_escape(category.title)
for category in post.categories.all()])
get_categories.short_description = _('category(s)')
def get_tags(self, post):
"""
Return the tags linked in HTML.
"""
try:
return format_html_join(
', ', '<a href="{}" target="blank">{}</a>',
[(reverse('zinnia:tag_detail', args=[tag]), tag)
for tag in post.tags_list])
except NoReverseMatch:
return conditional_escape(post.tags)
get_tags.short_description = _('tag(s)')
def get_is_visible(self, post):
"""
Admin wrapper for post.is_visible.
"""
return post.is_visible
get_is_visible.boolean = True
get_is_visible.short_description = _('is visible')
admin.site.register(Posts, PostAdmin)
|
StarcoderdataPython
|
76746
|
from tensorflow.keras.models import load_model
from time import sleep
from keras.preprocessing.image import img_to_array
from keras.preprocessing import image
import cv2
import numpy as np
import os
from mtcnn import MTCNN
# Importing the MTCNN detector to detect faces
detector = MTCNN()
# Path to the emotion detection model
model_path = os.path.join("model","accuracy_80.h5")
classifier =load_model(model_path)
class_labels = ['Angry','Happy','Neutral','Sad','Surprise'] # Remember to keep in alphabetical order
def face_emot_detect(vid_path,filename,output_path):
"""
Take video path and find emotion and tag in video
:param vid_path: complete path of input video
:param filename: name of the video file
:result: bool, dictionary of detected emotions
"""
label_info = [0,0,0,0,0]
cap = cv2.VideoCapture(vid_path)
if cap.isOpened() == False:
print('No video found')
return False
# for saving
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
# output file name, fourcc code, frame/sec, size tuple
if output_path == '':
out = cv2.VideoWriter(filename, fourcc, int(cap.get(5)), (int(cap.get(3)),int(cap.get(4))))
else:
out = cv2.VideoWriter(output_path+'//'+filename, fourcc, int(cap.get(5)), (int(cap.get(3)),int(cap.get(4))))
while(True):
# Read one frame at a time
ret, frame = cap.read()
labels = []
# If a frame is returned
if ret == True:
# Get a dictionary of all faces
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
faces = detector.detect_faces(frame)
# For every face in the faces detected in the current frame
for face in faces:
# Get the confidence value of the 'f' being a face
if face.get('confidence')>=0.9:
# Get the co-ordinates of the cropped area wherein face lies
x,y,w,h = face.get('box')
# Draw a Rectangle
frame = cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,255),2)
roi_gray = gray[y:y+h,x:x+w]
try:
roi_gray = cv2.resize(roi_gray,(48,48),interpolation=cv2.INTER_AREA)
except:
print(f"error in {filename}")
if np.sum([roi_gray])!=0:
roi = roi_gray.astype('float')/255.0
roi = img_to_array(roi)
roi = np.expand_dims(roi,axis=0)
# make a prediction on the ROI, then lookup the class
preds = classifier.predict(roi)[0]
label_info[preds.argmax()] += 1
label = class_labels[preds.argmax()]
label_position = (x,y-5)
cv2.putText(frame,label,label_position,cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),2)
else:
cv2.putText(frame,'No Face Found',(20,60),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),2)
out.write(frame)
else:
break
# Freeing all resources
out.release()
cap.release()
return dict(zip(class_labels,label_info))
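# Illustrative usage (paths and file names are assumptions for the example):
#   emotions = face_emot_detect('videos/clip.mp4', 'clip_tagged.avi', 'output')
#   # -> e.g. {'Angry': 0, 'Happy': 37, 'Neutral': 12, 'Sad': 3, 'Surprise': 1},
#   #    i.e. per-class counts of face predictions across all frames, while the
#   #    annotated video is written to output/clip_tagged.avi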
|
StarcoderdataPython
|
53557
|
import enum
import sys
import os.path
TokenType = enum.Enum("TokenType", "form lemma parse morph_lemma all")
ChunkType = enum.Enum("ChunkType", "book chapter verse paragraph pericope")
chunk_data_filename = {
ChunkType.book: "books.txt",
ChunkType.chapter: "chapters.txt",
ChunkType.verse: "verses.txt",
ChunkType.paragraph: "paragraphs.txt",
ChunkType.pericope: "pericopes.txt"
}
chunk_ids = {}
chunk_data = {}
def load_chunk_data():
for chunk_type, filename in chunk_data_filename.items():
chunk_ids[chunk_type] = []
print(f'loading {filename}', file=sys.stderr)
with open(os.path.join(os.path.dirname(__file__), filename), encoding="UTF-8") as f:
for line in f:
try:
chunk_id, token_start, token_end = line.strip().split(maxsplit=2)
chunk_data[(chunk_type, chunk_id)] = (
int(token_start), int(token_end)
)
chunk_ids[chunk_type].append(chunk_id)
except:
print(line)
sys.exit()
def load_wlc():
with open(os.path.join(os.path.dirname(__file__), "tokens.txt"), 'r', encoding="UTF-8") as f:
for line in f:
yield line.replace('\n','').split('\t', maxsplit=4)
token_data = {}
def load_tokens():
for token_type in TokenType:
token_data[token_type] = []
for token_id, ref, form, lemma, parse in load_wlc():
token_data[token_type.form].append(form)
token_data[token_type.lemma].append(lemma)
token_data[token_type.morph_lemma].append((parse, lemma))
token_data[token_type.parse].append(parse)
token_data[token_type.all].append((form, lemma, parse))
load_tokens()
load_chunk_data()
def get_tokens(token_type, chunk_type=None, chunk_id=None):
if chunk_type and chunk_id:
start, end = chunk_data[(chunk_type, chunk_id)]
return token_data[token_type][start - 1:end]
elif chunk_type is None and chunk_id is None:
return token_data[token_type]
else:
raise ValueError(
"either both or neigher of chunk_type and chunk_id"
"must be provided"
)
def get_tokens_by_chunk(token_type, chunk_type):
return {
chunk_id: get_tokens(token_type, chunk_type, chunk_id)
for chunk_id in chunk_ids[chunk_type]
}
def get_chunk_ids(chunk_type):
return chunk_ids[chunk_type]
def pprint_text(items):
return ' '.join(items).replace(' ׃', '׃').replace(' ־ ', '־').replace(' /', '').replace('/', '')
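# Illustrative behaviour (schematic tokens, not real data): morpheme separators
# ('/') are dropped, the maqaf ('־') joins its neighbours and the sof pasuq ('׃')
# attaches to the preceding word, e.g.
#   pprint_text(['a/b', 'c', '־', 'd', '׃'])  ->  'ab c־d׃'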
def load_pericope_verse_map():
data = {}
with open(os.path.join(os.path.dirname(__file__),'pericope_verse_map.txt'), 'r', encoding="UTF-8") as f:
for line in f:
pid, start, end = line.strip().split(" ", maxsplit=2)
data[pid] = (start, end)
return data
if __name__ == "__main__":
from heb_lex_tools import HEBLEX
glosser = HEBLEX()
for token in get_tokens(TokenType.lemma, ChunkType.verse, "Gen.1.1"):
print(f"{token}: '{glosser.strongs_to_gloss(token)}'")
with open('test.txt', 'w', encoding="UTF-8") as f:
print(pprint_text(get_tokens_by_chunk(TokenType.form, ChunkType.verse)["Gen.1.1"]), file=f)
|
StarcoderdataPython
|
1657939
|
from apps.common.func.CommonFunc import *
from apps.common.func.LanguageFunc import *
from django.shortcuts import render, HttpResponse
from urllib import parse
from apps.common.config import commonWebConfig
from apps.common.func.WebFunc import *
from apps.ui_globals.services.global_textService import global_textService
from apps.config.services.serviceConfService import ServiceConfService
import json
from apps.version_manage.services.common_service import VersionService
def globalTextCheck(request):
langDict = getLangTextDict(request)
context = {}
context["uiUserCenterGlobalTextPage"] = "current-page"
context["userName"] = request.session.get("userName")
if not isRelease:
context["env"] = "test"
# page texts
text = {}
text["pageTitle"] = langDict["web"]["httpUserCenterGlobalTextPageTitle"]
text["subPageTitle"] = langDict["web"]["httpUserCenterGlobalTextSubPageTitle"]
context["text"] = text
context.update(getHttpConfForUI())
context["page"] = 1
return render(request, "ui_globals/global_text_conf.html", context)
def queryText(request):
page = request.POST.get("page")
if isInt(page):
page = int(page)
else:
return HttpResponse("<script>alert('请验证页数参数');</script>")
checkArr = json.loads(parse.unquote(request.POST.get("queryArr")))
orderBy = request.POST.get("orderBy")
if isSqlInjectable(orderBy):
return HttpResponse("<script>alert('查询条件非法');</script>")
# Decide which table to read the data from, based on the version (added by 王吉亮 on 2018-02-24)
if VersionService.isCurrentVersion(request):
tbName = "tb3_ui_global_text"
versionCondition = ""
else:
tbName = "tb_version_global_text"
versionCondition = "and versionName='%s'" % request.session.get("version")
execSql = "SELECT g.*,u.userName FROM %s g LEFT JOIN tb_user u ON g.addBy = u.loginName WHERE 1=1 AND g.state=1 %s" %(tbName,versionCondition)
checkList = []
for key in checkArr:
if checkArr[key] == "":
continue
elif key == "addBy":
checkList.append("%%%s%%" % checkArr[key])
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and (g.addBy LIKE %s or u.userName LIKE %s) """
continue
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and g.%s """ % key
execSql += """ LIKE %s"""
execSql += """ ORDER BY %s""" % orderBy
context = pagination(sqlStr=execSql, attrList=checkList, page=page, pageNum=commonWebConfig.testCasePageNum)
context.update(getHttpConfForUI())
response = render(request, "ui_globals/SubPages/global_text_conf_sub_page.html", context)
return response
def textConfDel(request):
id = request.GET.get("id")
if VersionService.isCurrentVersion(request):
getText = global_textService.getText(id)
if getText.addBy != request.session.get("loginName"):
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON,"只能删除自己的变量").toJson())
try:
global_textService.delText(id)
return HttpResponse(ApiReturn(ApiReturn.CODE_OK).toJson())
except Exception as e :
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON,e).toJson())
else:
getText = global_textService.getVersionText(id)
# addBy is no longer a foreign key here
if getText.addBy != request.session.get("loginName"):
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON,"只能删除自己的变量").toJson())
try:
global_textService.delVersionText(id)
return HttpResponse(ApiReturn(ApiReturn.CODE_OK).toJson())
except Exception as e :
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON,e).toJson())
def textConfAdd(request):
TextData = json.loads(parse.unquote(request.POST.get("data")))
TextData["addBy"] = request.session.get("loginName")
if VersionService.isCurrentVersion(request):
try:
global_textService.addText(TextData)
except Exception as e:
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON,"key重复").toJson())
return HttpResponse(ApiReturn().toJson())
else:
try:
global_textService.addVersionText(TextData,VersionService.getVersionName(request))
except Exception as e:
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON,"key重复").toJson())
return HttpResponse(ApiReturn().toJson())
def textConfEdit(request):
TextData = json.loads(parse.unquote(request.POST.get("data")))
if VersionService.isCurrentVersion(request):
getText = global_textService.getText(TextData["id"])
if getText.addBy != request.session.get("loginName"):
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON, "只能修改自己的变量").toJson())
global_textService.editText(TextData)
return HttpResponse(ApiReturn().toJson())
else:
getText = global_textService.getVersionText(TextData["id"])
if getText.addBy != request.session.get("loginName"):
return HttpResponse(ApiReturn(ApiReturn.CODE_GLOBAL_EXCEPITON, "只能修改自己的变量").toJson())
global_textService.editVersionText(TextData,VersionService.getVersionName(request))
return HttpResponse(ApiReturn().toJson())
def getTextConf(request):
id = request.GET.get("id")
if VersionService.isCurrentVersion(request):
varData = dbModelToDict(global_textService.getText(id))
else:
varData = dbModelToDict(global_textService.getVersionText(id))
httpConfList = HttpConfService.queryUIRunHttpConfSort(request)
varData["httpConf"] = {}
varData["httpConf"]["common"] = substr(varData["textValue"], "[CONF=common]", "[ENDCONF]")
for i in range(0, len(httpConfList)):
if httpConfList[i]["httpConfKey"] not in varData["textValue"]:
varData["httpConf"]["%s" % httpConfList[i]["httpConfKey"]] = ""
continue
varData["httpConf"]["%s" % httpConfList[i]["httpConfKey"]] = substr(varData["textValue"],"[CONF=%s]" % httpConfList[i]["httpConfKey"],"[ENDCONF]")
return HttpResponse(ApiReturn(body=varData).toJson())
|
StarcoderdataPython
|
1673869
|
<gh_stars>0
#!/usr/bin/env python3
import logging
from .device import *
class Site(object):
def __init__(self, unifi, data):
self.unifi = unifi
self.id = data['_id']
self.desc = data['desc']
self.name = data['name']
self.role = data['role']
def api_endpoint(self, endpoint):
return 's/' + self.name + '/' + endpoint
def device(self):
# https://hemma:8443/api/s/default/stat/device
data = self.unifi.api_get(self.api_endpoint('stat/device'))
ret = []
for d in data['data']:
if d['model'] == 'UGW3':
ret.append(UGW3.UGW3(self, d))
elif d['model'] == 'US8P150':
ret.append(US8P150.US8P150(self, d))
elif d['model'] == 'USL16LP':
ret.append(USL16LP.USL16LP(self, d))
elif d['model'] == 'USL8LP':
ret.append(USL8LP.USL8LP(self, d))
elif d['model'] == 'USMINI':
ret.append(USMINI.USMINI(self, d))
elif d['model'] == 'USF5P':
ret.append(USF5P.USF5P(self, d))
elif d['model'] == 'U7PG2':
ret.append(U7PG2.U7PG2(self, d))
elif d['model'] == 'U7HD':
ret.append(U7HD.U7HD(self, d))
elif d['model'] == 'U7LT':
ret.append(U7LT.U7LT(self, d))
elif d['model'] == 'U7NHD':
ret.append(U7NHD.U7NHD(self, d))
elif d['model'] == 'UHDIW':
ret.append(UHDIW.UHDIW(self, d))
else:
logging.warning(f"Unknown device type/model: {d['type']}/{d['model']}")
return ret
def sta(self):
# https://hemma:8443/api/s/default/stat/sta
data = self.unifi.api_get(self.api_endpoint('stat/sta'))
return data['data']
# sta:
# "data": [
# {
# "_id": "58556a70b410cf6b940e570e",
# "_is_guest_by_usw": false,
# "_last_seen_by_usw": 1485694644,
# "_uptime_by_usw": 3701886,
# "assoc_time": 1481992812,
# "first_seen": 1481992816,
# "ip": "192.168.1.1",
# "is_guest": false,
# "is_wired": true,
# "last_seen": 1485694644,
# "latest_assoc_time": 1481992813,
# "mac": "00:0d:b9:40:80:48",
# "network": "LAN",
# "network_id": "56c87bd0b41038d25762ce8b",
# "oui": "PcEngine",
# "site_id": "56c87bc1b41038d25762ce86",
# "sw_depth": 0,
# "sw_mac": "f0:9f:c2:0a:4a:ca",
# "sw_port": 8,
# "uptime": 3701832,
# "user_id": "58556a70b410cf6b940e570e"
# },
# {
# "_id": "56c994a0b41038d25762cee2",
# "_is_guest_by_uap": false,
# "_is_guest_by_usw": false,
# "_last_seen_by_uap": 1485694651,
# "_last_seen_by_usw": 1485694644,
# "_uptime_by_uap": 14226,
# "_uptime_by_usw": 46707,
# "ap_mac": "44:d9:e7:f6:9f:99",
# "assoc_time": 1485647936,
# "authorized": true,
# "bssid": "46:d9:e7:f8:9f:99",
# "bytes-r": 7,
# "ccq": 333,
# "channel": 36,
# "essid": "PNet",
# "first_seen": 1456051360,
# "hostname": "Lenas-iPhone",
# "idletime": 8,
# "ip": "192.168.1.247",
# "is_guest": false,
# "is_wired": false,
# "last_seen": 1485694651,
# "latest_assoc_time": 1485680425,
# "mac": "80:ea:96:08:ed:81",
# "network": "LAN",
# "network_id": "56c87bd0b41038d25762ce8b",
# "noise": -104,
# "oui": "Apple",
# "powersave_enabled": true,
# "qos_policy_applied": true,
# "radio": "na",
# "radio_proto": "na",
# "rssi": 24,
# "rx_bytes": 44044326,
# "rx_bytes-r": 1,
# "rx_packets": 250896,
# "rx_rate": 150000,
# "signal": -80,
# "site_id": "56c87bc1b41038d25762ce86",
# "sw_depth": 1,
# "sw_mac": "f0:9f:c2:0a:4a:ca",
# "sw_port": 5,
# "tx_bytes": 347850716,
# "tx_bytes-r": 6,
# "tx_packets": 255025,
# "tx_power": 40,
# "tx_rate": 150000,
# "uptime": 46715,
# "user_id": "56c994a0b41038d25762cee2"
# }
|
StarcoderdataPython
|
1602371
|
import numpy as np
from collections.abc import Sequence
from typing import BinaryIO
from ..gmxflow import GmxFlow, GmxFlowVersion
# Fields expected to be read in the files.
__FIELDS = ['X', 'Y', 'N', 'T', 'M', 'U', 'V']
# Fields which represent data in the flow field, excluding positions.
__DATA_FIELDS = ['N', 'T', 'M', 'U', 'V']
# List of fields in the order of writing.
__FIELDS_ORDERED = ['N', 'T', 'M', 'U', 'V']
def read_flow(filename: str) -> GmxFlow:
"""Read flow field data from a file.
Args:
filename (str): File to read data from.
Returns:
GmxFlow: Flow field data.
"""
def get_header_field(info, label):
try:
field = info[label]
except KeyError:
raise ValueError(f"could not read {label} from `{filename}`")
return field
data, info = _read_data(filename)
shape = get_header_field(info, 'shape')
spacing = get_header_field(info, 'spacing')
origin = get_header_field(info, 'origin')
version_str = get_header_field(info, 'format')
if version_str == 'GMX_FLOW_1':
version = GmxFlowVersion(1)
elif version_str == 'GMX_FLOW_2':
version = GmxFlowVersion(2)
else:
raise ValueError(f"unknown file format `{version_str}`")
dtype = [(l, float) for l in data.keys()]
num_bins = np.prod(shape)
data_new = np.zeros((num_bins, ), dtype=dtype)
for key, value in data.items():
data_new[key] = value
return GmxFlow(
data=data_new,
shape=shape,
spacing=spacing,
version=version,
origin=origin,
)
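# Illustrative usage (the file name is an assumption):
#   flow = read_flow('flow_00001.dat')
#   flow.data['U'], flow.data['V']  # per-bin flow components (structured-array fields)
#   flow.shape, flow.spacing        # grid shape (nx, ny) and bin spacing (dx, dy)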
def _read_data(filename: str) -> tuple[dict[str, np.ndarray], dict[str, str]]:
"""Read field data from a file.
The data is returned on a regular grid, adding zeros for bins with no values
or which are not present in the (possibly not-full) input grid.
The `x` and `y` coordinates are bin center positions, not corner.
Args:
filename (str): A file to read data from.
Returns:
(dict, dict): 2-tuple of dict's with data and information.
"""
with open(filename, 'rb') as fp:
fields, num_values, info = _read_header(fp)
data = _read_values(fp, num_values, fields)
x0, y0 = info['origin']
nx, ny = info['shape']
dx, dy = info['spacing']
x = x0 + dx * (np.arange(nx) + 0.5)
y = y0 + dy * (np.arange(ny) + 0.5)
xs, ys = np.meshgrid(x, y, indexing='ij')
grid = np.zeros((nx, ny), dtype=[(l, float) for l in __FIELDS])
grid['X'] = xs
grid['Y'] = ys
for l in __DATA_FIELDS:
grid[l][data['IX'], data['IY']] = data[l]
grid = grid.ravel()
return {l: grid[l] for l in __FIELDS}, info
def _read_values(fp: BinaryIO,
num_values: int,
fields: Sequence[str],
) -> dict[str, np.ndarray]:
"""Read the binary data in the given order."""
dtypes = {
'IX': np.uint64,
'IY': np.uint64,
'N': np.float32,
'T': np.float32,
'M': np.float32,
'U': np.float32,
'V': np.float32,
}
return {
l: np.fromfile(fp, dtype=dtypes[l], count=num_values)
for l in fields
}
def _read_header(fp: BinaryIO) -> tuple[list[str], int, dict[str, str]]:
"""Read header information and forward the pointer to the data."""
def read_shape(line):
return tuple(int(v) for v in line.split()[1:3])
def read_spacing(line):
return tuple(float(v) for v in line.split()[1:3])
def read_num_values(line):
return int(line.split()[1].strip())
def read_format(line):
return line.lstrip("FORMAT").strip()
def parse_field_labels(line):
return line.split()[1:]
def read_header_string(fp):
buf_size = 1024
header_str = ""
while True:
buf = fp.read(buf_size)
pos = buf.find(b'\0')
if pos != -1:
header_str += buf[:pos].decode("ascii")
offset = buf_size - pos - 1
fp.seek(-offset, 1)
break
else:
header_str += buf.decode("ascii")
return header_str
info = {}
header_str = read_header_string(fp)
for line in header_str.splitlines():
line_type = line.split(maxsplit=1)[0].upper()
if line_type == "SHAPE":
info['shape'] = read_shape(line)
elif line_type == "SPACING":
info['spacing'] = read_spacing(line)
elif line_type == "ORIGIN":
info['origin'] = read_spacing(line)
elif line_type == "FIELDS":
fields = parse_field_labels(line)
elif line_type == "NUMDATA":
num_values = read_num_values(line)
elif line_type == "FORMAT":
info['format'] = read_format(line)
info['num_bins'] = info['shape'][0] * info['shape'][1]
return fields, num_values, info
|
StarcoderdataPython
|
4841783
|
<gh_stars>0
from .anon import anonymize_url
from .log_util import quiet_loggers, setup
|
StarcoderdataPython
|
1776371
|
from unittest import TestCase
from orcid2vivo_app.utility import clean_orcid, is_valid_orcid
class TestUtility(TestCase):
def test_clean_orcid(self):
orcid = '0000-0003-1527-0030'
# Test with orcid.org prefix.
self.assertEqual(clean_orcid('orcid.org/' + orcid), orcid)
# Test with http://orcid.org prefix.
self.assertEqual(clean_orcid('http://orcid.org/' + orcid), orcid)
# Test without prefix.
self.assertEqual(clean_orcid(orcid), orcid)
def test_is_valid_orcid(self):
self.assertTrue(is_valid_orcid("0000-0003-1527-0030"))
self.assertTrue(is_valid_orcid("0000-0003-1527-003X"))
self.assertFalse(is_valid_orcid("0000-0003-1527-00301"))
self.assertFalse(is_valid_orcid("0000-0003-1527-003"))
|
StarcoderdataPython
|
118788
|
<gh_stars>0
from string import Template
"""Insert Query template"""
db_insert = Template("INSERT INTO ${voms_tbl} (subject, issuer, vo_id)"
" SELECT curr.subject, curr.issuer, curr.vo_id"
" FROM ${voms_tbl}_temp curr LEFT JOIN ${voms_tbl} prev"
" ON curr.subject=prev.subject"
" AND curr.issuer=prev.issuer"
" AND curr.vo_id=prev.vo_id WHERE prev.subject IS NULL")
defaults_db_insert = {
"voms_tbl": "voms_members"
}
"""Delete Query template"""
db_delete = Template("DELETE FROM ${voms_tbl} t1 USING ("
" SELECT prev.subject, prev.issuer, prev.vo_id"
" FROM ${voms_tbl} prev LEFT JOIN ${voms_tbl}_temp curr"
" ON curr.subject=prev.subject"
" AND curr.issuer=prev.issuer"
" AND curr.vo_id=prev.vo_id WHERE curr.subject IS NULL) sq"
" WHERE sq.subject=t1.subject"
" AND sq.issuer=t1.issuer"
" AND sq.vo_id=t1.vo_id")
defaults_db_delete = {
"voms_tbl": "voms_members",
}
""" Create Temp Table """
tbl_tmp_create = Template("CREATE TEMP TABLE ${voms_tbl}_temp ("
" id integer PRIMARY KEY,"
" subject character varying(256) NOT NULL,"
" issuer character varying(256) NOT NULL,"
" vo_id character varying(256) NOT NULL,"
" created timestamp without time zone)")
defaults_tbl_tmp_create = {
"voms_tbl": "voms_members",
}
""" Insert in Temp Table """
tbl_tmp_insert = Template("INSERT INTO ${voms_tbl}_temp (id, subject, issuer, vo_id, created) VALUES %s")
defaults_tbl_tmp_insert = {
"voms_tbl": "voms_members",
}
""" Delete from Temp Table """
tbl_tmp_delete = Template("DELETE FROM ${voms_tbl}_temp"
" WHERE id IN (SELECT id FROM ("
" SELECT id, ROW_NUMBER() OVER ("
" partition BY subject, issuer, vo_id ORDER BY id) AS rnum"
" FROM ${voms_tbl}_temp) t WHERE t.rnum > 1)")
defaults_tbl_tmp_delete = {
"voms_tbl": "voms_members",
}
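# Illustrative rendering of the templates with their default values (no database
# connection involved):
#   db_insert.substitute(defaults_db_insert)
#   # -> "INSERT INTO voms_members (subject, issuer, vo_id) SELECT curr.subject, ... WHERE prev.subject IS NULL"
#   tbl_tmp_insert.substitute(defaults_tbl_tmp_insert)
#   # -> "INSERT INTO voms_members_temp (id, subject, issuer, vo_id, created) VALUES %s"
#   # the trailing "%s" is presumably expanded later by the database driver
#   # (e.g. psycopg2's execute_values) - an assumption, not stated in this file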
|
StarcoderdataPython
|
3273034
|
<reponame>dvalentina/2019-2-Track-Backend-V-Danilova
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
def index(request):
return render(request, 'index.html')
def login(request):
return render(request, 'login.html')
@login_required
def home(request):
return render(request, 'index.html')
|
StarcoderdataPython
|
3327996
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by <NAME>
import unittest
from skbio import Sequence
import pandas.testing as pdtest
from allfreqs.classes import MultiAlignment, Reference
from allfreqs.tests.constants import (
SAMPLE_SEQUENCES_DICT, SAMPLE_SEQUENCES_TABMSA,
SAMPLE_REF_FASTA, SAMPLE_REFERENCE_INDEXES
)
class TestMultiAlignment(unittest.TestCase):
def setUp(self) -> None:
self.multialg = MultiAlignment(SAMPLE_SEQUENCES_DICT)
def test_tabmsa(self):
pdtest.assert_frame_equal(self.multialg.tabmsa,
SAMPLE_SEQUENCES_TABMSA)
def test_length(self):
self.assertEqual(5, len(self.multialg))
class TestReference(unittest.TestCase):
def test_indexes_from_str(self):
ref = "AAG-CTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGG-TAT"
reference = Reference(ref)
self.assertEqual(SAMPLE_REFERENCE_INDEXES, reference.indexes)
def test_indexes_from_sequence(self):
ref = Sequence.read(SAMPLE_REF_FASTA)
reference = Reference(ref)
self.assertEqual(SAMPLE_REFERENCE_INDEXES, reference.indexes)
def test_length(self):
ref = "AAG-CTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGG-TAT"
reference = Reference(ref)
self.assertEqual(44, len(reference))
|
StarcoderdataPython
|
80132
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 14 13:54:22 2017
@author: <NAME> @ Gilmour group @ EMBL Heidelberg
@descript: Functions for converting fluorescence intensity distributions
into a point cloud representation and then register them to
the image frame.
"""
#------------------------------------------------------------------------------
### Imports
# Standard external imports
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy.ndimage as ndi
import sys, os
# Other external imports
from sklearn.decomposition import PCA
#------------------------------------------------------------------------------
### Function for landmark extraction
def generate_pcl(image, nHits, adjust_power=1, replace=False, seed=None):
# Seed random number generator
if seed:
np.random.seed(seed)
# Normalize to a total intensity of 1
normed = image.astype(np.float) / np.sum(image)
# Draw from distribution (without replacement)
indx_arr = np.arange(normed.flatten().shape[0])
hits_arr = np.random.choice(indx_arr,
size=nHits,
replace=replace,
p=normed.flatten())
# Unravel flat index hits array
hits = np.array(np.unravel_index(hits_arr,np.shape(normed))).T
# Return result
return hits
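# Illustrative usage (a synthetic stack; sizes are arbitrary):
#   img = np.random.poisson(100, size=(32, 64, 64)).astype(float)
#   pts = generate_pcl(img, nHits=500, seed=42)
#   pts.shape  # -> (500, 3); each row is a (z, y, x) pixel index drawn with
#              #    probability proportional to that pixel's intensity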
#------------------------------------------------------------------------------
### Main function
def point_cloud_pipeline(stack, ref_stack,
fpath, fname, res, num_LMs=500,
verbose=False, show=False):
"""Pipeline that extracts and aligns point clouds
from intensity distributions.
Parameters
----------
stack : 3D numpy image
Intensity distribution to convert to point cloud.
ref_stack : 3D numpy image
Intensity distribution reflecting overall tissue
shape (usually the membranes). Used for aligning
the cloud to the image frame and normalizing z.
fpath : string
Path of the source image file corresponding to the
input stack. Used to find matching metadata.
fname : list of strings
File name of the source image file corresponding
to the input stack. Used to find matching metadata.
res : list of floats
Pixel size in each dimension: [z, y, x].
Returns
-------
lms : numpy array of shape (num_LMs, 3)
Landmark coordinates in the image space (zyx).
lms_tf : numpy array of shape (num_LMs, 3)
Aligned and z-normalized landmark coordinates(zyx).
lum_dist_lms : numpy array of shape (num_LMs)
Euclidean distance of landmarks to the lumen.
"""
#--------------------------------------------------------------------------
### Run landmark assignment
# Run landmark assignment
lms = generate_pcl(stack, num_LMs, seed=42)
ref_lms = generate_pcl(ref_stack, num_LMs, seed=42)
# Change from pixels to um
lms = lms * np.array(res)
ref_lms = ref_lms * np.array(res)
# Plot results
if show:
plt.scatter(lms[:,2], lms[:,1], c=lms[:,0], cmap='viridis')
plt.title('Channel landmarks in image frame')
plt.show()
plt.scatter(ref_lms[:,2], ref_lms[:,1], c=ref_lms[:,0], cmap='viridis')
plt.title('Reference landmarks in image frame')
plt.show()
#--------------------------------------------------------------------------
### Cloud alignment via PCA
# Prep
pca = PCA()
# Fit PCA model to data
pca.fit(ref_lms)
# Ensure that the sign of PCs is consistent with the image frame
# Note: Given that the images are always acquired in the same orientations,
# a matching orientation can be ensured by finding the highest
# contributing image axis for each PC, and invert the PC if that
# contribution is negative. In other words, one ensures for each PC
# that the highest-contributing image axis is positively correlated
# with the PC.
# Find highest contributions of image axes to each PC
# Note: This asks "which image axis contributes the most to this PC?"
max_weights = np.argmax(np.abs(pca.components_),axis=1)
# Get the signs of the highest contributions
signs = np.sign(pca.components_[np.arange(pca.components_.shape[0]),max_weights])
# Using the signs, flip those PCs where the sign is negative
pca.components_ = pca.components_ * signs[:, np.newaxis]
# Match the order of PCs to the order of image dimensions (zyx)
# Note: Following the transform, the PCs will be sorted according to
# explained variance. Instead, they should be sorted in order of the
# highest contributing image dimension.
# Find indices for zyx-sorting of transformed data
# Note: This asks "which PC is most contributed to by this image axis?"
zyx_sort = np.argmax(np.abs(pca.components_),axis=0)
# Transform landmarks, sort according to zyx
lms_tf = pca.transform(lms)[:,zyx_sort]
ref_lms_tf = pca.transform(ref_lms)[:,zyx_sort]
# Get PCs and explained variance to report
PCs = np.copy(pca.components_.T)
PCvars = np.copy(pca.explained_variance_ratio_)
# Print results
if verbose:
print '\n PCs:'
print ' ', str(PCs).replace('\n','\n ')
print ' Explained variance:'
print ' ', str(PCvars)
# Plot results
if show:
plt.scatter(lms_tf[:,2], lms_tf[:,1], c=lms_tf[:,0],
cmap='viridis')
plt.title('Channel landmarks in matched frame')
plt.show()
plt.scatter(ref_lms_tf[:,2], ref_lms_tf[:,1], c=ref_lms_tf[:,0],
cmap='viridis')
plt.title('Reference landmarks in matched frame')
plt.show()
#--------------------------------------------------------------------------
### Normalize z
### ...by scaling the 1st and 99th percentile to 0 and 1, respectively.
# Get percentiles
mem_bot = np.percentile(ref_lms_tf[:,0],1)
mem_top = np.percentile(ref_lms_tf[:,0],99)
# Scale
lms_tf[:,0] = (lms_tf[:,0] - mem_bot) / (mem_top - mem_bot)
ref_lms_tf[:,0] = (ref_lms_tf[:,0] - mem_bot) / (mem_top - mem_bot)
#--------------------------------------------------------------------------
### Additional Measure: Distance from Lumen
# Import lumen data
lumen = None
with open(os.path.join(fpath, r"metadata.txt"),"r") as infile:
for line in infile.readlines():
line = line.strip()
line = line.split('\t')
if line[0] in fname:
lumen = np.array([int(value) for value in line[1:4]])
break
if lumen is None:
raise Exception("Appropriate lumen metadata not found. Aborting!")
# Change from pixels to resolution
lumen = lumen * np.array(res)
# Get Euclidean distance from lumen
lum_dist_lms = np.sqrt(np.sum((lms-lumen)**2.0, axis=1))
# Transform to PCA space
lumen_tf = pca.transform(lumen.reshape(1,-1))[:,zyx_sort].squeeze()
# Normalization of z
lumen_tf[0] = (lumen_tf[0] - mem_bot) / (mem_top - mem_bot)
# Report
if verbose:
print ' Lumen (raw & tf):'
print ' ', lumen
print ' ', lumen_tf
# Plot to double-check
if show:
plt.scatter(ref_lms[:,2], ref_lms[:,1], c=ref_lms[:,0],
cmap='viridis')
plt.scatter(lumen[2], lumen[1], c='r', s=100)
plt.title('Reference landmarks in image frame (with lumen)')
plt.show()
plt.scatter(ref_lms_tf[:,2], ref_lms_tf[:,1], c=ref_lms_tf[:,0],
cmap='viridis')
plt.scatter(lumen_tf[2], lumen_tf[1], c='r', s=100)
plt.title('Reference landmarks in matched frame (with lumen)')
plt.show()
#--------------------------------------------------------------------------
### Return results
return lms, lms_tf, lum_dist_lms
#------------------------------------------------------------------------------
|
StarcoderdataPython
|
94373
|
#Write a Python program to print the documents (syntax, description etc.) of Python built-in function(s)
# abs can be substituted with any other built-in function
print(abs.__doc__)
|
StarcoderdataPython
|
1735393
|
<reponame>voxity/vox-ui-api<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
def config_loader(config, environment):
"""
Load the application configuration.
:param flask.Config config: usually app.config
:param str environment: the environment name (prod, dev, test, ...)
:return: None
The loading order is:
* the default configuration (app.config.default)
* the environment-specific configuration (prod, dev, test, ...) from app.config.env
* the user configuration from the file named by the
VOX_PEERS_CONFIG environment variable.
This allows parts of the configuration to be overridden.
"""
config.from_object('app.config.default')
config.from_object('app.config.env.%s' % (environment))
config.from_envvar('VOX_PEERS_CONFIG', silent=True)
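# Illustrative use (a sketch; the Flask app object is an assumption):
#   app = Flask(__name__)
#   config_loader(app.config, 'dev')  # loads app.config.default, then app.config.env.dev,
#                                     # then the optional file named by VOX_PEERS_CONFIG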
|
StarcoderdataPython
|
1763349
|
# -*- coding: utf-8 -*-
from datetime import datetime, tzinfo
import email
from email.header import decode_header
from email.utils import parsedate_tz, mktime_tz
import re
from tempfile import TemporaryFile
import unicodedata
from trac.attachment import Attachment
from trac.db import Table, Column, Index
from trac.mimeview.api import KNOWN_MIME_TYPES
from trac.resource import Resource
from trac.util.datefmt import from_utimestamp, to_utimestamp, utc
from trac.util.text import stripws
try:
unichr
except NameError:
unichr = chr # In Python 3 chr can be used instead of unichr in Python 2
try:
xrange
except NameError:
xrange = range  # In Python 3 range can be used instead of xrange in Python 2
try:
unicode
except NameError:
unicode = str  # In Python 3 str can be used instead of unicode in Python 2 (needed by header_to_unicode below)
SCHEMA = [
Table('mailarchive', key='id')[
Column('id'),
Column('subject'),
Column('fromheader'),
Column('toheader'),
Column('date', type='int64'),
Column('body'),
Column('allheaders'),
Column('comment'),
Index(['date']),
],
]
EXT_MAP = dict((t, exts[0]) for t, exts in KNOWN_MIME_TYPES.items())
EXT_MAP['image/gif'] = 'gif'
EXT_MAP['image/jpeg'] = 'jpeg'
EXT_MAP['image/png'] = 'png'
EXT_MAP['image/tiff'] = 'tiff'
EXT_MAP['image/svg+xml'] = 'svg'
DELETE_CHARS_RE = re.compile(
'[' +
''.join(filter(lambda c: unicodedata.category(c) == 'Cc',
map(unichr, xrange(0x10000)))) +
'\\/:*?"<>|' +
']')
def normalized_filename(filename):
filename = DELETE_CHARS_RE.sub(' ', filename)
filename = stripws(filename)
return filename
def header_to_unicode(header):
if header is None:
return None
if isinstance(header, unicode):
return header
return u''.join(unicode(part, charset or 'ASCII', errors='replace')
for part, charset in decode_header(header))
def to_unicode(s, charset):
return None if s is None else unicode(s, charset, errors='replace')
def get_charset(m, default='ASCII'):
return m.get_content_charset() or m.get_charset() or default
def terms_to_clauses(terms):
"""Split list of search terms and the 'or' keyword into list of lists of search terms."""
clauses = [[]]
for term in terms:
if term == 'or':
clauses.append([])
else:
clauses[-1].append(term)
return clauses
def search_clauses_to_sql(db, columns, clauses):
"""Convert a search query into an SQL WHERE clause and corresponding
parameters.
Similar to trac.search.search_to_sql but supports 'or' clauses.
The result is returned as an `(sql, params)` tuple.
"""
assert columns and clauses
likes = ['%s %s' % (i, db.like()) for i in columns]
c = ' OR '.join(likes)
sql = '(' + ') OR ('.join('(' + ') AND ('.join([c] * len(clause)) + ')' for clause in clauses) + ')'
args = []
for clause in clauses:
for term in clause:
args.extend(['%' + db.like_escape(term) + '%'] * len(columns))
return sql, tuple(args)
class ArchivedMail(object):
def __init__(self, id, subject, fromheader, toheader, body, allheaders, date, comment):
self.id = id
self.subject = subject
self.fromheader = fromheader
self.toheader = toheader
self.body = body
self.allheaders = allheaders
self.date = from_utimestamp(date)
self.comment = comment
@classmethod
def parse(cls, id, source):
msg = email.message_from_string(source)
charset = get_charset(msg)
body = None
for part in msg.walk():
content_type = part.get_content_type()
if content_type == 'text/plain':
charset = get_charset(part, get_charset(msg))
body = part.get_payload(decode=True)
break # Take only the first text/plain part as the body
elif content_type == 'message/rfc822':
if part.get('Content-Transfer-Encoding') == 'base64':
# This is an invalid email and Python will misdetect a 'text/plain' part that is actually a base64 encoded attachment.
break
date = datetime.fromtimestamp(mktime_tz(parsedate_tz(msg['date'])), utc)
allheaders = '\n'.join("%s: %s" % item for item in msg.items())
mail = ArchivedMail(id,
header_to_unicode(msg['subject']),
header_to_unicode(msg['from']),
header_to_unicode(msg['to']),
to_unicode(body, charset),
to_unicode(allheaders, 'ASCII'),
to_utimestamp(date),
'')
return (mail, msg)
@classmethod
def add(cls, env, mail):
# Insert mail
with env.db_transaction as db:
cursor = db.cursor()
cursor.execute("""
INSERT INTO mailarchive
(id, subject, fromheader, toheader, body, allheaders, date, comment)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
""", (mail.id, mail.subject, mail.fromheader, mail.toheader, mail.body, mail.allheaders, to_utimestamp(mail.date), mail.comment))
@classmethod
def storeattachments(cls, env, mail, msg):
def add_attachment(payload, filename):
with TemporaryFile('w+b') as file:
file.write(payload)
size = file.tell()
file.seek(0)
attachment = Attachment(env, 'mailarchive', mail.id)
attachment.insert(filename, file, size)
def get_filename(part, index):
filename = header_to_unicode(part.get_filename())
if not filename:
mimetype = part.get_content_type()
ext = EXT_MAP.get(mimetype) or part.get_content_subtype() or mimetype or '_'
filename = "unnamed-part-%s.%s" % (index, ext)
return normalized_filename(filename)
for index, part in enumerate(msg.walk()):
cd = part.get('Content-Disposition')
if cd:
d = cd.strip().split(';')
if d[0].lower() == 'attachment':
filename = get_filename(part, index)
if part.get_content_type() == 'message/rfc822' and part.get('Content-Transfer-Encoding') == 'base64':
# This is an invalid email and Python will misdetect the attachment in a separate 'text/plain' part, not here.
# TODO: actually extract that separate 'text/plain' attachment somehow.
add_attachment('Invalid attachment: message/rfc822 parts can not be base64 encoded!', filename)
continue
add_attachment(part.get_payload(decode=True), filename)
continue
cid = part.get('Content-ID')
if cid:
filename = get_filename(part, index)
add_attachment(part.get_payload(decode=True), filename)
@classmethod
def select_all(cls, env):
with env.db_query as db:
return [ArchivedMail(id, subject, fromheader, toheader, body, allheaders, date, comment)
for id, subject, fromheader, toheader, body, allheaders, date, comment in
db("""
SELECT id, subject, fromheader, toheader, body, allheaders, date, comment
FROM mailarchive
""")]
@classmethod
def select_all_paginated(cls, env, page, max_per_page):
with env.db_query as db:
return [ArchivedMail(id, subject, fromheader, toheader, body, allheaders, date, comment)
for id, subject, fromheader, toheader, body, allheaders, date, comment in
db("""
SELECT id, subject, fromheader, toheader, body, allheaders, date, comment
FROM mailarchive
ORDER BY date DESC
LIMIT %d OFFSET %d
""" % (max_per_page, max_per_page * (page - 1)))]
@classmethod
def count_all(cls, env):
with env.db_query as db:
return db("""
SELECT COUNT(*)
FROM mailarchive
""")[0][0]
@classmethod
def select_filtered_paginated(cls, env, page, max_per_page, filter):
if not filter:
return cls.select_all_paginated(env, page, max_per_page)
with env.db_query as db:
terms = filter.split()
sql_query, args = search_clauses_to_sql(db, ['body', 'allheaders', 'comment'], terms_to_clauses(terms))
return [ArchivedMail(id, subject, fromheader, toheader, body, allheaders, date, comment)
for id, subject, fromheader, toheader, body, allheaders, date, comment in
db("""
SELECT id, subject, fromheader, toheader, body, allheaders, date, comment
FROM mailarchive
WHERE %s
ORDER BY date DESC
LIMIT %d OFFSET %d
""" % (sql_query, max_per_page, max_per_page * (page - 1)), args)]
@classmethod
def count_filtered(cls, env, filter):
if not filter:
return cls.count_all(env)
with env.db_query as db:
terms = filter.split()
sql_query, args = search_clauses_to_sql(db, ['body', 'allheaders', 'comment'], terms_to_clauses(terms))
return db("""
SELECT COUNT(*)
FROM mailarchive
WHERE
""" + sql_query, args)[0][0]
@classmethod
def search(cls, env, terms, max=0):
with env.db_query as db:
sql_query, args = search_clauses_to_sql(db, ['body', 'allheaders', 'comment'], terms_to_clauses(terms))
if max > 0:
sql_query += " LIMIT %d" % (max,)
return [ArchivedMail(id, subject, fromheader, toheader, body, allheaders, date, comment)
for id, subject, fromheader, toheader, body, allheaders, date, comment in
db("""
SELECT id, subject, fromheader, toheader, body, allheaders, date, comment
FROM mailarchive
WHERE
""" + sql_query, args)]
@classmethod
def select_by_id(cls, env, id):
rows = env.db_query("""
SELECT id, subject, fromheader, toheader, body, allheaders, date, comment
FROM mailarchive
WHERE id=%s
""", (str(id),))
if not rows:
return None
id, subject, fromheader, toheader, body, allheaders, date, comment = rows[0]
return ArchivedMail(id, subject, fromheader, toheader, body, allheaders, date, comment)
@classmethod
def update_comment(cls, env, id, comment):
with env.db_transaction as db:
cursor = db.cursor()
cursor.execute("""
UPDATE mailarchive
SET comment=%s
WHERE id=%s
""", (comment, str(id)))
|
StarcoderdataPython
|
140120
|
import datetime
import json
import os
import discord
from discord.errors import HTTPException
from discord.ext import commands
class Logging(commands.Cog, description="Keep a track of what members do in your server with this category."):
def __init__(self, bot):
self.bot = bot
with open("storage/modlogs_channels.json", "r") as modlogsFile:
self.modlogsFile = json.load(modlogsFile)
@commands.command(name="messagelogschannel",
aliases=["seteditedlogschannel", "setdeletedlogschannel",
"setlogschannel", "setlogchannel"],
description="Sets the channel in which edited/deleted message logs are sent.")
@commands.has_permissions(administrator=True)
async def set_modlogs_channel(self, ctx, *channel: discord.TextChannel):
        if not channel:
            try:
                current = self.modlogsFile.get(str(ctx.guild.id))
                embed = discord.Embed(title="Current Message Log channel", description=f"<#{current}>", color=discord.Color.random())
                return await ctx.send(embed=embed)
            except:
                return await ctx.send('Not set')
        channel = channel[0]  # *channel collects the arguments into a tuple; use the first channel given
        channel_id = channel.id
        self.modlogsFile[str(ctx.guild.id)] = int(channel_id)
        with open("storage/modlogs_channels.json", "w") as modlogsFile:
            json.dump(self.modlogsFile, modlogsFile, indent=4)
        await ctx.send(embed=discord.Embed(description=f"Logs channel set as {channel.name} successfully. "
                                                       f"Edited/Deleted messages, and profile changes will be shown in this channel.", color=discord.Color.green()))
# message edit event
@commands.Cog.listener()
async def on_message_edit(self, before, after):
message_channel_id = self.modlogsFile.get(str(before.guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
message_link = f"https://discord.com/channels/{before.guild.id}/{before.channel.id}/{before.id}"
embed = discord.Embed(title=f"Message edited in {before.channel.name}",
color=before.author.color, timestamp=after.created_at)
embed.add_field(name="Before", value=before.content)
embed.add_field(name="After", value=after.content)
embed.add_field(
name="Link", value=f"__[Message]({message_link})__")
embed.set_footer(text=f"Author • {before.author} | Edited")
embed.set_thumbnail(url=before.author.avatar_url)
        # the edit timestamp already shows on the right of the embed, so we don't repeat it in the footer
        try:
            await message_channel.send(embed=embed)
        except HTTPException:  # embed-only messages have no text content, so the embed fields end up empty and the send fails
            pass
# message delete event
@commands.Cog.listener()
async def on_message_delete(self, message):
message_channel_id = self.modlogsFile.get(str(message.guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
embed = discord.Embed(title=f"Message deleted in {message.channel.name}",
color=message.author.color, timestamp=message.created_at)
embed.add_field(name="Content", value=message.content)
embed.set_footer(text=f"Author • {message.author} | Created")
embed.set_thumbnail(url=message.author.avatar_url)
if message_channel is None:
return
try:
await message_channel.send(embed=embed)
except HTTPException:
pass
@commands.Cog.listener()
async def on_bulk_message_delete(self, messages):
message_channel_id = (self.modlogsFile.get(str(messages[0].guild.id)))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
with open(f"storage/tempText/{messages[0].guild.id}.txt", "w") as temp_textfile:
for x in messages:
line1 = f"{x.channel.name} | From: {x.author} | Sent At: {x.created_at}\n"
temp_textfile.write(line1)
temp_textfile.write(f"{x.content}\n\n")
file = discord.File(f"./storage/tempText/{messages[0].guild.id}.txt")
await message_channel.send(file=file, content=f"{len(messages)} messages deleted. "
f"Sending information as text file.")
os.remove(f"./storage/tempText/{messages[0].guild.id}.txt")
# ban event
@commands.Cog.listener()
async def on_member_ban(self, guild, member):
message_channel_id = self.modlogsFile.get(str(guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
embed = discord.Embed(title=f"{member} has been banned from {guild.name}", description=f"ID: {member.id}",
timestamp=member.created_at)
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(text="Account created at")
await message_channel.send(embed=embed)
@commands.Cog.listener()
async def on_member_update(self, before, after):
message_channel_id = self.modlogsFile.get(str(before.guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
# nickname change
        if before.nick != after.nick:
embed = discord.Embed(title=f"{before}'s nickname has been updated", description=f"ID: {before.id}",
color=after.color, timestamp=before.created_at)
embed.add_field(
name="Before", value=before.display_name)
embed.add_field(
name="After", value=after.display_name)
embed.set_thumbnail(url=after.avatar_url)
embed.set_footer(text="Account created at")
await message_channel.send(embed=embed)
# role change
        if before.roles != after.roles:
embed = discord.Embed(title=f"{before}'s roles have been updated", description=f"ID: {before.id}",
color=after.color, timestamp=before.created_at)
before_roles_str, after_roles_str = "", ""
for x in before.roles[::-1]:
before_roles_str += f"{x.mention} "
for x in after.roles[::-1]:
after_roles_str += f"{x.mention} "
embed.add_field(
name="Before", value=before_roles_str)
embed.add_field(name="After", value=after_roles_str)
embed.set_thumbnail(url=after.avatar_url)
embed.set_footer(text="Account created at")
await message_channel.send(embed=embed)
# unban event
@commands.Cog.listener()
async def on_member_unban(self, guild, member):
message_channel_id = self.modlogsFile.get(str(guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
embed = discord.Embed(title=f"{member} has been unbanned", description=f"ID: {member.id}",
color=discord.Color.green(),
timestamp=member.created_at)
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(text="Account created at")
await message_channel.send(embed=embed)
# join event
@commands.Cog.listener()
async def on_member_join(self, member):
message_channel_id = self.modlogsFile.get(str(member.guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
embed = discord.Embed(title=f"{member} joined the the server.", color=discord.Color.green(),
timestamp=datetime.datetime.utcnow(),
description=f"**Their account was created at:** {member.created_at}")
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(text="Join time")
await message_channel.send(embed=embed)
# leave event
@commands.Cog.listener()
async def on_member_remove(self, member):
message_channel_id = self.modlogsFile.get(str(member.guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
roles = [role for role in member.roles]
embed = discord.Embed(title=f"{member} has left the server.", color=discord.Color.dark_red(),
timestamp=datetime.datetime.utcnow(),
description=f"**Their account was created at:** {member.created_at}")
embed.add_field(name="Their roles", value=" ".join(
[role.mention for role in roles]))
embed.set_footer(text=f"Left at")
embed.set_thumbnail(url=member.avatar_url)
await message_channel.send(embed=embed)
def setup(bot):
bot.add_cog(Logging(bot))
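# Hypothetical wiring sketch (the module path, prefix, and token are made up):
#
# bot = commands.Bot(command_prefix="!", intents=discord.Intents.all())
# bot.load_extension("cogs.logging")  # module containing this Logging cog
# bot.run("YOUR_TOKEN")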
|
StarcoderdataPython
|
1767198
|
from microbit import *
import radio
radio.config(group=0)
radio.on()
display.show("-")
while True:
if button_a.was_pressed():
radio.send("A")
if button_b.was_pressed():
radio.send("B")
try:
msg = radio.receive()
if msg is not None:
if len(msg) > 0:
display.show(msg)
except:
display.show("X")
radio.off()
sleep(250)
radio.on()
display.show("-")
|
StarcoderdataPython
|
187388
|
<filename>utils/generate_det_roidb.py
import argparse
import os
import pickle as pkl
import numpy as np
from detection.utils.list_util import load_img_list
# from pycocotools.coco import COCO
#
#
# dataset_split_mapping = {
# "train2014": "train2014",
# "val2014": "val2014",
# "valminusminival2014": "val2014",
# "minival2014": "val2014",
# "train2017": "train2017",
# "val2017": "val2017",
# "test-dev2017": "test2017",
# "train": "train"
# }
def parse_args():
parser = argparse.ArgumentParser(description='Generate SimpleDet GroundTruth Database')
# parser.add_argument('--dataset', help='dataset name', type=str)
# parser.add_argument('--dataset-split', help='dataset split, e.g. train2017, minival2014', type=str)
parser.add_argument('--img_lst_path', help='path to ds')
parser.add_argument('--data_root', help='path to root')
args = parser.parse_args()
return args.img_lst_path, args.data_root
def generate_groundtruth_database(img_lst_path, data_root):
total_rec_list = load_img_list(img_lst_path, data_root)
# img_ids = None
version = 1
roidb = []
for i, item in enumerate(total_rec_list):
roi_rec = {
'image_url': item['img_path'],
'im_id': 'det1.2_{}'.format(i),
'h': 576,
'w': 1024,
'gt_class': item['boxes'][:, 4], # list of class id + offset
'gt_bbox': item['boxes'],
'gt_poly': None,
'version': version,
'flipped': False}
roidb.append(roi_rec)
return roidb
if __name__ == "__main__":
    img_lst_path, data_root = parse_args()
    roidb = generate_groundtruth_database(img_lst_path, data_root)
    os.makedirs("data/cache", exist_ok=True)
    with open("data/cache/%s_%s.roidb" % (img_lst_path, data_root), "wb") as fout:
        pkl.dump(roidb, fout)
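# Hypothetical invocation (paths are placeholders, not part of this repo):
#
#   python utils/generate_det_roidb.py \
#       --img_lst_path data/det/train_list.txt \
#       --data_root data/det/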
|
StarcoderdataPython
|
111516
|
<gh_stars>10-100
#!/usr/bin/env python3
#
# vsim_defines.py
# <NAME> <<EMAIL>>
#
# Copyright (C) 2015-2017 ETH Zurich, University of Bologna
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
#
# templates for vcompile.csh scripts
VSIM_PREAMBLE = """#!/bin/tcsh
source ${PULP_PATH}/%s/vcompile/setup.csh
##############################################################################
# Settings
##############################################################################
set IP=%s
##############################################################################
# Check settings
##############################################################################
# check if environment variables are defined
if (! $?MSIM_LIBS_PATH ) then
echo "${Red} MSIM_LIBS_PATH is not defined ${NC}"
exit 1
endif
if (! $?IPS_PATH ) then
echo "${Red} IPS_PATH is not defined ${NC}"
exit 1
endif
set LIB_NAME="${IP}_lib"
set LIB_PATH="${MSIM_LIBS_PATH}/${LIB_NAME}"
set IP_PATH="${IPS_PATH}/%s"
set RTL_PATH="${RTL_PATH}"
##############################################################################
# Preparing library
##############################################################################
echo "${Green}--> Compiling ${IP}... ${NC}"
rm -rf $LIB_PATH
vlib $LIB_PATH
vmap $LIB_NAME $LIB_PATH
##############################################################################
# Compiling RTL
##############################################################################
"""
VSIM_POSTAMBLE ="""
echo "${Cyan}--> ${IP} compilation complete! ${NC}"
exit 0
##############################################################################
# Error handler
##############################################################################
error:
echo "${NC}"
exit 1
"""
VSIM_PREAMBLE_SUBIP = """
echo "${Green}Compiling component: ${Brown} %s ${NC}"
echo "${Red}"
"""
VSIM_VLOG_INCDIR_CMD = "+incdir+"
## Add -suppress 2583 to remove warning about always_comb|ff wrapped with
# generate struct that can be only checked after elaboration at vopt stage
VSIM_VLOG_CMD = "vlog -quiet -sv -suppress 2583 -work ${LIB_PATH} %s %s %s || goto error\n"
VSIM_VCOM_CMD = "vcom -quiet -suppress 2583 -work ${LIB_PATH} %s %s || goto error\n"
# templates for vsim.tcl
VSIM_TCL_PREAMBLE = """set VSIM_%s_LIBS " \\\
"""
VSIM_TCL_CMD = " -L %s_lib \\\n"
VSIM_TCL_POSTAMBLE = """"
"""
# templates for vcompile_libs.csh
VCOMPILE_LIBS_PREAMBLE = """#!/usr/bin/tcsh
echo \"\"
echo \"${Green}--> Compiling PULP IPs libraries... ${NC}\"
"""
VCOMPILE_LIBS_CMD = "tcsh ${PULP_PATH}/%s/vcompile/ips/vcompile_%s.csh || exit 1\n"
VCOMPILE_LIBS_XILINX_CMD = "tcsh ${PULP_PATH}/fpga/sim/vcompile/ips/vcompile_%s.csh || exit 1\n"
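# Hypothetical formatting sketch: how a generator script might fill these
# templates in (the IP name and file paths are made-up examples):
#
# script = VSIM_PREAMBLE % ("sim", "my_ip", "my_ip")
# script += VSIM_PREAMBLE_SUBIP % "my_ip_rtl"
# script += VSIM_VLOG_CMD % ("", VSIM_VLOG_INCDIR_CMD + "${IP_PATH}/include",
#                            "${IP_PATH}/rtl/my_ip.sv")
# script += VSIM_POSTAMBLE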
|
StarcoderdataPython
|
3209151
|
import json
def main():
with open("./_data/componentes-curriculares.json", "r") as file:
componentes = json.load(file)
for componente in componentes:
codigo = componente['codigo']
        print('Generating component', componente['codigo'], ' - ', componente['nome'])
text = f"---\ncodigo: {codigo}\nlayout: componente\n---\n"
local = f"./curso/componentes/{codigo.lower()}.html"
with open(local, "w") as file:
file.write(text)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1678685
|
"""
Bing (Videos)
@website https://www.bing.com/videos
@provide-api yes (http://datamarket.azure.com/dataset/bing/search)
@using-api no
@results HTML
@stable no
@parse url, title, content, thumbnail
"""
from json import loads
from lxml import html
from searx.engines.xpath import extract_text
from searx.url_utils import urlencode
categories = ['videos']
paging = True
safesearch = True
time_range_support = True
number_of_results = 10
search_url = 'https://www.bing.com/videos/asyncv2?{query}&async=content&'\
'first={offset}&count={number_of_results}&CW=1366&CH=25&FORM=R5VR5'
time_range_string = '&qft=+filterui:videoage-lt{interval}'
time_range_dict = {'day': '1440',
'week': '10080',
'month': '43200',
'year': '525600'}
# safesearch definitions
safesearch_types = {2: 'STRICT',
1: 'DEMOTE',
0: 'OFF'}
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * 10 + 1
# safesearch cookie
params['cookies']['SRCHHPGUSR'] = \
'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
# language cookie
params['cookies']['_EDGE_S'] = 'mkt=' + params['language'].lower() + '&F=1'
# query and paging
params['url'] = search_url.format(query=urlencode({'q': query}),
offset=offset,
number_of_results=number_of_results)
# time range
if params['time_range'] in time_range_dict:
params['url'] += time_range_string.format(interval=time_range_dict[params['time_range']])
return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.text)
for result in dom.xpath('//div[@class="dg_u"]'):
# try to extract the url
url_container = result.xpath('.//div[@class="sa_wrapper"]/@data-eventpayload')
if len(url_container) > 0:
url = loads(url_container[0])['purl']
else:
url = result.xpath('./a/@href')[0]
# discard results that do not return an external url
# very recent results sometimes don't return the video's url
if url.startswith('/videos/search?'):
continue
title = extract_text(result.xpath('./a//div[@class="tl"]'))
content = extract_text(result.xpath('.//div[@class="pubInfo"]'))
thumbnail = result.xpath('.//div[@class="vthumb"]/img/@src')[0]
results.append({'url': url,
'title': title,
'content': content,
'thumbnail': thumbnail,
'template': 'videos.html'})
# first page ignores requested number of results
if len(results) >= number_of_results:
break
return results
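# Hypothetical smoke test: the params dict mimics what searx passes into
# request(); the values are made up.
#
# params = {'pageno': 1, 'safesearch': 1, 'time_range': 'week',
#           'language': 'en-US', 'cookies': {}}
# print(request('cute cats', params)['url'])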
|
StarcoderdataPython
|
79619
|
import copy
import logging
from abc import ABC
from typing import Dict, Optional, Type, Union
import torch
from pytorch_lightning import LightningModule
from torch.nn.modules import Module
from torch.utils.data import DataLoader
from .generic_model import GenericModel
from .lightning_model import LightningModel
logger = logging.getLogger()
class GenericMCDropoutModel(GenericModel, ABC):
"""
Generic model wrapper for mcdropout uncertainty estimator
"""
def __init__(
self,
model_class: Type[Module],
model_config: Union[str, Dict],
trainer_config: Union[str, Dict],
n_estimators: int = 10,
eval_dropout_prob: float = 0.2,
):
super(GenericMCDropoutModel, self).__init__(model_class, model_config, trainer_config)
_check_mc_dropout_model(model_class, model_config)
self.n_estimators = n_estimators
self.eval_dropout_prob = eval_dropout_prob
def __call__(self, loader: DataLoader) -> torch.Tensor:
"""
:param loader: pytorch dataloader
:return: model predictions
"""
if self.current_model is None:
raise ValueError("No current model, call 'train(train_loader, valid_loader)' to train the model first")
predictions = []
model = self.current_model
model.eval()
with torch.no_grad():
_enable_only_dropout_layers(model, self.eval_dropout_prob)
for _ in range(self.n_estimators):
model_prediction = []
for x, _ in loader:
model_prediction.append(model(x).detach().cpu())
predictions.append(torch.cat(model_prediction, 0))
predictions = torch.stack(predictions)
return predictions
class LightningMCDropoutModel(GenericMCDropoutModel, LightningModel):
r"""
Wrapper for MC Dropout estimator with pytorch lightning trainer
Example:
.. code-block:: python
import torch
import pytorch_lightning as pl
class PyLModel(pl.LightningModule):
def __init__(self, in_dim, out_dim):
                super(PyLModel, self).__init__()
self.linear = torch.nn.Linear(in_dim, out_dim)
# need to define other train/test steps and optimizers methods required
# by pytorch-lightning to run this example
wrapper = LightningMCDropoutModel(
PyLModel,
model_config={"in_dim":10, "out_dim":1},
trainer_config={"epochs":100},
n_estimators=10,
eval_dropout_prob=0.2,
)
wrapper.train(train_loader, valid_loader)
predictions = wrapper(loader)
assert predictions.size(0) == 10
"""
def __init__(
self,
model_class: Type[LightningModule],
model_config: Union[Dict, str],
trainer_config: Union[Dict, str],
n_estimators: int = 10,
eval_dropout_prob: float = 0.2,
):
super(LightningMCDropoutModel, self).__init__(
model_class,
model_config,
trainer_config,
n_estimators=n_estimators,
eval_dropout_prob=eval_dropout_prob,
)
def _enable_only_dropout_layers(model: Module, p: Optional[float] = None) -> None:
def enable_dropout_on_module(m):
if m.__class__.__name__.startswith("Dropout"):
if isinstance(p, float) and (0 <= p <= 1):
m.p = p
elif isinstance(p, float) and (p < 0 or p > 1):
logger.warning(f"Evaluation dropout probability should be a float between 0 and 1, got {p}")
m.train()
model.apply(enable_dropout_on_module)
def _check_mc_dropout_model(model_class: Type[Module], model_config: Dict) -> None:
model = model_class(**model_config)
def has_dropout_module(model):
is_dropout = []
for m in model.children():
if m.__class__.__name__.startswith("Dropout"):
is_dropout.append(True)
else:
is_dropout += has_dropout_module(m)
return is_dropout
if not any(has_dropout_module(model)):
raise ValueError("Model provided do not contain any torch.nn.Dropout modules, cannot apply MC Dropout")
|
StarcoderdataPython
|
4836492
|
import numpy as np
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.cluster import MiniBatchKMeans, KMeans
from tqdm import tqdm
import joblib
import seaborn as sns
import matplotlib.pyplot as plt
import os
import pickle
from argparse import ArgumentParser
def learn_f0_kmeans(f0_path, voiced_flag_path, split, output_dir, visualize=False, n_clusters=12, batch_size=10000, max_no_improvement=100, max_iter=300, n_init=20):
with open(f0_path, "rb") as f:
f0 = pickle.load(f)
with open(voiced_flag_path, "rb") as f:
voiced_flag = pickle.load(f)
assert len(f0) == len(voiced_flag), "the number of records is mismatch between f0 and voiced_flag"
print(f" the number of records is {len(f0)}")
tot_voiced_f0 = []
for i in tqdm(range(len(f0))):
for j in range(len(f0[i])):
if voiced_flag[i][j]:
tot_voiced_f0.append(f0[i][j])
    km_model = MiniBatchKMeans(n_clusters=n_clusters, batch_size=batch_size, max_no_improvement=max_no_improvement, max_iter=max_iter, n_init=n_init)
tot_voiced_f0 = np.array(tot_voiced_f0).reshape(-1, 1)
print(tot_voiced_f0.shape)
print("[INFO] start running kmeans")
kmeans = km_model.fit(tot_voiced_f0)
print("[INFO] finish running kmeans")
print(sorted(km_model.cluster_centers_))
# save kmeans model
km_path = os.path.join(output_dir, f"{split}_kmeans_f0.pkl")
joblib.dump(km_model, km_path)
# visualize f0 histogram
if visualize:
plt.hist(tot_voiced_f0, bins=50)
plt.savefig(os.path.join(output_dir, "kmeans_f0_result.jpg"))
if __name__ == "__main__":
PARSER = ArgumentParser()
PARSER.add_argument("-p", "--f0_path", required=True)
PARSER.add_argument("-f", "--voiced_flag_path", required=True)
PARSER.add_argument("-s", "--split", help="dataset split name", required=True)
PARSER.add_argument("-o", "--output_dir", help="the directory to save trained kmeans model", required=True)
PARSER.add_argument("--visualize", action='store_true', help="whether to visualize kmeans result by tsne visualization")
PARSER.add_argument("--n_clusters", type=int, default=12, help="number of clusters for kmeans")
learn_f0_kmeans(**vars(PARSER.parse_args()))
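# Hypothetical invocation (the script and pickle file names are placeholders):
#
#   python learn_f0_kmeans.py -p f0.pkl -f voiced_flag.pkl -s train \
#       -o output_dir/ --n_clusters 12 --visualize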
|
StarcoderdataPython
|
3260425
|
<gh_stars>1-10
from .haze_net import AODNet
|
StarcoderdataPython
|
1763108
|
<reponame>serpis/pynik
# coding: utf-8
from commands import Command
import random
import datetime
import utility
import standard
import re
class Game:
def __init__(self, name):
self.name = name
self.players = {}
self.timeout = None
self.time = None
self.current_question = None
self.timeout_streak = 0
self.running = False
self.words = ["fur", "nigeria", "chewing gum", "cigar", "gamecube", "flower", "mp3", "bottle", "film", "radio", "knob", "fuck", "temperature", "milk", "mouse", "man", "wax", "pillow", "bicycle", "pub", "telephone", "stalk", "dog", "cat", "blacksmith", "glass", "door", "house", "metal", "lighter", "window", "mechanic", "camera", "stapler", "pencil", "tape", "scissors"]
def set_dictionary(self, dictionary):
self.dictionary = dictionary
self.words = ["fur", "nigeria", "disco", "chewing gum", "cigar", "gamecube", "flower", "mp3", "bottle", "film", "radio", "knob", "fuck", "temperature", "milk", "mouse", "man", "wax", "pillow", "bicycle", "pub", "telephone", "stalk", "dog", "cat", "blacksmith", "glass", "door", "house", "metal", "lighter", "window", "mechanic", "camera", "stapler", "pencil", "tape", "scissors"]
def on_tick(self, bot, time):
self.time = time
if self.running and (not self.timeout or time > self.timeout):
if self.timeout and time - self.timeout > datetime.timedelta(0, 0, 0, 0, 10): #if we're 10 minutes it's more than just lag...
self.running = False
return
self.timeout_streak += 1
if self.timeout_streak > 3:
self.timeout_streak = 0
self.send_timeout(bot)
self.send_timeout_quit(bot)
self.stop(bot)
else:
if self.current_question:
self.send_timeout(bot)
self.new_question()
self.send_question(bot)
def on_privmsg(self, bot, source, target, message):
self.on_tick(bot, self.time)
if self.running:
if self.current_question[1] == message:
self.timeout_streak = 0
if source in self.players:
self.players[source] += 1
else:
self.players[source] = 1
bot.tell(self.name, "Yay! %s got it!" % utility.extract_nick(source))
self.new_question()
self.send_question(bot)
def start(self, bot):
if not self.running:
self.running = True
self.current_question = None
self.timeout = None
bot.tell(self.name, "Game started.")
def new_question(self):
if len(self.words):
word = self.words[0]
self.words = self.words[1:]
question = standard.WikipediaCommand.instance.wp_get(word)
if question:
question = re.sub("(?i)" + word, "*" * len(word), question)
self.current_question = (question, word)
if not self.current_question:
self.current_question = random.choice(self.dictionary.items())
self.timeout = self.time + datetime.timedelta(0, 30)
def send_question(self, bot):
bot.tell(self.name, "Question: %s" % self.current_question[0])
def stop(self, bot):
if self.running:
self.running = False
bot.tell(self.name, "Game stopped.")
def format_hiscore(self, tuple):
return "%s: %d" % (utility.extract_nick(tuple[0]), tuple[1])
def send_hiscore(self, bot):
l = self.players.items()
l.sort(key=lambda x: (x[1], x[0]))
str = ", ".join(map(self.format_hiscore, reversed(l)))
bot.tell(self.name, "Hi-score: %s." % str)
def send_timeout(self, bot):
bot.tell(self.name, "Timed out. Answer: %s." % self.current_question[1])
def send_timeout_quit(self, bot):
bot.tell(self.name, "Stopping inactive game.")
class GamePlugin(Command):
hooks = ['on_privmsg']
def __init__(self):
self.dictionary = { "*round time machine*": "clock", "*fourlegged reliever*": "chair", "*round rubber carrier*": "wheel", "*code machine*": "matricks", "*italian plumber*": "mario", "*squishy ball with gun*": "tee", "*round house kick master*": "chuck norris", "*best encoding*": "utf-8" }
self.games = {}
def on_load(self):
self.load_games()
for game in self.games.values():
game.set_dictionary(self.dictionary)
def on_unload(self):
self.save_games()
def on_save(self):
self.save_games()
def save_games(self):
utility.save_data("games", self.games)
def load_games(self):
self.games = utility.load_data("games", {})
def trig_gamestart(self, bot, source, target, trigger, argument):
if not target in self.games.keys():
self.games[target] = Game(target)
self.games[target].set_dictionary(self.dictionary)
game = self.games[target]
game.start(bot)
def trig_gamestop(self, bot, source, target, trigger, argument):
if target in self.games.keys():
game = self.games[target]
game.stop(bot)
self.on_save()
def trig_gamehiscore(self, bot, source, target, trigger, argument):
if target in self.games.keys():
game = self.games[target]
game.send_hiscore(bot)
else:
return "I have no hiscore for this game."
def on_privmsg(self, bot, source, target, message):
if target in self.games.keys():
game = self.games[target]
game.on_privmsg(bot, source, target, message)
return None
def timer_beat(self, bot, time):
for game in self.games.values():
game.on_tick(bot, time)
|
StarcoderdataPython
|
169228
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes residual symmetries of solutions.
As all critical points with a rank-2 simple Lie group symmetry have been
known for many years, we can restrict ourselves to a residual Lie symmetry of
Spin(3)^A x U(1)^B. This considerably simplifies the analysis.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cmath
import collections
import glob
import itertools
import math
import numpy
import os
import pprint
# CAUTION: scipy.linalg.eigh() will produce an orthonormal basis, while
# scipy.linalg.eig(), when used on a hermitean matrix, typically will not
# orthonormalize eigenvectors in degenerate eigenspaces.
# This behavior is not documented properly, but "obvious" when considering
# the underlying algorithm.
import scipy.linalg
from dim4.so8_supergravity_extrema.code import algebra
CanonicalizedSymmetry = collections.namedtuple(
'CanonicalizedSymmetry',
['u1s', # Sequence of U(1) generators, each as a 28-vector acting on [ik].
'semisimple_part', # [28, d]-array, semisimple part of the algebra.
'spin3_cartan_gens' # Cartan generators, one per spin(3) subalgebra.
])
# A `Spin8Action` tuple consists of an einsum reduction-string,
# typically of the form 'aij,aN->jiN', as well as the 1st tensor-argument
# to the corresponding contraction.
Spin8Action = collections.namedtuple(
'Spin8Action', ['einsum', 'tensor'])
class BranchingFormatter(object):
"""Base class for branching-formatters."""
def format(self, num_spin3s, branching):
return self.sum_join(self.format_irreps(num_spin3s, b) for b in branching)
def format_branching_tag(self, tag):
"""Formats tag (8, 'v') -> '8v' etc."""
tag_dim, tag_subscript = tag
return '%s%s' % (tag_dim, tag_subscript)
def sum_join(self, formatted):
return ' + '.join(formatted)
def format_multiplicity(self, multiplicity, formatted_obj):
"""Adds a multiplicity prefix to a formatted object."""
if multiplicity == 1:
return formatted_obj
return '%dx%s' % (multiplicity, formatted_obj)
def format_irreps(self, num_spin3s, irreps_part):
"""Formats a group of identical irreducible representations."""
charges, mult = irreps_part
return self.format_multiplicity(mult,
self.format_irrep(num_spin3s, charges))
def format_irrep(self, num_spin3s, charges):
"""Formats a single irreducible representation."""
if set(charges[:num_spin3s]) == {0}:
spin3_part = ''
else:
spin3_part = 'x'.join('%s' % int(round(2 * c + 1))
for c in charges[:num_spin3s])
assert all(c == int(c) for c in charges[num_spin3s:])
u1_part = ', '.join(str(int(c)) for c in charges[num_spin3s:])
if spin3_part:
return ('[%s]{%s}' % (spin3_part, u1_part) if u1_part
else '[%s]' % spin3_part)
else:
return '{%s}' % u1_part
class LaTeXBranchingFormatter(BranchingFormatter):
"""BranchingFormatter that generates LaTeX code."""
def format_branching_tag(self, tag):
"""Formats tag (8, 'v') -> '8_{v}' etc."""
tag_dim, tag_subscript = tag
return '%s_{%s}' % (tag_dim, tag_subscript)
def format_multiplicity(self, multiplicity, formatted_obj):
if multiplicity == 1:
return formatted_obj
return r'%d\times%s' % (multiplicity, formatted_obj)
def _format_charge(self, c, sub_super):
assert c == int(c)
if c == 0:
return ''
return r'%s{\scriptscriptstyle %s}' % (sub_super, '-+'[c > 0] * abs(int(c)))
def format_irrep(self, num_spin3s, charges):
# We use style such as 33^{+++}_{--},
# i.e. 1st U(1) gets superscript charges,
# 2nd U(1) gets subscript charges.
assert all(c == int(c) for c in charges[num_spin3s:])
if set(charges[:num_spin3s]) <= {0}:
spin3_part = r'\mathbf{1}' # No Spin3s, or only singlet.
elif num_spin3s == 1:
spin3_part = r'\mathbf{%s}' % int(round(2 * charges[0] + 1))
else:
spin3_part = '(%s)' % (
','.join(r'\mathbf{%d}' % int(round(2 * c + 1))
for c in charges[:num_spin3s]))
num_u1s = len(charges) - num_spin3s
u1a_part = u1b_part = ''
if num_u1s >= 1:
u1a_part = self._format_charge(charges[num_spin3s], '^')
if num_u1s == 2:
u1b_part = self._format_charge(charges[num_spin3s + 1], '_')
return spin3_part + u1a_part + u1b_part
TEXT_FORMATTER = BranchingFormatter()
LATEX_FORMATTER = LaTeXBranchingFormatter()
# The Spin(8) structure constants.
_spin8_fabc = 2 * numpy.einsum('cik,abik->abc',
algebra.su8.m_28_8_8,
# We do not need to antisymmetrize [ik] here,
# as the above factor already does this.
numpy.einsum('aij,bjk->abik',
algebra.su8.m_28_8_8,
algebra.su8.m_28_8_8))
_spin8_action56 = numpy.einsum('aik,ABik->aAB',
algebra.su8.m_28_8_8,
algebra.su8.m_action_56_56_8_8)
# Branching-rules task specification, as used for the `decomposition_tasks`
# argument to spin3u1_decompose().
# One may generally want to pass an extended arg that adds tasks which also
# decompose e.g. degenerate mass-eigenstates w.r.t. symmetry.
# These are also used to find scaling for u(1) generators that makes all
# 8v, 8s, 8c charges integral.
SPIN8_ACTION_8V = Spin8Action(einsum='aij,aN->jiN',
tensor=algebra.su8.m_28_8_8)
SPIN8_ACTION_8S = Spin8Action(
einsum='aAB,aN->BAN',
tensor=numpy.einsum('aij,ijAB->aAB',
0.25 * algebra.su8.m_28_8_8,
algebra.spin8.gamma_vvss))
SPIN8_ACTION_8C = Spin8Action(
einsum='aAB,aN->BAN',
tensor=numpy.einsum('aij,ijAB->aAB',
0.25 * algebra.su8.m_28_8_8,
algebra.spin8.gamma_vvcc))
SPIN8_ACTION_AD = Spin8Action(einsum='aAB,aN->BAN', tensor=_spin8_fabc * 0.5)
SPIN8_ACTION_FERMIONS = Spin8Action(einsum='aAB,aN->BAN',
tensor=_spin8_action56)
SPIN8_ACTION_SCALARS = Spin8Action(
einsum='aAB,aN->BAN',
tensor=0.5 * algebra.e7.spin8_action_on_v70o)
SPIN8_BRANCHINGS_VSC = (
(SPIN8_ACTION_8V,
[((8, 'v'), numpy.eye(8))]),
(SPIN8_ACTION_8S,
[((8, 's'), numpy.eye(8))]),
(SPIN8_ACTION_8C,
[((8, 'c'), numpy.eye(8))]))
# Extended branching-rules task specification, adds 28->... branching.
SPIN8_BRANCHINGS = (
SPIN8_BRANCHINGS_VSC +
((SPIN8_ACTION_AD, [((28, ''), numpy.eye(28))]),))
def round2(x):
"""Rounds number to 2 digits, canonicalizing -0.0 to 0.0."""
return numpy.round(x, 2) or 0.0
def allclose2(p, q):
"""Determines if `p` and `q` match to two digits."""
return numpy.allclose(p, q, rtol=0.01, atol=0.01)
def aggregate_eigenvectors(eigvals, eigvecs, tolerance=1e-6):
"""Collects eigenvectors by eigenvalue into eigenspaces.
The `eigvals` and `eigvecs` arguments must be as produced by
scipy.linalg.eigh().
Args:
eigvals, array of eigenvalues. Must be approximately-real.
eigvecs, array of eigenvectors.
tolerance, float. Tolerance threshold for considering eigenvalues
as degenerate.
Returns:
List of the form [(eigenvalue, eigenspace), ...],
where each `eigenspace` is a list of eigenvectors for the corresponding
eigenvalue.
Raises:
ValueError, if reality requirements are violated.
"""
if not numpy.allclose(eigvals, eigvals.real):
raise ValueError('Non-real eigenvalues.')
eigvalue_and_aggregated_eigvecs = []
for eigvalue, eigvec in sorted(zip(eigvals.real,
[tuple(v.astype(numpy.complex128))
for v in eigvecs.T]),
# Do not compare eigenvectors for degenerate
# eigenvalues. Sort by descending order.
key=lambda ev_evec: -ev_evec[0]):
for eigvalue_known, eigvecs_known in eigvalue_and_aggregated_eigvecs:
if abs(eigvalue - eigvalue_known) <= tolerance:
eigvecs_known.append(eigvec)
break
else: # Reached end of loop.
eigvalue_and_aggregated_eigvecs.append((eigvalue, [eigvec]))
return eigvalue_and_aggregated_eigvecs
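# Small self-contained sketch of the helper above (the 2x2 matrix is an
# arbitrary example with a degenerate eigenvalue):
#
# eigvals, eigvecs = scipy.linalg.eigh(numpy.diag([1.0, 1.0]))
# for eigval, espace in aggregate_eigenvectors(eigvals, eigvecs):
#     print(eigval, len(espace))   # -> 1.0 2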
def get_residual_gauge_symmetry(v70, threshold=0.05):
"""Maps scalar 70-vector to [a, n]-tensor of unbroken symmetry generators.
Index `a` is a Spin(8)-adjoint index, `n` counts (orthonormal) basis vectors.
Args:
v70: The e7/su8 70-vector describing a point on the scalar manifold.
threshold: Threshold on the generalized SVD-eigenvalue for considering
a direction as belonging to the residual symmetry.
"""
su, ss, svh = scipy.linalg.svd(
numpy.einsum('avw,v->aw',
algebra.e7.spin8_action_on_v70,
v70))
del svh # Unused.
# Select those columns for which the diagonal entry is essentially zero.
return su.T[ss <= threshold].T
def get_simultaneous_eigenbasis(commuting_gens,
gen_action_einsum='abc,aN->cbN',
gen_action_tensor=_spin8_fabc,
initial_space=None,
checks=True,
tolerance=1e-6):
"""Finds a simultaneous eigenbasis for a collection of commuting generators.
Args:
commuting_gens: [28, N]-array of real and mutually orthogonal generators.
gen_action_einsum: numpy.einsum() contraction specification that maps
`gen_action_tensor` and `commuting_gens` to a set of N matrices given as
[D, D, N]-array that represent the generators on the desired space.
initial_space: [D, K]-dimensional initial space to decompose into
eigenspaces, or `None`. If `None`, uses numpy.eye(D).
checks: If True, perform internal consistency checks.
tolerance: Tolerance difference-threshold for considering
two eigenvalues as identical.
Returns:
Pair of (simultaneous_eigenbasis, charges), where `simultaneous_eigenbasis`
is a [28, K]-dimensional array of eigenvectors, and `charges` is a list
of corresponding charge-tuples.
"""
# Map generators to endomorphisms. Our conventions are such that
# the result of contracting with `gen_action_tensor` also gets multiplied
# with 1j. For spin(8) action on 8v, 8s, 8c, 28, etc., this ensures that
# with all-real generators and all-real action-tensor, we get hermitean
# endomorphisms with all-real spectrum.
gens_action = numpy.einsum(gen_action_einsum,
gen_action_tensor,
commuting_gens) * 1j
if initial_space is None:
initial_space = numpy.eye(gens_action.shape[0])
#
def recursively_split_eigenspaces(num_generator, charge_tagged_eigenspaces):
"""Recursively splits an eigenspace.
Args:
num_generator: The number of the commuting generator to use for the next
splitting-step.
charge_tagged_eigenspaces: List [(partial_charges, subspace), ...]
where `partial_charges` is a tuple of charges w.r.t. the first
`num_generator` generators (so, () for num_generator == 0),
and `subspace` is a [D, K]-array of subspace directions.
Returns:
(Ultimately), fully split charge_tagged_eigenspaces, where the
`partial_charges` tags list as many charges as there are generators.
"""
if num_generator == gens_action.shape[-1]:
return charge_tagged_eigenspaces
gen_action = gens_action[:, :, num_generator]
split_eigenspaces = []
for charges, espace in charge_tagged_eigenspaces:
if checks:
eigenspace_sprod = numpy.einsum('aj,ak->jk', espace.conj(), espace)
assert allclose2(
eigenspace_sprod,
numpy.eye(espace.shape[1])), (
'Weird Eigenspace normalization: ' + repr(
numpy.round(eigenspace_sprod, 3)))
gen_on_eigenspace = numpy.einsum(
'aj,ak->jk',
espace.conj(),
numpy.einsum('ab,bj->aj', gen_action, espace))
sub_eigvals, sub_eigvecs_T = scipy.linalg.eigh(gen_on_eigenspace)
list_approx_eigval_and_eigvecs = []
for sub_eigval, sub_eigvec in zip(sub_eigvals, sub_eigvecs_T.T):
# Lift back to original space.
eigvec = numpy.einsum('gs,s->g', espace, sub_eigvec) # |v> <v| G |v>
if checks:
gv = numpy.dot(gen_action, eigvec)
ev = sub_eigval * eigvec
assert allclose2(gv, ev), (
'Sub-Eigval is bad: g*v=%r, e*v=%r' % (
numpy.round(gv, 3), numpy.round(ev, 3)))
assert allclose2(
numpy.dot(eigvec.conj(), eigvec), 1.0), (
'Eigenvector is not normalized.')
for seen_eigval, seen_eigvecs in list_approx_eigval_and_eigvecs:
if abs(sub_eigval - seen_eigval) <= tolerance:
assert all(allclose2(0, numpy.dot(s.conj(), eigvec))
for s in seen_eigvecs), 'Non-Orthogonality'
seen_eigvecs.append(eigvec)
break
else: # Reached end of list.
list_approx_eigval_and_eigvecs.append(
(sub_eigval, # This is also the actual eigenvalue.
[eigvec]))
for eigval, eigvecs in list_approx_eigval_and_eigvecs:
eigenspace = numpy.stack(eigvecs, axis=-1)
assert allclose2(
numpy.einsum('aj,ak->jk', eigenspace.conj(), eigenspace),
numpy.eye(eigenspace.shape[-1])), 'Bad Eigenspace'
split_eigenspaces.append((charges + (eigval,), eigenspace))
return recursively_split_eigenspaces(num_generator + 1, split_eigenspaces)
#
charge_tagged_eigenspaces = recursively_split_eigenspaces(
0, [((), initial_space)])
simultaneous_eigenbasis = numpy.stack(
[evec for _, espace in charge_tagged_eigenspaces for evec in espace.T],
axis=-1)
charges = [evec_charges
for evec_charges, espace in charge_tagged_eigenspaces
for evec in espace.T]
return simultaneous_eigenbasis, charges
def scale_u1_generator_to_8vsc_integral_charges(u1_gen, round_to_digits=3):
"""Scales a generator such that all 8v, 8s, 8c charges are integers."""
charges = []
for spin8action, _ in SPIN8_BRANCHINGS_VSC:
eigvals, _ = scipy.linalg.eigh(
numpy.einsum(spin8action.einsum,
spin8action.tensor,
1j * u1_gen.reshape((28, 1)))[:, :, 0])
assert numpy.allclose(eigvals, eigvals.real)
for eigval in eigvals:
charges.append(eigval)
approx_charges = sorted(set(abs(numpy.round(c, 6)) for c in charges) - {0.0})
factor = 1.0 / approx_charges[0]
for n in range(1, 25):
scaled_charges = [numpy.round(factor * n * c, round_to_digits)
for c in approx_charges]
if all(x == int(x) for x in scaled_charges):
return factor * n * u1_gen
raise ValueError('Could not re-scale U(1)-generator.')
def canonicalize_u1s(u1s, tolerance=1e-3):
"""Canonicalizes a collection of up to two u(1) generators."""
if u1s.shape[1] == 0:
return numpy.zeros([28, 0])
if u1s.shape[0] != 28:
raise ValueError(
'Each U(1) generator should be given as a 28-vector.')
num_u1s = u1s.shape[1]
if num_u1s > 2:
raise ValueError('Cannot handle more than two U(1)s')
if num_u1s == 1:
return scale_u1_generator_to_8vsc_integral_charges(u1s[:, 0]).reshape(28, 1)
eigvecs_T, evec_charges = get_simultaneous_eigenbasis(u1s)
a_vecs_eigvals = numpy.array(evec_charges).T
# Otherwise, we have exactly two U(1)s.
# How to reduce the charge-lattice?
zs = numpy.array([x + 1j * y for x, y in a_vecs_eigvals.T])
zs_by_origin_distance = sorted([z for z in zs if abs(z) >= tolerance],
key=abs)
z1 = zs_by_origin_distance[0]
angle = math.atan2(z1.imag, z1.real)
cos_angle = math.cos(angle)
sin_angle = math.sin(angle)
u1a = u1s[:, 0] * cos_angle + u1s[:, 1] * sin_angle
u1b = u1s[:, 0] * sin_angle - u1s[:, 1] * cos_angle
canon_u1s = numpy.stack([
scale_u1_generator_to_8vsc_integral_charges(u1a),
scale_u1_generator_to_8vsc_integral_charges(u1b)], axis=1)
return canon_u1s
def decompose_reductive_lie_algebra(residual_symmetry,
threshold=0.05):
"""Decomposes a residual symmetry into semisimple and u(1) parts.
Args:
residual_symmetry: Residual symmetry as produced by
`get_residual_gauge_symmetry()`.
threshold: Threshold for SVD generalized commutator-eigenvalue to consider
a generator as being part of the non-semisimple subalgebra.
"""
no_symmetry = numpy.zeros([28, 0])
if residual_symmetry.shape[1] == 0:
return no_symmetry, no_symmetry
commutators = numpy.einsum(
'avc,cw->avw',
numpy.einsum('abc,bv->avc', _spin8_fabc, residual_symmetry),
residual_symmetry)
su, ss, svh = scipy.linalg.svd(commutators.reshape(commutators.shape[0], -1))
del svh # Unused.
# We want those commutators that do not go to zero.
derivative_symmetry = su.T[:len(ss)][ss >= threshold].T
# By construction (via SVD), and using orthogonality of our spin(8) basis,
# `derivative_symmetry` already consists of orthogonal spin(8) generators, i.e.
# tr(AB) = 0 for basis vectors A != B.
# The 'complement' consists of u(1) factors that have zero inner product with
# `derivative_symmetry`.
if derivative_symmetry.size:
inner_products_with_input = numpy.einsum('av,aw->vw',
residual_symmetry,
derivative_symmetry)
su, ss, svh = scipy.linalg.svd(inner_products_with_input)
# Zero-pad the vector of 'generalized eigenvalues' to su's size.
ss_ext = numpy.concatenate(
[ss, numpy.zeros([max(0, su.shape[0] - len(ss))])])
u1s = numpy.einsum('av,vn->an',
residual_symmetry,
su.T[ss_ext <= threshold].T)
else: # All residual symmetry is in u(1)-factors.
return no_symmetry, residual_symmetry
# Assert that our U1s are orthogonal.
if u1s.size:
# Check generator orthonormality.
assert numpy.allclose(numpy.einsum('av,aw->vw', u1s, u1s),
numpy.eye(u1s.shape[1]), atol=1e-6)
else:
u1s = no_symmetry
return derivative_symmetry, u1s
def find_raw_cartan_subalgebra(spin8_subalgebra_generators, threshold=1e-3):
"""Finds a Cartan subalgebra for an algebra if the form A*so(3) + B*u(1)."""
if spin8_subalgebra_generators.shape[1] == 0:
return numpy.zeros([28, 0])
subalgebra_sprods = numpy.einsum(
'aj,ak->jk', spin8_subalgebra_generators, spin8_subalgebra_generators)
# Check that incoming subalgebra-generators really are reasonably orthonormal
# (up to overall scaling) w.r.t. Cartan-Killing metric.
assert numpy.allclose(subalgebra_sprods,
numpy.eye(spin8_subalgebra_generators.shape[1]))
cartan_generators_found = []
residual_charge_zero_subspace = spin8_subalgebra_generators
while True:
gen = residual_charge_zero_subspace[:, 0]
cartan_generators_found.append(gen)
assert numpy.allclose(gen, gen.real), 'Generator is not real!'
orthogonal_subalgebra = residual_charge_zero_subspace[:, 1:]
if not orthogonal_subalgebra.shape[1]:
return numpy.stack(cartan_generators_found, axis=-1)
gen_ad_action_on_spin8 = numpy.einsum('abc,a->cb', _spin8_fabc, gen)
gen_action_on_orthogonal_subalgebra = numpy.einsum(
'ai,aj->ij',
orthogonal_subalgebra,
numpy.einsum('bc,cj->bj',
gen_ad_action_on_spin8 * 1j,
orthogonal_subalgebra))
assert numpy.allclose(gen_action_on_orthogonal_subalgebra +
gen_action_on_orthogonal_subalgebra.T,
numpy.zeros_like(gen_action_on_orthogonal_subalgebra))
eigvals, eigvecs_T = scipy.linalg.eigh(gen_action_on_orthogonal_subalgebra)
nullspace_gens = []
for eigval, eigvec in zip(eigvals, eigvecs_T.T):
if abs(eigval) <= threshold:
assert numpy.allclose(eigvec, eigvec.real)
nullspace_gens.append(
numpy.einsum('ai,i->a', orthogonal_subalgebra, eigvec.real))
if not len(nullspace_gens):
return numpy.stack(cartan_generators_found, axis=-1)
nullspace = numpy.stack(nullspace_gens, axis=1)
assert numpy.allclose(nullspace, nullspace.real), 'Non-real nullspace'
assert numpy.allclose(numpy.einsum('ai,aj->ij', nullspace, nullspace),
numpy.eye(nullspace.shape[1])), 'Non-Ortho Nullspace'
residual_charge_zero_subspace = nullspace
def weightspace_decompose(generator_action,
cartan_subalgebra_generators,
space,
tolerance=1e-6):
"""Decomposes `space` into subspaces tagged by weight-vectors."""
seq_cartan_generators = list(cartan_subalgebra_generators.T)
def cartan_split(subspace_tagged_by_weight_prefix, num_cartan_generator):
cartan_action = numpy.einsum(
'aIJ,a->IJ',
generator_action,
seq_cartan_generators[num_cartan_generator] * 1j)
result = []
for weight_prefix, subspace in subspace_tagged_by_weight_prefix:
assert numpy.allclose(
numpy.einsum('aJ,aK->JK', subspace.conj(), subspace),
numpy.eye(subspace.shape[1])), (
'Non-orthonormalized subspace:\n' +
repr(numpy.round(numpy.einsum('aJ,aK->JK',
subspace.conj(),
subspace), 3)))
cartan_action_on_subspace = numpy.einsum(
'Jm,Jn->mn', subspace.conj(),
numpy.einsum('JK,Kn->Jn', cartan_action, subspace))
eigvals, eigvecs_T = scipy.linalg.eigh(cartan_action_on_subspace)
eigval_and_rel_eigenspace = aggregate_eigenvectors(eigvals, eigvecs_T)
for eigval, rel_eigenspace in eigval_and_rel_eigenspace:
ext_weight_prefix = (weight_prefix + (eigval,))
result.append((ext_weight_prefix,
numpy.einsum('In,nj->Ij',
subspace,
numpy.stack(rel_eigenspace, axis=-1))))
if num_cartan_generator == len(seq_cartan_generators) - 1:
return result
return cartan_split(result, num_cartan_generator + 1)
return cartan_split([((), space)], 0)
def get_simple_roots_info(rootspaces, threshold=0.01):
"""Extracts simple roots from weightspace-decomposition of a Lie algebra."""
# Finite-dimensional simple Lie algebras have one-dimensional root spaces.
# We use this to eliminate the Cartan subalgebra at the zero-root.
rank = len(rootspaces[0][0])
null_root = (0.0,) * rank
positive_roots = [root for root, subspace in rootspaces
if subspace.shape[1] == 1 and root > null_root]
def root_length_squared(root):
return sum(x * x for x in root)
def root_distance(root1, root2):
return max(abs(r1 - r2) for r1, r2 in zip(root1, root2))
# If the root is 'clearly too long', drop it rightaway.
# It does not hurt if we allow a large amount of slack,
# as this is just for increased performance.
threshold_root_length_squared = max(
map(root_length_squared, positive_roots)) * (1 + threshold)
sum_roots = []
for root1 in positive_roots:
for root2 in positive_roots:
root12 = tuple(r1 + r2 for r1, r2 in zip(root1, root2))
if root_length_squared(root12) > threshold_root_length_squared:
continue
for sum_root in sum_roots:
if root_distance(sum_root, root12) <= threshold:
break # We already know this sum-root.
else: # Reached end of loop.
sum_roots.append(root12)
simple_roots = [root for root in positive_roots
if not any(root_distance(sum_root, root) < threshold
for sum_root in sum_roots)]
a_simple_roots = numpy.array(simple_roots)
simple_root_sprods = numpy.einsum('rj,rk->jk', a_simple_roots, a_simple_roots)
# We always normalize the length-squared of the longest root to 2.
scaling_factor_squared = 2.0 / max(
simple_root_sprods[n, n] for n in range(simple_root_sprods.shape[0]))
scaling_factor = math.sqrt(scaling_factor_squared)
scaled_root_sprods = simple_root_sprods * scaling_factor_squared
# For spin(3)^N, the roots have to be mutually orthogonal
# with length-squared 2.
assert numpy.allclose(scaled_root_sprods,
2 * numpy.eye(simple_root_sprods.shape[0]) )
pos_simple_rootspaces = [(pos_root, scaling_factor * pos_rootspace)
for (pos_root, pos_rootspace) in rootspaces
for simple_root in simple_roots
if tuple(simple_root) == tuple(pos_root)]
canonicalized_cartan_subalgebra_generators = []
for pos_root, pos_rootspace in pos_simple_rootspaces:
# For finite-dimensional Lie algebras, root spaces are one-dimensional.
assert pos_rootspace.shape[1] == 1
l_plus = pos_rootspace[:, 0]
l_minus = l_plus.conj()
cartan_h = -1j * numpy.einsum('abc,a,b->c', _spin8_fabc, l_plus, l_minus)
canonicalized_cartan_subalgebra_generators.append(cartan_h)
# TODO(tfish): Only return what we need, and *not* in a dict.
return dict(simple_root_sprods=simple_root_sprods,
canonicalized_cartan_subalgebra=numpy.stack(
canonicalized_cartan_subalgebra_generators, axis=-1),
scaling_factor_squared=scaling_factor_squared,
pos_simple_rootspaces=pos_simple_rootspaces,
scaled_root_sprods=scaled_root_sprods,
scaled_roots=a_simple_roots * math.sqrt(scaling_factor_squared))
def canonicalize_residual_spin3u1_symmetry(residual_symmetry):
"""Canonicalizes a residual so(3)^M u(1)^N symmetry."""
semisimple_part, raw_u1s = decompose_reductive_lie_algebra(residual_symmetry)
u1s = canonicalize_u1s(raw_u1s)
spin3_cartan_gens_raw = find_raw_cartan_subalgebra(semisimple_part)
return CanonicalizedSymmetry(u1s=u1s,
semisimple_part=semisimple_part,
spin3_cartan_gens=spin3_cartan_gens_raw)
def group_charges_into_spin3u1_irreps(num_spin3s, charge_vecs):
"""Groups observed charges into irreducible representations.
Args:
num_spin3s: Length of the prefix of the charge-vector that belongs to
spin(3) angular momentum operators.
charge_vecs: List of charge-tuple vectors.
Returns:
List [((tuple(highest_spin3_weights) + tuple(u1_charges)), multiplicity),
...] of irreducible-representation descriptions, sorted by descending
combined-charge-vector.
"""
def spin3_weights(highest_weight):
"""Computes a list of spin3 weights for a given irrep highest weight.
E.g.: highest_weight = 1.5 -> [1.5, 0.5, -0.5, -1.5].
Args:
highest_weight: The highest weight (Element of [0, 0.5, 1.0, 1.5, ...]).
Returns: List of weights, in descending order.
"""
w2 = int(round(2 * highest_weight))
return [highest_weight - n for n in range(1 + w2)]
def descendants(cvec):
for spin3_part in itertools.product(
*[spin3_weights(w) for w in cvec[:num_spin3s]]):
yield spin3_part + cvec[num_spin3s:]
charges_todo = collections.Counter(charge_vecs)
irreps = collections.defaultdict(int)
while charges_todo:
cvec, cvec_mult = sorted(charges_todo.items(), reverse=True)[0]
for cvec_desc in descendants(cvec):
charges_todo[cvec_desc] -= cvec_mult
if charges_todo[cvec_desc] == 0:
del charges_todo[cvec_desc]
irreps[cvec] += cvec_mult
return sorted(irreps.items(), reverse=True) # Highest charges first.
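# Illustrative example (not part of the original module): with num_spin3s=1,
# the charge multiset [(1.0,), (0.0,), (-1.0,), (0.0,)] groups into one spin-1
# irrep (weights 1, 0, -1) plus one spin-0 singlet, i.e.
#   group_charges_into_spin3u1_irreps(1, [(1.0,), (0.0,), (-1.0,), (0.0,)])
# returns [((1.0,), 1), ((0.0,), 1)].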
def spin3u1_decompose(canonicalized_symmetry,
decomposition_tasks=SPIN8_BRANCHINGS,
simplify=round2):
"""Computes decompositions into so(3)^M x u(1)^N irreducible representations.
Args:
canonicalized_symmetry: A `CanonicalizedSymmetry` object.
decomposition_tasks: Sequence of pairs (spin8action, tasks),
where `tasks` is a sequence of pairs (tag, orthogonalized_subspace).
simplify: The rounding function used to map approximately-integer charges
to integers.
"""
spin3_gens = (canonicalized_symmetry.spin3_cartan_gens.T
if (canonicalized_symmetry.spin3_cartan_gens is not None
and len(canonicalized_symmetry.spin3_cartan_gens)) else [])
u1_gens = (canonicalized_symmetry.u1s.T
if (canonicalized_symmetry.u1s is not None
and len(canonicalized_symmetry.u1s)) else [])
num_spin3s = len(spin3_gens)
num_u1s = len(u1_gens)
def grouped(charges):
# Spin(3) angular momentum charges need to be half-integral.
# For U(1) generators, we are not requiring this.
assert all(round2(2 * c) == int(round2(2 * c))
for charge_vec in charges
for c in charge_vec[:num_spin3s])
return group_charges_into_spin3u1_irreps(
num_spin3s,
[tuple(map(simplify, charge_vec)) for charge_vec in charges])
if num_spin3s:
rootspaces = weightspace_decompose(
_spin8_fabc,
spin3_gens.T,
canonicalized_symmetry.semisimple_part)
sroot_info = get_simple_roots_info(rootspaces)
angular_momentum_u1s = list(sroot_info['canonicalized_cartan_subalgebra'].T)
else:
angular_momentum_u1s = []
list_commuting_gens = (
[g for g in [angular_momentum_u1s, u1_gens] if len(g)])
commuting_gens = (numpy.concatenate(list_commuting_gens).T
if list_commuting_gens else numpy.zeros([28, 0]))
ret = []
for spin8action, tasks in decomposition_tasks:
ret.append([])
for task_tag, space_to_decompose in tasks:
_, charges = get_simultaneous_eigenbasis(
commuting_gens,
gen_action_einsum=spin8action.einsum,
gen_action_tensor=spin8action.tensor,
initial_space=space_to_decompose)
ret[-1].append((task_tag, grouped(charges)))
return ret
def spin3u1_branching_and_spectra(canonicalized_symmetry,
decomposition_tasks=()):
"""Computes so(3)^M x u(1)^N spectra."""
vsc_ad_branching = spin3u1_decompose(canonicalized_symmetry)
spectra = spin3u1_decompose(canonicalized_symmetry,
decomposition_tasks)
return vsc_ad_branching, spectra
def spin3u1_physics(
canonicalized_symmetry,
mass_tagged_eigenspaces_gravitinos=(),
mass_tagged_eigenspaces_fermions=(),
mass_tagged_eigenspaces_scalars=(),
# Note that we see cases where we have very uneven parity-mixtures.
parity_tolerance=1e-7):
"""Computes so(3)^M x u(1)^N spectra."""
vsc_ad_branching = spin3u1_decompose(canonicalized_symmetry)
decomposition_tasks = []
# Gravitino tasks.
gravitino_tasks = []
for gravitino_mass, basis in mass_tagged_eigenspaces_gravitinos:
subspace = numpy.array(basis).T
task_tag = ('gravitinos', subspace.shape, gravitino_mass)
gravitino_tasks.append((task_tag, subspace))
decomposition_tasks.append(
(SPIN8_ACTION_8V, gravitino_tasks))
# Fermion tasks.
fermion_tasks = []
for fermion_mass, basis in mass_tagged_eigenspaces_fermions:
subspace = numpy.array(basis).T
task_tag = ('fermions', subspace.shape, fermion_mass)
fermion_tasks.append((task_tag, subspace))
decomposition_tasks.append(
(SPIN8_ACTION_FERMIONS, fermion_tasks))
# Scalar tasks.
scalar_tasks = []
# For scalars, we try to split off mass-eigenstates that are
# 35s-only or 35c-only.
p_op = numpy.eye(70)
p_op[35:, 35:] *= -1
for scalar_mass, basis in mass_tagged_eigenspaces_scalars:
a_basis = numpy.array(basis)
p_op_on_basis = numpy.einsum('jn,nm,km->jk', a_basis.conj(), p_op, a_basis)
assert numpy.allclose(p_op_on_basis, p_op_on_basis.real)
assert numpy.allclose(p_op_on_basis, p_op_on_basis.T)
p_op_eigvals, p_op_eigvecs_T = numpy.linalg.eigh(p_op_on_basis)
p_op_eigvals_re = p_op_eigvals.real
assert numpy.allclose(p_op_eigvals, p_op_eigvals_re)
# We have to lift the p_op_eigvecs_T to a_basis.
subspace_eigvecs = numpy.einsum('vn,vV->Vn', p_op_eigvecs_T, a_basis)
eigval_eigvecs = aggregate_eigenvectors(p_op_eigvals_re, subspace_eigvecs,
tolerance=1e-4)
# subspaces_35s and subspaces_35c each have <=1 entries.
subspaces_35s = [eigvecs for eigval, eigvecs in eigval_eigvecs
if eigval > 1 - parity_tolerance]
subspaces_35c = [eigvecs for eigval, eigvecs in eigval_eigvecs
if eigval < -1 + parity_tolerance]
merged_subspaces_other = [
eigvec for eigval, eigvecs in eigval_eigvecs
for eigvec in eigvecs
if -1 + parity_tolerance <= eigval <= 1 - parity_tolerance]
for subspace in subspaces_35s:
a_subspace = numpy.array(subspace).T
task_tag = ('scalars', a_subspace.shape, scalar_mass, 's')
scalar_tasks.append((task_tag, a_subspace))
for subspace in subspaces_35c:
a_subspace = numpy.array(subspace).T
task_tag = ('scalars', a_subspace.shape, scalar_mass, 'c')
scalar_tasks.append((task_tag, a_subspace))
# "Mixture" states. While we do get them in terms of parity-eigenstates,
# for 'weird' eigenvalues such as -1/3. Here, we just merge them all back
# together into one space, i.e. forget about resolving the spectrum.
# Why? Otherwise, we may see in the report
# "0.000m{1}, 0.000m{1}, 0.000m{1}, ...", which is not overly informative.
a_subspace = numpy.array(merged_subspaces_other).T
if len(merged_subspaces_other):
task_tag = ('scalars', a_subspace.shape, scalar_mass, 'm')
scalar_tasks.append((task_tag, a_subspace))
decomposition_tasks.append(
(SPIN8_ACTION_SCALARS, scalar_tasks))
spectra = spin3u1_decompose(canonicalized_symmetry,
decomposition_tasks)
return vsc_ad_branching, spectra
|
StarcoderdataPython
|
60845
|
<reponame>oushu1zhangxiangxuan1/learn_leveldb<gh_stars>0
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: core/contract/proposal_contract.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='core/contract/proposal_contract.proto',
package='protocol',
syntax='proto3',
serialized_options=_b('\n\030org.tron.protos.contractZ)github.com/tronprotocol/grpc-gateway/core'),
serialized_pb=_b('\n%core/contract/proposal_contract.proto\x12\x08protocol\"^\n\x17ProposalApproveContract\x12\x15\n\rowner_address\x18\x01 \x01(\x0c\x12\x13\n\x0bproposal_id\x18\x02 \x01(\x03\x12\x17\n\x0fis_add_approval\x18\x03 \x01(\x08\"\xa8\x01\n\x16ProposalCreateContract\x12\x15\n\rowner_address\x18\x01 \x01(\x0c\x12\x44\n\nparameters\x18\x02 \x03(\x0b\x32\x30.protocol.ProposalCreateContract.ParametersEntry\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\"D\n\x16ProposalDeleteContract\x12\x15\n\rowner_address\x18\x01 \x01(\x0c\x12\x13\n\x0bproposal_id\x18\x02 \x01(\x03\x42\x45\n\x18org.tron.protos.contractZ)github.com/tronprotocol/grpc-gateway/coreb\x06proto3')
)
_PROPOSALAPPROVECONTRACT = _descriptor.Descriptor(
name='ProposalApproveContract',
full_name='protocol.ProposalApproveContract',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='owner_address', full_name='protocol.ProposalApproveContract.owner_address', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='proposal_id', full_name='protocol.ProposalApproveContract.proposal_id', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_add_approval', full_name='protocol.ProposalApproveContract.is_add_approval', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=51,
serialized_end=145,
)
_PROPOSALCREATECONTRACT_PARAMETERSENTRY = _descriptor.Descriptor(
name='ParametersEntry',
full_name='protocol.ProposalCreateContract.ParametersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='protocol.ProposalCreateContract.ParametersEntry.key', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='protocol.ProposalCreateContract.ParametersEntry.value', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=267,
serialized_end=316,
)
_PROPOSALCREATECONTRACT = _descriptor.Descriptor(
name='ProposalCreateContract',
full_name='protocol.ProposalCreateContract',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='owner_address', full_name='protocol.ProposalCreateContract.owner_address', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parameters', full_name='protocol.ProposalCreateContract.parameters', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PROPOSALCREATECONTRACT_PARAMETERSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=148,
serialized_end=316,
)
_PROPOSALDELETECONTRACT = _descriptor.Descriptor(
name='ProposalDeleteContract',
full_name='protocol.ProposalDeleteContract',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='owner_address', full_name='protocol.ProposalDeleteContract.owner_address', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='proposal_id', full_name='protocol.ProposalDeleteContract.proposal_id', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=318,
serialized_end=386,
)
_PROPOSALCREATECONTRACT_PARAMETERSENTRY.containing_type = _PROPOSALCREATECONTRACT
_PROPOSALCREATECONTRACT.fields_by_name['parameters'].message_type = _PROPOSALCREATECONTRACT_PARAMETERSENTRY
DESCRIPTOR.message_types_by_name['ProposalApproveContract'] = _PROPOSALAPPROVECONTRACT
DESCRIPTOR.message_types_by_name['ProposalCreateContract'] = _PROPOSALCREATECONTRACT
DESCRIPTOR.message_types_by_name['ProposalDeleteContract'] = _PROPOSALDELETECONTRACT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ProposalApproveContract = _reflection.GeneratedProtocolMessageType('ProposalApproveContract', (_message.Message,), dict(
DESCRIPTOR = _PROPOSALAPPROVECONTRACT,
__module__ = 'core.contract.proposal_contract_pb2'
# @@protoc_insertion_point(class_scope:protocol.ProposalApproveContract)
))
_sym_db.RegisterMessage(ProposalApproveContract)
ProposalCreateContract = _reflection.GeneratedProtocolMessageType('ProposalCreateContract', (_message.Message,), dict(
ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict(
DESCRIPTOR = _PROPOSALCREATECONTRACT_PARAMETERSENTRY,
__module__ = 'core.contract.proposal_contract_pb2'
# @@protoc_insertion_point(class_scope:protocol.ProposalCreateContract.ParametersEntry)
))
,
DESCRIPTOR = _PROPOSALCREATECONTRACT,
__module__ = 'core.contract.proposal_contract_pb2'
# @@protoc_insertion_point(class_scope:protocol.ProposalCreateContract)
))
_sym_db.RegisterMessage(ProposalCreateContract)
_sym_db.RegisterMessage(ProposalCreateContract.ParametersEntry)
ProposalDeleteContract = _reflection.GeneratedProtocolMessageType('ProposalDeleteContract', (_message.Message,), dict(
DESCRIPTOR = _PROPOSALDELETECONTRACT,
__module__ = 'core.contract.proposal_contract_pb2'
# @@protoc_insertion_point(class_scope:protocol.ProposalDeleteContract)
))
_sym_db.RegisterMessage(ProposalDeleteContract)
DESCRIPTOR._options = None
_PROPOSALCREATECONTRACT_PARAMETERSENTRY._options = None
# @@protoc_insertion_point(module_scope)
|
StarcoderdataPython
|
1698937
|
"""
@brief test log(time=4s)
@author <NAME>
"""
import sys
import os
import unittest
import warnings
from docutils.parsers.rst import directives
from sphinx.errors import ExtensionError
from pyquickhelper.pycode import get_temp_folder
from pyquickhelper.helpgen import rst2html, rst2rst_folder
from pyquickhelper.sphinxext import TocDelayDirective
class TestTocDelayExtension(unittest.TestCase):
def test_post_parse(self):
directives.register_directive("tocdelay", TocDelayDirective)
def test_regex(self):
s = "2016-06-11 - Make a reference to a blog post <2016/2016-06-11_blogpost_with_label>"
reg = TocDelayDirective.regex_title
gr = reg.search(s)
self.assertTrue(gr is not None)
self.assertEqual(tuple(gr.groups()),
("2016-06-11 - Make a reference to a blog post",
"2016/2016-06-11_blogpost_with_label"))
def test_tocdelay1(self):
content = """
.. tocdelay::
blog/2015/2015-04-05_first_blogpost
""".replace(" ", "")
try:
rst2html(content, layout="sphinx",
writer="rst", keep_warnings=True)
except ValueError as e:
self.assertIn("No found document", str(e))
def test_tocdelay2(self):
path = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(path, "data", "blog")
content = """
.. tocdelay::
:path: {0}
2015/2015-04-05_first_blogpost
""".replace(" ", "").format(path)
try:
rst2html(content, layout="sphinx",
writer="rst", keep_warnings=True)
except (KeyError, ExtensionError) as e:
self.assertIn(
"event 'doctree-resolved' threw an exception", str(e))
def test_tocdelay3(self):
temp = get_temp_folder(__file__, "temp_tocdelay3")
path = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(path, "data", "blog")
content = """
.. tocdelay::
:path: {0}
2015/2015-04-05_first_blogpost
""".replace(" ", "").format(path)
try:
rst2rst_folder(content, temp)
except (KeyError, ExtensionError) as e:
self.assertIn(
"event 'doctree-resolved' threw an exception", str(e))
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
1665079
|
<reponame>armandok/pySLAM-D<gh_stars>1-10
import habitat_sim
from Config import Config
class Simulator:
def __init__(self):
test_scene = Config().scene
sim_settings = {
"width": 640, # Spatial resolution of the observations
"height": 480,
"scene": test_scene, # Scene path
"default_agent": 0,
"sensor_height": 1.5, # Height of sensors in meters
"color_sensor": True, # RGB sensor
"semantic_sensor": True, # Semantic sensor
"depth_sensor": True, # Depth sensor
"seed": 1,
}
cfg = self.make_cfg(sim_settings)
self.sim = habitat_sim.Simulator(cfg)
self.action_names = list(
cfg.agents[
sim_settings["default_agent"]
].action_space.keys()
)
self.total_frames = 0
def make_cfg(self, settings):
sim_cfg = habitat_sim.SimulatorConfiguration()
sim_cfg.gpu_device_id = 0
sim_cfg.scene.id = settings["scene"]
# Note: all sensors must have the same resolution
sensors = {
"color_sensor": {
"sensor_type": habitat_sim.SensorType.COLOR,
"resolution": [settings["height"], settings["width"]],
"position": [0.0, settings["sensor_height"], 0.0],
},
"depth_sensor": {
"sensor_type": habitat_sim.SensorType.DEPTH,
"resolution": [settings["height"], settings["width"]],
"position": [0.0, settings["sensor_height"], 0.0],
},
"semantic_sensor": {
"sensor_type": habitat_sim.SensorType.SEMANTIC,
"resolution": [settings["height"], settings["width"]],
"position": [0.0, settings["sensor_height"], 0.0],
},
}
sensor_specs = []
for sensor_uuid, sensor_params in sensors.items():
if settings[sensor_uuid]:
sensor_spec = habitat_sim.SensorSpec()
sensor_spec.uuid = sensor_uuid
sensor_spec.sensor_type = sensor_params["sensor_type"]
sensor_spec.resolution = sensor_params["resolution"]
sensor_spec.position = sensor_params["position"]
sensor_specs.append(sensor_spec)
# Here you can specify the amount of displacement in a forward action and the turn angle
agent_cfg = habitat_sim.agent.AgentConfiguration()
agent_cfg.sensor_specifications = sensor_specs
agent_cfg.action_space = {
"move_forward": habitat_sim.agent.ActionSpec(
"move_forward", habitat_sim.agent.ActuationSpec(amount=0.5)
),
"move_backward": habitat_sim.agent.ActionSpec(
"move_backward", habitat_sim.agent.ActuationSpec(amount=0.5)
),
"turn_left": habitat_sim.agent.ActionSpec(
"turn_left", habitat_sim.agent.ActuationSpec(amount=10.0)
),
"turn_right": habitat_sim.agent.ActionSpec(
"turn_right", habitat_sim.agent.ActuationSpec(amount=10.0)
),
}
return habitat_sim.Configuration(sim_cfg, [agent_cfg])
def get_obs(self, action):
observations = self.sim.step(action)
rgb = observations["color_sensor"]
# semantic = observations["semantic_sensor"]
depth = observations["depth_sensor"]
self.total_frames += 1
return rgb, depth
def reset(self):
agent = self.sim.get_agent(0)
agent_state = agent.initial_state # agent.get_state()
num_start_tries = 0
while num_start_tries < 50:
agent_state.position = self.sim.pathfinder.get_random_navigable_point()
num_start_tries += 1
agent.set_state(agent_state)
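# Illustrative usage sketch (not part of the original module); it assumes a
# valid scene path in Config().scene and a working habitat_sim install:
#
#   sim = Simulator()
#   sim.reset()                               # place the agent at a random navigable point
#   rgb, depth = sim.get_obs("move_forward")  # step the sim, grab RGB + depth frames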
|
StarcoderdataPython
|
1781079
|
from __future__ import annotations
from typing import Optional, TYPE_CHECKING, Union
from pyspark.sql.types import StructType, DataType
from spark_auto_mapper_fhir.fhir_types.list import FhirList
from spark_auto_mapper_fhir.fhir_types.string import FhirString
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.base_types.fhir_complex_type_base import FhirComplexTypeBase
from spark_fhir_schemas.r4.complex_types.contributor import ContributorSchema
if TYPE_CHECKING:
pass
# id_ (string)
# extension (Extension)
# type_ (ContributorType)
from spark_auto_mapper_fhir.value_sets.contributor_type import ContributorTypeCode
# name (string)
# contact (ContactDetail)
from spark_auto_mapper_fhir.complex_types.contact_detail import ContactDetail
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class Contributor(FhirComplexTypeBase):
"""
Contributor
fhir-base.xsd
A contributor to the content of a knowledge asset, including authors, editors, reviewers, and endorsers.
If the element is present, it must have a value for at least one of the defined elements, an @id referenced from the Narrative, or extensions
"""
# noinspection PyPep8Naming
def __init__(
self,
*,
id_: Optional[FhirString] = None,
extension: Optional[FhirList[ExtensionBase]] = None,
type_: ContributorTypeCode,
name: FhirString,
contact: Optional[FhirList[ContactDetail]] = None,
) -> None:
"""
A contributor to the content of a knowledge asset, including authors, editors,
reviewers, and endorsers.
If the element is present, it must have a value for at least one of the
defined elements, an @id referenced from the Narrative, or extensions
:param id_: None
:param extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
:param type_: The type of contributor.
:param name: The name of the individual or organization responsible for the contribution.
:param contact: Contact details to assist a user in finding and communicating with the
contributor.
"""
super().__init__(
id_=id_,
extension=extension,
type_=type_,
name=name,
contact=contact,
)
def get_schema(
self, include_extension: bool
) -> Optional[Union[StructType, DataType]]:
return ContributorSchema.get_schema(include_extension=include_extension)
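# Illustrative sketch (not part of the auto-generated file): constructing a
# Contributor. The exact constructor signatures of ContributorTypeCode and
# FhirString are assumptions here; per the docstring, only type_ and name are
# required.
#
#   contributor = Contributor(
#       type_=ContributorTypeCode("author"),
#       name=FhirString("Example Author"),
#   )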
|
StarcoderdataPython
|
3242538
|
<filename>mediaProject/account/views.py
from django.core.exceptions import ValidationError
from django.utils import timezone
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.mail import BadHeaderError, send_mail
from .models import UserProfile, Friendship
from group.models import Group
def user_login_check(user):
return user.is_anonymous()
@user_passes_test(user_login_check)
def create_account(request):
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
name = request.POST['name']
last_name = request.POST['last_name']
email = request.POST['email']
date = request.POST['date']
sex = request.POST['sex']
user_exist = User.objects.filter(username=username).first()
email_exist = User.objects.filter(email=email).first()
if not user_exist:
if email_exist:
sms = "%s ya esta siendo usado por otro usuario" % email
return render(request, 'create_account.html', locals())
profile = UserProfile()
profile.born_date = date
profile.sex = sex
user = User(username=username, email=email, first_name=name, last_name=last_name)
user.set_password(password)
try:
user.save()
profile.user = user
profile.save()
except ValidationError:
user.delete()
sms = "el formato de la fecha esta mal"
return render(request, 'create_account.html', locals())
return redirect(reverse('account:dashboard'))
else:
sms = "%s ya esta siendo usado por otro, por favor seleccione otro" % username
return render(request, 'create_account.html', locals())
else:
return render(request, 'create_account.html', locals())
@login_required
def user_search(request):
pass
@login_required
def user_update(request, pk):
profile = UserProfile.objects.get(user_id=pk)
return render(request, 'edit_profile.html', locals())
@login_required
def friendship(request, id_receiver):
user_sender = User.objects.get(id=request.user.id)
user_receiver = User.objects.get(id=id_receiver)
friend = Friendship()
exits = Friendship.objects.filter(receiver_id=id_receiver)
if exits:
sms = "ya envio una peticion a %s" % user_receiver.username
return redirect(reverse("account:dashboard") + "?sms=%s" % sms)
friend.sender = user_sender
friend.receiver = user_receiver
friend.status = 2
friend.date = timezone.now()
friend.save()
sms_ok = "se le ha enviado una peticion a %s" % user_receiver.username
return redirect(reverse('account:dashboard')+ "?sms_ok=%s" % sms_ok)
def accept_friendship(request, id):
solicitude = Friendship.objects.get(id=id)
solicitude.status = 0
solicitude.save()
return redirect(reverse('account:dashboard'))
def notification(request):
friend_request = Friendship.objects.filter(Q(status=2) & Q(receiver=request.user))
return render(request, 'notification.html', locals())
def users(request):
users_list = UserProfile.objects.all()
return render(request, 'users.html', locals())
def user_profile(request, pk):
profile = UserProfile.objects.get(user_id=pk)
own = UserProfile.objects.get(user_id=request.user.pk)
is_my_profile = own == profile
return render(request, 'user_profile.html', locals())
@login_required
def user_login(request):
users = UserProfile.objects.filter(is_conected=True).exclude(user_id=request.user.pk)
friend = Friendship.objects.filter(Q(status=0) & (Q(sender=request.user) | Q(receiver=request.user)))
return render(request, 'user_login.html', locals())
@login_required
def user_staff_login(request):
users_staff = UserProfile.objects.filter(is_conected=True, user__is_staff=True).exclude(user_id=request.user.pk)
return render(request, 'user_staff.html', locals())
@login_required
def dashboard(request):
if request.method == 'GET':
if "create-group" in request.GET:
name = request.GET.get("name-group", None)
about = request.GET.get("about-group", None)
if name is not None and about is not None:
exist = Group.objects.filter(name=name)
if exist:
message_error = "ese grupo ya existe, por favor, escoja otro nombre"
return render(request, 'index.html', locals())
group = Group(name=name, about=about, owner=request.user)
group.save()
return redirect(reverse('account:dashboard'))
else:
friend_request = Friendship.objects.filter(Q(status=2) & Q(receiver=request.user))
users = UserProfile.objects.filter(is_conected=True)
user = request.user
return render(request, 'index.html', locals())
else:
if "send" in request.POST:
email = request.POST.get('email', "")
subject = request.POST.get('subject', "")
message = request.POST.get('message', "")
if email and subject and message:
try:
# email = EmailMultiAlternatives(subject, message, '<EMAIL>',
# ['<EMAIL>'])
# message_html = '<h1>' + message + '</h1>'
# email.attach_alternative(message_html, 'text/html')
# email.send(False)
subject = '[The Wall] ' + subject
send_mail(subject, message, '<EMAIL>', ['<EMAIL>'],
False, 'rhernandeza', 'seabiskuit32+')
except BadHeaderError:
error = True
message_error = 'Se ha encontrado una cabecera invalida'
return render(request, 'index.html', locals())
return render(request, 'index.html', locals())
else:
error = True
message_error = 'Debe de llenar todos los campos'
return render(request, 'index.html', locals())
        else:  # if "save" in request.POST
full_name = request.POST.get('name', "")
email = request.POST.get('email', "")
user = request.user
user.first_name = full_name.split(" ")[0]
user.last_name = " ".join(full_name.split(" ")[1:])
user.email = email
user.save()
return render(request, 'index.html', locals())
def information(request):
mesagge = 'Informaciones recientes'
return render(request, 'informations.html', locals())
def login_view(request):
if request.user.is_authenticated():
return redirect(reverse('account:dashboard') + "?sms_ok=usted ya esta logeado")
else:
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is None:
error = True
message = 'Usuario o contrasena incorrectos'
return render(request, 'login.html', locals())
elif user.is_active:
login(request, user)
profile = UserProfile.objects.get(user=user)
profile.is_conected = True
profile.save()
return redirect(reverse('account:dashboard'))
else:
error = True
message = 'Este usuario ha sido baneado, por favor contacte al administrador'
return render(request, 'login.html', locals())
else:
return render(request, 'login.html', locals())
@login_required
def logout_view(request):
profile = UserProfile.objects.get(user=request.user)
profile.is_conected = False
profile.save()
logout(request)
return redirect(reverse('account:login') + "?sms_ok=hasta pronto")
def chat(request):
my = UserProfile.objects.get(user=request.user)
return render(request, 'chat.html', locals())
def contact_us(request):
profile = UserProfile.objects.get(user_id=request.user.pk)
return render(request, 'contact.html', locals())
def terms(request):
return render(request, 'terms.html')
def about(request):
return render(request, 'about.html')
def privacy(request):
return render(request, 'privacy.html')
|
StarcoderdataPython
|
48515
|
"""add root_cause table
Revision ID: 7ddd008bcaaa
Revises: <PASSWORD>
Create Date: 2021-11-06 19:20:07.167512
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7ddd008bcaaa'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('employees',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('type', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('managers',
sa.Column('id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['id'], ['employees.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('drivers',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('manager_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['id'], ['employees.id'], ),
sa.ForeignKeyConstraint(['manager_id'], ['managers.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('drivers')
op.drop_table('managers')
op.drop_table('employees')
# ### end Alembic commands ###
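# Typical usage with the standard Alembic CLI (for reference only):
#   alembic upgrade 7ddd008bcaaa   # apply this revision
#   alembic downgrade -1           # roll it back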
|
StarcoderdataPython
|
4802913
|
<reponame>zhoulh0322/zfused_outsource_old
# Copyright 2017 by <NAME>. All Rights Reserved.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
import traceback
try:
import maya.cmds
except ImportError:
traceback.print_exc()
__all__ = [
"setNamespace",
"getFromDagPath",
"getFromDagPaths",
"getFromSelection",
]
def setNamespace(dagPath, namespace):
"""
Return the given dagPath with the given namespace.
setNamespace("|group|control", "character")
result: |character:group|character:control
setNamespace("|character:group|character:control", "")
result: |group|control
:type namespace: str
"""
result = dagPath
currentNamespace = getFromDagPath(dagPath)
# Ignore any further processing if the namespace is the same.
if namespace == currentNamespace:
pass
# Replace the current namespace with the specified one
elif currentNamespace and namespace:
result = dagPath.replace(currentNamespace + ":", namespace + ":")
# Remove existing namespace
elif currentNamespace and not namespace:
result = dagPath.replace(currentNamespace + ":", "")
# Set namespace if a current namespace doesn't exists
elif not currentNamespace and namespace:
result = dagPath.replace("|", "|" + namespace + ":")
if namespace and not result.startswith("|"):
result = namespace + ":" + result
return result
def getFromDagPaths(dagPaths):
"""
:type dagPaths: list[str]
:rtype: list[str]
"""
namespaces = []
for dagPath in dagPaths:
namespace = getFromDagPath(dagPath)
namespaces.append(namespace)
return list(set(namespaces))
def getFromDagPath(dagPath):
"""
:type dagPath: str
:rtype: str
"""
shortName = dagPath.split("|")[-1]
namespace = ":".join(shortName.split(":")[:-1])
return namespace
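# Illustrative example (not part of the original module):
#   getFromDagPath("|character:group|character:control") -> "character"
#   getFromDagPath("|group|control") -> ""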
def getFromSelection():
"""
:rtype: list[str]
"""
dagPaths = maya.cmds.ls(selection=True)
return getFromDagPaths(dagPaths)
|
StarcoderdataPython
|
1638886
|
<reponame>Terence-Guan/Python.HackerRank
def mean(values: list) -> float:
length = len(values)
result = 0
for value in values:
result += value
result /= length
return result
def median(values: list) -> float:
length = len(values)
values = sorted(values)
if length % 2 != 0:
return values[length // 2]
else:
return (values[length // 2] + values[length // 2 - 1]) / 2
def mode(values: list) -> int:
counters = dict()
result = None
for value in values:
if value in counters:
counters[value] += 1
else:
counters[value] = 1
if (result is None) or (counters[value] > counters[result]):
result = value
elif (counters[value] == counters[result]) and (value < result):
result = value
return result
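# Illustrative check (not part of the original script): for values = [1, 2, 2, 3]
# the functions above give mean(...) == 2.0, median(...) == 2.0 (average of the
# two middle elements) and mode(...) == 2 (ties are broken towards the smaller
# value).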
n = int(input())
x = [int(token) for token in input().split()]
print(mean(x))
print(median(x))
print(mode(x))
|
StarcoderdataPython
|
1656044
|
<reponame>YuriiShuginin/ITMO-ICT-Frontend-2021
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import SignupAPIView, NoteViewSet,\
Public, Logout, ProfileView,ConfidentProfileView,\
NoteViewDetail,NoteDetailCreate,PublicViewDetail
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView
from rest_framework.authtoken.views import obtain_auth_token
app_name = 'api'
urlpatterns = [
path('signup/', SignupAPIView.as_view(), name='signup'),
path('token/', obtain_auth_token, name='token'),
path('logout/', Logout.as_view(), name='logout'),
path('profile/<slug>/', ProfileView.as_view({'get': 'list'}), name='profile_info'),
path('profile/', ConfidentProfileView.as_view({'get': 'list'}), name='self_pr_info'),
path('profilee/', ConfidentProfileView.as_view({'put': 'update'}), name='profile_update'),
path('notes/<int:pk>/edit/', NoteViewDetail.as_view({'put': 'update'}), name='reload'),
path('notes/delete/<int:pk>/', NoteViewSet.as_view({'delete': 'destroy'}), name='del'),
path('notes/new/', NoteDetailCreate.as_view({'post': 'create'}), name='create'),
path('notes/', NoteViewSet.as_view({'get': 'list'}), name='note-list'),
path('notes/public/', Public.as_view({'get':'list',}), name='shared'),
path('notes/public/<int:pk>/', PublicViewDetail.as_view({'get':'list',}), name='shared'),
path('notes/<int:pk>/', NoteViewDetail.as_view({'get': 'list'}), name='note-detail'),
]
|
StarcoderdataPython
|
3267524
|
from tinkerforge.bricklet_joystick import BrickletJoystick
from tinkerforge.bricklet_multi_touch import BrickletMultiTouch
from modules.navigation import StateModule
class InputModule(StateModule):
inputs = {}
def try_bricklet(self, uid, device_identifier, position):
if device_identifier == 210:
self.inputs["joystick"] = BrickletJoystick(
uid, self.controller.ipcon)
self.inputs["joystick"].set_debounce_period(400)
self.inputs["joystick"].register_callback(
self.inputs["joystick"].CALLBACK_POSITION_REACHED,
self.joystick_position)
self.inputs["joystick"].register_callback(
self.inputs["joystick"].CALLBACK_PRESSED,
self.joystick_pushed)
self.inputs["joystick"].set_position_callback_threshold(
"o", -99, 99, -99, 99)
if device_identifier == 234:
self.inputs["multitouch"] = BrickletMultiTouch(
uid, self.controller.ipcon)
self.inputs["multitouch"].set_electrode_sensitivity(125)
self.inputs["multitouch"].register_callback(
self.inputs["multitouch"].CALLBACK_TOUCH_STATE,
self.multitouch)
def joystick_position(self, x, y):
# print("joystick", x, y)
if "SchedulerModule" in self.controller.modules:
self.controller.modules["SchedulerModule"].motion_detected()
if y == 100:
self.controller.navigate("up")
elif y == -100:
self.controller.navigate("down")
if x == 100:
self.controller.navigate("back")
elif x == -100:
self.controller.navigate("forward")
def joystick_pushed(self):
self.controller.navigate("forward")
def multitouch(self, state):
if "SchedulerModule" in self.controller.modules:
self.controller.modules["SchedulerModule"].motion_detected()
if state & (1 << 12):
pass
if (state & 0xfff) == 0:
pass
else:
try:
if state & (1 << 0):
self.controller.navigate("forward")
if state & (1 << 1):
self.controller.navigate("back")
if state & (1 << 2):
self.controller.navigate("up")
if state & (1 << 3):
self.controller.navigate("down")
except Exception:
pass
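# Illustrative note (not part of the original module): `state` is a bitmask of
# touched electrodes, e.g. state == 0b0101 means electrodes 0 and 2 are touched,
# so the handler above issues both a "forward" and an "up" navigation event.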
|
StarcoderdataPython
|
3253024
|
<reponame>gary-stu/FL
#!/usr/bin/env python3
import platform
from datetime import datetime
import os
from random import choice, randint, seed
from subprocess import Popen
from time import sleep
from signal import SIGTERM
class FL:
def __init__(self):
# Set parameters here
# both path must start with a r: r'', r'Intervals' etc...
self.video_path = r'.' # path to videos 1.mp4 to 100.mp4 (r'' and r'.' means same directory as python file)
self.interval_path = r'.' # path to interval pics/webms/musics or whatever
self.interval_length = 10 # number of seconds of interval
self.fullscreen = False # must be True or False
self.output_log = False # Do you want to output logfile even without errors?
# accepted filetypes for intervals
# only file from these types will be played (to avoid trying to play a fucking .docx)
self.filetypes = ['jpg', 'jpeg', 'png', 'gif', 'webm', 'webp', 'mp4']
# Do not touch the rest
self.mpv = ''
self.messages = []
self.posLog = [1]
self.intervals = []
self.logname = "fl_log0.txt"
self.myOs = platform.system()
nb = 0
while os.path.isfile(self.logname):
nb += 1
self.logname = 'fl_log' + str(nb) + '.txt'
# Script requires each file to be {number}.mp4
# rename files from original file pool if necessary
def rename_files_if_necessary(self):
self.info("Renaming file if necessary")
if os.path.isfile(os.path.join(self.video_path, "1 - Start.mp4")):
os.rename(os.path.join(self.video_path, "1 - Start.mp4"), os.path.join(self.video_path, "1.mp4"))
self.info(" Renamed '1 - Start.mp4' in '1.mp4'")
if os.path.isfile(os.path.join(self.video_path, "25 - Checkpoint.mp4")):
os.rename(os.path.join(self.video_path, "25 - Checkpoint.mp4"), os.path.join(self.video_path, "25.mp4"))
self.info(" Renamed '25 - Checkpoint.mp4' in '25.mp4'")
if os.path.isfile(os.path.join(self.video_path, "50 - Checkpoint.mp4")):
os.rename(os.path.join(self.video_path, "50 - Checkpoint.mp4"), os.path.join(self.video_path, "50.mp4"))
self.info(" Renamed '50 - Checkpoint.mp4' in '50.mp4'")
if os.path.isfile(os.path.join(self.video_path, "75 - Checkpoint.mp4")):
os.rename(os.path.join(self.video_path, "75 - Checkpoint.mp4"), os.path.join(self.video_path, "75.mp4"))
self.info(" Renamed '75 - Checkpoint.mp4' in '75.mp4'")
if os.path.isfile(os.path.join(self.video_path, "100 - End.mp4")):
os.rename(os.path.join(self.video_path, "100 - End.mp4"), os.path.join(self.video_path, "100.mp4"))
self.info(" Renamed '100 - End.mp4' in '100.mp4'")
self.info("Done")
# Returns all files (with extension in defined self.filetypes) in a given folder and all its subfolders
def listdir_rec(self, pth):
for root, directories, filenames in os.walk(pth):
for filename in filenames:
if filename.split('.')[-1] in self.filetypes: yield (os.path.join(root, filename))
# Prints the message, and adds it to buffer (if we want to write logfile later)
def info(self, msg):
print(msg)
self.messages.append(msg)
# Checks the configuration
def check(self):
checks = True
self.info('Checking if all 100 videos are found using "self.video_path"')
videos = os.listdir(self.video_path)
test = 0
for f in videos:
for i in range(1, 101):
if f == str(i) + '.mp4':
test += 1
if test == 0:
self.info("ERROR")
self.info('No videos were found in folder "' + self.video_path + '"')
self.info(
' Please change the "video_path" variable to a folder with the required videos (from 1.mp4 to 100.mp4)')
checks = False
elif test != 100:
self.info("ERROR")
self.info('Not all videos were found in folder "' + self.video_path + '"')
self.info(' Number of videos with name format 1.mp4, 2.mp4, etc... found : ' + str(test))
self.info(' Please check you have all required files (from 1.mp4 to 100.mp4) in that folder')
checks = False
else:
self.info(' Done')
self.info('Trying to find some interval files using "self.interval_path"')
self.intervals = list(self.listdir_rec(self.interval_path))
if len(self.intervals) == 0:
self.info("ERROR")
self.info('No file found for Intervals')
self.info(' Either update the "self.interval_path" variable (current: "' + self.interval_path + '")')
self.info(' Or update the possible filetype list :')
self.info(' ' + str(self.filetypes))
self.info(' Note : "self.interval_path" will look in all the subfolders as well')
checks = False
else:
self.info(' Done')
self.info('Checking if mpv can be run')
test = os.popen('mpv --version').read()
if test.startswith('mpv'):
self.info(' mpv is installed')
self.mpv = 'mpv '
else:
if self.myOs == 'Windows':
self.info(' mpv not installed, trying to revert to static binary')
self.mpv = 'binaries/Windows/mpv.exe '
elif self.myOs == 'Darwin':
self.info(' mpv not installed, trying to revert to static binary')
self.mpv = 'binaries/OSX/mpv '
elif self.myOs == 'Linux':
self.info("ERROR")
self.info(' No static binary available for Linux')
self.info(' Please install mpv through your package manager')
self.info(' or through mpv-build')
checks = False
if self.fullscreen: self.mpv += '-fs '
self.mpv += '-msg-level=all=no '
return checks
# Rolls a dice (returns random number between 1 and 6)
def diceroll(self, pos):
self.info('Rolling dice')
roll = randint(1, 6)
self.info(' Rolled a ' + str(roll))
newpos = pos + roll
self.posLog.append(newpos)
return newpos
# Plays video given the number
def video(self, nb):
file = os.path.join(self.video_path, str(nb) + '.mp4')
self.info('Playing : "' + file + "'")
self.info(" " + self.mpv + '"' + file + '"')
os.system(self.mpv + '"' + file + '"')
# Plays a random interval in self.intervals
def interval(self):
file = choice(self.intervals)
self.info('Interval : "' + file + '"')
self.info(' ' + self.mpv + '-loop "' + file + '"')
if self.myOs == "Windows":
p = Popen(self.mpv + '-loop "' + file + '"')
sleep(self.interval_length)
p.terminate()
elif self.myOs == 'Darwin':
p = Popen(self.mpv + '-loop "' + file + '"', shell=True)
sleep(self.interval_length)
p.terminate()
else:
p = Popen(self.mpv + '-loop "' + file + '"', shell=True, preexec_fn=os.setsid)
sleep(self.interval_length)
os.killpg(p.pid, SIGTERM)
# Play the game
def start(self):
self.info('Start')
self.info('Checking configuration.')
self.info('')
self.rename_files_if_necessary()
if self.check():
self.info('')
self.info('Conf ok')
self.info('Starting the game.')
self.video(1)
self.info('')
pos = 1
pos = self.diceroll(pos)
while pos < 100:
self.video(pos)
newpos = self.diceroll(pos)
self.info('Current position: ' + str(newpos))
if (pos < 25) and (newpos >= 25):
if newpos != 25: self.video(25)
elif (pos < 50) and (newpos >= 50):
if newpos != 50: self.video(50)
elif (pos < 75) and (newpos >= 75):
if newpos != 75: self.video(75)
elif ((pos % 25) != 0) and (newpos < 100):
print("interval")
self.interval()
pos = newpos
self.info('')
self.info('Congratulations, you\'ve cleared the game!')
self.video(100)
self.info('')
self.info('Roll history : ')
self.info(' ' + str(self.posLog))
if self.output_log:
file = open(self.logname, 'a')
for msg in self.messages:
file.write(msg + '\n')
file.close()
print('Log printed to logfile: ' + self.logname)
else:
self.info('')
self.info('Errors found')
self.info('Please try again')
file = open(self.logname, 'a')
for msg in self.messages:
file.write(msg + '\n')
file.close()
print('Log printed to logfile: ' + self.logname)
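# Illustrative walk-through (not part of the original script): starting from
# pos = 23, a roll of 4 gives newpos = 27; since 25 was crossed but not landed
# on, 25.mp4 (the checkpoint video) plays before 27.mp4 and no interval is
# shown for that turn.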
seed(int(datetime.now().strftime("%Y%m%d%H%M%S")))
game = FL()
game.start()
|
StarcoderdataPython
|
186169
|
from src.config import ExperimentConfig
from tensorflow.python.keras.layers.core import Dropout, Masking
from src.features.sequences.transformer import SequenceMetadata
import tensorflow as tf
from typing import Any, List, Dict
from .metrics import (
MulticlassAccuracy,
MulticlassTrueNegativeRate,
MulticlassTruePositiveRate,
PercentileSubsetMetricHelper,
MultilabelNestedMetric,
)
from .config import ModelConfig
from .callbacks import MLFlowCallback, BestModelRestoreCallback
from .initializers import FastTextInitializer
import logging
import mlflow
import datetime
def full_prediction_binary_accuracy_loss(y_true, y_pred):
sum = tf.reduce_sum(y_true, axis=-1)
weights = tf.where(sum > 1, x=1.0, y=sum)
weights = tf.cast(weights, dtype="float32")
loss = tf.keras.losses.binary_crossentropy(y_true, y_pred)
loss = tf.reduce_sum(weights * loss, axis=1) / tf.reduce_sum(weights, axis=1)
return tf.reduce_mean(loss)
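# Illustrative sketch (not part of the original module): time steps whose
# multi-hot target is all zeros get zero weight in the loss above, so with
# y_true = [[[1, 0, 1], [0, 0, 0]]] only the first step contributes.
def _example_full_prediction_loss():  # hypothetical helper, illustration only
    y_true = tf.constant([[[1.0, 0.0, 1.0], [0.0, 0.0, 0.0]]])
    y_pred = tf.constant([[[0.9, 0.1, 0.8], [0.5, 0.5, 0.5]]])
    # Equals the binary crossentropy of the first time step alone.
    return full_prediction_binary_accuracy_loss(y_true, y_pred)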
class BaseEmbedding:
config: ModelConfig
num_features: int = 0
num_hidden_features: int = 0
num_connections: int = 0
basic_feature_embeddings: tf.Variable # shape: (num_features, embedding_size)
basic_hidden_embeddings: tf.Variable # shape: (num_hidden_features, embedding_size)
def _final_embedding_matrix(self):
"""Overwrite this in case embedding uses attention mechanism etc"""
return self.basic_feature_embeddings
def _get_kernel_regularizer(self, scope: str):
if scope not in self.config.kernel_regularizer_scope:
logging.debug("Regularization not enabled for %s", scope)
return None
elif self.config.kernel_regularizer_value <= 0.0:
return None
elif self.config.kernel_regularizer_type == "l2":
return tf.keras.regularizers.l2(self.config.kernel_regularizer_value)
elif self.config.kernel_regularizer_type == "l2":
return tf.keras.regularizers.l1(self.config.kernel_regularizer_value)
else:
return None
def _get_initializer(
self,
initializer_name: str,
initializer_seed: int,
description_vocab: Dict[int, str],
) -> tf.keras.initializers.Initializer:
if initializer_name == "random_uniform":
return tf.keras.initializers.GlorotUniform(seed=initializer_seed)
elif initializer_name == "random_normal":
return tf.keras.initializers.GlorotNormal(seed=initializer_seed)
elif initializer_name == "fasttext":
initializer = FastTextInitializer(self.config.embedding_dim)
return initializer.get_initializer(description_vocab)
else:
logging.error("Unknown initializer %s", initializer_name)
def _get_feature_initializer(
self, description_vocab: Dict[int, str]
) -> tf.keras.initializers.Initializer:
return self._get_initializer(
self.config.feature_embedding_initializer,
self.config.feature_embedding_initializer_seed,
description_vocab,
)
def _get_hidden_initializer(
self, description_vocab: Dict[int, str]
) -> tf.keras.initializers.Initializer:
return self._get_initializer(
self.config.hidden_embedding_initializer,
self.config.hidden_embedding_initializer_seed,
description_vocab,
)
class BaseModel:
def __init__(self):
self.prediction_model: tf.keras.Model = None
self.embedding_layer: BaseEmbedding = None
self.metrics: List[tf.keras.metrics.Metric] = []
self.config = ModelConfig()
def _get_embedding_layer(
self, metadata: SequenceMetadata, knowledge: Any
) -> BaseEmbedding:
raise NotImplementedError("This should be implemented by the subclass!!!")
def _select_distribute_strategy(self) -> tf.distribute.Strategy:
if self.config.distribute_strategy == "mirrored":
return tf.distribute.MirroredStrategy()
elif self.config.distribute_strategy.startswith("/gpu"):
return tf.distribute.OneDeviceStrategy(
device=self.config.distribute_strategy
)
else:
return tf.distribute.get_strategy()
def build(self, metadata: SequenceMetadata, knowledge: Any):
self.metadata = metadata
self.strategy = self._select_distribute_strategy()
logging.info(
"Using strategy with %d workers", self.strategy.num_replicas_in_sync
)
with self.strategy.scope():
self.embedding_layer = self._get_embedding_layer(metadata, knowledge)
self._log_embedding_stats()
self.prediction_model = tf.keras.models.Sequential(
[
tf.keras.layers.Input(
shape=(metadata.max_x_length, len(metadata.x_vocab)),
),
self.embedding_layer,
tf.keras.layers.Masking(mask_value=0),
self._get_rnn_layer(),
tf.keras.layers.Dropout(
rate=self.config.dropout_rate, seed=self.config.dropout_seed
),
tf.keras.layers.Dense(
len(metadata.y_vocab),
activation=self.config.final_activation_function,
kernel_regularizer=self.embedding_layer._get_kernel_regularizer(
scope="prediction_dense"
),
),
]
)
def _log_embedding_stats(self):
mlflow.log_metric("num_features", self.embedding_layer.num_features)
mlflow.log_metric(
"num_hidden_features", self.embedding_layer.num_hidden_features
)
mlflow.log_metric("num_connections", self.embedding_layer.num_connections)
def _get_rnn_layer(self):
if self.config.rnn_type == "rnn":
return tf.keras.layers.SimpleRNN(
units=self.config.rnn_dim,
kernel_regularizer=self.embedding_layer._get_kernel_regularizer(
scope="prediction_rnn"
),
return_sequences=self.metadata.full_y_prediction,
dropout=self.config.rnn_dropout,
)
elif self.config.rnn_type == "lstm":
return tf.keras.layers.LSTM(
units=self.config.rnn_dim,
kernel_regularizer=self.embedding_layer._get_kernel_regularizer(
scope="prediction_rnn"
),
return_sequences=self.metadata.full_y_prediction,
dropout=self.config.rnn_dropout,
)
elif self.config.rnn_type == "gru":
return tf.keras.layers.GRU(
units=self.config.rnn_dim,
kernel_regularizer=self.embedding_layer._get_kernel_regularizer(
scope="prediction_rnn"
),
return_sequences=self.metadata.full_y_prediction,
dropout=self.config.rnn_dropout,
)
else:
logging.error("Unknown rnn layer type: %s", self.config.rnn_type)
def train_dataset(
self,
train_dataset: tf.data.Dataset,
test_dataset: tf.data.Dataset,
multilabel_classification: bool,
n_epochs: int,
):
with self.strategy.scope():
if self.metadata.full_y_prediction:
self._compile_full_prediction(train_dataset)
elif len(self.metadata.y_vocab) == 1:
self._compile_singleclass()
elif multilabel_classification:
self._compile_multilabel(train_dataset)
else:
self._compile_multiclass(train_dataset)
model_summary = []
self.prediction_model.summary(print_fn=lambda x: model_summary.append(x))
mlflow.log_text("\n".join(model_summary), artifact_file="model_summary.txt")
self.history = self.prediction_model.fit(
train_dataset,
validation_data=test_dataset,
epochs=n_epochs,
callbacks=[
MLFlowCallback(),
BestModelRestoreCallback(
metric=self.config.best_model_metric,
minimize=self.config.best_model_metric_minimize,
early_stopping_epochs=self.config.early_stopping_epochs,
),
],
)
def _compile_singleclass(self):
self.metrics = [
tf.keras.metrics.Accuracy(),
tf.keras.metrics.BinaryAccuracy(),
tf.keras.metrics.Precision(),
tf.keras.metrics.Recall(),
tf.keras.metrics.AUC(),
]
self.prediction_model.compile(
loss=self.config.loss,
optimizer=self.config.optimizer,
metrics=self.metrics,
)
def _compile_full_prediction(self, train_dataset: tf.data.Dataset):
self.metrics = [
MultilabelNestedMetric(
nested_metric=tf.keras.metrics.CategoricalAccuracy(),
name="categorical_accuracy",
full_prediction=self.metadata.full_y_prediction,
),
MultilabelNestedMetric(
nested_metric=tf.keras.metrics.TopKCategoricalAccuracy(k=5),
name="top_5_categorical_accuracy",
full_prediction=self.metadata.full_y_prediction,
),
MultilabelNestedMetric(
nested_metric=tf.keras.metrics.TopKCategoricalAccuracy(k=10),
name="top_10_categorical_accuracy",
full_prediction=self.metadata.full_y_prediction,
),
MultilabelNestedMetric(
nested_metric=tf.keras.metrics.TopKCategoricalAccuracy(k=20),
name="top_20_categorical_accuracy",
full_prediction=self.metadata.full_y_prediction,
),
]
metric_helper = PercentileSubsetMetricHelper(
train_dataset,
num_percentiles=self.config.metrics_num_percentiles,
y_vocab=self.metadata.y_vocab,
full_prediction=self.metadata.full_y_prediction,
)
for k in [5, 10, 20]:
self.metrics = (
self.metrics
+ metric_helper.get_accuracy_at_k_for(
k=k, is_multilabel=True, use_cumulative=True
)
+ metric_helper.get_accuracy_at_k_for(
k=k, is_multilabel=True, use_cumulative=False
)
)
self.prediction_model.compile(
loss=full_prediction_binary_accuracy_loss,
optimizer=self.config.optimizer,
metrics=self.metrics,
)
def _compile_multilabel(self, train_dataset: tf.data.Dataset):
self.metrics = [
MultilabelNestedMetric(
nested_metric=tf.keras.metrics.CategoricalAccuracy(),
name="categorical_accuracy",
full_prediction=self.metadata.full_y_prediction,
),
MultilabelNestedMetric(
nested_metric=tf.keras.metrics.TopKCategoricalAccuracy(k=5),
name="top_5_categorical_accuracy",
full_prediction=self.metadata.full_y_prediction,
),
MultilabelNestedMetric(
nested_metric=tf.keras.metrics.TopKCategoricalAccuracy(k=10),
name="top_10_categorical_accuracy",
full_prediction=self.metadata.full_y_prediction,
),
MultilabelNestedMetric(
nested_metric=tf.keras.metrics.TopKCategoricalAccuracy(k=20),
name="top_20_categorical_accuracy",
full_prediction=self.metadata.full_y_prediction,
),
]
metric_helper = PercentileSubsetMetricHelper(
train_dataset,
num_percentiles=self.config.metrics_num_percentiles,
y_vocab=self.metadata.y_vocab,
full_prediction=self.metadata.full_y_prediction,
)
for k in [5, 10, 20]:
self.metrics = (
self.metrics
+ metric_helper.get_accuracy_at_k_for(
k=k, is_multilabel=True, use_cumulative=True
)
+ metric_helper.get_accuracy_at_k_for(
k=k, is_multilabel=True, use_cumulative=False
)
)
self.prediction_model.compile(
loss=self.config.loss,
optimizer=self.config.optimizer,
metrics=self.metrics,
)
def _compile_multiclass(self, train_dataset: tf.data.Dataset):
metric_helper = PercentileSubsetMetricHelper(
train_dataset,
num_percentiles=self.config.metrics_num_percentiles,
y_vocab=self.metadata.y_vocab,
full_prediction=self.metadata.full_y_prediction,
)
self.metrics = [
tf.keras.metrics.CategoricalAccuracy(),
tf.keras.metrics.TopKCategoricalAccuracy(
k=5, name="top_5_categorical_accuracy"
),
tf.keras.metrics.TopKCategoricalAccuracy(
k=10, name="top_10_categorical_accuracy"
),
tf.keras.metrics.TopKCategoricalAccuracy(
k=20, name="top_20_categorical_accuracy"
),
]
for k in [5, 10, 20]:
self.metrics = (
self.metrics
+ metric_helper.get_accuracy_at_k_for(
k=k, is_multilabel=False, use_cumulative=True
)
+ metric_helper.get_accuracy_at_k_for(
k=k, is_multilabel=False, use_cumulative=False
)
)
self.prediction_model.compile(
loss=self.config.loss,
optimizer=self.config.optimizer,
metrics=self.metrics,
)
|
StarcoderdataPython
|
151518
|
<reponame>mewbak/hypertools<gh_stars>1000+
# -*- coding: utf-8 -*-
"""
=============================
Normalizing your features
=============================
Oftentimes it's useful to normalize (z-score) your features before plotting, so
that they are on the same scale. Otherwise, some features will be weighted more
heavily than others when doing PCA, and that may or may not be what you want.
The `normalize` kwarg can be passed to the plot function. If `normalize` is
set to 'across', the z-score will be computed for the column across all of the
lists passed. Conversely, if `normalize` is set to 'within', the z-score will
be computed separately for each column in each list. Finally, if `normalize` is
set to 'row', each row of the matrix will be z-scored. Alternatively, you can use
the normalize function found in tools (see the third example).
"""
# Code source: <NAME>
# License: MIT
# import
import hypertools as hyp
import numpy as np
import matplotlib.pyplot as plt
# simulate data
cluster1 = np.random.multivariate_normal(np.zeros(3), np.eye(3), size=100)
cluster2 = np.random.multivariate_normal(np.zeros(3)+10, np.eye(3), size=100)
data = [cluster1, cluster2]
# plot normalized across lists
hyp.plot(data, '.', normalize='across', title='Normalized across datasets')
# plot normalized within list
hyp.plot(data, '.', normalize='within', title='Normalized within dataset')
# normalize by row
normalized_row = hyp.normalize(data, normalize='row')
# plot normalized by row
hyp.plot(normalized_row, '.', title='Normalized across row')
|
StarcoderdataPython
|
110949
|
<filename>watcherclient/tests/unit/v1/test_scoring_engine_shell.py
# Copyright (c) 2016 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from unittest import mock
import six
from watcherclient import shell
from watcherclient.tests.unit.v1 import base
from watcherclient import v1 as resource
from watcherclient.v1 import resource_fields
SCORING_ENGINE_1 = {
'uuid': '5b558998-57ed-11e6-9ca8-08002722cb22',
'name': 'se-01',
'description': 'Scoring Engine 0.1',
'metainfo': '{ "columns": ["cpu", "mem", "pci"] }',
'created_at': datetime.datetime.now().isoformat(),
'updated_at': None,
'deleted_at': None,
}
SCORING_ENGINE_2 = {
'uuid': '1f856554-57ee-11e6-ac72-08002722cb22',
'name': 'se-02',
'description': 'Some other Scoring Engine',
'metainfo': 'mode=simplified',
'created_at': datetime.datetime.now().isoformat(),
'updated_at': None,
'deleted_at': None,
}
class ScoringEngineShellTest(base.CommandTestCase):
SHORT_LIST_FIELDS = resource_fields.SCORING_ENGINE_SHORT_LIST_FIELDS
SHORT_LIST_FIELD_LABELS = (
resource_fields.SCORING_ENGINE_SHORT_LIST_FIELD_LABELS)
FIELDS = resource_fields.SCORING_ENGINE_FIELDS
FIELD_LABELS = resource_fields.SCORING_ENGINE_FIELD_LABELS
def setUp(self):
        super(ScoringEngineShellTest, self).setUp()
p_se_manager = mock.patch.object(
resource, 'ScoringEngineManager')
self.m_se_mgr_cls = p_se_manager.start()
self.addCleanup(p_se_manager.stop)
self.m_se_mgr = mock.Mock()
self.m_se_mgr_cls.return_value = self.m_se_mgr
self.stdout = six.StringIO()
self.cmd = shell.WatcherShell(stdout=self.stdout)
def test_do_scoringengine_list(self):
se1 = resource.ScoringEngine(mock.Mock(), SCORING_ENGINE_1)
se2 = resource.ScoringEngine(mock.Mock(), SCORING_ENGINE_2)
self.m_se_mgr.list.return_value = [
se1, se2]
exit_code, results = self.run_cmd('scoringengine list')
self.assertEqual(0, exit_code)
self.assertEqual(
[self.resource_as_dict(se1, self.SHORT_LIST_FIELDS,
self.SHORT_LIST_FIELD_LABELS),
self.resource_as_dict(se2, self.SHORT_LIST_FIELDS,
self.SHORT_LIST_FIELD_LABELS)],
results)
self.m_se_mgr.list.assert_called_once_with(detail=False)
def test_do_scoringengine_list_detail(self):
        se1 = resource.ScoringEngine(mock.Mock(), SCORING_ENGINE_1)
        se2 = resource.ScoringEngine(mock.Mock(), SCORING_ENGINE_2)
self.m_se_mgr.list.return_value = [
se1, se2]
exit_code, results = self.run_cmd('scoringengine list --detail')
self.assertEqual(0, exit_code)
self.assertEqual(
[self.resource_as_dict(se1, self.FIELDS, self.FIELD_LABELS),
self.resource_as_dict(se2, self.FIELDS, self.FIELD_LABELS)],
results)
self.m_se_mgr.list.assert_called_once_with(detail=True)
def test_do_scoringengine_show_by_name(self):
        scoringengine = resource.ScoringEngine(mock.Mock(), SCORING_ENGINE_1)
self.m_se_mgr.get.return_value = scoringengine
exit_code, result = self.run_cmd('scoringengine show se-01')
self.assertEqual(0, exit_code)
self.assertEqual(
self.resource_as_dict(scoringengine, self.FIELDS,
self.FIELD_LABELS),
result)
self.m_se_mgr.get.assert_called_once_with('se-01')
def test_do_scoringengine_show_by_uuid(self):
        scoringengine = resource.ScoringEngine(mock.Mock(), SCORING_ENGINE_1)
self.m_se_mgr.get.return_value = scoringengine
exit_code, result = self.run_cmd(
'scoringengine show 5b558998-57ed-11e6-9ca8-08002722cb22')
self.assertEqual(0, exit_code)
self.assertEqual(
self.resource_as_dict(scoringengine, self.FIELDS,
self.FIELD_LABELS),
result)
self.m_se_mgr.get.assert_called_once_with(
'5b558998-57ed-11e6-9ca8-08002722cb22')
|
StarcoderdataPython
|
1657120
|
from lib_user.execmd import ExeCmd
from time import sleep
ERROR_CODE_ONE = 1
ERROR_CODE_ZERO = 0
class Groups:
def __init__(self, token, url):
self._token = token
self._url = url
self._exe_cmd = ExeCmd()
def create_group(self, group_name, description):
"""
Create a user group
:param group_name: the group name
:param description: the description
:return: error_code, error_message, group
"""
cmd = 'curl -v -X POST -H "Accept: application/json" -H "Content-Type: application/json" -H ' \
'"Authorization: SSWS %s" -d \'{ "profile": { "name": "%s",' \
' "description": "%s"}}\' "https://%s/api/v1/groups"' % (self._token, group_name, description, self._url)
output, error = self._exe_cmd.run(cmd)
if "errorCode" in output:
return ERROR_CODE_ONE, output["errorCauses"][0]["errorSummary"], {}
else:
return ERROR_CODE_ZERO, "", output
def get_group(self, group_id=None):
"""
        Find and return the group with ID=group_id; if group_id is None, return all existing groups
:param group_id: the group_id
:return: error_code, error_message, group
"""
cmd = 'curl -X GET -H "Accept: application/json" -H "Content-Type: application/json"' \
' -H "Authorization: SSWS {}" "https://{}/api/v1/groups"'.format(self._token, self._url)
if group_id is not None:
cmd += "/{}".format(group_id)
output, error = self._exe_cmd.run(cmd)
if "errorCode" in output:
return ERROR_CODE_ONE, output["errorCauses"][0]["errorSummary"], {}
else:
return ERROR_CODE_ZERO, "", output
def update_group(self, group_id, new_name, new_description):
"""
        Update a group by replacing its old profile with the new one
:param group_id: the group id
:param new_description: a description
:param new_name: a new name
:return: error_code, error_message, group
"""
cmd = 'curl -X PUT -H "Accept: application/json" -H "Content-Type: application/json" -H ' \
'"Authorization: SSWS %s" -d \'{ "profile": { "name": "%s",' \
' "description": "%s"}}\' "https://%s/api/v1/groups/%s"' % (self._token, new_name,
new_description, self._url, group_id)
output, error = self._exe_cmd.run(cmd)
if "errorCode" in output:
return ERROR_CODE_ONE, output["errorCauses"][0]["errorSummary"], {}
else:
return ERROR_CODE_ZERO, "", output
def remove_group(self, group_id):
"""
remove a user group
:param group_id: The user group
:return: error_code, error_message, group
"""
cmd = 'curl -X DELETE -H "Accept: application/json" -H "Content-Type: application/json"' \
' -H "Authorization: SSWS {}" "https://{}/api/v1/groups/%s"'.format(self._token, self._url, group_id)
if group_id is not None:
cmd += "/{}".format(group_id)
output, error = self._exe_cmd.run(cmd)
if "errorCode" in output:
return ERROR_CODE_ONE, output["errorCauses"][0]["errorSummary"], {}
else:
return ERROR_CODE_ZERO, "", output
def get_group_users(self, group_id):
"""
get the user_app in a specific group
:param group_id: the group id
:return: error_code, error_message, user_app
"""
cmd = 'curl -X GET -H "Accept: application/json" -H "Content-Type: application/json"' \
' -H "Authorization: SSWS {}" "https://{}/api/v1/groups/{}/users"'.format(self._token, self._url, group_id)
output, error = self._exe_cmd.run(cmd)
if "errorCode" in output:
return ERROR_CODE_ONE, output["errorSummary"], {}
else:
return ERROR_CODE_ZERO, "", output
def add_user_to_group(self, group_id, user_id):
"""
        Add a user to an existing group
:param group_id: the group id
:param user_id: the user id
:return: error_code, error_message, user
"""
# check if user is exists:
error_code, error_message, users = self.get_group_users(group_id)
if len(users) != 0:
for i in users:
if i["id"] == user_id:
self.remove_user_from_group(group_id, user_id)
sleep(3)
cmd = 'curl -X PUT -H "Accept: application/json" -H "Content-Type: application/json" -H ' \
'"Authorization: SSWS %s" "https://%s/api/v1/groups/%s/users/%s"' % (self._token, self._url,
group_id, user_id)
output, error = self._exe_cmd.run(cmd)
if output is not None and "errorCode" in output:
return ERROR_CODE_ONE, output["errorSummary"], ""
else:
return ERROR_CODE_ZERO, "", group_id
def remove_user_from_group(self, group_id, user_id):
"""
remove a user from a group
:param group_id: group id
:param user_id: user id
:return: error_code, error_message, user
"""
cmd = 'curl -X DELETE -H "Accept: application/json" -H "Content-Type: application/json" -H ' \
'"Authorization: SSWS %s" "https://%s/api/v1/groups/%s/users/%s"' % (self._token, self._url,
group_id, user_id)
output, error = self._exe_cmd.run(cmd)
if output is not None and "errorCode" in output:
return ERROR_CODE_ONE, output["errorSummary"], {}
else:
return ERROR_CODE_ZERO, "", output
def filter_groups_by_name(self, group_name):
"""
Filter groups by name
:param group_name: the group name
:return:
"""
cmd = 'curl -X GET -H "Accept: application/json" -H "Content-Type: application/json" -H' \
' "Authorization: SSWS {}"' \
' "https://{}/api/v1/groups?q={}"'.format(self._token, self._url, group_name)
output, error = self._exe_cmd.run(cmd)
if len(output) != 0:
for group in output:
if group["profile"]["name"].lower() == group_name.lower():
return ERROR_CODE_ZERO, "", group
if output is not None and "errorCode" in output:
return ERROR_CODE_ONE, output["errorSummary"], {}
else:
return ERROR_CODE_ONE, "the group {} is not found on organization side".format(group_name), output
def change_group(self, user_id, current_group, new_group, terminate_session):
"""
        Remove the user from the current group and add them to the new group
:param user_id: the user's id
:param current_group: the user's current group
:param new_group: the user's new group
:param terminate_session: if true, terminate the user's active sessions
:return: error_code, error_message, response_data
"""
new_group_name = new_group
error_code, error_message, new_group = self.filter_groups_by_name(new_group)
if error_code != 0:
return error_code, error_message, {}
if current_group != "":
error_code, error_message, current_group_object = self.filter_groups_by_name(current_group)
if error_code != 0:
return error_code, error_message, {}
current_group_id = current_group_object["id"]
error_code, error_message, output = self.remove_user_from_group(current_group_id, user_id)
if error_code != 0:
return error_code, error_message, output
error_code, error_message, group_user = self.get_group_users(current_group_id)
for u in group_user:
if u["id"] == user_id:
error_code, error_message, output = self.remove_user_from_group(current_group_id, user_id)
if error_code != 0:
return error_code, error_message, output
sleep(3)
new_group_id = new_group["id"]
error_code, error_message, group_id = self.add_user_to_group(new_group_id, user_id)
if error_code == 0 and terminate_session is True:
current_group = current_group[-1] if len(current_group) != 0 else 0
if int(current_group) < int(new_group_name[-1]):
error_code_terminate, error_message_terminate, result = self.terminate_session(user_id)
if error_code != 0:
return error_code, error_message, group_id
return 0, "", {"status": "success",
"new_group_name": new_group,
"new_group_id": group_id}
def terminate_session(self, user_id):
"""
Removes all active identity provider sessions. This forces the user to authenticate on the next operation
        :param user_id: user id
:return:error_code, error_message, response
"""
cmd = 'curl -X DELETE -H "Accept: application/json"' \
' -H "Content-Type: application/json" -H' \
' "Authorization: SSWS %s"' \
' "https://%s/api/v1/users/%s/sessions"' % (self._token, self._url, user_id)
output, error = self._exe_cmd.run(cmd)
if output is not None and "errorCode" in output:
return ERROR_CODE_ONE, output["errorSummary"], {}
else:
return ERROR_CODE_ZERO, "", output
if __name__ == "__main__":
group_class = Groups("00i3NGyCptMBpO-PbV5v7MHNZ-VeYtjFzXnSRtB9VK", "forcepointbizdev.okta.com")
#print(group_class.create_group("Risk_level_2", "The Group for risk level 2"))
print(group_class.filter_groups_by_name("risk_level_4"))
#print(group_class.add_user_to_group("00g1nizk68ieQ9P8O357", "00u1nj50r1XZD0e1l357"))
#print(group_class.remove_user_from_group("00g1nizk68ieQ9P8O357", "00u1ok7ywiqPPHowm357"))
#error, mesg, user = group_class.get_group_users("00g1nizk68ieQ9P8O357")
#for i in user:
# print(i["id"])
|
StarcoderdataPython
|
1609598
|
import os
import numpy as np
import networkx as nx
from tqdm import tqdm
from utils import load_networks
# Get File Names
data_path = os.path.join(os.path.dirname(__file__), '..', '..', 'Data')
networks_dir = load_networks(os.path.join(data_path, 'Generated', 'Barabasi'))
for net_dir in networks_dir:
    print('Calculating degree matrix for', os.path.basename(net_dir))
G = nx.read_gpickle(net_dir)
N = G.number_of_nodes()
# Degrees through time
degrees_t = np.zeros((N, N))
    for num in tqdm(range(N)):
index = range(num+1)
H = G.subgraph(index)
degrees_sub = np.fromiter(dict(nx.degree(H)).values(), dtype=int)
degrees_t[num] = np.pad(degrees_sub, (0, N - num - 1), 'constant', constant_values=0)
with open(net_dir.replace('.gz', '.npy'), 'wb') as f:
np.save(f, degrees_t)
|
StarcoderdataPython
|
3341148
|
<reponame>cacjorge/lcs_adm-pci-ku3
import os
from itertools import chain
from glob import glob
directory = '.'
for filename in os.listdir(directory):
if filename.endswith(".txt"):
        # Read the file, lowercase its contents, and write it back in place
        with open(filename, 'r') as f:
            text = f.read()
        with open(filename, 'w') as out:
            out.write(text.lower())
|
StarcoderdataPython
|
4827057
|
<reponame>Izoniks-prog/Dreams-Island
import pygame
from src.entities.vegetations.vegetation import Vegetation
SIZE_X = 17
SIZE_Y = 16
class Three(Vegetation):
def __init__(self, x: int, y: int, path: str):
super().__init__(x, y, path)
self.image = pygame.image.load(path)
self.rect = self.image.get_rect()
self.x = x
self.y = y
self.rect.x = x * SIZE_X
self.rect.y = y * SIZE_Y
|
StarcoderdataPython
|
48247
|
<filename>functions/time.py
import os, time, pytz
from datetime import datetime, timedelta
from logger.main import *
def get_day_hh(event, resource):
"""
Get current day + hour (using gmt by default if time parameter not set)
"""
time_zone = os.getenv('TIME', 'gmt')
if time_zone == 'local':
hh = int(time.strftime("%H", time.localtime()))
day = time.strftime("%a", time.localtime()).lower()
logger.info("'local time' hour " + str(hh))
elif time_zone == 'gmt':
hh = int(time.strftime("%H", time.gmtime()))
day = time.strftime("%a", time.gmtime()).lower()
logger.info("'gmt' hour " + str(hh))
else:
if time_zone in pytz.all_timezones:
d = hour_rounder(datetime.now())
d = pytz.utc.localize(d)
req_timezone = pytz.timezone(time_zone)
d_req_timezone = d.astimezone(req_timezone)
hh = int(d_req_timezone.strftime("%H"))
day = d_req_timezone.strftime("%a").lower()
else:
logger.error('Invalid time timezone string value \"%s\", please check!' %(time_zone))
raise ValueError('Invalid time timezone string value')
# Get manual hour informed in event
if resource in event:
if "hour" in event[resource]:
hh = event[resource]["hour"]
else:
logger.info("Hour not found in manual event")
if "day" in event[resource]:
day = event[resource]["day"]
else:
logger.info("Day not found in manual event")
logger.info("Checking for " + resource + " instances to start or stop for 'day' " + day + " hour " + str(hh))
return day, str(hh)
def hour_rounder(t: datetime):
"""
Rounds to nearest hour by adding a timedelta hour if minute >= 30
"""
return (t.replace(second=0, microsecond=0, minute=0, hour=t.hour)
+timedelta(hours=t.minute//30))
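
# Editor-added illustration (hedged, assumes only the function above):
# hour_rounder rounds down when minute < 30 and up otherwise, e.g.
#   hour_rounder(datetime(2020, 1, 1, 10, 12))  ->  datetime(2020, 1, 1, 10, 0)
#   hour_rounder(datetime(2020, 1, 1, 10, 42))  ->  datetime(2020, 1, 1, 11, 0)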
|
StarcoderdataPython
|
1747153
|
'p4a example service using oscpy to communicate with main application.'
from random import sample, randint
from string import ascii_letters
from time import localtime, asctime, sleep
from oscpy.server import OSCThreadServer
from oscpy.client import OSCClient
CLIENT = OSCClient('localhost', 3002)
stopFlag = False
def ping(*_):
'answer to ping messages'
msg = 'service: ' + ''.join(sample(ascii_letters, randint(10, 20))) # <----------
CLIENT.send_message(
b'/message',
[
msg.encode('utf8'),
],
)
def send_date():
'send date to the application'
global stopFlag
if stopFlag:
msg = 'service stopped'
else:
msg = asctime(localtime())
CLIENT.send_message(
b'/date',
[msg.encode('utf8'), ]
)
def stop():
global stopFlag
stopFlag=True
SERVER.close()
if __name__ == '__main__':
SERVER = OSCThreadServer()
SERVER.listen('localhost', port=3000, default=True)
SERVER.bind(b'/ping', ping)
SERVER.bind(b'/stop', stop) # stop service when receiving 'stop msg
while not stopFlag:
sleep(1)
send_date()
|
StarcoderdataPython
|
122774
|
<gh_stars>0
import ast
import json
from datetime import datetime
from tweepy import Status
from twitterproducer.tweets.itweets_provider import ITweetsProvider
status_json = "{'created_at': 'Wed Aug 26 03:22:48 +0000 2020', 'id': 1298460861900652545, 'id_str': " \
"'1298460861900652545', 'full_text': 'https://t.co/WjbNIvW96r', 'truncated': False, " \
"'display_text_range': [0, 23], 'entities': {'hashtags': [], 'symbols': [], 'user_mentions': [], " \
"'urls': [], 'media': [{'id': 1298458167211425795, 'id_str': '1298458167211425795', 'indices': [0, 23], " \
"'media_url': 'http://pbs.twimg.com/amplify_video_thumb/1298458167211425795/img/Yf5i8sL2TJ9eOurH.jpg', " \
"'media_url_https': 'https://pbs.twimg.com/amplify_video_thumb/1298458167211425795/img/Yf5i8sL2TJ9eOurH" \
".jpg', 'url': 'https://t.co/WjbNIvW96r', 'display_url': 'pic.twitter.com/WjbNIvW96r', 'expanded_url': " \
"'https://twitter.com/TeamTrump/status/1298458366306660353/video/1', 'type': 'photo', 'sizes': {" \
"'thumb': {'w': 150, 'h': 150, 'resize': 'crop'}, 'medium': {'w': 1200, 'h': 675, 'resize': 'fit'}, " \
"'small': {'w': 680, 'h': 383, 'resize': 'fit'}, 'large': {'w': 1280, 'h': 720, 'resize': 'fit'}}, " \
"'source_status_id': 1298458366306660353, 'source_status_id_str': '1298458366306660353', " \
"'source_user_id': 729676086632656900, 'source_user_id_str': '729676086632656900'}]}, " \
"'extended_entities': {'media': [{'id': 1298458167211425795, 'id_str': '1298458167211425795', " \
"'indices': [0, 23], 'media_url': " \
"'http://pbs.twimg.com/amplify_video_thumb/1298458167211425795/img/Yf5i8sL2TJ9eOurH.jpg', " \
"'media_url_https': 'https://pbs.twimg.com/amplify_video_thumb/1298458167211425795/img/Yf5i8sL2TJ9eOurH" \
".jpg', 'url': 'https://t.co/WjbNIvW96r', 'display_url': 'pic.twitter.com/WjbNIvW96r', 'expanded_url': " \
"'https://twitter.com/TeamTrump/status/1298458366306660353/video/1', 'type': 'video', 'sizes': {" \
"'thumb': {'w': 150, 'h': 150, 'resize': 'crop'}, 'medium': {'w': 1200, 'h': 675, 'resize': 'fit'}, " \
"'small': {'w': 680, 'h': 383, 'resize': 'fit'}, 'large': {'w': 1280, 'h': 720, 'resize': 'fit'}}, " \
"'source_status_id': 1298458366306660353, 'source_status_id_str': '1298458366306660353', " \
"'source_user_id': 729676086632656900, 'source_user_id_str': '729676086632656900', 'video_info': {" \
"'aspect_ratio': [16, 9], 'duration_millis': 54922, 'variants': [{'bitrate': 288000, 'content_type': " \
"'video/mp4', 'url': 'https://video.twimg.com/amplify_video/1298458167211425795/vid/480x270" \
"/sXwPBww7AzzE0l9Q.mp4?tag=13'}, {'bitrate': 2176000, 'content_type': 'video/mp4', " \
"'url': 'https://video.twimg.com/amplify_video/1298458167211425795/vid/1280x720/uNjTWvnP00FREqPM.mp4" \
"?tag=13'}, {'content_type': 'application/x-mpegURL', 'url': " \
"'https://video.twimg.com/amplify_video/1298458167211425795/pl/Zjm-_QsgnU-brl_G.m3u8?tag=13'}, " \
"{'bitrate': 832000, 'content_type': 'video/mp4', 'url': " \
"'https://video.twimg.com/amplify_video/1298458167211425795/vid/640x360/BBMmBNzsb75nsyC4.mp4?tag=13" \
"'}]}, 'additional_media_info': {'title': '', 'description': '', 'embeddable': True, 'monetizable': " \
"False, 'source_user': {'id': 729676086632656900, 'id_str': '729676086632656900', 'name': '<NAME> (" \
"Text VOTE to 88022)', 'screen_name': 'TeamTrump', 'location': 'USA', 'description': 'The official " \
"Twitter account for the Trump Campaign. Together, we will KEEP AMERICA GREAT! 🇺🇸', " \
"'url': 'https://t.co/mZB2hymxC9', 'entities': {'url': {'urls': [{'url': 'https://t.co/mZB2hymxC9', " \
"'expanded_url': 'http://www.DonaldJTrump.com', 'display_url': 'DonaldJTrump.com', 'indices': [0, " \
"23]}]}, 'description': {'urls': []}}, 'protected': False, 'followers_count': 2123084, 'friends_count': " \
"127, 'listed_count': 4055, 'created_at': 'Mon May 09 14:15:10 +0000 2016', 'favourites_count': 3479, " \
"'utc_offset': None, 'time_zone': None, 'geo_enabled': True, 'verified': True, 'statuses_count': 25663, " \
"'lang': None, 'contributors_enabled': False, 'is_translator': False, 'is_translation_enabled': False, " \
"'profile_background_color': '000000', 'profile_background_image_url': " \
"'http://abs.twimg.com/images/themes/theme1/bg.png', 'profile_background_image_url_https': " \
"'https://abs.twimg.com/images/themes/theme1/bg.png', 'profile_background_tile': False, " \
"'profile_image_url': 'http://pbs.twimg.com/profile_images/745768799849308160/KrZhjkpH_normal.jpg', " \
"'profile_image_url_https': 'https://pbs.twimg.com/profile_images/745768799849308160/KrZhjkpH_normal" \
".jpg', 'profile_banner_url': 'https://pbs.twimg.com/profile_banners/729676086632656900/1588979102', " \
"'profile_link_color': 'CB0606', 'profile_sidebar_border_color': '000000', " \
"'profile_sidebar_fill_color': '000000', 'profile_text_color': '000000', " \
"'profile_use_background_image': False, 'has_extended_profile': False, 'default_profile': False, " \
"'default_profile_image': False, 'following': False, 'follow_request_sent': False, 'notifications': " \
"False, 'translator_type': 'none'}}}]}, 'source': '<a href=\"http://twitter.com/download/iphone\" " \
"rel=\"nofollow\">Twitter for iPhone</a>', 'in_reply_to_status_id': None, 'in_reply_to_status_id_str': " \
"None, 'in_reply_to_user_id': None, 'in_reply_to_user_id_str': None, 'in_reply_to_screen_name': None, " \
"'user': {'id': 25073877, 'id_str': '25073877', 'name': '<NAME>', 'screen_name': " \
"'realDonaldTrump', 'location': 'Washington, DC', 'description': '45th President of the United States " \
"of America🇺🇸', 'url': 'https://t.co/OMxB0x7xC5', 'entities': {'url': {'urls': [{'url': " \
"'https://t.co/OMxB0x7xC5', 'expanded_url': 'http://www.Instagram.com/realDonaldTrump', 'display_url': " \
"'Instagram.com/realDonaldTrump', 'indices': [0, 23]}]}, 'description': {'urls': []}}, 'protected': " \
"False, 'followers_count': 85580226, 'friends_count': 50, 'listed_count': 118643, 'created_at': 'Wed " \
"Mar 18 13:46:38 +0000 2009', 'favourites_count': 4, 'utc_offset': None, 'time_zone': None, " \
"'geo_enabled': True, 'verified': True, 'statuses_count': 54945, 'lang': None, 'contributors_enabled': " \
"False, 'is_translator': False, 'is_translation_enabled': True, 'profile_background_color': '6D5C18', " \
"'profile_background_image_url': 'http://abs.twimg.com/images/themes/theme1/bg.png', " \
"'profile_background_image_url_https': 'https://abs.twimg.com/images/themes/theme1/bg.png', " \
"'profile_background_tile': True, 'profile_image_url': " \
"'http://pbs.twimg.com/profile_images/874276197357596672/kUuht00m_normal.jpg', " \
"'profile_image_url_https': 'https://pbs.twimg.com/profile_images/874276197357596672/kUuht00m_normal" \
".jpg', 'profile_banner_url': 'https://pbs.twimg.com/profile_banners/25073877/1595058372', " \
"'profile_link_color': '1B95E0', 'profile_sidebar_border_color': 'BDDCAD', " \
"'profile_sidebar_fill_color': 'C5CEC0', 'profile_text_color': '333333', " \
"'profile_use_background_image': True, 'has_extended_profile': False, 'default_profile': False, " \
"'default_profile_image': False, 'following': True, 'follow_request_sent': False, 'notifications': " \
"False, 'translator_type': 'regular'}, 'geo': None, 'coordinates': None, 'place': None, 'contributors': " \
"None, 'is_quote_status': False, 'retweet_count': 8901, 'favorite_count': 39148, 'favorited': False, " \
"'retweeted': False, 'possibly_sensitive': False, 'lang': 'und'}, created_at=datetime.datetime(2020, 8, " \
"26, 3, 22, 48), id=1298460861900652545, id_str='1298460861900652545', " \
"full_text='https://t.co/WjbNIvW96r', truncated=False, display_text_range=[0, 23], entities={" \
"'hashtags': [], 'symbols': [], 'user_mentions': [], 'urls': [], 'media': [{'id': 1298458167211425795, " \
"'id_str': '1298458167211425795', 'indices': [0, 23], 'media_url': " \
"'http://pbs.twimg.com/amplify_video_thumb/1298458167211425795/img/Yf5i8sL2TJ9eOurH.jpg', " \
"'media_url_https': 'https://pbs.twimg.com/amplify_video_thumb/1298458167211425795/img/Yf5i8sL2TJ9eOurH" \
".jpg', 'url': 'https://t.co/WjbNIvW96r', 'display_url': 'pic.twitter.com/WjbNIvW96r', 'expanded_url': " \
"'https://twitter.com/TeamTrump/status/1298458366306660353/video/1', 'type': 'photo', 'sizes': {" \
"'thumb': {'w': 150, 'h': 150, 'resize': 'crop'}, 'medium': {'w': 1200, 'h': 675, 'resize': 'fit'}, " \
"'small': {'w': 680, 'h': 383, 'resize': 'fit'}, 'large': {'w': 1280, 'h': 720, 'resize': 'fit'}}, " \
"'source_status_id': 1298458366306660353, 'source_status_id_str': '1298458366306660353', " \
"'source_user_id': 729676086632656900, 'source_user_id_str': '729676086632656900'}]}, " \
"extended_entities={'media': [{'id': 1298458167211425795, 'id_str': '1298458167211425795', 'indices': [" \
"0, 23], 'media_url': 'http://pbs.twimg.com/amplify_video_thumb/1298458167211425795/img" \
"/Yf5i8sL2TJ9eOurH.jpg', 'media_url_https': " \
"'https://pbs.twimg.com/amplify_video_thumb/1298458167211425795/img/Yf5i8sL2TJ9eOurH.jpg', " \
"'url': 'https://t.co/WjbNIvW96r', 'display_url': 'pic.twitter.com/WjbNIvW96r', 'expanded_url': " \
"'https://twitter.com/TeamTrump/status/1298458366306660353/video/1', 'type': 'video', 'sizes': {" \
"'thumb': {'w': 150, 'h': 150, 'resize': 'crop'}, 'medium': {'w': 1200, 'h': 675, 'resize': 'fit'}, " \
"'small': {'w': 680, 'h': 383, 'resize': 'fit'}, 'large': {'w': 1280, 'h': 720, 'resize': 'fit'}}, " \
"'source_status_id': 1298458366306660353, 'source_status_id_str': '1298458366306660353', " \
"'source_user_id': 729676086632656900, 'source_user_id_str': '729676086632656900', 'video_info': {" \
"'aspect_ratio': [16, 9], 'duration_millis': 54922, 'variants': [{'bitrate': 288000, 'content_type': " \
"'video/mp4', 'url': 'https://video.twimg.com/amplify_video/1298458167211425795/vid/480x270" \
"/sXwPBww7AzzE0l9Q.mp4?tag=13'}, {'bitrate': 2176000, 'content_type': 'video/mp4', " \
"'url': 'https://video.twimg.com/amplify_video/1298458167211425795/vid/1280x720/uNjTWvnP00FREqPM.mp4" \
"?tag=13'}, {'content_type': 'application/x-mpegURL', 'url': " \
"'https://video.twimg.com/amplify_video/1298458167211425795/pl/Zjm-_QsgnU-brl_G.m3u8?tag=13'}, " \
"{'bitrate': 832000, 'content_type': 'video/mp4', 'url': " \
"'https://video.twimg.com/amplify_video/1298458167211425795/vid/640x360/BBMmBNzsb75nsyC4.mp4?tag=13" \
"'}]}, 'additional_media_info': {'title': '', 'description': '', 'embeddable': True, 'monetizable': " \
"False, 'source_user': {'id': 729676086632656900, 'id_str': '729676086632656900', 'name': '<NAME> (" \
"Text VOTE to 88022)', 'screen_name': 'TeamTrump', 'location': 'USA', 'description': 'The official " \
"Twitter account for the Trump Campaign. Together, we will KEEP AMERICA GREAT! 🇺🇸', " \
"'url': 'https://t.co/mZB2hymxC9', 'entities': {'url': {'urls': [{'url': 'https://t.co/mZB2hymxC9', " \
"'expanded_url': 'http://www.DonaldJTrump.com', 'display_url': 'DonaldJTrump.com', 'indices': [0, " \
"23]}]}, 'description': {'urls': []}}, 'protected': False, 'followers_count': 2123084, 'friends_count': " \
"127, 'listed_count': 4055, 'created_at': 'Mon May 09 14:15:10 +0000 2016', 'favourites_count': 3479, " \
"'utc_offset': None, 'time_zone': None, 'geo_enabled': True, 'verified': True, 'statuses_count': 25663, " \
"'lang': None, 'contributors_enabled': False, 'is_translator': False, 'is_translation_enabled': False, " \
"'profile_background_color': '000000', 'profile_background_image_url': " \
"'http://abs.twimg.com/images/themes/theme1/bg.png', 'profile_background_image_url_https': " \
"'https://abs.twimg.com/images/themes/theme1/bg.png', 'profile_background_tile': False, " \
"'profile_image_url': 'http://pbs.twimg.com/profile_images/745768799849308160/KrZhjkpH_normal.jpg', " \
"'profile_image_url_https': 'https://pbs.twimg.com/profile_images/745768799849308160/KrZhjkpH_normal" \
".jpg', 'profile_banner_url': 'https://pbs.twimg.com/profile_banners/729676086632656900/1588979102', " \
"'profile_link_color': 'CB0606', 'profile_sidebar_border_color': '000000', " \
"'profile_sidebar_fill_color': '000000', 'profile_text_color': '000000', " \
"'profile_use_background_image': False, 'has_extended_profile': False, 'default_profile': False, " \
"'default_profile_image': False, 'following': False, 'follow_request_sent': False, 'notifications': " \
"False, 'translator_type': 'none'}}}]}, source='Twitter for iPhone', " \
"source_url='http://twitter.com/download/iphone', in_reply_to_status_id=None, " \
"in_reply_to_status_id_str=None, in_reply_to_user_id=None, in_reply_to_user_id_str=None, " \
"in_reply_to_screen_name=None, author=User(_api=<tweepy.api.API object at 0x0000023F470BA0B8>, " \
"_json={'id': 25073877, 'id_str': '25073877', 'name': '<NAME>', 'screen_name': " \
"'realDonaldTrump', 'location': 'Washington, DC', 'description': '45th President of the United States " \
"of America🇺🇸', 'url': 'https://t.co/OMxB0x7xC5', 'entities': {'url': {'urls': [{'url': " \
"'https://t.co/OMxB0x7xC5', 'expanded_url': 'http://www.Instagram.com/realDonaldTrump', 'display_url': " \
"'Instagram.com/realDonaldTrump', 'indices': [0, 23]}]}, 'description': {'urls': []}}, 'protected': " \
"False, 'followers_count': 85580226, 'friends_count': 50, 'listed_count': 118643, 'created_at': 'Wed " \
"Mar 18 13:46:38 +0000 2009', 'favourites_count': 4, 'utc_offset': None, 'time_zone': None, " \
"'geo_enabled': True, 'verified': True, 'statuses_count': 54945, 'lang': None, 'contributors_enabled': " \
"False, 'is_translator': False, 'is_translation_enabled': True, 'profile_background_color': '6D5C18', " \
"'profile_background_image_url': 'http://abs.twimg.com/images/themes/theme1/bg.png', " \
"'profile_background_image_url_https': 'https://abs.twimg.com/images/themes/theme1/bg.png', " \
"'profile_background_tile': True, 'profile_image_url': " \
"'http://pbs.twimg.com/profile_images/874276197357596672/kUuht00m_normal.jpg', " \
"'profile_image_url_https': 'https://pbs.twimg.com/profile_images/874276197357596672/kUuht00m_normal" \
".jpg', 'profile_banner_url': 'https://pbs.twimg.com/profile_banners/25073877/1595058372', " \
"'profile_link_color': '1B95E0', 'profile_sidebar_border_color': 'BDDCAD', " \
"'profile_sidebar_fill_color': 'C5CEC0', 'profile_text_color': '333333', " \
"'profile_use_background_image': True, 'has_extended_profile': False, 'default_profile': False, " \
"'default_profile_image': False, 'following': True, 'follow_request_sent': False, 'notifications': " \
"False, 'translator_type': 'regular'}, id=25073877, id_str='25073877', name='<NAME>', " \
"screen_name='realDonaldTrump', location='Washington, DC', description='45th President of the United " \
"States of America🇺🇸', url='https://t.co/OMxB0x7xC5', entities={'url': {'urls': [{'url': " \
"'https://t.co/OMxB0x7xC5', 'expanded_url': 'http://www.Instagram.com/realDonaldTrump', 'display_url': " \
"'Instagram.com/realDonaldTrump', 'indices': [0, 23]}]}, 'description': {'urls': []}}, protected=False, " \
"followers_count=85580226, friends_count=50, listed_count=118643, created_at=datetime.datetime(2009, 3, " \
"18, 13, 46, 38), favourites_count=4, utc_offset=None, time_zone=None, geo_enabled=True, verified=True, " \
"statuses_count=54945, lang=None, contributors_enabled=False, is_translator=False, " \
"is_translation_enabled=True, profile_background_color='6D5C18', " \
"profile_background_image_url='http://abs.twimg.com/images/themes/theme1/bg.png', " \
"profile_background_image_url_https='https://abs.twimg.com/images/themes/theme1/bg.png', " \
"profile_background_tile=True, " \
"profile_image_url='http://pbs.twimg.com/profile_images/874276197357596672/kUuht00m_normal.jpg', " \
"profile_image_url_https='https://pbs.twimg.com/profile_images/874276197357596672/kUuht00m_normal.jpg', " \
"profile_banner_url='https://pbs.twimg.com/profile_banners/25073877/1595058372', " \
"profile_link_color='1B95E0', profile_sidebar_border_color='BDDCAD', " \
"profile_sidebar_fill_color='C5CEC0', profile_text_color='333333', profile_use_background_image=True, " \
"has_extended_profile=False, default_profile=False, default_profile_image=False, following=True, " \
"follow_request_sent=False, notifications=False, translator_type='regular'), user=User(" \
"_api=<tweepy.api.API object at 0x0000023F470BA0B8>, _json={'id': 25073877, 'id_str': '25073877', " \
"'name': '<NAME>', 'screen_name': 'realDonaldTrump', 'location': 'Washington, DC', " \
"'description': '45th President of the United States of America🇺🇸', 'url': 'https://t.co/OMxB0x7xC5', " \
"'entities': {'url': {'urls': [{'url': 'https://t.co/OMxB0x7xC5', 'expanded_url': " \
"'http://www.Instagram.com/realDonaldTrump', 'display_url': 'Instagram.com/realDonaldTrump', " \
"'indices': [0, 23]}]}, 'description': {'urls': []}}, 'protected': False, 'followers_count': 85580226, " \
"'friends_count': 50, 'listed_count': 118643, 'created_at': 'Wed Mar 18 13:46:38 +0000 2009', " \
"'favourites_count': 4, 'utc_offset': None, 'time_zone': None, 'geo_enabled': True, 'verified': True, " \
"'statuses_count': 54945, 'lang': None, 'contributors_enabled': False, 'is_translator': False, " \
"'is_translation_enabled': True, 'profile_background_color': '6D5C18', 'profile_background_image_url': " \
"'http://abs.twimg.com/images/themes/theme1/bg.png', 'profile_background_image_url_https': " \
"'https://abs.twimg.com/images/themes/theme1/bg.png', 'profile_background_tile': True, " \
"'profile_image_url': 'http://pbs.twimg.com/profile_images/874276197357596672/kUuht00m_normal.jpg', " \
"'profile_image_url_https': 'https://pbs.twimg.com/profile_images/874276197357596672/kUuht00m_normal" \
".jpg', 'profile_banner_url': 'https://pbs.twimg.com/profile_banners/25073877/1595058372', " \
"'profile_link_color': '1B95E0', 'profile_sidebar_border_color': 'BDDCAD', " \
"'profile_sidebar_fill_color': 'C5CEC0', 'profile_text_color': '333333', " \
"'profile_use_background_image': True, 'has_extended_profile': False, 'default_profile': False, " \
"'default_profile_image': False, 'following': True, 'follow_request_sent': False, 'notifications': " \
"False, 'translator_type': 'regular'}, id=25073877, id_str='25073877', name='<NAME>', " \
"screen_name='realDonaldTrump', location='Washington, DC', description='45th President of the United " \
"States of America🇺🇸', url='https://t.co/OMxB0x7xC5', entities={'url': {'urls': [{'url': " \
"'https://t.co/OMxB0x7xC5', 'expanded_url': 'http://www.Instagram.com/realDonaldTrump', 'display_url': " \
"'Instagram.com/realDonaldTrump', 'indices': [0, 23]}]}, 'description': {'urls': []}} "
class obj(object):
def __init__(self, d):
for a, b in d.items():
if type(b) == dict:
if b.get('ignore'):
setattr(self, a, b)
continue
if isinstance(b, (list, tuple)):
setattr(self, a, [obj(x) if isinstance(x, dict) else x for x in b])
else:
setattr(self, a, obj(b) if isinstance(b, dict) else b)
class MockTweetsProvider(ITweetsProvider):
def get_tweets(self, user_id):
return [obj({
'user': {
'screen_name': 'mockuser'
},
'full_text': 'Mock tweet',
'created_at': datetime.now(),
'id_str': '121212',
'entities': {'urls': [], 'ignore': True},
'retweeted': False
})]
|
StarcoderdataPython
|
3347421
|
<gh_stars>1-10
#!python
from .more_page_builder import MorePageBuilder
from .page_builder import StopOutput
import queue
import threading
# Signal to send to the input queue when there is no more input
END_OF_INPUT = None
# Return code if output was interrupted by the user (e.g. the user pressed ctrl+c)
OUTPUT_STOPPED = 'OUTPUT_STOPPED'
def paginate(
input,
output=None,
prompt=None,
screen_dimensions=None,
plugins=None,
page_builder=None,
asynchronous=False):
'''
Paginates the input, similar to how 'more' works in bash.
Reads from input until the output window is full.
Then prompts the user for an action before reading more input.
Pseudo-logic:
-------------
page = page_builder.build_first_page()
for line in <input-lines>:
if page.is_full():
page.flush()
page = page_builder.build_next_page()
page.add_line(line)
Arguments:
----------
input: [type iterable or Queue]
The input text that should be paginated.
This must either be an iterable over text (e.g. a list or a file), or an instance of queue.Queue.
It is not required that each returned string is a complete line.
The paginator will combine incomplete lines until a '\n' is encountered.
If it is a queue.Queue, you must pass 'END_OF_INPUT' into the queue when no more input is expected.
This will flush the final incomplete line (if any) to the output.
Note that you can NOT use queue.join() to detect all input has been processed
(as that just raises issues if the user decides to abort the output halfway through).
Instead, if you use 'asynchronous=True' you can join the returned context.
output: [type Output]
If not specified we print output to stdout
prompt: [type Input]
Used when prompting the user for actions.
Defaults to reading from stdin.
screen_dimensions: [type ScreenDimensions]
Used to know the height of the output window
(which is used to decide how many lines to print before we consider a page 'full').
Defaults to using the dimensions of the terminal window.
plugins: [type list of MorePlugin]
The plugins to load. These plugins decide what actions are available on the 'more' prompt.
If not specified will fetch all plugins from more_plugins.py
asynchronous: [type bool]
If true the 'paginate' call will return instantly and run asynchronously.
In this case a context is returned on which you can call 'context.join([timeout])'
to block until all lines are sent to the output.
page_builder: [type PageBuilder]
The object that will create the output pages whenever a page is full.
Must be an instance of 'PageBuilder'.
If specified we ignore the values of output, prompt, screen_dimensions and plugins.
Returns:
--------
A joinable 'context' if asynchronous is True
OUTPUT_STOPPED if the user stopped the output (for example using ctrl+c)
'''
page_builder = page_builder or MorePageBuilder(
input=prompt,
output=output,
screen_dimensions=screen_dimensions,
plugins=plugins)
if asynchronous:
thread = threading.Thread(
target=paginate,
kwargs={
'input': input,
'page_builder': page_builder,
'asynchronous': False,
},
)
thread.start()
return thread
paginator = Paginator(page_builder)
if isinstance(input, queue.Queue):
return paginator.paginate_from_queue(input)
else:
return paginator.paginate(input)
class Paginator(object):
'''
Paginates given input text, similar to how 'more' works in bash.
See help of 'paginate' for a more detailed description of the behavior.
There are 3 ways to send input text:
- pass an iterable to self.paginate.
- pass a queue to self.paginate_from_queue.
- call 'add_text' repeatedly until all text has been sent in, then call 'flush_incomplete_line'.
Each of these methods returns 'OUTPUT_STOPPED' if the user stopped the output (for example using ctrl+c)
'''
def __init__(self, page_builder):
self._page_builder = page_builder
self._lines = _LineCollector()
self._page = self._page_builder.build_first_page()
def paginate(self, iterable):
'''
Iterates over the iterable, and paginates all the text it returns
'''
try:
for text in iterable:
self._try_to_add_text(text)
self.flush_incomplete_line()
except StopOutput:
return OUTPUT_STOPPED
def paginate_from_queue(self, input_queue):
'''
Iterates over the queue, and paginates all the text it returns.
Stops paginating when END_OF_INPUT is encountered on the queue.
'''
return self.paginate(QueueIterator(input_queue))
def add_text(self, input_text):
'''
Splits the input_text into lines, and paginates them.
Can be called multiple times.
When you're done you must call 'flush_incomplete_line'
to ensure the last incomplete input line is sent to the output.
'''
try:
self._try_to_add_text(input_text)
except StopOutput:
return OUTPUT_STOPPED
def _try_to_add_text(self, input_text):
self._lines.add(input_text)
for line in self._lines.pop_complete_lines():
self._paginate_and_print_text(line)
def flush_incomplete_line(self):
try:
self._try_to_flush_incomplete_line()
except StopOutput:
return OUTPUT_STOPPED
def _try_to_flush_incomplete_line(self):
if len(self._lines.incomplete_line):
self._paginate_and_print_text(self._lines.pop_incomplete_line())
self._page.flush()
def _paginate_and_print_text(self, text):
if self._page.is_full():
self._start_new_page()
self._output_text(text)
def _start_new_page(self):
self._page.flush()
self._page = self._page_builder.build_next_page()
def _output_text(self, text):
self._page.add_line(text)
class _LineCollector(object):
'''
Collects the input text and allows us to walk over the complete lines only.
example:
self.add('first ')
self.add('line \nsecond line\n')
self.add('incomplete final line')
self.pop_complete_lines() <-- returns ['first line', 'second line']
self.pop_incomplete_line() <-- returns 'incomplete final line'
'''
def __init__(self):
self._complete_lines = []
self.incomplete_line = ''
def add(self, text):
assert isinstance(text, str), 'expected str got {}'.format(text.__class__)
unprocessed_text = self.incomplete_line + text
complete_lines, incomplete_line = self._split_lines(unprocessed_text)
self._complete_lines += complete_lines
self.incomplete_line = incomplete_line
def pop_complete_lines(self):
try:
return self._complete_lines
finally:
self._complete_lines = []
def pop_incomplete_line(self):
try:
return self.incomplete_line
finally:
self.incomplete_line = ''
def _split_lines(self, text):
lines = text.splitlines(True)
if self._has_incomplete_line(lines):
complete_lines = lines[:-1]
incomplete_line = lines[-1]
else:
complete_lines = lines
incomplete_line = ''
return (complete_lines, incomplete_line)
def _has_incomplete_line(self, lines):
return len(lines) and not lines[-1].endswith('\n')
def _make_callable(value):
if not callable(value):
return lambda: value
else:
return value
class QueueIterator(object):
'''
Iterates over a queue, until END_OF_INPUT is encountered
'''
def __init__(self, queue):
self._queue = queue
def __iter__(self):
return self
def __next__(self):
text = self._queue.get()
if text is END_OF_INPUT:
raise StopIteration
return text
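

# Editor-added usage sketch: a minimal, assumption-based illustration, not part
# of the original module. It shows the PageBuilder contract that Paginator
# relies on, as inferred from the code above (the builder returns page objects
# exposing is_full(), add_line() and flush()). _DemoPage and _DemoPageBuilder
# are hypothetical stand-ins, not classes shipped with this package. Because of
# the relative imports at the top, run it via `python -m <package>.<module>`.
if __name__ == '__main__':
    class _DemoPage:
        def __init__(self, height=3):
            self._height, self._lines = height, []

        def is_full(self):
            return len(self._lines) >= self._height

        def add_line(self, line):
            self._lines.append(line)

        def flush(self):
            print(''.join(self._lines), end='')

    class _DemoPageBuilder:
        def build_first_page(self):
            return _DemoPage()

        def build_next_page(self):
            print('--- more ---')
            return _DemoPage()

    Paginator(_DemoPageBuilder()).paginate('line {}\n'.format(i) for i in range(7))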
|
StarcoderdataPython
|
1626358
|
from datetime import datetime
import nonebot
import pytz
from aiocqhttp.exceptions import Error as CQHttpError
from json import loads
from requests import get
from bot.plugins.pluginsConfig import *
@nonebot.scheduler.scheduled_job('cron', hour=NotificationTime['hour'], minute=NotificationTime['minute'])
async def _():
bot = nonebot.get_bot()
res = get(
'https://temp.163.com/special/00804KVA/cm_yaowen20200213.js?callback=data_callback')
text = '[' + res.content.decode('gbk')[15:-2] + ']'
news_list = loads(text)
news_message = ''
for new in news_list[:10]:
key_list = []
for keyword in new['keywords']:
key_list.append(keyword['keyname'])
news_message += f" {news_list.index(new) + 1}、{new['title']}\n 详情见:{new['docurl']}\n 关键词:{'、'.join(key_list)}\n"
try:
await bot.send_group_msg(group_id=NewsGroupID,
message=news_message)
except CQHttpError:
pass
|
StarcoderdataPython
|
1622350
|
'''
Given a sorted matrix, search for an element.
EXAMPLE:
Input : mat[4][4] = { {10, 20, 30, 40},
                      {15, 25, 35, 45},
                      {27, 29, 37, 48},
                      {32, 33, 39, 50}};
        x = 29
Output : Found at (2, 1)
SOLUTION:
We can do a binary search in every row, one row at a time.
Complexity -> O(n log m)
SOLUTION 2:
A better solution is to start from the top-right cell and move either left or down,
depending on whether the search element is smaller or greater than the current element.
Complexity -> O(n + m)
Note that we can also start from the bottom-left element,
but not from the top-left or bottom-right.
SOLUTION 3:
We can use divide and conquer, similar to binary search:
Start with the middle element (n/2, n/2).
If needle > current
    => we can ignore the top-left submatrix,
       so we recursively call the search on the 3 remaining submatrices.
Similarly, if needle < current
    => we can ignore the bottom-right submatrix.
At each step we reduce the search space by 1/4th.
The base case is a submatrix of size <= 2x2.
Complexity -> O(n^1.6)
Note that the complexity is not O(log(nm))!
It's even worse than O(n).
'''
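

# The sketch below is an editor-added illustration of SOLUTION 2 above (the
# "staircase" search); the function name search_sorted_matrix is not part of
# the original text.
def search_sorted_matrix(mat, x):
    """Return (row, col) of x in a row- and column-wise sorted matrix, or None."""
    if not mat or not mat[0]:
        return None
    row, col = 0, len(mat[0]) - 1           # start at the top-right cell
    while row < len(mat) and col >= 0:
        if mat[row][col] == x:
            return (row, col)
        if mat[row][col] > x:
            col -= 1                        # this whole column is too large
        else:
            row += 1                        # the rest of this row is too small
    return None


if __name__ == '__main__':
    mat = [[10, 20, 30, 40],
           [15, 25, 35, 45],
           [27, 29, 37, 48],
           [32, 33, 39, 50]]
    print(search_sorted_matrix(mat, 29))    # expected: (2, 1)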
|
StarcoderdataPython
|
1792233
|
<filename>app/tests/sample_data/item_samples.py<gh_stars>1-10
import json
from django.contrib.gis.geos import GEOSGeometry
from stac_api.models import BBOX_CH
from stac_api.utils import fromisoformat
geometries = {
'switzerland': GEOSGeometry(BBOX_CH),
'switzerland-west':
GEOSGeometry(
'SRID=4326;POLYGON(('
'5.710217406117146 47.84846875331844,'
'7.940442015492146 47.84846875331844,'
'7.940442015492146 45.773562697134,'
'5.710217406117146 45.773562697134,'
'5.710217406117146 47.84846875331844))'
),
'switzerland-east':
GEOSGeometry(
'SRID=4326;POLYGON(('
'8.094250609242145 47.84846875331844,'
'10.708996702992145 47.84846875331844,'
'10.708996702992145 45.773562697134,'
'8.094250609242145 45.773562697134,'
'8.094250609242145 47.84846875331844))'
),
'switzerland-north':
GEOSGeometry(
'SRID=4326;POLYGON(('
'5.798108031117146 47.84846875331844,'
'10.708996702992145 47.84846875331844,'
'10.708996702992145 46.89614858846383,'
'5.798108031117146 46.89614858846383,'
'5.798108031117146 47.84846875331844))'
),
'switzerland-south':
GEOSGeometry(
'SRID=4326;POLYGON(('
'5.798108031117146 46.89614858846383,'
'10.708996702992145 46.89614858846383,'
'10.708996702992145 45.67385578908906,'
'5.798108031117146 45.67385578908906,'
'5.798108031117146 46.89614858846383))'
),
'paris':
GEOSGeometry(
'SRID=4326;POLYGON(('
'1.6892213123671462 49.086733408488925,'
'2.8317994373671462 49.086733408488925,'
'2.8317994373671462 48.52233957365349,'
'1.6892213123671462 48.52233957365349,'
'1.6892213123671462 49.086733408488925))'
),
'covers-switzerland':
GEOSGeometry(
'SRID=4326;POLYGON(('
'4.96 44.82,'
'11.49 44.82,'
'11.49 49.81,'
'4.96 49.81,'
'4.96 44.82))'
)
}
links = {
'link-1': {
'rel': 'describedBy',
'href': 'https://www.example.com/described-by',
'title': 'This is an extra item link',
'link_type': 'description'
}
}
links_invalid = {
'link-invalid': {
'title': 'invalid item link relation',
'rel': 'invalid relation',
'href': 'not a url',
}
}
items = {
'item-1': {
'name': 'item-1',
'geometry':
GEOSGeometry(
json.dumps({
"coordinates": [[
[5.644711, 46.775054],
[5.644711, 48.014995],
[6.602408, 48.014995],
[7.602408, 49.014995],
[5.644711, 46.775054],
]],
"type": "Polygon"
})
),
'properties_title': 'My item 1',
'properties_datetime': fromisoformat('2020-10-28T13:05:10Z'),
'links': links.values()
},
'item-2': {
'name': 'item-2',
'geometry':
GEOSGeometry(
json.dumps({
"coordinates": [[
[5.644711, 46.775054],
[5.644711, 48.014995],
[6.602408, 48.014995],
[7.602408, 49.014995],
[5.644711, 46.775054],
]],
"type": "Polygon"
})
),
'properties_title': 'My item 2',
'properties_start_datetime': fromisoformat('2020-10-28T13:05:10Z'),
'properties_end_datetime': fromisoformat('2020-10-28T14:05:10Z')
},
'item-invalid': {
'name': 'item invalid name',
'geometry': {
"coordinates": [[
[10000000, 46.775054],
[5.644711, 48.014995],
[6.602408, 48.014995],
[7.602408, 444444444],
[5.644711, 46.775054],
]],
"type": "Polygon"
},
'properties_title': [23, 56],
'properties_start_datetime': 'not a datetime',
},
'item-invalid-link': {
'name': 'item-invalid-link',
'geometry': {
"coordinates": [[
[5.644711, 46.775054],
[5.644711, 48.014995],
[6.602408, 48.014995],
[7.602408, 49.014995],
[5.644711, 46.775054],
]],
"type": "Polygon"
},
'properties': {
'datetime': fromisoformat('2020-10-28T13:05:10Z')
},
'links': links_invalid.values()
},
'item-switzerland': {
'name': 'item-switzerland',
'geometry': geometries['switzerland'],
'properties_datetime': fromisoformat('2020-10-28T13:05:10Z')
},
'item-switzerland-west': {
'name': 'item-switzerland-west',
'geometry': geometries['switzerland-west'],
'properties_datetime': fromisoformat('2020-10-28T13:05:10Z')
},
'item-switzerland-east': {
'name': 'item-switzerland-east',
'geometry': geometries['switzerland-east'],
'properties_datetime': fromisoformat('2020-10-28T13:05:10Z')
},
'item-switzerland-north': {
'name': 'item-switzerland-north',
'geometry': geometries['switzerland-north'],
'properties_datetime': fromisoformat('2020-10-28T13:05:10Z')
},
'item-switzerland-south': {
'name': 'item-switzerland-south',
'geometry': geometries['switzerland-south'],
'properties_datetime': fromisoformat('2020-10-28T13:05:10Z')
},
'item-paris': {
'name': 'item-paris',
'geometry': geometries['paris'],
'properties_datetime': fromisoformat('2020-10-28T13:05:10Z')
},
'item-covers-switzerland': {
        'name': 'item-covers-switzerland',
'geometry': geometries['covers-switzerland'],
'properties_datetime': fromisoformat('2020-10-28T13:05:10Z')
}
}
|
StarcoderdataPython
|
36048
|
import os
from .abstract_command import AbstractCommand
from ..services.state_utils import StateUtils
from ..services.state import StateHolder
from ..services.command_handler import CommandHandler
from ..services.console_logger import ColorPrint
class Start(AbstractCommand):
command = ["start", "up"]
args = ["[<project/plan>]"]
args_descriptions = {"[<project/plan>]": "Name of the project in the catalog and/or name of the project's plan"}
description = "Run: 'poco start nginx/default' or 'poco up nginx/default' to start nginx project (docker, helm " \
"or kubernetes) with the default plan."
run_command = "start"
need_checkout = True
def prepare_states(self):
StateUtils.calculate_name_and_work_dir()
StateUtils.prepare("compose_handler")
def resolve_dependencies(self):
if StateHolder.catalog_element is not None and not StateUtils.check_variable('repository'):
ColorPrint.exit_after_print_messages(message="Repository not found for: " + str(StateHolder.name))
self.check_poco_file()
def execute(self):
if self.need_checkout:
StateHolder.compose_handler.run_checkouts()
CommandHandler().run(self.run_command)
if hasattr(self, "end_message"):
ColorPrint.print_info(getattr(self, "end_message"))
@staticmethod
def check_poco_file():
if not StateUtils.check_variable('poco_file'):
poco_file = str(StateHolder.repository.target_dir if StateHolder.repository is not None
else os.getcwd()) + '/poco.yml'
ColorPrint.print_error(message="Poco file not found: " + poco_file)
ColorPrint.exit_after_print_messages(message="Use 'poco init " + StateHolder.name +
"', that will generate a default poco file for you",
msg_type="warn")
|
StarcoderdataPython
|
3227373
|
<filename>traffic/data/eurocontrol/ddr/freeroute.py
import re
from functools import lru_cache
from io import StringIO
from pathlib import Path
from typing import Any, Set, Tuple
import geopandas as gpd
import pandas as pd
from shapely.geometry import MultiPoint
from shapely.ops import unary_union
from ....data import nm_navaids
from .airspaces import NMAirspaceParser
def parse_coordinates(elt: str) -> Tuple[float, float]:
pattern = r"([N,S])(\d{4}|\d{6})(.\d*)?([E,W])(\d{5}|\d{7})(.\d*)?$"
x = re.match(pattern, elt)
assert x is not None, elt
lat_, lat_sign = x.group(2), 1 if x.group(1) == "N" else -1
lon_, lon_sign = x.group(5), 1 if x.group(4) == "E" else -1
lat_ = lat_.ljust(6, "0")
lon_ = lon_.ljust(7, "0")
lat = lat_sign * (
int(lat_[:2]) + int(lat_[2:4]) / 60 + int(lat_[4:]) / 3600
)
lon = lon_sign * (
int(lon_[:3]) + int(lon_[3:5]) / 60 + int(lon_[5:]) / 3600
)
return (lat, lon)
class NMFreeRouteParser(NMAirspaceParser):
def init_cache(self) -> None:
msg = f"Edit file {self.config_file} with NM directory"
if self.nm_path is None:
raise RuntimeError(msg)
are_file = next(self.nm_path.glob("Free_Route_*.are"), None)
if are_file is None:
raise RuntimeError(
f"No Free_Route_*.are file found in {self.nm_path}"
)
self.read_are(are_file)
sls_file = next(self.nm_path.glob("Free_Route_*.sls"), None)
if sls_file is None:
raise RuntimeError(
f"No Free_Route_*.sls file found in {self.nm_path}"
)
self.read_sls(sls_file)
self.initialized = True
self.fra = gpd.GeoDataFrame.from_records(
[
{"FRA": k, "geometry": self[k].shape} # type: ignore
for k in self.elements.keys()
]
)
frp_file = next(self.nm_path.glob("Free_Route_*.frp"), None)
if frp_file is None:
raise RuntimeError(
f"No Free_Route_*.frp file found in {self.nm_path}"
)
self.read_frp(frp_file)
def read_frp(self, filename: Path) -> None:
area = unary_union(self.fra.geometry)
west, south, east, north = area.bounds
subset = nm_navaids.extent((west, east, south, north))
assert subset is not None
coords = subset.data[["longitude", "latitude"]].values
europoints = subset.data.merge(
pd.DataFrame(
[
list(x.coords[0])
for x in area.intersection(MultiPoint(coords)).geoms
],
columns=["longitude", "latitude"],
)
)
df = pd.read_csv(StringIO(filename.read_text()), header=None)
df_ = (
df[0]
.str.replace(r"\s+", " ", regex=True)
.str.split(" ", expand=True)
.rename(columns={0: "FRA", 1: "type", 2: "name"})
)
a = (
df_.query('type in ["AD", "A", "D"]')
.dropna(axis=1, how="all")
.iloc[:, 3:]
.fillna("")
.sum(axis=1)
.str.replace(r"(\w{4})", r"\1,", regex=True)
.str[:-1]
.str.split(",")
)
tab = (
df_.query('type not in ["AD", "A", "D"]')
.dropna(axis=1, how="all")
.rename(columns={3: "latitude", 4: "longitude"})
)
# Part 1: When coordinates are in the file, decode them
coords = (
tab.query("latitude.notnull()")[["latitude", "longitude"]]
.sum(axis=1)
.apply(parse_coordinates)
)
decode_coords = tab.query("latitude.notnull()").assign(
latitude=coords.str[0], longitude=coords.str[1]
)
# Part 2: Propagate decoded coordinates (avoid slight inconsistencies)
propagate_coords = (
tab.query("latitude.isnull() and name in @decode_coords.name")
.drop(columns=["latitude", "longitude"])
.merge(
decode_coords[
["name", "latitude", "longitude"]
].drop_duplicates(),
on="name",
)
)
# Part 3: Unknown coordinates
unknown_coords = (
tab.query("latitude.isnull() and name not in @decode_coords.name")
.drop(columns=["latitude", "longitude"])
.merge(europoints.drop(columns=["type", "description"]), on="name")
)
# Part 4: Airport connections
airport_coords = pd.concat(
[
df_.query('type in ["AD", "A", "D"]').iloc[:, :3],
a.rename("airport"),
],
axis=1,
)
propagate_airports = airport_coords.merge(
decode_coords[["name", "latitude", "longitude"]].drop_duplicates(),
on=["name"],
).explode("airport")
unknown_airports = (
airport_coords.query("name not in @propagate_airports.name").merge(
europoints.drop(columns=["type", "description"]), on="name"
)
).explode("airport")
self.frp = pd.concat(
[
decode_coords,
propagate_coords,
unknown_coords,
propagate_airports,
unknown_airports,
]
)
def __getattr__(self, attr: str) -> Any:
if attr in ["fra", "frp"]:
self.init_cache()
return getattr(self, attr)
raise AttributeError(attr)
@lru_cache()
def _ipython_key_completions_(self) -> Set[str]:
return {*self.elements.keys()}
|
StarcoderdataPython
|
3316767
|
from locust import HttpUser, task, between
class BookInfoUser(HttpUser):
wait_time = between(5, 15)
@task(1)
def productpage(self):
self.client.get("/productpage")
|
StarcoderdataPython
|
154836
|
# ~*~ utf-8 ~*~
import sys
sys.stdin = open("sum.in") # Закомментируйте эту строку для ввода с клавиатуры
sys.stdout = open("sum.out", "w") # Закомментрируйте эту строку для вывода на экран
A, B = int(input()), int(input()) # Вводим 2 целых A и B (они в разных строках)
print (A + B)
|
StarcoderdataPython
|
3300406
|
"""
chop_map.py
A collection of methods for chopping a map around an atomic model.
The map can be chopped in three different ways:
- using a cube around the atomic residue with hard edges
- using a certain radius around the atomic residue with hard edges
- using a certain radius around the atomic residue with soft edges
Copyright [2013] EMBL - European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the
"License"); you may not use this file except in
compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = '2018-05-29'
import os
import math
from random import randint
import numpy as np
from threed_strudel.parse.map_parser import MapParser
import threed_strudel.utils.bio_utils as bu
import psutil
p = psutil.Process()
class ChopMap:
"""
Class for chopping map around an atomic model
"""
# def __init__(self, in_model=None, in_map=None):
# self.model = in_model
# self.map = in_map
# self.sett_model_map()
#
# def sett_model_map(self):
# """
# Loads the map and model
# """
# if self.map is not None:
# self.map = MapParser(self.map)
# if self.model is not None:
# self.model = bio_utils.load_structure(self.model)
def chop_cube_list(self, in_model_list, in_map, cube_padding, zero_origin=True):
"""
Chop map using a cubic box around the model
:param in_model_list: list of biopython model objects
:param in_map: path to the input map or strudel map object
:param cube_padding: distance from any cube edge to the nearest atom
:param zero_origin: boolean,
:return: list of map objects, list of translation matrices
"""
if isinstance(in_map, MapParser):
pass
elif os.path.exists(in_map):
in_map = MapParser(in_map)
else:
raise Exception('in_map should be MapParser object or a map file path not {}'.format(type(in_map)))
shifts_list = []
chopped_map_obj_list = []
for model in in_model_list:
out_map, shifts = self.chop_cube(model, in_map, cube_padding, zero_origin)
chopped_map_obj_list.append(out_map)
shifts_list.append(shifts)
return chopped_map_obj_list, shifts_list
@staticmethod
def chop_cube(in_model, in_map, cube_padding=5, zero_origin=True, out_map_path=None):
"""
Chop map using a cubic box around the model
:param in_model: biopython model object
:param in_map: path to the input map or strudel map object
:param cube_padding: distance from any cube edge to the nearest atom
:param out_map_path: output map path
:param zero_origin: boolean,
:return: map object, translation matrix
"""
if isinstance(in_map, MapParser):
pass
elif os.path.exists(in_map):
in_map = MapParser(in_map)
else:
raise Exception('in_map should be MapParser object or a map file path not {}'.format(type(in_map)))
if isinstance(in_model, str):
in_model = bu.load_structure(in_model)
atoms_coord = []
for atom in in_model.get_atoms():
atoms_coord.append(atom.coord)
delta = round(cube_padding / in_map.voxel_size[0])
# Get the indices in the map grid that correspond to the atom coordinates
x_indices = []
y_indices = []
z_indices = []
for atom in atoms_coord:
x_index, y_index, z_index = in_map.coord_to_index(atom)
if any(i < 0 for i in (x_index, y_index, z_index)):
print("Atom outside map")
else:
x_indices.append(x_index)
y_indices.append(y_index)
z_indices.append(z_index)
# Find the voxel located in the middle of the atomic residue
# and the maximum molecule size in grid points
deltas = []
minim = min(x_indices)
dx = max(x_indices) - minim
middle_x = int(round(dx / 2 + minim))
deltas.append(dx)
minim = min(y_indices)
dy = max(y_indices) - minim
middle_y = int(round(dy / 2 + minim))
deltas.append(dy)
minim = min(z_indices)
dz = max(z_indices) - minim
middle_z = int(round(dz / 2 + minim))
deltas.append(dz)
max_d = max(deltas)
# Calculate the size of the cube
radius = int(round(max_d / 2 + delta))
new_dimension = radius * 2
# Ensure that the new grid size has no prime numbers greater than 19
# new_dimension = func.find_good_grid([new_dimension, new_dimension, new_dimension])[0]
# print(new_dimension)
# hard_radius = int(new_dimension / 2)
# Create a numpy array to store the chopped voxels
new_data = np.zeros((new_dimension, new_dimension, new_dimension), dtype='float32')
# Assign voxel values
for x in range(new_dimension):
for y in range(new_dimension):
for z in range(new_dimension):
try:
new_data[x, y, z] = in_map.data[x + middle_x - radius, y + middle_y - radius,
z + middle_z - radius]
except IndexError:
pass
# Calculate the new cell size
voxel_size = in_map.voxel_size
new_cell = (round(new_dimension * voxel_size[0], 3),
round(new_dimension * voxel_size[1], 3),
round(new_dimension * voxel_size[2], 3))
# Calculate the shifts applied to the chopped map
if not zero_origin:
shifts = np.array([0, 0, 0])
else:
index_shifts = in_map.index_shifts
x_shift = (middle_z - radius) * voxel_size[0] + index_shifts[0] * voxel_size[0]
y_shift = (middle_y - radius) * voxel_size[1] + index_shifts[1] * voxel_size[1]
z_shift = (middle_x - radius) * voxel_size[2] + index_shifts[2] * voxel_size[2]
shifts = np.array([x_shift, y_shift, z_shift])
new_data[np.isnan(new_data)] = 0
out_map = MapParser('')
out_map.data = new_data
out_map.cell = new_cell
out_map.cellb = in_map.cellb
out_map.voxel_size = voxel_size
n_st = in_map.n_start
if not zero_origin:
origin = ((middle_z - radius + n_st[0]) * out_map.voxel_size[0] + in_map.origin[0],
(middle_y - radius + n_st[1]) * out_map.voxel_size[1] + in_map.origin[1],
(middle_x - radius + n_st[2]) * out_map.voxel_size[2] + in_map.origin[2])
out_map.set_origin(origin)
out_map.n_start = (0, 0, 0)
if out_map_path is not None:
out_map.write_map(out_map_path)
return out_map, shifts
@staticmethod
def check_cut_vals(model, in_map, out_map, shifts):
"""
Checks the map values at 5 atoms positions
:param model:
:param in_map:
:param out_map:
:param shifts:
:return:
"""
atoms = [a for a in model.get_atoms()]
in_coords = []
out_coords = []
for i in range(5):
n = randint(0, len(atoms) - 1)
try:
in_coords.append(atoms[n].coord)
except IndexError:
pass
for coord in in_coords:
tmp_coord = list(coord)
for i in range(3):
tmp_coord[i] = tmp_coord[i] - shifts[i]
out_coords.append(tmp_coord)
in_vals = []
out_vals = []
for i in range(len(in_coords)):
in_indices = in_map.coord_to_index_int(in_coords[i])
out_indices = out_map.coord_to_index_int(out_coords[i])
neg_indices = sum(n < 0 for n in in_indices + out_indices)
if neg_indices > 0:
in_vals.append(None)
out_vals.append(None)
else:
try:
in_vals.append(round(in_map.data[in_indices], 5))
except IndexError:
in_vals.append(None)
try:
out_vals.append(round(out_map.data[out_indices], 5))
except IndexError:
out_vals.append(None)
if in_vals == out_vals and not all([v is None for v in in_vals]):
return True
else:
print('IN', in_vals)
print('OU', out_vals)
return False
@staticmethod
def chop_soft_radius(model, in_map, out_map=None, hard_radius=3, soft_radius=2, mask_path=None):
"""
Chop map using a soft mask with a given radius (hard_radius + soft_radius)
around the atomic residue. A cosine function is used to create the soft mask.
:param mask_path: mask output path
:param model: biopython model object
:param in_map: path to the input map or strudel map object
:param out_map: output map path
:param hard_radius: hard radius
:param soft_radius: soft radius, cosine function
:return: strudel map object (also written to out_map if a path is given)
"""
# Get atom coordinates
atoms_coord = []
for atom in model.get_atoms():
atoms_coord.append(atom.coord)
if isinstance(in_map, MapParser):
pass
elif os.path.exists(in_map):
in_map = MapParser(in_map)
else:
raise Exception('in_map should be MapParser object or a map file path not {}'.format(type(in_map)))
voxel_size = in_map.voxel_size
aver_voxel_size = sum(voxel_size) / 3
delta1 = hard_radius
delta2 = hard_radius + soft_radius
# Create a numpy array for mask
shape = in_map.data.shape
mask = np.zeros(shape, dtype='float32')
for coord in atoms_coord:
x_index, y_index, z_index = in_map.coord_to_index_int(coord)
rx = int(round(delta2 / voxel_size[0]))
ry = int(round(delta2 / voxel_size[1]))
rz = int(round(delta2 / voxel_size[2]))
for x in range(x_index - rx, x_index + rx):
for y in range(y_index - ry, y_index + ry):
for z in range(z_index - rz, z_index + rz):
# Calculate the distance between the current atom and the current voxel
d = aver_voxel_size * math.sqrt((x - x_index) ** 2 + (y - y_index) ** 2 + (z - z_index) ** 2)
# Assign mask values based on the distance to the atoms
if d <= delta1:
try:
mask[x, y, z] = 1
except IndexError:
pass
elif delta1 < d < delta2:
try:
mask[x, y, z] += (math.cos((math.pi / soft_radius) * (d - delta1)) + 1) / 2
# if intensity value became > 1 it is set to 1
if mask[x, y, z] > 1:
mask[x, y, z] = 1
except IndexError:
pass
# Apply the mask to the map data
in_map.data = in_map.data * 1000
final = (mask * (in_map.data))
# print('final', final[3,3,3])
# print(final[1][1][:])
# final[final < 0] = 0
# print(final[1][1][:])
# import sys
# np.set_printoptions(threshold=sys.maxsize)
# print('out\n\n\n')
# # print(in_map.data)
# print('Inp max', np.max(in_map.data))
# # print(final)
# print('out max', np.max(final))
out_map_obj = MapParser('')
out_map_obj.copy_header(in_map)
out_map_obj.data = final
if mask_path is not None:
mask_ob = MapParser('')
mask_ob.copy_header(in_map)
mask_ob.data = mask
mask_ob.write_map(mask_path)
if out_map is not None:
out_map_obj.write_map(out_map)
return out_map_obj
@staticmethod
def find_near_atoms(atom, model, distance=6):
"""
Finds all atoms which are closer than "distance" from the target atom
:param atom: target atom (center of search)
:param model: biopython structure object
:param distance: search radius
:return: list of biopython atom objects
"""
close = []
# print("Atom_parent", atom.parent.id[1], type(atom.parent.id[1]))
for atom1 in model.get_atoms():
if atom1.parent.id != atom.parent.id or atom1.parent.parent.id != atom.parent.parent.id:
d = atom1 - atom
if d < distance:
close.append(atom1)
close = list(set(close))
# Filter out flanking residues BB atoms
# close = [a for a in close if a.parent.id[1] not in [atom.parent.id[1]-1, atom.parent.id[1]+1]]
filtered = []
for a in close:
if a.get_name() not in ['N', 'C', 'O', 'CA']:
filtered.append(a)
elif a.parent.id[1] not in [atom.parent.id[1] - 1, atom.parent.id[1] + 1]:
filtered.append(a)
return filtered
@staticmethod
def print_mem():
mem = psutil.Process(p.pid).memory_info()
return mem.rss / 1000000
def chop_soft_radius_watershed_slow(self, model, in_map, whole_model, shifts=None, out_map=None,
radius=2, soft_radius=1, mask_path=None):
"""
TODO: requires more testing
Chop map using a soft mask with a given radius (radius + soft_radius) around an amino acid residue.
A cosine function is used to create the soft mask. Similar to chop_soft_radius but avoids
cutting neighboring residues side chains map. To do so, it finds the closest atom
(which does not belong to the guide model) for each atom in the guide model and
tries to separate the map between them.
It can be used to chop map around bigger models but may take long for big objects.
:param whole_model: biopython model object. The complete model of which the guide model is a part of
:param model: biopython atomic residue object
:param in_map: path to the input map or strudel map object
:param out_map: output path for the chopped map
:param radius: hard radius
:param soft_radius: soft radius (a cosine function is applied for it)
:return out_map_obj: map object
"""
# Get atom coordinates
if shifts is None:
shifts = np.array([0, 0, 0])
if isinstance(in_map, MapParser):
pass
elif os.path.exists(in_map):
in_map = MapParser(in_map)
else:
raise Exception('in_map should be MapParser object or a map file path not {}'.format(type(in_map)))
voxel_size = in_map.voxel_size
aver_voxel_size = sum(voxel_size) / 3
# Create a numpy array for mask
shape = in_map.data.shape
mask = np.zeros(shape, dtype='float32')
r = int(round((radius + soft_radius) / aver_voxel_size))
for atom in model.get_atoms():
xyz = in_map.coord_to_index(atom.coord - shifts)
xyz_int = in_map.coord_to_index_int(atom.coord - shifts)
near_atoms = []
if atom.get_name() not in ['C', 'CA']:
near_atoms = self.find_near_atoms(atom, whole_model, distance=(radius + soft_radius) * 2)
for x in range(xyz_int[0] - r, xyz_int[0] + r):
for y in range(xyz_int[1] - r, xyz_int[1] + r):
for z in range(xyz_int[2] - r, xyz_int[2] + r):
near_ds = [100]
for n_atom in near_atoms:
n_xyz = in_map.coord_to_index(n_atom.coord - shifts)
dn = aver_voxel_size * math.sqrt((x - n_xyz[0]) ** 2 + (y - n_xyz[1]) ** 2
+ (z - n_xyz[2]) ** 2)
near_ds.append(dn)
dn = min(near_ds)
# Calculate the distance between the current atom and the current voxel
d = aver_voxel_size * math.sqrt((x - xyz[0]) ** 2 + (y - xyz[1]) ** 2 + (z - xyz[2]) ** 2)
if d > dn * 1.3:
continue
elif dn < radius + soft_radius:
delta2 = min((d + dn) * 0.65, radius + soft_radius)
delta1 = delta2 - soft_radius
else:
delta2 = radius + soft_radius
delta1 = radius
# Assign mask values based on the distance to the atoms
if d < delta1:
try:
mask[x, y, z] = 1
except IndexError:
pass
elif delta1 < d < delta2:
try:
mask[x, y, z] += (math.cos((math.pi / soft_radius) * (d - delta1)) + 1) / 2
except IndexError:
pass
mask[mask > 1] = 1
final = (mask * in_map.data)
out_map_obj = MapParser('')
out_map_obj.copy_header(in_map)
out_map_obj.data = final
if mask_path is not None:
mask_ob = MapParser('')
mask_ob.copy_header(in_map)
mask_ob.data = mask
mask_ob.write_map(mask_path)
if out_map is not None:
out_map_obj.write_map(out_map)
return out_map_obj
def chop_soft_radius_watershed(self, model, in_map, whole_model, shifts=None, out_map=None,
radius=2, soft_radius=1, asymmetric_delta=0.5, mask_path=None):
"""
Chop map using a soft mask with a given radius (radius + soft_radius) around an amino acid residue.
A cosine function is used to create the soft mask. Similar to chop_soft_radius but avoids
cutting neighboring residues side chains map. To do so, it creates two masks: a soft edge mask (var: mask)
around the guide model and another soft edge mask (var: outer_mask) around the atoms which are near the guide
model atoms (d < hard_radius + soft_radius). The final mask is given by: mask = (mask - outer_mask * mask).
It can be used to chop map around bigger models but may take long for big objects.
:param whole_model: biopython model object. The complete model of which the guide model is a part of
:param shifts: between model and map
:param mask_path: mask output path
:param model: biopython atomic residue object
:param in_map: path to the input map or strudel map object
:param out_map: output path for the chopped map
:param radius: hard radius
:param soft_radius: soft radius
:param asymmetric_delta: amount subtracted from radius to get the hard radius used for the neighbouring atoms' mask
"""
# r1 - hard radius for near atoms
r1 = radius - asymmetric_delta
# r2 - soft radius for near atoms
r2 = r1 + soft_radius
# Get atom coordinates
if shifts is None:
shifts = np.array([0, 0, 0])
if isinstance(in_map, MapParser):
pass
elif os.path.exists(in_map):
in_map = MapParser(in_map)
else:
raise Exception('in_map should be MapParser object or a map file path not {}'.format(type(in_map)))
voxel_size = in_map.voxel_size
aver_voxel_size = sum(voxel_size) / 3
delta1 = radius
delta2 = radius + soft_radius
# Create a numpy array for mask
shape = in_map.data.shape
mask = np.zeros(shape, dtype='float32')
outer_mask = np.zeros(shape, dtype='float32')
r = int(round((radius + soft_radius) / aver_voxel_size))
near_atoms = []
import time
near_time = 0
for atom in model.get_atoms():
xyz = in_map.coord_to_index(atom.coord - shifts)
xyz_int = in_map.coord_to_index_int(atom.coord - shifts)
t = time.time()
if atom.get_name() not in ['C', 'CA', 'N', 'O']:
# near_atoms += self.find_near_atoms(atom, whole_model, distance=(radius + soft_radius) * 2)
near_atoms += self.find_near_atoms(atom, whole_model, distance=4)
near_time += time.time() - t
for x in range(xyz_int[0] - r, xyz_int[0] + r):
for y in range(xyz_int[1] - r, xyz_int[1] + r):
for z in range(xyz_int[2] - r, xyz_int[2] + r):
# Calculate the distance between the current atom and the current voxel
d = aver_voxel_size * math.sqrt((x - xyz[0]) ** 2 + (y - xyz[1]) ** 2 + (z - xyz[2]) ** 2)
if d <= delta1:
try:
mask[x, y, z] = 1
except IndexError:
pass
elif delta1 < d < delta2:
try:
mask[x, y, z] += (math.cos((math.pi / soft_radius) * (d - delta1)) + 1) / 2
# if intensity value became > 1 it is set to 1
if mask[x, y, z] > 1:
mask[x, y, z] = 1
except IndexError:
pass
mask[mask > 1] = 1
near_atoms = list(set(near_atoms))
# print('NEAR', len(near_atoms), near_atoms)
# print("NEAR time ", near_time)
for atom in near_atoms:
xyz = in_map.coord_to_index(atom.coord - shifts)
xyz_int = in_map.coord_to_index_int(atom.coord - shifts)
for x in range(xyz_int[0] - r, xyz_int[0] + r):
for y in range(xyz_int[1] - r, xyz_int[1] + r):
for z in range(xyz_int[2] - r, xyz_int[2] + r):
# Calculate the distance between the current atom and the current voxel
d = aver_voxel_size * math.sqrt((x - xyz[0]) ** 2 + (y - xyz[1]) ** 2 + (z - xyz[2]) ** 2)
if d <= r1:
try:
outer_mask[x, y, z] = 1
except IndexError:
pass
elif r1 < d < r2:
try:
outer_mask[x, y, z] += (math.cos((math.pi / soft_radius) * (d - r1)) + 1) / 2
# if intensity value became > 1 it is set to 1
if outer_mask[x, y, z] > 1:
outer_mask[x, y, z] = 1
except IndexError:
pass
outer_mask[outer_mask > 1] = 1
outer_mask[mask == 0] = 0
mask = (mask - outer_mask * mask)
final = (mask * in_map.data)
out_map_obj = MapParser('')
out_map_obj.copy_header(in_map)
out_map_obj.data = final
if mask_path is not None:
mask_ob = MapParser('')
mask_ob.copy_header(in_map)
mask_ob.data = mask
mask_ob.write_map(mask_path)
# mask_ob = MapParser('')
# mask_ob.copy_header(in_map)
# mask_ob.data = mask
#
# outer_mask_ob = MapParser('')
# outer_mask_ob.copy_header(in_map)
# outer_mask_ob.data = outer_mask
if out_map is not None:
out_map_obj.write_map(out_map)
return out_map_obj # , mask_ob, outer_mask_ob
@staticmethod
def chop_hard_radius(model, in_map, out_map, radius=3, mask_path=None):
"""
Chop map using a hard mask with a given hard_radius around the atomic residue.
:param model: biopython atomic residue object
:param in_map: path to the input map or strudel map object
:param out_map: output path for the chopped map
:param radius: mask radius around atoms
:param mask_path: mask output path
:return out_map_obj: masked map object
"""
# Get atom coordinates
atoms_coord = []
for atom in model.get_atoms():
atoms_coord.append(atom.coord)
if isinstance(in_map, MapParser):
pass
elif os.path.exists(in_map):
in_map = MapParser(in_map)
else:
raise Exception('in_map should be MapParser object or a map file path not {}'.format(type(in_map)))
voxel_size = in_map.voxel_size
aver_voxel_size = sum(voxel_size) / 3
shape = in_map.data.shape
mask = np.zeros(shape, dtype='float32')
for coord in atoms_coord:
x_index, y_index, z_index = in_map.coord_to_index_int(coord)
rx = int(round(radius / voxel_size[0]))
ry = int(round(radius / voxel_size[1]))
rz = int(round(radius / voxel_size[2]))
for x in range(x_index - rx, x_index + rx):
for y in range(y_index - ry, y_index + ry):
for z in range(z_index - rz, z_index + rz):
# Calculate the distance between the current atom and the current voxel
d = aver_voxel_size * math.sqrt((x - x_index) ** 2 + (y - y_index) ** 2
+ (z - z_index) ** 2)
# Assign mask values based on the distance to the atoms
if d < radius:
mask[x, y, z] = 1
# Apply the mask to the map data
final = (mask * in_map.data)
# Save the chopped map
if mask_path is not None:
mask_ob = MapParser('')
mask_ob.copy_header(in_map)
mask_ob.data = mask
mask_ob.write_map(mask_path)
out_map_obj = MapParser('')
out_map_obj.copy_header(in_map)
out_map_obj.data = final
if out_map is not None:
out_map_obj.write_map(out_map)
return out_map_obj
|
StarcoderdataPython
|
181704
|
<filename>challenge22.py
numbers = [9, 8, 72, 22, 21, 81, 2, 1, 11, 76, 32, 54]
def highest_num(numbers_in):
highest = numbers_in[0]
for count in range(len(numbers_in)):
if highest < numbers_in[count]:
highest = numbers_in[count]
return highest
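# Note: the built-in max(numbers) would give the same result; the explicit
# loop above is kept as the exercise solution.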
highest_out = highest_num(numbers)
print("The highest number is", highest_out)
|
StarcoderdataPython
|
66838
|
#!/usr/bin/env pytest
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: gdalinfo.py testing
# Author: <NAME> <<EMAIL>>
#
###############################################################################
# Copyright (c) 2010, <NAME> <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import test_py_scripts
import pytest
###############################################################################
# Simple test
def test_gdalinfo_py_1():
script_path = test_py_scripts.get_py_script('gdalinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '../gcore/data/byte.tif')
assert ret.find('Driver: GTiff/GeoTIFF') != -1
###############################################################################
# Test -checksum option
def test_gdalinfo_py_2():
script_path = test_py_scripts.get_py_script('gdalinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '-checksum ../gcore/data/byte.tif')
assert ret.find('Checksum=4672') != -1
###############################################################################
# Test -nomd option
def test_gdalinfo_py_3():
script_path = test_py_scripts.get_py_script('gdalinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '../gcore/data/byte.tif')
assert ret.find('Metadata') != -1
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '-nomd ../gcore/data/byte.tif')
assert ret.find('Metadata') == -1
###############################################################################
# Test -noct option
def test_gdalinfo_py_4():
script_path = test_py_scripts.get_py_script('gdalinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '../gdrivers/data/gif/bug407.gif')
assert ret.find('0: 255,255,255,255') != -1
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '-noct ../gdrivers/data/gif/bug407.gif')
assert ret.find('0: 255,255,255,255') == -1
###############################################################################
# Test -stats option
def test_gdalinfo_py_5():
script_path = test_py_scripts.get_py_script('gdalinfo')
if script_path is None:
pytest.skip()
try:
os.remove('../gcore/data/byte.tif.aux.xml')
except OSError:
pass
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '../gcore/data/byte.tif')
assert ret.find('STATISTICS_MINIMUM=74') == -1, 'got wrong minimum.'
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '-stats ../gcore/data/byte.tif')
assert ret.find('STATISTICS_MINIMUM=74') != -1, 'got wrong minimum (2).'
# We will blow an exception if the file does not exist now!
os.remove('../gcore/data/byte.tif.aux.xml')
###############################################################################
# Test a dataset with overviews and RAT
def test_gdalinfo_py_6():
script_path = test_py_scripts.get_py_script('gdalinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '../gdrivers/data/hfa/int.img')
assert ret.find('Overviews') != -1
###############################################################################
# Test a dataset with GCPs
def test_gdalinfo_py_7():
script_path = test_py_scripts.get_py_script('gdalinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '../gcore/data/gcps.vrt')
assert ret.find('GCP Projection =') != -1
assert ret.find('PROJCS["NAD27 / UTM zone 11N"') != -1
assert ret.find('(100,100) -> (446720,3745320,0)') != -1
# Same but with -nogcps
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '-nogcp ../gcore/data/gcps.vrt')
assert ret.find('GCP Projection =') == -1
assert ret.find('PROJCS["NAD27 / UTM zone 11N"') == -1
assert ret.find('(100,100) -> (446720,3745320,0)') == -1
###############################################################################
# Test -hist option
def test_gdalinfo_py_8():
script_path = test_py_scripts.get_py_script('gdalinfo')
if script_path is None:
pytest.skip()
try:
os.remove('../gcore/data/byte.tif.aux.xml')
except OSError:
pass
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '../gcore/data/byte.tif')
assert ret.find('0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 0 0 0 0 0 0 0 0 37 0 0 0 0 0 0 0 57 0 0 0 0 0 0 0 62 0 0 0 0 0 0 0 66 0 0 0 0 0 0 0 0 72 0 0 0 0 0 0 0 31 0 0 0 0 0 0 0 24 0 0 0 0 0 0 0 12 0 0 0 0 0 0 0 0 7 0 0 0 0 0 0 0 12 0 0 0 0 0 0 0 5 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1') == -1, \
'did not expect histogram.'
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '-hist ../gcore/data/byte.tif')
assert ret.find('0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 0 0 0 0 0 0 0 0 37 0 0 0 0 0 0 0 57 0 0 0 0 0 0 0 62 0 0 0 0 0 0 0 66 0 0 0 0 0 0 0 0 72 0 0 0 0 0 0 0 31 0 0 0 0 0 0 0 24 0 0 0 0 0 0 0 12 0 0 0 0 0 0 0 0 7 0 0 0 0 0 0 0 12 0 0 0 0 0 0 0 5 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1') != -1, \
'did not get expected histogram.'
# We will blow an exception if the file does not exist now!
os.remove('../gcore/data/byte.tif.aux.xml')
###############################################################################
# Test -mdd option
def test_gdalinfo_py_9():
script_path = test_py_scripts.get_py_script('gdalinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '../gdrivers/data/nitf/fake_nsif.ntf')
assert ret.find('BLOCKA=010000001000000000') == -1, 'Got unexpected extra MD.'
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '-mdd TRE ../gdrivers/data/nitf/fake_nsif.ntf')
assert ret.find('BLOCKA=010000001000000000') != -1, 'did not get extra MD.'
###############################################################################
# Test -mm option
def test_gdalinfo_py_10():
script_path = test_py_scripts.get_py_script('gdalinfo')
if script_path is None:
pytest.skip()
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '../gcore/data/byte.tif')
assert ret.find('Computed Min/Max=74.000,255.000') == -1
ret = test_py_scripts.run_py_script(script_path, 'gdalinfo', '-mm ../gcore/data/byte.tif')
assert ret.find('Computed Min/Max=74.000,255.000') != -1
|
StarcoderdataPython
|
3382524
|
<filename>bin/genwrap.py
#!/usr/bin/env python
"""Generic wrapper.
"""
import sys
import argparse
import intgutils.basic_wrapper as basic_wrapper
def main():
"""Entry point.
"""
parser = argparse.ArgumentParser(description='Generic wrapper')
parser.add_argument('inputwcl', nargs=1, action='store')
args = parser.parse_args(sys.argv[1:])
bwrap = basic_wrapper.BasicWrapper(args.inputwcl[0])
bwrap.run_wrapper()
bwrap.write_outputwcl()
sys.exit(bwrap.get_status())
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1726165
|
<filename>gpucsl/pc/helpers.py
from math import sqrt
from scipy.stats import norm
from cupyx.scipy.special import ndtr
import numpy as np
from typing import Any, Callable, Generic, NamedTuple, TypeVar, Dict, Tuple, Set
from functools import wraps
from timeit import default_timer as timer
import logging
import networkx as nx
import colorama
import cupy as cp
T = TypeVar("T")
U = TypeVar("U")
# Use for typing (cannot inherit from NamedTuple at the same time)
# keep in sync with TimedReturn
class TimedReturnT(Generic[T]):
result: T
runtime: float
# Use for instantiation
# keep in sync with TimedReturn
class TimedReturn(NamedTuple):
result: Any
runtime: float
def timed(f: Callable[..., U]) -> Callable[..., TimedReturnT[U]]:
@wraps(f)
def wrap(*args, **kw):
start = timer()
result = f(*args, **kw)
end = timer()
duration = end - start
return TimedReturn(result, duration)
return wrap
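# Illustrative usage of @timed (the function name below is hypothetical, not
# part of this module):
#
#   @timed
#   def heavy_computation(n):
#       return sum(range(n))
#
#   res = heavy_computation(10_000)
#   print(res.result, res.runtime)
#
# The wrapped call returns a TimedReturn named tuple, keeping the original
# result and the measured runtime available separately.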
def get_gaussian_thresholds(data: np.ndarray, alpha: float = 0.05):
num_observations = data.shape[0]
number_of_levels = 50
thresholds = [0.0] * number_of_levels
for i in range(1, min(number_of_levels, num_observations - 3) - 1):
q = norm.ppf((alpha / 2), loc=0, scale=1)
d = sqrt(num_observations - i - 3)
thresholds[i - 1] = abs(q / d)
return thresholds
def init_pc_graph(data: np.ndarray):
num_variables = data.shape[1]
return np.ones((num_variables, num_variables), np.uint16)
def correlation_matrix_of(data: np.ndarray):
return np.corrcoef(data, rowvar=False)
def transform_to_pmax_cupy(d_zmin: cp.ndarray, num_samples: int) -> cp.ndarray:
# np.sqrt is only used to compute the square root of a scalar
d_intermediate_result = cp.abs(np.sqrt(num_samples - 3) * d_zmin)
d_pmax = 2 * (1 - ndtr(d_intermediate_result))
return d_pmax
# pmax interface should conform with pcalg pmax structure:
# lower triangle all -1
# main diagonal all -1
# all other entries are in [0, 1]
# for our tests, H_0 is "v_i and v_j are independent".
# If pvalue <= alpha, we reject H_0 and v_i and v_j are dependent (we keep the existing edge).
# If pvalue > alpha, H_0 holds and we delete the edge.
def postprocess_pmax_cupy(d_pmax: cp.ndarray) -> cp.ndarray:
d_pmax[
cp.tri(d_pmax.shape[0], dtype="bool")
] = -1 # graph also writes to lower triangle, fill this with -1
return d_pmax
def transform_to_pmax(zmin: np.ndarray, num_samples: int):
return 2 * norm.sf(np.abs(np.sqrt(num_samples - 3) * zmin))
# pmax interface should conform with pcalg pmax structure:
# lower triangle all -1
# main diagonal all -1
# all other entries are in [0, 1]
# for our tests, H_0 is "v_i and v_j are independent".
# If pvalue <= alpha, we reject H_0 and v_i and v_j are dependent (we keep the existing edge).
# If pvalue > alpha, H_0 holds and we delete the edge.
def postprocess_pmax(pmax: np.ndarray) -> np.ndarray:
pmax[
np.tril_indices(pmax.shape[0])
] = -1 # graph also writes to lower triangle, fill this with -1
return pmax
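# Illustrative combination of the two helpers above (variable names are
# hypothetical): given the minimum z-values from the CI tests and the sample
# count,
#   pmax = postprocess_pmax(transform_to_pmax(zmin, num_samples))
# first maps z-values to two-sided p-values, then overwrites the lower
# triangle (and diagonal) with -1 to match the pcalg pmax convention.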
def get_max_neighbor_count(
d_compacted_skeleton: cp.ndarray, columns_per_device: int, device_index: int
) -> int:
start_index = columns_per_device * device_index
end_index = start_index + columns_per_device
# If the number of devices is close to the number of the variables, some devices
# can stay without columns assigned to them; return 0 in that case
max_neighbors_count = (
d_compacted_skeleton[start_index:end_index, 0].max().get()
if d_compacted_skeleton[start_index:end_index, 0].size > 0
else 0
)
return max_neighbors_count
def log(message):
logging.info(f"{colorama.Fore.GREEN}{message}{colorama.Style.RESET_ALL}")
def log_time(message, value, unit="s", value_spacing=40):
spacing_length = value_spacing - len(message)
unit_message = ""
if unit:
spacing_length -= len(unit) + 2
unit_message = f"({unit})"
spacing = " " * spacing_length
compacted_message = message + spacing + unit_message + ": " + f"{(value):1.6f}"
log(compacted_message)
class PCResult(NamedTuple):
directed_graph: nx.DiGraph
separation_sets: Dict[Tuple[int, int], Set[int]]
pmax: np.ndarray
discover_skeleton_runtime: float
edge_orientation_runtime: float
discover_skeleton_kernel_runtime: float
class SkeletonResult(NamedTuple):
skeleton: np.ndarray
seperation_sets: Dict[Tuple[int, int], Set[int]]
pmax: np.ndarray
discover_skeleton_kernel_time: float
|
StarcoderdataPython
|
3285104
|
'''
1 put flags on the field, bots have to each collect
all flags.
2 start with one flag
3 Add in some quality-of-life sensing function
Might want to be able to ask if a tile is any sort of
conveyor and then be able to ask where you would move to
next if conveyed by it.
4 write a basic AI
5 add 3 more flags. flags then display also on the bot like
they are being carried.
6 revise basic AI
7 test everything
8 make a proper level
9
10 repair station that does not act as a checkpoint
11 lazers? repairs? health bars? I love this idea.
Falling in a hole cuts your health in half and sends
you back to check point. Laser blast reduces your health
a little. Finishing a turn at a checkpoint or repair
station increases your health by twice as much as a
laser blast hurts it.
12
13 boost moves? other special moves
Note: more than one player can occupy a checkpoint by
being reset to it. I'm ok with this for now.
'''
import pygame, math, random
fps = 20 #Frames per second
map_choice = 'maps/map01.txt'
#If True then robots will move automatically
#as fast as the fps allows. If False, then spacebar
#can be pressed to advance the game.
play_continuous = False
pygame.init()
initial_width = 144
initial_height = 144
scaling = 0.5
font = pygame.font.SysFont('Arial', int(48*scaling))
tile_width = int(initial_width*scaling)
tile_height = int(initial_height*scaling)
black = (0,0,0)
white = (255,255,255)
red = (255,0,0)
yellow = (255,255,0)
green = (0,255,0)
blue = (0,0,255)
#Control how many times player blinks
blink_count = 6 #This must be even
direction_list = ['north', 'east', 'south', 'west']
step_forward = False
def getDirectionIndex(direction):
return direction_list.index(direction)
def getPlayerAt(players, row, col):
'''Returns player at given location or returns None.'''
for p in players:
if p.row == row and p.col == col:
return p
return None
def outOfBounds(board,row,col):
return row<0 or col<0 or row>=len(board) or col>=len(board[0])
def canMove(board, players, row, col, direction):
#Robots can always move out of bounds
if outOfBounds(board, row, col):
return True
#Check indicated direction
if direction == 0: #north
#Make sure there is no wall to the north
if board[row][col] != '1wu ' and board[row][col] != '2wur' and board[row][col] != '2wlu':
#If there is a player to the north...
if getPlayerAt(players, row-1, col) != None:
#...then recursively check that it can be moved north
return canMove(board, players, row-1, col, direction)
else:
return True
elif direction == 1: #east
#Make sure there is no wall to the east
if board[row][col] != '1wr ' and board[row][col] != '2wur' and board[row][col] != '2wrd':
#If there is a player to the east...
if getPlayerAt(players, row, col+1) != None:
#...then recursively check that it can be moved east
return canMove(board, players, row, col+1, direction)
else:
return True
elif direction == 2: #south
#Make sure there is no wall to the south
if board[row][col] != '1wd ' and board[row][col] != '2wrd' and board[row][col] != '2wdl':
#If there is a player to the south...
if getPlayerAt(players, row+1, col) != None:
#...then recursively check that it can be moved south
return canMove(board, players, row+1, col, direction)
else:
return True
elif direction == 3: #west
#Make sure there is no wall to the west
if board[row][col] != '1wl ' and board[row][col] != '2wdl' and board[row][col] != '2wlu':
#If there is a player to the west...
if getPlayerAt(players, row, col-1) != None:
#...then recursively check that it can be moved west
return canMove(board, players, row, col-1, direction)
else:
return True
else:
print('ERROR This should be impossible.')
exit()
return False
class Robot:
def __init__(self, screen, row, col, color):
self.screen = screen
self.row = row
self.col = col
self.color = color
self.sides = 3
self.radius = 40*scaling
self.direction = 0
self.checkpoint = (row,col)
self.next_action = ''
self.text = font.render('', True,yellow)
self.blink = False
self.flags = []
def doAction(self, board, players, flags, action):
if action == 'left':
self.rotateLeft()
elif action == 'right':
self.rotateRight()
elif action == 'move':
self.move(board, players, flags)
self.next_action = ''
self.text = font.render('', True,yellow)
def chooseAction(self, board, players):
'''
TODO sensing
getPlayerAt(players, row, col)
outOfBounds(board,row,col)
canMove(board, players, row, col, direction)
board[row][col] == 'chek'
'empt' #empty square. safe
'hole' #Hole. Don't fall in
'1wl ' #1 wall left
'1wu ' #1 wall up
'1wr ' #1 wall right
'1wd ' #1 wall down
'chek' #Checkpoint
'cv1d' #Convey one down
'cv1l' #Convey one left
'cv1u' #Convey one up
'cv1r' #Convey one right
'''
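# Illustrative (not enabled) use of the sensing helpers listed above, e.g.
# only choose 'move' when the square ahead is actually reachable:
#   if canMove(board, players, self.row, self.col, self.direction):
#       self.next_action = 'move'
# The policy below stays purely random.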
r = random.randint(0,3)
if r == 0:
self.next_action = 'left'
elif r == 1:
self.next_action = 'right'
elif r == 2:
self.next_action = 'move'
else:
self.next_action = 'wait'
self.text = font.render(self.next_action, True,yellow)
return self.next_action
def rotateLeft(self):
self.direction -= 1
if self.direction < 0:
self.direction = len(direction_list)-1
def rotateRight(self):
self.direction = (self.direction+1)%len(direction_list)
def forceMove(self, players, direction, flags):
temp_row = self.row
temp_col = self.col
if direction == 0: #north
temp_row -= 1
elif direction == 1: #east
temp_col += 1
elif direction == 2: #south
temp_row += 1
elif direction == 3: #west
temp_col -= 1
#Check for another player that got shoved and
#pass the shove down the line.
p = getPlayerAt(players, temp_row, temp_col)
if p != None:
p.forceMove(players, direction, flags)
#Complete the move
self.row = temp_row
self.col = temp_col
self.eventAtLocation(board, flags) # board here is the module-level map grid
def move(self, board, players, flags):
if canMove(board, players, self.row, self.col, self.direction):
temp_row = self.row
temp_col = self.col
if self.direction == 0: #north
temp_row -= 1
elif self.direction == 1: #east
temp_col += 1
elif self.direction == 2: #south
temp_row += 1
elif self.direction == 3: #west
temp_col -= 1
#If someone else is in this space, shove them.
p = getPlayerAt(players, temp_row, temp_col)
if p != None:
p.forceMove(players, self.direction, flags)
#Complete the move
self.row = temp_row
self.col = temp_col
self.eventAtLocation(board, flags)
def eventAtLocation(self, board, flags):
'''Check for and activate any event at current location'''
#Check for off the board and reset to a checkpoint
if outOfBounds(board,self.row,self.col):
self.row, self.col = self.checkpoint
#Check for falling in a hole
elif board[self.row][self.col] == 'hole':
print('Fell in a hole. Reset to last checkpoint')
self.row, self.col = self.checkpoint
#Update our checkpoint if we landed on a checkpoint
elif board[self.row][self.col] == 'chek':
self.checkpoint = (self.row, self.col)
#Check for getting a flag
for f in flags:
if f.row==self.row and f.col==self.col and not(f.color in self.flags):
self.flags.append(f.color)
def getCenter(self):
offset_x = self.col*tile_width + tile_width/2
offset_y = self.row*tile_height + tile_height/2
return offset_x, offset_y
def getCorners(self):
#Returns list of points to draw the robot
points = []
offset_x, offset_y = self.getCenter()
heading = 0
if self.direction == 0:
heading = -math.pi/2
elif self.direction == 2:
heading = math.pi/2
elif self.direction == 3:
heading = math.pi
#Nose
angle = heading+math.pi*2
x = offset_x + math.cos(angle)*self.radius*1.5
y = offset_y + math.sin(angle)*self.radius*1.5
points.append([x, y])
#wing 1
angle = heading+math.pi*2*(1.2/self.sides)
x = offset_x + math.cos(angle)*self.radius
y = offset_y + math.sin(angle)*self.radius
points.append([x, y])
#rear
angle = heading+math.pi
x = offset_x + math.cos(angle)*self.radius*0.5
y = offset_y + math.sin(angle)*self.radius*0.5
points.append([x, y])
#wing 2
angle = heading+math.pi*2*(1.8/self.sides)
x = offset_x + math.cos(angle)*self.radius
y = offset_y + math.sin(angle)*self.radius
points.append([x, y])
return points
def draw(self):
#Draw all flags you are carrying
i = -2
for f in self.flags:
temp = Flag(self.screen, self.row, self.col, f)
temp.drawSmall(int(0.25*i*tile_width), 0.5)
i = i+1
#Blink
c = self.color
if self.blink:
c = black
#Draw outline of ship.
points = self.getCorners()
pygame.draw.polygon(self.screen, c,
points,int(scaling*8))
#Draw current action
offset_x, offset_y = self.getCenter()
self.screen.blit(self.text, (offset_x, offset_y))
class Flag:
def __init__(self, screen, row, col, color):
self.screen = screen
self.row = row
self.col = col
self.color = color
self.radius = 40*scaling
def getCenter(self):
offset_x = self.col*tile_width + tile_width/2
offset_y = self.row*tile_height + tile_height/2
return offset_x, offset_y
def getCorners(self, x_adjust=0, shrink=1):
#Returns list of points to draw the flag
points = []
offset_x, offset_y = self.getCenter()
#top
x = offset_x + x_adjust
y = offset_y - self.radius*1.3*shrink
points.append([int(x), int(y)])
#tri corner flag
x = offset_x -0.8*self.radius*shrink + x_adjust
y = offset_y - self.radius*shrink
points.append([int(x), int(y)])
x = offset_x + x_adjust
y = offset_y - 0.7*self.radius*shrink
points.append([int(x), int(y)])
x = offset_x + x_adjust
y = offset_y - self.radius*1.3*shrink
points.append([int(x), int(y)])
#base
x = offset_x + x_adjust
y = offset_y + self.radius*0.8*shrink
points.append([int(x), int(y)])
return points
def draw(self):
points = self.getCorners()
pygame.draw.polygon(self.screen, self.color,
points,int(scaling*8))
pygame.draw.circle(self.screen, self.color,
points[-1], int(scaling*12))
def drawSmall(self, adjust, scale):
points = self.getCorners(x_adjust=adjust, shrink=scale)
pygame.draw.polygon(self.screen, self.color,
points,int(scaling*8))
pygame.draw.circle(self.screen, self.color,
points[-1], int(scaling*12))
class Sprite:
def __init__(self, x, y, image):
self.x = x
self.y = y
self.image = image
def draw(self, surface):
surface.blit(self.image, (self.x, self.y))
def constructMap(filename):
# Dictionary mapping tileset abbreviations to file names
image_dict = {'empt': 'tile-clear.png',
'hole': 'tile-hole.png',
'1wl ': 'tile-wall-1a.png',
'1wu ': 'tile-wall-1b.png',
'1wr ': 'tile-wall-1c.png',
'1wd ': 'tile-wall-1d.png',
'chek': 'tile-hammer-wrench.png',
'cv1d': 'tile-conveyor-1a.png',
'cv1l': 'tile-conveyor-1b.png',
'cv1u': 'tile-conveyor-1c.png',
'cv1r': 'tile-conveyor-1d.png',
'cvlr': 'tile-conveyor-1-turnleft_a.png',
'cvld': 'tile-conveyor-1-turnleft_b.png',
'cvll': 'tile-conveyor-1-turnleft_c.png',
'cvlu': 'tile-conveyor-1-turnleft_d.png',
'cvrr': 'tile-conveyor-1-turnright_a.png',
'cvrd': 'tile-conveyor-1-turnright_b.png',
'cvrl': 'tile-conveyor-1-turnright_c.png',
'cvru': 'tile-conveyor-1-turnright_d.png',
'2wur': 'tile-wall-2a.png',
'2wrd': 'tile-wall-2b.png',
'2wdl': 'tile-wall-2c.png',
'2wlu': 'tile-wall-2d.png',
}
# Open file to read in text representation of the map
file_handle = open(filename, 'r')
line = file_handle.readline()
line = line.strip()
images = [] # 2d array of sprites
board = [] # 2d array of tile names
while line:
array = line.split(',')
row_array_img = []
row_array_name = []
for i in range(len(array)):
img = pygame.image.load('tiles/'+image_dict[array[i]])
# Scale image
img = pygame.transform.scale(img, (tile_width, tile_height))
row_array_img.append(Sprite(i*tile_width, len(images)*tile_width, img))
row_array_name.append(array[i])
images.append(row_array_img)
board.append(row_array_name)
line = file_handle.readline()
line = line.strip()
return images,board
def drawAll(surface, players, flags):
surface.fill(black)
for row in images:
for col in row:
col.draw(surface)
for p in players:
p.draw()
for f in flags:
f.draw()
pygame.display.flip()
# Delay to get desired fps
clock.tick(fps)
def boardActions(board, players, flags):
#If the index of the conveyor in this list matches up
#to the index of the direction in direction_list
#then we can use index to force move players on conveyors
conveyors = ['cv1u', 'cv1r', 'cv1d', 'cv1l']
left_turn_conveyors = ['cvlu', 'cvlr', 'cvld', 'cvll', ]
right_turn_conveyors = ['cvru', 'cvrr', 'cvrd', 'cvrl', ]
#Convey any players on conveyors
for p in players:
if board[p.row][p.col] in conveyors:
direction = conveyors.index(board[p.row][p.col])
p.forceMove(players, direction, flags)
elif board[p.row][p.col] in left_turn_conveyors:
direction = left_turn_conveyors.index(board[p.row][p.col])
p.forceMove(players, direction, flags)
p.rotateLeft()
elif board[p.row][p.col] in right_turn_conveyors:
direction = right_turn_conveyors.index(board[p.row][p.col])
p.forceMove(players, direction, flags)
p.rotateRight()
#images is a 2d array of images
#board is a 2d array of the names of tiles
images,board = constructMap(map_choice)
board_width = len(board[0])
board_height = len(board)
map_width = int(board_width*tile_width)
map_height = int(board_height*tile_height)
clock = pygame.time.Clock()
surface = pygame.display.set_mode((map_width, map_height))
player1 = Robot(surface, 0, 0, white)
player2 = Robot(surface, 4, 3, red)
player3 = Robot(surface, 4, 4, red)
player4 = Robot(surface, 4, 5, red)
player_list = [player1, player2, player3, player4]
flags = []
flags.append(Flag(surface, 2, 3, red))
flags.append(Flag(surface, 5, 5, yellow))
flags.append(Flag(surface, 6, 1, blue))
flags.append(Flag(surface, 6, 3, green))
#Use this list for ordering actions
action_list = []
# Draw all images on the surface
done = False
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
done = True
elif event.key == 32: #space bar
step_forward = True
elif event.key == pygame.K_RIGHT:
player_list[0].rotateRight()
elif event.key == pygame.K_LEFT:
player_list[0].rotateLeft()
elif event.key == pygame.K_UP:
player_list[0].move(board, player_list, flags)
if step_forward or play_continuous:
step_forward = False
#Reset the action list for the next round
if len(action_list) == 0:
#Perform board actions such as conveying before
#next player move
boardActions(board, player_list, flags)
#All players decide what they want to do
for p in player_list:
action_list.append([p, 'wait']) # TODO testing: replace 'wait' with p.chooseAction(board, player_list)
#Randomize ordering of actions
random.shuffle(action_list)
else:
#Take the next action
player, action = action_list.pop()
#blink to indicate actor
for i in range(blink_count):
player.blink = not player.blink
drawAll(surface, player_list, flags)
#Perform action
player.doAction(board, player_list, flags, action)
drawAll(surface, player_list, flags)
pygame.quit()
|
StarcoderdataPython
|
4826788
|
# -*- coding: utf-8 -*-
"""
pyrseas.dbobject.eventtrig
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module defines two classes: EventTrigger derived from
DbObject, and EventTriggerDict derived from DbObjectDict.
"""
from . import DbObjectDict, DbObject
from . import quote_id, commentable
from .function import split_schema_func, join_schema_func
EXEC_PROC = 'EXECUTE PROCEDURE '
ENABLE_MODES = {'O': True, 'D': False, 'R': 'replica', 'A': 'always'}
class EventTrigger(DbObject):
"""An event trigger"""
keylist = ['name']
catalog = 'pg_event_trigger'
def __init__(self, name, description, owner, event, procedure,
enabled=False, tags=None,
oid=None):
"""Initialize the event trigger
:param name: trigger name (from evtname)
:param description: comment text (from obj_description())
:param owner: owner name (from rolname via evtowner)
:param event: event that causes firing (from evtevent)
:param procedure: function to be called (from evtfoid)
:param enabled: replication mode firing control (from evtenabled)
:param tags: command tags (from evttags)
"""
super(EventTrigger, self).__init__(name, description)
self._init_own_privs(owner, [])
self.event = event
if procedure[-2:] == '()':
procedure = procedure[:-2]
self.procedure = split_schema_func(None, procedure)
if enabled is False or enabled is True:
self.enabled = enabled
elif len(enabled) == 1:
self.enabled = ENABLE_MODES[enabled]
else:
assert enabled is not None and enabled in ENABLE_MODES.values()
self.enabled = enabled
self.tags = tags
self.oid = oid
@staticmethod
def query(dbversion=None):
return """
SELECT evtname AS name, evtevent AS event, rolname AS owner,
evtenabled AS enabled, evtfoid::regprocedure AS procedure,
evttags AS tags, t.oid,
obj_description(t.oid, 'pg_event_trigger') AS description
FROM pg_event_trigger t JOIN pg_roles ON (evtowner = pg_roles.oid)
WHERE t.oid NOT IN (
SELECT objid FROM pg_depend WHERE deptype = 'e')
ORDER BY name"""
@staticmethod
def from_map(name, inobj):
"""Initialize an event trigger instance from a YAML map
:param name: trigger name
:param inobj: YAML map of the event trigger
:return: event trigger instance
"""
obj = EventTrigger(
name, inobj.pop('description', None), inobj.pop('owner', None),
inobj.pop('event', None), inobj.pop('procedure', None),
inobj.pop('enabled', False), inobj.pop('tags', None))
obj.set_oldname(inobj)
return obj
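# Illustrative input for from_map (values are hypothetical; the keys match
# the ones popped above):
#
#   name: log_ddl
#   inobj:
#     event: ddl_command_start
#     procedure: audit.log_ddl()
#     enabled: true
#     owner: postgres
#     description: log every DDL command
#     tags: [CREATE TABLE, DROP TABLE]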
@property
def objtype(self):
return "EVENT TRIGGER"
def to_map(self, db, no_owner=False, no_privs=False):
"""Convert an event trigger definition to a YAML-suitable format
:param db: db used to tie the objects together
:return: dictionary
"""
dct = super(EventTrigger, self).to_map(db, no_owner)
dct['procedure'] = join_schema_func(self.procedure) + "()"
if self.tags is None:
dct.pop('tags')
return dct
@commentable
def create(self, dbversion=None):
"""Return SQL statements to CREATE the event trigger
:return: SQL statements
"""
filter = ''
if self.tags is not None:
filter = "\n WHEN tag IN (%s)" % ", ".join(
["'%s'" % tag for tag in self.tags])
return ["CREATE %s %s\n ON %s%s\n EXECUTE PROCEDURE %s()" % (
self.objtype, quote_id(self.name), self.event, filter,
join_schema_func(self.procedure))]
def get_implied_deps(self, db):
deps = super(EventTrigger, self).get_implied_deps(db)
sch, fnc = self.procedure
deps.add(db.functions[(sch, fnc, '')])
return deps
class EventTriggerDict(DbObjectDict):
"The collection of event triggers in a database"
cls = EventTrigger
def _from_catalog(self):
"""Initialize the dictionary of triggers by querying the catalogs"""
if self.dbconn.version < 90300:
return
super(EventTriggerDict, self)._from_catalog()
def from_map(self, intriggers, newdb):
"""Initalize the dictionary of triggers by converting the input map
:param intriggers: YAML map defining the event triggers
:param newdb: dictionary of input database
"""
for key in intriggers:
if not key.startswith('event trigger '):
raise KeyError("Unrecognized object type: %s" % key)
trg = key[14:]
inobj = intriggers[key]
if not inobj:
raise ValueError("Event trigger '%s' has no specification" %
trg)
self[trg] = EventTrigger.from_map(trg, inobj)
|
StarcoderdataPython
|
188635
|
import math
import logging
import json
from model.resources import *
_driver = None
def set_driver(new_driver):
global _driver
_driver = new_driver
def get_driver():
global _driver
return _driver
def get_module_logger(mod_name):
logger = logging.getLogger(mod_name)
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s - %(funcName)20s %(message)s')
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(formatter)
consoleHandler.setLevel(logging.INFO)
logger.addHandler(consoleHandler)
debugFileHandler = logging.FileHandler('logs/olivia_debug.log')
debugFileHandler.setFormatter(formatter)
debugFileHandler.setLevel(logging.DEBUG)
logger.addHandler(debugFileHandler)
fileHandler = logging.FileHandler('logs/olivia.log')
fileHandler.setFormatter(formatter)
fileHandler.setLevel(logging.INFO)
logger.addHandler(fileHandler)
logger.setLevel(logging.DEBUG)
return logger
log = get_module_logger(__name__)
def seconds_to_formatted_time(seconds):
# log.debug('seconds = {}'.format(seconds))
timeLeft = int(math.ceil(seconds))
s = timeLeft % 60
timeLeft = int((timeLeft - s)/60)
m = timeLeft % 60
timeLeft = int((timeLeft - m)/60)
h = timeLeft % 24
timeLeft = int((timeLeft - h) /24)
d = timeLeft
formattedTime = ''
if d != 0:
formattedTime += '{} d'.format(d)
if h != 0:
formattedTime += ' {} h'.format(h)
if m != 0:
formattedTime += ' {} m'.format(m)
if s != 0:
formattedTime += ' {} s'.format(s)
# log.debug('format = {}'.format(formattedTime))
return formattedTime
def formatted_time_to_seconds(formattedTime):
# log.debug('format = {}'.format(formattedTime))
# Parsing
days = 0
hours = 0
minutes = 0
seconds = 0
if 'd' in formattedTime:
days = int(formattedTime.split('d')[0])
formattedTime = formattedTime.split('d')[1]
if 'h' in formattedTime:
hours = int(formattedTime.split('h')[0])
formattedTime = formattedTime.split('h')[1]
if 'm' in formattedTime:
minutes = int(formattedTime.split('m')[0])
formattedTime = formattedTime.split('m')[1]
if 's' in formattedTime:
seconds = int(formattedTime.split('s')[0])
timeInSeconds = ((days * 24 + hours) * 60 + minutes) * 60 + seconds
# log.debug('seconds = {}'.format(timeInSeconds))
return timeInSeconds
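# Illustrative round trip between the two helpers above:
#   seconds_to_formatted_time(90061)             -> '1 d 1 h 1 m 1 s'
#   formatted_time_to_seconds('1 d 1 h 1 m 1 s') -> 90061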
def cost_extraction(costListElement):
cost = {METAL: 0, CRISTAL: 0, DEUTERIUM: 0}
try:
cost[METAL] = int(costListElement.find_element_by_class_name('metal').find_element_by_class_name('cost').get_attribute(
'innerHTML').replace('.', '').replace('M', '000000'))
except:
pass
try:
cost[CRISTAL] = int(costListElement.find_element_by_class_name('crystal').find_element_by_class_name('cost').get_attribute(
'innerHTML').replace('.', '').replace('M', '000000'))
except:
pass
try:
cost[DEUTERIUM] = int(costListElement.find_element_by_class_name('deuterium').find_element_by_class_name(
'cost').get_attribute('innerHTML').replace('.', '').replace('M', '000000'))
except:
pass
return cost
def level_extraction(levelElementText):
while '<' in levelElementText:
i = levelElementText.find('<')
j = levelElementText.find('>')
if i == 0:
levelElementText = levelElementText[j + 1:]
levelElementText = levelElementText[levelElementText.find('>') + 1:]
else:
levelElementText = levelElementText[:i - 1]
levelElementText.strip()
# Remove dots
levelElementText = levelElementText.replace('.', '')
return int(levelElementText.strip())
def remove_ad():
#Removing ad
try:
closeAdZone = _driver.find_element_by_class_name('openX_int_closeButton')
closeAdButton = closeAdZone.find_element_by_tag_name('a')
closeAdButton.click()
except:
pass
|
StarcoderdataPython
|