id stringlengths 1-7 | text stringlengths 6-1.03M | dataset_id stringclasses 1 value |
---|---|---|
93071
|
<gh_stars>0
"""Used to make pytest functions available globally"""
# Copyright (c) 2020 zfit
#
#
# def pytest_generate_tests(metafunc):
# if metafunc.config.option.all_jit_levels:
#
# # We're going to duplicate these tests by parametrizing them,
# # which requires that each test has a fixture to accept the parameter.
# # We can add a new fixture like so:
# metafunc.fixturenames.append('tmp_ct')
#
# # Now we parametrize. This is what happens when we do e.g.,
# # @pytest.mark.parametrize('tmp_ct', range(count))
# # def test_foo(): pass
# metafunc.parametrize('tmp_ct', range(2))
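#
# A hedged sketch (not part of the original file) of the pytest_addoption hook that the
# commented-out code above assumes: it registers the --all-jit-levels flag so that
# metafunc.config.option.all_jit_levels exists at collection time.
#
# def pytest_addoption(parser):
#     parser.addoption(
#         "--all-jit-levels",
#         action="store_true",
#         default=False,
#         help="run every test under each JIT level by duplicating it",
#     )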
|
StarcoderdataPython
|
14353
|
#!/usr/bin/env python
#########################################
# Title: Rocksat Data Server Class #
# Project: Rocksat #
# Version: 1.0 #
# Date: August, 2017 #
# Author: <NAME>, KJ4QLP #
# Comment: Initial Version #
#########################################
import socket
import threading
import sys
import os
import errno
import time
import binascii
import struct  # needed by Decode_Header to unpack frame fields
import numpy
import datetime as dt
from logger import *
class Data_Server(threading.Thread):
def __init__ (self, options):
threading.Thread.__init__(self,name = 'DataServer')
self._stop = threading.Event()
self.ip = options.ip
self.port = options.port
self.id = options.id
self.ts = options.ts
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #TCP Socket
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.connected = False
self.log_fh = setup_logger(self.id, 'main', self.ts)
self.logger = logging.getLogger('main')
self.last_frame_ts = dt.datetime.utcnow() #Time Stamp of last received frame
self.frame_count = 0
self.adsb_count = 0
self.ais_count = 0
self.hw_count = 0
def run(self):
print "Data Server Running..."
try:
self.sock.connect((self.ip, self.port))
self.connected = True
print self.utc_ts() + "Connected to Modem..."
except Exception as e:
self.Handle_Connection_Exception(e)
while (not self._stop.isSet()):
if self.connected == True:
data = self.sock.recv(4096)
if len(data) == 256:
self.Decode_Frame(data, dt.datetime.utcnow())
else:
self.connected = False
elif self.connected == False:
print self.utc_ts() + "Disconnected from modem..."
time.sleep(1)
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #TCP Socket
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.connect((self.ip, self.port))
self.connected = True
print self.utc_ts() + "Connected to Modem..."
except Exception as e:
self.Handle_Connection_Exception(e)
sys.exit()
def Decode_Frame(self, rx_frame, ts):
self.frame_count += 1
self.last_frame_ts = ts
#print str(self.frame_count) + ',' + binascii.hexlify(rx_frame)
self.logger.info(str(self.frame_count) + ',' + binascii.hexlify(rx_frame))
self.Decode_Header(rx_frame)
def Decode_Header(self, rx_frame):
callsign = str(rx_frame[0:6]) #Callsign
dn_pkt_id = numpy.uint16(struct.unpack('>H',rx_frame[6:8]))[0] #downlink frame id
up_pkt_id = numpy.uint16(struct.unpack('>H',rx_frame[8:10]))[0] #uplink frame id
msg_type = numpy.uint8(struct.unpack('>B',rx_frame[10]))[0] #message type, 0=ADSB, 1=AIS, 2=HW
msg_type_str = ""
if msg_type == 0: msg_type_str = 'ADSB'
elif msg_type == 1: msg_type_str = ' AIS'
elif msg_type == 2: msg_type_str = ' HW'
print self.last_frame_ts, self.frame_count, callsign, dn_pkt_id, up_pkt_id, msg_type_str
def Handle_Connection_Exception(self, e):
#print e, type(e)
errorcode = e[0]
if errorcode==errno.ECONNREFUSED:
pass
#print errorcode, "Connection refused"
elif errorcode==errno.EISCONN:
print errorcode, "Transport endpoint is already connected"
self.sock.close()
else:
print e
self.sock.close()
self.connected = False
def get_frame_counts(self):
self.valid_count = len(self.valid.time_tx)
self.fault_count = len(self.fault.time_tx)
self.recon_count = len(self.recon.time_tx)
self.total_count = self.valid_count + self.fault_count + self.recon_count
#print self.utc_ts(), self.total_count, self.valid_count, self.fault_count, self.recon_count
return self.total_count, self.valid_count, self.fault_count, self.recon_count
def set_start_time(self, start):
print self.utc_ts() + "Mission Clock Started"
ts = start.strftime('%Y%m%d_%H%M%S')
self.log_file = "./log/rocksat_"+ self.id + "_" + ts + ".log"
log_f = open(self.log_file, 'a')
msg = "Rocksat Receiver ID: " + self.id + "\n"
msg += "Log Initialization Time Stamp: " + str(start) + " UTC\n\n"
log_f.write(msg)
log_f.close()
self.log_flag = True
print self.utc_ts() + "Logging Started: " + self.log_file
self.valid_start = True
self.start_time = start
for i in range(len(self.valid.time_rx)):
self.valid.rx_offset[i] = (self.valid.time_rx[i]-self.start_time).total_seconds()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
def utc_ts(self):
return str(dt.datetime.utcnow()) + " UTC | "
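# A minimal usage sketch (not from the original project), assuming an options object
# that exposes the ip, port, id and ts attributes read in __init__; all values below
# are placeholders.
#
# if __name__ == '__main__':
#     class Options(object):
#         ip, port, id, ts = '127.0.0.1', 2000, 'rx0', '20170801_000000'
#     server = Data_Server(Options())
#     server.daemon = True
#     server.start()   # run() connects to the modem and decodes 256-byte frames
#     # ... later: server.stop()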
|
StarcoderdataPython
|
123391
|
<gh_stars>1-10
import pytest
from PySide import QtCore
from jukeboxcore.gui import filesysitemdata as fsitd
from jukeboxcore.filesys import JB_File
dr = QtCore.Qt.DisplayRole
def test_tfi_element_data(taskfileinfo):
assert fsitd.taskfileinfo_element_data(taskfileinfo, dr) == "Shot01"
def test_tfi_task_data(taskfileinfo):
assert fsitd.taskfileinfo_task_data(taskfileinfo, dr) == "Design"
def test_tfi_descriptor_data(taskfileinfo):
assert fsitd.taskfileinfo_descriptor_data(taskfileinfo, dr) == "desc1"
def test_tfi_path_data(taskfileinfo):
assert fsitd.taskfileinfo_path_data(taskfileinfo, dr) == JB_File(taskfileinfo).get_fullpath()
def test_tfi_version_data(taskfileinfo):
assert fsitd.taskfileinfo_version_data(taskfileinfo, dr) == "v005"
def test_tfi_rtype_data(taskfileinfo):
assert fsitd.taskfileinfo_rtype_data(taskfileinfo, dr) == 'handoff'
@pytest.fixture(scope="module")
def tfidata(taskfileinfo):
return fsitd.TaskFileInfoItemData(taskfileinfo)
def test_tfidata_column_count(tfidata):
    assert tfidata.column_count() == 6
def test_tfidata_internal_data(tfidata, taskfileinfo):
assert tfidata.internal_data() is taskfileinfo
@pytest.mark.parametrize("inp,expected", [(0, "Shot01"),
(1, "Design"),
(2, "desc1"),
(3, "v005"),
(4, "handoff")])
def test_tfidata_data(inp, expected, tfidata):
assert tfidata.data(inp, dr) == expected
def test_tfidata_data_path(tfidata, taskfileinfo):
assert tfidata.data(5, dr) == JB_File(taskfileinfo).get_fullpath()
|
StarcoderdataPython
|
174763
|
# Generated by Django 4.0 on 2021-12-21 20:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('observe', '0009_alter_observinglocation_options'),
]
operations = [
migrations.AlterField(
model_name='observinglocation',
name='status',
field=models.CharField(choices=[('TBD', 'TBD'), ('Possible', 'Possible'), ('Issues', 'Issues'), ('Provisional', 'Provisional'), ('Active', 'Active'), ('Rejected', 'Rejected')], default='TBD', max_length=50, verbose_name='Status'),
),
]
|
StarcoderdataPython
|
1746346
|
class NeuroLangException(Exception):
"""Base class for NeuroLang Exceptions"""
pass
class UnexpectedExpressionError(NeuroLangException):
pass
class NeuroLangNotImplementedError(NeuroLangException):
pass
class ForbiddenExpressionError(NeuroLangException):
pass
class ForbiddenDisjunctionError(ForbiddenExpressionError):
pass
class ForbiddenExistentialError(ForbiddenExpressionError):
pass
class RelationalAlgebraError(NeuroLangException):
pass
class RelationalAlgebraNotImplementedError(
RelationalAlgebraError, NotImplementedError
):
pass
class ForbiddenBuiltinError(ForbiddenExpressionError):
pass
class NeuroLangFrontendException(NeuroLangException):
pass
class SymbolNotFoundError(NeuroLangException):
pass
class RuleNotFoundError(NeuroLangException):
pass
class UnsupportedProgramError(NeuroLangException):
pass
class UnsupportedQueryError(NeuroLangException):
pass
class ProtectedKeywordError(NeuroLangException):
pass
class ForbiddenRecursivityError(UnsupportedProgramError):
pass
class ForbiddenUnstratifiedAggregation(UnsupportedProgramError):
pass
|
StarcoderdataPython
|
3253756
|
<filename>words/urls.py
from django.urls import path
from . import views
from . import exercises
urlpatterns = [
path('', views.index, name='index'),
path('auth/signup/', views.SignUp.as_view(), name='signup'),
path('create_words/', views.read_words, name='read'),
path('new_words/', views.show_new_words, name='new'),
path('get_words/', views.get_new_words, name='get_words'),
path('get_familiar/', views.get_familiar_list, name='get_familiar'),
path('get_new/', views.get_new_list, name='get_new'),
path('get_known/', views.get_known_list, name='get_known'),
path('familiar_words/', views.show_familiar_words, name='familiar'),
path('translate/', exercises.translate, name='translate'),
path(
'translate_to_english/', exercises.translate_to_english,
name='translate_to_english'
),
path(
'word_from_letters', exercises.words_from_letters_english,
name='word_from_letters_english'
),
path(
'word_from_letters_rus', exercises.words_from_letters_russian,
name='word_from_letters_russian'
)
]
|
StarcoderdataPython
|
1777130
|
<gh_stars>1-10
import os
import PIL
import PIL.Image
import PIL.ExifTags
import cv2
import numpy as np
folders = ["Left of Sidewalk", "Right of Sidewalk", "Middle of Sidewalk"]
def rotate(img_name):
img = PIL.Image.open(img_name)
if img is None:
return
    raw_exif = img._getexif()
    if raw_exif is None:  # no EXIF metadata, nothing to rotate
        return
    exif = {
        PIL.ExifTags.TAGS[k]: v
        for k, v in raw_exif.items()
        if k in PIL.ExifTags.TAGS
    }
    img = img.convert('RGB')
    img = np.array(img)
    orientation = exif.get('Orientation')
    if orientation == 3:
        img = cv2.rotate(img, cv2.ROTATE_180)
    if orientation == 6:
        img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
    if orientation == 8:
        img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
cv2.imwrite(img_name, img)
for folder in folders:
for file in os.listdir(folder):
if file.endswith(".JPG") or file.endswith(".jpg"):
print(os.path.join(folder, file))
rotate(os.path.join(folder, file))
|
StarcoderdataPython
|
1762583
|
<filename>CNIC-X/util.py
# -*- coding: utf-8 -*-
import skimage.io
import skimage.transform
import ipdb
import numpy as np
def load_image(path):
try:
img=skimage.io.imread(path).astype(float)
except:
return None
if img is None:return None
if len(img.shape)<2:return None
if len(img.shape)==4:return None
if len(img.shape)==2:img=np.tile(img[:,:,None],3)
if img.shape[2]==4:img=img[:,:,:3]
if img.shape[2]>4:return None
img/=255.
    short_edge=min(img.shape[:2])
    yy=int((img.shape[0]-short_edge)/2)
    xx=int((img.shape[1]-short_edge)/2)
    # center-crop the square short-edge region, then resize to 224x224
    crop_img=img[yy:yy+short_edge,xx:xx+short_edge]
    resized_img=skimage.transform.resize(crop_img,[224,224])
    return resized_img
|
StarcoderdataPython
|
8070
|
import logging
from episodes import find_updates, db, count_all
from logging import error as logi
from flask import Flask, jsonify, request
def create_app(config, debug=False, testing=False, config_overrides=None):
app = Flask(__name__)
app.config.from_object(config)
app.config['JSON_AS_ASCII'] = False
app.debug = debug
app.testing = testing
if config_overrides:
app.config.update(config_overrides)
# Configure logging
if not app.testing:
logging.basicConfig(level=logging.INFO)
@app.before_request
def before_request():
db.connect()
@app.after_request
def after_request(response):
db.close()
return response
@app.route('/get_new_episodes')
def get_new_episodes():
appengine_request = request.headers.get('X-Appengine-Cron')
if appengine_request == 'true':
from scraper import update_episodes
update_episodes()
return '<h1>Success</h1>'
else:
            return '<h1>This is a cronjob and all the requests should come from appengine.</h1>'
@app.route('/get_updates')
def get_update():
timestamp = request.args.get('timestamp', '')
if timestamp == '':
logi('Default timestamp')
timestamp = 0
else:
            timestamp = int(timestamp)
result = find_updates(timestamp)
return jsonify(result)
@app.route('/')
def welcome():
message = '{}{}{}{}'.format('<h1>Welcome to FardaStationAPI WebService</h1>',
'<p>To get information about the latest episodes of Fardaa Station (by '
'RadioFarda.com) please send a GET request to '
'http://fardastationapi.appspot.com/get_updates URL.</p>',
'<p>A UNIX epoch timestamp can also be passed in as an argument to filter out the '
'episodes before that timestamp. Example: '
'https://fardastationapi.appspot.com/get_updates?timestamp=1512629949</p>',
'<h1>Current number of episodes: {}</h1>'.format(count_all()))
return message
# Add an error handler. This is useful for debugging the live application,
# however, you should disable the output of the exception for production
# applications.
@app.errorhandler(500)
def server_error(e):
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
return app
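# A hedged usage sketch (not part of the original module): create_app() follows the
# Flask application-factory pattern, so a local entry point could look like the
# following, assuming a `config` module or object is importable.
#
# if __name__ == '__main__':
#     app = create_app('config', debug=True)
#     app.run(host='127.0.0.1', port=8080)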
|
StarcoderdataPython
|
3260196
|
import numpy as np
import datetime, sys
import scipy.io
import pdb
import os
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from shapely.geometry import Polygon,Point
import scipy.interpolate
import xarray as xr
import matplotlib.pyplot as plt
import utilities.hfr_util as hfr_util  # referenced below as hfr_util
import pyproj
from matplotlib import animation, rc
plt.rcParams['animation.html'] = 'jshtml'
animation.rcParams['animation.embed_limit'] = 60
class TrackingModel(object):
def __init__(self):
self.minlon, self.maxlon, self.minlat, self.maxlat, self.resolution_km, self.url, self.days_to_capture, self.start_time = self.set_model_parameters(default=True)
self.current_dataset = self._get_HFR_subset()
self.lat = self.current_dataset['lat'].values
self.lon = self.current_dataset['lon'].values
self.x = np.arange(0, len(self.lon), 1) * 1850
self.y = np.arange(0, len(self.lat), 1) * 1995
self.x_grid, self.y_grid = np.meshgrid(self.x, self.y)
self.lon_grid, self.lat_grid = np.meshgrid(self.lon, self.lat)
self.origin = ccrs.UTM(zone=10).transform_point(self.lon[0], self.lat[0], ccrs.PlateCarree())
self.time_index = 0
self.hours_elapsed = 0
self.current_time = self.start_time
self.particles = np.array([])
self.time_step = .25
def set_model_parameters(self, default=False):
        ''' Hotwire the default parameters for development '''
crs = ccrs.PlateCarree() # Make cartopy projection object
resolution_km = 2
days_to_capture=3
if int(resolution_km)==2:
url='http://hfrnet-tds.ucsd.edu/thredds/dodsC/HFR/USWC/2km/hourly/RTV/HFRADAR_US_West_Coast_2km_Resolution_Hourly_RTV_best.ncd'
if int(resolution_km)==6:
url='http://hfrnet-tds.ucsd.edu/thredds/dodsC/HFR/USWC/6km/hourly/RTV/HFRADAR_US_West_Coast_6km_Resolution_Hourly_RTV_best.ncd'
if default:
year,month,day,hour,minu,secs = 2017,5,27,5,0,0
start_time = datetime.datetime(year,month,day,hour,minu,secs)
maxlat, minlat = 38.5, 37
maxlat=float(maxlat)
minlat=float(minlat)
minlon, maxlon = -123.5, -122
minlon=float(minlon)
maxlon=float(maxlon)
else:
try:
                year,month,day,hour,minu,secs=[int(x) for x in input("Enter the start date [yr mn dy hr mn sc] ").split()]
except ValueError:
print('Defaulting to 2017-05-27T05:00:00')
year,month,day,hour,minu,secs = 2017,5,27,5,0,0
start_time = datetime.datetime(year,month,day,hour,minu,secs)
#[year,month,day,hour,minu,secs]=input('Enter the start date [yr,mn,dy,hr,mn,sc]): ')
#[2017,5,27,5,0,0]
try:
maxlat,minlat=input('Enter the north and south boundary []: ').split()
except ValueError:
maxlat, minlat = 38.5, 37
maxlat=float(maxlat)
minlat=float(minlat)
# [38.5,37]
try:
minlon, maxlon=input('Enter the west and east boundary []: ').split()
except ValueError:
minlon, maxlon = -123.5, -122
minlon=float(minlon)
maxlon=float(maxlon)
return minlon, maxlon, minlat, maxlat, resolution_km, url, days_to_capture, start_time
def _get_HFR_subset(self):
"""
        Open netcdf from hfrnet as an xarray dataset, subset the desired data in space and time
Currently, using global variables for parameters, should reduce as code becomes object-oriented
:return hfr_ds = xarray dataset over the specified spatial grid and time
"""
try:
ds = xr.open_dataset(self.url)
except Exception as e:
print("Trouble downloading data, check connections.")
print(e)
sys.exit()
subset_ds = ds.sel(time = slice(self.start_time, self.start_time + datetime.timedelta(days=self.days_to_capture)),
lat = slice(self.minlat,self.maxlat),
lon = slice(self.minlon,self.maxlon))
        clean_u = hfr_util.interp_time_surface_currents(subset_ds['u'].values)  # linearly interpolate currents through time, given a threshold of available data
clean_v = hfr_util.interp_time_surface_currents(subset_ds['v'].values)
subset_ds['u_clean'] = xr.DataArray(clean_u, coords={'time': subset_ds['time'].values, 'lon': subset_ds['lon'].values, 'lat': subset_ds['lat'].values}, dims=['time','lat','lon'])
subset_ds['v_clean'] = xr.DataArray(clean_v, coords={'time': subset_ds['time'].values, 'lon': subset_ds['lon'].values, 'lat': subset_ds['lat'].values}, dims=['time','lat','lon'])
return subset_ds
def add_particle(self, coord):
utm = ccrs.UTM(zone=10)
p_dist = utm.transform_point(coord[0], coord[1], ccrs.PlateCarree())
p_dist = [p_dist[0] - self.origin[0], p_dist[1] - self.origin[1]]
self.particles = np.append(self.particles, Particle(p_dist))
def part_to_coor(self, particle, last=False):
pc_proj = ccrs.PlateCarree()
p_dist = particle.coordinates
p_dist = [p_dist[:,0] + self.origin[0], p_dist[:,1] + self.origin[1]]
if last:
p_coor = pc_proj.transform_points(ccrs.UTM(zone=10), np.array((p_dist[:,-1])),np.array((p_dist[:,-1])))
else:
p_coor = pc_proj.transform_points(ccrs.UTM(zone=10), np.array([p_dist[:,0]]),np.array([p_dist[:,1]]))
p_coor = p_coor[:,:2]
return p_coor
def draw_map(self, draw_bathy=True, whole_extent=False):
''' Draw a map of the domain '''
land_feature = cfeature.NaturalEarthFeature('physical','land','10m')
self.fig = plt.figure()
# self.fig.set_size_inches(8,8)
self.geo_axes = plt.axes(projection=ccrs.PlateCarree())
if whole_extent:
extent = [self.minlon, self.maxlon, self.minlat, self.maxlat]
else:
extent = [-123, -122.3, 37.26, 38]
self.geo_axes.set_extent(extent, crs=ccrs.PlateCarree())
self.geo_axes.add_feature(land_feature, edgecolor='k', zorder=40)
# self.geo_axes.gridlines(draw_labels=True, zorder= 20)
if draw_bathy:
self._draw_bathymetry_SFBAY()
def _draw_bathymetry_SFBAY(self):
'''
Draw and return an axes with bathymetry contours
Bathy grid retrieved from: https://maps.ngdc.noaa.gov/viewers/wcs-client/
'''
file = './data/SF_Bay_coastal_relief_model.nc'
bathy = xr.open_dataset(file)
blat = bathy['lat'].values
blon = bathy['lon'].values
elev = bathy['Band1'].values
levels = np.arange(10*(-2000//10), -499, 450)
levels = np.append(levels, np.arange(-475, 25, 25))
lws = [0.5 if l%100 else 1 for l in levels]
cs = self.geo_axes.contour(blon, blat, elev, levels, linewidths=lws, linestyles='solid', colors=['black'], alpha=0.4)
plt.clabel(cs, list(np.arange(-125,1,25)),fmt='%1d', inline=True, fontsize=15, colors='k',inline_spacing=10)
    def plot_particles(self, plot_type='all', save_dir=None):
"""
Make plots of the particle trajectories. Can make static plots, last locations, or animations
"""
if plot_type == 'last':
#Only if you want to plot the last position (debugging)
for p in self.particles[:]:
pos = self.part_to_coor(p, last=True)
self.geo_axes.scatter(pos[0,0],pos[0,1], zorder=50, marker='.', c='grey')
elif plot_type == 'animation':
# make animation of all the particle trajectories
            Q = self.geo_axes.quiver(self.lon_grid, self.lat_grid, self.current_dataset['u_clean'].isel(time=0), self.current_dataset['v_clean'].isel(time=0), color='b', units='inches')
p = self.particles[0]
px, py = p.coordinates[:,0]+self.origin[0], p.coordinates[:,1]+self.origin[1]
line, = self.geo_axes.plot(px[0],py[0],
color='.75',
transform=ccrs.UTM(zone=10))
front_marker = self.geo_axes.scatter(px[0],py[0],
color='b',
s=10,
transform=ccrs.UTM(zone=10))
tail_marker = self.geo_axes.scatter(px[0],py[0],
marker='x',
color='b',
s=10,
transform=ccrs.UTM(zone=10)) # last
line, = self.geo_axes.plot(px[0],py[0],
color='.75',
transform=ccrs.UTM(zone=10))
time_text = self.geo_axes.text(0.5, 1.05, 'Hours Before: ' + str(0), fontsize=16, transform=self.geo_axes.transAxes,zorder=300)
def update_geoaxes(num, front_marker, tail_marker, line, Q, time_text):
"""
Animation function - updates the data of each plot at each
timestep and the hour textbox
"""
            Q.set_UVC(self.current_dataset['u_clean'].isel(time=num//4),self.current_dataset['v_clean'].isel(time=num//4))
front_marker.set_offsets((px[num],py[num]))
if num == 0:
tail = 0
line.set_data(px[num], py[num])
elif num <= 24:
tail = 0
line.set_data(px[tail:num], py[tail: num])
else:
tail = num - 24
line.set_data(px[tail:num], py[tail: num])
tail_marker.set_offsets((px[tail], py[tail]))
time_text.set_text('Hours Before: ' + str(num//4))
return(front_marker, tail_marker, line, Q, time_text)
anim = animation.FuncAnimation(self.fig,
update_geoaxes, fargs=(front_marker, tail_marker, line, Q, time_text),
interval=150,
blit=True,
frames=len(px))
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='<NAME>'), bitrate=1800)
anim.save('testing_traj.mp4', dpi=200, writer=writer)
elif plot_type == 'all':
for p in self.particles[:]:
self.geo_axes.plot(p.coordinates[:,0]+self.origin[0], p.coordinates[:,1]+self.origin[1],
zorder=50,
c='blue',
markerfacecolor='.5',
markeredgecolor='None',
marker='.',
markersize=5,
transform=ccrs.UTM(zone=10))
else:
raise ValueError("Plot type not valid.")
def advect_particle(self):
"""
Advect a particle object based on the current
        Fixed timestep of 1/4 hour --> if this changes, update self.time_step
"""
for p in self.particles:
current_pos = p.get_position()
if not np.any(np.isnan(current_pos)): # check if advection previously failed if so skip
new_x = self._solve_position(current_pos[0], current_pos[1], self.time_step, dim=0)
new_y = self._solve_position(current_pos[0], current_pos[1], self.time_step, dim=1)
p.update_position([new_x, new_y])
self.update_time()
def update_time(self):
"""
Update the time_index and number of elapsed hours with each advection
"""
self.hours_elapsed += self.time_step
self.time_index = int(self.hours_elapsed)
def _solve_position(self, x, y, h, dim):
"""
        Solves for the next position of a particle after time h, in either the x
        or y direction, using a Runge-Kutta 4th-order scheme.
TODO: Update function to get next half timestep if goes into next hour
Arguments
---------
X, Y: mesh grid.
x, y: coordinates where to begin the evolution.
f: the current vector f that will be evolved.
h: the time step
dim: 0 for x and 1 for y.
Returns
---------
        new_coord: the advected coordinate along the chosen dimension
"""
X = self.x_grid
Y = self.y_grid
if dim == 0:
f = self.current_dataset['u_clean'].isel(time=self.time_index).values * 60 * 60
if dim == 1:
f = self.current_dataset['v_clean'].isel(time=self.time_index).values * 60 * 60
try:
k1 = h * self.bilinear_interpolation(X, Y, f, x, y)
k2 = h * self.bilinear_interpolation(X, Y, f, x + 0.5 * h, y + 0.5 * k1)
k3 = h * self.bilinear_interpolation(X, Y, f, x + 0.5 * h, y + 0.5 * k2)
k4 = h * self.bilinear_interpolation(X, Y, f, x + h, y + k3)
except ValueError as e:
print('Error in Interpolation, trying to interpolate a NAN value')
print(e)
return
try:
if dim == 0:
return x + 1. / 6 * k1 + 1. / 3 * k2 + 1. / 3 * k3 + 1. / 6 * k4
elif dim == 1:
return y + 1. / 6 * k1 + 1. / 3 * k2 + 1. / 3 * k3 + 1. / 6 * k4
except Exception as e:
print(e)
sys.exit()
def bilinear_interpolation(self, X, Y, f, x, y):
"""
        Interpolation method for estimating surface current values in between grid points. Edge cases are outlined at the
        top of the function and may need to be refactored. NaN is returned for cases where values cannot be interpolated.
Arguments
---------
X, Y: Coordinate mesh grid
        f: Grid of velocity values that can be accessed as f(j,i). Remember: row, column
x, y: coordinates to compute interpolation to f(y,x)
Returns
---------
interp_value: interpolated value of f(y,x)
"""
# Grid index shape
M = np.shape(X[:, 0])[0]
N = np.shape(X[0, :])[0]
dx, dy = X[0, 1] - X[0, 0], Y[1, 0] - Y[0, 0]
x_start, y_start = X[0, 0], Y[0, 0]
# Find the index of each value
i1, i2 = int((x - x_start) / dx), int((x - x_start) / dx) + 1
j1, j2 = int((y - y_start) / dy), int((y - y_start) / dy) + 1
# Boundary Conditions when interpolating near the edge.
# 1. Eastern boundary
if (i1 - N) > 1:
return np.nan
if i1 >= N - 1 and j1 <= N - 1 and j1 >= 0: # If on the Eastern edge of the boundary
return f[j1, N - 1]
if i1 >= N - 1 and j1 <= 0:
return f[0, N - 1]
if i1 >= N - 1 and j1 >= N - 1:
return f[N - 1, N - 1]
# 2. Western boundary
if i1 <= 0 and j1 <= N - 1 and j1 >= 0:
return f[j1, 0]
if i1 <= 0 and j1 <= 0:
return f[0, 0]
if i1 <= 0 and j1 >= N - 1:
return f[N - 1, 0]
# 3. Northern boundary
if j1 >= M - 1 and i1 <= M - 1 and i1 >= 0:
return f[M - 1, i1]
if j1 >= N - 1 and i1 <= 0:
return f[M - 1, 0]
# 3. Bottom boundary
if j1 <= 0 and i1 <= M - 1 and i1 >= 0:
return f[0, i1]
if j1 <= 0 and i1 >= M - 1:
return f[M - 1, 0]
x1, x2 = X[j1, i1], X[j2, i2]
y1, y2 = Y[j1, i1], Y[j2, i2]
interp_value = (1 / (x2 - x1) * 1 / (y2 - y1) *
(f[j1, i1] * (x2 - x) * (y2 - y) + f[j1, i2] * (x - x1) * (y2 - y)
+ f[j2, i1] * (x2 - x) * (y - y1) + f[j2, i2] * (x - x1) * (y - y1)))
return interp_value
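    # Worked sketch of the formula above (illustrative values, not from the original
    # code): on a unit cell with x1 = y1 = 0, x2 = y2 = 1 and corner values
    # f[j1, i1] = 0, f[j1, i2] = 1, f[j2, i1] = 2, f[j2, i2] = 3, the point
    # (x = 0.5, y = 0.5) weights each corner by 0.25, so interp_value =
    # 0.25 * (0 + 1 + 2 + 3) = 1.5, the average of the four corners, as expected
    # at the cell centre.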
def seed_particles(self, center_coord, radius):
        ''' Create a cluster of particles within a radius in km '''
x_pos, y_pos = ccrs.UTM(zone=10).transform_point(center_coord[0],center_coord[1], ccrs.PlateCarree())
col_num = [1,3,5,3,1]
dx = radius/4 * 1000
ylevel = (np.arange(max(col_num)) - 3) * dx
for i, n in enumerate(col_num):
x = np.arange(n)
x = x - (n-1)/2
x = x * dx
y = np.ones(shape=x.shape) * ylevel[i]
pos_x = x_pos + x
pos_y = y_pos + y
coors = ccrs.PlateCarree().transform_points(ccrs.UTM(zone=10), pos_x, pos_y)
x_coors = coors[:,0]
y_coors = coors[:,1]
for pos in zip(x_coors, y_coors):
self.add_particle(pos)
class Particle(object):
def __init__(self,coord):
self.coordinates = np.array([coord])
def get_position(self):
''' Return latest postion of particle class '''
return self.coordinates[-1,:]
def update_position(self, pos):
''' Append to coord array '''
pos = np.array(pos)
self.coordinates = np.vstack((self.coordinates,pos))
if __name__ == "__main__":
model = TrackingModel()
model.draw_map(draw_bathy=True)
# Debugging Particles
center_coord = [-122.6, 37.77]
model.seed_particles(center_coord, radius=3)
ax = model.geo_axes
# ax.quiver(model.lon_grid, model.lat_grid, model.current_dataset['u_clean'].isel(time=0), model.current_dataset['v'].isel(time=0))
ix = np.where(np.isfinite(model.current_dataset['u_clean'].isel(time=0)))
ixnan = np.where(np.isnan(model.current_dataset['u_clean'].isel(time=0)))
# ax.scatter(model.lon_grid[ix], model.lat_grid[ix], marker='.', s=10)
ax.scatter(model.lon_grid[ixnan], model.lat_grid[ixnan], marker='x', s=10, c='r')
#
for i in range(24*4*3):
try:
model.advect_particle()
except Exception as e:
print(e)
print(round(i/4,2),'hours have passed before breaking')
break
model.plot_particles(plot_type='animation')
|
StarcoderdataPython
|
36429
|
"""Defines the application configuration for the product application"""
from __future__ import unicode_literals
from django.apps import AppConfig
class ProductConfig(AppConfig):
"""Configuration for the product application"""
name = 'product'
label = 'product'
verbose_name = 'Product'
def ready(self):
"""Registers the product implementations with other applications."""
from job.configuration.data.data_file import DATA_FILE_STORE
from product.configuration.product_data_file import ProductDataFileStore
# Register product files for the data file store
DATA_FILE_STORE['DATA_FILE_STORE'] = ProductDataFileStore()
|
StarcoderdataPython
|
1626447
|
<reponame>HustCoderHu/tensoflow_intro_practice
import shutil as sh
from os.path import join as pj
import os.path as path
import os
import json
from pprint import pprint
from concurrent.futures import ProcessPoolExecutor
import var_config as cf
videoRoot = cf.videoRoot
cwd = cf.cwd
newVideoRoot = pj(cwd, 'all_data')
# newVideoRoot = r'/home/hzx/all_data_reduced'
def cpimg():
if not path.exists(newVideoRoot):
os.mkdir(newVideoRoot)
# videoIdxSet = [1]
videoIdxSet = list(range(1, 82))
videoIdxSet.extend(list(range(83, 111)))
framesPerVideo = 300
    # Iterable collection of (videoIdx, framesPerVideo) parameter tuples
paramsCollection = []
for videoIdx in videoIdxSet:
paramsCollection.append((videoIdx, framesPerVideo))
max_workers = os.cpu_count()
with ProcessPoolExecutor(max_workers) as executor:
        # keep the same default chunksize as multiprocessing.pool
# chunksize, extra = divmod(len(paramsCollection), executor._max_workers * 4)
# executor.map(worker_cpVideoFrames, paramsCollection, chunksize=chunksize)
for videoIdx in videoIdxSet:
executor.submit(worker_cpVideoFrames, videoIdx, framesPerVideo)
executor.shutdown()
print('--- worker_cpVideoFrames executor shutdown()')
return
# framesPerVideo: upper limit on frames taken from each video, sampled at equal intervals
def worker_cpVideoFrames(videoIdx, framesPerVideo):
print('start video: ' + str(videoIdx))
framesDir = pj(videoRoot, '{:0>3d}'.format(videoIdx))
flist = os.listdir(framesDir)
flist = [f for f in flist if path.splitext(f)[1] == '.jpg']
flist.sort()
nImg = len(flist)
interval, mod = divmod(nImg, framesPerVideo) # nImg // framesPerVideo
if mod != 0:
interval += 1
dstDir = pj(newVideoRoot, '{:0>3d}'.format(videoIdx))
if not path.exists(dstDir):
os.mkdir(dstDir)
    # if the number of images is below the specified limit, copy them all
if nImg <= framesPerVideo:
for f in flist:
src = pj(framesDir, f)
dst = pj(dstDir, f)
os.symlink(src, dst)
else :
for f in flist:
frameIdx = int(path.splitext(f)[0])
if frameIdx%interval != 0:
continue
src = pj(framesDir, f)
dst = pj(dstDir, f)
os.symlink(src, dst)
# os.copy(src, dst)
# os.copy2(src, dst)
print('finish video: ' + str(videoIdx))
return
def tst_softlink():
labeljson = r'/home/hzx/all_data/label.json'
dst = r'/home/hzx/all_data/softlink.json'
os.symlink(labeljson, dst)
with open(dst, 'r') as f:
dat = json.load(f)
pprint(dat)
if __name__ == '__main__':
# a = 3200
# b = 300
# c = a // b
# print(c) # 10
# print(37915 / 50)
# tst_softlink()
# worker_cpVideoFrames(2, 5)
cpimg()
|
StarcoderdataPython
|
1649990
|
# -*- encoding: utf-8 -*-
# Testing new amara.tree API
# Testing quick reference examples
import unittest
import cStringIO
import amara
from amara.lib import testsupport
from amara import tree, xml_print
from amara import bindery
XML = '<a x="1"><b>i</b><c>j<d/>k</c><b>l</b></a>'
#self.assertEqual(doc[u'a'], doc.xml_select(u'//a')[0]) #object representing a element (instance of a),
def check_bindery(self, doc):
a = doc.xml_elements.next() #element node
self.assertEqual(a, doc.a)
self.assertEqual(doc.a, doc.xml_select(u'//a')[0]) #object representing a element (instance of a),
#self.assertEqual(doc[u'a'], doc.xml_select(u'//a')[0]) #object representing a element (instance of a),
self.assertEqual(doc.a.xml_type, 'element') #object representing a element (instance of class a),
self.assertEqual(doc.a.b, doc.xml_select(u'//a/b')[0]) #first instance of b
self.assertEqual(doc.a.x, u'1') #u"1"
self.assertEqual(doc.a[u'x'], u'1') #u"1"
self.assertEqual(doc.a.b[0], doc.xml_select(u'//b')[0]) #first instance of b
self.assertEqual(doc.a.b[1], doc.xml_select(u'//b')[1]) #second instance of b
self.assertEqual(doc.a[u'b'][1], doc.xml_select(u'//b')[1]) #second instance of b
#iter(doc.a.b) #iterator over both instances of b
self.assertEqual(unicode(doc.a.b), u'i') #u"i"
self.assertEqual(unicode(doc.a.b[1]), u'l') #u"l"
self.assertEqual(unicode(doc.a), u"ijkl") #u"ijkl"
return
class Test_quick_reference(unittest.TestSuite):
class Test_basic_access(unittest.TestCase):
def test_basic_access(self):
doc = amara.parse(XML)
a = doc.xml_children[0]
self.assertEqual(doc.xml_type, tree.entity.xml_type)
self.assertEqual(len(doc.xml_children), 1)
self.assertEqual(a.xml_type, tree.element.xml_type)
self.assertEqual(doc.xml_children[0].xml_local, u'a')
self.assertEqual(a.xml_attributes[u'x'], u"1")
self.assertEqual(a.xml_local, u'a')
self.assertEqual(a.xml_qname, u'a')
self.assertEqual(a.xml_name, (None, u'a'))
self.assertEqual(len(a.xml_children), 3)
self.assertEqual(a.xml_parent, doc)
self.assertEqual(a.xml_attributes.items(), [((None, u'x'), u'1')])
class Test_basic_document_update(unittest.TestCase):
def test_update_tree(self):
doc = amara.parse(XML)
a = doc.xml_children[0]
#Add a new text node to a (--> last child)
new_text = amara.tree.text(u'New Content')
a.xml_append(new_text)
self.assertEqual(a.xml_children[-1], new_text)
new_text = amara.tree.text(u'New Content')
a.xml_insert(1, new_text)
self.assertEqual(a.xml_children[1], new_text)
        #Remove the last b child from a
num_kids = len(a.xml_children)
e1 = a.xml_select(u"./b")[-1]
e1.xml_parent.xml_remove(e1)
self.assertEqual(len(a.xml_children), num_kids-1)
return
class Test_bindery(unittest.TestCase):
XML = '<a x="1"><b>i</b><c>j<d/>k</c><b>l</b></a>'
def test_bindery(self):
doc = bindery.parse(self.XML) # bindery doc
check_bindery(self, doc)
return
class Test_bindery_document_update(unittest.TestCase):
XML = '<a x="1"><b>i</b><c>j<d/>k</c><b>l</b></a>'
def test_update_bindery(self):
doc = bindery.parse(self.XML)
#Add a new text node to a (--> last child)
doc.a.xml_append(u'New Content')
self.assertEqual(doc.a.xml_children[-1].xml_value, u'New Content')
new_elem = doc.xml_element_factory(None, u'spam')
doc.a.xml_append(new_elem)
self.assertEqual(doc.a.xml_children[-1], new_elem)
new_text = amara.tree.text(u'New Content')
doc.a.xml_insert(1, new_text)
self.assertEqual(doc.a.xml_children[1], new_text)
#Remove the last b child from a
num_kids = len(doc.a.xml_children)
#e1 = doc.a.b[-1].e
b1 = doc.a.b[1]
b1.xml_parent.xml_remove(b1)
self.assertEqual(len(doc.a.xml_children), num_kids-1)
doc = bindery.nodes.entity_base()
#doc.xml_clear() #Remove all children from a
doc.xml_append_fragment(self.XML)
check_bindery(self, doc)
return
class Test_namespaces(unittest.TestCase):
def setUp(self):
self.XML = '<n:a xmlns:n="urn:x-bogus1" n:x="1"><b xmlns="urn:x-bogus2">c</b></n:a>'
NS1 = u'urn:x-bogus1'
NS2 = u'urn:x-bogus2'
self.doc = amara.parse(self.XML)
return
def test_parsing_namespaces(self):
a = self.doc.xml_children[0] #element node
self.assertEqual(type(a), tree.element)
self.assertEqual(a.xml_attributes[(u'urn:x-bogus1', u'x')], u'1')
self.assertEqual(a.xml_local, u'a')
self.assertEqual(a.xml_prefix, u'n')
self.assertEqual(a.xml_qname, u'n:a')
self.assertEqual(a.xml_name, (u'urn:x-bogus1', u'a'))
self.assertEqual(a.xml_namespace, u'urn:x-bogus1')
return
class Test_xpath(unittest.TestCase):
def test_single_xpath(self):
doc = amara.parse(XML)
b_els = doc.xml_select(u"//b")
self.assertEqual(len(b_els), 2)
self.assertEqual(doc.xml_select(u"count(//b)"), 2)
self.assertEqual(doc.xml_select(u"/a/@x")[0].xml_value , u'1')
self.assertEqual(doc.xml_select(u'string(/a/b)'), u'i')
return
if __name__ == '__main__':
testsupport.test_main()
|
StarcoderdataPython
|
1704426
|
<filename>icc.modelstudio/src/icc/modelstudio/__init__.py
# Example package with a console entry point
from __future__ import print_function
def main(*args, **kwargs):
from isu.gui.gtk import main
main(*args, **kwargs)
|
StarcoderdataPython
|
3385086
|
<reponame>michuschenk/accelerator
from typing import Tuple
import numpy as np
from .base import BaseElement
from .utils import bent_element
class Dipole(BaseElement):
"""Dipole element"""
def __init__(self, rho: float, theta: float):
"""Dipole element.
Args:
rho: bending radius in meters.
theta: bending angle in radians.
Attributes:
rho: bending radius in meters.
theta: bending angle in radians.
m_h: element transfer matrix horizontal plane.
m_v: element transfer matrix vertical plane.
"""
super().__init__()
self.rho = rho
self.theta = theta
self.length = rho * theta
def transfer_matrix(self) -> Tuple[np.ndarray, np.ndarray]:
# horizontal
m_h = np.zeros((2, 2))
m_h[0][0] = np.cos(self.theta)
m_h[0][1] = self.rho * np.sin(self.theta)
m_h[1][0] = -(1 / self.rho) * np.sin(self.theta)
m_h[1][1] = np.cos(self.theta)
# vertical
m_v = np.zeros((2, 2))
m_v[0][0] = 1
m_v[0][1] = self.length
# m_v[1][0] = 0
m_v[1][1] = 1
return m_h, m_v
def _dxztheta_ds(self, theta: float, d_s: float) -> np.ndarray:
return bent_element(theta, d_s, self.rho)
def __repr__(self) -> str:
return f"Dipole(rho={self.rho:.4f}, theta={self.theta:.4f})"
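# A minimal usage sketch (assumed, not from the original package): build a dipole from
# a bending radius and angle, then inspect the horizontal and vertical transfer matrices.
#
# if __name__ == "__main__":
#     dipole = Dipole(rho=10.0, theta=np.pi / 8)
#     m_h, m_v = dipole.transfer_matrix()
#     print(dipole, dipole.length)
#     print(m_h)
#     print(m_v)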
|
StarcoderdataPython
|
1704160
|
import discord
from discord.ext import commands
from wwbot.util import fetch_guild, chunks
from wwbot.db import Player, Poll, PollMessage
from wwbot.permissions import is_participant
from wwbot import errors
def get_all_alive(bot):
guild = fetch_guild(bot)
alive = []
for p in Player.select():
m = guild.get_member(p.discord_id)
if is_participant(m):
alive.append(p)
return alive
async def create_poll(ch, options):
# options should have properties discord_id and emoji
poll = Poll.create(channel=ch.id)
await ch.send("Poll #`{}`".format(poll.id))
msgids = []
for chunk in chunks(options, 20):
# chunk is a list of 20 Players from db
        pollmsg = await ch.send("\n".join("{0.emoji} - <@{0.discord_id}>".format(opt) for opt in chunk))
msgids.append({"discord_id":pollmsg.id,"poll":poll})
for p in chunk:
await pollmsg.add_reaction(p.emoji)
PollMessage.insert_many(msgids).execute()
async def get_raw_reactions(bot, poll, me):
# returns a dict of emoji to members, with self removed
# deletes reactions as they are collected
reactions = {}
channel = bot.get_channel(poll.channel)
for dbmsg in poll.messages:
msg = await channel.get_message(dbmsg.discord_id)
for reaction in msg.reactions:
users = await reaction.users().flatten()
users.remove(me)
reactions[reaction.emoji] = users
await msg.clear_reactions()
return reactions
async def process_reactions(reactions):
# currently this just checks that no one has voted twice.
# returns a dict of processed reactions and a list of error messages
errs = []
double_voted = set()
all_users = set() # set of user ids
for emoji, users in reactions.items():
for user in users:
if user.id in all_users:
double_voted.add(user)
all_users.update(set(u.id for u in users))
for dv in double_voted:
errs.append("{} has voted more than once! They have been disqualified.".format(dv.mention))
for emoji, users in reactions.items():
users.remove(dv)
return reactions, errs
async def format_poll_output(bot, reactions, errs):
guild = fetch_guild(bot)
res_ret = []
for emoji, people in reactions.items():
option_player_id = Player.get(emoji=emoji).discord_id
option_player_mention = guild.get_member(option_player_id).mention
if len(people) == 0:
voters = "*No one!*"
else:
voters = ", ".join(p.mention for p in people)
res_ret.append(
"({count}) {opt} : {voters}".format(
opt=option_player_mention, voters=voters, count=len(people)
)
)
return ("Results:\n" +
"\n".join(res_ret) +
"\n" +
"\n".join(errs))
async def close_poll(bot, pollid):
guild = fetch_guild(bot)
poll = Poll.get_or_none(id=pollid)
if poll is None:
raise errors.NoSuchPoll()
# get emojis to members, with self removed
reactions = await get_raw_reactions(bot, poll, guild.me)
# process the reactions
processed_reactions, errs = await process_reactions(reactions)
# format output
s = await format_poll_output(bot, processed_reactions, errs)
# delete the poll
poll.delete_instance(recursive=True)
return s
|
StarcoderdataPython
|
3322431
|
<reponame>IgorQueiroz32/curso_meigaron_pyhton_ao_ds<filename>House Rocket Company/main.py
# ----------------------------------------
# Libraries
# ---------------------------------------
import pandas as pd
import numpy as np
from geopy.geocoders import Nominatim
import ipywidgets as widgets
from ipywidgets import fixed
import plotly.express as px
from matplotlib import gridspec
from matplotlib import pyplot as plt
import ipywidgets as ipywidgets
from ipywidgets import Box
import streamlit as st
# ----------------------------------------
# Functions
# ---------------------------------------
def show_dtypes(data):
print(data.dtypes)
return None
def show_dimensions(data):
print('Number of rows: {}'.format(data.shape[0]))
print('Number of columns: {}'.format(data.shape[1]), end='\n\n')
return None
def collect_geodata(data, cols):
# Initialize Nominatim API
geolocator = Nominatim(user_agent='geopiExercises')
# Creating empty rows
data.loc[:, cols[0]] = 'NA'
data.loc[:, cols[1]] = 'NA'
data.loc[:, cols[2]] = 'NA'
data.loc[:, cols[3]] = 'NA'
data.loc[:, cols[4]] = 'NA'
data.loc[:, cols[5]] = 'NA'
data.loc[:, cols[6]] = 'NA'
data.loc[:, cols[7]] = 'NA'
data.loc[:, cols[8]] = 'NA'
for i in range(len(data)):
print('Loop: {}/{}'.format(i, len(data)))
# make query
        query = str(data.loc[i, 'lat']) + ',' + str(data.loc[i, 'long'])  # concatenating the two columns
# API Request
response = geolocator.reverse(
            query)  # request to the geopy API with the query above, which resolves the address
        # for every lat/long pair
# populate data
if cols[0] in response.raw[
            'address']:  # if this key exists inside 'address', fill the column from the response; otherwise skip it
data.loc[i, 'house_number'] = response.raw['address'][cols[0]]
if cols[1] in response.raw['address']:
data.loc[i, 'road'] = response.raw['address'][cols[1]]
if cols[2] in response.raw['address']:
data.loc[i, 'neighbourhood'] = response.raw['address'][cols[2]]
if cols[3] in response.raw['address']:
data.loc[i, 'city'] = response.raw['address'][cols[3]]
if cols[4] in response.raw['address']:
data.loc[i, 'county'] = response.raw['address'][cols[4]]
if cols[5] in response.raw['address']:
data.loc[i, 'state'] = response.raw['address'][cols[5]]
if cols[6] in response.raw['address']:
data.loc[i, 'country'] = response.raw['address'][cols[6]]
if cols[7] in response.raw:
data.loc[i, 'place_id'] = response.raw[cols[7]]
if cols[8] in response.raw:
data.loc[i, 'osm_type'] = response.raw[cols[8]]
return data
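# A hedged usage sketch (not in the original script): collect_geodata() expects the nine
# column names in exactly this order, because cols[0]..cols[8] are mapped to specific
# keys of the Nominatim response above; the CSV path is a placeholder.
#
# cols = ['house_number', 'road', 'neighbourhood', 'city', 'county',
#         'state', 'country', 'place_id', 'osm_type']
# df = pd.read_csv('kc_house_data.csv')  # any frame with 'lat' and 'long' columns
# df = collect_geodata(df.head(20).copy(), cols)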
|
StarcoderdataPython
|
59069
|
#!/usr/bin/env python
import argparse, sys, math
# covalent radius to decide a bond. bond length: r1+r2
radius = {" H": 0.25,
" N": 0.65,
" C": 0.70,
" O": 0.60,
" P": 1.00,
" S": 1.00,
"NA": 1.80,
"CL": 1.00
}
elebd_radius = {" N": 1.5,
" H": 1.0,
" C": 1.7,
" O": 1.4,
" P": 1.85,
" S": 1.85,
" X": 1.85
}
vdw_parm = {" C": (2.000, 0.150),
" H": (1.000, 0.020),
" O": (1.600, 0.200),
" N": (1.750, 0.160),
" S": (2.000, 0.200),
" P": (2.000, 0.200),
" X": (2.000, 0.173)
}
sp_orbitals = [" C", " N", " O", " P", " S"]
spd_orbitals = ["FE"]
tolerance_scale = 1.3 # (r1+r2) * this scale gives the bond upper limit, value between 1.2 to 1.5 recommended
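# Worked example (illustrative): with the covalent radii above, a C-H pair is treated as
# bonded when its distance is below (0.70 + 0.25) * 1.3 = 1.235, and a C-C pair below
# (0.70 + 0.70) * 1.3 = 1.82, in the same length units as the PDB coordinates (Angstrom).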
def vector_normalize(v):
vn = ()
d = math.sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2])
if d < 1.0e-20:
vn = (0.0, 0.0, 0.0)
else:
vn = (v[0] / d, v[1] / d, v[2] / d)
return vn
def avv(v1, v2):
v1 = vector_normalize(v1)
v2 = vector_normalize(v2)
t = v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2];
if t > 1.0:
t = 1.0
elif t < -1.0:
t = -1.0
return math.acos(t)
def dvv(v1, v2):
dx = v1[0] - v2[0]
dy = v1[1] - v2[1]
dz = v1[2] - v2[2]
d2 = dx * dx + dy * dy + dz * dz
return math.sqrt(d2)
def vector_vminusv(v1, v2):
z = (v1[0] - v2[0], v1[1] - v2[1], v1[2] - v2[2])
return z
class Atom():
def __init__(self):
self.name = ""
self.element = ""
self.orbital = ""
self.xyz = ()
self.connect = []
return
def loadline(self, line):
self.name = line[12:16]
self.element = line[76:78]
self.xyz = (float(line[30:38]), float(line[38:46]), float(line[46:54]))
return
class Pdb2ftpl:
def __init__(self, arguments):
if len(arguments.c) != 2:
print("The conformer type ID has to be 2 characters, such as 01, 02, -1, +1, DM")
return
else:
self.confid = arguments.c
self.resname = []
self.atoms = self.file2atoms(arguments.pdbfile[0])
if len(self.resname) != 1:
print("%d residue names detected. The input pdb file can only have one residue." % len(self.resname))
sys.exit()
if arguments.d:
records = self.records_from_distance()
else:
records = self.records_from_file(arguments.pdbfile[0])
if not records:
records = self.records_from_distance()
# find connected atoms
self.connect_from_records(records)
# find hybrid type from connected atoms and bond angle
self.make_bond_orbital()
return
def file2atoms(self, fname):
atoms = []
lines = open(fname).readlines()
for line in lines:
if len(line) < 54:
continue
if line[:6] == "ATOM " or line[:6] == "HETATM":
res = line[17:20]
if res not in self.resname:
self.resname.append(res)
atom = Atom()
atom.loadline(line)
atoms.append(atom)
return atoms
def records_from_file(self, fname):
connect = {}
lines = open(fname).readlines()
for line in lines:
if len(line) < 11:
continue
if line[:6] == "CONECT":
fields = line.split()
key = int(fields[1]) - 1
value = [int(x) - 1 for x in fields[2:]]
connect[key] = value
return connect
def records_from_distance(self):
connect = {}
n = len(self.atoms)
for i in range(n - 1):
for j in range(i + 1, n):
d = dvv(self.atoms[i].xyz, self.atoms[j].xyz)
bond_distance = radius[self.atoms[i].element] + radius[self.atoms[j].element]
if d < bond_distance * tolerance_scale:
if i in connect:
connect[i].append(j)
else:
connect[i] = [j]
if j in connect:
connect[j].append(i)
else:
connect[j] = [i]
return connect
def connect_from_records(self, records):
for i in range(len(self.atoms)):
atom = self.atoms[i]
if i in records.keys():
# print(i, records[i])
atom.connect = [self.atoms[k] for k in records[i]]
else:
atom.connect = []
return
def make_bond_orbital(self):
for atom in self.atoms:
if atom.element == " H":
atom.orbital = "s"
elif atom.element in sp_orbitals: # sp, sp2, sp3
if len(atom.connect) == 4: # sp3
atom.orbital = "sp3"
elif len(atom.connect) == 3 or len(atom.connect) == 2:
# bond angle: sp if 180, sp2 if 120, sp3 if 109
v1 = vector_vminusv(atom.connect[0].xyz, atom.xyz)
v2 = vector_vminusv(atom.connect[1].xyz, atom.xyz)
alpha = avv(v1, v2) / math.pi * 180
# print(atom.connect[0].name, atom.name, atom.connect[1].name, alpha)
if 104 < alpha < 115:
atom.orbital = "sp3"
elif 115 < alpha < 150:
atom.orbital = "sp2"
elif 150 < alpha < 180.1:
atom.orbital = "sp"
else:
print("%s - %s - %s bond angle = %.3f, can not interpret" % (
atom.connect[0].name, atom.name, atom.connect[1].name, alpha))
sys.exit()
elif len(atom.connect) == 1:
# doesn't matter in the sense of geometry, but O on CH3-(CO)-CH3 is sp2 instead of sp3.
atom.orbital = "sp3"
elif len(atom.connect) == 0:
atom.orbital = "ion"
else:
atom.orbital = "udf"
return
def print_conflist(self):
print("# Conformer definition")
if self.confid == "BK":
print("CONFLIST, %s: %s%s" % (self.resname[0], self.resname[0], self.confid))
else:
print("CONFLIST, %s: %sBK, %s%s" % (self.resname[0], self.resname[0], self.resname[0], self.confid))
def print_connect(self):
print("# ATOM name and bonds")
for atom in self.atoms:
connected_atoms = ",".join(["\"%s\"" % x.name for x in atom.connect])
print("CONNECT, \"%s\", %s%s: %4s, %s" % (atom.name, self.resname[0], self.confid, atom.orbital, connected_atoms))
def print_charge(self):
print("# ATOM charges")
for atom in self.atoms:
print("CHARGE, %s%s, \"%s\": to_be_filled" % (self.resname[0], self.confid, atom.name))
def print_radius(self):
print("# Atom radius, dielelctric boundary radius, VDW radius, and energy well depth")
for atom in self.atoms:
if atom.element in elebd_radius:
rbd = elebd_radius[atom.element]
else:
rbd = elebd_radius[" X"]
if atom.element in vdw_parm:
rvdw, well = vdw_parm[atom.element]
else:
rvdw, well = vdw_parm[" X"]
print("RADIUS, %s%s, \"%s\": %6.3f, %6.3f, %6.3f" % (self.resname[0], self.confid, atom.name, rbd, rvdw, well))
def print_conformer(self):
print("# Conformer parameters that appear in head3.lst: ne, Em0, nH, pKa0, rxn")
print("CONFORMER, %s%s: Em0=0.0, pKa0=0.00, ne=0, nH=0, rxn02= 0, rxn04= 0, rxn08= 0" % (self.resname[0], self.confid))
if __name__ == "__main__":
# Get the command arguments
helpmsg = "Create a ftpl template file from a cofactor PDB file. The atoms in the input files are considered as one molecule."
parser = argparse.ArgumentParser(description=helpmsg)
parser.add_argument("-d", default=False, help="Ignore CONNECT, use distance to determine bond", action="store_true")
parser.add_argument("-c", metavar="conformer type", default="01", help="Specify a 2-character conformer type ID, default 01")
parser.add_argument("pdbfile", metavar="pdbfile", nargs=1)
args = parser.parse_args()
ftpl = Pdb2ftpl(args)
ftpl.print_conflist()
print()
ftpl.print_connect()
print()
ftpl.print_charge()
print()
ftpl.print_radius()
print()
ftpl.print_conformer()
|
StarcoderdataPython
|
3225246
|
<reponame>saisankargochhayat/doot
from tornado import websocket, web, ioloop
import os
path=os.getcwd()
path=path.strip('Lettertrainer') + 'ML'
import sys
sys.path.append(path)
import tornado.escape
from tornado import gen
import tornado.httpserver
import tornado.options
from sklearn.neighbors import KNeighborsClassifier
from sklearn import tree
from sklearn.linear_model import SGDClassifier
from sklearn import svm
import collections
# from data_loader import data_loader
import json
import pprint
import pandas
from sklearn import svm
import numpy as np
from tornado.escape import json_decode
from tornado.escape import json_encode
from feature_extracter_live import *
from sklearn import preprocessing
from helper import svm,knn,dtree,sgd,lda,qda
from textblob import TextBlob
# define("port", default=8080, help="run on the given port", type=int)
data = []
labels = []
dataFrame = pandas.read_csv('../CSV_Data/server_dataset.csv')
svm_model , svm_scaler = svm.get_model(dataFrame)
knn_model , knn_scaler = knn.get_model(dataFrame)
sgd_model , sgd_scaler = sgd.get_model(dataFrame)
dtree_model , dtree_scaler = dtree.get_model(dataFrame)
lda_model , lda_scaler = lda.get_model(dataFrame)
qda_model , qda_scaler = qda.get_model(dataFrame)
print("Trained")
class HomeHandler(web.RequestHandler):
def get(self):
self.render("static/index.html")
class Words(web.RequestHandler):
def get(self):
self.render("static/words.html")
class Letter(web.RequestHandler):
def get(self):
self.render("static/letter.html")
class Visualizer1(web.RequestHandler):
def get(self):
self.render("static/visualizer1.html")
class Predictor(web.RequestHandler):
def get(self):
self.render("static/predictor.html")
class Visualizer(web.RequestHandler):
def get(self):
self.render("static/visualizer.html")
class Predict(websocket.WebSocketHandler):
def check_origin(self, origin):
return True
def open(self):
print("WebSocket opened")
def on_message(self, message):
msg = json.loads(message)
test=extract_array(msg)
test = np.array(test)
test = test.reshape(1,-1)
predictions = {}
vote = {}
predictions['svm'] = str(svm_model.predict(svm_scaler.transform(test))[0])
if predictions['svm'] in vote:
vote[predictions['svm']] = vote[predictions['svm']]+1
else:
vote[predictions['svm']] = 1
predictions['knn'] = str(knn_model.predict(knn_scaler.transform(test))[0])
if predictions['knn'] in vote:
vote[predictions['knn']] = vote[predictions['knn']]+1
else:
vote[predictions['knn']] = 1
predictions['lda'] = str(lda_model.predict(lda_scaler.transform(test))[0])
if predictions['lda'] in vote:
vote[predictions['lda']] = vote[predictions['lda']]+1
else:
vote[predictions['lda']] = 1
predictions['qda'] = str(qda_model.predict(qda_scaler.transform(test))[0])
if predictions['qda'] in vote:
vote[predictions['qda']] = vote[predictions['qda']]+1
else:
vote[predictions['qda']] = 1
predictions['sgd'] = str(sgd_model.predict(sgd_scaler.transform(test))[0])
if predictions['sgd'] in vote:
vote[predictions['sgd']] = vote[predictions['sgd']]+1
else:
vote[predictions['sgd']] = 1
predictions['dtree'] = str(dtree_model.predict(dtree_scaler.transform(test))[0])
if predictions['dtree'] in vote:
vote[predictions['dtree']] = vote[predictions['dtree']]+1
else:
vote[predictions['dtree']] = 1
count = collections.Counter(vote)
predictions['max_vote'] = count.most_common(1)[0][0]
letter = predictions['max_vote']
self.write_message(letter)
def on_close(self):
print("WebSocket closed")
# class normal_user(websocket.WebSocketHandler):
# def check_origin(self, origin):
# return True
app = web.Application([
(r'/assets/(.*)', web.StaticFileHandler, {'path': 'static/assets/'}),
(r'/static/(.*)', web.StaticFileHandler, {'path': 'static/'}),
(r"/",HomeHandler),
(r"/predictor",Predictor),
(r"/visualizer",Visualizer),
(r"/visualizer1",Visualizer1),
(r"/words",Words),
(r"/letter",Letter),
(r"/ws",Predict)
])
if __name__ == '__main__':
app.listen(3000)
print("Listening at 127.0.0.1:3000")
ioloop.IOLoop.instance().start()
|
StarcoderdataPython
|
3286075
|
from django.shortcuts import render, get_object_or_404, redirect
from .models import User, Vehicle
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic import ListView
from .forms import VehicleForm, OwnerForm
def get_owner_by_id(request, owner_id):
owners = [get_object_or_404(User, pk=owner_id)]
return render(request, 'owner.html', {"owners": owners})
def get_owners(request):
owners = User.objects.all()
return render(request, 'owner.html', {"owners": owners})
def create_owner(request):
form = OwnerForm(request.POST or None)
if form.is_valid():
form.save()
return redirect('/owners/all')
return render(request, "owner_form.html", {"form": form})
class VehicleView(ListView):
model = Vehicle
template_name = 'vehicle.html'
def get_queryset(self):
param = self.request.GET.get('vehicle')
if not param:
return self.model.objects.all()
else:
try:
vehicle_id = int(param)
                queryset = self.model.objects.filter(pk=vehicle_id)
except Exception:
queryset = self.model.objects.none()
return queryset
class VehicleDelete(DeleteView):
model = Vehicle
template_name = 'delete.html'
success_url = '/vehicle/'
class VehicleUpdate(UpdateView):
template_name = 'vehicle_form.html'
model = Vehicle
form_class = VehicleForm
success_url = '/vehicle/'
class VehicleCreate(CreateView):
template_name = 'vehicle_form.html'
model = Vehicle
form_class = VehicleForm
success_url = '/vehicle/'
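# A hedged sketch (not part of the original app) of how these views might be wired in a
# urls.py; the route prefixes and names below are assumptions, chosen to match the
# redirect('/owners/all') and success_url = '/vehicle/' used above.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('owners/all/', views.get_owners, name='owners'),
#     path('owners/<int:owner_id>/', views.get_owner_by_id, name='owner-detail'),
#     path('owners/new/', views.create_owner, name='owner-create'),
#     path('vehicle/', views.VehicleView.as_view(), name='vehicle-list'),
#     path('vehicle/new/', views.VehicleCreate.as_view(), name='vehicle-create'),
#     path('vehicle/<int:pk>/edit/', views.VehicleUpdate.as_view(), name='vehicle-update'),
#     path('vehicle/<int:pk>/delete/', views.VehicleDelete.as_view(), name='vehicle-delete'),
# ]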
|
StarcoderdataPython
|
184174
|
from test_plus.test import TestCase
from referrals import utils
from uuid import uuid4
class TestValidateUUID4(TestCase):
def setUp(self):
self.valid_uuid = str(uuid4())
self.invalid_uuid = 'FAKE_STRING'
def test_with_valid_uuid4(self):
self.assertTrue(utils.validate_uuid4(self.valid_uuid))
def test_with_invalid_uuid4(self):
self.assertFalse(utils.validate_uuid4(self.invalid_uuid))
def test_without_uuid4(self):
self.assertFalse(utils.validate_uuid4(''))
|
StarcoderdataPython
|
3275016
|
# preprocess.data.loaders
# Dataset loading utilities and primary API to the datasets module.
#
# Author: <NAME>
# Created: Thu Apr 12 7:23:25 2020 -0400
#
# Copyright (C) 2020 BSD 3-Clause License
# For license information, see LICENSE.txt
#
# ID: loaders.py [] <EMAIL> $
"""
Dataset loading utilities and primary API to the datasets module.
"""
##########################################################################
## Imports
##########################################################################
import os
__all__ = [
"load_freeculture",
"load_culturalibre",
"freeculture_pdfpath",
"tnlp1_path",
"tnlp1h_path",
"test_text_path",
"test_texth_path",
]
DATASETS = {
"load_freeculture": "Free_Culture.txt",
"load_culturalibre": "Cultura_Libre.txt",
"freeculture_pdfpath": "Free_Culture.pdf",
"tnlp1_path": "srctnlp1.txt",
"tnlp1h_path": "srctnlp1_human.txt",
"test_text_path": "test_text.txt",
"test_texth_path": "test_text_human.txt",
}
##########################################################################
## Specific loading utilities
##########################################################################
def _load_dataset(name, data_home=None, return_dataset=False):
"""
Load a dataset by name and return specified format.
"""
title = DATASETS[name]
txt = os.path.join(os.path.dirname(__file__), 'books', title)
with open(txt) as doc:
data = doc.read()
if return_dataset:
return data
return data
def load_freeculture():
"""Load the book Free Culture.
"""
return _load_dataset('load_freeculture')
def load_culturalibre():
"""Load the book Cultura Libre.
"""
return _load_dataset('load_culturalibre')
def freeculture_pdfpath():
path = os.path.join(os.path.dirname(__file__), 'books', DATASETS['freeculture_pdfpath'])
return path
def tnlp1_path():
path = os.path.join(os.path.dirname(__file__), 'short', DATASETS['tnlp1_path'])
return path
def tnlp1h_path():
path = os.path.join(os.path.dirname(__file__), 'short', DATASETS['tnlp1h_path'])
return path
def test_text_path():
path = os.path.join(os.path.dirname(__file__), 'short', DATASETS['test_text_path'])
return path
def test_texth_path():
path = os.path.join(os.path.dirname(__file__), 'short', DATASETS['test_texth_path'])
return path
|
StarcoderdataPython
|
156389
|
<gh_stars>0
from .urls import urlpatterns
from .middleware import AfterRequestMiddleware
|
StarcoderdataPython
|
1678084
|
<reponame>SimonBoothroyd/nonbonded
import os
from typing import List, Type, Union
import click
from nonbonded.cli.utilities import generate_click_command
from nonbonded.library.models.projects import Benchmark, Optimization
from nonbonded.library.models.results import BenchmarkResult, OptimizationResult
def _upload_options() -> List[click.option]:
return []
def upload_command(model_type: Type[Union[Optimization, Benchmark]]):
result_type = (
OptimizationResult if issubclass(model_type, Optimization) else BenchmarkResult
)
def base_function(**_):
results_name = (
"optimization" if issubclass(model_type, Optimization) else "benchmark"
)
results_path = os.path.join("analysis", f"{results_name}-results.json")
results = result_type.parse_file(results_path).upload()
with open(results_path, "w") as file:
file.write(results.json())
model_string = (
"an optimization" if issubclass(model_type, Optimization) else "a benchmark"
)
return generate_click_command(
click.command(
"upload",
help=f"Upload the analysed results of {model_string} to the REST API.",
),
[*_upload_options()],
base_function,
)
|
StarcoderdataPython
|
3289539
|
# =================================================================
#
# Authors: <NAME> <<EMAIL>>
#
# Copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
import models
import util
from plugin import Plugin
from factory import Factory
LOGGER = logging.getLogger(__name__)
def list_resources(resource_type=None, query=None, tag=None):
"""return all resources"""
reliability_values = []
first_run = None
last_run = None
response = {
'total': 0,
'success': {
'number': 0,
'percentage': 0
},
'fail': {
'number': 0,
'percentage': 0
},
'first_run': None,
'last_run': None,
'reliability': 0
}
filters = ()
if resource_type is not None:
filters = filters + ("resource_type = '%s'" % resource_type,)
if query is not None:
field, term = get_query_field_term(query)
filters = filters + (field.ilike(term),)
if tag is not None:
tag_filter = (models.Resource.tags.any(models.Tag.name.in_([tag])),)
filters = filters + tag_filter
response['resources'] = models.Resource.query.filter(*filters).all()
response['total'] = len(response['resources'])
response['success']['percentage'] = 0
response['fail']['percentage'] = 0
response['reliability'] = 0
for resource in response['resources']:
if resource.runs.count() > 0:
# View should work even without Runs
            if first_run is None or resource.first_run < first_run:
                first_run = resource.first_run
            if last_run is None or resource.last_run < last_run:
                last_run = resource.last_run
response['first_run'] = first_run
response['last_run'] = last_run
if resource.last_run.success:
response['success']['number'] += 1
else:
response['fail']['number'] += 1
reliability_values.append(resource.reliability)
response['success']['percentage'] = int(round(util.percentage(
response['success']['number'], response['total'])))
response['fail']['percentage'] = 100 - response['success']['percentage']
response['reliability'] = round(util.average(reliability_values), 1)
return response
def get_resource_by_id(identifier):
"""return one resource by identifier"""
return models.Resource.query.filter_by(
identifier=identifier).first_or_404()
def get_run_by_id(identifier):
"""return one Run by identifier"""
return models.Run.query.filter_by(
identifier=identifier).first_or_404()
def get_run_by_resource_id(identifier):
"""return one Run by identifier"""
return models.Run.query.filter_by(
resource_identifier=identifier)
def get_resource_types_counts():
"""return frequency counts of registered resource types"""
mrt = models.get_resource_types_counts()
return {
'counts': mrt[0],
'total': mrt[1]
}
def get_health_summary():
"""return summary of all runs"""
# For overall reliability
total_runs = models.get_runs_count()
failed_runs = models.get_runs_status_count(False)
success_runs = total_runs - failed_runs
# Resources status derived from last N runs
total_resources = models.get_resources_count()
last_runs = models.get_last_run_per_resource()
failed = 0
failed_resources = []
for run in last_runs:
if not run.success:
failed_resources.append(
get_resource_by_id(run.resource_identifier))
failed += 1
success = total_resources - failed
failed_percentage = int(round(
util.percentage(failed, total_resources)))
success_percentage = 100 - failed_percentage
response = {
'total': total_resources,
'success': {
'number': success,
'percentage': success_percentage
},
'fail': {
'number': failed,
'percentage': failed_percentage
},
'first_run': models.get_first_run(),
'last_run': models.get_last_run(),
'reliability': round(util.percentage(success_runs, total_runs), 1),
'failed_resources': failed_resources
}
return response
def get_tag_counts():
"""return all tag counts"""
return models.get_tag_counts()
def get_query_field_term(query):
"""determine query context from q="""
field = models.Resource.title # default
try:
facet, term = query.split(':')
term2 = '%%%s%%' % term # default like
if facet == 'url':
field = models.Resource.url
elif facet == 'title':
field = models.Resource.title
elif facet == 'site':
field = models.Resource.url
term2 = '%%%s/%%' % term
elif facet == 'owner':
field = models.Resource.owner_identifier
term = term2
except ValueError: # default search
term = '%%%s%%' % query
return [field, term]
def get_probes_avail(resource_type=None, resource=None):
"""
Get all available Probes with their attributes.
:param resource_type: optional resource type e.g. OGC:WMS
:param resource: optional Resource instance
:return:
"""
# Assume no resource type
filters = None
if resource_type:
filters = [('RESOURCE_TYPE', resource_type),
('RESOURCE_TYPE', '*:*')]
probe_classes = Plugin.get_plugins('GeoHealthCheck.probe.Probe', filters)
result = dict()
for probe_class in probe_classes:
probe = Factory.create_obj(probe_class)
if probe:
if resource:
try:
probe.expand_params(resource)
except Exception as err:
msg = 'Cannot expand plugin vars for %s err=%s' \
% (probe_class, str(err))
LOGGER.warning(msg)
result[probe_class] = probe.get_plugin_vars()
return result
|
StarcoderdataPython
|
8913
|
<reponame>Sette/autokeras
import functools
import pickle
import kerastuner
import tensorflow as tf
from tensorflow.python.util import nest
from autokeras.hypermodel import base
from autokeras.hypermodel import compiler
class Graph(kerastuner.engine.stateful.Stateful):
"""A graph consists of connected Blocks, HyperBlocks, Preprocessors or Heads.
# Arguments
inputs: A list of input node(s) for the Graph.
outputs: A list of output node(s) for the Graph.
override_hps: A list of HyperParameters. The predefined HyperParameters that
will override the space of the Hyperparameters defined in the Hypermodels
with the same names.
"""
def __init__(self, inputs, outputs, override_hps=None):
super().__init__()
self.inputs = nest.flatten(inputs)
self.outputs = nest.flatten(outputs)
self._node_to_id = {}
self._nodes = []
self.blocks = []
self._block_to_id = {}
self._build_network()
self.override_hps = override_hps or []
def compile(self, func):
"""Share the information between blocks by calling functions in compiler.
# Arguments
func: A dictionary. The keys are the block classes. The values are
corresponding compile functions.
"""
for block in self.blocks:
if block.__class__ in func:
func[block.__class__](block)
def _register_hps(self, hp):
"""Register the override HyperParameters for current HyperParameters."""
for single_hp in self.override_hps:
name = single_hp.name
if name not in hp.values:
hp.register(single_hp.name,
single_hp.__class__.__name__,
single_hp.get_config())
hp.values[name] = single_hp.default
def _build_network(self):
self._node_to_id = {}
# Recursively find all the interested nodes.
for input_node in self.inputs:
self._search_network(input_node, self.outputs, set(), set())
self._nodes = sorted(list(self._node_to_id.keys()),
key=lambda x: self._node_to_id[x])
for node in (self.inputs + self.outputs):
if node not in self._node_to_id:
raise ValueError('Inputs and outputs not connected.')
# Find the blocks.
blocks = []
for input_node in self._nodes:
for block in input_node.out_blocks:
if any([output_node in self._node_to_id
for output_node in block.outputs]) and block not in blocks:
blocks.append(block)
# Check if all the inputs of the blocks are set as inputs.
for block in blocks:
for input_node in block.inputs:
if input_node not in self._node_to_id:
raise ValueError('A required input is missing for HyperModel '
'{name}.'.format(name=block.name))
# Calculate the in degree of all the nodes
in_degree = [0] * len(self._nodes)
for node_id, node in enumerate(self._nodes):
in_degree[node_id] = len([
block for block in node.in_blocks if block in blocks])
# Add the blocks in topological order.
self.blocks = []
self._block_to_id = {}
while len(blocks) != 0:
new_added = []
# Collect blocks with in degree 0.
for block in blocks:
if any([in_degree[self._node_to_id[node]]
for node in block.inputs]):
continue
new_added.append(block)
# Remove the collected blocks from blocks.
for block in new_added:
blocks.remove(block)
for block in new_added:
# Add the collected blocks to the AutoModel.
self._add_block(block)
# Decrease the in degree of the output nodes.
for output_node in block.outputs:
if output_node not in self._node_to_id:
continue
output_node_id = self._node_to_id[output_node]
in_degree[output_node_id] -= 1
def _search_network(self, input_node, outputs, in_stack_nodes,
visited_nodes):
visited_nodes.add(input_node)
in_stack_nodes.add(input_node)
outputs_reached = False
if input_node in outputs:
outputs_reached = True
for block in input_node.out_blocks:
for output_node in block.outputs:
if output_node in in_stack_nodes:
raise ValueError('The network has a cycle.')
if output_node not in visited_nodes:
self._search_network(output_node, outputs, in_stack_nodes,
visited_nodes)
if output_node in self._node_to_id.keys():
outputs_reached = True
if outputs_reached:
self._add_node(input_node)
in_stack_nodes.remove(input_node)
def _add_block(self, block):
if block not in self.blocks:
block_id = len(self.blocks)
self._block_to_id[block] = block_id
self.blocks.append(block)
def _add_node(self, input_node):
if input_node not in self._node_to_id:
self._node_to_id[input_node] = len(self._node_to_id)
def _get_block(self, name):
for block in self.blocks:
if block.name == name:
return block
raise ValueError('Cannot find block named {name}.'.format(name=name))
def get_state(self):
# TODO: Include everything including the graph structure.
block_state = {str(block_id): block.get_state()
for block_id, block in enumerate(self.blocks)}
node_state = {str(node_id): node.get_state()
for node_id, node in enumerate(self._nodes)}
return {'blocks': block_state, 'nodes': node_state}
def set_state(self, state):
# TODO: Include everything including the graph structure.
block_state = state['blocks']
node_state = state['nodes']
for block_id, block in enumerate(self.blocks):
block.set_state(block_state[str(block_id)])
for node_id, node in enumerate(self._nodes):
node.set_state(node_state[str(node_id)])
def save(self, fname):
state = self.get_state()
with tf.io.gfile.GFile(fname, 'wb') as f:
pickle.dump(state, f)
return str(fname)
def reload(self, fname):
with tf.io.gfile.GFile(fname, 'rb') as f:
state = pickle.load(f)
self.set_state(state)
def build(self, hp):
self._register_hps(hp)
class PlainGraph(Graph):
"""A graph built from a HyperGraph to produce KerasGraph and PreprocessGraph.
A PlainGraph does not contain HyperBlock. HyperGraph's hyper_build function
returns an instance of PlainGraph, which can be directly built into a KerasGraph
and a PreprocessGraph.
# Arguments
inputs: A list of input node(s) for the PlainGraph.
outputs: A list of output node(s) for the PlainGraph.
"""
def __init__(self, inputs, outputs, **kwargs):
self._keras_model_inputs = []
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
def _build_network(self):
super()._build_network()
# Find the model input nodes
for node in self._nodes:
if self._is_keras_model_inputs(node):
self._keras_model_inputs.append(node)
self._keras_model_inputs = sorted(self._keras_model_inputs,
key=lambda x: self._node_to_id[x])
@staticmethod
def _is_keras_model_inputs(node):
for block in node.in_blocks:
if not isinstance(block, base.Preprocessor):
return False
for block in node.out_blocks:
if not isinstance(block, base.Preprocessor):
return True
return False
def build_keras_graph(self):
return KerasGraph(self._keras_model_inputs,
self.outputs,
override_hps=self.override_hps)
def build_preprocess_graph(self):
return PreprocessGraph(self.inputs,
self._keras_model_inputs,
override_hps=self.override_hps)
class KerasGraph(Graph, kerastuner.HyperModel):
"""A graph and HyperModel to be built into a Keras model."""
def build(self, hp):
"""Build the HyperModel into a Keras Model."""
super().build(hp)
self.compile(compiler.AFTER)
real_nodes = {}
for input_node in self.inputs:
node_id = self._node_to_id[input_node]
real_nodes[node_id] = input_node.build()
for block in self.blocks:
if isinstance(block, base.Preprocessor):
continue
temp_inputs = [real_nodes[self._node_to_id[input_node]]
for input_node in block.inputs]
outputs = block.build(hp, inputs=temp_inputs)
outputs = nest.flatten(outputs)
for output_node, real_output_node in zip(block.outputs, outputs):
real_nodes[self._node_to_id[output_node]] = real_output_node
model = tf.keras.Model(
[real_nodes[self._node_to_id[input_node]] for input_node in
self.inputs],
[real_nodes[self._node_to_id[output_node]] for output_node in
self.outputs])
return self._compile_keras_model(hp, model)
def _get_metrics(self):
metrics = {}
for output_node in self.outputs:
block = output_node.in_blocks[0]
if isinstance(block, base.Head):
metrics[block.name] = block.metrics
return metrics
def _get_loss(self):
loss = {}
for output_node in self.outputs:
block = output_node.in_blocks[0]
if isinstance(block, base.Head):
loss[block.name] = block.loss
return loss
def _compile_keras_model(self, hp, model):
# Specify hyperparameters from compile(...)
optimizer = hp.Choice('optimizer',
['adam', 'adadelta', 'sgd'],
default='adam')
model.compile(optimizer=optimizer,
metrics=self._get_metrics(),
loss=self._get_loss())
return model
class PreprocessGraph(Graph):
"""A graph consists of only Preprocessors.
    It is both a search space with Hyperparameters and a model to be fitted. It
    preprocesses the dataset with the Preprocessors. The output is the input to
    the Keras model. It does not extend the Hypermodel class because it cannot
    be built into a Keras model.
"""
def preprocess(self, dataset, validation_data=None, fit=False):
"""Preprocess the data to be ready for the Keras Model.
# Arguments
dataset: tf.data.Dataset. Training data.
validation_data: tf.data.Dataset. Validation data.
fit: Boolean. Whether to fit the preprocessing layers with x and y.
# Returns
            A tuple of two preprocessed tf.data.Dataset, (train, validation),
            if validation data is provided.
            Otherwise, the preprocessed training dataset.
"""
dataset = self._preprocess(dataset, fit=fit)
if validation_data:
validation_data = self._preprocess(validation_data)
return dataset, validation_data
def _preprocess(self, dataset, fit=False):
# A list of input node ids in the same order as the x in the dataset.
input_node_ids = [self._node_to_id[input_node] for input_node in self.inputs]
# Iterate until all the model inputs have their data.
while set(map(lambda node: self._node_to_id[node], self.outputs)
) - set(input_node_ids):
# Gather the blocks for the next iteration over the dataset.
blocks = []
for node_id in input_node_ids:
for block in self._nodes[node_id].out_blocks:
if block in self.blocks:
blocks.append(block)
if fit:
# Iterate the dataset to fit the preprocessors in current depth.
self._fit(dataset, input_node_ids, blocks)
# Transform the dataset.
output_node_ids = []
dataset = dataset.map(functools.partial(
self._transform,
input_node_ids=input_node_ids,
output_node_ids=output_node_ids,
blocks=blocks,
fit=fit))
# Build input_node_ids for next depth.
input_node_ids = output_node_ids
return dataset
def _fit(self, dataset, input_node_ids, blocks):
# Iterate the dataset to fit the preprocessors in current depth.
for x, y in dataset:
x = nest.flatten(x)
id_to_data = {
node_id: temp_x for temp_x, node_id in zip(x, input_node_ids)
}
for block in blocks:
data = [id_to_data[self._node_to_id[input_node]]
for input_node in block.inputs]
block.update(data, y=y)
# Finalize and set the shapes of the output nodes.
for block in blocks:
block.finalize()
nest.flatten(block.outputs)[0].shape = block.output_shape
def _transform(self,
x,
y,
input_node_ids,
output_node_ids,
blocks,
fit=False):
x = nest.flatten(x)
id_to_data = {
node_id: temp_x
for temp_x, node_id in zip(x, input_node_ids)
}
output_data = {}
# Transform each x by the corresponding block.
for hm in blocks:
data = [id_to_data[self._node_to_id[input_node]]
for input_node in hm.inputs]
data = tf.py_function(functools.partial(hm.transform, fit=fit),
inp=nest.flatten(data),
Tout=hm.output_types())
data = nest.flatten(data)[0]
data.set_shape(hm.output_shape)
output_data[self._node_to_id[hm.outputs[0]]] = data
        # Keep the Keras Model inputs even if they are not inputs to the blocks.
for node_id, data in id_to_data.items():
if self._nodes[node_id] in self.outputs:
output_data[node_id] = data
for node_id in sorted(output_data.keys()):
output_node_ids.append(node_id)
return tuple(map(
lambda node_id: output_data[node_id], output_node_ids)), y
def build(self, hp):
"""Obtain the values of all the HyperParameters.
        Different from the build function of Hypermodel, this build function does
        not produce a Keras model. It only obtains the hyperparameter values from
        the HyperParameters.
# Arguments
hp: HyperParameters.
"""
super().build(hp)
self.compile(compiler.BEFORE)
for block in self.blocks:
block.build(hp)
def copy(old_instance):
instance = old_instance.__class__()
instance.set_state(old_instance.get_state())
return instance
class HyperGraph(Graph):
"""A HyperModel based on connected Blocks and HyperBlocks.
# Arguments
inputs: A list of input node(s) for the HyperGraph.
outputs: A list of output node(s) for the HyperGraph.
"""
def __init__(self, inputs, outputs, **kwargs):
super().__init__(inputs, outputs, **kwargs)
self.compile(compiler.HYPER)
def build_graphs(self, hp):
plain_graph = self.hyper_build(hp)
preprocess_graph = plain_graph.build_preprocess_graph()
preprocess_graph.build(hp)
return (preprocess_graph,
plain_graph.build_keras_graph())
def hyper_build(self, hp):
"""Build a GraphHyperModel with no HyperBlock but only Block."""
# Make sure get_uid would count from start.
tf.keras.backend.clear_session()
inputs = []
old_node_to_new = {}
for old_input_node in self.inputs:
input_node = copy(old_input_node)
inputs.append(input_node)
old_node_to_new[old_input_node] = input_node
for old_block in self.blocks:
inputs = [old_node_to_new[input_node]
for input_node in old_block.inputs]
if isinstance(old_block, base.HyperBlock):
outputs = old_block.build(hp, inputs=inputs)
else:
outputs = copy(old_block)(inputs)
for output_node, old_output_node in zip(outputs, old_block.outputs):
old_node_to_new[old_output_node] = output_node
inputs = []
for input_node in self.inputs:
inputs.append(old_node_to_new[input_node])
outputs = []
for output_node in self.outputs:
outputs.append(old_node_to_new[output_node])
return PlainGraph(inputs, outputs, override_hps=self.override_hps)
|
StarcoderdataPython
|
3217417
|
<gh_stars>10-100
from leapp.actors import Actor
from leapp.models import NetworkManagerConfig
from leapp.tags import ApplicationsPhaseTag, IPUWorkflowTag
snippet_path = '/usr/lib/NetworkManager/conf.d/10-dhcp-dhclient.conf'
snippet_data = ("# Generated by leapp when upgrading from RHEL7 to RHEL8\n"
"[main]\n"
"dhcp=dhclient\n")
class NetworkManagerUpdateConfig(Actor):
"""
Updates NetworkManager configuration for Red Hat Enterprise Linux 8.
On Red Hat Enterprise Linux 7 NetworkManager uses the "dhclient" DHCP backend by default, while
the default is "internal" on Red Hat Enterprise Linux 8. We want to keep "dhclient" enabled on
upgrade, unless the user explicitly chose another backend in the configuration. To do so, we
drop a configuration snippet in /usr/lib.
"""
name = 'network_manager_update_config'
consumes = (NetworkManagerConfig,)
produces = ()
tags = (ApplicationsPhaseTag, IPUWorkflowTag)
def process(self):
for nm_config in self.consume(NetworkManagerConfig):
self.log.info('Consuming dhcp={}'.format(nm_config.dhcp))
if nm_config.dhcp == '':
try:
with open(snippet_path, 'w') as f:
f.write(snippet_data)
self.log.info('Written the following to {}:\n{}\n'.format(snippet_path, snippet_data))
except IOError as e:
self.log.warning('Write error: {}'.format(e))
break
|
StarcoderdataPython
|
1674516
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Ensure that SharedLibrary builder works with SHLIBVERSION and -j2.
This is a regression test for:
http://article.gmane.org/gmane.comp.programming.tools.scons.user/27049
"""
import TestSCons
import os
import sys
import SCons.Platform
import SCons.Defaults
test = TestSCons.TestSCons()
if sys.platform == 'darwin':
# Skipping until logic is fixed for macosx
test.skip_test("Not working on darwin yet\n")
test.write('foo.c', """
#if _WIN32
__declspec(dllexport)
#endif
int foo() { return 0; }
""")
test.write('main.c', """
#if _WIN32
__declspec(dllimport)
#endif
int foo();
int main(void) { return foo(); }
""")
test.write('SConstruct', """
env = Environment()
env.AppendUnique(LIBPATH = ['.'])
env.Program('main.c', LIBS = ['foo'])
env.SharedLibrary('foo', 'foo.c', SHLIBVERSION = '0.1.2')
""")
test.run(arguments = ['-j 2', '--tree=all'])
env = SCons.Defaults.DefaultEnvironment()
platform = SCons.Platform.platform_default()
tool_list = SCons.Platform.DefaultToolList(platform, env)
if platform == 'cygwin':
    # PATH is used to search for *.dll libraries (cygfoo-0-1-2.dll in our case)
path = os.environ.get('PATH','')
if path: path = path + os.pathsep
path = path + test.workpath('.')
os.environ['PATH'] = path
if os.name == 'posix':
os.environ['LD_LIBRARY_PATH'] = test.workpath('.')
if sys.platform.find('irix') != -1:
os.environ['LD_LIBRARYN32_PATH'] = test.workpath('.')
test.run(program = test.workpath('main'))
test.run(arguments = ['-c'])
platform = SCons.Platform.platform_default()
if 'gnulink' in tool_list:
# All (?) the files we expect will get created in the current directory
files = [
'libfoo.so',
'libfoo.so.0',
'libfoo.so.0.1.2',
'foo.os',
]
elif 'applelink' in tool_list:
# All (?) the files we expect will get created in the current directory
files = [
'libfoo.dylib',
'libfoo.0.1.2.dylib',
'foo.os',
]
elif 'cyglink' in tool_list:
# All (?) the files we expect will get created in the current directory
files = [
'cygfoo-0-1-2.dll',
'libfoo-0-1-2.dll.a',
'libfoo.dll.a',
'foo.os',
]
elif 'mslink' in tool_list:
# All (?) the files we expect will get created in the current directory
files = [
'foo.dll',
'foo.lib',
'foo.obj',
]
elif 'sunlink' in tool_list:
# All (?) the files we expect will get created in the current directory
files = [
'libfoo.so',
'libfoo.so.0',
'libfoo.so.0.1.2',
'so_foo.os',
]
else:
# All (?) the files we expect will get created in the current directory
files= [
'libfoo.so',
'foo.os']
for f in files:
test.must_not_exist([ f])
test.must_exist(['main.c'])
test.must_exist(['foo.c'])
test.must_exist(['SConstruct'])
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
StarcoderdataPython
|
1743106
|
<gh_stars>0
"""
Django settings for {{cookiecutter.project_name}} project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'{{cookiecutter.project_slug}}',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
ROOT_URLCONF = '{{cookiecutter.project_slug}}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '{{cookiecutter.project_slug}}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Custom user model
AUTH_USER_MODEL = '{{cookiecutter.project_slug}}.User'
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.'
'UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# Django Rest Framework
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'{{cookiecutter.project_slug}}.authentication.TemporaryTokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.'
'LimitOffsetPagination',
'PAGE_SIZE': 100
}
# CORS Header Django Rest Framework
CORS_ORIGIN_ALLOW_ALL = True
# Temporary Token
REST_FRAMEWORK_TEMPORARY_TOKENS = {
'MINUTES': 10,
'RENEW_ON_SUCCESS': True,
'USE_AUTHENTICATION_BACKENDS': False,
}
# Activation Token
ACTIVATION_TOKENS = {
'MINUTES': 2880,
}
# Email service configuration.
# Supported services: SendinBlue.
SETTINGS_IMAILING = {
"SERVICE": "SendinBlue",
"API_KEY": "example_api_key",
"EMAIL_FROM": "<EMAIL>",
"TEMPLATES": {
"CONFIRM_SIGN_UP": "example_template_id",
"FORGOT_PASSWORD": "<PASSWORD>",
}
}
# User specific settings
LOCAL_SETTINGS = {
'ORGANIZATION': "{{cookiecutter.company_name}}",
"EMAIL_SERVICE": False,
"AUTO_ACTIVATE_USER": False,
"FRONTEND_INTEGRATION": {
"ACTIVATION_URL": "example.com/activate?activation_token={% raw %}{{token}}{% endraw %}",
"FORGOT_PASSWORD_URL": "example.com/forgot_password?token={% raw %}{{token}}{% endraw %}",
},
}
|
StarcoderdataPython
|
1786062
|
<reponame>sebnil/internet-uptime
import time
import urllib.request
from datetime import date
def internet_on():
try:
urllib.request.urlopen('http://google.se', timeout=5)
return 1
except urllib.request.URLError:
print('URLError')
except:
print('could not do urlopen')
return 0
if __name__ == "__main__":
now = date.today()
filename = '{:04d}-{:02d}-{:02d} uptime.csv'.format(now.year, now.month, now.day)
with open(filename, 'a') as f:
for i in range(0, 5):
internet_ok = internet_on()
if internet_ok:
break
else:
print('internet not ok. loop {}. trying again'.format(i))
iso_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime())
line = '{:.6f},{},{}\n'.format(time.time(), iso_time, internet_ok)
print(line)
f.write(line)
f.flush()
|
StarcoderdataPython
|
52091
|
<gh_stars>10-100
from dataclasses import dataclass
from typing import Callable, List, Optional
from hooqu.analyzers.analyzer import (AggDefinition, DoubledValuedState,
StandardScanShareableAnalyzer)
from hooqu.analyzers.preconditions import has_column, is_numeric
from hooqu.dataframe import DataFrameLike
@dataclass
class MeanState(DoubledValuedState["MeanState"]):
total: float
count: int
def sum(self, other: "MeanState") -> "MeanState":
return MeanState(self.total + other.total, self.count + other.count)
def metric_value(self) -> float:
if self.count == 0:
return float("nan")
return self.total / self.count
class Mean(StandardScanShareableAnalyzer[MeanState]):
def __init__(self, column: str, where: Optional[str] = None):
super().__init__("Mean", column, where=where)
def from_aggregation_result(
self, result: DataFrameLike, offset: int = 0
) -> Optional[MeanState]:
sum_ = 0
count = 0
if len(result): # otherwise an empty dataframe
sum_ = result.loc["sum"][self.instance]
count = result.loc["count"][self.instance]
return MeanState(sum_, count)
def _aggregation_functions(self, where: Optional[str] = None) -> AggDefinition:
# Defines the aggregations to compute on the data
# TODO: Handle the ConditionalCount for a dataframe (if possible)
# in the original implementation here a Spark.Column is returned
# with using the "SUM (exp(where)) As LONG INT"
# with Pandas-like dataframe the where clause need to be evaluated
# before as the API does not get translated into SQL as with spark
return {self.instance: {"sum", "count"}}
def additional_preconditions(self) -> List[Callable[[DataFrameLike], None]]:
return [has_column(self.instance), is_numeric(self.instance)]
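# Minimal sketch of how two partial MeanState objects merge. In practice the
# Mean analyzer is driven by hooqu's scan machinery; calling the state class
# directly like this is only for illustration.
if __name__ == "__main__":
    left = MeanState(total=10.0, count=4)
    right = MeanState(total=6.0, count=2)
    merged = left.sum(right)
    print(merged.metric_value())  # (10.0 + 6.0) / (4 + 2) = 2.666...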
|
StarcoderdataPython
|
1692225
|
import torch
id_to_char = {}
char_to_id = {}
def _update_vocab(txt):
chars = list(txt)
for i, char in enumerate(chars):
if char not in char_to_id:
tmp_id = len(char_to_id)
char_to_id[char] = tmp_id
id_to_char[tmp_id] = char
def load_text(file_name, use_dict=False, dict_data=None):
with open(file_name, 'r') as f:
txt_list = f.readlines()
questions, answers = [], []
for txt in txt_list:
idx = txt.find('_')
questions.append(txt[:idx])
answers.append(txt[idx:-1])
# create vocab dict
if use_dict is False:
for i in range(len(questions)):
q, a = questions[i], answers[i]
_update_vocab(q)
_update_vocab(a)
# create torch array
x = torch.zeros([len(questions), len(questions[0]), 1], dtype=torch.long,
device=get_device())
t = torch.zeros([len(questions), len(answers[0]), 1], dtype=torch.long,
device=get_device())
if use_dict is False:
vocab_dict = char_to_id
else:
vocab_dict = dict_data
for i, sentence in enumerate(questions):
x[i, :, 0] = torch.Tensor([vocab_dict[c] for c in list(sentence)])
for i, sentence in enumerate(answers):
t[i, :, 0] = torch.Tensor([vocab_dict[c] for c in list(sentence)])
return (x, t)
def get_id_from_char(c):
return char_to_id[c]
def get_max_dict():
return len(char_to_id)
def get_device():
return torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_dict_id_to_char():
return id_to_char
def get_dict_char_to_id():
return char_to_id
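# Minimal sketch: build the vocabulary from a toy question/answer string and
# map ids back to characters. The toy string is an assumption; real data comes
# from the text file passed to load_text().
if __name__ == "__main__":
    _update_vocab("12+34_46 ")
    ids = [get_id_from_char(c) for c in "12+34"]
    chars = "".join(get_dict_id_to_char()[i] for i in ids)
    print(ids, "->", chars, "| vocab size:", get_max_dict())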
|
StarcoderdataPython
|
3264433
|
<gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = [
'GetConnectionResult',
'AwaitableGetConnectionResult',
'get_connection',
'get_connection_output',
]
@pulumi.output_type
class GetConnectionResult:
def __init__(__self__, arn=None, authorization_type=None, description=None, secret_arn=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if authorization_type and not isinstance(authorization_type, str):
raise TypeError("Expected argument 'authorization_type' to be a str")
pulumi.set(__self__, "authorization_type", authorization_type)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if secret_arn and not isinstance(secret_arn, str):
raise TypeError("Expected argument 'secret_arn' to be a str")
pulumi.set(__self__, "secret_arn", secret_arn)
@property
@pulumi.getter
def arn(self) -> Optional[str]:
"""
The arn of the connection resource.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="authorizationType")
def authorization_type(self) -> Optional['ConnectionAuthorizationType']:
return pulumi.get(self, "authorization_type")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description of the connection.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="secretArn")
def secret_arn(self) -> Optional[str]:
"""
The arn of the secrets manager secret created in the customer account.
"""
return pulumi.get(self, "secret_arn")
class AwaitableGetConnectionResult(GetConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetConnectionResult(
arn=self.arn,
authorization_type=self.authorization_type,
description=self.description,
secret_arn=self.secret_arn)
def get_connection(name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConnectionResult:
"""
Resource Type definition for AWS::Events::Connection.
:param str name: Name of the connection.
"""
__args__ = dict()
__args__['name'] = name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:events:getConnection', __args__, opts=opts, typ=GetConnectionResult).value
return AwaitableGetConnectionResult(
arn=__ret__.arn,
authorization_type=__ret__.authorization_type,
description=__ret__.description,
secret_arn=__ret__.secret_arn)
@_utilities.lift_output_func(get_connection)
def get_connection_output(name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetConnectionResult]:
"""
Resource Type definition for AWS::Events::Connection.
:param str name: Name of the connection.
"""
...
|
StarcoderdataPython
|
1688323
|
"""
Python model "Population.py"
Translated using PySD version 0.9.0
"""
from __future__ import division
import numpy as np
from pysd import utils
import xarray as xr
from pysd.py_backend.functions import cache
from pysd.py_backend import functions
_subscript_dict = {}
_namespace = {
'TIME': 'time',
'Time': 'time',
'Growth Fraction': 'growth_fraction',
'Initial Population': 'initial_population',
'Number Added': 'number_added',
'Population': 'population',
'FINAL TIME': 'final_time',
'INITIAL TIME': 'initial_time',
'SAVEPER': 'saveper',
'TIME STEP': 'time_step'
}
__pysd_version__ = "0.9.0"
@cache('run')
def growth_fraction():
"""
Real Name: b'Growth Fraction'
Original Eqn: b'0.015'
Units: b''
Limits: (None, None)
Type: constant
b''
"""
return 0.015
@cache('run')
def initial_population():
"""
Real Name: b'Initial Population'
Original Eqn: b'3000'
Units: b''
Limits: (None, None)
Type: constant
b''
"""
return 3000
@cache('step')
def number_added():
"""
Real Name: b'Number Added'
Original Eqn: b'Growth Fraction*Population'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return growth_fraction() * population()
@cache('step')
def population():
"""
Real Name: b'Population'
Original Eqn: b'INTEG ( Number Added, Initial Population)'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return integ_population()
@cache('run')
def final_time():
"""
Real Name: b'FINAL TIME'
Original Eqn: b'2010'
Units: b'Year'
Limits: (None, None)
Type: constant
b'The final time for the simulation.'
"""
return 2010
@cache('run')
def initial_time():
"""
Real Name: b'INITIAL TIME'
Original Eqn: b'1960'
Units: b'Year'
Limits: (None, None)
Type: constant
b'The initial time for the simulation.'
"""
return 1960
@cache('step')
def saveper():
"""
Real Name: b'SAVEPER'
Original Eqn: b'TIME STEP'
Units: b'Year'
Limits: (0.0, None)
Type: component
b'The frequency with which output is stored.'
"""
return time_step()
@cache('run')
def time_step():
"""
Real Name: b'TIME STEP'
Original Eqn: b'0.125'
Units: b'Year'
Limits: (0.0, None)
Type: constant
b'The time step for the simulation.'
"""
return 0.125
integ_population = functions.Integ(lambda: number_added(), lambda: initial_population())
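# Minimal usage sketch: a PySD-translated model like this one is usually loaded
# and simulated through the pysd package. The file name below is an assumption
# for illustration.
if __name__ == "__main__":
    import pysd
    model = pysd.load("Population.py")
    results = model.run(return_columns=["Population", "Number Added"])
    print(results.head())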
|
StarcoderdataPython
|
3252530
|
#!/usr/bin/env python
"""
Author: <NAME>
Last Updated: 2015-03-13
This is a demo of Panda's occluder-culling system. It demonstrates loading
occluders from an EGG file and adding them to a CullTraverser.
"""
# Load PRC data
from panda3d.core import loadPrcFileData
loadPrcFileData('', 'window-title Occluder Demo')
loadPrcFileData('', 'sync-video false')
loadPrcFileData('', 'show-frame-rate-meter true')
loadPrcFileData('', 'texture-minfilter linear-mipmap-linear')
#loadPrcFileData('', 'fake-view-frustum-cull true') # show culled nodes in red
# Import needed modules
import random
from direct.showbase.ShowBase import ShowBase
from direct.gui.OnscreenText import OnscreenText
from panda3d.core import PerspectiveLens, TextNode, \
TexGenAttrib, TextureStage, TransparencyAttrib, LPoint3, Texture
def add_instructions(pos, msg):
"""Function to put instructions on the screen."""
return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), shadow=(0, 0, 0, 1),
parent=base.a2dTopLeft, align=TextNode.ALeft,
pos=(0.08, -pos - 0.04), scale=.05)
def add_title(text):
"""Function to put title on the screen."""
return OnscreenText(text=text, style=1, pos=(-0.1, 0.09), scale=.08,
parent=base.a2dBottomRight, align=TextNode.ARight,
fg=(1, 1, 1, 1), shadow=(0, 0, 0, 1))
class Game(ShowBase):
"""Sets up the game, camera, controls, and loads models."""
def __init__(self):
ShowBase.__init__(self)
self.xray_mode = False
self.show_model_bounds = False
# Display instructions
add_title("Panda3D Tutorial: Occluder Culling")
add_instructions(0.06, "[Esc]: Quit")
add_instructions(0.12, "[W]: Move Forward")
add_instructions(0.18, "[A]: Move Left")
add_instructions(0.24, "[S]: Move Right")
add_instructions(0.30, "[D]: Move Back")
add_instructions(0.36, "Arrow Keys: Look Around")
add_instructions(0.42, "[F]: Toggle Wireframe")
add_instructions(0.48, "[X]: Toggle X-Ray Mode")
add_instructions(0.54, "[B]: Toggle Bounding Volumes")
# Setup controls
self.keys = {}
for key in ['arrow_left', 'arrow_right', 'arrow_up', 'arrow_down',
'a', 'd', 'w', 's']:
self.keys[key] = 0
self.accept(key, self.push_key, [key, 1])
self.accept('shift-%s' % key, self.push_key, [key, 1])
self.accept('%s-up' % key, self.push_key, [key, 0])
self.accept('f', self.toggleWireframe)
self.accept('x', self.toggle_xray_mode)
self.accept('b', self.toggle_model_bounds)
self.accept('escape', __import__('sys').exit, [0])
self.disableMouse()
# Setup camera
self.lens = PerspectiveLens()
self.lens.setFov(60)
self.lens.setNear(0.01)
self.lens.setFar(1000.0)
self.cam.node().setLens(self.lens)
self.camera.setPos(-9, -0.5, 1)
self.heading = -95.0
self.pitch = 0.0
# Load level geometry
self.level_model = self.loader.loadModel('models/level')
self.level_model.reparentTo(self.render)
self.level_model.setTexGen(TextureStage.getDefault(),
TexGenAttrib.MWorldPosition)
self.level_model.setTexProjector(TextureStage.getDefault(),
self.render, self.level_model)
self.level_model.setTexScale(TextureStage.getDefault(), 4)
tex = self.loader.load3DTexture('models/tex_#.png')
self.level_model.setTexture(tex)
# Load occluders
occluder_model = self.loader.loadModel('models/occluders')
occluder_nodepaths = occluder_model.findAllMatches('**/+OccluderNode')
for occluder_nodepath in occluder_nodepaths:
self.render.setOccluder(occluder_nodepath)
occluder_nodepath.node().setDoubleSided(True)
# Randomly spawn some models to test the occluders
self.models = []
box_model = self.loader.loadModel('box')
for dummy in range(0, 500):
pos = LPoint3((random.random() - 0.5) * 9,
(random.random() - 0.5) * 9,
random.random() * 8)
box = box_model.copy_to(self.render)
box.setScale(random.random() * 0.2 + 0.1)
box.setPos(pos)
box.setHpr(random.random() * 360,
random.random() * 360,
random.random() * 360)
box.reparentTo(self.render)
self.models.append(box)
self.taskMgr.add(self.update, 'main loop')
def push_key(self, key, value):
"""Stores a value associated with a key."""
self.keys[key] = value
def update(self, task):
"""Updates the camera based on the keyboard input."""
delta = globalClock.getDt()
move_x = delta * 3 * -self.keys['a'] + delta * 3 * self.keys['d']
move_z = delta * 3 * self.keys['s'] + delta * 3 * -self.keys['w']
self.camera.setPos(self.camera, move_x, -move_z, 0)
self.heading += (delta * 90 * self.keys['arrow_left'] +
delta * 90 * -self.keys['arrow_right'])
self.pitch += (delta * 90 * self.keys['arrow_up'] +
delta * 90 * -self.keys['arrow_down'])
self.camera.setHpr(self.heading, self.pitch, 0)
return task.cont
def toggle_xray_mode(self):
"""Toggle X-ray mode on and off. This is useful for seeing the
effectiveness of the occluder culling."""
self.xray_mode = not self.xray_mode
if self.xray_mode:
self.level_model.setColorScale((1, 1, 1, 0.5))
self.level_model.setTransparency(TransparencyAttrib.MDual)
else:
self.level_model.setColorScaleOff()
self.level_model.setTransparency(TransparencyAttrib.MNone)
def toggle_model_bounds(self):
"""Toggle bounding volumes on and off on the models."""
self.show_model_bounds = not self.show_model_bounds
if self.show_model_bounds:
for model in self.models:
model.showBounds()
else:
for model in self.models:
model.hideBounds()
game = Game()
game.run()
|
StarcoderdataPython
|
1725935
|
<filename>ztfin2p3/metadata.py<gh_stars>0
""" Handling metadata """
import os
import numpy as np
import pandas
import warnings
from .utils.tools import parse_singledate
def get_rawmeta(which, date, ccdid=None, fid=None,
getwhat='metadata',
**kwargs):
"""
which: [string]
- flat
- bias
- starflat [not implemented yet]
- science
date: [string (or list of)]
date can either be a single string or a list of two dates in isoformat.
- two dates format: date=['start','end'] is isoformat
e.g. date=['2019-03-14','2019-03-25']
- single string: four format are then accepted, year, month, week or day:
- yyyy: get the full year. (string of length 4)
e.g. date='2019'
- yyyymm: get the full month (string of length 6)
e.g. date='201903'
- yyyywww: get the corresponding week of the year (string of length 7)
e.g. date='2019045'
- yyyymmdd: get the given single day (string of length 8)
e.g. date='20190227'
ccdid, fid: [int or list of] -optional-
value or list of ccd (ccdid=[1->16]) or filter (fid=[1->3]) you want
to limit to.
getwhat: [string] -optional-
what do you want to get. get_{getwhat} must be an existing
method. You have for instance:
- file
- metadata
- zquery
**kwargs goes to get_metadata()
option examples:
- which='flat':
- ledid
- which='science':
- field
- getwhat = 'file':
- client,
- as_dask
Returns
-------
list
Example:
--------
#
# - Flat (with LEDID)
#
    Get the raw flat image files of ledid #2 for the 23rd week of
    2020, limited to ccd #4:
    files = get_rawmeta('flat', '2020023', ledid=2, ccdid=4, getwhat='file')
"""
prop = dict(ccdid=ccdid, fid=fid)
method = f"get_{getwhat}"
if which == "flat":
class_ = RawFlatMetaData
elif which == "bias":
class_ = RawBiasMetaData
elif which in ["object","science"]:
class_ = RawScienceMetaData
else:
        raise NotImplementedError(f"which = {which} has not been implemented")
return getattr(class_,method)(date, **{**prop, **kwargs})
class MetaDataHandler( object ):
_KIND = None # IRSA kind: raw, sci, cal
_SUBKIND = None # subkind (raw/{flat,bias,science,starflat}, etc.)
# ================= #
# To IMPLEMENT #
# ================= #
@classmethod
def build_monthly_metadata(cls, year, month):
""" """
raise NotImplementedError("You must implement build_monthly_metadata()")
# ================= #
# MetaData #
# ================= #
@classmethod
def get_filepath(cls, date, in_meta=False, **kwargs):
""" get the local path where the data are """
from ztfquery.query import metatable_to_url
metadata = cls.get_metadata(date, **kwargs)
filepath = metatable_to_url(metadata, source="local")
if in_meta:
metadata["filepath"] = filepath
return metadata
return filepath
@classmethod
def get_file(cls, date, getpath=False, client=None, as_dask="futures", **kwargs):
""" get the file associated to the input metadata limits.
**kwargs goes to get_metadata, it contains selection options like ccdid or fid.
"""
from ztfquery import io
files = cls.get_filepath(date, **kwargs)
return io.bulk_get_file(files, client=client, as_dask=as_dask)
@classmethod
def get_metadata(cls, date, ccdid=None, fid=None):
""" General method to access the IRSA metadata given a date or a daterange.
The format of date is very flexible to quickly get what you need:
Parameters
----------
date: [string (or list of)]
date can either be a single string or a list of two dates in isoformat.
- two dates format: date=['start','end'] is isoformat
e.g. date=['2019-03-14','2019-03-25']
- single string: four format are then accepted, year, month, week or day:
- yyyy: get the full year. (string of length 4)
e.g. date='2019'
- yyyymm: get the full month (string of length 6)
e.g. date='201903'
- yyyywww: get the corresponding week of the year (string of length 7)
e.g. date='2019045'
- yyyymmdd: get the given single day (string of length 8)
e.g. date='20190227'
ccdid, fid: [int or list of]
value or list of ccd (ccdid=[1->16]) or filter (fid=[1->3]) you want
to limit to.
Returns
-------
dataframe (IRSA metadata)
"""
# #
# Date #
# #
if date is None:
raise ValueError("date cannot be None, could be string, float, or list of 2 strings")
if not hasattr(date, "__iter__"): # int/float given, convert to string
date = str(date)
if type(date) is str and len(date) == 6: # means per month as stored.
return cls.get_monthly_metadata(date[:4],date[4:])
elif type(date) is str:
start, end = parse_singledate(date) # -> start, end
else:
from astropy import time
start, end = time.Time(date).datetime
months = cls._daterange_to_monthlist_(start, end)
data = pandas.concat([cls.get_monthly_metadata(yyyy,mm) for yyyy,mm in months])
datecol = data["obsdate"].astype('datetime64')
data = data[datecol.between(start.isoformat(), end.isoformat())]
# #
# CCDID #
# #
if ccdid is not None:
data = data[data["ccdid"].isin(np.atleast_1d(ccdid))]
# #
# FID #
# #
if fid is not None:
data = data[data["fid"].isin(np.atleast_1d(fid))]
return data
@classmethod
def get_zquery(cls, date, force_dl=False, **kwargs):
""" get the ZTFQuery object associated to the metadata
corresponding to the input date.
**kwargs goes to get_metadata() like ccdid, fid
"""
from ztfquery import query
data = cls.get_metadata(date, **kwargs)
return query.ZTFQuery(data, cls._KIND)
@classmethod
def bulk_build_metadata(cls, date, client=None, as_dask="delayed", force_dl=False, format=None):
""" uses Dask to massively download metadata in the given time range.
Data will be stored using the usual monthly based format.
Example:
--------
To run the parallel downloading between May-12th of 2019 and June-3rd of 2020:
filesout = bulk_build_metadata(['2019-05-12','2020-06-03'], as_dask='computed')
"""
import dask
#
# - Test Dask input
if client is None:
if as_dask == "futures":
raise ValueError("Cannot as_dask=futures with client is None.")
if as_dask in ["gather","gathered"]:
as_dask = "computed"
# end test dask input
#
if not hasattr(date, "__iter__"): # int/float given, convert to string
date = str(date)
if type(date) is str and len(date) == 6: # means per month as stored.
return cls.get_monthly_metadata(date[:4],date[4:])
elif type(date) is str:
start, end = parse_singledate(date) # -> start, end
else:
from astropy import time
start, end = time.Time(date, format=format).datetime
months = cls._daterange_to_monthlist_(start, end)
delayed_data = [dask.delayed(cls._load_or_download_)(yyyy,mm, force_dl=force_dl)
for yyyy,mm in months]
# Returns
if as_dask == "delayed":
return delayed_data
if as_dask in ["compute","computed"]:
return dask.delayed(list)(delayed_data).compute()
if as_dask == "futures": # client has been tested already
return client.compute(delayed_data)
if as_dask in ["gather","gathered"]:
return client.gather(client.compute(delayed_data))
raise ValueError("Cannot parse the given as_dask")
@classmethod
def get_monthly_metadatafile(cls, year, month):
""" """
from .io import get_directory
year, month = int(year), int(month)
if cls._KIND is None or cls._SUBKIND is None:
raise AttributeError(f"_KIND {cls._KIND} or _SUBKIND {cls._SUBKIND} is None. Please define them")
directory = get_directory(cls._KIND, cls._SUBKIND)
return os.path.join(directory, "meta", f"{cls._KIND}{cls._SUBKIND}_metadata_{year:04d}{month:02d}.parquet")
@classmethod
def _load_or_download_(cls, year, month, force_dl=False, **kwargs):
""" """
filepath = cls.get_monthly_metadatafile(year, month)
if force_dl or not os.path.isfile(filepath):
filepath = cls.build_monthly_metadata(year, month)
return filepath
@classmethod
def get_monthly_metadata(cls, year, month, force_dl=False, **kwargs):
""" """
filepath = cls._load_or_download_(year, month, force_dl=force_dl)
return pandas.read_parquet(filepath, **kwargs)
# --------------- #
# INTERNAL #
# --------------- #
@staticmethod
def _daterange_to_monthlist_(start, end):
""" """
#
# Now we have start and end in datetime format.
starting_month = [start.isoformat().split("-")[:2]]
extra_months = pandas.date_range(start.isoformat(),
end.isoformat(), freq='MS'
).strftime("%Y-%m").astype('str').str.split("-").to_list()
# All individual months
return np.unique(np.asarray(starting_month+extra_months, dtype="int"), axis=0)
class RawMetaData( MetaDataHandler ):
_KIND = "raw"
_SUBKIND = None
@classmethod
def build_monthly_metadata(cls, year, month):
""" """
if cls._SUBKIND is None:
raise NotImplementedError("you must define cls._SUBKIND")
year, month = int(year), int(month)
from astropy import time
from ztfquery import query
fileout = cls.get_monthly_metadatafile(year, month)
zquery = query.ZTFQuery()
start, end = parse_singledate(f"{year:04d}{month:02d}")
start = time.Time(start.isoformat())
end = time.Time(end.isoformat())
zquery.load_metadata("raw", sql_query=f"obsjd between {start.jd} and {end.jd} and imgtype = '{cls._SUBKIND}'")
if len(zquery.data)>5:
dirout = os.path.dirname(fileout)
if not os.path.isdir(dirout):
os.makedirs(dirout, exist_ok=True)
zquery.data.to_parquet(fileout)
return fileout
class RawFlatMetaData( RawMetaData ):
_SUBKIND = "flat"
# ================= #
# Super It #
# ================= #
@classmethod
def get_metadata(cls, date, ccdid=None, fid=None, ledid=None):
""" General method to access the IRSA metadata given a date or a daterange.
The format of date is very flexible to quickly get what you need:
Parameters
----------
date: [string (or list of)]
date can either be a single string or a list of two dates in isoformat.
- two dates format: date=['start','end'] is isoformat
e.g. date=['2019-03-14','2019-03-25']
- single string: four format are then accepted, year, month, week or day:
- yyyy: get the full year. (string of length 4)
e.g. date='2019'
- yyyymm: get the full month (string of length 6)
e.g. date='201903'
- yyyywww: get the corresponding week of the year (string of length 7)
e.g. date='2019045'
- yyyymmdd: get the given single day (string of length 8)
e.g. date='20190227'
ccdid, fid, ledid: [int or list of]
value or list of ccd (ccdid=[1->16]), filter (fid=[1->3]) or LED (2->13 | but 6)
to limit to.
Returns
-------
dataframe (IRSA metadata)
"""
data = super().get_metadata(date, ccdid=ccdid, fid=fid)
if ledid is not None:
data = data[data["ledid"].isin(np.atleast_1d(ledid))]
return data
# ================= #
# Additional #
# ================= #
@classmethod
def add_ledinfo_to_metadata(cls, year, month, use_dask=True, update=False):
""" """
year, month = int(year), int(month)
from ztfquery import io
from astropy.io import fits
def getval_from_header(filename, value, ext=None, **kwargs):
""" """
return fits.getval(io.get_file(filename, **kwargs), value, ext=ext)
zquery = cls.get_zquery(f"{year:04d}{month:02d}")
if "ledid" in zquery.data.columns:
warnings.warn("ledid already in data. update=False so nothing to do")
return
# Only get the first filefracday index, since all filefactday have the same LED.
filefracdays= zquery.data[zquery.data["fid"].isin([1,2,3])].groupby("filefracday").head(1)
# Get the LEDID
files = [l.split("/")[-1] for l in zquery.get_data_path(indexes=filefracdays.index)]
if use_dask:
import dask
ilum_delayed = [dask.delayed(getval_from_header)(file_, "ILUM_LED") for file_ in files]
ilum = dask.delayed(list)(ilum_delayed).compute()
else:
ilum = [getval_from_header(file_, "ILUM_LED") for file_ in files]
# merge that with the initial data.
filefracdays.insert(len(filefracdays.columns), "ledid", ilum)
data = zquery.data.merge(filefracdays[["filefracday","ledid"]], on="filefracday")
# and store it back.
fileout = cls.get_monthly_metadatafile(year, month)
data.to_parquet(fileout)
return
class RawBiasMetaData( RawMetaData ):
_SUBKIND = "bias"
class RawScienceMetaData( RawMetaData ):
_SUBKIND = "object"
@classmethod
def get_metadata(cls, date, ccdid=None, fid=None, field=None):
""" General method to access the IRSA metadata given a date or a daterange.
The format of date is very flexible to quickly get what you need:
Parameters
----------
date: [string (or list of)]
date can either be a single string or a list of two dates in isoformat.
- two dates format: date=['start','end'] is isoformat
e.g. date=['2019-03-14','2019-03-25']
- single string: four format are then accepted, year, month, week or day:
- yyyy: get the full year. (string of length 4)
e.g. date='2019'
- yyyymm: get the full month (string of length 6)
e.g. date='201903'
- yyyywww: get the corresponding week of the year (string of length 7)
e.g. date='2019045'
- yyyymmdd: get the given single day (string of length 8)
e.g. date='20190227'
ccdid, fid: [int or list of]
value or list of ccd (ccdid=[1->16]) or filter (fid=[1->3])
to limit to.
field: [int or list of]
requested (list of) field(s)
Returns
-------
dataframe (IRSA metadata)
"""
data = super().get_metadata(date, ccdid=ccdid, fid=fid)
if field is not None:
data = data[data["field"].isin(np.atleast_1d(field))]
return data
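# Minimal usage sketch, assuming a configured ztfquery/IRSA environment and
# previously built monthly metadata files; the column names follow the filters
# used in get_metadata() above.
if __name__ == "__main__":
    meta = get_rawmeta("flat", "2020023", ccdid=4, ledid=2)
    print(meta[["obsdate", "ccdid", "fid"]].head())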
|
StarcoderdataPython
|
4836746
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('editorial', '0078_auto_20171218_1301'),
]
operations = [
migrations.CreateModel(
name='TalentEditorProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('public', models.BooleanField(default=False, help_text=b'Is this talent editor publicly listed?')),
],
),
migrations.RemoveField(
model_name='user',
name='public',
),
migrations.AlterField(
model_name='user',
name='user_type',
field=models.CharField(help_text=b'Type of user.', max_length=25, choices=[(b'Admin', b'Admin'), (b'Editor', b'Editor'), (b'Staff', b'Staff'), (b'Other', b'Other')]),
),
migrations.AddField(
model_name='talenteditorprofile',
name='user',
field=models.OneToOneField(to=settings.AUTH_USER_MODEL),
),
]
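# Hedged usage note (not part of the generated migration): once applied with
# `python manage.py migrate editorial`, the new profile is reached through Django's
# default reverse one-to-one accessor, e.g.:
#   profile = some_user.talenteditorprofile   # raises TalentEditorProfile.DoesNotExist if absent
#   print(profile.public)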
|
StarcoderdataPython
|
3273588
|
<gh_stars>1-10
"""
Author: <REPLACE>
Project: 100DaysPython
File: module3_day35_taskScheduler.py
Creation Date: <REPLACE>
Description: <REPLACE>
"""
import subprocess
import sys
import time
python_path = "/Users/sbeal603/.virtualenvs/100DaysPython/bin/python"
subprocess.Popen([python_path, "greeting.py"])
sys_platform = sys.platform
print(sys_platform)
if sys_platform == "win32" or sys_platform == "cygwin":
subprocess.Popen(["start", "../../Module2/Day19/declaration.txt"], shell=True)
elif sys_platform == "darwin":
subprocess.Popen(["open", "../../Module2/Day19/declaration.txt"])
elif sys_platform == "linux":
subprocess.Popen(["see", "../../Module2/Day19/declaration.txt"])
else:
print(f"Unknown operating system: {sys_platform}")
try:
    minutes = input("How many minutes should the timer last?\n")
    sleep_time = float(minutes) * 60
except ValueError:
    # float() raises ValueError (not TypeError) on a non-numeric string; the raw input
    # is captured first so it can be reported.
    print(f"{minutes} is not a valid entry.")
    raise
except KeyboardInterrupt:
print("The program was cancelled by the user.")
else:
time.sleep(sleep_time)
subprocess.Popen(["open", "alarm.mp3"])
finally:
print("Goodbye")
|
StarcoderdataPython
|
3266002
|
import torch.nn as nn
# Inherits from nn.Module.
class LeNet5(nn.Module):
def __init__(self, classes=10):
        # Cooperative super() call; avoids duplicate initialization under multiple inheritance.
super(LeNet5, self).__init__()
# 1x32x32 -> 6x28x28
self.conv1 = nn.Conv2d(in_channels=1,
out_channels=6,
kernel_size=5,
stride=1, # default
padding=0, # default
bias=True)
self.sigmoid1 = nn.Sigmoid()
# 28x28x6 -> 14x14x6
self.avg_pool1 = nn.AvgPool2d(kernel_size=2)
# 14x14x6 -> 10x10x16
self.conv2 = nn.Conv2d(in_channels=6,
out_channels=16,
kernel_size=5,
bias=True)
self.sigmoid2 = nn.Sigmoid()
# 10x10x16 -> 5x5x16
self.avg_pool2 = nn.AvgPool2d(kernel_size=2)
# 5x5x16 -> 1x1x120
self.conv3 = nn.Conv2d(in_channels=16,
out_channels=120,
kernel_size=5,
bias=True)
self.sigmoid3 = nn.Sigmoid()
# 120 -> 84
self.dense1 = nn.Linear(120, 84)
self.sigmoid4 = nn.Sigmoid()
# 84 -> 10
self.output = nn.Linear(84, classes)
def forward(self, x):
x = self.conv1(x)
x = self.sigmoid1(x)
x = self.avg_pool1(x)
x = self.conv2(x)
x = self.sigmoid2(x)
x = self.avg_pool2(x)
x = self.conv3(x)
x = self.sigmoid3(x)
x = x.view(x.size(0), -1)
x = self.dense1(x)
x = self.sigmoid4(x)
x = self.output(x)
return x
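# Hedged sanity check (not part of the original file): pushes a dummy 1x32x32 batch
# through the network and confirms the output shape. Batch size 4 is illustrative.
if __name__ == "__main__":
    import torch
    model = LeNet5(classes=10)
    dummy = torch.randn(4, 1, 32, 32)   # batch of 4 grayscale 32x32 images
    logits = model(dummy)
    print(logits.shape)                 # expected: torch.Size([4, 10])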
|
StarcoderdataPython
|
95110
|
<reponame>data61/anonlink-client<filename>docs/tutorial/util.py
"""This module provides some helper functions for tutorial Notebooks."""
import anonlink
from collections import defaultdict
from typing import Any, Dict
def solve(encodings, rec_to_blocks, threshold: float = 0.8):
""" entity resolution, baby
calls anonlink to do the heavy lifting.
:param encodings: a sequence of lists of Bloom filters (bitarray). One for each data provider
:param rec_to_blocks: a sequence of dictionaries, mapping a record id to the list of blocks it is part of. Again,
one per data provider, same order as encodings.
:param threshold: similarity threshold for solving
:return: same as the anonlink solver.
        A sequence of groups. Each group is a sequence of
records. Two records are in the same group iff they represent
the same entity. Here, a record is a two-tuple of dataset index
and record index.
"""
def my_blocking_f(ds_idx, rec_idx, _):
return rec_to_blocks[ds_idx][rec_idx]
candidate_pairs = anonlink.candidate_generation.find_candidate_pairs(
encodings,
anonlink.similarities.dice_coefficient,
threshold=threshold,
blocking_f=my_blocking_f)
# Need to use the probabilistic greedy solver to be able to remove the duplicate. It is not configurable
# with the native greedy solver.
return anonlink.solving.probabilistic_greedy_solve(candidate_pairs, merge_threshold=1.0)
def naive_solve(encodings, threshold: float = 0.8):
""" entity resolution, baby
calls anonlink to do the heavy lifting.
:param encodings: a sequence of lists of Bloom filters (bitarray). One for each data provider
:param threshold: similarity threshold for solving
:return: same as the anonlink solver.
        A sequence of groups. Each group is a sequence of
records. Two records are in the same group iff they represent
the same entity. Here, a record is a two-tuple of dataset index
and record index.
"""
candidate_pairs = anonlink.candidate_generation.find_candidate_pairs(
encodings,
anonlink.similarities.dice_coefficient,
threshold=threshold)
# Need to use the probabilistic greedy solver to be able to remove the duplicate. It is not configurable
# with the native greedy solver.
return anonlink.solving.probabilistic_greedy_solve(candidate_pairs, merge_threshold=1.0)
def evaluate(found_groups, true_matches):
tp = len([x for x in found_groups if x in true_matches])
fp = len([x for x in found_groups if x not in true_matches])
fn = len([x for x in true_matches if x not in found_groups])
precision = tp / (tp + fp)
recall = tp / (tp + fn)
return precision, recall
def reduction_ratio(filtered_reverse_indices, data, K):
"""Assess reduction ratio for multiple parties."""
naive_num_comparison = 1
for d in data:
naive_num_comparison *= len(d)
filtered_reversed_indices_dict = []
block_keys = defaultdict(int) # type: Dict[Any, int]
for reversed_index in filtered_reverse_indices:
fdict = defaultdict(list)
for index, blks in reversed_index.items():
for blk in blks:
block_keys[blk] += 1
fdict[blk].append(index)
filtered_reversed_indices_dict.append(fdict)
final_block_keys = [key for key, count in block_keys.items() if count >= K]
reduced_num_comparison = 0
for key in final_block_keys:
num_comparison = 1
for reversed_index in filtered_reversed_indices_dict:
index = reversed_index.get(key, [0])
num_comparison *= len(index)
reduced_num_comparison += num_comparison
rr = 1 - reduced_num_comparison / naive_num_comparison
return rr, reduced_num_comparison, naive_num_comparison
def set_completeness(filtered_reverse_indices, truth, K):
"""Assess reduction ratio for multiple parties."""
block_keys = defaultdict(int) # type: Dict[Any, int]
filtered_reversed_indices_dict = []
for reversed_index in filtered_reverse_indices:
fdict = defaultdict(list)
for index, blks in reversed_index.items():
for blk in blks:
block_keys[blk] += 1
fdict[blk].append(int(index))
filtered_reversed_indices_dict.append(fdict)
final_block_keys = [key for key, count in block_keys.items() if count >= K]
sets = defaultdict(set)
for i, reversed_index in enumerate(filtered_reversed_indices_dict):
for key in final_block_keys:
index = reversed_index.get(key, None)
if index is not None:
for ind in index:
sets[key].add((i, ind))
num_true_matches = 0
for true_set in truth:
check = False
true_set = set(true_set)
for s in sets.values():
if true_set.intersection(s) == true_set:
check = True
if check:
num_true_matches += 1
sc = num_true_matches / len(truth)
return sc
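# Hedged usage sketch (not part of the original helpers): evaluate() compares solver
# output against ground truth; each record is a (dataset_index, record_index) tuple
# and the groups below are made up for illustration.
if __name__ == "__main__":
    found = [((0, 0), (1, 0)), ((0, 1), (1, 2))]
    truth = [((0, 0), (1, 0)), ((0, 2), (1, 3))]
    precision, recall = evaluate(found, truth)
    print(precision, recall)   # 0.5 0.5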
|
StarcoderdataPython
|
1714028
|
from distutils.core import setup
setup(
name = 'json_stable_stringify_python',
packages = ['json_stable_stringify_python'],
version = '0.2',
description = 'Deterministic JSON stringify',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/haochi/json-stable-stringify-python',
download_url = 'https://github.com/haochi/json-stable-stringify-python/archive/0.2.tar.gz',
keywords = ['json', 'stringify', 'serialization'],
classifiers = [],
)
|
StarcoderdataPython
|
1662797
|
"""
Scripts concerned with processing raw data into objects ready for analysis.
Many of these can be run as semi-automated services.
"""
|
StarcoderdataPython
|
137445
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import collections
UNKNOWN = np.nan
R = 1
FR = 2
F = 3
FL = 4
L = 5
BL = 6
B = 7
BR = 8
U = 9
UF = 10
UB = 11
UL = 12
UR = 13
UFL = 14
UFR = 15
UBL = 16
UBR = 17
D = 18
DF = 19
DB = 20
DL = 21
DR = 22
DFL = 23
DFR = 24
DBL = 25
DBR = 26
INT_TO_CODE = collections.OrderedDict(
[
(UNKNOWN, 'unknown'),
(R, 'right'),
(FR, 'frontright'),
(F, 'front'),
(FL, 'frontleft'),
(L, 'left'),
(BL, 'backleft'),
(B, 'back'),
(BR, 'backright'),
(U, 'up'),
(UF, 'upfront'),
(UB, 'upback'),
(UL, 'upleft'),
(UR, 'upright'),
(UFL, 'upfrontleft'),
(UFR, 'upfrontright'),
(UBL, 'upbackleft'),
(UBR, 'upbackright'),
(D, 'down'),
(DF, 'downfront'),
(DB, 'downback'),
(DL, 'downleft'),
(DR, 'downright'),
(DFL, 'downfrontleft'),
(DFR, 'downfrontright'),
(DBL, 'downbackleft'),
(DBR, 'downbackright'),
]
)
# HACK: mirrors ibeis viewpoint int distance encoding
VIEW_INT_DIST = {
# DIST 0 PAIRS
(B, B): 0,
(BL, BL): 0,
(BR, BR): 0,
(D, D): 0,
(DB, DB): 0,
(DBL, DBL): 0,
(DBR, DBR): 0,
(DF, DF): 0,
(DFL, DFL): 0,
(DFR, DFR): 0,
(DL, DL): 0,
(DR, DR): 0,
(F, F): 0,
(FL, FL): 0,
(FR, FR): 0,
(L, L): 0,
(R, R): 0,
(U, U): 0,
(UB, UB): 0,
(UBL, UBL): 0,
(UBR, UBR): 0,
(UF, UF): 0,
(UFL, UFL): 0,
(UFR, UFR): 0,
(UL, UL): 0,
(UR, UR): 0,
# DIST 1 PAIRS
(B, BL): 1,
(B, BR): 1,
(B, DB): 1,
(B, DBL): 1,
(B, DBR): 1,
(B, UB): 1,
(B, UBL): 1,
(B, UBR): 1,
(BL, DBL): 1,
(BL, L): 1,
(BL, UBL): 1,
(BR, DBR): 1,
(BR, R): 1,
(BR, UBR): 1,
(D, DB): 1,
(D, DBL): 1,
(D, DBR): 1,
(D, DF): 1,
(D, DFL): 1,
(D, DFR): 1,
(D, DL): 1,
(D, DR): 1,
(DB, DBL): 1,
(DB, DBR): 1,
(DBL, DL): 1,
(DBL, L): 1,
(DBR, DR): 1,
(DBR, R): 1,
(DF, DFL): 1,
(DF, DFR): 1,
(DF, F): 1,
(DFL, DL): 1,
(DFL, F): 1,
(DFL, FL): 1,
(DFL, L): 1,
(DFR, DR): 1,
(DFR, F): 1,
(DFR, FR): 1,
(DFR, R): 1,
(DL, L): 1,
(DR, R): 1,
(F, FL): 1,
(F, FR): 1,
(F, UF): 1,
(F, UFL): 1,
(F, UFR): 1,
(FL, L): 1,
(FL, UFL): 1,
(FR, R): 1,
(FR, UFR): 1,
(L, UBL): 1,
(L, UFL): 1,
(L, UL): 1,
(R, UBR): 1,
(R, UFR): 1,
(R, UR): 1,
(U, UB): 1,
(U, UBL): 1,
(U, UBR): 1,
(U, UF): 1,
(U, UFL): 1,
(U, UFR): 1,
(U, UL): 1,
(U, UR): 1,
(UB, UBL): 1,
(UB, UBR): 1,
(UBL, UL): 1,
(UBR, UR): 1,
(UF, UFL): 1,
(UF, UFR): 1,
(UFL, UL): 1,
(UFR, UR): 1,
# DIST 2 PAIRS
(B, D): 2,
(B, DL): 2,
(B, DR): 2,
(B, L): 2,
(B, R): 2,
(B, U): 2,
(B, UL): 2,
(B, UR): 2,
(BL, BR): 2,
(BL, D): 2,
(BL, DB): 2,
(BL, DBR): 2,
(BL, DFL): 2,
(BL, DL): 2,
(BL, FL): 2,
(BL, U): 2,
(BL, UB): 2,
(BL, UBR): 2,
(BL, UFL): 2,
(BL, UL): 2,
(BR, D): 2,
(BR, DB): 2,
(BR, DBL): 2,
(BR, DFR): 2,
(BR, DR): 2,
(BR, FR): 2,
(BR, U): 2,
(BR, UB): 2,
(BR, UBL): 2,
(BR, UFR): 2,
(BR, UR): 2,
(D, F): 2,
(D, FL): 2,
(D, FR): 2,
(D, L): 2,
(D, R): 2,
(DB, DF): 2,
(DB, DFL): 2,
(DB, DFR): 2,
(DB, DL): 2,
(DB, DR): 2,
(DB, L): 2,
(DB, R): 2,
(DB, UB): 2,
(DB, UBL): 2,
(DB, UBR): 2,
(DBL, DBR): 2,
(DBL, DF): 2,
(DBL, DFL): 2,
(DBL, DFR): 2,
(DBL, DR): 2,
(DBL, FL): 2,
(DBL, UB): 2,
(DBL, UBL): 2,
(DBL, UBR): 2,
(DBL, UFL): 2,
(DBL, UL): 2,
(DBR, DF): 2,
(DBR, DFL): 2,
(DBR, DFR): 2,
(DBR, DL): 2,
(DBR, FR): 2,
(DBR, UB): 2,
(DBR, UBL): 2,
(DBR, UBR): 2,
(DBR, UFR): 2,
(DBR, UR): 2,
(DF, DL): 2,
(DF, DR): 2,
(DF, FL): 2,
(DF, FR): 2,
(DF, L): 2,
(DF, R): 2,
(DF, UF): 2,
(DF, UFL): 2,
(DF, UFR): 2,
(DFL, DFR): 2,
(DFL, DR): 2,
(DFL, FR): 2,
(DFL, UBL): 2,
(DFL, UF): 2,
(DFL, UFL): 2,
(DFL, UFR): 2,
(DFL, UL): 2,
(DFR, DL): 2,
(DFR, FL): 2,
(DFR, UBR): 2,
(DFR, UF): 2,
(DFR, UFL): 2,
(DFR, UFR): 2,
(DFR, UR): 2,
(DL, DR): 2,
(DL, F): 2,
(DL, FL): 2,
(DL, UBL): 2,
(DL, UFL): 2,
(DL, UL): 2,
(DR, F): 2,
(DR, FR): 2,
(DR, UBR): 2,
(DR, UFR): 2,
(DR, UR): 2,
(F, L): 2,
(F, R): 2,
(F, U): 2,
(F, UL): 2,
(F, UR): 2,
(FL, FR): 2,
(FL, U): 2,
(FL, UBL): 2,
(FL, UF): 2,
(FL, UFR): 2,
(FL, UL): 2,
(FR, U): 2,
(FR, UBR): 2,
(FR, UF): 2,
(FR, UFL): 2,
(FR, UR): 2,
(L, U): 2,
(L, UB): 2,
(L, UF): 2,
(R, U): 2,
(R, UB): 2,
(R, UF): 2,
(UB, UF): 2,
(UB, UFL): 2,
(UB, UFR): 2,
(UB, UL): 2,
(UB, UR): 2,
(UBL, UBR): 2,
(UBL, UF): 2,
(UBL, UFL): 2,
(UBL, UFR): 2,
(UBL, UR): 2,
(UBR, UF): 2,
(UBR, UFL): 2,
(UBR, UFR): 2,
(UBR, UL): 2,
(UF, UL): 2,
(UF, UR): 2,
(UFL, UFR): 2,
(UFL, UR): 2,
(UFR, UL): 2,
(UL, UR): 2,
# DIST 3 PAIRS
(B, DF): 3,
(B, DFL): 3,
(B, DFR): 3,
(B, FL): 3,
(B, FR): 3,
(B, UF): 3,
(B, UFL): 3,
(B, UFR): 3,
(BL, DF): 3,
(BL, DFR): 3,
(BL, DR): 3,
(BL, F): 3,
(BL, R): 3,
(BL, UF): 3,
(BL, UFR): 3,
(BL, UR): 3,
(BR, DF): 3,
(BR, DFL): 3,
(BR, DL): 3,
(BR, F): 3,
(BR, L): 3,
(BR, UF): 3,
(BR, UFL): 3,
(BR, UL): 3,
(D, UB): 3,
(D, UBL): 3,
(D, UBR): 3,
(D, UF): 3,
(D, UFL): 3,
(D, UFR): 3,
(D, UL): 3,
(D, UR): 3,
(DB, F): 3,
(DB, FL): 3,
(DB, FR): 3,
(DB, U): 3,
(DB, UFL): 3,
(DB, UFR): 3,
(DB, UL): 3,
(DB, UR): 3,
(DBL, F): 3,
(DBL, FR): 3,
(DBL, R): 3,
(DBL, U): 3,
(DBL, UF): 3,
(DBL, UR): 3,
(DBR, F): 3,
(DBR, FL): 3,
(DBR, L): 3,
(DBR, U): 3,
(DBR, UF): 3,
(DBR, UL): 3,
(DF, U): 3,
(DF, UBL): 3,
(DF, UBR): 3,
(DF, UL): 3,
(DF, UR): 3,
(DFL, R): 3,
(DFL, U): 3,
(DFL, UB): 3,
(DFL, UR): 3,
(DFR, L): 3,
(DFR, U): 3,
(DFR, UB): 3,
(DFR, UL): 3,
(DL, FR): 3,
(DL, R): 3,
(DL, U): 3,
(DL, UB): 3,
(DL, UBR): 3,
(DL, UF): 3,
(DL, UFR): 3,
(DR, FL): 3,
(DR, L): 3,
(DR, U): 3,
(DR, UB): 3,
(DR, UBL): 3,
(DR, UF): 3,
(DR, UFL): 3,
(F, UB): 3,
(F, UBL): 3,
(F, UBR): 3,
(FL, R): 3,
(FL, UB): 3,
(FL, UBR): 3,
(FL, UR): 3,
(FR, L): 3,
(FR, UB): 3,
(FR, UBL): 3,
(FR, UL): 3,
(L, UBR): 3,
(L, UFR): 3,
(L, UR): 3,
(R, UBL): 3,
(R, UFL): 3,
(R, UL): 3,
# DIST 4 PAIRS
(B, F): 4,
(BL, FR): 4,
(BR, FL): 4,
(D, U): 4,
(DB, UF): 4,
(DBL, UFR): 4,
(DBR, UFL): 4,
(DF, UB): 4,
(DFL, UBR): 4,
(DFR, UBL): 4,
(DL, UR): 4,
(DR, UL): 4,
(L, R): 4,
# UNDEFINED DIST PAIRS
(B, UNKNOWN): np.nan,
(BL, UNKNOWN): np.nan,
(BR, UNKNOWN): np.nan,
(D, UNKNOWN): np.nan,
(DB, UNKNOWN): np.nan,
(DBL, UNKNOWN): np.nan,
(DBR, UNKNOWN): np.nan,
(DF, UNKNOWN): np.nan,
(DFL, UNKNOWN): np.nan,
(DFR, UNKNOWN): np.nan,
(DL, UNKNOWN): np.nan,
(DR, UNKNOWN): np.nan,
(F, UNKNOWN): np.nan,
(FL, UNKNOWN): np.nan,
(FR, UNKNOWN): np.nan,
(L, UNKNOWN): np.nan,
(R, UNKNOWN): np.nan,
(U, UNKNOWN): np.nan,
(UB, UNKNOWN): np.nan,
(UBL, UNKNOWN): np.nan,
(UBR, UNKNOWN): np.nan,
(UF, UNKNOWN): np.nan,
(UFL, UNKNOWN): np.nan,
(UFR, UNKNOWN): np.nan,
(UL, UNKNOWN): np.nan,
(UNKNOWN, B): np.nan,
(UNKNOWN, BL): np.nan,
(UNKNOWN, BR): np.nan,
(UNKNOWN, D): np.nan,
(UNKNOWN, DB): np.nan,
(UNKNOWN, DBL): np.nan,
(UNKNOWN, DBR): np.nan,
(UNKNOWN, DF): np.nan,
(UNKNOWN, DFL): np.nan,
(UNKNOWN, DFR): np.nan,
(UNKNOWN, DL): np.nan,
(UNKNOWN, DR): np.nan,
(UNKNOWN, F): np.nan,
(UNKNOWN, FL): np.nan,
(UNKNOWN, FR): np.nan,
(UNKNOWN, L): np.nan,
(UNKNOWN, R): np.nan,
(UNKNOWN, U): np.nan,
(UNKNOWN, UB): np.nan,
(UNKNOWN, UBL): np.nan,
(UNKNOWN, UBR): np.nan,
(UNKNOWN, UF): np.nan,
(UNKNOWN, UFL): np.nan,
(UNKNOWN, UFR): np.nan,
(UNKNOWN, UL): np.nan,
(UNKNOWN, UR): np.nan,
(UR, UNKNOWN): np.nan,
(UNKNOWN, UNKNOWN): np.nan,
}
# make distance symmetric
for (f1, f2), d in list(VIEW_INT_DIST.items()):
VIEW_INT_DIST[(f2, f1)] = d
# Make string based version
VIEW_CODE_DIST = {
(INT_TO_CODE[f1], INT_TO_CODE[f2]): d for (f1, f2), d in VIEW_INT_DIST.items()
}
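# Hedged lookup sketch (not part of the original constants): after the symmetrisation
# loop above, distances can be read by integer code or by string code, e.g.:
#   VIEW_INT_DIST[(F, B)]                 # -> 4
#   VIEW_CODE_DIST[('front', 'upback')]   # -> 3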
def RhombicuboctahedronDistanceDemo():
import utool as ut
def rhombicuboctahedro_faces():
"""yields names of all 26 rhombicuboctahedron faces"""
face_axes = [['up', 'down'], ['front', 'back'], ['left', 'right']]
ordering = {f: p for p, fs in enumerate(face_axes) for f in fs}
for i in range(1, len(face_axes) + 1):
for axes in list(ut.combinations(face_axes, i)):
for combo in ut.product(*axes):
sortx = ut.argsort(ut.take(ordering, combo))
face = tuple(ut.take(combo, sortx))
yield face
# Each face is a node.
import networkx as nx
G = nx.Graph()
faces = list(rhombicuboctahedro_faces())
G.add_nodes_from(faces)
    # Two faces are connected if they share an edge or a vertex
# TODO: is there a more general definition?
face_axes = [['up', 'down'], ['front', 'back'], ['left', 'right']]
ordering = {f: p for p, fs in enumerate(face_axes) for f in fs}
# In this case faces might share an edge or vertex if their names intersect
edges = []
for face1, face2 in ut.combinations(faces, 2):
set1 = set(face1)
set2 = set(face2)
if len(set1.intersection(set2)) > 0:
diff1 = set1.difference(set2)
diff2 = set2.difference(set1)
sortx1 = ut.argsort(ut.take(ordering, diff1))
sortx2 = ut.argsort(ut.take(ordering, diff2))
# If they share a name that is on opposite poles, then they cannot
# share an edge or vertex.
if not list(set(sortx1).intersection(set(sortx2))):
edges.append((face1, face2))
# print('-----')
# print('Edge: {} {}'.format(face1, face2))
# print('diff1 = {!r}'.format(diff1))
# print('diff2 = {!r}'.format(diff2))
G.add_edges_from(edges)
# Build distance lookup table
lookup = {}
for face1, face2 in ut.combinations(faces, 2):
# key = tuple(sorted([''.join(face1), ''.join(face2)]))
key = tuple(sorted([face1, face2]))
dist = nx.shortest_path_length(G, face1, face2)
lookup[key] = dist
def convert(face):
if face is None:
return 'UNKNOWN'
else:
return ''.join([p[0].upper() for p in face])
dist_lookup = {}
dist_lookup = {(convert(k1), convert(k2)): d for (k1, k2), d in lookup.items()}
for face in faces:
dist_lookup[(convert(face), convert(face))] = 0
dist_lookup[(convert(face), convert(None))] = None
dist_lookup[(convert(None), convert(face))] = None
dist_lookup[(convert(None), convert(None))] = None
# z = {'({}, {})'.format(k1, k2): d for (k1, k2), d in dist_lookup.items()}
# for i in range(0, 5):
# print(ub.repr2({k: v for k, v in z.items() if v == i}, si=True))
# i = None
# print(ub.repr2({k: v for k, v in z.items() if v == i}, si=True))
# z = ut.sort_dict(z, 'vals')
# print(ub.repr2(z, nl=2, si=True))
# if False:
# from wbia import constants as const
# VIEW = const.VIEW
# viewint_dist_lookup = {
# (VIEW.CODE_TO_INT[f1], VIEW.CODE_TO_INT[f2]): d
# (f1, f2) for (f1, f2), d in viewcode_dist_lookup.items()
# }
# for k, v in viewcode_dist_lookup.items():
# if 'up' not in k[0] and 'down' not in k[0]:
# if 'up' not in k[1] and 'down' not in k[1]:
# print(k, v)
def visualize_connection_graph():
# node_to_group = {f: str(len(f)) for f in faces}
node_to_group = {}
for f in faces:
if 'up' in f:
node_to_group[f] = '0.' + str(len(f))
elif 'down' in f:
node_to_group[f] = '1.' + str(len(f))
else:
node_to_group[f] = '2.' + str(len(f))
nx.set_node_attributes(G, name='groupid', values=node_to_group)
node_to_label = {f: ''.join(ut.take_column(f, 0)).upper() for f in faces}
nx.set_node_attributes(G, name='label', values=node_to_label)
import wbia.plottool as pt
pt.qt4ensure()
pt.show_nx(G, prog='neato', groupby='groupid')
visualize_connection_graph()
|
StarcoderdataPython
|
185095
|
<reponame>ryanzhao2/grade11cs
"""
def generate_list_books(filename):
book_list = []
file_in = open(filename, encoding='utf-8', errors='replace')
file_in.readline()
for line in file_in:
line = line.strip().split(",")
line[2] = float(line[2])
line[3] = int(line[3])
line[4] = int(line[4])
line[5] = int(line[5])
book_list.append(line)
return book_list
#QUESTION #1
def print_books(list_of_books):
for book in list_of_books:
print(f' {book[0][0:30]:<30} by {book[1][0:20]:<20} {str(book[5]):<4} rated {str(book[2])[0:3]}')
#QUESTION #2
def print_detailed_book(list_of_books):
format = '-'
for book in list_of_books:
print(f' {book[0]}\n'
f' by: {book[1]}\n'
f' {book[5]}\n'
f' {format * len(book[0])}\n'
f' ${book[4]:.2f}\n\n'
f' {book[6]}\n'
f' rated {book[2]} for {book[3]} reviews\n\n\n')
def main():
main_book_list = generate_list_books("amazon_bestseller_books.csv")
#print(main_book_list[:10])
print_books(main_book_list)
print_detailed_book(main_book_list)
main()
"""
|
StarcoderdataPython
|
1747036
|
################################################################################
# #
# Advection of Passive Scalars in 1D #
# #
################################################################################
from __future__ import print_function
import sys
sys.path.append('../../script/')
sys.dont_write_bytecode = True; import bhlight as bhl
PROB = 'advection1d'
TRACERS = '-tracers' in sys.argv
if '-ntot' in sys.argv:
NTOT = int(sys.argv[sys.argv.index('-ntot')+1])
else:
NTOT = 512
if '-idim' in sys.argv:
IDIM = int(sys.argv[sys.argv.index('-idim')+1])
else:
IDIM = 1
if IDIM == 1:
N1 = NTOT
N2 = 1
N3 = 1
elif IDIM == 2:
N1 = 1
N2 = NTOT
N3 = 1
elif IDIM == 3:
N1 = 1
N2 = 1
N3 = NTOT
else:
raise ValueError("Invalid IDIM. IDIM = {}".format(IDIM))
TF = 6 if TRACERS else 2
TRACERS_PER_CELL = 10
TRACERS_TOT = N1*N2*N3*TRACERS_PER_CELL
### COMPILE TIME PARAMETERS ###
# SPATIAL RESOLUTION AND MPI DECOMPOSITION
bhl.config.set_cparm('N1TOT', N1)
bhl.config.set_cparm('N2TOT', N2)
bhl.config.set_cparm('N3TOT', N3)
bhl.config.set_cparm('N1CPU', 1)
bhl.config.set_cparm('N2CPU', 1)
bhl.config.set_cparm('N3CPU', 1)
# OPENMP PARALLELIZATION
bhl.config.set_cparm('OPENMP', False)
# COORDINATES
bhl.config.set_cparm('METRIC', 'MINKOWSKI')
# FLUID
bhl.config.set_cparm('RECONSTRUCTION', 'WENO')
bhl.config.set_cparm('X1L_GAS_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X1R_GAS_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X2L_GAS_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X2R_GAS_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X3L_GAS_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X3R_GAS_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X1L_INFLOW', False)
bhl.config.set_cparm('X1R_INFLOW', False)
bhl.config.set_cparm('X2L_INFLOW', False)
bhl.config.set_cparm('X2R_INFLOW', False)
bhl.config.set_cparm('X3L_INFLOW', False)
bhl.config.set_cparm('X3R_INFLOW', False)
# PASSIVE SCALARS
bhl.config.set_cparm('NVAR_PASSIVE', 2)
# Tracers
if TRACERS:
bhl.config.set_cparm('RADIATION', True)
bhl.config.set_cparm('ESTIMATE_THETAE', False)
bhl.config.set_cparm('EMISSION', False)
bhl.config.set_cparm('ABSORPTION', False)
bhl.config.set_cparm('SCATTERING', False)
bhl.config.set_cparm('TRACERS', True)
bhl.config.set_cparm('NU_BINS', 200)
bhl.config.set_cparm('X1L_RAD_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X1R_RAD_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X2L_RAD_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X2R_RAD_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X3L_RAD_BOUND', 'BC_PERIODIC')
bhl.config.set_cparm('X3R_RAD_BOUND', 'BC_PERIODIC')
### RUNTIME PARAMETERS ###
bhl.config.set_rparm('tf', 'double', default = TF)
bhl.config.set_rparm('dt', 'double', default = 1e-6)
bhl.config.set_rparm('gam', 'double', default = 5./3.)
bhl.config.set_rparm('DTd', 'double', default = 0.02)
bhl.config.set_rparm('DTl', 'double', default = 0.02)
bhl.config.set_rparm('DTr', 'double', default = 10000)
bhl.config.set_rparm('cadv', 'double', default = 0.5)
bhl.config.set_rparm('idim', 'int', default = IDIM)
if TRACERS:
bhl.config.set_rparm('ntracers', 'int', default=TRACERS_TOT)
bhl.config.set_rparm('L_unit', 'double', default=1)
bhl.config.set_rparm('M_unit', 'double', default=1)
### CONFIGURE AND COMPILE ###
bhl.build(PROB)
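### USAGE (hedged note, not part of the original script) ###
# Resolution, direction and tracers are read from sys.argv at the top of this file, e.g.:
#   python <this script> -ntot 1024 -idim 2 -tracers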
|
StarcoderdataPython
|
1687487
|
<reponame>Rigborn4/PyMdlxConverter
from PyMdlxConverter.common.binarystream import BinaryStream
from PyMdlxConverter.parsers.mdlx.tokenstream import TokenStream
from PyMdlxConverter.parsers.mdlx.genericobject import GenericObject
from PyMdlxConverter.parsers.errors import TokenStreamError
class ParticleEmitterPopcorn(GenericObject):
def __init__(self):
super().__init__()
self.life_span = 0
self.emission_rate = 0
self.speed = 0
self.color = 3
self.alpha = 1
self.replaceable_id = 0
self.path = ''
self.animation_visibility_guide = ''
def read_mdx(self, stream: BinaryStream, version):
start = stream.index
size = stream.read_uint32()
super().read_mdx(stream, version)
self.life_span = stream.read_float32()
self.emission_rate = stream.read_float32()
self.speed = stream.read_float32()
self.color = stream.read_float32_array(3)
self.alpha = stream.read_float32()
self.replaceable_id = stream.read_uint32()
self.path = stream.read(260)
self.animation_visibility_guide = stream.read(260)
self.read_animations(stream, size - (stream.index - start))
def write_mdx(self, stream: BinaryStream, version):
stream.write_uint32(self.get_byte_length())
super().write_mdx(stream, version)
stream.write_float32(self.life_span)
stream.write_float32(self.emission_rate)
stream.write_float32(self.speed)
stream.write_float32_array(self.color)
stream.write_float32(self.alpha)
stream.write_uint32(self.replaceable_id)
stream.write(self.path)
stream.skip(260 - len(self.path))
stream.write(self.animation_visibility_guide)
stream.skip(260 - len(self.animation_visibility_guide))
self.write_non_generic_animation_chunks(stream)
def read_mdl(self, stream: TokenStream):
for token in super().read_generic_block(stream):
if token == 'SortPrimsFarZ':
self.flags |= 0x10000
elif token == 'Unshaded':
self.flags |= 0x8000
elif token == 'Unfogged':
self.flags |= 0x40000
elif token == 'static LifeSpan':
self.life_span = stream.read_float()
elif token == 'LifeSpan':
self.read_animation(stream, 'KPPL')
elif token == 'static EmissionRate':
self.emission_rate = stream.read_float()
elif token == 'EmissionRate':
self.read_animation(stream, 'KPPE')
elif token == 'static Speed':
self.speed = stream.read_float()
elif token == 'Speed':
self.read_animation(stream, 'KPPS')
elif token == 'static Color':
self.color = stream.read_vector(3)
elif token == 'Color':
self.read_animation(stream, 'KPPC')
elif token == 'static Alpha':
self.alpha = stream.read_float()
elif token == 'Alpha':
self.read_animation(stream, 'KPPA')
elif token == 'Visibility':
self.read_animation(stream, 'KPPV')
elif token == 'ReplaceableId':
self.replaceable_id = stream.read_int()
elif token == 'Path':
self.path = stream.read()
elif token == 'AnimVisibilityGuide':
self.animation_visibility_guide = stream.read()
else:
raise TokenStreamError('ParticleEmitterPopcorn', token)
def write_mdl(self, stream: TokenStream, version=None):
stream.start_object_block('ParticleEmitterPopcorn', self.name)
self.write_generic_header(stream)
if self.flags & 0x10000:
stream.write_flag('SortPrimsFarZ')
if self.flags & 0x8000:
stream.write_flag('Unshaded')
if self.flags & 0x40000:
stream.write_flag('Unfogged')
if not self.write_animation(stream, 'KPPL'):
stream.write_number_attrib('static LifeSpan', self.life_span)
if not self.write_animation(stream, 'KPPE'):
stream.write_number_attrib('static EmissionRate', self.emission_rate)
if not self.write_animation(stream, 'KPPS'):
stream.write_number_attrib('static Speed', self.speed)
if not self.write_animation(stream, 'KPPC'):
stream.write_vector_attrib('static Color', self.color)
if not self.write_animation(stream, 'KPPA'):
stream.write_number_attrib('static Alpha', self.alpha)
self.write_animation(stream, 'KPPV')
if self.replaceable_id != 0:
stream.write_number_attrib('ReplaceableId', self.replaceable_id)
if len(self.path):
stream.write_string_attrib('Path', self.path)
if len(self.animation_visibility_guide):
stream.write_string_attrib('AnimVisibilityGuide', self.animation_visibility_guide)
self.write_generic_animations(stream)
stream.end_block()
def get_byte_length(self, version=None):
return 556 + super().get_byte_length(version=version)
|
StarcoderdataPython
|
187776
|
<reponame>chigur/pose
from . import Module as SoundNet
|
StarcoderdataPython
|
183629
|
# Derived from code found at:
# https://github.com/laserson/dsq
# Copyright 2013 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distributed streaming quantiles.
This is a Python implementation of distributed streaming quantiles using the
count-min sketch. It is suitable for use in a distributed computing
environment, like Spark.
Modifications by <NAME>
Use array instead of list for counts.
"""
import sys
import random
import array
import itertools
from math import isnan, ceil, log, e as euler
def exp2(x):
return 2. ** x
class CMSketch(object):
"""Count-min sketch data structure.
As described in Cormode and Muthukrishnan:
http://dx.doi.org/10.1016/j.jalgor.2003.12.001
Attributes:
width: (int) width of the sketch
depth: (int) depth of the sketch (number of hashes)
hash_state: (tuple of int) defining the set of hash functions. It
should have depth integers.
"""
def __init__(self, width, depth, hash_state):
"""Inits CMSketch with specified width, depth, hash_state."""
if depth != len(hash_state):
raise ValueError("depth and len(hash_state) must be equal.")
self.width = width
self.depth = depth
self.hash_state = hash_state
self._counts = array.array('i', itertools.repeat(0, self.width * self.depth))
self._masks = [CMSketch.generate_mask(n) for n in self.hash_state]
def hash_index(self, row, column):
return (self.width * row) + (column % self.width)
def counts(self):
return self._counts
def increment(self, key):
"""Increment counter for hashable object key."""
for (i, mask) in enumerate(self._masks):
j = hash(key) ^ mask
self._counts[self.hash_index(i, j)] += 1
def get(self, key):
"""Get estimated count for hashable object key."""
return min([self._counts[self.hash_index(i, hash(key) ^ mask)]
for i, mask in enumerate(self._masks)])
def merge(self, other):
"""Merge other CMSketch with this CMSketch.
The width, depth, and hash_state must be identical.
"""
self._check_compatibility(other)
for i in xrange(self.depth):
for j in xrange(self.width):
ix = self.hash_index(i, j)
self._counts[ix] += other.counts()[ix]
return self
def _check_compatibility(self, other):
"""Check if another CMSketch is compatible with this one for merge.
Compatibility requires same width, depth, and hash_state.
"""
if self.width != other.width or self.depth != other.depth:
raise ValueError("CMSketch dimensions do not match.")
if self.hash_state != other.hash_state:
raise ValueError("CMSketch hashes do not match")
@staticmethod
def generate_hash_state(num_hashes, seed=1729):
"""Generate some random ints suitable to be a hash_state."""
random.seed(seed)
return tuple([random.randint(0, sys.maxint)
for _ in xrange(num_hashes)])
@staticmethod
def generate_mask(state):
"""Generate a mask to be used for a random hash fn, given state (int).
Returns mask, which contains random bits. Define a hash fn like so:
def myhash(x):
return hash(x) ^ mask
"""
random.seed(state)
mask = random.getrandbits(32)
return mask
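# Hedged CMSketch sketch (commented out; not part of the original module, assumes the
# Python 2 runtime the module targets, and uses illustrative sizes):
#   hash_state = CMSketch.generate_hash_state(3)
#   sketch = CMSketch(width=100, depth=3, hash_state=hash_state)
#   sketch.increment("spark"); sketch.increment("spark")
#   assert sketch.get("spark") >= 2   # count-min estimates never under-count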
class QuantileAccumulator(object):
"""Accumulator object for computing quantiles on distributed streams.
This object implements the quantile algorithm using the count-min sketch as
described in Cormode and Muthukrishnan:
http://dx.doi.org/10.1016/j.jalgor.2003.12.001
This object requires knowledge of the domain of possible values. This
domain is split into dyadic intervals on a binary tree of specified depth
(num_levels). The precision of the result depends on the size of the
smallest dyadic interval over the given domain.
QuantileAccumulator objects can be processed independently and their
underlying data structures merged, allowing for processing of distributed
streams.
Attributes:
total: (int) the total number of objects streamed through
"""
def __init__(self, lower_bound, upper_bound, num_levels, epsilon, delta,
seed=1729):
"""Init a QuantileAccumulator with domain and precision information.
The accuracy of the estimator is limited by the size of the smallest
dyadic subdivision of the domain. So if the domain is [0, 1], and
num_levels is set to 10, the smallest subdivision has size 2^(-9).
epsilon should be set to the allowable error in the estimate. (Note
that it must be compatible with the num_levels. If there are not enough
levels to achieve the necessary accuracy, this won't work.)
delta should be set to the probability of getting a worse estimate
(i.e., something small, say, 0.05)
The three precision parameters, num_levels, epsilon, and delta
ultimately define how much memory is necessary to store the data
structures. There is one CMSketch per level, and each sketch has a
width and depth defined by epsilon and delta, as described in the paper.
Args:
lower_bound: float lower bound of domain
upper_bound: float upper bound of domain
num_levels: int number of levels in binary tree dyadic partition of
the domain.
epsilon: float amount of error allowed in resulting rank
delta: float probability of error exceeding epsilon accuracy
seed: value is fed to random.seed to initialize randomness
"""
self.total = 0
# domain
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.norm = upper_bound - lower_bound if upper_bound != lower_bound else 1
# precision/sketch state
self._num_levels = num_levels
# width and depth are determined from the Cormode paper, sec. 3.1
self._width = int(ceil(euler / (epsilon / num_levels)))
self._depth = int(ceil(log(1. / delta)))
self._hash_state = CMSketch.generate_hash_state(self._depth, seed)
self._sketches = [CMSketch(self._width, self._depth, self._hash_state)
for _ in xrange(self._num_levels)]
def sketches(self):
return self._sketches
def width(self):
return self._width
def depth(self):
return self._depth
def hash_state(self):
return self._hash_state
def num_levels(self):
return self._num_levels
def increment(self, value):
"""Increment counter for value in the domain."""
if value is None or isnan(value):
return
normed_value = float(value - self.lower_bound) / self.norm
self.total += 1
for (level, sketch) in enumerate(self._sketches):
key = QuantileAccumulator._index_at_level(normed_value, level)
sketch.increment(key)
def __call__(self, value_iterator):
"""Makes QuantileAccumulator usable with PySpark .mapPartitions().
An RDD's .mapPartitions method takes a function that consumes an
iterator of records and spits out an iterable for the next RDD
downstream. Since QuantileAccumulator is callable, the call can be
performed like so:
accums = values.mapPartitions(QuantileAccumulator(<parameters>))
accums.reduce(lambda x, y: x.merge(y))
"""
for value in value_iterator:
self.increment(value)
yield self
def merge(self, other):
"""Merge QuantileAccumulator with another compatible one."""
self._check_compatibility(other)
for (my_sketch, other_sketch) in zip(self._sketches, other.sketches()):
my_sketch.merge(other_sketch)
self.total += other.total
return self
def _check_compatibility(self, other):
"""Check if another CMSketch is compatible with this one for merge.
Compatibility requires same domain, precision parameters.
"""
if (self.lower_bound != other.lower_bound or
self.upper_bound != other.upper_bound):
raise ValueError("Quantile domains do not match")
if self._num_levels != other.num_levels():
raise ValueError("Number of levels do not match.")
if (self._width != other.width() or self._depth != other.depth()
or self._hash_state != other.hash_state()):
raise ValueError("Sketch parameters do not match.")
def cdf(self, value):
"""Compute estimated CDF at value in domain."""
_norm = lambda x: float(x - self.lower_bound) / self.norm
normed_value = _norm(value)
return self._normed_cdf(normed_value)
def _normed_cdf(self, normed_value):
"""Compute estimated CDF at normed value in [0, 1]."""
covering = self._get_covering(normed_value)
accum = 0.
for segment in covering:
(level, index) = segment
accum += self._sketches[level].get(index)
return accum / self.total
def ppf(self, q):
"""Percent point function (inverse of CDF).
Args:
q: float in [0, 1]. Lower tail probability.
Returns:
The value for which q of the observations lie below. E.g., q = 0.95
is the 95th percentile.
"""
_inv_norm = lambda x: x * (self.upper_bound - self.lower_bound) + self.lower_bound
return _inv_norm(self._binary_search(q))
def _binary_search(self, q, lo=0., hi=1.):
if hi - lo < exp2(-(self._num_levels + 1)):
return hi
mid = lo + (hi - lo) / 2.
key = self._normed_cdf(mid)
if key == q:
return mid
elif key < q:
return self._binary_search(q, mid, hi)
else:
return self._binary_search(q, lo, mid)
# utilities for working with the binary tree representation of the domain
def _is_leaf(self, level):
"""Is a node at level a leaf node?"""
return level >= self._num_levels - 1
def _get_covering(self, value, level=0, index=0):
"""Get the set of dyadic ranges that cover [0, value].
value must be a normed value in [0, 1].
Basically this traverses a binary tree where each node has an associated
domain. The tree itself doesn't need to be materialized because it can
be computed using level and index information.
"""
if (self._is_leaf(level) or
QuantileAccumulator._value_at_right_boundary(value, level,
index)):
return [(level, index)]
elif (value <= 0 or
QuantileAccumulator._left_child_contains_value(value, level,
index)):
return self._get_covering(value, level + 1, index * 2)
else:
return ([(level + 1, index * 2)] +
self._get_covering(value, level + 1, index * 2 + 1))
@staticmethod
def _index_at_level(value, level):
"""Get dyadic range index at given level of binary tree for value.
value is a float in [0, 1] (so required normed values).
"""
if value <= 0.:
return 0
segment_size = exp2(-level)
index = int(ceil(value / segment_size)) - 1
return index
@staticmethod
def _value_at_right_boundary(value, level, index):
"""Is the (normed) value at right boundary of a node in the bin tree?"""
return value >= (index + 1) * exp2(-level)
@staticmethod
def _left_child_contains_value(value, level, index):
return (2 * index) * exp2(-(level + 1)) < value <= (2 * index + 1) * exp2(-(level + 1))
|
StarcoderdataPython
|
98602
|
#!/usr/bin/env python
import mirheo as mir
import argparse
import numpy as np
ranks = (1, 1, 1)
domain = (8, 8, 8)
dt = 0.01
u = mir.Mirheo(ranks, domain, dt, debug_level=3, log_filename='log', no_splash=True)
n = 20
np.random.seed(42)
positions = np.random.rand(n, 3)
velocities = np.random.rand(n, 3) - 0.5
for i in range(3):
positions[:,i] *= domain[i]
pv = mir.ParticleVectors.ParticleVector('pv', mass = 1)
ic = mir.InitialConditions.FromArray(positions.tolist(), velocities.tolist())
u.registerParticleVector(pv=pv, ic=ic)
vv = mir.Integrators.VelocityVerlet('vv')
u.registerIntegrator(vv)
u.setIntegrator(vv, pv)
dump_every = 20
update_every = dump_every
u.registerPlugins(mir.Plugins.createParticleDisplacement('disp', pv, update_every))
u.registerPlugins(mir.Plugins.createDumpParticles('partDump', pv, dump_every, ["displacements"], 'h5/solvent_particles-'))
u.run(100)
# TEST: plugins.displacements
# cd plugins
# rm -rf h5 displacements.out.txt
# mir.run --runargs "-n 2" ./displacements.py
# mir.post h5dump -d displacements h5/solvent_particles-00004.h5 | awk '{print $2, $3, $4}' | LC_ALL=en_US.utf8 sort > displacements.out.txt
|
StarcoderdataPython
|
65348
|
from app.encryption import CryptoSigner
signer = CryptoSigner()
def test_should_sign_content(notify_api):
signer.init_app(notify_api)
assert signer.sign("this") != "this"
def test_should_verify_content(notify_api):
signer.init_app(notify_api)
signed = signer.sign("this")
assert signer.verify(signed) == "this"
def test_should_sign_json(notify_api):
signer.init_app(notify_api)
signed = signer.sign({"this": "that"})
assert signer.verify(signed) == {"this": "that"}
|
StarcoderdataPython
|
3356547
|
import base64
import os
import psycopg2
from io import BytesIO
from PIL import Image
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import settings
# Load PATH settings
top = settings.top
script = settings.script
image = settings.image
# DB connection settings
path = "localhost"
port = "5432"
dbname = settings.DBNAME
user = settings.USER
password = <PASSWORD>
def main():
print('------------------ START OF THIS SERIES IF PROCESSING ------------------')
print('--------- THIS FILE IS take_img_from_db.py ---------')
    # Connect to the DB
conText = "host={} port={} dbname={} user={} password={}"
conText = conText.format(path,port,dbname,user,password)
connection = psycopg2.connect(conText)
cur = connection.cursor()
    # Fetch the saved image data from the DB
sql = "select id, image from tbl_save_image where user_id='masaru';"
cur.execute(sql)
result = cur.fetchall()
for row in result:
        # Strip the leading 'data:image/png;base64,' prefix
img_base64 = row[1].rsplit('data:image/png;base64,')[-1]
        # Decode the base64 payload to PNG and save it
im = Image.open(BytesIO(base64.b64decode(img_base64)))
im.save(os.path.join(top, image, '') + str(row[0]) +'_image.png', 'PNG')
connection.close()
print('--------- EOF ---------')
if __name__ == '__main__':
main()
# Run dcgan_model2.py.
exec(open(os.path.join(top,script,"dcgan_model2.py")).read())
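# Hedged companion sketch (not part of the original script): re-encoding a saved PNG
# back into the 'data:image/png;base64,' form stored in the table. The file name is
# illustrative.
#   with open(os.path.join(top, image, '1_image.png'), 'rb') as f:
#       img_base64 = 'data:image/png;base64,' + base64.b64encode(f.read()).decode('ascii')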
|
StarcoderdataPython
|
91890
|
<reponame>qagustina/python-exercises
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 5 16:32:23 2021
@author: qagustina
"""
# Exercise 8.10
import pandas as pd
df = pd.read_csv('../Data/OBS_SHN_SF-BA.csv', index_col=['Time'], parse_dates=True)
# COPY of the data from 2014-12-25 onward
dh = df['12-25-2014':].copy()
delta_t = -1 # time the tide takes to travel between the two ports
delta_h = 20.0 # difference between the gauge zeros of the two ports
pd.DataFrame([dh['H_SF'].shift(delta_t) - delta_h, dh['H_BA']]).T.plot(figsize=(12,9))
# delta_t = -1 because the wave takes 1 hour to reach San Fernando.
# delta_h = 20.0
# freq_horaria = 4 # 4 for 15-min data, 60 for 1-min data
# cant_horas = 24
# deltah_h = cant_horas - freq_horaria
|
StarcoderdataPython
|
107637
|
<filename>challenges/codility/lessons/q012/passing_cars_test.py
#!/usr/bin/env python3
import random
import unittest
import numpy as np
from challenges.codility.lessons.q012.passing_cars_v001 import *
MAX_N = 100000
class PassingCarsTestCase(unittest.TestCase):
def test_description_examples(self):
self.assertEqual(5, solution([0, 1, 0, 1, 1]))
# Correctness
def test_single(self):
self.assertEqual(0, solution([0]))
self.assertEqual(0, solution([1]))
def test_double(self):
self.assertEqual(0, solution([0, 0]))
self.assertEqual(1, solution([0, 1]))
self.assertEqual(0, solution([1, 1]))
self.assertEqual(0, solution([1, 0]))
def test_small_simple(self):
self.assertEqual(3, solution([0, 1, 1, 1]))
def test_small_random1(self):
self.__test_random(100)
def test_small_random2(self):
self.__test_random(1000)
# Performance
def test_medium_random(self):
self.__test_random(MAX_N)
def test_large_random(self):
self.__test_random(MAX_N)
def test_large_big_answer(self):
self.__test_random(MAX_N)
self.__test_random(MAX_N)
def test_large_alternate(self):
array = [0, 1] * (MAX_N // 2)
self.assertEqual(self.__brute_solution(array), solution(array))
array = [1, 0] * (MAX_N // 2)
self.assertEqual(self.__brute_solution(array), solution(array))
def test_large_extreme(self):
self.assertEqual(0, solution([0] * MAX_N))
self.assertEqual(0, solution([1] * MAX_N))
self.assertEqual(-1, solution(([0] * (MAX_N // 2)) + ([1] * (MAX_N // 2))))
self.assertEqual(0, solution(([1] * (MAX_N // 2)) + ([0] * (MAX_N // 2))))
# Utils
@staticmethod
def __brute_solution(array):
result = 0
saw_one = False
for i in range(len(array) - 1):
saw_one |= array[i]
if array[i] == 0:
result += sum(array[i + 1:])
if result > 1000000000:
return -1
return result
def __test_random(self, n):
array = list(np.random.random_integers(0, 1, n))
random.shuffle(array)
with self.subTest(n=n):
self.assertEqual(self.__brute_solution(array), solution(array))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4825270
|
__________________________________________________________________________________________________
sample 36 ms submission
class Solution:
def merge(self, intervals: List[List[int]]) -> List[List[int]]:
sort_list = sorted(intervals)
merged = []
for i in sort_list:
if not merged or merged[-1][1] < i[0]:
merged.append(i)
else:
merged[-1][1] = max(merged[-1][1], i[1])
return merged
__________________________________________________________________________________________________
sample 13856 kb submission
'''
Merge overlapping intervals:
Input: [[1,3],[2,6],[8,10],[15,18]]
[[1,6], [8,10], [15,18]]
^ ^
[[1,3], [1,6]]
Output: [[1,6],[8,10],[15,18]]
1) sort by start times:
intervals.sort()
2) if overlap --> merge() and append res
else --> append each interval
p_first ++
p_second ++
s1 -------- e1
s2 ----------e2
'''
# Definition for an interval.
# class Interval:
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution:
def merge(self, intervals):
intervals.sort()
merged = []
for interval in intervals:
# if the list of merged intervals is empty or if the current
# interval does not overlap with the previous, simply append it.
if not merged or merged[-1][1] < interval[0]:
merged.append(interval)
else:
# otherwise, there is overlap, so we merge the current and previous
# intervals.
merged[-1][1] = max(merged[-1][1], interval[1])
return merged
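# Hedged usage sketch (not part of either submission; mirrors the example in the
# notes above):
#   sol = Solution()
#   sol.merge([[1, 3], [2, 6], [8, 10], [15, 18]])   # -> [[1, 6], [8, 10], [15, 18]]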
__________________________________________________________________________________________________
|
StarcoderdataPython
|
64599
|
<gh_stars>1-10
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import socket
from subprocess import Popen
import time
import unittest
from mcrouter.test.config import McrouterGlobals
# Test basic functionality of the dispatcher that can listen on
# multiple ports. Connect to all the ports a few times and make
# sure basic communication is ok
class TestMultiplePorts(unittest.TestCase):
def test_multiple_ports(self):
first_port = 11106
num_ports = 3
num_passes = 100
ports = range(first_port, first_port + num_ports)
cmd = McrouterGlobals.preprocessArgs([
McrouterGlobals.InstallDir + '/mcrouter/mcrouter',
'-L', '/tmp/test.log',
'-f', 'mcrouter/test/test_ascii.json',
'-p', ','.join(map(str, ports)),
'--proxy-threads', '2'
])
proc = Popen(cmd)
time.sleep(1) # magic
for i in range(num_passes):
for port in ports:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
address = ('localhost', port)
sock.connect(address)
fd = sock.makefile()
sock.send("get multiple-ports-junk-key\r\n")
self.assertTrue(fd.readline().strip() == "END")
sock.close()
proc.terminate()
|
StarcoderdataPython
|
3223241
|
<filename>seriesplugin/src/Identifiers/WunschlisteFeed.py
# -*- coding: utf-8 -*-
# by betonme @2012
# Imports
import re
from Components.config import config
from Tools.BoundFunction import boundFunction
from urllib import urlencode
from datetime import datetime
from sys import maxint
# Internal
from Plugins.Extensions.SeriesPlugin.IdentifierBase import IdentifierBase
from Plugins.Extensions.SeriesPlugin.Logger import logDebug, logInfo
from Plugins.Extensions.SeriesPlugin import _
try:
from HTMLParser import HTMLParser
except ImportError as ie:
HTMLParser = None
try:
from iso8601 import parse_date
except ImportError as ie:
parse_date = None
# Constants
SERIESLISTURL = "http://www.wunschliste.de/ajax/search_dropdown.pl?"
EPISODEIDURLATOM = "http://www.wunschliste.de/xml/atom.pl?"
#EPISODEIDURLRSS = "http://www.wunschliste.de/xml/rss.pl?"
# Series: EpisodeTitle (Season.Episode) - Weekday Date, Time / Channel (Country)
# Two and a Half Men: Der Mittwochs-Mann (1.5) - Mi 02.05., 19.50:00 Uhr / TNT Serie (Pay-TV)
# Two and a Half Men: Der Mittwochs-Mann (1.5) - Mi 02.05., 19.50:00 Uhr / TNT Serie
# Two and a Half Men: Der Mittwochs-Mann (1) (1.5) - Mi 02.05., 19.50:00 Uhr / TNT Serie
# Der Troedeltrupp - Das Geld liegt im Keller: Folge 109 (109) - Do 03.05., 16.15:00 Uhr / RTL II
# Galileo: U.a.: Die schaerfste Chili der Welt - Fr 04.05., 19.05:00 Uhr / ProSieben
# Galileo: Magazin mit <NAME>, BRD 2012 - Mi 09.05., 06.10:00 Uhr / ProSieben
# Gute Zeiten, schlechte Zeiten: Folgen 4985 - 4988 (21.84) - Sa 05.05., 11.00:00 Uhr / RTL
# Channel is between last / and ( or line end
CompiledRegexpAtomChannel = re.compile('\/(?!.*\/) ([^\(]+)')
# Date is between last - and channel
CompiledRegexpAtomDate = re.compile('-(?!.*-) (.+)')
# Find optional episode
CompiledRegexpAtomEpisode = re.compile('\((?!.*\()(.+)\) ')
# Series: Title
CompiledRegexpAtomTitle = re.compile('.+: (.+)')
# (Season.Episode) - EpisodeTitle
# (21.84) Folge 4985
# (105) Folge 105
# (4.11/4.11) Mama ist die Beste/Rund um die Uhr
# Galileo: Die schaerfste Chili der Welt
# Galileo: Jumbo auf Achse: Muelltonnenkoch
# Gute Zeiten, schlechte Zeiten: Folgen 4985 - 4988 (21.84) - Sa 05.05., 11.00:00 Uhr / RTL
#CompiledRegexpPrintTitle = re.compile( '(\(.*\) )?(.+)')
CompiledRegexpEpisode = re.compile( '((\d+)[\.x])?(\d+)')
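# Hedged illustration (not part of the original plugin) of how the pattern above splits
# season/episode strings:
#   CompiledRegexpEpisode.search('1.5').groups()   # -> ('1.', '1', '5')    season 1, episode 5
#   CompiledRegexpEpisode.search('105').groups()   # -> (None, None, '105') season falls back to the default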
def str_to_utf8(s):
# Convert a byte string with unicode escaped characters
logDebug("WLF: str_to_utf8: s: ", repr(s))
#unicode_str = s.decode('unicode-escape')
#logDebug("WLF: str_to_utf8: s: ", repr(unicode_str))
## Python 2.x can't convert the special chars nativly
#utf8_str = utf8_encoder(unicode_str)[0]
#logDebug("WLF: str_to_utf8: s: ", repr(utf8_str))
#return utf8_str #.decode("utf-8").encode("ascii", "ignore")
if type(s) != unicode:
# Default shoud be here
try:
s = s.decode('ISO-8859-1')
logDebug("WLF: str_to_utf8 decode ISO-8859-1: s: ", repr(s))
except:
try:
s = unicode(s, 'utf-8')
s = s.encode('ISO-8859-1')
logDebug("WLF: str_to_utf8 decode utf-8: s: ", repr(s))
except:
try:
s = unicode(s, 'cp1252')
s = s.encode('ISO-8859-1')
logDebug("WLF: str_to_utf8 decode cp1252: s: ", repr(s))
except:
s = unicode(s, 'utf-8', 'ignore')
s = s.encode('ISO-8859-1')
logDebug("WLF: str_to_utf8 decode utf-8 ignore: s: ", repr(s))
else:
try:
s = s.encode('ISO-8859-1')
logDebug("WLF: str_to_utf8 encode ISO-8859-1: s: ", repr(s))
except:
s = s.encode('ISO-8859-1', 'ignore')
logDebug("WLF: str_to_utf8 except encode ISO-8859-1 ignore: s: ", repr(s))
return s
class WLAtomParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.title = False
self.updated = False
self.titlestr = ''
self.updatedstr = ''
self.list = []
def handle_starttag(self, tag, attributes):
if tag == 'title':
self.title = True
elif tag == 'updated':
self.updated = True
def handle_endtag(self, tag):
if tag == 'title':
self.title = False
elif tag == 'updated':
self.updated = False
elif tag == 'entry':
self.list.append( (self.titlestr, self.updatedstr) )
self.titlestr = ''
self.updatedstr = ''
def handle_data(self, data):
if self.title:
self.titlestr += data
elif self.updated:
self.updatedstr = data
class WunschlisteFeed(IdentifierBase):
def __init__(self):
IdentifierBase.__init__(self)
@classmethod
def knowsToday(cls):
return False
@classmethod
def knowsFuture(cls):
return True
def getName(self):
return "Wunschliste"
def getEpisode(self, name, begin, end=None, service=None):
# On Success: Return a single season, episode, title tuple
# On Failure: Return a empty list or String or None
# Check dependencies
if HTMLParser is None:
msg = _("Error install") + " HTMLParser"
logInfo(msg)
return msg
if parse_date is None:
msg = _("Error install") + " parse_date"
logInfo(msg)
return msg
# Check preconditions
if not name:
msg =_("Skip: No show name specified")
logInfo(msg)
return msg
if not begin:
msg = _("Skip: No begin timestamp specified")
logInfo(msg)
return msg
if not service:
msg = _("Skip: No service specified")
logInfo(msg)
return msg
logInfo("WLF: getEpisode, name, begin, end=None, service", name, begin, end, service)
self.begin = begin
self.end = end
self.service = service
self.knownids = []
self.returnvalue = None
while name:
ids = self.getSeries(name)
while ids:
idserie = ids.pop()
if idserie and len(idserie) == 2:
id, idname = idserie
# Handle encodings
self.series = str_to_utf8(idname)
logInfo("WLF: Possible matched series:", self.series)
result = self.getNextPage( id )
if result:
return result
else:
name = self.getAlternativeSeries(name)
else:
return ( self.returnvalue or _("No matching series found") )
def getSeries(self, name):
#url = SERIESLISTURL + urlencode({ 'q' : re.sub("[^a-zA-Z0-9-*]", " ", name) })
url = SERIESLISTURL + urlencode({ 'q' : name.lower() })
data = self.getPage( url )
if data and isinstance(data, basestring):
data = self.parseSeries(data)
self.doCacheList(url, data)
if data and isinstance(data, list):
logDebug("WLF: ids", data)
return self.filterKnownIds(data)
def parseSeries(self, data):
serieslist = []
for line in data.splitlines():
values = line.split("|")
if len(values) == 4:
idname, countryyear, id, temp = values
logDebug(id, idname)
serieslist.append( (id, idname) )
else:
logDebug("WLF: ParseError: " + str(line))
serieslist.reverse()
return serieslist
def parseNextPage(self, data):
# Handle malformed HTML issues
		data = data.replace('&', '&amp;') # e.g. target=\"_blank\"&
parser = WLAtomParser()
parser.feed(data)
#logDebug(parser.list)
return parser.list
def getNextPage(self, id):
logDebug("WLF: getNextPage")
url = EPISODEIDURLATOM + urlencode({ 's' : id })
data = self.getPage( url )
if data and isinstance(data, basestring):
data = self.parseNextPage(data)
self.doCacheList(url, data)
if data and isinstance(data, list):
trs = data
yepisode = None
ydelta = maxint
for tds in trs:
if tds and len(tds) == 2:
xtitle, xupdated = tds
if xtitle is not None and xupdated is not None:
#import iso8601
#http://code.google.com/p/pyiso8601/
xbegin = parse_date(xupdated)
xbegin = xbegin.replace(tzinfo=None)
#"2014-11-10T20:15:00+01:00"
#xbegin = datetime.strptime(xupdated[0:-6], "%Y-%m-%dT%H:%M:%S");
#Py2.6
delta = abs(self.begin - xbegin)
delta = delta.seconds + delta.days * 24 * 3600
#Py2.7
#delta = abs(self.begin - xbegin).total_seconds()
logDebug("WLF:", self.begin, '-', xbegin, '-', delta, '-', self.max_time_drift)
if delta <= self.max_time_drift:
result = CompiledRegexpAtomChannel.search(xtitle)
if result and len(result.groups()) >= 1:
xchannel = result.group(1)
logInfo("WLF: Possible match witch channel: ", xchannel)
if self.compareChannels(self.service, xchannel):
if delta < ydelta:
# Slice string to remove channel
xtitle = xtitle[:result.start()]
result = CompiledRegexpAtomDate.search(xtitle)
if result and len(result.groups()) >= 1:
# Slice string to remove date
xtitle = xtitle[:result.start()]
result = CompiledRegexpAtomEpisode.search(xtitle)
if result and len(result.groups()) >= 1:
# Extract season and episode
xepisode = result.group(1)
# Slice string to remove season and episode
xtitle = xtitle[:result.start()]
result = CompiledRegexpEpisode.search(xepisode)
if result and len(result.groups()) >= 3:
xseason = result and result.group(2) or config.plugins.seriesplugin.default_season.value
xepisode = result and result.group(3) or config.plugins.seriesplugin.default_episode.value
else:
logDebug("WLF: wrong episode format", xepisode)
xseason = config.plugins.seriesplugin.default_season.value
xepisode = config.plugins.seriesplugin.default_episode.value
else:
logDebug("WLF: wrong title format", xtitle)
xseason = config.plugins.seriesplugin.default_season.value
xepisode = config.plugins.seriesplugin.default_episode.value
result = CompiledRegexpAtomTitle.search(xtitle)
if result and len(result.groups()) >= 1:
# Extract episode title
xtitle = result.group(1)
# Handle encodings
xtitle = str_to_utf8(xtitle)
yepisode = (xseason, xepisode, xtitle, self.series)
ydelta = delta
else: #if delta >= ydelta:
break
else:
self.returnvalue = _("Check the channel name")
else:
if yepisode:
return ( yepisode )
if delta <= 600:
# Compare channels?
logInfo("WLF: Max time trift exceeded", delta)
if yepisode:
return ( yepisode )
else:
logInfo("WLF: No data returned")
# Nothing found
return
|
StarcoderdataPython
|
112667
|
<reponame>llewyn-jh/basic-deep-learning
"""Recurrent Neural Network."""
import tensorflow as tf
import numpy as np
class RecurrentNetwork:
"""A network is made up varialbles initialize, forward,
backward, train, fit, calcualte validation loss,
activation, dataset batch, predict, accuracy methods."""
def __init__(self, n_cells=10, batch_size=32, learning_rate=0.1):
"""Initialize all of variables except for kernels.
n_cells: the number of neurals in a hidden layer, default: 10
batch_size: batch size of train, validation dataset, default: 32
learning_rate: 0~1, updating rate of weights, default: 0.1"""
self.n_cells = n_cells
self.batch_size = batch_size
self.weight_1h = None
self.weight_1x = None
self.bias_1 = None
self.weight_2 = None
self.bias_2 = None
self.hidden_states = None
self.losses = []
self.val_losses = []
self.learning_rate = learning_rate
def init_weights(self, n_features: int, n_classes: int):
"""Set initial kernels of hidden layers.
We use Orthogonal, GlorotUniform of tensorflow initializers and numpy zeros.
n_features: the number of cells
n_classes: the number of classes what you want to detect"""
orth_init = tf.initializers.Orthogonal()
glorot_init = tf.initializers.GlorotUniform()
self.weight_1h = orth_init((self.n_cells, self.n_cells)).numpy()
self.weight_1x = glorot_init((n_features, self.n_cells)).numpy()
self.bias_1 = np.zeros(self.n_cells)
self.weight_2 = glorot_init((self.n_cells, n_classes)).numpy()
self.bias_2 = np.zeros(n_classes)
def forpass(self, x):
"""Calculate a forward in a network. Return values before final activation in a network.
x: input data, training or validation dataset.
You have to encode a dataset to one hot encoding.
A shape of data is (self.batch_size, n_features, n_features)"""
self.hidden_states = [np.zeros((x.shape[0], self.n_cells))]
seq = np.swapaxes(x, 0, 1)
for sample in seq:
z1 = np.dot(sample, self.weight_1x) + \
np.dot(self.hidden_states[-1], self.weight_1h) + self.bias_1
hidden_state = np.tanh(z1)
self.hidden_states.append(hidden_state)
z2 = np.dot(hidden_state, self.weight_2) + self.bias_2
return z2
def backprop(self, x, err):
"""Calculate a backword in a network. Return gradients of kernels ans biases
x: input data, training or validation dataset.
You have to encode a dataset to one hot encoding.
A shape of data is (self.batch_size, n_features, n_features)
err: differences between targets and activations, "- (targets - activations)" """
m = len(x)
# transpose so the gradient has the same shape as weight_2 (n_cells, n_classes)
weight_2_grad = np.dot(self.hidden_states[-1].T, err) / m
bias_2_grad = np.sum(err) / m
seq = np.swapaxes(x, 0, 1)
weight_1h_grad = weight_1x_grad = bias_1_grad = 0
err2cell = np.dot(err, self.weight_2.T) * (1 - self.hidden_states[-1] ** 2)
for sample, hidden_state in zip(seq[::-1][:10], self.hidden_states[:-1][::-1][:10]):
weight_1h_grad += np.dot(hidden_state.T, err2cell)
weight_1x_grad += np.dot(sample.T, err2cell)
bias_1_grad += np.sum(err2cell, axis=0)
err2cell = np.dot(err2cell, self.weight_1h) * (1 - hidden_state ** 2)
weight_1h_grad /= m
weight_1x_grad /= m
bias_1_grad /= m
return weight_1h_grad, weight_1x_grad, bias_1_grad, weight_2_grad, bias_2_grad
def fit(self, x, y, epochs=100, x_val=None, y_val=None):
"""Train a network. Not save all of kernels and biases in a network.
x, x_val: input data, training or validation dataset.
You have to encode a dataset to one hot encoding.
A shape of data is (self.batch_size, n_features, n_features)
y, y_val: input data, target of train or validation dataset.
if you want to classify multiple classes, you have to do one hot encoding and modify some code.
epochs: the number of training loop, default=100"""
y = y.reshape(-1, 1)
y_val = y_val.reshape(-1, 1)
np.random.seed(42)
self.init_weights(x.shape[2], y.shape[1])
for i in range(epochs):
print('Epoch', i, end=' ')
batch_losses = []
for x_batch, y_batch in self.gen_batch(x, y):
print('.', end='')
z = self.forpass(x_batch)
activation = 1 / (1 + np.exp(-z))
err = - (y_batch - activation)
weight_1h_grad, weight_1x_grad, bias_1_grad, weight_2_grad, bias_2_grad = \
self.backprop(x_batch, err)
self.weight_1h -= self.learning_rate * weight_1h_grad
self.weight_1x -= self.learning_rate * weight_1x_grad
self.bias_1 -= self.learning_rate * bias_1_grad
self.weight_2 -= self.learning_rate * weight_2_grad
self.bias_2 -= self.learning_rate * bias_2_grad
cliped_activation = np.clip(activation, 1e-10, 1-1e-10)
loss = np.mean(-(y_batch * np.log(cliped_activation) + (1 - y_batch) * np.log(1 - cliped_activation)))
batch_losses.append(loss)
print()
self.losses.append(np.mean(batch_losses))
self.update_val_losses(x_val, y_val)
def gen_batch(self, x, y):
"""Generate batch of dataset
x: input data, training or validation dataset.
y: input data, target of train or validation dataset."""
length = len(x)
bins = length // self.batch_size
if length % self.batch_size:
bins += 1
indexes = np.random.permutation(np.arange(len(x)))
x = x[indexes]
y = y[indexes]
for i in range(bins):
start = self.batch_size * i
end = self.batch_size * (i + 1)
yield x[start:end], y[start:end]
def update_val_losses(self, x_val, y_val):
"""Calculate and update losses of validation dataset.
x_val: input data, training or validation dataset.
You have to encode a dataset to one hot encoding.
A shape of data is (self.batch_size, n_features, n_features)
y_val: input data, target of train or validation dataset.
if you want to classify multiple classes,
you have to do one hot encoding and modify some code."""
z = self.forpass(x_val)
activation = 1 / (1 + np.exp(-z))
cliped_activation = np.clip(activation, 1e-10, 1-1e-10)
loss = np.mean(-(y_val * np.log(cliped_activation) + (1 - y_val) * np.log(1 - cliped_activation)))
self.val_losses.append(loss)
def predict(self, x):
"""Predict a result. Return True or False.
x: input data, training or validation dataset.
You have to encode a dataset to one hot encoding.
A shape of data is (self.batch_size, n_features, n_features)
y: input data, target of train or validation dataset."""
z = self.forpass(x)
activation = 1 / (1 + np.exp(-z))
return activation > 0.5
def score(self, x, y):
"""Calculate accuracy of a network.
x_val: input data, training or validation dataset.
You have to encode a dataset to one hot encoding.
A shape of data is (self.batch_size, n_features, n_features)
y_val: input data, target of train or validation dataset."""
return np.mean(self.predict(x) == y.reshape(-1, 1))
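# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The random inputs, shapes and hyperparameters below are assumptions chosen only
# to show the intended call sequence: construct, fit, then score.
if __name__ == "__main__":
    np.random.seed(0)
    x_train = np.random.randint(0, 2, size=(128, 20, 10)).astype(float)  # (samples, timesteps, features)
    y_train = np.random.randint(0, 2, size=(128,))                        # binary targets
    x_val, y_val = x_train[:32], y_train[:32]
    rn = RecurrentNetwork(n_cells=8, batch_size=32, learning_rate=0.01)
    rn.fit(x_train, y_train, epochs=2, x_val=x_val, y_val=y_val)
    print("validation accuracy:", rn.score(x_val, y_val))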
|
StarcoderdataPython
|
1649077
|
from django.urls import path, include
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
urlpatterns = [
path('api/', include('api.urls'))
]
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static('/media/', document_root=settings.MEDIA_ROOT)
|
StarcoderdataPython
|
1661146
|
<reponame>stevenkfirth/crossproduct<filename>crossproduct/segment.py
# -*- coding: utf-8 -*-
from .point import Point, Point2D
from .points import Points
from .line import Line2D, Line3D
from .segments import Segments
SMALL_NUM=0.00000001
class Segment():
"A n-D segment"
classname='Segment'
def __init__(self,P0,P1):
""
if P0==P1:
raise ValueError('P0 and P1 cannot be equal for a segment')
if isinstance(P0, Point):
self._P0=P0
else:
raise TypeError
if isinstance(P1, Point):
self._P1=P1
else:
raise TypeError
def __add__(self,segment):
"""Adds this segment to the supplied segment
:param segment: A segment.
:type segment: Segment2D, Segment3D
:raises ValueError: If the segments are not collinear,
do not share a start or end point,
or if they overlap.
:return: A new segment which is the sum of the two segments.
:rtype: Segment2D, Segment3D
:Example:
.. code-block:: python
# 2D example
>>> s1 = Segment2D(Point2D(0,0), Point2D(1,0))
>>> s2 = Segment2D(Point2D(1,0), Point2D(2,0))
>>> result = s1 + s2
>>> print(result)
Segment2D(Point2D(0,0), Point2D(2,0))
# 3D example
>>> s1 = Segment3D(Point2D(1,0,0), Point3D(0,0,0))
>>> s2 = Segment3D(Point3D(0,0,0), Point3D(-1,0,0))
>>> result = s1 + s2
>>> print(result)
Segment3D(Point3D(-1,0,0), Point3D(1,0,0))
"""
if not self.line==segment.line:
raise ValueError('To add two segments, they must be collinear')
s1=self.order
s2=segment.order
#print(s1)
#print(s2)
if not (s1.P1==s2.P0 or s1.P0==s2.P1):
raise ValueError('To add two segments, they must have a shared start or end point and not overlap')
line=self.line
t_values=[line.calculate_t_from_point(self.P0),
line.calculate_t_from_point(self.P1),
line.calculate_t_from_point(segment.P0),
line.calculate_t_from_point(segment.P1)]
return self.__class__(line.calculate_point(min(t_values)),
line.calculate_point(max(t_values)))
def __contains__(self,obj):
"""Tests if the segment contains the object.
:param obj: A point or segment.
:type obj: Point2D, Point3D, Segment2D, Segment3D
:return: For point, True if the point lies on the segment; otherwise False.
For segment, True if the segment start and endpoints are on the segment; otherwise False.
:rtype: bool
:Example:
.. code-block:: python
# 2D example
>>> s = Segment2D(Point2D(0,0), Point2D(1,0))
>>> result = Point2D(2,0) in l
>>> print(result)
False
# 3D example
>>> s1 = Segment3D(Point2D(0,0,0), Point3D(1,0,0))
>>> s2 = Segment3D(Point3D(0,0,0), Point3D(0.5,0,0))
>>> result = s2 in s1
>>> print(result)
True
"""
if isinstance(obj,Point):
t=self.line.calculate_t_from_point(obj)
try:
pt=self.calculate_point(t)
except ValueError: # t<0<1
return False
return obj==pt
if isinstance(obj,Segment):
return obj.P0 in self and obj.P1 in self
else:
return TypeError()
def __eq__(self,segment):
"""Tests if this segment and the supplied segment are equal.
:param segment: A segment.
:type segment: Segment2D, Segment3D
:return: True if the segments have the same start point and the same end point;
else True if the start point of one is the end point of the other, and vice versa;
otherwise False.
:rtype: bool
:Example:
.. code-block:: python
# 2D example
>>> s = Segment2D(Point2D(0,0), Point2D(1,0))
>>> result = s == s
>>> print(result)
True
# 3D example
>>> s1 = Segment3D(Point3D(0,0,0), Point3D(1,0,0))
>>> s2 = Segment3D(Point3D(0,0,0), Point3D(-1,0,0))
>>> result = s1 == s2
>>> print(result)
False
"""
if isinstance(segment,Segment):
return ((self.P0==segment.P0 and self.P1==segment.P1) or
(self.P0==segment.P1 and self.P1==segment.P0))
else:
return False
def calculate_point(self,t):
"""Returns a point on the segment for a given t value.
:param t: The t value.
:type t: float
:return: A point on the segment based on the t value.
:rtype: Point2D, Point3D
:Example:
.. code-block:: python
# 2D example
>>> s = Segment2D(Point2D(0,0), Point2D(1,0))
>>> result = s.calcuate_point(0.5)
>>> print(result)
Point2D(0.5,0)
# 3D example
>>> s = Segment3D(Point3D(0,0,0), Point3D(1,0,0))
>>> result = s.calcuate_point(0)
>>> print(result)
Point3D(0,0,0)
"""
if t>=0-SMALL_NUM and t<=1+SMALL_NUM: ## MADE A CHANGE HERE
return self.line.calculate_point(t)
else:
raise ValueError('For a segment, t must be equal to or between 0 and 1')
def difference_segment(self,segment):
"""Returns the difference of two segments.
:param segment: A segment.
:type segment: Segment2D, Segment3D
:return: A segments sequence of 0, 1 or 2 segments.
Returns an empty segments sequence if the supplied segment is equal to or contains this segment.
Returns a segments sequence with this segment if the supplied segment does not intersect this segment.
Returns a segments sequence with a new segment if the supplied segment intersects this segment
including either one of the start point or end point.
Returns a segment sequence with two new segments if the supplied segment intersects this segment
and is contained within it.
:rtype: Segments
:Example:
.. code-block:: python
# 2D example
>>> s1 = Segment2D(Point2D(0,0), Point2D(1,0))
>>> s2 = Segment2D(Point2D(0.5,0), Point2D(1,0))
>>> result = s1.difference_segment(s2)
>>> print(result)
Segments(Segment2D(Point2D(0,0), Point2D(0.5,0)))
# 3D example
>>> s1 = Segment3D(Point3D(0,0,0), Point3D(1,0,0))
>>> s2 = Segment3D(Point3D(-1,0,0), Point3D(2,0,0))
>>> result = s1.difference_segment(s2)
>>> print(result)
Segments()
"""
if self in segment:
return Segments()
if self.line==segment.line:
t0=self.line.calculate_t_from_point(segment.P0)
t1=self.line.calculate_t_from_point(segment.P1)
#print(self)
#print(segment)
#print(t0,t1)
if t1<t0:
t0,t1=t1,t0
if t0>=1 or t1<=0:
return Segments(self)
elif t0>=0 and t1>=1:
return Segments(self.__class__(self.calculate_point(0),
self.calculate_point(t0)),)
elif t0<=0 and t1<=1:
return Segments(self.__class__(self.calculate_point(t1),
self.calculate_point(1)),)
else:
return Segments(self.__class__(self.calculate_point(0),
self.calculate_point(t0)),
self.__class__(self.calculate_point(t1),
self.calculate_point(1)))
else:
return Segments(self)
def difference_segments(self,segments):
"""Returns the difference between this segment and a segments sequence.
:param segments: A segments sequence.
:type segments: Segments
:return: Any parts of this segment which are not also part of the segments in the sequence.
:rtype: Segments
:Example:
.. code-block:: python
# 2D example
>>> s = Segment2D(Point2D(0,0), Point2D(1,0))
>>> sgmts = Segments(Segment2D(Point2D(0.2,0), Point2D(0.8,0))
>>> result = s.difference_segments(sgmts)
>>> print(result)
Segments(Segment2D(Point2D(0,0), Point2D(0.2,0)),
Segment2D(Point2D(0.8,0),Point2D(1,0)))
# 3D example
>>> s = Segment3D(Point3D(0,0,0), Point3D(1,0,0))
>>> sgmts = Segments(Segment3D(Point3D(-1,0,0), Point3D(2,0,0))
>>> result = s.difference_segment(sgmts)
>>> print(result)
Segments()
"""
def rf(result,segments):
if len(segments)==0:
return result
else:
diff=result.difference_segment(segments[0])
#print('diff',diff)
if len(diff)==0:
return None
elif len(diff)==1:
if len(segments)>1:
result=rf(diff[0],segments[1:])
else:
result=diff[0],
return result
elif len(diff)==2:
if len(segments)>1:
result=tuple(list(rf(diff[0],segments[1:]))+list(rf(diff[1],segments[1:])))
else:
result=diff[0],diff[1]
return result
else:
raise Exception
result=self
result=rf(result,segments)
if result is None:
return Segments()
else:
return Segments(*result)
def distance_to_point(self,point):
"""Returns the distance from the segment to the supplied point.
:param point: A point.
:type point: Point2D, Point3D
:return: The distance between the segment and the point.
:rtype: float
:Example:
.. code-block:: python
# 2D example
>>> s = Segment2D(Point2D(0,0), Point2D(1,0))
>>> result = s.distance_to_point(Point2D(0,10))
>>> print(result)
10
# 3D example
>>> s = Segment3D(Point3D(0,0,0), Point3D(1,0,0))
>>> result = s.distance_to_point(Point3D(10,0,0))
>>> print(result)
9
.. seealso:: `<https://geomalgorithms.com/a02-_lines.html>`_
"""
v=self.line.vL
w=point-self.P0
c1=v.dot(w)
c2=v.dot(v)
if c1<=0: # i.e. t<0
return w.length
elif c2<=c1: # i.e. T>0
return (point-self.P1).length
else:
return self.line.distance_to_point(point)
def intersect_line(self,line):
"""Returns the intersection of this segment with the supplied line.
:param line: A line.
:type line: Line2D, Line3D
:return: Returns None for parallel non-collinear segment and line.
Returns None for skew segment and line that don't intersect.
Returns point for skew segment and line that intersect.
Returns segment (this segment) for a segment that lies on the line.
:rtype: None, Point2D, Point3D, Segment2D, Segment3D
:Example:
.. code-block:: python
# 2D example
>>> s = Segment2D(Point2D(0,0), Point2D(1,0))
>>> l = Line2D(Point2D(0,0), Vector2D(0,1))
>>> result = s.intersect_line(l)
>>> print(result)
Point2D(0,0)
# 3D example
>>> s = Segment3D(Point3D(0,0,0), Point3D(1,0,0))
>>> l = Line3D(Point3D(0,0,1), Vector3D(1,0,0))
>>> result = s.intersect_line(l)
>>> print(result)
None
.. seealso:: `<https://geomalgorithms.com/a05-_intersect-1.html>`_
"""
if line==self.line:
return self
elif self.line.is_parallel(line): # parallel but not collinear
return None
else:
p=self.line._intersect_line_skew(line)
if p in self:
return p
else:
return None
@property
def order(self):
"""Returns the segment with ordered points such that P0 is less than P1
:return: If P0 < P1, returns the reverse of this segment;
otherwise returns a copy of this segment.
:rtype: Segment2D, Segment3D
:Example:
.. code-block:: python
# 2D example
>>> s = Segment2D(Point2D(1,0), Point2D(0,0))
>>> result = s.order
>>> print(result)
Segment2D(Point2D(0,0), Point2D(1,0))
# 3D example
>>> s = Segment3D(Point3D(0,0,0), Point3D(1,0,0))
>>> result = s.order
>>> print(result)
Segment3D(Point3D(0,0,0), Point3D(1,0,0))
"""
if self.P0 < self.P1 :
return self.__class__(self.P0,self.P1)
else:
return self.reverse
@property
def P0(self):
"""The start point of the segment.
:rtype: Point2D, Point3D
"""
return self._P0
@property
def P1(self):
"""The end point of the segment.
:rtype: Point2D, Point3D
"""
return self._P1
@property
def points(self):
"""Return the points P0 and P1 of the segment.
:return: The segment points as (P0,P1).
:rtype: Points
:Example:
.. code-block:: python
# 2D example
>>> s = Segment2D(Point2D(1,0), Point2D(0,0))
>>> result = s.points
>>> print(result)
Points(Point2D(1,0), Point2D(0,0))
# 3D example
>>> s = Segment3D(Point3D(0,0,0), Point3D(1,0,0))
>>> result = s.points
>>> print(result)
Points(Point3D(0,0,0), Point3D(1,0,0))
"""
return Points(self.P0, self.P1)
@property
def reverse(self):
"""Returns the segment in reverse.
:return: A segment where the start point is the end point of this segment, and vice versa.
:rtype: Segment2D, Segment3D
:Example:
.. code-block:: python
# 2D example
>>> s = Segment2D(Point2D(1,0), Point2D(0,0))
>>> result = s.reverse
>>> print(result)
Segment2D(Point2D(0,0), Point2D(1,0))
# 3D example
>>> s = Segment3D(Point3D(0,0,0), Point3D(1,0,0))
>>> result = s.reverse
>>> print(result)
Segment3D(Point3D(1,0,0), Point3D(0,0,0))
"""
return self.__class__(self.P1,self.P0)
class Segment2D(Segment):
"""A two dimensional segment, situated on an x, y plane.
Equation of the segment is P(t) = P0 + vL*t where:
- P(t) is a point on the segment
- P0 is the start point of the segment
- vL is the segment vector (P1 - P0)
- t is any real number between 0 and 1 inclusive
"""
def __repr__(self):
""
return 'Segment2D(%s, %s)' % (self.P0,self.P1)
@property
def dimension(self):
"""The dimension of the segment.
:return: '2D'
:rtype: str
:Example:
.. code-block:: python
>>> s = Segment2D(Point2D(0,0), Point2D(1,0))
>>> print(s.dimension)
'2D'
"""
return '2D'
def intersect_halfline(self,halfline):
"""Returns the interesection of this segment and a halfline.
:param halfline: A 2D halfline.
:type halfline: HalfLine2D
:return: Returns None for parallel non-collinear segment and halfline.
Returns None for skew segment and halfline that don't intersect.
Returns None for collinear segment and halfline that don't intersect.
Returns point for skew segment and halfline that intersect.
Returns point for collinear segment and halfline that intersect at the start or end point.
Returns segment for collinear segment and halfline that intersect.
:rtype: None, Point2D, Segment2D
:Example:
.. code-block:: python
>>> s = Segment2D(Point2D(0,0), Point2D(1,0))
>>> hl = Halfine2D(Point2D(0,0), Vector2D(0,1))
>>> result = s.intersect_halfline(hl)
>>> print(result)
Point2D(0,0)
.. seealso:: `<https://geomalgorithms.com/a05-_intersect-1.html>`_
"""
if halfline in self.line:
if self.P0 in halfline and self.P1 in halfline:
return self
elif self.P0==halfline.P0:
return self.P0
elif self.P1==halfline.P0:
return self.P1
elif self.P0 in halfline:
return Segment2D(self.P0,halfline.P0,)
elif self.P1 in halfline:
return Segment2D(halfline.P0,self.P1)
else:
return None
elif self.line.is_parallel(halfline): # parallel but not collinear
return None
else:
p=self.line._intersect_line_skew(halfline.line)
#print(p)
if p in self and p in halfline:
return p
else:
return None
def intersect_segment(self,segment):
"""Returns the interesection of this segment and another segment.
:param segment: A 2D segment.
:type segment: Segment2D
:return: Returns None for parallel non-collinear segments.
Returns None for skew segments that don't intersect.
Returns None for collinear segments that don't intersect.
Returns point for skew segments that intersect.
Returns point for collinear segments that intersect at a start or end point.
Returns segment for collinear segments that intersect.
:rtype: None, Point2D, Segment2D
:Example:
.. code-block:: python
>>> s1 = Segment2D(Point2D(0,0), Point2D(1,0))
>>> s2 = Segment2D(Point2D(0,0), Point2D(0,1))
>>> result = s1.intersect_segment(s2)
>>> print(result)
Point2D(0,0)
.. seealso:: `<https://geomalgorithms.com/a05-_intersect-1.html>`_
"""
if segment in self.line:
try:
t0=self.line.calculate_t_from_x(segment.P0.x)
except ValueError:
t0=self.line.calculate_t_from_y(segment.P0.y)
try:
t1=self.line.calculate_t_from_x(segment.P1.x)
except ValueError:
t1=self.line.calculate_t_from_y(segment.P1.y)
#t1=self.calculate_t_from_point(segment.P1)
if t0 > t1: # must have t0 smaller than t1, swap if not
t0, t1 = t1, t0
if (t0 > 1 or t1 < 0): # intersecting segment does not overlap
return None
if t0<0: t0=0 # clip to min 0
if t1>1: t1=1 # clip to max 1
if t0==t1: # point overlap
return self.calculate_point(t0)
# they overlap in a valid subsegment
return Segment2D(self.calculate_point(t0),self.calculate_point(t1))
elif self.line.is_parallel(segment.line): # parallel but not collinear
return None
else:
p=self.line._intersect_line_skew(segment.line)
if p in self and p in segment:
return p
else:
return None
@property
def line(self):
"""Returns the line which the segment lies on.
:return: A line with the same start point (P0) and vector (P1-P0) as the segment.
:rtype: Line2D
:Example:
.. code-block:: python
>>> s = Segment2D(Point2D(0,0), Point2D(1,0))
>>> result = s.line
>>> print(result)
Line2D(Point2D(0,0), Vector2D(1,0))
"""
return Line2D(self.P0,self.P1-self.P0)
def plot(self,ax,**kwargs):
"""Plots the segment on the supplied axes.
:param ax: An Axes instance.
:type ax: matplotlib.axes.Axes
:param kwargs: keyword arguments to be supplied to the Axes.plot call
"""
x=[p.x for p in self.points]
y=[p.y for p in self.points]
ax.plot(x,y,**kwargs)
# @property
# def polyline(self):
# """Returns a simple polyline of the segment
# :return polyline:
# :rtype SimplePolyline2D:
# """
# from .simple_polyline import SimplePolyline2D
# return SimplePolyline2D(self.P0,self.P1)
def project_3D(self,plane,coordinate_index):
"""Projection of 2D segment on a 3D plane.
:param plane: The plane for the projection
:type plane: Plane3D
:param coordinate_index: The index of the coordinate which was ignored
to create the 2D projection. For example, coordinate_index=0
means that the x-coordinate was ignored and this point
was originally projected onto the yz plane.
:type coordinate_index: int
:return: 3D segment which has been projected from the 2D segment.
:rtype: Segment3D
:Example:
.. code-block:: python
>>> s = Segment2D(Point2D(0,0), Point2D(1,0))
>>> pl = Plane3D(Point3D(0,0,1), Vector3D(0,0,1))
>>> result = s.project_3D(pl, 2)
Segment3D(Point3D(0,0,1),Point3D(1,0,1))
"""
P0=self.P0.project_3D(plane,coordinate_index)
P1=self.P1.project_3D(plane,coordinate_index)
return Segment3D(P0,P1)
class Segment3D(Segment):
"""A three dimensional segment, situated on an x, y, z plane.
Equation of the halfline is P(t) = P0 + vL*t where:
- P(t) is a point on the halfline
- P0 is the start point of the halfline
- vL is the halfline vector
- t is any real, positive number between 0 and 1
"""
def __repr__(self):
""
return 'Segment3D(%s, %s)' % (self.P0,self.P1)
@property
def dimension(self):
"""The dimension of the segment.
:return: '3D'
:rtype: str
:Example:
.. code-block:: python
>>> s = Segment3D(Point3D(0,0,0), Point3D(0,0,1))
>>> print(s.dimension)
'3D'
"""
return '3D'
def distance_to_segment(self,segment):
"""Returns the distance from this segment to the supplied segment
:param segment: A 3D segment.
:type segment: Segment3D
:return: The distance between the two segments.
:rtype: float
:Example:
.. code-block:: python
>>> s1 = Segment3D(Point3D(0,0,0), Point3D(0,0,1))
>>> s2 = Segment3D(Point3D(0,0,2), Point3D(0,0,3))
>>> result= s1.distance_to_segment(s2)
>>> print(result)
1
.. seealso:: `<https://geomalgorithms.com/a07-_distance.html>`_
"""
u=self.line.vL
v=segment.line.vL
w=self.P0-segment.P0
a=u.dot(u)
b=u.dot(v)
c=v.dot(v)
d=u.dot(w)
e=v.dot(w)
D=a*c - b*b
sc,sN,sD = D,D,D # sc = sN / sD, default sD = D >= 0
tc,tN,tD = D,D,D # tc = tN / tD, default tD = D >= 0
if D< SMALL_NUM: # the lines are almost parallel
sN=0 # force using point P0 on segment S1
sD=1 # to prevent possible division by 0.0 later
tN=e
tD=c
else: # get the closest points on the infinite lines
sN=b*e-c*d
tN=a*e-b*d
if sN<0: # sc < 0 => the s=0 edge is visible
sN=0
tN=e
tD=c
elif sN>sD: # sc > 1 => the s=1 edge is visible
sN=sD
tN=e+b
tD=c
if tN<0: # tc < 0 => the t=0 edge is visible
tN=0
# recompute sc for this edge
if -d<0:
sN=0
elif -d>a:
sN=sD
else:
sN=-d
sD=a
elif tN>tD: # tc > 1 => the t=1 edge is visible
tN=tD
# recompute sc for this edge
if -d+b<0:
sN=0
elif -d+b>a:
sN=sD
else:
sN=-d+b
sD=a
# finally do the division to get sc and tc
sc=0 if abs(sN)<SMALL_NUM else sN / sD
tc=0 if abs(tN)<SMALL_NUM else tN / tD
# get the difference of the two closest points
dP = w + u*sc - v*tc # = S1(sc) - S2(tc)
return dP.length
def intersect_halfline(self,halfline):
"""Returns the interesection of this segment and a halfline
:param halfline: A 3D halfline.
:type halfline: HalfLine3D
:return: Returns None for parallel non-collinear segment and halfline.
Returns None for skew segment and halfline that don't intersect.
Returns None for collinear segment and halfline that don't intersect.
Returns point for skew segment and halfline that intersect.
Returns point for collinear segment and halfline that intersect at the start or end point.
Returns segment for collinear segment and halfline that intersect.
:rtype: None, Point3D, Segment3D
:Example:
.. code-block:: python
>>> s = Segment3D(Point3D(0,0,0), Point3D(1,0,0))
>>> hl = Halfline3D(Point3D(0,0,1), Vector3D(1,0,0))
>>> result = s.intersect_halfline(hl)
>>> print(result)
None
.. seealso:: `<https://geomalgorithms.com/a05-_intersect-1.html>`_
"""
if halfline in self.line:
if self.P0 in halfline and self.P1 in halfline:
return self
elif self.P0==halfline.P0:
return self.P0
elif self.P1==halfline.P0:
return self.P1
elif self.P0 in halfline:
return Segment3D(self.P0,halfline.P0)
elif self.P1 in halfline:
return Segment3D(halfline.P0,self.P1)
else:
return None
elif self.line.is_parallel(halfline): # parallel but not collinear
return None
else:
p=self.line._intersect_line_skew(halfline.line)
#print(p)
if p in self and p in halfline:
return p
else:
return None
def intersect_segment(self,segment):
"""Returns the interesection of this segment and another segment.
:param segment: A 3D segment.
:type segment: Segment3D
:return: Returns None for parallel non-collinear segments.
Returns None for skew segments that don't intersect.
Returns None for collinear segments that don't intersect.
Returns point for skew segments that intersect.
Returns point for collinear segments that intersect at a start or end point.
Returns segment for collinear segments that intersect.
:rtype: None, Point2D, Segment2D
:Example:
.. code-block:: python
>>> s1 = Segment3D(Point3D(0,0,0), Point3D(1,0,0))
>>> s2 = Segment3D(Point3D(0,0,0), Point3D(0,1,0))
>>> result = s1.intersect_segment(s2)
>>> print(result)
Point3D(0,0,0)
.. seealso:: `<https://geomalgorithms.com/a05-_intersect-1.html>`_
"""
if segment in self.line:
try:
t0=self.line.calculate_t_from_x(segment.P0.x)
except ValueError:
try:
t0=self.line.calculate_t_from_y(segment.P0.y)
except ValueError:
t0=self.line.calculate_t_from_z(segment.P0.z)
try:
t1=self.line.calculate_t_from_x(segment.P1.x)
except ValueError:
try:
t1=self.line.calculate_t_from_y(segment.P1.y)
except ValueError:
t1=self.line.calculate_t_from_z(segment.P1.z)
if t0 > t1: # must have t0 smaller than t1, swap if not
t0, t1 = t1, t0
if (t0 > 1 or t1 < 0): # intersecting segment does not overlap
return None
if t0<0: t0=0 # clip to min 0
if t1>1: t1=1 # clip to max 1
if t0==t1: # point overlap
return self.calculate_point(t0)
# they overlap in a valid subsegment
return Segment3D(self.calculate_point(t0),self.calculate_point(t1))
elif self.line.is_parallel(segment.line): # parallel but not collinear
return None
else:
p=self.line._intersect_line_skew(segment.line)
if p in self and p in segment:
return p
else:
return None
@property
def line(self):
"""Returns the line which the segment lies on.
:return: A line with the same start point (P0) and vector (P1-P0) as the segment.
:rtype: Line3D
:Example:
.. code-block:: python
>>> s = Segment3D(Point3D(0,0,0), Point3D(1,0,0))
>>> result = s.line
>>> print(result)
Line3D(Point3D(0,0,0), Vector3D(1,0,0))
"""
return Line3D(self.P0,self.P1-self.P0)
def plot(self,ax,**kwargs):
"""Plots the segment on the supplied axes
:param ax: An Axes3D instance.
:type ax: mpl_toolkits.mplot3d.axes3d.Axes3D
:param kwargs: keyword arguments to be supplied to the Axes3D.plot call
"""
x=[p.x for p in self.points]
y=[p.y for p in self.points]
z=[p.z for p in self.points]
ax.plot(x,y,z,**kwargs)
def project_2D(self,coordinate_index):
"""Projection of the 3D segment as a 2D segment.
:param coordinate_index: The index of the coordinate to ignore.
Use coordinate_index=0 to ignore the x-coordinate, coordinate_index=1
for the y-coordinate and coordinate_index=2 for the z-coordinate.
:type coordinate_index: int
:return: A 2D segment based on the projection of the 3D segment.
:rtype: Segment2D
:Example:
.. code-block:: python
>>> s = Segment3D(Point3D(0,0,0), Point3D(1,2,3))
>>> result = s.project_2D(0)
>>> print(result)
Segment2D(Point2D(0,0), Point2D(2,3))
"""
if coordinate_index==0:
return Segment2D(Point2D(self.P0.y,self.P0.z),
Point2D(self.P1.y,self.P1.z))
elif coordinate_index==1:
return Segment2D(Point2D(self.P0.z,self.P0.x),
Point2D(self.P1.z,self.P1.x))
elif coordinate_index==2:
return Segment2D(Point2D(self.P0.x,self.P0.y),
Point2D(self.P1.x,self.P1.y))
else:
raise Exception
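# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Because this module uses relative imports, the lines below are shown as comments.
# They assume the crossproduct package re-exports these classes from its top level,
# which is an assumption made only for this example.
#     from crossproduct import Point2D, Point3D, Segment2D, Segment3D
#     s1 = Segment2D(Point2D(0, 0), Point2D(1, 0))
#     s2 = Segment2D(Point2D(1, 0), Point2D(2, 0))
#     print(s1 + s2)            # Segment2D(Point2D(0,0), Point2D(2,0))
#     s3 = Segment3D(Point3D(0, 0, 0), Point3D(1, 2, 3))
#     print(s3.project_2D(0))   # Segment2D(Point2D(0,0), Point2D(2,3))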
|
StarcoderdataPython
|
144010
|
<gh_stars>1-10
import numpy as np
class Deriv:
"""
Calculate the derivative with given order of the function f(t) at point t.
"""
def __init__(self, f, dt, o=1):
"""
Initialize the differentiation solver.
Params:
- f the name of the function object ('def f(t):...')
- dt the calculation step between successive points
- o the order of the derivative to be calculated
"""
self.f = f
self.dt = dt
self.o = o
# coefficients of forward finite difference approximations of order O(h^2)
self.co = np.array([
[-3.0, 4.0, -1.0, 0.0, 0.0, 0.0],
[2.0, -5.0, 4.0, -1.0, 0.0, 0.0],
[-5.0, 18.0, -24.0, 14.0, -3.0, 0.0],
[3.0, -14.0, 26.0, -24.0, 11.0, -2.0]
])
self.den = np.array([2 * dt, dt ** 2, 2 * dt ** 3, dt ** 4])
def solve(self, t):
"""
Calculate the derivative at point 't'.
The method uses Richardson extrapolation to improve accuracy.
"""
df = [0.0, 0.0]
for i, dt_ in enumerate([self.dt, self.dt / 2]):
    # recompute the denominator with the current step so the halved-step
    # estimate used by the Richardson extrapolation stays consistent
    den = np.array([2 * dt_, dt_ ** 2, 2 * dt_ ** 3, dt_ ** 4])
    t_array = np.arange(t, t + 6 * dt_, dt_)[:6]
    f_array = np.array([self.f(t_i) for t_i in t_array])
    c_array = self.co[self.o - 1, :]
    # the weighted sum of the samples gives the finite-difference estimate
    df[i] = np.sum(c_array * f_array) / den[self.o - 1]
return (4.0 * df[1] - df[0]) / 3.0
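# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The test function, evaluation point and step size are assumptions; it simply
# checks the solver against the known analytic derivatives of sin(t) at t = 0.5.
if __name__ == "__main__":
    import math
    for order, exact in [(1, math.cos(0.5)), (2, -math.sin(0.5))]:
        approx = Deriv(math.sin, dt=1e-3, o=order).solve(0.5)
        print("order", order, "approx", approx, "exact", exact)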
|
StarcoderdataPython
|
3361199
|
import math
from kivy.graphics import *
from kivy.properties import *
from kivy.event import *
from kivy.uix.textinput import TextInput
from kivy.uix.label import Label
from entity import Entity
from kivy.clock import Clock
import time
name = 'tree'
icon='tree1.png'
class Tree(Entity):
def __init__(self,canvas,pos):
super(Tree, self).__init__(canvas,pos)
self.name = 'Tree'
self.gl_shape.source = './icons/tree1.png'
print( 'new tree' )
def clock(self,modeler):
super(Tree,self).clock(modeler)
def on_power(self,instance,value):
super(Tree,self).on_power(instance,value)
def do():
print( 'modul tree do' )
return Tree
|
StarcoderdataPython
|
131100
|
<reponame>ericbrasiln/QualCoder<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_dialog_view_image.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog_view_image(object):
def setupUi(self, Dialog_view_image):
Dialog_view_image.setObjectName("Dialog_view_image")
Dialog_view_image.resize(1021, 715)
self.gridLayout = QtWidgets.QGridLayout(Dialog_view_image)
self.gridLayout.setObjectName("gridLayout")
self.horizontalSlider = QtWidgets.QSlider(Dialog_view_image)
self.horizontalSlider.setMinimum(9)
self.horizontalSlider.setSingleStep(3)
self.horizontalSlider.setProperty("value", 99)
self.horizontalSlider.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.horizontalSlider.setTickInterval(10)
self.horizontalSlider.setObjectName("horizontalSlider")
self.gridLayout.addWidget(self.horizontalSlider, 2, 0, 1, 1)
self.textEdit = QtWidgets.QTextEdit(Dialog_view_image)
self.textEdit.setMaximumSize(QtCore.QSize(16777215, 80))
self.textEdit.setObjectName("textEdit")
self.gridLayout.addWidget(self.textEdit, 3, 0, 1, 1)
self.scrollArea = QtWidgets.QScrollArea(Dialog_view_image)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 1001, 583))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout_2.setObjectName("gridLayout_2")
self.graphicsView = QtWidgets.QGraphicsView(self.scrollAreaWidgetContents)
self.graphicsView.setObjectName("graphicsView")
self.gridLayout_2.addWidget(self.graphicsView, 0, 0, 1, 1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1)
self.retranslateUi(Dialog_view_image)
QtCore.QMetaObject.connectSlotsByName(Dialog_view_image)
def retranslateUi(self, Dialog_view_image):
_translate = QtCore.QCoreApplication.translate
Dialog_view_image.setWindowTitle(_translate("Dialog_view_image", "View Image"))
self.textEdit.setToolTip(_translate("Dialog_view_image", "<html><head/><body><p>Memo</p></body></html>"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog_view_image = QtWidgets.QDialog()
ui = Ui_Dialog_view_image()
ui.setupUi(Dialog_view_image)
Dialog_view_image.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
3301959
|
<reponame>djw1149/pynmsg
#!/usr/bin/env python
# Copyright (c) 2009-2014 by Farsight Security, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import select
import sys
import nmsg
def main(ip, port):
ni = nmsg.input.open_sock(ip, port)
fd = ni.fileno()
p = select.poll()
p.register(fd, select.POLLIN)
while True:
events = p.poll(1000)
if events:
m = ni.read()
while m:
print('got a message')
m = ni.read()
else:
print('no messages!')
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
|
StarcoderdataPython
|
3383200
|
import pypyodbc
import click
import os
import sys
from collections import namedtuple
ConnectionInfo = namedtuple('ConnectionInfo', 'server user password')
BootstrapInfo = namedtuple('BootstrapTuple', 'database user password')
Constants = namedtuple('Constants', 'host instance')
constants = Constants('localhost', '(default)')
class DatabaseBootstrapper:
"""Magick Animal Which doth create databaseth"""
def __init__(self, connection_info, bootstrap_info, verbose, generate_only):
self._connection_info = connection_info
self._bootstrap_info = bootstrap_info
self._verbose = verbose
self._generate_only = generate_only
self._script = []
self._connection = None
def run(self):
"""Runs the bootstrapper with the provided options"""
self._script = []
self._connect_master()
self._create_login()
self._create_database()
self._close_connection()
self._connect_app_database()
self._create_app_user()
self._allow_app_user_connect()
self._set_app_user_roles()
self._dump_script_if_required()
def _dump_script_if_required(self):
if not self._generate_only:
return
script_file = 'Script-%s.sql' % (self._bootstrap_info.database)
with open(script_file, mode='w') as stream:
for line in self._script:
stream.write("%s\n" % line)
print('Generated script: %s' % script_file)
def _log(self, message):
if not self._verbose:
return
print(message)
def _connect_master(self):
if self._generate_only:
return
self._connection = self._connect_to_database('master')
def _close_connection(self):
if self._generate_only:
return
self._connection.close()
def _connect_app_database(self):
if self._generate_only:
# required to GO before attempting to use the new database
self._run_script("GO")
self._run_script('use [%s]' % self._bootstrap_info.database)
return
database_name = self._bootstrap_info.database
self._connection = self._connect_to_database(database_name)
def _connect_to_database(self, database):
if self._generate_only:
return
if self._connection_info.user == '':
auth = 'Trusted_Connection=True'
else:
auth = 'UID=%s;PWD=%s' % \
(self._connection_info.user, self._connection_info.password)
conn_string = 'DRIVER={Sql Server};SERVER=%s;DATABASE=%s;%s' \
% (self._connection_info.server, database, auth)
self._log('connecting: %s' % (conn_string))
return pypyodbc.connect(conn_string)
def _enquote(self, text):
return text.replace('\'', '\'\'')
def _create_app_user(self):
user = self._bootstrap_info.user
script = \
'''if not exists (select * from sysusers where name = \'%s\')
begin
create user %s for login %s with default_schema = dbo
end;''' % (user, user, user)
self._run_script(script)
def _allow_app_user_connect(self):
script = 'grant connect to %s' % (self._bootstrap_info.user)
self._run_script(script)
def _set_app_user_roles(self):
script = 'exec sp_addrolemember N\'%s\', N\'%s\';'
user = self._bootstrap_info.user
for role in ['db_datareader', 'db_datawriter', 'db_owner']:
self._run_script(script % (role, user))
def _create_database(self):
db_name = self._bootstrap_info.database
script = \
'''if not exists(select name from master..sysdatabases where name = \'%s\')
begin
create database %s
end;
''' % (db_name, db_name)
self._run_script(script)
def _create_login(self):
user = self._enquote(self._bootstrap_info.user)
password = self._enquote(self._bootstrap_info.password)
script = \
'''use master;
if not exists (select name from master..syslogins where name = \'%s\')
begin
create login %s with password = \'%s\';
end;
''' % (user, user, password)
self._run_script(script)
def _run_script(self, script):
if self._generate_only:
self._script.append(script)
return
self._log('run sql: %s' % (script))
cursor = self._connection.cursor()
cursor.execute(script)
self._connection.commit()
@click.command()
@click.option('--host', help='Sql server hostname',
default=constants.host)
@click.option('--instance', help='Sql Server instance',
default=constants.instance)
@click.option('--login', help='Admin-level login on Sql Server \
(leave blank for trusted)', default='')
@click.option('--password', help='Admin-level password on Sql Server \
(leave blank for trusted)', default='', hide_input=True)
@click.option('--user-name', prompt='App user to create')
@click.option('--user-password', prompt='<PASSWORD> <PASSWORD>',
hide_input=True, confirmation_prompt=True)
@click.option('--database-name', prompt='Database name to create')
@click.option('--verbose/--quiet', default=False, help='Print debug info')
@click.option('--generate/--live', default=False, help='Generate script or run to live database server (default is run live)')
def main(host, \
instance, \
login, \
password, \
user_name, \
user_password, \
database_name, \
verbose, \
generate):
server = host
if len(instance) > 0 and instance != constants.instance:
server = '\\'.join([host, instance])
connection_info = ConnectionInfo(server, login, password)
bootstrap_info = BootstrapInfo(database_name, user_name, user_password)
bootstrapper = DatabaseBootstrapper(
connection_info,
bootstrap_info,
verbose,
generate)
bootstrapper.run()
if __name__ == '__main__':
main()
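# --- Hedged usage note (added for illustration; not part of the original file) ---
# A typical invocation of the click CLI above; the script name is an assumption,
# and --user-password will be prompted for if it is not supplied.
#     python bootstrap_db.py --host localhost --instance SQLEXPRESS \
#         --user-name app_user --database-name app_db --generate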
|
StarcoderdataPython
|
3364932
|
from wordasso.wordasso import syn_words, pho_words, ent_words
import csv
files = ["../datasets/mc.csv"]
def test_syn_words():
for f in files:
with open(f, "r") as inf:
reader = csv.reader(inf, delimiter=",")
for row in reader:
w = row[0]
print("word: ", w)
print("containing-synonym-tag related words:")
print(syn_words(w))
print("-" * 60)
def test_pho_words():
for f in files:
with open(f, "r") as inf:
reader = csv.reader(inf, delimiter=",")
for row in reader:
w = row[0]
print("word: ", w)
print("most-sounds-like related words:")
print(pho_words(w))
print("-" * 60)
def test_ent_words():
for f in files:
with open(f, "r") as inf:
reader = csv.reader(inf, delimiter=",")
for row in reader:
w = row[0]
print("word: ", w)
print("Named Entities related to the words:")
print(ent_words(w))
print("-" * 60)
if __name__ == '__main__':
pass
|
StarcoderdataPython
|
1645413
|
from os import listdir
from pathlib import Path
from typing import Optional, List
import hydra
import wandb
from omegaconf import DictConfig
from pytorch_lightning import LightningDataModule, LightningModule
from pytorch_lightning.loggers import LightningLoggerBase, WandbLogger
from tqdm import tqdm
import numpy as np
from src.callbacks.wandb_callbacks import get_wandb_logger
from src.evaluation.separate import separate_with_onnx, separate_with_ckpt
from src.utils import utils
from src.utils.utils import load_wav, sdr
log = utils.get_logger(__name__)
def evaluation(config: DictConfig):
assert config.split in ['train', 'valid', 'test']
data_dir = Path(config.get('data_dir')).joinpath(config['split'])
assert data_dir.exists()
# Init Lightning loggers
loggers: List[LightningLoggerBase] = []
if "logger" in config:
for _, lg_conf in config.logger.items():
if "_target_" in lg_conf:
log.info(f"Instantiating logger <{lg_conf._target_}>")
loggers.append(hydra.utils.instantiate(lg_conf))
if any([isinstance(l, WandbLogger) for l in loggers]):
utils.wandb_login(key=config.wandb_api_key)
model = hydra.utils.instantiate(config.model)
target_name = model.target_name
ckpt_path = Path(config.ckpt_dir).joinpath(config.ckpt_path)
scores = []
num_tracks = len(listdir(data_dir))
for i, track in tqdm(enumerate(sorted(listdir(data_dir)))):
track = data_dir.joinpath(track)
mixture = load_wav(track.joinpath('mixture.wav'))
target = load_wav(track.joinpath(target_name + '.wav'))
#target_hat = {source: separate(config['batch_size'], models[source], onnxs[source], mixture) for source in sources}
target_hat = separate_with_ckpt(config.batch_size, model, ckpt_path, mixture, config.device)
score = sdr(target_hat, target)
scores.append(score)
for logger in loggers:
logger.log_metrics({'sdr': score}, i)
for wandb_logger in [logger for logger in loggers if isinstance(logger, WandbLogger)]:
mid = mixture.shape[-1] // 2
track = target_hat[:, mid - 44100 * 3:mid + 44100 * 3]
wandb_logger.experiment.log(
{f'track={i}_target={target_name}': [wandb.Audio(track.T, sample_rate=44100)]})
for logger in loggers:
logger.log_metrics({'mean_sdr_' + target_name: sum(scores)/num_tracks})
logger.close()
if any([isinstance(logger, WandbLogger) for logger in loggers]):
wandb.finish()
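# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# evaluation() expects a hydra DictConfig, so a typical entry point would wrap it
# with hydra.main; the config path and name below are assumptions.
#     @hydra.main(config_path="configs", config_name="evaluation")
#     def main(config: DictConfig) -> None:
#         evaluation(config)
#     if __name__ == "__main__":
#         main()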
|
StarcoderdataPython
|
1707484
|
<reponame>shreejitverma/GeeksforGeeks
'''https://www.geeksforgeeks.org/reverse-a-linked-list/
Given pointer to the head node of a linked list, the task is to reverse the linked list. We need to reverse the list by changing the links between nodes.
Examples:
Input: Head of following linked list
1->2->3->4->NULL
Output: Linked list should be changed to,
4->3->2->1->NULL
Input: Head of following linked list
1->2->3->4->5->NULL
Output: Linked list should be changed to,
5->4->3->2->1->NULL
Input: NULL
Output: NULL
Input: 1->NULL
Output: 1->NULL
Iterative Method
Initialize three pointers prev as NULL, curr as head and next as NULL.
Iterate through the linked list. In loop, do following.
// Before changing next of current,
// store next node
next = curr->next
// Now change next of current
// This is where actual reversing happens
curr->next = prev
// Move prev and curr one step forward
prev = curr
curr = next
Output:
Given linked list
85 15 4 20
Reversed Linked list
20 4 15 85
Time Complexity: O(n)
Space Complexity: O(1)
'''
# Python program to reverse a linked list
# Time Complexity : O(n)
# Space Complexity : O(1)
# Node class
class Node:
# Constructor to initialize the node object
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
# Function to initialize head
def __init__(self):
self.head = None
# Function to reverse the linked list
def reverse(self):
prev = None
current = self.head
while(current is not None):
next = current.next
current.next = prev
prev = current
current = next
self.head = prev
# Function to insert a new node at the beginning
def push(self, new_data):
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
# Utility function to print the linked LinkedList
def printList(self):
temp = self.head
while(temp):
print(temp.data, end=" ")
temp = temp.next
# Driver code
llist = LinkedList()
llist.push(20)
llist.push(4)
llist.push(15)
llist.push(85)
print "Given Linked List"
llist.printList()
llist.reverse()
print "\nReversed Linked List"
llist.printList()
'''
Recursive Method:
1) Divide the list in two parts - first node and
rest of the linked list.
2) Call reverse for the rest of the linked list.
3) Link rest to first.
4) Fix head pointer
Linked List Rverse'''
"""Python3 program to reverse linked list
using recursive method"""
# Linked List Node
class Node:
def __init__(self, data):
self.data = data
self.next = None
# Create and Handle list operations
class LinkedList:
def __init__(self):
self.head = None # Head of list
# Method to reverse the list
def reverse(self, head):
# If head is empty or has reached the list end
if head is None or head.next is None:
return head
# Reverse the rest list
rest = self.reverse(head.next)
# Put first element at the end
head.next.next = head
head.next = None
# Fix the header pointer
return rest
# Returns the linked list in display format
def __str__(self):
linkedListStr = ""
temp = self.head
while temp:
linkedListStr = (linkedListStr +
str(temp.data) + " ")
temp = temp.next
return linkedListStr
# Pushes new data to the head of the list
def push(self, data):
temp = Node(data)
temp.next = self.head
self.head = temp
# Driver code
linkedList = LinkedList()
linkedList.push(20)
linkedList.push(4)
linkedList.push(15)
linkedList.push(85)
print("Given linked list")
print(linkedList)
linkedList.head = linkedList.reverse(linkedList.head)
print("Reversed linked list")
print(linkedList)
|
StarcoderdataPython
|
159813
|
import sys
import datetime
sys.stderr.write("This is the stderr test\n")
sys.stderr.flush()
sys.stderr.write('this is my first sys tutorial\n')
print (sys.argv)
print(datetime.datetime.now())
|
StarcoderdataPython
|
1677198
|
<filename>accountit/invoices/admin.py<gh_stars>0
from django.contrib import admin
from . import models
admin.site.register(models.Invoice)
admin.site.register(models.ItemSold)
|
StarcoderdataPython
|
198726
|
<filename>examples/mxnet/export.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
import chainer
import chainer.links as L
import numpy as np
import mxnet as mx
import onnx_chainer
import onnx_mxnet
chainer.config.train = False
def save_as_onnx_then_import_from_mxnet(model, fn):
x = np.random.randn(1, 3, 224, 224).astype(np.float32)
chainer_out = model(x)['prob'].array
onnx_chainer.export(model, x, fn)
sym, params = onnx_mxnet.import_model(fn)
mod = mx.mod.Module(
symbol=sym, data_names=['input_0'], context=mx.cpu(), label_names=None)
mod.bind(
for_training=False, data_shapes=[('input_0', x.shape)],
label_shapes=None)
mod.set_params(arg_params=params, aux_params=None, allow_missing=True)
Batch = namedtuple('Batch', ['data'])
mod.forward(Batch([mx.nd.array(x)]))
mxnet_out = mod.get_outputs()[0].asnumpy()
print(mxnet_out.shape)
np.testing.assert_almost_equal(
chainer_out, mxnet_out, decimal=5)
def main():
model = L.VGG16Layers()
save_as_onnx_then_import_from_mxnet(model, 'vgg16.onnx')
model = L.ResNet50Layers()
save_as_onnx_then_import_from_mxnet(model, 'resnet50.onnx')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
4805800
|
<reponame>naralakamsani/fake-face-detector
import torch
import predict_helper
#Load the model from checkpoint
model = predict_helper.load_checkpoint("checkpoint.pth")
#Create a dummy input
model.to('cpu')
image = predict_helper.process_image("img4.jpg")
dummy_input = image.unsqueeze_(0).to('cpu').float()
#export model to onnx
torch.onnx.export(model,
dummy_input,
"model.onnx")
|
StarcoderdataPython
|
3293366
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from bigdl.util.common import get_node_and_core_number
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.common import set_core_number
class TestUtil(ZooTestCase):
def test_set_core_num(self):
_, core_num = get_node_and_core_number()
set_core_number(core_num + 1)
_, new_core_num = get_node_and_core_number()
assert new_core_num == core_num + 1, \
"set_core_num failed, set the core" \
" number to be {} but got {}".format(core_num + 1, new_core_num)
set_core_number(core_num)
if __name__ == "__main__":
pytest.main([__file__])
|
StarcoderdataPython
|
149433
|
<reponame>DoRTaL94/edison<filename>edison/config.py
import secrets
import sys
import inspect
def get_config_object(env_keyword: str):
"""
Returns the the desired config class path.
The function iterates through a dictionary returned by inspect.
The dictionary contains details about all of the file members.
Its key is the name of the member and value is obj which contains all the details about the member.
The desired config path is being picked by the ENV_KEYWORD field defined in the config class.
Parameters:
env_keyword (str): Should be equals to one of the config classes ENV_KEYWORD field.
Returns:
str: module_name.class_name, which is the full path of the config class.
"""
for name, obj in inspect.getmembers(sys.modules[__name__], inspect.isclass):
if issubclass(obj, Config) and obj.ENV_KEYWORD == env_keyword:
return ".".join([obj.__module__, name])
class Config:
ENV_KEYWORD = ""
DEBUG = False
# Turns off the Flask-SQLAlchemy event system
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Enables response message for unauthenticated requests
PROPAGATE_EXCEPTIONS = True
# This tells the JWTManager to use jwt.token_in_blacklist_loader callback
JWT_BLACKLIST_ENABLED = True
# JWTManager uses this secret key for creating tokens
JWT_SECRET_KEY = secrets.token_hex(24)
# We're going to check if both access_token and refresh_token are black listed
JWT_BLACKLIST_TOKEN_CHECKS = ['access', 'refresh']
SQLALCHEMY_DATABASE_URI = 'postgresql://postgres:[email protected]/edison'
# PostgreSQL connection string should be updated once an actual production environment is established.
class ProductionConfig(Config):
ENV_KEYWORD = "production"
class DevelopmentConfig(Config):
ENV_KEYWORD = "development"
DEBUG = True
class TestConfig(Config):
ENV_KEYWORD = "test"
TESTING = True
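# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# get_config_object() returns a dotted import path, which Flask can load directly;
# the Flask app below is an assumption used only to show the intended call.
#     from flask import Flask
#     app = Flask(__name__)
#     app.config.from_object(get_config_object("development"))
#     assert app.config["DEBUG"] is True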
|
StarcoderdataPython
|
1666352
|
<gh_stars>0
from typing import List
class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
k = len(nums)
i=0
j=0
ct=1
while i<k-1 and ct<k:
if nums[i] == nums[ct]:
ct+=1
j+=1
else:
nums[i+1] = nums[ct]
i+=1
ct+=1
return k-j
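# --- Hedged usage sketch (added for illustration; not part of the original snippet) ---
# The method works in place and returns the new logical length, so the first k
# slots of nums hold the de-duplicated values.
if __name__ == "__main__":
    nums = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]
    k = Solution().removeDuplicates(nums)
    print(k, nums[:k])  # expected: 5 [0, 1, 2, 3, 4]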
|
StarcoderdataPython
|
141277
|
from django.conf import settings
from django.test import TestCase
from nose.util import resolve_name
__all__ = ('IP', 'PORT', 'SITE', 'DjangoTestCase')
IP = getattr(settings, 'TDDSPRY_IP', '127.0.0.1')
PORT = getattr(settings, 'TDDSPRY_PORT', 8088)
SITE = 'http://%s:%s/' % (IP, PORT)
DjangoTestCase = getattr(settings, 'TDDSPRY_TEST_CASE', TestCase)
if isinstance(DjangoTestCase, basestring):
DjangoTestCase = resolve_name(DjangoTestCase)
|
StarcoderdataPython
|
1708417
|
# Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
#
# This program and the accompanying materials are made available under
# the terms of the under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import unittest
from ephemerol import Scanner
from ephemerol.Models import ScanStats
class TestScanner(unittest.TestCase):
TEST_RULE_FILE = "rulebase.csv"
def __init__(self, *args, **kwargs):
super(TestScanner, self).__init__(*args, **kwargs)
if not os.path.isfile(TestScanner.TEST_RULE_FILE):
TestScanner.TEST_RULE_FILE = os.path.join("ephemerol", "test", TestScanner.TEST_RULE_FILE)
def setUp(self):
Scanner.load_rules(self.TEST_RULE_FILE)
Scanner.scan_results = []
def test_load_rules(self):
rulecount = -1 # start with -1 to exclude header row from count
total_refactor = 0
with open(self.TEST_RULE_FILE, 'rU') as f:
for line in f:
rulecount += 1
if rulecount != 0:
total_refactor += int([field for field in line.split(',')][4])
results = Scanner.rulebase
self.assertEqual(rulecount, len(results))
self.assertEqual(total_refactor, sum([int(entry.refactor_rating) for entry in results]))
def test_archive_scan(self):
"""Verify cloud readiness index for SampleWebApp-master.zip and rulebase.csv"""
archive = "SampleWebApp-master.zip"
if not os.path.isfile(archive):
archive = os.path.join("ephemerol", "test", archive)
results_stats = Scanner.scan_archive(archive)
self.assertEqual(97.44, results_stats.cloud_readiness_index)
def test_config_scan(self):
file_path_list = ['persistence.xml', 'web.xml', 'bing.xml', 'dir/dir/dir/ra.xml', '/dir/dir/ejb-jar.xml',
'dir/dir/web.xml']
Scanner.scan_results = []
Scanner.config_scan(file_path_list=file_path_list)
results = Scanner.scan_results
self.assertEqual(5, len(results))
print(results)
def test_has_scan_func_for_csproj(self):
"""Make sure files ending in csproj have a scan function"""
scan_func = Scanner.get_scan_func("foobar.csproj")
self.assertIsNotNone(scan_func)
def dotnet_version_tester(self, sample_data, refactor_rating):
Scanner.csproj_file_scan([sample_data], "testfile.csproj")
results = Scanner.scan_results
self.assertEqual(1, len(results))
self.assertEqual(refactor_rating, results[0].refactor_rating)
def test_dotnet_version_3_0(self):
""".NET framework version 3.0 is refactor of 1"""
self.dotnet_version_tester(' <TargetFrameworkVersion>v3.0</TargetFrameworkVersion>', "1")
def test_dotnet_version_3_5(self):
""".NET framework version 3.5 is refactor of 0"""
self.dotnet_version_tester(' <TargetFrameworkVersion>v3.5</TargetFrameworkVersion>',"0")
def test_dotnet_version_4_0(self):
""".NET framework version 4.0 is refactor of 0"""
self.dotnet_version_tester(' <TargetFrameworkVersion>v4.0</TargetFrameworkVersion>', "0")
def test_dotnet_version_4_5(self):
""".NET framework version 4.5 is refactor of 0"""
self.dotnet_version_tester(' <TargetFrameworkVersion>v4.5</TargetFrameworkVersion>', "0")
def test_dotnet_version_4_6(self):
""".NET framework version 4.6 is refactor of 0"""
self.dotnet_version_tester(' <TargetFrameworkVersion>v4.6</TargetFrameworkVersion>', "0")
def test_dotnet_version_2_0_refactor_1(self):
""".NET framework version 2.0 is refactor of 3"""
self.dotnet_version_tester(' <TargetFrameworkVersion>v2.0</TargetFrameworkVersion>', "3")
def test_dotnet_version_1_0_refactor_1(self):
""".NET framework version 1.0 is refactor of 3"""
self.dotnet_version_tester(' <TargetFrameworkVersion>v1.0</TargetFrameworkVersion>', "3")
def test_dotnet_version_not_present(self):
""".NET framework version not in csproj file"""
Scanner.csproj_file_scan([' <NoFrameworkVersion></NoFrameworkVersion>'], "testfile.csproj")
results = Scanner.scan_results
self.assertEqual(0, len(results))
def test_scan_for_servicebase_extension(self):
"""Found .cs file with class extending ServiceBase"""
Scanner.cs_file_scan(['using System.ServiceProcess;',
'using System.Text;',
'using System.Threading.Tasks;',
'namespace WindowsService1',
'{',
' public partial class Service1 : ServiceBase',
' {'],
'Service1.cs')
results = Scanner.scan_results
self.assertEqual(1, len(results))
self.assertEqual("20", results[0].refactor_rating)
def test_scan_for_no_extension(self):
"""Found .cs file with class not extending anything"""
Scanner.cs_file_scan(['using System.ServiceProcess;',
'using System.Text;',
'using System.Threading.Tasks;',
'namespace WindowsService1',
'{',
' public partial class Service1',
' {'],
'Service1.cs')
results = Scanner.scan_results
self.assertEqual(0, len(results))
def test_scan_for_extension_but_not_servicebase(self):
"""Found .cs file with class extending FooBar"""
Scanner.cs_file_scan(['using System.ServiceProcess;',
'using System.Text;',
'using System.Threading.Tasks;',
'namespace WindowsService1',
'{',
' public partial class Service1 : FooBar',
' {'],
'Service1.cs')
results = Scanner.scan_results
self.assertEqual(0, len(results))
def test_scan_for_oledb_use(self):
"""Found .cs file with \"using System.Data.OleDb\'"""
Scanner.cs_file_scan(['using System.Data.OleDb;'], 'Repository.cs')
results = Scanner.scan_results
self.assertEqual(1, len(results))
    def test_scan_for_odbc_use(self):
        """Found .cs file with 'using System.Data.Odbc'"""
Scanner.cs_file_scan(['using System.Data.Odbc;'], 'Repository.cs')
results = Scanner.scan_results
self.assertEqual(1, len(results))
def test_scan_for_ado_net_use(self):
"""Found .cs file with \"using System.Data\'"""
Scanner.cs_file_scan(['using System.Data;'], 'Repository.cs')
results = Scanner.scan_results
self.assertEqual(1, len(results))
def test_scan_double_hit_for_ado_net_and_odbc_use(self):
"""Found .cs file with \"using System.Data\'" and \"using System.Data.Odbc\'"""
Scanner.cs_file_scan(['using System.Data;', 'using System.Data.Odbc;'], 'Repository.cs')
results = Scanner.scan_results
self.assertEqual(2, len(results))
def test_scan_for_ef_use(self):
"""Found .cs file with \"using System.Data.Entity\'"""
Scanner.cs_file_scan(['using System.Data;', 'using System.Data.Odbc;'], 'Repository.cs')
results = Scanner.scan_results
self.assertEqual(2, len(results))
def test_scan_for_file_write(self):
"""Found .cs file with call to File.WriteAllText"""
Scanner.cs_file_scan(['File.WriteAllText("foo.bar", "Some Text");'], 'FileWrite.cs')
results = Scanner.scan_results
self.assertEqual(1, len(results))
def test_scan_for_file_open(self):
"""Found .cs file with call to File.Open"""
Scanner.cs_file_scan(
[' using (FileStream fs = File.Open(path, FileMode.Open, FileAccess.Write, FileShare.None))'],
'FileWrite.cs')
results = Scanner.scan_results
self.assertEqual(1, len(results))
def test_scan_for_filesystem_watcher(self):
"""Found .cs file with call to File.WriteAllText"""
Scanner.cs_file_scan([' FileSystemWatcher watcher = new FileSystemWatcher();'], 'FileWrite.cs')
results = Scanner.scan_results
self.assertEqual(1, len(results))
def test_cloud_readiness_index_algorithm(self):
"""make sure no scan results to 20 to 220 show consistent readiness index"""
scan_stats = ScanStats(Scanner.scan_results)
self.assertEqual(0, len(scan_stats.scan_result_list))
self.assertEqual(100, scan_stats.cloud_readiness_index)
for counter in range(0, 10):
Scanner.java_file_scan(['import javax.ejb.'], 'BadPojo.java')
Scanner.java_file_scan(['import org.springframework.'], 'GoodPojo.java')
scan_stats = ScanStats(Scanner.scan_results)
self.assertEqual(20, len(scan_stats.scan_result_list))
self.assertEqual(85, scan_stats.cloud_readiness_index)
for counter in range(0, 100):
Scanner.java_file_scan(['import javax.ejb.'], 'BadPojo.java')
Scanner.java_file_scan(['import org.springframework.'], 'GoodPojo.java')
scan_stats = ScanStats(Scanner.scan_results)
self.assertEqual(220, len(scan_stats.scan_result_list))
self.assertEqual(85, scan_stats.cloud_readiness_index)
if __name__ == '__main__':
unittest.main()
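# Hedged run sketch: the module path below is an assumption, not confirmed by this
# file. The tests expect rulebase.csv (self.TEST_RULE_FILE) and SampleWebApp-master.zip
# to be reachable from the working directory or from ephemerol/test/.
#   $ python -m unittest discover -s ephemerol/test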
|
StarcoderdataPython
|
1620652
|
<gh_stars>0
#!/usr/bin/env python
"""
Pandoc filter to process code blocks with class "graphviz" into
graphviz-generated images.
"""
import pygraphviz
import hashlib
import os
import sys
from pandocfilters import toJSONFilter, Str, Para, Image
def sha1(x):
return hashlib.sha1(x.encode(sys.getfilesystemencoding())).hexdigest()
imagedir = "graphviz-images"
def graphviz(key, value, format, meta):
if key == 'CodeBlock':
[[ident, classes, keyvals], code] = value
caption = "caption"
if "graphviz" in classes:
G = pygraphviz.AGraph(string=code)
G.layout()
filename = sha1(code)
if format == "html":
filetype = "png"
elif format == "latex":
filetype = "pdf"
else:
filetype = "png"
alt = Str(caption)
src = imagedir + '/' + filename + '.' + filetype
if not os.path.isfile(src):
try:
os.mkdir(imagedir)
sys.stderr.write('Created directory ' + imagedir + '\n')
except OSError:
pass
G.draw(src)
sys.stderr.write('Created image ' + src + '\n')
tit = ""
return Para([Image([alt], [src, tit])])
if __name__ == "__main__":
toJSONFilter(graphviz)
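# Hedged usage sketch (assumes pandoc and pygraphviz are installed; file names are
# illustrative). Pandoc pipes the document JSON through this filter, and any fenced
# block tagged "graphviz" is replaced by a rendered image under graphviz-images/:
#   $ pandoc --filter ./graphviz.py -o doc.html doc.md
# where doc.md contains, e.g.:
#   ```graphviz
#   digraph G { a -> b }
#   ```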
|
StarcoderdataPython
|
23391
|
#!/usr/bin/python
# -*- coding: UTF-8 -*
from db.it.db_it_mgr import it_mgr
__all__ = {"itSingleton"}
class itSingleton():
def get_cmd_sql(self, sql):
return it_mgr.get_cmd_sql(sql)
it_singleton = itSingleton()
|
StarcoderdataPython
|
3293689
|
import json
import requests
import time
url = 'https://api.warframestat.us/ps4/fissures'
current_survivals = [] # hold current mission ids until new ones appear
ids_sent = []
# poll the fissure endpoint in a loop (time.sleep(60) at the end of each pass)
while True:
raw_data = requests.get(url)
fissures_data = json.loads(raw_data.text)
# I have to clear to get rid of the old id's
current_survivals.clear()
print()
for x in fissures_data:
print(x['missionType'])
print()
# add
for mission in fissures_data:
if mission['missionType'] == 'Capture':
# add to current_survivals
current_survivals.append(mission['id'])
# Makes sure not to send an email twice
for x in current_survivals:
if x in ids_sent:
print("Email already sent.")
else:
ids_sent.append(x)
print("Sent Email")
# send an email
print("Current_survivals: " + str(current_survivals))
print("ids_sent: " + str(ids_sent) + "\n")
    time.sleep(60) # will run again after 1 minute
|
StarcoderdataPython
|
173661
|
import os
import unittest
from docker.machine.cli.machine import Machine
from docker.machine.errors import CLIError
from docker.machine.cli.client import Status
from docker.machine.constants import LOCALHOST
digitalocean_access_token = os.environ.get('DOCKERMACHINEPY_DIGITALOCEAN_ACCESS_TOKEN')
class BaseTestCases:
class MachineDriverBaseTest(unittest.TestCase):
def __init__(self, machine_name, driver=None, driver_config={}, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.machine_name = machine_name
self.driver = driver
self.driver_config = driver_config
self.machine = Machine(self.machine_name)
self.assertFalse(self.machine.exists())
self.assertEqual(self.machine.create(), self.machine)
self.assertTrue(self.machine.exists())
def setUp(self):
super(BaseTestCases.MachineDriverBaseTest, self).setUp()
self.machine = Machine(self.machine_name)
def test_version(self):
version_info = self.machine.docker_machine_version()
self.assertIsNotNone(version_info.version_number)
self.assertIsNotNone(version_info.version_hash)
def test_active_docker_machine(self):
self.assertIsNone(Machine.active_docker_machine())
def test_all_docker_machines(self):
self.assertEqual(self.machine, Machine.all_docker_machines()[0])
def test_url(self):
self.assertEqual(self.machine.url(), self.machine.driver.url)
def test_status(self):
self.assertEqual(self.machine.status(), Status.running)
def test_running(self):
self.assertTrue(self.machine.running())
def test_paused(self):
self.assertFalse(self.machine.paused())
def test_stopped(self):
self.assertFalse(self.machine.stopped())
def test_stopping(self):
self.assertFalse(self.machine.stopping())
def test_starting(self):
self.assertFalse(self.machine.starting())
def test_error(self):
self.assertFalse(self.machine.error())
def test_timeout(self):
self.assertFalse(self.machine.timeout())
def test_runningish(self):
self.assertTrue(self.machine.runningish())
def test_stoppedish(self):
self.assertFalse(self.machine.stoppedish())
def test_errorish(self):
self.assertFalse(self.machine.errorish())
def tearDown(self):
try:
self.machine.rm(force=True)
except CLIError:
pass
class MachineDriverTest(MachineDriverBaseTest):
def __init__(self, machine_name, driver=None, driver_config={}, *args, **kwargs):
super(BaseTestCases.MachineDriverTest, self).__init__(machine_name, driver, driver_config, *args, **kwargs)
# def test_tmp_test(self):
# self.machine.create(self.driver, **self.driver_config)
# self.machine.ip()
# pass
# def test_machine_test(self):
# self.assertFalse(self.machine.exists())
# self.machine.create(self.driver, **self.driver_config)
# self.assertTrue(self.machine.exists())
# self.assertEqual(self.machine, Machine.all()[0])
# self.machine.rm(force=True)
# self.assertFalse(self.machine.exists())
class NoDriverMachineTest(BaseTestCases.MachineDriverBaseTest):
def __init__(self, *args, **kwargs):
super(NoDriverMachineTest, self).__init__('noDriverMachineTest', None, {}, *args, **kwargs)
def test_ip(self):
self.assertEqual(self.machine.ip(), LOCALHOST)
def test_ssh(self):
with self.assertRaises(CLIError):
self.machine.ssh()
with self.assertRaises(CLIError):
self.machine.ssh('ls -lah')
def test_config(self):
with self.assertRaises(CLIError):
self.machine.config()
def test_restart(self):
with self.assertRaises(CLIError):
self.machine.restart()
def test_env(self):
with self.assertRaises(CLIError):
self.machine.env()
def test_stop(self):
with self.assertRaises(CLIError):
self.machine.stop()
def test_start(self):
with self.assertRaises(CLIError):
self.machine.start()
def test_kill(self):
with self.assertRaises(CLIError):
self.machine.kill()
# # def inspect(self, format=None, snake_case=False, named_tuple=False):
# # return self.client().inspect(self.name, format, snake_case, named_tuple)
# #
# # def scp(self, src, dest, recursive=False):
# # return self.client().scp(src, dest, recursive)
# #
# self.machine.scp()
# @unittest.skip('Slow test')
# def test_regenerate_certs(self):
# with self.assertRaises(CLIError):
# self.machine.regenerate_certs()
#
# @unittest.skip('Slow test')
# def test_upgrade(self):
# with self.assertRaises(CLIError):
# self.machine.upgrade()
def tearDown(self):
try:
self.assertEqual(self.machine.rm(force=True), self.machine)
self.assertFalse(self.machine.exists())
except CLIError:
pass
class DigitaloceanMachineTest(BaseTestCases.MachineDriverTest if digitalocean_access_token is not None
else BaseTestCases.MachineDriverBaseTest):
def __init__(self, *args, **kwargs):
driver = 'digitalocean'
params = dict(access_token=digitalocean_access_token)
super(DigitaloceanMachineTest, self).__init__('digitaloceanMachineTest', driver, params, *args, **kwargs)
def test_missing_required_parameter(self):
with self.assertRaises(ValueError):
Machine('missingRequiredParameterTestMachine').create(self.driver)
def test_invalid_access_token(self):
with self.assertRaises(CLIError):
Machine('invalidAccessTokenTestMachine').create(self.driver, access_token='INVALID_ACCESS_TOKEN')
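# Hedged note: DigitaloceanMachineTest only inherits the full driver test when the
# access token is exported; the exact invocation below is an assumption:
#   $ export DOCKERMACHINEPY_DIGITALOCEAN_ACCESS_TOKEN=<token>
#   $ python -m unittest discover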
|
StarcoderdataPython
|
1614929
|
<gh_stars>1-10
#coding: utf-8
''' mbinary
#######################################################################
# File : subarraySum.py
# Author: mbinary
# Mail: <EMAIL>
# Blog: https://mbinary.xyz
# Github: https://github.com/mbinary
# Created Time: 2020-04-20 16:49
# Description: subarray sum (count of subarrays summing to k)
#######################################################################
'''
from typing import List
def subarraySum(nums: List[int], k: int) -> int:
dic = {0: 1}
sm = 0
count = 0
for i in range(len(nums)):
sm += nums[i]
if((sm-k) in dic):
count += dic[sm-k]
if(sm in dic):
dic[sm] += 1
else:
dic[sm] = 1
return count
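# Minimal self-check sketch (illustrative values, not part of the original module):
if __name__ == '__main__':
    # subarrays of [1, 1, 1] summing to 2 are [1, 1] twice, so expect 2
    assert subarraySum([1, 1, 1], 2) == 2
    # [1, 2, 3] with k=3 has [1, 2] and [3], so expect 2
    assert subarraySum([1, 2, 3], 3) == 2
    print('subarraySum self-checks passed')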
|
StarcoderdataPython
|
1659972
|
<reponame>tevin/RescueAirBnB<gh_stars>1-10
from flask import Flask, request, render_template, redirect
from pymongo import MongoClient
from envparse import env
from flask_httpauth import HTTPDigestAuth
import os.path
# Get env vars stored either in an env file or on the machine
def get_env(name):
if (os.path.exists('./env')):
env.read_envfile('./env')
return env(name)
app = Flask(__name__)
app.config['SECRET_KEY'] = get_env('SECRET_KEY')
users = {
"admin": get_env('ADMIN_PASS')
}
auth = HTTPDigestAuth()
@auth.get_password
def get_pw(username):
if username in users:
return users.get(username)
return None
# Utility method for mongo connections
def mongo_login():
mongo_uri=get_env('MONGO_URI')
client = MongoClient(mongo_uri)
return client['rescuebnb']
# Home page with host form
@app.route('/')
def show_home():
return render_template('index.html')
# Post endpoint for committing host to db
@app.route('/addhost', methods = ['GET', 'POST'])
def hosts():
if request.method == 'POST':
db = mongo_login()
hosts_collection = db.hosts
host = request.form.to_dict()
hosts_collection.insert_one(host) # should probably check for completed insert
return redirect('/')
return render_template('addhosts.html')
# Post endpoint for committing people who need shelter to db
@app.route('/requestshelter', methods = ['GET', 'POST'])
def guests():
if request.method == 'POST':
db = mongo_login()
guest_collection = db.guests
guest = request.form.to_dict()
guest_collection.insert_one(guest) # should probably check for completed insert
return redirect('/')
return render_template('request_shelter.html')
# Get involved page
@app.route('/getinvolved')
def get_involved():
return render_template('get_involved.html')
# Volunteer page
@app.route('/volunteer')
def volunteer():
return render_template('volunteer.html')
# "Secured" endpoint for viewing registered hosts
@app.route('/hosts')
@auth.login_required
def viewhosts():
db = mongo_login()
hosts_collection = db.hosts
guests_collection = db.guests
return render_template('viewhosts.html', hosts=list(hosts_collection.find()),
guests=list(guests_collection.find()))
@app.route('/ussd')
def ussd():
db = mongo_login()
ussd_collection = db.ussd
ussd = request.form.to_dict()
ussd_collection.insert_one(ussd)
return render_template('index.html')
if __name__ == '__main__':
app.run()
#app.run(debug=True)
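# Hedged run sketch (the env file keys mirror the get_env() calls above; values and
# the module file name are placeholders, not part of this project):
#   ./env
#     SECRET_KEY=changeme
#     ADMIN_PASS=changeme
#     MONGO_URI=mongodb://localhost:27017
#   $ python app.py   # serves on http://127.0.0.1:5000/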
|
StarcoderdataPython
|
3301914
|
<reponame>andrecp/myhdl_simple_uart<filename>baudrate_gen.py
from myhdl import *
def baudrate_gen(sysclk, reset_n, baud_rate_i, half_baud_rate_tick_o, baud_rate_tick_o):
""" Serial
This module implements a baudrate generator
Ports:
-----
sysclk: sysclk input
reset_n: reset input
    baud_rate_i: the baud rate divisor (sysclk cycles per baud tick)
    half_baud_rate_tick_o: tick asserted halfway through each baud period
    baud_rate_tick_o: the baud rate enable
-----
"""
baud_gen_count_reg = Signal(intbv(0, min = 0, max = 900))
half_baud_const = baud_rate_i//2
@always_seq(sysclk.posedge, reset = reset_n)
def sequential_process():
baud_gen_count_reg.next = baud_gen_count_reg + 1
baud_rate_tick_o.next = 0
half_baud_rate_tick_o.next = 0
if baud_gen_count_reg == baud_rate_i:
baud_gen_count_reg.next = 0
baud_rate_tick_o.next = 1
if baud_gen_count_reg == half_baud_const:
half_baud_rate_tick_o.next = 1
return sequential_process
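# Hedged instantiation sketch (signal widths and the divisor value are illustrative;
# the ResetSignal keyword for the async flag differs between MyHDL versions, so it
# is shown positionally here):
#   sysclk = Signal(bool(0))
#   reset_n = ResetSignal(1, 0, False)
#   baud_rate_i = Signal(intbv(434, min=0, max=900))   # e.g. 50 MHz / 115200 ~= 434
#   half_tick, tick = Signal(bool(0)), Signal(bool(0))
#   dut = baudrate_gen(sysclk, reset_n, baud_rate_i, half_tick, tick)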
|
StarcoderdataPython
|
3350037
|
import ast
from .scope import ScopeMixin
def add_list_calls(node):
"""Provide context to Module and Function Def"""
return ListCallTransformer().visit(node)
def add_variable_context(node, trees):
"""Provide context to Module and Function Def"""
return VariableTransformer(trees).visit(node)
def add_assignment_context(node):
"""Annotate nodes on the LHS of an assigment"""
return LHSAnnotationTransformer().visit(node)
class ListCallTransformer(ast.NodeTransformer):
"""
Adds all calls to list to scope block.
You need to apply VariableTransformer before you use it.
"""
def visit_Call(self, node):
if self.is_list_addition(node):
var = node.scopes.find(node.func.value.id)
if var is not None and self.is_list_assignment(var.assigned_from):
if not hasattr(var, "calls"):
var.calls = []
var.calls.append(node)
return node
def is_list_assignment(self, node):
return (
hasattr(node, "value")
and isinstance(node.value, ast.List)
and hasattr(node, "targets")
and isinstance(node.targets[0].ctx, ast.Store)
)
def is_list_addition(self, node):
"""Check if operation is adding something to a list"""
list_operations = ["append", "extend", "insert"]
return (
hasattr(node.func, "ctx")
and isinstance(node.func.ctx, ast.Load)
and hasattr(node.func, "value")
and isinstance(node.func.value, ast.Name)
and hasattr(node.func, "attr")
and node.func.attr in list_operations
)
class VariableTransformer(ast.NodeTransformer, ScopeMixin):
"""Adds all defined variables to scope block"""
def __init__(self, trees):
super().__init__()
if len(trees) == 1:
self._trees = {}
else:
self._trees = {t.__file__.stem: t for t in trees}
def visit_FunctionDef(self, node):
node.vars = []
# So function signatures are accessible even after they're
# popped from the scope
self.scopes[-2].vars.append(node)
for arg in node.args.args:
arg.assigned_from = node
node.vars.append(arg)
self.generic_visit(node)
return node
def visit_ClassDef(self, node):
node.vars = []
# So classes are accessible even after they're
# popped from the scope
self.scopes[-2].vars.append(node)
self.generic_visit(node)
return node
def visit_Import(self, node):
for name in node.names:
name.imported_from = node
return node
def visit_ImportFrom(self, node):
module_path = node.module
names = [n.name for n in node.names]
if module_path in self._trees:
m = self._trees[module_path]
resolved_names = [m.scopes.find(n) for n in names]
node.scopes[-1].vars += resolved_names
return node
def visit_If(self, node):
node.vars = []
self.visit(node.test)
for e in node.body:
self.visit(e)
node.body_vars = node.vars
node.vars = []
for e in node.orelse:
self.visit(e)
node.orelse_vars = node.vars
node.vars = []
return node
def visit_For(self, node):
node.target.assigned_from = node
node.vars = [node.target]
self.generic_visit(node)
return node
def visit_Module(self, node):
node.vars = []
self.generic_visit(node)
return node
def visit_With(self, node):
node.vars = []
self.generic_visit(node)
return node
def visit(self, node):
with self.enter_scope(node):
return super().visit(node)
def visit_Assign(self, node):
for target in node.targets:
if isinstance(target, ast.Name):
target.assigned_from = node
self.scope.vars.append(target)
return node
def visit_AnnAssign(self, node):
target = node.target
if isinstance(target, ast.Name):
target.assigned_from = node
self.scope.vars.append(target)
return node
def visit_AugAssign(self, node):
target = node.target
if isinstance(target, ast.Name):
target.assigned_from = node
self.scope.vars.append(target)
return node
class LHSAnnotationTransformer(ast.NodeTransformer):
def __init__(self):
super().__init__()
self._lhs = False
def visit(self, node):
if self._lhs:
node.lhs = self._lhs
return super().visit(node)
def visit_Assign(self, node):
for target in node.targets:
self._lhs = True
self.visit(target)
self._lhs = False
self.visit(node.value)
return node
def visit_AnnAssign(self, node):
self._lhs = True
self.visit(node.target)
self._lhs = False
self.visit(node.annotation)
if node.value is not None:
self.visit(node.value)
return node
def visit_AugAssign(self, node):
self._lhs = True
self.visit(node.target)
self._lhs = False
self.visit(node.op)
self.visit(node.value)
return node
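# Hedged usage sketch (illustrative only; assumes ScopeMixin wires up node.scopes as
# these transformers expect):
#   import ast
#   tree = ast.parse("x = [1]\nx.append(2)\n")
#   tree = add_variable_context(tree, [tree])   # single tree: no cross-module lookup
#   tree = add_list_calls(tree)                 # annotates the append() call on x
#   tree = add_assignment_context(tree)         # marks LHS nodes with .lhs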
|
StarcoderdataPython
|
1621810
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib
import xbmc
pluginid = "plugin.video.plexodusplayer"
def main():
path = xbmc.getInfoLabel('ListItem.Path')
stream_file = xbmc.getInfoLabel('ListItem.FileNameAndPath')
if stream_file.endswith(".strm"):
url = "plugin://{0}/tv/set_live_library_player/{1}".format(pluginid, urllib.quote_plus(path))
else:
url = "plugin://{0}/tv/set_live_library_player/{1}".format(pluginid, urllib.quote_plus(stream_file))
xbmc.executebuiltin("RunPlugin({0})".format(url))
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
191804
|
#!/usr/bin/python3
import httplib2
import os
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
import json
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# account domain
DOMAIN = 'test.example.edu'
SCOPES = 'https://www.googleapis.com/auth/admin.directory.group.readonly ' \
'https://www.googleapis.com/auth/admin.directory.group.member.readonly ' \
'https://www.googleapis.com/auth/apps.groups.settings'
# Credential file
CLIENT_SECRET_FILE = 'client_secret.json'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'google-migration.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
if flags:
credentials = tools.run_flow(flow, store, flags)
else:
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def getGroups(http):
"""
Retrieve all groups for a domain or the account:
Calls a Google Admin-SDK Directory API: Groups service object and outputs a
all groups in an account identified by the domain.
"""
data = {}
service = discovery.build('admin', 'directory_v1', http=http)
try:
results = service.groups().list(domain=DOMAIN, alt='json').execute()
if len(results['groups']):
for group in results['groups']:
filename = group['email'].split("@")[0]
data.update({filename: group})
return data
except:
print('Unable to read groups from domain: {0}'.format(DOMAIN))
raise
def getGroupMembers(groupEmail,http):
"""
Retrieve all group members:
Calls a Google Admin-SDK Directory API: Group Members service object and outputs a
all members of the group identified by the group's email address.
"""
service = discovery.build('admin', 'directory_v1', http=http)
try:
results = service.members().list(groupKey=groupEmail, pageToken=None, maxResults=None, roles=None).execute()
if len(results):
print(json.dumps(results, indent=4))
return results
except:
print('Unable to read members for group: {0}'.format(groupEmail))
raise
def getGroupSettings(groupEmail,http):
"""
Retrieving settings for a group:
Calls a Google Admin-SDK Groups Settings API service object and outputs a
group's settings identified by the group's email address.
"""
service = discovery.build('groupssettings', 'v1', http=http)
try:
results = service.groups().get(groupUniqueId=groupEmail, alt='json').execute()
if len(results):
print(json.dumps(results, indent=4))
return results
except:
print('Unable to read group: {0}'.format(groupEmail))
raise
def writeRecordToFile(filename, data):
    # Make sure we have the folder to write the files to
if not os.path.exists("groups"):
os.makedirs("groups")
with open('groups/' + filename + '.json', 'w') as f:
json.dump(data, f, ensure_ascii=False)
def main():
print("****** Starting ******")
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
groups_info = getGroups(http)
if len(groups_info) > 0:
for groupName in groups_info:
data = {'groupInfo': groups_info[groupName]}
groupEmail = groups_info[groupName]['email']
groupSettings = getGroupSettings(groupEmail, http)
data.update({'groupSettings': groupSettings})
groupMembers = getGroupMembers(groupEmail, http)
if groupName:
if groupMembers:
data.update({'groupMembers': groupMembers})
writeRecordToFile(groupName, data)
print("****** Finished ******")
if __name__ == '__main__':
main()
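# Hedged setup sketch (file names follow the constants above; the OAuth client must
# be created in the Google API console beforehand):
#   1. place client_secret.json next to this script
#   2. run: python3 <this script>  -- the first run opens a browser consent flow and
#      stores google-migration.json under ~/.credentials/
#   3. per-group JSON files are written to ./groups/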
|
StarcoderdataPython
|
3236882
|
# linear search
def linear(srchlist, srch):
"""check every element, kinda slow"""
for element in srchlist:
if element == srch:
return srchlist.index(element)
return -1
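# Minimal usage sketch (illustrative values only):
if __name__ == "__main__":
    data = [3, 1, 4, 1, 5]
    print(linear(data, 4))   # 2 -- index of the first match
    print(linear(data, 9))   # -1 -- not found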
|
StarcoderdataPython
|
187518
|
import lichtenberg
from PIL import Image
from pathlib import Path
def main():
width = 200
height = 200
sim = lichtenberg.Simulator(width, height)
sim.breakdown(60, 60)
sim.insulate_circle(100, 100, 50)
sim.simulate(max_loop=1000)
image = Image.new('RGB', (width, height), (0x00, 0x00, 0x00))
c_max = sim.cells.get_max_count()
for y in range(height):
for x in range(width):
if sim.cells.get_insulated(x, y):
# visualize insulated cells
image.putpixel((x, y), (255, 0, 0))
else:
c = sim.cells.get_count(x, y)
lm = int(255 * c / c_max) # normalize
image.putpixel((x, y), (lm, lm, lm))
image.save(Path(__file__).stem + '.png')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3246830
|
<gh_stars>0
import numpy as np
np.set_printoptions(precision=4)
import time
from scipy import spatial
def xy2theta(x, y):
if (x >= 0 and y >= 0):
theta = 180/np.pi * np.arctan(y/x);
if (x < 0 and y >= 0):
theta = 180 - ((180/np.pi) * np.arctan(y/(-x)));
if (x < 0 and y < 0):
theta = 180 + ((180/np.pi) * np.arctan(y/x));
if ( x >= 0 and y < 0):
theta = 360 - ((180/np.pi) * np.arctan((-y)/x));
return theta
def pt2rs(point, gap_ring, gap_sector, num_ring, num_sector):
x = point[0]
y = point[1]
# z = point[2]
if(x == 0.0):
x = 0.001
if(y == 0.0):
y = 0.001
theta = xy2theta(x, y)
faraway = np.sqrt(x*x + y*y)
idx_ring = np.divmod(faraway, gap_ring)[0]
idx_sector = np.divmod(theta, gap_sector)[0]
if(idx_ring >= num_ring):
idx_ring = num_ring-1 # python starts with 0 and ends with N-1
return int(idx_ring), int(idx_sector)
def ptcloud2sc(ptcloud, sc_shape, max_length):
num_ring = sc_shape[0]
num_sector = sc_shape[1]
gap_ring = max_length/num_ring
gap_sector = 360/num_sector
enough_large = 500
sc_storage = np.zeros([enough_large, num_ring, num_sector])
sc_counter = np.zeros([num_ring, num_sector])
num_points = ptcloud.shape[0]
for pt_idx in range(num_points):
point = ptcloud[pt_idx, :]
point_height = point[2] + 2.0 # for setting ground is roughly zero
idx_ring, idx_sector = pt2rs(point, gap_ring, gap_sector, num_ring, num_sector)
if sc_counter[idx_ring, idx_sector] >= enough_large:
continue
sc_storage[int(sc_counter[idx_ring, idx_sector]), idx_ring, idx_sector] = point_height
sc_counter[idx_ring, idx_sector] = sc_counter[idx_ring, idx_sector] + 1
sc = np.amax(sc_storage, axis=0)
return sc
def sc2rk(sc):
return np.mean(sc, axis=1)
def distance_sc(sc1, sc2):
num_sectors = sc1.shape[1]
    # repeatedly shift sc1 by one column and compare
_one_step = 1 # const
sim_for_each_cols = np.zeros(num_sectors)
for i in range(num_sectors):
# Shift
        sc1 = np.roll(sc1, _one_step, axis=1) # column shift
#compare
sum_of_cossim = 0
num_col_engaged = 0
for j in range(num_sectors):
col_j_1 = sc1[:, j]
col_j_2 = sc2[:, j]
if (~np.any(col_j_1) or ~np.any(col_j_2)):
# to avoid being divided by zero when calculating cosine similarity
# - but this part is quite slow in python, you can omit it.
continue
cossim = np.dot(col_j_1, col_j_2) / (np.linalg.norm(col_j_1) * np.linalg.norm(col_j_2))
sum_of_cossim = sum_of_cossim + cossim
num_col_engaged = num_col_engaged + 1
# save
sim_for_each_cols[i] = sum_of_cossim / num_col_engaged
yaw_diff = np.argmax(sim_for_each_cols) + 1 # because python starts with 0
sim = np.max(sim_for_each_cols)
dist = 1 - sim
return dist, yaw_diff
class ScanContextManager:
def __init__(self, shape=[20,60], num_candidates=10, threshold=0.15): # defualt configs are same as the original paper
self.shape = shape
self.num_candidates = num_candidates
self.threshold = threshold
self.max_length = 80 # recommended but other (e.g., 100m) is also ok.
self.ENOUGH_LARGE = 15000 # capable of up to ENOUGH_LARGE number of nodes
self.ptclouds = [None] * self.ENOUGH_LARGE
self.scancontexts = [None] * self.ENOUGH_LARGE
self.ringkeys = [None] * self.ENOUGH_LARGE
self.curr_node_idx = 0
def addNode(self, node_idx, ptcloud):
sc = ptcloud2sc(ptcloud, self.shape, self.max_length)
rk = sc2rk(sc)
self.curr_node_idx = node_idx
self.ptclouds[node_idx] = ptcloud
self.scancontexts[node_idx] = sc
self.ringkeys[node_idx] = rk
def getPtcloud(self, node_idx):
return self.ptclouds[node_idx]
def detectLoop(self):
exclude_recent_nodes = 30
valid_recent_node_idx = self.curr_node_idx - exclude_recent_nodes
if(valid_recent_node_idx < 1):
return None, None, None
else:
# step 1
ringkey_history = np.array(self.ringkeys[:valid_recent_node_idx])
ringkey_tree = spatial.KDTree(ringkey_history)
ringkey_query = self.ringkeys[self.curr_node_idx]
_, nncandidates_idx = ringkey_tree.query(ringkey_query, k=self.num_candidates)
# step 2
query_sc = self.scancontexts[self.curr_node_idx]
nn_dist = 1.0 # initialize with the largest value of distance
nn_idx = None
nn_yawdiff = None
for ith in range(self.num_candidates):
candidate_idx = nncandidates_idx[ith]
candidate_sc = self.scancontexts[candidate_idx]
dist, yaw_diff = distance_sc(candidate_sc, query_sc)
if(dist < nn_dist):
nn_dist = dist
nn_yawdiff = yaw_diff
nn_idx = candidate_idx
if(nn_dist < self.threshold):
nn_yawdiff_deg = nn_yawdiff * (360/self.shape[1])
return nn_idx, nn_dist, nn_yawdiff_deg # loop detected!
else:
return None, None, None
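# Hedged smoke-test sketch (random points stand in for a real LiDAR scan; with a
# single node detectLoop() returns (None, None, None) because recent nodes are excluded):
if __name__ == "__main__":
    scm = ScanContextManager()
    cloud = np.random.uniform(-40.0, 40.0, size=(1000, 3))
    scm.addNode(node_idx=0, ptcloud=cloud)
    print(scm.detectLoop())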
|
StarcoderdataPython
|
1679500
|
<reponame>BA7JCM/angr<gh_stars>0
from typing import Tuple, Optional
from ailment.expression import BinaryOp, Const, Expression, Convert
from ailment.statement import Call
from .base import PeepholeOptimizationExprBase
class Bswap(PeepholeOptimizationExprBase):
__slots__ = ()
NAME = "Simplifying bswap_16()"
expr_classes = (BinaryOp, ) # all expressions are allowed
def optimize(self, expr: BinaryOp):
# bswap_16
# And(
# (
# ((Conv(16->32, A) << 0x8<8>) & 0xff00ff00<32>) |
# ((Conv(16->32, A) >> 0x8<8>) & 0xff00ff<32>)
# ),
# 0xffff<32>
# )
if expr.op == "And" and len(expr.operands) == 2 and isinstance(expr.operands[1], Const) \
and expr.operands[1].value == 0xffff:
inner = expr.operands[0]
if isinstance(inner, BinaryOp) and inner.op == "Or" and len(inner.operands) == 2:
or_first, or_second = inner.operands[0], inner.operands[1]
if isinstance(or_first, BinaryOp) and or_first.op == "And" and len(or_first.operands) == 2 and \
isinstance(or_second, BinaryOp) and or_second.op == "And" and len(or_second.operands) == 2:
r, the_expr = self._match_inner(or_first, or_second)
if r:
return Call(expr.idx, "__builtin_bswap16", args=[the_expr], bits=expr.bits, **expr.tags)
r, the_expr = self._match_inner(or_second, or_first)
if r:
return Call(expr.idx, "__builtin_bswap16", args=[the_expr], bits=expr.bits, **expr.tags)
return None
return None
def _match_inner(self, or_first: BinaryOp, or_second: BinaryOp) -> Tuple[bool,Optional[Expression]]:
if isinstance(or_first.operands[1], Const) and or_first.operands[1].value == 0xff00ff00:
if isinstance(or_second.operands[1], Const) and or_second.operands[1].value == 0x00ff00ff:
inner_first = or_first.operands[0]
inner_second = or_second.operands[0]
if isinstance(inner_first, BinaryOp) and inner_first.op == "Shl" and \
isinstance(inner_first.operands[1], Const) and inner_first.operands[1].value == 8:
if isinstance(inner_second, BinaryOp) and inner_second.op == "Shr" and \
isinstance(inner_second.operands[1], Const) and inner_second.operands[1].value == 8:
if isinstance(inner_first.operands[0], Convert):
conv: Convert = inner_first.operands[0]
if conv.from_bits == 16 and conv.to_bits == 32:
the_expr_1 = conv.operand
if isinstance(inner_second.operands[0], Convert) and \
inner_second.operands[0].from_bits == 16 and \
inner_second.operands[0].to_bits == 32:
the_expr_2 = inner_second.operands[0].operand
if the_expr_1.likes(the_expr_2):
return True, the_expr_1
return False, None
|
StarcoderdataPython
|
151916
|
<gh_stars>0
#!/usr/bin/env python3
import os
import signal
import time
def sigusr1_handler(signum, frame):
    print(f'Caught a SIGUSR1 signal: {signum}, {frame}')
def sigusr2_handler(signum, frame):
    print(f'Caught a SIGUSR2 signal: {signum}, {frame}')
def main():
print(f'PID: {os.getpid()}')
signal.signal(signal.SIGUSR1, sigusr1_handler)
signal.signal(signal.SIGUSR2, sigusr2_handler)
print('Waiting 300s to catch signals')
time.sleep(300)
if __name__ == '__main__':
main()
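# Hedged interaction sketch (PID, frame repr, and the numeric value of SIGUSR1 are
# platform/run dependent; the file name is whatever this script is saved as):
#   $ ./sig_demo.py &
#   PID: 12345
#   $ kill -USR1 12345
#   Caught a SIGUSR1 signal: 10, <frame ...>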
|
StarcoderdataPython
|
3266462
|
<gh_stars>1-10
class <caret>B(C):
def __init__(self):
C.__init__(self)
|
StarcoderdataPython
|
3385103
|
###########################################################
# Module: phate_annotation.py
# Programmer: <NAME>
#
# Description: Module containing classes and methods for representing annotation results from various sources
#
# Classes and methods:
# annotationRecord
# addPVOGid2list
# getPVOGassociationList
# enterGFFdata(gff/dict)
# setPSATparameters
# removeRedundancy
# recordPSATannotations
# updatePSATcount
# getDBXREFs
# findInfo
# getFigDescription
# getPvogMembers
# getNCBItaxonomy
# link2databaseIdentifiers
# printAnnotationRecord
# printAnnotationRecord_tabHeader
# printAnnotationRecord_tab
# printAnnotationRecord2file_tabHeader
# printAnnotationRecord2file_tab
# returnGFFannotationRecord
# printAnnotationRecord2file
# printAll
# printAll2file(fileH)
# writePVOGgroups
##########################################################
# This code was developed by <NAME> at Lawrence Livermore National Laboratory.
# THIS CODE IS COVERED BY THE BSD LICENSE. SEE INCLUDED FILE BSD.PDF FOR DETAILS.
import re, os, subprocess
#DEBUG = True
DEBUG = False
p_comment = re.compile('^#')
KEGG_VIRUS_BASE_DIR = os.environ["KEGG_VIRUS_BASE_DIR"]
NCBI_VIRUS_BASE_DIR = os.environ["NCBI_VIRUS_BASE_DIR"]
PHANTOME_BASE_DIR = os.environ["PHANTOME_BASE_DIR"]
NCBI_TAXON_DIR = os.environ["NCBI_TAXON_DIR"]
PVOGS_BASE_DIR = os.environ["PVOGS_BASE_DIR"]
PSAT_OUT_DIR = "" # now being set by set method via parameter
# Verbosity
CLEAN_RAW_DATA = os.environ["CLEAN_RAW_DATA"]
PHATE_WARNINGS = os.environ["PHATE_WARNINGS"]
PHATE_MESSAGES = os.environ["PHATE_MESSAGES"]
PHATE_PROGRESS = os.environ["PHATE_PROGRESS"]
# External links
NCBI_TAXON_LINK = "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id="
# List of useful files to convert vg identifier to annotation information.
# These are located on mpath under /home/zhou4/BioRemediation/Phage/Kegg_virus/
# Use: os.environ["KEGG_VIRUS_BLAST_HOME"]
vg_enzyme = KEGG_VIRUS_BASE_DIR + "vg_enzyme.list"
vg_ko = KEGG_VIRUS_BASE_DIR + "vg_ko.list"
vg_ncbi_proteinid = KEGG_VIRUS_BASE_DIR + "vg_ncbi-proteinid.list"
vg_pfam = KEGG_VIRUS_BASE_DIR + "vg_pfam.list"
vg_tax = KEGG_VIRUS_BASE_DIR + "vg_tax.list"
vg_uniprot = KEGG_VIRUS_BASE_DIR + "vg_uniprot.list"
phantome_phage_genes = PHANTOME_BASE_DIR + "Phantome_Phage_genes.faa.headers"
ncbi_taxon_lookup = NCBI_TAXON_DIR + "nucl_gb.accession2taxid"
pVOGheaderFile = PVOGS_BASE_DIR + "pVOGs_headers.lst"
class annotationRecord(object):
def __init__(self):
self.source = "unknown" # Typically RAST, LLNL, PhAnToMe, GeneMark, Glimmer, Prodigal, PHANOTATE, KEGG, NCBI, pVOGs
self.method = "unknown" # Typcially RAST, PSAT, PFP, PhiRAST, JGI, SDSU, BLAST, blastp, blastn, HMM, jackhmmer
self.annotationType = "unknown" # gene, mRNA, polypeptide, CDS, functional, homology, hmm
self.pVOGlist = [] # list of pVOG identifiers (identified via blast hit)
self.contig = "unknown"
self.start = 0
self.end = 0
self.strand = 'x'
self.readingFrame = 'x'
self.identifier = "none"
self.locusTag = "none"
self.name = "none" # subject hit header (i.e., database identifier provider in fasta header)
self.description = "none" # more information: dbxref identifiers provided via lookup-tables (see above)
self.annotationList = [] # could be multiple from single source
self.category = "none" # functional categories: primary, sequence, structure, motif, etc.
        self.wraparound = "none" # indicates that a gene call wraps around the genome sequence as given
        self.paralogList = [] # list of paralogs; printAll2file iterates over this
self.psat = {
"jobID" : "", # PSAT job id
"jobName" : "", # PSAT job name
"fileName" : "", # PSAT output file
}
self.psatOutDir = "" # need to set
self.annotationString = "" # used to construct a summary of annotation(s) for GFF output
def addPVOGid2list(self,pVOG):
self.pVOGlist.append(pVOG)
def getPVOGassociationList(self):
return(self.pVOGlist)
def enterGFFdata(self,gff): # Input a dict object with key/values as specified
if isinstance(gff,dict):
self.source = gff["source"]
self.method = gff["method"]
self.annotationType = gff["type"]
self.contig = gff["contig"]
self.start = gff["start"]
self.end = gff["end"]
self.strand = gff["strand"]
self.readingFrame = gff["readingFrame"]
annotList = gff["annotation"].split(';')
for annot in annotList:
self.annotationList.append(annot)
self.category = "sequence"
return True
else:
return False
# METHODS FOR ANNOTATING FROM EXTERNAL SOURCES (e.g., PSAT)
def setPSATparameters(self,jobID,jobName,fileName,psatOutDir):
self.psat["jobID"] = jobID
self.psat["jobName"] = jobName
self.psat["fileName"] = fileName
self.source = "LLNL"
self.method = "PSAT"
self.annotationType = "functional"
self.psatOutDir = psatOutDir
PSAT_OUT_DIR = psatOutDir
def removeRedundancy(self,inList): # Eliminate redundancy in list; Different PSAT annotations sources can return same annotation
outList = []
for i in range(len(inList)):
item = inList.pop()
if item not in inList:
outList.append(item)
outList.reverse()
return outList
def recordPSATannotations(self,proteinHeader,PSAT_H): # Query PSAT annotation file for current gene's annotations
# Locations of annotations in PSAT annotation file
EC_COLUMN = 3
EC_DESC_COLUMN = 4
PATHWAY_COLUMN = 5
INTERPRO_COLUMN = 13
SIGNALP_COLUMN1 = 14
SIGNALP_COLUMN2 = 15
#TMHMM_COLUMN = 25 # Not in service. TMHMM could be added to PSAT output in column 25
# Patterns
p_comment = re.compile('^#')
p_EC = re.compile('\d\.\d+\.\d+\.\d+')
p_ECdesc = re.compile('[\w\d\.\-\_\s]+')
p_pathway = re.compile('ec\d{4}.*')
p_GO = re.compile('(MOLECULAR_FUNCTION|BIOLOGICAL_PROCESS|CELLULAR_COMPONENT),(GO:\d{7})')
p_pfam = re.compile('([\w\d\s\.\-\_\,]+)PFAM')
p_smart = re.compile('([\w\d\s\.\-\_\,]+)SMART')
p_dbxref = re.compile('EMPTY4NOW')
p_signalp1 = re.compile('YES|NO')
p_signalp2 = re.compile('\'(\d+)\-(\d+)\'')
p_tmhmm = re.compile('Topology=([io][io\d\-]+)\|')
#
annotationString = ""
psatAnnotationList = []
tempList = []; columns = []
if PHATE_MESSAGES == 'True':
print("phate_annotation says: Recording PSAT annotations.")
### Capture lines corresponding to the gene
pLines = PSAT_H.read().splitlines()
if DEBUG:
print("phate_annotation says, DEBUG: There are this many pLines:", len(pLines))
# Capture all annotation lines for this gene
for pLine in pLines:
matchTerm = proteinHeader + '\t'
match_gene = re.search(matchTerm,pLine)
match_comment = re.search(p_comment,pLine)
if match_gene:
tempList.append(pLine)
if DEBUG:
print("phate_annotation says, DEBUG: Protein", proteinHeader, "Temp list:", tempList)
### Parse each annotation line, capture annotations and format for genbank
for line in tempList:
if DEBUG:
print("phate_annotation says, DEBUG: Processing PSAT hit line:", line)
annotationString = ""
EC = ""
ecDescription = ""
pathway = ""
GO = ""
pfam = ""
smart = ""
dbxrefID = ""
signalp = ""; signalp1 = ""; signalp2 = ""
tmhmm = ""
columns = line.split('\t')
### Detect annotations; Note: There could be >1 in a given data line, or in a given column
match_ec = re.search(p_EC, columns[EC_COLUMN])
match_ecDesc = re.search(p_ECdesc, columns[EC_DESC_COLUMN])
match_pathway = re.search(p_pathway, columns[PATHWAY_COLUMN])
match_go = re.search(p_GO, columns[INTERPRO_COLUMN])
match_pfam = re.search(p_pfam, columns[INTERPRO_COLUMN])
match_smart = re.search(p_smart, columns[INTERPRO_COLUMN])
match_dbxref = re.search(p_dbxref, columns[INTERPRO_COLUMN])
match_signalp1 = re.search(p_signalp1, columns[SIGNALP_COLUMN1])
match_signalp2 = re.search(p_signalp2, columns[SIGNALP_COLUMN2])
#match_tmhmm = re.search(p_tmhmm, columns[TMHMM_COLUMN]) # Not currently in service
if match_ec:
EC = match_ec.group(0)
annotationString = "EC:" + EC
self.annotationList.append(annotationString)
if match_ecDesc:
ecDesc = match_ecDesc.group(0)
annotationString = "EC description:" + ecDesc
self.annotationList.append(annotationString)
if match_pathway:
pathwayString = match_pathway.group(0)
                pathways = re.split(r'\s*\|\s*', pathwayString)
for pathway in pathways:
annotationString = "Pathway:" + pathway + ' | '
self.annotationList.append(annotationString)
if match_go:
GO = match_go.group(0)
category = match_go.group(1)
goID = match_go.group(2)
if category == "BIOLOGICAL_PROCESS":
annotationString = "go_process:" + goID
elif category == "MOLECULAR_FUNCTION":
annotationString = "go_function:" + goID
elif category == "CELLULAR_COMPONENT":
annotationString = "go_component:" + goID
else:
annotationString = "go_unknown:" + goID
self.annotationList.append(annotationString)
if match_pfam:
pfam = match_pfam.group(1)
pfam = re.sub('\,$','',pfam)
annotationString = "pfam:" + pfam
self.annotationList.append(annotationString)
if match_smart:
smart = match_smart.group(1)
smart = re.sub('\,$','',smart)
annotationString = "smart:" + smart
self.annotationList.append(annotationString)
if match_dbxref:
dbxrefID = match_dbxref.group(0)
for dbxref in match_dbxref:
annotationString = "dbxref:" + dbxrefID
self.annotationList.append(annotationString)
if match_signalp1:
signalp1 = ''; signalp2 = ''; start = 0; end = 0
signalp1 = match_signalp1.group(0)
if signalp1 == 'YES':
if match_signalp2:
signalp2 = match_signalp2.group(0)
start = match_signalp2.group(1)
end = match_signalp2.group(2)
else:
if DEBUG:
print("phate_annotation says, DEBUG: DID NOT FIND A signalp2 MATCH for YES", pLine)
annotationString = "signal_peptide:" + signalp1 + ' ' + signalp2
self.annotationList.append(annotationString)
# Not in service
#if match_tmhmm:
# tmhmm = match_tmhmm.group(1)
# print "found tmhmm:", tmhmm
# annotationString = str(start) + '\t' + str(end) + "\tfeature\tTMHMM\t" + tmhmm
# #psatAnnotationList.append(annotationString)
self.annotationList = self.removeRedundancy(self.annotationList)
self.updatePSATcount()
if PHATE_PROGRESS == 'True':
print("phate_annotation says: PSAT annotations complete.")
return
def updatePSATcount(self):
if self.annotationList == []:
self.description = "There are no PSAT annotations"
else:
self.description = "Num annots:" + str(len(self.annotationList))
# METHODS FOR LINKING TO FUNCTIONAL ANNOTATIONS
# The Phantome, Kegg-virus, and Ncbi-virus database provide cryptic fasta headers
# Need to use look-up tables to get more descriptive names for these annotations
def getDBXREFs(self,database):
idList = []
dbxref = ""
p_truncatedSearchTerm = re.compile('^([^\s]*)\s')
match_truncate = re.search(p_truncatedSearchTerm, self.name)
if match_truncate:
searchTerm = match_truncate.group(1)
else:
searchTerm = self.name
command = 'grep ' + searchTerm + ' ' + database
proc = subprocess.Popen([command], stdout=subprocess.PIPE, shell=True)
(rawout, err) = proc.communicate()
out = rawout.decode('utf-8') # Python3
if out != "":
lines = out.split('\n')
for line in lines:
if line != "":
fields = line.split('\t')
if len(fields) > 1:
dbxref = fields[1]
else:
if PHATE_WARNINGS == 'True':
print("phate_annotation says, WARNING: no dbxref found for", self.name, "in database", database, "given line", line)
idList.append(dbxref)
return idList
def findInfo(self,searchTerm,database): # Searches Phantome header file for annotation information
p_truncatedSearchTerm = re.compile('^([^\s]*)\s')
infoLines = []
DATABASE_H = open(database,"r")
dLines = DATABASE_H.read().splitlines()
if DEBUG:
print("phate_annotation says, DEBUG: original searchTerm is", searchTerm)
match_truncate = re.search(p_truncatedSearchTerm,searchTerm)
if match_truncate:
truncatedSearchTerm = match_truncate.group(1)
searchTerm = truncatedSearchTerm
if DEBUG:
print("phate_annotation says, DEBUG: searchTerm was changed to", searchTerm)
for dLine in dLines:
match_searchTerm = re.search(searchTerm,dLine)
if match_searchTerm:
fields = dLine.split('\s+')
if searchTerm == fields[0][1:]:
infoLines.append(dLine)
DATABASE_H.close()
return infoLines
def getFigDescription(self,database):
dbxrefList = []; infoList = []; infoString = ""
# Alert: Really ugly regex follows; how to make this better?
p_phantomeInfo = re.compile('\[[\w\s\d\-\.\(\)\;\:\_\,\#]+\]') # Gather all text enclosed within []
# Pull figfam descriptions from phantome header file
match_fig = re.search('fig\|',self.name)
if match_fig:
searchTerm = self.name
figLines = self.findInfo(searchTerm,database)
for fig in figLines:
infoList = re.findall(p_phantomeInfo,fig)
for info in infoList:
infoString += ' | ' + info
dbxrefList.append(infoString)
else:
if PHATE_WARNINGS == 'True':
print("WARNING in annotation module: Unexpected name encountered in phate_annotation.py/getFigDescription:", self.name)
return dbxrefList
    def getPvogMembers(self,database): # database should be the pVOGs headers file, but fasta file will work (slowly)
        dbxrefList = []
        p_pVOGid = re.compile(r'VOG\d+[ab]?')
        # self.name is the pVOGs database blast hit header
        pVOGidList = re.findall(p_pVOGid,self.name) # extract pVOGid(s) from header
        if pVOGidList:
            # search the headers file itself, not the file-name string, for matching headers
            with open(database,"r") as DATABASE_H:
                dLines = DATABASE_H.read().splitlines()
            for pVOGid in pVOGidList:
                infoString = ""
                for line in dLines:
                    if pVOGid in line:
                        infoString += ' | ' + line
                dbxrefList.append(infoString)
        return dbxrefList # list(s) of pVOGs database headers with common pVOGid
# Query a taxonomy lookup table to get taxonomy information
def getNCBItaxonomyID(self,database): # Database maps ncbi header to taxonomy
ncbiTaxonList = []; giNumber = ''; accnNumber = ''
ncbiTaxonList = []
p_version = re.compile('(\w+_\d+)\.\d+')
p_taxID = re.compile('[\d\w\_]\s+[\d\w\_\.]+\s+([\d]+)\s+[\d]+')
        if self.name == '' or self.name == 'none':
if DEBUG:
print("phate_annotation says, DEBUG: name field blank in getNCBItaxonomy")
else:
fields = self.name.split('|') # Hit's fasta header has several fields, delimited w/'|'
if len(fields) > 4:
giNumber = fields[1]
accnNumber = fields[3]
description = fields[4]
self.description = description
searchTermString = accnNumber
match_version = re.search(p_version,searchTermString)
if match_version:
searchTerm = match_version.group(1)
else:
searchTerm = searchTermString
command = 'grep \"' + searchTerm + '\" ' + database
if DEBUG:
print("phate_annotation says, DEBUG: command is", command)
proc = subprocess.Popen([command], stdout=subprocess.PIPE, shell=True)
                rawout, err = proc.communicate()
                out = rawout.decode('utf-8') # Python3: decode bytes so the regex and comparison below work
if DEBUG:
print("phate_annotation says, DEBUG: Result of grep is", out)
if out != '':
match_taxID = re.search(p_taxID,out)
taxonomyID = match_taxID.group(1)
taxonomyString = 'NCBItaxID=' + taxonomyID
ncbiTaxonList.append(taxonomyString)
ncbiTaxonLink = NCBI_TAXON_LINK + taxonomyID
ncbiTaxonList.append(ncbiTaxonLink)
else:
if PHATE_WARNINGS == 'True':
print("phate_annotation says, WARNING: NCBI hit header has improper format or is missing:", self.name)
if DEBUG:
print("phate_annotation says, DEBUG: ncbiTaxonList is", ncbiTaxonList)
return ncbiTaxonList
def link2databaseIdentifiers(self,database,dbName):
dbxrefList = [] # holds concatenation of functional annotations
enzymeList = []; ncbiProteinList = []; taxonList = [] # hold specific annotations
pfamList = []; uniprotList = []; koList = [] # hold specific annotations
figList = []
annotationString = "" # string containing all dbxref annotations found
annotation = ""
if self.name == "" or self.name == "none":
if PHATE_WARNINGS == 'True':
print("phate_annotation says, WARNING: No name for identification of dbxref in phate_annotation/link2databaseIdentifiers")
return
else:
if dbName.lower() == 'kegg':
enzymeList = self.getDBXREFs(vg_enzyme)
koList = self.getDBXREFs(vg_ko)
ncbiProteinList = self.getDBXREFs(vg_ncbi_proteinid)
pfamList = self.getDBXREFs(vg_pfam)
taxList = self.getDBXREFs(vg_tax)
uniprotList = self.getDBXREFs(vg_uniprot)
# Create annotation lists that can later be parsed
for enzyme in enzymeList:
dbxrefList.append(enzyme)
for ko in koList:
dbxrefList.append(ko)
for protein in ncbiProteinList:
dbxrefList.append(protein)
for pfam in pfamList:
dbxrefList.append(pfam)
for taxon in taxList:
dbxrefList.append(taxon)
for uniprot in uniprotList:
dbxrefList.append(uniprot)
elif dbName.lower() == 'phantome':
figList = self.getFigDescription(phantome_phage_genes)
for fig in figList:
dbxrefList.append(fig)
elif dbName.lower() == 'ncbi':
taxonList = self.getNCBItaxonomyID(ncbi_taxon_lookup)
for taxon in taxonList:
dbxrefList.append(taxon)
elif dbName.lower() == 'pvogs':
pVOGlist = self.getPvogMembers(pVOGheaderFile)
elif dbName.lower() == 'ncbivirusprotein':
pass
else:
if PHATE_WARNINGS == 'True':
print("phate_annotation says, WARNING: Unrecognized database:", dbName)
if DEBUG:
print("phate_annotation says, DEBUG: dbxrefList:", dbxrefList)
for annotation in dbxrefList:
nextAnnot = ' | ' + annotation
annotationString += nextAnnot
self.description = annotationString
return
# PRINT METHODS
def printAnnotationRecord(self):
print("Annotation source:", self.source, '| Method:', self.method, '| Type:', self.annotationType)
print("Contig:", self.contig, "| Start:", self.start, "| End:", self.end, "| Strand:", self.strand)
print("Name:", self.name, "Description:", self.description)
print("Annotations:", self.annotationList)
def printAnnotationRecord_tabHeader(self):
header = 'Source\tMethod\tType\tCategory\tStart-End/strand\tName\tDescription'
print(header)
def printAnnotationRecord_tab(self):
annotationString = ""
#print "Number of annotations:", len(self.annotationList)
for annot in self.annotationList:
annotationString += annot
annotationString += ' | '
tabLine = self.source + '\t' + self.method + '\t' + self.annotationType + '\t' + self.category + '\t'
tabLine += str(self.start) + '-' + str(self.end) + '/' + self.strand + '\t'
tabLine += self.name + '\t' + self.description + '\t' + annotationString
print(tabLine)
def printAnnotationRecord2file_tabHeader(self,FILE_HANDLE):
header = 'Source\tMethod\tType\tCategory\tStart-End/strand\tName\tDescription'
FILE_HANDLE.write("%s\n" % (header))
def printAnnotationRecord2file_tab(self,FILE_HANDLE):
annotationString = ""
for annot in self.annotationList:
annotationString += annot
annotationString += ' | '
tabLine = self.source + '\t' + self.method + '\t' + self.annotationType + '\t' + self.category + '\t'
tabLine += str(self.start) + '-' + str(self.end) + '/' + self.strand + '\t'
tabLine += self.name + '\t' + self.description + '\t' + annotationString
FILE_HANDLE.write("%s\n" % (tabLine))
# Return annotations as a semicolon-delimited string
def returnGFFannotationRecord(self,FILE_HANDLE):
self.annotationString = ''; annot = ''; annotationList = []
if self.annotationType == 'gene':
annot = '(gene) ' + self.start + '/' + self.end + '/' + self.strand + ' ' + self.method
annotationList.append(annot)
if self.annotationType == 'functional':
annot = '(function) ' + self.method + ' ' + self.description
annotationList.append(annot)
if self.annotationType == 'homology':
homologName = self.name
newName = re.sub(';','',homologName) # Remove embedded ';' because GFF uses this delimiter
annot = '(homology) ' + self.method + ' ' + newName
annotationList.append(annot)
if self.annotationType == 'hmm search':
annot = '(hmm search) ' + self.method + ' ' + self.description
annotationList.append(annot)
if len(annotationList) > 0:
for i in range(0, len(annotationList)):
if i > 0:
self.annotationString += '; ' + annotationList[i]
else:
self.annotationString += annotationList[i]
FILE_HANDLE.write("%s" % (self.annotationString))
def printAnnotationRecord2file(self,FILE_HANDLE): #*** Update this
FILE_HANDLE.write("%s%s%s" % ("Annotation source:",self.source,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Method:",self.method,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Contig:",self.contig,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Annotations:",self.annotationList,"\n"))
def printAll(self):
print("=== Annotation record ===")
print("Source:", self.source)
print("Method:", self.method)
print("Type:", self.annotationType)
print("Contig:", self.contig)
print("Start:", self.start)
print("End:", self.end)
print("Strand:", self.strand)
print("Reading Frame:", self.readingFrame)
print("Identifier:", self.identifier)
print("Locus Tag:", self.locusTag)
print("Name:", self.name)
print("Description:", self.description)
print("Category:", self.category)
print("Wraparound:", self.wraparound)
print("Annotation List:")
for annot in self.annotationList:
print(" ", annot)
print("Category:", self.category)
print("========================")
def printAll2file(self,FILE_HANDLE):
FILE_HANDLE.write("%s" % ("Annotation record ===\n"))
FILE_HANDLE.write("%s%s%s" % ("Source:",self.source,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Method:",self.method,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Type:",self.annotationType,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Contig:",self.contig,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Start:",self.start,"\n"))
FILE_HANDLE.write("%s%s%s" % ("End:",self.end,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Strand:",self.strand,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Reading Frame:",self.readingFrame,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Identifier:",self.identifier,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Locus Tag:",self.locusTag,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Name:",self.name,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Description:",self.description,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Category:",self.category,"\n"))
FILE_HANDLE.write("%s%s%s" % ("Wraparound:",self.wraparound,"\n"))
FILE_HANDLE.write("%s" % ("Annotation List:i\n"))
for annot in self.annotationList:
FILE_HANDLE.write("%s%s%s" % (" ",annot,"\n"))
FILE_HANDLE.write("%s" % ("Paralog List:\n"))
for paralog in self.paralogList:
FILE_HANDLE.write("%s%s%s" % (" ",paralog,"\n"))
FILE_HANDLE.write("%s" % ("=======================\n"))
def writePVOGgroups(self,FILE_HANDLE):
for pVOG in self.pVOGlist:
pass
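# Hedged usage sketch (requires the KEGG/NCBI/PHANTOME/pVOGs environment variables
# read at the top of this module; field values below are illustrative only):
#   rec = annotationRecord()
#   rec.enterGFFdata({"source": "PHANOTATE", "method": "PHANOTATE", "type": "gene",
#                     "contig": "contig_1", "start": 12, "end": 320, "strand": "+",
#                     "readingFrame": "1", "annotation": "cds1;score=-1.2"})
#   rec.printAnnotationRecord()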
|
StarcoderdataPython
|
3238909
|
from __future__ import absolute_import, division, print_function
import re
import inspect
from cloudformation_validator.Profile import Profile
from cfn_model.parser.ParserError import ParserError
def lineno():
"""Returns the current line number in our program."""
return str(' - ProfileLoader - line number: '+str(inspect.currentframe().f_back.f_lineno))
# Load rule profile
class ProfileLoader:
"""
Profile loader
"""
def __init__(self,rules_registry,debug=False):
"""
Initialize the ProfileLoader
:param rules_registry:
:param debug:
"""
self.debug = debug
self.rules_registry = rules_registry
if self.debug:
print('ProfileLoader - init'+lineno())
def load(self, profile_definition):
"""
Load rules from a profile definition
:param profile_definition:
:return:
"""
if self.debug:
print('load'+lineno())
print('vars: '+str(vars(profile_definition))+lineno())
# coerce falsy profile_definition into empty string for
# empty profile check
if not profile_definition:
raise ParserError("Empty profile")
new_profile = Profile()
if self.debug:
print('vars: '+str(vars(new_profile))+lineno())
for definition in profile_definition.rules:
if self.debug:
print('definition: '+str(definition))
rule_id = self.rule_line_match(definition.id)
if rule_id:
self.check_valid_rule_id(rule_id)
new_profile.add_rule(rule_id)
return new_profile
def rule_line_match(self, rule_id):
"""
Parses a line, returns first matching line or false if no match
:param rule_id:
:return:
"""
if self.debug:
print('rule_line_match'+lineno())
print('rule_id: '+str(rule_id)+lineno())
rule_id = rule_id.rstrip('\r\n')
matchObj = re.match(r'^([a-zA-Z]*?[0-9]+)\s*(.*)', rule_id, re.M | re.I)
if matchObj:
if self.debug:
print("matchObj.group() : "+str(matchObj.group()))
print("matchObj.group(1) : "+str(matchObj.group(1)))
print("matchObj.group(2) : "+str(matchObj.group(2)))
return matchObj.group(1)
else:
if self.debug:
print("No match!!")
return False
def rules_ids(self):
"""
Returns ids of rules in registry
:return:
"""
if self.debug:
print('rules_ids'+lineno())
        ids = []
        for rules in self.rules_registry:
            ids.append(rules.id)
        return ids
def check_valid_rule_id(self, rule_id):
"""
        Returns True if rule_id is valid (present in rules registry), else False
:param rule_id:
:return:
"""
if self.debug:
print('check_valid_rule_id'+lineno())
print('rule_id: '+str(rule_id)+lineno())
if self.rules_registry.by_id(rule_id) == None:
return False
return True
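# Hedged usage sketch (rules_registry and profile_definition stand in for the real
# cloudformation_validator objects: the registry needs a by_id() lookup and the
# profile definition a .rules list whose entries expose .id):
#   loader = ProfileLoader(rules_registry)
#   profile = loader.load(profile_definition)   # Profile populated via add_rule()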
|
StarcoderdataPython
|
3361066
|
<gh_stars>0
import argparse
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
width = 500
height = 500
angle = 0.0
refreshMills = 15
points = []
polygons = []
window : GLint
def myReshape(width, height):
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(0, width, 0, height)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glClearColor(1.0, 1.0, 1.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT)
glPointSize(1.0)
glColor3f(0.0, 0.0, 0.0)
def center_and_scale_model():
x = [point[0].value for point in points]
y = [point[1].value for point in points]
z = [point[2].value for point in points]
maxx = max(x)
minx = min(x)
maxy = max(y)
miny = min(y)
maxz = max(z)
minz = min(z)
div = max([maxx - minx, maxy - miny, maxz - minz])
for point in points:
point[0] = GLfloat(((point[0].value - (minx + maxx)/2)) * (2/div))
point[1] = GLfloat(((point[1].value - (miny + maxy)/2)) * (2/div))
point[2] = GLfloat(((point[2].value - (minz + maxz)/2)) * (2/div))
def myDisplay():
global angle
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glRotatef(angle, 0.0, 1.0, 0.0)
for polygon in polygons:
glBegin(GL_TRIANGLES)
glColor3f(0.0, 0.0, 0.0)
glVertex3f(points[polygon[0]][0], points[polygon[0]][1], points[polygon[0]][2])
glColor3f(0.0, 0.0, 0.0)
glVertex3f(points[polygon[1]][0], points[polygon[1]][1], points[polygon[1]][2])
glColor3f(1.0, 1.0, 1.0)
glVertex3f(points[polygon[2]][0], points[polygon[2]][1], points[polygon[2]][2])
glEnd()
angle += 0.2
glutSwapBuffers()
def parse_args():
parser = argparse.ArgumentParser(description='Draws 3d model from object file')
parser.add_argument('object_file', help='path to object file')
parser.add_argument('point', help='point to check if inside object')
args = parser.parse_args()
return args
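# Example invocation (the script file name is hypothetical; the OBJ file must
# contain "v x y z" vertex records and triangular "f i j k" face records, which
# is all read_file() below understands):
#   python draw_object.py model.obj "(0.0, 0.0, 0.0)"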
def read_file(object_file):
global points, polygons
with open(object_file, 'r') as inp:
for line in inp:
            # Match only plain vertex ("v ") and face ("f ") records so that
            # "vt", "vn" and similar OBJ records are not misparsed
            if line.startswith('v '):
                line = line.strip().split(' ')
                points.append([GLfloat(float(line[1])), GLfloat(float(line[2])), GLfloat(float(line[3]))])
            elif line.startswith('f '):
                line = line.strip().split(' ')
                polygons.append([int(line[1]) - 1, int(line[2]) - 1, int(line[3]) - 1])
def check_point(point):
    # Half-space test against every face plane: this assumes a convex mesh with
    # consistently wound faces, and expects the point as a literal like "(x, y, z)"
    point = list(eval(point))
    x = point[0]
    y = point[1]
    z = point[2]
for polygon in polygons:
x1 = points[polygon[0]][0].value
x2 = points[polygon[1]][0].value
x3 = points[polygon[2]][0].value
y1 = points[polygon[0]][1].value
y2 = points[polygon[1]][1].value
y3 = points[polygon[2]][1].value
z1 = points[polygon[0]][2].value
z2 = points[polygon[1]][2].value
z3 = points[polygon[2]][2].value
A = (y2 - y1) * (z3 - z1) - (z2 - z1) * (y3 - y1)
B = -1 * (x2 - x1) * (z3 - z1) + (z2 - z1) * (x3 - x1)
C = (x2 - x1) * (y3 - y1) - (y2 - y1) * (x3 - x1)
D = -1 * x1 * A - y1 * B - z1 * C
VR = A*x + B*y + C*z + D
if VR > 0:
            print('The point is not inside the object!')
            return
    print('The point is inside the object')
def timer(value):
glutPostRedisplay()
glutTimerFunc(refreshMills, timer, 0)
def main():
global window
args = parse_args()
read_file(args.object_file)
center_and_scale_model()
if args.point is not None:
check_point(args.point)
glutInit()
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB)  # double buffering, since myDisplay calls glutSwapBuffers
glutInitWindowSize(width, height)
glutInitWindowPosition(100, 100)
glutCreateWindow("Glut OpenGL 3d object")
glutReshapeFunc(myReshape)
glutDisplayFunc(myDisplay)
glutTimerFunc(0, timer, 0)
glutMainLoop()
if __name__=="__main__":
main()
|
StarcoderdataPython
|
79463
|
<filename>openpype/hosts/nuke/api/__init__.py
from .workio import (
file_extensions,
has_unsaved_changes,
save_file,
open_file,
current_file,
work_root,
)
from .command import (
reset_frame_range,
get_handles,
reset_resolution,
viewer_update_and_undo_stop
)
from .plugin import OpenPypeCreator
from .pipeline import (
install,
uninstall,
ls,
containerise,
parse_container,
update_container,
)
from .lib import (
maintained_selection
)
__all__ = (
"file_extensions",
"has_unsaved_changes",
"save_file",
"open_file",
"current_file",
"work_root",
"reset_frame_range",
"get_handles",
"reset_resolution",
"viewer_update_and_undo_stop",
"OpenPypeCreator",
"install",
"uninstall",
"ls",
"containerise",
"parse_container",
"update_container",
"maintained_selection",
)
|
StarcoderdataPython
|
3324221
|
"""Copyright (c) Facebook, Inc. and its affiliates."""
from typing import List, Optional
import typer
from rich.progress import track
from rich.console import Console
from pedroai.io import read_jsonlines, write_jsonlines
from leaderboard.analysis.squad import SquadPredictionData
from leaderboard.data import create_leaderboard_splits
from leaderboard.www import database
squad_app = typer.Typer()
console = Console()
@squad_app.command("plot")
def squad_plot(models: List[str]):
data = SquadPredictionData(models)
data.plot_comparison()
@squad_app.command("anova")
def squad_anova(models: List[str]):
data = SquadPredictionData(models)
data.repeated_measures_anova()
@squad_app.command("permutation")
def squad_permutation(models: List[str]):
data = SquadPredictionData(models)
data.permutation_test()
@squad_app.command("dist")
def squad_dist(models: List[str]):
data = SquadPredictionData(models)
data.dist_info()
@squad_app.command("csv")
def squad_to_csv(out_path: str, models: List[str]):
data = SquadPredictionData(models)
data.to_csv(out_path)
@squad_app.command()
def init_db(limit_submissions: Optional[int] = None, skip_tests: bool = False):
# TODO: Move this out of the squad command
database.build_db(limit_submissions=limit_submissions, skip_tests=skip_tests)
@squad_app.command()
def export_to_irt(out_path: str):
submissions = database.export_submissions()
write_jsonlines(out_path, submissions)
@squad_app.command()
def item_splits(fold: str, seed: int = 42, train_size: float = 0.9):
create_leaderboard_splits(fold, seed=seed, train_size=train_size)
@squad_app.command()
def leaderboard_to_pyirt(data_path: str, output_path: str, metric: str = 'exact_match'):
"""Convert the SQuAD leaderboard.jsonlines data to py-irt format"""
output = []
for line in track(read_jsonlines(data_path)):
subject_id = line['submission_id']
# Extra field, just passing meta info
name = line['name']
responses = {}
for item_id, scores in line['predictions'].items():
responses[item_id] = scores['scores'][metric]
output.append({
'subject_id': subject_id,
'responses': responses,
'name': name
})
console.log(f'Writing output to: {output_path}')
write_jsonlines(output_path, output)
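# Record shape handled by leaderboard_to_pyirt (inferred from the key accesses
# above; the concrete values here are made up for illustration):
#   input line:  {"submission_id": "s1", "name": "model-a",
#                 "predictions": {"q1": {"scores": {"exact_match": 1.0, "f1": 1.0}}}}
#   output line: {"subject_id": "s1", "responses": {"q1": 1.0}, "name": "model-a"}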
|
StarcoderdataPython
|
1760931
|
<filename>venv/lib/python3.7/site-packages/diffoscope/feeders.py<gh_stars>0
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2014-2015 <NAME> <<EMAIL>>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
import signal
import hashlib
import logging
import subprocess
from .config import Config
from .profiling import profile
logger = logging.getLogger(__name__)
DIFF_CHUNK = 4096
def from_raw_reader(in_file, filter=None):
def feeder(out_file):
max_lines = Config().max_diff_input_lines
end_nl = False
line_count = 0
# If we have a maximum size, hash the content as we go along so we can
# display a nicer message.
h = None
if max_lines < float('inf'):
h = hashlib.sha1()
for buf in in_file:
line_count += 1
out = buf if filter is None else filter(buf)
if h is not None:
h.update(out)
if line_count < max_lines:
out_file.write(out)
# very long lines can sometimes interact negatively with
# python buffering; force a flush here to avoid this,
# see https://bugs.debian.org/870049
out_file.flush()
if buf:
end_nl = buf[-1] == '\n'
if h is not None and line_count >= max_lines:
out_file.write(
"[ Too much input for diff (SHA1: {}) ]\n".format(
h.hexdigest()
).encode('utf-8')
)
end_nl = True
return end_nl
return feeder
def from_text_reader(in_file, filter=None):
if filter is None:
def encoding_filter(text_buf):
return text_buf.encode('utf-8')
else:
def encoding_filter(text_buf):
return filter(text_buf).encode('utf-8')
return from_raw_reader(in_file, encoding_filter)
def from_command(command):
def feeder(out_file):
with profile('command', command.cmdline()[0]):
feeder = from_raw_reader(command.stdout, command.filter)
end_nl = feeder(out_file)
returncode = command.returncode
if returncode not in (0, -signal.SIGTERM):
raise subprocess.CalledProcessError(
returncode, command.cmdline(), output=command.stderr
)
return end_nl
return feeder
def from_text(content):
def feeder(f):
for offset in range(0, len(content), DIFF_CHUNK):
f.write(content[offset : offset + DIFF_CHUNK].encode('utf-8'))
return content and content[-1] == '\n'
return feeder
def empty():
def feeder(f):
return False
return feeder
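# A small sketch of driving a feeder by hand (kept as a comment because this
# module uses relative imports and is not meant to be run standalone):
#
#   import io
#   feeder = from_text("hello\nworld\n")
#   buf = io.BytesIO()
#   ended_with_newline = feeder(buf)   # True; buf now holds b"hello\nworld\n"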
|
StarcoderdataPython
|
32730
|
import datetime
import remi
import core.globals
connected_clients = {} # Dict with key=session id of App Instance and value=ws_client.client_address of App Instance
connected_clients['number'] = 0 # Special Dict Field for amount of active connections
client_route_url_to_view = {} # Dict to store URL extensions related to session. This is used to switch a view based on url
def handle_connections(AppInst=None):
# Take care of the connection. It is only alive if the websocket still is active.
# Check, if there is a new websocket connection for this App session (= Instance)
if AppInst.connection_established == False and len(AppInst.websockets) == 1:
for session_id, app_inst in remi.server.clients.items():
if session_id == AppInst.session:
for ws_client in app_inst.websockets:
AppInst.logger.info(f'New Session with ID <{AppInst.session}> from host {ws_client.client_address}') # Host Information for direct connection
connected_clients[AppInst.session] = ws_client.client_address
AppInst.logger.info(f'Session <{AppInst.session}> host headers: {ws_client.headers}')
connected_clients['number'] = connected_clients['number'] + 1
AppInst.logger.info(f'Connected clients ({connected_clients["number"]} in total): {connected_clients}')
AppInst.connect_time = datetime.datetime.now()
AppInst.connection_established = True # Set Flag. This can be used by other threads as end signal.
# Check, if the websocket connection is still alive. REMI removes the Websocket from the List if dead.
if len(remi.server.clients[AppInst.session].websockets) == 0 and AppInst.connection_established == True:
AppInst.disconnect_time = datetime.datetime.now() # Store the disconnect time
connection_duration = f'{(AppInst.disconnect_time - AppInst.connect_time).seconds} sec'
AppInst.logger.info(f'Session <{AppInst.session}> from host {connected_clients[AppInst.session]} has disconnected. Connection duration: {connection_duration}')
AppInst.connection_established = False # Set Flag. This can be used by other threads as end signal.
del connected_clients[AppInst.session]
connected_clients['number'] = connected_clients['number'] - 1
AppInst.logger.info(f'Still connected clients: {connected_clients}')
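# A usage sketch (hypothetical App subclass; remi invokes idle() on every update
# cycle, which is one natural place to poll the websocket/connection state):
#
#   class MyApp(remi.App):
#       def idle(self):
#           handle_connections(AppInst=self)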
|
StarcoderdataPython
|
4829932
|
""" Object id converter.
This is taken from
http://flask.pocoo.org/snippets/106/
"""
from flask import Flask
from werkzeug.routing import BaseConverter, ValidationError
from itsdangerous import base64_encode, base64_decode
from bson.objectid import ObjectId
from bson.errors import InvalidId
class ObjectIDConverter(BaseConverter):
def to_python(self, value):
"""Object ID converter to Python
Returns:
ObjectId from the base64
"""
try:
return ObjectId(base64_decode(value))
except (InvalidId, ValueError, TypeError):
raise ValidationError()
def to_url(self, value):
"""Object ID converter to url
Returns:
Base64
"""
return base64_encode(value.binary)
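# A minimal registration sketch (the route and app wiring below are hypothetical,
# not part of the original snippet); running this module directly starts a demo app.
if __name__ == "__main__":
    app = Flask(__name__)
    app.url_map.converters['objectid'] = ObjectIDConverter

    @app.route('/documents/<objectid:doc_id>')
    def show_document(doc_id):
        # doc_id arrives as a bson.objectid.ObjectId decoded from the URL segment
        return str(doc_id)

    app.run(debug=True)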
|
StarcoderdataPython
|