Dataset columns (one record per source file):
- max_stars_repo_path: string, length 3-269
- max_stars_repo_name: string, length 4-119
- max_stars_count: int64, 0-191k
- id: string, length 1-7
- content: string, length 6-1.05M
- score: float64, 0.23-5.13
- int_score: int64, 0-5
python/ray/train/__init__.py
jamesliu/ray
33
1700
<filename>python/ray/train/__init__.py
from ray.train.backend import BackendConfig
from ray.train.callbacks import TrainingCallback
from ray.train.checkpoint import CheckpointStrategy
from ray.train.session import (get_dataset_shard, local_rank, load_checkpoint,
                               report, save_checkpoint, world_rank, world_size)
from ray.train.trainer import Trainer, TrainingIterator

__all__ = [
    "BackendConfig", "CheckpointStrategy", "get_dataset_shard",
    "load_checkpoint", "local_rank", "report", "save_checkpoint",
    "TrainingIterator", "TrainingCallback", "Trainer", "world_rank",
    "world_size"
]
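This `__init__.py` only re-exports the public training API. Below is a minimal sketch of how that legacy `Trainer` API was typically driven; the backend name, worker count, and reported metric are illustrative assumptions, not part of the file above.

import ray.train as train
from ray.train import Trainer

def train_func():
    # runs on each worker; report() makes metrics visible to TrainingCallbacks
    train.report(loss=0.0)
    return train.world_rank()

trainer = Trainer(backend="torch", num_workers=2)  # assumed backend and worker count
trainer.start()
results = trainer.run(train_func)
trainer.shutdown()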
1.945313
2
test/test_contact_in_group.py
anastas11a/python_training
0
1701
<gh_stars>0
from model.contact import Contact
from model.group import Group
import random


def test_add_contact_in_group(app, db):
    app.open_home_page()
    contacts = db.get_contact_list()
    if len(contacts) == 0:
        app.contact.create(Contact(firstname="test firstname changed"))
        contacts = db.get_contact_list()  # re-read so the new contact can be picked
    groups = db.get_group_list()
    if len(groups) == 0:
        app.group.create(Group(name="test"))
        groups = db.get_group_list()  # re-read so the new group can be picked
    contact_rand = random.choice(contacts)
    group_rand = random.choice(groups)
    app.contact.add_contact_to_group(contact_rand.id, group_rand.id)
    contacts_in_group = db.get_contacts_in_group(Group(id=group_rand.id))
    assert contact_rand in contacts_in_group


def test_del_contact_from_group(app, db):
    app.open_home_page()
    contacts = db.get_contact_list()
    if len(contacts) == 0:
        app.contact.create(Contact(firstname="test firstname changed"))
        contacts = db.get_contact_list()
    groups = db.get_group_list()
    if len(groups) == 0:
        app.group.create(Group(name="test"))
        groups = db.get_group_list()
    group_rand = random.choice(groups)
    app.contact.open_contacts_in_group(group_rand.id)
    contacts_in_group = db.get_contacts_in_group(Group(id=group_rand.id))
    if len(contacts_in_group) == 0:
        # the group is empty: put a random contact into it first
        app.contact.view_all_contacts()
        contact_rand = random.choice(contacts)
        app.contact.add_contact_to_group(contact_rand.id, group_rand.id)
        app.contact.open_contacts_in_group(group_rand.id)
        contacts_in_group = db.get_contacts_in_group(Group(id=group_rand.id))
    # delete a contact that is actually in the group
    contact_rand = random.choice(contacts_in_group)
    app.contact.del_contact_from_group()
    contacts_in_group = db.get_contacts_in_group(Group(id=group_rand.id))
    # the list holds Contact objects, so compare by id
    assert contact_rand.id not in [c.id for c in contacts_in_group]
2.359375
2
byurak/accounts/admin.py
LikeLion-CAU-9th/Django-fancy-coder
0
1702
<reponame>LikeLion-CAU-9th/Django-fancy-coder
from django.contrib import admin

from accounts.models import User, Profile, UserFollow


@admin.register(User)
class UserAdmin(admin.ModelAdmin):
    list_display = ['email', 'nickname']
    list_display_links = ['email', 'nickname']


admin.site.register(Profile)
admin.site.register(UserFollow)
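If the plain registrations ever need customization, the decorator form used for `User` applies equally well; a small sketch (the `ProfileAdmin` class is hypothetical, not in the file above):

@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
    pass  # equivalent to admin.site.register(Profile); add list_display etc. here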
1.945313
2
viz_utils/eoa_viz.py
olmozavala/eoas-pyutils
0
1703
import os from PIL import Image import cv2 from os import listdir from os.path import join import matplotlib.pyplot as plt import matplotlib from matplotlib.colors import LogNorm from io_utils.io_common import create_folder from viz_utils.constants import PlotMode, BackgroundType import pylab import numpy as np import cmocean import shapely import cartopy.crs as ccrs import cartopy.feature as cfeature import cartopy def select_colormap(field_name): ''' Based on the name if the field it chooses a colormap from cmocean Args: field_name: Returns: ''' if np.any([field_name.find(x) != -1 for x in ('ssh', 'srfhgt', 'adt','surf_el')]): # cmaps_fields.append(cmocean.cm.deep_r) return cmocean.cm.curl elif np.any([field_name.find(x) != -1 for x in ('temp', 'sst', 'temperature')]): return cmocean.cm.thermal elif np.any([field_name.find(x) != -1 for x in ('vorticity', 'vort')]): return cmocean.cm.curl elif np.any([field_name.find(x) != -1 for x in ('salin', 'sss', 'sal')]): return cmocean.cm.haline elif field_name.find('error') != -1: return cmocean.cm.diff elif field_name.find('binary') != -1: return cmocean.cm.oxy elif np.any([field_name.find(x) != -1 for x in ('u_', 'v_', 'u-vel.', 'v-vel.','velocity')]): return cmocean.cm.speed class EOAImageVisualizer: """This class makes plenty of plots assuming we are plotting Geospatial data (maps). It is made to read xarrays, numpy arrays, and numpy arrays in dictionaries vizobj = new EOAImageVisualizer(disp_images=True, output_folder='output', lats=[lats],lons=[lons]) """ _COLORS = ['y', 'r', 'c', 'b', 'g', 'w', 'k', 'y', 'r', 'c', 'b', 'g', 'w', 'k'] _figsize = 8 _font_size = 30 _units = '' _max_imgs_per_row = 4 _mincbar = np.nan # User can set a min and max colorbar values to 'force' same color bar to all plots _maxcbar = np.nan _flip_data = True _eoas_pyutils_path = './eoas_pyutils'# This is the path where the eoas_utils folder is stored with respect to the main project _contourf = False # When plotting non-regular grids and need precision _background = BackgroundType.BLUE_MARBLE_LR # Select the background to use _auto_colormap = True # Selects the colormap based on the name of the field _show_var_names = False # Includes the name of the field name in the titles _additional_polygons = [] # MUST BE SHAPELY GEOMETRIES In case we want to include additional polygons in the plots (all of them) # If you want to add a streamplot of a vector field. It must be a dictionary with keys x,y,u,v # and optional density, color, cmap, arrowsize, arrowstyle, minlength _vector_field = None _norm = None # Use to normalize the colormap. For example with LogNorm # vizobj = EOAImageVisualizer(disp_images=True, output_folder='output', # lats=[lats],lons=[lons]) def __init__(self, disp_images=True, output_folder='output', lats=[-90,90], lons =[-180,180], projection=ccrs.PlateCarree(), **kwargs): # All the arguments that are passed to the constructor of the class MUST have its name on it. 
self._disp_images = disp_images self._output_folder = output_folder self._projection = projection bbox = self.getExtent(lats, lons) self._extent = bbox self._lats = lats self._lons = lons self._fig_prop = (bbox[1]-bbox[0])/(bbox[3]-bbox[2]) self._contour_labels = False for arg_name, arg_value in kwargs.items(): self.__dict__["_" + arg_name] = arg_value print(self.__dict__["_" + arg_name]) def __getattr__(self, attr): '''Generic getter for all the properties of the class''' return self.__dict__["_" + attr] def __setattr__(self, attr, value): '''Generic setter for all the properties of the class''' self.__dict__["_" + attr] = value def add_colorbar(self, fig, im, ax, show_color_bar, label=""): # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html if show_color_bar: font_size_cbar = self._font_size * .5 # TODO how to make this automatic and works always cbar = fig.colorbar(im, ax=ax, shrink=.7) cbar.ax.tick_params(labelsize=font_size_cbar) if label != "": cbar.set_label(label, fontsize=font_size_cbar*1.2) else: cbar.set_label(self._units, fontsize=font_size_cbar*1.2) def plot_slice_eoa(self, c_img, ax, cmap='gray', mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan) -> None: """ Plots a 2D img for EOA data. :param c_img: 2D array :param ax: geoaxes :return: """ c_ax = ax if self._flip_data: origin = 'lower' else: origin = 'upper' if self._background == BackgroundType.CARTO_DEF: c_ax.stock_img() else: if self._background == BackgroundType.BLUE_MARBLE_LR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png')) if self._background == BackgroundType.BLUE_MARBLE_HR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg')) if self._background == BackgroundType.TOPO: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png')) if self._background == BackgroundType.BATHYMETRY: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg')) c_ax.imshow(img, origin='upper', extent=(-180,180,-90,90), transform=ccrs.PlateCarree()) if mode == PlotMode.RASTER or mode == PlotMode.MERGED: if self._contourf: im = c_ax.contourf(self._lons, self._lats, c_img, num_colors=255, cmap='inferno', extent=self._extent) else: if np.isnan(mincbar): im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, transform=self._projection, norm=self._norm) else: im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, vmin=mincbar, vmax=maxcbar, transform=self._projection, norm=self._norm) if mode == PlotMode.CONTOUR or mode == PlotMode.MERGED: c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) if mode == PlotMode.CONTOUR: im = c_ax.contour(c_img, extent=self._extent, transform=self._projection) if mode == PlotMode.MERGED: if self._contour_labels: c_ax.contour(c_img, self._contour_labels, colors='r', extent=self._extent, transform=self._projection) else: c_ax.contour(c_img, extent=self._extent, transform=self._projection) if len(self._additional_polygons) > 0: pol_lats = [] pol_lons = [] for c_polygon in self._additional_polygons: if isinstance(c_polygon, shapely.geometry.linestring.LineString): x,y = c_polygon.xy elif isinstance(c_polygon, shapely.geometry.polygon.Polygon): x, y = c_polygon.exterior.xy pol_lats += y pol_lons += x c_ax.plot(x,y, transform=self._projection, c='r') # Adds a threshold to the plot to see the polygons c_ax.set_extent(self.getExtent(list(self._lats) + pol_lats, list(self._lons) + pol_lons, 0.5)) if self._vector_field != None: try: u = 
self._vector_field['u'] v = self._vector_field['v'] x = self._vector_field['x'] y = self._vector_field['y'] vec_keys = self._vector_field.keys() c = 'r' density = 1 linewidth = 3 vec_cmap = cmocean.cm.solar if 'color' in vec_keys: c = self._vector_field['color'] if 'density' in vec_keys: density = self._vector_field['density'] if 'linewidth' in vec_keys: linewidth = self._vector_field['linewidth'] if 'cmap' in vec_keys: vec_cmap = self._vector_field['cmap'] c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) c_ax.streamplot(x, y, u, v, transform=self._projection, density=density, color=c, cmap=vec_cmap, linewidth=linewidth) except Exception as e: print(F"Couldn't add vector field e:{e}") gl = c_ax.gridlines(draw_labels=True, color='grey', alpha=0.5, linestyle='--') # gl.xlabel_style = {'size': self._font_size/2, 'color': '#aaaaaa', 'weight':'bold'} font_coords = {'size': self._font_size*.6} gl.xlabel_style = font_coords gl.ylabel_style = font_coords gl.top_labels = False gl.right_labels = False return im def get_proper_size(self, rows, cols): """ Obtains the proper size for a figure. :param rows: how many rows will the figure have :param cols: how many colswill the figure have :param prop: Proportion is the proportion to use w/h :return: """ if rows == 1: return self._figsize * cols * self._fig_prop, self._figsize else: return self._figsize * cols * self._fig_prop, self._figsize * rows def _close_figure(self): """Depending on what is disp_images, the figures are displayed or just closed""" if self._disp_images: plt.show() else: plt.close() def getExtent(self, lats, lons, expand_ext=0.0): ''' Obtains the bbox of the coordinates. If included threshold then increases the bbox in all directions with that thres Args: lats: lons: inc_threshold: Returns: ''' minLat = np.amin(lats) - expand_ext maxLat = np.amax(lats) + expand_ext minLon = np.amin(lons) - expand_ext maxLon = np.amax(lons) + expand_ext bbox = (minLon, maxLon, minLat, maxLat) return bbox def xr_summary(self, ds): """ Prints a summary of the netcdf (global attributes, variables, etc) :param ds: :return: """ print("\n========== Global attributes =========") for name in ds.attrs: print(F"{name} = {getattr(ds, name)}") print("\n========== Dimensions =========") for name in ds.dims: print(F"{name}: {ds[name].shape}") print("\n========== Coordinates =========") for name in ds.coords: print(F"{name}: {ds[name].shape}") print("\n========== Variables =========") for cur_variable_name in ds.variables: cur_var = ds[cur_variable_name] print(F"{cur_variable_name}: {cur_var.dims} {cur_var.shape}") def nc_summary(self, ds): """ Prints a summary of the netcdf (global attributes, variables, etc) :param ds: :return: """ print("\n========== Global attributes =========") for name in ds.ncattrs(): print(F"{name} = {getattr(ds, name)}") print("\n========== Variables =========") netCDFvars = ds.variables for cur_variable_name in netCDFvars.keys(): cur_var = ds.variables[cur_variable_name] print(F"Dimensions for {cur_variable_name}: {cur_var.dimensions} {cur_var.shape}") def add_roads(self, ax): # Names come from: https://www.naturalearthdata.com/features/ # -- Add states roads = cfeature.NaturalEarthFeature( category='cultural', name='roads', scale='10m', facecolor='none') ax.add_feature(roads, edgecolor='black') return ax def add_states(self, ax): # Names come from: https://www.naturalearthdata.com/features/ # -- Add states states_provinces = cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', 
scale='50m', facecolor='none') ax.add_feature(states_provinces, edgecolor='gray') return ax def plot_scatter_data(self, lats=None, lons=None, bbox=None, s=1, c='blue', cmap='plasma', title=''): ''' This function plots points in a map :param bbox: :return: ''' if bbox is None: bbox = (-180, 180, -90, 90) if lats is None: lats = self.lats if lons is None: lons = self.lons fig, ax = plt.subplots(1, 1, figsize=(self._figsize, self._figsize), subplot_kw={'projection': ccrs.PlateCarree()}) ax.set_extent(bbox) # If we do not set this, it will cropp it to the limits of the locations ax.gridlines() im = ax.scatter(lons, lats, s=s, c=c, cmap=cmap) fig.colorbar(im, ax=ax, shrink=0.7) ax.coastlines() plt.title(title) plt.show() def plot_3d_data_npdict(self, np_variables:list, var_names:list, z_levels= [], title='', file_name_prefix='', cmap=None, z_names = [], show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): """ Plots multiple z_levels for multiple fields. It uses rows for each depth, and columns for each variable """ create_folder(self._output_folder) orig_cmap = cmap # If the user do not requires any z-leve, then all are plotted if len(z_levels) == 0: z_levels = range(np_variables[var_names[0]].shape[0]) cols = np.min((self._max_imgs_per_row, len(var_names))) if cols == len(var_names): rows = len(z_levels) else: rows = int(len(z_levels) * np.ceil(len(var_names)/cols)) fig, _axs = plt.subplots(rows, cols, figsize=self.get_proper_size(rows, cols), subplot_kw={'projection': self._projection}) for c_zlevel, c_slice in enumerate(z_levels): # Iterates over the z-levels # Verify the index of the z_levels are the original ones. if len(z_names) != 0: c_slice_txt = z_names[c_slice] else: c_slice_txt = c_slice c_mincbar = np.nan c_maxcbar = np.nan for idx_var, c_var in enumerate(var_names): # Iterate over the fields if rows*cols == 1: # Single figure ax = _axs else: ax = _axs.flatten()[c_zlevel*len(var_names) + idx_var] # Here we chose the min and max colorbars for each field if not(np.all(np.isnan(mincbar))): if type(mincbar) is list: c_mincbar = mincbar[idx_var] else: c_mincbar = mincbar if not(np.all(np.isnan(maxcbar))): if type(mincbar) is list: c_maxcbar = maxcbar[idx_var] else: c_maxcbar = maxcbar # By default we select the colorbar from the name of the variable if self._auto_colormap and orig_cmap is None: cmap = select_colormap(c_var) else: # If there is an array of colormaps we select the one for this field if type(orig_cmap) is list: cmap = orig_cmap[idx_var] else: # If it is just one cmap, then we use it for all the fields cmap = orig_cmap im = self.plot_slice_eoa(np_variables[c_var][c_slice,:,:], ax, cmap=cmap, mode=plot_mode, mincbar=c_mincbar, maxcbar=c_maxcbar) if self._show_var_names: c_title = F'{var_names[idx_var]} {title}' else: c_title = F'{title}' if len(z_levels) > 1: c_title += F"Z - level: {c_slice_txt}" ax.set_title(c_title, fontsize=self._font_size) self.add_colorbar(fig, im, ax, show_color_bar) plt.tight_layout(pad=.5) file_name = F'{file_name_prefix}' pylab.savefig(join(self._output_folder, F'{file_name}.png'), bbox_inches='tight') self._close_figure() def plot_2d_data_xr(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap='viridis', show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper function to receive raw 2D numpy data. 
It calls the 'main' function for 3D plotting :param np_variables: :param var_names: :param title: :param file_name_prefix: :param cmap: :param flip_data: :param rot_90: :param show_color_bar: :param plot_mode: :param mincbar: :param maxcbar: :return: ''' npdict_3d = {} for i, field_name in enumerate(var_names): npdict_3d[field_name] = np.expand_dims(np_variables[field_name], axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title, file_name_prefix=file_name_prefix, cmap=cmap, z_names = [], show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def plot_2d_data_np(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap=None, flip_data=False, rot_90=False, show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper function to receive raw 2D numpy data. It calls the 'main' function for 3D plotting :param np_variables: Numpy variables. They can be with shape [fields, x, y] or just a single field with shape [x,y] :param var_names: :param title: :param file_name_prefix: :param cmap: :param flip_data: :param rot_90: :param show_color_bar: :param plot_mode: :param mincbar: :param maxcbar: :return: ''' npdict_3d = {} for i, field_name in enumerate(var_names): if len(np_variables.shape) == 3: c_np_data = np_variables[i, :, :] else: c_np_data = np_variables # Single field if rot_90: c_np_data = np.rot90(c_np_data) if flip_data: c_np_data = np.flip(np.flip(c_np_data), axis=1) npdict_3d[field_name] = np.expand_dims(c_np_data, axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title, file_name_prefix=file_name_prefix, cmap=cmap, z_names = [], show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def make_video_from_images(self, input_folder, output_file, fps=24): files = listdir(input_folder) files.sort() print(F"Generating video file: {output_file}") out_video = -1 for i, file_name in enumerate(files[0:36]): if i % 10 == 0: print(F"Adding file # {i}: {file_name}") c_file = join(input_folder, file_name) im = Image.open(c_file) np_im = np.asarray(im)[:, :, :3] if i == 0: video_size = (np_im.shape[1], np_im.shape[0]) out_video = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'mp4v'), fps, video_size, True) out_video.write(np_im[:, :, ::-1]) out_video.release() cv2.destroyAllWindows() print("Done! yeah babe!")
2.109375
2
ade20kScripts/setup.py
fcendra/PSPnet18
1
1704
from os import listdir
from os.path import isfile, join

from path import Path
import numpy as np
import cv2

# Dataset path
target_path = Path('target/')
annotation_images_path = Path('dataset/ade20k/annotations/training/').abspath()

dataset = [f for f in listdir(annotation_images_path) if isfile(join(annotation_images_path, f))]
images = np.empty(len(dataset), dtype=object)
count = 1

# Iterate over all training images
for n in range(0, len(dataset)):
    # Read image
    images[n] = cv2.imread(join(annotation_images_path, dataset[n]))
    # Convert it to an array
    array = np.asarray(images[n], dtype=np.int8)
    # Where the value is less than 1, change it to 255; otherwise decrement it by 1
    arr = np.where(array < 1, 255, array - 1)
    # Save it to another file
    if count < 10:
        cv2.imwrite(target_path + 'ADE_train_0000000' + str(count) + ".png", arr)
    elif count < 100 and count > 9:
        cv2.imwrite(target_path + 'ADE_train_000000' + str(count) + ".png", arr)
    elif count < 1000 and count > 99:
        cv2.imwrite(target_path + 'ADE_train_00000' + str(count) + ".png", arr)
    elif count < 10000 and count > 999:
        cv2.imwrite(target_path + 'ADE_train_0000' + str(count) + ".png", arr)
    else:
        cv2.imwrite(target_path + 'ADE_train_000' + str(count) + ".png", arr)
    print(str(count) + ".png is printed")
    count += 1
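The if/elif ladder builds an 8-digit zero-padded index by hand. A compact equivalent, as a sketch (it assumes counts stay below 10**8, matching the branches above):

file_name = "ADE_train_{:08d}.png".format(count)  # same 8-digit padding as the branches above
cv2.imwrite(target_path + file_name, arr)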
2.640625
3
src/mesh/azext_mesh/servicefabricmesh/mgmt/servicefabricmesh/models/__init__.py
Mannan2812/azure-cli-extensions
207
1705
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .available_operation_display import AvailableOperationDisplay from .error_details_model import ErrorDetailsModel from .error_error_model import ErrorErrorModel from .error_model import ErrorModel, ErrorModelException from .operation_result import OperationResult from .provisioned_resource_properties import ProvisionedResourceProperties from .proxy_resource import ProxyResource from .managed_proxy_resource import ManagedProxyResource from .resource import Resource from .tracked_resource import TrackedResource from .secret_resource_properties import SecretResourceProperties from .inlined_value_secret_resource_properties import InlinedValueSecretResourceProperties from .secret_resource_properties_base import SecretResourcePropertiesBase from .secret_resource_description import SecretResourceDescription from .secret_value import SecretValue from .secret_value_properties import SecretValueProperties from .secret_value_resource_description import SecretValueResourceDescription from .volume_provider_parameters_azure_file import VolumeProviderParametersAzureFile from .volume_properties import VolumeProperties from .volume_reference import VolumeReference from .application_scoped_volume_creation_parameters import ApplicationScopedVolumeCreationParameters from .application_scoped_volume import ApplicationScopedVolume from .application_scoped_volume_creation_parameters_service_fabric_volume_disk import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk from .volume_resource_description import VolumeResourceDescription from .network_resource_properties import NetworkResourceProperties from .local_network_resource_properties import LocalNetworkResourceProperties from .endpoint_ref import EndpointRef from .network_ref import NetworkRef from .network_resource_properties_base import NetworkResourcePropertiesBase from .network_resource_description import NetworkResourceDescription from .gateway_destination import GatewayDestination from .tcp_config import TcpConfig from .http_route_match_path import HttpRouteMatchPath from .http_route_match_header import HttpRouteMatchHeader from .http_route_match_rule import HttpRouteMatchRule from .http_route_config import HttpRouteConfig from .http_host_config import HttpHostConfig from .http_config import HttpConfig from .gateway_properties import GatewayProperties from .gateway_resource_description import GatewayResourceDescription from .image_registry_credential import ImageRegistryCredential from .environment_variable import EnvironmentVariable from .setting import Setting from .container_label import ContainerLabel from .endpoint_properties import EndpointProperties from .resource_requests import ResourceRequests from .resource_limits import ResourceLimits from .resource_requirements import ResourceRequirements from .diagnostics_ref import DiagnosticsRef from .reliable_collections_ref import ReliableCollectionsRef from .container_state import ContainerState from .container_event import ContainerEvent from .container_instance_view import ContainerInstanceView from 
.container_code_package_properties import ContainerCodePackageProperties from .auto_scaling_trigger import AutoScalingTrigger from .auto_scaling_mechanism import AutoScalingMechanism from .auto_scaling_policy import AutoScalingPolicy from .service_resource_description import ServiceResourceDescription from .diagnostics_sink_properties import DiagnosticsSinkProperties from .diagnostics_description import DiagnosticsDescription from .application_properties import ApplicationProperties from .azure_internal_monitoring_pipeline_sink_description import AzureInternalMonitoringPipelineSinkDescription from .application_resource_description import ApplicationResourceDescription from .add_remove_replica_scaling_mechanism import AddRemoveReplicaScalingMechanism from .auto_scaling_metric import AutoScalingMetric from .auto_scaling_resource_metric import AutoScalingResourceMetric from .service_properties import ServiceProperties from .service_replica_properties import ServiceReplicaProperties from .service_replica_description import ServiceReplicaDescription from .average_load_scaling_trigger import AverageLoadScalingTrigger from .container_logs import ContainerLogs from .operation_result_paged import OperationResultPaged from .secret_resource_description_paged import SecretResourceDescriptionPaged from .secret_value_resource_description_paged import SecretValueResourceDescriptionPaged from .volume_resource_description_paged import VolumeResourceDescriptionPaged from .network_resource_description_paged import NetworkResourceDescriptionPaged from .gateway_resource_description_paged import GatewayResourceDescriptionPaged from .application_resource_description_paged import ApplicationResourceDescriptionPaged from .service_resource_description_paged import ServiceResourceDescriptionPaged from .service_replica_description_paged import ServiceReplicaDescriptionPaged from .service_fabric_mesh_management_client_enums import ( ResourceStatus, HealthState, SecretKind, VolumeProvider, SizeTypes, ApplicationScopedVolumeKind, NetworkKind, HeaderMatchType, OperatingSystemType, DiagnosticsSinkKind, AutoScalingMechanismKind, AutoScalingMetricKind, AutoScalingResourceMetricName, AutoScalingTriggerKind, ) __all__ = [ 'AvailableOperationDisplay', 'ErrorDetailsModel', 'ErrorErrorModel', 'ErrorModel', 'ErrorModelException', 'OperationResult', 'ProvisionedResourceProperties', 'ProxyResource', 'ManagedProxyResource', 'Resource', 'TrackedResource', 'SecretResourceProperties', 'InlinedValueSecretResourceProperties', 'SecretResourcePropertiesBase', 'SecretResourceDescription', 'SecretValue', 'SecretValueProperties', 'SecretValueResourceDescription', 'VolumeProviderParametersAzureFile', 'VolumeProperties', 'VolumeReference', 'ApplicationScopedVolumeCreationParameters', 'ApplicationScopedVolume', 'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk', 'VolumeResourceDescription', 'NetworkResourceProperties', 'LocalNetworkResourceProperties', 'EndpointRef', 'NetworkRef', 'NetworkResourcePropertiesBase', 'NetworkResourceDescription', 'GatewayDestination', 'TcpConfig', 'HttpRouteMatchPath', 'HttpRouteMatchHeader', 'HttpRouteMatchRule', 'HttpRouteConfig', 'HttpHostConfig', 'HttpConfig', 'GatewayProperties', 'GatewayResourceDescription', 'ImageRegistryCredential', 'EnvironmentVariable', 'Setting', 'ContainerLabel', 'EndpointProperties', 'ResourceRequests', 'ResourceLimits', 'ResourceRequirements', 'DiagnosticsRef', 'ReliableCollectionsRef', 'ContainerState', 'ContainerEvent', 'ContainerInstanceView', 
'ContainerCodePackageProperties', 'AutoScalingTrigger', 'AutoScalingMechanism', 'AutoScalingPolicy', 'ServiceResourceDescription', 'DiagnosticsSinkProperties', 'DiagnosticsDescription', 'ApplicationProperties', 'AzureInternalMonitoringPipelineSinkDescription', 'ApplicationResourceDescription', 'AddRemoveReplicaScalingMechanism', 'AutoScalingMetric', 'AutoScalingResourceMetric', 'ServiceProperties', 'ServiceReplicaProperties', 'ServiceReplicaDescription', 'AverageLoadScalingTrigger', 'ContainerLogs', 'OperationResultPaged', 'SecretResourceDescriptionPaged', 'SecretValueResourceDescriptionPaged', 'VolumeResourceDescriptionPaged', 'NetworkResourceDescriptionPaged', 'GatewayResourceDescriptionPaged', 'ApplicationResourceDescriptionPaged', 'ServiceResourceDescriptionPaged', 'ServiceReplicaDescriptionPaged', 'ResourceStatus', 'HealthState', 'SecretKind', 'VolumeProvider', 'SizeTypes', 'ApplicationScopedVolumeKind', 'NetworkKind', 'HeaderMatchType', 'OperatingSystemType', 'DiagnosticsSinkKind', 'AutoScalingMechanismKind', 'AutoScalingMetricKind', 'AutoScalingResourceMetricName', 'AutoScalingTriggerKind', ]
1.09375
1
Core/managers/InputPeripherals.py
Scoppio/Rogue-EVE
2
1706
<reponame>Scoppio/Rogue-EVE
import logging

from models.GenericObjects import Vector2

logger = logging.getLogger('Rogue-EVE')


class MouseController(object):
    """ Mouse controller needs the map, get over it """
    def __init__(self, map=None, object_pool=None):
        self.mouse_coord = (0, 0)
        self.map = map
        self.object_pool = object_pool
        self.camera = None

    def set_map(self, map):
        self.map = map

    def set_object_pool(self, object_pool):
        self.object_pool = object_pool

    def get_mouse_coord(self):
        return self.mouse_coord

    def set_mouse_coord(self, new_coord):
        self.mouse_coord = new_coord
        logger.debug("mouse position {}".format(self.mouse_coord))

    def get_names_under_mouse(self):
        # return a string with the names of all objects under the mouse
        (x, y) = self.camera.camera_coord + Vector2(*self.mouse_coord)

        # create a list with the names of all objects at the mouse's coordinates and in FOV
        objects = self.object_pool.get_objects_as_list()
        names = ""

        if self.map and self.object_pool:
            if objects and self.map:
                names = [obj.name for obj in objects
                         if obj.coord.X == x and obj.coord.Y == y
                         and (x, y) in self.map.get_visible_tiles()]
                names = ', '.join(names)  # join the names, separated by commas
        else:
            logger.warning("map or object pool not initialized!")

        return names.capitalize()
2.828125
3
the_unsync/thesync.py
vromanuk/async_techniques
0
1707
<gh_stars>0
from unsync import unsync
import asyncio
import datetime
import math

import aiohttp
import requests


def main():
    t0 = datetime.datetime.now()

    tasks = [
        compute_some(),
        compute_some(),
        compute_some(),
        download_some(),
        download_some(),
        download_some(),
        download_some_more(),
        download_some_more(),
        wait_some(),
        wait_some(),
        wait_some(),
        wait_some()]

    [t.result() for t in tasks]

    dt = datetime.datetime.now() - t0
    print('Unsync version done in {:,.2f} seconds.'.format(dt.total_seconds()))


@unsync(cpu_bound=True)
def compute_some():
    print('Computing...')
    for _ in range(1, 10_000_000):
        math.sqrt(25 ** 25 + .01)


@unsync()
async def download_some():
    print('Downloading...')
    url = 'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2'
    async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session:
        async with session.get(url) as resp:
            resp.raise_for_status()
            text = await resp.text()

    print('Downloaded (more) {:,} characters'.format(len(text)))


@unsync()
def download_some_more():
    print('Downloading more...')
    url = 'https://pythonbytes.fm./episodes/show/92/will-your-python-be-compiled'
    resp = requests.get(url)
    resp.raise_for_status()
    text = resp.text

    print('Downloaded (more) {:,} characters'.format(len(text)))


@unsync()
async def wait_some():
    print('Waiting...')
    for _ in range(1, 1000):
        await asyncio.sleep(.001)


if __name__ == '__main__':
    main()
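The pattern above boils down to decorating callables and collecting `.result()` later. A self-contained minimal sketch (function name and sleep time are illustrative, not from the file above):

from unsync import unsync
import asyncio

@unsync()
async def double(n):
    await asyncio.sleep(0.1)   # stand-in for real async work
    return n * 2

futures = [double(i) for i in range(3)]   # each call starts running on unsync's event loop
print([f.result() for f in futures])      # [0, 2, 4]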
2.78125
3
sdk/python/pulumi_azure/desktopvirtualization/workspace.py
henriktao/pulumi-azure
109
1708
<filename>sdk/python/pulumi_azure/desktopvirtualization/workspace.py # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = ['WorkspaceArgs', 'Workspace'] @pulumi.input_type class WorkspaceArgs: def __init__(__self__, *, resource_group_name: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ The set of arguments for constructing a Workspace resource. :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Virtual Desktop Workspace. Changing the resource group name forces a new resource to be created. :param pulumi.Input[str] description: A description for the Virtual Desktop Workspace. :param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace. :param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created. :param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name forces a new resource to be created. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. """ pulumi.set(__self__, "resource_group_name", resource_group_name) if description is not None: pulumi.set(__self__, "description", description) if friendly_name is not None: pulumi.set(__self__, "friendly_name", friendly_name) if location is not None: pulumi.set(__self__, "location", location) if name is not None: pulumi.set(__self__, "name", name) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The name of the resource group in which to create the Virtual Desktop Workspace. Changing the resource group name forces a new resource to be created. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ A description for the Virtual Desktop Workspace. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="friendlyName") def friendly_name(self) -> Optional[pulumi.Input[str]]: """ A friendly name for the Virtual Desktop Workspace. """ return pulumi.get(self, "friendly_name") @friendly_name.setter def friendly_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "friendly_name", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created. 
""" return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the Virtual Desktop Workspace. Changing the name forces a new resource to be created. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ A mapping of tags to assign to the resource. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @pulumi.input_type class _WorkspaceState: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ Input properties used for looking up and filtering Workspace resources. :param pulumi.Input[str] description: A description for the Virtual Desktop Workspace. :param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace. :param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created. :param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name forces a new resource to be created. :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Virtual Desktop Workspace. Changing the resource group name forces a new resource to be created. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. """ if description is not None: pulumi.set(__self__, "description", description) if friendly_name is not None: pulumi.set(__self__, "friendly_name", friendly_name) if location is not None: pulumi.set(__self__, "location", location) if name is not None: pulumi.set(__self__, "name", name) if resource_group_name is not None: pulumi.set(__self__, "resource_group_name", resource_group_name) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ A description for the Virtual Desktop Workspace. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="friendlyName") def friendly_name(self) -> Optional[pulumi.Input[str]]: """ A friendly name for the Virtual Desktop Workspace. """ return pulumi.get(self, "friendly_name") @friendly_name.setter def friendly_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "friendly_name", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created. """ return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the Virtual Desktop Workspace. 
Changing the name forces a new resource to be created. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> Optional[pulumi.Input[str]]: """ The name of the resource group in which to create the Virtual Desktop Workspace. Changing the resource group name forces a new resource to be created. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ A mapping of tags to assign to the resource. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) class Workspace(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): """ Manages a Virtual Desktop Workspace. ## Example Usage ```python import pulumi import pulumi_azure as azure example = azure.core.ResourceGroup("example", location="West Europe") workspace = azure.desktopvirtualization.Workspace("workspace", location=example.location, resource_group_name=example.name, friendly_name="FriendlyName", description="A description of my workspace") ``` ## Import Virtual Desktop Workspaces can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: A description for the Virtual Desktop Workspace. :param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace. :param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created. :param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name forces a new resource to be created. :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Virtual Desktop Workspace. Changing the resource group name forces a new resource to be created. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. """ ... @overload def __init__(__self__, resource_name: str, args: WorkspaceArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Manages a Virtual Desktop Workspace. 
## Example Usage ```python import pulumi import pulumi_azure as azure example = azure.core.ResourceGroup("example", location="West Europe") workspace = azure.desktopvirtualization.Workspace("workspace", location=example.location, resource_group_name=example.name, friendly_name="FriendlyName", description="A description of my workspace") ``` ## Import Virtual Desktop Workspaces can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace ``` :param str resource_name: The name of the resource. :param WorkspaceArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = WorkspaceArgs.__new__(WorkspaceArgs) __props__.__dict__["description"] = description __props__.__dict__["friendly_name"] = friendly_name __props__.__dict__["location"] = location __props__.__dict__["name"] = name if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["tags"] = tags super(Workspace, __self__).__init__( 'azure:desktopvirtualization/workspace:Workspace', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Workspace': """ Get an existing Workspace resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: A description for the Virtual Desktop Workspace. :param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace. 
:param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created. :param pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing the name forces a new resource to be created. :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Virtual Desktop Workspace. Changing the resource group name forces a new resource to be created. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _WorkspaceState.__new__(_WorkspaceState) __props__.__dict__["description"] = description __props__.__dict__["friendly_name"] = friendly_name __props__.__dict__["location"] = location __props__.__dict__["name"] = name __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["tags"] = tags return Workspace(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: """ A description for the Virtual Desktop Workspace. """ return pulumi.get(self, "description") @property @pulumi.getter(name="friendlyName") def friendly_name(self) -> pulumi.Output[Optional[str]]: """ A friendly name for the Virtual Desktop Workspace. """ return pulumi.get(self, "friendly_name") @property @pulumi.getter def location(self) -> pulumi.Output[str]: """ The location/region where the Virtual Desktop Workspace is located. Changing the location/region forces a new resource to be created. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the Virtual Desktop Workspace. Changing the name forces a new resource to be created. """ return pulumi.get(self, "name") @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Output[str]: """ The name of the resource group in which to create the Virtual Desktop Workspace. Changing the resource group name forces a new resource to be created. """ return pulumi.get(self, "resource_group_name") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ A mapping of tags to assign to the resource. """ return pulumi.get(self, "tags")
1.984375
2
jupyanno/sheets.py
betatim/jupyanno
23
1709
<reponame>betatim/jupyanno
"""Code for reading and writing results to google sheets"""
from bs4 import BeautifulSoup
import requests
import warnings
import json
import pandas as pd
from six.moves.urllib.parse import urlparse, parse_qs
from six.moves.urllib.request import urlopen

_CELLSET_ID = "AIzaSyC8Zo-9EbXgHfqNzDxVb_YS_IIZBWtvoJ4"


def get_task_sheet(in_task):
    return get_sheet_as_df(sheet_api_url(in_task.sheet_id), _CELLSET_ID)


def get_sheet_as_df(base_url, kk, columns="A:AG"):
    """
    Gets the sheet as a list of Dicts (directly importable to Pandas)
    :return:
    """
    try:
        # TODO: we should probably get the whole sheet
        all_vals = "{base_url}/{cols}?key={kk}".format(base_url=base_url,
                                                       cols=columns, kk=kk)
        t_data = json.loads(urlopen(all_vals).read().decode('latin1'))['values']
        frow = t_data.pop(0)
        return pd.DataFrame([
            dict([(key, '' if idx >= len(irow) else irow[idx])
                  for idx, key in enumerate(frow)])
            for irow in t_data])
    except IOError as e:
        warnings.warn(
            'Sheet could not be accessed, check internet connectivity, '
            'proxies and permissions: {}'.format(e))
        return pd.DataFrame([{}])


def sheet_api_url(sheet_id):
    return "https://sheets.googleapis.com/v4/spreadsheets/{id}/values".format(id=sheet_id)


def get_questions(in_url):
    res = urlopen(in_url)
    soup = BeautifulSoup(res.read(), 'html.parser')

    def get_names(f):
        return [v for k, v in f.attrs.items() if 'label' in k]

    def get_name(f):
        return get_names(f)[0] if len(get_names(f)) > 0 else 'unknown'

    all_questions = soup.form.findChildren(
        attrs={'name': lambda x: x and x.startswith('entry.')})
    return {get_name(q): q['name'] for q in all_questions}


def submit_response(form_url, cur_questions, verbose=False, **answers):
    submit_url = form_url.replace('/viewform', '/formResponse')
    form_data = {'draftResponse': [], 'pageHistory': 0}
    for v in cur_questions.values():
        form_data[v] = ''
    for k, v in answers.items():
        if k in cur_questions:
            form_data[cur_questions[k]] = v
        else:
            warnings.warn('Unknown Question: {}'.format(k), RuntimeWarning)
    if verbose:
        print(form_data)
    user_agent = {'Referer': form_url,
                  'User-Agent': "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537"
                                ".36 (KHTML, like Gecko) Chrome/28.0.1500.52 Safari/537.36"}
    return requests.post(submit_url, data=form_data, headers=user_agent)
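Typical read usage, as a sketch; the spreadsheet ID and API key below are placeholders the caller must supply, not values from the module above:

sheet_id = "1AbC_hypothetical_sheet_id"   # placeholder spreadsheet ID
api_key = "YOUR_API_KEY"                  # placeholder API key
df = get_sheet_as_df(sheet_api_url(sheet_id), api_key, columns="A:C")
print(df.head())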
3.125
3
sorting/python/max_heap.py
zhou7rui/algorithm
6
1710
# -*- coding: utf-8 -*-
'''
Max-heap implementation.

Example heap, level by level:
    98
    96 84
    92 82 78 47
    33 26 51 85 50 15 44 60
    40 51 98 51 7 17 94 82 32 21 64 60 7 44 63 63
'''
import random


class Maxheap(object):
    def __init__(self, cpacity, arr=None):
        self.data = [None] * (cpacity + 1)
        self.cpacity = cpacity
        if arr is None:
            self.count = 0
        else:
            for i in range(0, cpacity):
                self.data[i + 1] = arr[i]
            self.count = cpacity
            # heapify: sift down every internal node (integer division for Python 3)
            for i in range(self.count // 2, 0, -1):
                self.__shifDown(i)

    def size(self):
        return self.count

    def isEmpty(self):
        return self.count == 0

    def __shiftUp(self, k):
        while k > 1 and self.data[k] > self.data[int(k / 2)]:
            self.data[k], self.data[int(k / 2)] = self.data[int(k / 2)], self.data[k]
            k = int(k / 2)

    def insert(self, data):
        self.data[self.count + 1] = data
        self.count += 1
        self.__shiftUp(self.count)

    def __shifDown(self, k):
        while k * 2 <= self.count:
            j = k * 2
            if self.count >= j + 1 and self.data[j + 1] > self.data[j]:
                j += 1
            if self.data[k] > self.data[j]:
                break
            self.data[k], self.data[j] = self.data[j], self.data[k]
            k = j

    def extractMax(self):
        ret = self.data[1]
        self.data[1], self.data[self.count] = self.data[self.count], self.data[1]
        self.count -= 1
        self.__shifDown(1)
        return ret


if __name__ == '__main__':
    N = 31
    M = 100
    heap = Maxheap(N)
    for i in range(0, N):
        k = random.randint(1, M)
        heap.insert(k)
    # arr = [random.randint(1, M) for i in range(N)]
    # heap = Maxheap(len(arr), arr)
    print(heap.size())
    print(heap.data)
    print(heap.extractMax())
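Because `extractMax` always returns the current root, repeatedly draining the heap yields a descending sort. A small sketch using the class above:

heap = Maxheap(5)
for value in [3, 1, 4, 1, 5]:
    heap.insert(value)
print([heap.extractMax() for _ in range(heap.size())])  # [5, 4, 3, 1, 1]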
3.140625
3
ink2canvas/svg/Use.py
greipfrut/pdftohtml5canvas
4
1711
from ink2canvas.svg.AbstractShape import AbstractShape


class Use(AbstractShape):
    def drawClone(self):
        drawables = self.rootTree.getDrawable()
        OriginName = self.getCloneId()
        OriginObject = self.rootTree.searchElementById(OriginName, drawables)
        OriginObject.runDraw()

    def draw(self, isClip=False):
        if self.hasTransform():
            transMatrix = self.getTransform()
            self.canvasContext.transform(*transMatrix)
        self.drawClone()

    def getCloneId(self):
        return self.attr("href", "xlink")[1:]
2.8125
3
docs/source/tutorial/code/read_csv.py
HanSooLim/DIL-Project
2
1712
<filename>docs/source/tutorial/code/read_csv.py
import pandas

datas = pandas.read_csv("../../Sample/example_dataset.csv", index_col=0)
print(datas)
3.140625
3
app.py
rghose/lol3
0
1713
<filename>app.py<gh_stars>0
from flask import *

app = Flask(__name__)

import botty


# ----------------------------------
@app.route("/", methods=['GET', 'POST'])
def hello():
    if request.method == 'POST':
        data = request.form["query"]
        return render_template("index.html", data=data)
    return render_template("main.html")
# -----------------------------------


# -----------------------------------
@app.route("/request", methods=['POST'])
def respond():
    data = request.form["data"]
    return botty.botty_get_response(data)
# -----------------------------------


if __name__ == "__main__":
    app.debug = True
    app.run(host="0.0.0.0")
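A client-side sketch of calling the `/request` endpoint once the app is running; the host and port assume Flask's defaults and are not specified in the file above:

import requests

resp = requests.post("http://127.0.0.1:5000/request", data={"data": "hello"})
print(resp.text)   # whatever botty.botty_get_response returns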
2.59375
3
config.py
metarom-quality/gooseberry
0
1714
<filename>config.py<gh_stars>0
#!/usr/bin/env python3
import os

DATABASE = "/home/tomate/Warehouse/syte/meta.db"
XLSDIR = "/mnt/c/Users/Natacha/Documents/TempDocs/progen/Formula/"

temp = [i for i in next(os.walk(XLSDIR))[2] if i.endswith("xlsx") or i.endswith("xls")]
flist = {}

for i in temp:
    name = i.split(" ")[0].split("-")[0].split(".")[0]
    if name.startswith("~") or name.startswith("PR") or name.startswith("FAB"):
        continue
    else:
        flist[name] = i
2.28125
2
setup.py
markostrajkov/range-requests-proxy
1
1715
<gh_stars>1-10
#!/usr/bin/env python

import sys

from setuptools import setup
from setuptools.command.test import test as TestCommand


class PyTest(TestCommand):
    user_options = [('pytest-args=', 'a', "Arguments to pass into py.test")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.pytest_args = []

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        import pytest
        errno = pytest.main(self.pytest_args)
        sys.exit(errno)


setup(
    name='range-requests-proxy',
    version='0.1',
    description='Asynchronous HTTP proxy for HTTP Range Requests',
    author='<NAME>',
    author_email='<EMAIL>',
    cmdclass={'test': PyTest},
    tests_require=['pytest>=2.8.0', 'mock==2.0.0'],
    install_requires=['tornado==4.4.1', 'pycurl==7.43.0'],
    packages=['rangerequestsproxy'],
    license='BSD',
    url='https://github.com/markostrajkov/range-requests-proxy',
)
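With the `cmdclass` hook above, `python setup.py test` ends up doing roughly the following (a sketch; any extra flags would arrive through the `--pytest-args` option):

import sys
import pytest

sys.exit(pytest.main([]))   # run the test suite and propagate pytest's exit code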
1.992188
2
tests/pytorch_pfn_extras_tests/onnx/test_load_model.py
kmaehashi/pytorch-pfn-extras
243
1716
<reponame>kmaehashi/pytorch-pfn-extras<gh_stars>100-1000
import os

import pytest
import torch

import pytorch_pfn_extras.onnx as tou
from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import Net


@pytest.mark.filterwarnings("ignore:Named tensors .* experimental:UserWarning")
def test_onnx_load_model():
    model = Net()
    outdir = "out/load_model_test"
    tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir,
                        training=True, do_constant_folding=False)
    tou.load_model(os.path.join(outdir, "model.onnx"))


@pytest.mark.filterwarnings("ignore:.*ONNX contains stripped .*:UserWarning")
def test_stripped_onnx_load_model():
    model = Net()
    outdir = "out/stripped_load_model_test"
    tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir,
                        strip_large_tensor_data=True, training=True,
                        do_constant_folding=False)
    tou.load_model(os.path.join(outdir, "model.onnx"))
1.992188
2
validate/v1/base.py
huzidabanzhang/Python
4
1717
<reponame>huzidabanzhang/Python<gh_stars>1-10
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
'''
@Description: Database validator
@Author: Zpp
@Date: 2020-05-28 13:44:29
@LastEditors: Zpp
@LastEditTime: 2020-05-28 14:02:02
'''

params = {
    # Fields to validate
    'fields': {
        'type': {
            'name': 'Export type',
            'type': 'int',
            'between': [1, 2, 3],
            'required': True
        },
        'document': {
            'name': 'Database file',
            'type': 'file',
            'required': True,
            'msg': 'Please select a database file to upload'
        },
        'admin_id': {
            'name': 'Administrator ID',
            'type': 'str',
            'required': True
        },
        'time': {
            'name': 'Query time',
            'type': 'str',
            'required': True
        }
    },
    # Export the database
    'Export': ['type'],
    # Import the database
    'Import': ['document'],
    # Clear on home-page login
    'Login': ['admin_id', 'time']
}
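A minimal sketch of how a hypothetical checker might consume this spec; the project's real validator is not shown here, and only the 'required' flag is enforced in this sketch:

def check(payload, action):
    # `action` is one of the rule-set keys, e.g. 'Export' or 'Login'
    for field in params[action]:
        rule = params['fields'][field]
        if rule.get('required') and field not in payload:
            raise ValueError(rule.get('msg', '{} is required'.format(rule['name'])))

check({'type': 1}, 'Export')   # passes; check({}, 'Export') would raise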
1.679688
2
example/speech_recognition/stt_layer_slice.py
axbaretto/mxnet
92
1718
import mxnet as mx


def slice_symbol_to_seq_symobls(net, seq_len, axis=1, squeeze_axis=True):
    net = mx.sym.SliceChannel(data=net, num_outputs=seq_len, axis=axis,
                              squeeze_axis=squeeze_axis)
    hidden_all = []
    for seq_index in range(seq_len):
        hidden_all.append(net[seq_index])
    net = hidden_all
    return net
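Usage sketch, assuming a 3-D input laid out as (batch, seq_len, features); the symbol name and sequence length are illustrative:

data = mx.sym.Variable('data')
steps = slice_symbol_to_seq_symobls(data, seq_len=5)  # list of 5 per-time-step symbols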
2.359375
2
api/auth.py
fergalmoran/dss.api
0
1719
import datetime import json from calendar import timegm from urllib.parse import parse_qsl import requests from allauth.socialaccount import models as aamodels from requests_oauthlib import OAuth1 from rest_framework import parsers, renderers from rest_framework import status from rest_framework.authtoken.models import Token from rest_framework.authtoken.serializers import AuthTokenSerializer from rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.views import APIView from rest_framework_jwt.authentication import JSONWebTokenAuthentication from rest_framework_jwt.settings import api_settings from rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler from dss import settings from spa.models import UserProfile from spa.models.socialaccountlink import SocialAccountLink def _temp_reverse_user(uid, provider, access_token, access_token_secret, payload): """ Do some magic here to find user account and deprecate psa 1. Look for account in """ user = None try: sa = SocialAccountLink.objects.get(social_id=uid) sa.type = provider sa.social_id = uid sa.access_token = access_token sa.access_token_secret = access_token_secret sa.provider_data = payload sa.save() user = UserProfile.objects.get(id=sa.user.id) except SocialAccountLink.DoesNotExist: # try allauth try: aa = aamodels.SocialAccount.objects.get(uid=uid) try: user = UserProfile.objects.get(user__id=aa.user_id) except UserProfile.DoesNotExist: print('Need to create UserProfile') # we got an allauth, create the SocialAccountLink sa = SocialAccountLink() sa.user = user sa.social_id = aa.uid sa.type = aa.provider sa.access_token = access_token sa.access_token_secret = access_token_secret sa.provider_data = payload sa.save() except aamodels.SocialAccount.DoesNotExist: print('Need to create social model') return user if user else None class SocialLoginHandler(APIView): """View to authenticate users through social media.""" permission_classes = (AllowAny,) def post(self, request): uid = None backend = request.query_params.get('backend') user = None if backend in ['twitter']: request_token_url = 'https://api.twitter.com/oauth/request_token' access_token_url = 'https://api.twitter.com/oauth/access_token' access_token = "" access_token_secret = "" if request.data.get('oauth_token') and request.data.get('oauth_verifier'): auth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, resource_owner_key=request.data.get('oauth_token'), verifier=request.data.get('oauth_verifier')) r = requests.post(access_token_url, auth=auth) profile = dict(parse_qsl(r.text)) payload = json.dumps(profile) uid = profile.get('user_id') access_token = profile.get('oauth_token') access_token_secret = profile.get('oauth_token_secret') user = _temp_reverse_user(uid, 'twitter', access_token, access_token_secret, payload) else: oauth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, callback_uri=settings.TWITTER_CALLBACK_URL) r = requests.post(request_token_url, auth=oauth) access_token = dict(parse_qsl(r.text)) return Response(access_token) elif backend in ['facebook']: access_token_url = 'https://graph.facebook.com/v2.3/oauth/access_token' graph_api_url = 'https://graph.facebook.com/v2.3/me' access_token = "" access_token_secret = "" params = { 'client_id': request.data.get('clientId'), 'redirect_uri': request.data.get('redirectUri'), 'client_secret': settings.SOCIAL_AUTH_FACEBOOK_SECRET, 'code': request.data.get('code') } # 
Step 1. Exchange authorization code for access token. r = requests.get(access_token_url, params=params) token = json.loads(r.text) # Step 2. Retrieve information about the current user. r = requests.get(graph_api_url, params=token) profile = json.loads(r.text) access_token = token.get('access_token') uid = profile.get('id') user = _temp_reverse_user(uid, 'facebook', access_token, access_token_secret, r.text) elif backend in ['google']: access_token_url = 'https://accounts.google.com/o/oauth2/token' people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect' access_token = "" access_token_secret = "" payload = dict(client_id=request.data.get('clientId'), redirect_uri=request.data.get('redirectUri'), client_secret=settings.SOCIAL_AUTH_GOOGLE_OAUTH_SECRET, code=request.data.get('code'), grant_type='authorization_code') # Step 1. Exchange authorization code for access token. r = requests.post(access_token_url, data=payload) token = json.loads(r.text) headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])} # Step 2. Retrieve information about the current user. r = requests.get(people_api_url, headers=headers) profile = json.loads(r.text) uid = profile.get('sub') user = _temp_reverse_user(uid, 'google', access_token, access_token_secret, r.text) if uid is not None and user is not None: if not user.user.is_active: return Response({ 'status': 'Unauthorized', 'message': 'User account disabled' }, status=status.HTTP_401_UNAUTHORIZED) payload = jwt_payload_handler(user.user) if api_settings.JWT_ALLOW_REFRESH: payload['orig_iat'] = timegm( datetime.datetime.utcnow().utctimetuple() ) response_data = { 'token': jwt_encode_handler(payload), 'session': user.get_session_id() } return Response(response_data) return Response({ 'status': 'Bad request', 'message': 'Authentication could not be performed with received data.' }, status=status.HTTP_400_BAD_REQUEST) class ObtainUser(APIView): throttle_classes = () permission_classes = () parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,) renderer_classes = (renderers.JSONRenderer,) serializer_class = AuthTokenSerializer model = Token def post(self, request): return self.get(request) def get(self, request): if request.user.is_authenticated(): return Response( status=status.HTTP_200_OK, data={ 'id': request.user.id, 'name': request.user.username, 'session': request.user.userprofile.get_session_id(), 'slug': request.user.userprofile.slug, 'session': request.user.userprofile.get_session_id(), 'userRole': 'user', }) else: return Response(status=status.HTTP_401_UNAUTHORIZED)
2.25
2
bcgs/disqus_objects.py
aeturnum/bcgs
0
1720
import requests import aiohttp from constants import API_KEY class User(object): def __init__(self, author_info): # "author": { # "about": "", # "avatar": { # "cache": "//a.disquscdn.com/1519942534/images/noavatar92.png", # "isCustom": false, # "large": { # "cache": "//a.disquscdn.com/1519942534/images/noavatar92.png", # "permalink": "https://disqus.com/api/users/avatars/felix1999.jpg" # }, # "permalink": "https://disqus.com/api/users/avatars/felix1999.jpg", # "small": { # "cache": "//a.disquscdn.com/1519942534/images/noavatar32.png", # "permalink": "https://disqus.com/api/users/avatars/felix1999.jpg" # } # }, # "disable3rdPartyTrackers": false, # "id": "5472588", # "isAnonymous": false, # "isPowerContributor": false, # "isPrimary": true, # "isPrivate": true, # "joinedAt": "2010-11-20T04:45:33", # "location": "", # "name": "felix1999", # "profileUrl": "https://disqus.com/by/felix1999/", # "signedUrl": "", # "url": "", # "username": "felix1999" # }, self._basic_info = author_info self._detailed_info = None async def load(self): async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session: user_info = await session.get( 'https://disqus.com/api/3.0/users/details.json', params={'user': self.id, 'api_key': API_KEY} ) detail_json = await user_info.json() if detail_json['code'] != 0: print(f'Problem with getting user details from user {self.id}') print(detail_json) self._detailed_info = detail_json['response'] def _get_detailed_info(self): # https://disqus.com/api/3.0/users/details.json?user=137780765&api_key=<KEY> # { # "code": 0, # "response": { # "about": "", # "avatar": { # "cache": "https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551", # "isCustom": true, # "large": { # "cache": "https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551", # "permalink": "https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg" # }, # "permalink": "https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg", # "small": { # "cache": "https://c.disquscdn.com/uploads/users/13778/765/avatar32.jpg?1433896551", # "permalink": "https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg" # } # }, # "disable3rdPartyTrackers": false, # "id": "137780765", # "isAnonymous": false, # "isPowerContributor": false, # "isPrimary": true, # "isPrivate": false, # "joinedAt": "2015-01-02T18:40:14", # "location": "", # "name": "Bob", # "numFollowers": 2, # "numFollowing": 0, # "numForumsFollowing": 0, # "numLikesReceived": 8967, # "numPosts": 4147, # "profileUrl": "https://disqus.com/by/disqus_FqhLpDGmTT/", # "rep": 3.5297520000000002, # "reputation": 3.5297520000000002, # "reputationLabel": "High", # "signedUrl": "", # "url": "", # "username": "disqus_FqhLpDGmTT" # } # } print("WARNING: auto-loading user in async version of code!!!!") details = requests.get( 'https://disqus.com/api/3.0/users/details.json', {'user': self.id, 'api_key': API_KEY} ) detail_json = details.json() if detail_json['code'] != 0: print(f'Problem with getting user details from user {self.id}') print(detail_json) self._detailed_info = detail_json['response'] @property def anonymous(self): return 'id' not in self._basic_info @property def private(self): return self.anonymous or self._basic_info.get('isPrivate') @property def id(self): if self.private: return 'Private' return self._basic_info.get('id', 'Anonymous') @property def name(self): return self._basic_info.get('name') @property def username(self): return self._basic_info.get('username') @property def location(self): return 
self._basic_info.get('location') @property def joined_at(self): return self._basic_info.get('joinedAt') @property def profile_url(self): return self._basic_info.get('profileUrl') @property def total_posts(self): if self._detailed_info is None: self._get_detailed_info() return self._detailed_info.get('numPosts') @property def total_likes(self): if self._detailed_info is None: self._get_detailed_info() return self._detailed_info.get('numLikesReceived') @property def user_info_row(self): return [ self.id, self.name, self.username, self.total_posts, self.total_likes, self.location, self.joined_at, self.profile_url ]
2.453125
2
nvdbgeotricks.py
LtGlahn/estimat_gulstripe
0
1721
<gh_stars>0 """ En samling hjelpefunksjoner som bruker nvdbapiv3-funksjonene til å gjøre nyttige ting, f.eks. lagre geografiske datasett Disse hjelpefunksjonene forutsetter fungerende installasjon av geopandas, shapely og en del andre ting som må installeres separat. Noen av disse bibliotekene kunne historisk av og til være plundrete å installere, evt ha versjonskonflikter seg i mellom, spesielt på windows. Slikt plunder hører historien til (stort sett) Anbefalingen er like fullt å bruke (ana)conda installasjon i et eget "environment". Dette er god kodehygiene og sikrer minimalt med kluss, samt ikke minst: Eventuelt kluss lar seg greit reparere ved å lage nytt "enviroment", uten at det påvirker hele python-installasjonen din. """ import re import pdb from shapely import wkt # from shapely.ops import unary_union import pandas as pd import geopandas as gpd from datetime import datetime import nvdbapiv3 from apiforbindelse import apiforbindelse def nvdb2gpkg( objekttyper, filnavn='datadump', mittfilter=None, vegnett=True, vegsegmenter=False, geometri=True): """ Lagrer NVDB vegnett og angitte objekttyper til geopackage ARGUMENTS objekttyper: Liste med objekttyper du vil lagre KEYWORDS mittfilter=None : Dictionary med filter til søkeobjekt i nvdbapiv3.py, for eksempel { 'kommune' : 5001 } Samme filter brukes på både vegnett og fagdata vegnett=True : Bool, default=True. Angir om vi skal ta med data om vegnett eller ikke vegsegmenter=False : Bool, default=False. Angir om vi skal repetere objektet delt inn etter vegsegementer geometri=True : Bool, default=True. Angir om vi skal hente geometri fra egengeometri (hvis det finnes) Hvis du ønsker å presentere vegobjekt ut fra objektets stedfesting langs veg så bruker du kombinasjonen vegsegmenter=True, geometri=False RETURNS None """ if not '.gpkg' in filnavn: filnavn = filnavn + datetime.today().strftime('%Y-%m-%d') + '.gpkg' if not isinstance(objekttyper, list ): objekttyper = [ objekttyper ] for enObjTypeId in objekttyper: enObjTypeId = int( enObjTypeId ) sok = nvdbapiv3.nvdbFagdata( enObjTypeId ) if mittfilter: sok.filter( mittfilter ) stat = sok.statistikk() objtypenavn = sok.objektTypeDef['navn'] print( 'Henter', stat['antall'], 'forekomster av objekttype', sok.objektTypeId, objtypenavn ) lagnavn = 'type' + str(enObjTypeId) + '_' + nvdbapiv3.esriSikkerTekst( objtypenavn.lower() ) rec = sok.to_records( vegsegmenter=vegsegmenter, geometri=geometri ) if len( rec ) > 0: mindf = pd.DataFrame( rec ) # Må trickse litt for å unngå navnekollisjon kolonner = list( mindf.columns ) lowerkolonner = [ x.lower() for x in kolonner ] # Duplicate element indices in list # Using list comprehension + list slicing # https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/ res = [idx for idx, val in enumerate(lowerkolonner) if val in lowerkolonner[:idx]] for ii, dublett in enumerate( res): mindf.rename(columns={ mindf.columns[dublett] : kolonner[dublett] + '_' + str( ii+1 ) }, inplace=True ) mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) # må droppe kolonne vegsegmenter hvis du har vegsegmenter=False if 'vegsegmenter' in minGdf.columns: minGdf.drop( 'vegsegmenter', 1, inplace=True) minGdf.drop( 'geometri', 1, inplace=True) minGdf.to_file( filnavn, layer=lagnavn, driver="GPKG") else: print( 'Ingen forekomster av', objtypenavn, 'for filter', mittfilter) if vegnett: veg = nvdbapiv3.nvdbVegnett() if mittfilter: junk = mittfilter.pop( 'egenskap', None) junk = mittfilter.pop( 
'overlapp', None) veg.filter( mittfilter ) print( 'Henter vegnett') rec = veg.to_records() mindf = pd.DataFrame( rec) mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri', 1, inplace=True) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) minGdf.to_file( filnavn, layer='vegnett', driver="GPKG") def dumpkontraktsomr( komr = [] ): """ Dumper et har (hardkodede) kontraktsområder """ if not komr: komr = [ '9302 Haugesund 2020-2025', '9304 Bergen', '9305 Sunnfjord' ] komr = [ '9253 Agder elektro og veglys 2021-2024'] objliste = [ 540, # Trafikkmengde 105, # Fartsgrense 810, # Vinterdriftsklasse 482, # trafikkregistreringsstasjon 153, # Værstasjon 64, # Ferjeleie 39, # Rasteplass 48, # Fortau 199, # Trær 15, # Grasdekker 274, # Blomsterbeplanting 511, # Busker 300 , # Naturområde (ingen treff i Haugesund kontrakt) 517, # Artsrik vegkant 800, # Fremmede arter 67, # Tunnelløp 846, # Skredsikring, bremsekjegler 850 # Skredsikring, forbygning ] objliste = [] for enkontrakt in komr: filnavn = nvdbapiv3.esriSikkerTekst( enkontrakt ) nvdb2gpkg( objliste, filnavn=filnavn, mittfilter={'kontraktsomrade' : enkontrakt }) def firefeltrapport( mittfilter={}): """ Finner alle firefeltsveger i Norge, evt innafor angitt søkekriterie Bruker søkeobjektet nvdbapiv3.nvdbVegnett fra biblioteket https://github.com/LtGlahn/nvdbapi-V3 ARGUMENTS None KEYWORDS: mittfilter: Dictionary med søkefilter RETURNS geodataframe med resultatet """ v = nvdbapiv3.nvdbVegnett() # Legger til filter på kun fase = V (eksistende veg), såfremt det ikke kommer i konflikt med anna filter if not 'vegsystemreferanse' in mittfilter.keys(): mittfilter['vegsystemreferanse'] = 'Ev,Rv,Fv,Kv,Sv,Pv' if not 'kryssystem' in mittfilter.keys(): mittfilter['kryssystem'] = 'false' if not 'sideanlegg' in mittfilter.keys(): mittfilter['sideanlegg'] = 'false' v.filter( mittfilter ) # Kun kjørende, og kun øverste topologinivå, og ikke adskiltelop=MOT v.filter( { 'trafikantgruppe' : 'K', 'detaljniva' : 'VT,VTKB', 'adskiltelop' : 'med,nei' } ) data = [] vegsegment = v.nesteForekomst() while vegsegment: if sjekkfelt( vegsegment, felttype='firefelt'): vegsegment['feltoversikt'] = ','.join( vegsegment['feltoversikt'] ) vegsegment['geometri'] = vegsegment['geometri']['wkt'] vegsegment['vref'] = vegsegment['vegsystemreferanse']['kortform'] vegsegment['vegnr'] = vegsegment['vref'].split()[0] vegsegment['vegkategori'] = vegsegment['vref'][0] vegsegment['adskilte løp'] = vegsegment['vegsystemreferanse']['strekning']['adskilte_løp'] data.append( vegsegment ) vegsegment = v.nesteForekomst() if len( data ) > 1: mindf = pd.DataFrame( data ) mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri', 1, inplace=True) mindf.drop( 'kontraktsområder', 1, inplace=True) mindf.drop( 'riksvegruter', 1, inplace=True) mindf.drop( 'href', 1, inplace=True) mindf.drop( 'metadata', 1, inplace=True) mindf.drop( 'kortform', 1, inplace=True) mindf.drop( 'veglenkenummer', 1, inplace=True) mindf.drop( 'segmentnummer', 1, inplace=True) mindf.drop( 'startnode', 1, inplace=True) mindf.drop( 'sluttnode', 1, inplace=True) mindf.drop( 'referanse', 1, inplace=True) mindf.drop( 'målemetode', 1, inplace=True) mindf.drop( 'måledato', 1, inplace=True) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) return minGdf else: return None def sjekkfelt( vegsegment, felttype='firefelt' ): """ Sjekker hva slags felt som finnes på et vegsegment ARGUMENTS: vegsegment - dicionary med data om en bit av vegnettet hentet fra 
https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/ KEYWORDS: felttype - hva slags felttype som skal sjekkes. Mulige verdier: firefelt (default). Antar at firefeltsveg betyr at kjørefeltnummer 1-4 er brukt og er enten vanlig kj.felt, kollektivfelt eller reversibelt felt (flere varianter kommer når de trengs) RETURNS boolean - True hvis kjørefeltene er av riktig type """ svar = False vr = 'vegsystemreferanse' sr = 'strekning' if felttype == 'firefelt': if 'feltoversikt' in vegsegment.keys() and 'detaljnivå' in vegsegment.keys() and 'Vegtrase' in vegsegment['detaljnivå']: kjfelt = set( filtrerfeltoversikt( vegsegment['feltoversikt'], mittfilter=['vanlig', 'K', 'R']) ) if vr in vegsegment.keys(): if sr in vegsegment[vr] and 'adskilte_løp' in vegsegment[vr][sr]: if vegsegment[vr][sr]['adskilte_løp'] == 'Nei' and kjfelt.issuperset( { 1, 2, 3, 4}): svar = True # Siste klausul her har f.eks. forekommet på Fv5724, envegskjørt tunnel ved Oldenvatnet. elif vegsegment[vr][sr]['adskilte_løp'] == 'Med' and len( kjfelt ) >= 2 and not kjfelt.issuperset( {1, 2} ): svar = True return svar else: raise NotImplementedError('Sjekkfelt: Sjekk for felt av type: ' + felttype + 'er ikke implementert (ennå)' ) def filtrerfeltoversikt( feltoversikt, mittfilter=['vanlig', 'K', 'R' ]): """ Returnerer liste med kjørefeltnummer filtrert på hva slags feltkode vi evt har ARGUMENTS feltoversikt - Liste med feltkoder for et vegsegment. KEYWORDS mittfilter=['vanlig', 'K', 'R' ] - Liste med koder for hva slags felt vi skal telle med. Sjekk håndbok v830 Nasjonalt vegreferansesystem https://www.vegvesen.no/_attachment/61505 for mulige verdier, kortversjon: 'vanlig' - Helt vanlig kjørefelt, kjørefeltnumemr er angitt som heltall uten noen bokstaver. 'K' - kollektivfelt 'R' - reversibelt felt 'S' - Sykkelfelt 'H' - Svingefelt mot høyre 'V' - Svingefelt mot venstre 'B' - Ekstra felt for bompengeinnkreving RETURNS Liste med kjørefeltnummer hvor kun kjørefelt som angitt med mittfilter-nøkkelord er inkludert """ data = [ ] for felt in feltoversikt: feltbokstav = re.findall( '[A-Za-z]', felt) if feltbokstav: feltbokstav = feltbokstav[0] else: feltbokstav = 'vanlig' if feltbokstav in mittfilter: feltnummer = int( re.split( '[A-Z]', felt)[0] ) data.append( feltnummer ) return data
2.25
2
019_CountingSundays.py
joetache4/project-euler
0
1722
""" You are given the following information, but you may prefer to do some research for yourself. 1 Jan 1900 was a Monday. Thirty days has September, April, June and November. All the rest have thirty-one, Saving February alone, Which has twenty-eight, rain or shine. And on leap years, twenty-nine. A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400. How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)? ans: 171 """ # set to day of week for 1 Jan 1901 (Tuesday) dow = 2 def no_days(month, year): if month in [0,2,4,6,7,9,11]: return 31 elif month in [3,5,8,10]: return 30 elif year % 400 == 0: return 29 elif year % 100 == 0: return 28 elif year % 4 == 0: return 29 else: return 28 sum = 0 for y in range(1901, 2001): for m in range(0, 12): if dow == 0: sum += 1 dow = (dow + no_days(m, y)) % 7 print(sum)
4
4
setup.py
aagaard/dbservice
1
1723
<filename>setup.py
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
Setup for the dbservice
"""

from setuptools import setup, find_packages

setup(
    name='dbservice',
    version='0.9',
    description="Database service for storing meter data",
    author="<NAME>",
    author_email='<EMAIL>',
    url='https://github.com/dbservice/dbservice',
    packages=find_packages(),
    package_data={'': ['static/*.*', 'templates/*.*']},
    scripts=['manage.py'],
)
1.289063
1
venv/lib/python3.6/site-packages/ansible_collections/junipernetworks/junos/plugins/module_utils/network/junos/argspec/facts/facts.py
usegalaxy-no/usegalaxy
1
1724
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The arg spec for the junos facts module.
"""
from __future__ import absolute_import, division, print_function

__metaclass__ = type


class FactsArgs(object):
    """ The arg spec for the junos facts module
    """

    def __init__(self, **kwargs):
        pass

    argument_spec = {
        "gather_subset": dict(
            default=["!config"], type="list", elements="str"
        ),
        "config_format": dict(
            default="text", choices=["xml", "text", "set", "json"]
        ),
        "gather_network_resources": dict(type="list", elements="str"),
        "available_network_resources": {"type": "bool", "default": False},
    }
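For context, an argument spec like this is normally consumed by wiring it into AnsibleModule when the facts module runs; the sketch below shows that usual pattern and is illustrative only (the surrounding module boilerplate is not part of the original file):

from ansible.module_utils.basic import AnsibleModule

# Build the module from the arg spec above; check mode is typically allowed
# for facts modules because gathering facts does not change device state.
module = AnsibleModule(
    argument_spec=FactsArgs.argument_spec,
    supports_check_mode=True,
)
gather_subset = module.params["gather_subset"]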
1.96875
2
server/dbcls/api/resources/authenticate.py
ripry/umakaviewer
2
1725
from flask_restful import Resource, reqparse
from firebase_admin import auth as firebase_auth

from dbcls.models import User


parser = reqparse.RequestParser()
parser.add_argument('token', type=str, required=True, nullable=False)


class Authenticate(Resource):
    def post(self):
        try:
            args = parser.parse_args()
            decoded_token = firebase_auth.verify_id_token(args['token'])
        except (ValueError, firebase_auth.AuthError) as e:
            return {'message': f'{e}'}, 400

        firebase_uid = decoded_token['uid']
        user = User.query.filter_by(firebase_uid=firebase_uid).first()
        if not user:
            return {'message': 'user not found. You have to sign up.'}, 400

        custom_token = firebase_auth.create_custom_token(firebase_uid)

        return {
            'custom_token': custom_token.decode(),
            'display_name': user.display_name,
            'contact_uri': user.contact_uri,
            'roles': [role.role_type for role in user.user_roles],
        }
2.671875
3
GetJSONData_NLPParser.py
Feiyi-Ding/2021A
0
1726
<reponame>Feiyi-Ding/2021A
# Import required modules
import requests
import json

# Get json results for the required input
InputString = "kobe is a basketball player"
headers = {
    'Content-type': 'application/json',
}
# Serialize the payload as valid JSON instead of concatenating strings,
# and send the declared Content-type header along with the request.
data = json.dumps({"text": InputString})
response = requests.post('http://66.76.242.198:9888/', data=data, headers=headers).json()

# Adding a test comment to check if the automatic git pull is working or not
# print(json.dumps(response, indent=4, sort_keys=True))
3.171875
3
language/bert_extraction/steal_bert_classifier/utils/wiki103_sentencize.py
Xtuden-com/language
1,199
1727
<filename>language/bert_extraction/steal_bert_classifier/utils/wiki103_sentencize.py<gh_stars>1000+ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Sentencize the raw wikitext103.""" import tensorflow.compat.v1 as tf app = tf.app flags = tf.flags gfile = tf.gfile logging = tf.logging flags.DEFINE_string("wiki103_raw", None, "Path to raw wikitext103 train corpus.") flags.DEFINE_string("output_path", None, "Path to output the processed dataset.") FLAGS = flags.FLAGS def main(_): with open(FLAGS.wiki103_raw, "r") as f: data = f.read().strip().split("\n") data = [x.split(" . ") for x in data if x.strip() and x.strip()[0] != "="] sentences = [] for para in data: for sent in para: sentences.append(sent + ".") data = "\n".join(sentences) data = data.replace(" @.@ ", ".").replace(" @-@ ", "-").replace(" ,", ",") data = data.replace(" \'", "\'").replace(" )", ")").replace("( ", "(") data = data.replace(" ;", ";") data = "\n".join([x for x in data.split("\n") if len(x.split()) > 3]) logging.info("length = %d", len(data.split("\n"))) with open(FLAGS.output_path, "w") as f: f.write(data) if __name__ == "__main__": app.run(main)
2.40625
2
example_bots/any_to_any/__init__.py
budacom/trading-bots
21
1728
default_bot = 'example_bots.any_to_any.bot.AnyToAny'
1.179688
1
helpers.py
owenjones/CaBot
3
1729
from server import roles


def hasRole(member, roleID):
    role = member.guild.get_role(roleID)
    return role in member.roles


def gainedRole(before, after, roleID):
    role = before.guild.get_role(roleID)
    return (role not in before.roles) and (role in after.roles)


def isExplorer(ctx):
    return hasRole(ctx.author, roles["explorer"])


def isNetwork(ctx):
    return hasRole(ctx.author, roles["network"])


def isLeader(ctx):
    return hasRole(ctx.author, roles["leader"])


def isAdmin(ctx):
    return hasRole(ctx.author, roles["admin"])


def isBot(ctx):
    return hasRole(ctx.author, roles["bot"])


class Colours:
    DEFAULT = 0
    AQUA = 1752220
    GREEN = 3066993
    BLUE = 3447003
    PURPLE = 10181046
    GOLD = 15844367
    ORANGE = 15105570
    RED = 15158332
    GREY = 9807270
    DARKER_GREY = 8359053
    NAVY = 3426654
    DARK_AQUA = 1146986
    DARK_GREEN = 2067276
    DARK_BLUE = 2123412
    DARK_PURPLE = 7419530
    DARK_GOLD = 12745742
    DARK_ORANGE = 11027200
    DARK_RED = 10038562
    DARK_GREY = 9936031
    LIGHT_GREY = 12370112
    DARK_NAVY = 2899536
    LUMINOUS_VIVID_PINK = 16580705
    DARK_VIVID_PINK = 12320855
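These predicates read like discord.py command checks; the sketch below shows one typical way they could be attached to a command. The bot object, prefix, and command name are illustrative assumptions and do not appear in the original module:

import discord
from discord.ext import commands

import helpers  # assuming this file is importable as helpers.py

bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())


@bot.command()
@commands.check(helpers.isAdmin)  # only members holding the admin role may invoke this
async def ping(ctx):
    await ctx.send("pong")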
2.671875
3
databuilder/loader/file_system_neo4j_csv_loader.py
davcamer/amundsendatabuilder
0
1730
import csv import logging import os import shutil from csv import DictWriter # noqa: F401 from pyhocon import ConfigTree, ConfigFactory # noqa: F401 from typing import Dict, Any # noqa: F401 from databuilder.job.base_job import Job from databuilder.loader.base_loader import Loader from databuilder.models.neo4j_csv_serde import NODE_LABEL, \ RELATION_START_LABEL, RELATION_END_LABEL, RELATION_TYPE from databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable # noqa: F401 from databuilder.utils.closer import Closer LOGGER = logging.getLogger(__name__) class FsNeo4jCSVLoader(Loader): """ Write node and relationship CSV file(s) that can be consumed by Neo4jCsvPublisher. It assumes that the record it consumes is instance of Neo4jCsvSerializable """ # Config keys NODE_DIR_PATH = 'node_dir_path' RELATION_DIR_PATH = 'relationship_dir_path' FORCE_CREATE_DIR = 'force_create_directory' SHOULD_DELETE_CREATED_DIR = 'delete_created_directories' _DEFAULT_CONFIG = ConfigFactory.from_dict({ SHOULD_DELETE_CREATED_DIR: True, FORCE_CREATE_DIR: False }) def __init__(self): # type: () -> None self._node_file_mapping = {} # type: Dict[Any, DictWriter] self._relation_file_mapping = {} # type: Dict[Any, DictWriter] self._closer = Closer() def init(self, conf): # type: (ConfigTree) -> None """ Initializing FsNeo4jCsvLoader by creating directory for node files and relationship files. Note that the directory defined in configuration should not exist. :param conf: :return: """ conf = conf.with_fallback(FsNeo4jCSVLoader._DEFAULT_CONFIG) self._node_dir = conf.get_string(FsNeo4jCSVLoader.NODE_DIR_PATH) self._relation_dir = \ conf.get_string(FsNeo4jCSVLoader.RELATION_DIR_PATH) self._delete_created_dir = \ conf.get_bool(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR) self._force_create_dir = conf.get_bool(FsNeo4jCSVLoader.FORCE_CREATE_DIR) self._create_directory(self._node_dir) self._create_directory(self._relation_dir) def _create_directory(self, path): # type: (str) -> None """ Validate directory does not exist, creates it, register deletion of created directory function to Job.closer. :param path: :return: """ if os.path.exists(path): if self._force_create_dir: LOGGER.info('Directory exist. Deleting directory {}'.format(path)) shutil.rmtree(path) else: raise RuntimeError('Directory should not exist: {}'.format(path)) os.makedirs(path) def _delete_dir(): # type: () -> None if not self._delete_created_dir: LOGGER.warn('Skip Deleting directory {}'.format(path)) return LOGGER.info('Deleting directory {}'.format(path)) shutil.rmtree(path) # Directory should be deleted after publish is finished Job.closer.register(_delete_dir) def load(self, csv_serializable): # type: (Neo4jCsvSerializable) -> None """ Writes Neo4jCsvSerializable into CSV files. There are multiple CSV files that this method writes. This is because there're not only node and relationship, but also it can also have different nodes, and relationships. Common pattern for both nodes and relations: 1. retrieve csv row (a dict where keys represent a header, values represent a row) 2. using this dict to get a appropriate csv writer and write to it. 3. 
repeat 1 and 2 :param csv_serializable: :return: """ node_dict = csv_serializable.next_node() while node_dict: key = (node_dict[NODE_LABEL], len(node_dict)) file_suffix = '{}_{}'.format(*key) node_writer = self._get_writer(node_dict, self._node_file_mapping, key, self._node_dir, file_suffix) node_writer.writerow(node_dict) node_dict = csv_serializable.next_node() relation_dict = csv_serializable.next_relation() while relation_dict: key2 = (relation_dict[RELATION_START_LABEL], relation_dict[RELATION_END_LABEL], relation_dict[RELATION_TYPE], len(relation_dict)) file_suffix = '{}_{}_{}'.format(key2[0], key2[1], key2[2]) relation_writer = self._get_writer(relation_dict, self._relation_file_mapping, key2, self._relation_dir, file_suffix) relation_writer.writerow(relation_dict) relation_dict = csv_serializable.next_relation() def _get_writer(self, csv_record_dict, # type: Dict[str, Any] file_mapping, # type: Dict[Any, DictWriter] key, # type: Any dir_path, # type: str file_suffix # type: str ): # type: (...) -> DictWriter """ Finds a writer based on csv record, key. If writer does not exist, it's creates a csv writer and update the mapping. :param csv_record_dict: :param file_mapping: :param key: :param file_suffix: :return: """ writer = file_mapping.get(key) if writer: return writer LOGGER.info('Creating file for {}'.format(key)) file_out = open('{}/{}.csv'.format(dir_path, file_suffix), 'w') def file_out_close(): # type: () -> None LOGGER.info('Closing file IO {}'.format(file_out)) file_out.close() self._closer.register(file_out_close) writer = csv.DictWriter(file_out, fieldnames=csv_record_dict.keys(), quoting=csv.QUOTE_NONNUMERIC) writer.writeheader() file_mapping[key] = writer return writer def close(self): # type: () -> None """ Any closeable callable registered in _closer, it will close. :return: """ self._closer.close() def get_scope(self): # type: () -> str return "loader.filesystem_csv_neo4j"
2.3125
2
sample_program_04_02_knn.py
pepsinal/python_doe_kspub
16
1731
# -*- coding: utf-8 -*- """ @author: <NAME> """ import pandas as pd from sklearn.neighbors import NearestNeighbors # k-NN k_in_knn = 5 # k-NN における k rate_of_training_samples_inside_ad = 0.96 # AD 内となるトレーニングデータの割合。AD のしきい値を決めるときに使用 dataset = pd.read_csv('resin.csv', index_col=0, header=0) x_prediction = pd.read_csv('resin_prediction.csv', index_col=0, header=0) # データ分割 y = dataset.iloc[:, 0] # 目的変数 x = dataset.iloc[:, 1:] # 説明変数 # 標準偏差が 0 の特徴量の削除 deleting_variables = x.columns[x.std() == 0] x = x.drop(deleting_variables, axis=1) x_prediction = x_prediction.drop(deleting_variables, axis=1) # オートスケーリング autoscaled_x = (x - x.mean()) / x.std() autoscaled_x_prediction = (x_prediction - x.mean()) / x.std() # k-NN による AD ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean') # AD モデルの宣言 ad_model.fit(autoscaled_x) # k-NN による AD では、トレーニングデータの x を model_ad に格納することに対応 # サンプルごとの k 最近傍サンプルとの距離に加えて、k 最近傍サンプルのインデックス番号も一緒に出力されるため、出力用の変数を 2 つに # トレーニングデータでは k 最近傍サンプルの中に自分も含まれ、自分との距離の 0 を除いた距離を考える必要があるため、k_in_knn + 1 個と設定 knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1) knn_distance_train = pd.DataFrame(knn_distance_train, index=autoscaled_x.index) # DataFrame型に変換 mean_of_knn_distance_train = pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1), columns=['mean_of_knn_distance']) # 自分以外の k_in_knn 個の距離の平均 mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # トレーニングデータのサンプルの rate_of_training_samples_inside_ad * 100 % が含まれるようにしきい値を設定 sorted_mean_of_knn_distance_train = mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True) # 距離の平均の小さい順に並び替え ad_threshold = sorted_mean_of_knn_distance_train.iloc[ round(autoscaled_x.shape[0] * rate_of_training_samples_inside_ad) - 1] # トレーニングデータに対して、AD の中か外かを判定 inside_ad_flag_train = mean_of_knn_distance_train <= ad_threshold # AD 内のサンプルのみ TRUE inside_ad_flag_train.columns=['inside_ad_flag'] inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対する k-NN 距離の計算 knn_distance_prediction, knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction) knn_distance_prediction = pd.DataFrame(knn_distance_prediction, index=x_prediction.index) # DataFrame型に変換 mean_of_knn_distance_prediction = pd.DataFrame(knn_distance_prediction.mean(axis=1), columns=['mean_of_knn_distance']) # k_in_knn 個の距離の平均 mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対して、AD の中か外かを判定 inside_ad_flag_prediction = mean_of_knn_distance_prediction <= ad_threshold # AD 内のサンプルのみ TRUE inside_ad_flag_prediction.columns=['inside_ad_flag'] inside_ad_flag_prediction.to_csv('inside_ad_flag_prediction_knn.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意
3.09375
3
topology.py
destinysky/nsh_sfc
2
1732
#!/usr/bin/python """ """ from mininet.net import Mininet from mininet.node import Controller, RemoteController, OVSKernelSwitch,UserSwitch #OVSLegacyKernelSwitch, UserSwitch from mininet.cli import CLI from mininet.log import setLogLevel from mininet.link import Link, TCLink #conf_port=50000 conf_ip_1='10.0.0.254' conf_mac_1='11:12:13:14:15:16' def topology(): "Create a network." net = Mininet( controller=RemoteController, link=TCLink, switch=OVSKernelSwitch ) print "*** Creating nodes" h1 = net.addHost( 'h1', mac='00:00:00:00:00:01', ip='10.0.0.1/24' ) h2 = net.addHost( 'h2', mac='00:00:00:00:00:02', ip='10.0.0.2/24' ) h3 = net.addHost( 'h3', mac='00:00:00:00:00:03', ip='10.0.0.3/24' ) h4 = net.addHost( 'h4', mac='00:00:00:00:00:04', ip='10.0.0.4/24' ) h5 = net.addHost( 'h5', mac='00:00:00:00:00:05', ip='10.0.0.5/24' ) s1 = net.addSwitch( 's1', listenPort=6671 ) s2 = net.addSwitch( 's2', listenPort=6672 ) s3 = net.addSwitch( 's3', listenPort=6673 ) s4 = net.addSwitch( 's4', listenPort=6674 ) s5 = net.addSwitch( 's5', listenPort=6675 ) c1 = net.addController( 'c1', controller=RemoteController, ip='127.0.0.1', port=6633 ) print "*** Creating links" net.addLink(s1, h1) net.addLink(s2, h2) net.addLink(s3, h3) net.addLink(s4, h4) net.addLink(s5, h5) net.addLink(s1, s2) net.addLink(s2, s3) net.addLink(s3, s4) net.addLink(s4, s5) print "*** Starting network" net.build() h1.cmd('ip route add '+conf_ip_1+'/32 dev h1-eth0') h1.cmd('sudo arp -i h1-eth0 -s '+conf_ip_1+' '+conf_mac_1) h1.cmd('sysctl -w net.ipv4.ip_forward=1') h1.cmd('python3 listen.py &') h2.cmd('ip route add '+conf_ip_1+'/32 dev h2-eth0') h2.cmd('sudo arp -i h2-eth0 -s '+conf_ip_1+' '+conf_mac_1) h2.cmd('sysctl -w net.ipv4.ip_forward=1') h2.cmd('python3 listen.py &') h3.cmd('ip route add '+conf_ip_1+'/32 dev h3-eth0') h3.cmd('sudo arp -i h3-eth0 -s '+conf_ip_1+' '+conf_mac_1) h3.cmd('sysctl -w net.ipv4.ip_forward=1') h3.cmd('python3 listen.py &') h4.cmd('ip route add '+conf_ip_1+'/32 dev h4-eth0') h4.cmd('sudo arp -i h4-eth0 -s '+conf_ip_1+' '+conf_mac_1) h4.cmd('sysctl -w net.ipv4.ip_forward=1') h4.cmd('python3 listen.py &') h5.cmd('ip route add '+conf_ip_1+'/32 dev h5-eth0') h5.cmd('sudo arp -i h5-eth0 -s '+conf_ip_1+' '+conf_mac_1) h5.cmd('sysctl -w net.ipv4.ip_forward=1') h5.cmd('python3 listen.py &') c1.start() s1.start( [c1] ) s2.start( [c1] ) s3.start( [c1] ) s4.start( [c1] ) s5.start( [c1] ) print "*** Running CLI" CLI( net ) print "*** Stopping network" net.stop() if __name__ == '__main__': setLogLevel( 'info' ) topology()
2.390625
2
lampara/lamp.py
gventuraagramonte/python
0
1733
<gh_stars>0
# Definition of the class.
# Before anything else, a class is declared as follows.
class Lamp:

    _LAMPS = ['''
            .
         .   |   ,
          \  '  /
           ` ,-. '
        --- (   ) ---
             \ /
            _|=|_
           |_____|
    ''',
    '''
             ,-.
            (   )
             \ /
            _|=|_
           |_____|
    ''']

    def __init__(self, is_turned_on):
        # Instance method; __init__ is the constructor, i.e. the first method that runs.
        self._is_turned_on = is_turned_on

    def turn_on(self):
        self._is_turned_on = True
        self._display_image()

    def turn_off(self):
        self._is_turned_on = False
        self._display_image()

    def _display_image(self):
        if self._is_turned_on:
            print(self._LAMPS[0])
        else:
            print(self._LAMPS[1])
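A minimal usage sketch for the class above (assuming the file is saved as lamp.py, per the record's path):

from lamp import Lamp

desk_lamp = Lamp(is_turned_on=False)
desk_lamp.turn_on()   # prints the lit lamp ASCII art
desk_lamp.turn_off()  # prints the unlit lamp ASCII art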
3.609375
4
lib/galaxy/model/migrate/versions/0084_add_ldda_id_to_implicit_conversion_table.py
sneumann/galaxy
1
1734
""" Migration script to add 'ldda_id' column to the implicitly_converted_dataset_association table. """ from __future__ import print_function import logging from sqlalchemy import ( Column, ForeignKey, Integer, MetaData ) from galaxy.model.migrate.versions.util import ( add_column, drop_column ) log = logging.getLogger(__name__) metadata = MetaData() def upgrade(migrate_engine): print(__doc__) metadata.bind = migrate_engine metadata.reflect() # SQLAlchemy Migrate has a bug when adding a column with both a ForeignKey and a index in SQLite if migrate_engine.name != 'sqlite': c = Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True) else: c = Column("ldda_id", Integer, index=True, nullable=True) add_column(c, 'implicitly_converted_dataset_association', metadata, index_name='ix_implicitly_converted_ds_assoc_ldda_id') def downgrade(migrate_engine): metadata.bind = migrate_engine metadata.reflect() drop_column('ldda_id', 'implicitly_converted_dataset_association', metadata)
2.1875
2
ds.py
tobiichiorigami1/csp
0
1735
<gh_stars>0
votes_t_shape = [3, 0, 1, 2]
for i in range(6 - 4):
    votes_t_shape += [i + 4]

print(votes_t_shape)
2.640625
3
scripts/adam/cc100_baselines.py
TimDettmers/sched
1
1736
<reponame>TimDettmers/sched<filename>scripts/adam/cc100_baselines.py import numpy as np import itertools import gpuscheduler import argparse import os import uuid import hashlib import glob import math from itertools import product from torch.optim.lr_scheduler import OneCycleLR from os.path import join parser = argparse.ArgumentParser(description='Compute script.') parser.add_argument('--dry', action='store_true') parser.add_argument('--verbose', action='store_true') parser.add_argument('--p', type=float, default=1.0, help='Probability with which to select a configuration.') args = parser.parse_args() gpus = 128 cmd = 'fairseq-train /private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin --distributed-world-size {0} --distributed-port 54187 --fp16 --memory-efficient-fp16 --num-workers 2 --criterion cross_entropy --task language_modeling --sample-break-mode none --log-interval 25 --tokens-per-sample 1024 --arch transformer_lm_big --share-decoder-input-output-embed --decoder-layers 28 --decoder-attention-heads 16 --dropout 0.0 --attention-dropout 0.0 --activation-dropout 0.0 --activation-fn relu --no-epoch-checkpoints --keep-best-checkpoints 0 --keep-interval-updates 0 --keep-last-epochs 0 --save-interval-updates 1000 --log-format simple --fp16-no-flatten-grads --ignore-unused-valid-subsets'.format(gpus) args2 = {} name = 'blockwise5' constraint = 'volta32gb' # 1024 tokens * 8 update_freq * 56250 steps = 0.4608e9 tokens -> optimal batch size 3460 # model sizes: 1.92bn, 2.43bn, 1.41bn logfolder = 'adam/cc100/{0}'.format(name) ckp_name = logfolder #time_hours = 24*2 cores_per_job = 5 mem = 56*(8 if gpus > 8 else gpus) num_seeds = 1 seed_offset = 5 time_hours = 72 time_minutes = 0 #partition = 'learnlab,learnfair,scavenge' partition = 'learnfair,learnlab' #partition = 'learnfair' #partition = 'uninterruptible' change_dir = 'fairseq_private' repo = 'fairseq_private' exclude = '' s = gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition, use_gres=False) fp16 = True args3 = {} args2['lr-scheduler'] = 'polynomial_decay' args2['warmup-updates'] = 2000 args2['max-update'] = 56250 args2['total-num-update'] = 56250 #args2['lr-scheduler'] = 'cosine' #args2['warmup-updates'] = 3000 #args2['max-update'] = 56250*4 args2['fp16-scale-window'] = 250 args2['clip-norm'] = 0.4 #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1), (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]#, (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)] #args3['adam8bits-offset'] = [1/512] #args3['prob-quant'] = [False] #args3['dist-scale'] = [1.0] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3['decoder-embed-dim'] = [2048+256] #args3['decoder-ffn-embed-dim'] = [8192+2048] #args3['max-tokens'] = [3072] #args3['update-freq'] = [2] key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq', 'lr') #key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq') args3[key] = [] #lrkey = ('lr', 'warmup-init-lr') #args3[lrkey] = [] # 32-bit baseline #args3['optimizer'] = ['adam'] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 
'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)] ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,2)) # #lr = 0.003239 + (-0.0001395*math.log(1.41e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8)) # adafactor #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 32, 'quantile', 1)] #args2['optimizer'] = 'adafactor' #args2['beta1'] = 0.9 #args2['decay-rate'] = 0.999 ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048+256,8192+2048,2)) ##args3[key].append((2048,2688,10752,2)) # #lr = 0.003239 + (-0.0001395*math.log(1.92e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8)) # 8-bit #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(5, 0.0)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'quantile', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 1)] args3['optimizer'] = ['adam'] args3[('use-bnb', 'optim-bits')] = [(True, 8)] args3[('stable-emb', 'no-scale-embedding')] = [(True, True)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(True, True, True), (False, False, False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(False, False, False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding', 'optim-bits')] = [(True, True, True, True)] args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,8, 0.00045)) #args3[key].append((2048,2688,10752,2)) #args3['use-emb-norm'] = [True] #lr = 0.003239 + (-0.0001395*math.log(2.43e9)) #args3[lrkey].append((lr, 0.0)) #args2['train-subset'] = 'train11' args4 = [] args5 = {} args6 = {} rdm = np.random.RandomState(5345) for key, value in args2.items(): cmd = cmd + ' --{0} {1}'.format(key, value) args_prod = [] for key, values in args3.items(): if isinstance(key, tuple): keyvalues = [] for tups in values: arg = '' for i, v in enumerate(tups): if v is True: v = '' if v is False: continue if len(key[i]) == 0: arg += '{0} '.format(v) else: arg += '--{0} {1} '.format(key[i], v) keyvalues.append(arg) elif isinstance(key, str): keyvalues = [] for v in values: if v is True: v = '' if v is False: keyvalues.append('') else: keyvalues.append(' --{0} {1}'.format(key, v)) args_prod.append(keyvalues) if len(args_prod) >= 2: args_prod = list(product(*args_prod)) else: new_args = [] if len(args_prod) > 0: for arg in args_prod[0]: new_args.append([arg]) args_prod = new_args jobs = [] if len(args4) == 0: args4.append('') for seed in range(num_seeds): seed = seed + seed_offset for arg4 in args4: if len(args_prod) == 0: args_prod.append(('', '')) for i, values in enumerate(args_prod): job_cmd = cmd + arg4 for val in values: job_cmd += ' {0}' .format(val) #job_cmd += ' --checkpoint /checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) if not fp16: job_cmd = job_cmd.replace('--fp16 ', ' ') job_cmd = job_cmd + ' --seed {0}'.format(seed) checkpoint_dir = '/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) save_dir = ' --save-dir {0}'.format(checkpoint_dir) job_cmd = job_cmd + save_dir cmds = [job_cmd] if rdm.rand(1) 
<= args.p: jobs.append(job_cmd) s.add_job(logfolder, repo, change_dir, cmds, time_hours, fp16, cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus) if args.dry: for i, job in enumerate(jobs): print(i, job) print('') print('Total jobs', len(jobs)) print('Time hours: {0}'.format(time_hours)) print('GPUs: {0}'.format(gpus)) print('Jobs will be written to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder))) print('Jobs will be run on: {0}'.format(partition)) print('Run in folder: {0}'.format(change_dir)) if not args.dry: s.run_jobs()
1.882813
2
boa3_test/test_sc/event_test/EventNep5Transfer.py
hal0x2328/neo3-boa
25
1737
<filename>boa3_test/test_sc/event_test/EventNep5Transfer.py
from boa3.builtin import public
from boa3.builtin.contract import Nep5TransferEvent

transfer = Nep5TransferEvent


@public
def Main(from_addr: bytes, to_addr: bytes, amount: int):
    transfer(from_addr, to_addr, amount)
1.460938
1
abtest/views.py
SchuylerGoodman/topicalguide
0
1738
# The Topical Guide # Copyright 2010-2011 Brigham Young University # # This file is part of the Topical Guide <http://nlp.cs.byu.edu/topic_browser>. # # The Topical Guide is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # The Topical Guide is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License # for more details. # # You should have received a copy of the GNU Affero General Public License # along with the Topical Guide. If not, see <http://www.gnu.org/licenses/>. # # If you have inquiries regarding any further use of the Topical Guide, please # contact the Copyright Licensing Office, Brigham Young University, 3760 HBLL, # Provo, UT 84602, (801) 422-9339 or 422-3821, e-mail <EMAIL>. from __future__ import print_function from django.shortcuts import render, redirect from django.http import HttpResponse import abtest from abtest.settings import TEST_LIST from visualize import root # Create your views here. def test(request, arg, *args, **kwargs): if arg not in TEST_LIST: print("Error! Unknown view should have been hit instead") package_list = TEST_LIST[arg]['VIEW_PACKAGE'].split('.') view_package = package_list.pop() package = ".".join(package_list) view = getattr(__import__(package, fromlist=[view_package]), view_package) return view(request, args, kwargs) # This view is called when the given url does not match anything def unknown(request, arg, *args, **kwargs): # redirect to the root view return redirect('/')
2.265625
2
neurodocker/reprozip/tests/test_merge.py
sulantha2006/neurodocker
0
1739
"""Tests for merge.py.""" from __future__ import absolute_import, division, print_function from glob import glob import os import tarfile import tempfile from neurodocker.docker import client from neurodocker.reprozip.trace import ReproZipMinimizer from neurodocker.reprozip.merge import merge_pack_files def _create_packfile(commands, dir): """Create packfile from list `commands` in debian:stretch container.""" container = client.containers.run('debian:stretch', detach=True, tty=True, security_opt=['seccomp:unconfined']) try: minimizer = ReproZipMinimizer(container.id, commands, packfile_save_dir=dir) packfile_path = minimizer.run() except: raise finally: container.stop() container.remove() return packfile_path def test_merge_pack_files(): tmpdir = tempfile.mkdtemp() cmd = ["du -sh /usr", "rm --help"] packpath = _create_packfile(cmd, tmpdir) new_name = "first-pack.rpz" os.rename(packpath, os.path.join(tmpdir, new_name)) cmd = ["ls -l /", "grep --help"] _create_packfile(cmd, tmpdir) pattern = os.path.join(tmpdir, '*.rpz') packfiles = glob(pattern) assert packfiles, "packfiles not found" outfile = os.path.join(tmpdir, 'merged.rpz') merge_pack_files(outfile=outfile, packfiles=packfiles) with tarfile.open(outfile) as tar: tar.extractall(path=tmpdir) datafile = os.path.join(tmpdir, 'DATA.tar.gz') with tarfile.open(datafile) as tardata: tardata.extractall(path=tmpdir) usr_path = os.path.join(tmpdir, 'DATA', 'usr', 'bin') assert os.path.isfile(os.path.join(usr_path, 'du')) assert os.path.isfile(os.path.join(usr_path, 'grep')) assert os.path.isfile(os.path.join(usr_path, 'ls')) assert os.path.isfile(os.path.join(usr_path, 'rm')) assert not os.path.isfile(os.path.join(usr_path, 'sed')) assert not os.path.isfile(os.path.join(usr_path, 'tar'))
2.03125
2
build/step-3-kivy-almost-manylinux/scripts/redirect_html5.py
dolang/build-kivy-linux
0
1740
""" HTML5 contexts. :author: <NAME> :license: MIT """ import contextlib import io import sys __all__ = ['create_document', 'tag', 'as_link'] class create_document(contextlib.redirect_stdout): """Redirect output to an HTML5 document specified by new_target. A HTML document title can be specified, but should not consist of whitespace only. Default is a dash. For serialisation, an encoding is included and defaults to UTF-8. Make sure the output (likely ``new_target``) uses the correct one. Arguments are not checked for validity. """ def __init__(self, new_target, *, title='-', encoding='utf-8'): super().__init__(new_target) self._title = str(title) self._encoding = encoding def __enter__(self): new_target = contextlib.redirect_stdout.__enter__(self) html5 = ('<!DOCTYPE html>\n' '<html>\n' '<title>{}</title>\n' '<meta charset="{}">'.format(self._title, self._encoding)) print(html5) return new_target @contextlib.contextmanager def tag(name): """Enclose output in an HTML tag denoted by the name.""" print('<{}>'.format(name)) yield print('</{}>'.format(name)) class LinkStringIO(io.StringIO): def __init__(self): super().__init__() self._write_text = False # switch between link href="..." and text def write(self, s): if not s: return # else: if s.isspace(): return super().write(s) # else: if self._write_text: count = super().write('<a href="') count += super().write(s) count += super().write('">') else: count = super().write(s) count += super().write('</a>') self._write_text = not self._write_text return count class write_link(contextlib.redirect_stdout): """Combine any two subsequent non-empty writes into an HTML link.""" def __init__(self): super().__init__(LinkStringIO()) def __exit__(self, exctype, excinst, exctb): super().__exit__(exctype, excinst, exctb) with contextlib.closing(self._new_target): self._new_target.seek(0) sys.stdout.write(self._new_target.read())
3.09375
3
lab/hw03-part-i_nov14.py
jzacsh/neuralnets-cmp464
1
1741
<reponame>jzacsh/neuralnets-cmp464<gh_stars>1-10
"""
<NAME> solution to homework #3, Nov 14., Part I
"""
# Per homework instructions, following lead from matlab example by professor:
# http://comet.lehman.cuny.edu/schneider/Fall17/CMP464/Maple/PartialDerivatives1.pdf

import sys
import tensorflow as tf
import tempfile
import os
import numpy as np

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # not really doing intersting things in this lab, so just ignore optimization


class Differentiable:
    """ encapsulation of a function and its derivative """

    def __init__(self, label, f, d):
        self.func = f
        self.deriv = d
        self.func.name = label
        self.deriv.name = "%sDeriv" % label


# g(x) = x^4+2x-7 ; per matlab example
# g'(x) = 4x^3+2
fExFourth = Differentiable("fExFourth",
                           lambda x: tf.add_n([tf.pow(x, 4), tf.multiply(2, x), -7]),
                           lambda x: tf.add_n([tf.multiply(4, tf.pow(x, 3)), 2]))

tFofTwo = fExFourth.func(2)
tFofDerivTwo = fExFourth.deriv(2)

log_dir = tempfile.mkdtemp(prefix="hw3-nov14-parti")
print(log_dir)
with tf.Session() as sess:
    writer = tf.summary.FileWriter(log_dir, sess.graph)
    fOfTwo, fDerivOfTwo = results = sess.run([tFofTwo, tFofDerivTwo])
    sys.stderr.write("results:\n\tf(2)=%s\n\tf'(2)=%s\n" % (fOfTwo, fDerivOfTwo))

    # note: only needed when doing a *loop* of sess.run() calls, and want to see
    # intermediary results per-loop.
    #writer.add_summary(results)

    writer.flush()
    writer.close()
2.359375
2
modules/experiments_bc/set_tp.py
GChrysostomou/tasc
2
1742
import torch import torch.nn as nn import numpy as np import matplotlib matplotlib.use("Agg") import matplotlib.pyplot as plt import pandas as pd from sklearn.metrics import * from sklearn.metrics import precision_recall_fscore_support as prfs device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') def degrading_model_perf(data, model, save_path, data_size, largest = True): print("\n--- Degrading Model Performance \n") modulo = round(len(data) / 10) + 1 model.embedding.weight.requires_grad_(True) actual = [] results = {} results["random"] = [] results["attention"]= [] results["gradient"] = [] results["grad_attention"] = [] results["grad*attention"] = [] _, _, lengths, _ = next(iter(data)) maximum = max(lengths) if max(lengths) <= 10 : maximum = max(lengths) - 1 elif max(lengths) > 10 : maximum = 10 print(maximum) grad_set = torch.zeros([data_size, maximum]).long().to(device) att_set = torch.zeros([data_size, maximum]).long().to(device) rand_set = torch.zeros([data_size, maximum]).long().to(device) att_grad_set = torch.zeros([data_size, maximum]).long().to(device) att_x_grad_set = torch.zeros([data_size, maximum]).long().to(device) actual_set = torch.zeros([data_size, 1]).long().to(device) docs = [] for batchi, (doc_id, sentences, lengths, labels) in enumerate(data): model.train() torch.cuda.empty_cache() model.zero_grad() sentences, lengths, labels = sentences.to(device), lengths.to(device), labels.to(device) yhat, weights_or = model(sentences, lengths, retain_gradient = True) masking = yhat.max(-1)[1] == labels if largest == False: masking = yhat.max(-1)[1] != labels yhat.max(-1)[0].sum().backward(retain_graph = True) maxi = max(lengths) doc_id = doc_id[masking] yhat = yhat[masking] sentences = sentences[masking] labels = labels[masking] lengths = lengths[masking] weights_or = weights_or[masking] docs.extend(doc_id) g = model.embed.grad[masking] weights_def_grad = model.weights.grad[masking] max_lengths = max(max(lengths), maxi) model_masks = model.masks[masking] with torch.no_grad(): weights = weights_or.clone() weight_mul_grad = weights_or * weights_def_grad weight_mul_grad[model_masks[:,:max_lengths]] = float("-inf") weights_def_grad_soft = weights_def_grad.clone() weights_def_grad_soft[model_masks[:,:max_lengths]] = float("-inf") em = model.embed[masking] g1 = (g* em).sum(-1)[:,:max_lengths] g1[model_masks[:,:max_lengths]] = float("-inf") sentence_att = sentences.clone()[:,:max_lengths] sentence_grad = sentences.clone()[:,:max_lengths] sentence_rand = sentences.clone()[:,:max_lengths] sentence_att_grad = sentences.clone()[:,:max_lengths] sentence_att_mul_grad = sentences.clone()[:,:max_lengths] g1[model_masks[:,:max_lengths]] = float("-inf") top_grad = torch.topk(g1, k = g1.size(1), largest = largest)[1] top_att = torch.topk(weights, k = weights.size(1), largest = largest)[1] top_rand = torch.randn(top_att.shape) top_rand = torch.topk(top_rand, k = weights.size(1), largest = largest)[1] top_att_grad = torch.topk(weights_def_grad_soft, k = weights.size(1), largest = largest)[1] top_att_mul_grad = torch.topk(weight_mul_grad, k = weights.size(1), largest = largest)[1] temp_pred = [] temp_act = [] temp_act.append(labels.cpu().data.numpy()) temp_pred.append(yhat.max(-1)[1].cpu().data.numpy()) model.eval() actual_set[doc_id] = labels.unsqueeze(-1) rand_set[doc_id, 0] = yhat.max(-1)[1] att_set[doc_id, 0] = yhat.max(-1)[1] grad_set[doc_id, 0] = yhat.max(-1)[1] att_grad_set[doc_id, 0] = yhat.max(-1)[1] att_x_grad_set[doc_id, 0] = yhat.max(-1)[1] rows = 
torch.arange(sentences.size(0)) for _j_ in range(1,maximum): sentence_grad[rows, top_grad[:,_j_]] = 0 sentence_att[rows, top_att[:,_j_]] = 0 sentence_att_grad[rows, top_att_grad[:,_j_]] = 0 sentence_att_mul_grad[rows, top_att_mul_grad[:,_j_]] = 0 sentence_rand[rows, top_rand[:,_j_]] = 0 yhat_rand, _ = model(sentence_rand,lengths) rand_set[doc_id, _j_] = yhat_rand.max(-1)[1] yhat_att, _ = model(sentence_att,lengths) att_set[doc_id, _j_] = yhat_att.max(-1)[1] yhat_grad, _ = model(sentence_grad,lengths) grad_set[doc_id, _j_] = yhat_grad.max(-1)[1] yhat_att_grad, _ = model(sentence_att_grad,lengths) att_grad_set[doc_id, _j_] = yhat_att_grad.max(-1)[1] yhat_att_x_grad, _ = model(sentence_att_mul_grad,lengths) att_x_grad_set[doc_id, _j_] = yhat_att_x_grad.max(-1)[1] if batchi % modulo == 0 : print("Remaining: ", len(data)- batchi) docs = torch.LongTensor(docs) rand_set = rand_set[docs] att_set = att_set[docs] grad_set = grad_set[docs] att_grad_set = att_grad_set[docs] att_x_grad_set = att_x_grad_set[docs] actual_set = actual_set[docs] for _k_ in range(0,maximum): actual = actual_set.flatten().cpu().data.numpy() rand_pred = classification_report(actual, rand_set[:,_k_].cpu().data.numpy(), output_dict = True)["macro avg"]["f1-score"] att_pred = classification_report(actual, att_set[:,_k_].cpu().data.numpy(), output_dict = True)["macro avg"]["f1-score"] grad_pred = classification_report(actual, grad_set[:,_k_].cpu().data.numpy(), output_dict = True)["macro avg"]["f1-score"] att_grad_pred = classification_report(actual, att_grad_set[:,_k_].cpu().data.numpy(), output_dict = True)["macro avg"]["f1-score"] att_x_grad_pred = classification_report(actual, att_x_grad_set[:,_k_].cpu().data.numpy(), output_dict = True)["macro avg"]["f1-score"] results["random"].append(rand_pred) results["attention"].append(att_pred) results["gradient"].append(grad_pred) results["grad_attention"].append(att_grad_pred) results["grad*attention"].append(att_x_grad_pred) results = pd.DataFrame.from_dict(results) results.plot(kind = "line", figsize = (18,10)) ordering = "ascending" if largest: ordering = "descending" plt.savefig(save_path + "_correct_classified_" + ordering + ".png") results.to_csv(save_path + "_correct_classified_" + ordering + ".csv")
2.453125
2
helios/tasks.py
mattmurch/helios-server
0
1743
<filename>helios/tasks.py """ Celery queued tasks for Helios 2010-08-01 <EMAIL> """ import copy from celery import shared_task from celery.utils.log import get_logger import signals from models import CastVote, Election, Voter, VoterFile from view_utils import render_template_raw @shared_task def cast_vote_verify_and_store(cast_vote_id, status_update_message=None, **kwargs): cast_vote = CastVote.objects.get(id=cast_vote_id) result = cast_vote.verify_and_store() voter = cast_vote.voter election = voter.election user = voter.get_user() if result: # send the signal signals.vote_cast.send(sender=election, election=election, user=user, voter=voter, cast_vote=cast_vote) if status_update_message and user.can_update_status(): user.update_status(status_update_message) else: logger = get_logger(cast_vote_verify_and_store.__name__) logger.error("Failed to verify and store %d" % cast_vote_id) @shared_task def voters_email(election_id, subject_template, body_template, extra_vars={}, voter_constraints_include=None, voter_constraints_exclude=None): """ voter_constraints_include are conditions on including voters voter_constraints_exclude are conditions on excluding voters """ election = Election.objects.get(id=election_id) # select the right list of voters voters = election.voter_set.all() if voter_constraints_include: voters = voters.filter(**voter_constraints_include) if voter_constraints_exclude: voters = voters.exclude(**voter_constraints_exclude) for voter in voters: single_voter_email.delay(voter.uuid, subject_template, body_template, extra_vars) @shared_task def voters_notify(election_id, notification_template, extra_vars={}): election = Election.objects.get(id=election_id) for voter in election.voter_set.all(): single_voter_notify.delay(voter.uuid, notification_template, extra_vars) @shared_task def single_voter_email(voter_uuid, subject_template, body_template, extra_vars={}): voter = Voter.objects.get(uuid=voter_uuid) the_vars = copy.copy(extra_vars) the_vars.update({'voter': voter}) subject = render_template_raw(None, subject_template, the_vars) body = render_template_raw(None, body_template, the_vars) voter.send_message(subject, body) @shared_task def single_voter_notify(voter_uuid, notification_template, extra_vars={}): voter = Voter.objects.get(uuid=voter_uuid) the_vars = copy.copy(extra_vars) the_vars.update({'voter': voter}) notification = render_template_raw(None, notification_template, the_vars) voter.send_notification(notification) @shared_task def election_compute_tally(election_id): election = Election.objects.get(id=election_id) election.compute_tally() election_notify_admin.delay(election_id=election_id, subject="encrypted tally computed", body=""" The encrypted tally for election %s has been computed. -- Helios """ % election.name) if election.has_helios_trustee(): tally_helios_decrypt.delay(election_id=election.id) @shared_task def tally_helios_decrypt(election_id): election = Election.objects.get(id=election_id) election.helios_trustee_decrypt() election_notify_admin.delay(election_id=election_id, subject='Helios Decrypt', body=""" Helios has decrypted its portion of the tally for election %s. -- Helios """ % election.name) @shared_task def voter_file_process(voter_file_id): voter_file = VoterFile.objects.get(id=voter_file_id) voter_file.process() election_notify_admin.delay(election_id=voter_file.election.id, subject='voter file processed', body=""" Your voter file upload for election %s has been processed. %s voters have been created. 
-- Helios """ % (voter_file.election.name, voter_file.num_voters)) @shared_task def election_notify_admin(election_id, subject, body): election = Election.objects.get(id=election_id) election.admin.send_message(subject, body)
2.234375
2
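The tasks above fan work out by queueing one single_voter_email per voter with .delay() instead of sending inline. A minimal sketch of that Celery fan-out pattern, assuming a configured Celery app; the task names and the print body are placeholders, not Helios code.

from celery import shared_task

@shared_task
def notify_all(user_ids, subject, body):
    # Queue one small per-user task; only ids and strings cross the broker.
    for user_id in user_ids:
        notify_one.delay(user_id, subject, body)

@shared_task
def notify_one(user_id, subject, body):
    # Each worker resolves its own objects from the id it receives.
    print("sending %r to user %s" % (subject, user_id))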
tests/conftest.py
AlanRosenthal/virtual-dealer
1
1744
<reponame>AlanRosenthal/virtual-dealer<gh_stars>1-10 """ pytest fixtures """ import unittest.mock as mock import pytest import virtual_dealer.api @pytest.fixture(name="client") def fixture_client(): """ Client test fixture for testing flask APIs """ return virtual_dealer.api.app.test_client() @pytest.fixture(name="store") def fixture_store(): """ Mock for store::Store """ with mock.patch("virtual_dealer.api.store", autospec=True) as mock_store: yield mock_store @pytest.fixture(name="datastore") def fixture_datastore(): """ Client test fixture for testing Google's datastore APIs """ with mock.patch("virtual_dealer.store.datastore", autospec=True) as mock_datastore: yield mock_datastore @pytest.fixture(name="datastore_key") def fixture_datastore_key(): """ Datastore Key Mock """ return mock.MagicMock() @pytest.fixture(name="datastore_entity") def fixture_datastore_entity(): """ Datastore Entity Mock """ return mock.MagicMock()
2.421875
2
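A hypothetical test showing how the fixtures above compose: pytest injects them by name, the Flask test client drives a route, and the autospec mock records the call. The endpoint path and the new_game method are illustrative assumptions, not virtual-dealer's documented API.

def test_new_game_calls_store(client, store):
    # `client` is the Flask test client, `store` the patched virtual_dealer.api.store
    store.new_game.return_value = {"id": 1}   # assumes the real Store defines new_game()
    client.post("/game/new")                  # hypothetical endpoint path
    store.new_game.assert_called_once()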
corehq/apps/fixtures/tests.py
dslowikowski/commcare-hq
1
1745
from xml.etree import ElementTree from casexml.apps.case.tests.util import check_xml_line_by_line from casexml.apps.case.xml import V2 from corehq.apps.fixtures import fixturegenerators from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, \ FixtureItemField, FieldList from corehq.apps.fixtures.views import update_tables from corehq.apps.fixtures.exceptions import FixtureVersionError from corehq.apps.users.models import CommCareUser from django.test import TestCase class FixtureDataTest(TestCase): def setUp(self): self.domain = 'qwerty' self.tag = "district" self.data_type = FixtureDataType( domain=self.domain, tag=self.tag, name="Districts", fields=[ FixtureTypeField( field_name="state_name", properties=[] ), FixtureTypeField( field_name="district_name", properties=["lang"] ), FixtureTypeField( field_name="district_id", properties=[] ) ], item_attributes=[], ) self.data_type.save() self.data_item = FixtureDataItem( domain=self.domain, data_type_id=self.data_type.get_id, fields= { "state_name": FieldList( field_list=[ FixtureItemField( field_value="Delhi_state", properties={} ) ] ), "district_name": FieldList( field_list=[ FixtureItemField( field_value="Delhi_in_HIN", properties={"lang": "hin"} ), FixtureItemField( field_value="Delhi_in_ENG", properties={"lang": "eng"} ) ] ), "district_id": FieldList( field_list=[ FixtureItemField( field_value="Delhi_id", properties={} ) ] ) }, item_attributes={}, ) self.data_item.save() self.user = CommCareUser.create(self.domain, 'to_delete', '***') self.fixture_ownership = FixtureOwnership( domain=self.domain, owner_id=self.user.get_id, owner_type='user', data_item_id=self.data_item.get_id ) self.fixture_ownership.save() def tearDown(self): self.data_type.delete() self.data_item.delete() self.user.delete() self.fixture_ownership.delete() def test_xml(self): check_xml_line_by_line(self, """ <district> <state_name>Delhi_state</state_name> <district_name lang="hin">Delhi_in_HIN</district_name> <district_name lang="eng">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> """, ElementTree.tostring(self.data_item.to_xml())) def test_ownership(self): self.assertItemsEqual([self.data_item.get_id], FixtureDataItem.by_user(self.user, wrap=False)) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) fixture, = fixturegenerators.item_lists(self.user, V2) check_xml_line_by_line(self, """ <fixture id="item-list:district" user_id="%s"> <district_list> <district> <state_name>Delhi_state</state_name> <district_name lang="hin">Delhi_in_HIN</district_name> <district_name lang="eng">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> </district_list> </fixture> """ % self.user.user_id, ElementTree.tostring(fixture)) self.data_item.remove_user(self.user) self.assertItemsEqual([], self.data_item.get_all_users()) self.fixture_ownership = self.data_item.add_user(self.user) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) def test_get_indexed_items(self): with self.assertRaises(FixtureVersionError): fixtures = FixtureDataItem.get_indexed_items(self.domain, self.tag, 'state_name') delhi_id = fixtures['Delhi_state']['district_id'] self.assertEqual(delhi_id, 'Delhi_id')
1.953125
2
readthedocs/search/signals.py
agarwalrounak/readthedocs.org
10
1746
# -*- coding: utf-8 -*- """We define custom Django signals to trigger before executing searches.""" from django.db.models.signals import post_save, pre_delete from django.dispatch import receiver from django_elasticsearch_dsl.apps import DEDConfig from readthedocs.projects.models import HTMLFile, Project from readthedocs.projects.signals import bulk_post_create, bulk_post_delete from readthedocs.search.tasks import delete_objects_in_es, index_objects_to_es @receiver(bulk_post_create, sender=HTMLFile) def index_html_file(instance_list, **_): """Handle indexing from the build process.""" from readthedocs.search.documents import PageDocument kwargs = { 'app_label': HTMLFile._meta.app_label, 'model_name': HTMLFile.__name__, 'document_class': str(PageDocument), 'objects_id': [obj.id for obj in instance_list], } # Do not index if autosync is disabled globally if DEDConfig.autosync_enabled(): index_objects_to_es(**kwargs) @receiver(bulk_post_delete, sender=HTMLFile) def remove_html_file(instance_list, **_): """Remove deleted files from the build process.""" from readthedocs.search.documents import PageDocument kwargs = { 'app_label': HTMLFile._meta.app_label, 'model_name': HTMLFile.__name__, 'document_class': str(PageDocument), 'objects_id': [obj.id for obj in instance_list], } # Do not index if autosync is disabled globally if DEDConfig.autosync_enabled(): delete_objects_in_es(**kwargs) @receiver(post_save, sender=Project) def index_project_save(instance, *args, **kwargs): """ Save a Project instance based on the post_save signal.post_save. This uses Celery to do it async, replacing how django-elasticsearch-dsl does it. """ from readthedocs.search.documents import ProjectDocument kwargs = { 'app_label': Project._meta.app_label, 'model_name': Project.__name__, 'document_class': str(ProjectDocument), 'objects_id': [instance.id], } # Do not index if autosync is disabled globally if DEDConfig.autosync_enabled(): index_objects_to_es.delay(**kwargs) @receiver(pre_delete, sender=Project) def remove_project_delete(instance, *args, **kwargs): from readthedocs.search.documents import ProjectDocument kwargs = { 'app_label': Project._meta.app_label, 'model_name': Project.__name__, 'document_class': str(ProjectDocument), 'objects_id': [instance.id], } # Don't `delay` this because the objects will be deleted already if DEDConfig.autosync_enabled(): delete_objects_in_es(**kwargs)
2.078125
2
src/falconpy/_endpoint/_filevantage.py
kra-ts/falconpy
0
1747
<reponame>kra-ts/falconpy """Internal API endpoint constant library. _______ __ _______ __ __ __ | _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----. |. 1___| _| _ | | | | _ | 1___| _| _| | <| -__| |. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____| |: 1 | |: 1 | |::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy `-------' `-------' OAuth2 API - Customer SDK This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <https://unlicense.org> """ _filevantage_endpoints = [ [ "getChanges", "GET", "/filevantage/entities/changes/v2", "Retrieve information on changes", "filevantage", [ { "type": "array", "items": { "type": "string" }, "collectionFormat": "multi", "description": "Comma separated values of change ids", "name": "ids", "in": "query", "required": True } ] ], [ "queryChanges", "GET", "/filevantage/queries/changes/v2", "Returns one or more change IDs", "filevantage", [ { "minimum": 0, "type": "integer", "description": "The first change index to return in the response. " "If not provided it will default to '0'. " "Use with the `limit` parameter to manage pagination of results.", "name": "offset", "in": "query" }, { "type": "integer", "description": "The maximum number of changes to return in the response " "(default: 100; max: 500). " "Use with the `offset` parameter to manage pagination of results", "name": "limit", "in": "query" }, { "type": "string", "description": "Sort changes using options like:\n\n" "- `action_timestamp` (timestamp of the change occurrence) \n\n " "Sort either `asc` (ascending) or `desc` (descending). " "For example: `action_timestamp|asc`.\n" "The full list of allowed sorting options can be reviewed in our API documentation.", "name": "sort", "in": "query" }, { "type": "string", "description": "Filter changes using a query in Falcon Query Language (FQL). \n\n" "Common filter options include:\n\n - `host.host_name`\n - `action_timestamp`\n\n " "The full list of allowed filter parameters can be reviewed in our API documentation.", "name": "filter", "in": "query" } ] ] ]
1.125
1
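Each row of the table above follows the shape [name, method, path, description, service, params]. A small illustrative helper (not part of FalconPy) showing how such a table can be searched by operation id; the sample rows reuse the two entries shown above with their parameter lists omitted.

SAMPLE_ENDPOINTS = [
    ["getChanges", "GET", "/filevantage/entities/changes/v2",
     "Retrieve information on changes", "filevantage", []],
    ["queryChanges", "GET", "/filevantage/queries/changes/v2",
     "Returns one or more change IDs", "filevantage", []],
]

def find_endpoint(endpoints, operation_id):
    # Unpack only the fields we need; the remaining columns are ignored.
    for name, method, path, *_ in endpoints:
        if name == operation_id:
            return method, path
    raise KeyError(operation_id)

print(find_endpoint(SAMPLE_ENDPOINTS, "getChanges"))  # ('GET', '/filevantage/entities/changes/v2')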
TimeWrapper_JE/venv/Lib/site-packages/pip/_internal/cli/progress_bars.py
JE-Chen/je_old_repo
0
1748
import itertools import sys from signal import SIGINT, default_int_handler, signal from typing import Any, Dict, List from pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar from pip._vendor.progress.spinner import Spinner from pip._internal.utils.compat import WINDOWS from pip._internal.utils.logging import get_indentation from pip._internal.utils.misc import format_size try: from pip._vendor import colorama # Lots of different errors can come from this, including SystemError and # ImportError. except Exception: colorama = None def _select_progress_class(preferred, fallback): # type: (Bar, Bar) -> Bar encoding = getattr(preferred.file, "encoding", None) # If we don't know what encoding this file is in, then we'll just assume # that it doesn't support unicode and use the ASCII bar. if not encoding: return fallback # Collect all of the possible characters we want to use with the preferred # bar. characters = [ getattr(preferred, "empty_fill", ""), getattr(preferred, "fill", ""), ] characters += list(getattr(preferred, "phases", [])) # Try to decode the characters we're using for the bar using the encoding # of the given file, if this works then we'll assume that we can use the # fancier bar and if not we'll fall back to the plaintext bar. try: "".join(characters).encode(encoding) except UnicodeEncodeError: return fallback else: return preferred _BaseBar = _select_progress_class(IncrementalBar, Bar) # type: Any class InterruptibleMixin: """ Helper to ensure that self.finish() gets called on keyboard interrupt. This allows downloads to be interrupted without leaving temporary state (like hidden cursors) behind. This class is similar to the progress library's existing SigIntMixin helper, but as of version 1.2, that helper has the following problems: 1. It calls sys.exit(). 2. It discards the existing SIGINT handler completely. 3. It leaves its own handler in place even after an uninterrupted finish, which will have unexpected delayed effects if the user triggers an unrelated keyboard interrupt some time after a progress-displaying download has already completed, for example. """ def __init__(self, *args, **kwargs): # type: (List[Any], Dict[Any, Any]) -> None """ Save the original SIGINT handler for later. """ # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore self.original_handler = signal(SIGINT, self.handle_sigint) # If signal() returns None, the previous handler was not installed from # Python, and we cannot restore it. This probably should not happen, # but if it does, we must restore something sensible instead, at least. # The least bad option should be Python's default SIGINT handler, which # just raises KeyboardInterrupt. if self.original_handler is None: self.original_handler = default_int_handler def finish(self): # type: () -> None """ Restore the original SIGINT handler after finishing. This should happen regardless of whether the progress display finishes normally, or gets interrupted. """ super().finish() # type: ignore signal(SIGINT, self.original_handler) def handle_sigint(self, signum, frame): # type: ignore """ Call self.finish() before delegating to the original SIGINT handler. This handler should only be in place while the progress display is active. 
""" self.finish() self.original_handler(signum, frame) class SilentBar(Bar): def update(self): # type: () -> None pass class BlueEmojiBar(IncrementalBar): suffix = "%(percent)d%%" bar_prefix = " " bar_suffix = " " phases = ("\U0001F539", "\U0001F537", "\U0001F535") class DownloadProgressMixin: def __init__(self, *args, **kwargs): # type: (List[Any], Dict[Any, Any]) -> None # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore self.message = (" " * (get_indentation() + 2)) + self.message # type: str @property def downloaded(self): # type: () -> str return format_size(self.index) # type: ignore @property def download_speed(self): # type: () -> str # Avoid zero division errors... if self.avg == 0.0: # type: ignore return "..." return format_size(1 / self.avg) + "/s" # type: ignore @property def pretty_eta(self): # type: () -> str if self.eta: # type: ignore return f"eta {self.eta_td}" # type: ignore return "" def iter(self, it): # type: ignore for x in it: yield x # B305 is incorrectly raised here # https://github.com/PyCQA/flake8-bugbear/issues/59 self.next(len(x)) # noqa: B305 self.finish() class WindowsMixin: def __init__(self, *args, **kwargs): # type: (List[Any], Dict[Any, Any]) -> None # The Windows terminal does not support the hide/show cursor ANSI codes # even with colorama. So we'll ensure that hide_cursor is False on # Windows. # This call needs to go before the super() call, so that hide_cursor # is set in time. The base progress bar class writes the "hide cursor" # code to the terminal in its init, so if we don't set this soon # enough, we get a "hide" with no corresponding "show"... if WINDOWS and self.hide_cursor: # type: ignore self.hide_cursor = False # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore # Check if we are running on Windows and we have the colorama module, # if we do then wrap our file with it. if WINDOWS and colorama: self.file = colorama.AnsiToWin32(self.file) # type: ignore # The progress code expects to be able to call self.file.isatty() # but the colorama.AnsiToWin32() object doesn't have that, so we'll # add it. self.file.isatty = lambda: self.file.wrapped.isatty() # The progress code expects to be able to call self.file.flush() # but the colorama.AnsiToWin32() object doesn't have that, so we'll # add it. 
self.file.flush = lambda: self.file.wrapped.flush() class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, DownloadProgressMixin): file = sys.stdout message = "%(percent)d%%" suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s" class DefaultDownloadProgressBar(BaseDownloadProgressBar, _BaseBar): pass class DownloadSilentBar(BaseDownloadProgressBar, SilentBar): pass class DownloadBar(BaseDownloadProgressBar, Bar): pass class DownloadFillingCirclesBar(BaseDownloadProgressBar, FillingCirclesBar): pass class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, BlueEmojiBar): pass class DownloadProgressSpinner( WindowsMixin, InterruptibleMixin, DownloadProgressMixin, Spinner ): file = sys.stdout suffix = "%(downloaded)s %(download_speed)s" def next_phase(self): # type: () -> str if not hasattr(self, "_phaser"): self._phaser = itertools.cycle(self.phases) return next(self._phaser) def update(self): # type: () -> None message = self.message % self phase = self.next_phase() suffix = self.suffix % self line = "".join( [ message, " " if message else "", phase, " " if suffix else "", suffix, ] ) self.writeln(line) BAR_TYPES = { "off": (DownloadSilentBar, DownloadSilentBar), "on": (DefaultDownloadProgressBar, DownloadProgressSpinner), "ascii": (DownloadBar, DownloadProgressSpinner), "pretty": (DownloadFillingCirclesBar, DownloadProgressSpinner), "emoji": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner), } def DownloadProgressProvider(progress_bar, max=None): # type: ignore if max is None or max == 0: return BAR_TYPES[progress_bar][1]().iter else: return BAR_TYPES[progress_bar][0](max=max).iter
2.09375
2
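_select_progress_class above keeps the fancy bar only when the output stream's encoding can encode every character the bar uses. A stand-alone sketch of that probe; the sample characters are placeholders rather than pip's exact phase set.

import sys

def supports_chars(stream, characters):
    # An unknown encoding is treated as "ASCII only", mirroring the fallback above.
    encoding = getattr(stream, "encoding", None)
    if not encoding:
        return False
    try:
        "".join(characters).encode(encoding)
    except UnicodeEncodeError:
        return False
    return True

phases = ("\u2588", "\u2592", " ")
print("unicode bar" if supports_chars(sys.stdout, phases) else "ascii bar")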
scripts/study_case/ID_5/matchzoo/auto/tuner/tune.py
kzbnb/numerical_bugs
8
1749
import typing import numpy as np import scripts.study_case.ID_5.matchzoo as mz from scripts.study_case.ID_5.matchzoo.engine.base_metric import BaseMetric from .tuner import Tuner def tune( params: 'mz.ParamTable', optimizer: str = 'adam', trainloader: mz.dataloader.DataLoader = None, validloader: mz.dataloader.DataLoader = None, embedding: np.ndarray = None, fit_kwargs: dict = None, metric: typing.Union[str, BaseMetric] = None, mode: str = 'maximize', num_runs: int = 10, verbose=1 ): """ Tune model hyper-parameters. A simple shorthand for using :class:`matchzoo.auto.Tuner`. `model.params.hyper_space` reprensents the model's hyper-parameters search space, which is the cross-product of individual hyper parameter's hyper space. When a `Tuner` builds a model, for each hyper parameter in `model.params`, if the hyper-parameter has a hyper-space, then a sample will be taken in the space. However, if the hyper-parameter does not have a hyper-space, then the default value of the hyper-parameter will be used. See `tutorials/model_tuning.ipynb` for a detailed walkthrough on usage. :param params: A completed parameter table to tune. Usually `model.params` of the desired model to tune. `params.completed()` should be `True`. :param optimizer: Str or `Optimizer` class. Optimizer for optimizing model. :param trainloader: Training data to use. Should be a `DataLoader`. :param validloader: Testing data to use. Should be a `DataLoader`. :param embedding: Embedding used by model. :param fit_kwargs: Extra keyword arguments to pass to `fit`. (default: `dict(epochs=10, verbose=0)`) :param metric: Metric to tune upon. Must be one of the metrics in `model.params['task'].metrics`. (default: the first metric in `params.['task'].metrics`. :param mode: Either `maximize` the metric or `minimize` the metric. (default: 'maximize') :param num_runs: Number of runs. Each run takes a sample in `params.hyper_space` and build a model based on the sample. (default: 10) :param callbacks: A list of callbacks to handle. Handled sequentially at every callback point. :param verbose: Verbosity. (default: 1) Example: >>> import scripts.study_case.ID_5.matchzoo as mz >>> import numpy as np >>> train = mz.datasets.toy.load_data('train') >>> valid = mz.datasets.toy.load_data('dev') >>> prpr = mz.models.DenseBaseline.get_default_preprocessor() >>> train = prpr.fit_transform(train, verbose=0) >>> valid = prpr.transform(valid, verbose=0) >>> trainset = mz.dataloader.Dataset(train) >>> validset = mz.dataloader.Dataset(valid) >>> padding = mz.models.DenseBaseline.get_default_padding_callback() >>> trainloader = mz.dataloader.DataLoader(trainset, callback=padding) >>> validloader = mz.dataloader.DataLoader(validset, callback=padding) >>> model = mz.models.DenseBaseline() >>> model.params['task'] = mz.tasks.Ranking() >>> optimizer = 'adam' >>> embedding = np.random.uniform(-0.2, 0.2, ... (prpr.context['vocab_size'], 100)) >>> tuner = mz.auto.Tuner( ... params=model.params, ... optimizer=optimizer, ... trainloader=trainloader, ... validloader=validloader, ... embedding=embedding, ... num_runs=1, ... verbose=0 ... ) >>> results = tuner.tune() >>> sorted(results['best'].keys()) ['#', 'params', 'sample', 'score'] """ tuner = Tuner( params=params, optimizer=optimizer, trainloader=trainloader, validloader=validloader, embedding=embedding, fit_kwargs=fit_kwargs, metric=metric, mode=mode, num_runs=num_runs, verbose=verbose ) return tuner.tune()
2.765625
3
libs/gym/tests/wrappers/test_pixel_observation.py
maxgold/icml22
0
1750
<reponame>maxgold/icml22 """Tests for the pixel observation wrapper.""" from typing import Optional import pytest import numpy as np import gym from gym import spaces from gym.wrappers.pixel_observation import PixelObservationWrapper, STATE_KEY class FakeEnvironment(gym.Env): def __init__(self): self.action_space = spaces.Box(shape=(1,), low=-1, high=1, dtype=np.float32) def render(self, width=32, height=32, *args, **kwargs): del args del kwargs image_shape = (height, width, 3) return np.zeros(image_shape, dtype=np.uint8) def reset(self, seed: Optional[int] = None): super().reset(seed=seed) observation = self.observation_space.sample() return observation def step(self, action): del action observation = self.observation_space.sample() reward, terminal, info = 0.0, False, {} return observation, reward, terminal, info class FakeArrayObservationEnvironment(FakeEnvironment): def __init__(self, *args, **kwargs): self.observation_space = spaces.Box( shape=(2,), low=-1, high=1, dtype=np.float32 ) super(FakeArrayObservationEnvironment, self).__init__(*args, **kwargs) class FakeDictObservationEnvironment(FakeEnvironment): def __init__(self, *args, **kwargs): self.observation_space = spaces.Dict( { "state": spaces.Box(shape=(2,), low=-1, high=1, dtype=np.float32), } ) super(FakeDictObservationEnvironment, self).__init__(*args, **kwargs) class TestPixelObservationWrapper(object): @pytest.mark.parametrize("pixels_only", (True, False)) def test_dict_observation(self, pixels_only): pixel_key = "rgb" env = FakeDictObservationEnvironment() # Make sure we are testing the right environment for the test. observation_space = env.observation_space assert isinstance(observation_space, spaces.Dict) width, height = (320, 240) # The wrapper should only add one observation. wrapped_env = PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only, render_kwargs={pixel_key: {"width": width, "height": height}}, ) assert isinstance(wrapped_env.observation_space, spaces.Dict) if pixels_only: assert len(wrapped_env.observation_space.spaces) == 1 assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key] else: assert ( len(wrapped_env.observation_space.spaces) == len(observation_space.spaces) + 1 ) expected_keys = list(observation_space.spaces.keys()) + [pixel_key] assert list(wrapped_env.observation_space.spaces.keys()) == expected_keys # Check that the added space item is consistent with the added observation. 
observation = wrapped_env.reset() rgb_observation = observation[pixel_key] assert rgb_observation.shape == (height, width, 3) assert rgb_observation.dtype == np.uint8 @pytest.mark.parametrize("pixels_only", (True, False)) def test_single_array_observation(self, pixels_only): pixel_key = "depth" env = FakeArrayObservationEnvironment() observation_space = env.observation_space assert isinstance(observation_space, spaces.Box) wrapped_env = PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only ) wrapped_env.observation_space = wrapped_env.observation_space assert isinstance(wrapped_env.observation_space, spaces.Dict) if pixels_only: assert len(wrapped_env.observation_space.spaces) == 1 assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key] else: assert len(wrapped_env.observation_space.spaces) == 2 assert list(wrapped_env.observation_space.spaces.keys()) == [ STATE_KEY, pixel_key, ] observation = wrapped_env.reset() depth_observation = observation[pixel_key] assert depth_observation.shape == (32, 32, 3) assert depth_observation.dtype == np.uint8 if not pixels_only: assert isinstance(observation[STATE_KEY], np.ndarray)
2.21875
2
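A hedged usage sketch mirroring the fakes above: wrap a tiny env whose render() returns an RGB array and read both entries of the dict observation. TinyEnv is a stand-in defined here, and the exact wrapper behaviour varies across gym releases.

import numpy as np
import gym
from gym import spaces
from gym.wrappers.pixel_observation import PixelObservationWrapper

class TinyEnv(gym.Env):
    observation_space = spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32)
    action_space = spaces.Box(low=-1, high=1, shape=(1,), dtype=np.float32)

    def reset(self):
        return self.observation_space.sample()

    def step(self, action):
        return self.observation_space.sample(), 0.0, False, {}

    def render(self, *args, **kwargs):
        return np.zeros((48, 64, 3), dtype=np.uint8)

env = PixelObservationWrapper(TinyEnv(), pixels_only=False)
obs = env.reset()
print(sorted(obs))          # ['pixels', 'state'] with the default pixel key
print(obs["pixels"].shape)  # (48, 64, 3)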
real_plot_fft_stft_impl.py
MuAuan/Scipy-Swan
0
1751
<reponame>MuAuan/Scipy-Swan<filename>real_plot_fft_stft_impl.py<gh_stars>0
import pyaudio
import wave
from scipy.fftpack import fft, ifft
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy import signal
from swan import pycwt

CHUNK = 1024
FORMAT = pyaudio.paInt16  # int16 format
CHANNELS = 1              # 1: mono, 2: stereo
RATE = 22100              # 22.1kHz 44.1kHz
RECORD_SECONDS = 5        # record 5 seconds
WAVE_OUTPUT_FILENAME = "output2.wav"

p = pyaudio.PyAudio()

stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK)
s = 1
# initialize the figure
fig = plt.figure(figsize=(12, 10))
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
ax2.axis([0, 5, 200, 20000])
ax2.set_yscale('log')

while True:
    fig.delaxes(ax1)
    fig.delaxes(ax3)
    ax1 = fig.add_subplot(311)
    ax3 = fig.add_subplot(313)
    print("* recording")
    frames = []
    for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        data = stream.read(CHUNK)
        frames.append(data)
    print("* done recording")

    wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()

    wavfile = WAVE_OUTPUT_FILENAME
    wr = wave.open(wavfile, "rb")
    ch = CHANNELS                      # wr.getnchannels()
    width = p.get_sample_size(FORMAT)  # wr.getsampwidth()
    fr = RATE                          # wr.getframerate()
    fn = wr.getnframes()
    fs = fn / fr
    origin = wr.readframes(wr.getnframes())
    data = origin[:fn]
    wr.close()
    sig = np.frombuffer(data, dtype="int16") / 32768.0

    # np.linspace needs an integer sample count, so use floor division here
    t = np.linspace(0, fs, fn // 2, endpoint=False)
    ax1.axis([0, 5, -0.0075, 0.0075])
    ax1.plot(t, sig)

    nperseg = 256
    f, t, Zxx = signal.stft(sig, fs=fs * fn / 50, nperseg=nperseg)
    ax2.pcolormesh(t, 5 * f, np.abs(Zxx), cmap='hsv')

    freq = fft(sig, int(fn / 2))
    Pyy = np.sqrt(freq * freq.conj()) * 2 / fn
    f = np.arange(int(fn / 2))
    ax3.axis([200, 20000, 0, 0.000075])
    ax3.set_xscale('log')
    ax3.plot(f, Pyy)

    plt.pause(1)
    plt.savefig('figure' + str(s) + '.png')
    s += 1
2.3125
2
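The same analysis can be checked offline without a microphone. A minimal sketch using a synthetic two-tone signal: scipy.signal.stft for the spectrogram and numpy's rfft for the spectrum peak.

import numpy as np
from scipy import signal

fs = 22050
t = np.arange(0, 2.0, 1 / fs)
x = 0.5 * np.sin(2 * np.pi * 440 * t) + 0.2 * np.sin(2 * np.pi * 1000 * t)

f, seg_t, Zxx = signal.stft(x, fs=fs, nperseg=256)
print(Zxx.shape)                   # (frequency bins, time frames)

spectrum = np.abs(np.fft.rfft(x)) * 2 / len(x)
freqs = np.fft.rfftfreq(len(x), d=1 / fs)
print(freqs[np.argmax(spectrum)])  # ~440 Hz, the stronger component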
tests/pydecompile-test/baselines/events_in_code_blocks.py
gengxf0505/pxt
1
1752
#/ <reference path="./testBlocks/mb.ts" /> def function_0(): basic.showNumber(7) basic.forever(function_0)
1.3125
1
PID/PDControl.py
l756302098/ros_practice
0
1753
<filename>PID/PDControl.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import random
import numpy as np
import matplotlib.pyplot as plt


class Robot(object):
    def __init__(self, length=20.0):
        """
        Creates a robot and initializes location/orientation to 0, 0, 0.
        """
        self.x = 0.0
        self.y = 0.0
        self.orientation = 0.0
        self.length = length
        self.steering_noise = 0.0
        self.distance_noise = 0.0
        self.steering_drift = 0.0

    def set(self, x, y, orientation):
        """
        Sets a robot coordinate.
        """
        self.x = x
        self.y = y
        self.orientation = orientation % (2.0 * np.pi)

    def set_noise(self, steering_noise, distance_noise):
        """
        Sets the noise parameters.
        """
        # makes it possible to change the noise parameters
        # this is often useful in particle filters
        self.steering_noise = steering_noise
        self.distance_noise = distance_noise

    def set_steering_drift(self, drift):
        """
        Sets the systematical steering drift parameter
        """
        self.steering_drift = drift

    def move(self, steering, distance, tolerance=0.001, max_steering_angle=np.pi / 4.0):
        """
        steering = front wheel steering angle, limited by max_steering_angle
        distance = total distance driven, must be non-negative
        """
        if steering > max_steering_angle:
            steering = max_steering_angle
        if steering < -max_steering_angle:
            steering = -max_steering_angle
        if distance < 0.0:
            distance = 0.0

        # apply noise
        steering2 = random.gauss(steering, self.steering_noise)
        distance2 = random.gauss(distance, self.distance_noise)

        # apply steering drift
        steering2 += self.steering_drift

        # Execute motion
        turn = np.tan(steering2) * distance2 / self.length

        if abs(turn) < tolerance:
            # approximate by straight line motion
            self.x += distance2 * np.cos(self.orientation)
            self.y += distance2 * np.sin(self.orientation)
            self.orientation = (self.orientation + turn) % (2.0 * np.pi)
        else:
            # approximate bicycle model for motion
            radius = distance2 / turn
            cx = self.x - (np.sin(self.orientation) * radius)
            cy = self.y + (np.cos(self.orientation) * radius)
            self.orientation = (self.orientation + turn) % (2.0 * np.pi)
            self.x = cx + (np.sin(self.orientation) * radius)
            self.y = cy - (np.cos(self.orientation) * radius)

    def __repr__(self):
        return '[x=%.5f y=%.5f orient=%.5f]' % (self.x, self.y, self.orientation)


def run_p(robot, tau, n=100, speed=1.0):
    x_trajectory = []
    y_trajectory = []
    for i in range(n):
        cte = robot.y
        steer = -tau * cte
        robot.move(steer, speed)
        x_trajectory.append(robot.x)
        y_trajectory.append(robot.y)
    return x_trajectory, y_trajectory


robot = Robot()
robot.set(0, 1, 0)
robot.set_noise(0.1, 0.05)


def run(robot, tau_p, tau_d, n=100, speed=1.0):
    x_trajectory = []
    y_trajectory = []
    # steering = -tau_p * CTE - tau_d * diff_CTE
    crosstrack_error = []
    crosstrack_error.append(0.0)
    diff_CTE = 0.0
    startX = robot.x
    startY = robot.y
    startOrientation = robot.orientation
    distance = 0.0
    for i in range(n):
        steering = -tau_p * crosstrack_error[i] - tau_d * diff_CTE
        distance = speed
        robot.move(steering, distance)
        x_trajectory.append(robot.x)
        y_trajectory.append(robot.y)
        # when on the original path, x = robot.x, calculate y.
        x1 = robot.x
        y1 = startY + (x1 - startX) * np.tan(startOrientation)
        crosstrack = (robot.y - y1) * np.cos(startOrientation)
        crosstrack_error.append(crosstrack)
        diff_CTE = crosstrack_error[i + 1] - crosstrack_error[i]
        print("{} [{}, {}] {}, {}".format(i, robot.x, robot.y, steering, crosstrack))
    return x_trajectory, y_trajectory


x_trajectory, y_trajectory = run(robot, 0.1, 1.0)
n = len(x_trajectory)

fig, ax1 = plt.subplots(1, 1, figsize=(8, 8))
ax1.plot(x_trajectory, y_trajectory, 'g', label='PD controller')
ax1.plot(x_trajectory, np.zeros(n), 'r', label='reference')
plt.show()
3.328125
3
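A tiny numeric illustration of the steering law used in run() above, steer = -tau_p * CTE - tau_d * diff_CTE, on a deliberately crude model where the robot's y offset is the cross-track error; the gains and kinematics here are illustrative, not the ones tuned for the Robot class.

tau_p, tau_d, speed = 0.1, 0.5, 1.0
y, heading = 1.0, 0.0        # start one unit off the reference line
prev_cte = y
for step in range(5):
    cte = y
    steer = -tau_p * cte - tau_d * (cte - prev_cte)
    heading += steer         # crude kinematics, enough to show the trend
    y += speed * heading
    prev_cte = cte
    print("step %d: cte=%.3f steer=%.3f" % (step, cte, steer))
# cte shrinks step by step (1.000, 0.900, 0.760, 0.614, 0.480) instead of overshooting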
torchvision/datasets/samplers/__init__.py
yoshitomo-matsubara/vision
12,063
1754
<gh_stars>1000+ from .clip_sampler import DistributedSampler, UniformClipSampler, RandomClipSampler __all__ = ("DistributedSampler", "UniformClipSampler", "RandomClipSampler")
1.140625
1
Pyshare2019/02 - if + Nesteed if/Nesteed-IF.py
suhaili99/python-share
4
1755
name = input("masukkan nama pembeli = ") alamat= input("Alamat = ") NoTelp = input("No Telp = ") print("\n") print("=================INFORMASI HARGA MOBIL DEALER JAYA ABADI===============") print("Pilih Jenis Mobil :") print("\t 1.Daihatsu ") print("\t 2.Honda ") print("\t 3.Toyota ") print("") pilihan = int(input("Pilih jenis mobil yang ingin dibeli : ")) print("") if (pilihan==1): print("<<<<<<<< Macam macam mobil pada Daihatsu >>>>>>>>>") print("\ta.Grand New Xenia") print("\tb.All New Terios") print("\tc.New Ayla") Pilih1 = input("Mana yang ingin anda pilih ?? = ") if(Pilih1 == "a"): print("Harga mobil Grand New Xenia adalah 183 juta ") elif(Pilih1== "b"): print("Harga mobil All New Terios adalah 215 juta") elif(Pilih1== "c"): print("Harga mobil New Ayla adalah 110 juta") else: print("Tidak terdefinisi") elif (pilihan==2): print("<<<<<<<< Macam macam mobil pada Honda >>>>>>>>>") print("\ta.Honda Brio Satya S") print("\tb.Honda Jazz ") print("\tb.Honda Mobilio ") pilih2 = input("Mana yang ingin anda pilih??") if(pilih2=="a"): print("Harga mobil HOnda Brio Satya S adalah 131 juta") elif(pilih2=="b"): print("Harga mobil Honda Jazz adalah 232 juta") elif(pilih2=="c"): print("Harga mobil Honda mobilio adalah 189 juta") else: print("Tidak terdefinisi") elif (pilihan==3): print("<<<<<<<< Macam macam mobil pada Toyota>>>>>>>>?") print("\ta.Alphard") print("\tb.Camry") print("\tc.Fortuner") pilih3 = input("Mana yang ingin anda pilih??") if (pilih3=="a"): print("Harga mobil Alphard adalah 870 juta") elif (pilih3=="b"): print("Harga mobil Camry adalah 560 Juta") elif (pilih3=="c"): print("Harga mobil Fortuner adalah 492 Juta")
3.875
4
oneflow/python/test/ops/test_l1loss.py
wanghongsheng01/framework_enflame
2
1756
<gh_stars>1-10 """ Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow as flow import numpy as np import oneflow.typing as tp from test_util import GenArgList import unittest from collections import OrderedDict from typing import Dict import os def _compare_l1loss_with_np( input_shape, target_shape, device_type, machine_ids, device_counts ): input = np.random.random(size=input_shape).astype(np.float32) target = np.random.random(size=target_shape).astype(np.float32) assert device_type in ["cpu", "gpu"] func_config = flow.FunctionConfig() flow.clear_default_session() if device_type == "cpu": flow.config.cpu_device_num(device_counts) else: flow.config.gpu_device_num(device_counts) func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids)) func_config.default_logical_view(flow.scope.consistent_view()) def np_l1loss(np_input, np_target): np_l1 = np.abs(np_target - np_input) np_l1_mean = np.mean(np_l1) np_l1_sum = np.sum(np_l1) np_l1_dict = { "np_l1_loss": np_l1, "np_l1_loss_mean": np_l1_mean, "np_l1_loss_sum": np_l1_sum, } return np_l1_dict def np_l1_loss_diff(np_input, np_target): # Use numpy to compute diff original_shape = np_target.shape elemcnt = np_target.size prediction = np_input.reshape(-1) label = np_target.reshape(-1) prediction_grad = np.zeros((elemcnt)).astype(prediction.dtype) for i in np.arange(elemcnt): diff = prediction[i] - label[i] prediction_grad[i] = np.sign(diff) grad_mean = prediction_grad.reshape(original_shape) / elemcnt # TODO: if you want to get the grad when the reduction = "sum", you can use the follow code # grad_sum = prediction_grad.reshape(original_shape) grad_dict = { "np_grad_mean": grad_mean, } return grad_dict # Use Numpy to compute l1 loss np_out_l1loss_dict = np_l1loss(input, target) # Use Numpy to compute l1 grad np_grad_dict = np_l1_loss_diff(input, target) def assert_prediction_grad(blob: tp.Numpy): # Evaluate the gradient. 
Here we only test the reduction type == "mean" assert np.allclose(blob, np_grad_dict["np_grad_mean"]) @flow.global_function(type="train", function_config=func_config) def oneflow_l1loss( of_input: tp.Numpy.Placeholder(shape=input.shape), of_target: tp.Numpy.Placeholder(shape=target.shape), ) -> Dict[str, tp.Numpy]: with flow.scope.placement(device_type, "0:0"): v = flow.get_variable( shape=target.shape, dtype=flow.float32, initializer=flow.constant_initializer(0), name="v", ) x_var = of_input + v # watch the diff flow.watch_diff(x_var, assert_prediction_grad) l1loss = flow.nn.L1Loss(x_var, of_target, reduction="none", name="of_l1loss") l1loss_mean = flow.nn.L1Loss( x_var, of_target, reduction="mean", name="of_l1loss_mean" ) l1loss_sum = flow.nn.L1Loss( x_var, of_target, reduction="sum", name="of_l1loss_sum" ) with flow.scope.placement(device_type, "0:0"): # We only test reduction="mean" diff flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0 ).minimize(l1loss_mean) return { "of_l1_loss": l1loss, "of_l1_loss_mean": l1loss_mean, "of_l1_loss_sum": l1loss_sum, } of_out_l1loss_dict = oneflow_l1loss(input, target) assert np.allclose( of_out_l1loss_dict["of_l1_loss"], np_out_l1loss_dict["np_l1_loss"] ) assert np.allclose( of_out_l1loss_dict["of_l1_loss_mean"][0], np_out_l1loss_dict["np_l1_loss_mean"] ) assert np.allclose( of_out_l1loss_dict["of_l1_loss_sum"][0], np_out_l1loss_dict["np_l1_loss_sum"] ) def _gen_arg_dict(shape, device_type, machine_ids, device_counts): # Generate a dict to pass parameter to test case arg_dict = OrderedDict() arg_dict["input_shape"] = [shape] arg_dict["target_shape"] = [shape] arg_dict["device_type"] = [device_type] arg_dict["machine_ids"] = [machine_ids] arg_dict["device_counts"] = [device_counts] return arg_dict @flow.unittest.skip_unless_1n1d() class Testl1loss1n1d(flow.unittest.TestCase): def test_l1loss_cpu(test_case): arg_dict = _gen_arg_dict( shape=(16, 3), device_type="cpu", machine_ids="0:0", device_counts=1 ) for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_l1loss_gpu(test_case): arg_dict = _gen_arg_dict( shape=(3, 16, 32), device_type="gpu", machine_ids="0:0", device_counts=1 ) for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @flow.unittest.skip_unless_1n2d() class Testl1loss1n2d(flow.unittest.TestCase): @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_l1loss_gpu_1n2d(test_case): arg_dict = _gen_arg_dict( shape=(3, 32, 16), device_type="gpu", machine_ids="0:0-1", device_counts=2 ) for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) if __name__ == "__main__": unittest.main()
2.21875
2
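A NumPy-only restatement of the reference math verified above: the mean L1 loss and its gradient with respect to the prediction, sign(pred - target) / N.

import numpy as np

pred = np.array([[0.2, 1.5], [3.0, -1.0]], dtype=np.float32)
target = np.array([[0.0, 2.0], [3.0, 1.0]], dtype=np.float32)

loss_mean = np.mean(np.abs(pred - target))
grad_mean = np.sign(pred - target) / pred.size

print(loss_mean)   # (0.2 + 0.5 + 0.0 + 2.0) / 4 = 0.675
print(grad_mean)   # entries are +/-0.25, and 0 where pred == target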
tests/test_schema.py
Dog-Egg/dida
0
1757
import unittest import datetime from dida import schemas, triggers from marshmallow import ValidationError class TestTriggerSchema(unittest.TestCase): def test_dump_trigger(self): result = schemas.TriggerSchema().dump(triggers.IntervalTrigger()) print('IntervalTrigger dump:', result) result = schemas.TriggerSchema().dump(triggers.DateTrigger()) print('DateTrigger dump:', result) def test_load_trigger(self): self.assertRaises(ValidationError, schemas.TriggerSchema().load, {"type": "unknown"}) obj = schemas.TriggerSchema().load({'type': "interval"}) self.assertIsInstance(obj, triggers.IntervalTrigger) obj = schemas.TriggerSchema().load({'type': 'date', "params": {'run_date': "2020-01-01 00:00:00"}}) self.assertEqual(obj.run_date, datetime.datetime(2020, 1, 1).astimezone())
2.625
3
apps/content/views.py
Sunbird-Ed/evolve-api
1
1758
<filename>apps/content/views.py from django.shortcuts import render from rest_framework import status from rest_framework.generics import ( ListAPIView, ListCreateAPIView, ListAPIView, RetrieveUpdateAPIView,) from rest_framework.response import Response from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import permission_classes from apps.configuration.models import Book from apps.hardspot.models import HardSpot from .models import Content,ContentContributors from .serializers import ( ContentListSerializer, BookNestedSerializer, BookListSerializer, ContentStatusListSerializer, SectionKeywordSerializer, SubSectionKeywordSerializer, SectionKeywordsSerializer, ChapterKeywordsSerializer, SubSectionKeywordsSerializer, KeywordSerializer, ContentContributorSerializer, ApprovedContentSerializer, ContentStatusSerializer, HardSpotCreateSerializer, ContentContributorsSerializer, SubSubSectionKeywordsSerializer, ContentStatusSerializerFileFormat, ) from django.utils.decorators import method_decorator from django.contrib.auth.decorators import permission_required from rest_framework.parsers import MultiPartParser from apps.dataupload.models import (Chapter, Section, SubSection, ChapterKeyword, SectionKeyword, SubSectionKeyword, SubSubSectionKeyword, ) import json import pandas as pd from evolve import settings from evolve import settings from azure.storage.blob import ( BlockBlobService, ContainerPermissions ) from datetime import datetime, timedelta import os import itertools from django.db.models import Q import threading account_name = settings.AZURE_ACCOUNT_NAME account_key = settings.AZURE_ACCOUNT_KEY CONTAINER_NAME= settings.AZURE_CONTAINER block_blob_service = BlockBlobService(account_name=account_name, account_key=account_key) class ContentList(ListCreateAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer parser_classes = (MultiPartParser,) def get(self, request): try: queryset = self.get_queryset() serializer = ContentStatusListSerializer(queryset, many=True) context = {"success": True, "message": "Chapter List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Chapter list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def post(self, request,format=None): try: serializer = ContentListSerializer(data=request.data) if serializer.is_valid(): serializer.save() context = {"success": True, "message": "Created Successful", "data": serializer.data} return Response(context, status=status.HTTP_200_OK) context = {"success": False, "message": "Invalid Input Data to create content"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error: context = {'success': "false", 'message': 'Failed to create content.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentRetrieveUpdate(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer def get(self, request): try: queryset = self.get_object() serializer = ContentListSerializer(queryset, many=True) context = {"success": True, "message": "Chapter List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def put(self, request, pk, 
format=None): try: try: content_list = self.get_object() except Exception as error: context = {'success': "false", 'message': 'content Id does not exist.'} return Response(context, status=status.HTTP_404_NOT_FOUND) serializer = ContentListSerializer(content_list, data=request.data, context={"user":request.user}, partial=True) if serializer.is_valid(): serializer.save() context = {"success": True, "message": "Updation Successful","data": serializer.data} return Response(context, status=status.HTTP_200_OK) context = {"success": False, "message": "Updation Failed"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error: context = {'success': "false", 'message': 'Failed To Update content Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookNestedList(ListAPIView): queryset = Book.objects.all() serializer_class = BookNestedSerializer def get(self, request): try: subject = request.query_params.get('subject', None) if subject is not None: queryset=self.get_queryset().filter(subject__id=subject, content_only=True) else: queryset = self.get_queryset().filter(content_only=True) serializer = BookNestedSerializer(queryset, many=True) context = {"success": True, "message": "Conetent List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookListView(ListAPIView): queryset = Book.objects.all() serializer_class = BookListSerializer def get(self, request): try: subject = request.query_params.get('subject', None) if subject is not None: queryset=self.get_queryset().filter(subject__id=subject) else: queryset = self.get_queryset() serializer = BookListSerializer(queryset, many=True) context = {"success": True, "message": "Content List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Conetent list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentApprovedList(ListAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer def get(self, request): try: chapter_id = request.query_params.get('chapter', None) section_id = request.query_params.get('section', None) sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=True) elif section_id is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=True) elif sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=True) elif sub_sub_section_id is not None: queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=True) else: queryset = self.get_queryset().filter(approved=True) serializer = KeywordSerializer(queryset, many=True) context = {"success": True, "message": "Content Approved List", "data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Content Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentPendingList(ListAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer def 
get(self, request): try: chapter_id = request.query_params.get('chapter', None) section_id = request.query_params.get('section', None) sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False, approved_by=None) elif section_id is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=False, approved_by=None) elif sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False, approved_by=None) elif sub_sub_section_id is not None: queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=False,approved_by=None) else: queryset = self.get_queryset().filter(approved=False, approved_by=None) serializer = KeywordSerializer(queryset, many=True) context = {"success": True, "message": "Content Pending List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Content Pending list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusList(ListCreateAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer def get(self, request): try: if request.query_params.get('chapter', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('chapter', None)) elif request.query_params.get('section', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('section', None)) elif request.query_params.get('section', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('sub_section', None)) else: queryset = self.get_queryset() serializer = ContentListSerializer(queryset, many=True) context = {"success": True, "message": "Content Status List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Content Status list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentRejectedList(ListAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer def get(self, request): try: chapter_id = request.query_params.get('chapter', None) section_id = request.query_params.get('section', None) sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False).exclude(approved_by=None) elif section_id is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=False).exclude(approved_by=None) elif sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False).exclude(approved_by=None) elif sub_sub_section_id is not None: queryset =self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id , approved = False).exclude(approved_by=None) else: queryset = self.get_queryset().filter(approved=False).exclude(approved_by=None) serializer = KeywordSerializer(queryset, many=True) context = {"success": True, "message": "Content Rejected List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 
'message': 'Failed to get Content Rejected list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class Keywords(ListAPIView): queryset = Content.objects.all() def get(self, request): try: chapter_id = request.query_params.get('chapter', None) section_id = request.query_params.get('section', None) sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section', None) if chapter_id is not None: queryset=ChapterKeyword.objects.filter(chapter__id = chapter_id) serializer = ChapterKeywordsSerializer(queryset, many=True) elif section_id is not None: queryset = SectionKeyword.objects.filter(section__id = section_id) serializer = SectionKeywordsSerializer(queryset, many=True) elif sub_section_id is not None: queryset = SubSectionKeyword.objects.filter(sub_section__id = sub_section_id) serializer = SubSectionKeywordsSerializer(queryset, many=True) elif sub_sub_section_id is not None: queryset = SubSubSectionKeyword.objects.filter(sub_sub_section__id = sub_sub_section_id) serializer = SubSubSectionKeywordsSerializer(queryset, many=True) else: queryset = self.get_queryset() serializer = KeywordSerializer(queryset, many=True) context = {"success": True, "message": "Content List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentContributorCreateView(ListCreateAPIView): queryset = ContentContributors.objects.all() serializer_class = ContentContributorSerializer def post(self, request): try: queryset = ContentContributors.objects.filter(first_name__iexact=request.data['first_name'].strip(),last_name__iexact=request.data['last_name'].strip(), mobile=request.data['mobile'].strip()).first() if queryset is not None: if str(queryset.email) == "" and request.data['email'] is not None: ContentContributors.objects.filter(id=queryset.id).update(email=request.data['email']) queryset.refresh_from_db() serializer = ContentContributorSerializer(queryset) context = {"success": True, "message": "Successful", "data": serializer.data} return Response(context, status=status.HTTP_200_OK) else: serializer = ContentContributorSerializer(data=request.data) if serializer.is_valid(): serializer.save() context = {"success": True, "message": "Successful", "data": serializer.data} return Response(context, status=status.HTTP_200_OK) context = {"success": False, "message": "Invalid Input Data to create Pesonal details"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error: context = {'success': "false", 'message': 'Failed to Personal Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ApprovedContentDownloadView(ListAPIView): queryset = Book.objects.all() def get(self, request): try: final_list = [] import os from shutil import copyfile book = request.query_params.get('book', None) chapters=Chapter.objects.filter(book_id=book).order_by('id') serializer = ApprovedContentSerializer(chapters, many=True) for data in serializer.data: for d in data['chapter']: final_list.append(d) repeat_list=['Content Name','Content Link/Video Link','Content Rating (By Reviewer)','Comment (By Reviewer)', 'linked_keywords'] data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium', 'Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit', 
'Level 2 Textbook Unit', 'Level 3 Textbook Unit','Level 4 Textbook Unit', 'Keywords',]+(list(itertools.chain.from_iterable(itertools.repeat(repeat_list, 5))))) exists = os.path.isfile('ApprovedContent.csv') path = settings.MEDIA_ROOT + '/files/' if exists: os.remove('ApprovedContent.csv') data_frame.to_csv(path + 'ApprovedContent.csv', encoding="utf-8-sig", index=False) context = {"success": True, "message": "Activity List", "data": 'media/files/ApprovedContent.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusDownloadView(RetrieveUpdateAPIView): queryset = HardSpot.objects.all() serializer_class = HardSpotCreateSerializer def get(self, request): try: final_list = [] import os from shutil import copyfile book_id = request.query_params.get('book', None) book_name="" if book_id is not None: book_name=Book.objects.get(id=book_id) chapters=Chapter.objects.filter(book__id=book_id).order_by('id') serializer = ContentStatusSerializer(chapters, many=True) for data in serializer.data: for d in data['chapter']: final_list.append(d) data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium','Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit', 'Level 2 Textbook Unit', 'Level 3 Textbook Unit','Level 4 Textbook Unit', 'total', 'approved_contents', 'rejected_contents', 'pending_contents', 'hard_spots']) exists = os.path.isfile('{}_contentstatus.csv'.format(book_name)) path = settings.MEDIA_ROOT + '/files/' if exists: os.remove('{}_contentstatus.csv'.format(book_name)) # data_frame.to_excel(path + 'contentstatus.xlsx') data_frame.to_csv(path + str(book_name)+'_contentstatus.csv', encoding="utf-8-sig", index=False) context = {"success": True, "message": "Activity List","data": 'media/files/{}_contentstatus.csv'.format(book_name)} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentContributorsDownloadView(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = HardSpotCreateSerializer def get(self, request): try: final_list = [] import os from shutil import copyfile state_id = request.query_params.get('state', None) if state_id is not None: queryset = Content.objects.filter(Q(sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id=state_id) | Q(sub_section__section__chapter__book__subject__grade__medium__state__id = state_id) | Q(section__chapter__book__subject__grade__medium__state__id= state_id) | Q(chapter__book__subject__grade__medium__state__id = state_id) ).distinct() else: queryset = self.get_queryset() serializer = ContentContributorsSerializer(queryset, many=True) res_list = [] for i in range(len(serializer.data)): if serializer.data[i] not in serializer.data[i + 1:]: res_list.append(serializer.data[i]) for data in res_list: for d in res_list: final_list.append(d) data_frame = pd.DataFrame(final_list , columns=['first_name', 'last_name','mobile', 'email','city_name','school_name','textbook_name']).drop_duplicates() exists = os.path.isfile('content_contributers.csv') path = settings.MEDIA_ROOT + '/files/' if exists: os.remove('content_contributers.csv') # data_frame.to_excel(path + 'content_contributers.xlsx') 
data_frame.to_csv(path + 'content_contributers.csv', encoding="utf-8-sig", index=False) context = {"success": True, "message": "Activity List","data": 'media/files/content_contributers.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = { 'success': "false", 'message': 'Failed to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSASView(ListAPIView): def get(self,request): try: sas_url = block_blob_service.generate_container_shared_access_signature( CONTAINER_NAME, ContainerPermissions.WRITE, datetime.utcnow() + timedelta(hours=1), ) base_url=account_name+".blob.core.windows.net/"+CONTAINER_NAME context = {"success": True, "message": "url link", "token":sas_url,"base_url":base_url} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSasDownloadView(ListAPIView): def get(self,request): from evolve import settings accountName = settings.AZURE_ACCOUNT_NAME accountKey = settings.AZURE_ACCOUNT_KEY containerName= settings.AZURE_CONTAINER try: blobService = BlockBlobService(account_name=accountName, account_key=accountKey) sas_token = blobService.generate_container_shared_access_signature(containerName,ContainerPermissions.READ, datetime.utcnow() + timedelta(hours=10)) context = {"success": True, "token":sas_token} return Response(context, status=status.HTTP_200_OK) except: return None class ContentListUrlUpdate(ListAPIView): queryset = Content.objects.all() serializer_class = ContentStatusSerializer def get(self, request): try: queryset = self.get_queryset().filter(approved=True) serializer = ContentStatusSerializerFileFormat(queryset, many=True) context = {"success": True, "message": "OtherContent Approved List", "data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get OtherContent Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequest(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = ContentStatusSerializer def post(self, request): try: datalist = request.data print(datalist) for data in datalist: print(data) Content.objects.filter(pk=data['content_id']).update(video=data['video']) context = {"success": True, "message": "update successfull"} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get OtherContent Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequestRevert(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = ContentStatusSerializer def post(self, request): try: datalist = request.data print(datalist) for data in datalist: Content.objects.filter(pk=data['content_id']).update(video=data['file_path_from_database']) context = {"success": True, "message": "update successfull"} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get OtherContent Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BackupContent(ListAPIView): queryset = Book.objects.all() def get(self,request): try: t = threading.Thread(target=self.index, args=(), kwargs={}) 
t.setDaemon(True) t.start() context = {"success": True, "message": "Activity List", "data": 'media/files/BackupContent.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Activity list.' ,"error" :str(error)} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def index(self): final_list,final = [],[] queryset = Content.objects.filter(approved=True) for i in queryset: try: if i.video is not None : final=[i.id,i.video] final_list.append(final) except Exception as e: pass path = settings.MEDIA_ROOT + '/files/' data_frame = pd.DataFrame(final_list , columns=['id','url']) data_frame.to_csv(path+ 'BackupContent.csv', encoding="utf-8-sig", index=False)
1.578125
2
examples/given_data.py
GuoJingyao/cornac
0
1759
# -*- coding: utf-8 -*- """ Example to train and evaluate a model with given data @author: <NAME> <<EMAIL>> """ from cornac.data import Reader from cornac.eval_methods import BaseMethod from cornac.models import MF from cornac.metrics import MAE, RMSE from cornac.utils import cache # Download MovieLens 100K provided training and test splits reader = Reader() train_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base')) test_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test')) eval_method = BaseMethod.from_splits(train_data=train_data, test_data=test_data, exclude_unknowns=False, verbose=True) mf = MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02, use_bias=True, early_stop=True, verbose=True) # Evaluation result = eval_method.evaluate(model=mf, metrics=[MAE(), RMSE()], user_based=True) print(result)
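# --- Optional sketch (not part of the original example) ----------------------
# The same evaluation can also be driven through cornac's Experiment helper,
# which trains the listed models, applies the metrics, and prints a summary
# table. It reuses the eval_method, mf, MAE and RMSE objects defined above.
import cornac

cornac.Experiment(eval_method=eval_method,
                  models=[mf],
                  metrics=[MAE(), RMSE()],
                  user_based=True).run()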
3.28125
3
taming/data/ade20k.py
ZlodeiBaal/taming
0
1760
import os import numpy as np import cv2 import albumentations from PIL import Image from torch.utils.data import Dataset from taming.data.sflckr import SegmentationBase # for examples included in repo class Examples(SegmentationBase): def __init__(self, size=256, random_crop=False, interpolation="bicubic"): super().__init__(data_csv="data/ade20k_examples.txt", data_root="data/ade20k_images", segmentation_root="data/ade20k_segmentations", size=size, random_crop=random_crop, interpolation=interpolation, n_labels=151, shift_segmentation=False) # With semantic map and scene label class ADE20kBase(Dataset): def __init__(self, config=None, size=None, random_crop=False, interpolation="bicubic", crop_size=None): self.split = self.get_split() self.n_labels = 151 # unknown + 150 self.data_csv = {"train": "data/ade20k_train.txt", "validation": "data/ade20k_test.txt"}[self.split] self.data_root = "./data/ade20k_root" with open(os.path.join(self.data_root, "sceneCategories.txt"), "r") as f: self.scene_categories = f.read().splitlines() self.scene_categories = dict(line.split() for line in self.scene_categories) with open(self.data_csv, "r") as f: self.image_paths = f.read().splitlines() self._length = len(self.image_paths) ss = self.split if ss=='train': ss='training' self.labels = { "relative_file_path_": [l for l in self.image_paths], "file_path_": [os.path.join(self.data_root, "images",ss, l) for l in self.image_paths], "relative_segmentation_path_": [l.replace(".jpg", ".png") for l in self.image_paths], "segmentation_path_": [os.path.join(self.data_root, "annotations",ss, l.replace(".jpg", ".png")) for l in self.image_paths], "scene_category": [self.scene_categories[l.replace(".jpg", "")] for l in self.image_paths], } size = None if size is not None and size<=0 else size self.size = size if crop_size is None: self.crop_size = size if size is not None else None else: self.crop_size = crop_size if self.size is not None: self.interpolation = interpolation self.interpolation = { "nearest": cv2.INTER_NEAREST, "bilinear": cv2.INTER_LINEAR, "bicubic": cv2.INTER_CUBIC, "area": cv2.INTER_AREA, "lanczos": cv2.INTER_LANCZOS4}[self.interpolation] self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=self.interpolation) self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=cv2.INTER_NEAREST) if crop_size is not None: self.center_crop = not random_crop if self.center_crop: self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size) else: self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size) self.preprocessor = self.cropper def __len__(self): return self._length def __getitem__(self, i): example = dict((k, self.labels[k][i]) for k in self.labels) image = Image.open(example["file_path_"]) if not image.mode == "RGB": image = image.convert("RGB") image = np.array(image).astype(np.uint8) if self.size is not None: image = self.image_rescaler(image=image)["image"] segmentation = Image.open(example["segmentation_path_"]) segmentation = np.array(segmentation).astype(np.uint8) if self.size is not None: segmentation = self.segmentation_rescaler(image=segmentation)["image"] if self.size is not None: processed = self.preprocessor(image=image, mask=segmentation) else: processed = {"image": image, "mask": segmentation} example["image"] = (processed["image"]/127.5 - 1.0).astype(np.float32) segmentation = processed["mask"] onehot = np.eye(self.n_labels)[segmentation] example["segmentation"] = onehot 
return example class ADE20kTrain(ADE20kBase): # default to random_crop=True def __init__(self, config=None, size=None, random_crop=True, interpolation="bicubic", crop_size=None): super().__init__(config=config, size=size, random_crop=random_crop, interpolation=interpolation, crop_size=crop_size) def get_split(self): return "train" class ADE20kValidation(ADE20kBase): def get_split(self): return "validation" if __name__ == "__main__": dset = ADE20kValidation() ex = dset[0] for k in ["image", "scene_category", "segmentation"]: print(type(ex[k])) try: print(ex[k].shape) except: print(ex[k])
2.40625
2
templates/federated_reporting/distributed_cleanup.py
olehermanse/masterfiles
44
1761
#!/usr/bin/env python3 """ fr_distributed_cleanup.py - a script to remove hosts which have migrated to other feeder hubs. To be run on Federated Reporting superhub after each import of feeder data. First, to setup, enable fr_distributed_cleanup by setting a class in augments (def.json). This enables policy in cfe_internal/enterprise/federation/federation.cf ```json { "classes": { "cfengine_mp_enable_fr_distributed_cleanup": [ "any::" ] } } ``` After the policy has run on superhub and feeders, run this script to setup fr_distributed_cleanup role and account on all feeders and superhubs with proper RBAC settings for normal operation. You will be prompted for superhub admin credentials and then admin credentials on each feeder. """ import argparse import logging import os import platform import string import random import subprocess import sys from getpass import getpass from nova_api import NovaApi from cfsecret import read_secret, write_secret WORKDIR = None CFE_FR_TABLES = None # get WORKDIR and CFE_FR_TABLES from config.sh config_sh_path = os.path.join(os.path.dirname(__file__), "config.sh") cmd = "source {}; echo $WORKDIR; echo $CFE_FR_TABLES".format(config_sh_path) with subprocess.Popen( cmd, stdout=subprocess.PIPE, shell=True, executable="/bin/bash" ) as proc: lines = proc.stdout.readlines() WORKDIR = lines[0].decode().strip() CFE_FR_TABLES = [table.strip() for table in lines[1].decode().split()] if not WORKDIR or not CFE_FR_TABLES: print("Unable to get WORKDIR and CFE_FR_TABLES values from config.sh") sys.exit(1) # Primary dir in which to place various needed files DISTRIBUTED_CLEANUP_DIR = "/opt/cfengine/federation/cftransport/distributed_cleanup" # collect cert files from /var/cfengine/httpd/ssl/certs on # superhub and feeders and cat all together into hubs.cert CERT_PATH = os.path.join(DISTRIBUTED_CLEANUP_DIR, "hubs.cert") # Note: remove the file at DISTRIBUTED_CLEANUP_SECRET_PATH to reset everything. # api calls will overwrite fr_distributed_cleanup user and role on superhub and all feeders. DISTRIBUTED_CLEANUP_SECRET_PATH = os.path.join(WORKDIR, "state/fr_distributed_cleanup.cfsecret") def interactive_setup(): fr_distributed_cleanup_password = "".join(random.choices(string.printable, k=20)) admin_pass = getpass( prompt="Enter admin password for superhub {}: ".format(platform.node()) ) api = NovaApi(api_user="admin", api_password=<PASSWORD>) # first confirm that this host is a superhub status = api.fr_hub_status() if ( status["status"] == 200 and status["role"] == "superhub" and status["configured"] ): logger.debug("This host is a superhub configured for Federated Reporting.") else: if status["status"] == 401: print("admin credentials are incorrect, try again") sys.exit(1) else: print( "Check the status to ensure role is superhub and configured is True. {}".format( status ) ) sys.exit(1) feederResponse = api.fr_remote_hubs() if not feederResponse["hubs"]: print( "No attached feeders. Please attach at least one feeder hub before running this script." ) sys.exit(1) email = input("Enter email for fr_distributed_cleanup accounts: ") logger.info("Creating fr_distributed_cleanup role on superhub...") response = api.put( "role", "fr_distributed_cleanup", { "description": "fr_distributed_cleanup Federated Host Cleanup role", "includeContext": "cfengine", }, ) if response["status"] != 201: print( "Problem creating fr_distributed_cleanup role on superhub. 
{}".format( response ) ) sys.exit(1) response = api.put_role_permissions( "fr_distributed_cleanup", ["query.post", "remoteHub.list", "hubStatus.get"] ) if response["status"] != 201: print("Unable to set RBAC permissions on role fr_distributed_cleanup") sys.exit(1) logger.info("Creating fr_distributed_cleanup user on superhub") response = api.put( "user", "fr_distributed_cleanup", { "description": "fr_distributed_cleanup Federated Host Cleanup user", "email": "{}".format(email), "password": <PASSWORD>(fr_<PASSWORD>_cleanup_password), "roles": ["fr_distributed_cleanup"], }, ) if response["status"] != 201: print( "Problem creating fr_distributed_cleanup user on superhub. {}".format( response ) ) sys.exit(1) for hub in feederResponse["hubs"]: feeder_credentials = getpass( prompt="Enter admin credentials for {} at {}: ".format( hub["ui_name"], hub["api_url"] ) ) feeder_hostname = hub["ui_name"] feeder_api = NovaApi( api_user="admin", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, ) logger.info("Creating fr_distributed_cleanup role on %s", feeder_hostname) response = feeder_api.put( "role", "fr_distributed_cleanup", { "description": "fr_distributed_cleanup Federated Host Cleanup role", "includeContext": "cfengine", }, ) if response["status"] != 201: print( "Problem creating fr_distributed_cleanup role on superhub. {}".format( response ) ) sys.exit(1) response = feeder_api.put_role_permissions( "fr_distributed_cleanup", ["host.delete"] ) if response["status"] != 201: print("Unable to set RBAC permissions on role fr_distributed_cleanup") sys.exit(1) logger.info("Creating fr_distributed_cleanup user on %s", feeder_hostname) response = feeder_api.put( "user", "fr_distributed_cleanup", { "description": "fr_distributed_cleanup Federated Host Cleanup user", "email": "{}".format(email), "password": "{}".format(<PASSWORD>), "roles": ["fr_distributed_cleanup"], }, ) if response["status"] != 201: print( "Problem creating fr_distributed_cleanup user on {}. 
{}".format( feeder_hostname, response ) ) sys.exit(1) write_secret(DISTRIBUTED_CLEANUP_SECRET_PATH, fr_distributed_cleanup_password) def main(): if not os.geteuid() == 0: sys.exit("\n{} must be run as root".format(os.path.basename(__file__))) parser = argparse.ArgumentParser( description="Clean up migrating clients in Federated Reporting setup" ) group = parser.add_mutually_exclusive_group() group.add_argument("--debug", action="store_true") group.add_argument("--inform", action="store_true") args = parser.parse_args() global logger logger = logging.getLogger("fr_distributed_cleanup") ch = logging.StreamHandler() if args.debug: logger.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) if args.inform: logger.setLevel(logging.INFO) ch.setLevel(logging.INFO) logger.addHandler(ch) if not os.path.exists(DISTRIBUTED_CLEANUP_SECRET_PATH): if sys.stdout.isatty(): interactive_setup() else: print( "{} requires manual setup, please run as root interactively.".format( os.path.basename(__file__) ) ) sys.exit(1) fr_distributed_cleanup_password = read_secret(DISTRIBUTED_CLEANUP_SECRET_PATH) api = NovaApi( api_user="fr_distributed_cleanup", api_password=<PASSWORD>_cleanup_password ) # defaults to localhost response = api.fr_hub_status() if not ( response["status"] == 200 and response["role"] == "superhub" and response["configured"] ): print( "{} can only be run on a Federated Reporting hub configured to be superhub".format( os.path.basename(__file__) ) ) sys.exit(1) response = api.fr_remote_hubs() if not response["hubs"]: print( "No attached feeders. Please attach at least one feeder hub before running this script." ) for hub in response["hubs"]: if hub["role"] != "feeder" or hub["target_state"] != "on": continue feeder_hostkey = hub["hostkey"] feeder_hostname = hub["ui_name"] feeder_api = NovaApi( api_user="fr_distributed_cleanup", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, ) response = feeder_api.status() if response["status"] != 200: print( "Unable to get status for feeder {}. Skipping".format(feeder_hostname) ) continue sql = "SELECT hub_id FROM __hubs WHERE hostkey = '{}'".format(feeder_hostkey) response = api.query(sql) if response["status"] != 200: print("Unable to query for feeder hub_id. Response was {}".format(response)) continue # query API should return one row, [0], and one column, [0], in rows value feeder_hubid = response["rows"][0][0] sql = """ SELECT DISTINCT hosts.hostkey FROM hosts WHERE hub_id = '{0}' AND EXISTS( SELECT 1 FROM lastseenhosts ls JOIN ( SELECT hostkey, max(lastseentimestamp) as newesttimestamp FROM lastseenhosts WHERE lastseendirection = 'INCOMING' GROUP BY hostkey ) as newest ON ls.hostkey = newest.hostkey AND ls.lastseentimestamp = newest.newesttimestamp AND ls.hostkey = hosts.hostkey AND ls.hub_id != '{0}' )""".format( feeder_hubid ) response = api.query(sql) if response["status"] != 200: print( "Unable to query for deletion candidates. Response was {}".format( response ) ) sys.exit(1) logger.debug("Hosts to delete on %s are %s", hub["ui_name"], response["rows"]) hosts_to_delete = response["rows"] if len(hosts_to_delete) == 0: logger.info("%s: No hosts to delete. 
No actions taken.", feeder_hostname) continue logger.debug( "%s host(s) to delete on feeder %s", len(hosts_to_delete), hub["ui_name"] ) # build up a post-loop SQL statement to delete hosts locally from feeder schemas # change to feeder schema to make deletions easier/more direct without having to # specify hub_id in queries post_sql = "set schema 'hub_{}';\n".format(feeder_hubid) post_sql += "\\set ON_ERROR STOP on\n" delete_sql = "" post_hostkeys = [] for row in hosts_to_delete: # The query API returns rows which are lists of column values. # We only selected hostkey so will take the first value. host_to_delete = row[0] response = feeder_api.delete("host", host_to_delete) # both 202 Accepted and 404 Not Found are acceptable responses if response["status"] not in [202, 404]: logger.warning( "Delete %s on feeder %s got %s status code", host_to_delete, feeder_hostname, response["status"], ) continue # only add the host_to_delete if it was successfully deleted on the feeder post_hostkeys.append(host_to_delete) if len(post_hostkeys) == 0: logger.info( "No hosts on feeder %s need processing on superhub so skipping post processing", feeder_hostname, ) continue # simulate the host api delete process by setting current_timestamp in deleted column # and delete from all federated tables similar to the clear_hosts_references() pgplsql function. post_sql += "INSERT INTO __hosts (hostkey,deleted) VALUES" for hostkey in post_hostkeys: delete_sql += "('{}', CURRENT_TIMESTAMP) ".format(hostkey) delete_sql += ( "ON CONFLICT (hostkey,hub_id) DO UPDATE SET deleted = excluded.deleted;\n" ) clear_sql = "set schema 'public';\n" for table in CFE_FR_TABLES: # special case of partitioning, operating on parent table will work if "__promiselog_*" in table: table = "__promiselog" clear_sql += ( "DELETE FROM {} WHERE hub_id = {} AND hostkey IN ({});\n".format( table, feeder_hubid, ",".join(["'{}'".format(hk) for hk in post_hostkeys]), ) ) post_sql += delete_sql + clear_sql logger.debug("Running SQL:\n%s", post_sql) with subprocess.Popen( ["/var/cfengine/bin/psql", "cfdb"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as proc: logger.debug("got a proc, sending sql...") outs, errs = proc.communicate(input=post_sql.encode()) if "ERROR" in errs.decode("utf-8"): print( "Problem running post processing SQL. returncode was {}, stderr:\n{}\nstdout:\n{}".format( proc.returncode, errs.decode("utf-8"), outs.decode("utf-8") ) ) sys.exit(1) logger.debug( "Ran post processing SQL. returncode was %s, stderr:\n%s\nstdout:\n%s", proc.returncode, errs.decode("utf-8"), outs.decode("utf-8"), ) if len(hosts_to_delete) != 0: logger.info( "%s: %s host deletions processed", hub["ui_name"], len(hosts_to_delete), ) if __name__ == "__main__": main() else: raise ImportError("fr_distributed_cleanup.py must only be used as a script!")
2.0625
2
Python/Fibonacci.py
kennethsequeira/Hello-world
1
1762
# Prints the first n Fibonacci numbers (1 1 2 3 5 ...), space separated.
fibonacci = [1, 1]
n = int(input())
while len(fibonacci) < n:
    fibonacci.append(fibonacci[-1] + fibonacci[-2])
for i in range(n):
    print(fibonacci[i], end=' ')
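# --- Alternative sketch (not in the original file) ----------------------------
# Same sequence as a generator, without keeping the whole list in memory.
def first_n_fib(count):
    a, b = 1, 1
    for _ in range(count):
        yield a
        a, b = b, a + b

# Example: print(*first_n_fib(8))  ->  1 1 2 3 5 8 13 21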
3.75
4
setup.py
kreyoo/csgo-inv-shuffle
0
1763
<reponame>kreyoo/csgo-inv-shuffle from setuptools import setup setup(name="csgoinvshuffle")
1.125
1
py/_log/log.py
EnjoyLifeFund/py36pkgs
2
1764
<filename>py/_log/log.py """ basic logging functionality based on a producer/consumer scheme. XXX implement this API: (maybe put it into slogger.py?) log = Logger( info=py.log.STDOUT, debug=py.log.STDOUT, command=None) log.info("hello", "world") log.command("hello", "world") log = Logger(info=Logger(something=...), debug=py.log.STDOUT, command=None) """ import py, sys class Message(object): def __init__(self, keywords, args): self.keywords = keywords self.args = args def content(self): return " ".join(map(str, self.args)) def prefix(self): return "[%s] " % (":".join(self.keywords)) def __str__(self): return self.prefix() + self.content() class Producer(object): """ (deprecated) Log producer API which sends messages to be logged to a 'consumer' object, which then prints them to stdout, stderr, files, etc. Used extensively by PyPy-1.1. """ Message = Message # to allow later customization keywords2consumer = {} def __init__(self, keywords, keywordmapper=None, **kw): if hasattr(keywords, 'split'): keywords = tuple(keywords.split()) self._keywords = keywords if keywordmapper is None: keywordmapper = default_keywordmapper self._keywordmapper = keywordmapper def __repr__(self): return "<py.log.Producer %s>" % ":".join(self._keywords) def __getattr__(self, name): if '_' in name: raise AttributeError(name) producer = self.__class__(self._keywords + (name,)) setattr(self, name, producer) return producer def __call__(self, *args): """ write a message to the appropriate consumer(s) """ func = self._keywordmapper.getconsumer(self._keywords) if func is not None: func(self.Message(self._keywords, args)) class KeywordMapper: def __init__(self): self.keywords2consumer = {} def getstate(self): return self.keywords2consumer.copy() def setstate(self, state): self.keywords2consumer.clear() self.keywords2consumer.update(state) def getconsumer(self, keywords): """ return a consumer matching the given keywords. tries to find the most suitable consumer by walking, starting from the back, the list of keywords, the first consumer matching a keyword is returned (falling back to py.log.default) """ for i in range(len(keywords), 0, -1): try: return self.keywords2consumer[keywords[:i]] except KeyError: continue return self.keywords2consumer.get('default', default_consumer) def setconsumer(self, keywords, consumer): """ set a consumer for a set of keywords. 
""" # normalize to tuples if isinstance(keywords, str): keywords = tuple(filter(None, keywords.split())) elif hasattr(keywords, '_keywords'): keywords = keywords._keywords elif not isinstance(keywords, tuple): raise TypeError("key %r is not a string or tuple" % (keywords,)) if consumer is not None and not py.builtin.callable(consumer): if not hasattr(consumer, 'write'): raise TypeError( "%r should be None, callable or file-like" % (consumer,)) consumer = File(consumer) self.keywords2consumer[keywords] = consumer def default_consumer(msg): """ the default consumer, prints the message to stdout (using 'print') """ sys.stderr.write(str(msg)+"\n") default_keywordmapper = KeywordMapper() def setconsumer(keywords, consumer): default_keywordmapper.setconsumer(keywords, consumer) def setstate(state): default_keywordmapper.setstate(state) def getstate(): return default_keywordmapper.getstate() # # Consumers # class File(object): """ log consumer wrapping a file(-like) object """ def __init__(self, f): assert hasattr(f, 'write') #assert isinstance(f, file) or not hasattr(f, 'open') self._file = f def __call__(self, msg): """ write a message to the log """ self._file.write(str(msg) + "\n") if hasattr(self._file, 'flush'): self._file.flush() class Path(object): """ log consumer that opens and writes to a Path """ def __init__(self, filename, append=False, delayed_create=False, buffering=False): self._append = append self._filename = str(filename) self._buffering = buffering if not delayed_create: self._openfile() def _openfile(self): mode = self._append and 'a' or 'w' f = open(self._filename, mode) self._file = f def __call__(self, msg): """ write a message to the log """ if not hasattr(self, "_file"): self._openfile() self._file.write(str(msg) + "\n") if not self._buffering: self._file.flush() def STDOUT(msg): """ consumer that writes to sys.stdout """ sys.stdout.write(str(msg)+"\n") def STDERR(msg): """ consumer that writes to sys.stderr """ sys.stderr.write(str(msg)+"\n") class Syslog: """ consumer that writes to the syslog daemon """ def __init__(self, priority = None): if priority is None: priority = self.LOG_INFO self.priority = priority def __call__(self, msg): """ write a message to the log """ py.std.syslog.syslog(self.priority, str(msg)) for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split(): _prio = "LOG_" + _prio try: setattr(Syslog, _prio, getattr(py.std.syslog, _prio)) except AttributeError: pass
2.953125
3
test/test_all_contacts.py
Sergggio/python_training
0
1765
import re from model.contact import Contact def test_all_contacts(app, db): contacts_from_db = db.get_contact_list() phone_list_from_db = db.phones_from_db() #email_liset_from_db = db.emails_from_db() phone_list = [] for phone in phone_list_from_db: phone_list.append(merge_phones_like_on_home_page(phone)) email_list = [] #for email in email_liset_from_db: # email_list.append(merge_mail_like_on_home_page(email)) contacts_from_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max) phones_from_home_page = [con.all_phones_from_home_page for con in contacts_from_home_page] #emails_from_home_page = [con.all_mail_from_home_page for con in contacts_from_home_page] assert phone_list == phones_from_home_page #assert email_list == emails_from_home_page assert contacts_from_db == contacts_from_home_page def clear(s): return re.sub("[() -]", "", s) def remove_spaces(s): return re.sub(' +', ' ', s).rstrip() def merge_phones_like_on_home_page(contact): return "\n".join(filter(lambda x: x != "", map(lambda x: clear(x), filter(lambda x: x is not None, [contact.home_phone, contact.mobile_phone, contact.work_phone, contact.secondary_phone])))) def merge_email_like_on_home_page(contact): return "\n".join(filter(lambda x: x != "", map(lambda x: remove_spaces(x), filter(lambda x: x is not None, [contact.email, contact.email2, contact.email3]))))
2.625
3
samples/abp/test_graphics.py
jproudlo/PyModel
61
1766
<gh_stars>10-100
"""
ABP analyzer and graphics tests
"""

cases = [
    ('Run PyModel Graphics to generate dot file from FSM model, no need to use pma',
     'pmg ABP'),

    ('Generate SVG file from dot',
     'dotsvg ABP'),
    # Now display ABP.svg in browser

    ('Run PyModel Analyzer to generate FSM from original FSM, should be the same',
     'pma ABP'),

    ('Run PyModel Graphics to generate a file of graphics commands from new FSM',
     'pmg ABPFSM'),

    ('Generate an svg file from the graphics commands',
     'dotsvg ABPFSM'),
    # Now display ABPFSM.svg in browser, should look the same as ABP.svg
]
2.046875
2
games/migrations/0002_auto_20201026_1221.py
IceArrow256/game-list
3
1767
<filename>games/migrations/0002_auto_20201026_1221.py # Generated by Django 3.1.2 on 2020-10-26 12:21 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('games', '0001_initial'), ] operations = [ migrations.AlterField( model_name='game', name='score', field=models.FloatField(null=True, verbose_name='Score'), ), migrations.AlterField( model_name='game', name='series', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='games.series'), ), ]
1.570313
2
build/lib.linux-x86_64-2.7_ucs4/mx/Misc/PackageTools.py
mkubux/egenix-mx-base
0
1768
""" PackageTools - A set of tools to aid working with packages. Copyright (c) 1998-2000, <NAME>; mailto:<EMAIL> Copyright (c) 2000-2015, eGenix.com Software GmbH; mailto:<EMAIL> See the documentation for further information on copyrights, or contact the author. All Rights Reserved. """ __version__ = '0.4.0' import os,types,sys,re,imp,__builtin__ import mx.Tools.NewBuiltins # RE to identify Python modules suffixes = projection(imp.get_suffixes(),0) module_name = re.compile('(.*)(' + '|'.join(suffixes) + ')$') initmodule_name = re.compile('__init__(' + '|'.join(suffixes) + ')$') initmodule_names = [] for suffix in suffixes: initmodule_names.append('__init__' + suffix) def find_packages(dir=os.curdir, files_only=0, recursive=0, ignore_modules=0, pkgbasename='', pkgdict=None, isdir=os.path.isdir,exists=os.path.exists, isfile=os.path.isfile,join=os.path.join,listdir=os.listdir, module_name=module_name,initmodule_name=initmodule_name): """ Return a list of package names found in dir. Packages are Python modules and subdirectories that provide an __init__ module. The .py extension is removed from the files. The __init__ modules are not considered being seperate packages. If files_only is true, only Python files are included in the search (subdirectories are *not* taken into account). If ignore_modules is true (default is false), modules are ignored. If recursive is true the search recurses into package directories. pkgbasename and pkgdict are only used during recursion. """ l = listdir(dir) if pkgdict is None: pkgdict = {} if files_only: for filename in l: m = module_name.match(filename) if m is not None and \ m.group(1) != '__init__': pkgdict[pkgbasename + m.group(1)] = 1 else: for filename in l: path = join(dir, filename) if isdir(path): # Check for __init__ module(s) for name in initmodule_names: if isfile(join(path, name)): pkgname = pkgbasename + filename pkgdict[pkgname] = 1 if recursive: find_packages(path, recursive=1, pkgbasename=pkgname + '.', pkgdict=pkgdict) break elif not ignore_modules: m = module_name.match(filename) if m is not None and \ m.group(1) != '__init__': pkgdict[pkgbasename + m.group(1)] = 1 return pkgdict.keys() def find_subpackages(package, recursive=0, splitpath=os.path.split): """ Assuming that package points to a loaded package module, this function tries to identify all subpackages of that package. Subpackages are all Python files included in the same directory as the module plus all subdirectories having an __init__.py file. The modules name is prepended to all subpackage names. The module location is found by looking at the __file__ attribute that non-builtin modules define. The function uses the __all__ attribute from the package __init__ module if available. If recursive is true (default is false), then subpackages of subpackages are recursively also included in the search. """ if not recursive: # Try the __all__ attribute... try: subpackages = list(package.__all__) except (ImportError, AttributeError): # Did not work, then let's try to find the subpackages by looking # at the directory where package lives... subpackages = find_packages(package.__path__[0], recursive=recursive) else: # XXX Recursive search does not support the __all__ attribute subpackages = find_packages(package.__path__[0], recursive=recursive) basename = package.__name__ + '.' for i,name in irange(subpackages): subpackages[i] = basename + name return subpackages def _thismodule(upcount=1, exc_info=sys.exc_info,trange=trange): """ Returns the module object that the callee is calling from. 
upcount can be given to indicate how far up the execution stack the function is supposed to look (1 == direct callee, 2 == callee of callee, etc.). """ try: 1/0 except: frame = exc_info()[2].tb_frame for i in trange(upcount): frame = frame.f_back name = frame.f_globals['__name__'] del frame return sys.modules[name] def _module_loader(name, locals, globals, sysmods, errors='strict', importer=__import__, reloader=reload, from_list=['*']): """ Internal API for loading a module """ if not sysmods.has_key(name): is_new = 1 else: is_new = 0 try: mod = importer(name, locals, globals, from_list) if reload and not is_new: mod = reloader(mod) except KeyboardInterrupt: # Pass through; SystemExit will be handled by the error handler raise except Exception, why: if errors == 'ignore': pass elif errors == 'strict': raise elif callable(errors): errors(name, sys.exc_info()[0], sys.exc_info()[1]) else: raise ValueError,'unknown errors value' else: return mod return None def import_modules(modnames,module=None,errors='strict',reload=0, thismodule=_thismodule): """ Import all modules given in modnames into module. module defaults to the caller's module. modnames may contain dotted package names. If errors is 'strict' (default), then ImportErrors and SyntaxErrors are raised. If set to 'ignore', they are silently ignored. If errors is a callable object, then it is called with arguments (modname, errorclass, errorvalue). If the handler returns, processing continues. If reload is true (default is false), all already modules among the list will be forced to reload. """ if module is None: module = _thismodule(2) locals = module.__dict__ sysmods = sys.modules for name in modnames: mod = _module_loader(name, locals, locals, sysmods, errors=errors) if mod is not None: locals[name] = mod def load_modules(modnames,locals=None,globals=None,errors='strict',reload=0): """ Imports all modules in modnames using the given namespaces and returns list of corresponding module objects. If errors is 'strict' (default), then ImportErrors and SyntaxErrors are raised. If set to 'ignore', they are silently ignored. If errors is a callable object, then it is called with arguments (modname, errorclass, errorvalue). If the handler returns, processing continues. If reload is true (default is false), all already modules among the list will be forced to reload. """ modules = [] append = modules.append sysmods = sys.modules for name in modnames: mod = _module_loader(name, locals, globals, sysmods, errors=errors) if mod is not None: append(mod) return modules def import_subpackages(module, reload=0, recursive=0, import_modules=import_modules, find_subpackages=find_subpackages): """ Does a subpackages scan using find_subpackages(module) and then imports all submodules found into module. The module location is found by looking at the __file__ attribute that non-builtin modules define. The function uses the __all__ attribute from the package __init__ module if available. If reload is true (default is false), all already modules among the list will be forced to reload. """ import_modules(find_subpackages(module, recursive=recursive), module, reload=reload) def load_subpackages(module, locals=None, globals=None, errors='strict', reload=0, recursive=0, load_modules=load_modules, find_subpackages=find_subpackages): """ Same as import_subpackages but with load_modules functionality, i.e. imports the modules and also returns a list of module objects. If errors is 'strict' (default), then ImportErrors are raised. 
If set to 'ignore', they are silently ignored. If reload is true (default is false), all already modules among the list will be forced to reload. """ return load_modules(find_subpackages(module, recursive=recursive), locals, globals, errors=errors, reload=reload) def modules(names, extract=extract): """ Converts a list of module names into a list of module objects. The modules must already be loaded. """ return extract(sys.modules, names) def package_modules(pkgname): """ Returns a list of all modules belonging to the package with the given name. The package must already be loaded. Only the currently registered modules are included in the list. """ match = pkgname + '.' match_len = len(match) mods = [sys.modules[pkgname]] for k,v in sys.modules.items(): if k[:match_len] == match and v is not None: mods.append(v) return mods def find_classes(mods,baseclass=None,annotated=0, ClassType=types.ClassType,issubclass=issubclass): """ Find all subclasses of baseclass or simply all classes (if baseclass is None) defined by the module objects in list mods. If annotated is true the returned list will contain tuples (module_object,name,class_object) for each class found where module_object is the module where the class is defined. """ classes = [] for mod in mods: for name,obj in mod.__dict__.items(): if type(obj) is ClassType: if baseclass and not issubclass(obj,baseclass): continue if annotated: classes.append((mod, name, obj)) else: classes.append(obj) return classes def find_instances(mods,baseclass,annotated=0, InstanceType=types.InstanceType,issubclass=issubclass): """ Find all instances of baseclass defined by the module objects in list mods. If annotated is true the returned list will contain tuples (module_object,name,instances_object) for each instances found where module_object is the module where the instances is defined. """ instances = [] for mod in mods: for name,obj in mod.__dict__.items(): if isinstance(obj,baseclass): if annotated: instances.append((mod,name,obj)) else: instances.append(obj) return instances
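if __name__ == '__main__':
    # Usage sketch (not part of the original module; assumes the same Python 2
    # environment with mx.Tools installed): list the packages and modules found
    # below the current directory, one per line.
    for _pkg in sorted(find_packages(recursive=1)):
        print _pkg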
2.25
2
Lib/test/test_urllib.py
Kshitijkrishnadas/haribol
4
1769
<filename>Lib/test/test_urllib.py """Regression tests for what was in Python 2's "urllib" module""" import urllib.parse import urllib.request import urllib.error import http.client import email.message import io import unittest from unittest.mock import patch from test import support import os try: import ssl except ImportError: ssl = None import sys import tempfile from nturl2path import url2pathname, pathname2url from base64 import b64encode import collections def hexescape(char): """Escape char as RFC 2396 specifies""" hex_repr = hex(ord(char))[2:].upper() if len(hex_repr) == 1: hex_repr = "0%s" % hex_repr return "%" + hex_repr # Shortcut for testing FancyURLopener _urlopener = None def urlopen(url, data=None, proxies=None): """urlopen(url [, data]) -> open file-like object""" global _urlopener if proxies is not None: opener = urllib.request.FancyURLopener(proxies=proxies) elif not _urlopener: opener = FancyURLopener() _urlopener = opener else: opener = _urlopener if data is None: return opener.open(url) else: return opener.open(url, data) def FancyURLopener(): with support.check_warnings( ('FancyURLopener style of invoking requests is deprecated.', DeprecationWarning)): return urllib.request.FancyURLopener() def fakehttp(fakedata, mock_close=False): class FakeSocket(io.BytesIO): io_refs = 1 def sendall(self, data): FakeHTTPConnection.buf = data def makefile(self, *args, **kwds): self.io_refs += 1 return self def read(self, amt=None): if self.closed: return b"" return io.BytesIO.read(self, amt) def readline(self, length=None): if self.closed: return b"" return io.BytesIO.readline(self, length) def close(self): self.io_refs -= 1 if self.io_refs == 0: io.BytesIO.close(self) class FakeHTTPConnection(http.client.HTTPConnection): # buffer to store data for verification in urlopen tests. buf = None def connect(self): self.sock = FakeSocket(self.fakedata) type(self).fakesock = self.sock if mock_close: # bpo-36918: HTTPConnection destructor calls close() which calls # flush(). Problem: flush() calls self.fp.flush() which raises # "ValueError: I/O operation on closed file" which is logged as an # "Exception ignored in". Override close() to silence this error. def close(self): pass FakeHTTPConnection.fakedata = fakedata return FakeHTTPConnection class FakeHTTPMixin(object): def fakehttp(self, fakedata, mock_close=False): fake_http_class = fakehttp(fakedata, mock_close=mock_close) self._connection_class = http.client.HTTPConnection http.client.HTTPConnection = fake_http_class def unfakehttp(self): http.client.HTTPConnection = self._connection_class class FakeFTPMixin(object): def fakeftp(self): class FakeFtpWrapper(object): def __init__(self, user, passwd, host, port, dirs, timeout=None, persistent=True): pass def retrfile(self, file, type): return io.BytesIO(), 0 def close(self): pass self._ftpwrapper_class = urllib.request.ftpwrapper urllib.request.ftpwrapper = FakeFtpWrapper def unfakeftp(self): urllib.request.ftpwrapper = self._ftpwrapper_class class urlopen_FileTests(unittest.TestCase): """Test urlopen() opening a temporary file. Try to test as much functionality as possible so as to cut down on reliance on connecting to the Net for testing. 
""" def setUp(self): # Create a temp file to use for testing self.text = bytes("test_urllib: %s\n" % self.__class__.__name__, "ascii") f = open(support.TESTFN, 'wb') try: f.write(self.text) finally: f.close() self.pathname = support.TESTFN self.returned_obj = urlopen("file:%s" % self.pathname) def tearDown(self): """Shut down the open object""" self.returned_obj.close() os.remove(support.TESTFN) def test_interface(self): # Make sure object returned by urlopen() has the specified methods for attr in ("read", "readline", "readlines", "fileno", "close", "info", "geturl", "getcode", "__iter__"): self.assertTrue(hasattr(self.returned_obj, attr), "object returned by urlopen() lacks %s attribute" % attr) def test_read(self): self.assertEqual(self.text, self.returned_obj.read()) def test_readline(self): self.assertEqual(self.text, self.returned_obj.readline()) self.assertEqual(b'', self.returned_obj.readline(), "calling readline() after exhausting the file did not" " return an empty string") def test_readlines(self): lines_list = self.returned_obj.readlines() self.assertEqual(len(lines_list), 1, "readlines() returned the wrong number of lines") self.assertEqual(lines_list[0], self.text, "readlines() returned improper text") def test_fileno(self): file_num = self.returned_obj.fileno() self.assertIsInstance(file_num, int, "fileno() did not return an int") self.assertEqual(os.read(file_num, len(self.text)), self.text, "Reading on the file descriptor returned by fileno() " "did not return the expected text") def test_close(self): # Test close() by calling it here and then having it be called again # by the tearDown() method for the test self.returned_obj.close() def test_info(self): self.assertIsInstance(self.returned_obj.info(), email.message.Message) def test_geturl(self): self.assertEqual(self.returned_obj.geturl(), self.pathname) def test_getcode(self): self.assertIsNone(self.returned_obj.getcode()) def test_iter(self): # Test iterator # Don't need to count number of iterations since test would fail the # instant it returned anything beyond the first line from the # comparison. # Use the iterator in the usual implicit way to test for ticket #4608. for line in self.returned_obj: self.assertEqual(line, self.text) def test_relativelocalfile(self): self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname) class ProxyTests(unittest.TestCase): def setUp(self): # Records changes to env vars self.env = support.EnvironmentVarGuard() # Delete all proxy related env vars for k in list(os.environ): if 'proxy' in k.lower(): self.env.unset(k) def tearDown(self): # Restore all proxy related env vars self.env.__exit__() del self.env def test_getproxies_environment_keep_no_proxies(self): self.env.set('NO_PROXY', 'localhost') proxies = urllib.request.getproxies_environment() # getproxies_environment use lowered case truncated (no '_proxy') keys self.assertEqual('localhost', proxies['no']) # List of no_proxies with space. 
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234') self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com')) self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888')) self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234')) def test_proxy_cgi_ignore(self): try: self.env.set('HTTP_PROXY', 'http://somewhere:3128') proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) self.env.set('REQUEST_METHOD', 'GET') proxies = urllib.request.getproxies_environment() self.assertNotIn('http', proxies) finally: self.env.unset('REQUEST_METHOD') self.env.unset('HTTP_PROXY') def test_proxy_bypass_environment_host_match(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t') self.assertTrue(bypass('localhost')) self.assertTrue(bypass('LocalHost')) # MixedCase self.assertTrue(bypass('LOCALHOST')) # UPPERCASE self.assertTrue(bypass('.localhost')) self.assertTrue(bypass('newdomain.com:1234')) self.assertTrue(bypass('.newdomain.com:1234')) self.assertTrue(bypass('foo.d.o.t')) # issue 29142 self.assertTrue(bypass('d.o.t')) self.assertTrue(bypass('anotherdomain.com:8888')) self.assertTrue(bypass('.anotherdomain.com:8888')) self.assertTrue(bypass('www.newdomain.com:1234')) self.assertFalse(bypass('prelocalhost')) self.assertFalse(bypass('newdomain.com')) # no port self.assertFalse(bypass('newdomain.com:1235')) # wrong port def test_proxy_bypass_environment_always_match(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', '*') self.assertTrue(bypass('newdomain.com')) self.assertTrue(bypass('newdomain.com:1234')) self.env.set('NO_PROXY', '*, anotherdomain.com') self.assertTrue(bypass('anotherdomain.com')) self.assertFalse(bypass('newdomain.com')) self.assertFalse(bypass('newdomain.com:1234')) def test_proxy_bypass_environment_newline(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234') self.assertFalse(bypass('localhost\n')) self.assertFalse(bypass('anotherdomain.com:8888\n')) self.assertFalse(bypass('newdomain.com:1234\n')) class ProxyTests_withOrderedEnv(unittest.TestCase): def setUp(self): # We need to test conditions, where variable order _is_ significant self._saved_env = os.environ # Monkey patch os.environ, start with empty fake environment os.environ = collections.OrderedDict() def tearDown(self): os.environ = self._saved_env def test_getproxies_environment_prefer_lowercase(self): # Test lowercase preference with removal os.environ['no_proxy'] = '' os.environ['No_Proxy'] = 'localhost' self.assertFalse(urllib.request.proxy_bypass_environment('localhost')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) os.environ['http_proxy'] = '' os.environ['HTTP_PROXY'] = 'http://somewhere:3128' proxies = urllib.request.getproxies_environment() self.assertEqual({}, proxies) # Test lowercase preference of proxy bypass and correct matching including ports os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234' os.environ['No_Proxy'] = 'xyz.com' self.assertTrue(urllib.request.proxy_bypass_environment('localhost')) self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678')) self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234')) self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy')) 
self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) # Test lowercase preference with replacement os.environ['http_proxy'] = 'http://somewhere:3128' os.environ['Http_Proxy'] = 'http://somewhereelse:3128' proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin): """Test urlopen() opening a fake http connection.""" def check_read(self, ver): self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!") try: fp = urlopen("http://python.org/") self.assertEqual(fp.readline(), b"Hello!") self.assertEqual(fp.readline(), b"") self.assertEqual(fp.geturl(), 'http://python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_url_fragment(self): # Issue #11703: geturl() omits fragments in the original URL. url = 'http://docs.python.org/library/urllib.html#OK' self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!") try: fp = urllib.request.urlopen(url) self.assertEqual(fp.geturl(), url) finally: self.unfakehttp() def test_willclose(self): self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!") try: resp = urlopen("http://www.python.org") self.assertTrue(resp.fp.will_close) finally: self.unfakehttp() @unittest.skipUnless(ssl, "ssl module required") def test_url_path_with_control_char_rejected(self): for char_no in list(range(0, 0x21)) + [0x7f]: char = chr(char_no) schemeless_url = f"//localhost:7777/test{char}/" self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.") try: # We explicitly test urllib.request.urlopen() instead of the top # level 'def urlopen()' function defined in this... (quite ugly) # test suite. They use different url opening codepaths. Plain # urlopen uses FancyURLOpener which goes via a codepath that # calls urllib.parse.quote() on the URL which makes all of the # above attempts at injection within the url _path_ safe. escaped_char_repr = repr(char).replace('\\', r'\\') InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, f"contain control.*{escaped_char_repr}"): urllib.request.urlopen(f"http:{schemeless_url}") with self.assertRaisesRegex( InvalidURL, f"contain control.*{escaped_char_repr}"): urllib.request.urlopen(f"https:{schemeless_url}") # This code path quotes the URL so there is no injection. resp = urlopen(f"http:{schemeless_url}") self.assertNotIn(char, resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl, "ssl module required") def test_url_path_with_newline_header_injection_rejected(self): self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.") host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123" schemeless_url = "//" + host + ":8080/test/?test=a" try: # We explicitly test urllib.request.urlopen() instead of the top # level 'def urlopen()' function defined in this... (quite ugly) # test suite. They use different url opening codepaths. Plain # urlopen uses FancyURLOpener which goes via a codepath that # calls urllib.parse.quote() on the URL which makes all of the # above attempts at injection within the url _path_ safe. InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, r"contain control.*\\r.*(found at least . .)"): urllib.request.urlopen(f"http:{schemeless_url}") with self.assertRaisesRegex(InvalidURL, r"contain control.*\\n"): urllib.request.urlopen(f"https:{schemeless_url}") # This code path quotes the URL so there is no injection. 
resp = urlopen(f"http:{schemeless_url}") self.assertNotIn(' ', resp.geturl()) self.assertNotIn('\r', resp.geturl()) self.assertNotIn('\n', resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl, "ssl module required") def test_url_host_with_control_char_rejected(self): for char_no in list(range(0, 0x21)) + [0x7f]: char = chr(char_no) schemeless_url = f"//localhost{char}/test/" self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.") try: escaped_char_repr = repr(char).replace('\\', r'\\') InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, f"contain control.*{escaped_char_repr}"): urlopen(f"http:{schemeless_url}") with self.assertRaisesRegex(InvalidURL, f"contain control.*{escaped_char_repr}"): urlopen(f"https:{schemeless_url}") finally: self.unfakehttp() @unittest.skipUnless(ssl, "ssl module required") def test_url_host_with_newline_header_injection_rejected(self): self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.") host = "localhost\r\nX-injected: header\r\n" schemeless_url = "//" + host + ":8080/test/?test=a" try: InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, r"contain control.*\\r"): urlopen(f"http:{schemeless_url}") with self.assertRaisesRegex(InvalidURL, r"contain control.*\\n"): urlopen(f"https:{schemeless_url}") finally: self.unfakehttp() def test_read_0_9(self): # "0.9" response accepted (but not "simple responses" without # a status line) self.check_read(b"0.9") def test_read_1_0(self): self.check_read(b"1.0") def test_read_1_1(self): self.check_read(b"1.1") def test_read_bogus(self): # urlopen() should raise OSError for many error codes. self.fakehttp(b'''HTTP/1.1 401 Authentication Required Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Type: text/html; charset=iso-8859-1 ''', mock_close=True) try: self.assertRaises(OSError, urlopen, "http://python.org/") finally: self.unfakehttp() def test_invalid_redirect(self): # urlopen() should raise OSError for many error codes. self.fakehttp(b'''HTTP/1.1 302 Found Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Location: file://guidocomputer.athome.com:/python/license Connection: close Content-Type: text/html; charset=iso-8859-1 ''', mock_close=True) try: msg = "Redirection to url 'file:" with self.assertRaisesRegex(urllib.error.HTTPError, msg): urlopen("http://python.org/") finally: self.unfakehttp() def test_redirect_limit_independent(self): # Ticket #12923: make sure independent requests each use their # own retry limit. for i in range(FancyURLopener().maxtries): self.fakehttp(b'''HTTP/1.1 302 Found Location: file://guidocomputer.athome.com:/python/license Connection: close ''', mock_close=True) try: self.assertRaises(urllib.error.HTTPError, urlopen, "http://something") finally: self.unfakehttp() def test_empty_socket(self): # urlopen() raises OSError if the underlying socket does not send any # data. 
(#1680230) self.fakehttp(b'') try: self.assertRaises(OSError, urlopen, "http://something") finally: self.unfakehttp() def test_missing_localfile(self): # Test for #10836 with self.assertRaises(urllib.error.URLError) as e: urlopen('file://localhost/a/file/which/doesnot/exists.py') self.assertTrue(e.exception.filename) self.assertTrue(e.exception.reason) def test_file_notexists(self): fd, tmp_file = tempfile.mkstemp() tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/') try: self.assertTrue(os.path.exists(tmp_file)) with urlopen(tmp_fileurl) as fobj: self.assertTrue(fobj) finally: os.close(fd) os.unlink(tmp_file) self.assertFalse(os.path.exists(tmp_file)) with self.assertRaises(urllib.error.URLError): urlopen(tmp_fileurl) def test_ftp_nohost(self): test_ftp_url = 'ftp:///path' with self.assertRaises(urllib.error.URLError) as e: urlopen(test_ftp_url) self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) def test_ftp_nonexisting(self): with self.assertRaises(urllib.error.URLError) as e: urlopen('ftp://localhost/a/file/which/doesnot/exists.py') self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) @patch.object(urllib.request, 'MAXFTPCACHE', 0) def test_ftp_cache_pruning(self): self.fakeftp() try: urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, []) urlopen('ftp://localhost') finally: self.unfakeftp() def test_userpass_inurl(self): self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!") try: fp = urlopen("http://user:[email protected]/") self.assertEqual(fp.readline(), b"Hello!") self.assertEqual(fp.readline(), b"") self.assertEqual(fp.geturl(), 'http://user:[email protected]/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_userpass_inurl_w_spaces(self): self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!") try: userpass = "<PASSWORD>" url = "http://{}@python.org/".format(userpass) fakehttp_wrapper = http.client.HTTPConnection authorization = ("Authorization: Basic %s\r\n" % b64encode(userpass.encode("ASCII")).decode("ASCII")) fp = urlopen(url) # The authorization header must be in place self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8")) self.assertEqual(fp.readline(), b"Hello!") self.assertEqual(fp.readline(), b"") # the spaces are quoted in URL so no match self.assertNotEqual(fp.geturl(), url) self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_URLopener_deprecation(self): with support.check_warnings(('',DeprecationWarning)): urllib.request.URLopener() @unittest.skipUnless(ssl, "ssl module required") def test_cafile_and_context(self): context = ssl.create_default_context() with support.check_warnings(('', DeprecationWarning)): with self.assertRaises(ValueError): urllib.request.urlopen( "https://localhost", cafile="/nonexistent/path", context=context ) class urlopen_DataTests(unittest.TestCase): """Test urlopen() opening a data URL.""" def setUp(self): # text containing URL special- and unicode-characters self.text = "test data URLs :;,%=& \u00f6 \u00c4 " # 2x1 pixel RGB PNG image with one black and one white pixel self.image = ( b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00' b'\x01\x08\x02\x00\x00\x00{@\xe8\xdd\x00\x00\x00\x01sRGB\x00\xae' b'\xce\x1c\xe9\x00\x00\x00\x0fIDAT\x08\xd7c```\xf8\xff\xff?\x00' b'\x06\x01\x02\xfe\no/\x1e\x00\x00\x00\x00IEND\xaeB`\x82') self.text_url = ( "data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3" "D%26%20%C3%B6%20%C3%84%20") self.text_url_base64 = ( 
"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs" "sJT0mIPYgxCA%3D") # base64 encoded data URL that contains ignorable spaces, # such as "\n", " ", "%0A", and "%20". self.image_url = ( "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\n" "QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 " "vHgAAAABJRU5ErkJggg%3D%3D%0A%20") self.text_url_resp = urllib.request.urlopen(self.text_url) self.text_url_base64_resp = urllib.request.urlopen( self.text_url_base64) self.image_url_resp = urllib.request.urlopen(self.image_url) def test_interface(self): # Make sure object returned by urlopen() has the specified methods for attr in ("read", "readline", "readlines", "close", "info", "geturl", "getcode", "__iter__"): self.assertTrue(hasattr(self.text_url_resp, attr), "object returned by urlopen() lacks %s attribute" % attr) def test_info(self): self.assertIsInstance(self.text_url_resp.info(), email.message.Message) self.assertEqual(self.text_url_base64_resp.info().get_params(), [('text/plain', ''), ('charset', 'ISO-8859-1')]) self.assertEqual(self.image_url_resp.info()['content-length'], str(len(self.image))) self.assertEqual(urllib.request.urlopen("data:,").info().get_params(), [('text/plain', ''), ('charset', 'US-ASCII')]) def test_geturl(self): self.assertEqual(self.text_url_resp.geturl(), self.text_url) self.assertEqual(self.text_url_base64_resp.geturl(), self.text_url_base64) self.assertEqual(self.image_url_resp.geturl(), self.image_url) def test_read_text(self): self.assertEqual(self.text_url_resp.read().decode( dict(self.text_url_resp.info().get_params())['charset']), self.text) def test_read_text_base64(self): self.assertEqual(self.text_url_base64_resp.read().decode( dict(self.text_url_base64_resp.info().get_params())['charset']), self.text) def test_read_image(self): self.assertEqual(self.image_url_resp.read(), self.image) def test_missing_comma(self): self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain') def test_invalid_base64_data(self): # missing padding character self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=') class urlretrieve_FileTests(unittest.TestCase): """Test urllib.urlretrieve() on local files""" def setUp(self): # Create a list of temporary files. Each item in the list is a file # name (absolute path or relative to the current working directory). # All files in this list will be deleted in the tearDown method. Note, # this only helps to makes sure temporary files get deleted, but it # does nothing about trying to close files that may still be open. It # is the responsibility of the developer to properly close files even # when exceptional conditions occur. self.tempFiles = [] # Create a temporary file. self.registerFileForCleanUp(support.TESTFN) self.text = b'testing urllib.urlretrieve' try: FILE = open(support.TESTFN, 'wb') FILE.write(self.text) FILE.close() finally: try: FILE.close() except: pass def tearDown(self): # Delete the temporary files. 
for each in self.tempFiles: try: os.remove(each) except: pass def constructLocalFileUrl(self, filePath): filePath = os.path.abspath(filePath) try: filePath.encode("utf-8") except UnicodeEncodeError: raise unittest.SkipTest("filePath is not encodable to utf8") return "file://%s" % urllib.request.pathname2url(filePath) def createNewTempFile(self, data=b""): """Creates a new temporary file containing the specified data, registers the file for deletion during the test fixture tear down, and returns the absolute path of the file.""" newFd, newFilePath = tempfile.mkstemp() try: self.registerFileForCleanUp(newFilePath) newFile = os.fdopen(newFd, "wb") newFile.write(data) newFile.close() finally: try: newFile.close() except: pass return newFilePath def registerFileForCleanUp(self, fileName): self.tempFiles.append(fileName) def test_basic(self): # Make sure that a local file just gets its own location returned and # a headers value is returned. result = urllib.request.urlretrieve("file:%s" % support.TESTFN) self.assertEqual(result[0], support.TESTFN) self.assertIsInstance(result[1], email.message.Message, "did not get an email.message.Message instance " "as second returned value") def test_copy(self): # Test that setting the filename argument works. second_temp = "%s.2" % support.TESTFN self.registerFileForCleanUp(second_temp) result = urllib.request.urlretrieve(self.constructLocalFileUrl( support.TESTFN), second_temp) self.assertEqual(second_temp, result[0]) self.assertTrue(os.path.exists(second_temp), "copy of the file was not " "made") FILE = open(second_temp, 'rb') try: text = FILE.read() FILE.close() finally: try: FILE.close() except: pass self.assertEqual(self.text, text) def test_reporthook(self): # Make sure that the reporthook works. def hooktester(block_count, block_read_size, file_size, count_holder=[0]): self.assertIsInstance(block_count, int) self.assertIsInstance(block_read_size, int) self.assertIsInstance(file_size, int) self.assertEqual(block_count, count_holder[0]) count_holder[0] = count_holder[0] + 1 second_temp = "%s.2" % support.TESTFN self.registerFileForCleanUp(second_temp) urllib.request.urlretrieve( self.constructLocalFileUrl(support.TESTFN), second_temp, hooktester) def test_reporthook_0_bytes(self): # Test on zero length file. Should call reporthook only 1 time. report = [] def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile() urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 1) self.assertEqual(report[0][2], 0) def test_reporthook_5_bytes(self): # Test on 5 byte file. Should call reporthook only 2 times (once when # the "network connection" is established and once when the block is # read). report = [] def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile(b"x" * 5) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 2) self.assertEqual(report[0][2], 5) self.assertEqual(report[1][2], 5) def test_reporthook_8193_bytes(self): # Test on 8193 byte file. Should call reporthook only 3 times (once # when the "network connection" is established, once for the next 8192 # bytes, and once for the last byte). 
report = [] def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile(b"x" * 8193) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 3) self.assertEqual(report[0][2], 8193) self.assertEqual(report[0][1], 8192) self.assertEqual(report[1][1], 8192) self.assertEqual(report[2][1], 8192) class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin): """Test urllib.urlretrieve() using fake http connections""" def test_short_content_raises_ContentTooShortError(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF ''') def _reporthook(par1, par2, par3): pass with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL, reporthook=_reporthook) finally: self.unfakehttp() def test_short_content_raises_ContentTooShortError_without_reporthook(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF ''') with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL) finally: self.unfakehttp() class QuotingTests(unittest.TestCase): r"""Tests for urllib.quote() and urllib.quote_plus() According to RFC 3986 (Uniform Resource Identifiers), to escape a character you write it as '%' + <2 character US-ASCII hex value>. The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly. Case does not matter on the hex letters. 
The various character sets specified are: Reserved characters : ";/?:@&=+$," Have special meaning in URIs and must be escaped if not being used for their special meaning Data characters : letters, digits, and "-_.!~*'()" Unreserved and do not need to be escaped; can be, though, if desired Control characters : 0x00 - 0x1F, 0x7F Have no use in URIs so must be escaped space : 0x20 Must be escaped Delimiters : '<>#%"' Must be escaped Unwise : "{}|\^[]`" Must be escaped """ def test_never_quote(self): # Make sure quote() does not quote letters, digits, and "_,.-" do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz", "0123456789", "_.-~"]) result = urllib.parse.quote(do_not_quote) self.assertEqual(do_not_quote, result, "using quote(): %r != %r" % (do_not_quote, result)) result = urllib.parse.quote_plus(do_not_quote) self.assertEqual(do_not_quote, result, "using quote_plus(): %r != %r" % (do_not_quote, result)) def test_default_safe(self): # Test '/' is default value for 'safe' parameter self.assertEqual(urllib.parse.quote.__defaults__[0], '/') def test_safe(self): # Test setting 'safe' parameter does what it should do quote_by_default = "<>" result = urllib.parse.quote(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, "using quote(): %r != %r" % (quote_by_default, result)) result = urllib.parse.quote_plus(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, "using quote_plus(): %r != %r" % (quote_by_default, result)) # Safe expressed as bytes rather than str result = urllib.parse.quote(quote_by_default, safe=b"<>") self.assertEqual(quote_by_default, result, "using quote(): %r != %r" % (quote_by_default, result)) # "Safe" non-ASCII characters should have no effect # (Since URIs are not allowed to have non-ASCII characters) result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc") expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="") self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Same as above, but using a bytes rather than str result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc") expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="") self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) def test_default_quoting(self): # Make sure all characters that should be quoted are by default sans # space (separate test for that). 
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F should_quote.append(r'<>#%"{}|\^[]`') should_quote.append(chr(127)) # For 0x7F should_quote = ''.join(should_quote) for char in should_quote: result = urllib.parse.quote(char) self.assertEqual(hexescape(char), result, "using quote(): " "%s should be escaped to %s, not %s" % (char, hexescape(char), result)) result = urllib.parse.quote_plus(char) self.assertEqual(hexescape(char), result, "using quote_plus(): " "%s should be escapes to %s, not %s" % (char, hexescape(char), result)) del should_quote partial_quote = "ab[]cd" expected = "ab%5B%5Dcd" result = urllib.parse.quote(partial_quote) self.assertEqual(expected, result, "using quote(): %r != %r" % (expected, result)) result = urllib.parse.quote_plus(partial_quote) self.assertEqual(expected, result, "using quote_plus(): %r != %r" % (expected, result)) def test_quoting_space(self): # Make sure quote() and quote_plus() handle spaces as specified in # their unique way result = urllib.parse.quote(' ') self.assertEqual(result, hexescape(' '), "using quote(): %r != %r" % (result, hexescape(' '))) result = urllib.parse.quote_plus(' ') self.assertEqual(result, '+', "using quote_plus(): %r != +" % result) given = "a b cd e f" expect = given.replace(' ', hexescape(' ')) result = urllib.parse.quote(given) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) expect = given.replace(' ', '+') result = urllib.parse.quote_plus(given) self.assertEqual(expect, result, "using quote_plus(): %r != %r" % (expect, result)) def test_quoting_plus(self): self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'), 'alpha%2Bbeta+gamma') self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'), 'alpha+beta+gamma') # Test with bytes self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'), 'alpha%2Bbeta+gamma') # Test with safe bytes self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'), 'alpha+beta+gamma') def test_quote_bytes(self): # Bytes should quote directly to percent-encoded values given = b"\xa2\xd8ab\xff" expect = "%A2%D8ab%FF" result = urllib.parse.quote(given) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Encoding argument should raise type error on bytes input self.assertRaises(TypeError, urllib.parse.quote, given, encoding="latin-1") # quote_from_bytes should work the same result = urllib.parse.quote_from_bytes(given) self.assertEqual(expect, result, "using quote_from_bytes(): %r != %r" % (expect, result)) def test_quote_with_unicode(self): # Characters in Latin-1 range, encoded by default in UTF-8 given = "\xa2\xd8ab\xff" expect = "%C2%A2%C3%98ab%C3%BF" result = urllib.parse.quote(given) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Characters in Latin-1 range, encoded by with None (default) result = urllib.parse.quote(given, encoding=None, errors=None) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Characters in Latin-1 range, encoded with Latin-1 given = "\xa2\xd8ab\xff" expect = "%A2%D8ab%FF" result = urllib.parse.quote(given, encoding="latin-1") self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Characters in BMP, encoded by default in UTF-8 given = "\u6f22\u5b57" # "Kanji" expect = "%E6%BC%A2%E5%AD%97" result = urllib.parse.quote(given) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Characters in BMP, encoded with Latin-1 given = "\u6f22\u5b57" 
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given, encoding="latin-1") # Characters in BMP, encoded with Latin-1, with replace error handling given = "\u6f22\u5b57" expect = "%3F%3F" # "??" result = urllib.parse.quote(given, encoding="latin-1", errors="replace") self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) # Characters in BMP, Latin-1, with xmlcharref error handling given = "\u6f22\u5b57" expect = "%26%2328450%3B%26%2323383%3B" # "&#28450;&#23383;" result = urllib.parse.quote(given, encoding="latin-1", errors="xmlcharrefreplace") self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) def test_quote_plus_with_unicode(self): # Encoding (latin-1) test for quote_plus given = "\xa2\xd8 \xff" expect = "%A2%D8+%FF" result = urllib.parse.quote_plus(given, encoding="latin-1") self.assertEqual(expect, result, "using quote_plus(): %r != %r" % (expect, result)) # Errors test for quote_plus given = "ab\u6f22\u5b57 cd" expect = "ab%3F%3F+cd" result = urllib.parse.quote_plus(given, encoding="latin-1", errors="replace") self.assertEqual(expect, result, "using quote_plus(): %r != %r" % (expect, result)) class UnquotingTests(unittest.TestCase): """Tests for unquote() and unquote_plus() See the doc string for quoting_Tests for details on quoting and such. """ def test_unquoting(self): # Make sure unquoting of all ASCII values works escape_list = [] for num in range(128): given = hexescape(chr(num)) expect = chr(num) result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, "using unquote_plus(): %r != %r" % (expect, result)) escape_list.append(given) escape_string = ''.join(escape_list) del escape_list result = urllib.parse.unquote(escape_string) self.assertEqual(result.count('%'), 1, "using unquote(): not all characters escaped: " "%s" % result) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ()) with support.check_warnings(('', BytesWarning), quiet=True): self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'') def test_unquoting_badpercent(self): # Test unquoting on bad percent-escapes given = '%xab' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) given = '%x' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) given = '%' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # unquote_to_bytes given = '%xab' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) given = '%x' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) given = '%' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ()) def test_unquoting_mixed_case(self): # Test unquoting on 
mixed-case hex digits in the percent-escapes given = '%Ab%eA' expect = b'\xab\xea' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) def test_unquoting_parts(self): # Make sure unquoting works when have non-quoted characters # interspersed given = 'ab%sd' % hexescape('c') expect = "abcd" result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using quote(): %r != %r" % (expect, result)) result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, "using unquote_plus(): %r != %r" % (expect, result)) def test_unquoting_plus(self): # Test difference between unquote() and unquote_plus() given = "are+there+spaces..." expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) expect = given.replace('+', ' ') result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, "using unquote_plus(): %r != %r" % (expect, result)) def test_unquote_to_bytes(self): given = 'br%C3%BCckner_sapporo_20050930.doc' expect = b'br\xc3\xbcckner_sapporo_20050930.doc' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) # Test on a string with unescaped non-ASCII characters # (Technically an invalid URI; expect those characters to be UTF-8 # encoded). result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC") expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc" self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) # Test with a bytes as input given = b'%A2%D8ab%FF' expect = b'\xa2\xd8ab\xff' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) # Test with a bytes as input, with unescaped non-ASCII bytes # (Technically an invalid URI; expect those bytes to be preserved) given = b'%A2\xd8ab%FF' expect = b'\xa2\xd8ab\xff' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r" % (expect, result)) def test_unquote_with_unicode(self): # Characters in the Latin-1 range, encoded with UTF-8 given = 'br%C3%BCckner_sapporo_20050930.doc' expect = 'br\u00fcckner_sapporo_20050930.doc' result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Characters in the Latin-1 range, encoded with None (default) result = urllib.parse.unquote(given, encoding=None, errors=None) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Characters in the Latin-1 range, encoded with Latin-1 result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc', encoding="latin-1") expect = 'br\u00fcckner_sapporo_20050930.doc' self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Characters in BMP, encoded with UTF-8 given = "%E6%BC%A2%E5%AD%97" expect = "\u6f22\u5b57" # "Kanji" result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Decode with UTF-8, invalid sequence given = "%F3%B1" expect = "\ufffd" # Replacement character result = urllib.parse.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # Decode with UTF-8, invalid sequence, replace errors result = urllib.parse.unquote(given, errors="replace") self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, 
result)) # Decode with UTF-8, invalid sequence, ignoring errors given = "%F3%B1" expect = "" result = urllib.parse.unquote(given, errors="ignore") self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # A mix of non-ASCII and percent-encoded characters, UTF-8 result = urllib.parse.unquote("\u6f22%C3%BC") expect = '\u6f22\u00fc' self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) # A mix of non-ASCII and percent-encoded characters, Latin-1 # (Note, the string contains non-Latin-1-representable characters) result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1") expect = '\u6f22\u00fc' self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) def test_unquoting_with_bytes_input(self): # Bytes not supported yet with self.assertRaisesRegex(TypeError, 'Expected str, got bytes'): given = b'bl\xc3\xa5b\xc3\xa6rsyltet\xc3\xb8y' urllib.parse.unquote(given) class urlencode_Tests(unittest.TestCase): """Tests for urlencode()""" def help_inputtype(self, given, test_type): """Helper method for testing different input types. 'given' must lead to only the pairs: * 1st, 1 * 2nd, 2 * 3rd, 3 Test cannot assume anything about order. Docs make no guarantee and have possible dictionary input. """ expect_somewhere = ["1st=1", "2nd=2", "3rd=3"] result = urllib.parse.urlencode(given) for expected in expect_somewhere: self.assertIn(expected, result, "testing %s: %s not found in %s" % (test_type, expected, result)) self.assertEqual(result.count('&'), 2, "testing %s: expected 2 '&'s; got %s" % (test_type, result.count('&'))) amp_location = result.index('&') on_amp_left = result[amp_location - 1] on_amp_right = result[amp_location + 1] self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(), "testing %s: '&' not located in proper place in %s" % (test_type, result)) self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps "testing %s: " "unexpected number of characters: %s != %s" % (test_type, len(result), (5 * 3) + 2)) def test_using_mapping(self): # Test passing in a mapping object as an argument. self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'}, "using dict as input type") def test_using_sequence(self): # Test passing in a sequence of two-item sequences as an argument. 
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')], "using sequence of two-item tuples as input") def test_quoting(self): # Make sure keys and values are quoted using quote_plus() given = {"&":"="} expect = "%s=%s" % (hexescape('&'), hexescape('=')) result = urllib.parse.urlencode(given) self.assertEqual(expect, result) given = {"key name":"A bunch of pluses"} expect = "key+name=A+bunch+of+pluses" result = urllib.parse.urlencode(given) self.assertEqual(expect, result) def test_doseq(self): # Test that passing True for 'doseq' parameter works correctly given = {'sequence':['1', '2', '3']} expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3'])) result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True) for value in given["sequence"]: expect = "sequence=%s" % value self.assertIn(expect, result) self.assertEqual(result.count('&'), 2, "Expected 2 '&'s, got %s" % result.count('&')) def test_empty_sequence(self): self.assertEqual("", urllib.parse.urlencode({})) self.assertEqual("", urllib.parse.urlencode([])) def test_nonstring_values(self): self.assertEqual("a=1", urllib.parse.urlencode({"a": 1})) self.assertEqual("a=None", urllib.parse.urlencode({"a": None})) def test_nonstring_seq_values(self): self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True)) self.assertEqual("a=None&a=a", urllib.parse.urlencode({"a": [None, "a"]}, True)) data = collections.OrderedDict([("a", 1), ("b", 1)]) self.assertEqual("a=a&a=b", urllib.parse.urlencode({"a": data}, True)) def test_urlencode_encoding(self): # ASCII encoding. Expect %3F with errors="replace' given = (('\u00a0', '\u00c1'),) expect = '%3F=%3F' result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace") self.assertEqual(expect, result) # Default is UTF-8 encoding. given = (('\u00a0', '\u00c1'),) expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given) self.assertEqual(expect, result) # Latin-1 encoding. given = (('\u00a0', '\u00c1'),) expect = '%A0=%C1' result = urllib.parse.urlencode(given, encoding="latin-1") self.assertEqual(expect, result) def test_urlencode_encoding_doseq(self): # ASCII Encoding. Expect %3F with errors="replace' given = (('\u00a0', '\u00c1'),) expect = '%3F=%3F' result = urllib.parse.urlencode(given, doseq=True, encoding="ASCII", errors="replace") self.assertEqual(expect, result) # ASCII Encoding. On a sequence of values. 
given = (("\u00a0", (1, "\u00c1")),) expect = '%3F=1&%3F=%3F' result = urllib.parse.urlencode(given, True, encoding="ASCII", errors="replace") self.assertEqual(expect, result) # Utf-8 given = (("\u00a0", "\u00c1"),) expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) given = (("\u00a0", (42, "\u00c1")),) expect = '%C2%A0=42&%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) # latin-1 given = (("\u00a0", "\u00c1"),) expect = '%A0=%C1' result = urllib.parse.urlencode(given, True, encoding="latin-1") self.assertEqual(expect, result) given = (("\u00a0", (42, "\u00c1")),) expect = '%A0=42&%A0=%C1' result = urllib.parse.urlencode(given, True, encoding="latin-1") self.assertEqual(expect, result) def test_urlencode_bytes(self): given = ((b'\xa0\x24', b'\xc1\x24'),) expect = '%A0%24=%C1%24' result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) # Sequence of values given = ((b'\xa0\x24', (42, b'\xc1\x24')),) expect = '%A0%24=42&%A0%24=%C1%24' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) def test_urlencode_encoding_safe_parameter(self): # Send '$' (\x24) as safe character # Default utf-8 encoding given = ((b'\xa0\x24', b'\xc1\x24'),) result = urllib.parse.urlencode(given, safe=":$") expect = '%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\xa0\x24', b'\xc1\x24'),) result = urllib.parse.urlencode(given, doseq=True, safe=":$") expect = '%A0$=%C1$' self.assertEqual(expect, result) # Safe parameter in sequence given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=":$") self.assertEqual(expect, result) # Test all above in latin-1 encoding given = ((b'\xa0\x24', b'\xc1\x24'),) result = urllib.parse.urlencode(given, safe=":$", encoding="latin-1") expect = '%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\xa0\x24', b'\xc1\x24'),) expect = '%A0$=%C1$' result = urllib.parse.urlencode(given, doseq=True, safe=":$", encoding="latin-1") given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=":$", encoding="latin-1") self.assertEqual(expect, result) class Pathname_Tests(unittest.TestCase): """Test pathname2url() and url2pathname()""" def test_basic(self): # Make sure simple tests pass expected_path = os.path.join("parts", "of", "a", "path") expected_url = "parts/of/a/path" result = urllib.request.pathname2url(expected_path) self.assertEqual(expected_url, result, "pathname2url() failed; %s != %s" % (result, expected_url)) result = urllib.request.url2pathname(expected_url) self.assertEqual(expected_path, result, "url2pathame() failed; %s != %s" % (result, expected_path)) def test_quoting(self): # Test automatic quoting and unquoting works for pathnam2url() and # url2pathname() respectively given = os.path.join("needs", "quot=ing", "here") expect = "needs/%s/here" % urllib.parse.quote("quot=ing") result = urllib.request.pathname2url(given) self.assertEqual(expect, result, "pathname2url() failed; %s != %s" % (expect, result)) expect = given result = urllib.request.url2pathname(result) self.assertEqual(expect, result, "url2pathname() failed; %s != %s" % (expect, result)) given = os.path.join("make sure", "using_quote") expect = "%s/using_quote" % urllib.parse.quote("make sure") result = urllib.request.pathname2url(given) 
self.assertEqual(expect, result, "pathname2url() failed; %s != %s" % (expect, result)) given = "make+sure/using_unquote" expect = os.path.join("make+sure", "using_unquote") result = urllib.request.url2pathname(given) self.assertEqual(expect, result, "url2pathname() failed; %s != %s" % (expect, result)) @unittest.skipUnless(sys.platform == 'win32', 'test specific to the urllib.url2path function.') def test_ntpath(self): given = ('/C:/', '///C:/', '/C|//') expect = 'C:\\' for url in given: result = urllib.request.url2pathname(url) self.assertEqual(expect, result, 'urllib.request..url2pathname() failed; %s != %s' % (expect, result)) given = '///C|/path' expect = 'C:\\path' result = urllib.request.url2pathname(given) self.assertEqual(expect, result, 'urllib.request.url2pathname() failed; %s != %s' % (expect, result)) class Utility_Tests(unittest.TestCase): """Testcase to test the various utility functions in the urllib.""" def test_thishost(self): """Test the urllib.request.thishost utility function returns a tuple""" self.assertIsInstance(urllib.request.thishost(), tuple) class URLopener_Tests(FakeHTTPMixin, unittest.TestCase): """Testcase to test the open method of URLopener class.""" def test_quoted_open(self): class DummyURLopener(urllib.request.URLopener): def open_spam(self, url): return url with support.check_warnings( ('DummyURLopener style of invoking requests is deprecated.', DeprecationWarning)): self.assertEqual(DummyURLopener().open( 'spam://example/ /'),'//example/%20/') # test the safe characters are not quoted by urlopen self.assertEqual(DummyURLopener().open( "spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"), "//c:|windows%/:=&?~#+!$,;'@()*[]|/path/") @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_file(self): with support.temp_dir() as tmpdir: fd, tmpfile = tempfile.mkstemp(dir=tmpdir) os.close(fd) fileurl = "file:" + urllib.request.pathname2url(tmpfile) filename, _ = urllib.request.URLopener().retrieve(fileurl) # Some buildbots have TEMP folder that uses a lowercase drive letter. self.assertEqual(os.path.normcase(filename), os.path.normcase(tmpfile)) @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_remote(self): url = "http://www.python.org/file.txt" self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!") self.addCleanup(self.unfakehttp) filename, _ = urllib.request.URLopener().retrieve(url) self.assertEqual(os.path.splitext(filename)[1], ".txt") @support.ignore_warnings(category=DeprecationWarning) def test_local_file_open(self): # bpo-35907, CVE-2019-9948: urllib must reject local_file:// scheme class DummyURLopener(urllib.request.URLopener): def open_local_file(self, url): return url for url in ('local_file://example', 'local-file://example'): self.assertRaises(OSError, urllib.request.urlopen, url) self.assertRaises(OSError, urllib.request.URLopener().open, url) self.assertRaises(OSError, urllib.request.URLopener().retrieve, url) self.assertRaises(OSError, DummyURLopener().open, url) self.assertRaises(OSError, DummyURLopener().retrieve, url) # Just commented them out. # Can't really tell why keep failing in windows and sparc. # Everywhere else they work ok, but on those machines, sometimes # fail in one of the tests, sometimes in other. I have a linux, and # the tests go ok. # If anybody has one of the problematic environments, please help! # . 
Facundo # # def server(evt): # import socket, time # serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # serv.settimeout(3) # serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # serv.bind(("", 9093)) # serv.listen() # try: # conn, addr = serv.accept() # conn.send("1 Hola mundo\n") # cantdata = 0 # while cantdata < 13: # data = conn.recv(13-cantdata) # cantdata += len(data) # time.sleep(.3) # conn.send("2 No more lines\n") # conn.close() # except socket.timeout: # pass # finally: # serv.close() # evt.set() # # class FTPWrapperTests(unittest.TestCase): # # def setUp(self): # import ftplib, time, threading # ftplib.FTP.port = 9093 # self.evt = threading.Event() # threading.Thread(target=server, args=(self.evt,)).start() # time.sleep(.1) # # def tearDown(self): # self.evt.wait() # # def testBasic(self): # # connects # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, []) # ftp.close() # # def testTimeoutNone(self): # # global default timeout is ignored # import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) # try: # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, []) # finally: # socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # # def testTimeoutDefault(self): # # global default timeout is used # import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) # try: # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, []) # finally: # socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # # def testTimeoutValue(self): # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [], # timeout=30) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() class RequestTests(unittest.TestCase): """Unit tests for urllib.request.Request.""" def test_default_values(self): Request = urllib.request.Request request = Request("http://www.python.org") self.assertEqual(request.get_method(), 'GET') request = Request("http://www.python.org", {}) self.assertEqual(request.get_method(), 'POST') def test_with_method_arg(self): Request = urllib.request.Request request = Request("http://www.python.org", method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request("http://www.python.org", {}, method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request("http://www.python.org", method='GET') self.assertEqual(request.get_method(), 'GET') request.method = 'HEAD' self.assertEqual(request.get_method(), 'HEAD') class URL2PathNameTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(url2pathname("///C|"), 'C:') self.assertEqual(url2pathname("///C:"), 'C:') self.assertEqual(url2pathname("///C|/"), 'C:\\') def test_converting_when_no_drive_letter(self): # cannot end a raw string in \ self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\') self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\') def test_simple_compare(self): self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"), r'C:\foo\bar\spam.foo') def test_non_ascii_drive_letter(self): self.assertRaises(IOError, url2pathname, "///\u00e8|/") def test_roundtrip_url2pathname(self): list_of_paths = ['C:', r'\\\C\test\\', r'C:\foo\bar\spam.foo' ] for path in list_of_paths: self.assertEqual(url2pathname(pathname2url(path)), path) class PathName2URLTests(unittest.TestCase): def 
test_converting_drive_letter(self): self.assertEqual(pathname2url("C:"), '///C:') self.assertEqual(pathname2url("C:\\"), '///C:') def test_converting_when_no_drive_letter(self): self.assertEqual(pathname2url(r"\\\folder\test" "\\"), '/////folder/test/') self.assertEqual(pathname2url(r"\\folder\test" "\\"), '////folder/test/') self.assertEqual(pathname2url(r"\folder\test" "\\"), '/folder/test/') def test_simple_compare(self): self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'), "///C:/foo/bar/spam.foo" ) def test_long_drive_letter(self): self.assertRaises(IOError, pathname2url, "XX:\\") def test_roundtrip_pathname2url(self): list_of_paths = ['///C:', '/////folder/test/', '///C:/foo/bar/spam.foo'] for path in list_of_paths: self.assertEqual(pathname2url(url2pathname(path)), path) if __name__ == '__main__': unittest.main()
2.84375
3
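The quoting and unquoting tests in the record above all revolve around one rule from RFC 3986: an escaped byte is written as '%' followed by its two-digit uppercase hex value, with text encoded to UTF-8 by default before escaping. The short standalone sketch below illustrates that behaviour with urllib.parse; the sample strings are illustrative and are not taken from the test suite.

from urllib.parse import quote, quote_plus, unquote, unquote_plus

# '/' is in the default 'safe' set, so only the space is escaped here.
assert quote("a b/c") == "a%20b/c"

# quote_plus() turns spaces into '+' and must therefore escape a literal '+'.
assert quote_plus("alpha+beta gamma") == "alpha%2Bbeta+gamma"

# Non-ASCII text is encoded to UTF-8 first, then each byte is percent-escaped.
assert quote("\u00fc") == "%C3%BC"

# unquote()/unquote_plus() reverse the transformation.
assert unquote("%C3%BC") == "\u00fc"
assert unquote_plus("a+b%26c") == "a b&c"

print("percent-encoding round trips hold")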
gapipy/resources/tour/transport.py
wmak/gapipy
0
1770
# Python 2 and 3 from __future__ import unicode_literals from ...models import Address, SeasonalPriceBand from ..base import Product class Transport(Product): _resource_name = 'transports' _is_listable = False _as_is_fields = [ 'id', 'href', 'availability', 'name', 'product_line', 'sku', 'type', 'sub_type' ] _date_time_fields_utc = ['date_created', 'date_last_modified'] _model_fields = [('start_address', Address), ('finish_address', Address)] _model_collection_fields = [('price_bands', SeasonalPriceBand)]
1.84375
2
modules/dare.py
VeNoM-hubs/nyx
0
1771
from discord.ext import commands import json import random with open("assets/json/questions.json") as data: data = json.load(data) dares = data["dares"] class Dare(commands.Cog): def __init__(self, client): self.client = client @commands.command(aliases=["d"]) async def dare(self, ctx): dare = random.choice(dares) await ctx.send(dare) def setup(client): client.add_cog(Dare(client))
2.75
3
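The cog module above exposes a setup() hook, which is how discord.py 1.x extensions are registered. A minimal loading sketch is shown below; the module path modules.dare mirrors the record's file location, while the "!" command prefix and the token string are placeholders, not values from the repository.

from discord.ext import commands

bot = commands.Bot(command_prefix="!")

# load_extension() imports modules/dare.py and calls its setup(bot) hook,
# which adds the Dare cog (and its dare/d command) to the bot.
bot.load_extension("modules.dare")

bot.run("YOUR_BOT_TOKEN")  # placeholder token, not a real credential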
scripts/apic.py
nicmatth/APIC-EM-HelloWorldv3
0
1772
<filename>scripts/apic.py APIC_IP="sandboxapic.cisco.com" APIC_PORT="443" GROUP='group-xx'
1.3125
1
stella/test/external_func.py
squisher/stella
11
1773
<reponame>squisher/stella # Copyright 2013-2015 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from random import randint import mtpy from . import * # noqa def seed_const(): mtpy.mt_seed32new(42) def seed(s): mtpy.mt_seed32new(s) def drand_const(): mtpy.mt_seed32new(42) return mtpy.mt_drand() def drand(s): mtpy.mt_seed32new(s) return mtpy.mt_drand() + mtpy.mt_drand() @mark.parametrize('f', [seed_const, drand_const]) def test1(f): make_eq_test(f, ()) @mark.parametrize('arg', single_args([1, 2, 42, 1823828, randint(1, 10000000), randint(1, 10000000)])) @mark.parametrize('f', [seed, drand]) def test2(f, arg): make_eq_test(f, arg)
2.421875
2
szh_objects.py
ipqhjjybj/bitcoin_trend_strategy
4
1774
<gh_stars>1-10 # encoding: utf-8 import sys from market_maker import OrderManager from settings import * import os from pymongo import MongoClient, ASCENDING from pymongo.errors import ConnectionFailure from datetime import datetime , timedelta import numpy as np ######################################################################################################################## # constants EXCHANGE_BITMEX = "BITMEX" EMPTY_STRING = "" EMPTY_FLOAT = 0.0 EMPTY_INT = 0 #---------------------------------------------------------------------- class LoggerEngine(object): LogDir = "LogDir" #---------------------------------------------------------------------- def __init__(self, logName , in_debug = True , open_md = "w"): if os.path.exists(self.LogDir) == False: os.mkdir( self.LogDir ) self.logPath = os.path.join(self.LogDir , logName) self.now_debug = in_debug if self.now_debug: self.f = open( self.logPath , open_md) #---------------------------------------------------------------------- def error(self, msg , error_id): if self.now_debug: self.f.write(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " : " + "Error msg %s: %s " % (str(error_id) , msg) + "\n") self.f.flush() #---------------------------------------------------------------------- def info(self, msg): if self.now_debug: self.f.write(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " : " + msg + "\n") self.f.flush() #---------------------------------------------------------------------- def close(self): self.f.close() ''' tick 数据的格式 ''' class TickData(object): #---------------------------------------------------------------------- def __init__(self): """Constructor""" super(TickData, self).__init__() # 代码相关 self.symbol = EMPTY_STRING # 合约代码 self.exchange = EMPTY_STRING # 交易所代码 self.vtSymbol = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 合约代码.交易所代码 # 成交数据 self.lastPrice = EMPTY_FLOAT # 最新成交价 self.lastVolume = EMPTY_INT # 最新成交量 self.volume = EMPTY_INT # 今天总成交量 self.openInterest = EMPTY_INT # 持仓量 self.time = EMPTY_STRING # 时间 11:20:56.5 self.date = EMPTY_STRING # 日期 20151009 self.datetime = None # python的datetime时间对象 # 常规行情 self.openPrice = EMPTY_FLOAT # 今日开盘价 self.highPrice = EMPTY_FLOAT # 今日最高价 self.lowPrice = EMPTY_FLOAT # 今日最低价 self.preClosePrice = EMPTY_FLOAT self.upperLimit = EMPTY_FLOAT # 涨停价 self.lowerLimit = EMPTY_FLOAT # 跌停价 # 五档行情 self.bidPrice1 = EMPTY_FLOAT self.bidPrice2 = EMPTY_FLOAT self.bidPrice3 = EMPTY_FLOAT self.bidPrice4 = EMPTY_FLOAT self.bidPrice5 = EMPTY_FLOAT self.askPrice1 = EMPTY_FLOAT self.askPrice2 = EMPTY_FLOAT self.askPrice3 = EMPTY_FLOAT self.askPrice4 = EMPTY_FLOAT self.askPrice5 = EMPTY_FLOAT self.bidVolume1 = EMPTY_INT self.bidVolume2 = EMPTY_INT self.bidVolume3 = EMPTY_INT self.bidVolume4 = EMPTY_INT self.bidVolume5 = EMPTY_INT self.askVolume1 = EMPTY_INT self.askVolume2 = EMPTY_INT self.askVolume3 = EMPTY_INT self.askVolume4 = EMPTY_INT self.askVolume5 = EMPTY_INT ######################################################################## class BarData(object): """K线数据""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" super(BarData, self).__init__() self.vtSymbol = EMPTY_STRING # vt系统代码 self.symbol = EMPTY_STRING # 代码 self.exchange = EMPTY_STRING # 交易所 self.open = EMPTY_FLOAT # OHLC self.high = EMPTY_FLOAT self.low = EMPTY_FLOAT self.close = EMPTY_FLOAT self.date = EMPTY_STRING # bar开始的时间,日期 self.time = EMPTY_STRING # 时间 self.datetime = None # python的datetime时间对象 self.volume = EMPTY_INT # 成交量 self.openInterest = EMPTY_INT # 持仓量 ''' engine的基础类 ''' class 
EngineBase(object): #---------------------------------------------------------------------- def writeLog(self, content): if self.logger: self.logger.info(content) #---------------------------------------------------------------------- def writeError(self, content , error_id = 0): """ 发送错误通知/记录日志文件 :param content: :return: """ if self.logger: self.logger.error(content , error_id) ''' 主要Engine ''' class DataEngine(EngineBase): #---------------------------------------------------------------------- def __init__(self , _host = GLOBAL_MONGO_HOST , _port = GLOBAL_MONGO_PORT): super(DataEngine, self).__init__() self.host = _host self.port = _port # MongoDB数据库相关 self.dbClient = None # MongoDB客户端对象 self.logger = LoggerEngine("dataEngine.log") ## init the db self.dbConnect() #---------------------------------------------------------------------- def dbConnect(self): """连接MongoDB数据库""" if not self.dbClient: # 读取MongoDB的设置 try: # 设置MongoDB操作的超时时间为0.5秒 self.dbClient = MongoClient(self.host , self.port , connectTimeoutMS=500) # 调用server_info查询服务器状态,防止服务器异常并未连接成功 self.dbClient.server_info() self.writeLog(u'database connection error') except ConnectionFailure: self.writeLog( u'fail in db connection') #---------------------------------------------------------------------- def dbQuery(self, dbName, collectionName, d, sortKey='', sortDirection=ASCENDING): """从MongoDB中读取数据,d是查询要求,返回的是数据库查询的指针""" if self.dbClient: db = self.dbClient[dbName] collection = db[collectionName] if sortKey: cursor = collection.find(d).sort(sortKey, sortDirection) # 对查询出来的数据进行排序 else: cursor = collection.find(d) if cursor: return list(cursor) else: return [] else: self.writeLog(u'db query failed') return [] #----------------------------------------------------------------------- def loadBars( self, dbName = GLOBAL_USE_DBNAME , collectionName = GLOBAL_USE_SYMBOL, days = 2): today_datetime = datetime.now() start_datetime = today_datetime - timedelta( days = days) d = {'datetime':{'$gte':start_datetime , '$lte':today_datetime}} barData = self.dbQuery(dbName, collectionName, d, 'datetime') l = [] for d in barData: bar = BarData() bar.__dict__ = d l.append(bar) return l ######################################################################## class BarManager(object): """ K线合成器,支持: 1. 基于Tick合成1分钟K线 2. 
基于1分钟K线合成X分钟K线(X可以是2、3、5、10、15、30、60) """ #---------------------------------------------------------------------- def __init__(self, onBar, xsec=0, onXsecBar=None , xmin=0 , xhour=0, onXminBar=None , onXhourBar = None, onDayBar=None): """Constructor""" self.bar = None # 1分钟K线对象 self.onBar = onBar # 1分钟K线回调函数 self.xsecBar = None # 多少秒K线对象 self.xsec = xsec # xsec的值 self.onXsecBar = onXsecBar # x秒的回调函数 self.xminBar = None # X分钟K线对象 self.xmin = xmin # X的值 self.onXminBar = onXminBar # X分钟K线的回调函数 self.xhourBar = None # x小时K线对象 self.xhour = xhour # x的值 self.onXhourBar = onXhourBar # x小时K线的回调函数 self.lastTick = None # 上一TICK缓存对象 self.lastSecondTick = None # 用于秒级别的上一根Tick缓存对象 self.dayBar = None # 一个交易日的bar对象 self.onDayBar = onDayBar # 交易日K线的回调函数 self.lastDayBar = None #---------------------------------------------------------------------- def updateTick(self, tick): """TICK更新""" newMinute = False # 默认不是新的一分钟 # 尚未创建对象 if not self.bar: self.bar = BarData() newMinute = True # 新的一分钟 elif self.bar.datetime.minute != tick.datetime.minute: # 生成上一分钟K线的时间戳 self.bar.datetime = self.bar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.bar.date = self.bar.datetime.strftime('%Y%m%d') self.bar.time = self.bar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上一分钟K线 self.onBar(self.bar) # 创建新的K线对象 self.bar = BarData() newMinute = True # 初始化新一分钟的K线数据 if newMinute: self.bar.vtSymbol = tick.vtSymbol self.bar.symbol = tick.symbol self.bar.exchange = tick.exchange self.bar.open = tick.lastPrice self.bar.high = tick.lastPrice self.bar.low = tick.lastPrice # 累加更新老一分钟的K线数据 else: self.bar.high = max(self.bar.high, tick.lastPrice) self.bar.low = min(self.bar.low, tick.lastPrice) # 通用更新部分 self.bar.close = tick.lastPrice self.bar.datetime = tick.datetime self.bar.openInterest = tick.openInterest if self.lastTick: self.bar.volume += (tick.volume - self.lastTick.volume) # 当前K线内的成交量 # 缓存Tick self.lastTick = tick #---------------------------------------------------------------------- def updateSecond(self, tick ): """通过TICK数据更新到秒数据""" newSecond = False if not self.xsecBar: self.xsecBar = BarData() newSecond = True elif self.xsecBar.datetime.second != tick.datetime.second and ( (tick.datetime.second) % self.xsec == 0 ): self.xsecBar.datetime = self.xsecBar.datetime.replace( microsecond=0) # 将秒和微秒设为0 self.xsecBar.date = self.xsecBar.datetime.strftime('%Y%m%d') self.xsecBar.time = self.xsecBar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上多少秒K线 self.onXsecBar(self.xsecBar) # 清空老K线缓存对象 self.xsecBar = BarData() newSecond = True # 初始化新多少秒的K线数据 if newSecond : self.xsecBar.datetime = tick.datetime self.xsecBar.vtSymbol = tick.vtSymbol self.xsecBar.symbol = tick.symbol self.xsecBar.exchange = tick.exchange self.xsecBar.open = tick.lastPrice self.xsecBar.high = tick.lastPrice self.xsecBar.low = tick.lastPrice # 累加更新老几秒的K线数据 else: self.xsecBar.high = max(self.xsecBar.high, tick.lastPrice) self.xsecBar.low = min(self.xsecBar.low, tick.lastPrice) # 通用更新部分 self.xsecBar.close = tick.lastPrice self.xsecBar.openInterest = tick.openInterest if self.lastSecondTick: self.xsecBar.volume += (tick.volume - self.lastSecondTick.volume) # 当前Tick内的成交量 # 缓存 secondTick 对象 self.lastSecondTick = tick #---------------------------------------------------------------------- def updateBar(self, bar): """1分钟K线更新""" # 尚未创建对象 if not self.xminBar: self.xminBar = BarData() self.xminBar.vtSymbol = bar.vtSymbol self.xminBar.symbol = bar.symbol self.xminBar.exchange = bar.exchange self.xminBar.open = bar.open self.xminBar.high = bar.high self.xminBar.low = bar.low 
self.xminBar.datetime = bar.datetime # 累加老K线 else: self.xminBar.high = max(self.xminBar.high, bar.high) self.xminBar.low = min(self.xminBar.low, bar.low) # 通用部分 self.xminBar.close = bar.close self.xminBar.openInterest = bar.openInterest self.xminBar.volume += float(bar.volume) # X分钟已经走完 if ( (bar.datetime.minute + 1) % self.xmin ) == 0: # 可以用X整除 # 生成上一X分钟K线的时间戳 self.xminBar.datetime = self.xminBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.xminBar.date = self.xminBar.datetime.strftime('%Y%m%d') self.xminBar.time = self.xminBar.datetime.strftime('%H:%M:%S') # 推送 self.onXminBar(self.xminBar) # 清空老K线缓存对象 self.xminBar = None #---------------------------------------------------------------------- def updateHourBar(self , bar): """1小时K线更新""" # 尚未创建对象 if not self.xhourBar: self.xhourBar = BarData() self.xhourBar.vtSymbol = bar.vtSymbol self.xhourBar.symbol = bar.symbol self.xhourBar.exchange = bar.exchange self.xhourBar.open = bar.open self.xhourBar.high = bar.high self.xhourBar.low = bar.low self.xhourBar.datetime = bar.datetime else: self.xhourBar.high = max(self.xhourBar.high, bar.high) self.xhourBar.low = min(self.xhourBar.low, bar.low) # 通用部分 self.xhourBar.close = bar.close self.xhourBar.openInterest = bar.openInterest self.xhourBar.volume += float(bar.volume) # X分钟已经走完 if ( (bar.datetime.hour + 1) % self.xhour ) == 0: # 可以用X整除 # 生成上一X分钟K线的时间戳 self.xhourBar.datetime = self.xhourBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.xhourBar.date = self.xhourBar.datetime.strftime('%Y%m%d') self.xhourBar.time = self.xhourBar.datetime.strftime('%H:%M:%S') # 推送 self.onXhourBar(self.xhourBar) # 清空老K线缓存对象 self.xhourBar = None #---------------------------------------------------------------------------- def updateDayBar(self, bar): # 一天走完 # 1. 夜盘 , 2.第二天9点 if self.lastDayBar != None \ and ( (self.lastDayBar.time <= "15:30:00" and bar.time >= "15:30:00") \ or (self.lastDayBar.time <= "15:30:00" and bar.time <= self.lastDayBar.time )): self.dayBar.datetime = self.dayBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.dayBar.date = self.dayBar.datetime.strftime('%Y%m%d') self.dayBar.time = self.dayBar.datetime.strftime('%H:%M:%S') # 说明是新的一天了 # 先推送昨天过去 self.onDayBar( self.dayBar) self.dayBar = BarData() self.dayBar.vtSymbol = bar.vtSymbol self.dayBar.symbol = bar.symbol self.dayBar.exchange = bar.exchange self.dayBar.open = bar.open self.dayBar.high = bar.high self.dayBar.low = bar.low self.dayBar.datetime = bar.datetime elif not self.dayBar: self.dayBar = BarData() self.dayBar.vtSymbol = bar.vtSymbol self.dayBar.symbol = bar.symbol self.dayBar.exchange = bar.exchange self.dayBar.open = bar.open self.dayBar.high = bar.high self.dayBar.low = bar.low self.dayBar.datetime = bar.datetime else: self.dayBar.high = max(self.dayBar.high , bar.high) self.dayBar.low = min(self.dayBar.low , bar.low) # 通用部分 self.dayBar.close = bar.close self.dayBar.openInterest = bar.openInterest self.dayBar.volume += float(bar.volume) self.lastDayBar = bar ######################################################################## class ArrayManager(object): """ K线序列管理工具,负责: 1. K线时间序列的维护 2. 
常用技术指标的计算 """ #---------------------------------------------------------------------- def __init__(self, size=100): """Constructor""" self.count = 0 # 缓存计数 self.size = size # 缓存大小 self.inited = False # True if count>=size self.openArray = np.zeros(size) # OHLC self.highArray = np.zeros(size) self.lowArray = np.zeros(size) self.closeArray = np.zeros(size) self.volumeArray = np.zeros(size) #---------------------------------------------------------------------- def updateBar(self, bar): """更新K线""" self.count += 1 if not self.inited and self.count >= self.size: self.inited = True self.openArray[0:self.size-1] = self.openArray[1:self.size] self.highArray[0:self.size-1] = self.highArray[1:self.size] self.lowArray[0:self.size-1] = self.lowArray[1:self.size] self.closeArray[0:self.size-1] = self.closeArray[1:self.size] self.volumeArray[0:self.size-1] = self.volumeArray[1:self.size] self.openArray[-1] = bar.open self.highArray[-1] = bar.high self.lowArray[-1] = bar.low self.closeArray[-1] = bar.close self.volumeArray[-1] = bar.volume #---------------------------------------------------------------------- @property def open(self): """获取开盘价序列""" return self.openArray #---------------------------------------------------------------------- @property def high(self): """获取最高价序列""" return self.highArray #---------------------------------------------------------------------- @property def low(self): """获取最低价序列""" return self.lowArray #---------------------------------------------------------------------- @property def close(self): """获取收盘价序列""" return self.closeArray #---------------------------------------------------------------------- @property def volume(self): """获取成交量序列""" return self.volumeArray
2.03125
2
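The BarManager class in the record above builds X-minute bars out of 1-minute bars by keeping the first open, widening the high/low extremes, rolling the close forward, summing volume, and emitting the bar once (minute + 1) is a multiple of X. The self-contained sketch below restates just that aggregation rule; the Bar dataclass, the five-minute window, and the print callback are simplified stand-ins, not the repository's own classes.

from dataclasses import dataclass
from datetime import datetime
from typing import Callable, Optional

@dataclass
class Bar:
    datetime: datetime
    open: float
    high: float
    low: float
    close: float
    volume: float

class XMinuteAggregator:
    """Roll 1-minute bars into x-minute bars (simplified sketch)."""

    def __init__(self, xmin: int, on_xmin_bar: Callable[[Bar], None]):
        self.xmin = xmin
        self.on_xmin_bar = on_xmin_bar
        self.working: Optional[Bar] = None

    def update_bar(self, bar: Bar) -> None:
        if self.working is None:
            # The first 1-minute bar of the window fixes the open.
            self.working = Bar(bar.datetime, bar.open, bar.high,
                               bar.low, bar.close, bar.volume)
        else:
            # Later bars only widen the extremes, roll the close forward
            # and accumulate volume.
            self.working.high = max(self.working.high, bar.high)
            self.working.low = min(self.working.low, bar.low)
            self.working.close = bar.close
            self.working.volume += bar.volume
        # Emit once the minute index completes a multiple of xmin,
        # mirroring the (minute + 1) % xmin == 0 check in the record above.
        if (bar.datetime.minute + 1) % self.xmin == 0:
            self.on_xmin_bar(self.working)
            self.working = None

if __name__ == "__main__":
    agg = XMinuteAggregator(5, lambda b: print("5-minute bar:", b))
    for minute in range(10):
        ts = datetime(2024, 1, 1, 9, minute)
        agg.update_bar(Bar(ts, 100.0, 101.0, 99.0, 100.5, 10.0))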
CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Tools/python/pylint/pyreverse/writer.py
ishtjot/susereumutep
14,668
1775
# -*- coding: utf-8 -*- # Copyright (c) 2008-2013 LOGILAB S.A. (Paris, FRANCE). # http://www.logilab.fr/ -- mailto:<EMAIL> # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """Utilities for creating VCG and Dot diagrams""" from logilab.common.vcgutils import VCGPrinter from logilab.common.graph import DotBackend from pylint.pyreverse.utils import is_exception class DiagramWriter(object): """base class for writing project diagrams """ def __init__(self, config, styles): self.config = config self.pkg_edges, self.inh_edges, self.imp_edges, self.ass_edges = styles self.printer = None # defined in set_printer def write(self, diadefs): """write files for <project> according to <diadefs> """ for diagram in diadefs: basename = diagram.title.strip().replace(' ', '_') file_name = '%s.%s' % (basename, self.config.output_format) self.set_printer(file_name, basename) if diagram.TYPE == 'class': self.write_classes(diagram) else: self.write_packages(diagram) self.close_graph() def write_packages(self, diagram): """write a package diagram""" # sorted to get predictable (hence testable) results for i, obj in enumerate(sorted(diagram.modules(), key=lambda x: x.title)): self.printer.emit_node(i, label=self.get_title(obj), shape='box') obj.fig_id = i # package dependencies for rel in diagram.get_relationships('depends'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.pkg_edges) def write_classes(self, diagram): """write a class diagram""" # sorted to get predictable (hence testable) results for i, obj in enumerate(sorted(diagram.objects, key=lambda x: x.title)): self.printer.emit_node(i, **self.get_values(obj)) obj.fig_id = i # inheritance links for rel in diagram.get_relationships('specialization'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.inh_edges) # implementation links for rel in diagram.get_relationships('implements'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.imp_edges) # generate associations for rel in diagram.get_relationships('association'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, label=rel.name, **self.ass_edges) def set_printer(self, file_name, basename): """set printer""" raise NotImplementedError def get_title(self, obj): """get project title""" raise NotImplementedError def get_values(self, obj): """get label and shape for classes.""" raise NotImplementedError def close_graph(self): """finalize the graph""" raise NotImplementedError class DotWriter(DiagramWriter): """write dot graphs from a diagram definition and a project """ def __init__(self, config): styles = [dict(arrowtail='none', arrowhead="open"), dict(arrowtail='none', arrowhead='empty'), dict(arrowtail='node', arrowhead='empty', style='dashed'), dict(fontcolor='green', arrowtail='none', arrowhead='diamond', style='solid'), ] DiagramWriter.__init__(self, config, 
styles) def set_printer(self, file_name, basename): """initialize DotWriter and add options for layout. """ layout = dict(rankdir="BT") self.printer = DotBackend(basename, additionnal_param=layout) self.file_name = file_name def get_title(self, obj): """get project title""" return obj.title def get_values(self, obj): """get label and shape for classes. The label contains all attributes and methods """ label = obj.title if obj.shape == 'interface': label = u'«interface»\\n%s' % label if not self.config.only_classnames: label = r'%s|%s\l|' % (label, r'\l'.join(obj.attrs)) for func in obj.methods: label = r'%s%s()\l' % (label, func.name) label = '{%s}' % label if is_exception(obj.node): return dict(fontcolor='red', label=label, shape='record') return dict(label=label, shape='record') def close_graph(self): """print the dot graph into <file_name>""" self.printer.generate(self.file_name) class VCGWriter(DiagramWriter): """write vcg graphs from a diagram definition and a project """ def __init__(self, config): styles = [dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=0), dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none', linestyle='dotted', backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none', textcolor='green'), ] DiagramWriter.__init__(self, config, styles) def set_printer(self, file_name, basename): """initialize VCGWriter for a UML graph""" self.graph_file = open(file_name, 'w+') self.printer = VCGPrinter(self.graph_file) self.printer.open_graph(title=basename, layoutalgorithm='dfs', late_edge_labels='yes', port_sharing='no', manhattan_edges='yes') self.printer.emit_node = self.printer.node self.printer.emit_edge = self.printer.edge def get_title(self, obj): """get project title in vcg format""" return r'\fb%s\fn' % obj.title def get_values(self, obj): """get label and shape for classes. The label contains all attributes and methods """ if is_exception(obj.node): label = r'\fb\f09%s\fn' % obj.title else: label = r'\fb%s\fn' % obj.title if obj.shape == 'interface': shape = 'ellipse' else: shape = 'box' if not self.config.only_classnames: attrs = obj.attrs methods = [func.name for func in obj.methods] # box width for UML like diagram maxlen = max(len(name) for name in [obj.title] + methods + attrs) line = '_' * (maxlen + 2) label = r'%s\n\f%s' % (label, line) for attr in attrs: label = r'%s\n\f08%s' % (label, attr) if attrs: label = r'%s\n\f%s' % (label, line) for func in methods: label = r'%s\n\f10%s()' % (label, func) return dict(label=label, shape=shape) def close_graph(self): """close graph and file""" self.printer.close_graph() self.graph_file.close()
1.96875
2
graphql-ml-serving/backend/mutations.py
philippe-heitzmann/python-apps
13
1776
import logging

from ariadne import MutationType, convert_kwargs_to_snake_case

from config import clients, messages, queue

mutation = MutationType()


@mutation.field("createMessage")
@convert_kwargs_to_snake_case
async def resolve_create_message(obj, info, content, client_id):
    try:
        message = {"content": content, "client_id": client_id}
        messages.append(message)
        await queue.put(message)
        return {"success": True, "message": message}
    except Exception as error:
        return {"success": False, "errors": [str(error)]}


@mutation.field("createClient")
@convert_kwargs_to_snake_case
async def resolve_create_client(obj, info, client_id):
    try:
        logging.info(f"Client id: {client_id}")
        if not clients.get(client_id):
            client = {"client_id": client_id}
            clients[client_id] = client
            return {"success": True, "client": client}
        return {"success": False, "errors": ["Client is taken"]}
    except Exception as error:
        return {"success": False, "errors": [str(error)]}
2.171875
2
hc/api/transports.py
MaxwellDPS/healthchecks
1
1777
<reponame>MaxwellDPS/healthchecks import os from django.conf import settings from django.template.loader import render_to_string from django.utils import timezone import json import requests from urllib.parse import quote, urlencode from hc.accounts.models import Profile from hc.lib import emails from hc.lib.string import replace try: import apprise except ImportError: # Enforce settings.APPRISE_ENABLED = False def tmpl(template_name, **ctx): template_path = "integrations/%s" % template_name # \xa0 is non-breaking space. It causes SMS messages to use UCS2 encoding # and cost twice the money. return render_to_string(template_path, ctx).strip().replace("\xa0", " ") class Transport(object): def __init__(self, channel): self.channel = channel def notify(self, check): """ Send notification about current status of the check. This method returns None on success, and error message on error. """ raise NotImplementedError() def is_noop(self, check): """ Return True if transport will ignore check's current status. This method is overridden in Webhook subclass where the user can configure webhook urls for "up" and "down" events, and both are optional. """ return False def checks(self): return self.channel.project.check_set.order_by("created") class Email(Transport): def notify(self, check, bounce_url): if not self.channel.email_verified: return "Email not verified" unsub_link = self.channel.get_unsub_link() headers = { "X-Bounce-Url": bounce_url, "List-Unsubscribe": "<%s>" % unsub_link, "List-Unsubscribe-Post": "List-Unsubscribe=One-Click", } try: # Look up the sorting preference for this email address p = Profile.objects.get(user__email=self.channel.email_value) sort = p.sort except Profile.DoesNotExist: # Default sort order is by check's creation time sort = "created" # list() executes the query, to avoid DB access while # rendering a template ctx = { "check": check, "checks": list(self.checks()), "sort": sort, "now": timezone.now(), "unsub_link": unsub_link, } emails.alert(self.channel.email_value, ctx, headers) def is_noop(self, check): if not self.channel.email_verified: return True if check.status == "down": return not self.channel.email_notify_down else: return not self.channel.email_notify_up class Shell(Transport): def prepare(self, template, check): """ Replace placeholders with actual values. """ ctx = { "$CODE": str(check.code), "$STATUS": check.status, "$NOW": timezone.now().replace(microsecond=0).isoformat(), "$NAME": check.name, "$TAGS": check.tags, } for i, tag in enumerate(check.tags_list()): ctx["$TAG%d" % (i + 1)] = tag return replace(template, ctx) def is_noop(self, check): if check.status == "down" and not self.channel.cmd_down: return True if check.status == "up" and not self.channel.cmd_up: return True return False def notify(self, check): if not settings.SHELL_ENABLED: return "Shell commands are not enabled" if check.status == "up": cmd = self.channel.cmd_up elif check.status == "down": cmd = self.channel.cmd_down cmd = self.prepare(cmd, check) code = os.system(cmd) if code != 0: return "Command returned exit code %d" % code class HttpTransport(Transport): @classmethod def get_error(cls, response): # Override in subclasses: look for a specific error message in the # response and return it. 
return None @classmethod def _request(cls, method, url, **kwargs): try: options = dict(kwargs) options["timeout"] = 5 if "headers" not in options: options["headers"] = {} if "User-Agent" not in options["headers"]: options["headers"]["User-Agent"] = "healthchecks.io" r = requests.request(method, url, **options) if r.status_code not in (200, 201, 202, 204): m = cls.get_error(r) if m: return f'Received status code {r.status_code} with a message: "{m}"' return f"Received status code {r.status_code}" except requests.exceptions.Timeout: # Well, we tried return "Connection timed out" except requests.exceptions.ConnectionError: return "Connection failed" @classmethod def get(cls, url, **kwargs): # Make 3 attempts-- for x in range(0, 3): error = cls._request("get", url, **kwargs) if error is None: break return error @classmethod def post(cls, url, **kwargs): # Make 3 attempts-- for x in range(0, 3): error = cls._request("post", url, **kwargs) if error is None: break return error @classmethod def put(cls, url, **kwargs): # Make 3 attempts-- for x in range(0, 3): error = cls._request("put", url, **kwargs) if error is None: break return error class Webhook(HttpTransport): def prepare(self, template, check, urlencode=False): """ Replace variables with actual values. """ def safe(s): return quote(s) if urlencode else s ctx = { "$CODE": str(check.code), "$STATUS": check.status, "$NOW": safe(timezone.now().replace(microsecond=0).isoformat()), "$NAME": safe(check.name), "$TAGS": safe(check.tags), } for i, tag in enumerate(check.tags_list()): ctx["$TAG%d" % (i + 1)] = safe(tag) return replace(template, ctx) def is_noop(self, check): if check.status == "down" and not self.channel.url_down: return True if check.status == "up" and not self.channel.url_up: return True return False def notify(self, check): spec = self.channel.webhook_spec(check.status) if not spec["url"]: return "Empty webhook URL" url = self.prepare(spec["url"], check, urlencode=True) headers = {} for key, value in spec["headers"].items(): headers[key] = self.prepare(value, check) body = spec["body"] if body: body = self.prepare(body, check) if spec["method"] == "GET": return self.get(url, headers=headers) elif spec["method"] == "POST": return self.post(url, data=body.encode(), headers=headers) elif spec["method"] == "PUT": return self.put(url, data=body.encode(), headers=headers) class Slack(HttpTransport): def notify(self, check): text = tmpl("slack_message.json", check=check) payload = json.loads(text) return self.post(self.channel.slack_webhook_url, json=payload) class HipChat(HttpTransport): def is_noop(self, check): return True class OpsGenie(HttpTransport): @classmethod def get_error(cls, response): try: return response.json().get("message") except ValueError: pass def notify(self, check): headers = { "Conent-Type": "application/json", "Authorization": "GenieKey %s" % self.channel.opsgenie_key, } payload = {"alias": str(check.code), "source": settings.SITE_NAME} if check.status == "down": payload["tags"] = check.tags_list() payload["message"] = tmpl("opsgenie_message.html", check=check) payload["note"] = tmpl("opsgenie_note.html", check=check) payload["description"] = tmpl("opsgenie_description.html", check=check) url = "https://api.opsgenie.com/v2/alerts" if self.channel.opsgenie_region == "eu": url = "https://api.eu.opsgenie.com/v2/alerts" if check.status == "up": url += "/%s/close?identifierType=alias" % check.code return self.post(url, json=payload, headers=headers) class PagerDuty(HttpTransport): URL = 
"https://events.pagerduty.com/generic/2010-04-15/create_event.json" def notify(self, check): description = tmpl("pd_description.html", check=check) payload = { "service_key": self.channel.pd_service_key, "incident_key": str(check.code), "event_type": "trigger" if check.status == "down" else "resolve", "description": description, "client": settings.SITE_NAME, "client_url": check.details_url(), } return self.post(self.URL, json=payload) class PagerTree(HttpTransport): def notify(self, check): url = self.channel.value headers = {"Conent-Type": "application/json"} payload = { "incident_key": str(check.code), "event_type": "trigger" if check.status == "down" else "resolve", "title": tmpl("pagertree_title.html", check=check), "description": tmpl("pagertree_description.html", check=check), "client": settings.SITE_NAME, "client_url": settings.SITE_ROOT, "tags": ",".join(check.tags_list()), } return self.post(url, json=payload, headers=headers) class PagerTeam(HttpTransport): def notify(self, check): url = self.channel.value headers = {"Content-Type": "application/json"} payload = { "incident_key": str(check.code), "event_type": "trigger" if check.status == "down" else "resolve", "title": tmpl("pagerteam_title.html", check=check), "description": tmpl("pagerteam_description.html", check=check), "client": settings.SITE_NAME, "client_url": settings.SITE_ROOT, "tags": ",".join(check.tags_list()), } return self.post(url, json=payload, headers=headers) class Pushbullet(HttpTransport): def notify(self, check): text = tmpl("pushbullet_message.html", check=check) url = "https://api.pushbullet.com/v2/pushes" headers = { "Access-Token": self.channel.value, "Conent-Type": "application/json", } payload = {"type": "note", "title": settings.SITE_NAME, "body": text} return self.post(url, json=payload, headers=headers) class Pushover(HttpTransport): URL = "https://api.pushover.net/1/messages.json" def notify(self, check): others = self.checks().filter(status="down").exclude(code=check.code) # list() executes the query, to avoid DB access while # rendering a template ctx = {"check": check, "down_checks": list(others)} text = tmpl("pushover_message.html", **ctx) title = tmpl("pushover_title.html", **ctx) pieces = self.channel.value.split("|") user_key, prio = pieces[0], pieces[1] # The third element, if present, is the priority for "up" events if len(pieces) == 3 and check.status == "up": prio = pieces[2] payload = { "token": settings.PUSHOVER_API_TOKEN, "user": user_key, "message": text, "title": title, "html": 1, "priority": int(prio), } # Emergency notification if prio == "2": payload["retry"] = settings.PUSHOVER_EMERGENCY_RETRY_DELAY payload["expire"] = settings.PUSHOVER_EMERGENCY_EXPIRATION return self.post(self.URL, data=payload) class VictorOps(HttpTransport): def notify(self, check): description = tmpl("victorops_description.html", check=check) mtype = "CRITICAL" if check.status == "down" else "RECOVERY" payload = { "entity_id": str(check.code), "message_type": mtype, "entity_display_name": check.name_then_code(), "state_message": description, "monitoring_tool": settings.SITE_NAME, } return self.post(self.channel.value, json=payload) class Matrix(HttpTransport): def get_url(self): s = quote(self.channel.value) url = settings.MATRIX_HOMESERVER url += "/_matrix/client/r0/rooms/%s/send/m.room.message?" 
% s url += urlencode({"access_token": settings.MATRIX_ACCESS_TOKEN}) return url def notify(self, check): plain = tmpl("matrix_description.html", check=check) formatted = tmpl("matrix_description_formatted.html", check=check) payload = { "msgtype": "m.text", "body": plain, "format": "org.matrix.custom.html", "formatted_body": formatted, } return self.post(self.get_url(), json=payload) class Discord(HttpTransport): def notify(self, check): text = tmpl("slack_message.json", check=check) payload = json.loads(text) url = self.channel.discord_webhook_url + "/slack" return self.post(url, json=payload) class Telegram(HttpTransport): SM = "https://api.telegram.org/bot%s/sendMessage" % settings.TELEGRAM_TOKEN @classmethod def get_error(cls, response): try: return response.json().get("description") except ValueError: pass @classmethod def send(cls, chat_id, text): # Telegram.send is a separate method because it is also used in # hc.front.views.telegram_bot to send invite links. return cls.post( cls.SM, json={"chat_id": chat_id, "text": text, "parse_mode": "html"} ) def notify(self, check): from hc.api.models import TokenBucket if not TokenBucket.authorize_telegram(self.channel.telegram_id): return "Rate limit exceeded" text = tmpl("telegram_message.html", check=check) return self.send(self.channel.telegram_id, text) class Sms(HttpTransport): URL = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" def is_noop(self, check): return check.status != "down" def notify(self, check): profile = Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms(): profile.send_sms_limit_notice("SMS") return "Monthly SMS limit exceeded" url = self.URL % settings.TWILIO_ACCOUNT auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text = tmpl("sms_message.html", check=check, site_name=settings.SITE_NAME) data = { "From": settings.TWILIO_FROM, "To": self.channel.sms_number, "Body": text, } return self.post(url, data=data, auth=auth) class WhatsApp(HttpTransport): URL = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" def is_noop(self, check): if check.status == "down": return not self.channel.whatsapp_notify_down else: return not self.channel.whatsapp_notify_up def notify(self, check): profile = Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms(): profile.send_sms_limit_notice("WhatsApp") return "Monthly message limit exceeded" url = self.URL % settings.TWILIO_ACCOUNT auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text = tmpl("whatsapp_message.html", check=check, site_name=settings.SITE_NAME) data = { "From": "whatsapp:%s" % settings.TWILIO_FROM, "To": "whatsapp:%s" % self.channel.sms_number, "Body": text, } return self.post(url, data=data, auth=auth) class Trello(HttpTransport): URL = "https://api.trello.com/1/cards" def is_noop(self, check): return check.status != "down" def notify(self, check): params = { "idList": self.channel.trello_list_id, "name": tmpl("trello_name.html", check=check), "desc": tmpl("trello_desc.html", check=check), "key": settings.TRELLO_APP_KEY, "token": self.channel.trello_token, } return self.post(self.URL, params=params) class Apprise(HttpTransport): def notify(self, check): if not settings.APPRISE_ENABLED: # Not supported and/or enabled return "Apprise is disabled and/or not installed" a = apprise.Apprise() title = tmpl("apprise_title.html", check=check) body = tmpl("apprise_description.html", check=check) a.add(self.channel.value) notify_type = ( apprise.NotifyType.SUCCESS if check.status == "up" else 
apprise.NotifyType.FAILURE ) return ( "Failed" if not a.notify(body=body, title=title, notify_type=notify_type) else None ) class MsTeams(HttpTransport): def notify(self, check): text = tmpl("msteams_message.json", check=check) payload = json.loads(text) return self.post(self.channel.value, json=payload) class Zulip(HttpTransport): @classmethod def get_error(cls, response): try: return response.json().get("msg") except ValueError: pass def notify(self, check): _, domain = self.channel.zulip_bot_email.split("@") url = "https://%s/api/v1/messages" % domain auth = (self.channel.zulip_bot_email, self.channel.zulip_api_key) data = { "type": self.channel.zulip_type, "to": self.channel.zulip_to, "topic": tmpl("zulip_topic.html", check=check), "content": tmpl("zulip_content.html", check=check), } return self.post(url, data=data, auth=auth)
2.046875
2
graviti/portex/builder.py
Graviti-AI/graviti-python-sdk
12
1778
<gh_stars>10-100 #!/usr/bin/env python3 # # Copyright 2022 Graviti. Licensed under MIT License. # """Portex type builder related classes.""" from hashlib import md5 from pathlib import Path from shutil import rmtree from subprocess import PIPE, CalledProcessError, run from tempfile import gettempdir from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type, TypeVar import yaml import graviti.portex.ptype as PTYPE from graviti.exception import GitCommandError, GitNotFoundError from graviti.portex.base import PortexRecordBase from graviti.portex.external import PortexExternalType from graviti.portex.factory import ConnectedFieldsFactory, TypeFactory from graviti.portex.package import ExternalPackage, Imports, packages from graviti.portex.param import Param, Params from graviti.portex.register import ExternalContainerRegister if TYPE_CHECKING: from subprocess import CompletedProcess from graviti.portex.base import PortexType EXTERNAL_TYPE_TO_CONTAINER = ExternalContainerRegister.EXTERNAL_TYPE_TO_CONTAINER _I = TypeVar("_I", bound="BuilderImports") class PackageRepo: """The local git repo of the external Portex package. Arguments: url: The git repo url of the external package. revision: The git repo revision (tag/commit) of the external package. """ _env: Dict[str, Any] = {} def __init__(self, url: str, revision: str) -> None: tempdir = Path(gettempdir()) / "portex" tempdir.mkdir(exist_ok=True) md5_instance = md5() md5_instance.update(url.encode("utf-8")) md5_instance.update(revision.encode("utf-8")) self._path = tempdir / md5_instance.hexdigest() self._url = url self._revision = revision try: self._prepare_repo() except FileNotFoundError: raise GitNotFoundError() from None def _prepare_repo(self) -> None: if not self._path.exists(): self._clone_repo() elif not self._check_repo_integrity(): rmtree(self._path) self._clone_repo() def _run(self, args: List[str]) -> "CompletedProcess[bytes]": return run(args, cwd=self._path, env=self._env, stdout=PIPE, stderr=PIPE, check=True) def _init_repo(self) -> None: self._run(["git", "init"]) self._run(["git", "remote", "add", "origin", self._url]) def _shallow_fetch(self) -> None: self._run(["git", "fetch", "origin", self._revision, "--depth=1"]) self._run(["git", "checkout", "FETCH_HEAD"]) def _deep_fetch(self) -> None: try: self._run(["git", "fetch", "origin"]) except CalledProcessError as error: raise GitCommandError( "'git fetch' failed, most likely due to the repo url is invalid.", error, ) from None try: self._run(["git", "checkout", self._revision]) except CalledProcessError as error: raise GitCommandError( "'git checkout' failed, most likely due to the repo revision is invalid.", error, ) from None def _check_repo_integrity(self) -> bool: try: result = self._run(["git", "status", "--porcelain"]) except CalledProcessError: # The git command failed means the git repo has been cleaned or broken return False return not bool(result.stdout) def _clone_repo(self) -> None: print(f"Cloning repo '{self._url}@{self._revision}'") path = self._path path.mkdir() try: self._init_repo() try: self._shallow_fetch() except CalledProcessError: self._deep_fetch() except (CalledProcessError, GitCommandError, FileNotFoundError): rmtree(path) raise print(f"Cloned to '{path}'") def get_root(self) -> Path: """Get the root directory path of the package repo. Returns: The root directory path of the package repo. Raises: TypeError: when the "ROOT.yaml" not found or more than one "ROOT.yaml" found. 
""" roots = list(self._path.glob("**/ROOT.yaml")) if len(roots) == 0: raise TypeError("No 'ROOT.yaml' file found") if len(roots) >= 2: raise TypeError("More than one 'ROOT.yaml' file found") return roots[0].parent class PackageBuilder: """The builder of the external Portex package. Arguments: url: The git repo url of the external package. revision: The git repo revision (tag/commit) of the external package. """ def __init__(self, url: str, revision: str) -> None: self.package = ExternalPackage(url, revision) self._builders = self._create_type_builders() def __getitem__(self, key: str) -> Type["PortexExternalType"]: try: return self.package[key] except KeyError: return self._builders.__getitem__(key).build() def _create_type_builders(self) -> Dict[str, "TypeBuilder"]: repo = PackageRepo(self.package.url, self.package.revision) root = repo.get_root() builders = {} for yaml_file in root.glob("**/*.yaml"): if yaml_file.name == "ROOT.yaml": continue parts = (*yaml_file.relative_to(root).parent.parts, yaml_file.stem) name = ".".join(parts) builders[name] = TypeBuilder(name, yaml_file, self) return builders def build(self) -> ExternalPackage: """Build the Portex external package. Returns: The builded Portex external package. """ for builder in self._builders.values(): if builder.is_building: continue builder.build() return self.package class TypeBuilder: """The builder of the external Portex template type. Arguments: name: The name of the Portex template type. path: The source file path of the Portex template type. package: The package the Portex template type belongs to. """ def __init__(self, name: str, path: Path, builder: PackageBuilder) -> None: self._name = name self._path = path self._builder = builder self.is_building = False def build(self) -> Type["PortexExternalType"]: """Build the Portex external type. Returns: The builded Portex external type. Raises: TypeError: Raise when circular reference detected. """ if self.is_building: raise TypeError("Circular reference") self.is_building = True with self._path.open() as fp: content = yaml.load(fp, yaml.Loader) params_pyobj = content.get("parameters", []) decl = content["declaration"] imports = BuilderImports.from_pyobj(content.get("imports", []), self._builder) factory = TypeFactory(decl, imports) keys = factory.keys params = Params.from_pyobj(params_pyobj) for key, value in params.items(): value.ptype = keys.get(key, PTYPE.Any) params.add(Param("nullable", False, ptype=PTYPE.Boolean)) class_attrs: Dict[str, Any] = { "params": params, "factory": factory, "package": self._builder.package, } if issubclass(factory.class_, PortexRecordBase): bases: Tuple[Type["PortexType"], ...] = (PortexRecordBase, PortexExternalType) class_attrs["_fields_factory"] = ConnectedFieldsFactory( decl, factory.class_, imports, factory.transform_kwargs ) else: bases = (PortexExternalType,) type_ = type(self._name, bases, class_attrs) self._builder.package[self._name] = type_ return type_ class BuilderImports(Imports): """The imports of the Portex template type. Arguments: package: The package the portex belongs to. """ _builder: PackageBuilder def __getitem__(self, key: str) -> Type["PortexType"]: try: return super().__getitem__(key) except KeyError: return self._builder.__getitem__(key) @classmethod def from_pyobj( # type: ignore[override] # pylint: disable=arguments-differ cls: Type[_I], content: List[Dict[str, Any]], builder: PackageBuilder ) -> _I: """Create :class:`Imports` instance from python list. Arguments: content: A python list representing imported types. 
builder: The package builder. Returns: A :class:`Imports` instance created from the input python list. """ imports = super().from_pyobj(content) imports._builder = builder # pylint: disable=protected-access return imports def build_package(url: str, revision: str) -> ExternalPackage: """Build an external package. Arguments: url: The git repo url of the external package. revision: The git repo revision (tag/commit) of the external package. Returns: The :class:`ExternalPackage` instance. """ builder = PackageBuilder(url, revision) package = builder.build() packages.externals[url, revision] = package return package
2.125
2
dffml/operation/mapping.py
SGeetansh/dffml
171
1779
from typing import Dict, List, Any from ..df.types import Definition from ..df.base import op from ..util.data import traverse_get MAPPING = Definition(name="mapping", primitive="map") MAPPING_TRAVERSE = Definition(name="mapping_traverse", primitive="List[str]") MAPPING_KEY = Definition(name="key", primitive="str") MAPPING_VALUE = Definition(name="value", primitive="generic") @op( name="dffml.mapping.extract", inputs={"mapping": MAPPING, "traverse": MAPPING_TRAVERSE}, outputs={"value": MAPPING_VALUE}, ) def mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]): """ Extracts value from a given mapping. Parameters ---------- mapping : dict The mapping to extract the value from. traverse : list[str] A list of keys to traverse through the mapping dictionary and extract the values. Returns ------- dict A dictionary containing the value of the keys. Examples -------- >>> import asyncio >>> from dffml import * >>> >>> dataflow = DataFlow.auto(mapping_extract_value, GetSingle) >>> >>> dataflow.seed.append( ... Input( ... value=[mapping_extract_value.op.outputs["value"].name], ... definition=GetSingle.op.inputs["spec"], ... ) ... ) >>> inputs = [ ... Input( ... value={"key1": {"key2": 42}}, ... definition=mapping_extract_value.op.inputs["mapping"], ... ), ... Input( ... value=["key1", "key2"], ... definition=mapping_extract_value.op.inputs["traverse"], ... ), ... ] >>> >>> async def main(): ... async for ctx, result in MemoryOrchestrator.run(dataflow, inputs): ... print(result) >>> >>> asyncio.run(main()) {'value': 42} """ return {"value": traverse_get(mapping, *traverse)} @op( name="dffml.mapping.create", inputs={"key": MAPPING_KEY, "value": MAPPING_VALUE}, outputs={"mapping": MAPPING}, ) def create_mapping(key: str, value: Any): """ Creates a mapping of a given key and value. Parameters ---------- key : str The key for the mapping. value : Any The value for the mapping. Returns ------- dict A dictionary containing the mapping created. Examples -------- >>> import asyncio >>> from dffml import * >>> >>> dataflow = DataFlow.auto(create_mapping, GetSingle) >>> dataflow.seed.append( ... Input( ... value=[create_mapping.op.outputs["mapping"].name], ... definition=GetSingle.op.inputs["spec"], ... ) ... ) >>> inputs = [ ... Input( ... value="key1", definition=create_mapping.op.inputs["key"], ... ), ... Input( ... value=42, definition=create_mapping.op.inputs["value"], ... ), ... ] >>> >>> async def main(): ... async for ctx, result in MemoryOrchestrator.run(dataflow, inputs): ... print(result) >>> >>> asyncio.run(main()) {'mapping': {'key1': 42}} """ return {"mapping": {key: value}}
2.71875
3
anchore_engine/services/policy_engine/__init__.py
Vijay-P/anchore-engine
0
1780
import time import sys import pkg_resources import os import retrying from sqlalchemy.exc import IntegrityError # anchore modules import anchore_engine.clients.services.common import anchore_engine.subsys.servicestatus import anchore_engine.subsys.metrics from anchore_engine.subsys import logger from anchore_engine.configuration import localconfig from anchore_engine.clients.services import simplequeue, internal_client_for from anchore_engine.clients.services.simplequeue import SimpleQueueClient from anchore_engine.service import ApiService, LifeCycleStages from anchore_engine.services.policy_engine.engine.feeds.feeds import ( VulnerabilityFeed, NvdV2Feed, PackagesFeed, VulnDBFeed, GithubFeed, feed_registry, NvdFeed, ) # from anchore_engine.subsys.logger import enable_bootstrap_logging # enable_bootstrap_logging() from anchore_engine.utils import timer feed_sync_queuename = "feed_sync_tasks" system_user_auth = None feed_sync_msg = {"task_type": "feed_sync", "enabled": True} # These are user-configurable but mostly for debugging and testing purposes try: FEED_SYNC_RETRIES = int(os.getenv("ANCHORE_FEED_SYNC_CHECK_RETRIES", 5)) except ValueError: logger.exception( "Error parsing env value ANCHORE_FEED_SYNC_CHECK_RETRIES into int, using default value of 5" ) FEED_SYNC_RETRIES = 5 try: FEED_SYNC_RETRY_BACKOFF = int( os.getenv("ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF", 5) ) except ValueError: logger.exception( "Error parsing env value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int, using default value of 5" ) FEED_SYNC_RETRY_BACKOFF = 5 try: feed_config_check_retries = int(os.getenv("FEED_CLIENT_CHECK_RETRIES", 3)) except ValueError: logger.exception( "Error parsing env value FEED_CLIENT_CHECK_RETRIES into int, using default value of 3" ) feed_config_check_retries = 3 try: feed_config_check_backoff = int(os.getenv("FEED_CLIENT_CHECK_BACKOFF", 5)) except ValueError: logger.exception( "Error parsing env FEED_CLIENT_CHECK_BACKOFF value into int, using default value of 5" ) feed_config_check_backoff = 5 # service funcs (must be here) def _check_feed_client_credentials(): from anchore_engine.services.policy_engine.engine.feeds.client import get_client sleep_time = feed_config_check_backoff last_ex = None for i in range(feed_config_check_retries): if i > 0: logger.info( "Waiting for {} seconds to try feeds client config check again".format( sleep_time ) ) time.sleep(sleep_time) sleep_time += feed_config_check_backoff try: logger.info( "Checking feeds client credentials. Attempt {} of {}".format( i + 1, feed_config_check_retries ) ) client = get_client() client = None logger.info("Feeds client credentials ok") return True except Exception as e: logger.warn( "Could not verify feeds endpoint and/or config. Got exception: {}".format( e ) ) last_ex = e else: if last_ex: raise last_ex else: raise Exception( "Exceeded retries for feeds client config check. Failing check" ) def _system_creds(): global system_user_auth if not system_user_auth: config = localconfig.get_config() system_user_auth = config["system_user_auth"] return system_user_auth def process_preflight(): """ Execute the preflight functions, aborting service startup if any throw uncaught exceptions or return False return value :return: """ preflight_check_functions = [init_db_content, init_feed_registry] for fn in preflight_check_functions: try: fn() except Exception as e: logger.exception( "Preflight checks failed with error: {}. 
Aborting service startup".format( e ) ) sys.exit(1) def _init_distro_mappings(): from anchore_engine.db import session_scope, DistroMapping initial_mappings = [ DistroMapping(from_distro="alpine", to_distro="alpine", flavor="ALPINE"), DistroMapping(from_distro="busybox", to_distro="busybox", flavor="BUSYB"), DistroMapping(from_distro="centos", to_distro="rhel", flavor="RHEL"), DistroMapping(from_distro="debian", to_distro="debian", flavor="DEB"), DistroMapping(from_distro="fedora", to_distro="rhel", flavor="RHEL"), DistroMapping(from_distro="ol", to_distro="ol", flavor="RHEL"), DistroMapping(from_distro="rhel", to_distro="rhel", flavor="RHEL"), DistroMapping(from_distro="ubuntu", to_distro="ubuntu", flavor="DEB"), DistroMapping(from_distro="amzn", to_distro="amzn", flavor="RHEL"), DistroMapping(from_distro="redhat", to_distro="rhel", flavor="RHEL"), ] # set up any data necessary at system init try: logger.info( "Checking policy engine db initialization. Checking initial set of distro mappings" ) with session_scope() as dbsession: distro_mappings = dbsession.query(DistroMapping).all() for i in initial_mappings: if not [x for x in distro_mappings if x.from_distro == i.from_distro]: logger.info("Adding missing mapping: {}".format(i)) dbsession.add(i) logger.info("Distro mapping initialization complete") except Exception as err: if isinstance(err, IntegrityError): logger.warn("another process has already initialized, continuing") else: raise Exception( "unable to initialize default distro mappings - exception: " + str(err) ) return True def init_db_content(): """ Initialize the policy engine db with any data necessary at startup. :return: """ return _init_distro_mappings() def init_feed_registry(): # Register feeds, the tuple is the class and bool if feed is a distro vulnerability feed or not for cls_tuple in [ (NvdV2Feed, False), (VulnDBFeed, False), (VulnerabilityFeed, True), (PackagesFeed, False), (GithubFeed, False), (NvdFeed, False), ]: logger.info("Registering feed handler {}".format(cls_tuple[0].__feed_name__)) feed_registry.register(cls_tuple[0], is_vulnerability_feed=cls_tuple[1]) def do_feed_sync(msg): if "FeedsUpdateTask" not in locals(): from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask if "get_selected_feeds_to_sync" not in locals(): from anchore_engine.services.policy_engine.engine.feeds.sync import ( get_selected_feeds_to_sync, ) handler_success = False timer = time.time() logger.info("FIRING: feed syncer") try: feeds = get_selected_feeds_to_sync(localconfig.get_config()) logger.info("Syncing configured feeds: {}".format(feeds)) result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get("data")) if result is not None: handler_success = True else: logger.warn("Feed sync task marked as disabled, so skipping") except ValueError as e: logger.warn("Received msg of wrong type") except Exception as err: logger.warn("failure in feed sync handler - exception: " + str(err)) if handler_success: anchore_engine.subsys.metrics.summary_observe( "anchore_monitor_runtime_seconds", time.time() - timer, function="do_feed_sync", status="success", ) else: anchore_engine.subsys.metrics.summary_observe( "anchore_monitor_runtime_seconds", time.time() - timer, function="do_feed_sync", status="fail", ) def handle_feed_sync(*args, **kwargs): """ Initiates a feed sync in the system in response to a message from the queue :param args: :param kwargs: :return: """ system_user = _system_creds() logger.info("init args: {}".format(kwargs)) cycle_time = 
kwargs["mythread"]["cycle_timer"] while True: config = localconfig.get_config() feed_sync_enabled = config.get("feeds", {}).get("sync_enabled", True) if feed_sync_enabled: logger.info("Feed sync task executor activated") try: run_feed_sync(system_user) except Exception as e: logger.error("Caught escaped error in feed sync handler: {}".format(e)) finally: logger.info("Feed sync task executor complete") else: logger.info("sync_enabled is set to false in config - skipping feed sync") time.sleep(cycle_time) return True @retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000, ) def run_feed_sync(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready( ["simplequeue"] ) if not all_ready: logger.info("simplequeue service not yet ready, will retry") raise Exception("Simplequeue service not yet ready") else: try: # This has its own retry on the queue fetch, so wrap with catch block to ensure we don't double-retry on task exec simplequeue.run_target_with_queue_ttl( None, queue=feed_sync_queuename, target=do_feed_sync, max_wait_seconds=30, visibility_timeout=180, retries=FEED_SYNC_RETRIES, backoff_time=FEED_SYNC_RETRY_BACKOFF, ) except Exception as err: logger.warn("failed to process task this cycle: " + str(err)) def handle_feed_sync_trigger(*args, **kwargs): """ Checks to see if there is a task for a feed sync in the queue and if not, adds one. Interval for firing this should be longer than the expected feed sync duration. :param args: :param kwargs: :return: """ system_user = _system_creds() logger.info("init args: {}".format(kwargs)) cycle_time = kwargs["mythread"]["cycle_timer"] while True: config = localconfig.get_config() feed_sync_enabled = config.get("feeds", {}).get("sync_enabled", True) if feed_sync_enabled: logger.info("Feed Sync task creator activated") try: push_sync_task(system_user) logger.info("Feed Sync Trigger done, waiting for next cycle.") except Exception as e: logger.error( "Error caught in feed sync trigger handler after all retries. 
Will wait for next cycle" ) finally: logger.info("Feed Sync task creator complete") else: logger.info( "sync_enabled is set to false in config - skipping feed sync trigger" ) time.sleep(cycle_time) return True @retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000, ) def push_sync_task(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready( ["simplequeue"] ) if not all_ready: logger.info("simplequeue service not yet ready, will retry") raise Exception("Simplequeue service not yet ready") else: # q_client = SimpleQueueClient(user=system_user[0], password=<PASSWORD>[1]) q_client = internal_client_for(SimpleQueueClient, userId=None) if not q_client.is_inqueue(name=feed_sync_queuename, inobj=feed_sync_msg): try: q_client.enqueue(name=feed_sync_queuename, inobj=feed_sync_msg) except: logger.error("Could not enqueue message for a feed sync") raise class PolicyEngineService(ApiService): __service_name__ = "policy_engine" __spec_dir__ = pkg_resources.resource_filename(__name__, "swagger") __monitors__ = { "service_heartbeat": { "handler": anchore_engine.subsys.servicestatus.handle_service_heartbeat, "taskType": "handle_service_heartbeat", "args": [__service_name__], "cycle_timer": 60, "min_cycle_timer": 60, "max_cycle_timer": 60, "last_queued": 0, "last_return": False, "initialized": False, }, "feed_sync_checker": { "handler": handle_feed_sync_trigger, "taskType": "handle_feed_sync_trigger", "args": [], "cycle_timer": 600, "min_cycle_timer": 300, "max_cycle_timer": 100000, "last_queued": 0, "last_return": False, "initialized": False, }, "feed_sync": { "handler": handle_feed_sync, "taskType": "handle_feed_sync", "args": [], "cycle_timer": 3600, "min_cycle_timer": 1800, "max_cycle_timer": 100000, "last_queued": 0, "last_return": False, "initialized": False, }, } __lifecycle_handlers__ = { LifeCycleStages.pre_register: [ (process_preflight, None), ] }
1.984375
2
juriscraper/oral_args/united_states/federal_appellate/scotus.py
EvandoBlanco/juriscraper
228
1781
"""Scraper for Supreme Court of U.S.
CourtID: scotus
Court Short Name: scotus
History:
 - 2014-07-20 - Created by <NAME>, reviewed by MLR
 - 2017-10-09 - Updated by MLR.
"""

from datetime import datetime

from juriscraper.OralArgumentSite import OralArgumentSite


class Site(OralArgumentSite):
    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        self.court_id = self.__module__
        self.url = (
            "http://www.supremecourt.gov/oral_arguments/argument_audio.aspx"
        )
        self.back_scrape_iterable = list(range(2010, 2015))

    def _get_download_urls(self):
        path = "id('list')//tr//a/text()"
        return list(map(self._return_download_url, self.html.xpath(path)))

    @staticmethod
    def _return_download_url(d):
        file_type = "mp3"  # or 'wma' is also available for any case.
        download_url = "http://www.supremecourt.gov/media/audio/{type}files/{docket_number}.{type}".format(
            type=file_type, docket_number=d
        )
        return download_url

    def _get_case_names(self):
        path = "id('list')//tr/td/span/text()"
        return [s.lstrip(". ") for s in self.html.xpath(path)]

    def _get_case_dates(self):
        path = "id('list')//tr/td[2]//text()"
        return [
            datetime.strptime(s, "%m/%d/%y").date()
            for s in self.html.xpath(path)
            if not "Date" in s
        ]

    def _get_docket_numbers(self):
        path = "id('list')//tr//a/text()"
        return list(self.html.xpath(path))

    def _download_backwards(self, year):
        self.url = (
            "http://www.supremecourt.gov/oral_arguments/argument_audio/%s"
            % year
        )
        self.html = self._download()
2.390625
2
code/main.py
pengzhansun/CF-CAR
8
1782
# -*- coding: utf-8 -*- import argparse import os import shutil import time import numpy as np import random from collections import OrderedDict import torch import torch.backends.cudnn as cudnn from callbacks import AverageMeter from data_utils.causal_data_loader_frames import VideoFolder from utils import save_results from tqdm import tqdm parser = argparse.ArgumentParser(description='Counterfactual CAR') # Path, dataset and log related arguments parser.add_argument('--root_frames', type=str, default='/mnt/data1/home/sunpengzhan/sth-sth-v2/', help='path to the folder with frames') parser.add_argument('--json_data_train', type=str, default='../data/dataset_splits/compositional/train.json', help='path to the json file with train video meta data') parser.add_argument('--json_data_val', type=str, default='../data/dataset_splits/compositional/validation.json', help='path to the json file with validation video meta data') parser.add_argument('--json_file_labels', type=str, default='../data/dataset_splits/compositional/labels.json', help='path to the json file with ground truth labels') parser.add_argument('--dataset', default='smth_smth', help='which dataset to train') parser.add_argument('--logname', default='my_method', help='name of the experiment for checkpoints and logs') parser.add_argument('--print_freq', '-p', default=20, type=int, metavar='N', help='print frequency (default: 20)') parser.add_argument('--ckpt', default='./ckpt', help='folder to output checkpoints') parser.add_argument('--resume_vision', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--resume_coord', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--resume_fusion', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') # model, image&feature dim and training related arguments parser.add_argument('--model_vision', default='rgb_roi') parser.add_argument('--model_coord', default='interaction') parser.add_argument('--model_fusion', default='concat_fusion') parser.add_argument('--fusion_function', default='fused_sum', type=str, help='function for fusing activations from each branch') parser.add_argument('--img_feature_dim', default=512, type=int, metavar='N', help='intermediate feature dimension for image-based features') parser.add_argument('--coord_feature_dim', default=512, type=int, metavar='N', help='intermediate feature dimension for coord-based features') parser.add_argument('--size', default=224, type=int, metavar='N', help='primary image input size') parser.add_argument('--num_boxes', default=4, type=int, help='num of boxes for each image') parser.add_argument('--num_frames', default=16, type=int, help='num of frames for the model') parser.add_argument('--num_classes', default=174, type=int, help='num of class in the model') parser.add_argument('--epochs', default=30, type=int, metavar='N', help='number of total epochs to run') parser.add_argument('--start_epoch', default=None, type=int, metavar='N', help='manual epoch number (useful on restarts)') parser.add_argument('--batch_size', '-b', default=16, type=int, metavar='N', help='mini-batch size') parser.add_argument('--lr', '--learning-rate', default=0.01, type=float, metavar='LR', help='initial learning rate') parser.add_argument('--lr_steps', default=[24, 35, 45], type=float, nargs="+", metavar='LRSteps', help='epochs to decay learning rate by 10') parser.add_argument('--momentum', default=0.9, type=float, 
metavar='M', help='momentum') parser.add_argument('--weight_decay', '--wd', default=0.0001, type=float, metavar='W', help='weight decay (default: 1e-4)') parser.add_argument('--clip_gradient', '-cg', default=5, type=float, metavar='W', help='gradient norm clipping (default: 5)') parser.add_argument('--search_stride', type=int, default=5, help='test performance every n strides') # train mode, hardware setting and others related arguments parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set') parser.add_argument('--cf_inference_group', action='store_true', help='counterfactual inference model on validation set') parser.add_argument('--parallel', default=True, type=bool, help='whether or not train with multi GPUs') parser.add_argument('--gpu_index', type=str, default='0, 1, 2, 3', help='the index of gpu you want to use') best_loss = 1000000 def main(): global args, best_loss args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index print(args) # create vision model if args.model_vision == 'global_i3d': from model.model_lib import VideoGlobalModel as RGBModel print('global_i3d loaded!!') elif args.model_vision == 'rgb_roi': from model.model_lib import BboxVisualModel as RGBModel print('rgb_roi loaded!!') else: print("no such a vision model!") # create coord model if args.model_coord == 'interaction': from model.model_lib import BboxInteractionLatentModel as BboxModel print('interaction loaded!!') else: print("no such a coordinate model!") # create fusion model if args.model_fusion == 'concat_fusion': from model.model_lib import ConcatFusionModel as FusionModel print('concat_fusion loaded!!') else: print('no such a fusion model!') # load model branch vision_model = RGBModel(args) coord_model = BboxModel(args) fusion_model = FusionModel(args) # create the fusion function for the activation of three branches if args.fusion_function == 'fused_sum': from fusion_function import logsigsum as fusion_func print('fused_sum loaded!!') elif args.fusion_function == 'naive_sum': from fusion_function import naivesum as fusion_func print('naive_sum loaded!!') else: print('no such a fusion function!') fusion_function = fusion_func() if args.parallel: vision_model = torch.nn.DataParallel(vision_model).cuda() coord_model = torch.nn.DataParallel(coord_model).cuda() fusion_model = torch.nn.DataParallel(fusion_model).cuda() else: vision_model = vision_model.cuda() coord_model = coord_model.cuda() fusion_model = fusion_model.cuda() # optionally resume vision model from a checkpoint if args.resume_vision: assert os.path.isfile(args.resume_vision), "No checkpoint found at '{}'".format(args.resume_vision) print("=> loading checkpoint '{}'".format(args.resume_vision)) checkpoint = torch.load(args.resume_vision) if args.start_epoch is None: args.start_epoch = checkpoint['epoch'] best_loss = checkpoint['best_loss'] vision_model.load_state_dict(checkpoint['state_dict']) print("=> loaded checkpoint '{}' (epoch {})" .format(args.resume_vision, checkpoint['epoch'])) # optionally resume coord model from a checkpoint if args.resume_coord: assert os.path.isfile(args.resume_coord), "No checkpoint found at '{}'".format(args.resume_coord) print("=> loading checkpoint '{}'".format(args.resume_coord)) checkpoint = torch.load(args.resume_coord) if args.start_epoch is None: args.start_epoch = checkpoint['epoch'] best_loss = 
checkpoint['best_loss'] coord_model.load_state_dict(checkpoint['state_dict']) print("=> loaded checkpoint '{}' (epoch {})" .format(args.resume_coord, checkpoint['epoch'])) if args.resume_fusion: assert os.path.isfile(args.resume_fusion), "No checkpoint found at '{}'".format(args.resume_fusion) print("=> loading checkpoint '{}'".format(args.resume_fusion)) checkpoint = torch.load(args.resume_fusion) if args.start_epoch is None: args.start_epoch = checkpoint['epoch'] best_loss = checkpoint['best_loss'] fusion_model.load_state_dict(checkpoint['state_dict']) print("=> loaded checkpoint '{}' (epoch {})" .format(args.resume_fusion, checkpoint['epoch'])) if args.start_epoch is None: args.start_epoch = 0 cudnn.benchmark = True # create training and validation dataset dataset_train = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_train, file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=False, if_augment=True, ) dataset_val = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_val, file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=True, if_augment=True, ) # create training and validation loader train_loader = torch.utils.data.DataLoader( dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True, pin_memory=True ) val_loader = torch.utils.data.DataLoader( dataset_val, drop_last=True, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=False ) model_list = [vision_model, coord_model, fusion_model] optimizer_vision = torch.optim.SGD(filter(lambda p: p.requires_grad, vision_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_coord = torch.optim.SGD(filter(lambda p: p.requires_grad, coord_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_fusion = torch.optim.SGD(filter(lambda p: p.requires_grad, fusion_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_list = [optimizer_vision, optimizer_coord, optimizer_fusion] criterion = torch.nn.CrossEntropyLoss() search_list = np.linspace(0.0, 1.0, 11) # factual inference (vanilla test stage) if args.evaluate: validate(val_loader, model_list, fusion_function, criterion, class_to_idx=dataset_val.classes_dict) return # Counterfactual inference by trying a list of hyperparameter if args.cf_inference_group: cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=dataset_val.classes_dict) return print('training begin...') for epoch in tqdm(range(args.start_epoch, args.epochs)): adjust_learning_rate(optimizer_vision, epoch, args.lr_steps, 'vision') adjust_learning_rate(optimizer_coord, epoch, args.lr_steps, 'coord') adjust_learning_rate(optimizer_fusion, epoch, args.lr_steps, 'fusion') # train for one epoch train(train_loader, model_list, fusion_function, optimizer_list, epoch, criterion) if (epoch+1) >= 30 and (epoch + 1) % args.search_stride == 0: loss = validate(val_loader, model_list, fusion_function, criterion, epoch=epoch, class_to_idx=dataset_val.classes_dict) else: loss = 100 # remember best loss and save checkpoint is_best = loss < best_loss best_loss = min(loss, best_loss) save_checkpoint( { 'epoch': epoch + 1, 'state_dict': vision_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_vision, args.logname))) save_checkpoint( { 
'epoch': epoch + 1, 'state_dict': coord_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_coord, args.logname))) save_checkpoint( { 'epoch': epoch + 1, 'state_dict': fusion_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_fusion, args.logname))) def train(train_loader, model_list, fusion_function, optimizer_list, epoch, criterion): global args batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() acc_top1 = AverageMeter() acc_top5 = AverageMeter() # load three model branches [vision_model, coord_model, fusion_model] = model_list # load four optimizers, including the one designed for uniform assumption [optimizer_vision, optimizer_coord, optimizer_fusion] = optimizer_list # switch to train mode vision_model.train() coord_model.train() fusion_model.train() end = time.time() for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(train_loader): data_time.update(time.time() - end) # obtain the activation and vision features from vision branch output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) output_vision = output_vision.view((-1, len(train_loader.dataset.classes))) # obtain the activation and coordinate features from coordinate branch output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, len(train_loader.dataset.classes))) # detach the computation graph, avoid the gradient confusion feature_vision_detached = feature_vision.detach() feature_coord_detached = feature_coord.detach() # obtain the activation of fusion branch output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda()) output_fusion = output_fusion.view((-1, len(train_loader.dataset.classes))) output_factual = fusion_function(output_vision, output_coord, output_fusion) # loss_fusion is the loss of output_fusion(fused, obtained from the fusion_function) loss_vision = criterion(output_vision, video_label.long().cuda()) loss_coord = criterion(output_coord, video_label.long().cuda()) loss_fusion = criterion(output_fusion, video_label.long().cuda()) loss_factual = criterion(output_factual, video_label.long().cuda()) # Measure the accuracy of the sum of three branch activation results acc1, acc5 = accuracy(output_factual.cpu(), video_label, topk=(1, 5)) # record the accuracy and loss losses.update(loss_factual.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) # refresh the optimizer optimizer_vision.zero_grad() optimizer_coord.zero_grad() optimizer_fusion.zero_grad() loss = loss_vision + loss_coord + loss_factual loss.backward() if args.clip_gradient is not None: torch.nn.utils.clip_grad_norm_(vision_model.parameters(), args.clip_gradient) # update the parameter optimizer_vision.step() optimizer_coord.step() optimizer_fusion.step() batch_time.update(time.time() - end) end = time.time() if i % args.print_freq == 0: print('Epoch: [{0}][{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})'.format( epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, 
acc_top1=acc_top1, acc_top5=acc_top5)) def validate(val_loader, model_list, fusion_function, criterion, epoch=None, class_to_idx=None): batch_time = AverageMeter() losses = AverageMeter() acc_top1 = AverageMeter() acc_top5 = AverageMeter() logits_matrix = [] targets_list = [] # unpack three models [vision_model, coord_model, fusion_model] = model_list # switch to evaluate mode vision_model.eval() coord_model.eval() fusion_model.eval() end = time.time() for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(val_loader): # compute output with torch.no_grad(): output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) output_vision = output_vision.view((-1, len(val_loader.dataset.classes))) output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, len(val_loader.dataset.classes))) # detach the computation graph, avoid the gradient confusion feature_vision_detached = feature_vision.detach() feature_coord_detached = feature_coord.detach() # obtain the activation of fusion branch output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda()) output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes))) # fuse three outputs output_factual = fusion_function(output_vision, output_coord, output_fusion) # warning: loss_fusion is the loss of output_fusion(fused, obtained from the fusion_function) loss_vision = criterion(output_vision, video_label.long().cuda()) loss_coord = criterion(output_coord, video_label.long().cuda()) loss_fusion = criterion(output_factual, video_label.long().cuda()) # statistic result from fusion_branch or value after fusion function output = output_factual loss = loss_vision acc1, acc5 = accuracy(output.cpu(), video_label, topk=(1, 5)) if args.evaluate: logits_matrix.append(output.cpu().data.numpy()) targets_list.append(video_label.cpu().numpy()) # measure accuracy and record loss losses.update(loss.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if i % args.print_freq == 0 or i + 1 == len(val_loader): print('Test: [{0}/{1}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})\t'.format( i, len(val_loader), batch_time=batch_time, loss=losses, acc_top1=acc_top1, acc_top5=acc_top5, )) if args.evaluate: logits_matrix = np.concatenate(logits_matrix) targets_list = np.concatenate(targets_list) save_results(logits_matrix, targets_list, class_to_idx, args) return losses.avg def cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=None): batch_time = AverageMeter() search_length = len(search_list) search_dict = {} for i in range(search_length): search_dict['acc_1_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter() search_dict['acc_5_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter() [vision_model, coord_model, fusion_model] = model_list # switch to evaluate mode vision_model.eval() coord_model.eval() fusion_model.eval() end = time.time() for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(val_loader): # compute output with torch.no_grad(): # factual inference 
output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) output_vision = output_vision.view((-1, len(val_loader.dataset.classes))) output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, len(val_loader.dataset.classes))) # obtain the activation of fusion branch output_fusion = fusion_model(feature_vision.cuda(), feature_coord.cuda()) output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes))) # fuse three outputs output_factual = fusion_function(output_vision, output_coord, output_fusion) # counterfactual inference output_vision_subtrahend = output_vision output_counterfactual = fusion_function(output_vision_subtrahend, torch.tensor(0.0), torch.tensor(0.0)) for j in range(search_length): weight = search_list[j] output_debiased = output_factual - output_counterfactual * weight acc1, acc5 = accuracy(output_debiased.cpu(), video_label, topk=(1, 5)) search_dict['acc_1_alpha_{}'.format(round(search_list[j], 1))].update(acc1.item(), global_img_tensors.size(0)) search_dict['acc_5_alpha_{}'.format(round(search_list[j], 1))].update(acc5.item(), global_img_tensors.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if i % args.print_freq == 0 or i + 1 == len(val_loader): print('Cf-Inference: [{0}/{1}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Acc1_0.0 {acc_top1_00.val:.1f} ({acc_top1_00.avg:.1f})\t' 'Acc1_0.2 {acc_top1_02.val:.1f} ({acc_top1_02.avg:.1f})\t' 'Acc1_0.5 {acc_top1_05.val:.1f} ({acc_top1_05.avg:.1f})\t' 'Acc1_0.8 {acc_top1_08.val:.1f} ({acc_top1_08.avg:.1f})\t' 'Acc1_1.0 {acc_top1_10.val:.1f} ({acc_top1_10.avg:.1f})'.format( i, len(val_loader), batch_time=batch_time, acc_top1_00=search_dict['acc_1_alpha_0.0'], acc_top1_02=search_dict['acc_1_alpha_0.2'], acc_top1_05=search_dict['acc_1_alpha_0.5'], acc_top1_08=search_dict['acc_1_alpha_0.8'], acc_top1_10=search_dict['acc_1_alpha_1.0'])) for k in range(search_length): print(search_list[k], search_dict['acc_1_alpha_{}'.format(round(search_list[k], 1))].avg, search_dict['acc_5_alpha_{}'.format(round(search_list[k], 1))].avg) return def save_checkpoint(state, is_best, filename): torch.save(state, filename + '_latest.pth.tar') if is_best: shutil.copyfile(filename + '_latest.pth.tar', filename + '_best.pth.tar') def adjust_learning_rate(optimizer, epoch, lr_steps, branch_name=None): """Sets the learning rate to the initial LR decayed by 10""" decay = 0.1 ** (sum(epoch >= np.array(lr_steps))) lr = args.lr * decay if branch_name == 'vision': for param_group in optimizer.param_groups: param_group['lr'] = lr * 0.8 elif branch_name == 'coord': for param_group in optimizer.param_groups: param_group['lr'] = lr elif branch_name == 'fusion': for param_group in optimizer.param_groups: param_group['lr'] = lr else: for param_group in optimizer.param_groups: param_group['lr'] = lr def accuracy(output, target, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res if __name__ == '__main__': main()
1.96875
2
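The validation and counterfactual-inference code above combines three sets of logits with a configurable `fusion_function` and then subtracts an alpha-weighted counterfactual term built from the vision branch alone. The sketch below replays that debiasing step on dummy tensors; the element-wise sum used as the fusion function here is only an assumption for illustration, not necessarily what the script was configured with.

import torch

def sum_fusion(vision, coord, fusion):
    # stand-in for the script's fusion_function: a plain broadcasting sum of the logits
    return vision + coord + fusion

batch_size, num_classes = 4, 10
output_vision = torch.randn(batch_size, num_classes)
output_coord = torch.randn(batch_size, num_classes)
output_fusion = torch.randn(batch_size, num_classes)

output_factual = sum_fusion(output_vision, output_coord, output_fusion)
# counterfactual pass: only the vision branch contributes, the other two inputs are zeroed
output_counterfactual = sum_fusion(output_vision, torch.tensor(0.0), torch.tensor(0.0))

for alpha in (0.0, 0.2, 0.5, 0.8, 1.0):
    output_debiased = output_factual - alpha * output_counterfactual
    print(alpha, output_debiased.argmax(dim=1).tolist())

At alpha = 0.0 this is the plain fused prediction; larger alpha removes more of the vision-only bias, which is exactly the sweep `cf_inference_group` performs over `search_list`.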
api/application/__init__.py
114000/webapp-boilerplate
0
1783
# encoding: utf-8
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
import logging

app = Flask(__name__)
CORS(app, resources={r"/*": {"origins": "*"}})
app.config.from_object('config.current')
db = SQLAlchemy(app)

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Imported for their side effects: these modules attach the JWT handling
# and the API routes to `app`.
import application.jwt
import application.routes.config
import application.routes.user
import application.routes.permission
import application.routes.role
import application.routes.access

# Create all tables once the models have been defined (via the imports above).
db.create_all()
1.90625
2
Betsy/Betsy/modules/get_illumina_control.py
jefftc/changlab
9
1784
from Module import AbstractModule class Module(AbstractModule): def __init__(self): AbstractModule.__init__(self) def run( self, network, antecedents, out_attributes, user_options, num_cores, outfile): import os import shutil from genomicode import filelib in_data = antecedents result_files = os.listdir(in_data.identifier) for result_file in result_files: if '-controls' in result_file: goal_file = os.path.join(in_data.identifier, result_file) shutil.copyfile(goal_file, outfile) assert filelib.exists_nz(outfile), ( 'the output file %s for illu_control fails' % outfile ) def name_outfile(self, antecedents, user_options): from Betsy import module_utils original_file = module_utils.get_inputid(antecedents.identifier) filename = 'control_illumina_' + original_file + '.gct' return filename
2.265625
2
src/backup/template/PositionalArgumentTemplate.py
ytyaru0/Python.TemplateFileMaker.20180314204216
0
1785
<gh_stars>0
from string import Template
import re


class PositionalArgumentTemplate(Template):
    # (?i): turns case-insensitive matching on
    # (?-i): turns case-insensitive matching off
    idpattern_default = Template.idpattern  # (?-i:[_a-zA-Z][_a-zA-Z0-9]*)
    idpattern = '([0-9]+)'
    def find_place_holders(self, template:str):
        #for m in re.findall(self.pattern, template):
        #for m in re.finditer(self.pattern, template):
        for m in self.pattern.finditer(template):
            print(m, type(m))
            #print(dir(m))
            #print(len(m.groups()))
            print(m[0])
            #print(m.groups())
            #print(m, m.groups(), m.group('named'), type(m))
            #print(m.group('escaped'))
            #print(m.group('named'))
            #print(m.group('braced'))
            #print(m.group('invalid'))

if __name__ == '__main__':
    template_str = '${0} is Aug.'
    t = PositionalArgumentTemplate(template_str)
    print(template_str)
    print(dir(t))
    print(t.delimiter)
    print(t.idpattern)
    print(type(t.idpattern))
    print(t.flags)
    print(t.pattern)
    print(t.substitute(**{'0':'V'}))
    t.find_place_holders(template_str)
2.796875
3
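The class above works by overriding `Template.idpattern` so that purely numeric placeholders are accepted; a minimal standalone usage sketch (the template text and values here are made up) is:

from string import Template

class PositionalTemplate(Template):
    # accept ${0}, ${1}, ... instead of the default identifier-style names
    idpattern = '([0-9]+)'

t = PositionalTemplate('${0} loves ${1}')
print(t.substitute({'0': 'spam', '1': 'eggs'}))  # -> spam loves eggs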
cla-backend/cla/tests/unit/test_company.py
kdhaigud/easycla
0
1786
<filename>cla-backend/cla/tests/unit/test_company.py<gh_stars>0
# Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT
import json
import os
import requests
import uuid
import hug
import pytest
from falcon import HTTP_409
import cla
from cla import routes

ID_TOKEN = os.environ.get('ID_TOKEN')
API_URL = os.environ.get('API_URL')


def test_create_company_duplicate():
    """
    Test creating duplicate company names
    """
    url = f'{API_URL}/v1/company'
    company_name = 'test_company_name'
    data = {
        'company_id': uuid.uuid4(),
        'company_name': company_name,
    }
    headers = {
        'Authorization': f'Bearer {ID_TOKEN}'
    }
    # create the company through the live API; requests exposes the numeric status code
    response = requests.post(url, data=data, headers=headers)
    assert response.status_code == 200

    # adding a company with the same name again should be rejected as a conflict
    data = {
        'company_id': uuid.uuid4(),
        'company_name': company_name,
    }
    req = hug.test.post(routes, url, data=data, headers=headers)
    assert req.status == HTTP_409
2.203125
2
py/WatchDialog.py
mathematicalmichael/SpringNodes
51
1787
<gh_stars>10-100 # Copyright(c) 2017, <NAME> # @5devene, <EMAIL> # www.badmonkeys.net import clr clr.AddReference('System.Windows.Forms') clr.AddReference('System.Drawing') from System.Drawing import Point, Color, Font from System.Windows.Forms import * from cStringIO import StringIO str_file = StringIO() size1 = [30, 23] #height, width def tolist(obj1): if hasattr(obj1,"__iter__"): return obj1 else: return [obj1] def write_str(str1, GCL, str_file=str_file, size1=size1): ln1 = len(str1) if ln1 > size1[1]: size1[1] = ln1 str_file.write("%s%s\n" % ("".join(GCL), str1) ) def list2str(l1, writeInd, GCL=None, GCint=-1, size1=size1): if GCL is None: GCL = [] GCint += 1 GCL.append(None) for i, x in enumerate(l1): GCL[GCint] = "[%i] " % i if writeInd else " " if hasattr(x, "Id"): #is element write_str("%s %i" % (x.ToString(), x.Id), GCL) elif hasattr(x, "__iter__"): if not x: write_str("Empty List", GCL) else: list2str(x, writeInd, GCL, GCint, size1) elif x is None: write_str("null", GCL) else: write_str(x.ToString(), GCL) size1[0] += 19 GCL.pop(GCint) GCint -= 1 class WatchBox(Form): def __init__(self, t1): self.Text = "SpringNodes: Expandable Watch Window" self.BackColor = Color.FromArgb(40,40,40) self.ControlBox = False self.TopMost = True self.FormBorderStyle = FormBorderStyle.Sizable self.StartPosition = FormStartPosition.CenterScreen self.Resize += self.resize1 self.text1 = None self.button1 = Button() self.button1.Text = 'Close' self.button1.Font = Font("Calibri", 10) self.button1.AutoSize = True self.button1.Width = 200 self.button1.ForeColor = Color.FromArgb(234,234,234) self.button1.Click += self.save self.Controls.Add(self.button1) self.box1 = RichTextBox() self.box1.Multiline = True self.box1.Location = Point(5, 5) self.box1.Font = Font("Calibri", 12) self.box1.BackColor = Color.FromArgb(53,53,53) self.box1.ForeColor = Color.FromArgb(234,234,234) self.box1.DetectUrls = True self.box1.Text = t1 self.Controls.Add(self.box1) def adjust_controls(self, height1, width1): if height1 > 800: height1 = 800 self.box1.ScrollBars = RichTextBoxScrollBars.Vertical if width1 < 23 : width1 = 23 if width1 > 88: width1 = 88 self.Width = 10 + (width1 + 2) * 9 #character width seems to vary between PCs self.Height = height1 + 90 self.box1.Width = self.Width - 17 self.box1.Height = self.Height - 80 self.button1.Location = Point(self.Width/2 - 103, self.Height - 70) def resize1(self, sender, event): if self.Width < 210: self.Width = 230 if self.Height < 120: self.Height = 120 self.box1.Width = self.Width - 17 self.box1.Height = self.Height - 80 self.button1.Location = Point(self.Width/2 - 103, self.Height - 70) def save(self, sender, event): self.text1 = self.box1.Text self.Close() l1 = [] if IN[0] is None else tolist(IN[0]) list2str(l1, IN[1]) str_content = str_file.getvalue() str_file.close() width1 = 100 form = WatchBox(str_content) form.adjust_controls(*size1) Application.Run(form) OUT = form.text1 Application.Exit() form.Dispose()
2.40625
2
292-nim-game.py
mvj3/leetcode
0
1788
""" Question: Nim Game My Submissions Question You are playing the following Nim Game with your friend: There is a heap of stones on the table, each time one of you take turns to remove 1 to 3 stones. The one who removes the last stone will be the winner. You will take the first turn to remove the stones. Both of you are very clever and have optimal strategies for the game. Write a function to determine whether you can win the game given the number of stones in the heap. For example, if there are 4 stones in the heap, then you will never win the game: no matter 1, 2, or 3 stones you remove, the last stone will always be removed by your friend. Hint: If there are 5 stones in the heap, could you figure out a way to remove the stones such that you will always be the winner? Credits: Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases. Performance: 1. Total Accepted: 31755 Total Submissions: 63076 Difficulty: Easy 2. Your runtime beats 43.52% of python submissions. """ class Solution(object): def canWinNim(self, n): """ :type n: int :rtype: bool """ if n <= 3: return True if n % 4 == 0: return False else: return True assert Solution().canWinNim(0) is True assert Solution().canWinNim(1) is True assert Solution().canWinNim(2) is True assert Solution().canWinNim(3) is True assert Solution().canWinNim(4) is False assert Solution().canWinNim(5) is True assert Solution().canWinNim(6) is True assert Solution().canWinNim(7) is True assert Solution().canWinNim(8) is False
3.734375
4
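The modulo-4 rule used in the solution above can be cross-checked against a brute-force game-tree search over small heaps; this verification snippet is illustrative only and is not part of the original submission.

from functools import lru_cache

@lru_cache(maxsize=None)
def can_win(n):
    # A heap is winning if some legal move (remove 1-3 stones) leaves the opponent in a losing heap.
    return any(not can_win(n - k) for k in (1, 2, 3) if k <= n)

assert all(can_win(n) == (n % 4 != 0) for n in range(1, 100))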
script_tests/maf_extract_ranges_indexed_tests.py
lldelisle/bx-python
122
1789
import unittest import base class Test(base.BaseScriptTest, unittest.TestCase): command_line = "./scripts/maf_extract_ranges_indexed.py ./test_data/maf_tests/mm8_chr7_tiny.maf -c -m 5 -p mm8." input_stdin = base.TestFile(filename="./test_data/maf_tests/dcking_ghp074.bed") output_stdout = base.TestFile(filename="./test_data/maf_tests/dcking_ghp074.maf")
2.078125
2
qstklearn/1knn.py
elxavicio/QSTK
339
1790
<filename>qstklearn/1knn.py<gh_stars>100-1000 ''' (c) 2011, 2012 Georgia Tech Research Corporation This source code is released under the New BSD license. Please see http://wiki.quantsoftware.org/index.php?title=QSTK_License for license details. Created on Feb 20, 2011 @author: <NAME> @organization: Georgia Institute of Technology @contact: <EMAIL> @summary: This is an implementation of the 1-KNN algorithm for ranking features quickly. It uses the knn implementation. @status: oneKNN functions correctly, optimized to use n^2/2 algorithm. ''' import matplotlib.pyplot as plt from pylab import gca import itertools import string import numpy as np import math import knn from time import clock ''' @summary: Query function for 1KNN, return value is a double between 0 and 1. @param naData: A 2D numpy array. Each row is a data point with the final column containing the classification. ''' def oneKnn( naData ): if naData.ndim != 2: raise Exception( "Data should have two dimensions" ) lLen = naData.shape[0] ''' # of dimensions, subtract one for classification ''' lDim = naData.shape[1] - 1 ''' Start best distances as very large ''' ldDistances = [1E300] * lLen llIndexes = [-1] * lLen dDistance = 0.0; ''' Loop through finding closest neighbors ''' for i in range( lLen ): for j in range( i+1, lLen ): dDistance = 0.0 for k in range( 0, lDim ): dDistance += (naData[i][k] - naData[j][k])**2 dDistance = math.sqrt( dDistance ) ''' Two distances to check, for i's best, and j's best ''' if dDistance < ldDistances[i]: ldDistances[i] = dDistance llIndexes[i] = j if dDistance < ldDistances[j]: ldDistances[j] = dDistance llIndexes[j] = i lCount = 0 ''' Now count # of matching pairs ''' for i in range( lLen ): if naData[i][-1] == naData[ llIndexes[i] ][-1]: lCount = lCount + 1 return float(lCount) / lLen ''' Test function to plot results ''' def _plotResults( naDist1, naDist2, lfOneKnn, lf5Knn ): plt.clf() plt.subplot(311) plt.scatter( naDist1[:,0], naDist1[:,1] ) plt.scatter( naDist2[:,0], naDist2[:,1], color='r' ) #plt.ylabel( 'Feature 2' ) #plt.xlabel( 'Feature 1' ) #gca().annotate( '', xy=( .8, 0 ), xytext=( -.3 , 0 ), arrowprops=dict(facecolor='red', shrink=0.05) ) gca().annotate( '', xy=( .7, 0 ), xytext=( 1.5 , 0 ), arrowprops=dict(facecolor='black', shrink=0.05) ) plt.title( 'Data Distribution' ) plt.subplot(312) plt.plot( range( len(lfOneKnn) ), lfOneKnn ) plt.ylabel( '1-KNN Value' ) #plt.xlabel( 'Distribution Merge' ) plt.title( '1-KNN Performance' ) plt.subplot(313) plt.plot( range( len(lf5Knn) ), lf5Knn ) plt.ylabel( '% Correct Classification' ) #plt.xlabel( 'Distribution Merge' ) plt.title( '5-KNN Performance' ) plt.subplots_adjust() plt.show() ''' Function to plot 2 distributions ''' def _plotDist( naDist1, naDist2, i ): plt.clf() plt.scatter( naDist1[:,0], naDist1[:,1] ) plt.scatter( naDist2[:,0], naDist2[:,1], color='r' ) plt.ylabel( 'Feature 2' ) plt.xlabel( 'Feature 1' ) plt.title( 'Iteration ' + str(i) ) plt.show() ''' Function to test KNN performance ''' def _knnResult( naData ): ''' Split up data into training/testing ''' lSplit = naData.shape[0] * .7 naTrain = naData[:lSplit, :] naTest = naData[lSplit:, :] knn.addEvidence( naTrain.astype(float), 1 ); ''' Query with last column omitted and 5 nearest neighbors ''' naResults = knn.query( naTest[:,:-1], 5, 'mode') ''' Count returns which are correct ''' lCount = 0 for i, dVal in enumerate(naResults): if dVal == naTest[i,-1]: lCount = lCount + 1 dResult = float(lCount) / naResults.size return dResult ''' Tests performance of 1-KNN ''' def 
_test1(): ''' Generate three random samples to show the value of 1-KNN compared to 5KNN learner performance ''' for i in range(3): ''' Select one of three distributions ''' if i == 0: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[500,2] ) naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) ) elif i == 1: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.1,size=[500,2] ) naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) ) else: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[250,2] ) naTest2 = np.hstack( (naTest2, np.ones(250).reshape(-1,1) ) ) naOrig = np.vstack( (naTest1, naTest2) ) naBoth = np.vstack( (naTest1, naTest2) ) ''' Keep track of runtimes ''' t = clock() cOneRuntime = t-t; cKnnRuntime = t-t; lfResults = [] lfKnnResults = [] for i in range( 15 ): #_plotDist( naTest1, naBoth[100:,:], i ) t = clock() lfResults.append( oneKnn( naBoth ) ) cOneRuntime = cOneRuntime + (clock() - t) t = clock() lfKnnResults.append( _knnResult( np.random.permutation(naBoth) ) ) cKnnRuntime = cKnnRuntime + (clock() - t) naBoth[500:,0] = naBoth[500:,0] - .1 print 'Runtime OneKnn:', cOneRuntime print 'Runtime 5-KNN:', cKnnRuntime _plotResults( naTest1, naTest2, lfResults, lfKnnResults ) ''' Tests performance of 1-KNN ''' def _test2(): ''' Generate three random samples to show the value of 1-KNN compared to 5KNN learner performance ''' np.random.seed( 12345 ) ''' Create 5 distributions for each of the 5 attributes ''' dist1 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist2 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist3 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist4 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist5 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) lDists = [ dist1, dist2, dist3, dist4, dist5 ] ''' All features used except for distribution 4 ''' distY = np.sin( dist1 ) + np.sin( dist2 ) + np.sin( dist3 ) + np.sin( dist5 ) distY = distY.reshape( -1, 1 ) for i, fVal in enumerate( distY ): if fVal >= 0: distY[i] = 1 else: distY[i] = 0 for i in range( 1, 6 ): lsNames = [] lf1Vals = [] lfVals = [] for perm in itertools.combinations( '12345', i ): ''' set test distribution to first element ''' naTest = lDists[ int(perm[0]) - 1 ] sPerm = perm[0] ''' stack other distributions on ''' for j in range( 1, len(perm) ): sPerm = sPerm + str(perm[j]) naTest = np.hstack( (naTest, lDists[ int(perm[j]) - 1 ] ) ) ''' finally stack y values ''' naTest = np.hstack( (naTest, distY) ) lf1Vals.append( oneKnn( naTest ) ) lfVals.append( _knnResult( np.random.permutation(naTest) ) ) lsNames.append( sPerm ) ''' Plot results ''' plt1 = plt.bar( np.arange(len(lf1Vals)), lf1Vals, .2, color='r' ) plt2 = plt.bar( np.arange(len(lfVals)) + 0.2, lfVals, .2, color='b' ) plt.legend( (plt1[0], plt2[0]), ('1-KNN', 'KNN, K=5') ) plt.ylabel('1-KNN Value/KNN Classification') plt.xlabel('Feature Set') plt.title('Combinations of ' + str(i) + ' Features') plt.ylim( (0,1) ) if len(lf1Vals) < 2: plt.xlim( (-1,1) ) gca().xaxis.set_ticks( np.arange(len(lf1Vals)) + .2 ) gca().xaxis.set_ticklabels( lsNames ) plt.show() if __name__ == '__main__': _test1() #_test2()
2.625
3
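`oneKnn` above scores a feature set as the fraction of points whose nearest neighbour carries the same class label, computed with explicit Python loops. A vectorised NumPy sketch of the same quantity (not the module's own implementation, and O(n^2) in memory) is:

import numpy as np

def one_nn_agreement(data):
    """data is an (n, d+1) array whose last column is the class label."""
    X, y = data[:, :-1], data[:, -1]
    d2 = ((X[:, None, :] - X[None, :, :]) ** 2).sum(axis=-1)  # pairwise squared distances
    np.fill_diagonal(d2, np.inf)                              # a point may not be its own neighbour
    nearest = d2.argmin(axis=1)
    return float((y == y[nearest]).mean())

For the 1000-point test distributions used in `_test2`, this full distance matrix fits comfortably in memory.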
pyscf/nao/m_comp_coulomb_pack.py
robert-anderson/pyscf
2
1791
<filename>pyscf/nao/m_comp_coulomb_pack.py<gh_stars>1-10 # Copyright 2014-2018 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function, division from pyscf.nao.m_coulomb_am import coulomb_am import numpy as np try: import numba as nb from pyscf.nao.m_numba_utils import fill_triu_v2, fill_tril use_numba = True except: use_numba = False # # # def comp_coulomb_pack(sv, ao_log=None, funct=coulomb_am, dtype=np.float64, **kvargs): """ Computes the matrix elements given by funct, for instance coulomb interaction Args: sv : (System Variables), this must have arrays of coordinates and species, etc ao_log : description of functions (either orbitals or product basis functions) Returns: matrix elements for the whole system in packed form (lower triangular part) """ from pyscf.nao.m_ao_matelem import ao_matelem_c from pyscf.nao.m_pack2den import ij2pack_l aome = ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp) me = ao_matelem_c(sv.ao_log) if ao_log is None else aome.init_one_set(ao_log) atom2s = np.zeros((sv.natm+1), dtype=np.int64) for atom,sp in enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp] norbs = atom2s[-1] res = np.zeros(norbs*(norbs+1)//2, dtype=dtype) for atom1,[sp1,rv1,s1,f1] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): #print("atom1 = {0}, rv1 = {1}".format(atom1, rv1)) for atom2,[sp2,rv2,s2,f2] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): if atom2>atom1: continue # skip oo2f = funct(me,sp1,rv1,sp2,rv2, **kvargs) if use_numba: fill_triu_v2(oo2f, res, s1, f1, s2, f2, norbs) else: for i1 in range(s1,f1): for i2 in range(s2, min(i1+1, f2)): res[ij2pack_l(i1,i2,norbs)] = oo2f[i1-s1,i2-s2] #print("number call = ", count) #print("sum kernel: {0:.6f}".format(np.sum(abs(res)))) #np.savetxt("kernel_pyscf.txt", res) #import sys #sys.exit() return res, norbs
1.890625
2
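`comp_coulomb_pack` returns only the lower triangle of a symmetric matrix in packed (one-dimensional) form; the exact index convention is whatever `m_pack2den.ij2pack_l` defines. The snippet below uses NumPy's row-major lower-triangle packing purely to illustrate why a symmetric N x N matrix needs only N(N+1)/2 stored entries.

import numpy as np

n = 4
full = np.arange(n * n, dtype=float).reshape(n, n)
full = (full + full.T) / 2                 # make the matrix symmetric
packed = full[np.tril_indices(n)]          # lower triangle, row by row
assert packed.size == n * (n + 1) // 2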
nova/tests/unit/test_service_auth.py
panguan737/nova
0
1792
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from keystoneauth1 import service_token import mock import nova.conf from nova import context from nova import service_auth from nova import test CONF = nova.conf.CONF class ServiceAuthTestCase(test.NoDBTestCase): def setUp(self): super(ServiceAuthTestCase, self).setUp() self.ctx = context.RequestContext('fake', 'fake') self.addCleanup(service_auth.reset_globals) @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_no_wraps(self, mock_load): context = mock.MagicMock() context.get_auth_plugin.return_value = "fake" result = service_auth.get_auth_plugin(context) self.assertEqual("fake", result) mock_load.assert_not_called() @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_wraps(self, mock_load): self.flags(send_service_user_token=True, group='service_user') result = service_auth.get_auth_plugin(self.ctx) self.assertIsInstance(result, service_token.ServiceTokenAuthWrapper) @mock.patch.object(ks_loading, 'load_auth_from_conf_options', return_value=None) def test_get_auth_plugin_wraps_bad_config(self, mock_load): """Tests the case that send_service_user_token is True but there is some misconfiguration with the [service_user] section which makes KSA return None for the service user auth. """ self.flags(send_service_user_token=True, group='service_user') result = service_auth.get_auth_plugin(self.ctx) self.assertEqual(1, mock_load.call_count) self.assertNotIsInstance(result, service_token.ServiceTokenAuthWrapper)
1.789063
2
classification/model/build_gen.py
LittleWat/MCD_DA
464
1793
import svhn2mnist import usps import syn2gtrsb import syndig2svhn def Generator(source, target, pixelda=False): if source == 'usps' or target == 'usps': return usps.Feature() elif source == 'svhn': return svhn2mnist.Feature() elif source == 'synth': return syn2gtrsb.Feature() def Classifier(source, target): if source == 'usps' or target == 'usps': return usps.Predictor() if source == 'svhn': return svhn2mnist.Predictor() if source == 'synth': return syn2gtrsb.Predictor()
2.59375
3
deep_table/nn/models/loss/info_nce_loss.py
pfnet-research/deep-table
48
1794
<reponame>pfnet-research/deep-table<filename>deep_table/nn/models/loss/info_nce_loss.py import torch from torch import Tensor from torch.nn.modules.loss import _Loss class InfoNCELoss(_Loss): """Info NCE Loss. A type of contrastive loss function used for self-supervised learning. References: <NAME>, <NAME>, and <NAME>, "Representation Learning with Contrastive Predictive Coding," ArXiv:1807.03748 [cs.LG], 2018. <https://arxiv.org/abs/1807.03748v2> """ def __init__(self, reduction: str = "sum") -> None: """ Args: reduction (str) """ super().__init__(reduction=reduction) self.reduction = reduction def forward(self, z_origin: Tensor, z_noisy: Tensor, t: float = 0.7) -> Tensor: sim = cos_sim_matrix(z_origin, z_noisy) exp_sim = torch.exp(sim / t) loss = -torch.log(torch.diagonal(exp_sim) / exp_sim.sum(1)) if self.reduction == "sum": loss = loss.sum() elif self.reduction == "mean": loss = loss.mean() return loss def cos_sim_matrix(a: Tensor, b: Tensor, eps: float = 1e-8) -> Tensor: a_n, b_n = a.norm(dim=1), b.norm(dim=1) a_norm = a / torch.clamp(a_n.unsqueeze(1), min=eps) b_norm = b / torch.clamp(b_n.unsqueeze(1), min=eps) sim_matrix = torch.mm(a_norm, b_norm.transpose(0, 1)) return sim_matrix
2.625
3
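A minimal usage sketch for the loss class above, with made-up batch and embedding sizes; row i of `z_noisy` acts as the positive for row i of `z_origin` and every other row as a negative. The import assumes the package is installed so the module path in the header resolves.

import torch
from deep_table.nn.models.loss.info_nce_loss import InfoNCELoss

loss_fn = InfoNCELoss(reduction="mean")
z_origin = torch.randn(32, 128, requires_grad=True)  # embeddings of the original rows
z_noisy = torch.randn(32, 128)                       # embeddings of the corrupted/augmented rows
loss = loss_fn(z_origin, z_noisy, t=0.7)
loss.backward()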
patroni/config.py
korkin25/patroni
0
1795
import json import logging import os import shutil import tempfile import yaml from collections import defaultdict from copy import deepcopy from patroni import PATRONI_ENV_PREFIX from patroni.exceptions import ConfigParseError from patroni.dcs import ClusterConfig from patroni.postgresql.config import CaseInsensitiveDict, ConfigHandler from patroni.utils import deep_compare, parse_bool, parse_int, patch_config logger = logging.getLogger(__name__) _AUTH_ALLOWED_PARAMETERS = ( 'username', 'password', 'sslmode', 'sslcert', 'sslkey', 'sslpassword', 'sslrootcert', 'sslcrl', 'sslcrldir', 'gssencmode', 'channel_binding' ) def default_validator(conf): if not conf: return "Config is empty." class Config(object): """ This class is responsible for: 1) Building and giving access to `effective_configuration` from: * `Config.__DEFAULT_CONFIG` -- some sane default values * `dynamic_configuration` -- configuration stored in DCS * `local_configuration` -- configuration from `config.yml` or environment 2) Saving and loading `dynamic_configuration` into 'patroni.dynamic.json' file located in local_configuration['postgresql']['data_dir'] directory. This is necessary to be able to restore `dynamic_configuration` if DCS was accidentally wiped 3) Loading of configuration file in the old format and converting it into new format 4) Mimicking some of the `dict` interfaces to make it possible to work with it as with the old `config` object. """ PATRONI_CONFIG_VARIABLE = PATRONI_ENV_PREFIX + 'CONFIGURATION' __CACHE_FILENAME = 'patroni.dynamic.json' __DEFAULT_CONFIG = { 'ttl': 30, 'loop_wait': 10, 'retry_timeout': 10, 'maximum_lag_on_failover': 1048576, 'maximum_lag_on_syncnode': -1, 'check_timeline': False, 'master_start_timeout': 300, 'master_stop_timeout': 0, 'synchronous_mode': False, 'synchronous_mode_strict': False, 'synchronous_node_count': 1, 'standby_cluster': { 'create_replica_methods': '', 'host': '', 'port': '', 'primary_slot_name': '', 'restore_command': '', 'archive_cleanup_command': '', 'recovery_min_apply_delay': '' }, 'postgresql': { 'bin_dir': '', 'use_slots': True, 'parameters': CaseInsensitiveDict({p: v[0] for p, v in ConfigHandler.CMDLINE_OPTIONS.items() if p not in ('wal_keep_segments', 'wal_keep_size')}) }, 'watchdog': { 'mode': 'automatic', } } def __init__(self, configfile, validator=default_validator): self._modify_index = -1 self._dynamic_configuration = {} self.__environment_configuration = self._build_environment_configuration() # Patroni reads the configuration from the command-line argument if it exists, otherwise from the environment self._config_file = configfile and os.path.exists(configfile) and configfile if self._config_file: self._local_configuration = self._load_config_file() else: config_env = os.environ.pop(self.PATRONI_CONFIG_VARIABLE, None) self._local_configuration = config_env and yaml.safe_load(config_env) or self.__environment_configuration if validator: error = validator(self._local_configuration) if error: raise ConfigParseError(error) self.__effective_configuration = self._build_effective_configuration({}, self._local_configuration) self._data_dir = self.__effective_configuration.get('postgresql', {}).get('data_dir', "") self._cache_file = os.path.join(self._data_dir, self.__CACHE_FILENAME) self._load_cache() self._cache_needs_saving = False @property def config_file(self): return self._config_file @property def dynamic_configuration(self): return deepcopy(self._dynamic_configuration) def check_mode(self, mode): return 
bool(parse_bool(self._dynamic_configuration.get(mode))) def _load_config_path(self, path): """ If path is a file, loads the yml file pointed to by path. If path is a directory, loads all yml files in that directory in alphabetical order """ if os.path.isfile(path): files = [path] elif os.path.isdir(path): files = [os.path.join(path, f) for f in sorted(os.listdir(path)) if (f.endswith('.yml') or f.endswith('.yaml')) and os.path.isfile(os.path.join(path, f))] else: logger.error('config path %s is neither directory nor file', path) raise ConfigParseError('invalid config path') overall_config = {} for fname in files: with open(fname) as f: config = yaml.safe_load(f) patch_config(overall_config, config) return overall_config def _load_config_file(self): """Loads config.yaml from filesystem and applies some values which were set via ENV""" config = self._load_config_path(self._config_file) patch_config(config, self.__environment_configuration) return config def _load_cache(self): if os.path.isfile(self._cache_file): try: with open(self._cache_file) as f: self.set_dynamic_configuration(json.load(f)) except Exception: logger.exception('Exception when loading file: %s', self._cache_file) def save_cache(self): if self._cache_needs_saving: tmpfile = fd = None try: (fd, tmpfile) = tempfile.mkstemp(prefix=self.__CACHE_FILENAME, dir=self._data_dir) with os.fdopen(fd, 'w') as f: fd = None json.dump(self.dynamic_configuration, f) tmpfile = shutil.move(tmpfile, self._cache_file) self._cache_needs_saving = False except Exception: logger.exception('Exception when saving file: %s', self._cache_file) if fd: try: os.close(fd) except Exception: logger.error('Can not close temporary file %s', tmpfile) if tmpfile and os.path.exists(tmpfile): try: os.remove(tmpfile) except Exception: logger.error('Can not remove temporary file %s', tmpfile) # configuration could be either ClusterConfig or dict def set_dynamic_configuration(self, configuration): if isinstance(configuration, ClusterConfig): if self._modify_index == configuration.modify_index: return False # If the index didn't changed there is nothing to do self._modify_index = configuration.modify_index configuration = configuration.data if not deep_compare(self._dynamic_configuration, configuration): try: self.__effective_configuration = self._build_effective_configuration(configuration, self._local_configuration) self._dynamic_configuration = configuration self._cache_needs_saving = True return True except Exception: logger.exception('Exception when setting dynamic_configuration') def reload_local_configuration(self): if self.config_file: try: configuration = self._load_config_file() if not deep_compare(self._local_configuration, configuration): new_configuration = self._build_effective_configuration(self._dynamic_configuration, configuration) self._local_configuration = configuration self.__effective_configuration = new_configuration return True else: logger.info('No local configuration items changed.') except Exception: logger.exception('Exception when reloading local configuration from %s', self.config_file) @staticmethod def _process_postgresql_parameters(parameters, is_local=False): return {name: value for name, value in (parameters or {}).items() if name not in ConfigHandler.CMDLINE_OPTIONS or not is_local and ConfigHandler.CMDLINE_OPTIONS[name][1](value)} def _safe_copy_dynamic_configuration(self, dynamic_configuration): config = deepcopy(self.__DEFAULT_CONFIG) for name, value in dynamic_configuration.items(): if name == 'postgresql': for name, value in 
(value or {}).items(): if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value)) elif name not in ('connect_address', 'listen', 'data_dir', 'pgpass', 'authentication'): config['postgresql'][name] = deepcopy(value) elif name == 'standby_cluster': for name, value in (value or {}).items(): if name in self.__DEFAULT_CONFIG['standby_cluster']: config['standby_cluster'][name] = deepcopy(value) elif name in config: # only variables present in __DEFAULT_CONFIG allowed to be overridden from DCS if name in ('synchronous_mode', 'synchronous_mode_strict'): config[name] = value else: config[name] = int(value) return config @staticmethod def _build_environment_configuration(): ret = defaultdict(dict) def _popenv(name): return os.environ.pop(PATRONI_ENV_PREFIX + name.upper(), None) for param in ('name', 'namespace', 'scope'): value = _popenv(param) if value: ret[param] = value def _fix_log_env(name, oldname): value = _popenv(oldname) name = PATRONI_ENV_PREFIX + 'LOG_' + name.upper() if value and name not in os.environ: os.environ[name] = value for name, oldname in (('level', 'loglevel'), ('format', 'logformat'), ('dateformat', 'log_datefmt')): _fix_log_env(name, oldname) def _set_section_values(section, params): for param in params: value = _popenv(section + '_' + param) if value: ret[section][param] = value _set_section_values('restapi', ['listen', 'connect_address', 'certfile', 'keyfile', 'keyfile_password', 'cafile', 'ciphers', 'verify_client', 'http_extra_headers', 'https_extra_headers', 'allowlist', 'allowlist_include_members']) _set_section_values('ctl', ['insecure', 'cacert', 'certfile', 'keyfile', 'keyfile_password']) _set_section_values('postgresql', ['listen', 'connect_address', 'config_dir', 'data_dir', 'pgpass', 'bin_dir']) _set_section_values('log', ['level', 'traceback_level', 'format', 'dateformat', 'max_queue_size', 'dir', 'file_size', 'file_num', 'loggers']) _set_section_values('raft', ['data_dir', 'self_addr', 'partner_addrs', 'password', 'bind_addr']) for first, second in (('restapi', 'allowlist_include_members'), ('ctl', 'insecure')): value = ret.get(first, {}).pop(second, None) if value: value = parse_bool(value) if value is not None: ret[first][second] = value for second in ('max_queue_size', 'file_size', 'file_num'): value = ret.get('log', {}).pop(second, None) if value: value = parse_int(value) if value is not None: ret['log'][second] = value def _parse_list(value): if not (value.strip().startswith('-') or '[' in value): value = '[{0}]'.format(value) try: return yaml.safe_load(value) except Exception: logger.exception('Exception when parsing list %s', value) return None for first, second in (('raft', 'partner_addrs'), ('restapi', 'allowlist')): value = ret.get(first, {}).pop(second, None) if value: value = _parse_list(value) if value: ret[first][second] = value def _parse_dict(value): if not value.strip().startswith('{'): value = '{{{0}}}'.format(value) try: return yaml.safe_load(value) except Exception: logger.exception('Exception when parsing dict %s', value) return None for first, params in (('restapi', ('http_extra_headers', 'https_extra_headers')), ('log', ('loggers',))): for second in params: value = ret.get(first, {}).pop(second, None) if value: value = _parse_dict(value) if value: ret[first][second] = value def _get_auth(name, params=None): ret = {} for param in params or _AUTH_ALLOWED_PARAMETERS[:2]: value = _popenv(name + '_' + param) if value: ret[param] = value return ret restapi_auth = _get_auth('restapi') if restapi_auth: 
ret['restapi']['authentication'] = restapi_auth authentication = {} for user_type in ('replication', 'superuser', 'rewind'): entry = _get_auth(user_type, _AUTH_ALLOWED_PARAMETERS) if entry: authentication[user_type] = entry if authentication: ret['postgresql']['authentication'] = authentication for param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): # PATRONI_(ETCD|CONSUL|ZOOKEEPER|EXHIBITOR|...)_(HOSTS?|PORT|..) name, suffix = (param[8:].split('_', 1) + [''])[:2] if suffix in ('HOST', 'HOSTS', 'PORT', 'USE_PROXIES', 'PROTOCOL', 'SRV', 'SRV_SUFFIX', 'URL', 'PROXY', 'CACERT', 'CERT', 'KEY', 'VERIFY', 'TOKEN', 'CHECKS', 'DC', 'CONSISTENCY', 'REGISTER_SERVICE', 'SERVICE_CHECK_INTERVAL', 'NAMESPACE', 'CONTEXT', 'USE_ENDPOINTS', 'SCOPE_LABEL', 'ROLE_LABEL', 'POD_IP', 'PORTS', 'LABELS', 'BYPASS_API_SERVICE', 'KEY_PASSWORD', 'USE_SSL', 'SET_ACLS') and name: value = os.environ.pop(param) if suffix == 'PORT': value = value and parse_int(value) elif suffix in ('HOSTS', 'PORTS', 'CHECKS'): value = value and _parse_list(value) elif suffix in ('LABELS', 'SET_ACLS'): value = _parse_dict(value) elif suffix in ('USE_PROXIES', 'REGISTER_SERVICE', 'USE_ENDPOINTS', 'BYPASS_API_SERVICE', 'VERIFY'): value = parse_bool(value) if value: ret[name.lower()][suffix.lower()] = value for dcs in ('etcd', 'etcd3'): if dcs in ret: ret[dcs].update(_get_auth(dcs)) users = {} for param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): name, suffix = (param[8:].rsplit('_', 1) + [''])[:2] # PATRONI_<username>_PASSWORD=<password>, PATRONI_<username>_OPTIONS=<option1,option2,...> # CREATE USER "<username>" WITH <OPTIONS> PASSWORD '<password>' if name and suffix == 'PASSWORD': password = <PASSWORD>(param) if password: users[name] = {'password': password} options = os.environ.pop(param[:-9] + '_OPTIONS', None) options = options and _parse_list(options) if options: users[name]['options'] = options if users: ret['bootstrap']['users'] = users return ret def _build_effective_configuration(self, dynamic_configuration, local_configuration): config = self._safe_copy_dynamic_configuration(dynamic_configuration) for name, value in local_configuration.items(): if name == 'postgresql': for name, value in (value or {}).items(): if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value, True)) elif name != 'use_slots': # replication slots must be enabled/disabled globally config['postgresql'][name] = deepcopy(value) elif name not in config or name in ['watchdog']: config[name] = deepcopy(value) if value else {} # restapi server expects to get restapi.auth = 'username:password' if 'restapi' in config and 'authentication' in config['restapi']: config['restapi']['auth'] = '{username}:{password}'.format(**config['restapi']['authentication']) # special treatment for old config # 'exhibitor' inside 'zookeeper': if 'zookeeper' in config and 'exhibitor' in config['zookeeper']: config['exhibitor'] = config['zookeeper'].pop('exhibitor') config.pop('zookeeper') pg_config = config['postgresql'] # no 'authentication' in 'postgresql', but 'replication' and 'superuser' if 'authentication' not in pg_config: pg_config['use_pg_rewind'] = 'pg_rewind' in pg_config pg_config['authentication'] = {u: pg_config[u] for u in ('replication', 'superuser') if u in pg_config} # no 'superuser' in 'postgresql'.'authentication' if 'superuser' not in pg_config['authentication'] and 'pg_rewind' in pg_config: pg_config['authentication']['superuser'] = pg_config['pg_rewind'] # handle setting 
additional connection parameters that may be available # in the configuration file, such as SSL connection parameters for name, value in pg_config['authentication'].items(): pg_config['authentication'][name] = {n: v for n, v in value.items() if n in _AUTH_ALLOWED_PARAMETERS} # no 'name' in config if 'name' not in config and 'name' in pg_config: config['name'] = pg_config['name'] updated_fields = ( 'name', 'scope', 'retry_timeout', 'synchronous_mode', 'synchronous_mode_strict', 'synchronous_node_count', 'maximum_lag_on_syncnode' ) pg_config.update({p: config[p] for p in updated_fields if p in config}) return config def get(self, key, default=None): return self.__effective_configuration.get(key, default) def __contains__(self, key): return key in self.__effective_configuration def __getitem__(self, key): return self.__effective_configuration[key] def copy(self): return deepcopy(self.__effective_configuration)
2.25
2
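The `Config` docstring above describes a three-layer build: packaged defaults, then the dynamic configuration stored in the DCS, then the local file/environment configuration, each layer overriding the one before it. The toy merge below illustrates that precedence only; Patroni's real logic lives in `patch_config` and `_build_effective_configuration`, and the keys shown are just examples.

from copy import deepcopy

def merge(base, override):
    out = deepcopy(base)
    for key, value in (override or {}).items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = merge(out[key], value)
        else:
            out[key] = deepcopy(value)
    return out

defaults = {'ttl': 30, 'loop_wait': 10, 'postgresql': {'use_slots': True}}
dynamic = {'ttl': 20}                                          # from the DCS
local = {'postgresql': {'data_dir': '/var/lib/postgresql'}}    # from config.yml / environment
effective = merge(merge(defaults, dynamic), local)
# -> {'ttl': 20, 'loop_wait': 10, 'postgresql': {'use_slots': True, 'data_dir': '/var/lib/postgresql'}}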
src/Products/CMFCore/tests/test_DirectoryView.py
fdiary/Products.CMFCore
3
1796
############################################################################## # # Copyright (c) 2002 Zope Foundation and Contributors. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """ Unit tests for DirectoryView module. """ import sys import unittest import warnings from os import mkdir from os import remove from os.path import join from tempfile import mktemp from App.config import getConfiguration from . import _globals from .base.dummy import DummyFolder from .base.testcase import FSDVTest from .base.testcase import WritableFSDVTest class DirectoryViewPathTests(unittest.TestCase): """ These test that, no matter what is stored in their dirpath, FSDV's will do their best to find an appropriate skin and only do nothing in the case where an appropriate skin can't be found. """ def setUp(self): from Products.CMFCore.DirectoryView import addDirectoryViews from Products.CMFCore.DirectoryView import registerDirectory registerDirectory('fake_skins', _globals) self.ob = DummyFolder() addDirectoryViews(self.ob, 'fake_skins', _globals) def test__generateKey(self): from Products.CMFCore.DirectoryView import _generateKey key = _generateKey('Products.CMFCore', 'tests') self.assertEqual(key.split(':')[0], 'Products.CMFCore') subkey = _generateKey('Products.CMFCore', 'tests\foo') self.assertTrue(subkey.startswith(key)) def test__findProductForPath(self): from Products.CMFCore.DirectoryView import _findProductForPath cmfpath = sys.modules['Products.CMFCore'].__path__[0] self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', '')) cmfpath = join(cmfpath, 'tests') self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', 'tests')) def test_getDirectoryInfo(self): skin = self.ob.fake_skin skin.manage_properties('Products.CMFCore.tests:fake_skins/fake_skin') self.assertTrue(hasattr(self.ob.fake_skin, 'test1'), self.ob.fake_skin.getDirPath()) # Test we do nothing if given a really wacky path def test_UnhandleableExpandPath(self): file = mktemp() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') self.ob.fake_skin.manage_properties(file) self.assertEqual(self.ob.fake_skin.objectIds(), []) # Check that a warning was raised. self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category, UserWarning)) text = ('DirectoryView fake_skin refers to a non-existing path %r' % file) self.assertTrue(text in str(w[-1].message)) # this test tests that registerDirectory creates keys in the right format. 
def test_registerDirectoryKeys(self): from Products.CMFCore.DirectoryView import _dirreg dirs = _dirreg._directories self.assertTrue('Products.CMFCore.tests:fake_skins/fake_skin' in dirs, dirs.keys()) self.assertEqual(self.ob.fake_skin.getDirPath(), 'Products.CMFCore.tests:fake_skins/fake_skin') class DirectoryViewTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def test_addDirectoryViews(self): # Test addDirectoryViews # also test registration of directory views doesn't barf pass def test_DirectoryViewExists(self): # Check DirectoryView added by addDirectoryViews # appears as a DirectoryViewSurrogate due # to Acquisition hackery. from Products.CMFCore.DirectoryView import DirectoryViewSurrogate self.assertTrue(isinstance(self.ob.fake_skin, DirectoryViewSurrogate)) def test_DirectoryViewMethod(self): # Check if DirectoryView method works self.assertEqual(self.ob.fake_skin.test1(), 'test1') def test_properties(self): # Make sure the directory view is reading properties self.assertEqual(self.ob.fake_skin.testPT.title, 'Zope Pope') def test_ignored(self): # Test that "artifact" files and dirs are ignored for name in '#test1', 'CVS', '.test1', 'test1~': self.assertTrue(name not in self.ob.fake_skin.objectIds(), '%s not ignored' % name) def test_surrogate_writethrough(self): # CMF Collector 316: It is possible to cause ZODB writes because # setting attributes on the non-persistent surrogate writes them # into the persistent DirectoryView as well. This is bad in situations # where you only want to store markers and remove them before the # transaction has ended - they never got removed because there was # no equivalent __delattr__ on the surrogate that would clean up # the persistent DirectoryView as well. fs = self.ob.fake_skin test_foo = 'My Foovalue' fs.foo = test_foo self.assertEqual(fs.foo, test_foo) self.assertEqual(fs.__dict__['_real'].foo, test_foo) del fs.foo self.assertRaises(AttributeError, getattr, fs, 'foo') self.assertRaises(AttributeError, getattr, fs.__dict__['_real'], 'foo') class DirectoryViewIgnoreTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self.manual_ign = ('CVS', 'SVN', 'test_manual_ignore.py') self._registerDirectory(self, ignore=self.manual_ign) def test_ignored(self): # Test that "artifact" files and dirs are ignored, # even when a custom ignore list is used; and that the # custom ignore list is also honored auto_ign = ('#test1', '.test1', 'test1~') must_ignore = self.manual_ign + auto_ign + ('test_manual_ignore',) visible = self.ob.fake_skin.objectIds() for name in must_ignore: self.assertFalse(name in visible) class DirectoryViewFolderTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def tearDown(self): from Products.CMFCore import DirectoryView # This is nasty, but there is no way to unregister anything # right now... 
metatype_registry = DirectoryView._dirreg._meta_types if 'FOLDER' in metatype_registry: del metatype_registry['FOLDER'] FSDVTest.tearDown(self) def test_DirectoryViewMetadata(self): # Test to determine if metadata shows up correctly on a # FSDV that has a corresponding .metadata file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.title, 'test_directory Title') def test_DirectoryViewMetadataOnPropertyManager(self): # Test to determine if metadata shows up correctly on a # FSDV that has a corresponding .metadata file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.getProperty('title'), 'test_directory Title') def test_DirectoryViewFolderDefault(self): # Test that a folder inside the fake skin really is of type # DirectoryViewSurrogate from Products.CMFCore.DirectoryView import DirectoryViewSurrogate testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DirectoryViewSurrogate)) def test_DirectoryViewFolderCustom(self): # Now we register a different class under the fake meta_type # "FOLDER" and test again... from Products.CMFCore.DirectoryView import DirectoryView from Products.CMFCore.DirectoryView import registerMetaType class DummyDirectoryViewSurrogate: pass class DummyDirectoryView(DirectoryView): def __of__(self, parent): return DummyDirectoryViewSurrogate() registerMetaType('FOLDER', DummyDirectoryView) # In order to regenerate the FSDV data we need to remove and # register again, that way the newly registered meta_type is used self.ob._delObject('fake_skin') self._registerDirectory(self) testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DummyDirectoryViewSurrogate)) class DebugModeTests(WritableFSDVTest): def setUp(self): from Products.CMFCore.DirectoryView import _dirreg WritableFSDVTest.setUp(self) self.saved_cfg_debug_mode = getConfiguration().debug_mode getConfiguration().debug_mode = True # initialise skins self._registerDirectory(self) # add a method to the fake skin folder self._writeFile('test2.py', "return 'test2'") # edit the test1 method self._writeFile('test1.py', "return 'new test1'") # add a new folder mkdir(join(self.skin_path_name, 'test3')) info = _dirreg.getDirectoryInfo(self.ob.fake_skin._dirpath) info.reload() self.use_dir_mtime = info.use_dir_mtime def tearDown(self): getConfiguration().debug_mode = self.saved_cfg_debug_mode WritableFSDVTest.tearDown(self) def test_AddNewMethod(self): # See if a method added to the skin folder can be found self.assertEqual(self.ob.fake_skin.test2(), 'test2') def test_EditMethod(self): # See if an edited method exhibits its new behaviour self.assertEqual(self.ob.fake_skin.test1(), 'new test1') def test_DeleteMethod(self): # Make sure a deleted method goes away remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) def test_DeleteAddEditMethod(self): # Check that if we delete a method, then add it back, # then edit it, the DirectoryView notices. # This exercises yet another Win32 mtime weirdity. 
remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) # add method back to the fake skin folder self._writeFile('test2.py', "return 'test2.2'", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.2') # edit method self._writeFile('test2.py', "return 'test2.3'", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.3') def test_NewFolder(self): # See if a new folder shows up self.assertFalse(hasattr(self.ob.fake_skin, 'test3')) def test_DeleteFolder(self): # Make sure a deleted folder goes away self.assertTrue(hasattr(self.ob.fake_skin, 'test_directory')) # It has a file, which we need to delete first. self.assertTrue(hasattr(self.ob.fake_skin.test_directory, 'README.txt')) self._deleteFile(join('test_directory', 'README.txt'), self.use_dir_mtime) self._deleteDirectory('test_directory', self.use_dir_mtime) self.assertFalse(hasattr(self.ob.fake_skin, 'test_directory')) def test_suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(DirectoryViewPathTests)) suite.addTest(unittest.makeSuite(DirectoryViewTests)) suite.addTest(unittest.makeSuite(DirectoryViewIgnoreTests)) suite.addTest(unittest.makeSuite(DirectoryViewFolderTests)) suite.addTest(unittest.makeSuite(DebugModeTests)) return suite
1.914063
2
pycycle/elements/flight_conditions.py
eshendricks/pyCycle
0
1797
import openmdao.api as om from pycycle.thermo.cea import species_data from pycycle.constants import AIR_ELEMENTS from pycycle.elements.ambient import Ambient from pycycle.elements.flow_start import FlowStart class FlightConditions(om.Group): """Determines total and static flow properties given an altitude and Mach number using the input atmosphere model""" def initialize(self): self.options.declare('thermo_method', default='CEA', values=('CEA',), desc='Method for computing thermodynamic properties') self.options.declare('thermo_data', default=species_data.janaf, desc='thermodynamic data set', recordable=False) self.options.declare('elements', default=AIR_ELEMENTS, desc='set of elements present in the flow') self.options.declare('use_WAR', default=False, values=[True, False], desc='If True, includes WAR calculation') def setup(self): thermo_method = self.options['thermo_method'] thermo_data = self.options['thermo_data'] elements = self.options['elements'] use_WAR = self.options['use_WAR'] self.add_subsystem('ambient', Ambient(), promotes=('alt', 'dTs')) # inputs conv = self.add_subsystem('conv', om.Group(), promotes=['*']) if use_WAR == True: proms = ['Fl_O:*', 'MN', 'W', 'WAR'] else: proms = ['Fl_O:*', 'MN', 'W'] conv.add_subsystem('fs', FlowStart(thermo_method=thermo_method, thermo_data=thermo_data, elements=elements, use_WAR=use_WAR), promotes=proms) balance = conv.add_subsystem('balance', om.BalanceComp()) balance.add_balance('Tt', val=500.0, lower=1e-4, units='degR', desc='Total temperature', eq_units='degR') balance.add_balance('Pt', val=14.696, lower=1e-4, units='psi', desc='Total pressure', eq_units='psi') # sub.set_order(['fs','balance']) newton = conv.nonlinear_solver = om.NewtonSolver() newton.options['atol'] = 1e-10 newton.options['rtol'] = 1e-10 newton.options['maxiter'] = 10 newton.options['iprint'] = -1 newton.options['solve_subsystems'] = True newton.options['reraise_child_analysiserror'] = False newton.linesearch = om.BoundsEnforceLS() newton.linesearch.options['bound_enforcement'] = 'scalar' newton.linesearch.options['iprint'] = -1 # newton.linesearch.options['solve_subsystems'] = True conv.linear_solver = om.DirectSolver(assemble_jac=True) self.connect('ambient.Ps', 'balance.rhs:Pt') self.connect('ambient.Ts', 'balance.rhs:Tt') self.connect('balance.Pt', 'fs.P') self.connect('balance.Tt', 'fs.T') self.connect('Fl_O:stat:P', 'balance.lhs:Pt') self.connect('Fl_O:stat:T', 'balance.lhs:Tt') # self.set_order(['ambient', 'subgroup']) if __name__ == "__main__": p1 = om.Problem() p1.model = om.Group() des_vars = p1.model.add_subsystem('des_vars', om.IndepVarComp()) des_vars.add_output('W', 0.0, units='lbm/s') des_vars.add_output('alt', 1., units='ft') des_vars.add_output('MN', 0.5) des_vars.add_output('dTs', 0.0, units='degR') fc = p1.model.add_subsystem("fc", FlightConditions()) p1.model.connect('des_vars.W', 'fc.W') p1.model.connect('des_vars.alt', 'fc.alt') p1.model.connect('des_vars.MN', 'fc.MN') p1.model.connect('des_vars.dTs', 'fc.dTs') p1.setup() # p1.root.list_connections() p1['des_vars.alt'] = 17868.79060515557 p1['des_vars.MN'] = 2.101070288213628 p1['des_vars.dTs'] = 0.0 p1['des_vars.W'] = 1.0 p1.run_model() print('Ts_atm: ', p1['fc.ambient.Ts']) print('Ts_set: ', p1['fc.Fl_O:stat:T']) print('Ps_atm: ', p1['fc.ambient.Ps']) print('Ps_set: ', p1['fc.Fl_O:stat:P']) print('rhos_atm: ', p1['fc.ambient.rhos']*32.175) print('rhos_set: ', p1['fc.Fl_O:stat:rho']) print('W', p1['fc.Fl_O:stat:W']) print('Pt: ', p1['fc.Fl_O:tot:P'])
2.328125
2
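`FlightConditions` above uses a Newton-driven `BalanceComp` to find the total temperature and pressure whose computed static conditions match the atmosphere at the given altitude and Mach number. For an ideal gas the connection between total and static states is the standard compressible-flow relation; the numbers below are only a rough sanity check with assumed values, since pyCycle evaluates the thermodynamics with CEA rather than these closed forms.

# Ideal-gas relations: Tt = Ts * (1 + (gamma - 1) / 2 * M**2), Pt/Ps = (Tt/Ts)**(gamma / (gamma - 1))
gamma = 1.4
M = 2.101
Ts = 390.0                                        # degR, assumed ambient static temperature
Tt = Ts * (1.0 + (gamma - 1.0) / 2.0 * M ** 2)
Pt_over_Ps = (Tt / Ts) ** (gamma / (gamma - 1.0))
print(Tt, Pt_over_Ps)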
server/cauth/views.py
mashaka/TravelHelper
0
1798
<reponame>mashaka/TravelHelper from django.shortcuts import render from django.contrib.auth.decorators import login_required from django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm from django.contrib.auth import update_session_auth_hash, login, authenticate from django.contrib import messages from django.shortcuts import render, redirect from social_django.models import UserSocialAuth from django.http import HttpResponse from django.shortcuts import get_object_or_404, redirect from rest_framework.authtoken.models import Token from app.methods import prepare_user def get_token(request): if request.user: user = request.user prepare_user(user) token,_ = Token.objects.get_or_create(user=user) url = "travel://?token=" + token.key + '&id=' + str(user.id) else: url = "travel://error" response = HttpResponse(url, status=302) response['Location'] = url return response @login_required def get_facebook_token(request): q = get_object_or_404(UserSocialAuth, user=request.user, provider='facebook') return HttpResponse(str(q.extra_data)) def signup(request): return render(request, 'signup.html') @login_required def home(request): return render(request, 'home.html') @login_required def settings(request): user = request.user try: github_login = user.social_auth.get(provider='github') except UserSocialAuth.DoesNotExist: github_login = None try: twitter_login = user.social_auth.get(provider='twitter') except UserSocialAuth.DoesNotExist: twitter_login = None try: facebook_login = user.social_auth.get(provider='facebook') except UserSocialAuth.DoesNotExist: facebook_login = None can_disconnect = (user.social_auth.count() > 1 or user.has_usable_password()) return render(request, 'settings.html', { 'facebook_login': facebook_login, 'can_disconnect': can_disconnect }) @login_required def password(request): if request.user.has_usable_password(): PasswordForm = PasswordChangeForm else: PasswordForm = AdminPasswordChangeForm if request.method == 'POST': form = PasswordForm(request.user, request.POST) if form.is_valid(): form.save() update_session_auth_hash(request, form.user) messages.success(request, 'Your password was successfully updated!') return redirect('password') else: messages.error(request, 'Please correct the error below.') else: form = PasswordForm(request.user) return render(request, 'password.html', {'form': form})
2.046875
2
samples/modules/tensorflow/magic_wand/train/data_split_person.py
lviala-zaack/zephyr
6,224
1799
# Lint as: python3 # coding=utf-8 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Split data into train, validation and test dataset according to person. That is, use some people's data as train, some other people's data as validation, and the rest ones' data as test. These data would be saved separately under "/person_split". It will generate new files with the following structure: ├──person_split │   ├── test │   ├── train │   └──valid """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import random from data_split import read_data from data_split import write_data def person_split(whole_data, train_names, valid_names, test_names): """Split data by person.""" random.seed(30) random.shuffle(whole_data) train_data = [] valid_data = [] test_data = [] for idx, data in enumerate(whole_data): # pylint: disable=unused-variable if data["name"] in train_names: train_data.append(data) elif data["name"] in valid_names: valid_data.append(data) elif data["name"] in test_names: test_data.append(data) print("train_length:" + str(len(train_data))) print("valid_length:" + str(len(valid_data))) print("test_length:" + str(len(test_data))) return train_data, valid_data, test_data if __name__ == "__main__": data = read_data("./data/complete_data") train_names = [ "hyw", "shiyun", "tangsy", "dengyl", "jiangyh", "xunkai", "negative3", "negative4", "negative5", "negative6" ] valid_names = ["lsj", "pengxl", "negative2", "negative7"] test_names = ["liucx", "zhangxy", "negative1", "negative8"] train_data, valid_data, test_data = person_split(data, train_names, valid_names, test_names) if not os.path.exists("./person_split"): os.makedirs("./person_split") write_data(train_data, "./person_split/train") write_data(valid_data, "./person_split/valid") write_data(test_data, "./person_split/test")
2.875
3