id (stringlengths 1–7) | text (stringlengths 6–1.03M) | dataset_id (stringclasses 1 value)
---|---|---|
101380
|
import math
import numpy as np
def quaternion_to_rotation_matrix(q):
# Original C++ Method defined in pba/src/pba/DataInterface.h
qq = math.sqrt(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3])
qw = qx = qy = qz = 0
if qq > 0: # NORMALIZE THE QUATERNION
qw = q[0] / qq
qx = q[1] / qq
qy = q[2] / qq
qz = q[3] / qq
else:
qw = 1
qx = qy = qz = 0
m = np.zeros((3, 3), dtype=float)
m[0][0] = float(qw * qw + qx * qx - qz * qz - qy * qy)
m[0][1] = float(2 * qx * qy - 2 * qz * qw)
m[0][2] = float(2 * qy * qw + 2 * qz * qx)
m[1][0] = float(2 * qx * qy + 2 * qw * qz)
m[1][1] = float(qy * qy + qw * qw - qz * qz - qx * qx)
m[1][2] = float(2 * qz * qy - 2 * qx * qw)
m[2][0] = float(2 * qx * qz - 2 * qy * qw)
m[2][1] = float(2 * qy * qz + 2 * qw * qx)
m[2][2] = float(qz * qz + qw * qw - qy * qy - qx * qx)
return m
def rotation_matrix_to_quaternion(m):
# Original C++ Method defined in pba/src/pba/DataInterface.h
q = np.array([0, 0, 0, 0], dtype=float)
q[0] = 1 + m[0][0] + m[1][1] + m[2][2]
if q[0] > 0.000000001:
q[0] = math.sqrt(q[0]) / 2.0
q[1] = (m[2][1] - m[1][2]) / (4.0 * q[0])
q[2] = (m[0][2] - m[2][0]) / (4.0 * q[0])
q[3] = (m[1][0] - m[0][1]) / (4.0 * q[0])
else:
if m[0][0] > m[1][1] and m[0][0] > m[2][2]:
s = 2.0 * math.sqrt(1.0 + m[0][0] - m[1][1] - m[2][2])
q[1] = 0.25 * s
q[2] = (m[0][1] + m[1][0]) / s
q[3] = (m[0][2] + m[2][0]) / s
q[0] = (m[1][2] - m[2][1]) / s
elif m[1][1] > m[2][2]:
s = 2.0 * math.sqrt(1.0 + m[1][1] - m[0][0] - m[2][2])
q[1] = (m[0][1] + m[1][0]) / s
q[2] = 0.25 * s
q[3] = (m[1][2] + m[2][1]) / s
q[0] = (m[0][2] - m[2][0]) / s
else:
s = 2.0 * math.sqrt(1.0 + m[2][2] - m[0][0] - m[1][1])
q[1] = (m[0][2] + m[2][0]) / s
q[2] = (m[1][2] + m[2][1]) / s
q[3] = 0.25 * s
q[0] = (m[0][1] - m[1][0]) / s
return q
class Extrinsics:
def __init__(self):
# center is the coordinate of the camera center with respect to the
# world coordinate frame (t = -R C)
self._center = np.array([0, 0, 0], dtype=float)
# the translation vector is the vector used to transform points in
# world coordinates to camera coordinates (C = -R^T t)
self._translation_vec = np.array([0, 0, 0], dtype=float)
# use for these attributes the getter and setter methods
self._quaternion = np.array([0, 0, 0, 0], dtype=float)
# for rotations the inverse is equal to the transpose
# self._rotation_inv_mat = np.linalg.transpose(self._rotation_mat)
self._rotation_mat = np.zeros((3, 3), dtype=float)
@staticmethod
def invert_transformation_mat(trans_mat):
# Exploit that the inverse of the rotation part is equal to the
# transposed of the rotation part. This should be more robust than
# trans_mat_inv = np.linalg.inv(trans_mat)
trans_mat_inv = np.zeros_like(trans_mat)
rotation_part_inv = trans_mat[0:3, 0:3].T
trans_mat_inv[0:3, 0:3] = rotation_part_inv
trans_mat_inv[0:3, 3] = -np.dot(rotation_part_inv, trans_mat[0:3, 3])
trans_mat_inv[3, 3] = 1
return trans_mat_inv
def is_rotation_mat_valid(self, some_mat):
        # Test whether some_mat is a valid rotation-like matrix: a proper
        # rotation has det = +1, while det = -1 indicates a reflection.
        # Both are accepted here.
det = np.linalg.det(some_mat)
is_close = np.isclose(det, 1) or np.isclose(det, -1)
# if not is_close:
# logger.vinfo('some_mat', some_mat)
# logger.vinfo('determinante', det)
return is_close
def set_quaternion(self, quaternion):
self._quaternion = quaternion
# we must change the rotation matrixes as well
self._rotation_mat = quaternion_to_rotation_matrix(quaternion)
def set_rotation_mat(self, rotation_mat):
assert self.is_rotation_mat_valid(rotation_mat)
self._rotation_mat = rotation_mat
# we must change the quaternion as well
self._quaternion = rotation_matrix_to_quaternion(rotation_mat)
def set_camera_center_after_rotation(self, center):
assert self.is_rotation_mat_valid(self._rotation_mat)
self._center = center
self._translation_vec = -np.dot(self._rotation_mat, center)
def set_camera_translation_vector_after_rotation(self, translation_vector):
# translation_vector: trans_vec = -Rc
assert self.is_rotation_mat_valid(self._rotation_mat)
self._translation_vec = translation_vector
self._center = -np.dot(
self._rotation_mat.transpose(), translation_vector
)
def get_quaternion(self):
return self._quaternion
def get_rotation_mat(self):
# Note:
# self._rotation_mat.T or self._rotation_mat.transpose()
# DO NOT CHANGE THE MATRIX
return self._rotation_mat
def get_translation_vec(self):
return self._translation_vec
def get_camera_center(self):
return self._center
def get_4x4_world_to_cam_mat(self):
# This matrix can be used to convert points given in world coordinates
# into points given in camera coordinates
# M = [R -Rc]
# [0 1],
# https://en.wikipedia.org/wiki/Transformation_matrix#/media/File:2D_affine_transformation_matrix.svg
homogeneous_mat = np.identity(4, dtype=float)
homogeneous_mat[0:3, 0:3] = self.get_rotation_mat()
homogeneous_mat[0:3, 3] = -self.get_rotation_mat().dot(
self.get_camera_center()
)
return homogeneous_mat
def set_4x4_cam_to_world_mat(self, cam_to_world_mat):
# This matrix can be used to convert points given in camera coordinates
# into points given in world coordinates
# M = [R^T c]
# [0 1]
#
# https://en.wikipedia.org/wiki/Transformation_matrix#/media/File:2D_affine_transformation_matrix.svg
rotation_part = cam_to_world_mat[0:3, 0:3]
translation_part = cam_to_world_mat[0:3, 3]
self.set_rotation_mat(rotation_part.transpose())
self.set_camera_center_after_rotation(translation_part)
def get_4x4_cam_to_world_mat(self):
# This matrix can be used to convert points given in camera coordinates
# into points given in world coordinates
# M = [R^T c]
# [0 1]
# :return:
#
# https://en.wikipedia.org/wiki/Transformation_matrix#/media/File:2D_affine_transformation_matrix.svg
homogeneous_mat = np.identity(4, dtype=float)
homogeneous_mat[0:3, 0:3] = self.get_rotation_mat().transpose()
homogeneous_mat[0:3, 3] = self.get_camera_center()
return homogeneous_mat
def cam_to_world_coord_multiple_coords(self, cam_coords):
num_coords = cam_coords.shape[0]
hom_entries = np.ones(num_coords).reshape((num_coords, 1))
cam_coords_hom = np.hstack((cam_coords, hom_entries))
world_coords_hom = (
self.get_4x4_cam_to_world_mat().dot(cam_coords_hom.T).T
)
world_coords = np.delete(world_coords_hom, 3, 1)
return world_coords
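# --- Added usage sketch (not part of the original sample) ---
# A quick round-trip check of the two conversion helpers above; it assumes both
# functions are in scope. A unit quaternion for a 90-degree rotation about the
# z-axis should survive conversion to a matrix and back.
if __name__ == "__main__":
    q = np.array([math.cos(math.pi / 4), 0.0, 0.0, math.sin(math.pi / 4)])  # (w, x, y, z)
    r = quaternion_to_rotation_matrix(q)
    q_back = rotation_matrix_to_quaternion(r)
    assert np.allclose(r, [[0, -1, 0], [1, 0, 0], [0, 0, 1]], atol=1e-9)
    assert np.allclose(q, q_back, atol=1e-9)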
|
StarcoderdataPython
|
3235175
|
<reponame>JacopoPan/leetcode-top100-liked-questions
"""
Runtime: 341 ms, faster than 63.77% of Python3 online submissions for Shuffle an Array.
Memory Usage: 19.6 MB, less than 94.64% of Python3 online submissions for Shuffle an Array.
"""
from typing import List
from typing import Optional
from random import randint
class Solution:
def __init__(self, nums: List[int]):
self.nums = nums
self.bak = self.nums
self.nums = self.shuffle()
def reset(self) -> List[int]:
return self.bak
def shuffle(self) -> List[int]:
ans = []
indices = list(range(len(self.nums)))
while indices:
idx = randint(0,len(indices)-1)
ans.append(self.bak[indices[idx]])
indices.remove(indices[idx])
return ans
def main():
obj = Solution([1,2,3])
ans_1 = obj.shuffle()
ans_2 = obj.reset()
ans_3 = obj.shuffle()
print('Output:', [ans_1, ans_2, ans_3])
print('Expected:', [[3,2,1],[1,2,3],[2,1,3]])
if __name__ == "__main__":
main()
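# --- Added note (not part of the original sample) ---
# The remove()-based shuffle above is O(n^2) because list.remove is linear.
# A minimal Fisher-Yates sketch with the same "return a new list" behaviour:
def fisher_yates(nums: List[int]) -> List[int]:
    out = nums[:]  # work on a copy so the caller's list is untouched
    for i in range(len(out) - 1, 0, -1):
        j = randint(0, i)  # pick a position from the not-yet-fixed prefix
        out[i], out[j] = out[j], out[i]
    return out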
|
StarcoderdataPython
|
3377168
|
from collections import defaultdict
from dockerscan import SharedConfig, String
class DockerImageInfoModel(SharedConfig):
image_path = String()
class DockerImageAnalyzeModel(SharedConfig):
image_path = String()
class DockerImageExtractModel(SharedConfig):
image_path = String()
extract_path = String()
class DockerImageInfo:
def __init__(self):
self.author = ""
self.host_name = ""
self.entry_point = ""
self.working_dir = ""
self.created_date = ""
self.docker_version = ""
self.cmd = ""
self.labels = []
self.environment = []
self.user = ""
#: dict - { PORT_NO: ["TCP", "UDP"]}
#: dict - { PORT_NO: ["TCP"]}
self.exposed_ports = defaultdict(set)
def add_layer_info(self, layer_info: dict):
# Get container config
# container_config = layer_info.get("container_config", None)
container_config = layer_info.get("config", None)
if container_config:
basic_info = {
"Hostname": "host_name",
"WorkingDir": "working_dir",
"Entrypoint": "entry_point",
"User": "user"
}
list_info = {
"Env": "environment",
"Labels": "labels"
}
for json_prop, class_prop in basic_info.items():
json_value = container_config.get(json_prop)
if json_value:
setattr(self, class_prop, json_value)
for json_prop, class_prop in list_info.items():
json_value = container_config.get(json_prop)
if json_value:
class_value = getattr(self, class_prop)
class_value.extend(json_value)
if container_config.get("Cmd", None):
# Get only the Cmd Command of the last layer
if "container" in layer_info:
self.cmd = " ".join(container_config.get("Cmd"))
# Add exposed ports
if container_config.get("ExposedPorts"):
for port in container_config.get("ExposedPorts").keys():
port, proto = port.split("/")
self.exposed_ports[port].add(proto)
        # Only store the date for the last layer; only the last layer
        # contains the "container" property.
if layer_info.get("container", None):
self.created_date = layer_info.get("created")
if layer_info.get("author"):
self.author = layer_info.get("author")
if layer_info.get("docker_version"):
self.docker_version = layer_info.get("docker_version")
__all__ = ("DockerImageInfoModel", "DockerImageInfo",
"DockerImageExtractModel", "DockerImageAnalyzeModel",)
|
StarcoderdataPython
|
4819906
|
#coding:utf-8
import networkx as nx
def load_edgelist(file_name):
graph = nx.Graph()
edges = []
with open(file_name) as file:
for line in file:
if line.startswith('#'):
continue
l = line.strip().split()
if len(l)==2:
u, v = l
edges.append((u, v, 1))
else:
u, v, weight = l
edges.append((u, v, float(weight)))
for u, v, weight in edges:
if not graph.has_edge(u, v):
graph.add_edge(u, v, weight=weight)
else:
graph[u][v]['weight'] += weight
return graph
def save_embedding(embeddings, file_name):
with open(file_name, 'w') as file:
for key, val in embeddings.items():
file.write('{} {}\n'.format(
key,
' '.join(list(map(str, val))),
))
def load_labels(file_name):
labels = {}
with open(file_name) as file:
for line in file:
l = line.strip().split()
node = l[0]
labels[node] = [int(label) for label in l[1:]]
return labels
def load_dataset(dataset, dataset_dir='datasets'):
graph = load_edgelist('{}/{}_edgelist.txt'.format(dataset_dir, dataset))
labels = load_labels('{}/{}_labels.txt'.format(dataset_dir, dataset))
return graph, labels
def load_hetero_nodes(file_name):
nodes = []
with open(file_name) as file:
for line in file:
node, tag = line.strip().split()
nodes.append((node, tag))
return nodes
def load_hetero_edges(file_name):
edges = []
with open(file_name) as file:
for line in file:
l = line.strip().split()
if len(l)==2:
edges.append((l[0], l[1], 1.0))
else:
edges.append((l[0], l[1], float(l[2])))
return edges
def load_hetero_graph(nodes_file_name, edges_file_name):
nodes = load_hetero_nodes(nodes_file_name)
edges = load_hetero_edges(edges_file_name)
graph = nx.Graph()
for node, tag in nodes:
graph.add_node(node, tag=tag)
for u, v, weight in edges:
graph.add_edge(u, v, weight=weight)
return graph
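# --- Added usage sketch (not part of the original module) ---
# The loaders above only expect whitespace-separated lines; a tiny, self-written
# edge list (file name is illustrative) exercises both branches of load_edgelist.
if __name__ == "__main__":
    with open('toy_edgelist.txt', 'w') as f:
        f.write('# comment lines are skipped\n')
        f.write('a b\n')        # unweighted -> weight 1
        f.write('b c 2.5\n')    # explicit weight
    g = load_edgelist('toy_edgelist.txt')
    print(g.number_of_nodes(), g['b']['c']['weight'])  # -> 3 2.5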
|
StarcoderdataPython
|
129462
|
<reponame>cojalvo/Map
import os, os.path, datetime, string, errno
from maperipy import *
import GenIsraelHikingTilesLite
# http://stackoverflow.com/questions/749711/how-to-get-the-python-exe-location-programmatically
MaperitiveDir = os.path.dirname(os.path.dirname(os.path.normpath(os.__file__)))
# App.log('MaperitiveDir: ' + MaperitiveDir)
ProgramFiles = os.path.normpath(os.path.dirname(MaperitiveDir))
# App.log('ProgramFiles: ' + ProgramFiles)
IsraelHikingDir = os.path.dirname(os.path.dirname(os.path.normpath(App.script_dir)))
# App.log('App.script_dir: ' + App.script_dir)
# App.log('IsraelHikingDir: ' + IsraelHikingDir)
App.run_command('change-dir dir="' + IsraelHikingDir +'"')
os.chdir(IsraelHikingDir)
# # Keep the name of the Tile Upload command
# upload_tiles = os.path.join(IsraelHikingDir, "Scripts", "Batch", "UploadTiles.bat")
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def add_to_PATH(app_dir):
full_app_dir=os.path.join(ProgramFiles, app_dir)
for path_dir in (string.split(os.environ["PATH"], os.pathsep)):
if os.path.basename(path_dir) == app_dir:
# Application already found in PATH
return
if not os.path.isdir(full_app_dir):
# Application not a sibling of Maperitive
App.log("Warning: " + app_dir + " location not found. Could not add it to PATH.")
return
os.environ["PATH"] = string.join([os.environ["PATH"],full_app_dir], os.pathsep)
# Keep batch windows open up to 24 hours
os.environ["NOPAUSE"] = "TIMEOUT /T 86400"
# os.rmdir(os.path.join(IsraelHikingDir, 'Site', 'Tiles_Lite' ))
gen_cmd = GenIsraelHikingTilesLite.IsraelHikingTileGenCommand()
App.run_command("run-script file=" + os.path.join("Scripts", "Maperitive", "IsraelHikingLite.mscript"))
# Map Created
#Original# App.run_command("generate-tiles minzoom=7 maxzoom=15 subpixel=3 tilesdir=" + IsraelHikingDir + "\Site\Tiles use-fprint=true")
gen_cmd.GenToDirectory(16, 16, os.path.join(IsraelHikingDir, 'Site', 'Tiles_Lite'))
App.collect_garbage()
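# --- Added note (not part of the original script) ---
# The mkdir_p() helper above reimplements "mkdir -p" for old Python versions;
# on Python 3.2+ the same effect is a single stdlib call, e.g.:
#     os.makedirs(os.path.join(IsraelHikingDir, 'Site', 'Tiles_Lite'), exist_ok=True)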
|
StarcoderdataPython
|
3224366
|
<gh_stars>0
from django.db import models
from django.contrib.auth.models import User
import datetime as dt
# Create your models here.
class Project(models.Model):
title = models.CharField(max_length=30)
image = models.ImageField(upload_to='images/')
description = models.TextField()
link = models.CharField(max_length=255)
user = models.ForeignKey(User, on_delete=models.CASCADE)
posted_on = models.DateTimeField(auto_now_add=True)
def save_project(self):
self.save()
def delete_project(self):
self.delete()
def __str__(self):
return self.title
class Profile(models.Model):
profile_pic = models.ImageField(upload_to='profile_pic/', null=True, blank=True)
    bio = models.TextField(blank=True)  # field type assumed; the original line was left incomplete
contacts = models.CharField(max_length=50, blank=True)
user = models.OneToOneField(User, on_delete=models.CASCADE)
def save_profile(self):
self.save()
def delete_profile(self):
self.delete()
def __str__(self):
return self.user.username
class Review(models.Model):
design = models.IntegerField(default=0)
usability = models.IntegerField(default=0)
content = models.IntegerField(default=0)
average = models.IntegerField(default=0)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def save_review(self):
self.save()
def delete_review(self):
self.delete()
def __str__(self):
return self.project.title
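# --- Added sketch (not part of the original models) ---
# One plausible way the Review.average field could be filled in from the three
# component scores before saving (plain arithmetic, shown outside the model):
def compute_review_average(design: int, usability: int, content: int) -> int:
    return round((design + usability + content) / 3)
# e.g. compute_review_average(4, 5, 3) -> 4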
|
StarcoderdataPython
|
3384692
|
#
# This software is delivered under the terms of the MIT License
#
# Copyright (c) 2009 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#
# Simple base class for plugins implementation
# Ref to http://martyalchin.com/2008/jan/10/simple-plugin-framework/
#
__all__ = ['PluginMount', 'PluginLoader', 'SourceManager']
import os, sys
verbose = 0
class PluginMount(type):
def __init__(cls, name, bases, attrs):
cls.plugin_map = {}
if not hasattr(cls, 'plugins'):
# This branch only executes when processing the mount point itself.
# So, since this is a new plugin type, not an implementation, this
# class shouldn't be registered as a plugin. Instead, it sets up a
# list where plugins can be registered later.
cls.plugins = []
else:
# This must be a plugin implementation, which should be registered.
# Simply appending it to the list is all that's needed to keep
# track of it later.
cls.plugins.append(cls)
def get_plugin(cls, name):
try:
p = cls.plugin_map[name]
except KeyError:
for p in cls.plugins:
if p.plugin_name_ == name:
cls.plugin_map[name] = p
return p
raise Exception, "Plugin not found: " + name
return p
class PluginLoader:
""" PluginLoader is a static class that loads all the availble
plugins from the plugins directory
"""
def __init__(self):
pdir = os.path.dirname(sys.argv[0])
pluginpath = os.path.join(pdir, "plugins")
try: # might not be a filesystem path
files = os.listdir(pluginpath)
sys.path.insert(0,pluginpath)
except OSError:
files = []
for file in files:
if file.endswith('.py'):
name = file.rsplit('.', 1)[0]
if verbose != 0:
print "Loading plugin " + name
__import__(name)
class SourceManager:
""" SourceManager plugins must derive from this class.
Methods that must be implemented by SourceManager plugins are:
name(), get_actual_revision(), get_head_revision(),
extract(), update(), commit(), rebase(), deliver(),
dump(), list().
Class attributes that must be available:
plugin_name_, plugin_description_
"""
__metaclass__ = PluginMount
loader = PluginLoader()
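# --- Added sketch (not part of the original module) ---
# Under this module's (Python 2) metaclass mechanics, simply subclassing
# SourceManager registers the class in SourceManager.plugins, and it can then be
# looked up by its plugin_name_ via get_plugin(). Names below are illustrative,
# and only one of the required methods is shown.
class GitSourceManager(SourceManager):
    plugin_name_ = "git"
    plugin_description_ = "Git-backed source manager (example only)"
    def name(self):
        return self.plugin_name_
# SourceManager.get_plugin("git") would now return GitSourceManager.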
|
StarcoderdataPython
|
24163
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.cloud import monitoring_v3
from google.cloud.monitoring_v3.proto import service_pb2
from google.cloud.monitoring_v3.proto import service_service_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestServiceMonitoringServiceClient(object):
def test_create_service(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
expected_response = {"name": name, "display_name": display_name}
expected_response = service_pb2.Service(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.project_path("[PROJECT]")
service = {}
response = client.create_service(parent, service)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.CreateServiceRequest(
parent=parent, service=service
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.project_path("[PROJECT]")
service = {}
with pytest.raises(CustomException):
client.create_service(parent, service)
def test_get_service(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
expected_response = {"name": name_2, "display_name": display_name}
expected_response = service_pb2.Service(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_path("[PROJECT]", "[SERVICE]")
response = client.get_service(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.GetServiceRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_path("[PROJECT]", "[SERVICE]")
with pytest.raises(CustomException):
client.get_service(name)
def test_list_services(self):
# Setup Expected Response
next_page_token = ""
services_element = {}
services = [services_element]
expected_response = {"next_page_token": next_page_token, "services": services}
expected_response = service_service_pb2.ListServicesResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.project_path("[PROJECT]")
paged_list_response = client.list_services(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.services[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_service_pb2.ListServicesRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_services_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.project_path("[PROJECT]")
paged_list_response = client.list_services(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_update_service(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
expected_response = {"name": name, "display_name": display_name}
expected_response = service_pb2.Service(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
service = {}
response = client.update_service(service)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.UpdateServiceRequest(service=service)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
service = {}
with pytest.raises(CustomException):
client.update_service(service)
def test_delete_service(self):
channel = ChannelStub()
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_path("[PROJECT]", "[SERVICE]")
client.delete_service(name)
assert len(channel.requests) == 1
expected_request = service_service_pb2.DeleteServiceRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_path("[PROJECT]", "[SERVICE]")
with pytest.raises(CustomException):
client.delete_service(name)
def test_create_service_level_objective(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
goal = 317825.0
expected_response = {"name": name, "display_name": display_name, "goal": goal}
expected_response = service_pb2.ServiceLevelObjective(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.service_path("[PROJECT]", "[SERVICE]")
service_level_objective = {}
response = client.create_service_level_objective(
parent, service_level_objective
)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.CreateServiceLevelObjectiveRequest(
parent=parent, service_level_objective=service_level_objective
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.service_path("[PROJECT]", "[SERVICE]")
service_level_objective = {}
with pytest.raises(CustomException):
client.create_service_level_objective(parent, service_level_objective)
def test_get_service_level_objective(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
goal = 317825.0
expected_response = {"name": name_2, "display_name": display_name, "goal": goal}
expected_response = service_pb2.ServiceLevelObjective(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
response = client.get_service_level_objective(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.GetServiceLevelObjectiveRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
with pytest.raises(CustomException):
client.get_service_level_objective(name)
def test_list_service_level_objectives(self):
# Setup Expected Response
next_page_token = ""
service_level_objectives_element = {}
service_level_objectives = [service_level_objectives_element]
expected_response = {
"next_page_token": next_page_token,
"service_level_objectives": service_level_objectives,
}
expected_response = service_service_pb2.ListServiceLevelObjectivesResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.service_path("[PROJECT]", "[SERVICE]")
paged_list_response = client.list_service_level_objectives(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.service_level_objectives[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_service_pb2.ListServiceLevelObjectivesRequest(
parent=parent
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_service_level_objectives_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.service_path("[PROJECT]", "[SERVICE]")
paged_list_response = client.list_service_level_objectives(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_update_service_level_objective(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
goal = 317825.0
expected_response = {"name": name, "display_name": display_name, "goal": goal}
expected_response = service_pb2.ServiceLevelObjective(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
service_level_objective = {}
response = client.update_service_level_objective(service_level_objective)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.UpdateServiceLevelObjectiveRequest(
service_level_objective=service_level_objective
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
service_level_objective = {}
with pytest.raises(CustomException):
client.update_service_level_objective(service_level_objective)
def test_delete_service_level_objective(self):
channel = ChannelStub()
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
client.delete_service_level_objective(name)
assert len(channel.requests) == 1
expected_request = service_service_pb2.DeleteServiceLevelObjectiveRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
with pytest.raises(CustomException):
client.delete_service_level_objective(name)
|
StarcoderdataPython
|
124529
|
<gh_stars>1-10
"""
`EqualizeStrings <http://community.topcoder.com/stat?c=problem_statement&pm=10933>`__
"""
def solution (s, t):
out = ""
for i in range(len(s)):
x = s[i]
y = t[i]
diff = abs(ord(x) - ord(y))
if diff < 26 / 2:
# values are close by, use minimum
out += min([x, y])
else:
# values are far, wrapping always uses 'a'
out += 'a'
return out
|
StarcoderdataPython
|
1693224
|
<gh_stars>1-10
import json
import requests
import configparser
class Spider(object):
def __init__(self, auth_user, auth_pass, degree):
self.step = degree
self.cache = ['https://api.github.com/users/' +
auth_user + '/followers']
self.auth = {
'user': auth_user,
'pass': <PASSWORD>
}
self.db = '../data/github-data.json'
def get_followers(self, url):
params = {
'per_page': 100,
'page': 1
}
data = []
while True:
resp = requests.get(url=url, auth=(
self.auth['user'], self.auth['pass']), params=params)
temp = json.loads(resp.text)
            # rate limit
            if isinstance(temp, dict):
if 'message' in temp:
break
# if not empty, continue request get
if temp:
params['page'] += 1
data += temp
else:
break
return data
def creeper(self, depth):
if depth < 1:
return
# iterate follower in cache, initial cache contains the first user
cache = self.cache[:]
for item in cache:
print('START:' + item)
# fetch all follower under the url
data = self.get_followers(item)
# construct user object
user = {
"url": item,
"followers": data
}
# store the user object into database (json file)
# TODO: optimize here
with open(self.db, "r") as json_file:
db = json.load(json_file)
if user not in db: # only consider new user
db.append(user)
with open(self.db, "w") as json_file:
json_file.write(json.dumps(
db, indent=4, separators=(',', ': ')))
# push all new follower url into CACHE
for follower in data:
self.cache.append(follower["followers_url"])
# clear this item in CACHE, make sure no repetition
while item in self.cache:
self.cache.remove(item)
print('FINISH: ' + item)
print('depth ' + str(depth) + ' finished...')
self.creeper(depth - 1)
def main():
# load config
config = configparser.ConfigParser()
config.read('../data/config.ini')
username = config.get('AUTH', 'username')
password = config.get('AUTH', 'password')
degree = config.get('SPIDER', 'degree')
# create spider object
    spider = Spider(username, password, degree)
    # Presumably the crawl was meant to start here; `degree` comes out of the
    # config file as a string, so cast it before recursing (assumed intent).
    spider.creeper(int(degree))
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3322507
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.document
import typing
from abc import abstractmethod
from ..lang.x_event_listener import XEventListener as XEventListener_c7230c4a
if typing.TYPE_CHECKING:
from .undo_manager_event import UndoManagerEvent as UndoManagerEvent_1c2d0eba
from ..lang.event_object import EventObject as EventObject_a3d70b03
class XUndoManagerListener(XEventListener_c7230c4a):
"""
implemented by components which want to be notified of changes in the Undo/Redo stacks of an Undo manager.
**since**
OOo 3.4
See Also:
`API XUndoManagerListener <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1document_1_1XUndoManagerListener.html>`_
"""
__ooo_ns__: str = 'com.sun.star.document'
__ooo_full_ns__: str = 'com.sun.star.document.XUndoManagerListener'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.document.XUndoManagerListener'
@abstractmethod
def actionRedone(self, iEvent: 'UndoManagerEvent_1c2d0eba') -> None:
"""
is called when the top-most action of the Redo stack has been re-applied.
"""
@abstractmethod
def actionUndone(self, iEvent: 'UndoManagerEvent_1c2d0eba') -> None:
"""
is called when the top-most action of the undo stack has been undone.
"""
@abstractmethod
def allActionsCleared(self, iEvent: 'EventObject_a3d70b03') -> None:
"""
is called when both the Undo and the Redo stack have been cleared from all Undo actions.
"""
@abstractmethod
def cancelledContext(self, iEvent: 'UndoManagerEvent_1c2d0eba') -> None:
"""
is called when an Undo context has been left, but no actions have been added within this context.
In such a case, the context which has just been left will not contribute to the undo stack, but instead be silently removed. Consequently, the UndoManagerEvent.UndoActionTitle is empty.
"""
@abstractmethod
def enteredContext(self, iEvent: 'UndoManagerEvent_1c2d0eba') -> None:
"""
is called when a new Undo context has been entered.
UndoManagerEvent.UndoActionTitle carries the title of the Undo context, and UndoManagerEvent.UndoContextDepth the number of open Undo contexts, including the one just entered.
"""
@abstractmethod
def enteredHiddenContext(self, iEvent: 'UndoManagerEvent_1c2d0eba') -> None:
"""
is called when a new hidden Undo context has been entered.
UndoManagerEvent.UndoActionTitle carries the title of the Undo context, and UndoManagerEvent.UndoContextDepth the number of open Undo contexts, including the one just entered.
"""
@abstractmethod
def leftContext(self, iEvent: 'UndoManagerEvent_1c2d0eba') -> None:
"""
is called when an Undo context has been left.
UndoManagerEvent.UndoActionTitle carries the title of the Undo context, and UndoManagerEvent.UndoContextDepth the number of open Undo contexts, excluding the one just left.
"""
@abstractmethod
def leftHiddenContext(self, iEvent: 'UndoManagerEvent_1c2d0eba') -> None:
"""
is called when a hidden Undo context has been left.
UndoManagerEvent.UndoActionTitle is empty, as hidden Undo contexts don't have a title.
"""
@abstractmethod
def redoActionsCleared(self, iEvent: 'EventObject_a3d70b03') -> None:
"""
is called when the Redo stack has been cleared.
"""
@abstractmethod
def resetAll(self, iEvent: 'EventObject_a3d70b03') -> None:
"""
called when the complete undo manager has been reset
"""
@abstractmethod
def undoActionAdded(self, iEvent: 'UndoManagerEvent_1c2d0eba') -> None:
"""
is called when an undo action is added to the undo stack.
Note that the action must not necessarily be the new top element of the stack: In case there's an open Undo context, UndoManagerEvent.UndoContextDepth will be greater 0, and the newly added action will be subordinate of the context action.
"""
__all__ = ['XUndoManagerListener']
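# --- Added sketch (not part of the generated interface file) ---
# A concrete listener is expected to implement every callback declared above;
# an illustrative subclass might simply log each notification. Only two
# callbacks are shown; the remaining ones would follow the same pattern.
class LoggingUndoListener(XUndoManagerListener):
    def actionUndone(self, iEvent: 'UndoManagerEvent_1c2d0eba') -> None:
        print('undone:', iEvent.UndoActionTitle)
    def actionRedone(self, iEvent: 'UndoManagerEvent_1c2d0eba') -> None:
        print('redone:', iEvent.UndoActionTitle)
    # ...allActionsCleared, enteredContext, leftContext, etc. would go here...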
|
StarcoderdataPython
|
3300582
|
'''
Author: <NAME>
Copyright (c) 2020 <NAME>
'''
class PriceLookup:
def __init__(self):
self.prices = {}
def add_price(self, ticker, price):
if ticker in self.prices:
raise Exception(f"Ticker '{ticker}' was already added")
self.prices[ticker] = price
def get_price(self, ticker):
if ticker not in self.prices:
raise Exception(f"Ticker '{ticker}' has associated price")
return self.prices[ticker]
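# --- Added usage sketch (not part of the original sample) ---
if __name__ == "__main__":
    lookup = PriceLookup()
    lookup.add_price("AAPL", 190.5)
    print(lookup.get_price("AAPL"))  # -> 190.5
    # adding the same ticker twice, or asking for an unknown one, raises Exception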
|
StarcoderdataPython
|
4828971
|
<reponame>J3rome/python-uds
#!/usr/bin/env python
__author__ = "<NAME>"
__copyrights__ = "Copyright 2018, the python-uds project"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from ... import Config
from ... import CanTp
#from ... import LinTp
from ... import TestTp
from os import path
##
# @brief class for creating Tp objects
class TpFactory(object):
configType = ''
configParameters = []
config = None
##
# @brief method to create the different connection types
@staticmethod
def __call__(tpType, configPath=None, **kwargs):
#TpFactory.loadConfiguration(configPath)
if(tpType == "CAN"):
return CanTp(configPath=configPath, **kwargs)
elif(tpType == "DoIP"):
raise NotImplementedError("DoIP transport not currently supported")
elif(tpType == "K-LINE"):
raise NotImplementedError("K-Line Transport not currently supported")
elif(tpType == "LIN"):
return LinTp(configPath=configPath, **kwargs)
elif(tpType == "FLEXRAY"):
raise NotImplementedError("FlexRay Transport not currently supported")
elif(tpType == "TEST"):
return TestTp()
else:
raise Exception("Unknown transport type selected")
@staticmethod
def loadConfiguration(configPath=None):
#load the base config
baseConfig = path.dirname(__file__) + "/config.ini"
config = Config()
if path.exists(baseConfig):
config.read(baseConfig)
else:
raise FileNotFoundError("No base config file")
# check the config path
if configPath is not None:
if path.exists(configPath):
config.read(configPath)
else:
raise FileNotFoundError("specified config not found")
TpFactory.config = config
if __name__ == "__main__":
pass
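# --- Added usage sketch (not part of the original module) ---
# __call__ is a staticmethod, so a TpFactory instance is used like a function;
# the "TEST" transport needs no configuration. Shown as comments because this
# module uses package-relative imports and is not meant to run standalone:
#
#     factory = TpFactory()
#     tp = factory("TEST")                                 # returns a TestTp instance
#     can_tp = factory("CAN", configPath="myConfig.ini")   # CanTp; path is illustrative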
|
StarcoderdataPython
|
1650624
|
<filename>rewards/snapshot/nft_snapshot.py
from decimal import Decimal, DecimalException
from typing import Dict
from helpers.constants import BADGER
from helpers.enums import BalanceType, Network
from rewards.classes.Snapshot import Snapshot
from rewards.utils.emission_utils import get_nft_weight
from subgraph.queries.nfts import fetch_nfts
def nft_snapshot(chain: Network, block: int) -> Snapshot:
nfts = fetch_nfts(chain, block)
bals = {}
for user, nft_balances in nfts.items():
for nft_balance in nft_balances:
nft_address = nft_balance["address"]
nft_id = nft_balance["id"]
bals[user] = bals.get(user, 0) + get_nft_weight(chain, nft_address, nft_id)
return Snapshot(BADGER, bals, ratio=1, type=BalanceType.Native)
def nft_snapshot_usd(chain: Network, block: int) -> Dict[str, Decimal]:
return nft_snapshot(chain, block).convert_to_usd(chain).balances
|
StarcoderdataPython
|
1671188
|
# Standard Library
import argparse
import random
import time
import uuid
# Third Party
import mxnet as mx
import numpy as np
from mxnet import autograd, gluon, init
from mxnet.gluon import nn
from mxnet.gluon.data.vision import datasets, transforms
# First Party
from smdebug.mxnet import Hook, SaveConfig, modes
def parse_args():
parser = argparse.ArgumentParser(
description="Train a mxnet gluon model for FashonMNIST dataset"
)
parser.add_argument("--batch-size", type=int, default=256, help="Batch size")
parser.add_argument(
"--output-uri",
type=str,
default=f"s3://smdebug-testing/outputs/basic-mxnet-hook-{uuid.uuid4()}",
help="S3 URI of the bucket where tensor data will be stored.",
)
parser.add_argument(
"--smdebug_path",
type=str,
default=None,
help="S3 URI of the bucket where tensor data will be stored.",
)
parser.add_argument("--learning_rate", type=float, default=0.1)
parser.add_argument("--random_seed", type=bool, default=False)
parser.add_argument(
"--num_steps",
type=int,
help="Reduce the number of training "
"and evaluation steps to the give number if desired."
"If this is not passed, trains for one epoch "
"of training and validation data",
)
opt = parser.parse_args()
return opt
def acc(output, label):
return (output.argmax(axis=1) == label.astype("float32")).mean().asscalar()
def train_model(batch_size, net, train_data, valid_data, lr, hook, num_steps=None):
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": lr})
# Start the training.
for epoch in range(1):
train_loss, train_acc, valid_acc = 0.0, 0.0, 0.0
tic = time.time()
hook.set_mode(modes.TRAIN)
for i, (data, label) in enumerate(train_data):
if num_steps is not None and num_steps < i:
break
data = data.as_in_context(mx.cpu(0))
# forward + backward
with autograd.record():
output = net(data)
loss = softmax_cross_entropy(output, label)
loss.backward()
# update parameters
trainer.step(batch_size)
# calculate training metrics
train_loss += loss.mean().asscalar()
train_acc += acc(output, label)
# calculate validation accuracy
hook.set_mode(modes.EVAL)
for i, (data, label) in enumerate(valid_data):
if num_steps is not None and num_steps < i:
break
data = data.as_in_context(mx.cpu(0))
valid_acc += acc(net(data), label)
print(
"Epoch %d: loss %.3f, train acc %.3f, test acc %.3f, in %.1f sec"
% (
epoch,
train_loss / len(train_data),
train_acc / len(train_data),
valid_acc / len(valid_data),
time.time() - tic,
)
)
def prepare_data(batch_size):
mnist_train = datasets.FashionMNIST(train=True)
X, y = mnist_train[0]
("X shape: ", X.shape, "X dtype", X.dtype, "y:", y)
text_labels = [
"t-shirt",
"trouser",
"pullover",
"dress",
"coat",
"sandal",
"shirt",
"sneaker",
"bag",
"ankle boot",
]
X, y = mnist_train[0:10]
transformer = transforms.Compose([transforms.ToTensor(), transforms.Normalize(0.13, 0.31)])
mnist_train = mnist_train.transform_first(transformer)
train_data = gluon.data.DataLoader(
mnist_train, batch_size=batch_size, shuffle=True, num_workers=4
)
mnist_valid = gluon.data.vision.FashionMNIST(train=False)
valid_data = gluon.data.DataLoader(
mnist_valid.transform_first(transformer), batch_size=batch_size, num_workers=4
)
return train_data, valid_data
# Create a model using the Gluon API. The hook currently
# supports MXNet Gluon models only.
def create_gluon_model():
# Create Model in Gluon
net = nn.HybridSequential()
net.add(
nn.Conv2D(channels=6, kernel_size=5, activation="relu"),
nn.MaxPool2D(pool_size=2, strides=2),
nn.Conv2D(channels=16, kernel_size=3, activation="relu"),
nn.MaxPool2D(pool_size=2, strides=2),
nn.Flatten(),
nn.Dense(120, activation="relu"),
nn.Dense(84, activation="relu"),
nn.Dense(10),
)
net.initialize(init=init.Xavier(), ctx=mx.cpu())
return net
# Create a hook. The initialization of hook determines which tensors
# are logged while training is in progress.
# Following function shows the default initialization that enables logging of
# weights, biases and gradients in the model.
def create_hook(output_s3_uri):
# With the following SaveConfig, we will save tensors for steps 1, 2 and 3
# (indexing starts with 0).
save_config = SaveConfig(save_steps=[1, 2, 3])
# Create a hook that logs weights, biases and gradients while training the model.
hook = Hook(
out_dir=output_s3_uri,
save_config=save_config,
include_collections=["weights", "gradients", "biases"],
)
return hook
def main():
opt = parse_args()
    # These random seeds are only intended for test purposes.
    # For now, 128, 12 and 2 should produce no assert failures when running tests.
    # If you change these numbers, note that certain steps' tensor values may vary.
if opt.random_seed:
mx.random.seed(128)
random.seed(12)
np.random.seed(2)
# Create a Gluon Model.
net = create_gluon_model()
# Create a hook for logging the desired tensors.
    # The output_s3_uri is the URI of the S3 bucket where the tensors will be saved.
# The trial_id is used to store the tensors from different trials separately.
output_uri = opt.smdebug_path if opt.smdebug_path is not None else opt.output_uri
hook = create_hook(output_uri)
net.hybridize()
# Register the hook to the top block.
hook.register_hook(net)
# Start the training.
batch_size = opt.batch_size
train_data, valid_data = prepare_data(batch_size)
train_model(batch_size, net, train_data, valid_data, opt.learning_rate, hook, opt.num_steps)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1642401
|
<filename>tests/test_search_boring.py
"""Module grouping tests for the boring search module."""
import datetime
import pytest
from owslib.fes import PropertyIsEqualTo
from pydov.search.boring import BoringSearch
from pydov.types.boring import Boring
from pydov.util import owsutil
from tests.abstract import (
AbstractTestSearch,
)
from tests.test_search import (
mp_wfs,
wfs,
mp_remote_md,
mp_remote_fc,
mp_remote_describefeaturetype,
mp_remote_wfs_feature,
mp_dov_xml,
mp_dov_xml_broken,
wfs_getfeature,
wfs_feature,
)
location_md_metadata = 'tests/data/types/boring/md_metadata.xml'
location_fc_featurecatalogue = \
'tests/data/types/boring/fc_featurecatalogue.xml'
location_wfs_describefeaturetype = \
'tests/data/types/boring/wfsdescribefeaturetype.xml'
location_wfs_getfeature = 'tests/data/types/boring/wfsgetfeature.xml'
location_wfs_feature = 'tests/data/types/boring/feature.xml'
location_dov_xml = 'tests/data/types/boring/boring.xml'
@pytest.fixture
def md_metadata(wfs, mp_remote_md):
"""PyTest fixture providing a MD_Metadata instance of the
dov-pub:Boringen layer.
Parameters
----------
wfs : pytest.fixture returning owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
mp_remote_md : pytest.fixture
Monkeypatch the call to get the remote metadata of the
dov-pub:Boringen layer.
Returns
-------
owslib.iso.MD_Metadata
Parsed metadata describing the Boringen WFS layer in more detail,
in the ISO 19115/19139 format.
"""
contentmetadata = wfs.contents['dov-pub:Boringen']
return owsutil.get_remote_metadata(contentmetadata)
class TestBoringSearch(AbstractTestSearch):
def get_search_object(self):
"""Get an instance of the search object for this type.
Returns
-------
pydov.search.boring.BoringSearch
Instance of BoringSearch used for searching.
"""
return BoringSearch()
def get_type(self):
"""Get the class reference for this datatype.
Returns
-------
pydov.types.boring.Boring
Class reference for the Boring class.
"""
return Boring
def get_valid_query_single(self):
"""Get a valid query returning a single feature.
Returns
-------
owslib.fes.OgcExpression
OGC expression of the query.
"""
return PropertyIsEqualTo(propertyname='boornummer',
literal='GEO-04/169-BNo-B1')
def get_inexistent_field(self):
"""Get the name of a field that doesn't exist.
Returns
-------
str
The name of an inexistent field.
"""
return 'onbestaand'
def get_xml_field(self):
"""Get the name of a field defined in XML only.
Returns
-------
str
The name of the XML field.
"""
return 'boormethode'
def get_valid_returnfields(self):
"""Get a list of valid return fields from the main type.
Returns
-------
tuple
A tuple containing only valid return fields.
"""
return ('pkey_boring', 'boornummer', 'diepte_boring_tot',
'datum_aanvang')
def get_valid_returnfields_subtype(self):
"""Get a list of valid return fields, including fields from a subtype.
Returns
-------
tuple
A tuple containing valid return fields, including fields from a
subtype.
"""
return ('pkey_boring', 'boornummer', 'diepte_methode_van',
'diepte_methode_tot')
def get_valid_returnfields_extra(self):
"""Get a list of valid return fields, including extra WFS only
fields not present in the default dataframe.
Returns
-------
tuple
A tuple containing valid return fields, including extra fields
from WFS, not present in the default dataframe.
"""
return ('pkey_boring', 'doel')
def get_df_default_columns(self):
"""Get a list of the column names (and order) from the default
dataframe.
Returns
-------
list
A list of the column names of the default dataframe.
"""
return ['pkey_boring', 'boornummer', 'x', 'y', 'mv_mtaw',
'start_boring_mtaw', 'gemeente',
'diepte_boring_van', 'diepte_boring_tot',
'datum_aanvang', 'uitvoerder', 'boorgatmeting',
'diepte_methode_van', 'diepte_methode_tot',
'boormethode']
def test_search_date(self, mp_wfs, mp_remote_describefeaturetype,
mp_remote_md, mp_remote_fc, mp_remote_wfs_feature,
mp_dov_xml):
"""Test the search method with only the query parameter.
Test whether the result is correct.
Parameters
----------
mp_wfs : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
mp_remote_describefeaturetype : pytest.fixture
Monkeypatch the call to a remote DescribeFeatureType.
mp_remote_md : pytest.fixture
Monkeypatch the call to get the remote metadata.
mp_remote_fc : pytest.fixture
Monkeypatch the call to get the remote feature catalogue.
mp_remote_wfs_feature : pytest.fixture
Monkeypatch the call to get WFS features.
mp_dov_xml : pytest.fixture
Monkeypatch the call to get the remote XML data.
"""
df = self.get_search_object().search(
query=self.get_valid_query_single())
# specific test for the Zulu time wfs 1.1.0 issue
assert df.datum_aanvang.unique()[0] == datetime.date(2004, 12, 20)
def test_search_nan(self, mp_wfs, mp_remote_describefeaturetype,
mp_remote_md, mp_remote_fc, mp_remote_wfs_feature,
mp_dov_xml):
"""Test the search method with only the query parameter.
Test whether the result is correct.
Parameters
----------
mp_wfs : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
mp_remote_describefeaturetype : pytest.fixture
Monkeypatch the call to a remote DescribeFeatureType.
mp_remote_md : pytest.fixture
Monkeypatch the call to get the remote metadata.
mp_remote_fc : pytest.fixture
Monkeypatch the call to get the remote feature catalogue.
mp_remote_wfs_feature : pytest.fixture
Monkeypatch the call to get WFS features.
mp_dov_xml : pytest.fixture
Monkeypatch the call to get the remote XML data.
"""
df = self.get_search_object().search(
query=self.get_valid_query_single())
assert df.mv_mtaw.hasnans
def test_search_xmlresolving(self, mp_remote_describefeaturetype,
mp_remote_wfs_feature, mp_dov_xml):
"""Test the search method with return fields from XML but not from a
subtype.
Test whether the output dataframe contains the resolved XML data.
Parameters
----------
mp_remote_describefeaturetype : pytest.fixture
Monkeypatch the call to a remote DescribeFeatureType.
mp_remote_wfs_feature : pytest.fixture
Monkeypatch the call to get WFS features.
mp_dov_xml : pytest.fixture
Monkeypatch the call to get the remote XML data.
"""
df = self.get_search_object().search(
query=self.get_valid_query_single(),
return_fields=('pkey_boring', 'boornummer', 'boorgatmeting'))
assert not df.boorgatmeting[0]
|
StarcoderdataPython
|
1788736
|
<reponame>pepincho/Python101-and-Algo1-Courses
import sys
def sum_numbers():
my_file = open(sys.argv[1], "r")
numbers = my_file.read().split(' ')
numbers_int = [int(x) for x in numbers]
sum_numbers = sum(numbers_int)
my_file.close()
return sum_numbers
def main():
print (sum_numbers())
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
15448
|
import unittest
from typing import Any
from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task
from coiny.utils import NullCoinPrice
class HasJson:
def __init__(self, data) -> None:
self.data = data
async def __aenter__(self):
return self
async def __aexit__(self, *args, **kwargs):
pass
async def json(self):
return self.data
class PriceTaskTests(unittest.IsolatedAsyncioTestCase):
async def test_price_task_empty_queue(self):
queue = CoinyQueue()
session = CoinySession()
result = await price_task(queue, session)
self.assertEqual(NullCoinPrice, result)
async def test_price_task_queue(self):
class NoGetSession(CoinySession):
"""HACK: Not a good idea to inherit from CoinySession"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.mock_url = ""
def get(
self, url: str, *, allow_redirects: bool = True, **kwargs: Any
) -> HasJson:
self.mock_url = f"called:{url}"
return HasJson({"mycoin": {"XYZ": 3.4}})
queue = CoinyQueue()
await queue.put(("mycoin", "XYZ", "https://myurl"))
async with NoGetSession() as session:
result = await price_task(queue, session)
expected = CoinPrice(fiat="XYZ", coin="mycoin", rate=3.4)
self.assertEqual(expected, result)
self.assertEqual("called:https://myurl", session.mock_url)
async def test_price_task_mock_eth(self):
mock_url = "https://run.mocky.io/v3/09750cfe-39a5-4d31-9651-2292765a8fe3"
# returns -> {"ethereum": {"eur": 3295.23}}
queue = CoinyQueue()
await queue.put(("ethereum", "eur", mock_url))
async with CoinySession() as session:
result = await price_task(queue, session)
expected = CoinPrice(fiat="eur", coin="ethereum", rate=3295.23)
self.assertEqual(expected, result)
async def test_price_task_mock_eth_invalid(self):
mock_url = "https://run.mocky.io/v3/09750cfe-39a5-4d31-9651-2292765a8fe3"
queue = CoinyQueue()
await queue.put(("bitcoin", "gbp", mock_url))
async with CoinySession() as session:
result = await price_task(queue, session)
self.assertEqual(NullCoinPrice, result)
async def test_price_task_real_eth(self):
queue = CoinyQueue()
await queue.put(("ethereum", "eur", price_now_url("ethereum", "eur")))
async with CoinySession() as session:
result = await price_task(queue, session)
# no way to test the live price of course
half_expected = CoinPrice(fiat="eur", coin="ethereum", rate=0.0)
self.assertEqual(half_expected.fiat, result.fiat)
self.assertEqual(half_expected.coin, result.coin)
__all__ = ["PriceTaskTests"]
#!/usr/bin/env python
# coding: utf-8
# In[3]:
# DEMO OF WEAK TYPING in PYTHON
# int age = 4  <- strongly typed languages declare the type along with the identifier
age = 4  # in Python we CANNOT declare a type for the variable age
print(age)
print(type(age))
age = ('calico', 'calico', 'himalayan')
print(age)
print(type(age))
# # Allegheny county EMS dispatch analysis
# In[ ]:
# HIGH LEVEL GOAL:
# Source: WPRDC Data set on EMS/Fire dispatch via
# https://data.wprdc.org/dataset/allegheny-county-911-dispatches-ems-and-fire/resource/ff33ca18-2e0c-4cb5-bdcd-60a5dc3c0418?view_id=5007870f-c48b-4849-bb25-3e46c37f2dc7
# Determine the rate of redacted call descriptions across
# EMS dispatches
# Has the rate of redaction changed year over year?
# if so, how?
# TODO: download CSV file from WPRDC into a raw data directory
# Review the fields in the file on WPRDC
# In[ ]:
# Raw input: CSV file containing a header row of column names
# and 1 or more rows, each row representing a single EMS dispatch
# in Allegheny County in year X
# In[ ]:
def iterate_EMS_records(file_path):
'''
Retrieve each record from a CSV of EMS records from the filepath
Intended for use with the WPRDC's record of EMS dispatches
in Allegheny County and will provide a dictionary of each record
for use by processing functions
'''
# Open file at filepath
# Use for loop over each record
# In[ ]:
def test_for_redacted_description(ems_rec):
'''
Examine EMS dispatch record and look for redacted or blank
descriptions
'''
# In[ ]:
# Based on record check, increment count by year
def red_year_total(redaction_year):
'''
Maintains a dictionary of counts by year passed when called
Assumes that each call corresponds with a single record
in the EMS dispatch data set, so a call with input of '2019'
means, add 1 to the 2019 total of redacted records
'''
# In[ ]:
# Based on record check, write record ID to log file
def write_redacted_rec_to_log(ems_rec):
'''
Extract record ID and write to log file specific in global dict
'''
# In[ ]:
def display_redaction_count_by_year(year_counts):
'''
Given a dictionary of year(key):['total','redactions']
make a pretty output to the console
'''
# In[ ]:
# Desired output
# 1) Dictionary of format: { year:count_of_removed_records}
# 2) Text file whose rows are the record IDs of EMS
# dispatches whose description was removed/redacted
from typing import List
class Solution:
def findOcurrences(self, text: str, first: str, second: str) -> List[str]:
ls = text.split()
return [c for a, b, c in zip(ls, ls[1:], ls[2:]) if a == first and b == second]
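# Example (for illustration): with text = "alice is a good girl she is a good student",
# first = "a" and second = "good", findOcurrences returns ["girl", "student"].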
from typing import List  # List is used in the annotations below but was not imported
class Solution:
def fourSum(self, nums: List[int], target: int) -> List[List[int]]:
return self.kSum(sorted(nums), target, 4)
def kSum(self, nums: List[int], target: int, k: int) -> List[List[int]]:
if not nums or nums[0] * k > target or target > nums[-1] * k: return []
if k == 2: return self.twoSum(nums, target)
res = []
for i in range(len(nums)):
if i > 0 and nums[i] == nums[i - 1]: continue
for sub_sum in self.kSum(nums[i + 1:], target - nums[i], k - 1):
res.append([nums[i]] + sub_sum)
return res
def twoSum(self, nums: List[int], target: int) -> List[List[int]]:
res = []
s = set()
for i in range(len(nums)):
if (not res or res[-1][1] != nums[i]) and target - nums[i] in s:
res.append([target - nums[i], nums[i]])
s.add(nums[i])
return res
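# Example (for illustration): Solution().fourSum([1, 0, -1, 0, -2, 2], 0) returns
# [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]] (the order of quadruplets may vary).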
# File: gpsr_command_understanding/models/command_predictor.py
from typing import List
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
@Predictor.register('command_parser')
class CommandParser(Predictor):
"""Predictor wrapper for the CommandParser"""
def predict_text(self, text: str) -> JsonDict:
return self.predict_instance(self._dataset_reader.text_to_instance(source_string=text))
def predict_instance(self, instance: Instance) -> JsonDict:
self._model.vocab.extend_from_instances([instance])
# Pretrained transformer embedders don't have an extend method, so this won't do anything to them
self._model.extend_embedder_vocab({
'_source_embedder.token_embedder_source_tokens': 'https://s3-us-west-2.amazonaws.com/allennlp/datasets/glove/glove.6B.100d.txt.gz'})
outputs = self._model.forward_on_instance(instance)
out_dict = sanitize(outputs)
digest = " ".join(out_dict["predicted_tokens"])
out_dict["digest"] = digest
return out_dict
def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]:
outputs = self._model.forward_on_instances(instances)
out_dict = sanitize(outputs)
for i, pred in enumerate(out_dict):
digest = " ".join(out_dict[i]["predicted_tokens"])
out_dict[i]["digest"] = digest
return out_dict
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
command = json_dict['command']
return self._dataset_reader.text_to_instance(source_string=command)
import argparse
import os
from pathlib import Path
from GUM_Dispenser.GUM_Exceptions import InvalidSourcePathError, ConfigurationNotFoundError, PackageNotFoundError
from GUM_Dispenser.GUM_Exceptions import SourceModuleNotFoundError, UserConfirmedInvalidSetup
from GUM_Dispenser.GUM_setup_parser import parse_setup
from GUM_Dispenser.GUM_Describe_Source import describe_project
from GUM_Dispenser.GUM_Generate_NOMNOML import generate_project_nomnoml
import logging
def define_arguments() -> 'ArgumentParser':
"""Define command line arguments for GUM Dispenser"""
arg_parser = argparse.ArgumentParser(description="Generates nomnoml from a source package")
    arg_parser.add_argument('--path', '-p', help='The path to the base development directory'
                            + ' of your source code, or to a package folder in it. Default is the current working directory',
                            default=os.getcwd())
    arg_parser.add_argument('--setup_file', '-s', help='The path to the setup.py file. '
                            + 'If given, ignore any setup.py file found at --path and its parent in favor of this file. '
                            + 'If not given, checks the value of --path and its immediate parent for setup.py',
                            default=None)
arg_parser.add_argument('--debug', help='Flag to display debug level messages during execution',
action='store_true')
return arg_parser
def initialize_log(arguments_received : dict) -> None:
"""Set up logging components and bind them together"""
# Use our custom formatter
main_formatter = logging.Formatter(fmt='%(asctime)s %(module)s.py: %(levelname)s - %(message)s')
# Set up our handler
main_handler = logging.StreamHandler()
level_string = ''
if arguments_received['debug']:
level_string = 'DEBUG'
main_handler.setLevel(level_string)
else:
level_string = 'INFO'
main_handler.setLevel(level_string)
main_handler.setFormatter(main_formatter)
# Set up our logger
main_logger = logging.getLogger('GUM Dispenser')
    # both branches set the same level, so a single call is sufficient
    main_logger.setLevel(level_string)
main_logger.addHandler(main_handler)
logging.getLogger('GUM Dispenser').info('Welcome to GUM Dispenser!')
logging.getLogger('GUM Dispenser').info('Logging level: ' + level_string)
def check_for_setup(arguments_received: dict) -> str:
"""Get the path to a nearby or specified setup.py file as a string"""
setup_path_str = None
# Check user supplied setup path if it is present
if arguments_received['setup_file'] is not None and arguments_received['setup_file'] != '':
setup_path_str = arguments_received['setup_file']
setup_path = Path(setup_path_str)
if setup_path.exists():
# Find a setup.py file at or near the given path
if setup_path.name == 'setup.py':
logging.getLogger('GUM Dispenser').info('Setup found in given directory: ' + str(setup_path.parent))
setup_path_str = str(setup_path.parent)
elif setup_path.is_dir():
if Path(setup_path).joinpath('setup.py').exists():
logging.getLogger('GUM Dispenser').info("Setup found in given directory: " + str(setup_path))
else:
logging.getLogger('GUM Dispenser').error('Given explicit setup.py path ' + str(setup_path) +
' does not exist. Checking near development directory instead...')
setup_path_str = None
# User did not specify setup.py location or no file found near given path
if setup_path_str is None:
logging.getLogger('GUM Dispenser').info('No explicit input directory for setup.py. ' +
'Checking near development directory...')
# Check given directory and one level up for the setup file
if Path(arguments_received['path']).joinpath('setup.py').exists():
logging.getLogger('GUM Dispenser').info("Setup found in base development directory: " +
arguments_received['path'])
setup_path_str = arguments_received['path']
elif Path(arguments_received['path']).parent.joinpath('setup.py').exists():
logging.getLogger('GUM Dispenser').info("Setup found in base development directory: " +
str(Path(arguments_received['path']).parent))
setup_path_str = str(Path(arguments_received['path']).parent)
# Require presence of a setup.py file
if setup_path_str is None:
raise ConfigurationNotFoundError
return setup_path_str
def dispense_gum(arguments_received: dict) -> None:
try:
development_directory = Path(arguments_received['path']).resolve() # Expand symbolic links
# Input should be a directory
if not development_directory.exists() or not development_directory.is_dir():
raise InvalidSourcePathError
setup_path = check_for_setup(arguments_received)
# Read list of attributes and values used in setup.py, ignoring comments
setup_distro_defs = parse_setup(setup_path)
logging.getLogger('GUM Dispenser').debug(setup_distro_defs)
# Get a dictionary full of relevant data for UML text generation
uml_data = describe_project(setup_distro_defs, development_directory)
logging.getLogger('GUM Dispenser').debug(uml_data)
print(generate_project_nomnoml(uml_data, setup_distro_defs['entry_points']))
except InvalidSourcePathError as err:
logging.getLogger('GUM Dispenser').exception("The given source path was not found on your system.\n" +
"Choose a valid path or change " +
"your current directory to your local source package.")
except ConfigurationNotFoundError as err:
logging.getLogger('GUM Dispenser').exception("We couldn't find a complete project definition" +
" in your specified setup.py file.\n" +
"Specify either packages or modules in a setup.py file " +
"near your given source code path.")
except PackageNotFoundError as err:
logging.getLogger('GUM Dispenser').exception("The package " + str(err) +
" specified in your setup.py doesn't exist or is not a directory.")
except SourceModuleNotFoundError as err:
logging.getLogger('GUM Dispenser').exception('Missing or bad path for .py source file\n')
except UserConfirmedInvalidSetup as err:
logging.getLogger('GUM Dispenser').exception('User requested program termination. Goodbye')
except Exception as err:
logging.getLogger('GUM Dispenser').exception('Error: ' + str(err))
def main():
input_parser = define_arguments()
arguments_received = input_parser.parse_args()
initialize_log(vars(arguments_received))
dispense_gum(vars(arguments_received))
from PyQt5.QtWidgets import QPushButton, QLineEdit, QMessageBox, QGridLayout, QLabel, QListWidget
from PyQt5 import QtWidgets
from PyQt5 import uic
from src.controllers import MainController
from src.assets.Label import Label
class Setup(QtWidgets.QDialog):
controller : MainController
def __init__(self,controller,parent=None):
super().__init__(parent)
self.controller = controller
self.init_ui()
def init_ui(self):
dialog = uic.loadUi("src/views/Setup.ui", self)
dialog.show()
self.cheminLocalLineEdit.setText(self.controller.destinationPath)
self.pushButton_validate.pressed.connect(self.validate_changes)
self.pushButton_cancel.pressed.connect(self.cancel_changes)
def cancel_changes(self):
self.close()
def validate_changes(self):
if self.radioButton_remote.isChecked():
ftpUrl = self.uRLDistanteFtpLineEdit.text()
ftpUsername = self.nomDUtilisateurLineEdit.text()
ftpPassword = self.motDePasseLineEdit.text()
ftpDirectory = self.repertoireLineEdit.text()
self.controller.create_ftp_connection(ftpUrl,ftpUsername,ftpPassword)
self.controller.ftpController.set_directory(ftpDirectory)
else:
localPath = self.cheminLocalLineEdit.text()
self.controller.end_ftp_connection()
self.controller.destinationPath = localPath
self.close()
        # to validate: call the configure function of the MainController
        # pass these values as parameters
        # modify the file manager for remote mode and local mode
from ex1.expense_app.models import Expense
from ex1.profile_app.models import Profile
def get_profile():
return Profile.objects.first()
def get_budget_left():
user = Profile.objects.first()
expenses = Expense.objects.all()
result = user.budget - sum([ex.price for ex in expenses])
return result
#!/usr/bin/env python3
# compare representations of versioned items in OCaml files in a Github pull request
import os
import sys
import shutil
import subprocess
exit_code = 0
def run_comparison(base_commit, compare_script):
cwd = os.getcwd()
# create a copy of the repo at base branch
if os.path.exists('base'):
shutil.rmtree('base')
os.mkdir('base')
os.chdir('base')
# it would be faster to do a clone of the local repo, but there's "smudge error" (?)
subprocess.run(['git', 'clone', '<EMAIL>:CodaProtocol/coda.git'])
os.chdir('coda')
subprocess.run(['git', 'checkout', base_commit])
os.chdir(cwd)
# changed files in the PR
diffs_raw = subprocess.check_output(
['git', 'diff', '--name-only', base_commit])
diffs_decoded = diffs_raw.decode('UTF-8')
diffs = diffs_decoded.split('\n')
for diff in diffs:
fn = os.path.basename(diff)
if not fn.endswith('.ml'):
continue
orig = 'base/coda/' + diff
# don't compare if file added or deleted
if not (os.path.exists(orig) and os.path.exists(diff)):
continue
completed_process = subprocess.run(
['./scripts/' + compare_script, orig, diff])
if not completed_process.returncode == 0:
global exit_code
exit_code = 1
sys.exit(exit_code)
"""
Book: Building RESTful Python Web Services
Chapter 3: Improving and adding authentication to an API with Django
Author: <NAME> - Twitter.com/gastonhillar
Publisher: Packt Publishing Ltd. - http://www.packtpub.com
"""
from django.contrib.auth.models import User
user = User.objects.create_user('kevin', '<EMAIL>', '<PASSWORD>')
user.save()
from .action_scheme import ActionScheme, DTypeString, TradeActionUnion
from .continuous_actions import ContinuousActions
from .discrete_actions import DiscreteActions
from .multi_discrete_actions import MultiDiscreteActions
# Dictionary of registered trading action schemes
_registry = {
'continuous': ContinuousActions,
'discrete': DiscreteActions,
'multi-discrete': MultiDiscreteActions,
}
def get(identifier: str) -> ActionScheme:
"""Gets the `ActionScheme` that matches with the identifier.
    Looks up the corresponding action scheme by its identifier.
Arguments:
identifier: The identifier for the `ActionScheme`
Raises:
KeyError: if identifier is not associated with any `ActionScheme`
"""
if identifier not in _registry.keys():
raise KeyError(f'Identifier {identifier} is not associated with any `ActionScheme`.')
return _registry[identifier]()
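# Usage sketch (illustrative):
#   scheme = get('discrete')   # returns a DiscreteActions instance
#   get('unknown-scheme')      # raises KeyError with an explanatory message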
# File: pathutils/utils.py
"""utils.py
Various utilities
"""
import operator
import webbrowser
import pandas as pd
def sorted_dict_items(d, reverse=False):
"""Sorted (key, value) pairs by value.
"""
result = sorted(d.items(), key=operator.itemgetter(1))
if reverse:
return result[::-1]
else:
return result
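# Example: sorted_dict_items({'a': 3, 'b': 1}) returns [('b', 1), ('a', 3)];
# with reverse=True the order is [('a', 3), ('b', 1)].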
def get_beaker_lookup(UserId: str, SessionId: str, OrgId: str=None, is_staging: bool=False) -> dict:
if is_staging:
if OrgId is None:
session_url_template = "https://app.staging.fullstory.com/ui/thefullstory.com/session/{UserId}:{SessionId}"
scope_url_template = "https://app.staging.fullstory.com/admin/s/scope/scope.html?OrgId=thefullstory.com&UserId={UserId}&SessionId={SessionId}"
return {"session_url": session_url_template.format(UserId=UserId, SessionId=SessionId),
"scope_url": scope_url_template.format(UserId=UserId, SessionId=SessionId)}
else:
session_url_template = "https://app.staging.fullstory.com/ui/{OrgId}/session/{UserId}:{SessionId}"
scope_url_template = "https://app.staging.fullstory.com/admin/s/scope/scope.html?OrgId={OrgId}&UserId={UserId}&SessionId={SessionId}"
return {"session_url": session_url_template.format(OrgId=OrgId, UserId=UserId, SessionId=SessionId),
"scope_url": scope_url_template.format(OrgId=OrgId, UserId=UserId, SessionId=SessionId)}
else:
if OrgId is None:
session_url_template = "https://app.fullstory.com/ui/thefullstory.com/session/{UserId}:{SessionId}"
scope_url_template = "https://app.fullstory.com/admin/s/scope/scope.html?OrgId=thefullstory.com&UserId={UserId}&SessionId={SessionId}"
return {"session_url": session_url_template.format(UserId=UserId, SessionId=SessionId),
"scope_url": scope_url_template.format(UserId=UserId, SessionId=SessionId)}
else:
session_url_template = "https://app.fullstory.com/ui/{OrgId}/session/{UserId}:{SessionId}"
scope_url_template = "https://app.fullstory.com/admin/s/scope/scope.html?OrgId={OrgId}&UserId={UserId}&SessionId={SessionId}"
return {"session_url": session_url_template.format(OrgId=OrgId, UserId=UserId, SessionId=SessionId),
"scope_url": scope_url_template.format(OrgId=OrgId, UserId=UserId, SessionId=SessionId)}
def pseudo_beaker(UserId: str, SessionId: str, replay=True, scope=True, browser=None, OrgId: str=None, is_staging: bool=True) -> dict:
"""
Mimic the Beaker admin tool in opening up one or both of session replay and
Scope tools for a given User Id and Session Id.
Option to specify a browser (e.g. "safari", "chrome") otherwise the system
default is used.
"""
url_dict = get_beaker_lookup(UserId, SessionId, OrgId, is_staging)
if browser is None:
w = webbrowser
else:
w = webbrowser.get(browser)
if replay:
w.open_new(url_dict["session_url"])
if scope:
w.open_new(url_dict["scope_url"])
return url_dict
def get_sessions(events_df: pd.DataFrame) -> list:
"""
    Obtain a list of distinct session ids present in the input dataframe
that has been multi-indexed by `preproc_events`.
Input:
events_df: dataframe of events
Output:
list of distinct session ids present in the input
"""
# get_level_values(0) only pulls the `sid` first part of the multi-index,
# at position 0. The second index (at position 1) is the time-ordered
# position
return list(set(events_df.index.get_level_values(0)))
def preproc_events(events_df: pd.DataFrame) -> pd.DataFrame:
"""
Input:
events_df: dataframe imported from BigQuery
Output:
Same dataframe with additional columns and datetime format for event time
events_df is re-indexed according to
unique sessions (sid, i), where `sid` = (UserId + SessionId) as a
concatenated string, and `i`, the original integer index.
The unique session index is also added as the `distinct_session_id`
column in the dataframe. Get an iterable sequence of the session ids
using `get_sessions`.
Event start times are transformed to DateTime from string.
"""
events_df["distinct_session_id"] = events_df["UserId"].astype(
str
) + events_df["SessionId"].astype(str)
# Time
events_df["EventStart"] = pd.to_datetime(events_df["EventStart"])
# create a parent/child relationship: distinct_session_id (parent) --> index (child)
events_df.set_index(
pd.MultiIndex.from_arrays(
[ pd.Index(events_df["distinct_session_id"]), events_df.index ],
names=("sid", "i"),
),
inplace=True,
)
# Note that `events_df.index.get_level_values(0)` will have repeats of the
# unique session IDs over their corresponding rows, and so
# it's of length `len(events_df)` not `len(unique_session_ids)`
# sort event times per session
events_df.sort_values(["sid", "EventStart"], ascending=[1, 1], inplace=True)
    # create a proper incrementing integer index for each session, moving the unique
    # `i` index level to a column
events_df["idx"] = events_df.groupby("sid").cumcount()
return events_df.reset_index().set_index(["sid", "idx"])
def filter_events(
events_df: pd.DataFrame, org=None, session=None, start_time=None
) -> pd.DataFrame:
"""
Inputs:
events_df: dataframe with multi-index
org: OrgId string or sequence of these
session: UserId+SessionId string or sequence of these
start_time: Singleton or sequence of pairs (t0, t1) bounding
first event time of a session, where times are given
in the format 'YYYY-MM-DD HH:MM:SS UTC'
Output:
New dataframe copy of input, filtered according to arguments.
"""
# reduce dataset according to any org specification (per session)
# (one or more orgs)
if org is not None and org != "" and org != [""]:
if isinstance(org, str):
# singleton
org = [org]
events_df = events_df.loc[
list(
set(
events_df[events_df["OrgId"].isin(org)][
"distinct_session_id"
]
)
)
]
# reduce dataset according to any session specification (one or more)
if session is not None and len(session) != 0:
if isinstance(session, str):
# singleton
session = [session]
events_df = events_df.loc[session]
# reduce dataset according to any time specification (tuple of start, end
# times)
if start_time is not None:
# assumes that only a pair is provided
# transform to datetime format if not already
t0, t1 = [pd.to_datetime(t) for t in start_time]
# ensure only whole sessions are found that start in this range.
# (assumes original data set does not truncate any sessions.)
# get all session start times
groups = events_df.groupby("distinct_session_id")["EventStart"].min()
sids = groups[groups.between(t0, t1)].index
events_df = events_df.loc[sids]
return events_df
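# A minimal usage sketch (not part of the original module); the column names follow
# the docstrings above and the sample values are purely illustrative.
if __name__ == "__main__":
    sample = pd.DataFrame({
        "UserId": ["u1", "u1", "u2"],
        "SessionId": ["s1", "s1", "s2"],
        "OrgId": ["org1", "org1", "org2"],
        "EventStart": ["2021-01-01 00:00:05 UTC",
                       "2021-01-01 00:00:01 UTC",
                       "2021-01-02 12:00:00 UTC"],
    })
    events = preproc_events(sample)
    print(get_sessions(events))               # e.g. ['u1s1', 'u2s2'] (order not guaranteed)
    print(filter_events(events, org="org1"))  # keeps only the 'u1s1' session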
x = y = 0
x = int(input('number: '))
y = bin(x)
print('Binary: ', y[2:])
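# Example: entering 10 prints "Binary:  1010" (bin(10) is '0b1010'; the slice drops the '0b' prefix).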
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import division
import os
import numpy
from io import BytesIO
from matplotlib import pyplot
import requests
import torch
from PIL import Image
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
from maskrcnn_benchmark.structures.image_list import ImageList
if __name__ == "__main__":
# load config from file and command-line arguments
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
cfg.merge_from_file(
os.path.join(project_dir,
"configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"))
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
cfg.freeze()
# prepare object that handles inference plus adds predictions on top of image
coco_demo = COCODemo(
cfg,
confidence_threshold=0.7,
show_mask_heatmaps=False,
masks_per_dim=2,
min_image_size=480,
)
def single_image_to_top_predictions(image):
image = image.float() / 255.0
image = image.permute(2, 0, 1)
# we are loading images with OpenCV, so we don't need to convert them
# to BGR, they are already! So all we need to do is to normalize
# by 255 if we want to convert to BGR255 format, or flip the channels
# if we want it to be in RGB in [0-1] range.
if cfg.INPUT.TO_BGR255:
image = image * 255
else:
image = image[[2, 1, 0]]
# we absolutely want fixed size (int) here (or we run into a tracing error (or bug?)
# or we might later decide to make things work with variable size...
image = image - torch.tensor(cfg.INPUT.PIXEL_MEAN)[:, None, None]
# should also do variance...
image_list = ImageList(image.unsqueeze(0), [(int(image.size(-2)), int(image.size(-1)))])
result, = coco_demo.model(image_list)
scores = result.get_field("scores")
keep = (scores >= coco_demo.confidence_threshold)
result = (result.bbox[keep],
result.get_field("labels")[keep],
result.get_field("mask")[keep],
scores[keep])
return result
@torch.jit.script
def my_paste_mask(mask, bbox, height, width, threshold=0.5, padding=1, contour=True, rectangle=False):
# type: (Tensor, Tensor, int, int, float, int, bool, bool) -> Tensor
padded_mask = torch.constant_pad_nd(mask, (padding, padding, padding, padding))
scale = 1.0 + 2.0 * float(padding) / float(mask.size(-1))
center_x = (bbox[2] + bbox[0]) * 0.5
center_y = (bbox[3] + bbox[1]) * 0.5
w_2 = (bbox[2] - bbox[0]) * 0.5 * scale
h_2 = (bbox[3] - bbox[1]) * 0.5 * scale # should have two scales?
bbox_scaled = torch.stack([center_x - w_2, center_y - h_2,
center_x + w_2, center_y + h_2], 0)
TO_REMOVE = 1
w = (bbox_scaled[2] - bbox_scaled[0] + TO_REMOVE).clamp(min=1).long()
h = (bbox_scaled[3] - bbox_scaled[1] + TO_REMOVE).clamp(min=1).long()
scaled_mask = torch.ops.maskrcnn_benchmark.upsample_bilinear(padded_mask.float(), h, w)
x0 = bbox_scaled[0].long()
y0 = bbox_scaled[1].long()
x = x0.clamp(min=0)
y = y0.clamp(min=0)
leftcrop = x - x0
topcrop = y - y0
w = torch.min(w - leftcrop, width - x)
h = torch.min(h - topcrop, height - y)
# mask = torch.zeros((height, width), dtype=torch.uint8)
# mask[y:y + h, x:x + w] = (scaled_mask[topcrop:topcrop + h, leftcrop:leftcrop + w] > threshold)
mask = torch.constant_pad_nd((scaled_mask[topcrop:topcrop + h, leftcrop:leftcrop + w] > threshold),
(int(x), int(width - x - w), int(y), int(height - y - h))) # int for the script compiler
if contour:
mask = mask.float()
# poor person's contour finding by comparing to smoothed
mask = (mask - torch.nn.functional.conv2d(mask.unsqueeze(0).unsqueeze(0),
torch.full((1, 1, 3, 3), 1.0 / 9.0), padding=1)[0, 0]).abs() > 0.001
if rectangle:
x = torch.arange(width, dtype=torch.long).unsqueeze(0)
y = torch.arange(height, dtype=torch.long).unsqueeze(1)
r = bbox.long()
# work around script not liking bitwise ops
rectangle_mask = ((((x == r[0]) + (x == r[2])) * (y >= r[1]) * (y <= r[3]))
+ (((y == r[1]) + (y == r[3])) * (x >= r[0]) * (x <= r[2])))
mask = (mask + rectangle_mask).clamp(max=1)
return mask
@torch.jit.script
def add_annotations(image, labels, scores, bboxes, class_names=','.join(coco_demo.CATEGORIES), color=torch.tensor([255, 255, 255], dtype=torch.long)):
# type: (Tensor, Tensor, Tensor, Tensor, str, Tensor) -> Tensor
result_image = torch.ops.maskrcnn_benchmark.add_annotations(image, labels, scores, bboxes, class_names, color)
return result_image
@torch.jit.script
def combine_masks(image, labels, masks, scores, bboxes, threshold=0.5, padding=1, contour=True, rectangle=False, palette=torch.tensor([33554431, 32767, 2097151])):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, float, int, bool, bool, Tensor) -> Tensor
height = image.size(0)
width = image.size(1)
image_with_mask = image.clone()
for i in range(masks.size(0)):
color = ((palette * labels[i]) % 255).to(torch.uint8)
one_mask = my_paste_mask(masks[i, 0], bboxes[i], height, width, threshold, padding, contour, rectangle)
image_with_mask = torch.where(one_mask.unsqueeze(-1), color.unsqueeze(0).unsqueeze(0), image_with_mask)
image_with_mask = add_annotations(image_with_mask, labels, scores, bboxes)
return image_with_mask
def process_image_with_traced_model(image):
original_image = image
if coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY:
assert (image.size(0) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0
and image.size(1) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0)
boxes, labels, masks, scores = traced_model(image)
# todo: make this in one large thing
result_image = combine_masks(original_image, labels, masks, scores, boxes, 0.5, 1, rectangle=True)
return result_image
def fetch_image(url):
response = requests.get(url)
return Image.open(BytesIO(response.content)).convert("RGB")
if __name__ == "__main__":
pil_image = fetch_image(
url="http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg")
# convert to BGR format
image = torch.from_numpy(numpy.array(pil_image)[:, :, [2, 1, 0]])
original_image = image
if coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY:
assert (image.size(0) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0
and image.size(1) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0)
for p in coco_demo.model.parameters():
p.requires_grad_(False)
traced_model = torch.jit.trace(single_image_to_top_predictions, (image,))
@torch.jit.script
def end_to_end_model(image):
boxes, labels, masks, scores = traced_model(image)
result_image = combine_masks(image, labels, masks, scores, boxes, 0.5, 1, rectangle=True)
return result_image
end_to_end_model.save('end_to_end_model.pt')
result_image = process_image_with_traced_model(original_image)
# self.show_mask_heatmaps not done
pyplot.imshow(result_image[:, :, [2, 1, 0]])
pyplot.show()
# second image
image2 = fetch_image(
url='http://farm4.staticflickr.com/3153/2970773875_164f0c0b83_z.jpg')
image2 = image2.resize((640, 480), Image.BILINEAR)
image2 = torch.from_numpy(numpy.array(image2)[:, :, [2, 1, 0]])
result_image2 = process_image_with_traced_model(image2)
# self.show_mask_heatmaps not done
pyplot.imshow(result_image2[:, :, [2, 1, 0]])
pyplot.show()
# Repository: d4yvie/advent_of_code_2021, file: Day23/organization_state.py
from dataclasses import dataclass
from aoc_types import VectorX
Rooms = tuple[VectorX, ...]
@dataclass(frozen=True)
class OrganizationState:
energy: int
    rooms: Rooms  # use the Rooms alias defined above
hallway: tuple = (None,) * 11
def __lt__(self, other):
return self.energy < other.energy
@property
def id(self) -> tuple[tuple, tuple]:
return self.hallway, self.rooms
@property
def are_amphipods_organized(self):
return all(hallway is None for hallway in self.hallway) and all(
            all(amphipod == i for amphipod in room) for i, room in enumerate(self.rooms)
)
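# Example (illustrative): with the default empty hallway, a state whose rooms already
# hold their own indices is organized:
#   OrganizationState(energy=0, rooms=((0, 0), (1, 1), (2, 2), (3, 3))).are_amphipods_organized  # True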
"""
===========
NH2D fitter: ortho- and para- in the same file, but not modeled together
===========
Reference for line params:
New line parameters are taken from the recent laboratory work:
Melosso et al. (2021)
Journal of Molecular Spectroscopy, vol. 377, March 2021, 111431
https://doi.org/10.1016/j.jms.2021.111431
These supersede the frequencies from F. Daniel et al. (2016)
http://adsabs.harvard.edu/abs/2016A%26A...586L...4D
"""
from . import hyperfine
import astropy.units as u
freq_dict_cen = {
'o-1_11-1_01': 85.926263e9,
'p-1_11-1_01': 110.153599e9,
'o-1_01-0_00': 332.82251e9,
'p-1_01-0_00': 332.78189e9,
}
freq_dict = {
####### ortho-NH2D J=1_11-1_01
'o-1_11-1_01_01': 85.9246990e9,
'o-1_11-1_01_02': 85.9247505e9,
'o-1_11-1_01_03': 85.9247772e9,
'o-1_11-1_01_04': 85.9252797e9,
'o-1_11-1_01_05': 85.9253576e9,
'o-1_11-1_01_06': 85.9256502e9,
'o-1_11-1_01_07': 85.9256695e9,
'o-1_11-1_01_08': 85.9256886e9,
'o-1_11-1_01_09': 85.9256962e9,
'o-1_11-1_01_10': 85.9257017e9,
'o-1_11-1_01_11': 85.9257285e9,
'o-1_11-1_01_12': 85.9261987e9,
'o-1_11-1_01_13': 85.9262178e9,
'o-1_11-1_01_14': 85.9262310e9,
'o-1_11-1_01_15': 85.9262475e9,
'o-1_11-1_01_16': 85.9262517e9,
'o-1_11-1_01_17': 85.9262708e9,
'o-1_11-1_01_18': 85.9262766e9,
'o-1_11-1_01_19': 85.9262824e9,
'o-1_11-1_01_20': 85.9262914e9,
'o-1_11-1_01_21': 85.9262990e9,
'o-1_11-1_01_22': 85.9263088e9,
'o-1_11-1_01_23': 85.9263091e9,
'o-1_11-1_01_24': 85.9263257e9,
'o-1_11-1_01_25': 85.9268116e9,
'o-1_11-1_01_26': 85.9268282e9,
'o-1_11-1_01_27': 85.9268645e9,
'o-1_11-1_01_28': 85.9268718e9,
'o-1_11-1_01_29': 85.9268895e9,
'o-1_11-1_01_30': 85.9269061e9,
'o-1_11-1_01_31': 85.9271096e9,
'o-1_11-1_01_32': 85.9271418e9,
'o-1_11-1_01_33': 85.9277048e9,
'o-1_11-1_01_34': 85.9277224e9,
'o-1_11-1_01_35': 85.9277391e9,
####### ortho-NH2D J=1_01-0_00
'o-1_01-0_00_01': 332.7808404e9,
'o-1_01-0_00_02': 332.7808404e9,
'o-1_01-0_00_03': 332.7808404e9,
'o-1_01-0_00_04': 332.7816734e9,
'o-1_01-0_00_05': 332.7816734e9,
'o-1_01-0_00_06': 332.7816734e9,
'o-1_01-0_00_07': 332.7816983e9,
'o-1_01-0_00_08': 332.7817512e9,
'o-1_01-0_00_09': 332.7817512e9,
'o-1_01-0_00_10': 332.7822538e9,
'o-1_01-0_00_11': 332.7822538e9,
'o-1_01-0_00_12': 332.7822538e9,
'o-1_01-0_00_13': 332.7822805e9,
'o-1_01-0_00_14': 332.7822805e9,
'o-1_01-0_00_15': 332.7823320e9,
####### para-NH2D J=1_11-1_01
'p-1_11-1_01_01': 110.1520107e9,
'p-1_11-1_01_02': 110.1520630e9,
'p-1_11-1_01_03': 110.1520892e9,
'p-1_11-1_01_04': 110.1525927e9,
'p-1_11-1_01_05': 110.1526707e9,
'p-1_11-1_01_06': 110.1529617e9,
'p-1_11-1_01_07': 110.1529820e9,
'p-1_11-1_01_08': 110.1530013e9,
'p-1_11-1_01_09': 110.1530081e9,
'p-1_11-1_01_10': 110.1530141e9,
'p-1_11-1_01_11': 110.1530402e9,
'p-1_11-1_01_12': 110.1535116e9,
'p-1_11-1_01_13': 110.1535309e9,
'p-1_11-1_01_14': 110.1535437e9,
'p-1_11-1_01_15': 110.1535596e9,
'p-1_11-1_01_16': 110.1535645e9,
'p-1_11-1_01_17': 110.1535838e9,
'p-1_11-1_01_18': 110.1535897e9,
'p-1_11-1_01_19': 110.1535945e9,
'p-1_11-1_01_20': 110.1536042e9,
'p-1_11-1_01_21': 110.1536119e9,
'p-1_11-1_01_22': 110.1536207e9,
'p-1_11-1_01_23': 110.1536217e9,
'p-1_11-1_01_24': 110.1536381e9,
'p-1_11-1_01_25': 110.1541241e9,
'p-1_11-1_01_26': 110.1541415e9,
'p-1_11-1_01_27': 110.1541770e9,
'p-1_11-1_01_28': 110.1541857e9,
'p-1_11-1_01_29': 110.1542022e9,
'p-1_11-1_01_30': 110.1542196e9,
'p-1_11-1_01_31': 110.1544234e9,
'p-1_11-1_01_32': 110.1544555e9,
'p-1_11-1_01_33': 110.1550195e9,
'p-1_11-1_01_34': 110.1550360e9,
'p-1_11-1_01_35': 110.1550534e9,
####### para-NH2D J=1_01-0_00
'p-1_01-0_00_01': 332.8215574e9,
'p-1_01-0_00_02': 332.8215574e9,
'p-1_01-0_00_03': 332.8215574e9,
'p-1_01-0_00_04': 332.8223912e9,
'p-1_01-0_00_05': 332.8223912e9,
'p-1_01-0_00_06': 332.8223912e9,
'p-1_01-0_00_07': 332.8224164e9,
'p-1_01-0_00_08': 332.8224692e9,
'p-1_01-0_00_09': 332.8224692e9,
'p-1_01-0_00_10': 332.8229727e9,
'p-1_01-0_00_11': 332.8229727e9,
'p-1_01-0_00_12': 332.8229727e9,
'p-1_01-0_00_13': 332.8229989e9,
'p-1_01-0_00_14': 332.8229989e9,
'p-1_01-0_00_15': 332.8230512e9,
}
line_strength_dict = {
####### ortho-NH2D J=1_11-1_01
'o-1_11-1_01_01': 0.01295,
'o-1_11-1_01_02': 0.06163,
'o-1_11-1_01_03': 0.03552,
'o-1_11-1_01_04': 0.00033,
'o-1_11-1_01_05': 0.00068,
'o-1_11-1_01_06': 0.01377,
'o-1_11-1_01_07': 0.00730,
'o-1_11-1_01_08': 0.06915,
'o-1_11-1_01_09': 0.02936,
'o-1_11-1_01_10': 0.00037,
'o-1_11-1_01_11': 0.02030,
'o-1_11-1_01_12': 0.09879,
'o-1_11-1_01_13': 0.01726,
'o-1_11-1_01_14': 0.01881,
'o-1_11-1_01_15': 0.01031,
'o-1_11-1_01_16': 0.02297,
'o-1_11-1_01_17': 0.17283,
'o-1_11-1_01_18': 0.02660,
'o-1_11-1_01_19': 0.03280,
'o-1_11-1_01_20': 0.00662,
'o-1_11-1_01_21': 0.01394,
'o-1_11-1_01_22': 0.05729,
'o-1_11-1_01_23': 0.01174,
'o-1_11-1_01_24': 0.00757,
'o-1_11-1_01_25': 0.01545,
'o-1_11-1_01_26': 0.03456,
'o-1_11-1_01_27': 0.06345,
'o-1_11-1_01_28': 0.01973,
'o-1_11-1_01_29': 0.00112,
'o-1_11-1_01_30': 0.00568,
'o-1_11-1_01_31': 0.00018,
'o-1_11-1_01_32': 0.00057,
'o-1_11-1_01_33': 0.01069,
'o-1_11-1_01_34': 0.06063,
'o-1_11-1_01_35': 0.03905,
####### ortho-NH2D J=1_01-0_00
'o-1_01-0_00_01': 0.01579,
'o-1_01-0_00_02': 0.03206,
'o-1_01-0_00_03': 0.06326,
'o-1_01-0_00_04': 0.04784,
'o-1_01-0_00_05': 0.05919,
'o-1_01-0_00_06': 0.00409,
'o-1_01-0_00_07': 0.25925,
'o-1_01-0_00_08': 0.14819,
'o-1_01-0_00_09': 0.03699,
'o-1_01-0_00_10': 0.04748,
'o-1_01-0_00_11': 0.01986,
'o-1_01-0_00_12': 0.04376,
'o-1_01-0_00_13': 0.03699,
'o-1_01-0_00_14': 0.14819,
'o-1_01-0_00_15': 0.03704,
####### para-NH2D J=1_11-1_01
'p-1_11-1_01_01': 0.01296,
'p-1_11-1_01_02': 0.06162,
'p-1_11-1_01_03': 0.03552,
'p-1_11-1_01_04': 0.00033,
'p-1_11-1_01_05': 0.00068,
'p-1_11-1_01_06': 0.01378,
'p-1_11-1_01_07': 0.00730,
'p-1_11-1_01_08': 0.06914,
'p-1_11-1_01_09': 0.02937,
'p-1_11-1_01_10': 0.00037,
'p-1_11-1_01_11': 0.02030,
'p-1_11-1_01_12': 0.09872,
'p-1_11-1_01_13': 0.01729,
'p-1_11-1_01_14': 0.01882,
'p-1_11-1_01_15': 0.01029,
'p-1_11-1_01_16': 0.02300,
'p-1_11-1_01_17': 0.17283,
'p-1_11-1_01_18': 0.02660,
'p-1_11-1_01_19': 0.03282,
'p-1_11-1_01_20': 0.00661,
'p-1_11-1_01_21': 0.01395,
'p-1_11-1_01_22': 0.01173,
'p-1_11-1_01_23': 0.05728,
'p-1_11-1_01_24': 0.00757,
'p-1_11-1_01_25': 0.01547,
'p-1_11-1_01_26': 0.03455,
'p-1_11-1_01_27': 0.06342,
'p-1_11-1_01_28': 0.01973,
'p-1_11-1_01_29': 0.00113,
'p-1_11-1_01_30': 0.00570,
'p-1_11-1_01_31': 0.00018,
'p-1_11-1_01_32': 0.00057,
'p-1_11-1_01_33': 0.01069,
'p-1_11-1_01_34': 0.06062,
'p-1_11-1_01_35': 0.03906,
####### para-NH2D J=1_01-0_00
'p-1_01-0_00_01': 0.01580,
'p-1_01-0_00_02': 0.03207,
'p-1_01-0_00_03': 0.06324,
'p-1_01-0_00_04': 0.04784,
'p-1_01-0_00_05': 0.05919,
'p-1_01-0_00_06': 0.00409,
'p-1_01-0_00_07': 0.25927,
'p-1_01-0_00_08': 0.14814,
'p-1_01-0_00_09': 0.03704,
'p-1_01-0_00_10': 0.04747,
'p-1_01-0_00_11': 0.01985,
'p-1_01-0_00_12': 0.04379,
'p-1_01-0_00_13': 0.03704,
'p-1_01-0_00_14': 0.14814,
'p-1_01-0_00_15': 0.03704,
}
# Get offset velocity dictionary in km/s based on the lines frequencies and rest frequency
conv_o1_1 = u.doppler_radio(freq_dict_cen['o-1_11-1_01']*u.Hz)
conv_p1_1 = u.doppler_radio(freq_dict_cen['p-1_11-1_01']*u.Hz)
conv_o1_0 = u.doppler_radio(freq_dict_cen['o-1_01-0_00']*u.Hz)
conv_p1_0 = u.doppler_radio(freq_dict_cen['p-1_01-0_00']*u.Hz)
voff_lines_dict = {
name: ((freq_dict[name]*u.Hz).to(u.km/u.s, equivalencies=conv_o1_1).value) for name in freq_dict.keys() if "o-1_11-1_01" in name
}
voff_lines_dict.update({
name: ((freq_dict[name]*u.Hz).to(u.km/u.s, equivalencies=conv_p1_1).value) for name in freq_dict.keys() if "p-1_11-1_01" in name
})
voff_lines_dict.update({
name: ((freq_dict[name]*u.Hz).to(u.km/u.s, equivalencies=conv_o1_0).value) for name in freq_dict.keys() if "o-1_01-0_00" in name
})
voff_lines_dict.update({
name: ((freq_dict[name]*u.Hz).to(u.km/u.s, equivalencies=conv_p1_0).value) for name in freq_dict.keys() if "p-1_01-0_00" in name
})
# relative_strength_total_degeneracy is not used in the CLASS implementation
# of the hfs fit. It is the sum of the degeneracy values for all hyperfines
# for a given line; it gives the relative weights between lines.
# Hyperfine weights are treated as normalized within one rotational transition.
wo1_1 = sum(val for name,val in line_strength_dict.items() if 'o-1_11-1_01' in name)
wp1_1 = sum(val for name,val in line_strength_dict.items() if 'p-1_11-1_01' in name)
wo1_0 = sum(val for name,val in line_strength_dict.items() if 'o-1_01-0_00' in name)
wp1_0 = sum(val for name,val in line_strength_dict.items() if 'p-1_01-0_00' in name)
relative_strength_total_degeneracy = {
name : wo1_1 for name in line_strength_dict.keys() if "o-1_11-1_01" in name
}
relative_strength_total_degeneracy.update({
name : wp1_1 for name in line_strength_dict.keys() if "p-1_11-1_01" in name
})
relative_strength_total_degeneracy.update({
name : wo1_0 for name in line_strength_dict.keys() if "o-1_01-0_00" in name
})
relative_strength_total_degeneracy.update({
name : wp1_0 for name in line_strength_dict.keys() if "p-1_01-0_00" in name
})
# Get the list of line names from the previous lists
line_names = [name for name in voff_lines_dict.keys()]
nh2d_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict, freq_dict,
line_strength_dict,
relative_strength_total_degeneracy)
nh2d_vtau_fitter = nh2d_vtau.fitter
nh2d_vtau_vheight_fitter = nh2d_vtau.vheight_fitter
nh2d_vtau_tbg_fitter = nh2d_vtau.background_fitter
command = input()
company_dict = {}
while not command == "End":
company, user = command.split(" -> ")
if not company in company_dict:
company_dict[company] = [user]
else:
if not user in company_dict[company]:
company_dict[company].append(user)
command = input()
company_dict = dict(sorted(company_dict.items(), key=lambda x: x[0]))  # sort companies alphabetically
for company, user in company_dict.items():
print(f"{company}")
for name in user:
print(f"-- {name}")
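# Example run (illustrative): for the input lines
#   SoftUni -> peter
#   SoftUni -> george
#   Apple -> steve
#   End
# the script prints:
#   Apple
#   -- steve
#   SoftUni
#   -- peter
#   -- george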
from .functions import divar_get_phone
# File: generate.py
###############################################################################
# Language Modeling on Penn Tree Bank
#
# This file generates new sentences sampled from the language model
#
###############################################################################
import argparse
import torch
from torch.autograd import Variable
import data
parser = argparse.ArgumentParser(description='PyTorch PTB Language Model')
# Model parameters.
parser.add_argument('--data', type=str, default='./data/penn',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
help='type of recurrent net (LSTM, QRNN)')
parser.add_argument('--checkpoint', type=str, default='./model.pt',
help='model checkpoint to use')
parser.add_argument('--outf', type=str, default='generated.txt',
help='output file for generated text')
parser.add_argument('--words', type=int, default='1000',
help='number of words to generate')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--temperature', type=float, default=1.0,
help='temperature - higher will increase diversity')
parser.add_argument('--log-interval', type=int, default=100,
help='reporting interval')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
if args.temperature < 1e-3:
parser.error("--temperature has to be greater or equal 1e-3")
with open(args.checkpoint, 'rb') as f:
model, _, _ = torch.load(f)
model.eval()
if args.model == 'QRNN':
model.reset()
if args.cuda:
model.cuda()
else:
model.cpu()
corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)
lines = []
with open(args.data + 'test.txt', 'r') as test:
lines = test.readlines()
lines = [line for line in lines if line.strip() != '']
first_line = lines[0].split() + ['<eos>']
first_word = first_line[0]
# input = Variable(torch.rand(1, 1).mul(ntokens).long(), volatile=True)
input = torch.tensor([[corpus.dictionary.word2idx[first_word]]])
if args.cuda:
input.data = input.data.cuda()
hidden = model.init_hidden(1)
success = 0
error = 0
wpa = 0
with open(args.outf, 'a') as outf:
for line in lines:
tokens = line.split() + ['<eos>']
for token in tokens[1:]:
output, hidden = model(input, hidden)
word_weights = model.decoder(output).squeeze().data.div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
input.data.fill_(corpus.dictionary.word2idx[token])
try:
word = corpus.dictionary.idx2word[word_idx]
except:
word = '<unk>'
print(word, token)
if word == token:
success += 1
else:
error += 1
wpa = success / (success + error)
outf.write('\n' + args.checkpoint + ': ' + str(wpa))
# Repository: ramonakira/piclodio3 (100-1000 GitHub stars)
from collections import OrderedDict
from rest_framework import status
from rest_framework.reverse import reverse
from tests.test_views.test_alarm_clock_view.base import Base
class TestList(Base):
def setUp(self):
super(TestList, self).setUp()
self.url = reverse('api:alarmclocks:list_create')
def test_list(self):
response = self.client.get(self.url, format='json')
expected = OrderedDict(
[('count', 2),
('next', None),
('previous', None),
('results', [
OrderedDict([('id', 1),
('name', 'alarm1'),
('monday', True),
('tuesday', False),
('wednesday', False),
('thursday', False),
('friday', False),
('saturday', False),
('sunday', False),
('hour', 8),
('minute', 20),
('enabled', True),
('auto_stop_minutes', 0),
('webradio', 1)]),
OrderedDict([('id', 2),
('name', 'alarm2'),
('monday', False),
('tuesday', False),
('wednesday', True),
('thursday', False),
('friday', False),
('saturday', False),
('sunday', False),
('hour', 8),
('minute', 20),
('enabled', True),
('auto_stop_minutes', 0),
('webradio', 2)])])])
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(expected, response.data)
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 26 18:29:41 2019
@author: <NAME>
"""
import cv2
from PIL import Image
import matplotlib.pyplot as plt
import tools
import numpy as np
from scipy import ndimage
#from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
#%%
#images
#PSP_001414_1780_RED_img_row_33792_col_12288_w_1024_h_1024_x_0_y_0
#PSP_001414_1780_RED_img_row_32768_col_15360_w_1024_h_1024_x_0_y_0
#PSP_001414_1780_RED_img_row_32768_col_14336_w_1024_h_1024_x_0_y_0
#PSP_001414_1780_RED_img_row_32768_col_13312_w_1024_h_1024_x_0_y_0
#PSP_001414_1780_RED_img_row_9216_col_11264_w_1024_h_1024_x_0_y_0
#chameleon
#parachute
path = "C:/Users/<NAME>/Documents/Unterlagen/SoSe2019/mars/python/1024x1024/"
img = cv2.imread('rocks.jpg')
#im = Image.open('rocks.jpg')
#np_im = np.array(im)
sharpened = tools.sharp(img, 3)
stretched = tools.stretch_8bit(img)
enhanced1 = tools.stretch_8bit(sharpened)
enhanced2 = tools.sharp(stretched, 3)
plt.imshow(enhanced1)
plt.show()
plt.imshow(enhanced2)
plt.show()
compare = tools.concatenate([img, sharpened, stretched, enhanced1, enhanced2]) #they are img type
plt.imshow(compare)
plt.show()
print(type(compare))
#cv2.imwrite('land_sharp3.jpg', compare)
compare = tools.concatenate([img, enhanced1, enhanced2])
plt.imshow(compare)
plt.show()
#compare.save('land_orgfinal_sharp3.jpg')
#cv2.imwrite('output/enhanced.jpg', enhanced1)
#cv2.imwrite('output/stretched.jpg', stretched)
#v2.imwrite('output/sharpened.jpg', sharpened)
#%%
img = cv2.imread('output/techno-signature_augmentation/parachute.jpg')
#simple = tools.augment_simple(img)
#augmentations = tools.augment_random(simple[3], generations = 8)
#augmentations = [flipped, rolled, rotated90, rotated180]
#cv2.imwrite('flipped.jpg', simple[0])
#cv2.imwrite('rolled.jpg', simple[1])
#cv2.imwrite('rotated90.jpg', simple[2])
#cv2.imwrite('rotated180.jpg', simple[3])
lista = [cv2.imread('aug_00.jpg'),cv2.imread('aug_01.jpg'),cv2.imread('aug_02.jpg'),cv2.imread('aug_03.jpg'),
cv2.imread('aug_04.jpg'),cv2.imread('aug_05.jpg'),cv2.imread('aug_06.jpg'),cv2.imread('aug_07.jpg')]
#lista2 = [cv2.imread('aug_08.jpg'),cv2.imread('aug_09.jpg'),cv2.imread('aug_10.jpg'),cv2.imread('aug_11.jpg'),
#cv2.imread('aug_12.jpg'),cv2.imread('aug_13.jpg'),cv2.imread('aug_14.jpg'),cv2.imread('aug_15.jpg')]
#lista3 = [cv2.imread('aug_16.jpg'),cv2.imread('aug_17.jpg'),cv2.imread('aug_18.jpg'),cv2.imread('aug_19.jpg'),
#cv2.imread('aug_20.jpg'),cv2.imread('aug_21.jpg'),cv2.imread('aug_22.jpg'),cv2.imread('aug_23.jpg')]
#%%
concatenated = tools.concatenate(lista)
plt.imshow(concatenated)
plt.show()
concatenated.save('comb5.jpg')
#%%
#_________________ create function with this _________________________ DONE
list_im = ['output/original.jpg','output/sharpened.jpg','output/stretched.jpg','output/enhanced.jpg']
imgs = [ Image.open(i) for i in list_im ]
# pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)
min_shape = sorted( [(np.sum(i.size), i.size ) for i in imgs])[0][1]
imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in imgs])  # pass a list, not a generator, so newer NumPy accepts it
# save that beautiful picture
imgs_comb = Image.fromarray( imgs_comb)
#imgs_comb.save( 'test_hor.jpg' )
# for a vertical stacking it is simple: use vstack
imgs_comb = np.vstack([np.asarray(i.resize(min_shape)) for i in imgs])  # list instead of generator for newer NumPy
imgs_comb = Image.fromarray( imgs_comb)
#imgs_comb.save( 'test_ver.jpg' )
#_______________________________________________________________________
#%%
# Takes file names and stacks the images; Image.fromarray is used for cv2/numpy arrays.
# Note: resizing a referenced array in place raises "ValueError: cannot resize an array
# that references or is referenced by another array in this way"; use np.resize or refcheck=False.
def concatenate(imgflnames):
images = [cv2.imread(i) for i in imgflnames] #for loop one line for lists
print("\n", type(images), "\n")
print("lenght: ", len(images))
print("dimension 0: ", images[0].ndim)
print("dimension 1: ", images[1].ndim)
min_shape = sorted( [(np.sum(i.shape), i.shape ) for i in images])[0][1]
print(min_shape)
    imgs_comb = np.hstack([np.asarray(cv2.resize(i, (min_shape[0], min_shape[1]))) for i in images])  # list instead of generator
#res = cv2.resize(img_np, dsize=(2048, 2048), interpolation=cv2.INTER_CUBIC)
imgs_comb = Image.fromarray( imgs_comb)
return imgs_comb
# Takes file names; has a dimensionality problem ("all the input arrays must have the same
# number of dimensions"), which could be fixed with a resize function.
def concatenate2(imgflnames):
images = [Image.open(i) for i in imgflnames] #for loop one line for lists
print("\n", type(images), "\n")
print("lenght: ", len(images))
print("dimension 0: ", images[0].size)
print("dimension 1: ", images[1].size)
min_shape = sorted( [(np.sum(i.size), i.size ) for i in images])[0][1]
print(min_shape)
    imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in images])  # list instead of generator
imgs_comb = Image.fromarray( imgs_comb)
return imgs_comb
#%%
list_im = ['output/enhancement/original.jpg','output/enhancement/enhanced.jpg']
imgs = [ Image.open(i) for i in list_im ]
# pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)
min_shape = sorted( [(np.sum(i.size), i.size ) for i in imgs])[0][1]
imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in imgs])  # list instead of generator
# save that beautiful picture
imgs_comb = Image.fromarray( imgs_comb)
plt.imshow(imgs_comb)
plt.show()
two = concatenate2(list_im)
plt.imshow(two)
plt.show()
#imgs_comb.save( 'orginal_final.jpg' )
#tools.augment_random(img, 20)
#augmented = tools.augment_simple(img)
#cv2.imwrite("output/chameleon.jpg", img)
#cv2.imwrite("output/flipped.jpg", augmented[0])
#cv2.imwrite("output/rolled.jpg", augmented[1])
#cv2.imwrite("output/rotated90.jpg", augmented[2])
#cv2.imwrite("output/rotated180.jpg", augmented[3])
#%% register_image(img)
#try with chameleon and rotate 27.5
#try resize again
img = cv2.imread('resized.jpg')
plt.imshow(img)
plt.show()
#img = cv2.imread('resized.jpg')
ref = tools.generate_template(img)
plt.imshow(ref)
plt.show()
cv2.imwrite('refresized.jpg',ref)
#ref = tools.generate_template(img, [255,0,0])
#plt.imshow(ref)
#plt.show()
#%%
type2_list = ['type2.jpg','reftype2.jpg','translation_type2.jpg','rigid_body_type2.jpg','scale_type2.jpg','affine_type2.jpg','bilatelar_type2.jpg']
resized_list = ['resized.jpg','align_and_crop_before.jpg','refresized.jpg','translation.jpg','rigid_body.jpg','scaled_rotation.jpg','affine.jpg','bilinear.jpg']
conc1 = tools.concatenate(type2_list, True)
plt.imshow(conc1)
plt.show()
conc2 = tools.concatenate(resized_list, True)
plt.imshow(conc2)
plt.show()
#%%
img_list = ['output/enhancement/original.jpg','output/enhancement/enhanced.jpg', 'bilinear_template.jpg'] #dimensionality problem
images = [Image.open(i) for i in img_list]
for i in images:
print (i.size)
print (type(i))
concatenated = concatenate(img_list)
plt.imshow(concatenated)
plt.show()
#%% concatenation test detailed
list_im1 = 'output/enhancement/original.jpg'
imgs_1 = Image.open(list_im1)
imgs2_1 = cv2.imread(list_im1)
print(imgs_1.size)
print("PIL Image type: ", type(imgs_1))
print(imgs2_1.shape)
print("CV2read imgs2 type: ", type(imgs2_1))
list_im2 = 'output/enhancement/enhanced.jpg'
imgs_2 = Image.open(list_im2)
imgs2_2 = cv2.imread(list_im2)
print("\n",imgs_2.size)
print("PIL Image type: ", type(imgs_2))
print(imgs2_2.shape)
print("CV2read imgs2 type: ", type(imgs2_2))
list_im3 = 'bilinear_template.jpg'
imgs_3 = Image.open(list_im3)
imgs2_3 = cv2.imread(list_im3)
print("\n",imgs_3.size)
print("PIL Image type: ", type(imgs_3))
print(imgs2_3.shape)
print("CV2read imgs2 type: ", type(imgs2_3))
result = tools.concatenate([list_im3, list_im2, list_im1])
plt.imshow(result)
plt.show()
#%%
#img_rotated = ndimage.rotate(img, 27)
#cv2.imwrite('output/rotated_chameleon27.jpg', img_rotated)
#transformations = tools.register_image(img, ref = 'bilinear_template.jpg') #best result so far
transformations = tools.register_image(img)
transformations = tools.register_image(img ,'solid')
transformations = tools.register_image(img, ref = 'bilinear_template.jpg') #homography function could have the same
#%%
cv2.imwrite('output/translation_resized_bilinear.jpg', transformations[0])
cv2.imwrite('output/rotation_resized_bilinear.jpg', transformations[1])
cv2.imwrite('output/scaled_rotation_resized_bilinear.jpg', transformations[2])
cv2.imwrite('output/affine_resized_bilinear.jpg', transformations[3])
cv2.imwrite('output/bilinear_resized_bilinear.jpg', transformations[4])
#%%
def random_color(low=5, high=250):
    # use the low/high arguments instead of the previously hard-coded bounds
    return [np.random.randint(low, high), np.random.randint(low, high), np.random.randint(low, high)]
#%%
def generate_template():
    ref = np.zeros((img.shape[0], img.shape[1], 3), dtype='uint8')
    margin = int(min(img.shape[0], img.shape[1]) / 10)
    # fill everything except a margin-wide border with random colours
    # (rows run over img.shape[0], columns over img.shape[1])
    for row in range(margin, img.shape[0] - margin):
        for col in range(margin, img.shape[1] - margin):
            ref[row, col, :] = random_color()
    return ref
#%%
plt.imshow(ref)
plt.show()
cv2.imwrite('test_template.jpg', ref)
# Repository: mas-veritas2/veritastool
"""
Basic fairness measures specific to uplift models.
Written by <NAME> and <NAME>,
Gradient Institute Ltd. (<EMAIL>).
Copyright © 2020 Monetary Authority of Singapore
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import numpy as np
import pandas as pd
from functools import partial
from typing import Union, Tuple, Callable, Dict
from scipy.stats import multinomial
from scipy.stats.mstats import mquantiles
from scipy.integrate import simps
from sklearn.metrics import log_loss, r2_score
from sklearn.preprocessing import OneHotEncoder
from uplift import Uplifter
from response import Responder
#
# Generic Uplift Scorers
#
def make_model_scorer(
scorefn: Callable,
*args,
pred_method: str="select",
**kwargs
) -> Callable:  # the factory returns a scorer function, not a float
"""Make a scorer for an uplift model (Uplifter or Responder class)."""
# Make the scorer function
def scorer(estimator, X, y):
predict = getattr(estimator, pred_method)(X)
score = scorefn(y, predict, *args, **kwargs)
return score
return scorer
def make_fair_scorer(
func: Callable,
prot_attr: str,
priv_group: int,
diff: bool,
*args,
pred_method: str="select",
**kwargs
) -> Callable:  # the factory returns a scorer function, not a float
"""Make a scorer that is the disparity between protected group scores."""
# make the scorer function
def scorer(estimator, X, y):
# get selection and protected attribute
predict = getattr(estimator, pred_method)(X)
attributes = _get_attributes(y)
protected = np.array(attributes[prot_attr] != priv_group)
# call the input scoring function
s_r = func(y[protected], predict[protected], *args, **kwargs)
s_i = func(y[~protected], predict[~protected], *args, **kwargs)
measure = s_r - s_i if diff else s_r / s_i
return measure
return scorer
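# Usage sketch (illustrative; "gender" and the privileged value 1 are assumed attribute
# names, not part of this module): `empirical_lift` defined further below matches the
# (y, selected) signature expected here, so scorers can be built as
#   lift_scorer = make_model_scorer(empirical_lift)
#   fair_lift_diff = make_fair_scorer(empirical_lift, prot_attr="gender", priv_group=1, diff=True)
# and evaluated with lift_scorer(fitted_uplifter, X_test, y_test).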
def gini_fairness(
estimator: Union[Responder, Uplifter],
X: Union[np.array, pd.DataFrame],
y: pd.Series,
prot_attr: str,
n_percentiles: int=20,
) -> float:
"""Calculate the Gini coefficient for continuous protected attributes.
This calculation is based on trapezoidal integration of the Lorenz curve.
"""
selection = estimator.select(X)
attributes = _get_attributes(y)
# Get the continuous protected attribute
if prot_attr in attributes.columns:
A = np.array(attributes[prot_attr])
elif prot_attr in X.columns:
A = np.array(X[prot_attr])
else:
raise ValueError("`prot_attr` is not in y or X!")
G = _gini_coefficient(selection, A, n_percentiles)
return G
#
# Test data evaluator
#
def test_model(
estimator: Uplifter,
X_test: Union[pd.DataFrame, np.array],
y_test: pd.Series,
scorers: Dict[str, Callable],
lower_quantile: float=0.05,
upper_quantile: float=0.95,
replications: int=50
) -> Dict[str, Tuple[float, int]]:
"""Evaluate the uplift model scores on a test dataset, with uncertainty."""
scores = {}
for k, fun in scorers.items():
pfun = partial(fun, estimator)
scores[k] = _empirical_bootstrap(
pfun, X_test, y_test,
q_lower=lower_quantile,
q_upper=upper_quantile,
replications=replications
)
return scores
#
# Additional model measures
#
def std_nlog_loss(
y_true: Union[pd.Series, np.ndarray],
p_pred: np.array
) -> float:
"""Standardised negative log-loss.
Standardised against a naive predictor trained on the test set.
"""
nll = - log_loss(y_true, p_pred)
# This assumes the labels were sorted using python's sort function,
# which is true for scikit learn classes.
y_enc = OneHotEncoder(sparse=False).fit_transform(
y_true.to_numpy()[:, np.newaxis])
p_rate = y_enc.mean(axis=0)
naive = multinomial(n=1, p=p_rate)
naivell = naive.logpmf(y_enc).mean()
std_nll = nll - naivell
return std_nll
def empirical_lift(
y: pd.Series,
selected: np.array
) -> float:
"""Estimate the empirical lift from a selection."""
Ntr = sum(y[selected] == "TR")
Ntn = sum(y[selected] == "TN")
pRcT = Ntr / (Ntr + Ntn)
Ncr = sum(y[selected] == "CR")
Ncn = sum(y[selected] == "CN")
pRcC = Ncr / (Ncr + Ncn)
emp_lift = pRcT - pRcC
return emp_lift
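# Worked example (illustrative): if the selected rows contain 30 "TR", 70 "TN",
# 10 "CR" and 90 "CN" labels, then pRcT = 30 / 100 = 0.3, pRcC = 10 / 100 = 0.1,
# and the empirical lift is 0.3 - 0.1 = 0.2.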
def lift_r2(
y: pd.Series,
lift: np.array
) -> float:
"""Calculate R2 score between predicted lift and empirical lift deciles."""
deciles = np.arange(10)
dec_idx = pd.qcut(lift, 10, labels=deciles)
# Compute the empirical lift per deciles
emp_lift = np.array([empirical_lift(y, dec_idx == d) for d in deciles])
# Compute the average predicted lift per decile
med_lift = np.array([np.median(lift[dec_idx == d]) for d in deciles])
# R2 between lifts
r2 = r2_score(emp_lift, med_lift)
return r2
def proportion_selected(
y: pd.Series,
selected: np.array
) -> float:
"""Calculate the proportion of the cohort selected."""
# Impact rate
p_sel = sum(selected) / len(selected)
return p_sel
#
# Mock deployment impact scoring
#
def deployment_outcomes(y: pd.Series, selected: np.array) -> pd.DataFrame:
"""Get the 'real world' outcomes from a deployment."""
attributes = _get_attributes(y)
# Copy not selected outcomes to outcomes
applied = np.array(attributes.ns_applied)
acquired = np.array(attributes.ns_acquired)
success = np.array(attributes.ns_success)
# Change selected to selected outcomes
applied[selected] = attributes.loc[selected, "s_applied"]
acquired[selected] = attributes.loc[selected, "s_acquired"]
success[selected] = attributes.loc[selected, "s_success"]
outcomes = pd.DataFrame({
"applied": applied,
"acquired": acquired,
"success": success,
}, index=y.index)
return outcomes
def mock_deploy(
estimator: Uplifter,
X_deploy: Union[pd.DataFrame, np.array],
y_deploy: pd.Series,
y_train: pd.Series,
scorers: Dict[str, Callable],
lower_quantile: float=0.05,
upper_quantile: float=0.95,
replications: int=50
) -> Dict[str, Tuple[float, float, float]]:
"""Evaluate the uplift model selection harms and benefits."""
scores = {}
for k, fun in scorers.items():
pfun = partial(fun, estimator)
scores[k] = _empirical_bootstrap(
pfun, X_deploy, y_deploy, y_train,
q_lower=lower_quantile,
q_upper=upper_quantile,
replications=replications
)
return scores
def make_impacts(
scorefn: Callable,
*args,
**kwargs
) -> Callable:
"""Make a deployment impact scorer."""
# make the scorer function
def scorer(estimator, X_deploy, y_deploy, y_train):
selected = estimator.select(X_deploy)
outcomes = estimator.predict_outcomes(X_deploy)
out_dep = deployment_outcomes(y_deploy, selected)
out_ctl = _control_outcomes(y_train)
Ir = scorefn(out_dep, out_ctl, selected, outcomes, *args, **kwargs)
return Ir
return scorer
def make_fair_impacts(
scorefn: Callable,
prot_attr: str,
reported_group: int,
*args,
**kwargs
) -> Callable:
"""Make an impact scorer that is the disparity between groups."""
# make the scorer function
def scorer(estimator, X_deploy, y_deploy, y_train):
# get selection and control
selected = estimator.select(X_deploy)
outcomes = estimator.predict_outcomes(X_deploy)
out_dep = deployment_outcomes(y_deploy, selected)
out_ctl = _control_outcomes(y_train)
# selection reported mask
att_dep = _get_attributes(y_deploy)
rpt_dep = np.array(att_dep[prot_attr] == reported_group)
# control reported mask
att_ctl = _get_attributes(y_train)[_get_control(y_train)]
rpt_ctl = np.array(att_ctl[prot_attr] == reported_group)
pred_out_dep = {k: v[rpt_dep] for k, v in outcomes.items()}
# call the input scoring function
Ir = scorefn(out_dep[rpt_dep], out_ctl[rpt_ctl], selected[rpt_dep],
pred_out_dep, *args, **kwargs)
return Ir
return scorer
#
# Direct harm and Benefit scoring functions
#
def benefit_from_receive(
out_dep: pd.DataFrame,
out_ctl: pd.DataFrame,
selected: np.array,
outcomes: Dict,
) -> float:
"""Calculate benefit from receiving an intervention."""
return proportion_selected(out_dep, selected)
def benefit_from_receive_gini(
out_dep: pd.DataFrame,
out_ctl: pd.DataFrame,
selected: np.array,
outcomes: Dict,
prot_attr: str,
n_percentiles: int=20,
) -> float:
"""Calculate benefit from receiving an intervention, continuous."""
attributes = _get_attributes(out_dep)
A = np.array(attributes[prot_attr])
G = _gini_coefficient(selected, A, n_percentiles)
return G
def harm_from_unwanted(
out_dep: pd.DataFrame,
out_ctl: pd.DataFrame,
selected: np.array,
outcomes: Dict,
) -> float:
"""Calculate the harm from receiving an unwanted intervention."""
selected = selected.astype(bool)
s_napplied = sum(1 - out_dep.applied[selected])
ns_napplied = sum(1 - out_dep.applied[~selected])
Ir = s_napplied / (s_napplied + ns_napplied)
return Ir
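# Worked sketch: 2 of the 3 selected customers did not apply versus 1 of the 2
# not-selected customers, so the harm share is 2 / (2 + 1) = 2/3.
def _example_harm_from_unwanted():
    out_dep = pd.DataFrame({"applied": [0, 0, 1, 1, 0]})
    selected = np.array([True, True, True, False, False])
    return harm_from_unwanted(out_dep, out_ctl=None, selected=selected, outcomes={})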
def harm_from_unwanted_gini(
out_dep: pd.DataFrame,
out_ctl: pd.DataFrame,
selected: np.array,
outcomes: Dict,
prot_attr: str,
n_percentiles: int=20,
) -> float:
"""Calculate the harm from receiving unwanted intervention, continuous."""
selected = selected.astype(bool)
napplied = (1 - out_dep.applied).astype(bool)
s_napplied = np.logical_and(selected, napplied)
attributes = _get_attributes(out_dep)
A = np.array(attributes[prot_attr])
G = _gini_coefficient(s_napplied, A, n_percentiles)
return G
#
# Indirect/Causal Harm and Benefit Measures
#
def benefit_from_acquire(
out_dep: pd.DataFrame,
out_ctl: pd.DataFrame,
selected: np.array,
outcomes: Dict,
) -> float:
"""Calculate the benefit from acquiring the product."""
I_acq = sum(out_dep.acquired) / len(out_dep)
I_c_acq = sum(out_ctl.acquired) / len(out_ctl)
DI = I_acq - I_c_acq
return DI
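# Worked sketch: acquisition runs at 75% in the deployed cohort versus 25% in the
# experiment's control cohort, so the benefit (rate difference) is 0.5.
def _example_benefit_from_acquire():
    out_dep = pd.DataFrame({"acquired": [1, 0, 1, 1]})
    out_ctl = pd.DataFrame({"acquired": [1, 0, 0, 0]})
    return benefit_from_acquire(out_dep, out_ctl, selected=None, outcomes={})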
def harm_failed_application(
out_dep: pd.DataFrame,
out_ctl: pd.DataFrame,
selected: np.array,
outcomes: Dict,
denominator_applied: bool=False
) -> float:
"""Calculate the harm from a failed application."""
# Impact rates
sel_app_nacq = np.logical_and(out_dep.acquired == 0, out_dep.applied == 1)
ctl_app_nacq = np.logical_and(out_ctl.acquired == 0, out_ctl.applied == 1)
if denominator_applied:
I_acq = sum(sel_app_nacq) / sum(out_dep.applied)
I_c_acq = sum(ctl_app_nacq) / sum(out_ctl.applied)
else:
I_acq = sum(sel_app_nacq) / len(out_dep)
I_c_acq = sum(ctl_app_nacq) / len(out_ctl)
DI = I_acq - I_c_acq
return DI
def harm_longterm(
out_dep: pd.DataFrame,
out_ctl: pd.DataFrame,
selected: np.array,
outcomes: Dict,
) -> float:
"""Calculate the harm from a long-term credit outcome."""
# Impact rates
sel_acq_nsuc = np.logical_and(out_dep.success == 0, out_dep.acquired == 1)
ctl_acq_nsuc = np.logical_and(out_ctl.success == 0, out_ctl.acquired == 1)
Ncohort = sum(out_dep.acquired)
I_suc = sum(sel_acq_nsuc) / Ncohort
I_c_suc = sum(ctl_acq_nsuc) / sum(out_ctl.acquired)
DI = I_suc - I_c_suc
return DI
#
# Private module functions
#
def _get_attributes(y: pd.Series) -> pd.DataFrame:
"""Get the attributes from a target Series."""
attributes = y.index.to_frame(index=False)
attributes.set_index("ID", inplace=True)
return attributes
def _get_selection_protection(
estimator: Uplifter,
X: Union[np.array, pd.DataFrame],
y: pd.Series,
prot_attr: str,
priv_group: int
) -> Tuple[np.array, np.array]:
    """Get the selection and protected-attribute masks."""
selection = estimator.select(X)
attributes = _get_attributes(y)
protected = np.array(attributes[prot_attr] != priv_group)
return selection, protected
def _control_outcomes(y_train: pd.Series) -> pd.DataFrame:
"""Get the control outcomes from the experiment."""
attributes = _get_attributes(y_train)
# Filter only control data
incontrol = _get_control(y_train)
control = attributes[incontrol]
# Copy not selected outcomes to outcomes
outcomes = control[["ns_applied", "ns_acquired", "ns_success"]]
outcomes.columns = ["applied", "acquired", "success"]
return outcomes
def _get_control(y):
"""Get a mask of those in the control group."""
CR = np.array(y == "CR")
CN = np.array(y == "CN")
incontrol = np.logical_or(CR, CN)
return incontrol
# def _empirical_bootstrap(
# func: Callable,
# *data,
# replications: int,
# q_lower: float,
# q_upper: float,
# **fkwargs
# ) -> Tuple[float, float, float]:
# """Get the confidence intervals using the empirical bootstrap."""
# # get the score from data
# score = func(*data, **fkwargs)
# return score, score, score
def _empirical_bootstrap(
func: Callable,
*data,
replications: int,
q_lower: float,
q_upper: float,
**fkwargs
) -> Tuple[float, float, float]:
"""Get the confidence intervals using the empirical bootstrap."""
# get the score from data
score = func(*data, **fkwargs)
N = len(data[0])
# resample the data, get the score differences
samples = np.zeros(replications)
for r in range(replications):
rind = np.random.choice(N, N, replace=True)
sdata = [d.iloc[rind] for d in data]
score_sample = func(*sdata, **fkwargs)
samples[r] = score_sample - score
# Compute the quantiles of these differences, then compute corresponding
# quantiles for the score note that the quantiles of the differences are
# reversed when applying to the score.
d_l, d_u = mquantiles(samples, prob=[1. - q_lower, 1. - q_upper])
score_l, score_u = score - d_l, score - d_u
return score_l, score, score_u
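# Illustrative sketch: bootstrap an interval for a plain sample mean. Any statistic
# works as long as every data object supports `.iloc` indexing (pandas Series or
# DataFrames), which is what the resampling step assumes.
def _example_empirical_bootstrap():
    rng = np.random.RandomState(0)
    sample = pd.Series(rng.normal(loc=1.0, scale=1.0, size=200))
    lo, score, hi = _empirical_bootstrap(
        lambda s: s.mean(), sample,
        replications=500, q_lower=0.05, q_upper=0.95)
    return lo, score, hi  # a (q_lower, q_upper) interval around the sample mean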
def _gini_coefficient(
selection: np.array,
attribute: np.array,
n_percentiles: int
) -> float:
"""Gini coefficient of the selection shared over the attribute."""
# Cut the selected cohort lift into percentiles based on their attribute
percentiles = np.arange(n_percentiles)
perc_idx = pd.qcut(attribute, n_percentiles, labels=percentiles)
# Calculate the area under the Lorenz curve
hist = np.array([sum(selection[perc_idx == p]) for p in percentiles])
cum_select = np.cumsum(hist) / sum(selection)
area = simps(np.insert(cum_select, 0, 0), # start at 0
np.linspace(0, 1, n_percentiles + 1))
G = 1. - 2. * area
return G
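# Illustrative sketch: when selection probability rises with the attribute (a
# hypothetical "income" variable here), the Lorenz curve bows away from the
# diagonal and the coefficient moves away from 0; an attribute-blind random
# selection stays close to 0.
def _example_gini_coefficient():
    rng = np.random.RandomState(1)
    income = rng.uniform(0, 100, size=1000)
    skewed = rng.uniform(0, 100, size=1000) < income   # richer => more often selected
    blind = rng.uniform(0, 1, size=1000) < 0.5         # attribute-blind selection
    return (_gini_coefficient(skewed, income, n_percentiles=20),
            _gini_coefficient(blind, income, n_percentiles=20))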
#!/usr/bin/env python3
def main():
    for i in range(1, 11):
        for j in range(1, 11):
            if j == 10:
                print(i * j)
            else:
                print(i * j, end=" ")
if __name__ == "__main__":
main()
# coding=utf-8
# Copyright 2014-2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from time import time
from oslo_log import log as logging
from neutron.plugins.common import constants as plugin_const
from neutron_lbaas.services.loadbalancer import constants as lb_const
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex
from f5_openstack_agent.lbaasv2.drivers.bigip import l7policy_service
from f5_openstack_agent.lbaasv2.drivers.bigip import listener_service
from f5_openstack_agent.lbaasv2.drivers.bigip import pool_service
from f5_openstack_agent.lbaasv2.drivers.bigip import virtual_address
from f5_openstack_agent.lbaasv2.drivers.bigip import utils
from requests import HTTPError
LOG = logging.getLogger(__name__)
class LBaaSBuilder(object):
# F5 LBaaS Driver using iControl for BIG-IP to
# create objects (vips, pools) - not using an iApp."""
def __init__(self, conf, driver, l2_service=None):
self.conf = conf
self.driver = driver
self.l2_service = l2_service
self.service_adapter = driver.service_adapter
self.listener_builder = listener_service.ListenerServiceBuilder(self,
self.service_adapter,
driver.cert_manager,
conf.f5_parent_ssl_profile)
self.pool_builder = pool_service.PoolServiceBuilder(
self.service_adapter,
conf.f5_parent_https_monitor)
self.l7service = l7policy_service.L7PolicyService(self, conf)
self.esd = None
@utils.instrument_execution_time
def assure_service(self, service, traffic_group, all_subnet_hints, delete_event=False):
"""Assure that a service is configured on the BIGIP."""
start_time = time()
LOG.debug("Starting assure_service")
# Needed also for delete events because of subnet hints
self._assure_loadbalancer_created(service, all_subnet_hints)
# Create and update
if not delete_event:
self._assure_pools_created(service)
self._assure_listeners_created(service)
self._assure_monitors_created(service)
self._assure_members_created(service, all_subnet_hints)
self._assure_pools_configured(service)
self._assure_l7policies_created(service)
self._assure_l7rules_created(service)
else: # delete
self._assure_monitors_deleted(service)
self._assure_members_deleted(service, all_subnet_hints)
self._assure_l7rules_deleted(service)
self._assure_l7policies_deleted(service)
self._assure_pools_deleted(service)
self._assure_listeners_deleted(service)
self._assure_loadbalancer_deleted(service)
        LOG.debug("assure_service took %.5f secs" %
                  (time() - start_time))
return all_subnet_hints
@utils.instrument_execution_time
def _assure_loadbalancer_created(self, service, all_subnet_hints):
if 'loadbalancer' not in service:
return
bigips = self.driver.get_config_bigips()
loadbalancer = service["loadbalancer"]
vip_address = virtual_address.VirtualAddress(
self.service_adapter,
loadbalancer)
for bigip in bigips:
vip_address.assure(bigip)
if self.driver.l3_binding:
loadbalancer = service["loadbalancer"]
self.driver.l3_binding.bind_address(
subnet_id=loadbalancer["vip_subnet_id"],
ip_address=loadbalancer["vip_address"])
self._update_subnet_hints(loadbalancer["provisioning_status"],
loadbalancer["vip_subnet_id"],
loadbalancer["network_id"],
all_subnet_hints,
False)
if loadbalancer['provisioning_status'] != plugin_const.PENDING_DELETE:
loadbalancer['provisioning_status'] = plugin_const.ACTIVE
@utils.instrument_execution_time
def _assure_listeners_created(self, service):
if 'listeners' not in service:
return
listeners = service["listeners"]
loadbalancer = service["loadbalancer"]
networks = service["networks"]
bigips = self.driver.get_config_bigips()
old_listener = service.get('old_listener')
for listener in listeners:
            if old_listener is not None and old_listener.get('id') == listener.get('id'):
svc = {"loadbalancer": loadbalancer,
"listener": listener,
"old_listener": old_listener,
"networks": networks}
else:
svc = {"loadbalancer": loadbalancer,
"listener": listener,
"networks": networks}
default_pool_id = listener.get('default_pool_id', '')
if default_pool_id:
pool = self.get_pool_by_id(service, default_pool_id)
if pool:
svc['pool'] = pool
if listener['provisioning_status'] == plugin_const.PENDING_UPDATE:
try:
self.listener_builder.update_listener(svc, bigips)
except Exception as err:
loadbalancer['provisioning_status'] = plugin_const.ERROR
listener['provisioning_status'] = plugin_const.ERROR
LOG.exception(err)
raise f5_ex.VirtualServerUpdateException(err.message)
elif listener['provisioning_status'] != \
plugin_const.PENDING_DELETE:
try:
# create_listener() will do an update if VS exists
self.listener_builder.create_listener(svc, bigips)
listener['operating_status'] = \
svc['listener']['operating_status']
except Exception as err:
loadbalancer['provisioning_status'] = plugin_const.ERROR
listener['provisioning_status'] = plugin_const.ERROR
raise f5_ex.VirtualServerCreationException(err.message)
if listener['provisioning_status'] != plugin_const.PENDING_DELETE:
listener['provisioning_status'] = plugin_const.ACTIVE
@utils.instrument_execution_time
def _assure_pools_created(self, service):
if "pools" not in service:
return
pools = service["pools"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for pool in pools:
if pool['provisioning_status'] != plugin_const.PENDING_DELETE:
svc = {"loadbalancer": loadbalancer, "pool": pool}
svc['members'] = self._get_pool_members(service, pool['id'])
try:
# create or update pool
if pool['provisioning_status'] == \
plugin_const.PENDING_CREATE:
self.pool_builder.create_pool(svc, bigips)
else:
try:
self.pool_builder.update_pool(svc, bigips)
except HTTPError as err:
if err.response.status_code == 404:
self.pool_builder.create_pool(svc, bigips)
except HTTPError as err:
if err.response.status_code != 409:
pool['provisioning_status'] = plugin_const.ERROR
loadbalancer['provisioning_status'] = (
plugin_const.ERROR)
raise f5_ex.PoolCreationException(err.message)
except Exception as err:
pool['provisioning_status'] = plugin_const.ERROR
loadbalancer['provisioning_status'] = plugin_const.ERROR
raise f5_ex.PoolCreationException(err.message)
pool['provisioning_status'] = plugin_const.ACTIVE
@utils.instrument_execution_time
def _assure_pools_configured(self, service):
if "pools" not in service:
return
pools = service["pools"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for pool in pools:
if pool['provisioning_status'] != plugin_const.PENDING_DELETE:
svc = {"loadbalancer": loadbalancer, "pool": pool}
svc['members'] = self._get_pool_members(service, pool['id'])
try:
# assign pool name to virtual
pool_name = self.service_adapter.init_pool_name(
loadbalancer, pool)
# get associated listeners for pool
for listener in pool['listeners']:
listener = self.get_listener_by_id(service, listener['id'])
if listener:
svc['listener'] = listener
self.listener_builder.update_listener_pool(
svc, pool_name["name"], bigips)
# update virtual sever pool name, session persistence
self.listener_builder.update_session_persistence(
svc, bigips)
# ccloud: update pool to set lb_method right
self.pool_builder.update_pool(svc, bigips)
pool['provisioning_status'] = plugin_const.ACTIVE
except HTTPError as err:
if err.response.status_code != 409:
pool['provisioning_status'] = plugin_const.ERROR
loadbalancer['provisioning_status'] = (
plugin_const.ERROR)
LOG.exception(err)
raise f5_ex.PoolCreationException("ccloud: Error #1" + err.message)
except Exception as err:
pool['provisioning_status'] = plugin_const.ERROR
loadbalancer['provisioning_status'] = plugin_const.ERROR
LOG.exception(err)
raise f5_ex.PoolCreationException("ccloud: Error #2" + err.message)
@utils.instrument_execution_time
def _get_pool_members(self, service, pool_id):
'''Return a list of members associated with given pool.'''
members = []
for member in service['members']:
if member['pool_id'] == pool_id:
members.append(member)
return members
@utils.instrument_execution_time
def _update_listener_pool(self, service, listener_id, pool_name, bigips):
listener = self.get_listener_by_id(service, listener_id)
if listener is not None:
try:
listener["pool"] = pool_name
svc = {"loadbalancer": service["loadbalancer"],
"listener": listener}
self.listener_builder.update_listener(svc, bigips)
except Exception as err:
listener['provisioning_status'] = plugin_const.ERROR
raise f5_ex.VirtualServerUpdateException(err.message)
@utils.instrument_execution_time
def _assure_monitors_deleted(self, service):
if not (("pools" in service) and ("healthmonitors" in service)):
return
monitors = service["healthmonitors"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for monitor in monitors:
svc = {"loadbalancer": loadbalancer,
"healthmonitor": monitor,
"pool": self.get_pool_by_id(service, monitor["pool_id"])}
if monitor['provisioning_status'] == plugin_const.PENDING_DELETE:
try:
self.pool_builder.delete_healthmonitor(svc, bigips)
except Exception as err:
monitor['provisioning_status'] = plugin_const.ERROR
raise f5_ex.MonitorDeleteException(err.message)
@utils.instrument_execution_time
def _assure_monitors_created(self, service):
if not (("pools" in service) and ("healthmonitors" in service)):
return
monitors = service["healthmonitors"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for monitor in monitors:
svc = {"loadbalancer": loadbalancer,
"healthmonitor": monitor,
"pool": self.get_pool_by_id(service, monitor["pool_id"])}
if monitor['provisioning_status'] != plugin_const.PENDING_DELETE:
try:
self.pool_builder.create_healthmonitor(svc, bigips)
except HTTPError as err:
if err.response.status_code != 409:
# pool['provisioning_status'] = plugin_const.ERROR
loadbalancer['provisioning_status'] = (
plugin_const.ERROR
)
raise f5_ex.MonitorCreationException(err.message)
else:
self.pool_builder.update_healthmonitor(svc, bigips)
except Exception as err:
monitor['provisioning_status'] = plugin_const.ERROR
raise f5_ex.MonitorCreationException(err.message)
monitor['provisioning_status'] = plugin_const.ACTIVE
@utils.instrument_execution_time
def _assure_members_created(self, service, all_subnet_hints):
if not (("pools" in service) and ("members" in service)):
return
members = service["members"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for member in members:
pool = self.get_pool_by_id(service, member["pool_id"])
svc = {"loadbalancer": loadbalancer,
"member": member,
"pool": pool}
if 'port' not in member and \
member['provisioning_status'] != plugin_const.PENDING_DELETE:
LOG.warning("Member definition does not include Neutron port")
            # create/update the member unless the member or its pool is being deleted
if not (member['provisioning_status'] == plugin_const.PENDING_DELETE or \
pool['provisioning_status'] == plugin_const.PENDING_DELETE):
try:
self.pool_builder.create_member(svc, bigips)
member['provisioning_status'] = plugin_const.ACTIVE
except HTTPError as err:
if err.response.status_code != 409:
# FIXME(RB)
# pool['provisioning_status'] = plugin_const.ERROR
loadbalancer['provisioning_status'] = (
plugin_const.ERROR
)
raise f5_ex.MemberCreationException(err.message)
else:
try:
self.pool_builder.update_member(svc, bigips)
except Exception as err:
member['provisioning_status'] = plugin_const.ERROR
raise f5_ex.MemberUpdateException(err.message)
except Exception as err:
member['provisioning_status'] = plugin_const.ERROR
raise f5_ex.MemberCreationException(err.message)
self._update_subnet_hints(member["provisioning_status"],
member["subnet_id"],
member["network_id"],
all_subnet_hints,
True)
@utils.instrument_execution_time
def _assure_members_deleted(self, service, all_subnet_hints):
if not (("pools" in service) and ("members" in service)):
return
members = service["members"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for member in members:
pool = self.get_pool_by_id(service, member["pool_id"])
svc = {"loadbalancer": loadbalancer,
"member": member,
"pool": pool}
if 'port' not in member and \
member['provisioning_status'] != plugin_const.PENDING_DELETE:
LOG.warning("Member definition does not include Neutron port")
# delete member if pool is being deleted
if member['provisioning_status'] == plugin_const.PENDING_DELETE or \
pool['provisioning_status'] == plugin_const.PENDING_DELETE:
try:
self.pool_builder.delete_member(svc, bigips)
except Exception as err:
member['provisioning_status'] = plugin_const.ERROR
raise f5_ex.MemberDeleteException(err.message)
self._update_subnet_hints(member["provisioning_status"],
member["subnet_id"],
member["network_id"],
all_subnet_hints,
True)
@utils.instrument_execution_time
def _assure_loadbalancer_deleted(self, service):
if (service['loadbalancer']['provisioning_status'] !=
plugin_const.PENDING_DELETE):
return
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
if self.driver.l3_binding:
self.driver.l3_binding.unbind_address(
subnet_id=loadbalancer["vip_subnet_id"],
ip_address=loadbalancer["vip_address"])
vip_address = virtual_address.VirtualAddress(
self.service_adapter,
loadbalancer)
for bigip in bigips:
vip_address.assure(bigip, delete=True)
@utils.instrument_execution_time
def _assure_pools_deleted(self, service):
if 'pools' not in service:
return
pools = service["pools"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for pool in pools:
# Is the pool being deleted?
if pool['provisioning_status'] == plugin_const.PENDING_DELETE:
svc = {"loadbalancer": loadbalancer,
"pool": pool}
try:
# update listeners for pool
for listener in pool['listeners']:
svc['listener'] = \
self.get_listener_by_id(service, listener['id'])
# remove pool name from virtual before deleting pool
self.listener_builder.update_listener_pool(
svc, "", bigips)
self.listener_builder.remove_session_persistence(
svc, bigips)
# delete pool
self.pool_builder.delete_pool(svc, bigips)
except Exception as err:
pool['provisioning_status'] = plugin_const.ERROR
raise f5_ex.PoolDeleteException(err.message)
@utils.instrument_execution_time
def _assure_listeners_deleted(self, service):
if 'listeners' not in service:
return
listeners = service["listeners"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for listener in listeners:
if listener['provisioning_status'] == plugin_const.PENDING_DELETE:
svc = {"loadbalancer": loadbalancer,
"listener": listener}
# ccloud: try to delete persistence which might be attached to listener
# ignore errors, persistence might be used somewhere else if pool is used more than once as default
try:
self.listener_builder.remove_session_persistence(
svc, bigips)
except Exception:
pass
# delete the listener
try:
self.listener_builder.delete_listener(svc, bigips)
except Exception as err:
listener['provisioning_status'] = plugin_const.ERROR
raise f5_ex.VirtualServerDeleteException(err.message)
@staticmethod
def _check_monitor_delete(service):
# If the pool is being deleted, then delete related objects
if service['pool']['status'] == plugin_const.PENDING_DELETE:
            # Everything needs to go with the pool, so overwrite
# service state to appropriately remove all elements
service['vip']['status'] = plugin_const.PENDING_DELETE
for member in service['members']:
member['status'] = plugin_const.PENDING_DELETE
for monitor in service['pool']['health_monitors_status']:
monitor['status'] = plugin_const.PENDING_DELETE
@staticmethod
def get_pool_by_id(service, pool_id):
if pool_id and "pools" in service:
pools = service["pools"]
for pool in pools:
if pool["id"] == pool_id:
return pool
return None
@staticmethod
def get_listener_by_id(service, listener_id):
if "listeners" in service:
listeners = service["listeners"]
for listener in listeners:
if listener["id"] == listener_id:
return listener
return None
def _update_subnet_hints(self, status, subnet_id,
network_id, all_subnet_hints, is_member):
bigips = self.driver.get_config_bigips()
for bigip in bigips:
subnet_hints = all_subnet_hints[bigip.device_name]
if status == plugin_const.PENDING_CREATE or \
status == plugin_const.PENDING_UPDATE:
if subnet_id in subnet_hints['check_for_delete_subnets']:
del subnet_hints['check_for_delete_subnets'][subnet_id]
if subnet_id not in subnet_hints['do_not_delete_subnets']:
subnet_hints['do_not_delete_subnets'].append(subnet_id)
elif status == plugin_const.PENDING_DELETE:
if subnet_id not in subnet_hints['do_not_delete_subnets']:
subnet_hints['check_for_delete_subnets'][subnet_id] = \
{'network_id': network_id,
'subnet_id': subnet_id,
'is_for_member': is_member}
@utils.instrument_execution_time
def listener_exists(self, bigip, service):
"""Test the existence of the listener defined by service."""
try:
# Throw an exception if the listener does not exist.
self.listener_builder.get_listener(service, bigip)
except HTTPError as err:
            LOG.debug("Virtual server discovery error, %s." %
                      err.message)
return False
return True
@utils.instrument_execution_time
def _assure_l7policies_created(self, service):
if 'l7policies' not in service:
return
bigips = self.driver.get_config_bigips()
l7policies = service['l7policies']
for l7policy in l7policies:
if l7policy['provisioning_status'] != plugin_const.PENDING_DELETE:
try:
name = l7policy.get('name', None)
if name and self.is_esd(name):
continue
else:
self.l7service.create_l7policy(
l7policy, service, bigips)
except Exception as err:
l7policy['provisioning_status'] = plugin_const.ERROR
service['loadbalancer']['provisioning_status'] = \
plugin_const.ERROR
raise f5_ex.L7PolicyCreationException(err.message)
l7policy['provisioning_status'] = plugin_const.ACTIVE
@utils.instrument_execution_time
def _assure_l7policies_deleted(self, service):
if 'l7policies' not in service:
return
bigips = self.driver.get_config_bigips()
l7policies = service['l7policies']
for l7policy in l7policies:
if l7policy['provisioning_status'] == plugin_const.PENDING_DELETE:
try:
name = l7policy.get('name', None)
if name and self.is_esd(name):
continue
else:
# Note: use update_l7policy because a listener can have
# multiple policies
self.l7service.update_l7policy(
l7policy, service, bigips)
except Exception as err:
l7policy['provisioning_status'] = plugin_const.ERROR
service['loadbalancer']['provisioning_status'] = \
plugin_const.ERROR
raise f5_ex.L7PolicyDeleteException(err.message)
@utils.instrument_execution_time
def _assure_l7rules_created(self, service):
if 'l7policy_rules' not in service:
return
bigips = self.driver.get_config_bigips()
l7rules = service['l7policy_rules']
for l7rule in l7rules:
if l7rule['provisioning_status'] != plugin_const.PENDING_DELETE:
try:
# ignore L7 rule if its policy is really an ESD
l7policy = self.get_l7policy_for_rule(
service['l7policies'], l7rule)
name = l7policy.get('name', None)
if name and self.is_esd(name):
                        LOG.error("L7 policy {0} is an ESD. Cannot add "
                                  "an L7 rule to an ESD.".format(name))
continue
self.l7service.create_l7rule(l7rule, service, bigips)
except Exception as err:
l7rule['provisioning_status'] = plugin_const.ERROR
service['loadbalancer']['provisioning_status'] = \
plugin_const.ERROR
raise f5_ex.L7PolicyCreationException(err.message)
l7rule['provisioning_status'] = plugin_const.ACTIVE
@utils.instrument_execution_time
def _assure_l7rules_deleted(self, service):
if 'l7policy_rules' not in service:
return
bigips = self.driver.get_config_bigips()
l7rules = service['l7policy_rules']
for l7rule in l7rules:
if l7rule['provisioning_status'] == plugin_const.PENDING_DELETE:
try:
# ignore L7 rule if its policy is really an ESD
l7policy = self.get_l7policy_for_rule(
service['l7policies'], l7rule)
name = l7policy.get('name', None)
if name and self.is_esd(name):
continue
self.l7service.bigips = self.driver.get_config_bigips()
self.l7service.delete_l7rule(l7rule, service, bigips)
except Exception as err:
l7rule['provisioning_status'] = plugin_const.ERROR
service['loadbalancer']['provisioning_status'] = \
plugin_const.ERROR
raise f5_ex.L7PolicyDeleteException(err.message)
@utils.instrument_execution_time
def get_listener_stats(self, service, stats):
"""Get statistics for a loadbalancer service.
Sums values for stats defined in stats dictionary for all listeners
defined in service object. For example, if loadbalancer has two
listeners and stats defines a stat 'clientside.bitsIn' as a key, the
sum of all pools' clientside.bitsIn will be returned in stats.
Provisioning status is ignored -- PENDING_DELETE objects are
included.
:param service: defines loadbalancer and set of pools.
:param stats: a dictionary that defines which stats to get.
Should be initialized by caller with 0 values.
:return: stats are appended to input stats dict (i.e., contains
the sum of given stats for all BIG-IPs).
"""
listeners = service["listeners"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
collected_stats = {}
for stat in stats:
collected_stats[stat] = 0
for listener in listeners:
svc = {"loadbalancer": loadbalancer, "listener": listener}
vs_stats = self.listener_builder.get_stats(svc, bigips, stats)
for stat in stats:
collected_stats[stat] += vs_stats[stat]
return collected_stats
@utils.instrument_execution_time
def update_operating_status(self, service):
bigip = self.driver.get_active_bigip()
loadbalancer = service["loadbalancer"]
status_keys = ['status.availabilityState',
'status.enabledState']
members = service["members"]
for member in members:
if member['provisioning_status'] == plugin_const.ACTIVE:
pool = self.get_pool_by_id(service, member["pool_id"])
svc = {"loadbalancer": loadbalancer,
"member": member,
"pool": pool}
status = self.pool_builder.get_member_status(
svc, bigip, status_keys)
member['operating_status'] = self.convert_operating_status(
status)
@staticmethod
def convert_operating_status(status):
"""Convert object status to LBaaS operating status.
status.availabilityState and status.enabledState = Operating Status
available enabled ONLINE
available disabled DISABLED
offline - OFFLINE
unknown - NO_MONITOR
"""
op_status = None
available = status.get('status.availabilityState', '')
if available == 'available':
enabled = status.get('status.enabledState', '')
if enabled == 'enabled':
op_status = lb_const.ONLINE
elif enabled == 'disabled':
op_status = lb_const.DISABLED
else:
LOG.warning('Unexpected value %s for status.enabledState',
enabled)
elif available == 'offline':
op_status = lb_const.OFFLINE
elif available == 'unknown':
op_status = lb_const.NO_MONITOR
return op_status
def get_l7policy_for_rule(self, l7policies, l7rule):
policy_id = l7rule['policy_id']
for policy in l7policies:
if policy_id == policy['id']:
return policy
return None
def init_esd(self, esd):
self.esd = esd
def get_esd(self, name):
if self.esd:
return self.esd.get_esd(name)
return None
def is_esd(self, name):
return self.esd.get_esd(name) is not None
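# Illustrative sketch (not part of the original driver): convert_operating_status
# is a pure static method, so the documented mapping can be exercised directly.
# The dictionaries below are hypothetical BIG-IP status payloads.
def _example_convert_operating_status():
    online = {'status.availabilityState': 'available',
              'status.enabledState': 'enabled'}
    offline = {'status.availabilityState': 'offline'}
    return (LBaaSBuilder.convert_operating_status(online),    # lb_const.ONLINE
            LBaaSBuilder.convert_operating_status(offline))   # lb_const.OFFLINE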
"""Base case, provide cluster specific assertion and cluster
facilities to make test easy to read.
"""
import os
import string
from docker import errors
from random import choice, randint
from . import const
from . import cluster
class ClusterTestCase:
def __init__(self):
self.cluster = cluster.Cluster()
def assert_key_exists(self, key):
"""Make sure a key exists in the consul k/v store"""
assert key in self.cluster.consul.kv
def assert_volume_exists_only_on(self, volume, node_name, kind='local'):
for name, node in self.cluster.nodes.items():
volumes = node['docker_cli'].volumes.list(
filters=dict(name=volume)
)
if node_name == name:
assert len(volumes) == 1, \
"We expect 1 volume named {} on node {}, " \
"found {} volumes {}".format(
volume, node_name, len(volumes),
[v.name for v in volumes]
)
assert volumes[0].attrs['Driver'] == kind,\
"Volume {} on node {} use {} driver, {} was " \
"expected".format(
volume, node_name, volumes[0].attrs['Driver'], kind
)
else:
assert len(volumes) == 0, \
"We expect 0 volume called {} on node {}, " \
"found {} volumes {}".format(
volume, name, len(volumes),
[v.name for v in volumes]
)
def assert_consul_service_on_node(self, service_id, node):
assert self.cluster.consul.catalog.service(
service_id
)[0]['Node'] == node
def assert_project_cloned(self, application, deploy_id, nodes=None):
if not nodes or not isinstance(nodes, list):
nodes = []
path = os.path.join(
cluster.DEPLOY_ROOT_DIR,
"{}-{}".format(application.name, deploy_id),
".env"
)
expected_content = "COMPOSE_PROJECT_NAME={}\n".format(
application.compose_project_name
)
for name, _ in self.cluster.nodes.items():
if name in nodes:
self.assert_file(
name, const.consul['container'], path, expected_content
)
else:
self.assert_file(
name,
const.consul['container'],
path,
"cat: can't open '{}': No such file or "
"directory\n".format(path),
)
def assert_btrfs_scheduled(self, kind, volume, nodes):
"""Assert btrfs scheduled are present on given nodes and absent on
others"""
def filter_schedule(schedule, kind, volume):
if schedule.volume == volume and schedule.kind == kind:
return True
for name, node in self.cluster.nodes.items():
container = node['docker_cli'].containers.get(
const.consul['container']
)
scheduled = self.cluster.get_scheduled(
container, filter_schedule, kind, volume
)
if name in nodes:
                assert len(scheduled) == 1, \
                    "We expected 1 schedule {} on node {} for {} volume, " \
"but {} were found.".format(
kind, name, volume, len(scheduled)
)
else:
                assert len(scheduled) == 0, \
                    "We expected 0 schedule {} on node {} for {} volume, " \
"but {} were found.".format(
kind, name, volume, len(scheduled)
)
def assert_container_running_on(self, containers, nodes):
for name, node in self.cluster.nodes.items():
for container_name in containers:
try:
container = node['docker_cli'].containers.get(
container_name
)
                except errors.NotFound:
                    container = None
if name in nodes:
assert container.status == 'running'
else:
assert container is None
def assert_file(self, node, container, path, expected_content):
"""Make sure expected content is present in a container:
:param node : node where service is running
:param container: container name
:param path : path to the file (inside the docker container) to
assert content
:param expected_content: content to assert
"""
content = self.cluster.nodes.get(node)['docker_cli'].containers.get(
container
).exec_run(
'sh -c "sleep 0.1; cat {}"'.format(path)
).output.decode('utf-8')
assert expected_content.strip() == content.strip(), \
"Content not matched, expected: {} - got {}".format(
expected_content, content
)
def generate_run_id(self):
allowedchars = string.ascii_lowercase + string.digits
# do not use uuid to avoid length exceeded limitation
return "".join(
choice(allowedchars) for x in range(randint(3, 5))
)
"""
Helper function: given a manager instance, automatically build a CLI that can be exposed as a package script
"""
import argparse
from pprint import pprint as pp
import sys
import typing as ty
from . import exceptions, manager
class AssetCLI:
def __init__(self, manager: manager.AssetManager):
self._manager = manager
def run(self):
args = self._parse_args()
args.func(args)
def _parse_args(self):
def add_common(subparser):
group = subparser.add_mutually_exclusive_group()
group.add_argument('--all', help='Apply this action for all assets in manifest', action='store_true')
group.add_argument('--type', help='Apply this action for a specific record type', nargs=1)
subparser.add_argument('--tag', nargs=2, action='append',
help="Tag attributes with metadata for the desired item")
parser = argparse.ArgumentParser(description="Manage and download assets for {}".format(self._manager.name))
parser.add_argument('--local', nargs='?', default=None, help='Base path for the local cache directory')
parser.add_argument('--remote', nargs='?', default=None, help='Base URL for downloading pre-built assets')
# parser.add_argument('-y', '--yes', help='Automatic yes to prompts; run non-interactively')
subparsers = parser.add_subparsers(dest='cmd', help='Several sub-commands are available')
subparsers.required = True
show_parser = subparsers.add_parser('show', help='Show information about assets in local cache')
show_parser.add_argument('--available', default=False, action='store_true',
help='Show assets available for download, rather than what is currently installed')
add_common(show_parser)
show_parser.set_defaults(func=self.show_command)
# TODO (future) add an update feature?
download_parser = subparsers.add_parser('download', help='Download the specified assets (pre-built)')
add_common(download_parser)
download_parser.set_defaults(func=self.download_command)
download_parser.add_argument('--no-update', dest='no_update', default=False, action='store_true',
help='Skip downloading of assets that already exist locally')
build_parser = subparsers.add_parser('build', help='Build the specified assets from a recipe')
add_common(build_parser)
build_parser.set_defaults(func=self.build_command)
return parser.parse_args()
def _validate_common(self, args):
single_item = args.type is not None
if args.all and single_item:
sys.exit('Options "--all" and "--type" are mutually exclusive')
if not args.all and not single_item:
sys.exit('Must specify asset using `--all` or `--type name --tag key1 value1 --tag key2 value2...`')
def _set_manifests(self, args):
if args.local:
self._manager.set_local_manifest(args.local)
if args.remote:
self._manager.set_remote_manifest(args.remote)
def _get_matching_records(self, args, manifest) -> ty.List[dict]:
"""Get one or more matching records"""
if args.all:
records = manifest._items # type: ty.List[dict]
else:
tags = dict(args.tag or [])
try:
records = [manifest.locate(args.type[0], **tags)]
except exceptions.NoMatchingAsset:
records = []
return records
# Implement specific CLI subcommands
def show_command(self, args):
"""
Show full manifest information about currently available assets
"""
self._validate_common(args)
self._set_manifests(args)
use_local = not args.available
manifest = self._manager._local if use_local else self._manager._remote
if not use_local:
manifest.load()
records = self._get_matching_records(args, manifest)
print('- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -')
print('Local manifest path: ', self._manager._local._manifest_path)
print('Remote manifest path: ', self._manager._remote._manifest_path)
if not len(records):
sys.exit("No matching items found.")
for record in records:
print('- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -')
pp(record)
def download_command(self, args):
"""
Download one or more assets from the specified remote location
:param args:
:return:
"""
self._validate_common(args)
self._set_manifests(args)
manifest = self._manager._remote
manifest.load()
records = self._get_matching_records(args, manifest)
if not len(records):
sys.exit("No matching items found.")
for record in records:
try:
self._manager.download(record['_type'], **record)
print('Successfully downloaded file: {}'.format(record['_path']))
except exceptions.ImmutableManifestError as e:
if args.no_update:
print('Asset already exists; will not download: {}'.format(record['_path']))
else:
raise e
if len(records) > 1:
print('All files successfully downloaded. Thank you.')
def build_command(self, args):
"""
Build one or more assets from the specified recipe
"""
self._validate_common(args)
self._set_manifests(args)
manifest = self._manager._recipes
records = self._get_matching_records(args, manifest)
if not len(records):
sys.exit("No matching items found.")
for record in records:
result = self._manager.build(record['_type'], **record)
print('The requested asset has been built: {}'.format(result['_path']))
if len(records) > 1:
print('All files have been successfully built. Thank you.')
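# Illustrative wiring sketch (not from the original helper): a host package would
# typically expose this CLI as a console_script by constructing its own
# manager.AssetManager and handing it to AssetCLI. The function below only shows
# that hand-off; it is an assumed pattern, not part of this module's API.
def main(asset_manager: manager.AssetManager) -> None:
    """Entry-point sketch: run the generated CLI for a concrete asset manager."""
    AssetCLI(asset_manager).run()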
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
from rezgui.objects.App import app
from rezgui.windows.MainWindow import MainWindow
import os.path
import sys
def get_context_files(filepaths):
context_files = []
for path in filepaths:
if os.path.exists(path):
if os.path.isfile(path):
context_files.append(os.path.abspath(path))
else:
raise IOError("Not a file: %s" % path)
else:
open(path) # raise IOError
return context_files
def run(opts=None, parser=None):
main_window = MainWindow()
app.set_main_window(main_window)
main_window.show()
if opts.diff:
# open context in diff mode against another
context_files = get_context_files(opts.diff)
subwindow = main_window.open_context_and_diff_with_file(*context_files)
if subwindow:
subwindow.showMaximized()
else:
# open contexts
context_files = get_context_files(opts.FILE or [])
for filepath in context_files:
subwindow = main_window.open_context(filepath)
if len(context_files) == 1:
subwindow.showMaximized()
else:
main_window.cascade()
sys.exit(app.exec_())
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import List, Set, Mapping, Tuple, Sequence
import pytest
from fastapi import FastAPI
from pydantic import BaseModel, ValidationError
from starlette.testclient import TestClient
from fastapi_contrib.db.models import MongoDBTimeStampedModel
from fastapi_contrib.serializers import openapi
from fastapi_contrib.serializers.common import Serializer, ModelSerializer
from tests.mock import MongoDBMock
from tests.utils import override_settings
from unittest.mock import patch
from tests.utils import AsyncMock
app = FastAPI()
app.mongodb = MongoDBMock(inserted_id=3)
class RouteTestModel(MongoDBTimeStampedModel):
c: str
class Meta:
collection = "collection"
@openapi.patch
class RouteTestSerializer(ModelSerializer):
a: int = 1
d: int = None
class Meta:
model = RouteTestModel
read_only_fields = {"id"}
write_only_fields = {"c"}
@app.post(
"/test/",
response_model=RouteTestSerializer.response_model
)
async def routetester(serializer: RouteTestSerializer) -> dict:
instance = await serializer.save()
return instance.dict()
def test_serializer_inheritance_works():
@openapi.patch
class TestSerializer(Serializer):
a = 1
def b(self):
return "b"
serializer = TestSerializer()
assert serializer.a == 1
assert serializer.b() == "b"
def test_model_serializer_inheritance_works():
class Model(BaseModel):
e: int = 2
f: str
@openapi.patch
class TestSerializer(ModelSerializer):
a = 1
c: str
d: int = None
def b(self):
return "b"
class Meta:
model = Model
serializer = TestSerializer(c="2", d=3, f="4")
assert serializer.a == 1
assert serializer.c == "2"
assert serializer.d == 3
assert serializer.b() == "b"
assert serializer.e == 2
assert serializer.f == "4"
with pytest.raises(ValidationError):
TestSerializer(c=dict(), d="asd")
with pytest.raises(ValidationError):
TestSerializer(c=None, d=None)
with pytest.raises(ValidationError):
TestSerializer()
def test_sanitize_list_serializer():
@openapi.patch
class TestSerializer(Serializer):
a: int = 1
data = [{"a": 1}, {"b": 2}, {"c": 3}]
sanitized_data = TestSerializer.sanitize_list(data)
assert data == sanitized_data
@openapi.patch
class TestSerializer(Serializer):
a: int = 1
class Meta:
exclude: set = {"b", "c"}
data = [{"a": 1, "b": 2}, {"b": 2}, {"c": 3}]
sanitized_data = TestSerializer.sanitize_list(data)
assert [{"a": 1}, {}, {}] == sanitized_data
@pytest.mark.asyncio
async def test_serializer_save():
@openapi.patch
class TestSerializer(Serializer):
a: int = 1
serializer = TestSerializer()
await serializer.save()
assert not hasattr(serializer, "id")
@pytest.mark.asyncio
async def test_serializer_update_one():
@openapi.patch
class TestSerializer(Serializer):
a: int = 1
serializer = TestSerializer()
await serializer.update_one(filter_kwargs={"id": 1})
assert not hasattr(serializer, "id")
@pytest.mark.asyncio
async def test_serializer_update_many():
@openapi.patch
class TestSerializer(Serializer):
a: int = 1
serializer = TestSerializer()
await serializer.update_many(filter_kwargs={"id": 1})
assert not hasattr(serializer, "id")
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_serializers.app")
async def test_model_serializer_save():
class Model(MongoDBTimeStampedModel):
class Meta:
collection = "collection"
@openapi.patch
class TestSerializer(ModelSerializer):
a = 1
c: str
d: int = None
class Meta:
model = Model
serializer = TestSerializer(c="2")
instance = await serializer.save()
assert instance.id == 1
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_serializers.app")
async def test_model_serializer_update_one():
class Model(MongoDBTimeStampedModel):
class Meta:
collection = "collection"
@openapi.patch
class TestSerializer(ModelSerializer):
a = 1
c: str
d: int = None
class Meta:
model = Model
serializer = TestSerializer(c="2")
result = await serializer.update_one({"a": 1})
assert result.raw_result == {}
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_serializers.app")
async def test_models_serializer_update_one_with_push():
with patch(
'fastapi_contrib.db.models.MongoDBModel.update_one',
new_callable=AsyncMock) as mock_update:
class Model(MongoDBTimeStampedModel):
class Meta:
collection = "collection"
@openapi.patch
class TestSerializer(ModelSerializer):
a = 1
c: str = ''
d: int = None
l: list = []
class Meta:
model = Model
serializer = TestSerializer(c="2", l=[1, 2])
await serializer.update_one({'id': 1}, array_fields=['l'])
mock_update.mock.assert_called_with(filter_kwargs={'id': 1}, **{
'$set': {'c': '2'},
'$push': {'l': {'$each': [1, 2]}}}
)
serializer = TestSerializer(l=[1, 2])
await serializer.update_one({'id': 1}, array_fields=['l'])
mock_update.mock.assert_called_with(filter_kwargs={'id': 1}, **{
'$push': {'l': {'$each': [1, 2]}}}
)
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_serializers.app")
async def test_models_serializer_update_one_skip_defaults():
with patch(
'fastapi_contrib.db.models.MongoDBModel.update_one',
new_callable=AsyncMock) as mock_update:
class Model(MongoDBTimeStampedModel):
class Meta:
collection = "collection"
@openapi.patch
class TestSerializer(ModelSerializer):
a = 1
c: str
d: int = None
class Meta:
model = Model
serializer = TestSerializer(c="2")
await serializer.update_one({'id': 1})
mock_update.mock.assert_called_with(
filter_kwargs={'id': 1},
**{'$set': {'c': '2'}}
)
await serializer.update_one({'id': 1}, skip_defaults=False)
mock_update.mock.assert_called_with(filter_kwargs={'id': 1}, **{
'$set': {'id': None, 'created': None, 'c': '2', 'a': 1, 'd': None}}
)
@override_settings(fastapi_app="tests.db.test_serializers.app")
def test_model_serializer_in_route():
from fastapi_contrib.db.client import MongoDBClient
MongoDBClient.__instance = None
MongoDBClient._MongoDBClient__instance = None
test_client = TestClient(app)
response = test_client.post("/test/", json={"c": "cc", "id": 123})
assert response.status_code == 200
response = response.json()
assert response["id"] == 3
assert "c" not in response.keys()
@pytest.mark.asyncio
async def test_model_serializer_update_many():
class Model(MongoDBTimeStampedModel):
class Meta:
collection = "collection"
@openapi.patch
class TestSerializer(ModelSerializer):
a = 1
c: str
d: int = None
class Meta:
model = Model
serializer = TestSerializer(c="2")
result = await serializer.update_many({"a": 1})
assert result.raw_result == {}
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_serializers.app")
async def test_models_serializer_update_many_with_push():
with patch(
'fastapi_contrib.db.models.MongoDBModel.update_many',
new_callable=AsyncMock) as mock_update:
class Model(MongoDBTimeStampedModel):
class Meta:
collection = "collection"
@openapi.patch
class TestSerializer(ModelSerializer):
a = 1
c: str = ''
d: int = None
l: list = []
class Meta:
model = Model
serializer = TestSerializer(c="2", l=[1, 2])
await serializer.update_many({'id': 1}, array_fields=['l'])
mock_update.mock.assert_called_with(filter_kwargs={'id': 1}, **{
'$set': {'c': '2'},
'$push': {'l': {'$each': [1, 2]}}}
)
serializer = TestSerializer(l=[1, 2])
await serializer.update_many({'id': 1}, array_fields=['l'])
mock_update.mock.assert_called_with(filter_kwargs={'id': 1}, **{
'$push': {'l': {'$each': [1, 2]}}}
)
@pytest.mark.asyncio
@override_settings(fastapi_app="tests.db.test_serializers.app")
async def test_models_serializer_update_many_skip_defaults():
with patch(
'fastapi_contrib.db.models.MongoDBModel.update_many',
new_callable=AsyncMock) as mock_update:
class Model(MongoDBTimeStampedModel):
class Meta:
collection = "collection"
@openapi.patch
class TestSerializer(ModelSerializer):
a = 1
c: str
d: int = None
class Meta:
model = Model
serializer = TestSerializer(c="2")
await serializer.update_many({'id': 1})
mock_update.mock.assert_called_with(
filter_kwargs={'id': 1}, **{'$set': {'c': '2'}}
)
await serializer.update_many({'id': 1}, skip_defaults=False)
mock_update.mock.assert_called_with(filter_kwargs={'id': 1}, **{
'$set': {'id': None, 'created': None, 'c': '2', 'a': 1, 'd': None}}
)
def test_serializer_dict():
@openapi.patch
class TestSerializer(Serializer):
a: int = 1
serializer = TestSerializer()
_dict = serializer.dict()
assert _dict == {"a": 1}
@openapi.patch
class TestSerializer(Serializer):
a: int = 1
b: str
class Meta:
exclude: set = {"a"}
serializer = TestSerializer(b="b")
_dict = serializer.dict()
assert _dict == {"b": "b"}
_dict = serializer.dict(exclude={"b"})
assert _dict == {}
def test_model_serializer_dict():
@openapi.patch
class TestSerializer(ModelSerializer):
a = 1
c: str
d: int = None
class Meta:
write_only_fields: set = {"c"}
exclude: set = {"d"}
serializer = TestSerializer(c="2")
_dict = serializer.dict()
assert _dict == {"a": 1}
@pytest.mark.asyncio
async def test_model_serializer_multiple_values_fields():
class Model(MongoDBTimeStampedModel):
int_list: List[int] = None
str_set: Set[str] = None
int_mapping: Mapping[int, str] = None
str_tuple: Tuple[str, str, str] = None
int_tuple_ellips: Tuple[int, ...] = None
int_sequence: Sequence[int] = None
class Meta:
collection = "collection"
@openapi.patch
class TestSerializer(ModelSerializer):
a = 1
c: str
d: int = None
class Meta:
model = Model
serializer = TestSerializer(c="2", int_list=[1, 2, 3])
assert serializer.int_list == [1, 2, 3]
serializer = TestSerializer(c="2", str_set={"1", "2", "3"})
assert serializer.str_set == {"1", "2", "3"}
serializer = TestSerializer(c="2", int_mapping={1: 1, 2: 2, 3: 3})
assert serializer.int_mapping == {1: "1", 2: "2", 3: "3"}
serializer = TestSerializer(c="2", str_tuple=("a", "b", "c"))
assert serializer.str_tuple == ("a", "b", "c")
serializer = TestSerializer(c="2", int_tuple_ellips=(1, 1, 1))
assert serializer.int_tuple_ellips == (1, 1, 1)
serializer = TestSerializer(c="2", int_sequence=(x for x in [2, 3]))
assert list(serializer.int_sequence) == [2, 3]
serializer = TestSerializer(
c="2", int_list=[1, 2, 3], str_set={"1", "2", "3"}
)
assert serializer.str_set == {"1", "2", "3"}
assert serializer.int_list == [1, 2, 3]
with pytest.raises(ValidationError) as excinfo:
TestSerializer(c="2", int_list=["a", "b", "c"])
assert excinfo.value.errors()[0]["loc"][0] == "int_list"
assert excinfo.value.errors()[0]["msg"] == "value is not a valid integer"
assert excinfo.value.errors()[0]["type"] == "type_error.integer"
# apidemo/urls.py
#coding:utf-8
from django.conf import settings
from django.conf.urls import patterns, url
import views
# Uncomment the next two lines to enable the admin:
# admin.autodiscover()
urlpatterns = patterns('',
    # media URL
    url(r'^js/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT + "js/"}),
#apidemo
(r'^$', views.index),
(r'^index/$', views.index),
(r'^userinfo$', views.show_userinfo),
(r'^videolist$', views.show_videolist),
(r'^play/([\d, A-Z]{1,6})/([\d, A-Z]{1,32})$', views.show_play),
(r'^playcode/([\d, A-Z]{1,32})$', views.show_playcode),
(r'^edit/([\d, A-Z]{1,32})$', views.show_edit),
(r'^upload$', views.show_upload),
(r'^notify_info$', views.show_notify_info),
(r'^search$', views.show_search),
(r'^notify$', views.notify),
(r'^getuploadurl$', views.get_upload_url),
(r'^searchvideo$', views.search_video),
(r'^delete/([\d, A-Z]{1,32})$', views.delete),
(r'^editvideo$', views.edit_video),
(r'^videosync$', views.videosync),
)
#!/usr/bin/env python3
from dhole.config import load_cfg
from dhole.server import ServerV1
config_path = "./configs/demo.py"
if __name__ == "__main__":
cfg = load_cfg(config_path)
server = ServerV1(cfg)
server.run_containers()
# remove containers
# server.stop_containers()
# server.remove_containers()
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Rajesh
#
# Created: 29-12-2019
# Copyright: (c) Rajesh 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
#Python multithreading example to demonstrate locking
#1.Define a subclass using Thread class.
#2.Instantiate the subclass and trigger the thread
#3.Implement lock in thread's run method
import threading
import datetime
exitFlag = 0
class myThread (threading.Thread):
def __init__(self,name,counter):
threading.Thread.__init__(self)
self.ThreadID = counter
self.name = name
self.counter =counter
def run(self):
        print("Starting " + self.name)
#Acquire lock to Sync Thread
threadLock.acquire()
print_date (self.name,self.counter)
#Release lock for the next thread
threadLock.release()
        print("Exiting " + self.name)
def print_date (threadName,counter):
datefields = []
today = datetime.date.today()
datefields.append(today)
    print("%s[%d]: %s" % (threadName, counter, datefields[0]))
threadLock = threading.Lock()
threads = []
# Create new threads
thread1 = myThread("Thread", 1)
thread2 = myThread("Thread", 2)
# Start new Threads
thread1.start()
thread2.start()
# Add threads to thread list
threads.append(thread1)
threads.append(thread2)
# Wait for all threads to complete
for t in threads:
t.join()
print ("Exiting the Program!!!")
#!/usr/bin/env python3
import make_energy as me
import make_instance as mi
import solve_problem as sop
import visualize_solution as vs
if __name__ == '__main__':
# set problem
type_matrix, weak_matrix, resist_matrix, enemy, skill = mi.make_instance()
# set costs & constraints
model = me.make_energy(type_matrix=type_matrix,
weak_matrix=weak_matrix,
resist_matrix=resist_matrix,
enemy=enemy,
skill=skill)
# set hyper parameters
n_enemies = len(enemy)
n_skills = 4
parameters = {'h_a': n_enemies+1,
'h_b': 2*n_enemies+1,
'h_c': 2/n_skills+0.1,
'h_d': 2/n_skills*4}
# solve with OpenJij
solution, broken = sop.solve_problem(model=model, **parameters)
# check broken
print(broken)
# visualize result
vs.visualize_solution(solution, enemy, skill)
#https://dojang.io/mod/quiz/review.php?attempt=1073791&cmid=2246
''' 구구단 출력!
number= int(input())
for i in list(range(1,10)):
print(f'{number} * {i} ={number*i}')
'''
# https://dojang.io/mod/quiz/review.php?attempt=1073809&cmid=2252
'''
account= int(input())
while account > 0:
account -=1350
if account < 0:
break
print(account)
'''
start, stop = map(int, input().split())
i = start
while True:
if i % 10 == 3:
i += 1
continue
if i > stop:
break
print(i, end=' ')
i += 1
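# With the corrected condition above, input "1 20" prints: 1 2 4 5 6 7 8 9 10 11 12 14 15 16 17 18 19 20
# (numbers ending in 3 are skipped).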
|
StarcoderdataPython
|
185063
|
<gh_stars>0
from keras.layers import Layer
import numpy as np
import tensorflow as tf
class Normalization(Layer):
def call(self, x):
return x/127.5 - 1.0
|
StarcoderdataPython
|
1787862
|
import requests
import json
import datetime
import smtplib
from email.message import EmailMessage
#function to send email_alert
def email_alert(subject, body, to):
msg = EmailMessage()
msg.set_content(body)
msg['subject'] = subject
msg['to'] = to
user = "<EMAIL>"
msg['from'] = user
pwd = "*******"
server = smtplib.SMTP("smtp.gmail.com",587)
server.starttls()
server.login(user,pwd)
server.send_message(msg)
server.quit()
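#Added note: the placeholder user/password above must be filled in before email_alert can
#send anything; for Gmail over SMTP this generally means using an app password.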
#converts the json file to more readable form
def jprint(obj):
text = json.dumps(obj,sort_keys=True,indent=4)
return text
#For knowing the availability of vaccines, we use datetime module
#For knowing the availability of vaccines, we use datetime module
start_date = datetime.datetime.now() + datetime.timedelta(days=1) #start from tomorrow
count = 0 #count maintains the count of days (here, 20 days)
sent = ""
while count < 20:
current = start_date + datetime.timedelta(days=count) #timedelta handles 28/30/31-day months and year rollover
date = current.strftime("%d-%m-%Y")
req = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByDistrict?district_id=445&date=" + date #api request to fetch the metadata of vaccination centres and available vaccines
response = requests.get(req)
st = str(jprint(response.json()))
file = open("data.json",'w')
file.write(st)
file.close()
f = open('data.json','r')
dict = json.load(f)
for i in dict['sessions']:
if(i['min_age_limit'] == 18):
stri = ""
stri = stri + "Available at: " + i['name'] + " on " + date + "\n"
sent = sent + stri
print("Available at: " + i['name'] + " on " + date)
f.close()
count = count + 1
if(len(sent)):
email_alert("Vaccine Available", sent, "<EMAIL>")
|
StarcoderdataPython
|
199361
|
from flask import Blueprint,render_template,request,make_response,current_app
from voice_api.blueprints.ext import Ext
fs_api=Blueprint('fs_api',__name__,template_folder='templates')
@fs_api.route('/api/auth-ext',methods=['POST'])
def auth_ext():
r_token=request.args['tk']
if r_token in current_app.config['FS_TOKEN']:
r=request.form
#print(r)
sip_auth_username=r.get('user')
domain=r.get('domain')
if r.get('Event-Calling-Function')== 'switch_load_network_lists':
response=make_response(render_template('404.xml'))
response.headers['Content-Type'] = 'application/xml'
return response
elif r.get('Event-Calling-Function')=='config_sofia':
response=make_response(render_template('404.xml'))
response.headers['Content-Type'] = 'application/xml'
return response
else:
#query the database and return XML
try:
ext=Ext.query({'domain':domain,'extnumber':sip_auth_username})
except AttributeError:
response=make_response(render_template('404.xml'))
response.headers['Content-Type'] = 'application/xml'
return response
if ext['extnumber'] in ['1000','1001']:
response=make_response(render_template('404.xml'))
response.headers['Content-Type'] = 'application/xml'
return response
else:
response=make_response(render_template('ext.xml',ext=ext))
response.headers['Content-Type'] = 'application/xml'
return response
else:
return 'Auth Fail',401
@fs_api.route('/api/queue-info',methods=['POST'])
def queue_info():
r=request
queues=[{'name':'1003'},{'name':'1004'}]
if r.values.get('key_value')=='callcenter.conf':
response=make_response(render_template('callcenter.conf.xml',queues=queues))
response.headers['Content-Type'] = 'application/xml'
return response
else:
response=make_response(render_template('404.xml'))
response.headers['Content-Type'] = 'application/xml'
return response
# @fs_api.route('/fsapi/hello',methods=['POST'])
# def hello():
# return 'Hello FSAPI'
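# Registration sketch (added, not part of the original module): the blueprint only serves
# requests once attached to a Flask app, roughly:
#   from flask import Flask
#   app = Flask(__name__)
#   app.config['FS_TOKEN'] = ['<token>']   # tokens accepted by auth_ext
#   app.register_blueprint(fs_api)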
|
StarcoderdataPython
|
1669067
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
from exoscale.api.compute import *
class TestComputePrivateNetwork:
def test_attach_instance(self, exo, privnet, instance):
private_network = PrivateNetwork._from_cs(exo.compute, privnet())
instance = Instance._from_cs(exo.compute, instance())
private_network.attach_instance(instance)
res = exo.compute.cs.listNics(
virtualmachineid=instance.id, networkid=private_network.id, fetch_list=True
)
assert len(res) == 1
assert res[0]["networkid"] == private_network.id
assert res[0]["virtualmachineid"] == instance.id
for nic in res:
if nic["isdefault"]:
continue
exo.compute.cs.removeNicFromVirtualMachine(
virtualmachineid=instance.id, nicid=nic["id"]
)
def test_detach_instance(self, exo, privnet, instance):
private_network = PrivateNetwork._from_cs(exo.compute, privnet())
instance = Instance._from_cs(exo.compute, instance())
exo.compute.cs.addNicToVirtualMachine(
virtualmachineid=instance.id, networkid=private_network.id
)
[res] = exo.compute.cs.listNics(
virtualmachineid=instance.id, networkid=private_network.id, fetch_list=True
)
assert res["networkid"] == private_network.id
assert res["virtualmachineid"] == instance.id
private_network.detach_instance(instance)
res = exo.compute.cs.listNics(
virtualmachineid=instance.id, networkid=private_network.id, fetch_list=True
)
assert len(res) == 0
def test_update(self, exo, privnet):
private_network = PrivateNetwork._from_cs(
exo.compute,
privnet(start_ip="10.0.0.10", end_ip="10.0.0.50", netmask="255.255.255.0"),
)
name_edited = private_network.name + " (edited)"
description_edited = private_network.description + " (edited)"
start_ip_edited = "10.0.0.1"
end_ip_edited = "10.0.0.100"
netmask_edited = "255.0.0.0"
private_network.update(
name=name_edited,
description=description_edited,
start_ip=start_ip_edited,
end_ip=end_ip_edited,
netmask=netmask_edited,
)
res = exo.compute.cs.listNetworks(id=private_network.id, fetch_list=True)
assert res[0]["name"] == name_edited
assert private_network.name == name_edited
assert res[0]["displaytext"] == description_edited
assert private_network.description == description_edited
assert res[0]["startip"] == start_ip_edited
assert private_network.start_ip == start_ip_edited
assert res[0]["endip"] == end_ip_edited
assert private_network.end_ip == end_ip_edited
assert res[0]["netmask"] == netmask_edited
assert private_network.netmask == netmask_edited
def test_delete(self, exo, privnet):
private_network = PrivateNetwork._from_cs(exo.compute, privnet(teardown=False))
private_network_id = private_network.id
private_network.delete()
assert private_network.id is None
with pytest.raises(CloudStackApiException) as excinfo:
res = exo.compute.cs.listNetworks(id=private_network_id, fetch_list=True)
assert len(res) == 0
assert excinfo.type == CloudStackApiException
assert "does not exist" in excinfo.value.error["errortext"]
def test_properties(self, exo, privnet, instance):
private_network = PrivateNetwork._from_cs(exo.compute, privnet())
instance = Instance._from_cs(exo.compute, instance())
res = exo.compute.cs.addNicToVirtualMachine(
virtualmachineid=instance.id, networkid=private_network.id
)
private_network_instances = list(private_network.instances)
assert len(private_network_instances) == 1
assert private_network_instances[0].name == instance.name
for nic in res["virtualmachine"]["nic"]:
if nic["isdefault"]:
continue
exo.compute.cs.removeNicFromVirtualMachine(
virtualmachineid=instance.id, nicid=nic["id"]
)
|
StarcoderdataPython
|
3214106
|
<gh_stars>10-100
from django.core.management.base import BaseCommand
from waldur_core.logging.loggers import event_logger
BLANK_LINE = '\n\n'
class Command(BaseCommand):
def handle(self, *args, **options):
print("# Events", end=BLANK_LINE)
groups = sorted([(k, v) for k, v in event_logger.get_all_groups().items()])
for event_group, events in groups:
print(f"## {str(event_group).capitalize()}", end=BLANK_LINE)
for event in sorted(events):
print(f"- {event}")
print()
|
StarcoderdataPython
|
4814560
|
<reponame>BachFive/GammaGo_3<gh_stars>0
#!/usr/local/bin/python2
from dlgo.gtp import GTPFrontend
from dlgo.agent.predict import load_prediction_agent
from dlgo.agent import termination
import h5py
model_file = h5py.File("agents/betago.hdf5", "r")
agent = load_prediction_agent(model_file)
strategy = termination.get("opponent_passes")
termination_agent = termination.TerminationAgent(agent, strategy)
frontend = GTPFrontend(termination_agent)
frontend.run()
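# Added note: GTPFrontend speaks the Go Text Protocol over stdin/stdout, so this script is
# presumably meant to be launched by a GTP-capable client (a GUI or match runner), not used interactively.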
|
StarcoderdataPython
|
83075
|
<filename>scraper/scraper/dbrouter.py
class DBRouter(object):
def db_for_read(self, model, **hints):
if model._meta.app_label == 'panglao':
return 'panglao'
if model._meta.app_label == 'cheapcdn':
return 'cheapcdn'
if model._meta.app_label == 'lifecycle':
return 'lifecycle'
return 'default'
def db_for_write(self, model, **hints):
if model._meta.app_label == 'panglao':
return 'panglao'
if model._meta.app_label == 'cheapcdn':
return 'cheapcdn'
if model._meta.app_label == 'lifecycle':
return 'lifecycle'
return 'default'
def allow_relation(self, obj1, obj2, **hints):
if obj1._meta.app_label == 'panglao':
if obj2._meta.app_label != 'panglao':
return False
if obj1._meta.app_label == 'cheapcdn':
if obj2._meta.app_label != 'cheapcdn':
return False
if obj1._meta.app_label == 'lifecycle':
if obj2._meta.app_label != 'lifecycle':
return False
def allow_migrate(self, db, app_label, **hints):
if db == 'cheapcdn' and app_label == 'cheapcdn':
return True
if db == 'lifecycle' and app_label == 'lifecycle':
return True
return False
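# Settings sketch (added, illustrative): Django only consults this router once it is listed
# in DATABASE_ROUTERS, e.g. in settings.py (module path assumed from the filename above):
#   DATABASE_ROUTERS = ['scraper.dbrouter.DBRouter']
#   DATABASES = {'default': {...}, 'panglao': {...}, 'cheapcdn': {...}, 'lifecycle': {...}}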
|
StarcoderdataPython
|
1722768
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-27 01:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0003_game_tilesposition'),
]
operations = [
migrations.RenameField(
model_name='game',
old_name='tilesPosition',
new_name='tilePosition',
),
]
|
StarcoderdataPython
|
3330265
|
<reponame>HansGR/WorldsCollide
text_value = {
'<end>' : 0x00,
'<line>' : 0x01,
'!' : 0xbe,
'?' : 0xbf,
'/' : 0xc0,
':' : 0xc1,
'"' : 0xc2,
'\'' : 0xc3,
'-' : 0xc4,
'.' : 0xc5,
',' : 0xc6,
'…' : 0xc7,
';' : 0xc8,
'#' : 0xc9,
'+' : 0xca,
'(' : 0xcb,
')' : 0xcc,
'%' : 0xcd,
'~' : 0xce,
'=' : 0xd2,
'<dotted line>' : 0xd3,
'<N arrow>' : 0xd4,
'<E arrow>' : 0xd5,
'<SW arrow>' : 0xd6,
'<X>' : 0xd7,
'<dirk icon>' : 0xd8,
'<sword icon>' : 0xd9,
'<lance icon>' : 0xda,
'<knife icon>' : 0xdb,
'<rod icon>' : 0xdc,
'<brush icon>' : 0xdd,
'<stars icon>' : 0xde,
'<special icon>' : 0xdf,
'<gambler icon>' : 0xe0,
'<claw icon>' : 0xe1,
'<shield icon>' : 0xe2,
'<helmet icon>' : 0xe3,
'<armor icon>' : 0xe4,
'<tool icon>' : 0xe5,
'<skean icon>' : 0xe6,
'<relic icon>' : 0xe7,
'<white magic icon>' : 0xe8,
'<black magic icon>' : 0xe9,
'<gray magic icon>' : 0xea,
' ' : 0xfe,
'\0' : 0xff,
}
for ascii_val in range(ord('A'), ord('Z') + 1):
text_value[chr(ascii_val)] = ascii_val + 0x3f
for ascii_val in range(ord('a'), ord('z') + 1):
text_value[chr(ascii_val)] = ascii_val + 0x39
for ascii_val in range(ord('0'), ord('9') + 1):
text_value[chr(ascii_val)] = ascii_val + 0x84
value_text = {v: k for k, v in text_value.items()}
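# Round-trip sanity check (added, not part of the original file): encode a string with
# text_value and decode it back with value_text.
_sample = "Hello World 123!"
_encoded = [text_value[ch] for ch in _sample]
_decoded = ''.join(value_text[v] for v in _encoded)
assert _decoded == _sample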
|
StarcoderdataPython
|
1751523
|
<gh_stars>0
# Copyright 2020 by Chromation, Inc
# All Rights Reserved by Chromation, Inc
from microspeclib.datatypes import CommandNull
from microspeclib.logger import CHROMASPEC_LOGGER as log
import time
# The intended difference between this and the Simple interface is to provide more
# fine control, such as breaking up sending commands and then looping to wait for
# replies. It requires creating Command objects and then passing them along, in
# contrast to the one routine per command structure of the Simple interface.
__all__ = [ "MicroSpecExpertInterface" ]
from microspeclib.internal.stream import MicroSpecSerialIOStream, MicroSpecEmulatedStream
class MicroSpecExpertInterface(MicroSpecSerialIOStream):
def __init__(self, serial_number=None, device=None, timeout=2.0, retry_timeout=0.001, emulation=False, *args, **kwargs):
log.info("serial_number=%s, device=%s, timeout=%s, retry_timeout=%s, emulation=%s, args=%s, kwargs=%s",
serial_number, device, timeout, retry_timeout, emulation, args, kwargs)
if emulation:
self.emulation = MicroSpecEmulatedStream(socat=True, fork=True, timeout=timeout)
device = self.emulation.software
super().__init__(serial_number=serial_number,
device=device, timeout=timeout,
*args, **kwargs)
self.retry_timeout = retry_timeout
self.current_command = []
log.info("return")
def __setattr__(self, attr, value):
if attr == "timeout":
log.info("set timeout to %s", value)
self.stream.timeout = value
else:
self.__dict__[attr] = value
def __getattribute__(self, attr):
if attr == "timeout":
log.info("return timeout=%s", self.stream.timeout)
return self.stream.timeout
else:
return super().__getattribute__(attr)
def sendCommand(self, command):
"""Send one command to the sensor, do not wait for a response. You may stack these, and they
will return in FIFO order later.
Parameters
----------
command: :py:mod:`CommandObject <microspeclib.datatypes.command>`
Create one of these objects and pass it as an argument
Returns
-------
None
"""
log.info("command=%s", command)
try:
if bytes(command) == b'':
log.warning("Error packing payload for command '%s'", str(command))
raise Exception("Unable to send partial command '%s'"%(str(command)))
except:
log.warning("Error packing payload for command '%s'", str(command))
raise Exception("Unable to send partial command '%s'"%(str(command)))
super().sendCommand(command)
log.info("appending command=%s to current_command=%s", command, self.current_command)
self.current_command.append(command)
log.info("return")
def receiveReply(self):
"""Receive one command result command sent. If multiple were sent, receive the next one in FIFO order,
if available. A failure to retrieve it does not remove it from the queue, but a reply that contains
a failed status does.
Returns
-------
:py:mod:`BridgeReplyObject <microspeclib.datatypes.bridge>`
:py:mod:`SensorReplyObject <microspeclib.datatypes.sensor>`
"""
log.info("waiting for reply")
start = time.time()
reply = super().receiveReply(self.current_command[0].command_id) if self.current_command else None
since = time.time() - start
timeout = self.timeout if self.timeout else 0
remain = timeout - since
log.info("start=%s reply=%s since=%s timeout=%s remain=%s"%(start,reply,since,timeout,remain))
while reply is None and remain > 0:
log.info("no reply yet, timeout remaining=%s", remain)
time.sleep( self.retry_timeout if remain > self.retry_timeout else remain )
reply = super().receiveReply(self.current_command[0].command_id) if self.current_command else None
since = time.time() - start
remain = timeout - since
log.info("start=%s reply=%s since=%s timeout=%s remain=%s"%(start,reply,since,timeout,remain))
if reply:
log.info("popping command since reply was found")
self.current_command.pop(0)
log.info("return %s", reply)
return reply
def sendAndReceive(self, command):
"""Send one command to the sensor, and receive one command result command sent.
If multiple were sent, receive the next one in FIFO order, if available.
This does NOT mean that the reply will match the command, if you already sent a command
that you had not received yet. A failure to retrieve it does not remove it from
the queue, but a reply that contains a failed status does.
Parameters
----------
command: :py:mod:`CommandObject <microspeclib.datatypes.command>`
Create one of these objects and pass it as an argument
Returns
-------
:py:mod:`BridgeReplyObject <microspeclib.datatypes.bridge>`
:py:mod:`SensorReplyObject <microspeclib.datatypes.sensor>`
"""
log.info("command=%s", command)
self.sendCommand(command)
reply = self.receiveReply()
log.info("return %s", reply)
return reply
def flush(self, timeout=0.1):
"""Tell hardware to finish sending any remaining commands and flush it's communication line,
then flush the local queue and throw away all waiting replies. This does not send a Reset command."""
log.info("flushing stream by sending a null and clearing all data from the line")
self.sendCommand(CommandNull())
old_timeout = self.timeout
self.timeout = timeout
# TODO(sustainablelab): serialwin32.py line 270 `if size > 0`
# throws TypeError: `>` not supported between instances of
# `NoneType` and `int`. Change read(None) to read_all() ?
# The goal is to empty the buffer. reset_input_buffer does
# that, so I'm not sure what the purpose of the read is. But
# a read_all() won't fail, whereas a read(None) isn't
# supported.
# self.stream.read(None) # <----------- This line removed 2021-03-11
self.stream.read_all() # <------------- This line added 2021-03-11
# TODO(sustainablelab): this reset is happening before the
# firmware has a chance to fill the read buffer. So flush is
# not flushing out the read buffer.
self.stream.reset_input_buffer()
self.buffer = b''
self.timeout = old_timeout
self.current_command = []
log.info("return")
|
StarcoderdataPython
|
44241
|
#! /usr/bin/env python3
import sh
import click
import re
def real_git(*args, **kwargs):
mock_git(*args, **kwargs)
return sh.git(*args, **kwargs)
def mock_git(*args, **kwargs):
click.echo(sh.git.bake(*args, **kwargs), err=True)
return ""
def branch_exists(name):
try:
get_commit_hash(name)
return True
except:
return False
def get_current_branch():
return sh.git("rev-parse", "--abbrev-ref", "HEAD").strip()
def get_commit_hash(commit_spec):
return sh.git("rev-parse", commit_spec).strip()
@click.command()
@click.argument("commit", type=str)
@click.argument("branch", type=str)
@click.option("--base", type=str, default="origin/master", help="Base branch to branch from")
@click.option("--push/--no-push", default=True, help="Push the feature branch")
@click.option("--new/--not-new", default=False, help="Make a new branch")
@click.option("--switch/--no-switch", default=False, help="Switch to the other branch")
@click.option("--mock/--real", default=False, help="Just print git commands")
def main(commit, branch, base, push, new, switch, mock):
""" COMMIT: a commit range to be cherry-picked into BRANCH, e.g. HEAD^1 or HEAD..HEAD~1, or a hash range
BRANCH: this branch will be rebased off of the base branch, e.g. myname/my-great-feature
"""
# Mock git just prints the command which would be run
if mock:
git = mock_git
else:
git = real_git
current_branch = get_current_branch()
exists = branch_exists(branch)
if exists and new:
raise click.UsageError(f"Branch {branch} already exists. remove --new")
if not exists and not new:
raise click.UsageError(f"Branch {branch} must be created. use --new")
try:
click.echo(f"Currently on {current_branch}", err=True)
# Resolve the commit name unless a hash was specified
if not re.match(r"[0-9a-f]{40}", commit):
commit = get_commit_hash(commit)
if "\n^" in commit:
commit = commit.replace("\n^", "^..")
# Describe the actions to be performed
push_msg = ""
if push and not new:
push_msg = " and force push"
if push and new:
push_msg = " and push upstream"
branch_action = "create" if new else "hard reset"
click.echo(f"Going to {branch_action} branch {branch} on {base} then cherry pick {commit}{push_msg}", err=True)
click.echo(err=True)
# Checkout or create the branch and reset it to the base branch
if not exists:
git("checkout", "-b", branch)
else:
git("checkout", branch)
git("reset", "--hard", base)
# Cherry pick the commit(s) into the branch
git("cherry-pick", commit)
# Push to origin
if push:
# Set upstream if necessary, otherwise force push
if not exists:
git("push", "--set-upstream", "origin", branch)
else:
git("push", "--force")
finally:
if not switch:
git("checkout", current_branch)
|
StarcoderdataPython
|
12997
|
<reponame>reflectometry/osrefl
from greens_thm_form import greens_form_line, greens_form_shape
from numpy import arange, linspace, float64, indices, zeros_like, ones_like, pi, sin, complex128, array, exp, newaxis, cumsum, sum, cos, sin, log, log10
from osrefl.theory.DWBAGISANS import dwbaWavefunction
class shape:
def __init__(self, name):
self.name = name
self.points = []
self.sld = 0.0
self.sldi = 0.0
def rectangle(x0, y0, dx, dy, sld=0.0, sldi=0.0):
#generate points for a rectangle
rect = shape('rectangle')
rect.points = [[x0,y0], [x0+dx, y0], [x0+dx, y0+dy], [x0, y0+dy]]
rect.sld = sld
rect.sldi = sldi
rect.area = dx * dy
return rect
def sawtooth(z, n=6, x_length=3000.0, base_width=500.0, height=300.0, sld=0.0, sldi=0.0, sld_front=0.0, sldi_front=0.0):
if z>height:
return [], sld_front, sldi_front
width = (z / height) * base_width
front_width = base_width - width
rects = [rectangle(0, base_width*(i+0.5) - width/2.0, x_length, width, sld, sldi) for i in range(n)]
# now rectangles for the gaps between the sawtooths...
if (sld_front !=0.0 and sldi_front != 0.0):
front_rects = [rectangle(0, 0, x_length, front_width/2.0, sld_front, sldi_front)]
front_rects.extend([rectangle(0, base_width*(i+0.5)+width/2.0, x_length, front_width, sld_front, sldi_front) for i in range(1,n-1)])
front_rects.append(rectangle(0, base_width*(n-0.5)+width/2.0, x_length, front_width/2.0, sld_front, sldi_front))
rects.extend(front_rects)
# now calculate the average SLD (nuclear) for the layer
avg_sld = (width * sld + front_width * sld_front) / base_width
avg_sldi = (width * sldi + front_width * sldi_front) / base_width
return rects, avg_sld, avg_sldi
def arc(r, theta_start, theta_end, x_center, y_center, theta_step=1.0, close=True, sld=0.0, sldi=0.0, ):
a = shape('arc')
a.theta_start = theta_start
a.theta_end = theta_end
a.area = pi * r**2 * abs(theta_end - theta_start)/360.0
if close == True:
a.points.append([x_center, y_center]) # center point
numpoints = (theta_end - theta_start) / theta_step + 1
thetas = linspace(theta_start, theta_end, numpoints) * pi/180 # to radians
for th in thetas:
a.points.append([r*cos(th) + x_center, r*sin(th) + y_center])
a.sld = sld
a.sldi = sldi
return a
def limit_cyl(arc, xmin=0.0, xmax=0.0, ymin=0.0, ymax=0.0):
new_arc = shape('arc')
new_arc.sld = arc.sld
new_arc.sldi = arc.sldi
new_arc.theta_start = arc.theta_start
new_arc.theta_end = arc.theta_end
#new_arc.area = arc.area
for point in arc.points:
if (point[0] >= xmin) and (point[0] <= xmax) and (point[1] >=ymin) and (point[1] <= ymax):
new_arc.points.append(point)
if len(new_arc.points) < 3:
new_arc.area = 0.0
else:
new_arc.area = (len(new_arc.points) - 2) / 360.0 * arc.area
return new_arc
def conj(sld):
conjugate_sld = sld.copy()
conjugate_sld[:,2] *= -1
return conjugate_sld
# alternating SLD
wavelength = 1.24 # x-ray wavelength, Angstroms
spacing = 600.0 # distance between cylinder centers
radius = 200.0 # Angstroms, radius of cylinders
thickness = 300.0 # Angstrom, thickness of cylinder layer
sublayer_thickness = 200.0 # Angstrom, full layer of matrix below cylinders
matrix_sld = pi/(wavelength**2) * 2.0 * 1.0e-6 # substrate
matrix_sldi = pi/(wavelength**2) * 2.0 * 1.0e-7 # absorption in substrate
cyl_sld = 0.0
cyl_sldi = 0.0 # cylinders are holes in matrix
unit_dx = 2.0 * spacing
unit_dy = 1.0 * spacing
matrix = rectangle(0,0, 3000, 3000, matrix_sld, matrix_sldi)
cylinders = []
centers = []
for i in range(3):
for j in range(6):
x0 = i * 2.0 * spacing
y0 = j * spacing
x1 = x0 + spacing # basis
y1 = y0 + spacing/2.0
cylinders.append(arc(radius, 0.0, 360.0, x0, y0, sld=cyl_sld, sldi=cyl_sldi))
cylinders.append(arc(radius, 0.0, 360.0, x1, y1, sld=cyl_sld, sldi=cyl_sldi))
cyl_area = 0.0
for cyl in cylinders:
cyl_area += cyl.area
clipped_cylinders = [limit_cyl(cyl, xmin=0.0, xmax=3000.0, ymin=0.0, ymax=3000.0) for cyl in cylinders]
clipped_cyl_area = 0.0
for cyl in clipped_cylinders:
clipped_cyl_area += cyl.area
print "clipped_cyl_area / matrix.area = ", clipped_cyl_area / matrix.area
print "ratio should be 0.3491 for FCT planar array with a/b = 2 and r = a/6"
avg_sld = (matrix.area * matrix_sld + clipped_cyl_area * cyl_sld) / matrix.area
avg_sldi = (matrix.area * matrix_sldi + clipped_cyl_area * cyl_sldi) / matrix.area
front_sld = 0.0 # air
back_sld = pi/(wavelength**2) * 2.0 * 5.0e-6 # substrate
back_sldi = pi/(wavelength**2) * 2.0 * 7.0e-8 # absorption in substrate
qz = linspace(0.01, 0.21, 501)
qy = linspace(-0.1, 0.1, 500)
qx = ones_like(qy, dtype=complex128) * 1e-8
SLDArray = [ [0,0,0], # air
[avg_sld, thickness, avg_sldi], # sample
[matrix_sld, sublayer_thickness, matrix_sldi], # full matrix layer under cylinders
[back_sld, 0, back_sldi] ]
FT = zeros_like(qx, dtype=complex128)
for cyl in clipped_cylinders:
FT += greens_form_shape(cyl.points, qx, qy) * (cyl.sld)
FT += greens_form_shape(matrix.points, qx, qy) * (matrix.sld)
FT += greens_form_shape(matrix.points, qx, qy) * (-avg_sld)
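# (added note) Subtracting avg_sld here leaves FT holding only the in-plane contrast of the
# holes against the matrix; the laterally averaged profile itself enters through SLDArray below.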
SLDArray = array(SLDArray)
def calc_gisans(alpha_in, show_plot=True):
#alpha_in = 0.25 # incoming beam angle
kz_in_0 = 2*pi/wavelength * sin(alpha_in * pi/180.0)
kz_out_0 = kz_in_0 - qz
wf_in = dwbaWavefunction(kz_in_0, SLDArray)
wf_out = dwbaWavefunction(-kz_out_0, conj(SLDArray))
kz_in_l = wf_in.kz_l
kz_out_l = -wf_out.kz_l
zs = cumsum(SLDArray[1:-1,1])
dz = SLDArray[1:-1,1][:,newaxis]
z_array = array(zs)[:,newaxis]
qrt_inside = kz_in_l[1] - kz_out_l[1]
qtt_inside = kz_in_l[1] + kz_out_l[1]
qtr_inside = -kz_in_l[1] + kz_out_l[1]
qrr_inside = -kz_in_l[1] - kz_out_l[1]
# the overlap is the forward-moving amplitude c in psi_in multiplied by
# the forward-moving amplitude in the time-reversed psi_out, which
# ends up being the backward-moving amplitude d in the non-time-reversed psi_out
# (which is calculated by the wavefunction calculator)
# ... and vice-verso for d and c in psi_in and psi_out
overlap = wf_out.d[1] * wf_in.c[1] / (1j * qtt_inside) * (exp(1j * qtt_inside * thickness) - 1.0)
overlap += wf_out.c[1] * wf_in.d[1] / (1j * qrr_inside) * (exp(1j * qrr_inside * thickness) - 1.0)
overlap += wf_out.d[1] * wf_in.d[1] / (1j * qtr_inside) * (exp(1j * qtr_inside * thickness) - 1.0)
overlap += wf_out.c[1] * wf_in.c[1] / (1j * qrt_inside) * (exp(1j * qrt_inside * thickness) - 1.0)
overlap_BA = 1.0 / (1j * qz) * (exp(1j * qz * thickness) - 1.0)
overlap_BA += 1.0 / (-1j * qz) * (exp(-1j * qz * thickness) - 1.0)
gisans = overlap[:,newaxis] * FT[newaxis, :]
gisans_BA = overlap_BA[:,newaxis] * FT[newaxis, :]
extent = [qy.min(), qy.max(), qz.min(), qz.max()]
if show_plot == True:
from pylab import imshow, figure, colorbar
figure()
imshow(log10(abs(gisans)**2), origin='lower', extent=extent, aspect='auto')
colorbar()
figure()
imshow(log10(abs(gisans_BA)**2), origin='lower', extent=extent, aspect='auto')
colorbar()
return gisans, gisans_BA
|
StarcoderdataPython
|
1665626
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.shortcuts import render
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth import login, logout
from django.contrib.auth.models import User
from django.template.loader import get_template
from django.core.mail.backends.smtp import EmailBackend
from django.core.mail import send_mail, get_connection, EmailMessage, EmailMultiAlternatives
from django.views.generic import ListView, DetailView, UpdateView, TemplateView
from registration.models import *
from intro.models import *
from portal.models import CampusAmbassador
from Event.models import *
from apogee17.settings import *
import difflib
def pcr_logout(request):
logout(request)
return redirect('pcradmin:dashboard')
class IntroRegList(ListView):
model = IntroReg
paginate_by=50
template_name='generic/introreg_list.html'
class ParticipantDetail(UpdateView):
model = Participant
success_url = '.'
template_name='generic/participant_edit.html'
fields = ['name', 'email', 'phone', 'college', 'gender', 'address',
'events', 'bank_name', 'bank_ifsc', 'bank_account_no']
class PCRConfirmDoc(TemplateView):
template_name = "pcradmin/pcrconfirmmail.html"
def get_context_data(self, *args, **kwargs):
context = super(PCRConfirmDoc, self).get_context_data(*args, **kwargs)
if self.request.user.is_authenticated() and hasattr(self.request.user, 'participant'):
part = self.request.user.participant
context["name"] = part.name
context["code"] = part.barcode
return context
def get(self, request, *args, **kwargs):
if request.user.is_authenticated() and hasattr(request.user, 'participant'):
return super(PCRConfirmDoc, self).get(request, *args, **kwargs)
return redirect('main:main')
class ParticipantList(ListView):
model = Participant
template_name='generic/participant_list.html'
paginate_by=45
def get_queryset(self):
qs = super(ParticipantList, self).get_queryset().filter(msp=None).order_by('pcr_approval')
if self.kwargs.get('college_pk', False):
return qs.filter(college__pk=self.kwargs.get('college_pk'))
if self.kwargs.get('event_pk', False):
return Event.objects.get(pk=self.kwargs.get('event_pk')).participant_set.filter(msp=None)
return qs
def get_context_data(self, *args, **kwargs):
context = super(ParticipantList, self).get_context_data(*args, **kwargs)
if self.kwargs.get('college_pk', False):
context['title'] = 'Participants of ' + str(self.kwargs.get('college_pk', False))
if self.kwargs.get('event_pk', False):
event_pk = self.kwargs.get('event_pk', False)
context['title'] = 'Participants of ' + str(Event.objects.get(pk=event_pk).name)
return context
def post(self, request, *args, **kwargs):
part_ids = request.POST.getlist('part_list')
if not part_ids:
return self.get(request, *args, **kwargs)
if request.POST.get('approval', False):
val = int(request.POST.get('approval', 0))
if val == 2:
html = get_template("pcradmin/pcrconfirmmail_static.html").render({})
email = EmailMultiAlternatives(subject="APOGEE 2017 Confirmation", body='---', to=["<EMAIL>"], bcc=[part.email for part in Participant.objects.filter(id__in=part_ids, pcr_approval=False, msp=None)])
email.attach_alternative(html, "text/html")
email.attach_file(BASE_DIR+'/data/RulesBooklet2017.pdf')
email.attach_file(BASE_DIR+'/data/StartupWeekendRuleBooklet.pdf')
email.attach_file(BASE_DIR+'/data/Documentation_Startup_Weekend_BITS_Pilani.pdf')
email.send()
Participant.objects.filter(id__in=part_ids, pcr_approval=False).update(pcr_approval=True)
return render(request, 'pcradmin/showmailsent.html', {"title" : "Mail Sent to Participant"})
elif val == 1:
Participant.objects.filter(id__in=part_ids).update(pcr_approval=False, msp=None)
return self.get(request, *args, **kwargs)
class AmbassadorList(ListView):
model = CampusAmbassador
template_name = 'generic/ambassador_list.html'
paginate_by=50
def get_queryset(self):
qs = super(AmbassadorList, self).get_queryset().order_by('pcr_approved')
if 'approved' in self.kwargs:
qs = qs.filter(pcr_approved=True)
return qs
def get_context_data(self, *args, **kwargs):
context = super(AmbassadorList, self).get_context_data(*args, **kwargs)
return context
def post(self, request, *args, **kwargs):
body = unicode(u'''Hello,
Greetings from Team APOGEE- BITS Pilani!
APOGEE is the annual technical festival of BITS Pilani. For its promotion in other colleges, we have joined hands with the internship platform YOUTH4WORK to recruit Marketing Interns. As a part of the process, you were called and told about the work (in broad terms).
This is to inform you that you have been selected as a Marketing Intern for APOGEE 2017. You are now a part of APOGEE and YOUTH4WORK family and we expect you to work towards making it a grand success. However, there will be a great influx of responsibilities on your shoulders. We expect you to put in sincere efforts from your side. You will be in touch with one of our members via phone and email periodically.
Incentives:
1. Marketing Intern Certificate by Youth4work for 2 months is a valuable deliverable by a renowned internship platform. This recognition will specially be helpful for people looking to sit for placements in the future.
2. Based on your performance and how much participation you are able to bring, your registration fee may be waivered upto an extent/completely.
3. For Exceptional performances, be ready for goodies by Youth4work and APOGEE!
The following are expected from you promptly:
1. Please Pre-register and help your friends pre-register on https://www.bits-apogee.org/ .
2. Please reply to <EMAIL> with the e-mail address and contact numbers of the NSS head in your college along with the Mechanical, Computer Science and Electrical departments’ (if any) student representatives.
3. You are expected to share the posts uploaded on the APOGEE Facebook page https://www.facebook.com/bitsapogee ; it will be monitored by one of our members.
4. Please subscribe on Youth4Work (website) if you haven’t till now (otherwise you will be not be a recognized Intern).
5. From announcing in classrooms and sending out mails to meeting various heads of participating teams, do all that which ensures participation in the events (whose posters will be sent out to you). Any other idea from you will always be received with a great pleasure.
To begin with, we require you to like the APOGEE Facebook page to keep yourself updated and also pre-register yourself on the APOGEE website www.bits-apogee.org. From now onwards, you will be regularly updated with your responsibilities via email. Do let us know if you have any suggestions specifically for your college that might help all of us spread the word about APOGEE and ensure healthy participation in a better way.
We wish you a grand success in this endeavor. Let's make APOGEE a grand success together.
Good Luck! ''')
amb_ids = request.POST.getlist('amb_list', [])
if request.POST.get('approval', False) and amb_ids:
val = int(request.POST.get('approval',0))
if val == 2:
for ca in CampusAmbassador.objects.filter(id__in=amb_ids, pcr_approved=False):
email = EmailMessage(subject="Campus Ambassador Approval", body=body, to=[ca.email])
email.send()
CampusAmbassador.objects.filter(id__in=amb_ids, pcr_approved=False).update(pcr_approved=True)
return render(request, 'pcradmin/showmailsent.html')
elif val == 1:
CampusAmbassador.objects.filter(id__in=amb_ids).update(pcr_approved=False)
elif request.POST.get('mail', False):
id_str = ','.join(amb_ids)
mailbody = 'Default Mail Body'
gauss_check= 0
context = {'mailbody' :mailbody,'id_str' : id_str}
return render(request, 'pcradmin/mail_selected_amb.html', context)
return self.get(request, *args, **kwargs)
class CollegeList(ListView):
model = College
template_name = 'generic/college_list.html'
ordering = ('name')
def get_context_data(self, *args, **kwargs):
context = super(CollegeList, self).get_context_data(*args, **kwargs)
object_list = []
for college in context['object_list']:
approved_count = college.participant_set.filter(pcr_approval=True).count()
object_list.append((college, approved_count))
context['object_list'] = object_list
print context
return context
def post(self, request, *args, **kwargs):
if not request.POST['finalcollege']:
return self.get(request, *args, **kwargs)
try:
finalcollege = College.objects.get(name=request.POST['finalcollege'])
except:
finalcollege = College.objects.create(name=request.POST['finalcollege'], is_displayed=True)
colleges = College.objects.filter(pk__in=request.POST.getlist('colleges')).exclude(pk=finalcollege.pk)
for college in colleges:
college.participant_set.all().update(college=finalcollege)
colleges.delete()
return self.get(request, *args, **kwargs)
def mail_selected_amb(request):
id_str = request.POST.get('id_str', False)
amb_ids = id_str.split(',')
body = str(request.POST.get('body' , ''))
if body != '':
for ca in CampusAmbassador.objects.filter(id__in=amb_ids):
email = EmailMessage(subject="Campus Ambassador", body=body, to=[ca.email])
email.send()
return render(request, 'pcradmin/showmailsent.html')
def mail_approved(request):
id_str = ','.join([str(ca.id) for ca in CampusAmbassador.objects.filter(pcr_approved=True)])
context = {'mailbody' :'Default Mail Body','id_str' : id_str}
return render(request, 'pcradmin/mail_selected_amb.html', context)
class ParticipantMail(TemplateView):
template_name = "generic/participant_mail.html"
def get_context_data(self, *args, **kwargs):
context = super(ParticipantMail, self).get_context_data(*args, **kwargs)
context["filter"] = self.kwargs.get("filter", "all")
return context
def post(self, request, *args, **kwargs):
qs = Participant.objects.filter(msp=None)
context = {"title" : "Mail Sent to All Participants"}
if self.kwargs.get('filter', False) == "approved":
context = {"title" : "Mail Sent to All Approved Participants"}
qs = qs.filter(pcr_approval=True)
subject = request.POST.get('subject', '')
body = request.POST.get('body', '')
for part in qs:
email = EmailMessage(subject=subject, body=body, to=[part.email])
email.send()
return render(request, 'pcradmin/showmailsent.html', context)
################################################################
def deepgetattr(obj, attr, default = None):
"""
Get a named attribute from an object; deepgetattr(x, 'a.b.c.d') is
equivalent to x.a.b.c.d. When a default argument is given, it is
returned when any attribute in the chain doesn't exist; without
it, an exception is raised when a missing attribute is encountered.
"""
attributes = attr.split(".")
for i in attributes:
try:
obj = getattr(obj, i)
except AttributeError:
if default:
return default
else:
raise
return obj
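# (added example) deepgetattr(event, 'category.name', default='Other') behaves like
# event.category.name but returns 'Other' if any attribute along the chain is missing.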
@staff_member_required
def eventwise_stats(request):
events = Event.objects.exclude(category__name='Others') #[x for x in Event.objects.order_by('name') if x.category.name != "Other"]
college = College.objects.all()
eventwise = []
for event in events:
entry = {}
entry['id'] = event.id
entry['name'] = event.name
entry['category'] = str(event.category.name)
entry['males'] = str(event.participant_set.filter(gender__istartswith='M', msp=None).count())+' | '+str(event.participant_set.filter(gender__istartswith='M', pcr_approval=True, msp=None).count())
entry['females'] = str(event.participant_set.filter(gender__istartswith='F', msp=None).count())+' | '+str(event.participant_set.filter(gender__istartswith='F', pcr_approval=True, msp=None).count())
entry['total'] = str(event.participant_set.filter(msp=None).count())+' | '+str(event.participant_set.filter(pcr_approval=True, msp=None).count())
for key, value in entry.iteritems():
if type(value) is str:
if value == '0 | 0':
entry[key] = '---'
eventwise.append(entry)
total = {}
total['males'] = str(Participant.objects.filter(gender__istartswith='M', msp=None).count())+' | '+str(Participant.objects.filter(gender__istartswith='M', pcr_approval=True, msp=None).count())
total['females'] = str(Participant.objects.filter(gender__istartswith='F', msp=None).count())+' | '+str(Participant.objects.filter(gender__istartswith='F', pcr_approval=True, msp=None).count())
total['total'] = str(Participant.objects.filter(msp=None).count())+' | '+str(Participant.objects.filter(pcr_approval=True, msp=None).count())
total_amb = CampusAmbassador.objects.all().count()
app_amb = CampusAmbassador.objects.filter(pcr_approved=True).count()
amb_stats = str(total_amb)+ " | " +str(app_amb)
context = {
'eventwise' : eventwise,
'total' : total,
'amb_stats' : amb_stats,
}
return render(request, 'pcradmin/eventwise_stats.html', context)
@staff_member_required
def total_stats(request):
total_part = Participant.objects.filter(msp=None).count()
app_part = Participant.objects.filter(pcr_approval=True, msp=None).count()
part_stats = str(total_part)+ " | " +str(app_part)
total_amb = CampusAmbassador.objects.all().count()
app_amb = CampusAmbassador.objects.filter(pcr_approved=True).count()
amb_stats = str(total_amb)+ " | " +str(app_amb)
paid = Participant.objects.filter(fee_paid=True, msp=None).count()
context = {
'amb_stats' : amb_stats,
'part_stats' : part_stats,
'paid':paid
}
return render(request, 'pcradmin/total_stats.html', context)
|
StarcoderdataPython
|
3251905
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'firstQtDesignedWindow.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(461, 515)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.pressMeButton = QtWidgets.QPushButton(self.centralwidget)
self.pressMeButton.setGeometry(QtCore.QRect(10, 80, 161, 81))
self.pressMeButton.setObjectName("pressMeButton")
self.heyLabel = QtWidgets.QLabel(self.centralwidget)
self.heyLabel.setGeometry(QtCore.QRect(20, 40, 141, 16))
self.heyLabel.setObjectName("heyLabel")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 461, 21))
self.menubar.setObjectName("menubar")
self.menuFIle = QtWidgets.QMenu(self.menubar)
self.menuFIle.setObjectName("menuFIle")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.menubar.addAction(self.menuFIle.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pressMeButton.setText(_translate("MainWindow", "Press Me"))
self.heyLabel.setText(_translate("MainWindow", "Hey"))
self.menuFIle.setTitle(_translate("MainWindow", "FIle"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
1695527
|
import streamlit as st
import numpy as np
import pandas as pd
import json
from pathlib import Path
p = Path().absolute()
p_log=p/'logs'
flns = list(p_log.glob('*json'))
dfs = []
for file in flns:
with open(file) as f:
json_data = pd.json_normalize(json.loads(f.read()))
dfs.append(json_data)
df = pd.concat(dfs, sort=False) # or sort=True depending on your needs
st.write("Here's an overview of training model metadata:")
st.write(df)
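# Launch note (added): run this dashboard with `streamlit run <this_file>.py`; it expects a
# logs/ directory of *.json training-log files under the current working directory.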
|
StarcoderdataPython
|
143986
|
import turtle
ninja = turtle.Turtle()
ninja.speed(10)
for i in range(180):
ninja.forward(100)
ninja.right(30)
ninja.forward(20)
ninja.left(60)
ninja.forward(50)
ninja.right(30)
ninja.penup()
ninja.setposition(0, 0)
ninja.pendown()
ninja.right(2)
turtle.done()
|
StarcoderdataPython
|
4809306
|
<reponame>saiduc/Alexander-Bot
import calendar
import numpy as np
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
plt.style.use("seaborn-dark")
def plot_calendar(days, months):
plt.figure(figsize=(9, 1.8))
# non days are grayed
ax = plt.gca().axes
ax.add_patch(Rectangle((29, 2), width=.9, height=.9,
color='black', alpha=.3))
ax.add_patch(Rectangle((30, 2), width=.9, height=.9,
color='black', alpha=.5))
ax.add_patch(Rectangle((31, 2), width=.9, height=.9,
color='black', alpha=.5))
ax.add_patch(Rectangle((31, 4), width=.9, height=.9,
color='black', alpha=.5))
ax.add_patch(Rectangle((31, 6), width=.9, height=.9,
color='black', alpha=.5))
ax.add_patch(Rectangle((31, 9), width=.9, height=.9,
color='black', alpha=.5))
ax.add_patch(Rectangle((31, 11), width=.9, height=.9,
color='black', alpha=.5))
weekend_days, weekend_months = get_weekends(2020)
for i in range(len(weekend_days)):
ax.add_patch(Rectangle((weekend_days[i], weekend_months[i]), width=.9, height=.9, color='gray', alpha=.3))
ax.add_patch(Rectangle((30, 6), width=.9, height=.9, color='gray', alpha=.3))
for d, m in zip(days, months):
ax.add_patch(Rectangle((d, m),
width=0.9, height=0.9, color='C0'))
plt.yticks(np.arange(1, 13)+.5, list(calendar.month_abbr)[1:])
plt.xticks(np.arange(1, 32)+.5, np.arange(1, 32))
plt.xlim(1, 32)
plt.ylim(6, 11)
plt.gca().invert_yaxis()
# remove borders and ticks
for spine in plt.gca().spines.values():
spine.set_visible(True)
plt.tick_params(top=False, bottom=False, left=False, right=False)
plt.tight_layout()
plt.savefig("tmp.jpg")
def get_data(name):
names, dates = np.loadtxt("./data/log.dat", delimiter=",", unpack=True, dtype="str")
data = []
for i in range(len(names)):
if names[i] == name:
data.append(dates[i])
months = []
days = []
for item in data:
months.append(int(item.split("-")[1]))
days.append(int(item.split("-")[2]))
return days, months
def get_weekends(year):
weekend_day = []
weekend_month = []
start = datetime(year, 1, 1)
for i in range(365):
day_of_the_year = start + timedelta(days=i)
if day_of_the_year.weekday() > 4:
weekend_day.append(day_of_the_year.day)
weekend_month.append(day_of_the_year.month)
return weekend_day, weekend_month
|
StarcoderdataPython
|
75419
|
<filename>clinicadl/clinicadl/svm/classification_utils.py
# coding: utf8
import abc
import os
import pandas as pd
from clinica.pipelines.machine_learning import base
import clinica.pipelines.machine_learning.voxel_based_io as vbio
import clinica.pipelines.machine_learning.ml_utils as utils
from multiprocessing.pool import ThreadPool
from os import path
import numpy as np
from sklearn.model_selection import StratifiedKFold
import nibabel as nib
__author__ = "<NAME>"
__copyright__ = "Copyright 2018 The Aramis Lab Team"
__credits__ = ["<NAME>, <NAME>"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class CAPSInput(base.MLInput):
def __init__(self, caps_directory, diagnoses_tsv, group_id, image_type, precomputed_kernel=None):
"""
Args:
caps_directory:
subjects_visits_tsv:
diagnoses_tsv:
group_id:
image_type: 'T1', 'fdg', 'av45', 'pib' or 'flute'
precomputed_kernel:
"""
self._caps_directory = caps_directory
self._group_id = group_id
self._image_type = image_type
self._images = None
self._x = None
self._y = None
self._kernel = None
subjects_visits = pd.read_csv(diagnoses_tsv, sep='\t')
if list(subjects_visits.columns.values) != ['participant_id', 'session_id', 'diagnosis']:
raise Exception('Subjects and visits file is not in the correct format.')
self._subjects = list(subjects_visits.participant_id)
self._sessions = list(subjects_visits.session_id)
diagnoses = pd.read_csv(diagnoses_tsv, sep='\t')
if 'diagnosis' not in list(diagnoses.columns.values):
raise Exception('Diagnoses file is not in the correct format.')
self._diagnoses = list(diagnoses.diagnosis)
if image_type not in ['T1', 'fdg', 'av45', 'pib', 'flute', 'dwi']:
raise Exception("Incorrect image type. It must be one of the values 'T1', 'fdg', 'av45', 'pib', 'flute' or 'dwi'")
if precomputed_kernel is not None:
if type(precomputed_kernel) == np.ndarray:
if precomputed_kernel.shape == (len(self._subjects), len(self._subjects)):
self._kernel = precomputed_kernel
else:
raise Exception("""Precomputed kernel provided is not in the correct format.
It must be a numpy.ndarray object with number of rows and columns equal to the number of subjects,
or a filename to a numpy txt file containing an object with the described format.""")
elif isinstance(precomputed_kernel, str):
self._kernel = np.loadtxt(precomputed_kernel)
else:
raise Exception("""Precomputed kernel provided is not in the correct format.
It must be a numpy.ndarray object with number of rows and columns equal to the number of subjects,
or a filename to a numpy txt file containing an object with the described format.""")
@abc.abstractmethod
def get_images(self):
"""
Returns: a list of filenames
"""
pass
@abc.abstractmethod
def get_x(self):
"""
Returns: a numpy 2d-array.
"""
pass
def get_y(self):
"""
Returns: a list of integers. Each integer represents a class.
"""
if self._y is not None:
return self._y
unique = list(set(self._diagnoses))
self._y = np.array([unique.index(x) for x in self._diagnoses])
return self._y
def get_kernel(self, kernel_function=utils.gram_matrix_linear, recompute_if_exists=False):
"""
Returns: a numpy 2d-array.
"""
if self._kernel is not None and not recompute_if_exists:
return self._kernel
if self._x is None:
self.get_x()
print("Computing kernel ...")
self._kernel = kernel_function(self._x)
print("Kernel computed")
return self._kernel
def save_kernel(self, output_dir):
"""
Args:
output_dir:
Returns:
"""
if self._kernel is not None:
filename = path.join(output_dir, 'kernel.txt')
np.savetxt(filename, self._kernel)
return filename
raise Exception("Unable to save the kernel. Kernel must have been computed before.")
@abc.abstractmethod
def save_weights_as_nifti(self, weights, output_dir):
pass
class CAPSVoxelBasedInput(CAPSInput):
def __init__(self, caps_directory, diagnoses_tsv, group_id, image_type, fwhm=0,
modulated="on", pvc=None, mask_zeros=True, precomputed_kernel=None):
"""
Args:
caps_directory:
diagnoses_tsv:
group_id:
image_type: 'T1', 'fdg', 'av45', 'pib' or 'flute'
fwhm:
modulated:
mask_zeros:
precomputed_kernel:
"""
super(CAPSVoxelBasedInput, self).__init__(caps_directory, diagnoses_tsv, group_id,
image_type, precomputed_kernel=precomputed_kernel)
self._fwhm = fwhm
self._modulated = modulated
self._pvc = pvc
self._mask_zeros = mask_zeros
self._orig_shape = None
self._data_mask = None
if modulated not in ['on', 'off']:
raise Exception("Incorrect modulation parameter. It must be one of the values 'on' or 'off'")
def get_images(self):
"""
Returns: a list of filenames
"""
if self._images is not None:
return self._images
if self._image_type == 'T1':
fwhm = '' if self._fwhm == 0 else '_fwhm-%dmm' % int(self._fwhm)
self._images = [path.join(self._caps_directory, 'subjects', self._subjects[i], self._sessions[i],
't1/spm/dartel/group-' + self._group_id,
'%s_%s_T1w_segm-graymatter_space-Ixi549Space_modulated-%s%s_probability.nii.gz'
% (self._subjects[i], self._sessions[i], self._modulated, fwhm))
for i in range(len(self._subjects))]
else:
pvc = '' if self._pvc is None else '_pvc-%s' % self._pvc
fwhm = '' if self._fwhm == 0 else '_fwhm-%dmm' % int(self._fwhm)
suvr = 'pons' if self._image_type == 'fdg' else 'cerebellumPons'
self._images = [path.join(self._caps_directory, 'subjects', self._subjects[i], self._sessions[i],
'pet/preprocessing/group-' + self._group_id,
'%s_%s_task-rest_acq-%s_pet_space-Ixi549Space%s_suvr-%s_mask-brain%s_pet.nii.gz'
% (self._subjects[i], self._sessions[i], self._image_type, pvc, suvr, fwhm))
for i in range(len(self._subjects))]
for image in self._images:
if not path.exists(image):
raise Exception("File %s doesn't exists." % image)
return self._images
def get_x(self):
"""
Returns: a numpy 2d-array.
"""
if self._x is not None:
return self._x
print('Loading ' + str(len(self.get_images())) + ' subjects')
self._x, self._orig_shape, self._data_mask = vbio.load_data(self._images, mask=self._mask_zeros)
print('Subjects loaded')
return self._x
def save_weights_as_nifti(self, weights, output_dir):
if self._images is None:
self.get_images()
output_filename = path.join(output_dir, 'weights.nii.gz')
data = vbio.revert_mask(weights, self._data_mask, self._orig_shape)
vbio.weights_to_nifti(data, self._images[0], output_filename)
class KFoldCV(base.MLValidation):
def __init__(self, ml_algorithm):
self._ml_algorithm = ml_algorithm
self._fold_results = []
self._classifier = None
self._best_params = None
self._cv = None
def validate(self, y, n_folds=10, n_threads=15, splits_indices=None):
if splits_indices is None:
skf = StratifiedKFold(n_splits=n_folds, shuffle=True, )
self._cv = list(skf.split(np.zeros(len(y)), y))
else:
self._cv = splits_indices
async_pool = ThreadPool(n_threads)
async_result = {}
for i in range(n_folds):
train_index, test_index = self._cv[i]
async_result[i] = async_pool.apply_async(self._ml_algorithm.evaluate, (train_index, test_index))
async_pool.close()
async_pool.join()
for i in range(n_folds):
self._fold_results.append(async_result[i].get())
# save the mean of the best models
self._classifier, self._best_params = self._ml_algorithm.apply_best_parameters(self._fold_results)
return self._classifier, self._best_params, self._fold_results
def save_results(self, output_dir):
if self._fold_results is None:
raise Exception("No results to save. Method validate() must be run before save_results().")
subjects_folds = []
results_folds = []
container_dir = path.join(output_dir, 'folds')
os.makedirs(container_dir, exist_ok=True)
for i in range(len(self._fold_results)):
subjects_df = pd.DataFrame({'y': self._fold_results[i]['y'],
'y_hat': self._fold_results[i]['y_hat'],
'y_index': self._fold_results[i]['y_index']})
subjects_df.to_csv(path.join(container_dir, 'subjects_fold-' + str(i) + '.tsv'),
index=False, sep='\t', encoding='utf-8')
subjects_folds.append(subjects_df)
results_df = pd.DataFrame({'balanced_accuracy': self._fold_results[i]['evaluation']['balanced_accuracy'],
'auc': self._fold_results[i]['auc'],
'accuracy': self._fold_results[i]['evaluation']['accuracy'],
'sensitivity': self._fold_results[i]['evaluation']['sensitivity'],
'specificity': self._fold_results[i]['evaluation']['specificity'],
'ppv': self._fold_results[i]['evaluation']['ppv'],
'npv': self._fold_results[i]['evaluation']['npv']}, index=['i', ])
results_df.to_csv(path.join(container_dir, 'results_fold-' + str(i) + '.tsv'),
index=False, sep='\t', encoding='utf-8')
results_folds.append(results_df)
all_subjects = pd.concat(subjects_folds)
all_subjects.to_csv(path.join(output_dir, 'subjects.tsv'),
index=False, sep='\t', encoding='utf-8')
all_results = pd.concat(results_folds)
all_results.to_csv(path.join(output_dir, 'results.tsv'),
index=False, sep='\t', encoding='utf-8')
mean_results = pd.DataFrame(all_results.apply(np.nanmean).to_dict(), columns=all_results.columns, index=[0, ])
mean_results.to_csv(path.join(output_dir, 'mean_results.tsv'),
index=False, sep='\t', encoding='utf-8')
print("Mean results of the classification:")
print("Balanced accuracy: %s" % (mean_results['balanced_accuracy'].to_string(index=False)))
print("specificity: %s" % (mean_results['specificity'].to_string(index=False)))
print("sensitivity: %s" % (mean_results['sensitivity'].to_string(index=False)))
print("auc: %s" % (mean_results['auc'].to_string(index=False)))
def extract_indices_from_5_fold(diagnosis_tsv_folder, n_splits, output_dir, baseline=True, diagnoses_list=['AD', 'CN']):
"""
Restore the train/validation indices of the n_splits folds made for the CNN, based on all the diagnosis tsvs.
:param diagnosis_tsv_folder: folder containing the train/validation split tsv files
:param n_splits: number of folds
:param output_dir: directory where the combined all_subjects.tsv is written
:param baseline: if True, use the *_baseline.tsv files for the training splits
:param diagnoses_list: diagnosis labels to include
:return: the per-fold (train, validation) indices and the path to the combined tsv file
"""
splits_indices = []
for i in range(n_splits):
train_df = pd.DataFrame()
valid_df = pd.DataFrame()
train_index = []
valid_index = []
train_path = path.join(diagnosis_tsv_folder, 'train_splits-' + str(n_splits),
'split-' + str(i))
valid_path = path.join(diagnosis_tsv_folder, 'validation_splits-' + str(n_splits),
'split-' + str(i))
print("Train", train_path)
print("Valid", valid_path)
for diagnosis in diagnoses_list:
if baseline:
train_diagnosis_tsv = path.join(train_path, diagnosis + '_baseline.tsv')
else:
train_diagnosis_tsv = path.join(train_path, diagnosis + '.tsv')
valid_diagnosis_tsv = path.join(valid_path, diagnosis + '_baseline.tsv')
train_diagnosis_df = pd.read_csv(train_diagnosis_tsv, sep='\t')
valid_diagnosis_df = pd.read_csv(valid_diagnosis_tsv, sep='\t')
train_df = pd.concat([train_df, train_diagnosis_df])
valid_df = pd.concat([valid_df, valid_diagnosis_df])
train_df.reset_index(inplace=True, drop=True)
valid_df.reset_index(inplace=True, drop=True)
if i == 0:
# only concatenate the train + valid for the first fold
all_df = pd.concat([train_df, valid_df])
all_df.reset_index(inplace=True, drop=True)
all_tsv = os.path.join(output_dir, 'all_subjects.tsv')
all_df.to_csv(all_tsv, index=False, sep='\t', encoding='utf-8')
# find the index for the training and validation based on the concatenated tsv.
for j in range(len(train_df)):
row = train_df.iloc[j]
for index, row_all in all_df.iterrows():
if row['participant_id'] == row_all['participant_id'] and row['session_id'] == row_all['session_id'] and row['diagnosis'] == row_all['diagnosis']:
train_index.append(index)
for k in range(len(valid_df)):
row = valid_df.iloc[k]
for index, row_all in all_df.iterrows():
if row['participant_id'] == row_all['participant_id'] and row['session_id'] == row_all['session_id'] and row['diagnosis'] == row_all['diagnosis']:
valid_index.append(index)
# convert the list of index to be an array
train_index_array = np.asarray(train_index)
valid_index_array = np.asarray(valid_index)
# convert the two arrays into a tuple
index_tuple = (train_index_array, valid_index_array)
splits_indices.append(index_tuple)
return splits_indices, all_tsv
def load_data_svm(image_list, mask=True):
"""
Args:
image_list:
mask:
Returns:
"""
data = None
shape = None
data_mask = None
first = True
for i in range(len(image_list)):
subj = nib.load(image_list[i])
subj_data = np.nan_to_num(subj.get_data().flatten())
# Memory allocation for ndarray containing all data to avoid copying the array for each new subject
if first:
data = np.ndarray(shape=(len(image_list), subj_data.shape[0]), dtype=float, order='C')
shape = subj.get_data().shape
first = False
data[i, :] = subj_data
if mask:
data_mask = (data != 0).sum(axis=0) != 0
data = data[:, data_mask]
return data, shape, data_mask
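# Illustrative sketch (not part of the original code): load_data_svm is
# typically followed by building a linear Gram matrix, since the SVM helpers
# below work on a precomputed kernel. The image paths are hypothetical.
def _example_build_linear_kernel(image_list):
    import numpy as np
    data, shape, data_mask = load_data_svm(image_list, mask=True)
    kernel = np.dot(data, data.T)  # linear kernel: K[i, j] = <x_i, x_j>
    return kernel, shape, data_mask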
def revert_mask(weights, mask, shape):
"""
Args:
weights:
mask:
shape:
Returns:
"""
z = np.zeros(np.prod(shape))
z[mask] = weights # ValueError: NumPy boolean array indexing assignment cannot assign 1636161 input values to the 1634188 output values where the mask is true
new_weights = np.reshape(z, shape)
return new_weights
def evaluate_prediction(y, y_hat):
true_positive = 0.0
true_negative = 0.0
false_positive = 0.0
false_negative = 0.0
tp = []
tn = []
fp = []
fn = []
for i in range(len(y)):
if y[i] == 1:
if y_hat[i] == 1:
true_positive += 1
tp.append(i)
else:
false_negative += 1
fn.append(i)
        else:  # negative class (y[i] == 0)
if y_hat[i] == 0:
true_negative += 1
tn.append(i)
else:
false_positive += 1
fp.append(i)
accuracy = (true_positive + true_negative) / (true_positive + true_negative + false_positive + false_negative)
if (true_positive + false_negative) != 0:
sensitivity = true_positive / (true_positive + false_negative)
else:
sensitivity = 0.0
if (false_positive + true_negative) != 0:
specificity = true_negative / (false_positive + true_negative)
else:
specificity = 0.0
if (true_positive + false_positive) != 0:
ppv = true_positive / (true_positive + false_positive)
else:
ppv = 0.0
if (true_negative + false_negative) != 0:
npv = true_negative / (true_negative + false_negative)
else:
npv = 0.0
balanced_accuracy = (sensitivity + specificity) / 2
results = {'accuracy': accuracy,
'balanced_accuracy': balanced_accuracy,
'sensitivity': sensitivity,
'specificity': specificity,
'ppv': ppv,
'npv': npv
}
return results
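# Minimal worked example (illustrative only): with y = [1, 1, 0, 0] and
# y_hat = [1, 0, 0, 1] there is one TP, one FN, one TN and one FP, so
# accuracy, sensitivity, specificity and balanced accuracy all equal 0.5.
def _example_evaluate_prediction():
    return evaluate_prediction([1, 1, 0, 0], [1, 0, 0, 1])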
def save_data(df, output_dir, folder_name):
"""
Save data so it can be used by the workflow
:param df:
:param output_dir:
:param folder_name:
:return: path to the tsv files
"""
import os
from os import path
results_dir = path.join(output_dir, 'data', folder_name)
os.makedirs(results_dir, exist_ok=True)
df.to_csv(path.join(results_dir, 'all_subjects.tsv'), sep="\t", index=False)
return path.join(results_dir, 'all_subjects.tsv')
def save_weights(classifier, x, output_dir):
dual_coefficients = classifier.dual_coef_
sv_indices = classifier.support_
weighted_sv = dual_coefficients.transpose() * x[sv_indices]
weights = np.sum(weighted_sv, 0)
np.savetxt(path.join(output_dir, 'weights.txt'), weights)
return weights
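# Note on the reconstruction above (added for clarity): for a binary SVC fitted
# on a precomputed *linear* kernel, dual_coef_ holds alpha_i * y_i for the
# support vectors, so sum_i dual_coef_i * x_i recovers the primal weight
# vector w. Illustrative sanity check on hypothetical random data:
def _example_check_weight_reconstruction():
    import numpy as np
    from sklearn.svm import SVC
    rng = np.random.RandomState(0)
    x = rng.randn(40, 5)
    y = (x[:, 0] + 0.1 * rng.randn(40) > 0).astype(int)
    svc = SVC(kernel='precomputed').fit(np.dot(x, x.T), y)
    w_reconstructed = np.sum(svc.dual_coef_.transpose() * x[svc.support_], 0)
    w_direct = SVC(kernel='linear').fit(x, y).coef_.ravel()
    return np.allclose(w_reconstructed, w_direct, atol=1e-4)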
def apply_best_parameters_each_split(kernel, x, y, results_list, balanced, n_fold, diagnoses_tsv, output_dir):
"""
Save the best model for each fold
:param results_list:
:return:
"""
from sklearn.svm import SVC
best_c = results_list[n_fold]['best_parameter']['c']
best_bal_acc = results_list[n_fold]['best_parameter']['balanced_accuracy']
train_index = results_list[n_fold]['x_index']
if balanced:
svc = SVC(C=best_c, kernel='precomputed', probability=True, tol=1e-6, class_weight='balanced')
else:
svc = SVC(C=best_c, kernel='precomputed', probability=True, tol=1e-6)
outer_kernel = kernel[train_index, :][:, train_index]
y_train = y[train_index]
# save the training data for reconstruction use
df = pd.read_csv(diagnoses_tsv, sep='\t')
df_training = df.iloc[train_index]
result_dir = path.join(output_dir, 'classifier', 'fold_' + str(n_fold))
os.makedirs(result_dir, exist_ok=True)
training_tsv = os.path.join(result_dir, 'training_subjects.tsv')
df_training.to_csv(training_tsv, index=False, sep='\t', encoding='utf-8')
svc.fit(outer_kernel, y_train)
# save the weight
save_weights(svc, x[train_index], result_dir)
return svc, {'c': best_c, 'balanced_accuracy': best_bal_acc}, train_index
|
StarcoderdataPython
|
3297915
|
"""#General classification and regression explanations
For examples and interpretation, see my notebooks on [general classification explanations](https://github.com/dsbowen/gshap/blob/master/classification.ipynb) and [general regression explanations](https://github.com/dsbowen/gshap/blob/master/regression.ipynb).
"""
class ProbabilityDistance():
"""
This class measures how likely each predicted target value (output) was to
have been generated by a 'positive' distribution or density, rather than a
'negative' distribution or density.
Parameters
----------
positive : callable or list of callables
Densities and distributions take the output of a model, usually a
(# observations,) or (# observations, # classes) vector. It returns a
(# observations,) vector of probabilities that the predicted target
value was generated by the density or distribution.
negative : callable or list of callables or None, default=None
Similarly defined. If `None`, the probability that each observation
comes from a negative density or distribution will be treated as the
complement of `positive`.
Attributes
----------
positive : callable or list of callables
Set from the `positive` parameter.
negative : callable or list of callables
Set from the `negative` parameter.
Examples
--------
```python
import gshap
from gshap.probability_distance import ProbabilityDistance
from sklearn.datasets import load_iris
from sklearn.svm import SVC
X, y = load_iris(return_X_y=True)
clf = SVC(probability=True).fit(X,y)
# probability that each observation is in class 1
pos_distribution = lambda y_pred: y_pred[:,1]
# probability that each observation is in class 0
neg_distribution = lambda y_pred: y_pred[:,0]
g = ProbabilityDistance(pos_distribution, neg_distribution)
explainer = gshap.KernelExplainer(clf.predict_proba, X, g)
explainer.gshap_values(x, nsamples=1000)
```
Out:
```
array([0.02175944, 0.01505252, 0.17106646, 0.13605429])
```
"""
def __init__(self, positive, negative=None):
self.positive = positive
self.negative = negative
def __call__(self, output):
"""
Parameters
----------
output : np.array
Model output, usually (# observations,) or
            (# observations, # classes) array for regression or classification
problems, respectively.
Returns
-------
probability : float
Probability that every predicted target value was generated by
a positive density or distribution, rather than a negative density
or distribution.
"""
p_pos = self._compute_probability(self.positive, output)
if self.negative:
p_neg = self._compute_probability(self.negative, output)
else:
p_neg = 1 - p_pos
x = 1 / (1 + (p_neg/p_pos).prod())
return x
def _compute_probability(self, funcs, output):
"""
Compute the probability that each value of the output was generated by
one of the density or distribution functions.
Parameters
----------
funcs : None, callable, or list of callables
Density functions
output : np.array-like
(# observations,) model output vector
"""
if funcs is None:
funcs = []
elif not isinstance(funcs, list):
funcs = [funcs]
return sum([func(output) for func in funcs])
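# Illustrative sketch (not part of the original module): for a regression
# model, `positive` and `negative` can be ordinary probability density
# functions over the predicted values; scipy is assumed to be available and
# the locations/scales below are hypothetical.
def _example_regression_distance():
    from scipy.stats import norm
    positive = lambda y_pred: norm.pdf(y_pred, loc=1.0, scale=0.5)   # high-value regime
    negative = lambda y_pred: norm.pdf(y_pred, loc=-1.0, scale=0.5)  # low-value regime
    return ProbabilityDistance(positive, negative)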
|
StarcoderdataPython
|
3220221
|
from .models import Dictionary
from base.serializers import BaseHyperlinkedModelSerializer
class DictionarySerializer(BaseHyperlinkedModelSerializer):
class Meta(BaseHyperlinkedModelSerializer.Meta):
model = Dictionary
def create(self, validated_data):
return Dictionary.objects.create(**validated_data)
    def update(self, instance, validated_data):
        for attr, value in validated_data.items():
            setattr(instance, attr, value)
        instance.save()
        return instance
|
StarcoderdataPython
|
136183
|
"""
@author <NAME>, January 2020
@source Hivemind, https://github.com/compserv/hivemind
@dataSource Open Computing Facility, https://www.ocf.berkeley.edu/
@dataMaintainers HKN's Computing Services Committee, https://hkn.eecs.berkeley.edu/about/officers
"""
import urllib.request, json, sys
# To get extended output, put 1 (or any other nonzero number) as an argument, as:
# python3 telepath.py 1
# To get basic output, put nothing or 0 as an argument.
if len(sys.argv) == 2:
    PRINTALL = bool(int(sys.argv[1]))
else:
    PRINTALL = False
def onlyHive(fullData):
"""
Parses the fullData set and returns only those servers with "hive" in their name.
This could probably be generalized to return other machines like those in Soda.
"""
toReturn = {}
for server in fullData:
if str(server)[0:4] == "hive":
toReturn[server] = fullData[server]
return toReturn
def cleanTime(seconds):
    """
    A helper used for the extended output that converts a time in seconds to hh:mm:ss.
    """
    hours = seconds // (60 * 60)
    minutes = (seconds % (60 * 60)) // 60  # minutes remaining after whole hours
    seconds = (seconds % 60)
    return str(hours)[:-2] + ":" + str(minutes)[:-2] + ":" + str(seconds)[0:5]
def findBestServer(hiveData):
"""
Sweeps the dictionary and finds the best server by first finding the minimum average CPU usage and then finding the one with the fewest users.
"""
def findMin(valueFunction, fullData):
"""
Finds the minimum value in the data set fullData according to a value obtained by applying valueFunction to those values
"""
minValue = None
minValues = {}
for data in fullData:
if fullData[data]:
averageValue = valueFunction(data, fullData)
                if minValue is None or averageValue < minValue:
minValue = averageValue
minValues = {}
minValues[data] = fullData[data]
elif averageValue == minValue:
minValues[data] = fullData[data]
return minValues
bestCPU = findMin((lambda x, dataSet: dataSet[x]['load_avgs'][1]), hiveData) # First, get the best servers by lowest average CPU usage, as Hivemind's code does
return findMin((lambda x, dataSet: len(dataSet[x]['users'])), bestCPU) # Then, get the best servers by fewest number of online users
def format(serverDict, sortKeyword='id'):
"""
Returns an array of nicely formatted servers, sorted by whatever the user prefers, or id by default.
"""
sortDict = {'id': lambda server: int(server.name[4:-3]),
'uptime': lambda server: server.uptime}
sortFunction = sortDict[sortKeyword]
class Server:
def __init__(self, serverName, dataSet):
self.name = str(serverName)
self.loadAvgs = dataSet[serverName]['load_avgs']
self.users = dataSet[serverName]['users']
self.uptime = dataSet[serverName]['uptime']
def __str__(self):
return str(self.name[:-3]) + " (" + str(self.loadAvgs[1] * 100) + "% mean CPU load, " + str(len(self.users)) + " users online, up for " + cleanTime(self.uptime) + ")"
serverList = []
for server in serverDict:
serverList.append(Server(server, serverDict))
# Now, sort the list based on the sorting function
serverList.sort(key=sortFunction)
return serverList
serverData = json.loads(urllib.request.urlopen("https://www.ocf.berkeley.edu/~hkn/hivemind/data/latest.json").read().decode())
serverData = serverData['data']
serverData = onlyHive(serverData)
bestServers = findBestServer(serverData)
allServers = format(bestServers, 'uptime')
if PRINTALL:
for server in allServers:
print(server)
else:
print(allServers[0].name[4:-3])
|
StarcoderdataPython
|
126192
|
from .text_based import CircleILTISROIData, PolygonILTISROIData
from .tiff_based import SpatialFootprintROIData
|
StarcoderdataPython
|
54901
|
<filename>nature/benchmarks/protein_folding_problem_benchmark.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Protein Folding Problem benchmarks."""
from itertools import product
from timeit import timeit
from qiskit import Aer
from qiskit.utils import QuantumInstance
from qiskit_nature.problems.sampling.protein_folding import (
PenaltyParameters,
Peptide,
ProteinFoldingProblem,
RandomInteraction,
MiyazawaJerniganInteraction,
MixedInteraction,
)
# pylint: disable=redefined-outer-name, invalid-name, attribute-defined-outside-init
class ProteinFoldingProblemBenchmarks:
"""Protein Folding Problem benchmarks."""
version = 1
params = [
["Neuropeptide", "NeuropeptideDummySide", "Angiotensin", "AngiotensinDummySide"],
["MiyazawaJerniganInteraction", "RandomInteraction", "MixedInteraction"],
]
param_names = ["peptide", "interaction type"]
def __init__(self):
        self.peptides = {
            "Neuropeptide": ("APRLRFY", [""] * 7),
            # Neuropeptide with dummy side chains
            "NeuropeptideDummySide": ("APRLRFY", ["", "", "R", "", "T", "W", ""]),
            "Angiotensin": ("DRVYIHPFHL", [""] * 10),  # Angiotensin I, human
            # Angiotensin I, human with dummy side chains
            "AngiotensinDummySide": (
                "DRVYIHPFHL",
                ["", "", "P", "R", "L", "H", "Y", "", "I", ""],
            ),
        }
self.interactions = {
"MiyazawaJerniganInteraction": MiyazawaJerniganInteraction(),
"RandomInteraction": RandomInteraction(),
"MixedInteraction": MixedInteraction(),
}
def setup(self, peptide_id, interaction_id):
"""setup"""
qasm_sim = Aer.get_backend("qasm_simulator")
self._qins = QuantumInstance(backend=qasm_sim, shots=1)
self.main_chain_residue_sequence = self.peptides[peptide_id][0]
self.side_chain_residue_sequences = self.peptides[peptide_id][1]
peptide = Peptide(self.main_chain_residue_sequence, self.side_chain_residue_sequences)
interaction = self.interactions[interaction_id]
self.protein_folding_problem = ProteinFoldingProblem(
peptide, interaction, PenaltyParameters()
)
def time_generate_peptide(self, _, __):
"""Time generation of a peptide."""
return Peptide(self.main_chain_residue_sequence, self.side_chain_residue_sequences)
def time_generate_full_qubit_operator(self, _, __):
"""Time generation of full protein folding qubit operator."""
return self.protein_folding_problem._qubit_op_full()
def time_generate_compressed_qubit_operator(self, _, __):
"""Time generation of compressed protein folding qubit operator."""
return self.protein_folding_problem.qubit_op()
if __name__ == "__main__":
protein_folding_problem_benchmarks = ProteinFoldingProblemBenchmarks()
for peptide_id, interaction_id in product(*ProteinFoldingProblemBenchmarks.params):
bench = ProteinFoldingProblemBenchmarks()
try:
bench.setup(peptide_id, interaction_id)
except NotImplementedError:
continue
for method in set(dir(ProteinFoldingProblemBenchmarks)):
if method.startswith("time_"):
elapsed = timeit(f"bench.{method}(None, None)", number=10, globals=globals())
print(
f"main_chain_residue_seq="
f"{bench.peptides[peptide_id][0]}, "
f"side_chain_residue_sequences={bench.peptides[peptide_id][1]}, "
f"interaction={bench.interactions[interaction_id]} {method}:\t{elapsed}"
)
|
StarcoderdataPython
|
3207622
|
<gh_stars>1-10
import os
i = 0
while True:
    # write a dot so there is always something new to commit
    with open('test0.txt', 'a') as f:
        f.write(".")
    # add and commit
    os.system('git add .')
    os.system('git commit -m "1"')
    i = i + 1
    print(str(i) + ':commits')
|
StarcoderdataPython
|
1795509
|
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from ._base import MutableTelegramObject
if TYPE_CHECKING: # pragma: no cover
from .keyboard_button import KeyboardButton
class ReplyKeyboardMarkup(MutableTelegramObject):
"""
This object represents a custom keyboard with reply options (see Introduction to bots for
details and examples).
Source: https://core.telegram.org/bots/api#replykeyboardmarkup
"""
keyboard: List[List[KeyboardButton]]
"""Array of button rows, each represented by an Array of KeyboardButton objects"""
resize_keyboard: Optional[bool] = None
"""Requests clients to resize the keyboard vertically for optimal fit (e.g., make the keyboard
smaller if there are just two rows of buttons). Defaults to false, in which case the custom
keyboard is always of the same height as the app's standard keyboard."""
one_time_keyboard: Optional[bool] = None
"""Requests clients to hide the keyboard as soon as it's been used. The keyboard will still be
available, but clients will automatically display the usual letter-keyboard in the chat –
the user can press a special button in the input field to see the custom keyboard again.
Defaults to false."""
selective: Optional[bool] = None
"""Use this parameter if you want to show the keyboard to specific users only. Targets: 1)
users that are @mentioned in the text of the Message object; 2) if the bot's message is a
reply (has reply_to_message_id), sender of the original message."""
|
StarcoderdataPython
|
3322273
|
#!/usr/bin/python3.6
# created by cicek on 19.03.2019 00:17
import math
print("Enter numbers: ")
unsortedList = [int(item) for item in input().split()]
"""6 14 45 78 18 47 53 83 91 81 77 84 99 64 42"""
print("unsorted list:\n" + str(unsortedList) + "\n")
# def findMyParent(a: int):
# if (unsortedList.index(a) == 0):
# print("root")
# exit(0)
# else:
# parentIndex = math.floor((unsortedList.index(a) + 1) / 2)
# parent = unsortedList[parentIndex - 1]
# print(parent)
def heapify(unsortedList, index, size):
    smallest = index  # avoid shadowing the built-in min()
    left_index = index * 2 + 1
    right_index = index * 2 + 2
    if (left_index < size) and unsortedList[left_index] < unsortedList[smallest]:
        smallest = left_index
    if (right_index < size) and unsortedList[right_index] < unsortedList[smallest]:
        smallest = right_index
    if smallest != index:
        unsortedList[smallest], unsortedList[index] = unsortedList[index], unsortedList[smallest]
        heapify(unsortedList, smallest, size)
sortedList = list() # []
def heapSort(unsortedList):
n = len(unsortedList)
for i in range(n // 2 - 1, -1, -1):
heapify(unsortedList, i, n)
for i in range(n - 1, -1, -1):
sortedList.append(unsortedList[0])
print(sortedList)
unsortedList[0], unsortedList[i] = unsortedList[i], unsortedList[0]
heapify(unsortedList, 0, i)
return sortedList
print("\nsorted list:\n" + str(heapSort(unsortedList)))
"""
Enter numbers:
6 14 45 78 18 47 53 83 91 81 77 84 99 64 42
unsorted list:
[6, 14, 45, 78, 18, 47, 53, 83, 91, 81, 77, 84, 99, 64, 42]
[6]
[6, 14]
[6, 14, 18]
[6, 14, 18, 42]
[6, 14, 18, 42, 45]
[6, 14, 18, 42, 45, 47]
[6, 14, 18, 42, 45, 47, 53]
[6, 14, 18, 42, 45, 47, 53, 64]
[6, 14, 18, 42, 45, 47, 53, 64, 77]
[6, 14, 18, 42, 45, 47, 53, 64, 77, 78]
[6, 14, 18, 42, 45, 47, 53, 64, 77, 78, 81]
[6, 14, 18, 42, 45, 47, 53, 64, 77, 78, 81, 83]
[6, 14, 18, 42, 45, 47, 53, 64, 77, 78, 81, 83, 84]
[6, 14, 18, 42, 45, 47, 53, 64, 77, 78, 81, 83, 84, 91]
[6, 14, 18, 42, 45, 47, 53, 64, 77, 78, 81, 83, 84, 91, 99]
sorted list:
[6, 14, 18, 42, 45, 47, 53, 64, 77, 78, 81, 83, 84, 91, 99]
Process finished with exit code 0
"""
|
StarcoderdataPython
|
37790
|
"""
A whole file dedicated to parsing __version__ in all it's weird possible ways
1) Only acts on source, no file handling.
2) some functions for *by line*
3) some functions for *by file*
4) Handle quotes
5) Handle whitespace
6) Handle version as tuple
"""
import ast
import re
from typing import Any, Optional, Tuple
version_tokens = [
"__version__", # canonical
"__VERSION__", # rare and wrong, but who am I to argue
"VERSION", # rare
"version",
"PACKAGE_VERSION",
]
def find_by_ast(line: str, version_token: str = "__version__") -> Optional[str]:
"""
Safer way to 'execute' python code to get a simple value
:param line:
:param version_token:
:return:
"""
if not line:
return ""
# clean up line.
simplified_line = simplify_line(line)
if simplified_line.startswith(version_token):
# noinspection PyBroadException
try:
tree: Any = ast.parse(simplified_line)
if hasattr(tree.body[0].value, "s"):
return str(tree.body[0].value.s)
if hasattr(tree.body[0].value, "elts"):
version_parts = []
for elt in tree.body[0].value.elts:
if hasattr(elt, "n"):
version_parts.append(str(elt.n))
else:
version_parts.append(str(elt.s))
return ".".join(version_parts)
if hasattr(tree.body[0].value, "n"):
return str(tree.body[0].value.n)
# print(tree)
except Exception:
# raise
return None
return None
def simplify_line(line: str, keep_comma: bool = False) -> str:
"""
Change ' to "
Remove tabs and spaces (assume no significant whitespace inside a version string!)
"""
if not line:
return ""
if "#" in line:
parts = line.split("#")
simplified_line = parts[0]
else:
simplified_line = line
simplified_line = (
simplified_line.replace(" ", "")
.replace("'", '"')
.replace("\t", "")
.replace("\n", "")
.replace("'''", '"') # version strings shouldn't be split across lines normally
.replace('"""', '"')
)
if not keep_comma:
simplified_line = simplified_line.strip(" ,")
return simplified_line
def find_version_by_regex(
file_source: str, version_token: str = "__version__"
) -> Optional[str]:
"""
Regex for dunder version
"""
if not file_source:
return None
version_match = re.search(
r"^" + version_token + r" = ['\"]([^'\"]*)['\"]", file_source, re.M
)
if version_match:
candidate = version_match.group(1)
if candidate in ("", "."): # yes, it will match to a .
return None
return candidate
return None
def find_version_by_string_lib(
line: str, version_token: str = "__version__"
) -> Optional[str]:
"""
No regex parsing. Or at least, mostly, not regex.
"""
if not line:
return None
simplified_line = simplify_line(line)
version = None
if simplified_line.strip().startswith(version_token):
if '"' not in simplified_line:
pass
# logger.debug("Weird version string, no double quote : " + unicode((full_path, line, simplified_line)))
else:
if "=" in simplified_line:
post_equals = simplified_line.split("=")[1]
if post_equals.startswith('"'):
parts = post_equals.split('"')
version = parts[0]
if not version:
version = None
return version
def validate_string(version: Optional[str]) -> Optional[str]:
"""
Trying to catch expressions here
:param version:
:return:
"""
if not version:
return None
for char in str(version):
if char in " \t()":
return None
# raise TypeError("Bad parse : " + version)
return version
def find_in_line(line: str) -> Tuple[Optional[str], Optional[str]]:
"""
Use three strategies to parse version string
"""
if not line:
return None, None
for version_token in version_tokens:
by_ast = find_by_ast(line, version_token)
by_ast = validate_string(by_ast)
if by_ast:
return by_ast, version_token
by_string_lib = find_version_by_string_lib(line, version_token)
by_string_lib = validate_string(by_string_lib)
if by_string_lib:
return by_string_lib, version_token
by_regex = find_version_by_regex(line, version_token)
by_regex = validate_string(by_regex)
if by_regex:
return by_regex, version_token
return None, None
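# Illustrative usage (not part of the original module): each strategy is tried
# in turn, so these simple forms all resolve to the version string "1.2.3".
def _example_find_in_line():
    samples = [
        '__version__ = "1.2.3"',
        "__version__ = '1.2.3'  # trailing comment",
        "VERSION = (1, 2, 3)",
    ]
    return [find_in_line(line) for line in samples]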
|
StarcoderdataPython
|
66866
|
from simalia.math.numbers.integer import Integer
from simalia.math.operators.sum import Sum
from simalia.pymath import Variable
class Pi(Sum, Variable):
def __init__(self, iterations=11):
self.__k = Variable("k", Integer(0))
super().__init__(iterations, self.__k, self.__formular)
self.iterations = iterations
def __str__(self):
return "\u03c0"
def latex(self):
return "\\pi"
def __formular(self):
k8 = (Integer(8) * self.__k.val).calc()
return (Integer(1) / Integer(16) ** self.__k.val) * (
(Integer(4) / (k8 + Integer(1))) -
(Integer(2) / (k8 + Integer(4))) -
(Integer(1) / (k8 + Integer(5))) -
(Integer(1) / (k8 + Integer(6))))
|
StarcoderdataPython
|
3295309
|
########################################################################
#
# (C) 2015, <NAME> <<EMAIL>>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import json
from ansible import context
from ansible.errors import AnsibleError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils.six.moves import input
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.urls import open_url
from ansible.utils.color import stringc
from ansible.utils.display import Display
display = Display()
class GalaxyLogin(object):
''' Class to handle authenticating user with Galaxy API prior to performing CUD operations '''
GITHUB_AUTH = 'https://api.github.com/authorizations'
def __init__(self, galaxy, github_token=None):
self.galaxy = galaxy
self.github_username = None
self.github_password = None
self._validate_certs = not context.CLIARGS['ignore_certs']
if github_token is None:
self.get_credentials()
def get_credentials(self):
display.display(u'\n\n' + "We need your " + stringc("GitHub login", 'bright cyan') +
" to identify you.", screen_only=True)
display.display("This information will " + stringc("not be sent to Galaxy", 'bright cyan') +
", only to " + stringc("api.github.com.", "yellow"), screen_only=True)
display.display("The password will not be displayed." + u'\n\n', screen_only=True)
display.display("Use " + stringc("--github-token", 'yellow') +
" if you do not want to enter your password." + u'\n\n', screen_only=True)
try:
self.github_username = input("GitHub Username: ")
except Exception:
pass
try:
self.github_password = getpass.getpass("Password for %s: " % self.github_username)
except Exception:
pass
if not self.github_username or not self.github_password:
raise AnsibleError("Invalid GitHub credentials. Username and password are required.")
def remove_github_token(self):
'''
If for some reason an ansible-galaxy token was left from a prior login, remove it. We cannot
retrieve the token after creation, so we are forced to create a new one.
'''
try:
tokens = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
url_password=self.github_password, force_basic_auth=True,
validate_certs=self._validate_certs, http_agent=user_agent()))
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['message'])
for token in tokens:
if token['note'] == 'ansible-galaxy login':
display.vvvvv('removing token: %s' % token['token_last_eight'])
try:
open_url('https://api.github.com/authorizations/%d' % token['id'],
url_username=self.github_username, url_password=self.github_password, method='DELETE',
force_basic_auth=True, validate_certs=self._validate_certs, http_agent=user_agent())
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['message'])
def create_github_token(self):
'''
Create a personal authorization token with a note of 'ansible-galaxy login'
'''
self.remove_github_token()
args = json.dumps({"scopes": ["public_repo"], "note": "ansible-galaxy login"})
try:
data = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
url_password=self.github_password, force_basic_auth=True, data=args,
validate_certs=self._validate_certs, http_agent=user_agent()))
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['message'])
return data['token']
|
StarcoderdataPython
|
3224973
|
import numpy as np
from rlgym.utils import RewardFunction
from rlgym.utils.common_values import CEILING_Z, BALL_MAX_SPEED, CAR_MAX_SPEED, BLUE_TEAM, BLUE_GOAL_BACK, \
BLUE_GOAL_CENTER, ORANGE_GOAL_BACK, ORANGE_GOAL_CENTER, BALL_RADIUS, ORANGE_TEAM
from rlgym.utils.gamestates import GameState, PlayerData
from rlgym.utils.math import cosine_similarity
from numpy import exp
from numpy.linalg import norm
class NectoRewardFunction(RewardFunction):
BLUE_GOAL = (np.array(BLUE_GOAL_BACK) + np.array(BLUE_GOAL_CENTER)) / 2
ORANGE_GOAL = (np.array(ORANGE_GOAL_BACK) + np.array(ORANGE_GOAL_CENTER)) / 2
def __init__(
self,
team_spirit=0.3,
goal_w=10,
goal_dist_w=10,
goal_speed_bonus_w=2.5,
goal_dist_bonus_w=2.5,
demo_w=5,
dist_w=0.75, # Changed from 1
align_w=0.5,
boost_w=1, # Changed from 0.5
touch_height_w=1, # Changed from 0.5
touch_accel_w=0.5, # Changed from 1
):
self.team_spirit = team_spirit
self.current_state = None
self.last_state = None
self.n = 0
self.goal_w = goal_w
self.goal_dist_w = goal_dist_w
self.goal_speed_bonus_w = goal_speed_bonus_w
self.goal_dist_bonus_w = goal_dist_bonus_w
self.demo_w = demo_w
self.dist_w = dist_w
self.align_w = align_w
self.boost_w = boost_w
self.touch_height_w = touch_height_w
self.touch_accel_w = touch_accel_w
self.state_quality = None
self.player_qualities = None
self.rewards = None
def _state_qualities(self, state: GameState):
ball_pos = state.ball.position
state_quality = self.goal_dist_w * (exp(-norm(self.ORANGE_GOAL - ball_pos) / CAR_MAX_SPEED)
- exp(-norm(self.BLUE_GOAL - ball_pos) / CAR_MAX_SPEED))
player_qualities = np.zeros(len(state.players))
for i, player in enumerate(state.players):
pos = player.car_data.position
# Align player->ball and player->net vectors
alignment = 0.5 * (cosine_similarity(ball_pos - pos, ORANGE_GOAL_BACK - pos)
- cosine_similarity(ball_pos - pos, BLUE_GOAL_BACK - pos))
if player.team_num == ORANGE_TEAM:
alignment *= -1
liu_dist = exp(-norm(ball_pos - pos) / 1410) # Max driving speed
player_qualities[i] = (self.dist_w * liu_dist + self.align_w * alignment
+ self.boost_w * np.sqrt(player.boost_amount))
# TODO use only dist of closest player for entire team
return state_quality, player_qualities
def _calculate_rewards(self, state: GameState):
# Calculate rewards, positive for blue, negative for orange
state_quality, player_qualities = self._state_qualities(state)
player_rewards = np.zeros_like(player_qualities)
for i, player in enumerate(state.players):
last = self.last_state.players[i]
if player.ball_touched:
curr_vel = self.current_state.ball.linear_velocity
last_vel = self.last_state.ball.linear_velocity
# On ground it gets about 0.05 just for touching, as well as some extra for the speed it produces
# Close to 20 in the limit with ball on top, but opponents should learn to challenge way before that
player_rewards[i] += (self.touch_height_w * state.ball.position[2] / CEILING_Z +
self.touch_accel_w * norm(curr_vel - last_vel) / BALL_MAX_SPEED)
if player.is_demoed and not last.is_demoed:
player_rewards[i] -= self.demo_w / 2
if player.match_demolishes > last.match_demolishes:
player_rewards[i] += self.demo_w / 2
mid = len(player_rewards) // 2
player_rewards += player_qualities - self.player_qualities
player_rewards[:mid] += state_quality - self.state_quality
player_rewards[mid:] -= state_quality - self.state_quality
self.player_qualities = player_qualities
self.state_quality = state_quality
# Handle goals with no scorer for critic consistency,
# random state could send ball straight into goal
d_blue = state.blue_score - self.last_state.blue_score
d_orange = state.orange_score - self.last_state.orange_score
if d_blue > 0:
goal_speed = norm(self.last_state.ball.linear_velocity)
distances = norm(
np.stack([p.car_data.position for p in state.players[mid:]])
- self.last_state.ball.position,
axis=-1
)
player_rewards[mid:] = -self.goal_dist_bonus_w * (1 - exp(-distances / CAR_MAX_SPEED))
player_rewards[:mid] = (self.goal_w * d_blue
                                    + self.goal_speed_bonus_w * goal_speed / BALL_MAX_SPEED)
if d_orange > 0:
goal_speed = norm(self.last_state.ball.linear_velocity)
distances = norm(
np.stack([p.car_data.position for p in state.players[:mid]])
- self.last_state.ball.position,
axis=-1
)
player_rewards[:mid] = -self.goal_dist_bonus_w * (1 - exp(-distances / CAR_MAX_SPEED))
player_rewards[mid:] = (self.goal_w * d_orange
                                    + self.goal_speed_bonus_w * goal_speed / BALL_MAX_SPEED)
blue = player_rewards[:mid]
orange = player_rewards[mid:]
bm = np.nan_to_num(blue.mean())
om = np.nan_to_num(orange.mean())
player_rewards[:mid] = (1 - self.team_spirit) * blue + self.team_spirit * bm - om
player_rewards[mid:] = (1 - self.team_spirit) * orange + self.team_spirit * om - bm
self.last_state = state
self.rewards = player_rewards
def reset(self, initial_state: GameState):
self.n = 0
self.last_state = None
self.rewards = None
self.current_state = initial_state
self.state_quality, self.player_qualities = self._state_qualities(initial_state)
def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
if state != self.current_state:
self.last_state = self.current_state
self.current_state = state
self._calculate_rewards(state)
self.n = 0
rew = self.rewards[self.n]
self.n += 1
return float(rew)
|
StarcoderdataPython
|
163054
|
from setuptools import setup
with open("README.md", "r") as f:
long_description = f.read()
setup(
name="javaccflab",
packages=["javaccflab"],
entry_points={
"console_scripts": ['javaccflab = javaccflab.java_ccf:main']
},
version='0.1.12',
description="JavaCCF is utility to fix style in Java files",
long_description=long_description,
long_description_content_type='text/markdown',
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/tochanenko/MetaProgramming/tree/lab-2",
)
|
StarcoderdataPython
|
4814619
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 27 12:30:28 2016
test stuff
test inverse rational function
@author: sebalander
"""
# %%
import cv2
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from rational import inverseRational
from rational import directRational
# % DATA FILES
imageFile = "./resources/PTZgrid/ptz_(0.850278, -0.014444, 0.0).jpg"
cornersFile = "./resources/PTZgrid/ptzCorners.npy"
patternFile = "./resources/PTZgrid/ptzGridPattern.npy"
distCoeffsFile = "./resources/PTZchessboard/zoom 0.0/ptzDistCoeffs.npy"
linearCoeffsFile = "./resources/PTZchessboard/zoom 0.0/ptzLinearCoeffs.npy"
rvecOptimFile = "./resources/PTZchessboard/zoom 0.0/ptzSheetRvecOptim.npy"
tVecOptimFile = "./resources/PTZchessboard/zoom 0.0/ptzSheetTvecOptim.npy"
# % LOAD DATA
img = cv2.imread(imageFile)
corners = np.load(cornersFile)
objectPoints = np.load(patternFile)
distCoeffs = np.load(distCoeffsFile)
cameraMatrix = np.load(linearCoeffsFile)
rVec = np.load(rvecOptimFile)
tVec = np.load(tVecOptimFile)
# %% PLOT LOADED DATA
plt.imshow(img)
plt.scatter(corners[:,0,0], corners[:,0,1])
plt.show()
[x,y,z], _ = cv2.Rodrigues(rVec)  # rows of the orthogonal rotation matrix
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot([0, tVec[0,0]],
[0, tVec[1,0]],
[0, tVec[2,0]])
ax.plot([tVec[0,0], tVec[0,0] + x[0]],
[tVec[1,0], tVec[1,0] + x[1]],
[tVec[2,0], tVec[2,0] + x[2]])
ax.plot([tVec[0,0], tVec[0,0] + y[0]],
[tVec[1,0], tVec[1,0] + y[1]],
[tVec[2,0], tVec[2,0] + y[2]])
ax.plot([tVec[0,0], tVec[0,0] + z[0]],
[tVec[1,0], tVec[1,0] + z[1]],
[tVec[2,0], tVec[2,0] + z[2]])
ax.scatter(objectPoints[0,:,0],
objectPoints[0,:,1],
objectPoints[0,:,2])
plt.show()
# %% CALCULATE PROJECTIONS
rotMatrix, _ = cv2.Rodrigues(rVec)
cornersProjected = directRational(objectPoints,
rotMatrix,
tVec,
cameraMatrix,
distCoeffs,
plot=True,
img=img,
corners=corners)
cornersProjectedOpenCV, _ = cv2.projectPoints(objectPoints,
rVec,
tVec,
cameraMatrix,
distCoeffs)
objectPointsProjected = inverseRational(corners,
rotMatrix,
tVec,
cameraMatrix,
distCoeffs)
# %% IN IMAGE CHECK DIRECT MAPPING
plt.imshow(img)
plt.plot(corners[:,0,0],
corners[:,0,1],'o',label='corners')
plt.plot(cornersProjected[:,0,0],
cornersProjected[:,0,1],'x',label='projected manual')
plt.plot(cornersProjectedOpenCV[:,0,0],
cornersProjectedOpenCV[:,0,1],'*',label='projected OpenCV')
plt.legend()
plt.show()
# %% PLOT 3D SCENE CHECK INVERSE MAPPING
[x,y,z] = rotMatrix  # rows of the orthogonal rotation matrix
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot([0, tVec[0,0]],
[0, tVec[1,0]],
[0, tVec[2,0]])
ax.plot([tVec[0,0], tVec[0,0] + x[0]],
[tVec[1,0], tVec[1,0] + x[1]],
[tVec[2,0], tVec[2,0] + x[2]])
ax.plot([tVec[0,0], tVec[0,0] + y[0]],
[tVec[1,0], tVec[1,0] + y[1]],
[tVec[2,0], tVec[2,0] + y[2]])
ax.plot([tVec[0,0], tVec[0,0] + z[0]],
[tVec[1,0], tVec[1,0] + z[1]],
[tVec[2,0], tVec[2,0] + z[2]])
ax.scatter(objectPointsProjected[:,0,0],
objectPointsProjected[:,0,1],
objectPointsProjected[:,0,2])
ax.scatter(objectPoints[0,:,0],
objectPoints[0,:,1],
objectPoints[0,:,2])
# %%
plt.scatter(objectPoints[0,:,0],objectPoints[0,:,1])
plt.plot(objectPointsProjected[:,0,0],objectPointsProjected[:,0,1],'x')
for i in range(objectPoints.shape[1]):
plt.plot([objectPoints[0,i,0],objectPointsProjected[i,0,0]],
[objectPoints[0,i,1],objectPointsProjected[i,0,1]],
'-k')#,headwidth=0,headlength=0,headaxislength=0)
rError = np.sqrt(np.sum((objectPoints[0,:]-objectPointsProjected[:,0])**2,1))
# %%
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.bar(objectPoints[0,:,0],rError,zs=objectPoints[0,:,1],width=0.1,zdir='y')
#ax.plot_surface(objectPoints[0,:,0],objectPoints[0,:,1],rError)
|
StarcoderdataPython
|
196521
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import weakref
from copy import deepcopy
from enum import Enum
from functools import partial
from typing import Any, List, Tuple, Dict, Type, Union
from ...serialization.serializables import (
SerializableMeta,
FieldTypes,
BoolField,
Int32Field,
Float32Field,
StringField,
ListField,
DictField,
ReferenceField,
)
from ...serialization.core import Placeholder
from ...serialization.serializables import Serializable
from ...serialization.serializables.core import SerializableSerializer
from ...typing import OperandType
from ...utils import AttributeDict, classproperty
from ..base import Base
from ..entity.core import Entity, EntityData
from ..entity.chunks import Chunk
from ..entity.tileables import Tileable
from ..entity.output_types import OutputType
from ..mode import enter_mode
class OperandMetaclass(SerializableMeta):
def __new__(mcs, name: str, bases: Tuple[Type], properties: Dict):
if "__call__" in properties:
            # if __call__ is specified for an operand,
            # make sure it is executed with user (non-kernel) mode entered
properties["__call__"] = enter_mode(kernel=False)(properties["__call__"])
return super().__new__(mcs, name, bases, properties)
class OperandStage(Enum):
map = 0
reduce = 1
combine = 2
agg = 3
class SchedulingHint(Serializable):
    # worker to execute on; only works for chunk ops.
    # If specified, the op should be executed on the given worker;
    # only works for operands that have no input
expect_worker = StringField("expect_worker", default=None)
# will this operand be assigned a worker or not
reassign_worker = BoolField("reassign_worker", default=False)
    # mark an op as fuseable
fuseable = BoolField("fuseable", default=True)
# True means control dependency, False means data dependency
_pure_depends = ListField("pure_depends", FieldTypes.bool, default=None)
    # `gpu` indicates whether the operand should be executed on the GPU
    gpu = BoolField("gpu", default=None)
    # `priority` is useful when setting the chunk index as priority,
    # e.g. for ops like read_csv the first chunk needs to be executed
    # no later than the later ones, because the range index of a later
    # chunk is accumulated from the indexes of the previous ones
    priority = Int32Field("priority", default=0)
@classproperty
def all_hint_names(cls):
return list(cls._FIELDS)
def can_be_fused(self) -> bool:
if not self.fuseable:
return False
if self.reassign_worker:
return False
if self._pure_depends and any(depend for depend in self._pure_depends):
# control dependency exists
return False
return True
def _install_scheduling_hint_properties(cls: Type["Operand"]):
def get_hint(name):
def _get_val(operand: "Operand"):
if operand.scheduling_hint:
return getattr(operand.scheduling_hint, name)
def _set_val(operand: "Operand", val: Any):
if not operand.scheduling_hint:
operand.scheduling_hint = SchedulingHint(**{name: val})
else:
setattr(operand.scheduling_hint, name, val)
return property(_get_val, _set_val)
for hint_name in SchedulingHint.all_hint_names:
setattr(cls, hint_name, get_hint(hint_name))
return cls
@_install_scheduling_hint_properties
class Operand(Base, metaclass=OperandMetaclass):
"""
Operand base class. All operands should have a type, which can be Add, Subtract etc.
`sparse` indicates that if the operand is applied on a sparse tensor/chunk.
`device`, 0 means the CPU, otherwise means the GPU device.
Operand can have inputs and outputs
which should be the :class:`mars.tensor.core.TensorData`, :class:`mars.tensor.core.ChunkData` etc.
"""
__slots__ = ("__weakref__",)
attr_tag = "attr"
_init_update_key_ = False
_output_type_ = None
_no_copy_attrs_ = Base._no_copy_attrs_ | {"scheduling_hint"}
sparse = BoolField("sparse", default=False)
device = Int32Field("device", default=None)
# will this operand create a view of input data or not
create_view = BoolField("create_view", default=False)
stage = ReferenceField("stage", OperandStage, default=None)
memory_scale = Float32Field("memory_scale", default=None)
tileable_op_key = StringField("tileable_op_key", default=None)
extra_params = DictField("extra_params", key_type=FieldTypes.string)
# scheduling hint
scheduling_hint = ReferenceField("scheduling_hint", default=None)
_inputs = ListField("inputs", FieldTypes.reference(EntityData))
_outputs = ListField("outputs")
_output_types = ListField("output_type", FieldTypes.reference(OutputType))
def __init__(self: OperandType, *args, **kwargs):
extra_names = (
set(kwargs) - set(self._FIELDS) - set(SchedulingHint.all_hint_names)
)
extras = AttributeDict((k, kwargs.pop(k)) for k in extra_names)
kwargs["extra_params"] = kwargs.pop("extra_params", extras)
self._extract_scheduling_hint(kwargs)
super().__init__(*args, **kwargs)
@classmethod
def _extract_scheduling_hint(cls, kwargs: Dict[str, Any]):
if "scheduling_hint" in kwargs:
return
scheduling_hint_kwargs = dict()
for hint_name in SchedulingHint.all_hint_names:
if hint_name in kwargs:
scheduling_hint_kwargs[hint_name] = kwargs.pop(hint_name)
if scheduling_hint_kwargs:
kwargs["scheduling_hint"] = SchedulingHint(**scheduling_hint_kwargs)
def __repr__(self):
if self.stage is None:
return f"{type(self).__name__} <key={self.key}>"
else:
return f"{type(self).__name__} <key={self.key}, stage={self.stage.name}>"
@classmethod
def _get_entity_data(cls, entity):
if isinstance(entity, Entity):
return entity.data
return entity
@classmethod
def _get_inputs_data(cls, inputs):
return [cls._get_entity_data(inp) for inp in inputs]
def _set_inputs(self, inputs):
if inputs is not None:
inputs = self._get_inputs_data(inputs)
if hasattr(self, "check_inputs"):
self.check_inputs(inputs)
setattr(self, "_inputs", inputs)
@property
def inputs(self) -> List[Union[Chunk, Tileable]]:
inputs = getattr(self, "_inputs", None)
if not inputs:
return list()
return inputs
@inputs.setter
def inputs(self, vals):
self._set_inputs(vals)
@property
def output_limit(self) -> int:
return 1
@property
def pure_depends(self):
val = getattr(self, "_pure_depends", None)
if not val:
return [False] * len(self.inputs or ())
return val
@property
def output_types(self):
return getattr(self, "_output_types", None)
@output_types.setter
def output_types(self, value):
self._output_types = value
def _attach_outputs(self, *outputs):
self._outputs = [
weakref.ref(self._get_entity_data(o)) if o is not None else o
for o in outputs
]
if len(self._outputs) > self.output_limit:
raise ValueError("Outputs' size exceeds limitation")
@property
def outputs(self) -> List[Union[Chunk, Tileable]]:
outputs = getattr(self, "_outputs", None)
if outputs:
return [ref() for ref in outputs]
@outputs.setter
def outputs(self, outputs):
self._attach_outputs(*outputs)
def is_sparse(self) -> bool:
return self.sparse
issparse = is_sparse
def is_gpu(self) -> bool:
return self.gpu
@property
def retryable(self) -> bool:
return True
def get_dependent_data_keys(self):
return [dep.key for dep in self.inputs or ()]
def _get_output_type(self, output_idx):
if self.output_types:
try:
return self.output_types[output_idx]
except IndexError:
return self.output_types[0]
else:
return self._output_type_
def copy(self: OperandType) -> OperandType:
new_op = super().copy()
new_op.outputs = []
# copy scheduling_hint
new_op.scheduling_hint = SchedulingHint(
**{
field: getattr(self.scheduling_hint, field)
for field in SchedulingHint.all_hint_names
}
)
new_op.extra_params = deepcopy(self.extra_params)
return new_op
def on_output_modify(self, new_output):
# when `create_view` is True, if the output is modified,
# the modification should be set back to the input.
# This function is for this sort of usage.
# Remember, if `create_view` is False, this function should take no effect.
raise NotImplementedError
def on_input_modify(self, new_input):
# when `create_view` is True, if the input is modified,
# this function could be used to respond the modification.
# Remember, if `create_view` is False, this function should take no effect.
raise NotImplementedError
class OperandSerializer(SerializableSerializer):
serializer_name = "operand"
@classmethod
def _get_tag_to_values(cls, obj: Operand):
tag_to_values = super()._get_tag_to_values(obj)
# outputs are weak-refs which are not pickle-able
tag_to_values["outputs"] = [out_ref() for out_ref in tag_to_values["outputs"]]
return tag_to_values
def deserialize(self, header: Dict, buffers: List, context: Dict) -> Operand:
# convert outputs back to weak-refs
operand: Operand = (yield from super().deserialize(header, buffers, context))
for i, out in enumerate(operand._outputs):
def cb(o, index):
outputs = operand._outputs
outputs[index] = weakref.ref(o)
if len(outputs) > 1 and all(
not isinstance(o, Placeholder) for o in outputs
):
# all replaced
# add siblings for multiple outputs
outputs = operand.outputs
for j in range(len(outputs)):
outputs[j]._siblings = outputs[:j] + outputs[j + 1 :]
if isinstance(out, Placeholder):
out.callbacks.append(partial(cb, index=i))
else:
cb(out, i)
return operand
OperandSerializer.register(Operand)
class VirtualOperand(Operand):
def get_dependent_data_keys(self):
return []
class HasInput(Operand):
__slots__ = ()
@property
def input(self):
return self._input
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._input = self._inputs[0]
|
StarcoderdataPython
|
1756606
|
from turtle import*
speed(11)
shape("turtle")
for count in range(4):
forward(100)
right(90)
done()
|
StarcoderdataPython
|
3212068
|
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_mail import Mail
app = Flask(__name__)
app.config.from_pyfile('config.py')
app.config.from_pyfile('mail-config.py')
db = SQLAlchemy(app)
mail = Mail(app)
Bootstrap(app)
from views import *
if __name__ == '__main__':
db.create_all()
print("starting application....")
app.run(host='0.0.0.0',port=8000,debug=True,threaded=True)
|
StarcoderdataPython
|
3260908
|
from django.shortcuts import render, redirect
from .models import Article
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from . import forms
from django.core.paginator import Paginator
def article_list(request):
articles = Article.objects.all().order_by('-date')
paginator = Paginator(articles, 6)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
return render(request, 'articles/article_list.html',
{'page_obj': page_obj})
def article_detail(request, slug):
article = Article.objects.get(slug=slug)
return render(request, 'articles/article_detail.html', {'article':article})
# Require login; anonymous users are redirected to the login page
@login_required(login_url="/accounts/login")
def article_create(request):
if request.method == 'POST':
form = forms.CreateArticle(request.POST, request.FILES)
if form.is_valid():
instance = form.save(commit=False)# Save but don't commit
instance.author = request.user # Get and store logged in author
instance.save()
return redirect('articles:list')
else:
form = forms.CreateArticle()
return render(request, 'articles/article_create.html', {'form': form})
|
StarcoderdataPython
|
178893
|
# nrf52 bobble test in python
import time
from adafruit_ble import BLERadio
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
from adafruit_ble.services.nordic import UARTService
ble = BLERadio()
while True:
while ble.connected and any(
UARTService in connection for connection in ble.connections
):
for connection in ble.connections:
if UARTService not in connection:
continue
print("RTBobble52 ready...")
uart = connection[UARTService]
while connection.connected:
s= input("command: ")
uart.write(s.encode("utf-8"))
uart.write(b'\n')
time.sleep(1)
print("disconnected, scanning")
for advertisement in ble.start_scan(ProvideServicesAdvertisement, timeout=1):
if UARTService not in advertisement.services:
continue
ble.connect(advertisement)
print("connected")
break
ble.stop_scan()
|
StarcoderdataPython
|
3228822
|
<filename>pymath/coin_sums/__init__.py<gh_stars>1-10
def make_pounds(coins, bill):
"""
Find how many ways there are to make bill from the given list of coins
:param coins List of coins
:type coins list
:param bill Coin/note to make change for
:type bill int
:return: Number of ways to make change for the given currency
:rtype: int
"""
ways_to_make_bill = [0] * (bill + 1)
ways_to_make_bill[0] = 1
for x in range(len(coins)):
for n in range(coins[x], bill + 1):
ways_to_make_bill[n] += ways_to_make_bill[n - coins[x]]
return ways_to_make_bill[bill]
if __name__ == "__main__":
c = [1, 2, 5, 10, 20, 50, 100, 200]
b = 200
ways_to_make_b = make_pounds(c, b)
print(f"There are {ways_to_make_b} ways to make change for {b} given {c}")
|
StarcoderdataPython
|
3385405
|
<gh_stars>1-10
#!/usr/bin/python
from Bio import SeqIO
import sys
import os
if len(sys.argv) < 2:
print("USAGE: avg_lengths.py <infile>...")
sys.exit(1)
length_map = dict()
for f in sys.argv[1:]:
length_map[f] = []
for seq in SeqIO.parse(open(f), "fasta"):
length_map[f].append(len(seq.seq))
longest_length = 0
for f in length_map:
sys.stdout.write("%s\t" % os.path.split(f)[1])
length_map[f] = sorted(length_map[f])
if len(length_map[f]) > longest_length:
longest_length = len(length_map[f])
sys.stdout.write("\n")
for i in range(longest_length):
for l in list(length_map.values()):
if len(l) > i:
sys.stdout.write("%s\t" % l[i])
else:
sys.stdout.write(" \t")
sys.stdout.write("\n")
|
StarcoderdataPython
|
1749397
|
from test_utils import run_query, redshift_connector
import pytest
def test_voronoi_lines_success():
fixture_file = open('./test/integration/voronoi_fixtures/in/wkts.txt', 'r')
points = fixture_file.readlines()
fixture_file.close()
results = run_query(
f"""SELECT @@RS_PREFIX@@processing.ST_VORONOILINES(
ST_GeomFromText('{points[0].rstrip()}')),
@@RS_PREFIX@@processing.ST_VORONOILINES(
ST_GeomFromText('{points[1].rstrip()}'))"""
)
fixture_file = open(
'./test/integration/voronoi_fixtures/out/geojsons_lines.txt', 'r'
)
lines = fixture_file.readlines()
fixture_file.close()
for idx, result in enumerate(results):
assert str(result[0]) == lines[idx].rstrip()
def test_voronoi_lines_none():
results = run_query(
"""SELECT @@RS_PREFIX@@processing.ST_VORONOILINES(
ST_GeomFromText(Null))"""
)
assert results[0][0] is None
def test_voronoi_lines_wrong_geom_type():
fixture_file = open('./test/integration/voronoi_fixtures/in/wkts.txt', 'r')
points = fixture_file.readlines()
fixture_file.close()
with pytest.raises(redshift_connector.error.ProgrammingError) as excinfo:
run_query(
f"""SELECT @@RS_PREFIX@@processing.ST_VORONOILINES(
ST_GeomFromText('{points[3].rstrip()}'))"""
)
assert 'Input points parameter must be MultiPoint' in str(excinfo.value)
def test_voronoi_lines_geom_too_long():
fixture_file = open('./test/integration/voronoi_fixtures/in/wkts.txt', 'r')
points = fixture_file.readlines()
fixture_file.close()
with pytest.raises(redshift_connector.error.ProgrammingError) as excinfo:
run_query(
f"""SELECT @@RS_PREFIX@@processing.ST_VORONOILINES(
ST_GeomFromText('{points[2].rstrip()}'))"""
)
assert 'Value too long for character type' in str(excinfo.value)
def test_voronoi_lines_default_not_succeed():
fixture_file = open('./test/integration/voronoi_fixtures/in/wkts.txt', 'r')
points = fixture_file.readlines()
fixture_file.close()
with pytest.raises(redshift_connector.error.ProgrammingError) as excinfo:
run_query(
f"""SELECT @@RS_PREFIX@@processing.ST_VORONOILINES(
ST_GeomFromText('{points[1].rstrip()}'), JSON_PARSE('[
-80.73611869702799,30.50013148785057,
-55.200433643307896, 41.019920879156246]'))"""
)
assert 'Points should be within the bounding box supplied' in str(excinfo.value)
def test_voronoi_lines_default():
fixture_file = open('./test/integration/voronoi_fixtures/in/wkts.txt', 'r')
points = fixture_file.readlines()
fixture_file.close()
results = run_query(
f"""SELECT @@RS_PREFIX@@processing.ST_VORONOILINES(
ST_GeomFromText('{points[0].rstrip()}'), JSON_PARSE('[-76.704999999999998,
38.655000000000001, -74.594999999999999, 40.475000000000009]')),
@@RS_PREFIX@@processing.ST_VORONOILINES(
ST_GeomFromText('{points[0].rstrip()}'))"""
)
fixture_file = open(
'./test/integration/voronoi_fixtures/out/geojsons_lines.txt', 'r'
)
lines = fixture_file.readlines()
fixture_file.close()
assert str(results[0][1]) == lines[0].rstrip()
assert str(results[0][0]) == str(results[0][1])
|
StarcoderdataPython
|
67004
|
<reponame>waymanls/awx
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import dateutil
import functools
import html
import logging
import re
import requests
import socket
import sys
import time
from base64 import b64encode
from collections import OrderedDict
from urllib3.exceptions import ConnectTimeoutError
# Django
from django.conf import settings
from django.core.exceptions import FieldError, ObjectDoesNotExist
from django.db.models import Q, Sum
from django.db import IntegrityError, transaction, connection
from django.shortcuts import get_object_or_404
from django.utils.safestring import mark_safe
from django.utils.timezone import now
from django.views.decorators.csrf import csrf_exempt
from django.template.loader import render_to_string
from django.http import HttpResponse
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
# Django REST Framework
from rest_framework.exceptions import APIException, PermissionDenied, ParseError, NotFound
from rest_framework.parsers import FormParser
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.renderers import JSONRenderer, StaticHTMLRenderer
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.views import exception_handler, get_view_name
from rest_framework import status
# Django REST Framework YAML
from rest_framework_yaml.parsers import YAMLParser
from rest_framework_yaml.renderers import YAMLRenderer
# QSStats
import qsstats
# ANSIConv
import ansiconv
# Python Social Auth
from social_core.backends.utils import load_backends
# Django OAuth Toolkit
from oauth2_provider.models import get_access_token_model
import pytz
from wsgiref.util import FileWrapper
# AWX
from awx.main.tasks import send_notifications, update_inventory_computed_fields
from awx.main.access import get_user_queryset, HostAccess
from awx.api.generics import (
APIView,
BaseUsersList,
CopyAPIView,
DeleteLastUnattachLabelMixin,
GenericAPIView,
ListAPIView,
ListCreateAPIView,
ResourceAccessList,
RetrieveAPIView,
RetrieveDestroyAPIView,
RetrieveUpdateAPIView,
RetrieveUpdateDestroyAPIView,
SimpleListAPIView,
SubDetailAPIView,
SubListAPIView,
SubListAttachDetachAPIView,
SubListCreateAPIView,
SubListCreateAttachDetachAPIView,
SubListDestroyAPIView,
)
from awx.api.versioning import reverse
from awx.main import models
from awx.main.utils import (
camelcase_to_underscore,
extract_ansible_vars,
get_awx_http_client_headers,
get_object_or_400,
getattrd,
get_pk_from_dict,
schedule_task_manager,
ignore_inventory_computed_fields,
set_environ,
)
from awx.main.utils.encryption import encrypt_value
from awx.main.utils.filters import SmartFilter
from awx.main.utils.insights import filter_insights_api_response
from awx.main.redact import UriCleaner
from awx.api.permissions import (
JobTemplateCallbackPermission,
TaskPermission,
ProjectUpdatePermission,
InventoryInventorySourcesUpdatePermission,
UserPermission,
InstanceGroupTowerPermission,
VariableDataPermission,
WorkflowApprovalPermission,
)
from awx.api import renderers
from awx.api import serializers
from awx.api.metadata import RoleMetadata
from awx.main.constants import ACTIVE_STATES
from awx.main.scheduler.dag_workflow import WorkflowDAG
from awx.api.views.mixin import (
ControlledByScmMixin,
InstanceGroupMembershipMixin,
OrganizationCountsMixin,
RelatedJobsPreventDeleteMixin,
UnifiedJobDeletionMixin,
NoTruncateMixin,
)
from awx.api.views.organization import ( # noqa
OrganizationList,
OrganizationDetail,
OrganizationInventoriesList,
OrganizationUsersList,
OrganizationAdminsList,
OrganizationExecutionEnvironmentsList,
OrganizationProjectsList,
OrganizationJobTemplatesList,
OrganizationWorkflowJobTemplatesList,
OrganizationTeamsList,
OrganizationActivityStreamList,
OrganizationNotificationTemplatesList,
OrganizationNotificationTemplatesAnyList,
OrganizationNotificationTemplatesErrorList,
OrganizationNotificationTemplatesStartedList,
OrganizationNotificationTemplatesSuccessList,
OrganizationNotificationTemplatesApprovalList,
OrganizationInstanceGroupsList,
OrganizationGalaxyCredentialsList,
OrganizationAccessList,
OrganizationObjectRolesList,
)
from awx.api.views.inventory import ( # noqa
InventoryList,
InventoryDetail,
InventoryUpdateEventsList,
InventoryActivityStreamList,
InventoryInstanceGroupsList,
InventoryAccessList,
InventoryObjectRolesList,
InventoryJobTemplateList,
InventoryCopy,
)
from awx.api.views.root import ( # noqa
ApiRootView,
ApiOAuthAuthorizationRootView,
ApiVersionRootView,
ApiV2RootView,
ApiV2PingView,
ApiV2ConfigView,
ApiV2SubscriptionView,
ApiV2AttachView,
)
from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver # noqa
logger = logging.getLogger('awx.api.views')
def api_exception_handler(exc, context):
"""
Override default API exception handler to catch IntegrityError exceptions.
"""
if isinstance(exc, IntegrityError):
exc = ParseError(exc.args[0])
if isinstance(exc, FieldError):
exc = ParseError(exc.args[0])
if isinstance(context['view'], UnifiedJobStdout):
context['view'].renderer_classes = [renderers.BrowsableAPIRenderer, JSONRenderer]
if isinstance(exc, APIException):
req = context['request']._request
if 'awx.named_url_rewritten' in req.environ and not str(getattr(exc, 'status_code', 0)).startswith('2'):
# if the URL was rewritten, and it's not a 2xx level status code,
# revert the request.path to its original value to avoid leaking
            # any context about the existence of resources
req.path = req.environ['awx.named_url_rewritten']
if exc.status_code == 403:
exc = NotFound(detail=_('Not found.'))
return exception_handler(exc, context)
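# A handler like this is typically wired up through DRF settings rather than
# called directly. Minimal sketch (the dotted path below is an assumption and
# may differ in an actual deployment):
#
#   REST_FRAMEWORK = {
#       'EXCEPTION_HANDLER': 'awx.api.views.api_exception_handler',
#   }
#
# DRF then calls the handler with (exc, context) for every exception raised
# from a view, which is how IntegrityError and FieldError become 400 responses.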
class DashboardView(APIView):
deprecated = True
name = _("Dashboard")
swagger_topic = 'Dashboard'
def get(self, request, format=None):
'''Show Dashboard Details'''
data = OrderedDict()
data['related'] = {'jobs_graph': reverse('api:dashboard_jobs_graph_view', request=request)}
user_inventory = get_user_queryset(request.user, models.Inventory)
inventory_with_failed_hosts = user_inventory.filter(hosts_with_active_failures__gt=0)
user_inventory_external = user_inventory.filter(has_inventory_sources=True)
        # if there are *zero* inventories, this aggregate query will be None, fall back to 0
failed_inventory = user_inventory.aggregate(Sum('inventory_sources_with_failures'))['inventory_sources_with_failures__sum'] or 0
data['inventories'] = {
'url': reverse('api:inventory_list', request=request),
'total': user_inventory.count(),
'total_with_inventory_source': user_inventory_external.count(),
'job_failed': inventory_with_failed_hosts.count(),
'inventory_failed': failed_inventory,
}
user_inventory_sources = get_user_queryset(request.user, models.InventorySource)
ec2_inventory_sources = user_inventory_sources.filter(source='ec2')
ec2_inventory_failed = ec2_inventory_sources.filter(status='failed')
data['inventory_sources'] = {}
data['inventory_sources']['ec2'] = {
'url': reverse('api:inventory_source_list', request=request) + "?source=ec2",
'failures_url': reverse('api:inventory_source_list', request=request) + "?source=ec2&status=failed",
'label': 'Amazon EC2',
'total': ec2_inventory_sources.count(),
'failed': ec2_inventory_failed.count(),
}
user_groups = get_user_queryset(request.user, models.Group)
groups_inventory_failed = models.Group.objects.filter(inventory_sources__last_job_failed=True).count()
data['groups'] = {'url': reverse('api:group_list', request=request), 'total': user_groups.count(), 'inventory_failed': groups_inventory_failed}
user_hosts = get_user_queryset(request.user, models.Host)
user_hosts_failed = user_hosts.filter(last_job_host_summary__failed=True)
data['hosts'] = {
'url': reverse('api:host_list', request=request),
'failures_url': reverse('api:host_list', request=request) + "?last_job_host_summary__failed=True",
'total': user_hosts.count(),
'failed': user_hosts_failed.count(),
}
user_projects = get_user_queryset(request.user, models.Project)
user_projects_failed = user_projects.filter(last_job_failed=True)
data['projects'] = {
'url': reverse('api:project_list', request=request),
'failures_url': reverse('api:project_list', request=request) + "?last_job_failed=True",
'total': user_projects.count(),
'failed': user_projects_failed.count(),
}
git_projects = user_projects.filter(scm_type='git')
git_failed_projects = git_projects.filter(last_job_failed=True)
svn_projects = user_projects.filter(scm_type='svn')
svn_failed_projects = svn_projects.filter(last_job_failed=True)
archive_projects = user_projects.filter(scm_type='archive')
archive_failed_projects = archive_projects.filter(last_job_failed=True)
data['scm_types'] = {}
data['scm_types']['git'] = {
'url': reverse('api:project_list', request=request) + "?scm_type=git",
'label': 'Git',
'failures_url': reverse('api:project_list', request=request) + "?scm_type=git&last_job_failed=True",
'total': git_projects.count(),
'failed': git_failed_projects.count(),
}
data['scm_types']['svn'] = {
'url': reverse('api:project_list', request=request) + "?scm_type=svn",
'label': 'Subversion',
'failures_url': reverse('api:project_list', request=request) + "?scm_type=svn&last_job_failed=True",
'total': svn_projects.count(),
'failed': svn_failed_projects.count(),
}
data['scm_types']['archive'] = {
'url': reverse('api:project_list', request=request) + "?scm_type=archive",
'label': 'Remote Archive',
'failures_url': reverse('api:project_list', request=request) + "?scm_type=archive&last_job_failed=True",
'total': archive_projects.count(),
'failed': archive_failed_projects.count(),
}
user_list = get_user_queryset(request.user, models.User)
team_list = get_user_queryset(request.user, models.Team)
credential_list = get_user_queryset(request.user, models.Credential)
job_template_list = get_user_queryset(request.user, models.JobTemplate)
organization_list = get_user_queryset(request.user, models.Organization)
data['users'] = {'url': reverse('api:user_list', request=request), 'total': user_list.count()}
data['organizations'] = {'url': reverse('api:organization_list', request=request), 'total': organization_list.count()}
data['teams'] = {'url': reverse('api:team_list', request=request), 'total': team_list.count()}
data['credentials'] = {'url': reverse('api:credential_list', request=request), 'total': credential_list.count()}
data['job_templates'] = {'url': reverse('api:job_template_list', request=request), 'total': job_template_list.count()}
return Response(data)
class DashboardJobsGraphView(APIView):
name = _("Dashboard Jobs Graphs")
swagger_topic = 'Jobs'
def get(self, request, format=None):
period = request.query_params.get('period', 'month')
job_type = request.query_params.get('job_type', 'all')
user_unified_jobs = get_user_queryset(request.user, models.UnifiedJob).exclude(launch_type='sync')
success_query = user_unified_jobs.filter(status='successful')
failed_query = user_unified_jobs.filter(status='failed')
if job_type == 'inv_sync':
success_query = success_query.filter(instance_of=models.InventoryUpdate)
failed_query = failed_query.filter(instance_of=models.InventoryUpdate)
elif job_type == 'playbook_run':
success_query = success_query.filter(instance_of=models.Job)
failed_query = failed_query.filter(instance_of=models.Job)
elif job_type == 'scm_update':
success_query = success_query.filter(instance_of=models.ProjectUpdate)
failed_query = failed_query.filter(instance_of=models.ProjectUpdate)
success_qss = qsstats.QuerySetStats(success_query, 'finished')
failed_qss = qsstats.QuerySetStats(failed_query, 'finished')
start_date = now()
if period == 'month':
end_date = start_date - dateutil.relativedelta.relativedelta(months=1)
interval = 'days'
elif period == 'two_weeks':
end_date = start_date - dateutil.relativedelta.relativedelta(weeks=2)
interval = 'days'
elif period == 'week':
end_date = start_date - dateutil.relativedelta.relativedelta(weeks=1)
interval = 'days'
elif period == 'day':
end_date = start_date - dateutil.relativedelta.relativedelta(days=1)
interval = 'hours'
else:
return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST)
dashboard_data = {"jobs": {"successful": [], "failed": []}}
for element in success_qss.time_series(end_date, start_date, interval=interval):
dashboard_data['jobs']['successful'].append([time.mktime(element[0].timetuple()), element[1]])
for element in failed_qss.time_series(end_date, start_date, interval=interval):
dashboard_data['jobs']['failed'].append([time.mktime(element[0].timetuple()), element[1]])
return Response(dashboard_data)
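# Illustrative request/response for the view above (the URL path is an
# assumption based on the 'api:dashboard_jobs_graph_view' route name and may
# differ):
#
#   GET /api/v2/dashboard/graphs/jobs/?period=week&job_type=playbook_run
#
#   {"jobs": {"successful": [[<epoch seconds>, <count>], ...],
#             "failed":     [[<epoch seconds>, <count>], ...]}}
#
# Any other `period` value falls through to the 400 branch above.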
class InstanceList(ListAPIView):
name = _("Instances")
model = models.Instance
serializer_class = serializers.InstanceSerializer
search_fields = ('hostname',)
class InstanceDetail(RetrieveUpdateAPIView):
name = _("Instance Detail")
model = models.Instance
serializer_class = serializers.InstanceSerializer
def update(self, request, *args, **kwargs):
r = super(InstanceDetail, self).update(request, *args, **kwargs)
if status.is_success(r.status_code):
obj = self.get_object()
obj.refresh_capacity()
obj.save()
r.data = serializers.InstanceSerializer(obj, context=self.get_serializer_context()).to_representation(obj)
return r
class InstanceUnifiedJobsList(SubListAPIView):
name = _("Instance Jobs")
model = models.UnifiedJob
serializer_class = serializers.UnifiedJobListSerializer
parent_model = models.Instance
def get_queryset(self):
po = self.get_parent_object()
qs = get_user_queryset(self.request.user, models.UnifiedJob)
qs = qs.filter(execution_node=po.hostname)
return qs
class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAttachDetachAPIView):
name = _("Instance's Instance Groups")
model = models.InstanceGroup
serializer_class = serializers.InstanceGroupSerializer
parent_model = models.Instance
relationship = 'rampart_groups'
class InstanceGroupList(ListCreateAPIView):
name = _("Instance Groups")
model = models.InstanceGroup
serializer_class = serializers.InstanceGroupSerializer
class InstanceGroupDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
always_allow_superuser = False
name = _("Instance Group Detail")
model = models.InstanceGroup
serializer_class = serializers.InstanceGroupSerializer
permission_classes = (InstanceGroupTowerPermission,)
def update_raw_data(self, data):
if self.get_object().is_container_group:
data.pop('policy_instance_percentage', None)
data.pop('policy_instance_minimum', None)
data.pop('policy_instance_list', None)
return super(InstanceGroupDetail, self).update_raw_data(data)
class InstanceGroupUnifiedJobsList(SubListAPIView):
name = _("Instance Group Running Jobs")
model = models.UnifiedJob
serializer_class = serializers.UnifiedJobListSerializer
parent_model = models.InstanceGroup
relationship = "unifiedjob_set"
class InstanceGroupInstanceList(InstanceGroupMembershipMixin, SubListAttachDetachAPIView):
name = _("Instance Group's Instances")
model = models.Instance
serializer_class = serializers.InstanceSerializer
parent_model = models.InstanceGroup
relationship = "instances"
search_fields = ('hostname',)
class ScheduleList(ListCreateAPIView):
name = _("Schedules")
model = models.Schedule
serializer_class = serializers.ScheduleSerializer
class ScheduleDetail(RetrieveUpdateDestroyAPIView):
model = models.Schedule
serializer_class = serializers.ScheduleSerializer
class SchedulePreview(GenericAPIView):
model = models.Schedule
name = _('Schedule Recurrence Rule Preview')
serializer_class = serializers.SchedulePreviewSerializer
permission_classes = (IsAuthenticated,)
def post(self, request):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
next_stamp = now()
schedule = []
gen = models.Schedule.rrulestr(serializer.validated_data['rrule']).xafter(next_stamp, count=20)
            # walk the candidate occurrences (xafter yields at most 20) and keep the first 10 real events
for event in gen:
if len(schedule) >= 10:
break
if not dateutil.tz.datetime_exists(event):
# skip imaginary dates, like 2:30 on DST boundaries
continue
schedule.append(event)
return Response({'local': schedule, 'utc': [s.astimezone(pytz.utc) for s in schedule]})
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
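# Illustrative preview request for SchedulePreview (the RRULE string is an
# example only, not a reference for the accepted syntax):
#
#   POST {"rrule": "DTSTART;TZID=America/New_York:20300101T000000 RRULE:FREQ=DAILY;INTERVAL=1"}
#
# The response contains up to ten upcoming occurrences, once in the schedule's
# timezone ('local') and once converted to UTC ('utc'); nonexistent
# DST-boundary times are skipped by the loop above.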
class ScheduleZoneInfo(APIView):
swagger_topic = 'System Configuration'
def get(self, request):
zones = [{'name': zone} for zone in models.Schedule.get_zoneinfo()]
return Response(zones)
class LaunchConfigCredentialsBase(SubListAttachDetachAPIView):
model = models.Credential
serializer_class = serializers.CredentialSerializer
relationship = 'credentials'
def is_valid_relation(self, parent, sub, created=False):
if not parent.unified_job_template:
return {"msg": _("Cannot assign credential when related template is null.")}
ask_mapping = parent.unified_job_template.get_ask_mapping()
if self.relationship not in ask_mapping:
return {"msg": _("Related template cannot accept {} on launch.").format(self.relationship)}
elif sub.passwords_needed:
return {"msg": _("Credential that requires user input on launch " "cannot be used in saved launch configuration.")}
ask_field_name = ask_mapping[self.relationship]
if not getattr(parent.unified_job_template, ask_field_name):
return {"msg": _("Related template is not configured to accept credentials on launch.")}
elif sub.unique_hash() in [cred.unique_hash() for cred in parent.credentials.all()]:
return {
"msg": _("This launch configuration already provides a {credential_type} credential.").format(credential_type=sub.unique_hash(display=True))
}
elif sub.pk in parent.unified_job_template.credentials.values_list('pk', flat=True):
return {"msg": _("Related template already uses {credential_type} credential.").format(credential_type=sub.name)}
# None means there were no validation errors
return None
class ScheduleCredentialsList(LaunchConfigCredentialsBase):
parent_model = models.Schedule
class ScheduleUnifiedJobsList(SubListAPIView):
model = models.UnifiedJob
serializer_class = serializers.UnifiedJobListSerializer
parent_model = models.Schedule
relationship = 'unifiedjob_set'
name = _('Schedule Jobs List')
class AuthView(APIView):
'''List enabled single-sign-on endpoints'''
authentication_classes = []
permission_classes = (AllowAny,)
swagger_topic = 'System Configuration'
def get(self, request):
from rest_framework.reverse import reverse
data = OrderedDict()
err_backend, err_message = request.session.get('social_auth_error', (None, None))
auth_backends = list(load_backends(settings.AUTHENTICATION_BACKENDS, force_load=True).items())
# Return auth backends in consistent order: Google, GitHub, SAML.
auth_backends.sort(key=lambda x: 'g' if x[0] == 'google-oauth2' else x[0])
for name, backend in auth_backends:
login_url = reverse('social:begin', args=(name,))
complete_url = request.build_absolute_uri(reverse('social:complete', args=(name,)))
backend_data = {'login_url': login_url, 'complete_url': complete_url}
if name == 'saml':
backend_data['metadata_url'] = reverse('sso:saml_metadata')
for idp in sorted(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS.keys()):
saml_backend_data = dict(backend_data.items())
saml_backend_data['login_url'] = '%s?idp=%s' % (login_url, idp)
full_backend_name = '%s:%s' % (name, idp)
if (err_backend == full_backend_name or err_backend == name) and err_message:
saml_backend_data['error'] = err_message
data[full_backend_name] = saml_backend_data
else:
if err_backend == name and err_message:
backend_data['error'] = err_message
data[name] = backend_data
return Response(data)
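# Illustrative response shape for AuthView (keys depend on which auth backends
# are enabled; the values here are placeholders):
#
#   {"google-oauth2": {"login_url": "...", "complete_url": "..."},
#    "saml:my-idp": {"login_url": "...?idp=my-idp",
#                    "complete_url": "...",
#                    "metadata_url": "..."}}
#
# A backend error stored in the session under 'social_auth_error' is attached
# to the matching entry as an 'error' key.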
class TeamList(ListCreateAPIView):
model = models.Team
serializer_class = serializers.TeamSerializer
class TeamDetail(RetrieveUpdateDestroyAPIView):
model = models.Team
serializer_class = serializers.TeamSerializer
class TeamUsersList(BaseUsersList):
model = models.User
serializer_class = serializers.UserSerializer
parent_model = models.Team
relationship = 'member_role.members'
ordering = ('username',)
class TeamRolesList(SubListAttachDetachAPIView):
model = models.Role
serializer_class = serializers.RoleSerializerWithParentAccess
metadata_class = RoleMetadata
parent_model = models.Team
relationship = 'member_role.children'
search_fields = ('role_field', 'content_type__model')
def get_queryset(self):
team = get_object_or_404(models.Team, pk=self.kwargs['pk'])
if not self.request.user.can_access(models.Team, 'read', team):
raise PermissionDenied()
return models.Role.filter_visible_roles(self.request.user, team.member_role.children.all().exclude(pk=team.read_role.pk))
def post(self, request, *args, **kwargs):
sub_id = request.data.get('id', None)
if not sub_id:
return super(TeamRolesList, self).post(request)
role = get_object_or_400(models.Role, pk=sub_id)
org_content_type = ContentType.objects.get_for_model(models.Organization)
if role.content_type == org_content_type and role.role_field in ['member_role', 'admin_role']:
data = dict(msg=_("You cannot assign an Organization participation role as a child role for a Team."))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
if role.is_singleton():
data = dict(msg=_("You cannot grant system-level permissions to a team."))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
team = get_object_or_404(models.Team, pk=self.kwargs['pk'])
credential_content_type = ContentType.objects.get_for_model(models.Credential)
if role.content_type == credential_content_type:
if not role.content_object.organization or role.content_object.organization.id != team.organization.id:
data = dict(msg=_("You cannot grant credential access to a team when the Organization field isn't set, or belongs to a different organization"))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
return super(TeamRolesList, self).post(request, *args, **kwargs)
class TeamObjectRolesList(SubListAPIView):
model = models.Role
serializer_class = serializers.RoleSerializer
parent_model = models.Team
search_fields = ('role_field', 'content_type__model')
def get_queryset(self):
po = self.get_parent_object()
content_type = ContentType.objects.get_for_model(self.parent_model)
return models.Role.objects.filter(content_type=content_type, object_id=po.pk)
class TeamProjectsList(SubListAPIView):
model = models.Project
serializer_class = serializers.ProjectSerializer
parent_model = models.Team
def get_queryset(self):
team = self.get_parent_object()
self.check_parent_access(team)
model_ct = ContentType.objects.get_for_model(self.model)
parent_ct = ContentType.objects.get_for_model(self.parent_model)
proj_roles = models.Role.objects.filter(Q(ancestors__content_type=parent_ct) & Q(ancestors__object_id=team.pk), content_type=model_ct)
return self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=[t.content_object.pk for t in proj_roles])
class TeamActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.Team
relationship = 'activitystream_set'
search_fields = ('changes',)
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model)
return qs.filter(
Q(team=parent)
| Q(project__in=models.Project.accessible_objects(parent, 'read_role'))
| Q(credential__in=models.Credential.accessible_objects(parent, 'read_role'))
)
class TeamAccessList(ResourceAccessList):
    model = models.User  # needs to be User for ResourceAccessList
parent_model = models.Team
class ExecutionEnvironmentList(ListCreateAPIView):
always_allow_superuser = False
model = models.ExecutionEnvironment
serializer_class = serializers.ExecutionEnvironmentSerializer
swagger_topic = "Execution Environments"
class ExecutionEnvironmentDetail(RetrieveUpdateDestroyAPIView):
always_allow_superuser = False
model = models.ExecutionEnvironment
serializer_class = serializers.ExecutionEnvironmentSerializer
swagger_topic = "Execution Environments"
class ExecutionEnvironmentJobTemplateList(SubListAPIView):
model = models.UnifiedJobTemplate
serializer_class = serializers.UnifiedJobTemplateSerializer
parent_model = models.ExecutionEnvironment
relationship = 'unifiedjobtemplates'
class ExecutionEnvironmentCopy(CopyAPIView):
model = models.ExecutionEnvironment
copy_return_serializer_class = serializers.ExecutionEnvironmentSerializer
class ExecutionEnvironmentActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.ExecutionEnvironment
relationship = 'activitystream_set'
search_fields = ('changes',)
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model)
return qs.filter(execution_environment=parent)
class ProjectList(ListCreateAPIView):
model = models.Project
serializer_class = serializers.ProjectSerializer
class ProjectDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
model = models.Project
serializer_class = serializers.ProjectSerializer
class ProjectPlaybooks(RetrieveAPIView):
model = models.Project
serializer_class = serializers.ProjectPlaybooksSerializer
class ProjectInventories(RetrieveAPIView):
model = models.Project
serializer_class = serializers.ProjectInventoriesSerializer
class ProjectTeamsList(ListAPIView):
model = models.Team
serializer_class = serializers.TeamSerializer
def get_queryset(self):
p = get_object_or_404(models.Project, pk=self.kwargs['pk'])
if not self.request.user.can_access(models.Project, 'read', p):
raise PermissionDenied()
project_ct = ContentType.objects.get_for_model(models.Project)
team_ct = ContentType.objects.get_for_model(self.model)
all_roles = models.Role.objects.filter(Q(descendents__content_type=project_ct) & Q(descendents__object_id=p.pk), content_type=team_ct)
return self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=[t.content_object.pk for t in all_roles])
class ProjectSchedulesList(SubListCreateAPIView):
name = _("Project Schedules")
model = models.Schedule
serializer_class = serializers.ScheduleSerializer
parent_model = models.Project
relationship = 'schedules'
parent_key = 'unified_job_template'
class ProjectScmInventorySources(SubListAPIView):
name = _("Project SCM Inventory Sources")
model = models.InventorySource
serializer_class = serializers.InventorySourceSerializer
parent_model = models.Project
relationship = 'scm_inventory_sources'
parent_key = 'source_project'
class ProjectActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.Project
relationship = 'activitystream_set'
search_fields = ('changes',)
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model)
if parent is None:
return qs
elif parent.credential is None:
return qs.filter(project=parent)
return qs.filter(Q(project=parent) | Q(credential=parent.credential))
class ProjectNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):
model = models.NotificationTemplate
serializer_class = serializers.NotificationTemplateSerializer
parent_model = models.Project
class ProjectNotificationTemplatesStartedList(ProjectNotificationTemplatesAnyList):
relationship = 'notification_templates_started'
class ProjectNotificationTemplatesErrorList(ProjectNotificationTemplatesAnyList):
relationship = 'notification_templates_error'
class ProjectNotificationTemplatesSuccessList(ProjectNotificationTemplatesAnyList):
relationship = 'notification_templates_success'
class ProjectUpdatesList(SubListAPIView):
model = models.ProjectUpdate
serializer_class = serializers.ProjectUpdateListSerializer
parent_model = models.Project
relationship = 'project_updates'
class ProjectUpdateView(RetrieveAPIView):
model = models.Project
serializer_class = serializers.ProjectUpdateViewSerializer
permission_classes = (ProjectUpdatePermission,)
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_update:
project_update = obj.update()
if not project_update:
return Response({}, status=status.HTTP_400_BAD_REQUEST)
else:
data = OrderedDict()
data['project_update'] = project_update.id
data.update(serializers.ProjectUpdateSerializer(project_update, context=self.get_serializer_context()).to_representation(project_update))
headers = {'Location': project_update.get_absolute_url(request=request)}
return Response(data, headers=headers, status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class ProjectUpdateList(ListAPIView):
model = models.ProjectUpdate
serializer_class = serializers.ProjectUpdateListSerializer
class ProjectUpdateDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
model = models.ProjectUpdate
serializer_class = serializers.ProjectUpdateDetailSerializer
class ProjectUpdateEventsList(SubListAPIView):
model = models.ProjectUpdateEvent
serializer_class = serializers.ProjectUpdateEventSerializer
parent_model = models.ProjectUpdate
relationship = 'project_update_events'
name = _('Project Update Events List')
search_fields = ('stdout',)
def finalize_response(self, request, response, *args, **kwargs):
response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
return super(ProjectUpdateEventsList, self).finalize_response(request, response, *args, **kwargs)
class SystemJobEventsList(SubListAPIView):
model = models.SystemJobEvent
serializer_class = serializers.SystemJobEventSerializer
parent_model = models.SystemJob
relationship = 'system_job_events'
name = _('System Job Events List')
search_fields = ('stdout',)
def finalize_response(self, request, response, *args, **kwargs):
response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
return super(SystemJobEventsList, self).finalize_response(request, response, *args, **kwargs)
class ProjectUpdateCancel(RetrieveAPIView):
model = models.ProjectUpdate
obj_permission_type = 'cancel'
serializer_class = serializers.ProjectUpdateCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class ProjectUpdateNotificationsList(SubListAPIView):
model = models.Notification
serializer_class = serializers.NotificationSerializer
parent_model = models.ProjectUpdate
relationship = 'notifications'
search_fields = ('subject', 'notification_type', 'body')
class ProjectUpdateScmInventoryUpdates(SubListAPIView):
name = _("Project Update SCM Inventory Updates")
model = models.InventoryUpdate
serializer_class = serializers.InventoryUpdateListSerializer
parent_model = models.ProjectUpdate
relationship = 'scm_inventory_updates'
parent_key = 'source_project_update'
class ProjectAccessList(ResourceAccessList):
    model = models.User  # needs to be User for ResourceAccessList
parent_model = models.Project
class ProjectObjectRolesList(SubListAPIView):
model = models.Role
serializer_class = serializers.RoleSerializer
parent_model = models.Project
search_fields = ('role_field', 'content_type__model')
def get_queryset(self):
po = self.get_parent_object()
content_type = ContentType.objects.get_for_model(self.parent_model)
return models.Role.objects.filter(content_type=content_type, object_id=po.pk)
class ProjectCopy(CopyAPIView):
model = models.Project
copy_return_serializer_class = serializers.ProjectSerializer
class UserList(ListCreateAPIView):
model = models.User
serializer_class = serializers.UserSerializer
permission_classes = (UserPermission,)
ordering = ('username',)
class UserMeList(ListAPIView):
model = models.User
serializer_class = serializers.UserSerializer
name = _('Me')
ordering = ('username',)
def get_queryset(self):
return self.model.objects.filter(pk=self.request.user.pk)
class OAuth2ApplicationList(ListCreateAPIView):
name = _("OAuth 2 Applications")
model = models.OAuth2Application
serializer_class = serializers.OAuth2ApplicationSerializer
swagger_topic = 'Authentication'
class OAuth2ApplicationDetail(RetrieveUpdateDestroyAPIView):
name = _("OAuth 2 Application Detail")
model = models.OAuth2Application
serializer_class = serializers.OAuth2ApplicationSerializer
swagger_topic = 'Authentication'
def update_raw_data(self, data):
data.pop('client_secret', None)
return super(OAuth2ApplicationDetail, self).update_raw_data(data)
class ApplicationOAuth2TokenList(SubListCreateAPIView):
name = _("OAuth 2 Application Tokens")
model = models.OAuth2AccessToken
serializer_class = serializers.OAuth2TokenSerializer
parent_model = models.OAuth2Application
relationship = 'oauth2accesstoken_set'
parent_key = 'application'
swagger_topic = 'Authentication'
class OAuth2ApplicationActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.OAuth2Application
relationship = 'activitystream_set'
swagger_topic = 'Authentication'
search_fields = ('changes',)
class OAuth2TokenList(ListCreateAPIView):
name = _("OAuth2 Tokens")
model = models.OAuth2AccessToken
serializer_class = serializers.OAuth2TokenSerializer
swagger_topic = 'Authentication'
class OAuth2UserTokenList(SubListCreateAPIView):
name = _("OAuth2 User Tokens")
model = models.OAuth2AccessToken
serializer_class = serializers.OAuth2TokenSerializer
parent_model = models.User
relationship = 'main_oauth2accesstoken'
parent_key = 'user'
swagger_topic = 'Authentication'
class UserAuthorizedTokenList(SubListCreateAPIView):
name = _("OAuth2 User Authorized Access Tokens")
model = models.OAuth2AccessToken
serializer_class = serializers.UserAuthorizedTokenSerializer
parent_model = models.User
relationship = 'oauth2accesstoken_set'
parent_key = 'user'
swagger_topic = 'Authentication'
def get_queryset(self):
return get_access_token_model().objects.filter(application__isnull=False, user=self.request.user)
class OrganizationApplicationList(SubListCreateAPIView):
name = _("Organization OAuth2 Applications")
model = models.OAuth2Application
serializer_class = serializers.OAuth2ApplicationSerializer
parent_model = models.Organization
relationship = 'applications'
parent_key = 'organization'
swagger_topic = 'Authentication'
class UserPersonalTokenList(SubListCreateAPIView):
name = _("OAuth2 Personal Access Tokens")
model = models.OAuth2AccessToken
serializer_class = serializers.UserPersonalTokenSerializer
parent_model = models.User
relationship = 'main_oauth2accesstoken'
parent_key = 'user'
swagger_topic = 'Authentication'
def get_queryset(self):
return get_access_token_model().objects.filter(application__isnull=True, user=self.request.user)
class OAuth2TokenDetail(RetrieveUpdateDestroyAPIView):
name = _("OAuth Token Detail")
model = models.OAuth2AccessToken
serializer_class = serializers.OAuth2TokenDetailSerializer
swagger_topic = 'Authentication'
class OAuth2TokenActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.OAuth2AccessToken
relationship = 'activitystream_set'
swagger_topic = 'Authentication'
search_fields = ('changes',)
class UserTeamsList(SubListAPIView):
model = models.Team
serializer_class = serializers.TeamSerializer
parent_model = models.User
def get_queryset(self):
u = get_object_or_404(models.User, pk=self.kwargs['pk'])
if not self.request.user.can_access(models.User, 'read', u):
raise PermissionDenied()
return models.Team.accessible_objects(self.request.user, 'read_role').filter(Q(member_role__members=u) | Q(admin_role__members=u)).distinct()
class UserRolesList(SubListAttachDetachAPIView):
model = models.Role
serializer_class = serializers.RoleSerializerWithParentAccess
metadata_class = RoleMetadata
parent_model = models.User
relationship = 'roles'
permission_classes = (IsAuthenticated,)
search_fields = ('role_field', 'content_type__model')
def get_queryset(self):
u = get_object_or_404(models.User, pk=self.kwargs['pk'])
if not self.request.user.can_access(models.User, 'read', u):
raise PermissionDenied()
content_type = ContentType.objects.get_for_model(models.User)
return models.Role.filter_visible_roles(self.request.user, u.roles.all()).exclude(content_type=content_type, object_id=u.id)
def post(self, request, *args, **kwargs):
sub_id = request.data.get('id', None)
if not sub_id:
return super(UserRolesList, self).post(request)
user = get_object_or_400(models.User, pk=self.kwargs['pk'])
role = get_object_or_400(models.Role, pk=sub_id)
credential_content_type = ContentType.objects.get_for_model(models.Credential)
if role.content_type == credential_content_type:
if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
data = dict(msg=_("You cannot grant credential access to a user not in the credentials' organization"))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
if not role.content_object.organization and not request.user.is_superuser:
data = dict(msg=_("You cannot grant private credential access to another user"))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
return super(UserRolesList, self).post(request, *args, **kwargs)
def check_parent_access(self, parent=None):
# We hide roles that shouldn't be seen in our queryset
return True
class UserProjectsList(SubListAPIView):
model = models.Project
serializer_class = serializers.ProjectSerializer
parent_model = models.User
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
my_qs = models.Project.accessible_objects(self.request.user, 'read_role')
user_qs = models.Project.accessible_objects(parent, 'read_role')
return my_qs & user_qs
class UserOrganizationsList(OrganizationCountsMixin, SubListAPIView):
model = models.Organization
serializer_class = serializers.OrganizationSerializer
parent_model = models.User
relationship = 'organizations'
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
my_qs = models.Organization.accessible_objects(self.request.user, 'read_role')
user_qs = models.Organization.objects.filter(member_role__members=parent)
return my_qs & user_qs
class UserAdminOfOrganizationsList(OrganizationCountsMixin, SubListAPIView):
model = models.Organization
serializer_class = serializers.OrganizationSerializer
parent_model = models.User
relationship = 'admin_of_organizations'
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
my_qs = models.Organization.accessible_objects(self.request.user, 'read_role')
user_qs = models.Organization.objects.filter(admin_role__members=parent)
return my_qs & user_qs
class UserActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.User
relationship = 'activitystream_set'
search_fields = ('changes',)
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model)
return qs.filter(Q(actor=parent) | Q(user__in=[parent]))
class UserDetail(RetrieveUpdateDestroyAPIView):
model = models.User
serializer_class = serializers.UserSerializer
def update_filter(self, request, *args, **kwargs):
        '''make sure non-read-only fields that can only be edited by admins are in fact only edited by admins'''
obj = self.get_object()
can_change = request.user.can_access(models.User, 'change', obj, request.data)
can_admin = request.user.can_access(models.User, 'admin', obj, request.data)
su_only_edit_fields = ('is_superuser', 'is_system_auditor')
admin_only_edit_fields = ('username', 'is_active')
fields_to_check = ()
if not request.user.is_superuser:
fields_to_check += su_only_edit_fields
if can_change and not can_admin:
fields_to_check += admin_only_edit_fields
bad_changes = {}
for field in fields_to_check:
left = getattr(obj, field, None)
right = request.data.get(field, None)
if left is not None and right is not None and left != right:
bad_changes[field] = (left, right)
if bad_changes:
raise PermissionDenied(_('Cannot change %s.') % ', '.join(bad_changes.keys()))
def destroy(self, request, *args, **kwargs):
obj = self.get_object()
can_delete = request.user.can_access(models.User, 'delete', obj)
if not can_delete:
raise PermissionDenied(_('Cannot delete user.'))
return super(UserDetail, self).destroy(request, *args, **kwargs)
class UserAccessList(ResourceAccessList):
    model = models.User  # needs to be User for ResourceAccessList
parent_model = models.User
class CredentialTypeList(ListCreateAPIView):
model = models.CredentialType
serializer_class = serializers.CredentialTypeSerializer
class CredentialTypeDetail(RetrieveUpdateDestroyAPIView):
model = models.CredentialType
serializer_class = serializers.CredentialTypeSerializer
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
if instance.managed_by_tower:
raise PermissionDenied(detail=_("Deletion not allowed for managed credential types"))
if instance.credentials.exists():
raise PermissionDenied(detail=_("Credential types that are in use cannot be deleted"))
return super(CredentialTypeDetail, self).destroy(request, *args, **kwargs)
class CredentialTypeCredentialList(SubListCreateAPIView):
model = models.Credential
parent_model = models.CredentialType
relationship = 'credentials'
serializer_class = serializers.CredentialSerializer
class CredentialTypeActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.CredentialType
relationship = 'activitystream_set'
search_fields = ('changes',)
class CredentialList(ListCreateAPIView):
model = models.Credential
serializer_class = serializers.CredentialSerializerCreate
class CredentialOwnerUsersList(SubListAPIView):
model = models.User
serializer_class = serializers.UserSerializer
parent_model = models.Credential
relationship = 'admin_role.members'
ordering = ('username',)
class CredentialOwnerTeamsList(SubListAPIView):
model = models.Team
serializer_class = serializers.TeamSerializer
parent_model = models.Credential
def get_queryset(self):
credential = get_object_or_404(self.parent_model, pk=self.kwargs['pk'])
if not self.request.user.can_access(models.Credential, 'read', credential):
raise PermissionDenied()
content_type = ContentType.objects.get_for_model(self.model)
teams = [c.content_object.pk for c in credential.admin_role.parents.filter(content_type=content_type)]
return self.model.objects.filter(pk__in=teams)
class UserCredentialsList(SubListCreateAPIView):
model = models.Credential
serializer_class = serializers.UserCredentialSerializerCreate
parent_model = models.User
parent_key = 'user'
def get_queryset(self):
user = self.get_parent_object()
self.check_parent_access(user)
visible_creds = models.Credential.accessible_objects(self.request.user, 'read_role')
user_creds = models.Credential.accessible_objects(user, 'read_role')
return user_creds & visible_creds
class TeamCredentialsList(SubListCreateAPIView):
model = models.Credential
serializer_class = serializers.TeamCredentialSerializerCreate
parent_model = models.Team
parent_key = 'team'
def get_queryset(self):
team = self.get_parent_object()
self.check_parent_access(team)
visible_creds = models.Credential.accessible_objects(self.request.user, 'read_role')
team_creds = models.Credential.objects.filter(Q(use_role__parents=team.member_role) | Q(admin_role__parents=team.member_role))
return (team_creds & visible_creds).distinct()
class OrganizationCredentialList(SubListCreateAPIView):
model = models.Credential
serializer_class = serializers.OrganizationCredentialSerializerCreate
parent_model = models.Organization
parent_key = 'organization'
def get_queryset(self):
organization = self.get_parent_object()
self.check_parent_access(organization)
user_visible = models.Credential.accessible_objects(self.request.user, 'read_role').all()
org_set = models.Credential.accessible_objects(organization.admin_role, 'read_role').all()
if self.request.user.is_superuser or self.request.user.is_system_auditor:
return org_set
return org_set & user_visible
class CredentialDetail(RetrieveUpdateDestroyAPIView):
model = models.Credential
serializer_class = serializers.CredentialSerializer
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
if instance.managed_by_tower:
raise PermissionDenied(detail=_("Deletion not allowed for managed credentials"))
return super(CredentialDetail, self).destroy(request, *args, **kwargs)
class CredentialActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.Credential
relationship = 'activitystream_set'
search_fields = ('changes',)
class CredentialAccessList(ResourceAccessList):
    model = models.User  # needs to be User for ResourceAccessList
parent_model = models.Credential
class CredentialObjectRolesList(SubListAPIView):
model = models.Role
serializer_class = serializers.RoleSerializer
parent_model = models.Credential
search_fields = ('role_field', 'content_type__model')
def get_queryset(self):
po = self.get_parent_object()
content_type = ContentType.objects.get_for_model(self.parent_model)
return models.Role.objects.filter(content_type=content_type, object_id=po.pk)
class CredentialCopy(CopyAPIView):
model = models.Credential
copy_return_serializer_class = serializers.CredentialSerializer
class CredentialExternalTest(SubDetailAPIView):
"""
Test updates to the input values and metadata of an external credential
before saving them.
"""
name = _('External Credential Test')
model = models.Credential
serializer_class = serializers.EmptySerializer
obj_permission_type = 'use'
def post(self, request, *args, **kwargs):
obj = self.get_object()
backend_kwargs = {}
for field_name, value in obj.inputs.items():
backend_kwargs[field_name] = obj.get_input(field_name)
for field_name, value in request.data.get('inputs', {}).items():
if value != '$encrypted$':
backend_kwargs[field_name] = value
backend_kwargs.update(request.data.get('metadata', {}))
try:
obj.credential_type.plugin.backend(**backend_kwargs)
return Response({}, status=status.HTTP_202_ACCEPTED)
except requests.exceptions.HTTPError as exc:
message = 'HTTP {}'.format(exc.response.status_code)
return Response({'inputs': message}, status=status.HTTP_400_BAD_REQUEST)
except Exception as exc:
message = exc.__class__.__name__
args = getattr(exc, 'args', [])
for a in args:
if isinstance(getattr(a, 'reason', None), ConnectTimeoutError):
message = str(a.reason)
return Response({'inputs': message}, status=status.HTTP_400_BAD_REQUEST)
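# Illustrative test request for CredentialExternalTest (the field names are
# placeholders, not real credential plugin inputs):
#
#   POST {"inputs": {"token": "..."}, "metadata": {"secret_path": "..."}}
#
# Saved inputs are used as defaults, '$encrypted$' placeholders are ignored,
# and a successful plugin lookup returns an empty 202 response.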
class CredentialInputSourceDetail(RetrieveUpdateDestroyAPIView):
name = _("Credential Input Source Detail")
model = models.CredentialInputSource
serializer_class = serializers.CredentialInputSourceSerializer
class CredentialInputSourceList(ListCreateAPIView):
name = _("Credential Input Sources")
model = models.CredentialInputSource
serializer_class = serializers.CredentialInputSourceSerializer
class CredentialInputSourceSubList(SubListCreateAPIView):
name = _("Credential Input Sources")
model = models.CredentialInputSource
serializer_class = serializers.CredentialInputSourceSerializer
parent_model = models.Credential
relationship = 'input_sources'
parent_key = 'target_credential'
class CredentialTypeExternalTest(SubDetailAPIView):
"""
Test a complete set of input values for an external credential before
saving it.
"""
name = _('External Credential Type Test')
model = models.CredentialType
serializer_class = serializers.EmptySerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
backend_kwargs = request.data.get('inputs', {})
backend_kwargs.update(request.data.get('metadata', {}))
try:
obj.plugin.backend(**backend_kwargs)
return Response({}, status=status.HTTP_202_ACCEPTED)
except requests.exceptions.HTTPError as exc:
message = 'HTTP {}'.format(exc.response.status_code)
return Response({'inputs': message}, status=status.HTTP_400_BAD_REQUEST)
except Exception as exc:
message = exc.__class__.__name__
args = getattr(exc, 'args', [])
for a in args:
if isinstance(getattr(a, 'reason', None), ConnectTimeoutError):
message = str(a.reason)
return Response({'inputs': message}, status=status.HTTP_400_BAD_REQUEST)
class HostRelatedSearchMixin(object):
@property
def related_search_fields(self):
# Edge-case handle: https://github.com/ansible/ansible-tower/issues/7712
ret = super(HostRelatedSearchMixin, self).related_search_fields
ret.append('ansible_facts')
return ret
class HostList(HostRelatedSearchMixin, ListCreateAPIView):
always_allow_superuser = False
model = models.Host
serializer_class = serializers.HostSerializer
def get_queryset(self):
qs = super(HostList, self).get_queryset()
filter_string = self.request.query_params.get('host_filter', None)
if filter_string:
filter_qs = SmartFilter.query_from_string(filter_string)
qs &= filter_qs
return qs.distinct()
def list(self, *args, **kwargs):
try:
return super(HostList, self).list(*args, **kwargs)
except Exception as e:
return Response(dict(error=_(str(e))), status=status.HTTP_400_BAD_REQUEST)
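# Illustrative host_filter usage for HostList (the filter string is only an
# example of SmartFilter syntax, not an exhaustive reference, and would be
# URL-encoded in a real request):
#
#   GET /api/v2/hosts/?host_filter=name=web01 or name=web02
#
# The SmartFilter-derived queryset is intersected with the RBAC-scoped
# queryset, and any parsing error surfaces as a 400 via list() above.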
class HostDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
always_allow_superuser = False
model = models.Host
serializer_class = serializers.HostSerializer
def delete(self, request, *args, **kwargs):
if self.get_object().inventory.pending_deletion:
return Response({"error": _("The inventory for this host is already being deleted.")}, status=status.HTTP_400_BAD_REQUEST)
return super(HostDetail, self).delete(request, *args, **kwargs)
class HostAnsibleFactsDetail(RetrieveAPIView):
model = models.Host
serializer_class = serializers.AnsibleFactsSerializer
class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIView):
model = models.Host
serializer_class = serializers.HostSerializer
parent_model = models.Inventory
relationship = 'hosts'
parent_key = 'inventory'
def get_queryset(self):
inventory = self.get_parent_object()
qs = getattrd(inventory, self.relationship).all()
# Apply queryset optimizations
qs = qs.select_related(*HostAccess.select_related).prefetch_related(*HostAccess.prefetch_related)
return qs
class HostGroupsList(ControlledByScmMixin, SubListCreateAttachDetachAPIView):
'''the list of groups a host is directly a member of'''
model = models.Group
serializer_class = serializers.GroupSerializer
parent_model = models.Host
relationship = 'groups'
def update_raw_data(self, data):
data.pop('inventory', None)
return super(HostGroupsList, self).update_raw_data(data)
def create(self, request, *args, **kwargs):
# Inject parent host inventory ID into new group data.
data = request.data
# HACK: Make request data mutable.
if getattr(data, '_mutable', None) is False:
data._mutable = True
data['inventory'] = self.get_parent_object().inventory_id
return super(HostGroupsList, self).create(request, *args, **kwargs)
class HostAllGroupsList(SubListAPIView):
'''the list of all groups of which the host is directly or indirectly a member'''
model = models.Group
serializer_class = serializers.GroupSerializer
parent_model = models.Host
relationship = 'groups'
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model).distinct()
sublist_qs = parent.all_groups.distinct()
return qs & sublist_qs
class HostInventorySourcesList(SubListAPIView):
model = models.InventorySource
serializer_class = serializers.InventorySourceSerializer
parent_model = models.Host
relationship = 'inventory_sources'
class HostSmartInventoriesList(SubListAPIView):
model = models.Inventory
serializer_class = serializers.InventorySerializer
parent_model = models.Host
relationship = 'smart_inventories'
class HostActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.Host
relationship = 'activitystream_set'
search_fields = ('changes',)
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model)
return qs.filter(Q(host=parent) | Q(inventory=parent.inventory))
class BadGateway(APIException):
status_code = status.HTTP_502_BAD_GATEWAY
default_detail = ''
default_code = 'bad_gateway'
class GatewayTimeout(APIException):
status_code = status.HTTP_504_GATEWAY_TIMEOUT
default_detail = ''
default_code = 'gateway_timeout'
class HostInsights(GenericAPIView):
model = models.Host
serializer_class = serializers.EmptySerializer
def _call_insights_api(self, url, session, headers):
try:
with set_environ(**settings.AWX_TASK_ENV):
res = session.get(url, headers=headers, timeout=120)
except requests.exceptions.SSLError:
raise BadGateway(_('SSLError while trying to connect to {}').format(url))
except requests.exceptions.Timeout:
raise GatewayTimeout(_('Request to {} timed out.').format(url))
except requests.exceptions.RequestException as e:
raise BadGateway(_('Unknown exception {} while trying to GET {}').format(e, url))
if res.status_code == 401:
raise BadGateway(_('Unauthorized access. Please check your Insights Credential username and password.'))
elif res.status_code != 200:
raise BadGateway(
_('Failed to access the Insights API at URL {}.' ' Server responded with {} status code and message {}').format(
url, res.status_code, res.content
)
)
try:
return res.json()
except ValueError:
raise BadGateway(_('Expected JSON response from Insights at URL {}' ' but instead got {}').format(url, res.content))
def _get_session(self, username, password):
session = requests.Session()
session.auth = requests.auth.HTTPBasicAuth(username, password)
return session
def _get_platform_info(self, host, session, headers):
url = '{}/api/inventory/v1/hosts?insights_id={}'.format(settings.INSIGHTS_URL_BASE, host.insights_system_id)
res = self._call_insights_api(url, session, headers)
try:
res['results'][0]['id']
except (IndexError, KeyError):
raise NotFound(_('Could not translate Insights system ID {}' ' into an Insights platform ID.').format(host.insights_system_id))
return res['results'][0]
def _get_reports(self, platform_id, session, headers):
url = '{}/api/insights/v1/system/{}/reports/'.format(settings.INSIGHTS_URL_BASE, platform_id)
return self._call_insights_api(url, session, headers)
def _get_remediations(self, platform_id, session, headers):
url = '{}/api/remediations/v1/remediations?system={}'.format(settings.INSIGHTS_URL_BASE, platform_id)
remediations = []
# Iterate over all of the pages of content.
while url:
data = self._call_insights_api(url, session, headers)
remediations.extend(data['data'])
url = data['links']['next'] # Will be `None` if this is the last page.
return remediations
def _get_insights(self, host, session, headers):
platform_info = self._get_platform_info(host, session, headers)
platform_id = platform_info['id']
reports = self._get_reports(platform_id, session, headers)
remediations = self._get_remediations(platform_id, session, headers)
return {'insights_content': filter_insights_api_response(platform_info, reports, remediations)}
def get(self, request, *args, **kwargs):
host = self.get_object()
cred = None
if host.insights_system_id is None:
return Response(dict(error=_('This host is not recognized as an Insights host.')), status=status.HTTP_404_NOT_FOUND)
if host.inventory and host.inventory.insights_credential:
cred = host.inventory.insights_credential
else:
return Response(dict(error=_('The Insights Credential for "{}" was not found.').format(host.inventory.name)), status=status.HTTP_404_NOT_FOUND)
username = cred.get_input('username', default='')
        password = cred.get_input('password', default='')
session = self._get_session(username, password)
headers = get_awx_http_client_headers()
data = self._get_insights(host, session, headers)
return Response(data, status=status.HTTP_200_OK)
def handle_exception(self, exc):
# Continue supporting the slightly different way we have handled error responses on this view.
response = super().handle_exception(exc)
response.data['error'] = response.data.pop('detail')
return response
class GroupList(ListCreateAPIView):
model = models.Group
serializer_class = serializers.GroupSerializer
class EnforceParentRelationshipMixin(object):
"""
    Useful when you have a self-referring ManyToManyRelationship.
* Tower uses a shallow (2-deep only) url pattern. For example:
When an object hangs off of a parent object you would have the url of the
form /api/v2/parent_model/34/child_model. If you then wanted a child of the
child model you would NOT do /api/v2/parent_model/34/child_model/87/child_child_model
Instead, you would access the child_child_model via /api/v2/child_child_model/87/
    and you would create child_child_models off of /api/v2/child_model/87/child_child_model_set
Now, when creating child_child_model related to child_model you still want to
link child_child_model to parent_model. That's what this class is for
"""
enforce_parent_relationship = ''
def update_raw_data(self, data):
data.pop(self.enforce_parent_relationship, None)
return super(EnforceParentRelationshipMixin, self).update_raw_data(data)
def create(self, request, *args, **kwargs):
# Inject parent group inventory ID into new group data.
data = request.data
# HACK: Make request data mutable.
if getattr(data, '_mutable', None) is False:
data._mutable = True
data[self.enforce_parent_relationship] = getattr(self.get_parent_object(), '%s_id' % self.enforce_parent_relationship)
return super(EnforceParentRelationshipMixin, self).create(request, *args, **kwargs)
class GroupChildrenList(ControlledByScmMixin, EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
model = models.Group
serializer_class = serializers.GroupSerializer
parent_model = models.Group
relationship = 'children'
enforce_parent_relationship = 'inventory'
def unattach(self, request, *args, **kwargs):
sub_id = request.data.get('id', None)
if sub_id is not None:
return super(GroupChildrenList, self).unattach(request, *args, **kwargs)
parent = self.get_parent_object()
if not request.user.can_access(self.model, 'delete', parent):
raise PermissionDenied()
parent.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
def is_valid_relation(self, parent, sub, created=False):
# Prevent any cyclical group associations.
parent_pks = set(parent.all_parents.values_list('pk', flat=True))
parent_pks.add(parent.pk)
child_pks = set(sub.all_children.values_list('pk', flat=True))
child_pks.add(sub.pk)
if parent_pks & child_pks:
return {'error': _('Cyclical Group association.')}
return None
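# Example of the cycle guard above: attaching group B as a child of group A is
# rejected when B (or any of B's descendants) is already A itself or one of
# A's ancestors, since parent_pks and child_pks then overlap.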
class GroupPotentialChildrenList(SubListAPIView):
model = models.Group
serializer_class = serializers.GroupSerializer
parent_model = models.Group
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model)
qs = qs.filter(inventory__pk=parent.inventory.pk)
except_pks = set([parent.pk])
except_pks.update(parent.all_parents.values_list('pk', flat=True))
except_pks.update(parent.all_children.values_list('pk', flat=True))
return qs.exclude(pk__in=except_pks)
class GroupHostsList(HostRelatedSearchMixin, ControlledByScmMixin, SubListCreateAttachDetachAPIView):
'''the list of hosts directly below a group'''
model = models.Host
serializer_class = serializers.HostSerializer
parent_model = models.Group
relationship = 'hosts'
def update_raw_data(self, data):
data.pop('inventory', None)
return super(GroupHostsList, self).update_raw_data(data)
def create(self, request, *args, **kwargs):
parent_group = models.Group.objects.get(id=self.kwargs['pk'])
# Inject parent group inventory ID into new host data.
request.data['inventory'] = parent_group.inventory_id
existing_hosts = models.Host.objects.filter(inventory=parent_group.inventory, name=request.data.get('name', ''))
if existing_hosts.count() > 0 and (
'variables' not in request.data or request.data['variables'] == '' or request.data['variables'] == '{}' or request.data['variables'] == '---'
):
request.data['id'] = existing_hosts[0].id
return self.attach(request, *args, **kwargs)
return super(GroupHostsList, self).create(request, *args, **kwargs)
class GroupAllHostsList(HostRelatedSearchMixin, SubListAPIView):
'''the list of all hosts below a group, even including subgroups'''
model = models.Host
serializer_class = serializers.HostSerializer
parent_model = models.Group
relationship = 'hosts'
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model).distinct() # need distinct for '&' operator
sublist_qs = parent.all_hosts.distinct()
return qs & sublist_qs
class GroupInventorySourcesList(SubListAPIView):
model = models.InventorySource
serializer_class = serializers.InventorySourceSerializer
parent_model = models.Group
relationship = 'inventory_sources'
class GroupActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.Group
relationship = 'activitystream_set'
search_fields = ('changes',)
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model)
return qs.filter(Q(group=parent) | Q(host__in=parent.hosts.all()))
class GroupDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
model = models.Group
serializer_class = serializers.GroupSerializer
def destroy(self, request, *args, **kwargs):
obj = self.get_object()
if not request.user.can_access(self.model, 'delete', obj):
raise PermissionDenied()
obj.delete_recursive()
return Response(status=status.HTTP_204_NO_CONTENT)
class InventoryGroupsList(SubListCreateAttachDetachAPIView):
model = models.Group
serializer_class = serializers.GroupSerializer
parent_model = models.Inventory
relationship = 'groups'
parent_key = 'inventory'
class InventoryRootGroupsList(SubListCreateAttachDetachAPIView):
model = models.Group
serializer_class = serializers.GroupSerializer
parent_model = models.Inventory
relationship = 'groups'
parent_key = 'inventory'
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model).distinct() # need distinct for '&' operator
return qs & parent.root_groups
class BaseVariableData(RetrieveUpdateAPIView):
parser_classes = api_settings.DEFAULT_PARSER_CLASSES + [YAMLParser]
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES + [YAMLRenderer]
permission_classes = (VariableDataPermission,)
class InventoryVariableData(BaseVariableData):
model = models.Inventory
serializer_class = serializers.InventoryVariableDataSerializer
class HostVariableData(BaseVariableData):
model = models.Host
serializer_class = serializers.HostVariableDataSerializer
class GroupVariableData(BaseVariableData):
model = models.Group
serializer_class = serializers.GroupVariableDataSerializer
class InventoryScriptView(RetrieveAPIView):
model = models.Inventory
serializer_class = serializers.InventoryScriptSerializer
permission_classes = (TaskPermission,)
filter_backends = ()
def retrieve(self, request, *args, **kwargs):
obj = self.get_object()
hostname = request.query_params.get('host', '')
hostvars = bool(request.query_params.get('hostvars', ''))
towervars = bool(request.query_params.get('towervars', ''))
show_all = bool(request.query_params.get('all', ''))
subset = request.query_params.get('subset', '')
if subset:
if not isinstance(subset, str):
raise ParseError(_('Inventory subset argument must be a string.'))
if subset.startswith('slice'):
slice_number, slice_count = models.Inventory.parse_slice_params(subset)
else:
raise ParseError(_('Subset does not use any supported syntax.'))
else:
slice_number, slice_count = 1, 1
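# Illustrative: a subset such as 'slice2of5' (assuming the sliceNofM form handled by
# parse_slice_params) selects one slice of the inventory; with no subset, the whole
# inventory is returned as slice 1 of 1.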
if hostname:
hosts_q = dict(name=hostname)
if not show_all:
hosts_q['enabled'] = True
host = get_object_or_404(obj.hosts, **hosts_q)
return Response(host.variables_dict)
return Response(obj.get_script_data(hostvars=hostvars, towervars=towervars, show_all=show_all, slice_number=slice_number, slice_count=slice_count))
class InventoryTreeView(RetrieveAPIView):
model = models.Inventory
serializer_class = serializers.GroupTreeSerializer
filter_backends = ()
def _populate_group_children(self, group_data, all_group_data_map, group_children_map):
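# The 'children' key doubles as a visited marker: a group dict that already has it is
# not expanded again, so a group reachable from several parents has its children
# computed only once (the same dict object is reused in the tree).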
if 'children' in group_data:
return
group_data['children'] = []
for child_id in group_children_map.get(group_data['id'], set()):
group_data['children'].append(all_group_data_map[child_id])
group_data['children'].sort(key=lambda x: x['name'])
for child_data in group_data['children']:
self._populate_group_children(child_data, all_group_data_map, group_children_map)
def retrieve(self, request, *args, **kwargs):
inventory = self.get_object()
group_children_map = inventory.get_group_children_map()
root_group_pks = inventory.root_groups.order_by('name').values_list('pk', flat=True)
groups_qs = inventory.groups
groups_qs = groups_qs.prefetch_related('inventory_sources')
all_group_data = serializers.GroupSerializer(groups_qs, many=True).data
all_group_data_map = dict((x['id'], x) for x in all_group_data)
tree_data = [all_group_data_map[x] for x in root_group_pks]
for group_data in tree_data:
self._populate_group_children(group_data, all_group_data_map, group_children_map)
return Response(tree_data)
class InventoryInventorySourcesList(SubListCreateAPIView):
name = _('Inventory Source List')
model = models.InventorySource
serializer_class = serializers.InventorySourceSerializer
parent_model = models.Inventory
# Creation is sometimes blocked by SCM inventory source restrictions
always_allow_superuser = False
relationship = 'inventory_sources'
parent_key = 'inventory'
class InventoryInventorySourcesUpdate(RetrieveAPIView):
name = _('Inventory Sources Update')
model = models.Inventory
obj_permission_type = 'start'
serializer_class = serializers.InventorySourceUpdateSerializer
permission_classes = (InventoryInventorySourcesUpdatePermission,)
def retrieve(self, request, *args, **kwargs):
inventory = self.get_object()
update_data = []
for inventory_source in inventory.inventory_sources.exclude(source=''):
details = {'inventory_source': inventory_source.pk, 'can_update': inventory_source.can_update}
update_data.append(details)
return Response(update_data)
def post(self, request, *args, **kwargs):
inventory = self.get_object()
update_data = []
successes = 0
failures = 0
for inventory_source in inventory.inventory_sources.exclude(source=''):
details = OrderedDict()
details['inventory_source'] = inventory_source.pk
details['status'] = None
if inventory_source.can_update:
update = inventory_source.update()
details.update(serializers.InventoryUpdateDetailSerializer(update, context=self.get_serializer_context()).to_representation(update))
details['status'] = 'started'
details['inventory_update'] = update.id
successes += 1
else:
if not details.get('status'):
details['status'] = _('Could not start because `can_update` returned False')
failures += 1
update_data.append(details)
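# Aggregate status code: 202 when some updates started and some could not, 400 when
# none could start, 400 with a message when there was nothing to update, and 200 when
# every update started.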
if failures and successes:
status_code = status.HTTP_202_ACCEPTED
elif failures and not successes:
status_code = status.HTTP_400_BAD_REQUEST
elif not failures and not successes:
return Response({'detail': _('No inventory sources to update.')}, status=status.HTTP_400_BAD_REQUEST)
else:
status_code = status.HTTP_200_OK
return Response(update_data, status=status_code)
class InventorySourceList(ListCreateAPIView):
model = models.InventorySource
serializer_class = serializers.InventorySourceSerializer
always_allow_superuser = False
class InventorySourceDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
model = models.InventorySource
serializer_class = serializers.InventorySourceSerializer
class InventorySourceSchedulesList(SubListCreateAPIView):
name = _("Inventory Source Schedules")
model = models.Schedule
serializer_class = serializers.ScheduleSerializer
parent_model = models.InventorySource
relationship = 'schedules'
parent_key = 'unified_job_template'
class InventorySourceActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.InventorySource
relationship = 'activitystream_set'
search_fields = ('changes',)
class InventorySourceNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):
model = models.NotificationTemplate
serializer_class = serializers.NotificationTemplateSerializer
parent_model = models.InventorySource
def post(self, request, *args, **kwargs):
parent = self.get_parent_object()
if parent.source not in models.CLOUD_INVENTORY_SOURCES:
return Response(
dict(msg=_("Notification Templates can only be assigned when source is one of {}.").format(models.CLOUD_INVENTORY_SOURCES, parent.source)),
status=status.HTTP_400_BAD_REQUEST,
)
return super(InventorySourceNotificationTemplatesAnyList, self).post(request, *args, **kwargs)
class InventorySourceNotificationTemplatesStartedList(InventorySourceNotificationTemplatesAnyList):
relationship = 'notification_templates_started'
class InventorySourceNotificationTemplatesErrorList(InventorySourceNotificationTemplatesAnyList):
relationship = 'notification_templates_error'
class InventorySourceNotificationTemplatesSuccessList(InventorySourceNotificationTemplatesAnyList):
relationship = 'notification_templates_success'
class InventorySourceHostsList(HostRelatedSearchMixin, SubListDestroyAPIView):
model = models.Host
serializer_class = serializers.HostSerializer
parent_model = models.InventorySource
relationship = 'hosts'
check_sub_obj_permission = False
def perform_list_destroy(self, instance_list):
inv_source = self.get_parent_object()
with ignore_inventory_computed_fields():
if not settings.ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
# job host summary deletion necessary to avoid deadlock
models.JobHostSummary.objects.filter(host__inventory_sources=inv_source).update(host=None)
models.Host.objects.filter(inventory_sources=inv_source).delete()
r = super(InventorySourceHostsList, self).perform_list_destroy([])
else:
# Delete group-host memberships in advance to prevent deadlock.
# The activity stream doesn't record disassociation here anyway,
# so there is no signals-related reason not to bulk-delete.
models.Host.groups.through.objects.filter(host__inventory_sources=inv_source).delete()
r = super(InventorySourceHostsList, self).perform_list_destroy(instance_list)
update_inventory_computed_fields.delay(inv_source.inventory_id)
return r
class InventorySourceGroupsList(SubListDestroyAPIView):
model = models.Group
serializer_class = serializers.GroupSerializer
parent_model = models.InventorySource
relationship = 'groups'
check_sub_obj_permission = False
def perform_list_destroy(self, instance_list):
inv_source = self.get_parent_object()
with ignore_inventory_computed_fields():
if not settings.ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
models.Group.objects.filter(inventory_sources=inv_source).delete()
r = super(InventorySourceGroupsList, self).perform_list_destroy([])
else:
# Delete group-host memberships in advance to prevent deadlock.
# Same arguments for bulk deletion as with the host list above.
models.Group.hosts.through.objects.filter(group__inventory_sources=inv_source).delete()
r = super(InventorySourceGroupsList, self).perform_list_destroy(instance_list)
update_inventory_computed_fields.delay(inv_source.inventory_id)
return r
class InventorySourceUpdatesList(SubListAPIView):
model = models.InventoryUpdate
serializer_class = serializers.InventoryUpdateListSerializer
parent_model = models.InventorySource
relationship = 'inventory_updates'
class InventorySourceCredentialsList(SubListAttachDetachAPIView):
parent_model = models.InventorySource
model = models.Credential
serializer_class = serializers.CredentialSerializer
relationship = 'credentials'
def is_valid_relation(self, parent, sub, created=False):
# Inventory source credentials are exclusive with all other credentials
# subject to change for https://github.com/ansible/awx/issues/277
# or https://github.com/ansible/awx/issues/223
if parent.credentials.exists():
return {'msg': _("Source already has credential assigned.")}
error = models.InventorySource.cloud_credential_validation(parent.source, sub)
if error:
return {'msg': error}
return None
class InventorySourceUpdateView(RetrieveAPIView):
model = models.InventorySource
obj_permission_type = 'start'
serializer_class = serializers.InventorySourceUpdateSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_update:
update = obj.update()
if not update:
return Response({}, status=status.HTTP_400_BAD_REQUEST)
else:
headers = {'Location': update.get_absolute_url(request=request)}
data = OrderedDict()
data['inventory_update'] = update.id
data.update(serializers.InventoryUpdateDetailSerializer(update, context=self.get_serializer_context()).to_representation(update))
return Response(data, status=status.HTTP_202_ACCEPTED, headers=headers)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class InventoryUpdateList(ListAPIView):
model = models.InventoryUpdate
serializer_class = serializers.InventoryUpdateListSerializer
class InventoryUpdateDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
model = models.InventoryUpdate
serializer_class = serializers.InventoryUpdateDetailSerializer
class InventoryUpdateCredentialsList(SubListAPIView):
parent_model = models.InventoryUpdate
model = models.Credential
serializer_class = serializers.CredentialSerializer
relationship = 'credentials'
class InventoryUpdateCancel(RetrieveAPIView):
model = models.InventoryUpdate
obj_permission_type = 'cancel'
serializer_class = serializers.InventoryUpdateCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class InventoryUpdateNotificationsList(SubListAPIView):
model = models.Notification
serializer_class = serializers.NotificationSerializer
parent_model = models.InventoryUpdate
relationship = 'notifications'
search_fields = ('subject', 'notification_type', 'body')
class JobTemplateList(ListCreateAPIView):
model = models.JobTemplate
serializer_class = serializers.JobTemplateSerializer
always_allow_superuser = False
def post(self, request, *args, **kwargs):
ret = super(JobTemplateList, self).post(request, *args, **kwargs)
if ret.status_code == 201:
job_template = models.JobTemplate.objects.get(id=ret.data['id'])
job_template.admin_role.members.add(request.user)
return ret
class JobTemplateDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
model = models.JobTemplate
serializer_class = serializers.JobTemplateSerializer
always_allow_superuser = False
class JobTemplateLaunch(RetrieveAPIView):
model = models.JobTemplate
obj_permission_type = 'start'
serializer_class = serializers.JobLaunchSerializer
always_allow_superuser = False
def update_raw_data(self, data):
try:
obj = self.get_object()
except PermissionDenied:
return data
extra_vars = data.pop('extra_vars', None) or {}
if obj:
needed_passwords = obj.passwords_needed_to_start
if needed_passwords:
data['credential_passwords'] = {}
for p in needed_passwords:
data['credential_passwords'][p] = u''
else:
data.pop('credential_passwords', None)
for v in obj.variables_needed_to_start:
extra_vars.setdefault(v, u'')
if extra_vars:
data['extra_vars'] = extra_vars
modified_ask_mapping = models.JobTemplate.get_ask_mapping()
modified_ask_mapping.pop('extra_vars')
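# Only fields the template actually prompts for (the ask_*_on_launch flags returned by
# get_ask_mapping) are kept in the pre-filled launch payload; everything else is
# stripped below.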
for field, ask_field_name in modified_ask_mapping.items():
if not getattr(obj, ask_field_name):
data.pop(field, None)
elif field == 'inventory':
data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None)
elif field == 'credentials':
data[field] = [cred.id for cred in obj.credentials.all()]
else:
data[field] = getattr(obj, field)
return data
def modernize_launch_payload(self, data, obj):
"""
Perform simple translations of request data to support the
old field structure of the launch endpoint
TODO: delete this method with future API version changes
"""
modern_data = data.copy()
id_fd = '{}_id'.format('inventory')
if 'inventory' not in modern_data and id_fd in modern_data:
modern_data['inventory'] = modern_data[id_fd]
# credential passwords were historically provided as top-level attributes
if 'credential_passwords' not in modern_data:
modern_data['credential_passwords'] = data.copy()
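# Illustrative: a legacy payload like {'inventory_id': 3, 'ssh_password': 's3cret'} is
# translated to also include 'inventory': 3 and a 'credential_passwords' dict copied
# from the original top-level data.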
return modern_data
def post(self, request, *args, **kwargs):
obj = self.get_object()
try:
modern_data = self.modernize_launch_payload(data=request.data, obj=obj)
except ParseError as exc:
return Response(exc.detail, status=status.HTTP_400_BAD_REQUEST)
serializer = self.serializer_class(data=modern_data, context={'template': obj})
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
if not request.user.can_access(models.JobLaunchConfig, 'add', serializer.validated_data, template=obj):
raise PermissionDenied()
passwords = serializer.validated_data.pop('credential_passwords', {})
new_job = obj.create_unified_job(**serializer.validated_data)
result = new_job.signal_start(**passwords)
if not result:
data = dict(passwords_needed_to_start=new_job.passwords_needed_to_start)
new_job.delete()
return Response(data, status=status.HTTP_400_BAD_REQUEST)
else:
data = OrderedDict()
if isinstance(new_job, models.WorkflowJob):
data['workflow_job'] = new_job.id
data['ignored_fields'] = self.sanitize_for_response(serializer._ignored_fields)
data.update(serializers.WorkflowJobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
else:
data['job'] = new_job.id
data['ignored_fields'] = self.sanitize_for_response(serializer._ignored_fields)
data.update(serializers.JobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
headers = {'Location': new_job.get_absolute_url(request)}
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
def sanitize_for_response(self, data):
"""
Model objects cannot be serialized by DRF;
replace them with their ids for inclusion in the response
"""
def display_value(val):
if hasattr(val, 'id'):
return val.id
else:
return val
sanitized_data = {}
for field_name, value in data.items():
if isinstance(value, (set, list)):
sanitized_data[field_name] = []
for sub_value in value:
sanitized_data[field_name].append(display_value(sub_value))
else:
sanitized_data[field_name] = display_value(value)
return sanitized_data
class JobTemplateSchedulesList(SubListCreateAPIView):
name = _("Job Template Schedules")
model = models.Schedule
serializer_class = serializers.ScheduleSerializer
parent_model = models.JobTemplate
relationship = 'schedules'
parent_key = 'unified_job_template'
class JobTemplateSurveySpec(GenericAPIView):
model = models.JobTemplate
obj_permission_type = 'admin'
serializer_class = serializers.EmptySerializer
ALLOWED_TYPES = {'text': str, 'textarea': str, 'password': str, 'multiplechoice': str, 'multiselect': str, 'integer': int, 'float': float}
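# A minimal spec accepted by _validate_spec_data looks roughly like (illustrative):
# {'name': '', 'description': '', 'spec': [
#     {'type': 'text', 'question_name': 'Greeting', 'variable': 'greeting', 'required': False}]}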
def get(self, request, *args, **kwargs):
obj = self.get_object()
return Response(obj.display_survey_spec())
def post(self, request, *args, **kwargs):
obj = self.get_object()
if not request.user.can_access(self.model, 'change', obj, None):
raise PermissionDenied()
response = self._validate_spec_data(request.data, obj.survey_spec)
if response:
return response
obj.survey_spec = request.data
obj.save(update_fields=['survey_spec'])
return Response()
@staticmethod
def _validate_spec_data(new_spec, old_spec):
schema_errors = {}
for field, expect_type, type_label in [('name', str, 'string'), ('description', str, 'string'), ('spec', list, 'list of items')]:
if field not in new_spec:
schema_errors['error'] = _("Field '{}' is missing from survey spec.").format(field)
elif not isinstance(new_spec[field], expect_type):
schema_errors['error'] = _("Expected {} for field '{}', received {} type.").format(type_label, field, type(new_spec[field]).__name__)
if isinstance(new_spec.get('spec', None), list) and len(new_spec["spec"]) < 1:
schema_errors['error'] = _("'spec' doesn't contain any items.")
if schema_errors:
return Response(schema_errors, status=status.HTTP_400_BAD_REQUEST)
variable_set = set()
old_spec_dict = models.JobTemplate.pivot_spec(old_spec)
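# pivot_spec maps each variable name to its existing question definition, which is used
# below to look up previously stored (possibly encrypted) defaults when '$encrypted$'
# is submitted.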
for idx, survey_item in enumerate(new_spec["spec"]):
context = dict(idx=str(idx), survey_item=survey_item)
# General element validation
if not isinstance(survey_item, dict):
return Response(dict(error=_("Survey question %s is not a json object.") % str(idx)), status=status.HTTP_400_BAD_REQUEST)
for field_name in ['type', 'question_name', 'variable', 'required']:
if field_name not in survey_item:
return Response(
dict(error=_("'{field_name}' missing from survey question {idx}").format(field_name=field_name, **context)),
status=status.HTTP_400_BAD_REQUEST,
)
val = survey_item[field_name]
allow_types = str
type_label = 'string'
if field_name == 'required':
allow_types = bool
type_label = 'boolean'
if not isinstance(val, allow_types):
return Response(
dict(
error=_("'{field_name}' in survey question {idx} expected to be {type_label}.").format(
field_name=field_name, type_label=type_label, **context
)
),
status=status.HTTP_400_BAD_REQUEST,
)
if survey_item['variable'] in variable_set:
return Response(
dict(error=_("'variable' '%(item)s' duplicated in survey question %(survey)s.") % {'item': survey_item['variable'], 'survey': str(idx)}),
status=status.HTTP_400_BAD_REQUEST,
)
else:
variable_set.add(survey_item['variable'])
# Type-specific validation
# validate question type <-> default type
qtype = survey_item["type"]
if qtype not in JobTemplateSurveySpec.ALLOWED_TYPES:
return Response(
dict(
error=_("'{survey_item[type]}' in survey question {idx} is not one of '{allowed_types}' allowed question types.").format(
allowed_types=', '.join(JobTemplateSurveySpec.ALLOWED_TYPES.keys()), **context
)
),
status=status.HTTP_400_BAD_REQUEST,
)
if 'default' in survey_item and survey_item['default'] != '':
if not isinstance(survey_item['default'], JobTemplateSurveySpec.ALLOWED_TYPES[qtype]):
type_label = 'string'
if qtype in ['integer', 'float']:
type_label = qtype
return Response(
dict(
error=_("Default value {survey_item[default]} in survey question {idx} expected to be {type_label}.").format(
type_label=type_label, **context
)
),
status=status.HTTP_400_BAD_REQUEST,
)
# Additional type-specific properties; the UI provides these even
# if they are not applicable to the question. TODO: request that it not do this.
for key in ['min', 'max']:
if key in survey_item:
if survey_item[key] is not None and (not isinstance(survey_item[key], int)):
return Response(
dict(error=_("The {min_or_max} limit in survey question {idx} expected to be integer.").format(min_or_max=key, **context)),
status=status.HTTP_400_BAD_REQUEST,
)
# If it's a multiselect or multiple choice question, it must have choices listed.
# Choices and defaults must come in as strings separated by \n characters.
if qtype == 'multiselect' or qtype == 'multiplechoice':
if 'choices' in survey_item:
if isinstance(survey_item['choices'], str):
survey_item['choices'] = '\n'.join(choice for choice in survey_item['choices'].splitlines() if choice.strip() != '')
else:
return Response(
dict(error=_("Survey question {idx} of type {survey_item[type]} must specify choices.".format(**context))),
status=status.HTTP_400_BAD_REQUEST,
)
# If there is a default string, split it out, removing extra \n characters.
# Note: extra newline characters can still be added via the API; these are sanitized out using .strip().
if 'default' in survey_item:
if isinstance(survey_item['default'], str):
survey_item['default'] = '\n'.join(choice for choice in survey_item['default'].splitlines() if choice.strip() != '')
list_of_defaults = survey_item['default'].splitlines()
else:
list_of_defaults = survey_item['default']
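# Example: a multiselect default of 'a\nb' is treated as two selected choices,
# ['a', 'b'], each of which must appear among the question's listed choices.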
if qtype == 'multiplechoice':
# Multiplechoice types should only have 1 default.
if len(list_of_defaults) > 1:
return Response(
dict(error=_("Multiple Choice (Single Select) can only have one default value.".format(**context))),
status=status.HTTP_400_BAD_REQUEST,
)
if any(item not in survey_item['choices'] for item in list_of_defaults):
return Response(
dict(error=_("Default choice must be answered from the choices listed.".format(**context))), status=status.HTTP_400_BAD_REQUEST
)
# Process encryption substitution
if "default" in survey_item and isinstance(survey_item['default'], str) and survey_item['default'].startswith('$encrypted$'):
# Submission expects an existing encrypted DB value to replace the given default
if qtype != "password":
return Response(
dict(
error=_(
"$encrypted$ is a reserved keyword for password question defaults, " "survey question {idx} is type {survey_item[type]}."
).format(**context)
),
status=status.HTTP_400_BAD_REQUEST,
)
old_element = old_spec_dict.get(survey_item['variable'], {})
encryptedish_default_exists = False
if 'default' in old_element:
old_default = old_element['default']
if isinstance(old_default, str):
if old_default.startswith('$encrypted$'):
encryptedish_default_exists = True
elif old_default == "": # unencrypted blank string is allowed as DB value as special case
encryptedish_default_exists = True
if not encryptedish_default_exists:
return Response(
dict(error=_("$encrypted$ is a reserved keyword, may not be used for new default in position {idx}.").format(**context)),
status=status.HTTP_400_BAD_REQUEST,
)
survey_item['default'] = old_element['default']
elif qtype == "password" and 'default' in survey_item:
# Submission provides new encrypted default
survey_item['default'] = encrypt_value(survey_item['default'])
def delete(self, request, *args, **kwargs):
obj = self.get_object()
if not request.user.can_access(self.model, 'delete', obj):
raise PermissionDenied()
obj.survey_spec = {}
obj.save()
return Response()
class WorkflowJobTemplateSurveySpec(JobTemplateSurveySpec):
model = models.WorkflowJobTemplate
class JobTemplateActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.JobTemplate
relationship = 'activitystream_set'
search_fields = ('changes',)
class JobTemplateNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):
model = models.NotificationTemplate
serializer_class = serializers.NotificationTemplateSerializer
parent_model = models.JobTemplate
class JobTemplateNotificationTemplatesStartedList(JobTemplateNotificationTemplatesAnyList):
relationship = 'notification_templates_started'
class JobTemplateNotificationTemplatesErrorList(JobTemplateNotificationTemplatesAnyList):
relationship = 'notification_templates_error'
class JobTemplateNotificationTemplatesSuccessList(JobTemplateNotificationTemplatesAnyList):
relationship = 'notification_templates_success'
class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
model = models.Credential
serializer_class = serializers.CredentialSerializer
parent_model = models.JobTemplate
relationship = 'credentials'
def get_queryset(self):
# Return the full list of credentials
parent = self.get_parent_object()
self.check_parent_access(parent)
sublist_qs = getattrd(parent, self.relationship)
sublist_qs = sublist_qs.prefetch_related(
'created_by', 'modified_by', 'admin_role', 'use_role', 'read_role', 'admin_role__parents', 'admin_role__members'
)
return sublist_qs
def is_valid_relation(self, parent, sub, created=False):
if sub.unique_hash() in [cred.unique_hash() for cred in parent.credentials.all()]:
return {"error": _("Cannot assign multiple {credential_type} credentials.").format(credential_type=sub.unique_hash(display=True))}
kind = sub.credential_type.kind
if kind not in ('ssh', 'vault', 'cloud', 'net', 'kubernetes'):
return {'error': _('Cannot assign a Credential of kind `{}`.').format(kind)}
return super(JobTemplateCredentialsList, self).is_valid_relation(parent, sub, created)
class JobTemplateLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView):
model = models.Label
serializer_class = serializers.LabelSerializer
parent_model = models.JobTemplate
relationship = 'labels'
def post(self, request, *args, **kwargs):
# If a label already exists in the database, attach it instead of returning an
# error that it already exists
if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
existing = models.Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
if existing.exists():
existing = existing[0]
request.data['id'] = existing.id
del request.data['name']
del request.data['organization']
if models.Label.objects.filter(unifiedjobtemplate_labels=self.kwargs['pk']).count() > 100:
return Response(
dict(msg=_('Maximum number of labels for {} reached.'.format(self.parent_model._meta.verbose_name_raw))), status=status.HTTP_400_BAD_REQUEST
)
return super(JobTemplateLabelList, self).post(request, *args, **kwargs)
class JobTemplateCallback(GenericAPIView):
model = models.JobTemplate
permission_classes = (JobTemplateCallbackPermission,)
serializer_class = serializers.EmptySerializer
parser_classes = api_settings.DEFAULT_PARSER_CLASSES + [FormParser]
@csrf_exempt
@transaction.non_atomic_requests
def dispatch(self, *args, **kwargs):
return super(JobTemplateCallback, self).dispatch(*args, **kwargs)
def find_matching_hosts(self):
"""
Find the host(s) in the job template's inventory that match the remote
host for the current request.
"""
# Find the list of remote host names/IPs to check.
remote_hosts = set()
for header in settings.REMOTE_HOST_HEADERS:
for value in self.request.META.get(header, '').split(','):
value = value.strip()
if value:
remote_hosts.add(value)
# Add the reverse lookup of IP addresses.
for rh in list(remote_hosts):
try:
result = socket.gethostbyaddr(rh)
except socket.herror:
continue
except socket.gaierror:
continue
remote_hosts.add(result[0])
remote_hosts.update(result[1])
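# gethostbyaddr returns (hostname, aliaslist, ipaddrlist); the canonical name and any
# aliases are added as additional candidate names for matching.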
# Filter out any .arpa results.
for rh in list(remote_hosts):
if rh.endswith('.arpa'):
remote_hosts.remove(rh)
if not remote_hosts:
return set()
# Find the host objects to search for a match.
obj = self.get_object()
hosts = obj.inventory.hosts.all()
# Populate host_mappings
host_mappings = {}
for host in hosts:
host_name = host.get_effective_host_name()
host_mappings.setdefault(host_name, [])
host_mappings[host_name].append(host)
# Try finding direct match
matches = set()
for host_name in remote_hosts:
if host_name in host_mappings:
matches.update(host_mappings[host_name])
if len(matches) == 1:
return matches
# Try to resolve forward addresses for each host to find matches.
for host_name in host_mappings:
try:
result = socket.getaddrinfo(host_name, None)
possible_ips = set(x[4][0] for x in result)
possible_ips.discard(host_name)
if possible_ips and possible_ips & remote_hosts:
matches.update(host_mappings[host_name])
except socket.gaierror:
pass
except UnicodeError:
pass
return matches
def get(self, request, *args, **kwargs):
job_template = self.get_object()
matching_hosts = self.find_matching_hosts()
data = dict(host_config_key=job_template.host_config_key, matching_hosts=[x.name for x in matching_hosts])
if settings.DEBUG:
d = dict([(k, v) for k, v in request.META.items() if k.startswith('HTTP_') or k.startswith('REMOTE_')])
data['request_meta'] = d
return Response(data)
def post(self, request, *args, **kwargs):
extra_vars = None
# Be careful here: content_type can look like '<content_type>; charset=blar'
if request.content_type.startswith("application/json"):
extra_vars = request.data.get("extra_vars", None)
# Permission class should have already validated host_config_key.
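# Illustrative provisioning-callback request (exact URL path assumed):
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"host_config_key": "...", "extra_vars": {"foo": "bar"}}' \
#        https://<tower>/api/v2/job_templates/<id>/callback/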
job_template = self.get_object()
# Attempt to find matching hosts based on remote address.
matching_hosts = self.find_matching_hosts()
# If the host is not found, update the inventory before trying to
# match again.
inventory_sources_already_updated = []
if len(matching_hosts) != 1:
inventory_sources = job_template.inventory.inventory_sources.filter(update_on_launch=True)
inventory_update_pks = set()
for inventory_source in inventory_sources:
if inventory_source.needs_update_on_launch:
# FIXME: Doesn't check for any existing updates.
inventory_update = inventory_source.create_inventory_update(**{'_eager_fields': {'launch_type': 'callback'}})
inventory_update.signal_start()
inventory_update_pks.add(inventory_update.pk)
inventory_update_qs = models.InventoryUpdate.objects.filter(pk__in=inventory_update_pks, status__in=('pending', 'waiting', 'running'))
# Poll for the inventory updates we've started to complete.
while inventory_update_qs.count():
time.sleep(1.0)
transaction.commit()
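# The explicit commit ends the current transaction so the next count() sees fresh
# status values; this only works because dispatch() is wrapped in
# transaction.non_atomic_requests above.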
# Ignore failed inventory updates here, only add successful ones
# to the list to be excluded when running the job.
for inventory_update in models.InventoryUpdate.objects.filter(pk__in=inventory_update_pks, status='successful'):
inventory_sources_already_updated.append(inventory_update.inventory_source_id)
matching_hosts = self.find_matching_hosts()
# Check matching hosts.
if not matching_hosts:
data = dict(msg=_('No matching host could be found!'))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
elif len(matching_hosts) > 1:
data = dict(msg=_('Multiple hosts matched the request!'))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
else:
host = list(matching_hosts)[0]
if not job_template.can_start_without_user_input(callback_extra_vars=extra_vars):
data = dict(msg=_('Cannot start automatically, user input required!'))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
limit = host.name
# NOTE: We limit this to one waiting job per host per callback to keep them from stacking up
if models.Job.objects.filter(status__in=['pending', 'waiting', 'running'], job_template=job_template, limit=limit).count() > 0:
data = dict(msg=_('Host callback job already pending.'))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
# Everything is fine; actually create the job.
kv = {"limit": limit}
kv.setdefault('_eager_fields', {})['launch_type'] = 'callback'
if extra_vars is not None and job_template.ask_variables_on_launch:
extra_vars_redacted, removed = extract_ansible_vars(extra_vars)
kv['extra_vars'] = extra_vars_redacted
kv['_prevent_slicing'] = True # will only run against 1 host, so no point
with transaction.atomic():
job = job_template.create_job(**kv)
# Send a signal to signify that the job should be started.
result = job.signal_start(inventory_sources_already_updated=inventory_sources_already_updated)
if not result:
data = dict(msg=_('Error starting job!'))
job.delete()
return Response(data, status=status.HTTP_400_BAD_REQUEST)
# Return the location of the new job.
headers = {'Location': job.get_absolute_url(request=request)}
return Response(status=status.HTTP_201_CREATED, headers=headers)
class JobTemplateJobsList(SubListAPIView):
model = models.Job
serializer_class = serializers.JobListSerializer
parent_model = models.JobTemplate
relationship = 'jobs'
parent_key = 'job_template'
class JobTemplateSliceWorkflowJobsList(SubListCreateAPIView):
model = models.WorkflowJob
serializer_class = serializers.WorkflowJobListSerializer
parent_model = models.JobTemplate
relationship = 'slice_workflow_jobs'
parent_key = 'job_template'
class JobTemplateInstanceGroupsList(SubListAttachDetachAPIView):
model = models.InstanceGroup
serializer_class = serializers.InstanceGroupSerializer
parent_model = models.JobTemplate
relationship = 'instance_groups'
class JobTemplateAccessList(ResourceAccessList):
model = models.User # needs to be User for AccessList's
parent_model = models.JobTemplate
class JobTemplateObjectRolesList(SubListAPIView):
model = models.Role
serializer_class = serializers.RoleSerializer
parent_model = models.JobTemplate
search_fields = ('role_field', 'content_type__model')
def get_queryset(self):
po = self.get_parent_object()
content_type = ContentType.objects.get_for_model(self.parent_model)
return models.Role.objects.filter(content_type=content_type, object_id=po.pk)
class JobTemplateCopy(CopyAPIView):
model = models.JobTemplate
copy_return_serializer_class = serializers.JobTemplateSerializer
class WorkflowJobNodeList(ListAPIView):
model = models.WorkflowJobNode
serializer_class = serializers.WorkflowJobNodeListSerializer
search_fields = ('unified_job_template__name', 'unified_job_template__description')
class WorkflowJobNodeDetail(RetrieveAPIView):
model = models.WorkflowJobNode
serializer_class = serializers.WorkflowJobNodeDetailSerializer
class WorkflowJobNodeCredentialsList(SubListAPIView):
model = models.Credential
serializer_class = serializers.CredentialSerializer
parent_model = models.WorkflowJobNode
relationship = 'credentials'
class WorkflowJobTemplateNodeList(ListCreateAPIView):
model = models.WorkflowJobTemplateNode
serializer_class = serializers.WorkflowJobTemplateNodeSerializer
search_fields = ('unified_job_template__name', 'unified_job_template__description')
class WorkflowJobTemplateNodeDetail(RetrieveUpdateDestroyAPIView):
model = models.WorkflowJobTemplateNode
serializer_class = serializers.WorkflowJobTemplateNodeDetailSerializer
class WorkflowJobTemplateNodeCredentialsList(LaunchConfigCredentialsBase):
parent_model = models.WorkflowJobTemplateNode
class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
model = models.WorkflowJobTemplateNode
serializer_class = serializers.WorkflowJobTemplateNodeSerializer
always_allow_superuser = True
parent_model = models.WorkflowJobTemplateNode
relationship = ''
enforce_parent_relationship = 'workflow_job_template'
search_fields = ('unified_job_template__name', 'unified_job_template__description')
'''
Limit the set of WorkflowJobTemplateNodes to the related nodes specified by
'relationship'.
'''
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
return getattr(parent, self.relationship).all()
def is_valid_relation(self, parent, sub, created=False):
if created:
return None
if parent.id == sub.id:
return {"Error": _("Cycle detected.")}
'''
Look for a parent->child connection in all relationships except the one that is
being added, because it's OK to re-add the same relationship.
'''
relationships = ['success_nodes', 'failure_nodes', 'always_nodes']
relationships.remove(self.relationship)
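# Build one OR filter over the remaining relationships, e.g.
# Q(failure_nodes__in=[sub.id]) | Q(always_nodes__in=[sub.id]) when attaching a
# success node; a hit means sub is already a child of parent via another link type.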
qs = functools.reduce(lambda x, y: (x | y), (Q(**{'{}__in'.format(r): [sub.id]}) for r in relationships))
if models.WorkflowJobTemplateNode.objects.filter(Q(pk=parent.id) & qs).exists():
return {"Error": _("Relationship not allowed.")}
parent_node_type_relationship = getattr(parent, self.relationship)
parent_node_type_relationship.add(sub)
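# Tentatively add the edge, check the whole workflow graph for cycles, then undo the
# change; the real attach happens later through the normal association flow.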
graph = WorkflowDAG(parent.workflow_job_template)
if graph.has_cycle():
parent_node_type_relationship.remove(sub)
return {"Error": _("Cycle detected.")}
parent_node_type_relationship.remove(sub)
return None
class WorkflowJobTemplateNodeCreateApproval(RetrieveAPIView):
model = models.WorkflowJobTemplateNode
serializer_class = serializers.WorkflowJobTemplateNodeCreateApprovalSerializer
permission_classes = []
def post(self, request, *args, **kwargs):
obj = self.get_object()
serializer = self.get_serializer(instance=obj, data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
approval_template = obj.create_approval_template(**serializer.validated_data)
data = serializers.WorkflowApprovalTemplateSerializer(approval_template, context=self.get_serializer_context()).data
return Response(data, status=status.HTTP_201_CREATED)
def check_permissions(self, request):
if not request.user.is_authenticated:
raise PermissionDenied()
obj = self.get_object().workflow_job_template
if request.method == 'POST':
if not request.user.can_access(models.WorkflowJobTemplate, 'change', obj, request.data):
self.permission_denied(request)
else:
if not request.user.can_access(models.WorkflowJobTemplate, 'read', obj):
self.permission_denied(request)
class WorkflowJobTemplateNodeSuccessNodesList(WorkflowJobTemplateNodeChildrenBaseList):
relationship = 'success_nodes'
class WorkflowJobTemplateNodeFailureNodesList(WorkflowJobTemplateNodeChildrenBaseList):
relationship = 'failure_nodes'
class WorkflowJobTemplateNodeAlwaysNodesList(WorkflowJobTemplateNodeChildrenBaseList):
relationship = 'always_nodes'
class WorkflowJobNodeChildrenBaseList(SubListAPIView):
model = models.WorkflowJobNode
serializer_class = serializers.WorkflowJobNodeListSerializer
parent_model = models.WorkflowJobNode
relationship = ''
search_fields = ('unified_job_template__name', 'unified_job_template__description')
#
# Limit the set of WorkflowJobNodes to the related nodes specified by
# 'relationship'.
#
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
return getattr(parent, self.relationship).all()
class WorkflowJobNodeSuccessNodesList(WorkflowJobNodeChildrenBaseList):
relationship = 'success_nodes'
class WorkflowJobNodeFailureNodesList(WorkflowJobNodeChildrenBaseList):
relationship = 'failure_nodes'
class WorkflowJobNodeAlwaysNodesList(WorkflowJobNodeChildrenBaseList):
relationship = 'always_nodes'
class WorkflowJobTemplateList(ListCreateAPIView):
model = models.WorkflowJobTemplate
serializer_class = serializers.WorkflowJobTemplateSerializer
always_allow_superuser = False
class WorkflowJobTemplateDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
model = models.WorkflowJobTemplate
serializer_class = serializers.WorkflowJobTemplateSerializer
always_allow_superuser = False
class WorkflowJobTemplateCopy(CopyAPIView):
model = models.WorkflowJobTemplate
copy_return_serializer_class = serializers.WorkflowJobTemplateSerializer
def get(self, request, *args, **kwargs):
obj = self.get_object()
if not request.user.can_access(obj.__class__, 'read', obj):
raise PermissionDenied()
can_copy, messages = request.user.can_access_with_errors(self.model, 'copy', obj)
data = OrderedDict(
[
('can_copy', can_copy),
('can_copy_without_user_input', can_copy),
('templates_unable_to_copy', [] if can_copy else ['all']),
('credentials_unable_to_copy', [] if can_copy else ['all']),
('inventories_unable_to_copy', [] if can_copy else ['all']),
]
)
if messages and can_copy:
data['can_copy_without_user_input'] = False
data.update(messages)
return Response(data)
def _build_create_dict(self, obj):
"""Special processing of fields managed by char_prompts"""
r = super(WorkflowJobTemplateCopy, self)._build_create_dict(obj)
field_names = set(f.name for f in obj._meta.get_fields())
for field_name, ask_field_name in obj.get_ask_mapping().items():
if field_name in r and field_name not in field_names:
r.setdefault('char_prompts', {})
r['char_prompts'][field_name] = r.pop(field_name)
return r
@staticmethod
def deep_copy_permission_check_func(user, new_objs):
for obj in new_objs:
for field_name in obj._get_workflow_job_field_names():
item = getattr(obj, field_name, None)
if item is None:
continue
elif field_name in ['inventory']:
if not user.can_access(item.__class__, 'use', item):
setattr(obj, field_name, None)
elif field_name in ['unified_job_template']:
if not user.can_access(item.__class__, 'start', item, validate_license=False):
setattr(obj, field_name, None)
elif field_name in ['credentials']:
for cred in item.all():
if not user.can_access(cred.__class__, 'use', cred):
logger.debug('Deep copy: removing {} from relationship due to permissions'.format(cred))
item.remove(cred.pk)
obj.save()
class WorkflowJobTemplateLabelList(JobTemplateLabelList):
parent_model = models.WorkflowJobTemplate
class WorkflowJobTemplateLaunch(RetrieveAPIView):
model = models.WorkflowJobTemplate
obj_permission_type = 'start'
serializer_class = serializers.WorkflowJobLaunchSerializer
always_allow_superuser = False
def update_raw_data(self, data):
try:
obj = self.get_object()
except PermissionDenied:
return data
extra_vars = data.pop('extra_vars', None) or {}
if obj:
for v in obj.variables_needed_to_start:
extra_vars.setdefault(v, u'')
if extra_vars:
data['extra_vars'] = extra_vars
modified_ask_mapping = models.WorkflowJobTemplate.get_ask_mapping()
modified_ask_mapping.pop('extra_vars')
for field_name, ask_field_name in obj.get_ask_mapping().items():
if not getattr(obj, ask_field_name):
data.pop(field_name, None)
elif field_name == 'inventory':
data[field_name] = getattrd(obj, "%s.%s" % (field_name, 'id'), None)
else:
data[field_name] = getattr(obj, field_name)
return data
def post(self, request, *args, **kwargs):
obj = self.get_object()
if 'inventory_id' in request.data:
request.data['inventory'] = request.data['inventory_id']
serializer = self.serializer_class(instance=obj, data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
if not request.user.can_access(models.JobLaunchConfig, 'add', serializer.validated_data, template=obj):
raise PermissionDenied()
new_job = obj.create_unified_job(**serializer.validated_data)
new_job.signal_start()
data = OrderedDict()
data['workflow_job'] = new_job.id
data['ignored_fields'] = serializer._ignored_fields
data.update(serializers.WorkflowJobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
headers = {'Location': new_job.get_absolute_url(request)}
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
class WorkflowJobRelaunch(GenericAPIView):
model = models.WorkflowJob
obj_permission_type = 'start'
serializer_class = serializers.EmptySerializer
def check_object_permissions(self, request, obj):
if request.method == 'POST' and obj:
relaunch_perm, messages = request.user.can_access_with_errors(self.model, 'start', obj)
if not relaunch_perm and 'workflow_job_template' in messages:
self.permission_denied(request, message=messages['workflow_job_template'])
return super(WorkflowJobRelaunch, self).check_object_permissions(request, obj)
def get(self, request, *args, **kwargs):
return Response({})
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.is_sliced_job:
jt = obj.job_template
if not jt:
raise ParseError(_('Cannot relaunch slice workflow job orphaned from job template.'))
elif not obj.inventory or min(obj.inventory.hosts.count(), jt.job_slice_count) != obj.workflow_nodes.count():
raise ParseError(_('Cannot relaunch sliced workflow job after slice count has changed.'))
new_workflow_job = obj.create_relaunch_workflow_job()
new_workflow_job.signal_start()
data = serializers.WorkflowJobSerializer(new_workflow_job, context=self.get_serializer_context()).data
headers = {'Location': new_workflow_job.get_absolute_url(request=request)}
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
class WorkflowJobTemplateWorkflowNodesList(SubListCreateAPIView):
model = models.WorkflowJobTemplateNode
serializer_class = serializers.WorkflowJobTemplateNodeSerializer
parent_model = models.WorkflowJobTemplate
relationship = 'workflow_job_template_nodes'
parent_key = 'workflow_job_template'
search_fields = ('unified_job_template__name', 'unified_job_template__description')
def get_queryset(self):
return super(WorkflowJobTemplateWorkflowNodesList, self).get_queryset().order_by('id')
class WorkflowJobTemplateJobsList(SubListAPIView):
model = models.WorkflowJob
serializer_class = serializers.WorkflowJobListSerializer
parent_model = models.WorkflowJobTemplate
relationship = 'workflow_jobs'
parent_key = 'workflow_job_template'
class WorkflowJobTemplateSchedulesList(SubListCreateAPIView):
name = _("Workflow Job Template Schedules")
model = models.Schedule
serializer_class = serializers.ScheduleSerializer
parent_model = models.WorkflowJobTemplate
relationship = 'schedules'
parent_key = 'unified_job_template'
class WorkflowJobTemplateNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):
model = models.NotificationTemplate
serializer_class = serializers.NotificationTemplateSerializer
parent_model = models.WorkflowJobTemplate
class WorkflowJobTemplateNotificationTemplatesStartedList(WorkflowJobTemplateNotificationTemplatesAnyList):
relationship = 'notification_templates_started'
class WorkflowJobTemplateNotificationTemplatesErrorList(WorkflowJobTemplateNotificationTemplatesAnyList):
relationship = 'notification_templates_error'
class WorkflowJobTemplateNotificationTemplatesSuccessList(WorkflowJobTemplateNotificationTemplatesAnyList):
relationship = 'notification_templates_success'
class WorkflowJobTemplateNotificationTemplatesApprovalList(WorkflowJobTemplateNotificationTemplatesAnyList):
relationship = 'notification_templates_approvals'
class WorkflowJobTemplateAccessList(ResourceAccessList):
model = models.User # needs to be User for AccessList's
parent_model = models.WorkflowJobTemplate
class WorkflowJobTemplateObjectRolesList(SubListAPIView):
model = models.Role
serializer_class = serializers.RoleSerializer
parent_model = models.WorkflowJobTemplate
search_fields = ('role_field', 'content_type__model')
def get_queryset(self):
po = self.get_parent_object()
content_type = ContentType.objects.get_for_model(self.parent_model)
return models.Role.objects.filter(content_type=content_type, object_id=po.pk)
class WorkflowJobTemplateActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.WorkflowJobTemplate
relationship = 'activitystream_set'
search_fields = ('changes',)
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model)
return qs.filter(Q(workflow_job_template=parent) | Q(workflow_job_template_node__workflow_job_template=parent)).distinct()
class WorkflowJobList(ListAPIView):
model = models.WorkflowJob
serializer_class = serializers.WorkflowJobListSerializer
class WorkflowJobDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
model = models.WorkflowJob
serializer_class = serializers.WorkflowJobSerializer
class WorkflowJobWorkflowNodesList(SubListAPIView):
model = models.WorkflowJobNode
serializer_class = serializers.WorkflowJobNodeListSerializer
always_allow_superuser = True
parent_model = models.WorkflowJob
relationship = 'workflow_job_nodes'
parent_key = 'workflow_job'
search_fields = ('unified_job_template__name', 'unified_job_template__description')
def get_queryset(self):
return super(WorkflowJobWorkflowNodesList, self).get_queryset().order_by('id')
class WorkflowJobCancel(RetrieveAPIView):
model = models.WorkflowJob
obj_permission_type = 'cancel'
serializer_class = serializers.WorkflowJobCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
schedule_task_manager()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class WorkflowJobNotificationsList(SubListAPIView):
model = models.Notification
serializer_class = serializers.NotificationSerializer
parent_model = models.WorkflowJob
relationship = 'notifications'
search_fields = ('subject', 'notification_type', 'body')
def get_sublist_queryset(self, parent):
return self.model.objects.filter(
Q(unifiedjob_notifications=parent)
| Q(unifiedjob_notifications__unified_job_node__workflow_job=parent, unifiedjob_notifications__workflowapproval__isnull=False)
).distinct()
class WorkflowJobActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.WorkflowJob
relationship = 'activitystream_set'
search_fields = ('changes',)
class SystemJobTemplateList(ListAPIView):
model = models.SystemJobTemplate
serializer_class = serializers.SystemJobTemplateSerializer
def get(self, request, *args, **kwargs):
if not request.user.is_superuser and not request.user.is_system_auditor:
raise PermissionDenied(_("Superuser privileges needed."))
return super(SystemJobTemplateList, self).get(request, *args, **kwargs)
class SystemJobTemplateDetail(RetrieveAPIView):
model = models.SystemJobTemplate
serializer_class = serializers.SystemJobTemplateSerializer
class SystemJobTemplateLaunch(GenericAPIView):
model = models.SystemJobTemplate
obj_permission_type = 'start'
serializer_class = serializers.EmptySerializer
def get(self, request, *args, **kwargs):
return Response({})
def post(self, request, *args, **kwargs):
obj = self.get_object()
new_job = obj.create_unified_job(extra_vars=request.data.get('extra_vars', {}))
new_job.signal_start()
data = OrderedDict()
data['system_job'] = new_job.id
data.update(serializers.SystemJobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
headers = {'Location': new_job.get_absolute_url(request)}
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
class SystemJobTemplateSchedulesList(SubListCreateAPIView):
name = _("System Job Template Schedules")
model = models.Schedule
serializer_class = serializers.ScheduleSerializer
parent_model = models.SystemJobTemplate
relationship = 'schedules'
parent_key = 'unified_job_template'
class SystemJobTemplateJobsList(SubListAPIView):
model = models.SystemJob
serializer_class = serializers.SystemJobListSerializer
parent_model = models.SystemJobTemplate
relationship = 'jobs'
parent_key = 'system_job_template'
class SystemJobTemplateNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):
model = models.NotificationTemplate
serializer_class = serializers.NotificationTemplateSerializer
parent_model = models.SystemJobTemplate
class SystemJobTemplateNotificationTemplatesStartedList(SystemJobTemplateNotificationTemplatesAnyList):
relationship = 'notification_templates_started'
class SystemJobTemplateNotificationTemplatesErrorList(SystemJobTemplateNotificationTemplatesAnyList):
relationship = 'notification_templates_error'
class SystemJobTemplateNotificationTemplatesSuccessList(SystemJobTemplateNotificationTemplatesAnyList):
relationship = 'notification_templates_success'
class JobList(ListAPIView):
model = models.Job
serializer_class = serializers.JobListSerializer
class JobDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
model = models.Job
serializer_class = serializers.JobDetailSerializer
def update(self, request, *args, **kwargs):
obj = self.get_object()
# Only allow changes (PUT/PATCH) when job status is "new".
if obj.status != 'new':
return self.http_method_not_allowed(request, *args, **kwargs)
return super(JobDetail, self).update(request, *args, **kwargs)
class JobCredentialsList(SubListAPIView):
model = models.Credential
serializer_class = serializers.CredentialSerializer
parent_model = models.Job
relationship = 'credentials'
class JobLabelList(SubListAPIView):
model = models.Label
serializer_class = serializers.LabelSerializer
parent_model = models.Job
relationship = 'labels'
parent_key = 'job'
class WorkflowJobLabelList(JobLabelList):
parent_model = models.WorkflowJob
class JobActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.Job
relationship = 'activitystream_set'
search_fields = ('changes',)
class JobCancel(RetrieveAPIView):
model = models.Job
obj_permission_type = 'cancel'
serializer_class = serializers.JobCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class JobRelaunch(RetrieveAPIView):
model = models.Job
obj_permission_type = 'start'
serializer_class = serializers.JobRelaunchSerializer
def update_raw_data(self, data):
data = super(JobRelaunch, self).update_raw_data(data)
try:
obj = self.get_object()
except PermissionDenied:
return data
if obj:
needed_passwords = obj.passwords_needed_to_start
if needed_passwords:
data['credential_passwords'] = {}
for p in needed_passwords:
data['credential_passwords'][p] = u''
else:
data.pop('credential_passwords', None)
return data
@transaction.non_atomic_requests
def dispatch(self, *args, **kwargs):
return super(JobRelaunch, self).dispatch(*args, **kwargs)
def check_object_permissions(self, request, obj):
if request.method == 'POST' and obj:
relaunch_perm, messages = request.user.can_access_with_errors(self.model, 'start', obj)
if not relaunch_perm and 'detail' in messages:
self.permission_denied(request, message=messages['detail'])
return super(JobRelaunch, self).check_object_permissions(request, obj)
def post(self, request, *args, **kwargs):
obj = self.get_object()
context = self.get_serializer_context()
modified_data = request.data.copy()
modified_data.setdefault('credential_passwords', {})
for password in obj.passwords_needed_to_start:
if password in modified_data:
modified_data['credential_passwords'][password] = modified_data[password]
# Note: is_valid() may modify request.data
# It will remove any key/value pair whose key is not in the 'passwords_needed_to_start' list
serializer = self.serializer_class(data=modified_data, context=context, instance=obj)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
copy_kwargs = {}
retry_hosts = serializer.validated_data.get('hosts', None)
if retry_hosts and retry_hosts != 'all':
if obj.status in ACTIVE_STATES:
return Response(
{'hosts': _('Wait until job finishes before retrying on {status_value} hosts.').format(status_value=retry_hosts)},
status=status.HTTP_400_BAD_REQUEST,
)
host_qs = obj.retry_qs(retry_hosts)
if not obj.job_events.filter(event='playbook_on_stats').exists():
return Response(
{'hosts': _('Cannot retry on {status_value} hosts, playbook stats not available.').format(status_value=retry_hosts)},
status=status.HTTP_400_BAD_REQUEST,
)
retry_host_list = host_qs.values_list('name', flat=True)
if len(retry_host_list) == 0:
return Response(
{'hosts': _('Cannot relaunch because previous job had 0 {status_value} hosts.').format(status_value=retry_hosts)},
status=status.HTTP_400_BAD_REQUEST,
)
copy_kwargs['limit'] = ','.join(retry_host_list)
new_job = obj.copy_unified_job(**copy_kwargs)
result = new_job.signal_start(**serializer.validated_data['credential_passwords'])
if not result:
data = dict(msg=_('Error starting job!'))
new_job.delete()
return Response(data, status=status.HTTP_400_BAD_REQUEST)
else:
data = serializers.JobSerializer(new_job, context=context).data
# Add job key to match what old relaunch returned.
data['job'] = new_job.id
headers = {'Location': new_job.get_absolute_url(request=request)}
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
class JobCreateSchedule(RetrieveAPIView):
model = models.Job
obj_permission_type = 'start'
serializer_class = serializers.JobCreateScheduleSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if not obj.can_schedule:
if getattr(obj, 'passwords_needed_to_start', None):
return Response({"error": _('Cannot create schedule because job requires credential passwords.')}, status=status.HTTP_400_BAD_REQUEST)
try:
obj.launch_config
except ObjectDoesNotExist:
return Response({"error": _('Cannot create schedule because job was launched by legacy method.')}, status=status.HTTP_400_BAD_REQUEST)
return Response({"error": _('Cannot create schedule because a related resource is missing.')}, status=status.HTTP_400_BAD_REQUEST)
config = obj.launch_config
        # Make up a name for the schedule and guarantee that it is unique
name = 'Auto-generated schedule from job {}'.format(obj.id)
existing_names = models.Schedule.objects.filter(name__startswith=name).values_list('name', flat=True)
if name in existing_names:
idx = 1
alt_name = '{} - number {}'.format(name, idx)
while alt_name in existing_names:
idx += 1
alt_name = '{} - number {}'.format(name, idx)
name = alt_name
schedule_data = dict(
name=name,
unified_job_template=obj.unified_job_template,
enabled=False,
rrule='{}Z RRULE:FREQ=MONTHLY;INTERVAL=1'.format(now().strftime('DTSTART:%Y%m%dT%H%M%S')),
extra_data=config.extra_data,
survey_passwords=config.survey_passwords,
inventory=config.inventory,
char_prompts=config.char_prompts,
credentials=set(config.credentials.all()),
)
if not request.user.can_access(models.Schedule, 'add', schedule_data):
raise PermissionDenied()
creds_list = schedule_data.pop('credentials')
schedule = models.Schedule.objects.create(**schedule_data)
schedule.credentials.add(*creds_list)
data = serializers.ScheduleSerializer(schedule, context=self.get_serializer_context()).data
data.serializer.instance = None # hack to avoid permissions.py assuming this is Job model
headers = {'Location': schedule.get_absolute_url(request=request)}
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
class JobNotificationsList(SubListAPIView):
model = models.Notification
serializer_class = serializers.NotificationSerializer
parent_model = models.Job
relationship = 'notifications'
search_fields = ('subject', 'notification_type', 'body')
class BaseJobHostSummariesList(SubListAPIView):
model = models.JobHostSummary
serializer_class = serializers.JobHostSummarySerializer
parent_model = None # Subclasses must define this attribute.
relationship = 'job_host_summaries'
name = _('Job Host Summaries List')
search_fields = ('host_name',)
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
return getattr(parent, self.relationship).select_related('job', 'job__job_template', 'host')
class HostJobHostSummariesList(BaseJobHostSummariesList):
parent_model = models.Host
class GroupJobHostSummariesList(BaseJobHostSummariesList):
parent_model = models.Group
class JobJobHostSummariesList(BaseJobHostSummariesList):
parent_model = models.Job
class JobHostSummaryDetail(RetrieveAPIView):
model = models.JobHostSummary
serializer_class = serializers.JobHostSummarySerializer
class JobEventList(NoTruncateMixin, ListAPIView):
model = models.JobEvent
serializer_class = serializers.JobEventSerializer
search_fields = ('stdout',)
class JobEventDetail(RetrieveAPIView):
model = models.JobEvent
serializer_class = serializers.JobEventSerializer
def get_serializer_context(self):
context = super().get_serializer_context()
context.update(no_truncate=True)
return context
class JobEventChildrenList(NoTruncateMixin, SubListAPIView):
model = models.JobEvent
serializer_class = serializers.JobEventSerializer
parent_model = models.JobEvent
relationship = 'children'
name = _('Job Event Children List')
search_fields = ('stdout',)
def get_queryset(self):
parent_event = self.get_parent_object()
self.check_parent_access(parent_event)
qs = self.request.user.get_queryset(self.model).filter(parent_uuid=parent_event.uuid)
return qs
class JobEventHostsList(HostRelatedSearchMixin, SubListAPIView):
model = models.Host
serializer_class = serializers.HostSerializer
parent_model = models.JobEvent
relationship = 'hosts'
name = _('Job Event Hosts List')
def get_queryset(self):
parent_event = self.get_parent_object()
self.check_parent_access(parent_event)
qs = self.request.user.get_queryset(self.model).filter(job_events_as_primary_host=parent_event)
return qs
class BaseJobEventsList(NoTruncateMixin, SubListAPIView):
model = models.JobEvent
serializer_class = serializers.JobEventSerializer
parent_model = None # Subclasses must define this attribute.
relationship = 'job_events'
name = _('Job Events List')
search_fields = ('stdout',)
def finalize_response(self, request, response, *args, **kwargs):
response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
return super(BaseJobEventsList, self).finalize_response(request, response, *args, **kwargs)
class HostJobEventsList(BaseJobEventsList):
parent_model = models.Host
def get_queryset(self):
parent_obj = self.get_parent_object()
self.check_parent_access(parent_obj)
qs = self.request.user.get_queryset(self.model).filter(host=parent_obj)
return qs
class GroupJobEventsList(BaseJobEventsList):
parent_model = models.Group
class JobJobEventsList(BaseJobEventsList):
parent_model = models.Job
def get_queryset(self):
job = self.get_parent_object()
self.check_parent_access(job)
qs = job.job_events.select_related('host').order_by('start_line')
return qs.all()
class AdHocCommandList(ListCreateAPIView):
model = models.AdHocCommand
serializer_class = serializers.AdHocCommandListSerializer
always_allow_superuser = False
@transaction.non_atomic_requests
def dispatch(self, *args, **kwargs):
return super(AdHocCommandList, self).dispatch(*args, **kwargs)
def update_raw_data(self, data):
# Hide inventory and limit fields from raw data, since they will be set
# automatically by sub list create view.
parent_model = getattr(self, 'parent_model', None)
if parent_model in (models.Host, models.Group):
data.pop('inventory', None)
data.pop('limit', None)
return super(AdHocCommandList, self).update_raw_data(data)
def create(self, request, *args, **kwargs):
        # Inject inventory ID and limit if the parent object is a host/group.
if hasattr(self, 'get_parent_object') and not getattr(self, 'parent_key', None):
data = request.data
# HACK: Make request data mutable.
if getattr(data, '_mutable', None) is False:
data._mutable = True
parent_obj = self.get_parent_object()
if isinstance(parent_obj, (models.Host, models.Group)):
data['inventory'] = parent_obj.inventory_id
data['limit'] = parent_obj.name
# Check for passwords needed before creating ad hoc command.
credential_pk = get_pk_from_dict(request.data, 'credential')
if credential_pk:
credential = get_object_or_400(models.Credential, pk=credential_pk)
needed = credential.passwords_needed
provided = dict([(field, request.data.get(field, '')) for field in needed])
if not all(provided.values()):
data = dict(passwords_needed_to_start=needed)
return Response(data, status=status.HTTP_400_BAD_REQUEST)
response = super(AdHocCommandList, self).create(request, *args, **kwargs)
if response.status_code != status.HTTP_201_CREATED:
return response
# Start ad hoc command running when created.
ad_hoc_command = get_object_or_400(self.model, pk=response.data['id'])
result = ad_hoc_command.signal_start(**request.data)
if not result:
data = dict(passwords_needed_to_start=ad_hoc_command.passwords_needed_to_start)
ad_hoc_command.delete()
return Response(data, status=status.HTTP_400_BAD_REQUEST)
return response
class InventoryAdHocCommandsList(AdHocCommandList, SubListCreateAPIView):
parent_model = models.Inventory
relationship = 'ad_hoc_commands'
parent_key = 'inventory'
class GroupAdHocCommandsList(AdHocCommandList, SubListCreateAPIView):
parent_model = models.Group
relationship = 'ad_hoc_commands'
class HostAdHocCommandsList(AdHocCommandList, SubListCreateAPIView):
parent_model = models.Host
relationship = 'ad_hoc_commands'
class AdHocCommandDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
model = models.AdHocCommand
serializer_class = serializers.AdHocCommandDetailSerializer
class AdHocCommandCancel(RetrieveAPIView):
model = models.AdHocCommand
obj_permission_type = 'cancel'
serializer_class = serializers.AdHocCommandCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class AdHocCommandRelaunch(GenericAPIView):
model = models.AdHocCommand
obj_permission_type = 'start'
serializer_class = serializers.AdHocCommandRelaunchSerializer
# FIXME: Figure out why OPTIONS request still shows all fields.
@transaction.non_atomic_requests
def dispatch(self, *args, **kwargs):
return super(AdHocCommandRelaunch, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
obj = self.get_object()
data = dict(passwords_needed_to_start=obj.passwords_needed_to_start)
return Response(data)
def post(self, request, *args, **kwargs):
obj = self.get_object()
# Re-validate ad hoc command against serializer to check if module is
# still allowed.
data = {}
for field in ('job_type', 'inventory_id', 'limit', 'credential_id', 'module_name', 'module_args', 'forks', 'verbosity', 'extra_vars', 'become_enabled'):
if field.endswith('_id'):
data[field[:-3]] = getattr(obj, field)
else:
data[field] = getattr(obj, field)
serializer = serializers.AdHocCommandSerializer(data=data, context=self.get_serializer_context())
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Check for passwords needed before copying ad hoc command.
needed = obj.passwords_needed_to_start
provided = dict([(field, request.data.get(field, '')) for field in needed])
if not all(provided.values()):
data = dict(passwords_needed_to_start=needed)
return Response(data, status=status.HTTP_400_BAD_REQUEST)
# Copy and start the new ad hoc command.
new_ad_hoc_command = obj.copy()
result = new_ad_hoc_command.signal_start(**request.data)
if not result:
data = dict(passwords_needed_to_start=new_ad_hoc_command.passwords_needed_to_start)
new_ad_hoc_command.delete()
return Response(data, status=status.HTTP_400_BAD_REQUEST)
else:
data = serializers.AdHocCommandSerializer(new_ad_hoc_command, context=self.get_serializer_context()).data
# Add ad_hoc_command key to match what was previously returned.
data['ad_hoc_command'] = new_ad_hoc_command.id
headers = {'Location': new_ad_hoc_command.get_absolute_url(request=request)}
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
class AdHocCommandEventList(NoTruncateMixin, ListAPIView):
model = models.AdHocCommandEvent
serializer_class = serializers.AdHocCommandEventSerializer
search_fields = ('stdout',)
class AdHocCommandEventDetail(RetrieveAPIView):
model = models.AdHocCommandEvent
serializer_class = serializers.AdHocCommandEventSerializer
def get_serializer_context(self):
context = super().get_serializer_context()
context.update(no_truncate=True)
return context
class BaseAdHocCommandEventsList(NoTruncateMixin, SubListAPIView):
model = models.AdHocCommandEvent
serializer_class = serializers.AdHocCommandEventSerializer
parent_model = None # Subclasses must define this attribute.
relationship = 'ad_hoc_command_events'
name = _('Ad Hoc Command Events List')
search_fields = ('stdout',)
class HostAdHocCommandEventsList(BaseAdHocCommandEventsList):
parent_model = models.Host
# class GroupJobEventsList(BaseJobEventsList):
# parent_model = Group
class AdHocCommandAdHocCommandEventsList(BaseAdHocCommandEventsList):
parent_model = models.AdHocCommand
class AdHocCommandActivityStreamList(SubListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
parent_model = models.AdHocCommand
relationship = 'activitystream_set'
search_fields = ('changes',)
class AdHocCommandNotificationsList(SubListAPIView):
model = models.Notification
serializer_class = serializers.NotificationSerializer
parent_model = models.AdHocCommand
relationship = 'notifications'
search_fields = ('subject', 'notification_type', 'body')
class SystemJobList(ListAPIView):
model = models.SystemJob
serializer_class = serializers.SystemJobListSerializer
def get(self, request, *args, **kwargs):
if not request.user.is_superuser and not request.user.is_system_auditor:
raise PermissionDenied(_("Superuser privileges needed."))
return super(SystemJobList, self).get(request, *args, **kwargs)
class SystemJobDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
model = models.SystemJob
serializer_class = serializers.SystemJobSerializer
class SystemJobCancel(RetrieveAPIView):
model = models.SystemJob
obj_permission_type = 'cancel'
serializer_class = serializers.SystemJobCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class SystemJobNotificationsList(SubListAPIView):
model = models.Notification
serializer_class = serializers.NotificationSerializer
parent_model = models.SystemJob
relationship = 'notifications'
search_fields = ('subject', 'notification_type', 'body')
class UnifiedJobTemplateList(ListAPIView):
model = models.UnifiedJobTemplate
serializer_class = serializers.UnifiedJobTemplateSerializer
search_fields = ('description', 'name', 'jobtemplate__playbook')
class UnifiedJobList(ListAPIView):
model = models.UnifiedJob
serializer_class = serializers.UnifiedJobListSerializer
search_fields = ('description', 'name', 'job__playbook')
def redact_ansi(line):
# Remove ANSI escape sequences used to embed event data.
line = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', line)
# Remove ANSI color escape sequences.
return re.sub(r'\x1b[^m]*m', '', line)
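# Illustrative example (added for clarity, not part of the original module):
# redact_ansi() drops both the base64-bearing event-data sequences and plain
# ANSI color codes, so a colorized task line comes back as plain text, e.g.
#   redact_ansi('\x1b[0;32mchanged: [host1]\x1b[0m\n')  ->  'changed: [host1]\n'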
class StdoutFilter(object):
def __init__(self, fileobj):
self._functions = []
self.fileobj = fileobj
self.extra_data = ''
if hasattr(fileobj, 'close'):
self.close = fileobj.close
def read(self, size=-1):
data = self.extra_data
while size > 0 and len(data) < size:
line = self.fileobj.readline(size)
if not line:
break
line = self.process_line(line)
data += line
if size > 0 and len(data) > size:
self.extra_data = data[size:]
data = data[:size]
else:
self.extra_data = ''
return data
def register(self, func):
self._functions.append(func)
def process_line(self, line):
for func in self._functions:
line = func(line)
return line
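# Usage sketch (hypothetical file handle; mirrors how UnifiedJobStdout below
# wires a filter for plain-text downloads):
#   redactor = StdoutFilter(open('job_stdout.ansi'))  # hypothetical path
#   redactor.register(redact_ansi)
#   plain_text = redactor.read()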
class UnifiedJobStdout(RetrieveAPIView):
authentication_classes = api_settings.DEFAULT_AUTHENTICATION_CLASSES
serializer_class = serializers.UnifiedJobStdoutSerializer
renderer_classes = [
renderers.BrowsableAPIRenderer,
StaticHTMLRenderer,
renderers.PlainTextRenderer,
renderers.AnsiTextRenderer,
JSONRenderer,
renderers.DownloadTextRenderer,
renderers.AnsiDownloadRenderer,
]
filter_backends = ()
def retrieve(self, request, *args, **kwargs):
unified_job = self.get_object()
try:
target_format = request.accepted_renderer.format
if target_format in ('html', 'api', 'json'):
content_encoding = request.query_params.get('content_encoding', None)
start_line = request.query_params.get('start_line', 0)
end_line = request.query_params.get('end_line', None)
dark_val = request.query_params.get('dark', '')
dark = bool(dark_val and dark_val[0].lower() in ('1', 't', 'y'))
content_only = bool(target_format in ('api', 'json'))
dark_bg = (content_only and dark) or (not content_only and (dark or not dark_val))
content, start, end, absolute_end = unified_job.result_stdout_raw_limited(start_line, end_line)
# Remove any ANSI escape sequences containing job event data.
content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content)
body = ansiconv.to_html(html.escape(content))
context = {'title': get_view_name(self.__class__), 'body': mark_safe(body), 'dark': dark_bg, 'content_only': content_only}
data = render_to_string('api/stdout.html', context).strip()
if target_format == 'api':
return Response(mark_safe(data))
if target_format == 'json':
content = content.encode('utf-8')
if content_encoding == 'base64':
content = b64encode(content)
return Response({'range': {'start': start, 'end': end, 'absolute_end': absolute_end}, 'content': content})
return Response(data)
elif target_format == 'txt':
return Response(unified_job.result_stdout)
elif target_format == 'ansi':
return Response(unified_job.result_stdout_raw)
elif target_format in {'txt_download', 'ansi_download'}:
filename = '{type}_{pk}{suffix}.txt'.format(
type=camelcase_to_underscore(unified_job.__class__.__name__), pk=unified_job.id, suffix='.ansi' if target_format == 'ansi_download' else ''
)
content_fd = unified_job.result_stdout_raw_handle(enforce_max_bytes=False)
redactor = StdoutFilter(content_fd)
if target_format == 'txt_download':
redactor.register(redact_ansi)
if type(unified_job) == models.ProjectUpdate:
redactor.register(UriCleaner.remove_sensitive)
response = HttpResponse(FileWrapper(redactor), content_type='text/plain')
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
else:
return super(UnifiedJobStdout, self).retrieve(request, *args, **kwargs)
except models.StdoutMaxBytesExceeded as e:
response_message = _(
"Standard Output too large to display ({text_size} bytes), " "only download supported for sizes over {supported_size} bytes."
).format(text_size=e.total, supported_size=e.supported)
if request.accepted_renderer.format == 'json':
return Response({'range': {'start': 0, 'end': 1, 'absolute_end': 1}, 'content': response_message})
else:
return Response(response_message)
class ProjectUpdateStdout(UnifiedJobStdout):
model = models.ProjectUpdate
class InventoryUpdateStdout(UnifiedJobStdout):
model = models.InventoryUpdate
class JobStdout(UnifiedJobStdout):
model = models.Job
class AdHocCommandStdout(UnifiedJobStdout):
model = models.AdHocCommand
class NotificationTemplateList(ListCreateAPIView):
model = models.NotificationTemplate
serializer_class = serializers.NotificationTemplateSerializer
class NotificationTemplateDetail(RetrieveUpdateDestroyAPIView):
model = models.NotificationTemplate
serializer_class = serializers.NotificationTemplateSerializer
def delete(self, request, *args, **kwargs):
obj = self.get_object()
if not request.user.can_access(self.model, 'delete', obj):
return Response(status=status.HTTP_404_NOT_FOUND)
hours_old = now() - dateutil.relativedelta.relativedelta(hours=8)
if obj.notifications.filter(status='pending', created__gt=hours_old).exists():
return Response({"error": _("Delete not allowed while there are pending notifications")}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
return super(NotificationTemplateDetail, self).delete(request, *args, **kwargs)
class NotificationTemplateTest(GenericAPIView):
'''Test a Notification Template'''
name = _('Notification Template Test')
model = models.NotificationTemplate
obj_permission_type = 'start'
serializer_class = serializers.EmptySerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
msg = "Tower Notification Test {} {}".format(obj.id, settings.TOWER_URL_BASE)
if obj.notification_type in ('email', 'pagerduty'):
body = "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)
elif obj.notification_type in ('webhook', 'grafana'):
body = '{{"body": "Ansible Tower Test Notification {} {}"}}'.format(obj.id, settings.TOWER_URL_BASE)
else:
body = {"body": "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)}
notification = obj.generate_notification(msg, body)
if not notification:
return Response({}, status=status.HTTP_400_BAD_REQUEST)
else:
connection.on_commit(lambda: send_notifications.delay([notification.id]))
data = OrderedDict()
data['notification'] = notification.id
data.update(serializers.NotificationSerializer(notification, context=self.get_serializer_context()).to_representation(notification))
headers = {'Location': notification.get_absolute_url(request=request)}
return Response(data, headers=headers, status=status.HTTP_202_ACCEPTED)
class NotificationTemplateNotificationList(SubListAPIView):
model = models.Notification
serializer_class = serializers.NotificationSerializer
parent_model = models.NotificationTemplate
relationship = 'notifications'
parent_key = 'notification_template'
search_fields = ('subject', 'notification_type', 'body')
class NotificationTemplateCopy(CopyAPIView):
model = models.NotificationTemplate
copy_return_serializer_class = serializers.NotificationTemplateSerializer
class NotificationList(ListAPIView):
model = models.Notification
serializer_class = serializers.NotificationSerializer
search_fields = ('subject', 'notification_type', 'body')
class NotificationDetail(RetrieveAPIView):
model = models.Notification
serializer_class = serializers.NotificationSerializer
class LabelList(ListCreateAPIView):
model = models.Label
serializer_class = serializers.LabelSerializer
class LabelDetail(RetrieveUpdateAPIView):
model = models.Label
serializer_class = serializers.LabelSerializer
class ActivityStreamList(SimpleListAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
search_fields = ('changes',)
class ActivityStreamDetail(RetrieveAPIView):
model = models.ActivityStream
serializer_class = serializers.ActivityStreamSerializer
class RoleList(ListAPIView):
model = models.Role
serializer_class = serializers.RoleSerializer
permission_classes = (IsAuthenticated,)
search_fields = ('role_field', 'content_type__model')
class RoleDetail(RetrieveAPIView):
model = models.Role
serializer_class = serializers.RoleSerializer
class RoleUsersList(SubListAttachDetachAPIView):
model = models.User
serializer_class = serializers.UserSerializer
parent_model = models.Role
relationship = 'members'
ordering = ('username',)
def get_queryset(self):
role = self.get_parent_object()
self.check_parent_access(role)
return role.members.all()
def post(self, request, *args, **kwargs):
# Forbid implicit user creation here
sub_id = request.data.get('id', None)
if not sub_id:
return super(RoleUsersList, self).post(request)
user = get_object_or_400(models.User, pk=sub_id)
role = self.get_parent_object()
credential_content_type = ContentType.objects.get_for_model(models.Credential)
if role.content_type == credential_content_type:
if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
data = dict(msg=_("You cannot grant credential access to a user not in the credentials' organization"))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
if not role.content_object.organization and not request.user.is_superuser:
data = dict(msg=_("You cannot grant private credential access to another user"))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
return super(RoleUsersList, self).post(request, *args, **kwargs)
class RoleTeamsList(SubListAttachDetachAPIView):
model = models.Team
serializer_class = serializers.TeamSerializer
parent_model = models.Role
relationship = 'member_role.parents'
permission_classes = (IsAuthenticated,)
def get_queryset(self):
role = self.get_parent_object()
self.check_parent_access(role)
return models.Team.objects.filter(member_role__children=role)
def post(self, request, pk, *args, **kwargs):
sub_id = request.data.get('id', None)
if not sub_id:
return super(RoleTeamsList, self).post(request)
team = get_object_or_400(models.Team, pk=sub_id)
role = models.Role.objects.get(pk=self.kwargs['pk'])
organization_content_type = ContentType.objects.get_for_model(models.Organization)
if role.content_type == organization_content_type and role.role_field in ['member_role', 'admin_role']:
data = dict(msg=_("You cannot assign an Organization participation role as a child role for a Team."))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
credential_content_type = ContentType.objects.get_for_model(models.Credential)
if role.content_type == credential_content_type:
if not role.content_object.organization or role.content_object.organization.id != team.organization.id:
data = dict(msg=_("You cannot grant credential access to a team when the Organization field isn't set, or belongs to a different organization"))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
action = 'attach'
if request.data.get('disassociate', None):
action = 'unattach'
if role.is_singleton() and action == 'attach':
data = dict(msg=_("You cannot grant system-level permissions to a team."))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
if not request.user.can_access(self.parent_model, action, role, team, self.relationship, request.data, skip_sub_obj_read_check=False):
raise PermissionDenied()
if request.data.get('disassociate', None):
team.member_role.children.remove(role)
else:
team.member_role.children.add(role)
return Response(status=status.HTTP_204_NO_CONTENT)
class RoleParentsList(SubListAPIView):
model = models.Role
serializer_class = serializers.RoleSerializer
parent_model = models.Role
relationship = 'parents'
permission_classes = (IsAuthenticated,)
search_fields = ('role_field', 'content_type__model')
def get_queryset(self):
role = models.Role.objects.get(pk=self.kwargs['pk'])
return models.Role.filter_visible_roles(self.request.user, role.parents.all())
class RoleChildrenList(SubListAPIView):
model = models.Role
serializer_class = serializers.RoleSerializer
parent_model = models.Role
relationship = 'children'
permission_classes = (IsAuthenticated,)
search_fields = ('role_field', 'content_type__model')
def get_queryset(self):
role = models.Role.objects.get(pk=self.kwargs['pk'])
return models.Role.filter_visible_roles(self.request.user, role.children.all())
# Create view functions for all of the class-based views to simplify inclusion
# in URL patterns and reverse URL lookups, converting CamelCase names to
# lowercase_with_underscore (e.g. MyView.as_view() becomes my_view).
this_module = sys.modules[__name__]
for attr, value in list(locals().items()):
if isinstance(value, type) and issubclass(value, APIView):
name = camelcase_to_underscore(attr)
view = value.as_view()
setattr(this_module, name, view)
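# For example, the loop above exposes JobList.as_view() as job_list and
# JobRelaunch.as_view() as job_relaunch on this module; classes defined below
# this point are not picked up by it.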
class WorkflowApprovalTemplateDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
model = models.WorkflowApprovalTemplate
serializer_class = serializers.WorkflowApprovalTemplateSerializer
class WorkflowApprovalTemplateJobsList(SubListAPIView):
model = models.WorkflowApproval
serializer_class = serializers.WorkflowApprovalListSerializer
parent_model = models.WorkflowApprovalTemplate
relationship = 'approvals'
parent_key = 'workflow_approval_template'
class WorkflowApprovalList(ListCreateAPIView):
model = models.WorkflowApproval
serializer_class = serializers.WorkflowApprovalListSerializer
def get(self, request, *args, **kwargs):
return super(WorkflowApprovalList, self).get(request, *args, **kwargs)
class WorkflowApprovalDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
model = models.WorkflowApproval
serializer_class = serializers.WorkflowApprovalSerializer
class WorkflowApprovalApprove(RetrieveAPIView):
model = models.WorkflowApproval
serializer_class = serializers.WorkflowApprovalViewSerializer
permission_classes = (WorkflowApprovalPermission,)
def post(self, request, *args, **kwargs):
obj = self.get_object()
if not request.user.can_access(models.WorkflowApproval, 'approve_or_deny', obj):
raise PermissionDenied(detail=_("User does not have permission to approve or deny this workflow."))
if obj.status != 'pending':
return Response({"error": _("This workflow step has already been approved or denied.")}, status=status.HTTP_400_BAD_REQUEST)
obj.approve(request)
return Response(status=status.HTTP_204_NO_CONTENT)
class WorkflowApprovalDeny(RetrieveAPIView):
model = models.WorkflowApproval
serializer_class = serializers.WorkflowApprovalViewSerializer
permission_classes = (WorkflowApprovalPermission,)
def post(self, request, *args, **kwargs):
obj = self.get_object()
if not request.user.can_access(models.WorkflowApproval, 'approve_or_deny', obj):
raise PermissionDenied(detail=_("User does not have permission to approve or deny this workflow."))
if obj.status != 'pending':
return Response({"error": _("This workflow step has already been approved or denied.")}, status=status.HTTP_400_BAD_REQUEST)
obj.deny(request)
return Response(status=status.HTTP_204_NO_CONTENT)
# --- separate source file (repo: yuriy-logosha/myutils) ---
from subprocess import Popen, PIPE
def run(scr):
try:
        command = ['osascript', '-e', scr]
lines = []
with Popen(command, stdout=PIPE, universal_newlines=True) as process:
for line in process.stdout:
lines.append(line)
return lines
except Exception as e:
return e
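# Usage sketch (macOS only; the AppleScript snippet is illustrative):
#   lines = run('display notification "build finished"')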
# --- separate source file: pelita/utils/debug.py ---
# -*- coding: utf-8 -*-
"""Various helper methods."""
import threading
import logging
from pelita.utils import SuspendableThread
_logger = logging.getLogger("pelita.utils")
_logger.setLevel(logging.DEBUG)
__docformat__ = "restructuredtext"
class ThreadInfoLogger(SuspendableThread):
def __init__(self, interval, show_threads=True):
super(ThreadInfoLogger, self).__init__()
self.lvl = logging.DEBUG
self.interval = interval
self.show_threads = show_threads
self.thread.daemon = True
self._wait = threading.Event()
def _run(self):
self._wait.wait(self.interval)
_logger.log(self.lvl, "%d threads alive (including this logger)" % threading.active_count())
if self.show_threads:
_logger.log(self.lvl, ", ".join(str(t) for t in threading.enumerate()))
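# Usage sketch (assumes SuspendableThread exposes the usual start() API; the
# interval value is illustrative):
#   ThreadInfoLogger(interval=10).start()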
# --- separate source file ---
# Database connection settings (edit this file, then rename it to config.py)
db_host = "database host"
db_user = "database username"
db_passwd = "<PASSWORD>"
db_dbname = 'database name'
# --- separate source file ---
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reach force torque sensor arm element used for configuration."""
import dataclasses
from pyreach.gyms import reach_element
@dataclasses.dataclass(frozen=True)
class ReachForceTorqueSensor(reach_element.ReachElement):
"""Represents a Reach Force Torque Sensor configuration.
Attributes:
reach_name: The underlying Reach device type name of the force torque
sensor. May be empty.
is_synchronous: If True, the next Gym observation will synchronize all
      observation elements that have this flag set; otherwise the next
observation is asynchronous. This argument is optional and defaults to
False.
"""
is_synchronous: bool = False
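# Minimal configuration sketch (assumes ReachElement declares the reach_name
# field described in the docstring above):
#   ft_config = ReachForceTorqueSensor(reach_name='', is_synchronous=True)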
# --- separate source file ---
"""The type file for the collection Lab.
Moved out of __init__.py in order to have a lab-specific ACL
that allows a lab member to edit their lab info at any time.
"""
from pyramid.security import (
Allow,
Deny,
Everyone,
)
from base64 import b64encode
from snovault import (
collection,
load_schema,
calculated_property
)
from .base import (
Item
)
ONLY_ADMIN_VIEW = [
(Allow, 'group.admin', ['view', 'edit']),
(Allow, 'group.read-only-admin', ['view']),
(Allow, 'remoteuser.INDEXER', ['view']),
(Allow, 'remoteuser.EMBED', ['view']),
(Deny, Everyone, ['view', 'edit'])
]
SUBMITTER_CREATE = []
ALLOW_EVERYONE_VIEW = [
(Allow, Everyone, 'view'),
]
ALLOW_EVERYONE_VIEW_AND_SUBMITTER_EDIT = [
(Allow, Everyone, 'view'),
(Allow, 'role.lab_submitter', 'edit'),
] + ONLY_ADMIN_VIEW
def _build_lab_embedded_list():
""" Helper function intended to be used to create the embedded list for lab.
All types should implement a function like this going forward.
"""
return Item.embedded_list + [
# Award linkTo
'awards.project',
'awards.name',
'awards.pi.last_name',
'awards.center_title'
]
@collection(
name='labs',
unique_key='lab:name',
properties={
'title': 'Labs',
'description': 'Listing of 4D Nucleome labs',
})
class Lab(Item):
"""Lab class."""
item_type = 'lab'
schema = load_schema('encoded:schemas/lab.json')
name_key = 'name'
embedded_list = _build_lab_embedded_list()
STATUS_ACL = {
'current': ALLOW_EVERYONE_VIEW_AND_SUBMITTER_EDIT,
'deleted': ONLY_ADMIN_VIEW,
'revoked': ALLOW_EVERYONE_VIEW,
'inactive': ALLOW_EVERYONE_VIEW,
}
@calculated_property(schema={
"title": "Correspondence",
"description": "Point of contact(s) for this Lab.",
"type": "array",
"uniqueItems": True,
"items": {
"title": "Lab Contact - Public Snippet",
"description": "A User associated with the lab who is also a point of contact.",
"type": "object",
"additionalProperties": False,
"properties": {
"display_title": {
"type": "string"
},
"contact_email": {
"type": "string",
"format": "email"
},
"@id": {
"type": "string"
}
}
}
})
def correspondence(self, request, pi=None, contact_persons=None):
"""
Definitive list of users (linkTo User) who are designated as point of contact(s) for this Lab.
Returns:
List of @IDs which refer to either PI or alternate list of contacts defined in `contact_persons`.
"""
contact_people = None
if contact_persons:
contact_people = contact_persons
elif pi:
contact_people = [pi]
def fetch_and_pick_embedded_properties(person_at_id):
            '''Fetch the person and return only the properties needed for the contact snippet.'''
try:
person = request.embed(person_at_id, '@@object')
except Exception:
return None
encoded_email = b64encode(person['contact_email'].encode('utf-8')).decode('utf-8') if person.get('contact_email') else None
return {
"contact_email": encoded_email, # Security against web scrapers
"@id": person.get('@id'),
"display_title": person.get('display_title')
}
if contact_people is not None:
contact_people_dicts = [ fetch_and_pick_embedded_properties(person) for person in contact_people ]
return [ person for person in contact_people_dicts if person is not None ]
def __init__(self, registry, models):
super().__init__(registry, models)
if hasattr(self, 'STATUS_ACL'):
self.STATUS_ACL.update(self.__class__.STATUS_ACL)
else:
self.STATUS_ACL = self.__class__.STATUS_ACL
def __ac_local_roles__(self):
"""This creates roles that the lab item needs so it can be edited & viewed"""
roles = {}
lab_submitters = 'submits_for.%s' % self.uuid
roles[lab_submitters] = 'role.lab_submitter'
lab_member = 'lab.%s' % self.uuid
roles[lab_member] = 'role.lab_member'
return roles
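# For illustration: a lab with uuid 'abc-123' yields
#   {'submits_for.abc-123': 'role.lab_submitter', 'lab.abc-123': 'role.lab_member'},
# which is how 'role.lab_submitter' gains the edit permission granted in
# ALLOW_EVERYONE_VIEW_AND_SUBMITTER_EDIT for labs with status 'current'.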
# --- separate source file ---
from .alipay import alipay
# --- separate source file (repo: Alecto3-D/testable-greeter) ---
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import json
import os
import stat
from twisted.internet import defer
from twisted.python import log
from buildbot import config
from buildbot.interfaces import WorkerTooOldError
from buildbot.process import remotecommand
from buildbot.process import remotetransfer
from buildbot.process.buildstep import FAILURE
from buildbot.process.buildstep import SKIPPED
from buildbot.process.buildstep import SUCCESS
from buildbot.process.buildstep import BuildStep
from buildbot.steps.worker import CompositeStepMixin
from buildbot.util import flatten
from buildbot.util.eventual import eventually
from buildbot.worker_transition import WorkerAPICompatMixin
from buildbot.worker_transition import reportDeprecatedWorkerNameUsage
def makeStatusRemoteCommand(step, remote_command, args):
self = remotecommand.RemoteCommand(
remote_command, args, decodeRC={None: SUCCESS, 0: SUCCESS})
self.useLogDelayed('stdio', lambda arg: step.step_status.addLog('stdio'), True)
return self
class _TransferBuildStep(BuildStep):
"""
Base class for FileUpload and FileDownload to factor out common
functionality.
"""
renderables = ['workdir']
haltOnFailure = True
flunkOnFailure = True
def __init__(self, workdir=None, **buildstep_kwargs):
BuildStep.__init__(self, **buildstep_kwargs)
self.workdir = workdir
def runTransferCommand(self, cmd, writer=None):
# Run a transfer step, add a callback to extract the command status,
# add an error handler that cancels the writer.
self.cmd = cmd
d = self.runCommand(cmd)
@d.addCallback
def checkResult(_):
if writer and cmd.didFail():
writer.cancel()
return FAILURE if cmd.didFail() else SUCCESS
@d.addErrback
def cancel(res):
if writer:
writer.cancel()
return res
return d
def interrupt(self, reason):
self.addCompleteLog('interrupt', str(reason))
if self.cmd:
d = self.cmd.interrupt(reason)
return d
class FileUpload(_TransferBuildStep, WorkerAPICompatMixin):
name = 'upload'
renderables = ['workersrc', 'masterdest', 'url']
def __init__(self, workersrc=None, masterdest=None,
workdir=None, maxsize=None, blocksize=16 * 1024, mode=None,
keepstamp=False, url=None, urlText=None,
slavesrc=None, # deprecated, use `workersrc` instead
**buildstep_kwargs):
# Deprecated API support.
if slavesrc is not None:
reportDeprecatedWorkerNameUsage(
"'slavesrc' keyword argument is deprecated, "
"use 'workersrc' instead")
assert workersrc is None
workersrc = slavesrc
# Emulate that first two arguments are positional.
if workersrc is None or masterdest is None:
raise TypeError("__init__() takes at least 3 arguments")
_TransferBuildStep.__init__(self, workdir=workdir, **buildstep_kwargs)
self.workersrc = workersrc
self._registerOldWorkerAttr("workersrc")
self.masterdest = masterdest
self.maxsize = maxsize
self.blocksize = blocksize
if not isinstance(mode, (int, type(None))):
config.error(
'mode must be an integer or None')
self.mode = mode
self.keepstamp = keepstamp
self.url = url
self.urlText = urlText
def finished(self, results):
log.msg("File '{}' upload finished with results {}".format(
os.path.basename(self.workersrc), str(results)))
self.step_status.setText(self.descriptionDone)
_TransferBuildStep.finished(self, results)
def start(self):
self.checkWorkerHasCommand("uploadFile")
source = self.workersrc
masterdest = self.masterdest
# we rely upon the fact that the buildmaster runs chdir'ed into its
# basedir to make sure that relative paths in masterdest are expanded
# properly. TODO: maybe pass the master's basedir all the way down
# into the BuildStep so we can do this better.
masterdest = os.path.expanduser(masterdest)
log.msg("FileUpload started, from worker %r to master %r"
% (source, masterdest))
if self.description is None:
self.description = ['uploading %s' % (os.path.basename(source))]
if self.descriptionDone is None:
self.descriptionDone = self.description
if self.url is not None:
urlText = self.urlText
if urlText is None:
urlText = os.path.basename(masterdest)
self.addURL(urlText, self.url)
self.step_status.setText(self.description)
# we use maxsize to limit the amount of data on both sides
fileWriter = remotetransfer.FileWriter(
masterdest, self.maxsize, self.mode)
if self.keepstamp and self.workerVersionIsOlderThan("uploadFile", "2.13"):
m = ("This worker (%s) does not support preserving timestamps. "
"Please upgrade the worker." % self.build.workername)
raise WorkerTooOldError(m)
# default arguments
args = {
'workdir': self.workdir,
'writer': fileWriter,
'maxsize': self.maxsize,
'blocksize': self.blocksize,
'keepstamp': self.keepstamp,
}
if self.workerVersionIsOlderThan('uploadFile', '3.0'):
args['slavesrc'] = source
else:
args['workersrc'] = source
cmd = makeStatusRemoteCommand(self, 'uploadFile', args)
d = self.runTransferCommand(cmd, fileWriter)
d.addCallback(self.finished).addErrback(self.failed)
class DirectoryUpload(_TransferBuildStep, WorkerAPICompatMixin):
name = 'upload'
renderables = ['workersrc', 'masterdest', 'url']
def __init__(self, workersrc=None, masterdest=None,
workdir=None, maxsize=None, blocksize=16 * 1024,
compress=None, url=None,
slavesrc=None, # deprecated, use `workersrc` instead
**buildstep_kwargs
):
# Deprecated API support.
if slavesrc is not None:
reportDeprecatedWorkerNameUsage(
"'slavesrc' keyword argument is deprecated, "
"use 'workersrc' instead")
assert workersrc is None
workersrc = slavesrc
# Emulate that first two arguments are positional.
if workersrc is None or masterdest is None:
raise TypeError("__init__() takes at least 3 arguments")
_TransferBuildStep.__init__(self, workdir=workdir, **buildstep_kwargs)
self.workersrc = workersrc
self._registerOldWorkerAttr("workersrc")
self.masterdest = masterdest
self.maxsize = maxsize
self.blocksize = blocksize
if compress not in (None, 'gz', 'bz2'):
config.error(
"'compress' must be one of None, 'gz', or 'bz2'")
self.compress = compress
self.url = url
def start(self):
self.checkWorkerHasCommand("uploadDirectory")
source = self.workersrc
masterdest = self.masterdest
# we rely upon the fact that the buildmaster runs chdir'ed into its
# basedir to make sure that relative paths in masterdest are expanded
# properly. TODO: maybe pass the master's basedir all the way down
# into the BuildStep so we can do this better.
masterdest = os.path.expanduser(masterdest)
log.msg("DirectoryUpload started, from worker %r to master %r"
% (source, masterdest))
self.descriptionDone = "uploading %s" % os.path.basename(source)
if self.url is not None:
self.addURL(
os.path.basename(os.path.normpath(masterdest)), self.url)
# we use maxsize to limit the amount of data on both sides
dirWriter = remotetransfer.DirectoryWriter(
masterdest, self.maxsize, self.compress, 0o600)
# default arguments
args = {
'workdir': self.workdir,
'writer': dirWriter,
'maxsize': self.maxsize,
'blocksize': self.blocksize,
'compress': self.compress
}
if self.workerVersionIsOlderThan('uploadDirectory', '3.0'):
args['slavesrc'] = source
else:
args['workersrc'] = source
cmd = makeStatusRemoteCommand(self, 'uploadDirectory', args)
d = self.runTransferCommand(cmd, dirWriter)
d.addCallback(self.finished).addErrback(self.failed)
class MultipleFileUpload(_TransferBuildStep, WorkerAPICompatMixin,
CompositeStepMixin):
name = 'upload'
logEnviron = False
renderables = ['workersrcs', 'masterdest', 'url']
def __init__(self, workersrcs=None, masterdest=None,
workdir=None, maxsize=None, blocksize=16 * 1024, glob=False,
mode=None, compress=None, keepstamp=False, url=None,
slavesrcs=None, # deprecated, use `workersrcs` instead
**buildstep_kwargs):
# Deprecated API support.
if slavesrcs is not None:
reportDeprecatedWorkerNameUsage(
"'slavesrcs' keyword argument is deprecated, "
"use 'workersrcs' instead")
assert workersrcs is None
workersrcs = slavesrcs
# Emulate that first two arguments are positional.
if workersrcs is None or masterdest is None:
raise TypeError("__init__() takes at least 3 arguments")
_TransferBuildStep.__init__(self, workdir=workdir, **buildstep_kwargs)
self.workersrcs = workersrcs
self._registerOldWorkerAttr("workersrcs")
self.masterdest = masterdest
self.maxsize = maxsize
self.blocksize = blocksize
if not isinstance(mode, (int, type(None))):
config.error(
'mode must be an integer or None')
self.mode = mode
if compress not in (None, 'gz', 'bz2'):
config.error(
"'compress' must be one of None, 'gz', or 'bz2'")
self.compress = compress
self.glob = glob
self.keepstamp = keepstamp
self.url = url
def uploadFile(self, source, masterdest):
fileWriter = remotetransfer.FileWriter(
masterdest, self.maxsize, self.mode)
args = {
'workdir': self.workdir,
'writer': fileWriter,
'maxsize': self.maxsize,
'blocksize': self.blocksize,
'keepstamp': self.keepstamp,
}
if self.workerVersionIsOlderThan('uploadFile', '3.0'):
args['slavesrc'] = source
else:
args['workersrc'] = source
cmd = makeStatusRemoteCommand(self, 'uploadFile', args)
return self.runTransferCommand(cmd, fileWriter)
def uploadDirectory(self, source, masterdest):
dirWriter = remotetransfer.DirectoryWriter(
masterdest, self.maxsize, self.compress, 0o600)
args = {
'workdir': self.workdir,
'writer': dirWriter,
'maxsize': self.maxsize,
'blocksize': self.blocksize,
'compress': self.compress
}
if self.workerVersionIsOlderThan('uploadDirectory', '3.0'):
args['slavesrc'] = source
else:
args['workersrc'] = source
cmd = makeStatusRemoteCommand(self, 'uploadDirectory', args)
return self.runTransferCommand(cmd, dirWriter)
def startUpload(self, source, destdir):
masterdest = os.path.join(destdir, os.path.basename(source))
args = {
'file': source,
'workdir': self.workdir
}
cmd = makeStatusRemoteCommand(self, 'stat', args)
d = self.runCommand(cmd)
@d.addCallback
def checkStat(_):
s = cmd.updates['stat'][-1]
if stat.S_ISDIR(s[stat.ST_MODE]):
return self.uploadDirectory(source, masterdest)
elif stat.S_ISREG(s[stat.ST_MODE]):
return self.uploadFile(source, masterdest)
return defer.fail('%r is neither a regular file, nor a directory' % source)
@d.addCallback
def uploadDone(result):
d = defer.maybeDeferred(
self.uploadDone, result, source, masterdest)
d.addCallback(lambda _: result)
return d
return d
def uploadDone(self, result, source, masterdest):
pass
def allUploadsDone(self, result, sources, masterdest):
if self.url is not None:
self.addURL(
os.path.basename(os.path.normpath(masterdest)), self.url)
def start(self):
self.checkWorkerHasCommand("uploadDirectory")
self.checkWorkerHasCommand("uploadFile")
self.checkWorkerHasCommand("stat")
masterdest = os.path.expanduser(self.masterdest)
sources = self.workersrcs if isinstance(self.workersrcs, list) else [self.workersrcs]
if self.keepstamp and self.workerVersionIsOlderThan("uploadFile", "2.13"):
m = ("This worker (%s) does not support preserving timestamps. "
"Please upgrade the worker." % self.build.workername)
raise WorkerTooOldError(m)
if not sources:
return self.finished(SKIPPED)
@defer.inlineCallbacks
def globSources(sources):
dl = defer.DeferredList([
self.runGlob(
os.path.join(self.workdir, source), abandonOnFailure=False) for source in sources
])
results = yield dl
results = [
result[1]
for result in filter(lambda result: result[0], results)
]
results = flatten(results)
defer.returnValue(results)
@defer.inlineCallbacks
def uploadSources(sources):
if not sources:
defer.returnValue(SKIPPED)
else:
for source in sources:
result = yield self.startUpload(source, masterdest)
if result == FAILURE:
defer.returnValue(FAILURE)
defer.returnValue(SUCCESS)
def logUpload(sources):
log.msg("MultipleFileUpload started, from worker %r to master %r" %
(sources, masterdest))
nsrcs = len(sources)
self.descriptionDone = 'uploading %d %s' % (nsrcs, 'file'
if nsrcs == 1 else
'files')
return sources
if self.glob:
s = globSources(sources)
else:
s = defer.succeed(sources)
s.addCallback(logUpload)
d = s.addCallback(uploadSources)
@d.addCallback
def allUploadsDone(result):
d = defer.maybeDeferred(
self.allUploadsDone, result, sources, masterdest)
d.addCallback(lambda _: result)
return d
d.addCallback(self.finished).addErrback(self.failed)
def finished(self, result):
return BuildStep.finished(self, result)
class FileDownload(_TransferBuildStep, WorkerAPICompatMixin):
name = 'download'
renderables = ['mastersrc', 'workerdest']
def __init__(self, mastersrc, workerdest=None,
workdir=None, maxsize=None, blocksize=16 * 1024, mode=None,
slavedest=None, # deprecated, use `workerdest` instead
**buildstep_kwargs):
# Deprecated API support.
if slavedest is not None:
reportDeprecatedWorkerNameUsage(
"'slavedest' keyword argument is deprecated, "
"use 'workerdest' instead")
assert workerdest is None
workerdest = slavedest
# Emulate that first two arguments are positional.
if workerdest is None:
raise TypeError("__init__() takes at least 3 arguments")
_TransferBuildStep.__init__(self, workdir=workdir, **buildstep_kwargs)
self.mastersrc = mastersrc
self.workerdest = workerdest
self._registerOldWorkerAttr("workerdest")
self.maxsize = maxsize
self.blocksize = blocksize
if not isinstance(mode, (int, type(None))):
config.error(
'mode must be an integer or None')
self.mode = mode
def start(self):
self.checkWorkerHasCommand("downloadFile")
# we are currently in the buildmaster's basedir, so any non-absolute
# paths will be interpreted relative to that
source = os.path.expanduser(self.mastersrc)
workerdest = self.workerdest
log.msg("FileDownload started, from master %r to worker %r" %
(source, workerdest))
self.descriptionDone = "downloading to %s" % os.path.basename(
workerdest)
# setup structures for reading the file
try:
fp = open(source, 'rb')
except IOError:
# if file does not exist, bail out with an error
self.addCompleteLog('stderr',
'File %r not available at master' % source)
# TODO: once BuildStep.start() gets rewritten to use
# maybeDeferred, just re-raise the exception here.
eventually(BuildStep.finished, self, FAILURE)
return
fileReader = remotetransfer.FileReader(fp)
# default arguments
args = {
'maxsize': self.maxsize,
'reader': fileReader,
'blocksize': self.blocksize,
'workdir': self.workdir,
'mode': self.mode,
}
if self.workerVersionIsOlderThan('downloadFile', '3.0'):
args['slavedest'] = workerdest
else:
args['workerdest'] = workerdest
cmd = makeStatusRemoteCommand(self, 'downloadFile', args)
d = self.runTransferCommand(cmd)
d.addCallback(self.finished).addErrback(self.failed)
class StringDownload(_TransferBuildStep, WorkerAPICompatMixin):
name = 'string_download'
renderables = ['workerdest', 's']
def __init__(self, s, workerdest=None,
workdir=None, maxsize=None, blocksize=16 * 1024, mode=None,
slavedest=None, # deprecated, use `workerdest` instead
**buildstep_kwargs):
# Deprecated API support.
if slavedest is not None:
reportDeprecatedWorkerNameUsage(
"'slavedest' keyword argument is deprecated, "
"use 'workerdest' instead")
assert workerdest is None
workerdest = slavedest
# Emulate that first two arguments are positional.
if workerdest is None:
raise TypeError("__init__() takes at least 3 arguments")
_TransferBuildStep.__init__(self, workdir=workdir, **buildstep_kwargs)
self.s = s
self.workerdest = workerdest
self._registerOldWorkerAttr("workerdest")
self.maxsize = maxsize
self.blocksize = blocksize
if not isinstance(mode, (int, type(None))):
config.error(
"StringDownload step's mode must be an integer or None,"
" got '%s'" % mode)
self.mode = mode
def start(self):
# we use 'downloadFile' remote command on the worker
self.checkWorkerHasCommand("downloadFile")
# we are currently in the buildmaster's basedir, so any non-absolute
# paths will be interpreted relative to that
workerdest = self.workerdest
log.msg("StringDownload started, from master to worker %r" %
workerdest)
self.descriptionDone = "downloading to %s" % os.path.basename(
workerdest)
# setup structures for reading the file
fileReader = remotetransfer.StringFileReader(self.s)
# default arguments
args = {
'maxsize': self.maxsize,
'reader': fileReader,
'blocksize': self.blocksize,
'workdir': self.workdir,
'mode': self.mode,
}
if self.workerVersionIsOlderThan('downloadFile', '3.0'):
args['slavedest'] = workerdest
else:
args['workerdest'] = workerdest
cmd = makeStatusRemoteCommand(self, 'downloadFile', args)
d = self.runTransferCommand(cmd)
d.addCallback(self.finished).addErrback(self.failed)
class JSONStringDownload(StringDownload, WorkerAPICompatMixin):
name = "json_download"
def __init__(self, o, workerdest=None,
slavedest=None, # deprecated, use `workerdest` instead
**buildstep_kwargs):
# Deprecated API support.
if slavedest is not None:
reportDeprecatedWorkerNameUsage(
"'slavedest' keyword argument is deprecated, "
"use 'workerdest' instead")
assert workerdest is None
workerdest = slavedest
# Emulate that first two arguments are positional.
if workerdest is None:
raise TypeError("__init__() takes at least 3 arguments")
if 's' in buildstep_kwargs:
del buildstep_kwargs['s']
s = json.dumps(o)
StringDownload.__init__(
self, s=s, workerdest=workerdest, **buildstep_kwargs)
class JSONPropertiesDownload(StringDownload, WorkerAPICompatMixin):
name = "json_properties_download"
def __init__(self, workerdest=None,
slavedest=None, # deprecated, use `workerdest` instead
**buildstep_kwargs):
# Deprecated API support.
if slavedest is not None:
reportDeprecatedWorkerNameUsage(
"'slavedest' keyword argument is deprecated, "
"use 'workerdest' instead")
assert workerdest is None
workerdest = slavedest
# Emulate that first two arguments are positional.
if workerdest is None:
raise TypeError("__init__() takes at least 2 arguments")
self.super_class = StringDownload
if 's' in buildstep_kwargs:
del buildstep_kwargs['s']
StringDownload.__init__(
self, s=None, workerdest=workerdest, **buildstep_kwargs)
def start(self):
properties = self.build.getProperties()
props = {}
for key, value, source in properties.asList():
props[key] = value
self.s = json.dumps(dict(
properties=props,
sourcestamps=[ss.asDict()
for ss in self.build.getAllSourceStamps()],
),
)
return self.super_class.start(self)
# --- separate source file ---
"""Constants used in Integer.
"""
MAX_INT = 2 ** 31 - 1
FAILED = -2147483646.0
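# For reference: MAX_INT == 2147483647, and FAILED == -(2 ** 31 - 2) stored as
# a float sentinel just inside the 32-bit integer range.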
# --- separate source file ---
import re
from helper_methods import get_date
class ImportTable:
"""
This class creates a table to help with importing data into the database
Attributes
----------
table_name : str
the file name of the imported CSV or XLSX file
column_names : str
column names
column_names_types : str
column names and types in the dataframe
import_table : Pandas.DataFrame
the dataframe of the imported CSV or XLSX file data
"""
def __init__(self, file_path, import_table):
"""
Parameters
----------
file_path : str
the file path of the imported CSV or XLSX file
import_table : pandas.DataFrame
the dataframe of the imported CSV or XLSX file data
"""
self.table_name = self._get_file_name(file_path)
self.column_names = self._get_dataframe_column_names(import_table)
self.column_names_types = self._get_column_names_types_string(
import_table)
self.import_table = import_table
def _get_column_names_types_string(self, dataframe):
"""
Get column names and types and convert them into a string for the create table query
Parameters
----------
dataframe : pandas.DataFrame
the dataframe to get column names and data types from
Returns
-------
str
column names and types in the dataframe
"""
dictionary_types = self._get_dataframe_column_types(dataframe)
return ','.join(['Id integer'] + list(
{f'{key} {dictionary_types[key]}' for key in dictionary_types}))
def _get_dataframe_column_names(self, dataframe):
"""
Get the column names from a dataframe
Parameters
----------
dataframe : pandas.DataFrame
the dataframe to get column names from
Returns
-------
str
column names
"""
return ','.join(['Id'] + list(dataframe.columns))
def _get_dataframe_column_types(self, dataframe):
"""
        Get the column data types from a dataframe and convert them to valid sqlite3 column types
Parameters
----------
dataframe : pandas.DataFrame
the dataframe to get column data types from
Returns
-------
dict
valid sqlite3 column data types
"""
dictionary_types = dict(dataframe.dtypes)
for key in dictionary_types:
if (dictionary_types[key] == 'int64'):
dictionary_types[key] = 'INTEGER'
elif (dictionary_types[key] == 'float64'):
dictionary_types[key] = 'REAL'
else:
dictionary_types[key] = 'TEXT'
return dictionary_types
def _get_file_name(self, path):
"""
Get the file name from a file path
Parameters
----------
path : str
the path of the file to get the name from
Returns
-------
str
a file name without file type or a default table name
"""
path_string = ' '
date = get_date()
if '/' in path:
path_string = path.split('/')[-1].split(".")[0]
elif '\\' in path:
path_string = path.split('\\')[-1].split(".")[0]
return path_string if re.match("^[A-Za-z0-9_-]*$", path_string) or not path_string else f'DefaultTable{date}'
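# Usage sketch (assumes pandas is installed; the path and data are illustrative):
#   import pandas as pd
#   df = pd.DataFrame({'name': ['a'], 'score': [1.5]})
#   table = ImportTable('/tmp/scores.csv', df)
#   table.table_name          # 'scores'
#   table.column_names        # 'Id,name,score'
#   table.column_names_types  # e.g. 'Id integer,name TEXT,score REAL' (set order may vary)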