import logging
import telnetlib
import random
import redis
import json
import os
import threading
import pdb
from scrapy import signals
from .user_agents_pc import agents
from .proxy import initIPPOOLS, updateIPPOOLS
from .cookie import initCookie, updateCookie, removeCookie
from scrapy.utils.response import response_status_message
from scrapy.downloadermiddlewares.retry import RetryMiddleware
from scrapy.exceptions import IgnoreRequest
# ------------------------------------------
# Version: 1.0
# Date: 2017-08-06
# Author: AlexTan
# <CSDN: http://blog.csdn.net/alextan_>
# <e-mail: [email protected]>
# ------------------------------------------
logger = logging.getLogger(__name__)
class UserAgentMiddleware(object):
""" 换User-Agent """
def process_request(self, request, spider):
agent = random.choice(agents)
request.headers["User-Agent"] = agent
class ProxyMiddleware(RetryMiddleware):
'''Rotate proxy IPs drawn from a Redis-backed pool'''
def __init__(self, settings, crawler):
# self-fetched proxy IPs
self.TIMES = 10
self.IP = ""  # currently selected proxy; empty until one has been picked
RetryMiddleware.__init__(self, settings)
self.rconn = settings.get("RCONN", redis.Redis(crawler.settings.get('REDIS_HOST', 'localhost'), crawler.settings.get('REDIS_PORT', 6379)))
#initIPPOOLS(self.rconn)
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings, crawler)
def process_request(self,request,spider):
#pdb.set_trace()
ipNum=len(self.rconn.keys('IP*'))
#pdb.set_trace()
if ipNum<50:
proxy_thread = threading.Thread(target= initIPPOOLS,args = (self.rconn,))
proxy_thread.setDaemon(True)
proxy_thread.start()
#initIPPOOLS(self.rconn)
if self.TIMES == 3:
baseIP=random.choice(self.rconn.keys('IP:*'))
ip=str(baseIP,'utf-8').replace('IP:','')
try:
IP,PORT,status=ip.split(':')
request.meta['status'] = status
telnetlib.Telnet(IP,port=PORT,timeout=2) # probe whether the proxy is reachable
except:
logger.warning("The ip is not available !( IP:%s )" % ip)
updateIPPOOLS(self.rconn,IP+':'+PORT,status)
else:
#pdb.set_trace()
self.IP = "http://" + IP + ':' + PORT
logger.warning("The current IP is %s!" % self.IP)
self.TIMES = 0
updateIPPOOLS(self.rconn,IP+':'+PORT,status,1)
#pdb.set_trace()
else:
self.TIMES += 1
#pdb.set_trace()
if self.IP != "":
request.meta["proxy"] = self.IP
def process_response(self,request,response,spider):
if response.status in [400,403,404,429,500,502,503,504]:
self.TIMES = 3
logger.error("%s! error..." % response.status)
#pdb.set_trace()
try:
updateIPPOOLS(self.rconn,request.meta['proxy'].replace('http://',''),request.meta['status'],-1)
except:
pass
reason = response_status_message(response.status)
return self._retry(request, reason, spider) or response # retry
else:
return response
def process_exception(self, request, exception, spider):
#pdb.set_trace()
self.TIMES = 3
try:
updateIPPOOLS(self.rconn,request.meta['proxy'].replace('http://',''),request.meta['status'],-1)
except:
pass
return request
class CookiesMiddleware(RetryMiddleware):
""" 维护Cookie """
def __init__(self, settings, crawler):
RetryMiddleware.__init__(self, settings)
self.rconn = settings.get("RCONN", redis.Redis(crawler.settings.get('REDIS_HOST', 'localhsot'), crawler.settings.get('REDIS_PORT', 6379)))
initCookie(self.rconn, crawler.spider.name)
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings, crawler)
def process_request(self, request, spider):
redisKeys = self.rconn.keys()
while len(redisKeys) > 0:
elem = random.choice(redisKeys)
#pdb.set_trace()
if b'zhihuspider:Cookies' in elem:
#pdb.set_trace()
elem = str(elem,'utf-8')
cookie = json.loads(str(self.rconn.get(elem),'utf-8'))
request.cookies = cookie
request.meta["accountText"] = elem.split("Cookies:")[-1]
break
else:
#pdb.set_trace()
redisKeys.remove(elem)
def process_response(self, request, response, spider):
#pdb.set_trace()
reason = response_status_message(response.status)
if response.status in [300, 301, 302, 303]:
if reason == '301 Moved Permanently':
return self._retry(request, reason, spider) or response # retry
else:
raise IgnoreRequest
elif response.status in [403, 414]:
logger.error("%s! Stopping..." % response.status)
os.system("pause")
updateCookie(request.meta['accountText'], self.rconn, spider.name, request.cookies)
return self._retry(request, reason, spider) or response # retry
else:
return response
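# A minimal sketch (not part of the original file) of how these middlewares
# might be enabled in the project's settings.py. The dotted module path follows
# the repository layout (zhihu/zhihu/middlewares.py); the priority numbers and
# Redis values are illustrative assumptions only.
#
# DOWNLOADER_MIDDLEWARES = {
#     'zhihu.middlewares.UserAgentMiddleware': 401,
#     'zhihu.middlewares.ProxyMiddleware': 402,
#     'zhihu.middlewares.CookiesMiddleware': 403,
# }
# REDIS_HOST = '127.0.0.1'  # read by the middlewares above
# REDIS_PORT = 6379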
| {
"content_hash": "941c34fdb34e9f3d7b5c882fa4eb2bd1",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 146,
"avg_line_length": 36.66206896551724,
"alnum_prop": 0.5718585402558315,
"repo_name": "AlexTan-b-z/ZhihuSpider",
"id": "67921b595f19ce678571722de6ecfb7847271f83",
"size": "5403",
"binary": false,
"copies": "1",
"ref": "refs/heads/V2.0",
"path": "zhihu/zhihu/middlewares.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "81853"
}
],
"symlink_target": ""
} |
import unittest
import os
import fileops
class TestReadWriteFile(unittest.TestCase):
"""Test case to verify list read/write functionality."""
def setUp(self):
"""This function is run before each test."""
self.fixture_file = r"v:\workspace\FileHandling\src\test-read-write.txt"
self.fixture_list = ["my", "written", "text"]
self.fixture_list_empty_strings = ["my", "", "", "written", "text"]
self.fixture_list_trailing_empty_strings = ["my", "written", "text", "", ""]
def verify_file(self, fixture_list):
"""Verifies that a given list, when written to a file,
is returned by reading the same file."""
fileops.write_list(self.fixture_file, fixture_list)
observed = fileops.read_list(self.fixture_file)
self.assertEqual(observed, fixture_list,
"%s does not equal %s" % (observed, fixture_list))
def test_read_write_list(self):
self.verify_file(self.fixture_list)
def test_read_write_list_empty_strings(self):
self.verify_file(self.fixture_list_empty_strings)
def test_read_write_list_trailing_empty_strings(self):
self.verify_file(self.fixture_list_trailing_empty_strings)
def tearDown(self):
"""This function is run after each test."""
try:
os.remove(self.fixture_file)
except OSError:
pass
if __name__ == "__main__":
unittest.main()
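# The fileops module exercised above is not included here. A minimal sketch of
# the two functions the tests rely on, assuming write_list stores one element
# per line and read_list returns the lines in order (only the names write_list
# and read_list come from the tests; the bodies are illustrative):
#
# def write_list(path, items):
#     """Write each item of the list on its own line."""
#     with open(path, "w") as handle:
#         handle.write("\n".join(items))
#
# def read_list(path):
#     """Read the file back into a list of lines."""
#     with open(path) as handle:
#         return handle.read().split("\n")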
| {
"content_hash": "fb834813897dadb47e385f072c3af7d6",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 84,
"avg_line_length": 37.55,
"alnum_prop": 0.6051930758988016,
"repo_name": "ceeblet/OST_PythonCertificationTrack",
"id": "b7129b188db2309653dbd32d27bbcf532f437466",
"size": "1502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python2/FileHandling/src/test_fileops.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "198495"
}
],
"symlink_target": ""
} |
"""Collection of distribution implementations."""
from chainer.distributions.bernoulli import Bernoulli # NOQA
from chainer.distributions.beta import Beta # NOQA
from chainer.distributions.categorical import Categorical # NOQA
from chainer.distributions.cauchy import Cauchy # NOQA
from chainer.distributions.chisquare import Chisquare # NOQA
from chainer.distributions.dirichlet import Dirichlet # NOQA
from chainer.distributions.exponential import Exponential # NOQA
from chainer.distributions.gamma import Gamma # NOQA
from chainer.distributions.geometric import Geometric # NOQA
from chainer.distributions.gumbel import Gumbel # NOQA
from chainer.distributions.laplace import Laplace # NOQA
from chainer.distributions.log_normal import LogNormal # NOQA
from chainer.distributions.multivariate_normal import MultivariateNormal # NOQA
from chainer.distributions.normal import Normal # NOQA
from chainer.distributions.one_hot_categorical import OneHotCategorical # NOQA
from chainer.distributions.pareto import Pareto # NOQA
from chainer.distributions.poisson import Poisson # NOQA
from chainer.distributions.uniform import Uniform # NOQA
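# Brief usage sketch (not part of this package file): constructing one of the
# distributions re-exported above and querying it. The loc/scale keyword names
# and the sample/log_prob methods are assumed from the chainer.distributions
# API; treat the snippet as illustrative.
#
# import numpy as np
# from chainer import distributions
#
# d = distributions.Normal(loc=np.zeros(3, dtype=np.float32),
#                          scale=np.ones(3, dtype=np.float32))
# samples = d.sample(sample_shape=(5,))  # chainer.Variable with shape (5, 3)
# log_p = d.log_prob(samples)            # element-wise log-density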
| {
"content_hash": "7c035e8df85c7ebfb4e0e14c39ee3d90",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 80,
"avg_line_length": 57.95,
"alnum_prop": 0.8308886971527178,
"repo_name": "jnishi/chainer",
"id": "db73f2bd295369a15e6252c1b85f1fbd67633784",
"size": "1159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainer/distributions/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "C",
"bytes": "70"
},
{
"name": "C++",
"bytes": "1460543"
},
{
"name": "CMake",
"bytes": "42279"
},
{
"name": "Cuda",
"bytes": "53858"
},
{
"name": "Dockerfile",
"bytes": "1457"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "5121452"
},
{
"name": "Shell",
"bytes": "22130"
}
],
"symlink_target": ""
} |
from django.contrib import admin
#from models import Blog
#class BlogAdmin(admin.ModelAdmin):
# list_display = ('key', 'address_from', 'block_index', 'tx_id')
# search_fields = ('key', 'address_from', 'block_index', 'tx_id')
#admin.site.register(Blog, BlogAdmin)
from models import Transaction
class TransactionAdmin(admin.ModelAdmin):
list_display = ('tx_id', 'peercoin_address', 'payload_retrieved', 'payload_executed', 'pm_key', 'pm_payload')
search_fields = ('tx_id', 'peercoin_address', 'payload_retrieved', 'payload_executed', 'pm_key', 'pm_payload')
admin.site.register(Transaction, TransactionAdmin)
from models import Listing
class ListingAdmin(admin.ModelAdmin):
list_display = ('tx_id', 'category', 'subcategory', 'quantity', 'requested_peercoin', 'peercoin_address')
search_fields = ('tx_id', 'category', 'subcategory', 'peercoin_address')
admin.site.register(Listing, ListingAdmin)
from models import Message
class MessageAdmin(admin.ModelAdmin):
list_display = ('tx_id', 'listing_tx_id', 'offer_tx_id', 'peercoin_address', 'message')
search_fields = ('tx_id', 'listing_tx_id', 'offer_tx_id', 'peercoin_address')
admin.site.register(Message, MessageAdmin)
from models import Offer
class OfferAdmin(admin.ModelAdmin):
list_display = ('tx_id', 'listing_tx_id', 'quantity', 'offered_peercoin', 'peercoin_address', 'offer_status', 'tx_id_status_change')
search_fields = ('tx_id', 'listing_tx_id', 'peercoin_address', 'offer_status', 'tx_id_status_change')
admin.site.register(Offer, OfferAdmin)
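# The Transaction, Listing, Message and Offer models registered above live in
# models.py, which is not shown. Purely as an illustration, the Transaction
# model might look roughly like the sketch below; the field names come from
# the admin configuration, but every field type is an assumption.
#
# from django.db import models
#
# class Transaction(models.Model):
#     tx_id = models.CharField(max_length=64, unique=True)
#     peercoin_address = models.CharField(max_length=64)
#     payload_retrieved = models.BooleanField(default=False)
#     payload_executed = models.BooleanField(default=False)
#     pm_key = models.CharField(max_length=255)
#     pm_payload = models.TextField()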
| {
"content_hash": "59d476d9c82f79d403197d7878efbd93",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 136,
"avg_line_length": 49.903225806451616,
"alnum_prop": 0.7117000646412411,
"repo_name": "Peerapps/PeerMarket-Server",
"id": "94025c30efe015d3153cba04e7b3933a8bc4eb7b",
"size": "1547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peermarket/admin.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "52615"
},
{
"name": "HTML",
"bytes": "13206"
},
{
"name": "JavaScript",
"bytes": "582330"
},
{
"name": "Python",
"bytes": "295180"
}
],
"symlink_target": ""
} |
from sikuli import *
import os
from keywordgroup import KeywordGroup
class _ApplicationKeywords(KeywordGroup):
def __init__(self):
self.application_name = None
self.application_path = None
# Public
def set_application_focus(self, app_name):
"""Sets focus to the open ``application`` matching the given ``app_name``.
Example:
| Set Application Focus | My Awesome App | # Sets the focus to My Awesome App |
"""
self._info("Setting focus at application '%s'." % app_name)
self._set_application_name(app_name)
try:
App(self.application_name).focus()
except FindFailed, err:
raise AssertionError("Application '%s' not found." % (app_name))
def switch_application_focus(self, app_name):
"""Switches focus to the open ``application`` matching the given ``app_name``.
Example:
| Set Application Focus | My Awesome App | # Switches the focus to `My Awesome App` application |
| Switch Application Focus | My Very Awesome App | # Switches the focus to `My Very Awesome App` |
"""
self._info("Switching focus to application '%s'." % app_name)
self._set_application_name(app_name)
try:
switchApp(self.application_name)
except FindFailed, err:
raise AssertionError("Application '%s' not found." % (app_name))
def check_and_open_application(self, app_name, path):
"""Checks if application is running and sets the focus to the application,
otherwise, opens application matching the given ``app_name`` and ``path``.
Example:
| Open Application | My Awesome App | C:/Program Files (x86)/Awesome App/awesomeapp.exe | # Opens `My Awesome App`, if app is already running, sets the focus to the app |
See also `Close Application`, `Open Application`,
and `Application Is Running`
"""
self._info("Opening application '%s' in path '%s'." % (app_name, path))
if os.path.exists(path):
self._set_application_path(path)
self._set_application_name(app_name)
if not App(self.application_name).isRunning():
App(self.application_path).open()
else:
App(app_name).focus()
else:
raise AssertionError("Application path '%s' not found." % (path))
def close_application(self, app_name):
"""Checks if the application matching the given ``app_name`` is running then closes it.
See also `Check And Open Application`, `Open Application` and `Application Is Running`
"""
self._info("Closing application '%s'." % app_name)
self._set_application_name(app_name)
App.close(self.application_name)
def open_application(self, application_path):
"""opens the application matching the given ``application_path``.
See also `Check And Open Application`, `Close Application` and `Application Is Running`
"""
if os.path.exists(application_path):
App.open(application_path)
else:
raise AssertionError("Application path '%s' not found." % (application_path))
def application_is_running(self, app_name):
"""Returns `True` if application as specified in `app_name` is running, else, returns `False`.
See also `Check And Open Application`, `Close Application`, and `Open Application`.
"""
return App(app_name).isRunning()
def run_command(self, command):
"""Runs a command, script or application path as specified in `command`.
Example:
| Run Command | control appwiz.cpl | # Opens the Windows Control Panel > Programs and Features window. |
"""
run(command)
def app_has_window(self, app_name):
"""Returns `True` if application's window or dialog as specified in `app_name` is open,
else, returns `False`.
See also `App Get Process ID`, `App Get Name` and `App Get Window`.
Example:
| App Has Window | Calculator | # Returns `True` if Calculator app is running in windows, else `False`. |
"""
return App(app_name).hasWindow()
def app_get_process_ID(self, app_name):
"""Returns the application's process ID as number if app is running, -1 otherwise.
See also `App Has Window`, `App Get Name` and `App Get Window`.
Example:
| App Get Process ID | Calculator | # Returns a PID number if Calculator app is running in windows, else `-1`. |
"""
return App(app_name).getPID()
def app_get_name(self, app_name):
"""Returns the application's short name as show in the process list.
See also `App Has Window`, `App Get Process ID` and `App Get Window`.
Example:
| App Get Name | Calculator | # Returns `calc.exe` if Calculator app is running in windows. |
"""
return App(app_name).getName()
def app_get_window(self, app_name):
"""Returns the title of the frontmost window of the application as specified in `app_name`,
might be an empty string
See also `App Has Window`, `App Get Process ID` and `App Get Name`.
Example:
| App Get Window | Calculator | # Returns `Calculator` if Calculator app is running in windows. |
"""
return App(app_name).getWindow()
# Private
"""***************************** Internal Methods ************************************"""
def _set_application_name(self, application_name):
self.application_name = application_name
def _get_application_name(self):
if self.application_name is not None:
return self.application_name
def _set_application_path(self, application_path):
if not self._path_exists(application_path):
raise AssertionError("Path '%s' does not exist." % (application_path))
else:
self.application_path = application_path
def _get_application_path(self):
if self.application_path is not None:
return self.application_path
| {
"content_hash": "d12fd089af01c8bf9bc821dcfc78a751",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 178,
"avg_line_length": 40.18181818181818,
"alnum_prop": 0.6115061409179057,
"repo_name": "jaredfin/SikuliXRobotLibrary",
"id": "81395cb9b64b830df38b9fe6f1f0b5dbde7afa14",
"size": "6188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/SikuliXRobotLibrary/keywords/_application.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "591621"
},
{
"name": "Python",
"bytes": "128254"
},
{
"name": "RobotFramework",
"bytes": "8455"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2022-08-01-preview")
) # type: Literal["2022-08-01-preview"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/signalR/{resourceName}/sharedPrivateLinkResources",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"resourceName": _SERIALIZER.url("resource_name", resource_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
shared_private_link_resource_name: str,
resource_group_name: str,
resource_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2022-08-01-preview")
) # type: Literal["2022-08-01-preview"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/signalR/{resourceName}/sharedPrivateLinkResources/{sharedPrivateLinkResourceName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"sharedPrivateLinkResourceName": _SERIALIZER.url(
"shared_private_link_resource_name", shared_private_link_resource_name, "str"
),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"resourceName": _SERIALIZER.url("resource_name", resource_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
shared_private_link_resource_name: str,
resource_group_name: str,
resource_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2022-08-01-preview")
) # type: Literal["2022-08-01-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/signalR/{resourceName}/sharedPrivateLinkResources/{sharedPrivateLinkResourceName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"sharedPrivateLinkResourceName": _SERIALIZER.url(
"shared_private_link_resource_name", shared_private_link_resource_name, "str"
),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"resourceName": _SERIALIZER.url("resource_name", resource_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
shared_private_link_resource_name: str,
resource_group_name: str,
resource_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2022-08-01-preview")
) # type: Literal["2022-08-01-preview"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/signalR/{resourceName}/sharedPrivateLinkResources/{sharedPrivateLinkResourceName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"sharedPrivateLinkResourceName": _SERIALIZER.url(
"shared_private_link_resource_name", shared_private_link_resource_name, "str"
),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"resourceName": _SERIALIZER.url("resource_name", resource_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
class SignalRSharedPrivateLinkResourcesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.signalr.SignalRManagementClient`'s
:attr:`signal_rshared_private_link_resources` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> Iterable["_models.SharedPrivateLinkResource"]:
"""List shared private link resources.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param resource_name: The name of the resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SharedPrivateLinkResource or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.signalr.models.SharedPrivateLinkResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-01-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SharedPrivateLinkResourceList]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("SharedPrivateLinkResourceList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/signalR/{resourceName}/sharedPrivateLinkResources"} # type: ignore
@distributed_trace
def get(
self, shared_private_link_resource_name: str, resource_group_name: str, resource_name: str, **kwargs: Any
) -> _models.SharedPrivateLinkResource:
"""Get the specified shared private link resource.
:param shared_private_link_resource_name: The name of the shared private link resource.
Required.
:type shared_private_link_resource_name: str
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param resource_name: The name of the resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedPrivateLinkResource or the result of cls(response)
:rtype: ~azure.mgmt.signalr.models.SharedPrivateLinkResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-01-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SharedPrivateLinkResource]
request = build_get_request(
shared_private_link_resource_name=shared_private_link_resource_name,
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("SharedPrivateLinkResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/signalR/{resourceName}/sharedPrivateLinkResources/{sharedPrivateLinkResourceName}"} # type: ignore
def _create_or_update_initial(
self,
shared_private_link_resource_name: str,
resource_group_name: str,
resource_name: str,
parameters: Union[_models.SharedPrivateLinkResource, IO],
**kwargs: Any
) -> _models.SharedPrivateLinkResource:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-01-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SharedPrivateLinkResource]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "SharedPrivateLinkResource")
request = build_create_or_update_request(
shared_private_link_resource_name=shared_private_link_resource_name,
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("SharedPrivateLinkResource", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("SharedPrivateLinkResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/signalR/{resourceName}/sharedPrivateLinkResources/{sharedPrivateLinkResourceName}"} # type: ignore
@overload
def begin_create_or_update(
self,
shared_private_link_resource_name: str,
resource_group_name: str,
resource_name: str,
parameters: _models.SharedPrivateLinkResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.SharedPrivateLinkResource]:
"""Create or update a shared private link resource.
:param shared_private_link_resource_name: The name of the shared private link resource.
Required.
:type shared_private_link_resource_name: str
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param resource_name: The name of the resource. Required.
:type resource_name: str
:param parameters: The shared private link resource. Required.
:type parameters: ~azure.mgmt.signalr.models.SharedPrivateLinkResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either SharedPrivateLinkResource or the result
of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.signalr.models.SharedPrivateLinkResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_or_update(
self,
shared_private_link_resource_name: str,
resource_group_name: str,
resource_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.SharedPrivateLinkResource]:
"""Create or update a shared private link resource.
:param shared_private_link_resource_name: The name of the shared private link resource.
Required.
:type shared_private_link_resource_name: str
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param resource_name: The name of the resource. Required.
:type resource_name: str
:param parameters: The shared private link resource. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either SharedPrivateLinkResource or the result
of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.signalr.models.SharedPrivateLinkResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_or_update(
self,
shared_private_link_resource_name: str,
resource_group_name: str,
resource_name: str,
parameters: Union[_models.SharedPrivateLinkResource, IO],
**kwargs: Any
) -> LROPoller[_models.SharedPrivateLinkResource]:
"""Create or update a shared private link resource.
:param shared_private_link_resource_name: The name of the shared private link resource.
Required.
:type shared_private_link_resource_name: str
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param resource_name: The name of the resource. Required.
:type resource_name: str
:param parameters: The shared private link resource. Is either a model type or a IO type.
Required.
:type parameters: ~azure.mgmt.signalr.models.SharedPrivateLinkResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either SharedPrivateLinkResource or the result
of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.signalr.models.SharedPrivateLinkResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-01-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SharedPrivateLinkResource]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial( # type: ignore
shared_private_link_resource_name=shared_private_link_resource_name,
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("SharedPrivateLinkResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/signalR/{resourceName}/sharedPrivateLinkResources/{sharedPrivateLinkResourceName}"} # type: ignore
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, shared_private_link_resource_name: str, resource_group_name: str, resource_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-01-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
shared_private_link_resource_name=shared_private_link_resource_name,
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/signalR/{resourceName}/sharedPrivateLinkResources/{sharedPrivateLinkResourceName}"} # type: ignore
@distributed_trace
def begin_delete(
self, shared_private_link_resource_name: str, resource_group_name: str, resource_name: str, **kwargs: Any
) -> LROPoller[None]:
"""Delete the specified shared private link resource.
:param shared_private_link_resource_name: The name of the shared private link resource.
Required.
:type shared_private_link_resource_name: str
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param resource_name: The name of the resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-01-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
shared_private_link_resource_name=shared_private_link_resource_name,
resource_group_name=resource_group_name,
resource_name=resource_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SignalRService/signalR/{resourceName}/sharedPrivateLinkResources/{sharedPrivateLinkResourceName}"} # type: ignore
| {
"content_hash": "b9c25bc7e825c650a109b6e243926463",
"timestamp": "",
"source": "github",
"line_count": 731,
"max_line_length": 251,
"avg_line_length": 47.33789329685362,
"alnum_prop": 0.6539995376257081,
"repo_name": "Azure/azure-sdk-for-python",
"id": "c2bd81da017b69705d650a214cd86ae67d80ac54",
"size": "35104",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/signalr/azure-mgmt-signalr/azure/mgmt/signalr/operations/_signal_rshared_private_link_resources_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import os
from os.path import join
import urlparse
import urllib
import httplib
import logging
import authomatic
from authomatic.exceptions import FetchError
def custom_fetch(self, url, method='GET', params=None, headers=None, body='', max_redirects=5, content_parser=None): # NOQA
params = params or {}
params.update(self.access_params)
headers = headers or {}
headers.update(self.access_headers)
scheme, host, path, query, fragment = urlparse.urlsplit(url)
query = urllib.urlencode(params)
if method in ('POST', 'PUT', 'PATCH'):
if not body:
# Put querystring to body
body = query
query = None
headers.update({'Content-Type': 'application/x-www-form-urlencoded'})
request_path = urlparse.urlunsplit((None, None, path, query, None))
self._log(logging.DEBUG, u' \u251C\u2500 host: {0}'.format(host))
self._log(logging.DEBUG, u' \u251C\u2500 path: {0}'.format(request_path))
self._log(logging.DEBUG, u' \u251C\u2500 method: {0}'.format(method))
self._log(logging.DEBUG, u' \u251C\u2500 body: {0}'.format(body))
self._log(logging.DEBUG, u' \u251C\u2500 params: {0}'.format(params))
self._log(logging.DEBUG, u' \u2514\u2500 headers: {0}'.format(headers))
# Connect
proxy = os.environ.get('http_proxy', None)
if proxy is None:
if scheme.lower() == 'https':
connection = httplib.HTTPSConnection(host)
else:
connection = httplib.HTTPConnection(host)
else:
proxy_scheme, proxy_host, proxy_path, _, _ = urlparse.urlsplit(proxy)
proxy_host, proxy_port = proxy_host.split(':')
self._log(logging.INFO, u'Using proxy on %s://%s:%s' % (proxy_scheme, proxy_host, proxy_port))
if proxy_scheme.lower() == 'https':
connection = httplib.HTTPSConnection(proxy_host, proxy_port)
else:
connection = httplib.HTTPConnection(proxy_host, proxy_port)
request_path = "%s://%s" % (scheme, (join(host.rstrip('/'), request_path.lstrip('/'))))
try:
connection.request(method, request_path, body, headers)
except Exception as e:
raise FetchError(
'Could not connect!',
original_message=e.message,
url=request_path
)
response = connection.getresponse()
location = response.getheader('Location')
if response.status in (300, 301, 302, 303, 307) and location:
if location == url:
raise FetchError(
'Url redirects to itself!',
url=location,
status=response.status
)
elif max_redirects > 0:
remaining_redirects = max_redirects - 1
self._log(logging.DEBUG, 'Redirecting to {0}'.format(url))
self._log(logging.DEBUG, 'Remaining redirects: {0}'.format(remaining_redirects))
# Call this method again.
response = self._fetch(
url=location,
params=params,
method=method,
headers=headers,
max_redirects=remaining_redirects
)
else:
raise FetchError(
'Max redirects reached!',
url=location,
status=response.status
)
else:
self._log(logging.DEBUG, u'Got response:')
self._log(logging.DEBUG, u' \u251C\u2500 url: {0}'.format(url))
self._log(logging.DEBUG, u' \u251C\u2500 status: {0}'.format(response.status))
self._log(logging.DEBUG, u' \u2514\u2500 headers: {0}'.format(response.getheaders()))
return authomatic.core.Response(response, content_parser)
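# How this function gets attached is not shown in this template. Because it
# calls self._fetch for redirects and reads self.access_params and
# self.access_headers, it is presumably bound onto authomatic's provider base
# class in place of the stock fetch method; the exact attribute path below is
# an assumption.
#
# import authomatic.providers
# authomatic.providers.BaseProvider._fetch = custom_fetch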
| {
"content_hash": "32ee151dc5e4101b3e85f4363a64d00d",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 124,
"avg_line_length": 34.971698113207545,
"alnum_prop": 0.59778796870785,
"repo_name": "heynemann/generator-flask-app",
"id": "4d06aac9ae9aa29d6729703fe63d848714e6b27c",
"size": "3754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/templates/_authomatic_ext.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2468"
},
{
"name": "CoffeeScript",
"bytes": "288"
},
{
"name": "HTML",
"bytes": "4198"
},
{
"name": "JavaScript",
"bytes": "15136"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "42467"
}
],
"symlink_target": ""
} |
"""
Main module of the Gol and Pressure Demo.
Uses the Complex Automaton Base.
"""
from cab_core.ca.cab_cell import CellHex
from cab_core.cab_global_constants import GlobalConstants
from cab_core.cab_system import ComplexAutomaton
from cab_core.util.cab_input_handling import InputHandler
from cab_core.util.cab_visualization import Visualization
import pygame
import pygame.gfxdraw  # gfxdraw is a submodule and must be imported explicitly for the draw calls below
import numpy
import math
import random
__author__ = 'Michael Wagner'
class GC(GlobalConstants):
def __init__(self):
super().__init__()
self.VERSION = 'version: 08-2015'
################################
# SIMULATION CONSTANTS #
################################
self.RUN_SIMULATION = False
self.ONE_AGENT_PER_CELL = False
################################
# CA CONSTANTS #
################################
self.USE_HEX_CA = True
self.USE_MOORE_NEIGHBORHOOD = True
self.USE_CA_BORDERS = True
self.DIM_X = 50 # How many cells is the ca wide?
self.DIM_Y = 50 # How many cells is the ca high?
self.CELL_SIZE = 10 # How long/wide is one cell?
self.GRID_WIDTH = self.DIM_X * self.CELL_SIZE
self.GRID_HEIGHT = self.DIM_Y * self.CELL_SIZE
################################
# ABM CONSTANTS #
################################
################################
# UTILITY CONSTANTS #
################################
class GolCell(CellHex):
def __init__(self, x, y, c_size, c):
super().__init__(x, y, c_size, c)
self.alive = 0
self.next_state = 0
# The rules:
# cell will be [b]orn if it has the following amount of neighbors
self.b = [2]
# cell will [s]tay alive if it has the following amount of neighbors
self.s = [3, 4]
def sense_neighborhood(self):
_neighs_alive = 0
for cell in self.neighbors:
if cell.alive == 1 and not cell.is_border:
_neighs_alive += 1
if self.alive == 0 and _neighs_alive in self.b:
self.next_state = 1
elif self.alive == 1 and _neighs_alive in self.s:
self.next_state = 1
else:
self.next_state = 0
def update(self):
self.alive = self.next_state
def clone(self, x, y, c_size):
return GolCell(x, y, c_size, self.gc)
class GolIO(InputHandler):
def __init__(self, cab_sys):
super().__init__(cab_sys)
def clone(self, cab_sys):
return GolIO(cab_sys)
def get_mouse_hex_coords(self):
_q = (self.mx * math.sqrt(3)/3 - self.my/3)# / self.sys.gc.CELL_SIZE
_r = self.my * 2/3# / self.sys.gc.CELL_SIZE
cell_q, cell_r = hex_round(_q, _r)
return cell_q, cell_r
def custom_mouse_action(self, button):
# Click on left mouse button.
if button == 1:
cell_x, cell_y = self.get_mouse_hex_coords()
self.sys.ca.ca_grid[cell_x, cell_y].alive = 1 - self.sys.ca.ca_grid[cell_x, cell_y].alive
# Click on middle mouse button / mouse wheel
elif button == 2:
cell_x, cell_y = self.get_mouse_hex_coords()
# Click on right mouse button
elif button == 3:
cell_x, cell_y = self.get_mouse_hex_coords()
for cell in list(self.sys.ca.ca_grid.values()):
if random.random() > 0.65:
cell.alive = 1
else:
cell.alive = 0
class GolVis(Visualization):
def __init__(self, c, screen):
super().__init__(c, screen)
def clone(self, cab_sys):
return GolVis(self.gc, cab_sys)
def draw_cell(self, cell):
"""
Simple exemplary visualization: live cells dark grey, dead cells light grey, border cells medium grey.
"""
if cell is None:
pass
else:
if cell.is_border:
pygame.gfxdraw.filled_polygon(self.surface, cell.get_corners(), (120, 120, 120))
else:
if cell.alive:
red = 90
green = 90
blue = 90
else:
red = 220
green = 220
blue = 220
pygame.gfxdraw.filled_polygon(self.surface, cell.get_corners(), (red, green, blue))
pygame.gfxdraw.aapolygon(self.surface, cell.get_corners(), (190, 190, 190))
def hex_round(q, r):
return cube_to_hex(*cube_round(*hex_to_cube(q, r)))
def cube_round(x, y, z):
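# Round each cube coordinate to the nearest integer, then repair the axis with
# the largest rounding error so that the invariant x + y + z == 0 still holds
# (the usual cube-coordinate rounding trick for hex grids).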
rx = round(x)
ry = round(y)
rz = round(z)
dx = abs(rx - x)
dy = abs(ry - y)
dz = abs(rz - z)
if dx > dy and dx > dz:
rx = -ry - rz
elif dy > dz:
ry = -rx - rz
else:
rz = -rx - ry
return rx, ry, rz
def cube_to_hex(x, y, z):
return x, y
def hex_to_cube(q, r):
z = -q - r
return q, r, z
if __name__ == '__main__':
gc = GC()
pc = GolCell(0, 0, 0, gc)
ph = GolIO(None)
pv = GolVis(gc, None)
simulation = ComplexAutomaton(gc, proto_cell=pc, proto_handler=ph, proto_visualizer=pv)
simulation.run_main_loop()
# cProfile.run("simulation.run_main_loop()") | {
"content_hash": "e9f835cce8f42cbd11f6b4d0851151db",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 101,
"avg_line_length": 29.795454545454547,
"alnum_prop": 0.5062929061784897,
"repo_name": "Micutio/Pyosphere",
"id": "96ee11b02b1ff5a730e40c3c8ef00798fbd72522",
"size": "5244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/gameoflife.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38665"
}
],
"symlink_target": ""
} |
"""Implementation of the RTSP protocol.
This is a simple implementation of the RTSP protocol used by Apple (with its quirks
and all). It is somewhat generalized to support both AirPlay 1 and 2.
"""
import asyncio
from hashlib import md5
import logging
import plistlib
from random import randrange
from typing import Any, Dict, Mapping, NamedTuple, Optional, Tuple, Union
import async_timeout
from pyatv.protocols.dmap import tags
from pyatv.support.http import HttpConnection, HttpResponse
from pyatv.support.metadata import AudioMetadata
_LOGGER = logging.getLogger(__name__)
FRAMES_PER_PACKET = 352
USER_AGENT = "AirPlay/540.31"
HTTP_PROTOCOL = "HTTP/1.1"
ANNOUNCE_PAYLOAD = (
"v=0\r\n"
+ "o=iTunes {session_id} 0 IN IP4 {local_ip}\r\n"
+ "s=iTunes\r\n"
+ "c=IN IP4 {remote_ip}\r\n"
+ "t=0 0\r\n"
+ "m=audio 0 RTP/AVP 96\r\n"
+ "a=rtpmap:96 AppleLossless\r\n"
+ f"a=fmtp:96 {FRAMES_PER_PACKET} 0 "
+ "{bits_per_channel} 40 10 14 {channels} 255 0 0 {sample_rate}\r\n"
)
# Used to signal that traffic is to be unencrypted
AUTH_SETUP_UNENCRYPTED = b"\x01"
# Just a static Curve25519 public key used to satisfy the auth-setup step for devices
# requiring that (e.g. AirPort Express). We never verify anything. Source:
# https://github.com/owntone/owntone-server/blob/
# c1db4d914f5cd8e7dbe6c1b6478d68a4c14824af/src/outputs/raop.c#L276
CURVE25519_PUB_KEY = (
b"\x59\x02\xed\xe9\x0d\x4e\xf2\xbd"
b"\x4c\xb6\x8a\x63\x30\x03\x82\x07"
b"\xa9\x4d\xbd\x50\xd8\xaa\x46\x5b"
b"\x5d\x8c\x01\x2a\x0c\x7e\x1d\x4e"
)
class DigestInfo(NamedTuple):
"""
OAuth information used for password protected devices.
"""
username: str
realm: str
password: str
nonce: str
def get_digest_payload(method, uri, user, realm, pwd, nonce):
"""Return the Authorization payload for Apples OAuth."""
payload = (
'Digest username="{0}", realm="{1}", nonce="{2}", uri="{3}", response="{4}"'
)
ha1 = md5(f"{user}:{realm}:{pwd}".encode("utf-8")).hexdigest()
ha2 = md5(f"{method}:{uri}".encode("utf-8")).hexdigest()
di_response = md5(f"{ha1}:{nonce}:{ha2}".encode("utf-8")).hexdigest()
return payload.format(user, realm, nonce, uri, di_response)
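# Note (added for clarity, not part of the original module): the computation
# above follows RFC 2617 Digest authentication without qop, i.e.
#   response = MD5(MD5(user:realm:pwd) + ":" + nonce + ":" + MD5(method:uri))
# rendered into a fixed Authorization header layout.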
class RtspSession:
"""Representation of an RTSP session."""
def __init__(self, connection: HttpConnection) -> None:
"""Initialize a new RtspSession."""
super().__init__()
self.connection = connection
self.requests: Dict[int, Tuple[asyncio.Event, Optional[HttpResponse]]] = {}
self.digest_info: Optional[DigestInfo] = None # Password authentication
self.cseq = 0
self.session_id: int = randrange(2**32)
self.dacp_id: str = f"{randrange(2 ** 64):X}"
self.active_remote: int = randrange(2**32)
@property
def uri(self) -> str:
"""Return URI used for session requests."""
return f"rtsp://{self.connection.local_ip}/{self.session_id}"
@staticmethod
def error_received(exc) -> None:
"""Handle a connection error."""
_LOGGER.error("Error received: %s", exc)
async def info(self) -> Dict[str, object]:
"""Return device information."""
device_info = await self.exchange(
"GET", "/info", allow_error=True, protocol=HTTP_PROTOCOL
)
# If not supported, just return an empty dict
if device_info.code != 200:
_LOGGER.debug("Device does not support /info")
return {}
body = (
device_info.body
if isinstance(device_info.body, bytes)
else device_info.body.encode("utf-8")
)
return plistlib.loads(body)
async def auth_setup(self) -> HttpResponse:
"""Send auth-setup message."""
# Payload to say that we want to proceed unencrypted
body = AUTH_SETUP_UNENCRYPTED + CURVE25519_PUB_KEY
return await self.exchange(
"POST",
"/auth-setup",
content_type="application/octet-stream",
body=body,
protocol=HTTP_PROTOCOL,
)
# This method is only used by AirPlay 1 and is very specific (e.g. does not support
    # announcing arbitrary audio formats) and should probably move to the AirPlay 1
# specific RAOP implementation. It will however live here for now until something
# motivates that.
async def announce(
self,
bytes_per_channel: int,
channels: int,
sample_rate: int,
password: Optional[str],
) -> HttpResponse:
"""Send ANNOUNCE message."""
body = ANNOUNCE_PAYLOAD.format(
session_id=self.session_id,
local_ip=self.connection.local_ip,
remote_ip=self.connection.remote_ip,
bits_per_channel=8 * bytes_per_channel,
channels=channels,
sample_rate=sample_rate,
)
requires_password: bool = password is not None
response = await self.exchange(
"ANNOUNCE",
content_type="application/sdp",
body=body,
allow_error=requires_password,
)
# Save the necessary data for password authentication
www_authenticate = response.headers.get("www-authenticate", None)
if response.code == 401 and www_authenticate and requires_password:
_, realm, _, nonce, _ = www_authenticate.split('"')
info = DigestInfo("pyatv", realm, password, nonce) # type: ignore
self.digest_info = info
response = await self.exchange(
"ANNOUNCE",
content_type="application/sdp",
body=body,
)
return response
async def setup(
self,
headers: Optional[Dict[str, Any]] = None,
body: Optional[Union[str, bytes]] = None,
) -> HttpResponse:
"""Send SETUP message."""
return await self.exchange("SETUP", headers=headers, body=body)
async def record(
self,
headers: Optional[Dict[str, Any]] = None,
body: Optional[Union[str, bytes]] = None,
) -> HttpResponse:
"""Send RECORD message."""
return await self.exchange("RECORD", headers=headers, body=body)
async def set_parameter(self, parameter: str, value: str) -> HttpResponse:
"""Send SET_PARAMETER message."""
return await self.exchange(
"SET_PARAMETER",
content_type="text/parameters",
body=f"{parameter}: {value}",
)
async def set_metadata(
self,
rtsp_session: int,
rtpseq: int,
rtptime: int,
metadata: AudioMetadata,
) -> HttpResponse:
"""Change metadata for what is playing."""
payload = b""
if metadata.title:
payload += tags.string_tag("minm", metadata.title)
if metadata.album:
payload += tags.string_tag("asal", metadata.album)
if metadata.artist:
payload += tags.string_tag("asar", metadata.artist)
return await self.exchange(
"SET_PARAMETER",
content_type="application/x-dmap-tagged",
headers={
"Session": rtsp_session,
"RTP-Info": f"seq={rtpseq};rtptime={rtptime}",
},
body=tags.container_tag("mlit", payload),
)
async def feedback(self, allow_error=False) -> HttpResponse:
"""Send SET_PARAMETER message."""
return await self.exchange("POST", uri="/feedback", allow_error=allow_error)
async def teardown(self, rtsp_session) -> HttpResponse:
"""Send TEARDOWN message."""
return await self.exchange("TEARDOWN", headers={"Session": rtsp_session})
async def exchange( # pylint: disable=too-many-locals
self,
method: str,
uri: Optional[str] = None,
content_type: Optional[str] = None,
        headers: Optional[Mapping[str, object]] = None,
        body: Optional[Union[str, bytes]] = None,
allow_error: bool = False,
protocol: str = "RTSP/1.0",
) -> HttpResponse:
"""Send a RTSP message and return response."""
cseq = self.cseq
self.cseq += 1
hdrs = {
"CSeq": cseq,
"DACP-ID": self.dacp_id,
"Active-Remote": self.active_remote,
"Client-Instance": self.dacp_id,
}
# Add the password authentication if required
if self.digest_info:
hdrs["Authorization"] = get_digest_payload(
method, uri or self.uri, *self.digest_info
)
if headers:
hdrs.update(headers)
# Map an asyncio Event to current CSeq and make the request
self.requests[cseq] = (asyncio.Event(), None)
resp = await self.connection.send_and_receive(
method,
uri or self.uri,
protocol=protocol,
user_agent=USER_AGENT,
content_type=content_type,
headers=hdrs,
body=body,
allow_error=allow_error,
)
# The response most likely contains a CSeq and it is also very likely to be
# the one we expect, but it could be for someone else. So set the correct event
# and save response.
resp_cseq = int(resp.headers.get("CSeq", "-1"))
if resp_cseq in self.requests:
# Insert response for correct CSeq and activate event
event, _ = self.requests[resp_cseq]
self.requests[resp_cseq] = (event, resp)
event.set()
# Wait for response to the CSeq we expect
try:
async with async_timeout.timeout(4):
await self.requests[cseq][0].wait()
response = self.requests[cseq][1]
except asyncio.TimeoutError as ex:
raise TimeoutError(
f"no response to CSeq {cseq} ({uri or self.uri})"
) from ex
finally:
del self.requests[cseq]
# Programming error: forgot to store response before activating event
if response is None:
raise RuntimeError(f"no response was saved for {cseq}")
return response
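# --- Usage sketch (added for illustration, not part of the original module) --
# A minimal, hedged example of driving an RtspSession. It assumes a connected
# HttpConnection (`connection`) has been obtained elsewhere (e.g. with the
# helpers in pyatv.support.http) and that the coroutine runs in an event loop.
async def _example_session(connection: HttpConnection) -> None:
    session = RtspSession(connection)
    await session.info()                # plist-decoded /info, or {} if unsupported
    await session.auth_setup()          # satisfy auth-setup on devices that need it
    await session.feedback(allow_error=True)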
| {
"content_hash": "ed068ad47b866ce9ea8ac9e7c2917ddb",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 87,
"avg_line_length": 33.811258278145694,
"alnum_prop": 0.5932817549701302,
"repo_name": "postlund/pyatv",
"id": "7b9b997b2e9150a6129ac0b6d351b6bb630c16f7",
"size": "10211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyatv/support/rtsp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "456"
},
{
"name": "Python",
"bytes": "1432120"
},
{
"name": "Shell",
"bytes": "2108"
}
],
"symlink_target": ""
} |
import unittest
from graphtheory.structures.edges import Edge
from graphtheory.structures.graphs import Graph
from graphtheory.coloring.nodecolorlf import LargestFirstNodeColoring
# 0 --- 1 --- 2 outerplanar, chordal, 2-tree
# | / | / |
# | / | / |
# | / | / |
# 3 --- 4 --- 5
# Best node coloring - 3 colors.
# color = {0:a, 1:b, 2:c, 3:c, 4:a, 5:b}
class TestNodeColoring(unittest.TestCase):
def setUp(self):
self.N = 6
self.G = Graph(self.N)
self.nodes = range(self.N)
self.edges = [
Edge(0, 1), Edge(0, 3), Edge(1, 3), Edge(1, 4), Edge(1, 2),
Edge(2, 4), Edge(2, 5), Edge(3, 4), Edge(4, 5)]
for node in self.nodes:
self.G.add_node(node)
for edge in self.edges:
self.G.add_edge(edge)
#self.G.show()
def test_lf_node_coloring(self):
algorithm = LargestFirstNodeColoring(self.G)
algorithm.run()
for node in self.G.iternodes():
self.assertNotEqual(algorithm.color[node], None)
for edge in self.G.iteredges():
self.assertNotEqual(algorithm.color[edge.source],
algorithm.color[edge.target])
#print algorithm.color
all_colors = set(algorithm.color[node] for node in self.G.iternodes())
self.assertEqual(len(all_colors), 3)
def test_exceptions(self):
self.assertRaises(ValueError, LargestFirstNodeColoring,
Graph(5, directed=True))
def tearDown(self): pass
if __name__ == "__main__":
unittest.main()
# EOF
| {
"content_hash": "23ca0f9e947746d165d95ef40176f5ea",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 78,
"avg_line_length": 31.019607843137255,
"alnum_prop": 0.5802781289506953,
"repo_name": "ufkapano/graphs-dict",
"id": "6df7d50d4126ba2e5f905d8eec986842f677f318",
"size": "1606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphtheory/coloring/tests/test_nodecolorlf.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "970894"
}
],
"symlink_target": ""
} |
"""SCons.Platform.cygwin
Platform-specific initialization for Cygwin systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/cygwin.py 2014/03/02 14:18:15 garyo"
import posix
from SCons.Platform import TempFileMunge
def generate(env):
posix.generate(env)
env['PROGPREFIX'] = ''
env['PROGSUFFIX'] = '.exe'
env['SHLIBPREFIX'] = ''
env['SHLIBSUFFIX'] = '.dll'
env['LIBPREFIXES'] = [ '$LIBPREFIX', '$SHLIBPREFIX' ]
env['LIBSUFFIXES'] = [ '$LIBSUFFIX', '$SHLIBSUFFIX' ]
env['TEMPFILE'] = TempFileMunge
env['TEMPFILEPREFIX'] = '@'
env['MAXLINELENGTH'] = 2048
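# Note (added for clarity, not part of the original file): with these settings
# a Program('hello', 'hello.c') target is built as 'hello.exe', a
# SharedLibrary('foo', ...) target as 'foo.dll' (no 'lib' prefix), and command
# lines longer than 2048 characters are spilled into '@'-prefixed temp files.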
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "cf5616a6fb4ef76c9c6a42ee646d6c84",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 119,
"avg_line_length": 37.45454545454545,
"alnum_prop": 0.7315533980582525,
"repo_name": "sftd/scons",
"id": "7429407a312c8c770bc3579f45a1f85cbb9f1ac0",
"size": "2060",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "scons-local/SCons/Platform/cygwin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1913081"
}
],
"symlink_target": ""
} |
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "retools-"
cfg.versionfile_source = "retools/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always -long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
| {
"content_hash": "c9d44b02dba90ef30ae7e7ac0eb72b5f",
"timestamp": "",
"source": "github",
"line_count": 450,
"max_line_length": 79,
"avg_line_length": 33.977777777777774,
"alnum_prop": 0.5714192282537607,
"repo_name": "jeroyang/retools",
"id": "8b1516a224ec303762684b2ebdfab1d804fdc884",
"size": "15765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "retools/_version.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1696"
},
{
"name": "Python",
"bytes": "83511"
}
],
"symlink_target": ""
} |
from south.db import db
from django.db import models
from states.models import *
class Migration:
def forwards(self, orm):
# Changing field 'StateReport.group_action'
# (to signature: django.db.models.fields.CharField(max_length=50, null=True))
db.alter_column('states_statereport', 'group_action', orm['states.statereport:group_action'])
def backwards(self, orm):
# Changing field 'StateReport.group_action'
# (to signature: django.db.models.fields.CharField(max_length=50))
db.alter_column('states_statereport', 'group_action', orm['states.statereport:group_action'])
models = {
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'states.state': {
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'state_id': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '32'})
},
'states.statelog': {
'current_state_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '32'}),
'from_state_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['states.State']"})
},
'states.statereport': {
'ascending': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'columns': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['states.StateReportColumn']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'group_action': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['states.StateReportItem']"}),
'menu_order': ('django.db.models.fields.SmallIntegerField', [], {'default': '-1'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order_column': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sorted_report'", 'to': "orm['states.StateReportColumn']"})
},
'states.statereportcolumn': {
'column_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'expression': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'sorting_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'states.statereportitem': {
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kwargs': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'state_id': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '32'})
}
}
complete_apps = ['states']
| {
"content_hash": "fa3a84ea3a3954b7c66ea2e1bc8934b3",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 156,
"avg_line_length": 62.19718309859155,
"alnum_prop": 0.5591032608695652,
"repo_name": "vikingco/django-states",
"id": "96e3560c0884befd9535580c33b9e9727d74f9c4",
"size": "4417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/states/south_migrations/0008_group_action.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9290"
},
{
"name": "Python",
"bytes": "174008"
},
{
"name": "SourcePawn",
"bytes": "204"
}
],
"symlink_target": ""
} |
from apis.models.test import Test, Answer
from apis.exception import NotFound
from . import Resource
class TestsIdScore(Resource):
def _get_test(self, id):
test = Test.objects(id=id).first()
if not test or test.status == 'draft':
raise NotFound('account_not_found')
return test
async def get(self, request, id):
answer = Answer.objects(test_id=id).first()
if not answer:
raise NotFound('answer_not_found')
return answer, 200
| {
"content_hash": "b40b0da7b5911158a7ca48936a3be820",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 51,
"avg_line_length": 26.842105263157894,
"alnum_prop": 0.6294117647058823,
"repo_name": "gusibi/Metis",
"id": "110b8c71fb91d1129f1aa7ba33bc37a74c94632b",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apis/v1/api/tests_id_score.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "98759"
}
],
"symlink_target": ""
} |
from zeit.cms.i18n import MessageFactory as _
import grokcore.component as grok
import lxml.objectify
import zeit.content.cp.blocks.block
import zeit.content.cp.interfaces
class XMLBlock(zeit.content.cp.blocks.block.Block):
grok.implements(zeit.content.cp.interfaces.IXMLBlock)
type = 'xml'
class XMLBlockFactory(zeit.content.cp.blocks.block.BlockFactory):
produces = XMLBlock
title = _('Raw XML block')
def get_xml(self):
container = super(XMLBlockFactory, self).get_xml()
raw = lxml.objectify.E.raw(u'\n\n\n')
lxml.objectify.deannotate(raw)
container.append(raw)
return container
| {
"content_hash": "15ff83a1c183587c430d9f459699265b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 65,
"avg_line_length": 27.041666666666668,
"alnum_prop": 0.711864406779661,
"repo_name": "ZeitOnline/zeit.content.cp",
"id": "b9b4619bd657bac14f4318e85db434d77a40656e",
"size": "649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zeit/content/cp/blocks/xml.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10641"
},
{
"name": "JavaScript",
"bytes": "14762"
},
{
"name": "Python",
"bytes": "332236"
}
],
"symlink_target": ""
} |
"""Implements the |Queue| interface on top of `celery`_, a distributed task
queue system. When a message is enqueued, a delivery task is queued up in
celery. Celery workers will then pick up the task and attempt delivery,
retrying and bouncing in the same manner as |Queue|.
A :class:`celery.Celery` object must be given to :class:`CeleryQueue`, and a
task will be registered to attempt delivery of |Envelope| objects. It may be
desirable to configure celery workers to `use gevent`_, since ``slimta``
|Relay| objects are expected to support it.
.. _celery: http://www.celeryproject.org/
.. _use gevent: http://docs.celeryproject.org\
/en/latest/configuration.html#celeryd-pool
"""
from __future__ import absolute_import
from celery.result import AsyncResult
from slimta.logging import log_exception
from slimta.queue import QueueError
from slimta.relay import PermanentRelayError, TransientRelayError
from slimta.bounce import Bounce
from slimta.policy import QueuePolicy
from slimta.smtp.reply import Reply
__all__ = ['CeleryQueue']
class CeleryQueue(object):
"""Instantiates a new object that can be used wherever a |Queue| is
expected.
:param celery: :class:`celery.Celery` object to register delivery task
with.
:param relay: |Relay| instance to attempt delivery with.
:param suffix: If given, the task registered in the :class:`~celery.Celery`
                   object will have its name suffixed with an underscore and
this string.
:param backoff: Function that, given an |Envelope| and number of delivery
attempts, will return the number of seconds before the next
attempt. If it returns ``None``, the message will be
permanently failed. The default backoff function simply
returns ``None`` and messages are never retried.
:param bounce_factory: Function that produces a |Bounce| or |Envelope|
object given the same parameters as the |Bounce|
constructor. If the function returns ``None``, no
bounce is delivered. By default, a new |Bounce| is
created in every case.
"""
def __init__(self, celery, relay, suffix=None, backoff=None,
bounce_factory=None):
if suffix:
task_decorator = celery.task(name='attempt_delivery_'+suffix)
self.attempt_task = task_decorator(self.attempt_delivery)
else:
self.attempt_task = celery.task(self.attempt_delivery)
self.relay = relay
self.bounce_factory = bounce_factory or Bounce
self.backoff = backoff or self._default_backoff
self.queue_policies = []
def add_policy(self, policy):
"""Adds a |QueuePolicy| to be executed before messages are persisted
to storage.
:param policy: |QueuePolicy| object to execute.
"""
if isinstance(policy, QueuePolicy):
self.queue_policies.append(policy)
else:
raise TypeError('Argument not a QueuePolicy.')
def flush(self):
"""The :meth:`~slimta.queue.Queue.flush` method from |Queue| is not
available to :class:`CeleryQueue` objects.
:raises: :class:`NotImplementedError`
"""
raise NotImplementedError()
def kill(self):
pass
@staticmethod
def _default_backoff(envelope, attempts):
pass
def _run_policies(self, envelope):
results = [envelope]
def recurse(current, i):
try:
policy = self.queue_policies[i]
except IndexError:
return
ret = policy.apply(current)
if ret:
results.remove(current)
results.extend(ret)
for env in ret:
recurse(env, i+1)
else:
recurse(current, i+1)
recurse(envelope, 0)
return results
def enqueue(self, envelope):
envelopes = self._run_policies(envelope)
ids = [self._initiate_attempt(env) for env in envelopes]
results = list(zip(envelopes, ids))
return results
def _initiate_attempt(self, envelope, attempts=0, wait=None):
attempt = self.attempt_task.s(envelope, attempts)
if wait:
attempt.set(countdown=wait)
try:
return attempt.apply_async().id
except Exception as exc:
return QueueError(exc)
def attempt_delivery(self, envelope, attempts):
try:
self.relay._attempt(envelope, attempts)
except TransientRelayError as exc:
self._handle_transient_failure(envelope, attempts, exc.reply)
except PermanentRelayError as exc:
self.enqueue_bounce(envelope, exc.reply)
except Exception as exc:
log_exception(__name__)
reply = Reply('450', '4.0.0 Unhandled delivery error: '+str(exc))
self._handle_transient_failure(envelope, attempts, reply)
raise
def _handle_transient_failure(self, envelope, attempts, reply):
wait = self.backoff(envelope, attempts+1)
if wait:
self._initiate_attempt(envelope, attempts+1, wait=wait)
else:
reply.message += ' (Too many retries)'
self.enqueue_bounce(envelope, reply)
def enqueue_bounce(self, envelope, reply):
if envelope.sender:
bounce = self.bounce_factory(envelope, reply)
if bounce:
self._initiate_attempt(bounce)
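# --- Usage sketch (added for illustration, not part of the original module) --
# One possible way to wire CeleryQueue into an application. The broker URL and
# relay class below are placeholders, and the backoff function is only one
# plausible policy (roughly exponential, giving up after five attempts).

def _example_backoff(envelope, attempts):
    """Retry after 60s, 120s, 240s, ... and fail permanently after 5 tries."""
    if attempts > 5:
        return None
    return 60 * (2 ** (attempts - 1))

# from celery import Celery
# from slimta.relay.smtp.mx import MxSmtpRelay
#
# celery_app = Celery('slimta_queue', broker='amqp://localhost//')
# queue = CeleryQueue(celery_app, MxSmtpRelay(), backoff=_example_backoff)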
# vim:et:fdm=marker:sts=4:sw=4:ts=4
| {
"content_hash": "fcfb8d5a64e034a6c33512c3525bdd0b",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 79,
"avg_line_length": 36.15923566878981,
"alnum_prop": 0.6198696494627444,
"repo_name": "slimta/python-slimta-celeryqueue",
"id": "36fdc0cec0b016896ac7e3fb732ce614e54b0ca2",
"size": "6770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slimta/celeryqueue/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16997"
}
],
"symlink_target": ""
} |
"""HydraTK default event class
.. module:: core.event
:platform: Unix
:synopsis: HydraTK default event class
.. moduleauthor:: Petr Czaderna <[email protected]>
"""
import pprint
class Event(object):
""" Class Event
"""
_id = None
_args = ()
_data = {}
_propagate = True
_run_default = True
_skip_before_hook = False
_skip_after_hook = False
def __init__(self, event_id, *args, **kwargs):
"""Class constructor
Called when the object is initialized
Args:
event_id (str): event
args (args): arguments
kwargs (kwargs): key value arguments
Raises:
error: ValueError
"""
self._args = ()
self._data = {}
self._propagate = True
self._run_default = True
self._skip_before_hook = False
self._skip_after_hook = False
if isinstance(event_id, str) and event_id != '':
self._id = event_id
self._data['target_event'] = None
self._data['source_event'] = None
else:
raise ValueError(
"Invalid event id specified, nonempty string is required")
if isinstance(args, tuple):
self._args = ()
self._args = args
if len(kwargs) > 0:
for k, v in kwargs.items():
self._data[k] = v
@property
def id(self):
"""Method gets id attribute
Args:
none
Returns:
str
"""
return self._id
def argc(self):
"""Method gets count of arguments
Args:
none
Returns:
int: count of arguments
"""
return len(self._args)
def args(self):
"""Method gets args attribute
Args:
none
Returns:
tuple
"""
return self._args
def get_all_data(self):
"""Method gets event data
Args:
none
Returns:
dict: data
"""
return self._data
def get_data(self, key):
"""Method gets requested event data
Args:
key (str): data key
Returns:
obj: data value
"""
return self._data[key] if (key in self._data) else None
def set_data(self, key, value):
"""Method sets requested event data
Args:
key (str): data key
value (obj): data value
Returns:
void
Raises:
error: ValueError
"""
if isinstance(key, str) and key != '':
self._data[key] = value
else:
raise ValueError(
"Invalid key specified, nonempty string is required")
def argv(self, num):
"""Method gets request event argument
Args:
num (int): argument index
Returns:
obj: argument value
"""
return self._args[num] if (num < len(self._args)) else None
def set_argv(self, num, val=None):
"""Method sets requested event argument
Args:
num (int): argument index
val (obj): argument value
Returns:
void
"""
if isinstance(num, int) and num < len(self._args):
args = list(self._args)
args[num] = val
self._args = tuple(args)
def stop_propagation(self):
"""Method stops event propagation
Args:
none
Returns:
void
"""
self._propagate = False
@property
def skip_before_hook(self):
""" skip_before_hook property getter """
return self._skip_before_hook
@property
def skip_after_hook(self):
""" skip_after_hook property getter """
return self._skip_after_hook
def prevent_default(self):
"""Method prevents default event processing
Args:
none
Returns:
void
"""
self._run_default = False
def run_default(self):
"""Method enables default event processing
Args:
none
Returns:
void
"""
self._run_default = True
def will_run_default(self):
"""Method gets default event processing
Args:
none
Returns:
bool: run_default
"""
return self._run_default
def propagate(self):
"""Method gets propagate attribute
Args:
none
Returns:
bool
"""
return self._propagate
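# --- Usage sketch (added for illustration, not part of the original module) --
# The event id, arguments and data key below are made up for demonstration.
if __name__ == '__main__':
    ev = Event('htk_on_demo', 1, 2, note='example')
    print(ev.id)                # 'htk_on_demo'
    print(ev.argc())            # 2
    print(ev.argv(1))           # 2
    print(ev.get_data('note'))  # 'example'
    ev.stop_propagation()
    print(ev.propagate())       # False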
| {
"content_hash": "013a4145f2a48a37b0441b9b83031589",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 74,
"avg_line_length": 18.359375,
"alnum_prop": 0.488936170212766,
"repo_name": "hydratk/hydratk",
"id": "7c7e722895fdc6eccb3668ffb71a331fdd9cdcce",
"size": "4724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/hydratk/core/event.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "444574"
}
],
"symlink_target": ""
} |
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
from Ruby import send_evicts
#
# Declare caches used by the protocol
#
class L1Cache(RubyCache): pass
class L2Cache(RubyCache): pass
#
# Probe filter is a cache
#
class ProbeFilter(RubyCache): pass
def define_options(parser):
parser.add_option("--allow-atomic-migration", action="store_true",
help="allow migratory sharing for atomic only accessed blocks")
parser.add_option("--pf-on", action="store_true",
help="Hammer: enable Probe Filter")
parser.add_option("--dir-on", action="store_true",
help="Hammer: enable Full-bit Directory")
def create_system(options, full_system, system, dma_ports, ruby_system):
if buildEnv['PROTOCOL'] != 'MOESI_hammer':
panic("This script requires the MOESI_hammer protocol to be built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes must be
# listed before the directory nodes and directory nodes before dma nodes, etc.
#
l1_cntrl_nodes = []
dir_cntrl_nodes = []
dma_cntrl_nodes = []
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
block_size_bits = int(math.log(options.cacheline_size, 2))
for i in xrange(options.num_cpus):
#
# First create the Ruby objects associated with this cpu
#
l1i_cache = L1Cache(size = options.l1i_size,
assoc = options.l1i_assoc,
start_index_bit = block_size_bits,
is_icache = True)
l1d_cache = L1Cache(size = options.l1d_size,
assoc = options.l1d_assoc,
start_index_bit = block_size_bits)
l2_cache = L2Cache(size = options.l2_size,
assoc = options.l2_assoc,
start_index_bit = block_size_bits)
l1_cntrl = L1Cache_Controller(version = i,
L1Icache = l1i_cache,
L1Dcache = l1d_cache,
L2cache = l2_cache,
no_mig_atomic = not \
options.allow_atomic_migration,
send_evictions = send_evicts(options),
transitions_per_cycle = options.ports,
clk_domain=system.cpu[i].clk_domain,
ruby_system = ruby_system)
cpu_seq = RubySequencer(version = i,
icache = l1i_cache,
dcache = l1d_cache,
clk_domain=system.cpu[i].clk_domain,
ruby_system = ruby_system)
l1_cntrl.sequencer = cpu_seq
if options.recycle_latency:
l1_cntrl.recycle_latency = options.recycle_latency
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
# Add controllers and sequencers to the appropriate lists
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
# Connect the L1 controller and the network
# Connect the buffers from the controller to network
l1_cntrl.requestFromCache = MessageBuffer()
l1_cntrl.requestFromCache.master = ruby_system.network.slave
l1_cntrl.responseFromCache = MessageBuffer()
l1_cntrl.responseFromCache.master = ruby_system.network.slave
l1_cntrl.unblockFromCache = MessageBuffer()
l1_cntrl.unblockFromCache.master = ruby_system.network.slave
l1_cntrl.triggerQueue = MessageBuffer()
# Connect the buffers from the network to the controller
l1_cntrl.mandatoryQueue = MessageBuffer()
l1_cntrl.forwardToCache = MessageBuffer()
l1_cntrl.forwardToCache.slave = ruby_system.network.master
l1_cntrl.responseToCache = MessageBuffer()
l1_cntrl.responseToCache.slave = ruby_system.network.master
    # If total-mem-size is set (defined in gem5-gpu options), ignore the benchmark settings
try:
phys_mem_size = AddrRange(options.total_mem_size).size()
except AttributeError:
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
print "Number of dirs is %s" % options.num_dirs
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
#
# determine size and index bits for probe filter
# By default, the probe filter size is configured to be twice the
# size of the L2 cache.
#
pf_size = MemorySize(options.l2_size)
pf_size.value = pf_size.value * 2
dir_bits = int(math.log(options.num_dirs, 2))
pf_bits = int(math.log(pf_size.value, 2))
if options.numa_high_bit:
if options.pf_on or options.dir_on:
# if numa high bit explicitly set, make sure it does not overlap
# with the probe filter index
assert(options.numa_high_bit - dir_bits > pf_bits)
# set the probe filter start bit to just above the block offset
pf_start_bit = block_size_bits
else:
if dir_bits > 0:
pf_start_bit = dir_bits + block_size_bits - 1
else:
pf_start_bit = block_size_bits
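    # Worked example (added for clarity, not part of the original script):
    # with --l2_size=512kB, --num-dirs=2 and 64-byte cache lines this yields
    # pf_size = 1MB, pf_bits = 20, dir_bits = 1, block_size_bits = 6 and,
    # when --numa-high-bit is not given, pf_start_bit = 1 + 6 - 1 = 6.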
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system
# clk_divider value is a fix to pass regression.
ruby_system.memctrl_clk_domain = DerivedClockDomain(
clk_domain=ruby_system.clk_domain,
clk_divider=3)
for i in xrange(options.num_dirs):
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
pf = ProbeFilter(size = pf_size, assoc = 4,
start_index_bit = pf_start_bit)
dir_cntrl = Directory_Controller(version = i,
directory = RubyDirectoryMemory(
version = i, size = dir_size),
probeFilter = pf,
probe_filter_enabled = options.pf_on,
full_bit_dir_enabled = options.dir_on,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
if options.recycle_latency:
dir_cntrl.recycle_latency = options.recycle_latency
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
# Connect the directory controller to the network
dir_cntrl.forwardFromDir = MessageBuffer()
dir_cntrl.forwardFromDir.master = ruby_system.network.slave
dir_cntrl.responseFromDir = MessageBuffer()
dir_cntrl.responseFromDir.master = ruby_system.network.slave
dir_cntrl.dmaResponseFromDir = MessageBuffer(ordered = True)
dir_cntrl.dmaResponseFromDir.master = ruby_system.network.slave
dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
dir_cntrl.unblockToDir = MessageBuffer()
dir_cntrl.unblockToDir.slave = ruby_system.network.master
dir_cntrl.responseToDir = MessageBuffer()
dir_cntrl.responseToDir.slave = ruby_system.network.master
dir_cntrl.requestToDir = MessageBuffer()
dir_cntrl.requestToDir.slave = ruby_system.network.master
dir_cntrl.dmaRequestToDir = MessageBuffer(ordered = True)
dir_cntrl.dmaRequestToDir.slave = ruby_system.network.master
dir_cntrl.responseFromMemory = MessageBuffer()
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
#
dma_seq = DMASequencer(version = i,
ruby_system = ruby_system,
slave = dma_port)
dma_cntrl = DMA_Controller(version = i,
dma_sequencer = dma_seq,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
dma_cntrl_nodes.append(dma_cntrl)
if options.recycle_latency:
dma_cntrl.recycle_latency = options.recycle_latency
# Connect the dma controller to the network
dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
dma_cntrl.responseFromDir.slave = ruby_system.network.master
dma_cntrl.requestToDir = MessageBuffer()
dma_cntrl.requestToDir.master = ruby_system.network.slave
dma_cntrl.mandatoryQueue = MessageBuffer()
all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
# Create the io controller and the sequencer
if full_system:
io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
ruby_system._io_port = io_seq
io_controller = DMA_Controller(version = len(dma_ports),
dma_sequencer = io_seq,
ruby_system = ruby_system)
ruby_system.io_controller = io_controller
# Connect the dma controller to the network
io_controller.responseFromDir = MessageBuffer(ordered = True)
io_controller.responseFromDir.slave = ruby_system.network.master
io_controller.requestToDir = MessageBuffer()
io_controller.requestToDir.master = ruby_system.network.slave
io_controller.mandatoryQueue = MessageBuffer()
all_cntrls = all_cntrls + [io_controller]
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)
| {
"content_hash": "ea6eb28b68d6efbf65c439ecc467fde9",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 94,
"avg_line_length": 42.41596638655462,
"alnum_prop": 0.5923724616146607,
"repo_name": "ayoubg/gem5-graphics",
"id": "d757b75c6c67e7e1b18c2f7f7f032b20d7e97074",
"size": "11725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gem5/configs/ruby/MOESI_hammer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1415940"
},
{
"name": "C",
"bytes": "56123812"
},
{
"name": "C++",
"bytes": "26907403"
},
{
"name": "CMake",
"bytes": "2202"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "GLSL",
"bytes": "137780"
},
{
"name": "HTML",
"bytes": "8934387"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "LLVM",
"bytes": "14115"
},
{
"name": "Lex",
"bytes": "52790"
},
{
"name": "M4",
"bytes": "112794"
},
{
"name": "Makefile",
"bytes": "296363"
},
{
"name": "Module Management System",
"bytes": "18236"
},
{
"name": "Objective-C",
"bytes": "156253"
},
{
"name": "Perl",
"bytes": "33619"
},
{
"name": "Protocol Buffer",
"bytes": "7033"
},
{
"name": "Python",
"bytes": "4911707"
},
{
"name": "Roff",
"bytes": "2474891"
},
{
"name": "Shell",
"bytes": "122734"
},
{
"name": "TeX",
"bytes": "40106"
},
{
"name": "Vim script",
"bytes": "4335"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "Yacc",
"bytes": "230414"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
from load_files import training_ims, training_labels
# Visualize an example just for testing
index = 14
label = training_labels[index]
image = training_ims[index]
image = np.array(image)
image = np.reshape(image, (28, 28))
image[image > 0] = 1
plt.imshow(image)
plt.title('this should be the number = ' + str(label))
plt.show()
| {
"content_hash": "bf8d03d01c28f980e2176a4a5b9c9c41",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 54,
"avg_line_length": 20.88888888888889,
"alnum_prop": 0.723404255319149,
"repo_name": "h-mayorquin/mnist_deep_neural_network_BPNNs",
"id": "104005803cd58790c43b8946250a8453786904a3",
"size": "376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visualize_example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "77059"
}
],
"symlink_target": ""
} |
"""Common utils to support apply and reverting recommendations on bulk."""
import collections
from concurrent import futures
import json
import logging
import time
from google_auth_httplib2 import AuthorizedHttp
import httplib2
from google.oauth2 import service_account
class Recommendation(object):
"""Encapsulate Recommendation information required to compute hero metrics."""
def __init__(self, data):
self.name = data["name"]
self.etag = data["etag"]
self.state = self.get_state(data)
self.principal = set()
self.principal_type = ""
self.remove_role = set()
self.add_roles = set()
self.resource = set()
self.extract_recommendation(data)
self.check_integrity()
self.update_data()
def __repr__(self):
return repr(
(self.state, self.principal, self.principal_type, self.remove_role,
self.add_roles, self.resource, self.name, self.etag))
def get_state(self, data):
"""Get state of the recommendation."""
if data["stateInfo"]["state"] == "ACTIVE":
return "ACTIVE"
elif data["stateInfo"]["state"] == "SUCCEEDED":
if ("reverted" in data["stateInfo"].get("stateMetadata", {}) and
data["stateInfo"]["stateMetadata"].get("reverted",
"false") == "true"):
return "SUCCEEDED_REVERTED"
else:
return "SUCCEEDED"
return data["stateInfo"]["state"]
def extract_recommendation(self, data):
"""Populate recommendation data from a recommendation payload."""
for op_grps in data.get("content", {}).get("operationGroups", []):
for op in op_grps["operations"]:
if op["action"] == "remove":
self.principal.add(
op["pathFilters"]["/iamPolicy/bindings/*/members/*"])
self.resource.add(op["resource"])
self.remove_role.add(
op["pathFilters"]["/iamPolicy/bindings/*/role"])
elif op["action"] == "add":
self.resource.add(op["resource"])
self.add_roles.add(
op["pathFilters"]["/iamPolicy/bindings/*/role"])
self.principal.add(op["value"])
else:
raise ValueError("Wrong action : " + op["action"])
def check_integrity(self):
"""Check invariance of a recommendation payload."""
assert len(
self.principal
) == 1, "there should be exactly one principal. principal : " + str(
self.principal)
assert len(
self.remove_role
) == 1, "there should be exactly one removed role. remove_role: " + str(
self.remove_role)
assert len(
self.resource
) == 1, "there should be exactly one resource. resource: " + str(
self.resource)
def update_data(self):
"""Update recommendation data after checking the integrity."""
self.principal = self.principal.pop()
self.principal_type = self.principal.split(":")[0]
self.resource = self.resource.pop()
def rate_limit_execution(f, rate_limit, *args):
"""Execute multiple threads of function f for args while respecting the rate limit.
Args:
f: function to execute
rate_limit: rate with which the functions should be executed.
*args: Args provided for executing the function f.
Returns:
Output of executing f on args
"""
i = 0
n = len(args[0])
all_output = []
max_request, duration = rate_limit
while i < n:
tic = int(time.time())
with futures.ThreadPoolExecutor(max_workers=max_request) as executor:
output_ = executor.map(f, *[arg[i:i + max_request] for arg in args])
i += max_request
all_output.extend(output_)
toc = int(time.time())
diff = toc - tic
if diff < duration and i < n:
time.sleep(duration - diff)
logging.info("Finish investigating %d items out of total %d items.",
min(i, n), n)
return all_output
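# --- Usage sketch (added for illustration, not part of the original module) --
# Rate-limit a trivial, made-up workload to at most 2 calls per 1-second
# window; the function and numbers are placeholders for demonstration only.
def _rate_limit_example():
  """Illustrative only: returns [1, 4, 9, 16, 25], executed in batches of two
  spaced at least one second apart."""
  return rate_limit_execution(lambda x: x * x, (2, 1), [1, 2, 3, 4, 5])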
def get_recommendations(project_id, recommender, state, credentials):
"""Returns all recommendtions.
Args:
    project_id: (str) Project for which to get the recommendations.
recommender: Recommender stub to call recommender API
state: state of the recommendation
credentials: client credentials
"""
http = httplib2.Http()
authorize_http = AuthorizedHttp(credentials, http=http)
parent = "projects/{}/locations/global/recommenders/google.iam.policy.Recommender".format(
project_id)
fields = [
"recommendations/stateInfo/state", "recommendations/content",
"recommendations/etag", "recommendations/name",
"recommendations/stateInfo/stateMetadata"
]
try:
request = recommender.projects().locations().recommenders(
).recommendations().list(parent=parent, fields=",".join(fields))
response = request.execute(http=authorize_http)
recommendation_data = [
Recommendation(r) for r in response.get("recommendations", [])
]
return [r for r in recommendation_data if r.state == state]
except:
return []
def update_recommendation_status(recommendation, recommender_client, metadata,
credentials):
"""Update the recommendation status for the recommendations.
Args:
recommendation: Recommendation on IAM policy.
recommender_client: Iam recommender client.
metadata: (Dict) metadata to update the recommendation state.
credentials: service account credentials.
Returns:
Recommendations with updated status.
"""
http = httplib2.Http()
authorize_http = AuthorizedHttp(credentials, http=http)
return (recommender_client.projects().locations().recommenders().
recommendations().markSucceeded(name=recommendation["id"],
body={
"etag": recommendation["etag"],
"stateMetadata": metadata
}).execute(http=authorize_http))
def get_current_policy(resourcemanager_v1, project_id, credentials):
"""Returns the current policy associated with project_id.
Args:
resourcemanager_v1: ResourcemanagerV1 stub to call IAM API
    project_id: (str) Project for which to get the policy.
credentials: client credentials
"""
http = httplib2.Http()
authorize_http = AuthorizedHttp(credentials, http=http)
request = resourcemanager_v1.projects().getIamPolicy(resource=project_id)
cur_policy = request.execute(http=authorize_http)
del cur_policy["etag"]
return cur_policy
def update_policy(resourcemanager_v1, project_id, credentials, new_policy):
"""Returns the new policy associated with project_id.
Args:
resourcemanager_v1: ResourcemanagerV1 stub to call IAM API
    project_id: (str) Project for which to set the policy.
credentials: client credentials
new_policy: New policy to set on the project
"""
http = httplib2.Http()
authorize_http = AuthorizedHttp(credentials, http=http)
set_policy_request = resourcemanager_v1.projects().setIamPolicy(
resource=project_id, body={"policy": new_policy})
return set_policy_request.execute(http=authorize_http)
def get_credentials(service_account_file_path, scopes=None):
"""Returns credentials from a service_account_file_path.
Args:
service_account_file_path: (str) Path to service account key.
scopes: List scopes for service account
"""
if scopes is None:
scopes = ["https://www.googleapis.com/auth/cloud-platform"]
return service_account.Credentials.from_service_account_file(
service_account_file_path, scopes=scopes)
def diff_between_policies(old_policy, new_policy):
"""Returns the difference between two policies.
Args:
old_policy: Old policy
new_policy: New policy
"""
old_bindings = collections.defaultdict(set)
for b in old_policy["bindings"]:
if "condition" in b:
continue
for principal in b["members"]:
old_bindings[principal].add(b["role"])
new_bindings = collections.defaultdict(set)
for b in new_policy["bindings"]:
if "condition" in b:
continue
for principal in b["members"]:
new_bindings[principal].add(b["role"])
all_principals = {*old_bindings.keys(), *new_bindings.keys()}
entries = []
for principal in sorted(all_principals):
new_roles = new_bindings[principal]
old_roles = old_bindings[principal]
if new_roles == old_roles:
continue
removed_roles = old_roles - new_roles
added_roles = new_roles - old_roles
entry = {
"principal": principal,
"removed_roles": list(removed_roles),
"added_roles": list(added_roles)
}
entries.append(entry)
return json.dumps({"diff_policy": entries}, sort_keys=True, indent=4)
def remove_role_from_policy(policy, recommendation):
"""Remove roles for a policy based on recommendations.
Args:
policy: IAM policy.
recommendation: Recommendation on IAM policy.
Returns:
        True if the recommended role removal was applied to the policy (modified in place).
"""
is_acted_recommendation = False
acted_and_succeeded = False
if not recommendation["role_recommended_to_be_removed"]:
return True # No role to be removed.
for binding in policy["bindings"]:
if binding["role"] not in recommendation[
"role_recommended_to_be_removed"]:
continue
if "condition" in binding:
continue
try:
is_acted_recommendation = True
binding["members"].remove(recommendation["principal"])
recommendation["role_recommended_to_be_removed"].remove(
binding["role"])
acted_and_succeeded = True
        except ValueError:
logging.error("`%s` does not have `role:%s`.",
recommendation["principal"],
recommendation["role_recommended_to_be_removed"])
if not is_acted_recommendation:
logging.error("`%s` does not have `role:%s`.",
recommendation["principal"],
recommendation["role_recommended_to_be_removed"])
return is_acted_recommendation and acted_and_succeeded
def add_roles_in_policy(policy, recommendation):
"""Add roles in the policy based on recommendations.
Args:
policy: IAM policy.
recommendation: Recommendation on IAM policy.
Returns:
        True once the recommended roles have been added; the policy is changed in place.
"""
is_acted_recommendation = False
roles_to_be_added = set(
recommendation["roles_recommended_to_be_replaced_with"])
for binding in policy["bindings"]:
if binding["role"] not in roles_to_be_added:
continue
if "condition" in binding:
continue
binding["members"].append(recommendation["principal"])
roles_to_be_added.remove(binding["role"])
for role in roles_to_be_added:
policy["bindings"].append({
"role": role,
"members": [recommendation["principal"]]
})
is_acted_recommendation = True
return is_acted_recommendation
def writefile(data, output_file):
with open(output_file, "w") as f:
f.write(data)
def describe_recommendations(recommendations):
"""Returns a json string representation of recommendation with selected fileds.
Args:
recommendations: List(common.Recommendation)
"""
recommendations_sorted = sorted(recommendations, key=lambda x: x.principal)
data = []
for r in recommendations_sorted:
data.append({
"id": r.name,
"etag": r.etag,
"principal": r.principal,
"role_recommended_to_be_removed": list(r.remove_role),
"roles_recommended_to_be_replaced_with": list(r.add_roles)
})
return json.dumps({"recommendations": data}, indent=4, sort_keys=True)
| {
"content_hash": "14ab2ad1997ea56f783d64be4584ada8",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 94,
"avg_line_length": 35.5878962536023,
"alnum_prop": 0.6132480362782412,
"repo_name": "GoogleCloudPlatform/professional-services",
"id": "d0718bf4fab1818b3817ce09cf4df1f9ca63940f",
"size": "12349",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/iam-recommender-at-scale/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "117994"
},
{
"name": "C++",
"bytes": "174"
},
{
"name": "CSS",
"bytes": "13405"
},
{
"name": "Component Pascal",
"bytes": "798"
},
{
"name": "Dockerfile",
"bytes": "15093"
},
{
"name": "Go",
"bytes": "352968"
},
{
"name": "HCL",
"bytes": "204776"
},
{
"name": "HTML",
"bytes": "1229668"
},
{
"name": "Java",
"bytes": "338810"
},
{
"name": "JavaScript",
"bytes": "59905"
},
{
"name": "Jinja",
"bytes": "60083"
},
{
"name": "Makefile",
"bytes": "14129"
},
{
"name": "Python",
"bytes": "2250081"
},
{
"name": "Scala",
"bytes": "978327"
},
{
"name": "Shell",
"bytes": "109299"
},
{
"name": "Smarty",
"bytes": "19839"
},
{
"name": "TypeScript",
"bytes": "147194"
}
],
"symlink_target": ""
} |
from django.db import models
class Join(models.Model):
email = models.EmailField()
ref_id = models.CharField(max_length=120, default = 'abra')
ip_address = models.CharField(max_length=120, default='ABC')
# dupa = models.CharField(max_length=120, default='dupa')
# kupa = models.CharField(max_length=120, null=True, blank=True)
timestamp = models.DateTimeField(auto_now_add =True, auto_now=False)
updated = models.DateTimeField(auto_now_add =False, auto_now=True)
def __str__(self):
return self.email
class JoinFirends(models.Model):
email = models.OneToOneField(Join, related_name='Sharer')
friends = models.ManyToManyField(Join, related_name='Friend', null=True, blank=True)
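# Note on JoinFirends.friends above: null=True has no effect on a
# ManyToManyField (the relation lives in a separate join table, so an empty
# relation is always allowed); blank=True is what lets forms leave it empty.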
| {
"content_hash": "dee0e5f8ef64761b67e3ef342fe1a9f0",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 88,
"avg_line_length": 36.4,
"alnum_prop": 0.7046703296703297,
"repo_name": "micbuz/project2",
"id": "798d4d34ea8978c2926af350d9eeef9978d3b17f",
"size": "728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boot/joins/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "91640"
},
{
"name": "HTML",
"bytes": "15283"
},
{
"name": "JavaScript",
"bytes": "196154"
},
{
"name": "Python",
"bytes": "35881"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="name", parent_name="sunburst", **kwargs):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs,
)
| {
"content_hash": "e5f53abbcc8178778a2844d6a69b3e52",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 77,
"avg_line_length": 35.45454545454545,
"alnum_prop": 0.6153846153846154,
"repo_name": "plotly/plotly.py",
"id": "2c25c5ac7b063ee35c8db787aedb9e285b1e7fe7",
"size": "390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/sunburst/_name.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem103.py
#
# Special subset sums: optimum
# ============================
# Published on Friday, 26th August 2005, 06:00 pm
#
# Let S(A) represent the sum of elements in set A of size n. We shall call it a
# special sum set if for any two non-empty disjoint subsets, B and C, the
# following properties are true: S(B) ≠ S(C); that is, sums of subsets cannot
# be equal. If B contains more elements than C then S(B) > S(C). If S(A) is
# minimised for a given n, we shall call it an optimum special sum set. The
# first five optimum special sum sets are given below. n = 1: {1} n = 2: {1, 2}
# n = 3: {2, 3, 4} n = 4: {3, 5, 6, 7} n = 5: {6, 9, 11, 12, 13} It seems that
# for a given optimum set, A = {a1, a2, ... , an}, the next optimum set is of
# the form B = {b, a1+b, a2+b, ... ,an+b}, where b is the "middle" element on
# the previous row. By applying this "rule" we would expect the optimum set for
# n = 6 to be A = {11, 17, 20, 22, 23, 24}, with S(A) = 117. However, this is
# not the optimum set, as we have merely applied an algorithm to provide a near
# optimum set. The optimum set for n = 6 is A = {11, 18, 19, 20, 22, 25}, with
# S(A) = 115 and corresponding set string: 111819202225. Given that A is an
# optimum special sum set for n = 7, find its set string. NOTE: This problem is
# related to problems 105 and 106.
import projecteuler as pe
def main():
pass
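# Illustrative helper (not a solution to the problem): the "rule" described in
# the header comment, which derives a near-optimum candidate for n+1 from the
# optimum set for n. As the statement notes, this only gives a starting point.
def near_optimum_candidate(previous_set):
    """Return {b, a1+b, ..., an+b}, where b is the "middle" element of A."""
    a = sorted(previous_set)
    b = a[len(a) // 2]
    return [b] + [x + b for x in a]
# For example, near_optimum_candidate([6, 9, 11, 12, 13]) yields
# [11, 17, 20, 22, 23, 24], the n = 6 candidate with S(A) = 117 quoted above.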
if __name__ == "__main__":
main()
| {
"content_hash": "4aa8daf1deaadb123531be7a1483c8c3",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 47.61290322580645,
"alnum_prop": 0.6422764227642277,
"repo_name": "olduvaihand/ProjectEuler",
"id": "f5e42c60264826297a39bddba835fb26af79e8bd",
"size": "1478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/problem103.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "0"
},
{
"name": "Python",
"bytes": "422751"
}
],
"symlink_target": ""
} |
'''
Problem
=======
A palindromic number reads the same both ways.
The largest palindrome made from the product of two 2-digit numbers is 9009 = 91x99.
Find the largest palindrome made from the product of two 3-digit numbers.
'''
def isPalindrome(num):
num = str(num)
charArr = list(num)
rrArahc = list(num)
rrArahc.reverse()
return charArr == rrArahc
palindromes = [ j*i for i in range(900,1000) for j in range(900,1000) if isPalindrome(j*i)]
print max(palindromes) #should be the only print statement
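# Note: limiting both factors to range(900, 1000) is a shortcut rather than the
# full 100-999 search, but it still finds the answer, since both factors of the
# largest palindrome (913 * 993 = 906609) lie in that range.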
| {
"content_hash": "c31845dc608c943068f25b27e12560c9",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 92,
"avg_line_length": 29.27777777777778,
"alnum_prop": 0.7096774193548387,
"repo_name": "ryanbmilbourne/euler",
"id": "9b639b752b85e3e3f826bdd385040b3f377a0841",
"size": "550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/4.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "16870"
},
{
"name": "Python",
"bytes": "4400"
}
],
"symlink_target": ""
} |
'''Generate bracket sequences built from m "()", n "[]" and k "{}" pairs.'''
# pylint: disable = I0011, E0401, C0103, C0321
class Solution(object):
    '''Generate sequences of m round, n square and k curly bracket pairs, each type balanced.'''
def func(self, m, n, k):
        '''Return all such bracket sequences as lists of characters.'''
res = []
self.helper(res, [], 0, 0, 0, 0, 0, 0, m, n, k)
return res
def helper(self, total, part, counter1, counter2, counter3, round, square, flower, m, n, k):
if counter1 < 0 or counter2 < 0 or counter3 < 0: return
if counter1 > m or counter2 > n or counter3 > k: return
if round > m or square > n or flower > k: return
if part and len(part) > (m + n + k) * 2: return
if part and len(part) == (m + n + k) * 2 and counter1 == 0 and counter2 == 0 and counter3 == 0 and\
round == m and square == n and flower == k:
total.append(part)
return
self.helper(total, part+['('], counter1+1, counter2, counter3, round, square, flower, m, n, k)
self.helper(total, part+[')'], counter1-1, counter2, counter3, round+1, square, flower, m, n, k)
self.helper(total, part+['['], counter1, counter2+1, counter3, round, square, flower, m, n, k)
self.helper(total, part+[']'], counter1, counter2-1, counter3, round, square+1, flower, m, n, k)
self.helper(total, part+['{'], counter1, counter2, counter3+1, round, square, flower, m, n, k)
self.helper(total, part+['}'], counter1, counter2, counter3-1, round, square, flower+1, m, n, k)
def main():
'''main function'''
_solution = Solution()
inp = [(1, 1, 1)]
for i in inp:
for r in _solution.func(i[0], i[1], i[2]):
print(r)
if __name__ == "__main__":
main()
| {
"content_hash": "4e94245c988cf1574bcb4f659309f25e",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 107,
"avg_line_length": 46.69444444444444,
"alnum_prop": 0.5651397977394408,
"repo_name": "cerebrumaize/leetcode",
"id": "3f7d7cf0ca06d29dcbbc4013fa0ae80965c05cf9",
"size": "1703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generate.multi.parentheses/1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "421"
},
{
"name": "Python",
"bytes": "277925"
},
{
"name": "TeX",
"bytes": "656"
}
],
"symlink_target": ""
} |
import datetime
import re
def string_to_date(s, format='%Y%m%d', strict=False):
"""
Convert a string to a datetime.date object
Returns ``None`` on dates that it can't parse unless you call it
with the kwarg ``strict`` set to ``True``.
"""
try:
return datetime.datetime.strptime(s, format).date()
except (TypeError, ValueError) as e:
if strict:
raise e
def parse_num_from_string(s):
"""
Parses out first integer from a string
Only works on integers that stand alone. Returns ``None`` on
strings without an integer.
"""
    match = re.match(r'[^\d]*(\d+).*', s)
if match:
return int(match.groups()[0])
def extract_filing_date(s, strict=False):
"""
Convert a filing list date into a datetime.date object
This works like ``string_to_date`` and returns ``None`` when it
can't parse a date from the string unless ``strict`` is set to
``True``.
"""
if s:
date_string = re.sub(r'(st|nd|rd|th),', ',', s.split(':')[1].strip())
date_string = date_string.replace(' ', '').strip()
else:
date_string = s
return string_to_date(date_string, format='%B %d, %Y', strict=strict)
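# Doctest-style examples (the input strings here are made up):
#
#   >>> string_to_date('20240115')
#   datetime.date(2024, 1, 15)
#   >>> string_to_date('not a date') is None   # lenient unless strict=True
#   True
#   >>> parse_num_from_string('Report No. 42, amended')
#   42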
| {
"content_hash": "45372fe13e8d2155086ba75f84d0a851",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 77,
"avg_line_length": 27.2,
"alnum_prop": 0.5972222222222222,
"repo_name": "texas/tx_tecreports",
"id": "63ef0624426cca48dcbba012949e731f1ca67e08",
"size": "1224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tx_tecreports/fetcher/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "188374"
},
{
"name": "Ruby",
"bytes": "43"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="smartbackup",
version="0.1.0",
author="alex",
author_email="[email protected]",
description="Backup tools using the framework bakthat.",
license="MIT",
keywords="s3 backup tools",
url="https://github.com/alexsilva/smartbackup",
packages=find_packages(exclude=[]),
long_description=read('README.md'),
install_requires=["bakthat", "filechunkio"],
entry_points={'console_scripts': ["smartbackup = smartbackup:main"]},
classifiers=[
"Development Status :: 1 - Beta",
"Intended Audience :: Developers",
"Topic :: System :: Archiving :: Backup",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
],
zip_safe=False,
)
| {
"content_hash": "4b21ee4f7dd5819f2ac7d7c635fd5bf3",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 73,
"avg_line_length": 31,
"alnum_prop": 0.639599555061179,
"repo_name": "alexsilva/smartbackup",
"id": "4fb73a05421e7d1bf95cb470d9ab104d954723c5",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17132"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import threading
from past.builtins import basestring
from caffe2.proto import caffe2_pb2
# The name scope and device scope when creating a new operator.
_NAMESCOPE_SEPARATOR = '/'
_threadlocal_scope = threading.local()
def CurrentNameScope():
global _threadlocal_scope
if not hasattr(_threadlocal_scope, "namescope"):
_threadlocal_scope.namescope = ''
return _threadlocal_scope.namescope
def CurrentDeviceScope():
global _threadlocal_scope
if not hasattr(_threadlocal_scope, "devicescope"):
_threadlocal_scope.devicescope = None
return _threadlocal_scope.devicescope
@contextlib.contextmanager
def NameScope(prefix, reset=False):
global _threadlocal_scope
assert isinstance(prefix, basestring) or prefix is None, \
"NameScope takes in a string as its argument."
old_scope = CurrentNameScope()
prefix = prefix + _NAMESCOPE_SEPARATOR if prefix else ''
if reset:
_threadlocal_scope.namescope = prefix
else:
_threadlocal_scope.namescope = _threadlocal_scope.namescope + prefix
try:
yield
finally:
assert _threadlocal_scope.namescope.endswith(prefix), \
"The namescope variable is changed from outside NameScope() calls."
_threadlocal_scope.namescope = old_scope
@contextlib.contextmanager
def DeviceScope(scope, node_name=None):
new_scope = caffe2_pb2.DeviceOption()
if scope:
assert isinstance(scope, caffe2_pb2.DeviceOption), \
"DeviceScope takes in a caffe2_pb2.DeviceOption as its argument."
new_scope.CopyFrom(scope)
else:
assert node_name, "At least one argument should be non-null in DeviceScope"
# rewrite node_name if it is explicitly given
if node_name:
new_scope.node_name = node_name
global _threadlocal_scope
old_scope = CurrentDeviceScope()
# nested scope should inherit the node_name if it is not explicitly set
if old_scope and old_scope.HasField('node_name') and \
not new_scope.HasField('node_name'):
new_scope.node_name = old_scope.node_name
_threadlocal_scope.devicescope = new_scope
try:
yield
finally:
assert _threadlocal_scope.devicescope == new_scope, \
"The device scope is changed from outside DeviceScope() calls."
_threadlocal_scope.devicescope = old_scope
@contextlib.contextmanager
def EmptyDeviceScope():
"""
Allow users to 'disable' the device scope behaviour (so it can be
controlled at a NetDef::DeviceOption level, not overridden at
OperatorDef::DeviceOption level).
This sets the CurrentDeviceScope() to None, so that the field is
not set in CreateOperator(...), etc.
"""
old_scope = CurrentDeviceScope()
try:
_threadlocal_scope.devicescope = None
yield
finally:
_threadlocal_scope.devicescope = old_scope
return
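if __name__ == '__main__':
    # Usage sketch (illustrative; the scope names are arbitrary). Name scopes
    # nest and are restored on exit; device scopes behave the same way, and
    # EmptyDeviceScope() temporarily clears the current one.
    with NameScope('init'):
        assert CurrentNameScope() == 'init/'
        with NameScope('conv1'):
            assert CurrentNameScope() == 'init/conv1/'
    assert CurrentNameScope() == ''

    device_option = caffe2_pb2.DeviceOption()
    with DeviceScope(device_option, node_name='worker_0'):
        assert CurrentDeviceScope().node_name == 'worker_0'
        with EmptyDeviceScope():
            assert CurrentDeviceScope() is None
    assert CurrentDeviceScope() is None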
| {
"content_hash": "ad1d8424f9cf1ad60443282d4454bfe8",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 83,
"avg_line_length": 31.783505154639176,
"alnum_prop": 0.6925072980862796,
"repo_name": "Yangqing/caffe2",
"id": "148980f902e7cb215a54aee7d232be9b696da1db",
"size": "3801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caffe2/python/scope.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3327"
},
{
"name": "C",
"bytes": "691775"
},
{
"name": "C++",
"bytes": "5773620"
},
{
"name": "CMake",
"bytes": "313982"
},
{
"name": "CSS",
"bytes": "2196"
},
{
"name": "Cuda",
"bytes": "2051079"
},
{
"name": "HTML",
"bytes": "5203"
},
{
"name": "Makefile",
"bytes": "15290"
},
{
"name": "Metal",
"bytes": "41257"
},
{
"name": "Objective-C",
"bytes": "6505"
},
{
"name": "Objective-C++",
"bytes": "253857"
},
{
"name": "Python",
"bytes": "3805476"
},
{
"name": "Shell",
"bytes": "73185"
}
],
"symlink_target": ""
} |
'''Implements EWrapper subclass SynchronizedWrapper.'''
__copyright__ = "Copyright (c) 2009 Kevin J Bluck"
__version__ = "$Id$"
import Queue
from tws import EWrapper
class SynchronizedWrapper(EWrapper):
'''Synchronizes wrapper events into another thread.
Since the client socket is read in a worker thread, calls to
EWrapper will be made in the context of that thread, potentially
leading to synchronization errors. Constructing an instance of this
class with a chained EWrapper instance ensures that the chained
EWrapper's event method calls will be synchronized into a desired
client thread, usually the main thread.
The client thread must periodically invoke the dispatch() method to
cause waiting events to be invoked upon the chained EWrapper. The
exact means of starting dispatch will depend on the nature of the
application; command lines will do things differently than GUIs, and
any given GUI framework will have its own method. Timers, idle event
handlers, or simple polling loops are all possible means of making
dispatch happen at frequent intervals. It is important to understand
that nothing will happen until dispatch() is called, so it should be
called as frequently as necessary to avoid undesirable latency.
'''
_queue_factory = Queue.Queue
_queue_empty = Queue.Empty
def __init__(self, wrapper):
assert(isinstance(wrapper, __import__("tws").EWrapper))
super(SynchronizedWrapper, self).__init__()
self._queue = self._queue_factory()
self._chained_wrapper = wrapper
def _put_wrapper_call(self, method_name, *args):
self._queue.put(item=(method_name, args), block=False)
def dispatch(self):
'''Invokes waiting wrapper calls.
Wrapper calls are collected asynchronously from the socket reader
thread while the main thread executes. Periodically, this method
must be invoked to dispatch those waiting calls into the chained
EWrapper object. All waiting calls will be dispatched one after
another until the waiting queue is empty or an exception occurs.
'''
try:
# Keep trying to suck in waiting items and invoke the matching
# named methods on the chained wrapper object.
while True:
item = self._queue.get(block=False)
getattr(self._chained_wrapper, item[0])(*item[1])
# Once waiting queue is empty, just return.
except self._queue_empty:
return
def error(self, e):
self._put_wrapper_call("error", e)
def connectionClosed(self):
self._put_wrapper_call("connectionClosed")
def tickPrice(self, tickerId, field, price, canAutoExecute):
self._put_wrapper_call("tickPrice", tickerId, field, price,
canAutoExecute)
def tickSize(self, tickerId, field, size):
self._put_wrapper_call("tickSize", tickerId, field, size)
def tickOptionComputation(self, tickerId, field, impliedVol,
delta, modelPrice, pvDividend):
self._put_wrapper_call("tickOptionComputation", tickerId, field,
impliedVol, delta, modelPrice, pvDividend)
def tickGeneric(self, tickerId, tickType, value):
self._put_wrapper_call("tickGeneric", tickerId, tickType, value)
def tickString(self, tickerId, tickType, value):
self._put_wrapper_call("tickString", tickerId, tickType, value)
def tickEFP(self, tickerId, tickType, basisPoints, formattedBasisPoints,
impliedFuture, holdDays, futureExpiry, dividendImpact,
dividendsToExpiry):
self._put_wrapper_call("tickEFP", tickerId, tickType, basisPoints,
formattedBasisPoints, impliedFuture,
holdDays, futureExpiry, dividendImpact,
dividendsToExpiry)
def orderStatus(self, orderId, status, filled, remaining, avgFillPrice,
permId, parentId, lastFillPrice, clientId, whyHeld):
self._put_wrapper_call("orderStatus", orderId, status, filled,
remaining, avgFillPrice, permId, parentId,
lastFillPrice, clientId, whyHeld)
def openOrder(self, orderId, contract, order, orderState):
self._put_wrapper_call("openOrder", orderId, contract, order,
orderState)
def openOrderEnd(self):
self._put_wrapper_call("openOrderEnd")
def updateAccountValue(self, key, value, currency, accountName):
self._put_wrapper_call("updateAccountValue", key, value, currency,
accountName)
def updatePortfolio(self, contract, position, marketPrice, marketValue,
averageCost, unrealizedPNL, realizedPNL, accountName):
self._put_wrapper_call("updatePortfolio", contract, position,
marketPrice, marketValue, averageCost,
unrealizedPNL, realizedPNL, accountName)
def updateAccountTime(self, timeStamp):
self._put_wrapper_call("updateAccountTime", timeStamp)
def accountDownloadEnd(self, accountName):
self._put_wrapper_call("accountDownloadEnd", accountName)
def nextValidId(self, orderId):
self._put_wrapper_call("nextValidId", orderId)
def contractDetails(self, reqId, contractDetails):
self._put_wrapper_call("contractDetails", reqId, contractDetails)
def bondContractDetails(self, reqId, contractDetails):
self._put_wrapper_call("bondContractDetails", reqId, contractDetails)
def contractDetailsEnd(self, reqId):
self._put_wrapper_call("contractDetailsEnd", reqId)
def execDetails(self, reqId, contract, execution):
self._put_wrapper_call("execDetails", reqId, contract, execution)
def execDetailsEnd(self, reqId):
self._put_wrapper_call("execDetailsEnd", reqId)
def updateMktDepth(self, tickerId, position, operation, side, price, size):
self._put_wrapper_call("updateMktDepth", tickerId, position,
operation, side, price, size)
def updateMktDepthL2(self, tickerId, position, marketMaker, operation,
side, price, size):
self._put_wrapper_call("updateMktDepthL2", tickerId, position,
marketMaker, operation, side, price, size)
def updateNewsBulletin(self, msgId, msgType, message, origExchange):
self._put_wrapper_call("updateNewsBulletin", msgId, msgType, message,
origExchange)
def managedAccounts(self, accountsList):
self._put_wrapper_call("managedAccounts", accountsList)
def receiveFA(self, faDataType, xml):
self._put_wrapper_call("receiveFA", faDataType, xml)
def historicalData(self, reqId, date, open, high, low, close, volume,
count, wap, hasGaps):
self._put_wrapper_call("historicalData", reqId, date, open, high,
low, close, volume, count, wap, hasGaps)
def scannerParameters(self, xml):
self._put_wrapper_call("scannerParameters", xml)
def scannerData(self, reqId, rank, contractDetails, distance, benchmark,
projection, legsStr):
self._put_wrapper_call("scannerData", reqId, rank, contractDetails,
distance, benchmark, projection, legsStr)
def scannerDataEnd(self, reqId):
self._put_wrapper_call("scannerDataEnd", reqId)
def realtimeBar(self, reqId, time, open, high, low, close, volume, wap,
count):
self._put_wrapper_call("realtimeBar", reqId, time, open, high, low,
close, volume, wap, count)
def currentTime(self, time):
self._put_wrapper_call("currentTime", time)
def fundamentalData(self, reqId, data):
self._put_wrapper_call("fundamentalData", reqId, data)
def deltaNeutralValidation(self, reqId, underComp):
self._put_wrapper_call("deltaNeutralValidation", reqId, underComp)
del EWrapper
del Queue
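# Usage sketch (illustrative; MyWrapper and the polling loop are hypothetical
# client code, not part of this module):
#
#   wrapper = SynchronizedWrapper(MyWrapper())
#   # ...hand `wrapper` to the connection so the socket reader thread queues
#   # events on it, then drain the queue from the client thread:
#   while running:              # main loop, GUI timer, or idle handler
#       wrapper.dispatch()      # invokes queued EWrapper calls in this thread
#       time.sleep(0.05)        # poll often enough to keep latency acceptable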
| {
"content_hash": "aa659351a6bf1543e5433a7cb4542cf0",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 79,
"avg_line_length": 36.32034632034632,
"alnum_prop": 0.6388557806912991,
"repo_name": "kbluck/pytws",
"id": "b0ae23cc2a681588a43d848e6459fdfdc720ded5",
"size": "8390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tws/helper/_synchronizedwrapper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "393950"
},
{
"name": "Shell",
"bytes": "463"
}
],
"symlink_target": ""
} |
from runner.koan import *
class AboutClasses(Koan):
class Dog(object):
"Dogs need regular walkies. Never, ever let them drive."
def test_instances_of_classes_can_be_created_adding_parentheses(self):
fido = self.Dog()
self.assertEqual('Dog', type(fido).__name__)
def test_classes_have_docstrings(self):
self.assertMatch('Dogs need regular walkies.', self.Dog.__doc__)
# ------------------------------------------------------------------
class Dog2(object):
def __init__(self):
self._name = 'Paul'
def set_name(self, a_name):
self._name = a_name
def test_init_method_is_the_constructor(self):
dog = self.Dog2()
self.assertEqual('Paul', dog._name)
def test_private_attributes_are_not_really_private(self):
dog = self.Dog2()
dog.set_name("Fido")
self.assertEqual('Fido', dog._name)
# The _ prefix in _name implies private ownership, but nothing is truly
# private in Python.
def test_you_can_also_access_the_value_out_using_getattr_and_dict(self):
fido = self.Dog2()
fido.set_name("Fido")
self.assertEqual('Fido', getattr(fido, "_name"))
# getattr(), setattr() and delattr() are a way of accessing attributes
# by method rather than through assignment operators
self.assertEqual('Fido', fido.__dict__["_name"])
# Yes, this works here, but don't rely on the __dict__ object! Some
# class implementations use optimization which result in __dict__ not
# showing everything.
# ------------------------------------------------------------------
class Dog3(object):
def __init__(self):
self._name = None
def set_name(self, a_name):
self._name = a_name
def get_name(self):
return self._name
name = property(get_name, set_name)
def test_that_name_can_be_read_as_a_property(self):
fido = self.Dog3()
fido.set_name("Fido")
self.assertEqual('Fido', fido.get_name()) # access as method
self.assertEqual('Fido', fido.name) # access as property
# ------------------------------------------------------------------
class Dog4(object):
def __init__(self):
self._name = None
@property
def name(self):
return self._name
@name.setter
def name(self, a_name):
self._name = a_name
def test_creating_properties_with_decorators_is_slightly_easier(self):
fido = self.Dog4()
fido.name = "Fido"
self.assertEqual('Fido', fido.name)
# ------------------------------------------------------------------
class Dog5(object):
def __init__(self, initial_name):
self._name = initial_name
@property
def name(self):
return self._name
def test_init_provides_initial_values_for_instance_variables(self):
fido = self.Dog5("Fido")
self.assertEqual('Fido', fido.name)
def test_args_must_match_init(self):
self.assertRaises(TypeError, self.Dog5) # Evaluates self.Dog5()
# THINK ABOUT IT:
# Why is this so?
def test_different_objects_have_difference_instance_variables(self):
fido = self.Dog5("Fido")
rover = self.Dog5("Rover")
self.assertEqual(False, rover.name == fido.name)
# ------------------------------------------------------------------
class Dog6(object):
def __init__(self, initial_name):
self._name = initial_name
def get_self(self):
return self
def __str__(self):
return self._name
def __repr__(self):
return "<Dog named '" + self._name + "'>"
def test_inside_a_method_self_refers_to_the_containing_object(self):
fido = self.Dog6("Fido")
self.assertEqual(fido, fido.get_self()) # Not a string!
def test_str_provides_a_string_version_of_the_object(self):
fido = self.Dog6("Fido")
self.assertEqual("Fido", str(fido))
def test_str_is_used_explicitly_in_string_interpolation(self):
fido = self.Dog6("Fido")
self.assertEqual('My dog is Fido', "My dog is " + str(fido))
def test_repr_provides_a_more_complete_string_version(self):
fido = self.Dog6("Fido")
self.assertEqual("<Dog named 'Fido'>", repr(fido))
def test_all_objects_support_str_and_repr(self):
seq = [1, 2, 3]
self.assertEqual("[1, 2, 3]", str(seq))
self.assertEqual("[1, 2, 3]", repr(seq))
self.assertEqual("STRING", str("STRING"))
self.assertEqual("'STRING'", repr("STRING"))
| {
"content_hash": "513fdcf354b575d9eec24c86e101cdff",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 79,
"avg_line_length": 30.876623376623378,
"alnum_prop": 0.5392218717139853,
"repo_name": "ducngtuan/my-python3-koans-solution",
"id": "a37c5ad301e2f54b72b333460865a4ab130da13d",
"size": "4802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python2/koans/about_classes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "4524"
},
{
"name": "Python",
"bytes": "323126"
},
{
"name": "Ruby",
"bytes": "48"
},
{
"name": "Shell",
"bytes": "1637"
}
],
"symlink_target": ""
} |
import sys
import gtk
import pango
import traceback
from StringIO import StringIO
import gtksourceview2 as gtksourceview
class SourcePad(gtk.ScrolledWindow):
"""This class represents a source code editor""" # No used yet!
def __init__(self, application):
gtk.ScrolledWindow.__init__(self)
self.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
adjustment = self.get_vadjustment()
adjustment.need_scroll = True
adjustment.connect("changed", self.update_adjustment)
adjustment.connect("value-changed", self.update_value)
self.buffer = gtksourceview.Buffer()
entry = gtksourceview.View(self.buffer)
entry.set_size_request(-1, 100)
#self.disconnect_handler = buffer.connect("changed", self.changed)
self.buffer.connect("insert-text", self.update_scroll, entry)
#self.add_with_viewport(entry)
self.add(entry)
entry.set_wrap_mode(gtk.WRAP_WORD_CHAR)
entry.set_wrap_mode(gtk.WRAP_CHAR)
font = pango.FontDescription('monospace')
entry.modify_font(font)
entry.set_show_line_numbers(True)
entry.set_show_line_marks(True)
entry.set_tab_width(8)
entry.set_auto_indent(True)
entry.set_insert_spaces_instead_of_tabs(False)
entry.set_show_right_margin(True)
entry.set_right_margin(30)
#entry.set_marker_pixbuf(marker_type, pixbuf)
entry.set_smart_home_end(True)
entry.connect("focus-in-event", self.focus_in)
entry.connect("focus-out-event", self.focus_out)
self.buffer.set_highlight_syntax(True)
self.buffer.set_max_undo_levels(10)
self.buffer.set_highlight_matching_brackets(True)
self.set_language("python") # default
#from application import Application
#self.application = Application()
self.application = application
def focus_in(self, event, data):
self.application.disable_bindings()
def focus_out(self, event, data):
self.application.enable_bindings()
def set_language(self, language):
manager = gtksourceview.LanguageManager()
srclang = manager.get_language(language)
self.buffer.set_language(srclang)
def update_scroll(self, buffer, iter, text, length, view):
mark = buffer.create_mark("end", iter, False)
view.scroll_mark_onscreen(mark)
# Methods for update the scrollbars of text area.
def update_adjustment(self, adjustment):
if adjustment.need_scroll:
adjustment.set_value(adjustment.upper - adjustment.page_size)
adjustment.need_scroll = True
def update_value(self, adjustment):
adjustment.need_scroll = abs(
adjustment.value + adjustment.page_size - adjustment.upper) < adjustment.step_increment
class CodeEditor(gtk.VBox):
"""This class represents a source code editor""" # No used yet!
def __init__(self, application):
gtk.VBox.__init__(self)
handle = gtk.HandleBox()
handle.set_handle_position(gtk.POS_LEFT)
self.pack_start(handle, False, False)
toolbar = gtk.Toolbar()
toolbar.set_orientation(gtk.ORIENTATION_HORIZONTAL)
#toolbar.set_style(gtk.TOOLBAR_ICONS)
toolbar.set_style(gtk.TOOLBAR_BOTH_HORIZ)
toolbar.set_icon_size(gtk.ICON_SIZE_MENU)
handle.add(toolbar)
position = 0
button = gtk.ToolButton(gtk.STOCK_MEDIA_PLAY)
button.connect("clicked", self.run)
toolbar.insert(button, position)
position += 1
button = gtk.ToolButton(gtk.STOCK_MEDIA_STOP)
toolbar.insert(button, position)
panel = gtk.HPaned()
panel.set_position(75) # TODO calculate
self.add(panel)
self.editor = SourcePad(application)
panel.pack1(self.editor, True, False)
view = gtk.ScrolledWindow()
view.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
panel.pack2(view, False, True)
output = gtk.TextView()
font = pango.FontDescription('monospace')
output.modify_font(font)
self.buffer = gtk.TextBuffer()
self.buffer.connect_after('insert-text', self.text_inserted, view)
output.set_buffer(self.buffer)
view.add(output)
self.tags = []
self.buffer.create_tag("normal", editable=False, wrap_mode=gtk.WRAP_WORD_CHAR)
self.buffer.create_tag("error", foreground="#f00", weight=pango.WEIGHT_BOLD, style=pango.STYLE_ITALIC)
self.tags.append('normal')
def set_error(self):
self.tags.append('error')
def unset_error(self):
if 'error' in self.tags:
del self.tags[self.tags.index('error')]
def text_inserted(self, buffer, iter, text, length, view):
position = buffer.get_iter_at_mark(buffer.get_insert())
iter.backward_chars(length)
for tag in self.tags:
buffer.apply_tag_by_name(tag, position, iter)
def run(self, widget):
buffer = self.editor.buffer
start = buffer.get_start_iter()
end = buffer.get_end_iter()
code = buffer.get_text(start, end)
stdio = (sys.stdin, sys.stdout, sys.stderr)
io = StringIO()
sys.stdout = sys.stderr = io
self.unset_error()
try:
exec(code, locals(), globals())
output = io.getvalue()
except Exception, exception:
self.set_error()
output = str(exception) + "\n" + traceback.format_exc()
sys.stdin, sys.stdout, sys.stderr = stdio
self.buffer.set_text(output)
if __name__ == '__main__':
window = gtk.Window()
window.connect("delete-event", gtk.main_quit)
editor = CodeEditor()
window.add(editor)
window.show_all()
gtk.main()
| {
"content_hash": "364649a34d2d6d9cc415f37ee4201a77",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 110,
"avg_line_length": 35.28484848484848,
"alnum_prop": 0.6344898660254208,
"repo_name": "jaliste/sanaviron.gtk-3",
"id": "185ef6eea057945cf49c475d281d8b2c5239a9db",
"size": "5864",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sanaviron/ui/codeeditor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "268444"
},
{
"name": "C++",
"bytes": "25424"
},
{
"name": "Erlang",
"bytes": "58"
},
{
"name": "Makefile",
"bytes": "5198"
},
{
"name": "Perl",
"bytes": "8016"
},
{
"name": "Python",
"bytes": "480779"
},
{
"name": "Shell",
"bytes": "3336"
}
],
"symlink_target": ""
} |
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': '127.0.0.1:11211',
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
DEBUG_TOOLBAR_PATCH_SETTINGS = False
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
| {
"content_hash": "bc18f3db07a91e6aad5dfdbfc0b5f580",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 80,
"avg_line_length": 31.35483870967742,
"alnum_prop": 0.49074074074074076,
"repo_name": "narayanaditya95/Robotix",
"id": "19bda00863c6dd284fdaf97c9a1964a3b5c2c8be",
"size": "1968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/local.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "443687"
},
{
"name": "HTML",
"bytes": "30557"
},
{
"name": "JavaScript",
"bytes": "11382"
},
{
"name": "Makefile",
"bytes": "933"
},
{
"name": "Python",
"bytes": "29848"
},
{
"name": "Shell",
"bytes": "3620"
}
],
"symlink_target": ""
} |
from functools import wraps
from pythonwrap import Parser, Client, Channel
import numerics
def register(command, min_args=0, max_args=0, access=1, rate_control=0):
def decorator(func):
Parser.Register(command, func,
min_args=min_args,
max_args=max_args,
access=access,
rate_control=rate_control
)
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
return decorator
def event(evt):
def decorator(func):
if not evt.listeners:
evt.listeners = []
evt.listeners.append(func)
if not evt.handler:
def handler(*args, **kwargs):
ret = True
for listener in evt.listeners:
ret = listener(*args, **kwargs)
if not ret:
break
return ret
evt.handler = handler
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
return decorator
class Target:
ANY = 0
CHANNEL = 1
NICK = 2
def have_target(target_type=Target.ANY, numeric=numerics.ERR_NOSUCHNICK, epilog=None):
def wrapper(func):
@wraps(func)
def decorator(client, name, *args, **kwargs):
_numeric = numeric
wants_channel = name[0] == '#'
target = None
if target_type == Target.CHANNEL or wants_channel:
if _numeric == numerics.ERR_NOSUCHNICK:
_numeric = numerics.ERR_NOSUCHCHANNEL
if wants_channel:
target = Channel.find(name)
else:
_numeric = numerics.ERR_BADCHANNAME
else:
target = Client.find_by_name(name)
if not target:
client.numeric(_numeric, name)
if epilog:
client.numeric(epilog, name)
return
else:
return func(client, target, *args, **kwargs)
return decorator
return wrapper
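# Usage sketch (illustrative; the WHOIS handler below is hypothetical and not
# part of this module):
#
#   @register('WHOIS', min_args=1, max_args=1)
#   @have_target(target_type=Target.NICK)
#   def handle_whois(client, target):
#       # `target` is already resolved; a missing nick was answered with
#       # ERR_NOSUCHNICK by have_target before this body runs.
#       ...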
| {
"content_hash": "e644a0a591d59ee4fc6c0b9fcb043532",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 86,
"avg_line_length": 22.9625,
"alnum_prop": 0.6096897114861187,
"repo_name": "oftc/oftc-ircd",
"id": "c834c221625e7b8588b15c7bf4f1b9b2279e45d0",
"size": "2954",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "modules/python/ircd/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "12555"
},
{
"name": "C++",
"bytes": "229190"
},
{
"name": "Python",
"bytes": "27728"
},
{
"name": "Shell",
"bytes": "183"
}
],
"symlink_target": ""
} |
"""
Production Configurations
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis for cache
"""
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .base import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com', ])
# END SITE CONFIGURATION
INSTALLED_APPS += ['gunicorn', ]
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += ['storages', ]
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
# See:http://stackoverflow.com/questions/10390244/
from storages.backends.s3boto import S3BotoStorage
StaticRootS3BotoStorage = lambda: S3BotoStorage(location='static')
MediaRootS3BotoStorage = lambda: S3BotoStorage(location='media')
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage'
MEDIA_URL = 'https://s3.amazonaws.com/%s/media/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATIC_URL = 'https://s3.amazonaws.com/%s/static/' % AWS_STORAGE_BUCKET_NAME
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3BotoStorage'
# See: https://github.com/antonagestam/collectfast
# For Django 1.7+, 'collectfast' should come before
# 'django.contrib.staticfiles'
AWS_PRELOAD_METADATA = True
INSTALLED_APPS = ['collectfast', ] + INSTALLED_APPS
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='Heroes <[email protected]>')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[Heroes]')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# Anymail with Mailgun
INSTALLED_APPS += ['anymail', ]
ANYMAIL = {
'MAILGUN_API_KEY': env('DJANGO_MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_SENDER_DOMAIN')
}
EMAIL_BACKEND = 'anymail.backends.mailgun.MailgunBackend'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Use the Heroku-style specification
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
# CACHING
# ------------------------------------------------------------------------------
REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0)
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_LOCATION,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false', ],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins', ],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins', ],
'propagate': True
}
}
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| {
"content_hash": "dab3f107a412ebc8ecc3fe02a45f1728",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 117,
"avg_line_length": 36.36407766990291,
"alnum_prop": 0.6072620477906822,
"repo_name": "devrishik/Heroes",
"id": "7ffc964d302fc95080f043c1f99774e844b14a33",
"size": "7515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/production.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2142"
},
{
"name": "HTML",
"bytes": "23986"
},
{
"name": "JavaScript",
"bytes": "817"
},
{
"name": "Python",
"bytes": "57731"
},
{
"name": "Shell",
"bytes": "4200"
}
],
"symlink_target": ""
} |
from .TProtocol import *
from struct import pack, unpack
class TBinaryProtocol(TProtocolBase):
"""Binary implementation of the Thrift protocol driver."""
# NastyHaxx. Python 2.4+ on 32-bit machines forces hex constants to be
# positive, converting this into a long. If we hardcode the int value
# instead it'll stay in 32 bit-land.
# VERSION_MASK = 0xffff0000
VERSION_MASK = -65536
# VERSION_1 = 0x80010000
VERSION_1 = -2147418112
TYPE_MASK = 0x000000ff
def __init__(self, trans, strictRead=False, strictWrite=True):
TProtocolBase.__init__(self, trans)
self.strictRead = strictRead
self.strictWrite = strictWrite
def writeMessageBegin(self, name, type, seqid):
if self.strictWrite:
self.writeI32(TBinaryProtocol.VERSION_1 | type)
self.writeString(name.encode('utf-8'))
self.writeI32(seqid)
else:
self.writeString(name.encode('utf-8'))
self.writeByte(type)
self.writeI32(seqid)
def writeMessageEnd(self):
pass
def writeStructBegin(self, name):
pass
def writeStructEnd(self):
pass
def writeFieldBegin(self, name, type, id):
self.writeByte(type)
self.writeI16(id)
def writeFieldEnd(self):
pass
def writeFieldStop(self):
self.writeByte(TType.STOP)
def writeMapBegin(self, ktype, vtype, size):
self.writeByte(ktype)
self.writeByte(vtype)
self.writeI32(size)
def writeMapEnd(self):
pass
def writeListBegin(self, etype, size):
self.writeByte(etype)
self.writeI32(size)
def writeListEnd(self):
pass
def writeSetBegin(self, etype, size):
self.writeByte(etype)
self.writeI32(size)
def writeSetEnd(self):
pass
def writeBool(self, bool):
if bool:
self.writeByte(1)
else:
self.writeByte(0)
def writeByte(self, byte):
buff = pack("!b", byte)
self.trans.write(buff)
def writeI16(self, i16):
buff = pack("!h", i16)
self.trans.write(buff)
def writeI32(self, i32):
buff = pack("!i", i32)
self.trans.write(buff)
def writeI64(self, i64):
buff = pack("!q", i64)
self.trans.write(buff)
def writeDouble(self, dub):
buff = pack("!d", dub)
self.trans.write(buff)
def writeString(self, wstr):
self.writeI32(len(wstr))
self.trans.write(wstr)
def readMessageBegin(self):
sz = self.readI32()
if sz < 0:
version = sz & TBinaryProtocol.VERSION_MASK
if version != TBinaryProtocol.VERSION_1:
raise TProtocolException(
type=TProtocolException.BAD_VERSION,
message='Bad version in readMessageBegin: %d' % (sz))
type = sz & TBinaryProtocol.TYPE_MASK
name = self.readString().decode('utf-8')
seqid = self.readI32()
else:
if self.strictRead:
raise TProtocolException(type=TProtocolException.BAD_VERSION,
message='No protocol version header')
name = self.trans.readAll(sz)
type = self.readByte()
seqid = self.readI32()
return (name, type, seqid)
def readMessageEnd(self):
pass
def readStructBegin(self):
pass
def readStructEnd(self):
pass
def readFieldBegin(self):
type = self.readByte()
if type == TType.STOP:
return (None, type, 0)
id = self.readI16()
return (None, type, id)
def readFieldEnd(self):
pass
def readMapBegin(self):
ktype = self.readByte()
vtype = self.readByte()
size = self.readI32()
return (ktype, vtype, size)
def readMapEnd(self):
pass
def readListBegin(self):
etype = self.readByte()
size = self.readI32()
return (etype, size)
def readListEnd(self):
pass
def readSetBegin(self):
etype = self.readByte()
size = self.readI32()
return (etype, size)
def readSetEnd(self):
pass
def readBool(self):
byte = self.readByte()
if byte == 0:
return False
return True
def readByte(self):
buff = self.trans.readAll(1)
val, = unpack('!b', buff)
return val
def readI16(self):
buff = self.trans.readAll(2)
val, = unpack('!h', buff)
return val
def readI32(self):
buff = self.trans.readAll(4)
val, = unpack('!i', buff)
return val
def readI64(self):
buff = self.trans.readAll(8)
val, = unpack('!q', buff)
return val
def readDouble(self):
buff = self.trans.readAll(8)
val, = unpack('!d', buff)
return val
def readString(self):
len = self.readI32()
str = self.trans.readAll(len)
return str
class TBinaryProtocolFactory:
def __init__(self, strictRead=False, strictWrite=True):
self.strictRead = strictRead
self.strictWrite = strictWrite
def getProtocol(self, trans):
prot = TBinaryProtocol(trans, self.strictRead, self.strictWrite)
return prot
class TBinaryProtocolAccelerated(TBinaryProtocol):
"""C-Accelerated version of TBinaryProtocol.
This class does not override any of TBinaryProtocol's methods,
but the generated code recognizes it directly and will call into
our C module to do the encoding, bypassing this object entirely.
We inherit from TBinaryProtocol so that the normal TBinaryProtocol
encoding can happen if the fastbinary module doesn't work for some
reason. (TODO(dreiss): Make this happen sanely in more cases.)
In order to take advantage of the C module, just use
TBinaryProtocolAccelerated instead of TBinaryProtocol.
NOTE: This code was contributed by an external developer.
The internal Thrift team has reviewed and tested it,
but we cannot guarantee that it is production-ready.
Please feel free to report bugs and/or success stories
to the public mailing list.
"""
pass
class TBinaryProtocolAcceleratedFactory:
def getProtocol(self, trans):
return TBinaryProtocolAccelerated(trans)
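if __name__ == '__main__':
    # Round-trip sketch (illustrative): a tiny in-memory transport is enough
    # to exercise the encoder/decoder above. It is not part of Thrift's
    # transport layer; real code would use a TTransport implementation.
    class _MemoryTransport(object):
        def __init__(self):
            self._buffer = b''
            self._pos = 0

        def write(self, data):
            self._buffer += data

        def readAll(self, size):
            data = self._buffer[self._pos:self._pos + size]
            self._pos += size
            return data

    transport = _MemoryTransport()
    protocol = TBinaryProtocol(transport)
    protocol.writeI32(42)
    protocol.writeDouble(2.5)
    protocol.writeString(b'thrift')
    assert protocol.readI32() == 42
    assert protocol.readDouble() == 2.5
    assert protocol.readString() == b'thrift'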
| {
"content_hash": "e49544e7184deca14fed8aa85207cc00",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 72,
"avg_line_length": 24.141078838174273,
"alnum_prop": 0.6658645582674458,
"repo_name": "tailhook/aio-hs2",
"id": "13e2fd2bfe0306538ee257743cf38973ce3116b9",
"size": "6604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thrift/protocol/TBinaryProtocol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "27195"
},
{
"name": "Python",
"bytes": "461789"
},
{
"name": "Shell",
"bytes": "191"
}
],
"symlink_target": ""
} |
import platform
import setuptools
from setuptools.command import test
import sys
install_requires = [
'future',
# mock-1.0.1 is the last version compatible with setuptools <17.1,
# which is what comes with Ubuntu 14.04 LTS.
'mock<=1.0.1',
'portpicker',
'psutil',
'pytz',
'pyyaml',
'timeout_decorator'
]
if sys.version_info < (3, ):
install_requires.extend([
'enum34',
# "futures" is needed for py2 compatibility and it only works in 2.7
'futures',
])
if platform.system() == 'Windows':
install_requires.append('pywin32')
class PyTest(test.test):
"""Class used to execute unit tests using PyTest. This allows us to execute
unit tests without having to install the package.
"""
def finalize_options(self):
test.test.finalize_options(self)
self.test_args = ['-x', "tests"]
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
def main():
setuptools.setup(
name='mobly',
version='1.5',
maintainer = 'Ang Li',
maintainer_email = '[email protected]',
description='Automation framework for special end-to-end test cases',
license='Apache2.0',
url = 'https://github.com/google/mobly',
download_url = 'https://github.com/google/mobly/tarball/1.5',
packages=setuptools.find_packages(),
include_package_data=False,
scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],
tests_require=['pytest'],
install_requires=install_requires,
cmdclass={'test': PyTest}, )
if __name__ == '__main__':
main()
| {
"content_hash": "aa57107ec1392464b6ba77899ac12e6b",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 79,
"avg_line_length": 26.984375,
"alnum_prop": 0.614939200926462,
"repo_name": "l-meng/mobly",
"id": "e460099d95b3c9dd7356495b42c881d3750821fc",
"size": "2306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "440648"
}
],
"symlink_target": ""
} |
import numpy
from keras.layers import Input
from keras.models import Model
from deep_qa.layers.entailment_models import MultipleChoiceTupleEntailment
from deep_qa.layers.time_distributed_embedding import TimeDistributedEmbedding
class TestTupleAlignment:
def test_tuple_alignment_does_not_crash(self):
question_length = 5
num_options = 4
tuple_size = 3
num_tuples = 7
embedding_dim = 10
vocabulary_size = 15
batch_size = 32
question_input_layer = Input(shape=(question_length,), dtype='int32')
answer_input_layer = Input(shape=(num_options,), dtype='int32')
knowledge_input_layer = Input(shape=(num_tuples, tuple_size), dtype='int32')
# Embedding does not mask zeros
embedding = TimeDistributedEmbedding(input_dim=vocabulary_size, output_dim=embedding_dim,
mask_zero=True)
embedded_question = embedding(question_input_layer)
embedded_answer = embedding(answer_input_layer)
embedded_knowledge = embedding(knowledge_input_layer)
entailment_layer = MultipleChoiceTupleEntailment()
entailment_scores = entailment_layer([embedded_knowledge, embedded_question, embedded_answer])
model = Model(inputs=[knowledge_input_layer, question_input_layer, answer_input_layer],
outputs=entailment_scores)
model.compile(loss="mse", optimizer="sgd") # Will not train this model
knowledge_input = numpy.random.randint(0, vocabulary_size, (batch_size, num_tuples, tuple_size))
question_input = numpy.random.randint(0, vocabulary_size, (batch_size, question_length))
answer_input = numpy.random.randint(0, vocabulary_size, (batch_size, num_options))
model.predict([knowledge_input, question_input, answer_input])
| {
"content_hash": "2f40703828b9f7b6acb74ef139a0b970",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 104,
"avg_line_length": 54.38235294117647,
"alnum_prop": 0.6836127636560303,
"repo_name": "RTHMaK/RPGOne",
"id": "67fe146f66439d619ffe9200f86e8c5f13804b6a",
"size": "1892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deep_qa-master/tests/layers/tuple_alignment_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "1C Enterprise",
"bytes": "36"
},
{
"name": "Batchfile",
"bytes": "15029"
},
{
"name": "CSS",
"bytes": "41709"
},
{
"name": "Erlang",
"bytes": "39438"
},
{
"name": "Go",
"bytes": "287"
},
{
"name": "HTML",
"bytes": "633076"
},
{
"name": "JavaScript",
"bytes": "1128791"
},
{
"name": "Jupyter Notebook",
"bytes": "927247"
},
{
"name": "Makefile",
"bytes": "31756"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Matlab",
"bytes": "9454"
},
{
"name": "PHP",
"bytes": "708541"
},
{
"name": "PowerShell",
"bytes": "68503"
},
{
"name": "Python",
"bytes": "2278740"
},
{
"name": "Ruby",
"bytes": "1136"
},
{
"name": "Shell",
"bytes": "62555"
},
{
"name": "Smarty",
"bytes": "5752"
},
{
"name": "TeX",
"bytes": "34544"
}
],
"symlink_target": ""
} |
class ValidationError(Exception):
pass
class ConfigurationError(Exception):
pass
class AlreadyRegisteredError(Exception):
pass
class RegisterError(Exception):
pass
class PermissionError(Exception):
pass | {
"content_hash": "e37b6519b676e1d0cb87d921ccac20fe",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 40,
"avg_line_length": 12.777777777777779,
"alnum_prop": 0.7521739130434782,
"repo_name": "pombredanne/django-avocado",
"id": "e37b68b1bd73694487894f3852346f370074df03",
"size": "230",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "avocado/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
import json
from pprint import pprint
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy import API
if __name__ == '__main__':
with open('api_secret_token.json') as data_file:
authdata = json.load(data_file)
#pprint(authdata)
auth = OAuthHandler(authdata['consumer_key'], authdata['consumer_secret'])
auth.set_access_token(authdata['access_token'], authdata['access_token_secret'])
api = API(auth)
print(api.me().name)
# If the application settings are set for "Read and Write" then
# this line should tweet out a direct message
# The "Read and Write" setting is on https://dev.twitter.com/apps
api.send_direct_message(user="hrishikesh_date", text="Hello From Pi :)") | {
"content_hash": "dbfe14a4ab38115d5d4e77ebe5cc648a",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 82,
"avg_line_length": 37.13636363636363,
"alnum_prop": 0.7294981640146879,
"repo_name": "fuzzyhandle/pihangout",
"id": "55729100a855ba143fae2c47f5aea61c9fda934b",
"size": "817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testweet/testdirectmessage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21552"
},
{
"name": "Shell",
"bytes": "1133"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class EnvironmentVariable(Model):
"""A collection of environment variables to set.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the environment variable.
:type name: str
:param value: Required. The value of the environment variable.
:type value: str
"""
_validation = {
'name': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(self, *, name: str, value: str, **kwargs) -> None:
super(EnvironmentVariable, self).__init__(**kwargs)
self.name = name
self.value = value
| {
"content_hash": "eec0e1d4939ee17acf8ff4e0e48d0258",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 72,
"avg_line_length": 28.321428571428573,
"alnum_prop": 0.5939470365699874,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "56b44c8176455ed385540da26dd41e7c7021e614",
"size": "1267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-batchai/azure/mgmt/batchai/models/environment_variable_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
import os, sys, time, inspect
from prettytable import PrettyTable
"""
#Environment Setup
@src_dir : current file path
@arch_dir : library path for the current architecture (x64 or x86)
@leap_dir : Leap Motion module path
"""
src_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))
arch_dir = '../../motion-leap/lib/x64' if sys.maxsize > 2 ** 32 else '../../motion-leap/lib/x86'
leap_dir = '../../motion-leap/lib'
sys.path.insert(0, os.path.abspath(os.path.join(src_dir, arch_dir)))
sys.path.insert(0, os.path.abspath(os.path.join(src_dir, leap_dir)))
import Leap
class Listener(Leap.Listener):
def on_init(self, controller):
print "Initialized"
def on_connect(self, controller):
print "Connected"
def on_disconnect(self, controller):
print "Disconnected"
def on_exit(self, controller):
print "Exited"
def on_frame(self, controller):
frame = controller.frame()
"""
hand_position = []
all_finger_position = []
for hand in frame.hands:
hand_position.append(hand.stabilized_palm_position)
finger_position = []
for finger in frame.fingers:
finger_position.append(finger.stabilized_tip_position)
all_finger_position.append(finger_position)
"""
monitor = PrettyTable(
['frame_id',
'time_stamp',
'hands_count',
#'hand_position',
'fingers_count',
#'finger_position'
])
monitor.add_row(
[frame.id,
frame.timestamp,
len(frame.hands),
#hand_position,
len(frame.fingers),
#all_finger_position
])
print monitor
time.sleep(0.1)
os.system('clear')
def main():
listener = Listener()
controller = Leap.Controller()
controller.add_listener(listener)
print "Press Enter to quit..."
try:
sys.stdin.readline()
except KeyboardInterrupt:
pass
finally:
controller.remove_listener(listener)
if __name__ == "__main__":
main()
| {
"content_hash": "2920e0a35586912551ce68ec4bd8d267",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 96,
"avg_line_length": 26.25,
"alnum_prop": 0.563265306122449,
"repo_name": "peitaosu/motion-tools",
"id": "1307622fc4614b3d72064d2c7e89d23db4f6410e",
"size": "2205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data-monitor/dataMonitor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7904"
}
],
"symlink_target": ""
} |
from distutils.core import setup, Extension
setup(name="ctypes-test",
ext_modules = [Extension("C", ["C.c"])])
| {
"content_hash": "4b2eff3ab87e67babfb2816fd9746d13",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 46,
"avg_line_length": 29.5,
"alnum_prop": 0.6610169491525424,
"repo_name": "otron/python-intro-course-cern",
"id": "9f529d2f9f647c2d15745cae0ed432c6695fe30c",
"size": "119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_cython-etc/pure_C_setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "76"
},
{
"name": "HTML",
"bytes": "40716"
},
{
"name": "Python",
"bytes": "47223"
}
],
"symlink_target": ""
} |
from nose.tools import ok_
from flask.ext.viewunit import config, ViewTestCase
from app import app
class HookTest(ViewTestCase):
"""
All the flask hooks/functions use module-wide global state, so this is not
going to be entirely pretty. Sorry.
"""
def test_app_hook(self):
config.set_app(None)
try:
self.run_view('/')
ok_(False,
"Expected AssertionError calling run_view without setting app")
except AssertionError:
pass
config.set_app(app)
self.run_view('/')
def test_session_hook(self):
config.set_app(app)
try:
self.run_view('/', user_id=1)
ok_(False, "Expected AssertionError without session hook")
except AssertionError:
pass
def hook(test_session, user_id):
test_session['user_id'] = user_id
config.set_session_user_setter(hook)
self.run_view('/', user_id=11)
def test_db_hook(self):
config.set_app(app)
try:
self.run_view('/', expect_db_has=[('tables', {'foo': 1})])
ok_(False, "Expected AssertionError without session hook")
except AssertionError:
pass
# Huge hack: Return a tuple no matter the query -- it's just being
# len()ed anyway
def db_select(_sql, _params):
return [(1,)]
config.set_db_select_hook(db_select)
self.run_view('/', expect_db_has=[('tables', {'foo': 1})])
def setUp(self):
"""
Unset any viewunit hooks before tests
"""
# Ignore Pylint errors for catching any exception type, for this little
# hacky section
# pylint: disable=W0702
try:
self._old_app = config.get_app()
except:
self._old_app = None
config.set_app(app)
super(HookTest, self).setUp()
try:
self._old_session_hook = config.get_session_user_setter()
except:
self._old_session_hook = None
try:
self._old_db_hook = config.get_db_select_hook()
except:
self._old_db_hook = None
# pylint: enable=W0702
def tearDown(self):
"""
Restore the viewunit state to where it was before the tests
"""
super(HookTest, self).tearDown()
config.set_app(self._old_app)
config.set_session_user_setter(self._old_session_hook)
config.set_db_select_hook(self._old_db_hook)
| {
"content_hash": "c09793374e018fe3a707f958649587b8",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 27.923076923076923,
"alnum_prop": 0.5576544667453759,
"repo_name": "wingu/flask_viewunit",
"id": "a72b86368a4c7da46c0dbaae30ad8840466d0e4f",
"size": "2541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_hooks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "34616"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import os
import glob
import sys
import shutil
import tempfile
import threading
import multiprocessing
import random
import time
from mapproxy.util.lock import (
FileLock,
SemLock,
cleanup_lockdir,
LockTimeout,
)
from mapproxy.util.fs import (
_force_rename_dir,
swap_dir,
cleanup_directory,
write_atomic,
)
from mapproxy.util.times import timestamp_before
from mapproxy.test.helper import Mocker
from nose.tools import eq_
is_win = sys.platform == 'win32'
class TestFileLock(Mocker):
def setup(self):
Mocker.setup(self)
self.lock_dir = tempfile.mkdtemp()
self.lock_file = os.path.join(self.lock_dir, 'lock.lck')
def teardown(self):
shutil.rmtree(self.lock_dir)
Mocker.teardown(self)
def test_file_lock_timeout(self):
lock = self._create_lock()
assert_locked(self.lock_file)
lock # prevent lint warnings
def test_file_lock(self):
# Test a lock that becomes free during a waiting lock() call.
class Lock(threading.Thread):
def __init__(self, lock_file):
threading.Thread.__init__(self)
self.lock_file = lock_file
self.lock = FileLock(self.lock_file)
def run(self):
self.lock.lock()
time.sleep(0.2)
self.lock.unlock()
lock_thread = Lock(self.lock_file)
start_time = time.time()
lock_thread.start()
        # wait until the thread has acquired the lock
while not lock_thread.lock._locked:
time.sleep(0.001)
# one lock that times out
assert_locked(self.lock_file)
# one lock that will get it after some time
l = FileLock(self.lock_file, timeout=0.3, step=0.001)
l.lock()
locked_for = time.time() - start_time
        assert locked_for - 0.2 <= 0.1, 'locking took too long?! (rerun if not sure)'
        # cleanup
l.unlock()
lock_thread.join()
def test_lock_cleanup(self):
old_lock_file = os.path.join(self.lock_dir, 'lock_old.lck')
l = FileLock(old_lock_file)
l.lock()
l.unlock()
mtime = os.stat(old_lock_file).st_mtime
mtime -= 7*60
os.utime(old_lock_file, (mtime, mtime))
l = self._create_lock()
l.unlock()
assert os.path.exists(old_lock_file)
assert os.path.exists(self.lock_file)
cleanup_lockdir(self.lock_dir)
assert not os.path.exists(old_lock_file)
assert os.path.exists(self.lock_file)
def test_concurrent_access(self):
count_file = os.path.join(self.lock_dir, 'count.txt')
with open(count_file, 'wb') as f:
f.write('0')
def count_up():
with FileLock(self.lock_file, timeout=60):
with open(count_file, 'r+b') as f:
counter = int(f.read().strip())
f.seek(0)
f.write(str(counter+1))
def do_it():
for x in range(20):
time.sleep(0.002)
count_up()
threads = [threading.Thread(target=do_it) for _ in range(20)]
[t.start() for t in threads]
[t.join() for t in threads]
with open(count_file, 'r+b') as f:
counter = int(f.read().strip())
assert counter == 400, counter
def test_remove_on_unlock(self):
l = FileLock(self.lock_file, remove_on_unlock=True)
l.lock()
assert os.path.exists(self.lock_file)
l.unlock()
assert not os.path.exists(self.lock_file)
l.lock()
assert os.path.exists(self.lock_file)
os.remove(self.lock_file)
assert not os.path.exists(self.lock_file)
# ignore removed lock
l.unlock()
assert not os.path.exists(self.lock_file)
def _create_lock(self):
lock = FileLock(self.lock_file)
lock.lock()
return lock
def assert_locked(lock_file, timeout=0.02, step=0.001):
assert os.path.exists(lock_file)
l = FileLock(lock_file, timeout=timeout, step=step)
try:
l.lock()
assert False, 'file was not locked'
except LockTimeout:
pass
class TestSemLock(object):
def setup(self):
self.lock_dir = tempfile.mkdtemp()
self.lock_file = os.path.join(self.lock_dir, 'lock.lck')
def teardown(self):
shutil.rmtree(self.lock_dir)
def count_lockfiles(self):
return len(glob.glob(self.lock_file + '*'))
def test_single(self):
locks = [SemLock(self.lock_file, 1, timeout=0.01) for _ in range(2)]
locks[0].lock()
try:
locks[1].lock()
except LockTimeout:
pass
else:
assert False, 'expected LockTimeout'
def test_creating(self):
locks = [SemLock(self.lock_file, 2) for _ in range(3)]
eq_(self.count_lockfiles(), 0)
locks[0].lock()
eq_(self.count_lockfiles(), 1)
locks[1].lock()
eq_(self.count_lockfiles(), 2)
assert os.path.exists(locks[0]._lock._path)
assert os.path.exists(locks[1]._lock._path)
locks[0].unlock()
locks[2].lock()
locks[2].unlock()
locks[1].unlock()
def test_timeout(self):
locks = [SemLock(self.lock_file, 2, timeout=0.1) for _ in range(3)]
eq_(self.count_lockfiles(), 0)
locks[0].lock()
eq_(self.count_lockfiles(), 1)
locks[1].lock()
eq_(self.count_lockfiles(), 2)
try:
locks[2].lock()
except LockTimeout:
pass
else:
assert False, 'expected LockTimeout'
locks[0].unlock()
locks[2].unlock()
def test_load(self):
locks = [SemLock(self.lock_file, 8, timeout=1) for _ in range(20)]
new_locks = random.sample([l for l in locks if not l._locked], 5)
for l in new_locks:
l.lock()
for _ in range(20):
old_locks = random.sample([l for l in locks if l._locked], 3)
for l in old_locks:
l.unlock()
eq_(len([l for l in locks if l._locked]), 2)
eq_(len([l for l in locks if not l._locked]), 18)
new_locks = random.sample([l for l in locks if not l._locked], 3)
for l in new_locks:
l.lock()
eq_(len([l for l in locks if l._locked]), 5)
eq_(len([l for l in locks if not l._locked]), 15)
assert self.count_lockfiles() == 8
class DirTest(object):
def setup(self):
self.tmpdir = tempfile.mkdtemp()
def teardown(self):
if os.path.exists(self.tmpdir):
shutil.rmtree(self.tmpdir)
def mkdir(self, name):
dirname = os.path.join(self.tmpdir, name)
os.mkdir(dirname)
self.mkfile(name, dirname=dirname)
return dirname
def mkfile(self, name, dirname=None):
if dirname is None:
dirname = self.mkdir(name)
filename = os.path.join(dirname, name + '.txt')
open(filename, 'w').close()
return filename
class TestForceRenameDir(DirTest):
def test_rename(self):
src_dir = self.mkdir('bar')
dst_dir = os.path.join(self.tmpdir, 'baz')
_force_rename_dir(src_dir, dst_dir)
assert os.path.exists(dst_dir)
assert os.path.exists(os.path.join(dst_dir, 'bar.txt'))
assert not os.path.exists(src_dir)
def test_rename_overwrite(self):
src_dir = self.mkdir('bar')
dst_dir = self.mkdir('baz')
_force_rename_dir(src_dir, dst_dir)
assert os.path.exists(dst_dir)
assert os.path.exists(os.path.join(dst_dir, 'bar.txt'))
assert not os.path.exists(src_dir)
class TestSwapDir(DirTest):
def test_swap_dir(self):
src_dir = self.mkdir('bar')
dst_dir = os.path.join(self.tmpdir, 'baz')
swap_dir(src_dir, dst_dir)
assert os.path.exists(dst_dir)
assert os.path.exists(os.path.join(dst_dir, 'bar.txt'))
assert not os.path.exists(src_dir)
def test_swap_dir_w_old(self):
src_dir = self.mkdir('bar')
dst_dir = self.mkdir('baz')
swap_dir(src_dir, dst_dir)
assert os.path.exists(dst_dir)
assert os.path.exists(os.path.join(dst_dir, 'bar.txt'))
assert not os.path.exists(src_dir)
def test_swap_dir_keep_old(self):
src_dir = self.mkdir('bar')
dst_dir = self.mkdir('baz')
swap_dir(src_dir, dst_dir, keep_old=True, backup_ext='.bak')
assert os.path.exists(dst_dir)
assert os.path.exists(os.path.join(dst_dir, 'bar.txt'))
assert os.path.exists(dst_dir + '.bak')
assert os.path.exists(os.path.join(dst_dir + '.bak', 'baz.txt'))
class TestCleanupDirectory(DirTest):
def test_no_remove(self):
dirs = [self.mkdir('dir'+str(n)) for n in range(10)]
for d in dirs:
assert os.path.exists(d), d
cleanup_directory(self.tmpdir, timestamp_before(minutes=1))
for d in dirs:
assert os.path.exists(d), d
def test_file_handler(self):
files = []
file_handler_calls = []
def file_handler(filename):
file_handler_calls.append(filename)
new_date = timestamp_before(weeks=1)
for n in range(10):
fname = 'foo'+str(n)
filename = self.mkfile(fname)
os.utime(filename, (new_date, new_date))
files.append(filename)
for filename in files:
assert os.path.exists(filename), filename
cleanup_directory(self.tmpdir, timestamp_before(), file_handler=file_handler)
for filename in files:
assert os.path.exists(filename), filename
assert set(files) == set(file_handler_calls)
def test_no_directory(self):
cleanup_directory(os.path.join(self.tmpdir, 'invalid'), timestamp_before())
# nothing should happen
def test_remove_all(self):
files = []
new_date = timestamp_before(weeks=1)
for n in range(10):
fname = 'foo'+str(n)
filename = self.mkfile(fname)
os.utime(filename, (new_date, new_date))
files.append(filename)
for filename in files:
assert os.path.exists(filename), filename
cleanup_directory(self.tmpdir, timestamp_before())
for filename in files:
assert not os.path.exists(filename), filename
assert not os.path.exists(os.path.dirname(filename)), filename
def test_remove_empty_dirs(self):
os.makedirs(os.path.join(self.tmpdir, 'foo', 'bar', 'baz'))
cleanup_directory(self.tmpdir, timestamp_before(minutes=-1))
assert not os.path.exists(os.path.join(self.tmpdir, 'foo'))
def test_remove_some(self):
files = []
new_date = timestamp_before(weeks=1)
for n in range(10):
fname = 'foo'+str(n)
filename = self.mkfile(fname)
if n % 2 == 0:
os.utime(filename, (new_date, new_date))
files.append(filename)
for filename in files:
assert os.path.exists(filename), filename
cleanup_directory(self.tmpdir, timestamp_before())
for filename in files[::2]:
assert not os.path.exists(filename), filename
assert not os.path.exists(os.path.dirname(filename)), filename
for filename in files[1::2]:
assert os.path.exists(filename), filename
def write_atomic_data((i, filename)):
data = str(i) + '\n' + 'x' * 10000
write_atomic(filename, data)
time.sleep(0.001)
class TestWriteAtomic(object):
def setup(self):
self.dirname = tempfile.mkdtemp()
def teardown(self):
if self.dirname:
shutil.rmtree(self.dirname)
def test_concurrent_write(self):
filename = os.path.join(self.dirname, 'tmpfile')
num_writes = 800
concurrent_writes = 8
p = multiprocessing.Pool(concurrent_writes)
p.map(write_atomic_data, ((i, filename) for i in xrange(num_writes)))
p.close()
p.join()
assert os.path.exists(filename)
last_i = int(open(filename).readline())
assert last_i > (num_writes / 2), ("file should contain content from "
"later writes, got content from write %d" % (last_i + 1)
)
os.unlink(filename)
assert os.listdir(self.dirname) == []
def test_not_a_file(self):
# check that expected errors are not hidden
filename = os.path.join(self.dirname, 'tmpfile')
os.mkdir(filename)
try:
write_atomic(filename, '12345')
except OSError:
pass
else:
assert False, 'expected exception'
| {
"content_hash": "a8576590b88bb045b8b66fd2ec5d9a4c",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 85,
"avg_line_length": 31.380487804878047,
"alnum_prop": 0.5736048499922276,
"repo_name": "Anderson0026/mapproxy",
"id": "270ad7deb5f7a9bf968eb4e92c9f698c9b62453f",
"size": "13515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mapproxy/test/unit/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12401"
},
{
"name": "Python",
"bytes": "1477825"
},
{
"name": "Shell",
"bytes": "3087"
}
],
"symlink_target": ""
} |
import argparse
import time
import sys
import httplib
##Built in python 2.7.3
##Author: Steve Miskiewicz
##Check the status of the request: returns (done, status), where done is True
##for a goodStatus code or 'failed' for a badStatus code (caller handles sys.exit)
def getStatus(r1, goodStatus, badStatus):
if r1.status in goodStatus:
done = True
status = "Website Up."
return done, status
elif r1.status in badStatus:
done = 'failed'
status = "WEBSITE DOWN!"
return done, status
else:
print "Current Status:", r1.status, "ignored. Retrying..."
##Get a response from the URL, print out varables if bad
def pingSite(host, request):
try:
w1 = httplib.HTTPConnection(host)
w1.request("GET", request)
r1 = w1.getresponse()
except Exception, err:
print "Bad URL! Please check your entry."
print "Host:", host
print "Request:", request
return err
return r1
##Check the time and wait a second
def checkCurrentTime(length, timeout):
if length >= timeout:
checkTime = False
return checkTime
else:
time.sleep(1)
length += 1
return length
##Separate the host from the rest of the URL
def formatUrl(url):
if url.startswith('http://'):
url = url.replace("http://", "")
if url.startswith('https://'):
url = url.replace("https://", "")
    formatedUrl = url.split("/")
trailingUrl = ''
host = formatedUrl[0]
formatedUrl.remove(host)
for item in formatedUrl:
x = '/' + item
trailingUrl += x
return host, trailingUrl
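##Illustrative (hypothetical URL): formatUrl("http://example.com/status/page")
##returns ("example.com", "/status/page")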
def cmdArgs(args):
timeout = 60
parser = argparse.ArgumentParser(description='URL to Ping')
parser.add_argument('-u', '--url', action="store", help='Enter a full web address to check against goodStatus list', required=True)
parser.add_argument('-t', '--time', action="store", help='Enter number of seconds to ping, default is ' + str(timeout), type=int, required=False)
args = parser.parse_args()
url = args.url
if args.time:
timeout = args.time
return url, timeout
def main():
checkTime = True
timeCheck = 0
goodStatus = [200]
badStatus = [400, 401, 403, 404, 500]
##Get cmd args
args = cmdArgs(sys.argv)
##args = cmdArgs()
url = args[0]
timeout = args[1]
    formatedUrl = formatUrl(url)
print "Status Check for website:", url
##Start checking the website for proper status codes,
##will exit after match or timeout
while checkTime:
timeCheck = checkCurrentTime(timeCheck, timeout)
if timeCheck == False:
print "Timeout exceeded!", "Status:", r1.status, "Reason:", r1.reason
sys.exit(1)
break
print "URL:", url, "Attempt:", timeCheck
r1 = pingSite(formatedUrl[0], formatedUrl[1])
        done = getStatus(r1, goodStatus, badStatus)
        if done:
            print done[1], "Status:", r1.status, "Reason:", r1.reason
            if done[0] == 'failed':
                sys.exit(1)
            break
if __name__ == '__main__':
main()
| {
"content_hash": "46c034fe25862524a46ebe25f9c9bbc2",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 151,
"avg_line_length": 29.576923076923077,
"alnum_prop": 0.6059817945383615,
"repo_name": "smiskiewicz/httpStatusBot",
"id": "197b178b26820617cc2fd6424c996a1fdbdc9e9e",
"size": "3076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "httpStatusBot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6177"
}
],
"symlink_target": ""
} |
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String(50), nullable=False)
email = Column(String(50), nullable=False)
picture = Column(String(250))
class Restaurant(Base):
__tablename__ = 'restaurant'
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship(User)
@property
def serialize(self):
return {
'name' : self.name,
'id' : self.id,
}
class MenuItem(Base):
__tablename__ = 'menu_item'
name = Column(String(80), nullable=False)
id = Column(Integer, primary_key=True)
description = Column(String(250))
price = Column(String(8))
course = Column(String(250))
restaurant_id = Column(Integer, ForeignKey('restaurant.id'))
restaurant = relationship(Restaurant)
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship(User)
@property
def serialize(self):
return {
'name': self.name,
'description': self.description,
'id': self.id,
'price': self.price,
'course': self.course,
}
engine = create_engine('sqlite:///restaurantmenuwithusers.db')
Base.metadata.create_all(engine) | {
"content_hash": "7b9de9b16e9998d48a9fddb7414b4a04",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 64,
"avg_line_length": 24.578125,
"alnum_prop": 0.6484424666242848,
"repo_name": "maistrovas/My-Courses-Solutions",
"id": "7fb77ae551e72225ed9ff0520717ce47140e3fb0",
"size": "1573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Udacity_Fullstack_Nanodegree/vagrant/catalog/database_setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3141"
},
{
"name": "HTML",
"bytes": "22655"
},
{
"name": "JavaScript",
"bytes": "3552"
},
{
"name": "Python",
"bytes": "628586"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "741"
}
],
"symlink_target": ""
} |
from operator import gt, lt, ge, le
from hq.hquery.functions.core_string import string
from hq.verbosity import verbose_print
from hq.hquery.functions.core_boolean import boolean
from hq.hquery.functions.core_number import number
from hq.hquery.object_type import object_type, is_boolean, is_number
from hq.hquery.syntax_error import HquerySyntaxError
class RelationalOperator:
def __init__(self, op):
if op == '>':
self.base_op = gt
elif op == '>=':
self.base_op = ge
elif op == '<':
self.base_op = lt
elif op == '<=':
self.base_op = le
else:
raise HquerySyntaxError('unexpected relational operator "{0}"'.format(op))
def evaluate(self, first, second):
first_type = object_type(first)
second_type = object_type(second)
cmp = comparison_method_table[first_type][second_type]
return boolean(cmp(self.base_op, first, second))
@property
def name(self):
return self.base_op.__name__
def _cmp_node_sets(base_op, first, second):
first_values = set([number(node) for node in first])
second_values = set([number(node) for node in second])
verbose_print('Comparing two nodes sets (size {0} and {1}).'.format(len(first_values), len(second_values)))
for first_value in first_values:
for second_value in second_values:
if base_op(first_value, second_value):
msg = 'Comparison succeeded for "{0}" from first node set and "{1}" in second node set'
verbose_print(msg.format(first_value, second_value))
return True
verbose_print('Comparison failed for all nodes in both node sets.')
return False
def _cmp_nodes_to_value(base_op, first, second):
node_values = set([number(node) for node in first])
second = number(second)
verbose_print('Comparing {0} nodes in node set to value {1}'.format(len(node_values), second))
for node_value in node_values:
if base_op(node_value, second):
verbose_print('Comparison succeeded for node value "{0}" and value "{1}"'.format(node_value, second))
return True
verbose_print('Comparison failed for all nodes in the node set.')
return False
def _cmp_value_to_nodes(base_op, first, second):
node_values = set([number(node) for node in second])
first = number(first)
verbose_print('Comparing {0} nodes in node set to value "{1}"'.format(len(node_values), first))
for node_value in node_values:
if base_op(first, node_value):
            verbose_print('Comparison succeeded for value "{0}" and node value "{1}"'.format(first, node_value))
return True
verbose_print('Comparison failed for all nodes in the node set.')
return False
def _cmp_values(base_op, first, second):
if is_boolean(first) or is_boolean(second):
return base_op(1 if boolean(first) else 0, 1 if boolean(second) else 0)
elif is_number(first) or is_number(second):
return base_op(number(first), number(second))
else:
return base_op(string(first), string(second))
comparison_method_table = (
# BOOLEAN, SEQUENCE, NUMBER, STRING
(_cmp_values, _cmp_value_to_nodes, _cmp_values, _cmp_values), # BOOLEAN
(_cmp_nodes_to_value, _cmp_node_sets, _cmp_nodes_to_value, _cmp_nodes_to_value), # SEQUENCE
(_cmp_values, _cmp_value_to_nodes, _cmp_values, _cmp_values), # NUMBER
(_cmp_values, _cmp_value_to_nodes, _cmp_values, _cmp_values), # STRING
)
| {
"content_hash": "474068f03b68142c1ca46ce5b95e9122",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 113,
"avg_line_length": 37.39393939393939,
"alnum_prop": 0.6158833063209076,
"repo_name": "rbwinslow/hq",
"id": "25de6b7939b55e940389daaa8707a8438d103647",
"size": "3702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hq/hquery/relational_operators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "206050"
}
],
"symlink_target": ""
} |
import unittest
from autosklearn.pipeline.components.regression.gradient_boosting import GradientBoosting
from autosklearn.pipeline.util import _test_regressor, _test_regressor_iterative_fit
import sklearn.metrics
class GradientBoostingComponentTest(unittest.TestCase):
def test_default_configuration(self):
for i in range(10):
predictions, targets = _test_regressor(GradientBoosting)
self.assertAlmostEqual(0.35273007696557712,
sklearn.metrics.r2_score(y_true=targets, y_pred=predictions))
def test_default_configuration_iterative_fit(self):
for i in range(10):
predictions, targets = _test_regressor(GradientBoosting)
self.assertAlmostEqual(0.35273007696557712,
sklearn.metrics.r2_score(y_true=targets, y_pred=predictions))
| {
"content_hash": "e5603ce14a71bd74d64ae3d12da28845",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 89,
"avg_line_length": 39.80952380952381,
"alnum_prop": 0.7248803827751196,
"repo_name": "hmendozap/auto-sklearn",
"id": "4a331a79fcd35d0bf54328a48733b0f377ae6b7b",
"size": "836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_pipeline/components/regression/test_gradient_boosting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6722"
},
{
"name": "Makefile",
"bytes": "6791"
},
{
"name": "Python",
"bytes": "1207634"
},
{
"name": "Shell",
"bytes": "851"
}
],
"symlink_target": ""
} |
import time
#import RPi.GPIO as io
#io.setmode(io.BCM)
class WaterController(object):
CONFIGURATION = False
TOTAL_WATER_VOLUME_PUMPED = 0
CURRENT_WATER_FLOW_RATE = 0
def __init__(self, conf):
''' Setup the Water Controller. '''
print("> NOTICE: Activating Water Controller.")
self.CONFIGURATION = conf
print("> NOTICE: Water Controller Active!")
def readFlowSensor(self):
''' Reads the water pump flow sensor.
Liquid Flow Meter - Plastic 1/2" NPT Threaded
http://www.adafruit.com/products/828
Sensor Frequency (Hz) = 7.5 * Q (Liters/min)
Liters = Q * time elapsed (seconds) / 60 (seconds/minute)
Liters = (Frequency (Pulses/second) / 7.5) * time elapsed (seconds) / 60
Liters = Pulses / (7.5 * 60)
// Arduino Example
sei(); //Enable interrupts
delay (15);
cli(); //Disable interrupts
if ((pulsesCounter == previousMeasure) && (pulsesCounter != 0)) {
counter += 1;
}
previousMeasure = pulsesCounter;
if ((counter == 50) && (pulsesCounter != 0)) { // Send pulses number to Raspberry Pi
Serial.print("pulses:");
Serial.println (pulsesCounter, DEC); //Prints the pulses number
pulsesCounter = 0;
previousMeasure = 0;
counter = 0;
}
'''
print("> NOTICE: Reading the water pump flow sensor.")
| {
"content_hash": "37c67ae40c9c230da8859a83d5cc6aa6",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 86,
"avg_line_length": 25.352941176470587,
"alnum_prop": 0.6504253673627224,
"repo_name": "fisherinnovation/FI-Automated-Greenhouse",
"id": "adfbfc45687ae7de5f7ab42659c35df59db11dae",
"size": "1606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/WaterController.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1485180"
},
{
"name": "Python",
"bytes": "18373"
}
],
"symlink_target": ""
} |
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def euclid(a, b):
"""returns the Greatest Common Divisor of a and b"""
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
def coPrime(l):
"""returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. """
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True
def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y
def modInv(a, m):
"""returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1"""
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0
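# Illustrative check: modInv(7, 40) == 23, since 7 * 23 == 161 == 4 * 40 + 1,
# i.e. 7 * 23 % 40 == 1.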
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
    # the problem breaks down to counting how many '0's there are at the
    # end of bin(m). This can be done by AND-ing m with a stretch of '1's,
    # which can be represented as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
def int2baseTwo(x):
"""x is a positive integer. Convert it to base two as a list of integers
in reverse order as a list."""
# repeating x >>= 1 and x & 1 will do the trick
assert x >= 0
bitInverse = []
while x != 0:
bitInverse.append(x & 1)
x >>= 1
return bitInverse
def modExp(a, d, n):
"""returns a ** d (mod n)"""
assert d >= 0
assert n >= 0
base2D = int2baseTwo(d)
base2DLength = len(base2D)
modArray = []
result = 1
for i in range(1, base2DLength + 1):
if i == 1:
modArray.append(a % n)
else:
modArray.append((modArray[i - 2] ** 2) % n)
for i in range(0, base2DLength):
if base2D[i] == 1:
result *= base2D[i] * modArray[i]
return result % n
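# Illustrative check: modExp(2, 10, 1000) == 24, i.e. (2 ** 10) % 1000 == 1024 % 1000.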
def millerRabin(n, k):
"""
Miller Rabin pseudo-prime test
return True means likely a prime, (how sure about that, depending on k)
return False means definitely a composite.
Raise assertion error when n, k are not positive integers
and n is not 1
"""
assert n >= 1
# ensure n is bigger than 1
assert k > 0
# ensure k is a positive integer so everything down here makes sense
if n == 2:
return True
# make sure to return True if n == 2
if n % 2 == 0:
return False
# immediately return False for all the even numbers bigger than 2
extract2 = extractTwos(n - 1)
s = extract2[0]
d = extract2[1]
assert 2 ** s * d == n - 1
def tryComposite(a):
"""Inner function which will inspect whether a given witness
will reveal the true identity of n. Will only be called within
millerRabin"""
x = modExp(a, d, n)
if x == 1 or x == n - 1:
return None
else:
for j in range(1, s):
x = modExp(x, 2, n)
if x == 1:
return False
elif x == n - 1:
return None
return False
for i in range(0, k):
a = random.randint(2, n - 2)
if tryComposite(a) == False:
return False
return True # actually, we should return probably true.
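# Illustrative: millerRabin(97, 10) is True (97 is prime), while
# millerRabin(100, 10) is False (even numbers greater than 2 are rejected immediately).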
def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result
def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError
def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d)
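# Minimal usage sketch (not part of the original module; bounds and k are
# illustrative choices): generate a key pair and verify that modular
# exponentiation round-trips a small message m < n.
#
#   n, e, d = newKey(2 ** 60, 2 ** 61, 20)
#   m = 42
#   c = modExp(m, e, n)          # "encrypt"
#   assert modExp(c, d, n) == m  # "decrypt" recovers m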
def string2numList(strn):
"""Converts a string to a list of integers based on ASCII values"""
"""origin pickle has bug """
return [ord(chars) for chars in list(strn)]
def numList2string(l):
"""Converts a list of integers to a string based on ASCII values"""
return pickle.loads(''.join(map(chr, l)))
def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
            ''' append random padding values to the list '''
            toProcess.append(random.randint(32, 126))
        toProcess[len(l)] = 0  # put a 0 right after the original message
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList
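# Illustrative: numList2blocks([72, 105], 2) == [72 * 256 + 105] == [18537].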
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList
def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN)
def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList)
def block_size(val):
v = int(val)
assert(v >= 10 and v <= 1000)
return val
# Use xunlei's base64 js code directly
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def rsa_encrypt_password(password, verify_code, check_n, check_e):
md5 = hashlib.md5()
md5.update(password)
password_hex = md5.hexdigest()
password_hex += verify_code.upper()
int_n = long(base64.decodestring(check_n).encode('hex'), 16)
int_e = long(base64.decodestring(check_e).encode('hex'), 16)
int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
encrypted_password = hex2b64(format(int_encrypted_password, 'x'))
return encrypted_password
| {
"content_hash": "7a04c4737ab01abec32e812b3f088226",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 82,
"avg_line_length": 28.0926517571885,
"alnum_prop": 0.5669282383714318,
"repo_name": "lazygunner/xunleipy",
"id": "95bb44ec8555d01cb370b5b6a440e3cf6cbefab8",
"size": "8816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xunleipy/rsa_lib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42197"
}
],
"symlink_target": ""
} |
"""
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model('sites', 'Site')
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
'domain': 'www.thoughtconcert.com',
'name': 'Thoughtconcert'
}
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model('sites', 'Site')
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
'domain': 'example.com',
'name': 'example.com'
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0002_alter_domain_unique'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
]
| {
"content_hash": "e815ea646e280df4134a4a045e305c49",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 129,
"avg_line_length": 25.195652173913043,
"alnum_prop": 0.6264020707506471,
"repo_name": "dilwaria/thoughtconcert",
"id": "bc4e1ff1d4179620ec4650218c733b04aaf78ce0",
"size": "1159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thoughtconcert/contrib/sites/migrations/0003_set_site_domain_and_name.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42214"
},
{
"name": "HTML",
"bytes": "70691"
},
{
"name": "JavaScript",
"bytes": "64499"
},
{
"name": "Python",
"bytes": "47928"
},
{
"name": "Shell",
"bytes": "4232"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import getpass
from builtins import input
from colors import cyan, green, red
from pants.auth.basic_auth import BasicAuth, BasicAuthCreds, Challenged
from pants.base.exceptions import TaskError
from pants.task.console_task import ConsoleTask
class Login(ConsoleTask):
"""Task to auth against some identity provider.
:API: public
"""
@classmethod
def subsystem_dependencies(cls):
return super(Login, cls).subsystem_dependencies() + (BasicAuth,)
@classmethod
def supports_passthru_args(cls):
return True
@classmethod
def register_options(cls, register):
super(Login, cls).register_options(register)
register('--to', fingerprint=True,
help='Log in to this provider. Can also be specified as a passthru arg.')
def console_output(self, targets):
if targets:
raise TaskError('The login task does not take any target arguments.')
# TODO: When we have other auth methods (e.g., OAuth2), select one by provider name.
requested_providers = list(filter(None, [self.get_options().to] + self.get_passthru_args()))
if len(requested_providers) != 1:
raise TaskError('Must specify exactly one provider.')
provider = requested_providers[0]
try:
BasicAuth.global_instance().authenticate(provider)
return ['', 'Logged in successfully using .netrc credentials.']
except Challenged as e:
creds = self._ask_for_creds(provider, e.url, e.realm)
BasicAuth.global_instance().authenticate(provider, creds=creds)
return ['', 'Logged in successfully.']
@staticmethod
def _ask_for_creds(provider, url, realm):
print(green('\nEnter credentials for:\n'))
print('{} {}'.format(green('Provider:'), cyan(provider)))
print('{} {}'.format(green('Realm: '), cyan(realm)))
print('{} {}'.format(green('URL: '), cyan(url)))
print(red('\nONLY ENTER YOUR CREDENTIALS IF YOU TRUST THIS SITE!\n'))
username = input(green('Username: '))
password = getpass.getpass(green('Password: '))
return BasicAuthCreds(username, password)
| {
"content_hash": "db5402a7b49b450cc9d688a1e3ca2106",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 96,
"avg_line_length": 36.20338983050848,
"alnum_prop": 0.6942883895131086,
"repo_name": "twitter/pants",
"id": "4db5b7c0fcebcf37ef61cd97c9c7ce2bdd46afa6",
"size": "2283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/core_tasks/login.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5639"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "85294"
},
{
"name": "Java",
"bytes": "498956"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "6700799"
},
{
"name": "Rust",
"bytes": "765598"
},
{
"name": "Scala",
"bytes": "89346"
},
{
"name": "Shell",
"bytes": "94395"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
from conf import ServiceConfigure
from utils import merge_into, blipp_import, get_most_recent
from schema_cache import SchemaCache
import settings
from validation import validate_add_defaults
import pprint
# should be blipp_conf, but netlogger doesn't like that for some reason
logger = settings.get_logger('confblipp')
class BlippConfigure(ServiceConfigure):
def __init__(self, initial_config={}, node_id=None,
pre_existing_measurements="ignore", urn=None):
if "name" not in initial_config:
initial_config['name']="blipp"
self.pem = pre_existing_measurements
self.schema_cache = SchemaCache()
self.probe_defaults = None
self.measurements = []
super(BlippConfigure, self).__init__(initial_config, node_id, urn)
def initialize(self):
super(BlippConfigure, self).initialize()
if not self.service_setup:
logger.error("initialize", msg="Could not reach UNIS to initialize service")
exit(-1)
self.initial_measurements = self.unis.get("/measurements?service=" +
self.config["selfRef"])
self.initial_measurements = get_most_recent(self.initial_measurements)
# use any pre-existing measurements found in UNIS right away
if self.pem=="use":
for m in self.initial_measurements:
self.measurements.append(m)
# now strip and add to measurements any probes found in the merged initial config
self.initial_probes = self._strip_probes(self.config)
if self.initial_probes:
self._post_probes()
self.unis.put("/services/" + self.config["id"], data=self.config)
def _post_probes(self):
failed_probes = {}
for name,probe in self.initial_probes.items():
probe.update({"name": name})
try:
probe = self._validate_schema_probe(probe)
except Exception as e:
logger.exc('_post_probes', e)
continue # skip this probe
r = self._post_measurement(probe)
if not r:
failed_probes[name] = probe
else:
# add the measurement to our internal list right away
self.measurements.append(r)
self.initial_probes = failed_probes
if failed_probes:
logger.warn('_post_probes', failed_probes=pprint.pformat(failed_probes))
def _validate_schema_probe(self, probe):
if "$schema" in probe:
schema = self.schema_cache.get(probe["$schema"])
validate_add_defaults(probe, schema)
return probe
def refresh(self):
interval = super(BlippConfigure, self).refresh()
if interval != int(self.config['properties']['configurations']['unis_poll_interval']):
return interval
# unis.get returns a list of config
if isinstance(self.config, list):
self.config = self.config[0]
self.initial_probes = self._strip_probes(self.config)
if self.initial_probes:
self._post_probes()
self.unis.put("/services/" + self.config["id"], data=self.config)
qmeas = self.unis.get("/measurements?service=" +
self.config["selfRef"])
if qmeas:
self.measurements = qmeas
self.measurements = get_most_recent(self.measurements)
for m in self.measurements:
size_orig = len(m["configuration"])
merge_into(m["configuration"], self.probe_defaults)
if size_orig < len(m["configuration"]):
self.unis.put("/measurements/"+m["id"], m)
r = self.unis.get("/measurements/"+m["id"])
m['ts'] = r['ts']
else:
            ''' If measurements don't exist then create them again, i.e. re-register them '''
self.measurements = get_most_recent(self.measurements)
for m in self.measurements:
self.unis.post("/measurements/", m)
return interval
def _post_measurement(self, probe):
probe_mod = blipp_import(probe["probe_module"])
if "EVENT_TYPES" in probe_mod.__dict__:
eventTypes = probe_mod.EVENT_TYPES.values()
else:
try:
eventTypes = probe["eventTypes"].values()
except KeyError:
logger.warn("_post_measurement", msg="No eventTypes present")
eventTypes = []
measurement = {}
measurement["service"] = self.config["selfRef"]
measurement["configuration"] = probe
measurement["eventTypes"] = eventTypes
r = self.unis.post("/measurements", measurement)
return r
def get_measurements(self):
'''
Return all measurements which are configured for this blipp
        instance. Possibly excluding those which were initially
present when blipp started.
'''
measurements = []
for m in self.measurements:
if self.pem=="use":
measurements.append(m)
elif m not in self.initial_measurements:
measurements.append(m)
return filter(lambda m: m["configuration"].get("status", "ON").upper()=="ON", measurements)
def _strip_probes(self, initial_config):
probes = {}
try:
probes = initial_config["properties"]["configurations"]["probes"]
del initial_config["properties"]["configurations"]["probes"]
except Exception:
pass
try:
probe_defaults = initial_config["properties"]["configurations"]["probe_defaults"]
self.probe_defaults = probe_defaults
except Exception:
pass
if probes:
for probe in probes.values():
merge_into(probe, self.probe_defaults)
return probes
| {
"content_hash": "cc6bc5a7b2e4dc5f6a47d58ab143e489",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 99,
"avg_line_length": 39.627450980392155,
"alnum_prop": 0.574468085106383,
"repo_name": "periscope-ps/blipp",
"id": "ca9d6e643200cac244e8f080ff701e7a0870ce1b",
"size": "6577",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "blipp/blipp_conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "207513"
},
{
"name": "Shell",
"bytes": "5186"
}
],
"symlink_target": ""
} |
"""Type-based simulation operations
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from pykern import pkconfig
from pykern import pkinspect
from pykern import pkio
from pykern import pkjson
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdp, pkdexc, pkdc, pkdformat
import hashlib
import inspect
import re
import requests
import sirepo.const
import sirepo.feature_config
import sirepo.job
import sirepo.resource
import sirepo.template
import sirepo.util
import uuid
_cfg = None
#: default compute_model
_ANIMATION_NAME = "animation"
#: prefix for auth header of sim_db_file requests
_AUTH_HEADER_PREFIX = f"{sirepo.util.AUTH_HEADER_SCHEME_BEARER} "
_MODEL_RE = re.compile(r"^[\w-]+$")
_IS_PARALLEL_RE = re.compile("animation", re.IGNORECASE)
#: separates values in frame id
_FRAME_ID_SEP = "*"
#: common keys to frame id followed by code-specific values
_FRAME_ID_KEYS = (
"frameIndex",
# computeModel when passed from persistent/parallel
    # analysisModel when passed from transient/sequential
# sim_data.compute_model() is idempotent to this.
"frameReport",
"simulationId",
"simulationType",
"computeJobHash",
"computeJobSerial",
)
_TEMPLATE_RESOURCE_DIR = "template"
def get_class(type_or_data):
"""Simulation data class
Args:
type_or_data (str or dict): simulation type or description
Returns:
type: simulation data operation class
"""
return sirepo.util.import_submodule("sim_data", type_or_data).SimData
def resource_path(filename):
"""Path to common (not specific to sim type) resource file"""
return sirepo.resource.file_path(_TEMPLATE_RESOURCE_DIR, filename)
def template_globals(sim_type=None):
"""Initializer for templates
Usage::
_SIM_DATA, SIM_TYPE, _SCHEMA = sirepo.sim_data.template_globals()
Args:
sim_type (str): simulation type [calling module's basename]
Returns:
(class, str, object): SimData class, simulation type, and schema
"""
c = get_class(sim_type or pkinspect.module_basename(pkinspect.caller_module()))
return c, c.sim_type(), c.schema()
def parse_frame_id(frame_id):
"""Parse the frame_id and return it along with self
Args:
frame_id (str): values separated by "*"
Returns:
PKDict: frame_args
SimDataBase: sim_data object for this simulationType
"""
v = frame_id.split(_FRAME_ID_SEP)
res = PKDict(zip(_FRAME_ID_KEYS, v[: len(_FRAME_ID_KEYS)]))
res.frameIndex = int(res.frameIndex)
res.computeJobSerial = int(res.computeJobSerial)
s = get_class(res.simulationType)
s.frameReport = s.parse_model(res)
s.simulationId = s.parse_sid(res)
# TODO(robnagler) validate these
res.update(zip(s._frame_id_fields(res), v[len(_FRAME_ID_KEYS) :]))
return res, s
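# Illustrative (hypothetical values): a frame_id such as
# "3*animation*zmOYKEBM*mysim*a1b2c3d4*1612345678" yields frameIndex=3,
# frameReport="animation", simulationId="zmOYKEBM", simulationType="mysim",
# computeJobHash="a1b2c3d4", computeJobSerial=1612345678, followed by any
# code-specific _frame_id_fields values.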
class SimDataBase(object):
ANALYSIS_ONLY_FIELDS = frozenset()
WATCHPOINT_REPORT = "watchpointReport"
WATCHPOINT_REPORT_RE = re.compile(r"^{}(\d+)$".format(WATCHPOINT_REPORT))
_EXAMPLE_RESOURCE_DIR = "examples"
_EXE_PERMISSIONS = 0o700
_LIB_RESOURCE_DIR = "lib"
@classmethod
def compute_job_hash(cls, data, qcall):
"""Hash fields related to data and set computeJobHash
Only needs to be unique relative to the report, not globally unique
so MD5 is adequate. Long and cryptographic hashes make the
cache checks slower.
Args:
            data (dict): simulation data
            qcall: request context used to resolve lib file paths
Returns:
bytes: hash value
"""
cls._assert_server_side()
c = cls.compute_model(data)
if data.get("forceRun") or cls.is_parallel(c):
return "HashIsUnused"
m = data["models"]
res = hashlib.md5()
fields = sirepo.sim_data.get_class(data.simulationType)._compute_job_fields(
data, data.report, c
)
# values may be string or PKDict
fields.sort(key=lambda x: str(x))
for f in fields:
# assert isinstance(f, pkconfig.STRING_TYPES), \
# 'value={} not a string_type'.format(f)
# TODO(pjm): work-around for now
if isinstance(f, pkconfig.STRING_TYPES):
x = f.split(".")
value = m[x[0]][x[1]] if len(x) > 1 else m[x[0]]
else:
value = f
res.update(
pkjson.dump_bytes(
value,
sort_keys=True,
allow_nan=False,
)
)
res.update(
"".join(
(
str(cls.lib_file_abspath(b, data=data, qcall=qcall).mtime())
for b in sorted(cls.lib_file_basenames(data))
),
).encode()
)
return res.hexdigest()
@classmethod
def compute_model(cls, model_or_data):
"""Compute model for this model_or_data
Args:
            model_or_data (str or dict): analysis model name or simulation data
Returns:
str: name of compute model for report
"""
m = cls.parse_model(model_or_data)
d = model_or_data if isinstance(model_or_data, dict) else None
        # TODO(robnagler) is this necessary since m is parsed?
return cls.parse_model(cls._compute_model(m, d))
@classmethod
def delete_sim_file(cls, sim_id, basename):
return cls._delete_sim_db_file(cls._sim_file_uri(sim_id, basename))
@classmethod
def example_paths(cls):
return sirepo.resource.glob_paths(
_TEMPLATE_RESOURCE_DIR,
cls.sim_type(),
cls._EXAMPLE_RESOURCE_DIR,
f"*{sirepo.const.JSON_SUFFIX}",
)
@classmethod
def fixup_old_data(cls, data, qcall, **kwargs):
"""Update model data to latest schema
Modifies `data` in place.
Args:
data (dict): simulation
"""
raise NotImplementedError()
@classmethod
def frame_id(cls, data, response, model, index):
"""Generate a frame_id from values (unit testing)
Args:
data (PKDict): model data
response (PKDict): JSON response
model (str): animation name
index (int): index of frame
Returns:
str: combined frame id
"""
assert response.frameCount > index, pkdformat(
"response={} does not contain enough frames for index={}", response, index
)
frame_args = response.copy()
frame_args.frameReport = model
m = data.models[model]
return _FRAME_ID_SEP.join(
[
# POSIT: same order as _FRAME_ID_KEYS
str(index),
model,
data.models.simulation.simulationId,
data.simulationType,
response.computeJobHash,
str(response.computeJobSerial),
]
+ [str(m.get(k)) for k in cls._frame_id_fields(frame_args)],
)
@classmethod
def is_parallel(cls, data_or_model):
"""Is this report a parallel (long) simulation?
Args:
data_or_model (dict): sim data or compute_model
Returns:
bool: True if parallel job
"""
return bool(
_IS_PARALLEL_RE.search(
cls.compute_model(data_or_model)
if isinstance(data_or_model, dict)
else data_or_model
),
)
@classmethod
def is_run_mpi(cls):
raise NotImplementedError()
@classmethod
def is_watchpoint(cls, name):
return cls.WATCHPOINT_REPORT in name
@classmethod
def lib_file_abspath(cls, basename, data=None, qcall=None):
"""Returns full, unique paths of simulation files
Args:
basename (str): lib file basename
Returns:
object: py.path.local to files (duplicates removed) OR py.path.local
"""
p = cls._lib_file_abspath(basename, data=data, qcall=qcall)
if p:
return p
from sirepo import auth
raise sirepo.util.UserAlert(
'Simulation library file "{}" does not exist'.format(basename),
"basename={} not in lib or resource directories",
basename,
)
@classmethod
def lib_file_basenames(cls, data):
"""List files used by the simulation
Args:
data (dict): sim db
Returns:
list: sorted list of str (duplicates removed)
"""
# _lib_file_basenames may return duplicates
return sorted(set(cls._lib_file_basenames(data)))
@classmethod
def lib_file_exists(cls, basename, qcall=None):
cls._assert_server_side()
return bool(cls._lib_file_abspath(basename, qcall=qcall))
@classmethod
def lib_file_in_use(cls, data, basename):
"""Check if file in use by simulation
Args:
data (dict): simulation
basename (str): to check
Returns:
bool: True if `basename` in use by `data`
"""
return any(f for f in cls.lib_file_basenames(data) if f == basename)
@classmethod
def lib_file_names_for_type(cls, file_type, qcall=None):
"""Return sorted list of files which match `file_type`
Args:
file_type (str): in the format of ``model-field``
Returns:
list: sorted list of file names stripped of file_type
"""
return sorted(
(
cls.lib_file_name_without_type(f.basename)
for f in cls._lib_file_list("{}.*".format(file_type), qcall=qcall)
)
)
@classmethod
def lib_file_name_with_model_field(cls, model_name, field, filename):
return "{}-{}.{}".format(model_name, field, filename)
@classmethod
def lib_file_name_with_type(cls, filename, file_type):
return "{}.{}".format(file_type, filename)
@classmethod
def lib_file_name_without_type(cls, basename):
"""Strip the file type prefix
See `lib_file_name` which prefixes with ``model-field.``
Args:
basename: lib file name with type
Returns:
str: basename without type prefix
"""
return re.sub(r"^.*?-.*?\.(.+\..+)$", r"\1", basename)
@classmethod
def lib_file_resource_path(cls, path):
return sirepo.resource.file_path(
_TEMPLATE_RESOURCE_DIR,
cls.sim_type(),
cls._LIB_RESOURCE_DIR,
path,
)
@classmethod
def lib_file_write_path(cls, basename, qcall=None):
cls._assert_server_side()
from sirepo import simulation_db
return simulation_db.simulation_lib_dir(cls.sim_type(), qcall=qcall).join(
basename
)
@classmethod
def lib_files_for_export(cls, data, qcall=None):
cls._assert_server_side()
res = []
for b in cls.lib_file_basenames(data):
f = cls.lib_file_abspath(b, data=data, qcall=qcall)
if f.exists():
res.append(f)
return res
@classmethod
def lib_files_from_other_user(cls, data, other_lib_dir, qcall):
"""Copy auxiliary files to other user
Does not copy resource files. Only works locally.
Args:
data (dict): simulation db
other_lib_dir (py.path): source directory
"""
cls._assert_server_side()
from sirepo import simulation_db
t = simulation_db.simulation_lib_dir(cls.sim_type(), qcall=qcall)
for f in cls._lib_file_basenames(data):
s = other_lib_dir.join(f)
if s.exists():
s.copy(t.join(f))
@classmethod
def lib_files_to_run_dir(cls, data, run_dir):
"""Copy auxiliary files to run_dir
Args:
data (dict): simulation db
run_dir (py.path): where to copy to
"""
for b in cls.lib_file_basenames(data):
t = run_dir.join(b)
s = cls.lib_file_abspath(b, data=data)
if t != s:
t.mksymlinkto(s, absolute=False)
@classmethod
def model_defaults(cls, name):
"""Returns a set of default model values from the schema.
Some special cases:
if the data type is "UUID" and the default value is empty, set the
value to a new UUID string
if the data type is "RandomId" and the default value is empty, set the
value to a new Base62 string
if the data type has the form "model.zzz", set the value to the default
value of model "zzz"
Args:
name (str): model name
"""
import copy
res = PKDict()
for f, d in cls.schema().model[name].items():
if len(d) >= 3 and d[2] is not None:
m = d[1].split(".")
if len(m) > 1 and m[0] == "model" and m[1] in cls.schema().model:
res[f] = cls.model_defaults(m[1])
for ff, dd in d[2].items():
res[f][ff] = copy.deepcopy(d[2][ff])
continue
res[f] = copy.deepcopy(d[2])
if d[1] == "UUID" and not res[f]:
res[f] = str(uuid.uuid4())
if d[1] == "RandomId" and not res[f]:
res[f] = sirepo.util.random_base62(length=16)
return res
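# Illustration (hypothetical schema entry, not from any actual schema): given
#   "beam": {"energy": ["Energy [GeV]", "Float", 1.0],
#            "watch": ["Watch", "model.watchpointReport", {"x": 0}]}
# model_defaults("beam") would return
#   PKDict(energy=1.0, watch=PKDict(<watchpointReport defaults>, x=0))
# with any empty UUID/RandomId defaults replaced by freshly generated values.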
@classmethod
def parse_jid(cls, data, uid):
"""A Job is a tuple of user, sid, and compute_model.
A jid is words and dashes.
Args:
data (dict): extract sid and compute_model
uid (str): user id
Returns:
str: unique name (treat opaquely)
"""
return sirepo.job.join_jid(uid, cls.parse_sid(data), cls.compute_model(data))
@classmethod
def parse_model(cls, obj):
"""Find the model in the arg
Looks for `frameReport`, `report`, and `computeModel`. Might be a compute or
analysis model.
Args:
obj (str or dict): simulation type or description
Returns:
str: target of the request
"""
if isinstance(obj, pkconfig.STRING_TYPES):
res = obj
elif isinstance(obj, dict):
res = obj.get("frameReport") or obj.get("report") or obj.get("computeModel")
else:
raise AssertionError("obj={} is unsupported type={}", obj, type(obj))
assert res and _MODEL_RE.search(res), "invalid model={} from obj={}".format(
res, obj
)
return res
@classmethod
def parse_sid(cls, obj):
"""Extract simulationId from obj
Args:
obj (object): may be data, req, resp, or string
Returns:
str: simulation id
"""
from sirepo import simulation_db
if isinstance(obj, pkconfig.STRING_TYPES):
res = obj
elif isinstance(obj, dict):
res = obj.get("simulationId") or obj.pknested_get(
"models.simulation.simulationId"
)
else:
raise AssertionError("obj={} is unsupported type={}", obj, type(obj))
return simulation_db.assert_sid(res)
@classmethod
def poll_seconds(cls, data):
"""Client poll period for simulation status
TODO(robnagler) needs to be encapsulated
Args:
data (dict): must contain report name
Returns:
int: number of seconds to poll
"""
return 2 if cls.is_parallel(data) else 1
@classmethod
def proprietary_code_tarball(cls):
return None
@classmethod
def proprietary_code_lib_file_basenames(cls):
return []
@classmethod
def put_sim_file(cls, sim_id, file_path, basename):
return cls._put_sim_db_file(file_path, cls._sim_file_uri(sim_id, basename))
@classmethod
def resource_path(cls, filename):
"""Static resource (package_data) file for simulation
Returns:
py.path.local: absolute path to file
"""
return sirepo.resource.file_path(
_TEMPLATE_RESOURCE_DIR, cls.sim_type(), filename
)
@classmethod
def schema(cls):
from sirepo import simulation_db
return cls._memoize(simulation_db.get_schema(cls.sim_type()))
@classmethod
def sim_file_basenames(cls, data):
return cls._sim_file_basenames(data)
@classmethod
def sim_files_to_run_dir(cls, data, run_dir):
for b in cls.sim_file_basenames(data):
cls._sim_file_to_run_dir(
data.models.simulation.simulationId,
b.basename,
run_dir,
is_exe=b.get("is_exe", False),
)
@classmethod
def sim_type(cls):
return cls._memoize(pkinspect.module_basename(cls))
@classmethod
def support_files_to_run_dir(cls, data, run_dir):
cls.lib_files_to_run_dir(data, run_dir)
cls.sim_files_to_run_dir(data, run_dir)
@classmethod
def update_model_defaults(cls, model, name, dynamic=None):
defaults = cls.model_defaults(name)
if dynamic:
defaults.update(dynamic(name))
for f in defaults:
if f not in model:
model[f] = defaults[f]
@classmethod
def want_browser_frame_cache(cls, report):
return True
@classmethod
def watchpoint_id(cls, report):
m = cls.WATCHPOINT_REPORT_RE.search(report)
if not m:
raise RuntimeError("invalid watchpoint report name: ", report)
return int(m.group(1))
@classmethod
def _assert_server_side(cls):
assert (
not _cfg.lib_file_uri
), f"method={pkinspect.caller()} may only be called on server"
@classmethod
def _compute_model(cls, analysis_model, resp):
"""Returns ``animation`` for models with ``Animation`` in name
Subclasses should override, but call this. The mapping of
``<name>Animation`` to ``animation`` should stay consistent here.
Args:
analysis_model (str): analysis model name
resp (PKDict): model or simulation data (may be None)
Returns:
str: name of compute model for analysis_model
"""
if _ANIMATION_NAME in analysis_model.lower():
return _ANIMATION_NAME
return analysis_model
@classmethod
def _force_recompute(cls):
"""Random value to force a compute_job to recompute.
Used by `_compute_job_fields`
Returns:
str: random value same length as md5 hash
"""
return sirepo.util.random_base62(32)
@classmethod
def _frame_id_fields(cls, frame_args):
"""Schema specific frame_id fields"""
f = cls.schema().frameIdFields
r = frame_args.frameReport
return f[r] if r in f else f[cls.compute_model(r)]
@classmethod
def _delete_sim_db_file(cls, uri):
_request(
"DELETE",
_cfg.supervisor_sim_db_file_uri + uri,
).raise_for_status()
@classmethod
def _init_models(cls, models, names=None, dynamic=None):
if names:
names = set(list(names) + ["simulation"])
for n in names or cls.schema().model:
cls.update_model_defaults(
models.setdefault(n, PKDict()),
n,
dynamic=dynamic,
)
@classmethod
def _lib_file_abspath(cls, basename, data=None, qcall=None):
import sirepo.simulation_db
if _cfg.lib_file_uri:
# In agent
if basename in _cfg.lib_file_list:
# User generated lib file
p = pkio.py_path(basename)
r = _request("GET", _cfg.lib_file_uri + basename)
r.raise_for_status()
p.write_binary(r.content)
return p
elif not _cfg.lib_file_resource_only:
# Command line utility or server
f = sirepo.simulation_db.simulation_lib_dir(
cls.sim_type(),
qcall=qcall,
).join(basename)
if f.check(file=True):
return f
try:
# Lib file distributed with build
f = cls.lib_file_resource_path(basename)
if f.check(file=True):
return f
except Exception as e:
if not pkio.exception_is_not_found(e):
raise
return None
@classmethod
def _lib_file_list(cls, pat, want_user_lib_dir=True, qcall=None):
"""Unsorted list of absolute paths matching glob pat
Only works locally.
"""
cls._assert_server_side()
from sirepo import simulation_db
res = PKDict(
(
(f.basename, f)
for f in sirepo.resource.glob_paths(
_TEMPLATE_RESOURCE_DIR,
cls.sim_type(),
cls._LIB_RESOURCE_DIR,
pat,
)
)
)
if want_user_lib_dir:
# lib_dir overwrites resource_dir
res.update(
(f.basename, f)
for f in pkio.sorted_glob(
simulation_db.simulation_lib_dir(cls.sim_type(), qcall=qcall).join(
pat
),
)
)
return res.values()
@classmethod
def _memoize(cls, value):
"""Cache class method (no args)
Example::
@classmethod
def something(cls):
return cls._memoize(compute_something_once())
Args:
value (object): any object
Returns:
object: value
"""
@classmethod
def wrap(cls):
return value
setattr(
cls,
inspect.currentframe().f_back.f_code.co_name,
wrap,
)
return value
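# Note on the mechanism above: inspect.currentframe().f_back.f_code.co_name is
# the name of the classmethod that called _memoize, and setattr rebinds that
# name on the class to the trivial wrap classmethod, so later calls return the
# cached value without recomputing it.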
@classmethod
def _non_analysis_fields(cls, data, model):
"""Get the non-analysis fields for model
If the model has "analysis" fields, then return the full list of non-style fields
otherwise returns the model name (which implies all model fields)
Args:
data (dict): simulation
model (str): name of model to compute
Returns:
list: compute_fields fields for model or whole model
"""
s = set(data.models.get(model, {}).keys()) - cls.ANALYSIS_ONLY_FIELDS
if not s:
return [model]
return sorted(["{}.{}".format(model, x) for x in s])
@classmethod
def _organize_example(cls, data):
dm = data.models
if dm.simulation.get("isExample") and dm.simulation.folder == "/":
dm.simulation.folder = "/Examples"
@classmethod
def _proprietary_code_tarball(cls):
return f"{cls.sim_type()}.tar.gz"
@classmethod
def _put_sim_db_file(cls, file_path, uri):
_request(
"PUT",
_cfg.supervisor_sim_db_file_uri + uri,
data=pkio.read_binary(file_path),
).raise_for_status()
@classmethod
def _sim_file_basenames(cls, data):
return []
@classmethod
def _sim_db_file_to_run_dir(cls, uri, run_dir, is_exe=False):
p = run_dir.join(uri.split("/")[-1])
r = _request("GET", _cfg.supervisor_sim_db_file_uri + uri)
r.raise_for_status()
p.write_binary(r.content)
if is_exe:
p.chmod(cls._EXE_PERMISSIONS)
return p
@classmethod
def _sim_file_to_run_dir(cls, sim_id, basename, run_dir, is_exe=False):
return cls._sim_db_file_to_run_dir(
cls._sim_file_uri(sim_id, basename),
run_dir,
is_exe=is_exe,
)
@classmethod
def _sim_file_uri(cls, sim_id, basename):
from sirepo import simulation_db
return simulation_db.sim_db_file_uri(
cls.sim_type(),
sim_id,
basename,
)
class SimDbFileNotFound(Exception):
"""A sim db file could not be found"""
pass
def _request(method, uri, data=None):
r = requests.request(
method,
uri,
data=data,
verify=sirepo.job.cfg().verify_tls,
headers=PKDict(
{
sirepo.util.AUTH_HEADER: _AUTH_HEADER_PREFIX
+ _cfg.supervisor_sim_db_file_token,
}
),
)
if method == "GET" and r.status_code == 404:
raise SimDbFileNotFound(f"uri={uri} not found")
return r
_cfg = pkconfig.init(
lib_file_resource_only=(False, bool, "used by utility programs"),
lib_file_list=(
None,
lambda v: pkio.read_text(v).splitlines(),
"directory listing of remote lib",
),
lib_file_uri=(None, str, "where to get files from when remote"),
supervisor_sim_db_file_uri=(
None,
str,
"where to get/put simulation db files from/to supervisor",
),
supervisor_sim_db_file_token=(
None,
str,
"token for supervisor simulation file access",
),
)
| {
"content_hash": "a40279430d4dacb338845af041b39471",
"timestamp": "",
"source": "github",
"line_count": 854,
"max_line_length": 89,
"avg_line_length": 29.7576112412178,
"alnum_prop": 0.5597528823830323,
"repo_name": "radiasoft/sirepo",
"id": "7059bd6c4c210726e77b2a17fadc2bb4b0a6dc60",
"size": "25437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sirepo/sim_data/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "152"
},
{
"name": "CSS",
"bytes": "65716"
},
{
"name": "HTML",
"bytes": "144600"
},
{
"name": "JavaScript",
"bytes": "3855752"
},
{
"name": "Jinja",
"bytes": "190763"
},
{
"name": "Jupyter Notebook",
"bytes": "1262"
},
{
"name": "Opal",
"bytes": "61806"
},
{
"name": "Perl",
"bytes": "31089"
},
{
"name": "Python",
"bytes": "3022923"
},
{
"name": "SCSS",
"bytes": "29855"
},
{
"name": "Shell",
"bytes": "21259"
}
],
"symlink_target": ""
} |
'''
Access Groups v2
================
The following methods allow for interaction into the Tenable.io
:devportal:`access-groups-v2 <v2-access-groups>` API endpoints.
Methods available on ``tio.access_groups_v2``:
.. rst-class:: hide-signature
.. autoclass:: AccessGroupsV2API
:members:
'''
from restfly.utils import dict_merge
from tenable.errors import UnexpectedValueError
from tenable.io.base import TIOEndpoint, TIOIterator
class AccessGroupsIteratorV2(TIOIterator):
'''
The access groups v2 iterator provides a scalable way to work through
access groups result sets of any size. The iterator will walk through each
page of data, returning one record at a time. If it reaches the end of a
page of records, then it will request the next page of information and then
continue to return records from the next page (and the next, and the next)
until the counter reaches the total number of records that the API has
reported.
Attributes:
count (int): The current number of records that have been returned
page (list):
The current page of data being walked through. pages will be
cycled through as the iterator requests more information from the
API.
page_count (int): The number of records returned from the current page.
total (int):
The total number of records that exist for the current request.
'''
pass
class AccessGroupsV2API(TIOEndpoint):
'''
This will contain all methods related to access group
'''
def _list_clean(self, items):
'''
Removes duplicate values from list
Args:
items (list): list of items
Returns:
:obj:`list`:
Returns list of distinct values
'''
return list(set(self._check('items', items, list)))
def _principal_constructor(self, items):
'''
Simple principal tuple expander. Also supports validating principal
dictionaries for transparent passthrough.
'''
resp = list()
for item in items:
self._check('principal', item, (tuple, dict))
if isinstance(item, tuple):
data = dict()
if len(item) == 2:
item = item + ([],)
data['type'] = self._check('principal:type', item[0], str,
choices=['user', 'group'])
try:
data['principal_id'] = self._check('principal:id', item[1], 'uuid')
except UnexpectedValueError:
data['principal_name'] = self._check('principal:name', item[1], str)
data['permissions'] = self._list_clean(
[self._check('permission', permission, str,
choices=['CAN_VIEW', 'CAN_SCAN'], case='upper')
for permission in self._check('permissions', item[2], list)])
# if permissions are empty, we will assign default value to it
if not data['permissions']:
data['permissions'] = ['CAN_VIEW']
resp.append(data)
else:
self._check('principal:type', item['type'], str,
choices=['user', 'group'])
if 'principal_id' in item:
self._check('principal_id', item['principal_id'], 'uuid')
if 'principal_name' in item:
self._check('principal_name', item['principal_name'], str)
item['permissions'] = self._list_clean([
self._check('permission', permission, str,
choices=['CAN_VIEW', 'CAN_SCAN'], case='upper')
for permission in self._check('permissions', item['permissions']
if 'permissions' in item and item['permissions']
else None, list, default=['CAN_VIEW'])]
)
resp.append(item)
return resp
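# Illustration (hypothetical identifiers): passing
#   [('user', '32a0c314-442b-4aed-bbf5-ba9cf5cafbf4'),
#    ('group', 'linux-admins', ['CAN_SCAN'])]
# through _principal_constructor yields
#   [{'type': 'user', 'principal_id': '32a0c314-442b-4aed-bbf5-ba9cf5cafbf4',
#     'permissions': ['CAN_VIEW']},
#    {'type': 'group', 'principal_name': 'linux-admins', 'permissions': ['CAN_SCAN']}]
# 2-tuples get the default CAN_VIEW permission, and identifiers that are not
# valid UUIDs fall back to principal_name.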
def list(self, *filters, **kw):
'''
Get the listing of configured access groups from Tenable.io.
:devportal:`access-groups-v2: list <v2-access-groups-listt>`
Args:
*filters (tuple, optional):
Filters are tuples in the form of ('NAME', 'OPERATOR', 'VALUE').
Multiple filters can be used and will filter down the data being
returned from the API.
Examples:
- ``('distro', 'match', 'win')``
- ``('name', 'nmatch', 'home')``
As the filters may change and sortable fields may change over
time, it's highly recommended that you look at the output of
the :py:meth:`tio.filters.access_groups_filters_v2()` endpoint to get more details.
filter_type (str, optional):
The filter_type operator determines how the filters are combined
together. ``and`` will inform the API that all of the filter
conditions must be met for an access group to be returned,
whereas ``or`` would mean that if any of the conditions are met,
the access group record will be returned.
limit (int, optional):
The number of records to retrieve. Default is 50
offset (int, optional):
The starting record to retrieve. Default is 0.
sort (tuple, optional):
A tuple of tuples identifying the field and sort order of
the field.
wildcard (str, optional):
A string to pattern match against all available fields returned.
wildcard_fields (list, optional):
A list of fields to optionally restrict the wild-card matching
to.
Returns:
:obj:`AccessGroupsIterator`:
An iterator that handles the page management of the requested
records.
Examples:
Getting the listing of all access groups:
>>> for group in tio.access_groups_v2.list():
... pprint(group)
Retrieving all of the access groups whose name matches 'win':
>>> for group in tio.access_groups_v2.list(('name', 'eq', 'win')):
... pprint(group)
'''
limit = 50
offset = 0
pages = None
query = self._parse_filters(filters,
self._api.filters.access_group_filters_v2(), rtype='colon')
# If the offset was set to something other than the default starting
# point of 0, then we will update offset to reflect that.
if 'offset' in kw and self._check('offset', kw['offset'], int):
offset = kw['offset']
# The limit parameter affects how many records at a time we will pull
# from the API. The default in the API is set to 50, however we can
# pull any variable amount.
if 'limit' in kw and self._check('limit', kw['limit'], int):
limit = kw['limit']
# For the sorting fields, we are converting the tuple that has been
# provided to us and converting it into a comma-delimited string with
# each field being represented with its sorting order. e.g. If we are
# presented with the following:
#
# sort=(('field1', 'asc'), ('field2', 'desc'))
#
# we will generate the following string:
#
# sort=field1:asc,field2:desc
#
if 'sort' in kw and self._check('sort', kw['sort'], tuple):
query['sort'] = ','.join(['{}:{}'.format(
self._check('sort_field', i[0], str),
self._check('sort_direction', i[1], str, choices=['asc', 'desc'])
) for i in kw['sort']])
# The filter_type determines how the filters are combined together.
# The default is 'and', however you can always explicitly define 'and'
# or 'or'.
if 'filter_type' in kw and self._check(
'filter_type', kw['filter_type'], str, choices=['and', 'or']):
query['ft'] = kw['filter_type']
# The wild-card filter text refers to how the API will pattern match
# within all fields, or specific fields using the wildcard_fields param.
if 'wildcard' in kw and self._check('wildcard', kw['wildcard'], str):
query['w'] = kw['wildcard']
# The wildcard_fields parameter allows the user to restrict the fields
# that the wild-card pattern match pertains to.
if 'wildcard_fields' in kw and self._check(
'wildcard_fields', kw['wildcard_fields'], list):
query['wf'] = ','.join(kw['wildcard_fields'])
# Return the Iterator.
return AccessGroupsIteratorV2(self._api,
_limit=limit,
_offset=offset,
_pages_total=pages,
_query=query,
_path='v2/access-groups',
_resource='access_groups'
)
def create(self, name, rules, principals=None, all_users=False, access_group_type=None):
'''
Creates a new access group
:devportal:`access-groups: create <v2-access-groups-create>`
Args:
name (str):
The name of the access group to create.
rules (list):
a list of rule tuples. Tuples are defined in the standardized
method of name, operator, value. For example:
.. code-block:: python
('operating_system', 'eq', ['Windows NT'])
Rules will be validated against the filters before being sent
to the API. Note that the value field in this context is a list
of string values.
principals (list, optional):
A list of principal tuples. Each tuple must contain the type,
the identifier and the permissions for the principal.
The identifier can be either a UUID associated with a user/group, or the name of the
user/group, and the permissions can be CAN_VIEW, CAN_SCAN, or both in a list.
Default permission is ``CAN_VIEW``
For example:
.. code-block:: python
('user', '32a0c314-442b-4aed-bbf5-ba9cf5cafbf4', ['CAN_VIEW'])
('user', '[email protected]', ['CAN_SCAN'])
('group', '32a0c314-442b-4aed-bbf5-ba9cf5cafbf4')
all_users (bool, optional):
If enabled, the access group will apply to all users and any
principals defined will be ignored.
access_group_type (str, optional):
The type of access group. It can be one of two possible types:
`MANAGE_ASSETS`, `SCAN_TARGETS`
The default is `MANAGE_ASSETS`
Returns:
:obj:`dict`:
The resource record for the new access list.
Examples:
Allow all users to see 192.168.0.0/24:
>>> tio.access_groups_v2.create('Example',
... [('ipv4', 'eq', ['192.168.0.0/24'])],
... all_users=True)
Allow everyone in a specific group id to see specific hosts:
>>> tio.access_groups_v2.create('Example',
... [('netbios_name', 'eq', ['dc1.company.tld']),
... ('netbios_name', 'eq', ['dc2.company.tld'])],
... principals=[
... ('group', '32a0c314-442b-4aed-bbf5-ba9cf5cafbf4', ['CAN_VIEW'])
... ])
'''
if not principals:
principals = list()
# construct the payload dictionary
payload = {
# run the rules through the filter parser...
'rules': self._parse_filters(rules,
self._api.filters.access_group_asset_rules_filters_v2(),
rtype='accessgroup')['rules'],
# run the principals through the principal parser...
'principals': self._principal_constructor(principals),
'name': self._check('name', name, str),
'all_users': self._check('all_users', all_users, bool),
'access_group_type': self._check('access_group_type', access_group_type, str,
choices=['MANAGE_ASSETS', 'SCAN_TARGETS'],
default='MANAGE_ASSETS',
case='upper')
}
# call the API endpoint and return the response to the caller.
return self._api.post('v2/access-groups', json=payload).json()
def delete(self, group_id):
'''
Deletes the specified access group.
:devportal:`access-groups: delete <v2-access-groups-delete>`
Args:
group_id (str): The UUID of the access group to remove.
'''
self._api.delete('v2/access-groups/{}'.format(
self._check('group_id', group_id, 'uuid')))
def edit(self, group_id, **kw):
'''
Edits an access group
:devportal:`access-groups: edit <v2-access-groups-edit>`
Args:
group_id (str):
The UUID of the access group to edit.
name (str, optional):
The name of the access group to edit.
rules (list, optional):
a list of rule tuples. Tuples are defined in the standardized
method of name, operator, value. For example:
.. code-block:: python
('operating_system', 'eq', ['Windows NT'])
Rules will be validated against the filters before being sent
to the API. Note that the value field in this context is a list
of string values.
principals (list, optional):
A list of principal tuples. Each tuple must contain the type,
the identifier and the permissions for the principal.
The identifier can be either a UUID associated with a user/group, or the name of the
user/group, and the permissions can be CAN_VIEW, CAN_SCAN, or both in a list.
Default permission is ``CAN_VIEW``
For example:
.. code-block:: python
('user', '32a0c314-442b-4aed-bbf5-ba9cf5cafbf4', ['CAN_VIEW'])
('user', '[email protected]', ['CAN_SCAN'])
('group', '32a0c314-442b-4aed-bbf5-ba9cf5cafbf4')
all_users (bool):
If enabled, the access group will apply to all users and any
principals defined will be ignored.
all_assets (bool, optional):
Specifies if the access group to modify is the default
"all assets" group or a user-defined one.
access_group_type (str, optional):
The type of access group. It can be one of two possible types:
`MANAGE_ASSETS`, `SCAN_TARGETS`
The default is `MANAGE_ASSETS`
'''
# If any rules are specified, then run them through the filter parser.
if 'rules' in kw:
kw['rules'] = self._parse_filters(kw['rules'],
self._api.filters.access_group_asset_rules_filters_v2(),
rtype='accessgroup')['rules']
# if any principals are specified, then run them through the principal
# parser.
if 'principals' in kw:
kw['principals'] = self._principal_constructor(kw['principals'])
# get the details of the access group that we are supposed to be editing
# and then merge in the keywords specified.
details = dict_merge(self.details(self._check('group_id', group_id, 'uuid')), kw)
# construct the payload from the merged details.
payload = {
'name': self._check('name', details['name'], str),
'all_users': self._check('all_users', details['all_users'], bool),
'all_assets': self._check('all_assets', details['all_assets'], bool),
'rules': details['rules'],
'principals': details['principals'],
'access_group_type': details['access_group_type']
}
# call the API endpoint and return the response to the caller.
return self._api.put('v2/access-groups/{}'.format(group_id), json=payload).json()
def details(self, group_id):
'''
Retrieves the details of the specified access group.
:devportal:`access-groups: details <v2-access-groups-details>`
Args:
group_id (str): The UUID of the access group.
'''
return self._api.get('v2/access-groups/{}'.format(
self._check('group_id', group_id, 'uuid'))).json()
| {
"content_hash": "625665532fc09830a1da7f97b42bd663",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 99,
"avg_line_length": 41.50985221674877,
"alnum_prop": 0.5563994541031271,
"repo_name": "tenable/pyTenable",
"id": "cf5fb4ed1e0fb55c84d0b8350c3bcc0d9b543bf5",
"size": "16853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tenable/io/access_groups_v2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2769266"
}
],
"symlink_target": ""
} |
print ("____________________________________________________________")
import os,time,sys
def main():
print("Welcome to journal_writer!")
print("0 - set journal file")
print("1 - write to temporary file")
print("2 - sync temporary file to journal file")
print("3 - decrypt and read the journal file")
print("4 - write and sync file")
print("q - quit")
print("current file: "+set_name.name)
input0 = str(input("What would you like to do?: "))
if input0 == 'q':
os.system("clear") #clears the screen
exit()
elif input0 == "0":
set_name(str(input("name for journal file?: ")))
elif input0 == "1":
os.system("./write")
elif input0 == "2":
if set_name.name == "none":
os.system("./sync")
else:
os.system("./sync "+set_name.name)
elif input0 == "3":
if set_name.name == "none":
os.system("bash ./read") # says "bash" instead of just ./ because when it was just ./ it wouldn't display
else:
os.system("bash ./read "+set_name.name)
elif input0 == "4":
os.system("./write")
if set_name.name == "none":
os.system("./sync")
else:
os.system("./sync "+set_name.name)
else:
print ("that is not one of the options")
print()
time.sleep(0.5)
main()
def set_name(name):
set_name.name = name
set_name("none")
try:
sys.argv[1]
except IndexError:
os.system("echo 'i just had to put a line here' >> /dev/null")
else:
set_name(sys.argv[1])
try:
main()
except KeyboardInterrupt:
os.system("clear")
print("CTRL-C pressed -- goodbye")
| {
"content_hash": "adaf2713b81fa6055b5f1597b1d29976",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 108,
"avg_line_length": 27.35185185185185,
"alnum_prop": 0.6079891672308734,
"repo_name": "aidenholmes/journal_writer",
"id": "9cfabe965d8d5b79e66679c43d826c7d814f1873",
"size": "1546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "journal_writer3.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3370"
},
{
"name": "Shell",
"bytes": "3547"
}
],
"symlink_target": ""
} |
import logging
import sys
import os
import yaml
from raven import Client
from raven.handlers.logging import SentryHandler
from raven.conf import setup_logging
import utils
with open(os.path.join('configs', 'prod.yml')) as config_file:
config = yaml.load(config_file.read())
if 'sentry' in config:
client = Client(config['sentry'], auto_log_stacks=True)
handler = SentryHandler(client)
setup_logging(handler)
else:
client = None
logging.info('Sentry.io not loaded')
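# Sketch of the expected configs/prod.yml (keys inferred from the code above,
# values are placeholders):
#
#     sentry: https://<key>@sentry.io/<project-id>
#     telegram:
#         dev_chat: '@my_dev_chat'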
def send_report_to_dev_chat(exc):
r2t = utils.Reddit2TelegramSender(config['telegram']['dev_chat'], config)
local_vars = sys.exc_info()[2].tb_next.tb_frame.f_locals
submodule = local_vars['submodule_name']
channel = local_vars['submodule'].t_channel
title = 'submodule: {}\nchannel: {}'.format(submodule, channel)
if 'submission' in local_vars:
link = local_vars['submission'].shortlink
error_cnt = r2t.store_error_link(channel, link)
title = '{title}\nlink: {link}\nerror_cnt: {cnt}'.format(
title=title,
link=link,
cnt=error_cnt['cnt']
)
report = '{t}\n\n\n{e}'.format(
t=title,
e=exc
)
r2t.send_text(report)
def report_error(fn):
def wrapper(*args, **kwargs):
try:
fn(*args, **kwargs)
except Exception as e:
if client: # has sentry instance
client.captureException()
else:
logging.exception('Exception Ignored.')
send_report_to_dev_chat(e)
return wrapper
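# Usage sketch (hypothetical function name): the dev-chat report reads the
# failing frame's locals, so a decorated function is expected to define
# submodule_name and submodule (and optionally submission) locally, e.g.:
#
#     @report_error
#     def post_submodule(submodule_name):
#         submodule = importlib.import_module(submodule_name)
#         ...  # any exception raised here is captured and reported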
| {
"content_hash": "6e9066f6ffc4e7260c567d4b04931dfe",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 77,
"avg_line_length": 27.75438596491228,
"alnum_prop": 0.6194690265486725,
"repo_name": "nsiregar/reddit2telegram",
"id": "17a15ae6c409dbc91bce3703fee7349e13fd5b62",
"size": "1599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reporting_stuff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "110421"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
} |
"""
This is a minimum working example of a Matchmaker Exchange server.
It is intended strictly as a useful reference, and should not be
used in a production setting.
"""
from __future__ import with_statement, division, unicode_literals
import logging
import json
from flask import Flask, request, after_this_request, jsonify
from flask_negotiate import consumes, produces
from collections import defaultdict
from werkzeug.exceptions import BadRequest
from .compat import urlopen, Request
from .auth import auth_token_required
from .models import MatchRequest
from .schemas import validate_request, validate_response, ValidationError
API_MIME_TYPE = 'application/vnd.ga4gh.matchmaker.v1.0+json'
# Global flask application
app = Flask(__name__.split('.')[0])
# app.config['DEBUG'] = True
# Logger
logger = logging.getLogger(__name__)
@app.route('/v1/match', methods=['POST'])
@consumes(API_MIME_TYPE, 'application/json')
@produces(API_MIME_TYPE)
@auth_token_required()
def match():
"""Return patients similar to the query patient"""
@after_this_request
def add_header(response):
response.headers['Content-Type'] = API_MIME_TYPE
return response
try:
logger.info("Getting flask request data")
request_json = request.get_json(force=True)
except BadRequest:
error = jsonify(message='Invalid request JSON')
error.status_code = 400
return error
try:
logger.info("Validate request syntax")
validate_request(request_json)
except ValidationError as e:
error = jsonify(message='Request does not conform to API specification',
request=request_json)
error.status_code = 422
return error
logger.info("Parsing query")
request_obj = MatchRequest.from_api(request_json)
logger.info("Finding similar patients")
response_obj = request_obj.match(n=5)
logger.info("Serializing response")
response_json = response_obj.to_api()
try:
logger.info("Validating response syntax")
validate_response(response_json)
except ValidationError as e:
# log to console and return response anyway
logger.error('Response does not conform to API specification:\n{}\n\nResponse:\n{}'.format(e, response_json))
return jsonify(response_json)
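# Example client call (illustrative; host, port, auth header and payload are
# placeholders -- see the MME API specification for the real patient schema):
#
#     import requests
#     r = requests.post('http://localhost:8000/v1/match',
#                       headers={'Content-Type': API_MIME_TYPE,
#                                'Accept': API_MIME_TYPE,
#                                'X-Auth-Token': '<token>'},
#                       json={'patient': {'id': 'p1', 'features': []}})
#     print(r.status_code, r.json())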
| {
"content_hash": "babbbc351c8317d874172257ba28d385",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 117,
"avg_line_length": 29.884615384615383,
"alnum_prop": 0.7014157014157014,
"repo_name": "MatchmakerExchange/reference-server",
"id": "40bbe3ed684753d1039b4be4dcbf23ba94c20175",
"size": "2331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mme_server/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62016"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from typing import TYPE_CHECKING
from libqtile import config, group, hook
from libqtile.backend.base import FloatStates
from libqtile.config import Match
if TYPE_CHECKING:
from libqtile.backend.base import Window
class WindowVisibilityToggler:
"""
WindowVisibilityToggler is a wrapper for a window, used in ScratchPad group
to toggle visibility of a window by toggling the group it belongs to.
The window is either sent to the named ScratchPad, which is by default
invisible, or the current group on the current screen.
With this functionality the window can be shown and hidden by a single
keystroke (bound to command of ScratchPad group).
By default, the window is also hidden if it loses focus.
"""
def __init__(self, scratchpad_name, window: Window, on_focus_lost_hide, warp_pointer):
"""
Initialize the WindowVisibilityToggler.
Parameters:
===========
scratchpad_name: string
The name (not label) of the ScratchPad group used to hide the window
window: window
The window to toggle
on_focus_lost_hide: bool
if True the associated window is hidden if it loses focus
warp_pointer: bool
if True the mouse pointer is warped to center of associated window
if shown. Only used if on_focus_lost_hide is True
"""
self.scratchpad_name = scratchpad_name
self.window = window
self.on_focus_lost_hide = on_focus_lost_hide
self.warp_pointer = warp_pointer
# determine current status based on visibility
self.shown = False
self.show()
def info(self):
return dict(
window=self.window.info(),
scratchpad_name=self.scratchpad_name,
visible=self.visible,
on_focus_lost_hide=self.on_focus_lost_hide,
warp_pointer=self.warp_pointer,
)
@property
def visible(self):
"""
Determine if associated window is currently visible.
That is the window is on a group different from the scratchpad
and that group is the current visible group.
"""
if self.window.group is None:
return False
return (
self.window.group.name != self.scratchpad_name
and self.window.group is self.window.qtile.current_group
)
def toggle(self):
"""
Toggle the visibility of associated window. Either show() or hide().
"""
if not self.visible or not self.shown:
self.show()
else:
self.hide()
def show(self):
"""
Show the associated window on top of current screen.
The window is moved to the current group as floating window.
If 'warp_pointer' is True the mouse pointer is warped to center of the
window if 'on_focus_lost_hide' is True.
Otherwise, if pointer is moved manually to window by the user
the window might be hidden again before actually reaching it.
"""
if (not self.visible) or (not self.shown):
win = self.window
# always set the floating state before changing group
# to avoid disturbance of tiling layout
win._float_state = FloatStates.TOP
# add to group and bring it to front.
win.togroup()
win.cmd_bring_to_front()
# toggle internal flag of visibility
self.shown = True
# add hooks to determine if focus get lost
if self.on_focus_lost_hide:
if self.warp_pointer:
win.focus(warp=True)
hook.subscribe.client_focus(self.on_focus_change)
hook.subscribe.setgroup(self.on_focus_change)
def hide(self):
"""
Hide the associated window. That is, send it to the scratchpad group.
"""
if self.visible or self.shown:
# unsubscribe the hook methods, since the window is not shown
if self.on_focus_lost_hide:
hook.unsubscribe.client_focus(self.on_focus_change)
hook.unsubscribe.setgroup(self.on_focus_change)
self.window.togroup(self.scratchpad_name)
self.shown = False
def unsubscribe(self):
"""unsubscribe all hooks"""
if self.on_focus_lost_hide and (self.visible or self.shown):
hook.unsubscribe.client_focus(self.on_focus_change)
hook.unsubscribe.setgroup(self.on_focus_change)
def on_focus_change(self, *args, **kwargs):
"""
hook method which is called on window focus change and group change.
Depending on 'on_focus_lost_xxx' arguments, the associated window may
get hidden (by call to hide) or even killed.
"""
if self.shown:
current_group = self.window.qtile.current_group
if (
self.window.group is not current_group
or self.window is not current_group.current_window
):
if self.on_focus_lost_hide:
self.hide()
class DropDownToggler(WindowVisibilityToggler):
"""
Specialized WindowVisibilityToggler which places the associated window
at the desired location each time it is shown.
For example this can be used to create a quake-like terminal.
"""
def __init__(self, window, scratchpad_name, ddconfig):
self.name = ddconfig.name
self.x = ddconfig.x
self.y = ddconfig.y
self.width = ddconfig.width
self.height = ddconfig.height
# Let's add the window to the scratchpad group.
window.togroup(scratchpad_name)
window.opacity = ddconfig.opacity
WindowVisibilityToggler.__init__(
self, scratchpad_name, window, ddconfig.on_focus_lost_hide, ddconfig.warp_pointer
)
def info(self):
info = WindowVisibilityToggler.info(self)
info.update(
dict(name=self.name, x=self.x, y=self.y, width=self.width, height=self.height)
)
return info
def show(self):
"""
Like WindowVisibilityToggler.show, but before showing the window,
its floating x, y, width and height is set.
"""
if (not self.visible) or (not self.shown):
# SET GEOMETRY
win = self.window
screen = win.qtile.current_screen
# calculate windows floating position and width/height
# these may differ for screens, and thus always recalculated.
x = int(screen.dx + self.x * screen.dwidth)
y = int(screen.dy + self.y * screen.dheight)
win.float_x = x
win.float_y = y
width = int(screen.dwidth * self.width)
height = int(screen.dheight * self.height)
win.place(x, y, width, height, win.borderwidth, win.bordercolor, respect_hints=True)
# Toggle the dropdown
WindowVisibilityToggler.show(self)
class ScratchPad(group._Group):
"""
Specialized group which is by default invisible and can be configured, to
spawn windows and toggle its visibility (in the current group) by command.
The ScratchPad group acts as a container for windows which are currently
not visible but associated to a DropDownToggler and can toggle their
group by command (of ScratchPad group).
The ScratchPad, by default, has no label and thus is not shown in
GroupBox widget.
"""
def __init__(
self,
name="scratchpad",
dropdowns: list[config.DropDown] | None = None,
label="",
single=False,
):
group._Group.__init__(self, name, label=label)
self._dropdownconfig = {dd.name: dd for dd in dropdowns} if dropdowns is not None else {}
self.dropdowns: dict[str, DropDownToggler] = {}
self._spawned: dict[str, Match] = {}
self._to_hide: list[str] = []
self._single = single
def _check_unsubscribe(self):
if not self.dropdowns:
hook.unsubscribe.client_killed(self.on_client_killed)
hook.unsubscribe.float_change(self.on_float_change)
def _spawn(self, ddconfig):
"""
Spawn a process by defined command.
Method is only called if no window is associated. This is either on the
first call to show or if the window was killed.
The process id of spawned process is saved and compared to new windows.
In case of a match the window gets associated to this DropDown object.
"""
name = ddconfig.name
if name not in self._spawned:
if not self._spawned:
hook.subscribe.client_new(self.on_client_new)
pid = self.qtile.cmd_spawn(ddconfig.command)
self._spawned[name] = ddconfig.match or Match(net_wm_pid=pid)
def on_client_new(self, client, *args, **kwargs):
"""
hook method which is called on new windows.
This method is subscribed if the given command is spawned
and unsubscribed immediately if the associated window is detected.
"""
name = None
for n, match in self._spawned.items():
if match.compare(client):
name = n
break
if name is not None:
self._spawned.pop(name)
if not self._spawned:
hook.unsubscribe.client_new(self.on_client_new)
self.dropdowns[name] = DropDownToggler(client, self.name, self._dropdownconfig[name])
if self._single:
for n, d in self.dropdowns.items():
if n != name:
d.hide()
if name in self._to_hide:
self.dropdowns[name].hide()
self._to_hide.remove(name)
if len(self.dropdowns) == 1:
hook.subscribe.client_killed(self.on_client_killed)
hook.subscribe.float_change(self.on_float_change)
def on_client_killed(self, client, *args, **kwargs):
"""
hook method which is called if a client is killed.
If the associated window is killed, reset internal state.
"""
name = None
for name, dd in self.dropdowns.items():
if dd.window is client:
dd.unsubscribe()
del self.dropdowns[name]
break
self._check_unsubscribe()
def on_float_change(self, *args, **kwargs):
"""
hook method which is called if window float state is changed.
If the currently associated window is no longer floating, the window
and process are detached from the DropDown, so the next call to show
will spawn a new process.
"""
name = None
for name, dd in self.dropdowns.items():
if not dd.window.floating:
if dd.window.group is not self:
dd.unsubscribe()
del self.dropdowns[name]
break
self._check_unsubscribe()
def cmd_dropdown_toggle(self, name):
"""
Toggle visibility of named DropDown.
"""
if self._single:
for n, d in self.dropdowns.items():
if n != name:
d.hide()
if name in self.dropdowns:
self.dropdowns[name].toggle()
else:
if name in self._dropdownconfig:
self._spawn(self._dropdownconfig[name])
def cmd_hide_all(self):
"""
Hide all scratchpads.
"""
for d in self.dropdowns.values():
d.hide()
def cmd_dropdown_reconfigure(self, name, **kwargs):
"""
reconfigure the named DropDown configuration.
Note that changed attributes only have an effect on spawning the window.
"""
if name not in self._dropdownconfig:
return
dd = self._dropdownconfig[name]
for attr, value in kwargs.items():
if hasattr(dd, attr):
setattr(dd, attr, value)
def cmd_dropdown_info(self, name=None):
"""
Get information on configured or currently active DropDowns.
If name is None, a list of all dropdown names is returned.
"""
if name is None:
return {"dropdowns": [ddname for ddname in self._dropdownconfig]}
elif name in self.dropdowns:
return self.dropdowns[name].info()
elif name in self._dropdownconfig:
return self._dropdownconfig[name].info()
else:
raise ValueError('No DropDown named "%s".' % name)
def get_state(self):
"""
Get the state of existing dropdown windows. Used for restoring state across
Qtile restarts (`restart` == True) or config reloads (`restart` == False).
"""
state = []
for name, dd in self.dropdowns.items():
client_wid = dd.window.wid
state.append((name, client_wid, dd.visible))
return state
def restore_state(self, state, restart: bool) -> list[int]:
"""
Restore the state of existing dropdown windows. Used for restoring state across
Qtile restarts (`restart` == True) or config reloads (`restart` == False).
"""
orphans = []
for name, wid, visible in state:
if name in self._dropdownconfig:
if restart:
self._spawned[name] = Match(wid=wid)
if not visible:
self._to_hide.append(name)
else:
# We are reloading the config; manage the clients now
self.dropdowns[name] = DropDownToggler(
self.qtile.windows_map[wid],
self.name,
self._dropdownconfig[name],
)
if not visible:
self.dropdowns[name].hide()
else:
orphans.append(wid)
if self._spawned:
# Handle re-managed clients after restarting
assert restart
hook.subscribe.client_new(self.on_client_new)
if not restart and self.dropdowns:
# We're only reloading so don't have these hooked via self.on_client_new
hook.subscribe.client_killed(self.on_client_killed)
hook.subscribe.float_change(self.on_float_change)
return orphans
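# Typical user-side configuration sketch (values are illustrative only):
#
#     from libqtile.config import DropDown, Key, ScratchPad
#     from libqtile.lazy import lazy
#
#     groups = [
#         ScratchPad("scratchpad", [
#             DropDown("term", "alacritty", height=0.5, on_focus_lost_hide=True),
#         ]),
#         # ... regular groups ...
#     ]
#     keys = [Key([], "F12", lazy.group["scratchpad"].dropdown_toggle("term"))]
#
# lazy.group["scratchpad"].dropdown_toggle("term") ends up invoking
# cmd_dropdown_toggle defined above.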
| {
"content_hash": "694c538b73a7a6b0f48743551fb07f3b",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 97,
"avg_line_length": 37.70542635658915,
"alnum_prop": 0.5868969298245614,
"repo_name": "ramnes/qtile",
"id": "cfaa7c4ed24cf48e59b6a0538efffb03101aaac1",
"size": "15686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libqtile/scratchpad.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "535"
},
{
"name": "Python",
"bytes": "2135461"
},
{
"name": "Shell",
"bytes": "8090"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('versiones', '0005_auto_20160329_0010'),
]
operations = [
migrations.AddField(
model_name='modulo',
name='proyecto',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='versiones.Proyecto'),
),
]
| {
"content_hash": "4583779883db5fa91caf9d53b6a1ba89",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 117,
"avg_line_length": 25.263157894736842,
"alnum_prop": 0.6395833333333333,
"repo_name": "famachot/controlVersionesAPI",
"id": "3e127d282f069fa151d7d8deefddf41efe22a627",
"size": "552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/versiones/migrations/0006_modulo_proyecto.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "54081"
},
{
"name": "HTML",
"bytes": "46207"
},
{
"name": "JavaScript",
"bytes": "96738"
},
{
"name": "Python",
"bytes": "38254"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import sys
PY2 = sys.version_info.major == 2
import atexit
from collections import defaultdict
try:
from cPickle import dump
except ImportError:
from pickle import dump
import datetime
import os
if PY2:
from itertools import izip_longest as zip_longest
else:
from itertools import zip_longest
import socket
import sys
import time
import traceback
import warnings
import __main__
import yaml
from simdb.db import DataLoader
STRIP_FROM_ENV = ['PS1']
# Customize YAML serialization
import numpy as np
def ndarray_representer(dumper, data):
data = data.tolist()
if isinstance(data, list):
return dumper.represent_list(data)
else:
return dumper.represent_float(data)
def numpy_scalar_representer(dumper, data):
return dumper.represent_float(float(data))
def unicode_representer(dumper, data):
if '\n' in data:
return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
else:
return dumper.represent_unicode(data)
def str_representer(dumper, data):
if '\n' in data:
return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
else:
return dumper.represent_str(data)
yaml.add_representer(np.ndarray, ndarray_representer)
yaml.add_representer(np.float64, numpy_scalar_representer)
if PY2:
yaml.add_representer(unicode, unicode_representer)
yaml.add_representer(str, str_representer)
_db_path = None
_run = None
_current_dataset = None
_current_dataset_data = None
_current_dataset_start_times = None
_current_dataset_stop_times = None
_current_dataset_auto_flush = False
def _initialize():
global _run, _db_path
try:
db_path = os.environ['SIMDB_PATH']
except KeyError:
raise RuntimeError('SIMDB_PATH is not set!')
try:
git_repos = os.environ['SIMDB_GIT_REPOS']
except KeyError:
git_repos = ''
warnings.warn('No git repositories specified via SIMDB_GIT_REPOS')
if git_repos:
git_repos = git_repos.split(':')
git_info = {p.rstrip('/').split('/')[-1]: _git_info(p) for p in git_repos}
else:
git_info = None
try:
script = os.path.abspath(__main__.__file__)
except AttributeError:
script = 'from console'
try:
with open(__main__.__file__, 'rb') as f:
script_content = f.read()
except AttributeError:
script_content = b''
argv = sys.argv
host = os.uname()
started = datetime.datetime.now()
if not os.path.exists(db_path):
os.mkdir(db_path)
if not os.path.exists(os.path.join(db_path, 'RUNS')):
os.mkdir(os.path.join(db_path, 'RUNS'))
uid = _make_uid(os.path.join(db_path, 'RUNS'))
os.mkdir(os.path.join(db_path, 'RUNS', uid))
env = {k: v for k, v in os.environ.items() if k not in STRIP_FROM_ENV}
with open(os.path.join(db_path, 'RUNS', uid, 'INFO'), 'wt') as f:
yaml.dump(dict(script=script,
argv=argv,
host=host,
git=git_info,
started=started,
environment=env),
f,
allow_unicode=True)
with open(os.path.join(db_path, 'RUNS', uid, 'SCRIPT'), 'wb') as f:
f.write(script_content)
_run = uid
_db_path = db_path
def new_dataset(experiment, auto_flush=False, **params):
global _current_dataset, _current_dataset_data, _current_dataset_start_times
global _current_dataset_stop_times, _current_dataset_auto_flush
assert ' ' not in experiment and '-' not in experiment
if not _run:
_initialize()
if _current_dataset:
_write_data(successful=True)
if not os.path.exists(os.path.join(_db_path, 'DATA')):
os.mkdir(os.path.join(_db_path, 'DATA'))
uid = _make_uid(os.path.join(_db_path, 'DATA'), experiment)
_current_dataset = os.path.join(_db_path, 'DATA', uid)
_current_dataset_data = {}
_current_dataset_start_times = defaultdict(list)
_current_dataset_stop_times = defaultdict(list)
_current_dataset_auto_flush = auto_flush
os.mkdir(_current_dataset)
with open(os.path.join(_current_dataset, 'INFO'), 'wt') as f:
yaml.dump({'experiment': experiment,
'started': datetime.datetime.now(),
'parameters': params,
'comment': '',
'tags': [],
'protected': False},
f)
os.symlink(os.path.join('..', '..', 'RUNS', _run),
os.path.join(_current_dataset, 'RUN'))
os.symlink(os.path.join('..', '..', 'DATA', uid),
os.path.join(_db_path, 'RUNS', _run, uid))
def add_values(**new_data):
_check_dataset_keys(new_data.keys())
_current_dataset_data.update({k: _to_list(v) for k, v in new_data.items()})
if _current_dataset_auto_flush:
flush()
def append_values(**new_data):
if not _current_dataset:
raise ValueError('no data set created')
data = _current_dataset_data
for k, v in new_data.items():
v = _to_list(v)
if k not in data:
data[k] = [v]
elif not isinstance(data[k], list):
data[k] = [data[k], v]
else:
data[k].append(v)
if _current_dataset_auto_flush:
flush()
def add_data(**new_data):
_check_dataset_keys(new_data.keys())
def dump_file(k, v):
filename = 'DATA.' + k + '.0'
with open(os.path.join(_current_dataset, filename), 'wb') as f:
dump(v, f, protocol=-1)
return filename
new_data = {k: DataLoader(dump_file(k, v)) for k, v in new_data.items()}
add_values(**new_data)
def append_data(**new_data):
if not _current_dataset:
raise ValueError('no data set created')
data = _current_dataset_data
def dump_file(k, v):
if k not in data:
count = 0
elif not isinstance(data[k], list):
count = 1
else:
count = len(data[k])
filename = 'DATA.' + k + '.' + str(count)
with open(os.path.join(_current_dataset, filename), 'wb') as f:
dump(v, f, protocol=-1)
return filename
new_data = {k: DataLoader(dump_file(k, v)) for k, v in new_data.items()}
append_values(**new_data)
def add_start_times(*keys):
now = time.time()
if not _current_dataset:
raise ValueError('no data set created')
for key in keys:
start = _current_dataset_start_times[key]
stop = _current_dataset_stop_times[key]
assert len(stop) <= len(start) <= len(stop) + 1
if len(start) > len(stop):
raise ValueError('timer for {} already started'.format(key))
start.append(now)
def add_stop_times(*keys):
now = time.time()
if not _current_dataset:
raise ValueError('no data set created')
for key in keys:
start = _current_dataset_start_times[key]
stop = _current_dataset_stop_times[key]
assert len(stop) <= len(start) <= len(stop) + 1
if len(start) == len(stop):
raise ValueError('timer for {} not started'.format(key))
stop.append(now)
def flush():
if not _current_dataset:
raise ValueError('no data set created')
_write_data(False)
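# Usage sketch (hypothetical experiment name and values; SIMDB_PATH must point
# at a writable directory before the first call):
#
#     import simdb.run as run
#     run.new_dataset('heat_equation', grid_size=128, dt=1e-3)
#     run.add_start_times('solve')
#     ...                                  # run the simulation
#     run.add_stop_times('solve')
#     run.append_values(residual=1e-6)     # small values stored in DATA
#     run.add_data(solution=u)             # pickled into DATA.solution.0
#     run.flush()                          # optionally write partial results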
def _write_data(successful):
assert _current_dataset
def process_data(v):
if isinstance(v, list):
a = np.array(v)
if not a.dtype == np.object:
return a
return v
data = {k: process_data(v) for k, v in _current_dataset_data.items()}
with open(os.path.join(_current_dataset, 'DATA'), 'wb') as f:
dump(data, f, protocol=-1)
def get_metadata(v):
if isinstance(v, np.ndarray):
return {'type': 'numpy.ndarray',
'shape': list(v.shape),
'dtype': str(v.dtype)}
elif isinstance(v, list):
info = {'len': len(v),
'total_elements': 0,
'max_depth': 0,
'max_len': len(v),
'element_types': set()}
def process_list(l, depth):
for x in l:
if isinstance(x, list):
info['max_len'] = max(info['max_len'], len(x))
info['max_depth'] = max(info['max_depth'], depth + 1)
process_list(x, depth+1)
else:
info['total_elements'] += 1
info['element_types'].add(type(x).__name__)
process_list(v, 0)
info['element_types'] = sorted(info['element_types'])
if info['max_depth']:
info['type'] = 'list of lists'
return info
else:
return {'type': 'list',
'len': len(v),
'element_types': info['element_types']}
else:
return type(v).__name__
with open(os.path.join(_current_dataset, 'INDEX'), 'wt') as f:
yaml.dump({k: get_metadata(v) for k, v in _current_dataset_data.items()}, f)
durations = {k: [stop - start for start, stop in zip_longest(v, _current_dataset_stop_times[k], fillvalue=0)]
for k, v in _current_dataset_start_times.items()}
with open(os.path.join(_current_dataset, 'TIMES'), 'wt') as f:
yaml.dump({'start': dict(_current_dataset_start_times),
'stop': dict(_current_dataset_stop_times),
'duration': durations},
f)
if successful:
with open(os.path.join(_current_dataset, 'FINISHED'), 'wt') as f:
yaml.dump(datetime.datetime.now(), f)
def _check_dataset_keys(new_keys):
if not _current_dataset:
raise ValueError('no data set created')
duplicate_keys = set(_current_dataset_data.keys()).intersection(new_keys)
if duplicate_keys:
raise ValueError('Keys {} already exist in data set'.format(duplicate_keys))
def _make_uid(path, prefix=''):
if prefix:
prefix = prefix + '-'
d = datetime.datetime.now()
while True:
uid = prefix + datetime.datetime.now().isoformat().replace(':', '-') + '-' + socket.gethostname()
# some filesystems do not allow ':' in filenames ..
if os.path.lexists(os.path.join(path, uid)):
d = d + datetime.timedelta(microseconds=1)
else:
break
return uid
def _git_info(path):
from sh import git
git = git.bake('--no-pager')
rev_parse = getattr(git, 'rev-parse')
R = {'branch': rev_parse('--abbrev-ref', 'HEAD', _cwd=path).strip(),
'revision': rev_parse('HEAD', _cwd=path).strip(),
'untracked': getattr(git, 'ls-files')('--others', '--exclude-standard', '--directory', _cwd=path).strip(),
'status': git.status('-s', _cwd=path).strip(),
'diff': git.diff(_cwd=path).strip()}
R['clean'] = len(R['diff']) == 0
return R
def _to_list(v):
if isinstance(v, np.ndarray):
return v.tolist()
elif isinstance(v, (tuple, list)):
return [_to_list(x) for x in v]
else:
return v
def _excepthook(exc_type, exc_val, exc_tb):
global _run
if _run:
finished = datetime.datetime.now()
def dump_failed(filename):
with open(filename, 'wt') as f:
yaml.dump({'time': finished,
'why': repr(exc_val),
'traceback': traceback.extract_tb(exc_tb)},
f)
if _current_dataset:
_write_data(successful=False)
dump_failed(os.path.join(_current_dataset, 'FAILED'))
dump_failed(os.path.join(_db_path, 'RUNS', _run, 'FAILED'))
_run = None
_saved_excepthook(exc_type, exc_val, exc_tb)
_saved_excepthook, sys.excepthook = sys.excepthook, _excepthook
@atexit.register
def _exit_hook():
if _run:
finished = datetime.datetime.now()
if _current_dataset:
_write_data(successful=True)
with open(os.path.join(_current_dataset, 'FINISHED'), 'wt') as f:
yaml.dump(finished, f)
with open(os.path.join(_db_path, 'RUNS', _run, 'FINISHED'), 'wt') as f:
yaml.dump(finished, f)
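# --- Editor's illustrative sketch (not part of simdb) ------------------------
# A minimal, self-contained version of the failure-marker pattern used above:
# chain onto sys.excepthook so a marker file is written for any uncaught
# exception, then delegate to the previously installed hook.  The file name is
# a placeholder.
def _example_install_failure_marker(marker_path='FAILED.example'):
    import sys
    import datetime

    previous_hook = sys.excepthook

    def _hook(exc_type, exc_val, exc_tb):
        with open(marker_path, 'wt') as f:
            f.write('{}: {!r}\n'.format(datetime.datetime.now(), exc_val))
        previous_hook(exc_type, exc_val, exc_tb)

    sys.excepthook = _hook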
| {
"content_hash": "fc877c5ee928436ab6d55cc2a1e9bcb1",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 115,
"avg_line_length": 31.559796437659035,
"alnum_prop": 0.5638152059985487,
"repo_name": "sdrave/simdb",
"id": "34c2529b89344505afac060b3ec76a20b5308c1e",
"size": "13769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simdb/run.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "29122"
}
],
"symlink_target": ""
} |
import os.path as op
from nose.tools import assert_true
from mne import read_dip
from mne.datasets import sample
data_path = sample.data_path(download=False)
dip_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_set1.dip')
@sample.requires_sample_data
def test_io_dip():
"""Test IO for .dip files
"""
time, pos, amplitude, ori, gof = read_dip(dip_fname)
assert_true(pos.shape[1] == 3)
assert_true(ori.shape[1] == 3)
assert_true(len(time) == len(pos))
assert_true(len(time) == gof.size)
assert_true(len(time) == amplitude.size)
| {
"content_hash": "f46d46881640232cf336f5ef223123a4",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 56,
"avg_line_length": 26.954545454545453,
"alnum_prop": 0.6475548060708263,
"repo_name": "jaeilepp/eggie",
"id": "d5314f496694d1a02a8c149a17f363a33d30c4a0",
"size": "593",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mne/tests/test_dipole.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "3357472"
}
],
"symlink_target": ""
} |
from enum import Enum
from azure.core import CaseInsensitiveEnumMeta
class RoleAssignmentApprovalActorIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The identity type : user/servicePrincipal."""
USER = "user"
SERVICE_PRINCIPAL = "servicePrincipal"
class RoleAssignmentApprovalStepReviewResult(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The decision on the approval stage. This value is initially set to NotReviewed. Approvers can
take action of Approve/Deny.
"""
APPROVE = "Approve"
DENY = "Deny"
NOT_REVIEWED = "NotReviewed"
class RoleAssignmentApprovalStepStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""This read-only field specifies the status of an approval."""
NOT_STARTED = "NotStarted"
IN_PROGRESS = "InProgress"
COMPLETED = "Completed"
EXPIRED = "Expired"
INITIALIZING = "Initializing"
ESCALATING = "Escalating"
COMPLETING = "Completing"
ESCALATED = "Escalated"
| {
"content_hash": "a819a398b077ab9f37c5bdbbf3efac7c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 100,
"avg_line_length": 30.75,
"alnum_prop": 0.7327235772357723,
"repo_name": "Azure/azure-sdk-for-python",
"id": "74ba291f3bcca238da835c0e72383cce698e955d",
"size": "1452",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2021_01_01_preview/models/_authorization_management_client_enums.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from . import Check, CheckStatus, UnexpectedFailure
import socket
class PortOpenCheck(Check):
def __init__(self, payload, name=None, **kwargs):
super(PortOpenCheck, self).__init__(specification='PortOpenCheck(host={h}, port={p})'.format(
h=payload['hostname'],
p=payload['port']
),
payload=payload,
name=name,
**kwargs)
@Check.expected_failure_possible
def perform(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
if sock.connect_ex((self._payload['hostname'], int(self._payload['port']))) == 0:
self._status = CheckStatus.SUCCESS
self.message = 'Port {p} on {h} is open'.format(h=self._payload['hostname'], p=self._payload['port'])
else:
self._status = CheckStatus.FAILED
self.message = 'Port {p} on {h} is closed'.format(h=self._payload['hostname'], p=self._payload['port'])
except socket.gaierror as e:
self._status = CheckStatus.FAILED
self.message = e.strerror
raise UnexpectedFailure
available_checks = {
'port_open': PortOpenCheck
}
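# --- Editor's illustrative sketch (not part of this module) ------------------
# How a caller might look up and run the check through the registry above.
# Host and port are placeholder values; how results are consumed ultimately
# depends on the Check base class, which is defined elsewhere.
def _example_run_port_check():
    check_cls = available_checks['port_open']
    check = check_cls(payload={'hostname': 'localhost', 'port': 22},
                      name='ssh-localhost')
    check.perform()
    return check.message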
| {
"content_hash": "3685d5d3a281ea9076868b76bd0d1600",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 119,
"avg_line_length": 36.515151515151516,
"alnum_prop": 0.5800829875518673,
"repo_name": "dopsi/sentry",
"id": "5f41605e63aa3ddd8d63baff70b6013bd26f10e6",
"size": "1205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentry/modules/port_open.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1543"
},
{
"name": "HTML",
"bytes": "1533"
},
{
"name": "Python",
"bytes": "19916"
}
],
"symlink_target": ""
} |
"""
Load centrality.
"""
# Copyright (C) 2004-2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
__author__ = "\n".join(['Aric Hagberg ([email protected])',
'Pieter Swart ([email protected])',
'Sasha Gutfraind ([email protected])'])
__all__ = ['load_centrality',
'edge_load']
import heapq
import networkx as nx
def newman_betweenness_centrality(G,v=None,cutoff=None,
normalized=True,
weighted_edges=False):
"""Compute load centrality for nodes.
The load centrality of a node is the fraction of all shortest
paths that pass through that node.
Parameters
----------
G : graph
A networkx graph
normalized : bool, optional
If True the betweenness values are normalized by b=b/(n-1)(n-2) where
n is the number of nodes in G.
weighted_edges : bool, optional
Consider the edge weights in determining the shortest paths.
If False, all edge weights are considered equal.
cutoff : bool, optional
If specified, only consider paths of length <= cutoff.
Returns
-------
nodes : dictionary
Dictionary of nodes with centrality as the value.
See Also
--------
betweenness_centrality()
Notes
-----
Load centrality is slightly different than betweenness.
For this load algorithm see the reference
Scientific collaboration networks: II.
Shortest paths, weighted networks, and centrality,
M. E. J. Newman, Phys. Rev. E 64, 016132 (2001).
"""
if v is not None: # only one node
betweenness=0.0
for source in G:
ubetween=_node_betweenness(G,source,
cutoff=cutoff,
normalized=normalized,
weighted_edges=weighted_edges)
betweenness+=ubetween[v]
return betweenness
else:
betweenness={}.fromkeys(G,0.0)
for source in betweenness:
ubetween=_node_betweenness(G,source,
cutoff=cutoff,
normalized=False,
weighted_edges=weighted_edges)
for vk in ubetween:
betweenness[vk]+=ubetween[vk]
if normalized:
order=len(betweenness)
if order <=2:
return betweenness # no normalization b=0 for all nodes
scale=1.0/((order-1)*(order-2))
for v in betweenness:
betweenness[v] *= scale
return betweenness # all nodes
def _node_betweenness(G,source,cutoff=False,normalized=True,weighted_edges=False):
"""Node betweenness helper:
see betweenness_centrality for what you probably want.
This actually computes "load" and not betweenness.
See https://networkx.lanl.gov/ticket/103
This calculates the load of each node for paths from a single source.
(The fraction of number of shortests paths from source that go
through each node.)
To get the load for a node you need to do all-pairs shortest paths.
If weighted_edges is True then use Dijkstra for finding shortest paths.
In this case a cutoff is not implemented and so is ignored.
"""
# get the predecessor and path length data
if weighted_edges:
(pred,length)=nx.dijkstra_predecessor_and_distance(G,source)
else:
(pred,length)=nx.predecessor(G,source,cutoff=cutoff,return_seen=True)
# order the nodes by path length
onodes = [ (l,vert) for (vert,l) in length.items() ]
onodes.sort()
onodes[:] = [vert for (l,vert) in onodes if l>0]
    # initialize betweenness
between={}.fromkeys(length,1.0)
while onodes:
v=onodes.pop()
if v in pred:
num_paths=len(pred[v]) # Discount betweenness if more than
for x in pred[v]: # one shortest path.
if x==source: # stop if hit source because all remaining v
break # also have pred[v]==[source]
between[x]+=between[v]/float(num_paths)
# remove source
for v in between:
between[v]-=1
# rescale to be between 0 and 1
if normalized:
l=len(between)
if l > 2:
scale=1.0/float((l-1)*(l-2)) # 1/the number of possible paths
for v in between:
between[v] *= scale
return between
load_centrality=newman_betweenness_centrality
def edge_load(G,nodes=None,cutoff=False):
"""Compute edge load.
WARNING:
This module is for demonstration and testing purposes.
"""
betweenness={}
if not nodes: # find betweenness for every node in graph
nodes=G.nodes() # that probably is what you want...
for source in nodes:
ubetween=_edge_betweenness(G,source,nodes,cutoff=cutoff)
for v in ubetween.keys():
b=betweenness.setdefault(v,0) # get or set default
betweenness[v]=ubetween[v]+b # cumulative total
return betweenness
def _edge_betweenness(G,source,nodes,cutoff=False):
"""
Edge betweenness helper.
"""
between={}
# get the predecessor data
#(pred,length)=_fast_predecessor(G,source,cutoff=cutoff)
(pred,length)=nx.predecessor(G,source,cutoff=cutoff,return_seen=True)
# order the nodes by path length
onodes = [ nn for dd,nn in sorted( (dist,n) for n,dist in length.items() )]
    # initialize betweenness; doesn't account for any edge weights
for u,v in G.edges(nodes):
between[(u,v)]=1.0
between[(v,u)]=1.0
while onodes: # work through all paths
v=onodes.pop()
if v in pred:
num_paths=len(pred[v]) # Discount betweenness if more than
for w in pred[v]: # one shortest path.
if w in pred:
num_paths=len(pred[w]) # Discount betweenness, mult path
for x in pred[w]:
between[(w,x)]+=between[(v,w)]/num_paths
between[(x,w)]+=between[(w,v)]/num_paths
return between
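# --- Editor's illustrative sketch (not part of networkx) ---------------------
# Minimal use of the two public entry points above on a small path graph; the
# returned values are whatever the algorithms compute, nothing is hard-coded.
def _example_load_centrality():
    G = nx.path_graph(4)              # 0 - 1 - 2 - 3
    node_load = load_centrality(G)    # dict: node -> normalized load
    edge_loads = edge_load(G)         # dict: (u, v) -> cumulative edge load
    return node_load, edge_loads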
| {
"content_hash": "ae2c17a57e6514c859687e471891d4cc",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 82,
"avg_line_length": 33.276041666666664,
"alnum_prop": 0.580059477226483,
"repo_name": "rainest/dance-partner-matching",
"id": "a317e399fddad76a5a899086fd9f93b5c43113f2",
"size": "6389",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "networkx/algorithms/centrality/load.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1745363"
},
{
"name": "Shell",
"bytes": "348"
}
],
"symlink_target": ""
} |
import os
import sys
from optparse import OptionParser
try:
from urllib.parse import urlparse, parse_qs
except ImportError:
from urlparse import urlparse, parse_qs
# Do not write pyc/pyo files
sys.dont_write_bytecode = True
# Import from 'dist' directory
curdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "dist")
sys.path.append(curdir)
import onetimepass as otp
__author__ = "Mark Embling <[email protected]>"
__version__ = "0.1"
__license__ = "BSD (3-clause)"
def _get_data(options, args):
'''Gets the raw secret data from either stdin or args'''
data = ""
if options.stdin:
# Read first line from stdin
data = sys.stdin.readline()
else:
if len(args) != 1:
parser.error("no secret provided")
data = args[0]
return data
def _get_secret(input):
'''Attempts to get the secret from the given string - either the whole
string itself, or the secret from within an otpauth:// URI.
'''
input = input.strip()
if input.startswith("otpauth://"):
u = urlparse(input)
q = parse_qs(u.query)
return (q["secret"][0], u.hostname)
return (input, None)
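# --- Editor's illustrative sketch (not part of the original script) ----------
# What _get_secret() does with an otpauth:// URI: the secret query parameter is
# extracted and the URI host ("totp" or "hotp") is returned as the token type.
# The secret below is a made-up placeholder.
def _example_get_secret():
    uri = "otpauth://totp/example?secret=JBSWY3DPEHPK3PXP"
    return _get_secret(uri)   # -> ("JBSWY3DPEHPK3PXP", "totp")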
if __name__ == "__main__":
parser = OptionParser()
parser.description = "Prints the TOTP auth code for the given secret. Can be either the raw secret string or a otpauth:// URI; the script will attempt to auto-detect which is given."
parser.usage = "%prog [options] secret OR %prog [options] --stdin < secret.txt"
parser.epilog = "Copyright (c) Mark Embling 2013"
parser.add_option("--stdin", dest="stdin",
action="store_true", default=False,
help="Read the secret (raw secret or otpauth:// URI) from stdin [default: %default]")
parser.add_option("--type", dest="type",
choices=["TOTP", "HOTP"], default="TOTP",
help="Token type (HOTP or TOTP). If a URI is provided, the type will be determined from there. [default: %default]")
parser.add_option("--count", dest="count",
type="int", default=1,
help="Counter for HOTP [default: %default]")
# parser.add_option("-d", "--digits", dest="digits",
# choices=['6','8'], default='6',
# help="Number of digits to display (6 or 8) [default: %default]")
(options, args) = parser.parse_args()
# Get the secret and type
data = _get_data(options, args)
(secret, type_) = _get_secret(data)
if type_ is None:
type_ = options.type
# Get the token and print
if type_.upper() == "HOTP":
print(otp.get_hotp(secret, intervals_no=options.count, as_string=True))
else:
print(otp.get_totp(secret, as_string=True))
| {
"content_hash": "583a2dcd5d42facec6caf785730c87ed",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 186,
"avg_line_length": 35.8974358974359,
"alnum_prop": 0.6039285714285715,
"repo_name": "markembling/totp-tool",
"id": "3b9a8f82ca8d6564d82058166f5f596039528293",
"size": "2823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "totp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "30001"
}
],
"symlink_target": ""
} |
import json
import logging
from typing import Any, Dict, List, Optional, Union
from urllib import request
from urllib.error import URLError
from celery.utils.log import get_task_logger
from sqlalchemy import and_, func
from superset import app, db
from superset.extensions import celery_app
from superset.models.core import Log
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.models.tags import Tag, TaggedObject
from superset.utils.date_parser import parse_human_datetime
from superset.views.utils import build_extra_filters
logger = get_task_logger(__name__)
logger.setLevel(logging.INFO)
def get_form_data(
chart_id: int, dashboard: Optional[Dashboard] = None
) -> Dict[str, Any]:
"""
Build `form_data` for chart GET request from dashboard's `default_filters`.
When a dashboard has `default_filters` they need to be added as extra
filters in the GET request for charts.
"""
form_data: Dict[str, Any] = {"slice_id": chart_id}
if dashboard is None or not dashboard.json_metadata:
return form_data
json_metadata = json.loads(dashboard.json_metadata)
default_filters = json.loads(json_metadata.get("default_filters", "null"))
if not default_filters:
return form_data
filter_scopes = json_metadata.get("filter_scopes", {})
layout = json.loads(dashboard.position_json or "{}")
if (
isinstance(layout, dict)
and isinstance(filter_scopes, dict)
and isinstance(default_filters, dict)
):
extra_filters = build_extra_filters(
layout, filter_scopes, default_filters, chart_id
)
if extra_filters:
form_data["extra_filters"] = extra_filters
return form_data
def get_url(chart: Slice, extra_filters: Optional[Dict[str, Any]] = None) -> str:
"""Return external URL for warming up a given chart/table cache."""
with app.test_request_context():
baseurl = (
"{SUPERSET_WEBSERVER_PROTOCOL}://"
"{SUPERSET_WEBSERVER_ADDRESS}:"
"{SUPERSET_WEBSERVER_PORT}".format(**app.config)
)
return f"{baseurl}{chart.get_explore_url(overrides=extra_filters)}"
class Strategy:
"""
A cache warm up strategy.
Each strategy defines a `get_urls` method that returns a list of URLs to
be fetched from the `/superset/warm_up_cache/` endpoint.
Strategies can be configured in `superset/config.py`:
CELERYBEAT_SCHEDULE = {
'cache-warmup-hourly': {
'task': 'cache-warmup',
'schedule': crontab(minute=1, hour='*'), # @hourly
'kwargs': {
'strategy_name': 'top_n_dashboards',
'top_n': 10,
'since': '7 days ago',
},
},
}
"""
def __init__(self) -> None:
pass
def get_urls(self) -> List[str]:
raise NotImplementedError("Subclasses must implement get_urls!")
class DummyStrategy(Strategy):
"""
Warm up all charts.
This is a dummy strategy that will fetch all charts. Can be configured by:
CELERYBEAT_SCHEDULE = {
'cache-warmup-hourly': {
'task': 'cache-warmup',
'schedule': crontab(minute=1, hour='*'), # @hourly
'kwargs': {'strategy_name': 'dummy'},
},
}
"""
name = "dummy"
def get_urls(self) -> List[str]:
session = db.create_scoped_session()
charts = session.query(Slice).all()
return [get_url(chart) for chart in charts]
class TopNDashboardsStrategy(Strategy):
"""
Warm up charts in the top-n dashboards.
CELERYBEAT_SCHEDULE = {
'cache-warmup-hourly': {
'task': 'cache-warmup',
'schedule': crontab(minute=1, hour='*'), # @hourly
'kwargs': {
'strategy_name': 'top_n_dashboards',
'top_n': 5,
'since': '7 days ago',
},
},
}
"""
name = "top_n_dashboards"
def __init__(self, top_n: int = 5, since: str = "7 days ago") -> None:
super(TopNDashboardsStrategy, self).__init__()
self.top_n = top_n
self.since = parse_human_datetime(since) if since else None
def get_urls(self) -> List[str]:
urls = []
session = db.create_scoped_session()
records = (
session.query(Log.dashboard_id, func.count(Log.dashboard_id))
.filter(and_(Log.dashboard_id.isnot(None), Log.dttm >= self.since))
.group_by(Log.dashboard_id)
.order_by(func.count(Log.dashboard_id).desc())
.limit(self.top_n)
.all()
)
dash_ids = [record.dashboard_id for record in records]
dashboards = session.query(Dashboard).filter(Dashboard.id.in_(dash_ids)).all()
for dashboard in dashboards:
for chart in dashboard.slices:
form_data_with_filters = get_form_data(chart.id, dashboard)
urls.append(get_url(chart, form_data_with_filters))
return urls
class DashboardTagsStrategy(Strategy):
"""
Warm up charts in dashboards with custom tags.
CELERYBEAT_SCHEDULE = {
'cache-warmup-hourly': {
'task': 'cache-warmup',
'schedule': crontab(minute=1, hour='*'), # @hourly
'kwargs': {
'strategy_name': 'dashboard_tags',
'tags': ['core', 'warmup'],
},
},
}
"""
name = "dashboard_tags"
def __init__(self, tags: Optional[List[str]] = None) -> None:
super(DashboardTagsStrategy, self).__init__()
self.tags = tags or []
def get_urls(self) -> List[str]:
urls = []
session = db.create_scoped_session()
tags = session.query(Tag).filter(Tag.name.in_(self.tags)).all()
tag_ids = [tag.id for tag in tags]
# add dashboards that are tagged
tagged_objects = (
session.query(TaggedObject)
.filter(
and_(
TaggedObject.object_type == "dashboard",
TaggedObject.tag_id.in_(tag_ids),
)
)
.all()
)
dash_ids = [tagged_object.object_id for tagged_object in tagged_objects]
tagged_dashboards = session.query(Dashboard).filter(Dashboard.id.in_(dash_ids))
for dashboard in tagged_dashboards:
for chart in dashboard.slices:
urls.append(get_url(chart))
# add charts that are tagged
tagged_objects = (
session.query(TaggedObject)
.filter(
and_(
TaggedObject.object_type == "chart",
TaggedObject.tag_id.in_(tag_ids),
)
)
.all()
)
chart_ids = [tagged_object.object_id for tagged_object in tagged_objects]
tagged_charts = session.query(Slice).filter(Slice.id.in_(chart_ids))
for chart in tagged_charts:
urls.append(get_url(chart))
return urls
strategies = [DummyStrategy, TopNDashboardsStrategy, DashboardTagsStrategy]
@celery_app.task(name="cache-warmup")
def cache_warmup(
strategy_name: str, *args: Any, **kwargs: Any
) -> Union[Dict[str, List[str]], str]:
"""
Warm up cache.
This task periodically hits charts to warm up the cache.
"""
logger.info("Loading strategy")
class_ = None
for class_ in strategies:
if class_.name == strategy_name: # type: ignore
break
else:
message = f"No strategy {strategy_name} found!"
logger.error(message, exc_info=True)
return message
logger.info("Loading %s", class_.__name__)
try:
strategy = class_(*args, **kwargs)
logger.info("Success!")
except TypeError:
message = "Error loading strategy!"
logger.exception(message)
return message
results: Dict[str, List[str]] = {"success": [], "errors": []}
for url in strategy.get_urls():
try:
logger.info("Fetching %s", url)
request.urlopen(url)
results["success"].append(url)
except URLError:
logger.exception("Error warming up cache!")
results["errors"].append(url)
return results
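# --- Editor's illustrative sketch (not part of superset) ---------------------
# A custom warm-up strategy only needs to subclass Strategy and return chart
# URLs from get_urls(); the class below is hypothetical and is intentionally
# not added to the `strategies` list above.
class ExampleFirstNChartsStrategy(Strategy):
    """Warm up only the first `limit` charts (illustration only)."""

    name = "example_first_n_charts"

    def __init__(self, limit: int = 10) -> None:
        super().__init__()
        self.limit = limit

    def get_urls(self) -> List[str]:
        session = db.create_scoped_session()
        charts = session.query(Slice).limit(self.limit).all()
        return [get_url(chart) for chart in charts]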
| {
"content_hash": "511d6014ae34482be3efc9748d0a6c2f",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 87,
"avg_line_length": 30.519713261648747,
"alnum_prop": 0.5699354081033471,
"repo_name": "mistercrunch/panoramix",
"id": "546eaebdb056581d14db13c0604b60b7e4cbb4db",
"size": "9342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "superset/tasks/cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "46750"
},
{
"name": "HTML",
"bytes": "34140"
},
{
"name": "JavaScript",
"bytes": "81606"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "240195"
},
{
"name": "Shell",
"bytes": "213"
}
],
"symlink_target": ""
} |
class GeneralInquiryModel(object):
"""General Card Attributes Inquiry data object model.
:param str primaryAccountNumber: **Required**. Primary account number (PAN). 13-19 characters string.
**Request:**
.. code:: json
{
"primaryAccountNumber": "4465390000029077"
}
**Response:**
.. code:: json
{
"status": {
"statusCode": "CDI000",
"statusDescription": "Success"
},
"cardProductId": "K",
"cardProductName": "Visa Corporate T&E",
"cardProductSubtypeCode": "ON",
"cardProductSubtypeDescription": "Other Prepaid-Non Reloadable",
"cardTypeCode": "H",
"cardSubtypeCode": "R",
"cardPlatformCode": "CO",
"issuerName": "BANCO AGROMERCANTIL DE GUATEMALA S.A.",
"bin": "481507",
"countryCode": "332"
}
"""
ATTRS = [
'primaryAccountNumber'
]
def __init__(self, **kwargs):
for attr, value in kwargs.items():
if attr in self.ATTRS and value:
self.__setattr__(attr, value)
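# --- Editor's illustrative sketch (not part of pyvdp) ------------------------
# The constructor above only keeps keyword arguments listed in ATTRS (and with
# truthy values); anything else is silently dropped.  The PAN is the sample
# value from the docstring.
def _example_general_inquiry_model():
    model = GeneralInquiryModel(primaryAccountNumber='4465390000029077',
                                unknown_field='ignored')
    return vars(model)   # -> {'primaryAccountNumber': '4465390000029077'}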
| {
"content_hash": "962662d5d9b1c34733385cbb85ce90b7",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 105,
"avg_line_length": 28.61904761904762,
"alnum_prop": 0.5099833610648918,
"repo_name": "ppokrovsky/pyvdp",
"id": "421b34213bfcbcd496687ba14bad3c45bfaa0e08",
"size": "1202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvdp/paai/generalattinq/cardattributes/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "266062"
}
],
"symlink_target": ""
} |
import unittest, rostest
import rosnode, rospy
import time
from pimouse_ros.msg import MotorFreqs
from geometry_msgs.msg import Twist
from std_srvs.srv import Trigger, TriggerResponse # added
class MotorTest(unittest.TestCase):
    def setUp(self): # this method was added
rospy.wait_for_service('/motor_on')
rospy.wait_for_service('/motor_off')
on = rospy.ServiceProxy('/motor_on', Trigger)
ret = on()
def file_check(self,dev,value,message):
with open("/dev/" + dev,"r") as f:
self.assertEqual(f.readline(),str(value)+"\n",message)
def test_node_exist(self):
nodes = rosnode.get_node_names()
self.assertIn('/motors', nodes, "node does not exist")
def test_put_freq(self):
pub = rospy.Publisher('/motor_raw', MotorFreqs)
m = MotorFreqs()
m.left_hz = 123
m.right_hz = 456
for i in range(10):
pub.publish(m)
time.sleep(0.1)
self.file_check("rtmotor_raw_l0",m.left_hz,"wrong left value from motor_raw")
self.file_check("rtmotor_raw_r0",m.right_hz,"wrong left value from motor_raw")
def test_put_cmd_vel(self):
pub = rospy.Publisher('/cmd_vel', Twist)
m = Twist()
m.linear.x = 0.1414
m.angular.z = 1.57
for i in range(10):
pub.publish(m)
time.sleep(0.1)
self.file_check("rtmotor_raw_l0",200,"wrong left value from cmd_vel")
self.file_check("rtmotor_raw_r0",600,"wrong right value from cmd_vel")
time.sleep(1.1)
self.file_check("rtmotor_raw_r0",0,"don't stop after 1[s]")
self.file_check("rtmotor_raw_l0",0,"don't stop after 1[s]")
    def test_on_off(self): # this method was also added
off = rospy.ServiceProxy('/motor_off', Trigger)
ret = off()
self.assertEqual(ret.success, True, "motor off does not succeeded")
self.assertEqual(ret.message, "OFF", "motor off wrong message")
with open("/dev/rtmotoren0","r") as f:
data = f.readline()
self.assertEqual(data,"0\n","wrong value in rtmotor0 at motor off")
on = rospy.ServiceProxy('/motor_on', Trigger)
ret = on()
self.assertEqual(ret.success, True, "motor on does not succeeded")
self.assertEqual(ret.message, "ON", "motor on wrong message")
with open("/dev/rtmotoren0","r") as f:
data = f.readline()
self.assertEqual(data,"1\n","wrong value in rtmotor0 at motor on")
if __name__ == '__main__':
rospy.init_node('travis_test_motors')
rostest.rosrun('pimouse_ros','travis_test_motors', MotorTest)
# Copyright 2016 Ryuichi Ueda
# Released under the BSD License.
# To keep the line numbers identical to those in the book, this statement is placed here. Don't move it to the header.
| {
"content_hash": "858ec343e8709c860078e7e16da3c446",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 111,
"avg_line_length": 38.58108108108108,
"alnum_prop": 0.603152364273205,
"repo_name": "Kageken/pimouse_ros",
"id": "baadf6d7f68827792714ff155655142500036fe1",
"size": "2933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/travis_test_motors2.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "6814"
},
{
"name": "Python",
"bytes": "27826"
},
{
"name": "Shell",
"bytes": "835"
}
],
"symlink_target": ""
} |
from tempest.api.compute import base
from tempest import config
from tempest import exceptions
from tempest import test
import time
CONF = config.CONF
class AttachInterfacesTestJSON(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(AttachInterfacesTestJSON, cls).skip_checks()
if not CONF.service_available.neutron:
raise cls.skipException("Neutron is required")
if not CONF.compute_feature_enabled.interface_attach:
raise cls.skipException("Interface attachment is not available.")
@classmethod
def setup_credentials(cls):
# This test class requires network and subnet
cls.set_network_resources(network=True, subnet=True)
super(AttachInterfacesTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(AttachInterfacesTestJSON, cls).setup_clients()
cls.client = cls.os.interfaces_client
def _check_interface(self, iface, port_id=None, network_id=None,
fixed_ip=None, mac_addr=None):
self.assertIn('port_state', iface)
if port_id:
self.assertEqual(iface['port_id'], port_id)
if network_id:
self.assertEqual(iface['net_id'], network_id)
if fixed_ip:
self.assertEqual(iface['fixed_ips'][0]['ip_address'], fixed_ip)
if mac_addr:
self.assertEqual(iface['mac_addr'], mac_addr)
def _create_server_get_interfaces(self):
server = self.create_test_server(wait_until='ACTIVE')
resp, ifs = self.client.list_interfaces(server['id'])
self.assertEqual(200, resp.status)
resp, body = self.client.wait_for_interface_status(
server['id'], ifs[0]['port_id'], 'ACTIVE')
ifs[0]['port_state'] = body['port_state']
return server, ifs
def _test_create_interface(self, server):
resp, iface = self.client.create_interface(server['id'])
self.assertEqual(200, resp.status)
resp, iface = self.client.wait_for_interface_status(
server['id'], iface['port_id'], 'ACTIVE')
self._check_interface(iface)
return iface
def _test_create_interface_by_network_id(self, server, ifs):
network_id = ifs[0]['net_id']
resp, iface = self.client.create_interface(server['id'],
network_id=network_id)
self.assertEqual(200, resp.status)
resp, iface = self.client.wait_for_interface_status(
server['id'], iface['port_id'], 'ACTIVE')
self._check_interface(iface, network_id=network_id)
return iface
def _test_show_interface(self, server, ifs):
iface = ifs[0]
resp, _iface = self.client.show_interface(server['id'],
iface['port_id'])
self.assertEqual(200, resp.status)
self._check_interface(iface, port_id=_iface['port_id'],
network_id=_iface['net_id'],
fixed_ip=_iface['fixed_ips'][0]['ip_address'],
mac_addr=_iface['mac_addr'])
def _test_delete_interface(self, server, ifs):
# NOTE(danms): delete not the first or last, but one in the middle
iface = ifs[1]
resp, _ = self.client.delete_interface(server['id'], iface['port_id'])
self.assertEqual(202, resp.status)
_ifs = self.client.list_interfaces(server['id'])[1]
start = int(time.time())
while len(ifs) == len(_ifs):
time.sleep(self.build_interval)
_ifs = self.client.list_interfaces(server['id'])[1]
timed_out = int(time.time()) - start >= self.build_timeout
if len(ifs) == len(_ifs) and timed_out:
message = ('Failed to delete interface within '
'the required time: %s sec.' % self.build_timeout)
raise exceptions.TimeoutException(message)
self.assertNotIn(iface['port_id'], [i['port_id'] for i in _ifs])
return _ifs
def _compare_iface_list(self, list1, list2):
# NOTE(danms): port_state will likely have changed, so just
# confirm the port_ids are the same at least
list1 = [x['port_id'] for x in list1]
list2 = [x['port_id'] for x in list2]
self.assertEqual(sorted(list1), sorted(list2))
@test.attr(type='smoke')
@test.services('network')
def test_create_list_show_delete_interfaces(self):
server, ifs = self._create_server_get_interfaces()
interface_count = len(ifs)
self.assertTrue(interface_count > 0)
self._check_interface(ifs[0])
iface = self._test_create_interface(server)
ifs.append(iface)
iface = self._test_create_interface_by_network_id(server, ifs)
ifs.append(iface)
resp, _ifs = self.client.list_interfaces(server['id'])
self._compare_iface_list(ifs, _ifs)
self._test_show_interface(server, ifs)
_ifs = self._test_delete_interface(server, ifs)
self.assertEqual(len(ifs) - 1, len(_ifs))
@test.attr(type='smoke')
@test.services('network')
def test_add_remove_fixed_ip(self):
# Add and Remove the fixed IP to server.
server, ifs = self._create_server_get_interfaces()
interface_count = len(ifs)
self.assertTrue(interface_count > 0)
self._check_interface(ifs[0])
network_id = ifs[0]['net_id']
resp, body = self.client.add_fixed_ip(server['id'],
network_id)
self.assertEqual(202, resp.status)
# Remove the fixed IP from server.
server_detail = self.os.servers_client.get_server(
server['id'])
# Get the Fixed IP from server.
fixed_ip = None
for ip_set in server_detail['addresses']:
for ip in server_detail['addresses'][ip_set]:
if ip['OS-EXT-IPS:type'] == 'fixed':
fixed_ip = ip['addr']
break
if fixed_ip is not None:
break
resp, body = self.client.remove_fixed_ip(server['id'],
fixed_ip)
self.assertEqual(202, resp.status)
| {
"content_hash": "d304c3e3d3a005659ce049fbaa04bb6f",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 78,
"avg_line_length": 40.23566878980892,
"alnum_prop": 0.5822384043058414,
"repo_name": "Vaidyanath/tempest",
"id": "33995f30ae05d7e5df5f7d3d16b5d400dbe1165e",
"size": "6942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/compute/servers/test_attach_interfaces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "695"
},
{
"name": "Python",
"bytes": "2788179"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
} |
import os
import shutil
import tempfile
import zipfile
from utils.file_util import get_files_by_re, gen_new_file_extension
def get_aar_files(proj_dir, des_dir):
rel_aar_dir = r"build\outputs\aar"
aar_dirs = [os.path.join(proj_dir, i) for i in os.listdir(proj_dir) if os.path.isdir(os.path.join(proj_dir, i))]
aar_dirs = [os.path.join(i, rel_aar_dir) for i in aar_dirs if os.path.exists(os.path.join(i, rel_aar_dir))]
for i in aar_dirs:
file = os.listdir(i)[0]
debug_aar = os.path.join(i, file)
print(debug_aar)
os.makedirs(des_dir, exist_ok=True)
shutil.copyfile(debug_aar, os.path.join(des_dir, file))
def using_local_aar(aar_dir):
# http://stackoverflow.com/a/24894387/1713757
# or you can just do it in android studio ui
s = 'configurations.maybeCreate("default")'
for i in os.listdir(aar_dir):
if i.endswith("aar"):
print("aar:", i)
t = "artifacts.add(\"default\", file('{}'))\n".format(i)
s += t
print(s)
    build_script = os.path.join(aar_dir, "build.gradle")
    with open(build_script, mode='w', encoding='utf-8') as f:
        f.write(s)
aar_module_name = os.path.basename(aar_dir)
print("add this to setting.gradle: ")
print("include ':{}'".format(aar_module_name))
print("\nadd this to mudule using aars: ")
print("compile project(':{}')".format(aar_module_name))
def extract_aar2jar(aar_dir):
aar_files = get_files_by_re(aar_dir, ".*aar")
for i in aar_files:
jar_name = gen_new_file_extension(i, "jar")
with zipfile.ZipFile(i, "r") as z:
temp_dir = tempfile.mkdtemp()
z.extract("classes.jar", temp_dir)
if os.path.exists(jar_name):
os.remove(jar_name)
shutil.move(os.path.join(temp_dir, "classes.jar"), jar_name)
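# --- Editor's illustrative sketch (not part of the original module) ----------
# Typical order in which the helpers above would be called; all paths are
# placeholders.
def _example_workflow():
    get_aar_files(r"C:\projects\MyAndroidApp", r"C:\projects\aars")
    using_local_aar(r"C:\projects\MyAndroidApp\aars")
    extract_aar2jar(r"C:\projects\aars")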
| {
"content_hash": "5c3900ba812d851abfed9b7d8563b170",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 116,
"avg_line_length": 36.82,
"alnum_prop": 0.6121673003802282,
"repo_name": "gengjiawen/AndroidHelper",
"id": "e2673fded863a8226c1ee8b006bccf1afaff02da",
"size": "1841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aar_util/aar_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "FreeMarker",
"bytes": "7752"
},
{
"name": "HTML",
"bytes": "186"
},
{
"name": "Python",
"bytes": "14509"
}
],
"symlink_target": ""
} |
from concurrent.futures import ThreadPoolExecutor
from typing import List
from toolz import juxt
from itertools import repeat
from saapy.graphdb import Neo4jClient
class Neo4jAbstractQuery:
def __init__(self, labels=()):
self.labels = labels
def __call__(self, neo_client):
raise NotImplementedError()
class Neo4jImportQuery(Neo4jAbstractQuery):
def __init__(self,
query_params: List[dict] = (),
chunk_size: int = 1000, **kwargs):
self.query_params = query_params
self.chunk_size = chunk_size
super().__init__(**kwargs)
def __call__(self, neo4j_client: Neo4jClient):
result = neo4j_client.import_nodes(self.query_params,
labels=self.labels,
chunk_size=self.chunk_size)
return result
class Neo4jBatchQuery(Neo4jAbstractQuery):
def __init__(self, query_text: str,
query_params: List[dict]=None,
chunk_size: int = 1000, **kwargs):
if query_params is None:
query_params = []
self.query_text = query_text
self.query_params = query_params
self.chunk_size = chunk_size
super().__init__(**kwargs)
def __call__(self, neo4j_client: Neo4jClient):
result = neo4j_client.run_batch_query(self.query_text,
labels=self.labels,
params=self.query_params,
chunk_size=self.chunk_size)
return result
class Neo4jGenericQuery(Neo4jAbstractQuery):
def __init__(self, query_text: str, query_params: dict=None, **kwargs):
if query_params is None:
query_params = dict()
self.query_text = query_text
self.query_params = query_params
super().__init__(**kwargs)
def __call__(self, neo4j_client: Neo4jClient):
result = neo4j_client.run_query(self.query_text,
labels=self.labels,
params=self.query_params)
return result
class Neo4jQueryFactory:
def __init__(self, default_labels=()):
self.default_labels = default_labels
def query(self,
query_text: str,
labels=(),
query_params: dict=None,
**kwargs):
return Neo4jGenericQuery(query_text,
labels=self.default_labels + labels,
query_params=query_params,
**kwargs)
def import_query(self,
labels=(),
query_params: List[dict]=None,
chunk_size: int = 1000,
**kwargs):
return Neo4jImportQuery(labels=self.default_labels + labels,
query_params=query_params,
chunk_size=chunk_size,
**kwargs)
def batch_query(self,
query_text: str,
labels=(),
query_params: List[dict]=None,
chunk_size: int = 1000,
**kwargs):
return Neo4jBatchQuery(query_text,
labels=self.default_labels + labels,
query_params=query_params,
chunk_size=chunk_size,
**kwargs)
class Neo4jQueryInvoker:
def __init__(self, neo4j_client: Neo4jClient):
self.neo4j_client = neo4j_client
def run(self, *queries: Neo4jAbstractQuery):
results = juxt(*queries)(self.neo4j_client)
if len(results) > 1:
return results
elif len(results) == 1:
return results[0]
else:
return None
class Neo4jQueryAsyncInvoker(Neo4jQueryInvoker):
def __init__(self,
neo4j_client: Neo4jClient,
max_workers: int = None):
self.executor = ThreadPoolExecutor(max_workers=max_workers)
super().__init__(neo4j_client)
def run(self, *queries: Neo4jAbstractQuery):
fs = list(map(self.executor.submit,
queries,
repeat(self.neo4j_client)))
if len(fs) > 1:
return fs
elif len(fs) == 1:
return fs[0]
else:
return None
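# --- Editor's illustrative sketch (not part of saapy) ------------------------
# How the factory and invoker above are meant to be combined.  The Cypher text
# and labels are placeholders, and the Neo4jClient instance is assumed to be
# constructed elsewhere.
def _example_run_query(neo4j_client: Neo4jClient):
    factory = Neo4jQueryFactory(default_labels=('Example',))
    query = factory.query('MATCH (n) RETURN count(n) AS n', labels=('Count',))
    invoker = Neo4jQueryInvoker(neo4j_client)
    return invoker.run(query)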
| {
"content_hash": "eb2e5995b02c9db371efdbb777adc90f",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 75,
"avg_line_length": 33.96992481203007,
"alnum_prop": 0.5061974324922532,
"repo_name": "ashapochka/saapy",
"id": "e96d139abf3453287d0d337ae32d1373d7c96a29",
"size": "4533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saapy/graphdb/neo4j_query.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "64263"
},
{
"name": "PLpgSQL",
"bytes": "35831"
},
{
"name": "Python",
"bytes": "1548547"
},
{
"name": "SQLPL",
"bytes": "30586"
}
],
"symlink_target": ""
} |
import pytest
CODEVERSION = '0.0.1'
NEW_CODEVERSION = '0.0.2'
@pytest.fixture()
def campaign():
from psiturk.models import Campaign
parameters = {
'codeversion': CODEVERSION,
'mode': 'sandbox',
'goal': 100,
'minutes_between_rounds': 1,
'assignments_per_round': 10,
'hit_reward': 1.00,
'hit_duration_hours': 1,
}
new_campaign = Campaign(**parameters)
from psiturk.db import db_session
db_session.add(new_campaign)
db_session.commit()
return new_campaign
def test_campaign_round_codeversion_change_cancel(patch_aws_services, campaign, mocker, caplog):
from psiturk.tasks import do_campaign_round
campaign_args = {
'campaign': campaign,
'job_id': campaign.campaign_job_id
}
from psiturk.experiment import app
mocker.patch.object(app.apscheduler,
'remove_job', lambda *args, **kwargs: True)
from psiturk.amt_services_wrapper import MTurkServicesWrapper
aws_services_wrapper = MTurkServicesWrapper()
aws_services_wrapper.config['Task Parameters']['experiment_code_version'] = NEW_CODEVERSION
import psiturk.tasks
mocker.patch.object(psiturk.tasks.TaskUtils, 'aws_services_wrapper', aws_services_wrapper)
import psiturk.experiment
remove_job_mock = mocker.patch.object(psiturk.experiment.app.apscheduler, 'remove_job')
do_campaign_round(**campaign_args)
remove_job_mock.assert_called()
def test_campaign_goal_met_cancel(patch_aws_services, campaign, mocker, caplog, stubber):
from psiturk.tasks import do_campaign_round
campaign_args = {
'campaign': campaign,
'job_id': campaign.campaign_job_id
}
from psiturk.experiment import app
mocker.patch.object(app.apscheduler,
'remove_job', lambda *args, **kwargs: True)
import psiturk.tasks
mocker.patch.object(psiturk.models.Participant, 'count_completed', lambda *args, **kwargs: campaign.goal)
import psiturk.experiment
remove_job_mock = mocker.patch.object(psiturk.experiment.app.apscheduler, 'remove_job')
do_campaign_round(**campaign_args)
remove_job_mock.assert_called()
assert not campaign.is_active
def test_campaign_posts_hits(patch_aws_services, stubber, campaign, mocker, caplog):
from psiturk.amt_services_wrapper import MTurkServicesWrapper
aws_services_wrapper = MTurkServicesWrapper()
import psiturk.tasks
mocker.patch.object(psiturk.tasks.TaskUtils, 'aws_services_wrapper', aws_services_wrapper)
mocked_create_hit = mocker.patch.object(aws_services_wrapper, 'create_hit')
campaign_args = {
'campaign': campaign,
'job_id': campaign.campaign_job_id
}
from psiturk.tasks import do_campaign_round
do_campaign_round(**campaign_args)
assert mocked_create_hit.call_count == 2
mocked_create_hit.assert_any_call(num_workers=9, reward=campaign.hit_reward, duration=campaign.hit_duration_hours)
mocked_create_hit.assert_any_call(num_workers=1, reward=campaign.hit_reward, duration=campaign.hit_duration_hours)
def test_task_approve_all(patch_aws_services, stubber, mocker, caplog):
from psiturk.amt_services_wrapper import MTurkServicesWrapper
aws_services_wrapper = MTurkServicesWrapper()
import psiturk.tasks
mocker.patch.object(psiturk.tasks.TaskUtils, 'aws_services_wrapper', aws_services_wrapper)
mocked_approve_all = mocker.patch.object(aws_services_wrapper, 'approve_all_assignments')
from psiturk.tasks import do_approve_all
do_approve_all('sandbox')
mocked_approve_all.assert_called_once()
| {
"content_hash": "d71ae409fde4d56686d81df9b01cde7b",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 118,
"avg_line_length": 33.7037037037037,
"alnum_prop": 0.7041208791208792,
"repo_name": "NYUCCL/psiTurk",
"id": "3e2750162578e0afa0a50c88dff6de04fcfe1bfb",
"size": "3640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1770"
},
{
"name": "HTML",
"bytes": "54741"
},
{
"name": "JavaScript",
"bytes": "47700"
},
{
"name": "Procfile",
"bytes": "25"
},
{
"name": "Python",
"bytes": "276143"
}
],
"symlink_target": ""
} |
from itertools import tee, chain, islice, groupby
from heapq import merge
def hamming_numbers():
# Generate "5-smooth" numbers, also called "Hamming numbers"
# or "Regular numbers". See: http://en.wikipedia.org/wiki/Regular_number
# Finds solutions to 2**i * 3**j * 5**k for some integers i, j, and k.
def deferred_output():
'Works like a forward reference to the "output" global variable'
for i in output:
yield i
result, p2, p3, p5 = tee(deferred_output(), 4) # split the output streams
m2 = (2*x for x in p2) # multiples of 2
m3 = (3*x for x in p3) # multiples of 3
m5 = (5*x for x in p5) # multiples of 5
merged = merge(m2, m3, m5)
combined = chain([1], merged) # prepend starting point
output = (k for k, v in groupby(combined)) # eliminate duplicates
return result
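# --- Editor's illustrative note (not part of the recipe) ---------------------
# The generator yields every positive integer whose only prime factors are
# 2, 3 and 5, in increasing order: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
def _example_first_hamming_numbers(n=10):
    return list(islice(hamming_numbers(), n))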
if __name__ == '__main__':
print(list(islice(hamming_numbers(), 1000))) # Show first 1000 hamming numbers
| {
"content_hash": "f800e849aa71837e5f0203e795047751",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 85,
"avg_line_length": 41.03846153846154,
"alnum_prop": 0.5773195876288659,
"repo_name": "ActiveState/code",
"id": "8ab9c9cc9bfcf96d22389c2db9078001e3357f97",
"size": "1067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/576961_Technique_for_cyclical_iteration/recipe-576961.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
import unittest
from mycroft.client.speech.hotword_factory import HotWordFactory
class PocketSphinxTest(unittest.TestCase):
def testDefault(self):
config = {
'hey mycroft': {
'module': 'pocketsphinx',
'phonemes': 'HH EY . M AY K R AO F T',
'threshold': 1e-90
}
}
p = HotWordFactory.create_hotword('hey mycroft', config)
config = config['hey mycroft']
self.assertEquals(config['phonemes'], p.phonemes)
self.assertEquals(config['threshold'], p.threshold)
def testInvalid(self):
config = {
'hey Zeds': {
'module': 'pocketsphinx',
'phonemes': 'ZZZZZZZZZ',
'threshold': 1e-90
}
}
p = HotWordFactory.create_hotword('hey Zeds', config)
self.assertEquals(p.phonemes, 'HH EY . M AY K R AO F T')
self.assertEquals(p.key_phrase, 'hey mycroft')
def testVictoria(self):
config = {
'hey victoria': {
'module': 'pocketsphinx',
'phonemes': 'HH EY . V IH K T AO R IY AH',
'threshold': 1e-90
}
}
p = HotWordFactory.create_hotword('hey victoria', config)
config = config['hey victoria']
self.assertEquals(config['phonemes'], p.phonemes)
self.assertEquals(p.key_phrase, 'hey victoria')
| {
"content_hash": "5f7b6fc596bcc4b84ce573923d35955b",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 65,
"avg_line_length": 33.372093023255815,
"alnum_prop": 0.5358885017421603,
"repo_name": "linuxipho/mycroft-core",
"id": "e4063656f7e585a7edb2ddd1ea341bb570f5383a",
"size": "2015",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "test/unittests/client/test_hotword_factory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1316535"
},
{
"name": "Shell",
"bytes": "69724"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class RetrieveToken(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the RetrieveToken Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(RetrieveToken, self).__init__(temboo_session, '/Library/Utilities/TokenStorage/RetrieveToken')
def new_input_set(self):
return RetrieveTokenInputSet()
def _make_result_set(self, result, path):
return RetrieveTokenResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return RetrieveTokenChoreographyExecution(session, exec_id, path)
class RetrieveTokenInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the RetrieveToken
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_LockToken(self, value):
"""
Set the value of the LockToken input for this Choreo. ((optional, boolean) If set to true, the Choreo will attempt to lock the token after retrieving it. If the token is already locked, the Choreo will attempt to get the lock for up-to 1 minute.)
"""
super(RetrieveTokenInputSet, self)._set_input('LockToken', value)
def set_Name(self, value):
"""
Set the value of the Name input for this Choreo. ((required, string) The name of the token to retrieve.)
"""
super(RetrieveTokenInputSet, self)._set_input('Name', value)
class RetrieveTokenResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the RetrieveToken Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Token(self):
"""
Retrieve the value for the "Token" output from this Choreo execution. ((string) The token value. This will return an empty string if there is no token or if the token has expired.)
"""
return self._output.get('Token', None)
def get_Locked(self):
"""
Retrieve the value for the "Locked" output from this Choreo execution. ((boolean) Returns true or false depending on whether the token is locked or not.)
"""
return self._output.get('Locked', None)
def get_Valid(self):
"""
Retrieve the value for the "Valid" output from this Choreo execution. ((boolean) Returns true or false depending on whether the token is valid or not.)
"""
return self._output.get('Valid', None)
class RetrieveTokenChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return RetrieveTokenResultSet(response, path)
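# --- Editor's illustrative sketch (not part of the generated SDK code) -------
# Typical calling pattern for this Choreo.  The token name is a placeholder and
# execute_with_results() is assumed here to be the standard execution method
# provided by the Temboo SDK's Choreography base class.
def _example_retrieve_token(temboo_session):
    choreo = RetrieveToken(temboo_session)
    inputs = choreo.new_input_set()
    inputs.set_Name('my-token')
    inputs.set_LockToken(True)
    results = choreo.execute_with_results(inputs)
    return results.get_Token(), results.get_Locked(), results.get_Valid()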
| {
"content_hash": "8f5fef8cd6568a4567d48082d8d7f53d",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 254,
"avg_line_length": 42.45070422535211,
"alnum_prop": 0.6950895819508959,
"repo_name": "jordanemedlock/psychtruths",
"id": "84ac11fe45f43e76b90ce7e24c0bd790a262dea2",
"size": "3861",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "temboo/Library/Utilities/TokenStorage/RetrieveToken.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
} |
"""Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver,
responding to calls to check its state, attaching persistent storage, and
terminating it.
"""
import base64
import contextlib
import functools
import socket
import sys
import time
import traceback
import uuid
from cinderclient import exceptions as cinder_exception
import eventlet.event
from eventlet import greenthread
import eventlet.semaphore
import eventlet.timeout
from keystoneclient import exceptions as keystone_exception
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
import six
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import build_results
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova import consoleauth
import nova.context
from nova import exception
from nova import hooks
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova.image import glance
from nova import manager
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import loopingcall
from nova.openstack.common import periodic_task
from nova import paths
from nova import rpc
from nova import safe_utils
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import storage_users
from nova.virt import virtapi
from nova import volume
from nova.volume import encryptors
compute_opts = [
cfg.StrOpt('console_host',
default=socket.gethostname(),
help='Console proxy host to use to connect '
'to instances on this host.'),
cfg.StrOpt('default_access_ip_network_name',
help='Name of network to use to set access IPs for instances'),
cfg.BoolOpt('defer_iptables_apply',
default=False,
help='Whether to batch up the application of IPTables rules'
' during a host restart and apply all at the end of the'
' init phase'),
cfg.StrOpt('instances_path',
default=paths.state_path_def('instances'),
help='Where instances are stored on disk'),
cfg.BoolOpt('instance_usage_audit',
default=False,
help="Generate periodic compute.instance.exists"
" notifications"),
cfg.IntOpt('live_migration_retry_count',
default=30,
help="Number of 1 second retries needed in live_migration"),
cfg.BoolOpt('resume_guests_state_on_host_boot',
default=False,
help='Whether to start guests that were running before the '
'host rebooted'),
cfg.IntOpt('network_allocate_retries',
default=0,
help="Number of times to retry network allocation on failures"),
cfg.IntOpt('max_concurrent_builds',
default=10,
help='Maximum number of instance builds to run concurrently'),
cfg.IntOpt('block_device_allocate_retries',
default=60,
help='Number of times to retry block device'
' allocation on failures')
]
interval_opts = [
cfg.IntOpt('bandwidth_poll_interval',
default=600,
help='Interval to pull network bandwidth usage info. Not '
'supported on all hypervisors. Set to -1 to disable. '
'Setting this to 0 will run at the default rate.'),
cfg.IntOpt('sync_power_state_interval',
default=600,
help='Interval to sync power states between the database and '
'the hypervisor. Set to -1 to disable. '
'Setting this to 0 will run at the default rate.'),
cfg.IntOpt("heal_instance_info_cache_interval",
default=60,
help="Number of seconds between instance info_cache self "
"healing updates"),
cfg.IntOpt('reclaim_instance_interval',
default=0,
help='Interval in seconds for reclaiming deleted instances'),
cfg.IntOpt('volume_usage_poll_interval',
default=0,
help='Interval in seconds for gathering volume usages'),
cfg.IntOpt('shelved_poll_interval',
default=3600,
help='Interval in seconds for polling shelved instances to '
'offload. Set to -1 to disable.'
'Setting this to 0 will run at the default rate.'),
cfg.IntOpt('shelved_offload_time',
default=0,
help='Time in seconds before a shelved instance is eligible '
'for removing from a host. -1 never offload, 0 offload '
'when shelved'),
cfg.IntOpt('instance_delete_interval',
default=300,
help='Interval in seconds for retrying failed instance file '
'deletes. Set to -1 to disable. '
'Setting this to 0 will run at the default rate.'),
cfg.IntOpt('block_device_allocate_retries_interval',
default=3,
help='Waiting time interval (seconds) between block'
' device allocation retries on failures')
]
timeout_opts = [
cfg.IntOpt("reboot_timeout",
default=0,
help="Automatically hard reboot an instance if it has been "
"stuck in a rebooting state longer than N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("instance_build_timeout",
default=0,
help="Amount of time in seconds an instance can be in BUILD "
"before going into ERROR status. "
"Set to 0 to disable."),
cfg.IntOpt("rescue_timeout",
default=0,
help="Automatically unrescue an instance after N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("resize_confirm_window",
default=0,
help="Automatically confirm resizes after N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("shutdown_timeout",
default=60,
help="Total amount of time to wait in seconds for an instance "
"to perform a clean shutdown."),
]
running_deleted_opts = [
cfg.StrOpt("running_deleted_instance_action",
default="reap",
help="Action to take if a running deleted instance is detected."
" Valid options are 'noop', 'log', 'shutdown', or 'reap'. "
"Set to 'noop' to take no action."),
cfg.IntOpt("running_deleted_instance_poll_interval",
default=1800,
help="Number of seconds to wait between runs of the cleanup "
"task."),
cfg.IntOpt("running_deleted_instance_timeout",
default=0,
help="Number of seconds after being deleted when a running "
"instance should be considered eligible for cleanup."),
]
instance_cleaning_opts = [
cfg.IntOpt('maximum_instance_delete_attempts',
default=5,
help='The number of times to attempt to reap an instance\'s '
'files.'),
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.register_opts(interval_opts)
CONF.register_opts(timeout_opts)
CONF.register_opts(running_deleted_opts)
CONF.register_opts(instance_cleaning_opts)
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
CONF.import_opt('console_topic', 'nova.console.rpcapi')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('vnc_enabled', 'nova.vnc')
CONF.import_opt('enabled', 'nova.spice', group='spice')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('image_cache_manager_interval', 'nova.virt.imagecache')
CONF.import_opt('enabled', 'nova.rdp', group='rdp')
CONF.import_opt('html5_proxy_base_url', 'nova.rdp', group='rdp')
CONF.import_opt('enabled', 'nova.console.serial', group='serial_console')
CONF.import_opt('base_url', 'nova.console.serial', group='serial_console')
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
@utils.expects_func_args('migration')
def errors_out_migration(function):
"""Decorator to error out migration on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except Exception:
with excutils.save_and_reraise_exception():
migration = kwargs['migration']
status = migration.status
if status not in ['migrating', 'post-migrating']:
return
migration.status = 'error'
try:
with migration.obj_as_admin():
migration.save()
except Exception:
LOG.debug('Error setting migration status '
'for instance %s.',
migration.instance_uuid, exc_info=True)
return decorated_function
@utils.expects_func_args('instance')
def reverts_task_state(function):
"""Decorator to revert task_state on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.UnexpectedTaskStateError as e:
# Note(maoy): unexpected task state means the current
# task is preempted. Do not clear task state in this
# case.
with excutils.save_and_reraise_exception():
LOG.info(_LI("Task possibly preempted: %s"),
e.format_message())
except Exception:
with excutils.save_and_reraise_exception():
try:
self._instance_update(context,
kwargs['instance']['uuid'],
task_state=None)
except Exception:
pass
return decorated_function
@utils.expects_func_args('instance')
def wrap_instance_fault(function):
"""Wraps a method to catch exceptions related to instances.
    This decorator wraps a method to catch any instance-related exceptions
    that may be thrown. It then logs an instance fault in the db.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.InstanceNotFound:
raise
except Exception as e:
# NOTE(gtt): If argument 'instance' is in args rather than kwargs,
# we will get a KeyError exception which will cover up the real
# exception. So, we update kwargs with the values from args first.
# then, we can get 'instance' from kwargs easily.
kwargs.update(dict(zip(function.func_code.co_varnames[2:], args)))
with excutils.save_and_reraise_exception():
compute_utils.add_instance_fault_from_exc(context,
kwargs['instance'], e, sys.exc_info())
return decorated_function
@utils.expects_func_args('instance')
def wrap_instance_event(function):
"""Wraps a method to log the event taken on the instance, and result.
This decorator wraps a method to log the start and result of an event, as
part of an action taken on an instance.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
wrapped_func = utils.get_wrapped_function(function)
keyed_args = safe_utils.getcallargs(wrapped_func, context, *args,
**kwargs)
instance_uuid = keyed_args['instance']['uuid']
event_name = 'compute_{0}'.format(function.func_name)
with compute_utils.EventReporter(context, event_name, instance_uuid):
return function(self, context, *args, **kwargs)
return decorated_function
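# Illustrative note (assumption, the method name is an example only): because
# the event name is built from function.func_name, decorating
#
#     @wrap_instance_event
#     def reboot_instance(self, context, instance, ...):
#         ...
#
# records a 'compute_reboot_instance' instance-action event via
# compute_utils.EventReporter, covering both the start and the outcome.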
@utils.expects_func_args('image_id', 'instance')
def delete_image_on_error(function):
"""Used for snapshot related method to ensure the image created in
compute.api is deleted when an error occurs.
"""
@functools.wraps(function)
def decorated_function(self, context, image_id, instance,
*args, **kwargs):
try:
return function(self, context, image_id, instance,
*args, **kwargs)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug("Cleaning up image %s", image_id,
exc_info=True, instance=instance)
try:
self.image_api.delete(context, image_id)
except Exception:
LOG.exception(_LE("Error while trying to clean up "
"image %s"), image_id,
instance=instance)
return decorated_function
# TODO(danms): Remove me after Icehouse
# NOTE(mikal): if the method being decorated has more than one decorator, then
# put this one first. Otherwise the various exception handling decorators do
# not function correctly.
def object_compat(function):
"""Wraps a method that expects a new-world instance
This provides compatibility for callers passing old-style dict
instances.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
def _load_instance(instance_or_dict):
if isinstance(instance_or_dict, dict):
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance_or_dict,
expected_attrs=metas)
instance._context = context
return instance
return instance_or_dict
metas = ['metadata', 'system_metadata']
try:
kwargs['instance'] = _load_instance(kwargs['instance'])
except KeyError:
args = (_load_instance(args[0]),) + args[1:]
migration = kwargs.get('migration')
if isinstance(migration, dict):
migration = objects.Migration._from_db_object(
context.elevated(), objects.Migration(),
migration)
kwargs['migration'] = migration
return function(self, context, *args, **kwargs)
return decorated_function
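# Minimal sketch of what object_compat tolerates (assumption, values are
# placeholders): an old-style caller may still pass a DB-style dict,
#
#     manager.some_method(ctxt, instance=instance_db_dict)
#
# and the decorator hydrates it into an objects.Instance (with 'metadata'
# and 'system_metadata' loaded) before the wrapped method runs; dict
# migrations are converted to objects.Migration the same way.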
# TODO(danms): Remove me after Icehouse
def aggregate_object_compat(function):
"""Wraps a method that expects a new-world aggregate."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
aggregate = kwargs.get('aggregate')
if isinstance(aggregate, dict):
aggregate = objects.Aggregate._from_db_object(
context.elevated(), objects.Aggregate(),
aggregate)
kwargs['aggregate'] = aggregate
return function(self, context, *args, **kwargs)
return decorated_function
class InstanceEvents(object):
def __init__(self):
self._events = {}
@staticmethod
def _lock_name(instance):
return '%s-%s' % (instance.uuid, 'events')
def prepare_for_instance_event(self, instance, event_name):
"""Prepare to receive an event for an instance.
This will register an event for the given instance that we will
wait on later. This should be called before initiating whatever
action will trigger the event. The resulting eventlet.event.Event
object should be wait()'d on to ensure completion.
:param instance: the instance for which the event will be generated
:param event_name: the name of the event we're expecting
:returns: an event object that should be wait()'d on
"""
@utils.synchronized(self._lock_name(instance))
def _create_or_get_event():
if instance.uuid not in self._events:
self._events.setdefault(instance.uuid, {})
return self._events[instance.uuid].setdefault(
event_name, eventlet.event.Event())
LOG.debug('Preparing to wait for external event %(event)s',
{'event': event_name}, instance=instance)
return _create_or_get_event()
def pop_instance_event(self, instance, event):
"""Remove a pending event from the wait list.
This will remove a pending event from the wait list so that it
can be used to signal the waiters to wake up.
:param instance: the instance for which the event was generated
:param event: the nova.objects.external_event.InstanceExternalEvent
that describes the event
:returns: the eventlet.event.Event object on which the waiters
are blocked
"""
no_events_sentinel = object()
no_matching_event_sentinel = object()
@utils.synchronized(self._lock_name(instance))
def _pop_event():
events = self._events.get(instance.uuid)
if not events:
return no_events_sentinel
_event = events.pop(event.key, None)
if not events:
del self._events[instance.uuid]
if _event is None:
return no_matching_event_sentinel
return _event
result = _pop_event()
if result == no_events_sentinel:
LOG.debug('No waiting events found dispatching %(event)s',
{'event': event.key},
instance=instance)
return None
elif result == no_matching_event_sentinel:
LOG.debug('No event matching %(event)s in %(events)s',
{'event': event.key,
'events': self._events.get(instance.uuid, {}).keys()},
instance=instance)
return None
else:
return result
def clear_events_for_instance(self, instance):
"""Remove all pending events for an instance.
This will remove all events currently pending for an instance
and return them (indexed by event name).
:param instance: the instance for which events should be purged
:returns: a dictionary of {event_name: eventlet.event.Event}
"""
@utils.synchronized(self._lock_name(instance))
def _clear_events():
return self._events.pop(instance.uuid, {})
return _clear_events()
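# Illustrative flow for InstanceEvents (a sketch; the event key below is only
# an example of the "<name>-<tag>" form produced by
# objects.InstanceExternalEvent.make_key()):
#
#     1. prepare_for_instance_event(instance, 'network-vif-plugged-<port>')
#        registers an eventlet.event.Event before the action is started.
#     2. When the external event arrives, pop_instance_event() returns that
#        Event so the caller can send() it and wake whoever is wait()'ing.
#     3. clear_events_for_instance() drops anything still pending, e.g. when
#        the instance goes away.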
class ComputeVirtAPI(virtapi.VirtAPI):
def __init__(self, compute):
super(ComputeVirtAPI, self).__init__()
self._compute = compute
def provider_fw_rule_get_all(self, context):
return self._compute.conductor_api.provider_fw_rule_get_all(context)
def _default_error_callback(self, event_name, instance):
raise exception.NovaException(_('Instance event failed'))
@contextlib.contextmanager
def wait_for_instance_event(self, instance, event_names, deadline=300,
error_callback=None):
"""Plan to wait for some events, run some code, then wait.
This context manager will first create plans to wait for the
provided event_names, yield, and then wait for all the scheduled
events to complete.
Note that this uses an eventlet.timeout.Timeout to bound the
operation, so callers should be prepared to catch that
failure and handle that situation appropriately.
If the event is not received by the specified timeout deadline,
eventlet.timeout.Timeout is raised.
If the event is received but did not have a 'completed'
status, a NovaException is raised. If an error_callback is
provided, instead of raising an exception as detailed above
for the failure case, the callback will be called with the
event_name and instance, and can return True to continue
waiting for the rest of the events, False to stop processing,
or raise an exception which will bubble up to the waiter.
:param instance: The instance for which an event is expected
:param event_names: A list of event names. Each element can be a
string event name or tuple of strings to
indicate (name, tag).
:param deadline: Maximum number of seconds we should wait for all
of the specified events to arrive.
:param error_callback: A function to be called if an event arrives
"""
if error_callback is None:
error_callback = self._default_error_callback
events = {}
for event_name in event_names:
if isinstance(event_name, tuple):
name, tag = event_name
event_name = objects.InstanceExternalEvent.make_key(
name, tag)
events[event_name] = (
self._compute.instance_events.prepare_for_instance_event(
instance, event_name))
yield
with eventlet.timeout.Timeout(deadline):
for event_name, event in events.items():
actual_event = event.wait()
if actual_event.status == 'completed':
continue
decision = error_callback(event_name, instance)
if decision is False:
break
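# A minimal usage sketch for wait_for_instance_event(), assuming a
# hypothetical helper that plugs a VIF and waits for the corresponding
# external event:
#
#     with virtapi.wait_for_instance_event(
#             instance, [('network-vif-plugged', vif_id)], deadline=300):
#         plug_the_vif(instance, vif_id)
#
# The waiting happens after the with-body completes; a missed deadline raises
# eventlet.timeout.Timeout, while an event that arrives without 'completed'
# status goes through error_callback.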
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
target = messaging.Target(version='3.39')
# How long to wait in seconds before re-issuing a shutdown
    # signal to an instance during power off. The overall
# time to wait is set by CONF.shutdown_timeout.
SHUTDOWN_RETRY_INTERVAL = 10
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
self.virtapi = ComputeVirtAPI(self)
self.network_api = network.API()
self.volume_api = volume.API()
self.image_api = image.API()
self._last_host_check = 0
self._last_bw_usage_poll = 0
self._bw_usage_supported = True
self._last_bw_usage_cell_update = 0
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.conductor_api = conductor.API()
self.compute_task_api = conductor.ComputeTaskAPI()
self.is_neutron_security_groups = (
openstack_driver.is_neutron_security_groups())
self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self._resource_tracker_dict = {}
self.instance_events = InstanceEvents()
self._sync_power_pool = eventlet.GreenPool()
self._syncs_in_progress = {}
if CONF.max_concurrent_builds != 0:
self._build_semaphore = eventlet.semaphore.Semaphore(
CONF.max_concurrent_builds)
else:
self._build_semaphore = compute_utils.UnlimitedSemaphore()
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
# NOTE(russellb) Load the driver last. It may call back into the
# compute manager via the virtapi, so we want it to be fully
# initialized before that happens.
self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
self.use_legacy_block_device_info = \
self.driver.need_legacy_block_device_info
def _get_resource_tracker(self, nodename):
rt = self._resource_tracker_dict.get(nodename)
if not rt:
if not self.driver.node_is_available(nodename):
raise exception.NovaException(
_("%s is not a valid node managed by this "
"compute host.") % nodename)
rt = resource_tracker.ResourceTracker(self.host,
self.driver,
nodename)
self._resource_tracker_dict[nodename] = rt
return rt
def _update_resource_tracker(self, context, instance):
"""Let the resource tracker know that an instance has changed state."""
if (instance['host'] == self.host and
self.driver.node_is_available(instance['node'])):
rt = self._get_resource_tracker(instance.get('node'))
rt.update_usage(context, instance)
def _instance_update(self, context, instance_uuid, **kwargs):
"""Update an instance in the database using kwargs as value."""
instance_ref = self.conductor_api.instance_update(context,
instance_uuid,
**kwargs)
self._update_resource_tracker(context, instance_ref)
return instance_ref
def _set_instance_error_state(self, context, instance):
instance_uuid = instance.uuid
try:
self._instance_update(context, instance_uuid,
vm_state=vm_states.ERROR)
except exception.InstanceNotFound:
LOG.debug('Instance has been destroyed from under us while '
'trying to set it to ERROR',
instance_uuid=instance_uuid)
def _set_instance_obj_error_state(self, context, instance):
try:
instance.vm_state = vm_states.ERROR
instance.save()
except exception.InstanceNotFound:
LOG.debug('Instance has been destroyed from under us while '
'trying to set it to ERROR', instance=instance)
def _get_instances_on_driver(self, context, filters=None):
"""Return a list of instance records for the instances found
on the hypervisor which satisfy the specified filters. If filters=None
return a list of instance records for all the instances found on the
hypervisor.
"""
if not filters:
filters = {}
try:
driver_uuids = self.driver.list_instance_uuids()
if len(driver_uuids) == 0:
# Short circuit, don't waste a DB call
return objects.InstanceList()
filters['uuid'] = driver_uuids
local_instances = objects.InstanceList.get_by_filters(
context, filters, use_slave=True)
return local_instances
except NotImplementedError:
pass
# The driver doesn't support uuids listing, so we'll have
# to brute force.
driver_instances = self.driver.list_instances()
instances = objects.InstanceList.get_by_filters(context, filters,
use_slave=True)
name_map = {instance.name: instance for instance in instances}
local_instances = []
for driver_instance in driver_instances:
instance = name_map.get(driver_instance)
if not instance:
continue
local_instances.append(instance)
return local_instances
def _destroy_evacuated_instances(self, context):
"""Destroys evacuated instances.
While nova-compute was down, the instances running on it could be
evacuated to another host. Check that the instances reported
by the driver are still associated with this host. If they are
not, destroy them, with the exception of instances which are in
the MIGRATING, RESIZE_MIGRATING, RESIZE_MIGRATED, RESIZE_FINISH
task state or RESIZED vm state.
"""
our_host = self.host
filters = {'deleted': False}
local_instances = self._get_instances_on_driver(context, filters)
for instance in local_instances:
if instance.host != our_host:
if (instance.task_state in [task_states.MIGRATING,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH]
or instance.vm_state in [vm_states.RESIZED]):
LOG.debug('Will not delete instance as its host ('
'%(instance_host)s) is not equal to our '
'host (%(our_host)s) but its task state is '
'(%(task_state)s) and vm state is '
'(%(vm_state)s)',
{'instance_host': instance.host,
'our_host': our_host,
'task_state': instance.task_state,
'vm_state': instance.vm_state},
instance=instance)
continue
LOG.info(_LI('Deleting instance as its host ('
'%(instance_host)s) is not equal to our '
'host (%(our_host)s).'),
{'instance_host': instance.host,
'our_host': our_host}, instance=instance)
try:
network_info = self._get_instance_nw_info(context,
instance)
bdi = self._get_instance_block_device_info(context,
instance)
destroy_disks = not (self._is_instance_storage_shared(
context, instance))
except exception.InstanceNotFound:
network_info = network_model.NetworkInfo()
bdi = {}
LOG.info(_LI('Instance has been marked deleted already, '
'removing it from the hypervisor.'),
instance=instance)
# always destroy disks if the instance was deleted
destroy_disks = True
self.driver.destroy(context, instance,
network_info,
bdi, destroy_disks)
def _is_instance_storage_shared(self, context, instance):
shared_storage = True
data = None
try:
data = self.driver.check_instance_shared_storage_local(context,
instance)
if data:
shared_storage = (self.compute_rpcapi.
check_instance_shared_storage(context,
instance, data))
except NotImplementedError:
LOG.warning(_LW('Hypervisor driver does not support '
'instance shared storage check, '
'assuming it\'s not on shared storage'),
instance=instance)
shared_storage = False
except Exception:
            LOG.exception(_LE('Failed to check if instance is on shared '
                              'storage'), instance=instance)
finally:
if data:
self.driver.check_instance_shared_storage_cleanup(context,
data)
return shared_storage
def _complete_partial_deletion(self, context, instance):
"""Complete deletion for instances in DELETED status but not marked as
deleted in the DB
"""
instance.destroy()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
quotas = objects.Quotas(context)
project_id, user_id = objects.quotas.ids_from_instance(context,
instance)
quotas.reserve(context, project_id=project_id, user_id=user_id,
instances=-1, cores=-instance.vcpus,
ram=-instance.memory_mb)
self._complete_deletion(context,
instance,
bdms,
quotas,
instance.system_metadata)
def _complete_deletion(self, context, instance, bdms,
quotas, system_meta):
if quotas:
quotas.commit()
# ensure block device mappings are not leaked
for bdm in bdms:
bdm.destroy()
self._notify_about_instance_usage(context, instance, "delete.end",
system_metadata=system_meta)
if CONF.vnc_enabled or CONF.spice.enabled:
if CONF.cells.enable:
self.cells_rpcapi.consoleauth_delete_tokens(context,
instance.uuid)
else:
self.consoleauth_rpcapi.delete_tokens_for_instance(context,
instance.uuid)
def _init_instance(self, context, instance):
'''Initialize this instance during service init.'''
        # Instances that are shut down or in an error state can not be
# initialized and are not attempted to be recovered. The exception
# to this are instances that are in RESIZE_MIGRATING or DELETING,
# which are dealt with further down.
if (instance.vm_state == vm_states.SOFT_DELETED or
(instance.vm_state == vm_states.ERROR and
instance.task_state not in
(task_states.RESIZE_MIGRATING, task_states.DELETING))):
LOG.debug("Instance is in %s state.",
instance.vm_state, instance=instance)
return
if instance.vm_state == vm_states.DELETED:
try:
self._complete_partial_deletion(context, instance)
except Exception:
                # we don't want an exception to block init_host
msg = _LE('Failed to complete a deletion')
LOG.exception(msg, instance=instance)
return
if (instance.vm_state == vm_states.BUILDING or
instance.task_state in [task_states.SCHEDULING,
task_states.BLOCK_DEVICE_MAPPING,
task_states.NETWORKING,
task_states.SPAWNING]):
            # NOTE(dave-mcnally) compute stopped before the instance was
            # fully spawned, so set it to ERROR state. This is safe to do
            # because the state may be set by the api but the host is not,
            # so if we get here the instance has already been scheduled to
            # this particular host.
LOG.debug("Instance failed to spawn correctly, "
"setting to ERROR state", instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ERROR
instance.save()
return
if (instance.vm_state in [vm_states.ACTIVE, vm_states.STOPPED] and
instance.task_state in [task_states.REBUILDING,
task_states.REBUILD_BLOCK_DEVICE_MAPPING,
task_states.REBUILD_SPAWNING]):
# NOTE(jichenjc) compute stopped before instance was fully
            # spawned, so set it to ERROR state. This is consistent with the
            # BUILDING case above.
LOG.debug("Instance failed to rebuild correctly, "
"setting to ERROR state", instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ERROR
instance.save()
return
if (instance.vm_state != vm_states.ERROR and
instance.task_state in [task_states.IMAGE_SNAPSHOT_PENDING,
task_states.IMAGE_PENDING_UPLOAD,
task_states.IMAGE_UPLOADING,
task_states.IMAGE_SNAPSHOT]):
LOG.debug("Instance in transitional state %s at start-up "
"clearing task state",
instance.task_state, instance=instance)
try:
self._post_interrupted_snapshot_cleanup(context, instance)
except Exception:
                # we don't want an exception to block init_host
msg = _LE('Failed to cleanup snapshot.')
LOG.exception(msg, instance=instance)
instance.task_state = None
instance.save()
if instance.task_state == task_states.DELETING:
try:
LOG.info(_LI('Service started deleting the instance during '
'the previous run, but did not finish. Restarting'
' the deletion now.'), instance=instance)
instance.obj_load_attr('metadata')
instance.obj_load_attr('system_metadata')
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
                # FIXME(comstud): This needs fixing. We should be creating
# reservations and updating quotas, because quotas
# wouldn't have been updated for this instance since it is
# still in DELETING. See bug 1296414.
#
# Create a dummy quota object for now.
quotas = objects.Quotas.from_reservations(
context, None, instance=instance)
self._delete_instance(context, instance, bdms, quotas)
except Exception:
                # we don't want an exception to block init_host
msg = _LE('Failed to complete a deletion')
LOG.exception(msg, instance=instance)
self._set_instance_error_state(context, instance)
return
try_reboot, reboot_type = self._retry_reboot(context, instance)
current_power_state = self._get_power_state(context, instance)
if try_reboot:
LOG.debug("Instance in transitional state (%(task_state)s) at "
"start-up and power state is (%(power_state)s), "
"triggering reboot",
{'task_state': instance.task_state,
'power_state': current_power_state},
instance=instance)
self.compute_rpcapi.reboot_instance(context, instance,
block_device_info=None,
reboot_type=reboot_type)
return
elif (current_power_state == power_state.RUNNING and
instance.task_state in [task_states.REBOOT_STARTED,
task_states.REBOOT_STARTED_HARD,
task_states.PAUSING,
task_states.UNPAUSING]):
LOG.warning(_LW("Instance in transitional state "
"(%(task_state)s) at start-up and power state "
"is (%(power_state)s), clearing task state"),
{'task_state': instance.task_state,
'power_state': current_power_state},
instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ACTIVE
instance.save()
elif (current_power_state == power_state.PAUSED and
instance.task_state == task_states.UNPAUSING):
LOG.warning(_LW("Instance in transitional state "
"(%(task_state)s) at start-up and power state "
"is (%(power_state)s), clearing task state "
"and unpausing the instance"),
{'task_state': instance.task_state,
'power_state': current_power_state},
instance=instance)
try:
self.unpause_instance(context, instance)
except NotImplementedError:
                # Some virt drivers don't support pause and unpause
pass
except Exception:
LOG.exception(_LE('Failed to unpause instance'),
instance=instance)
return
if instance.task_state == task_states.POWERING_OFF:
try:
LOG.debug("Instance in transitional state %s at start-up "
"retrying stop request",
instance.task_state, instance=instance)
self.stop_instance(context, instance)
except Exception:
                # we don't want an exception to block init_host
msg = _LE('Failed to stop instance')
LOG.exception(msg, instance=instance)
return
if instance.task_state == task_states.POWERING_ON:
try:
LOG.debug("Instance in transitional state %s at start-up "
"retrying start request",
instance.task_state, instance=instance)
self.start_instance(context, instance)
except Exception:
                # we don't want an exception to block init_host
msg = _LE('Failed to start instance')
LOG.exception(msg, instance=instance)
return
net_info = compute_utils.get_nw_info_for_instance(instance)
try:
self.driver.plug_vifs(instance, net_info)
except NotImplementedError as e:
LOG.debug(e, instance=instance)
except exception.VirtualInterfacePlugException:
# we don't want an exception to block the init_host
LOG.exception(_LE("Vifs plug failed"), instance=instance)
self._set_instance_error_state(context, instance)
return
if instance.task_state == task_states.RESIZE_MIGRATING:
# We crashed during resize/migration, so roll back for safety
try:
# NOTE(mriedem): check old_vm_state for STOPPED here, if it's
# not in system_metadata we default to True for backwards
# compatibility
power_on = (instance.system_metadata.get('old_vm_state') !=
vm_states.STOPPED)
block_dev_info = self._get_instance_block_device_info(context,
instance)
self.driver.finish_revert_migration(context,
instance, net_info, block_dev_info, power_on)
            except Exception:
LOG.exception(_LE('Failed to revert crashed migration'),
instance=instance)
finally:
LOG.info(_LI('Instance found in migrating state during '
'startup. Resetting task_state'),
instance=instance)
instance.task_state = None
instance.save()
if instance.task_state == task_states.MIGRATING:
# Live migration did not complete, but instance is on this
# host, so reset the state.
instance.task_state = None
instance.save(expected_task_state=[task_states.MIGRATING])
db_state = instance.power_state
drv_state = self._get_power_state(context, instance)
expect_running = (db_state == power_state.RUNNING and
drv_state != db_state)
LOG.debug('Current state is %(drv_state)s, state in DB is '
'%(db_state)s.',
{'drv_state': drv_state, 'db_state': db_state},
instance=instance)
if expect_running and CONF.resume_guests_state_on_host_boot:
LOG.info(_LI('Rebooting instance after nova-compute restart.'),
instance=instance)
block_device_info = \
self._get_instance_block_device_info(context, instance)
try:
self.driver.resume_state_on_host_boot(
context, instance, net_info, block_device_info)
except NotImplementedError:
LOG.warning(_LW('Hypervisor driver does not support '
'resume guests'), instance=instance)
except Exception:
# NOTE(vish): The instance failed to resume, so we set the
# instance to error and attempt to continue.
LOG.warning(_LW('Failed to resume instance'),
instance=instance)
self._set_instance_error_state(context, instance)
elif drv_state == power_state.RUNNING:
# VMwareAPI drivers will raise an exception
try:
self.driver.ensure_filtering_rules_for_instance(
instance, net_info)
except NotImplementedError:
LOG.warning(_LW('Hypervisor driver does not support '
'firewall rules'), instance=instance)
def _retry_reboot(self, context, instance):
current_power_state = self._get_power_state(context, instance)
current_task_state = instance.task_state
retry_reboot = False
reboot_type = compute_utils.get_reboot_type(current_task_state,
current_power_state)
pending_soft = (current_task_state == task_states.REBOOT_PENDING and
instance.vm_state in vm_states.ALLOW_SOFT_REBOOT)
pending_hard = (current_task_state == task_states.REBOOT_PENDING_HARD
and instance.vm_state in vm_states.ALLOW_HARD_REBOOT)
started_not_running = (current_task_state in
[task_states.REBOOT_STARTED,
task_states.REBOOT_STARTED_HARD] and
current_power_state != power_state.RUNNING)
if pending_soft or pending_hard or started_not_running:
retry_reboot = True
return retry_reboot, reboot_type
def handle_lifecycle_event(self, event):
LOG.info(_LI("VM %(state)s (Lifecycle Event)"),
{'state': event.get_name()},
instance_uuid=event.get_instance_uuid())
context = nova.context.get_admin_context(read_deleted='yes')
instance = objects.Instance.get_by_uuid(context,
event.get_instance_uuid(),
expected_attrs=[])
vm_power_state = None
if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED:
vm_power_state = power_state.SHUTDOWN
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_STARTED:
vm_power_state = power_state.RUNNING
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_PAUSED:
vm_power_state = power_state.PAUSED
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_RESUMED:
vm_power_state = power_state.RUNNING
else:
LOG.warning(_LW("Unexpected power state %d"),
event.get_transition())
if vm_power_state is not None:
LOG.debug('Synchronizing instance power state after lifecycle '
'event "%(event)s"; current vm_state: %(vm_state)s, '
'current task_state: %(task_state)s, current DB '
'power_state: %(db_power_state)s, VM power_state: '
'%(vm_power_state)s',
dict(event=event.get_name(),
vm_state=instance.vm_state,
task_state=instance.task_state,
db_power_state=instance.power_state,
vm_power_state=vm_power_state),
instance_uuid=instance.uuid)
self._sync_instance_power_state(context,
instance,
vm_power_state)
def handle_events(self, event):
if isinstance(event, virtevent.LifecycleEvent):
try:
self.handle_lifecycle_event(event)
except exception.InstanceNotFound:
LOG.debug("Event %s arrived for non-existent instance. The "
"instance was probably deleted.", event)
else:
LOG.debug("Ignoring event %s", event)
def init_virt_events(self):
self.driver.register_event_listener(self.handle_events)
def init_host(self):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
context = nova.context.get_admin_context()
instances = objects.InstanceList.get_by_host(
context, self.host, expected_attrs=['info_cache'])
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_on()
self.init_virt_events()
try:
# checking that instance was not already evacuated to other host
self._destroy_evacuated_instances(context)
for instance in instances:
self._init_instance(context, instance)
finally:
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_off()
def cleanup_host(self):
self.driver.cleanup_host(host=self.host)
def pre_start_hook(self):
"""After the service is initialized, but before we fully bring
the service up by listening on RPC queues, make sure to update
our available resources (and indirectly our available nodes).
"""
self.update_available_resource(nova.context.get_admin_context())
def _get_power_state(self, context, instance):
"""Retrieve the power state for the given instance."""
LOG.debug('Checking state', instance=instance)
try:
return self.driver.get_info(instance).state
except exception.InstanceNotFound:
return power_state.NOSTATE
def get_console_topic(self, context):
"""Retrieves the console host for a project on this host.
Currently this is just set in the flags for each compute host.
"""
# TODO(mdragon): perhaps make this variable by console_type?
return '%s.%s' % (CONF.console_topic, CONF.console_host)
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@wrap_exception()
def refresh_security_group_rules(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group rules.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_rules(security_group_id)
@wrap_exception()
def refresh_security_group_members(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group members.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_members(security_group_id)
@wrap_exception()
def refresh_instance_security_rules(self, context, instance):
"""Tell the virtualization driver to refresh security rules for
an instance.
Passes straight through to the virtualization driver.
Synchronise the call because we may still be in the middle of
creating the instance.
"""
@utils.synchronized(instance.uuid)
def _sync_refresh():
try:
return self.driver.refresh_instance_security_rules(instance)
except NotImplementedError:
LOG.warning(_LW('Hypervisor driver does not support '
'security groups.'), instance=instance)
return _sync_refresh()
@wrap_exception()
def refresh_provider_fw_rules(self, context):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
def _get_instance_nw_info(self, context, instance, use_slave=False):
"""Get a list of dictionaries of network data of an instance."""
if (not hasattr(instance, 'system_metadata') or
len(instance.system_metadata) == 0):
# NOTE(danms): Several places in the code look up instances without
# pulling system_metadata for performance, and call this function.
# If we get an instance without it, re-fetch so that the call
# to network_api (which requires it for instance_type) will
# succeed.
attrs = ['system_metadata']
instance = objects.Instance.get_by_uuid(context,
instance.uuid,
expected_attrs=attrs,
use_slave=use_slave)
network_info = self.network_api.get_instance_nw_info(context,
instance)
return network_info
def _await_block_device_map_created(self, context, vol_id):
# TODO(yamahata): creating volume simultaneously
# reduces creation time?
# TODO(yamahata): eliminate dumb polling
start = time.time()
retries = CONF.block_device_allocate_retries
if retries < 0:
LOG.warning(_LW("Treating negative config value (%(retries)s) for "
"'block_device_retries' as 0."),
{'retries': retries})
# (1) treat negative config value as 0
# (2) the configured value is 0, one attempt should be made
# (3) the configured value is > 0, then the total number attempts
# is (retries + 1)
attempts = 1
if retries >= 1:
attempts = retries + 1
for attempt in range(1, attempts + 1):
volume = self.volume_api.get(context, vol_id)
volume_status = volume['status']
if volume_status not in ['creating', 'downloading']:
if volume_status != 'available':
LOG.warning(_LW("Volume id: %s finished being created but "
"was not set as 'available'"), vol_id)
return attempt
greenthread.sleep(CONF.block_device_allocate_retries_interval)
# NOTE(harlowja): Should only happen if we ran out of attempts
raise exception.VolumeNotCreated(volume_id=vol_id,
seconds=int(time.time() - start),
attempts=attempts)
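    # Worked example for the polling above (illustrative): with
    # block_device_allocate_retries=3 the loop makes retries + 1 = 4
    # attempts, sleeping block_device_allocate_retries_interval seconds
    # between polls, and raises VolumeNotCreated if the volume never leaves
    # the 'creating'/'downloading' states.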
def _decode_files(self, injected_files):
"""Base64 decode the list of files to inject."""
if not injected_files:
return []
def _decode(f):
path, contents = f
try:
decoded = base64.b64decode(contents)
return path, decoded
except TypeError:
raise exception.Base64Exception(path=path)
return [_decode(f) for f in injected_files]
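    # Illustrative payload for _decode_files (assumed example values):
    # injected_files is a list of (path, base64_contents) pairs, e.g.
    # [('/etc/motd', 'aGVsbG8=')] decodes to [('/etc/motd', 'hello')];
    # content that is not valid base64 raises Base64Exception.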
def _run_instance(self, context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, node, instance,
legacy_bdm_in_spec):
"""Launch a new instance with specified options."""
extra_usage_info = {}
def notify(status, msg="", fault=None, **kwargs):
"""Send a create.{start,error,end} notification."""
type_ = "create.%(status)s" % dict(status=status)
info = extra_usage_info.copy()
info['message'] = msg
self._notify_about_instance_usage(context, instance, type_,
extra_usage_info=info, fault=fault, **kwargs)
try:
self._prebuild_instance(context, instance)
if request_spec and request_spec.get('image'):
image_meta = request_spec['image']
else:
image_meta = {}
extra_usage_info = {"image_name": image_meta.get('name', '')}
notify("start") # notify that build is starting
instance, network_info = self._build_instance(context,
request_spec, filter_properties, requested_networks,
injected_files, admin_password, is_first_time, node,
instance, image_meta, legacy_bdm_in_spec)
notify("end", msg=_("Success"), network_info=network_info)
except exception.RescheduledException as e:
# Instance build encountered an error, and has been rescheduled.
notify("error", fault=e)
except exception.BuildAbortException as e:
# Instance build aborted due to a non-failure
LOG.info(e)
notify("end", msg=e.format_message()) # notify that build is done
except Exception as e:
# Instance build encountered a non-recoverable error:
with excutils.save_and_reraise_exception():
self._set_instance_error_state(context, instance)
notify("error", fault=e) # notify that build failed
def _prebuild_instance(self, context, instance):
self._check_instance_exists(context, instance)
try:
self._start_building(context, instance)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
msg = _("Instance disappeared before we could start it")
# Quickly bail out of here
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
def _validate_instance_group_policy(self, context, instance,
filter_properties):
# NOTE(russellb) Instance group policy is enforced by the scheduler.
# However, there is a race condition with the enforcement of
# anti-affinity. Since more than one instance may be scheduled at the
# same time, it's possible that more than one instance with an
# anti-affinity policy may end up here. This is a validation step to
# make sure that starting the instance here doesn't violate the policy.
scheduler_hints = filter_properties.get('scheduler_hints') or {}
group_hint = scheduler_hints.get('group')
if not group_hint:
return
@utils.synchronized(group_hint)
def _do_validation(context, instance, group_hint):
group = objects.InstanceGroup.get_by_hint(context, group_hint)
if 'anti-affinity' not in group.policies:
return
group_hosts = group.get_hosts(context, exclude=[instance.uuid])
if self.host in group_hosts:
msg = _("Anti-affinity instance group policy was violated.")
raise exception.RescheduledException(
instance_uuid=instance.uuid,
reason=msg)
_do_validation(context, instance, group_hint)
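    # Illustrative filter_properties fragment (sketch, values are examples)
    # that triggers the validation above:
    #
    #     {'scheduler_hints': {'group': '<instance-group uuid or name>'}}
    #
    # If the group has an 'anti-affinity' policy and this host already runs
    # another member, a RescheduledException is raised.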
def _build_instance(self, context, request_spec, filter_properties,
requested_networks, injected_files, admin_password, is_first_time,
node, instance, image_meta, legacy_bdm_in_spec):
original_context = context
context = context.elevated()
# NOTE(danms): This method is deprecated, but could be called,
# and if it is, it will have an old megatuple for requested_networks.
if requested_networks is not None:
requested_networks_obj = objects.NetworkRequestList(
objects=[objects.NetworkRequest.from_tuple(t)
for t in requested_networks])
else:
requested_networks_obj = None
        # If using neutron security groups, pass the requested security
        # groups to allocate_for_instance()
if request_spec and self.is_neutron_security_groups:
security_groups = request_spec.get('security_group')
else:
security_groups = []
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug("No node specified, defaulting to %s", node)
network_info = None
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
# b64 decode the files to inject:
injected_files_orig = injected_files
injected_files = self._decode_files(injected_files)
rt = self._get_resource_tracker(node)
try:
limits = filter_properties.get('limits', {})
with rt.instance_claim(context, instance, limits) as inst_claim:
# NOTE(russellb) It's important that this validation be done
# *after* the resource tracker instance claim, as that is where
# the host is set on the instance.
self._validate_instance_group_policy(context, instance,
filter_properties)
macs = self.driver.macs_for_instance(instance)
dhcp_options = self.driver.dhcp_options_for_instance(instance)
network_info = self._allocate_network(original_context,
instance, requested_networks_obj, macs,
security_groups, dhcp_options)
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
self._default_block_device_names(context, instance, image_meta,
bdms)
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.BLOCK_DEVICE_MAPPING
instance.numa_topology = inst_claim.claimed_numa_topology
instance.save()
block_device_info = self._prep_block_device(
context, instance, bdms)
set_access_ip = (is_first_time and
not instance.access_ip_v4 and
not instance.access_ip_v6)
flavor = None
if filter_properties is not None:
flavor = filter_properties.get('instance_type')
instance = self._spawn(context, instance, image_meta,
network_info, block_device_info,
injected_files, admin_password,
set_access_ip=set_access_ip,
flavor=flavor)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
# the instance got deleted during the spawn
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
try:
self._deallocate_network(context, instance)
except Exception:
msg = _LE('Failed to dealloc network '
'for deleted instance')
LOG.exception(msg, instance=instance)
raise exception.BuildAbortException(
instance_uuid=instance.uuid,
reason=_("Instance disappeared during build"))
except (exception.UnexpectedTaskStateError,
exception.VirtualInterfaceCreateException) as e:
# Don't try to reschedule, just log and reraise.
with excutils.save_and_reraise_exception():
LOG.debug(e.format_message(), instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
except exception.InvalidBDM:
with excutils.save_and_reraise_exception():
if network_info is not None:
network_info.wait(do_raise=False)
try:
self._deallocate_network(context, instance)
except Exception:
msg = _LE('Failed to dealloc network '
'for failed instance')
LOG.exception(msg, instance=instance)
except Exception:
exc_info = sys.exc_info()
# try to re-schedule instance:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
rescheduled = self._reschedule_or_error(original_context, instance,
exc_info, requested_networks, admin_password,
injected_files_orig, is_first_time, request_spec,
filter_properties, bdms, legacy_bdm_in_spec)
if rescheduled:
# log the original build error
self._log_original_error(exc_info, instance.uuid)
raise exception.RescheduledException(
instance_uuid=instance.uuid,
reason=six.text_type(exc_info[1]))
else:
# not re-scheduling, go to error:
raise exc_info[0], exc_info[1], exc_info[2]
# spawn success
return instance, network_info
def _log_original_error(self, exc_info, instance_uuid):
LOG.error(_LE('Error: %s'), exc_info[1], instance_uuid=instance_uuid,
exc_info=exc_info)
def _reschedule_or_error(self, context, instance, exc_info,
requested_networks, admin_password, injected_files, is_first_time,
request_spec, filter_properties, bdms=None,
legacy_bdm_in_spec=True):
"""Try to re-schedule the build or re-raise the original build error to
error out the instance.
"""
original_context = context
context = context.elevated()
instance_uuid = instance.uuid
rescheduled = False
compute_utils.add_instance_fault_from_exc(context,
instance, exc_info[1], exc_info=exc_info)
self._notify_about_instance_usage(context, instance,
'instance.create.error', fault=exc_info[1])
try:
LOG.debug("Clean up resource before rescheduling.",
instance=instance)
if bdms is None:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self._shutdown_instance(context, instance,
bdms, requested_networks)
self._cleanup_volumes(context, instance.uuid, bdms)
except Exception:
# do not attempt retry if clean up failed:
with excutils.save_and_reraise_exception():
self._log_original_error(exc_info, instance_uuid)
try:
method_args = (request_spec, admin_password, injected_files,
requested_networks, is_first_time, filter_properties,
legacy_bdm_in_spec)
task_state = task_states.SCHEDULING
rescheduled = self._reschedule(original_context, request_spec,
filter_properties, instance,
self.scheduler_rpcapi.run_instance, method_args,
task_state, exc_info)
except Exception:
rescheduled = False
LOG.exception(_LE("Error trying to reschedule"),
instance_uuid=instance_uuid)
return rescheduled
def _reschedule(self, context, request_spec, filter_properties,
instance, reschedule_method, method_args, task_state,
exc_info=None):
"""Attempt to re-schedule a compute operation."""
instance_uuid = instance.uuid
retry = filter_properties.get('retry', None)
if not retry:
# no retry information, do not reschedule.
LOG.debug("Retry info not present, will not reschedule",
instance_uuid=instance_uuid)
return
if not request_spec:
LOG.debug("No request spec, will not reschedule",
instance_uuid=instance_uuid)
return
LOG.debug("Re-scheduling %(method)s: attempt %(num)d",
{'method': reschedule_method.func_name,
'num': retry['num_attempts']}, instance_uuid=instance_uuid)
# reset the task state:
self._instance_update(context, instance_uuid, task_state=task_state)
if exc_info:
# stringify to avoid circular ref problem in json serialization:
retry['exc'] = traceback.format_exception_only(exc_info[0],
exc_info[1])
reschedule_method(context, *method_args)
return True
@periodic_task.periodic_task
def _check_instance_build_time(self, context):
"""Ensure that instances are not stuck in build."""
timeout = CONF.instance_build_timeout
if timeout == 0:
return
filters = {'vm_state': vm_states.BUILDING,
'host': self.host}
building_insts = objects.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
for instance in building_insts:
if timeutils.is_older_than(instance.created_at, timeout):
self._set_instance_error_state(context, instance)
LOG.warning(_LW("Instance build timed out. Set to error "
"state."), instance=instance)
def _check_instance_exists(self, context, instance):
"""Ensure an instance with the same name is not already present."""
if self.driver.instance_exists(instance):
raise exception.InstanceExists(name=instance.name)
def _start_building(self, context, instance):
"""Save the host and launched_on fields and log appropriately."""
LOG.info(_LI('Starting instance...'), context=context,
instance=instance)
self._instance_update(context, instance.uuid,
vm_state=vm_states.BUILDING,
task_state=None,
expected_task_state=(task_states.SCHEDULING,
None))
def _allocate_network_async(self, context, instance, requested_networks,
macs, security_groups, is_vpn, dhcp_options):
"""Method used to allocate networks in the background.
Broken out for testing.
"""
LOG.debug("Allocating IP information in the background.",
instance=instance)
retries = CONF.network_allocate_retries
if retries < 0:
LOG.warning(_LW("Treating negative config value (%(retries)s) for "
"'network_allocate_retries' as 0."),
{'retries': retries})
retries = 0
attempts = retries + 1
retry_time = 1
for attempt in range(1, attempts + 1):
try:
nwinfo = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
requested_networks=requested_networks,
macs=macs,
security_groups=security_groups,
dhcp_options=dhcp_options)
LOG.debug('Instance network_info: |%s|', nwinfo,
instance=instance)
sys_meta = instance.system_metadata
sys_meta['network_allocated'] = 'True'
self._instance_update(context, instance.uuid,
system_metadata=sys_meta)
return nwinfo
except Exception:
exc_info = sys.exc_info()
log_info = {'attempt': attempt,
'attempts': attempts}
if attempt == attempts:
LOG.exception(_LE('Instance failed network setup '
'after %(attempts)d attempt(s)'),
log_info)
raise exc_info[0], exc_info[1], exc_info[2]
LOG.warning(_LW('Instance failed network setup '
'(attempt %(attempt)d of %(attempts)d)'),
log_info, instance=instance)
time.sleep(retry_time)
retry_time *= 2
if retry_time > 30:
retry_time = 30
# Not reached.
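    # Backoff note, derived from the loop above: retry_time starts at one
    # second and doubles after each failed attempt (1, 2, 4, 8, 16, 30, 30,
    # ...), capped at 30 seconds, until network_allocate_retries + 1 attempts
    # have been made.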
def _build_networks_for_instance(self, context, instance,
requested_networks, security_groups):
# If we're here from a reschedule the network may already be allocated.
if strutils.bool_from_string(
instance.system_metadata.get('network_allocated', 'False')):
            # NOTE(alex_xu): network_allocated being True means the network
            # resources were already allocated during a previous scheduling
            # attempt and the setup on that host was cleaned up afterwards.
            # After rescheduling, the network needs to be set up again on
            # the new host.
self.network_api.setup_instance_network_on_host(
context, instance, instance.host)
return self._get_instance_nw_info(context, instance)
if not self.is_neutron_security_groups:
security_groups = []
macs = self.driver.macs_for_instance(instance)
dhcp_options = self.driver.dhcp_options_for_instance(instance)
network_info = self._allocate_network(context, instance,
requested_networks, macs, security_groups, dhcp_options)
if not instance.access_ip_v4 and not instance.access_ip_v6:
# If CONF.default_access_ip_network_name is set, grab the
# corresponding network and set the access ip values accordingly.
# Note that when there are multiple ips to choose from, an
# arbitrary one will be chosen.
network_name = CONF.default_access_ip_network_name
if not network_name:
return network_info
for vif in network_info:
if vif['network']['label'] == network_name:
for ip in vif.fixed_ips():
if ip['version'] == 4:
instance.access_ip_v4 = ip['address']
if ip['version'] == 6:
instance.access_ip_v6 = ip['address']
instance.save()
break
return network_info
def _allocate_network(self, context, instance, requested_networks, macs,
security_groups, dhcp_options):
"""Start network allocation asynchronously. Return an instance
of NetworkInfoAsyncWrapper that can be used to retrieve the
allocated networks when the operation has finished.
"""
# NOTE(comstud): Since we're allocating networks asynchronously,
# this task state has little meaning, as we won't be in this
# state for very long.
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.NETWORKING
instance.save(expected_task_state=[None])
self._update_resource_tracker(context, instance)
is_vpn = pipelib.is_vpn_image(instance.image_ref)
return network_model.NetworkInfoAsyncWrapper(
self._allocate_network_async, context, instance,
requested_networks, macs, security_groups, is_vpn,
dhcp_options)
def _default_root_device_name(self, instance, image_meta, root_bdm):
try:
return self.driver.default_root_device_name(instance,
image_meta,
root_bdm)
except NotImplementedError:
return compute_utils.get_next_device_name(instance, [])
def _default_device_names_for_instance(self, instance,
root_device_name,
*block_device_lists):
try:
self.driver.default_device_names_for_instance(instance,
root_device_name,
*block_device_lists)
except NotImplementedError:
compute_utils.default_device_names_for_instance(
instance, root_device_name, *block_device_lists)
def _default_block_device_names(self, context, instance,
image_meta, block_devices):
"""Verify that all the devices have the device_name set. If not,
provide a default name.
        It also ensures that there is a root_device_name and that it is set
        to the first block device in the boot sequence (boot_index=0).
"""
root_bdm = block_device.get_root_bdm(block_devices)
if not root_bdm:
return
# Get the root_device_name from the root BDM or the instance
root_device_name = None
update_root_bdm = False
if root_bdm.device_name:
root_device_name = root_bdm.device_name
instance.root_device_name = root_device_name
elif instance.root_device_name:
root_device_name = instance.root_device_name
root_bdm.device_name = root_device_name
update_root_bdm = True
else:
root_device_name = self._default_root_device_name(instance,
image_meta,
root_bdm)
instance.root_device_name = root_device_name
root_bdm.device_name = root_device_name
update_root_bdm = True
if update_root_bdm:
root_bdm.save()
ephemerals = filter(block_device.new_format_is_ephemeral,
block_devices)
swap = filter(block_device.new_format_is_swap,
block_devices)
block_device_mapping = filter(
driver_block_device.is_block_device_mapping, block_devices)
self._default_device_names_for_instance(instance,
root_device_name,
ephemerals,
swap,
block_device_mapping)
def _prep_block_device(self, context, instance, bdms,
do_check_attach=True):
"""Set up the block device for an instance with error logging."""
try:
block_device_info = {
'root_device_name': instance.root_device_name,
'swap': driver_block_device.convert_swap(bdms),
'ephemerals': driver_block_device.convert_ephemerals(bdms),
'block_device_mapping': (
driver_block_device.attach_block_devices(
driver_block_device.convert_volumes(bdms),
context, instance, self.volume_api,
self.driver, do_check_attach=do_check_attach) +
driver_block_device.attach_block_devices(
driver_block_device.convert_snapshots(bdms),
context, instance, self.volume_api,
self.driver, self._await_block_device_map_created,
do_check_attach=do_check_attach) +
driver_block_device.attach_block_devices(
driver_block_device.convert_images(bdms),
context, instance, self.volume_api,
self.driver, self._await_block_device_map_created,
do_check_attach=do_check_attach) +
driver_block_device.attach_block_devices(
driver_block_device.convert_blanks(bdms),
context, instance, self.volume_api,
self.driver, self._await_block_device_map_created,
do_check_attach=do_check_attach))
}
if self.use_legacy_block_device_info:
for bdm_type in ('swap', 'ephemerals', 'block_device_mapping'):
block_device_info[bdm_type] = \
driver_block_device.legacy_block_devices(
block_device_info[bdm_type])
# Get swap out of the list
block_device_info['swap'] = driver_block_device.get_swap(
block_device_info['swap'])
return block_device_info
except exception.OverQuota:
msg = _LW('Failed to create block device for instance due to '
'being over volume resource quota')
            LOG.warning(msg, instance=instance)
raise exception.InvalidBDM()
except Exception:
LOG.exception(_LE('Instance failed block device setup'),
instance=instance)
raise exception.InvalidBDM()
@object_compat
def _spawn(self, context, instance, image_meta, network_info,
block_device_info, injected_files, admin_password,
set_access_ip=False, flavor=None):
"""Spawn an instance with error logging and update its power state."""
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
instance.save(expected_task_state=task_states.BLOCK_DEVICE_MAPPING)
try:
self.driver.spawn(context, instance, image_meta,
injected_files, admin_password,
network_info,
block_device_info,
flavor=flavor)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Instance failed to spawn'),
instance=instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
def _set_access_ip_values():
"""Add access ip values for a given instance.
If CONF.default_access_ip_network_name is set, this method will
grab the corresponding network and set the access ip values
accordingly. Note that when there are multiple ips to choose
from, an arbitrary one will be chosen.
"""
network_name = CONF.default_access_ip_network_name
if not network_name:
return
for vif in network_info:
if vif['network']['label'] == network_name:
for ip in vif.fixed_ips():
if ip['version'] == 4:
instance.access_ip_v4 = ip['address']
if ip['version'] == 6:
instance.access_ip_v6 = ip['address']
return
if set_access_ip:
_set_access_ip_values()
network_info.wait(do_raise=True)
instance.info_cache.network_info = network_info
instance.save(expected_task_state=task_states.SPAWNING)
return instance
def _notify_about_instance_usage(self, context, instance, event_suffix,
network_info=None, system_metadata=None,
extra_usage_info=None, fault=None):
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, event_suffix,
network_info=network_info,
system_metadata=system_metadata,
extra_usage_info=extra_usage_info, fault=fault)
def _deallocate_network(self, context, instance,
requested_networks=None):
LOG.debug('Deallocating network for instance', instance=instance)
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
def _get_instance_block_device_info(self, context, instance,
refresh_conn_info=False,
bdms=None):
"""Transform block devices to the driver block_device format."""
if not bdms:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
swap = driver_block_device.convert_swap(bdms)
ephemerals = driver_block_device.convert_ephemerals(bdms)
block_device_mapping = (
driver_block_device.convert_volumes(bdms) +
driver_block_device.convert_snapshots(bdms) +
driver_block_device.convert_images(bdms))
if not refresh_conn_info:
# if the block_device_mapping has no value in connection_info
            # (returned as None), don't include it in the mapping
block_device_mapping = [
bdm for bdm in block_device_mapping
if bdm.get('connection_info')]
else:
block_device_mapping = driver_block_device.refresh_conn_infos(
block_device_mapping, context, instance, self.volume_api,
self.driver)
if self.use_legacy_block_device_info:
swap = driver_block_device.legacy_block_devices(swap)
ephemerals = driver_block_device.legacy_block_devices(ephemerals)
block_device_mapping = driver_block_device.legacy_block_devices(
block_device_mapping)
# Get swap out of the list
swap = driver_block_device.get_swap(swap)
root_device_name = instance.get('root_device_name')
return {'swap': swap,
'root_device_name': root_device_name,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
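    # Shape of the mapping returned above (descriptive): {'swap': ...,
    # 'root_device_name': ..., 'ephemerals': [...],
    # 'block_device_mapping': [...]}, converted to the legacy format when
    # self.use_legacy_block_device_info is set by the driver.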
# NOTE(mikal): No object_compat wrapper on this method because its
# callers all pass objects already
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def build_and_run_instance(self, context, instance, image, request_spec,
filter_properties, admin_password=None,
injected_files=None, requested_networks=None,
security_groups=None, block_device_mapping=None,
node=None, limits=None):
# NOTE(danms): Remove this in v4.0 of the RPC API
if (requested_networks and
not isinstance(requested_networks,
objects.NetworkRequestList)):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest.from_tuple(t)
for t in requested_networks])
# NOTE(melwitt): Remove this in v4.0 of the RPC API
flavor = filter_properties.get('instance_type')
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
filter_properties = dict(filter_properties, instance_type=flavor)
@utils.synchronized(instance.uuid)
def _locked_do_build_and_run_instance(*args, **kwargs):
# NOTE(danms): We grab the semaphore with the instance uuid
# locked because we could wait in line to build this instance
# for a while and we want to make sure that nothing else tries
# to do anything with this instance while we wait.
with self._build_semaphore:
self._do_build_and_run_instance(*args, **kwargs)
# NOTE(danms): We spawn here to return the RPC worker thread back to
# the pool. Since what follows could take a really long time, we don't
# want to tie up RPC workers.
utils.spawn_n(_locked_do_build_and_run_instance,
context, instance, image, request_spec,
filter_properties, admin_password, injected_files,
requested_networks, security_groups,
block_device_mapping, node, limits)
@hooks.add_hook('build_instance')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def _do_build_and_run_instance(self, context, instance, image,
request_spec, filter_properties, admin_password, injected_files,
requested_networks, security_groups, block_device_mapping,
node=None, limits=None):
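        """Build the instance and map failures to build_results values.
        Returns build_results.ACTIVE on success, RESCHEDULED when the build
        is handed back to the conductor for another attempt, or FAILED when
        the build cannot proceed.
        """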
try:
LOG.info(_LI('Starting instance...'), context=context,
instance=instance)
instance.vm_state = vm_states.BUILDING
instance.task_state = None
instance.save(expected_task_state=
(task_states.SCHEDULING, None))
except exception.InstanceNotFound:
msg = 'Instance disappeared before build.'
LOG.debug(msg, instance=instance)
return build_results.FAILED
except exception.UnexpectedTaskStateError as e:
LOG.debug(e.format_message(), instance=instance)
return build_results.FAILED
# b64 decode the files to inject:
decoded_files = self._decode_files(injected_files)
if limits is None:
limits = {}
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug('No node specified, defaulting to %s', node,
instance=instance)
try:
self._build_and_run_instance(context, instance, image,
decoded_files, admin_password, requested_networks,
security_groups, block_device_mapping, node, limits,
filter_properties)
return build_results.ACTIVE
except exception.RescheduledException as e:
LOG.debug(e.format_message(), instance=instance)
retry = filter_properties.get('retry', None)
if not retry:
# no retry information, do not reschedule.
LOG.debug("Retry info not present, will not reschedule",
instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
compute_utils.add_instance_fault_from_exc(context,
instance, e, sys.exc_info())
self._set_instance_error_state(context, instance)
return build_results.FAILED
retry['exc'] = traceback.format_exception(*sys.exc_info())
# NOTE(comstud): Deallocate networks if the driver wants
# us to do so.
if self.driver.deallocate_networks_on_reschedule(instance):
self._cleanup_allocated_networks(context, instance,
requested_networks)
else:
                # NOTE(alex_xu): Networks are already allocated and we don't
                # want to deallocate them before rescheduling. But we do need
                # to clean up the network resources set up on this host
                # before rescheduling.
self.network_api.cleanup_instance_network_on_host(
context, instance, self.host)
instance.task_state = task_states.SCHEDULING
instance.save()
self.compute_task_api.build_instances(context, [instance],
image, filter_properties, admin_password,
injected_files, requested_networks, security_groups,
block_device_mapping)
return build_results.RESCHEDULED
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
msg = 'Instance disappeared during build.'
LOG.debug(msg, instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
return build_results.FAILED
except exception.BuildAbortException as e:
LOG.exception(e.format_message(), instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._cleanup_volumes(context, instance.uuid,
block_device_mapping, raise_exc=False)
compute_utils.add_instance_fault_from_exc(context, instance,
e, sys.exc_info())
self._set_instance_error_state(context, instance)
return build_results.FAILED
except Exception as e:
# Should not reach here.
msg = _LE('Unexpected build failure, not rescheduling build.')
LOG.exception(msg, instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._cleanup_volumes(context, instance.uuid,
block_device_mapping, raise_exc=False)
compute_utils.add_instance_fault_from_exc(context, instance,
e, sys.exc_info())
self._set_instance_error_state(context, instance)
return build_results.FAILED
def _build_and_run_instance(self, context, instance, image, injected_files,
admin_password, requested_networks, security_groups,
block_device_mapping, node, limits, filter_properties):
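        """Claim resources, set up networking/block devices and spawn.
        Raises RescheduledException for failures that may succeed on another
        host and BuildAbortException for failures that should not be
        rescheduled.
        """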
image_name = image.get('name')
self._notify_about_instance_usage(context, instance, 'create.start',
extra_usage_info={'image_name': image_name})
try:
rt = self._get_resource_tracker(node)
with rt.instance_claim(context, instance, limits) as inst_claim:
# NOTE(russellb) It's important that this validation be done
# *after* the resource tracker instance claim, as that is where
# the host is set on the instance.
self._validate_instance_group_policy(context, instance,
filter_properties)
with self._build_resources(context, instance,
requested_networks, security_groups, image,
block_device_mapping) as resources:
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
instance.numa_topology = inst_claim.claimed_numa_topology
instance.save(expected_task_state=
task_states.BLOCK_DEVICE_MAPPING)
block_device_info = resources['block_device_info']
network_info = resources['network_info']
flavor = None
if filter_properties is not None:
flavor = filter_properties.get('instance_type')
self.driver.spawn(context, instance, image,
injected_files, admin_password,
network_info=network_info,
block_device_info=block_device_info,
flavor=flavor)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as e:
with excutils.save_and_reraise_exception():
self._notify_about_instance_usage(context, instance,
'create.end', fault=e)
except exception.ComputeResourcesUnavailable as e:
LOG.debug(e.format_message(), instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
raise exception.RescheduledException(
instance_uuid=instance.uuid, reason=e.format_message())
except exception.BuildAbortException as e:
with excutils.save_and_reraise_exception():
LOG.debug(e.format_message(), instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
except (exception.FixedIpLimitExceeded,
exception.NoMoreNetworks, exception.NoMoreFixedIps) as e:
LOG.warning(_LW('No more network or fixed IP to be allocated'),
instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
msg = _('Failed to allocate the network(s) with error %s, '
'not rescheduling.') % e.format_message()
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
except (exception.VirtualInterfaceCreateException,
exception.VirtualInterfaceMacAddressException) as e:
LOG.exception(_LE('Failed to allocate network(s)'),
instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
except (exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.ImageNotActive,
exception.ImageUnacceptable) as e:
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception as e:
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
raise exception.RescheduledException(
instance_uuid=instance.uuid, reason=six.text_type(e))
# NOTE(alaski): This is only useful during reschedules, remove it now.
instance.system_metadata.pop('network_allocated', None)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
try:
instance.save(expected_task_state=task_states.SPAWNING)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as e:
with excutils.save_and_reraise_exception():
self._notify_about_instance_usage(context, instance,
'create.end', fault=e)
self._notify_about_instance_usage(context, instance, 'create.end',
extra_usage_info={'message': _('Success')},
network_info=network_info)
@contextlib.contextmanager
def _build_resources(self, context, instance, requested_networks,
security_groups, image, block_device_mapping):
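        """Context manager that prepares network and block device resources.
        Yields a dict with 'network_info' and 'block_device_info'; if the
        wrapped build fails, the instance is shut down here without
        deallocating its networks.
        """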
resources = {}
network_info = None
try:
network_info = self._build_networks_for_instance(context, instance,
requested_networks, security_groups)
resources['network_info'] = network_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
raise
except exception.UnexpectedTaskStateError as e:
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
            # Because this allocation is async, any failures are likely to
            # occur when the driver accesses network_info during spawn().
LOG.exception(_LE('Failed to allocate network(s)'),
instance=instance)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
self._default_block_device_names(context, instance, image,
block_device_mapping)
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.BLOCK_DEVICE_MAPPING
instance.save()
block_device_info = self._prep_block_device(context, instance,
block_device_mapping)
resources['block_device_info'] = block_device_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
with excutils.save_and_reraise_exception() as ctxt:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
except exception.UnexpectedTaskStateError as e:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
LOG.exception(_LE('Failure prepping block device'),
instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
msg = _('Failure prepping block device.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
yield resources
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if not isinstance(exc, (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError)):
LOG.exception(_LE('Instance failed to spawn'),
instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
try:
self._shutdown_instance(context, instance,
block_device_mapping, requested_networks,
try_deallocate_networks=False)
except Exception:
ctxt.reraise = False
msg = _('Could not clean up failed build,'
' not rescheduling')
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=msg)
def _cleanup_allocated_networks(self, context, instance,
requested_networks):
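        """Deallocate the instance's networks and record that none remain
        allocated in the instance's system metadata.
        """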
try:
self._deallocate_network(context, instance, requested_networks)
except Exception:
msg = _LE('Failed to deallocate networks')
LOG.exception(msg, instance=instance)
return
instance.system_metadata['network_allocated'] = 'False'
try:
instance.save()
except exception.InstanceNotFound:
# NOTE(alaski): It's possible that we're cleaning up the networks
# because the instance was deleted. If that's the case then this
# exception will be raised by instance.save()
pass
@object_compat
@messaging.expected_exceptions(exception.BuildAbortException,
exception.UnexpectedTaskStateError,
exception.VirtualInterfaceCreateException,
exception.RescheduledException)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def run_instance(self, context, instance, request_spec,
filter_properties, requested_networks,
injected_files, admin_password,
is_first_time, node, legacy_bdm_in_spec):
# NOTE(alaski) This method should be deprecated when the scheduler and
# compute rpc interfaces are bumped to 4.x, and slated for removal in
# 5.x as it is no longer used.
if filter_properties is None:
filter_properties = {}
@utils.synchronized(instance.uuid)
def do_run_instance():
self._run_instance(context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, node, instance,
legacy_bdm_in_spec)
do_run_instance()
def _try_deallocate_network(self, context, instance,
requested_networks=None):
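        """Deallocate networking; put the instance into ERROR on failure."""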
try:
# tear down allocated network structure
self._deallocate_network(context, instance, requested_networks)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to deallocate network for instance.'),
instance=instance)
self._set_instance_error_state(context, instance)
def _get_power_off_values(self, context, instance, clean_shutdown):
"""Get the timing configuration for powering down this instance."""
if clean_shutdown:
timeout = compute_utils.get_value_from_system_metadata(instance,
key='image_os_shutdown_timeout', type=int,
default=CONF.shutdown_timeout)
retry_interval = self.SHUTDOWN_RETRY_INTERVAL
else:
timeout = 0
retry_interval = 0
return timeout, retry_interval
def _power_off_instance(self, context, instance, clean_shutdown=True):
"""Power off an instance on this host."""
timeout, retry_interval = self._get_power_off_values(context,
instance, clean_shutdown)
self.driver.power_off(instance, timeout, retry_interval)
def _shutdown_instance(self, context, instance,
bdms, requested_networks=None, notify=True,
try_deallocate_networks=True):
"""Shutdown an instance on this host.
:param:context: security context
:param:instance: a nova.objects.Instance object
:param:bdms: the block devices for the instance to be torn
down
:param:requested_networks: the networks on which the instance
has ports
:param:notify: true if a final usage notification should be
emitted
:param:try_deallocate_networks: false if we should avoid
trying to teardown networking
"""
context = context.elevated()
LOG.info(_LI('%(action_str)s instance') %
{'action_str': 'Terminating'},
context=context, instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.start")
network_info = compute_utils.get_nw_info_for_instance(instance)
# NOTE(vish) get bdms before destroying the instance
vol_bdms = [bdm for bdm in bdms if bdm.is_volume]
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
# NOTE(melwitt): attempt driver destroy before releasing ip, may
# want to keep ip allocated for certain failures
try:
self.driver.destroy(context, instance, network_info,
block_device_info)
except exception.InstancePowerOffFailure:
# if the instance can't power off, don't release the ip
with excutils.save_and_reraise_exception():
pass
except Exception:
with excutils.save_and_reraise_exception():
# deallocate ip and fail without proceeding to
# volume api calls, preserving current behavior
if try_deallocate_networks:
self._try_deallocate_network(context, instance,
requested_networks)
if try_deallocate_networks:
self._try_deallocate_network(context, instance, requested_networks)
for bdm in vol_bdms:
try:
# NOTE(vish): actual driver detach done in driver.destroy, so
# just tell cinder that we are done with it.
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
self.volume_api.detach(context, bdm.volume_id)
except exception.DiskNotFound as exc:
LOG.debug('Ignoring DiskNotFound: %s', exc,
instance=instance)
except exception.VolumeNotFound as exc:
LOG.debug('Ignoring VolumeNotFound: %s', exc,
instance=instance)
except (cinder_exception.EndpointNotFound,
keystone_exception.EndpointNotFound) as exc:
LOG.warning(_LW('Ignoring EndpointNotFound: %s'), exc,
instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.end")
def _cleanup_volumes(self, context, instance_uuid, bdms, raise_exc=True):
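        """Delete any volumes marked delete_on_termination in the BDMs.
        Failures are logged and, when raise_exc is True, the most recently
        captured exception is re-raised after all volumes have been
        processed.
        """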
exc_info = None
for bdm in bdms:
LOG.debug("terminating bdm %s", bdm,
instance_uuid=instance_uuid)
if bdm.volume_id and bdm.delete_on_termination:
try:
self.volume_api.delete(context, bdm.volume_id)
except Exception as exc:
exc_info = sys.exc_info()
LOG.warning(_LW('Failed to delete volume: %(volume_id)s '
'due to %(exc)s'),
{'volume_id': bdm.volume_id, 'exc': exc})
if exc_info is not None and raise_exc:
six.reraise(exc_info[0], exc_info[1], exc_info[2])
@hooks.add_hook("delete_instance")
def _delete_instance(self, context, instance, bdms, quotas):
"""Delete an instance on this host. Commit or rollback quotas
as necessary.
:param context: nova request context
:param instance: nova.objects.instance.Instance object
:param bdms: nova.objects.block_device.BlockDeviceMappingList object
:param quotas: nova.objects.quotas.Quotas object
"""
was_soft_deleted = instance.vm_state == vm_states.SOFT_DELETED
if was_soft_deleted:
# Instances in SOFT_DELETED vm_state have already had quotas
# decremented.
try:
quotas.rollback()
except Exception:
pass
try:
events = self.instance_events.clear_events_for_instance(instance)
if events:
LOG.debug('Events pending at deletion: %(events)s',
{'events': ','.join(events.keys())},
instance=instance)
instance.info_cache.delete()
self._notify_about_instance_usage(context, instance,
"delete.start")
self._shutdown_instance(context, instance, bdms)
# NOTE(vish): We have already deleted the instance, so we have
# to ignore problems cleaning up the volumes. It
# would be nice to let the user know somehow that
# the volume deletion failed, but it is not
# acceptable to have an instance that can not be
# deleted. Perhaps this could be reworked in the
# future to set an instance fault the first time
# and to only ignore the failure if the instance
# is already in ERROR.
self._cleanup_volumes(context, instance.uuid, bdms,
raise_exc=False)
# if a delete task succeeded, always update vm state and task
# state without expecting task state to be DELETING
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.power_state = power_state.NOSTATE
instance.terminated_at = timeutils.utcnow()
instance.save()
self._update_resource_tracker(context, instance)
system_meta = instance.system_metadata
instance.destroy()
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
self._complete_deletion(context,
instance,
bdms,
quotas,
system_meta)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def terminate_instance(self, context, instance, bdms, reservations):
"""Terminate an instance on this host."""
# NOTE (ndipanov): If we get non-object BDMs, just get them from the
# db again, as this means they are sent in the old format and we want
# to avoid converting them back when we can just get them.
# Remove this when we bump the RPC major version to 4.0
if (bdms and
any(not isinstance(bdm, obj_base.NovaObject)
for bdm in bdms)):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
@utils.synchronized(instance.uuid)
def do_terminate_instance(instance, bdms):
try:
self._delete_instance(context, instance, bdms, quotas)
except exception.InstanceNotFound:
LOG.info(_LI("Instance disappeared during terminate"),
instance=instance)
except Exception:
# As we're trying to delete always go to Error if something
# goes wrong that _delete_instance can't handle.
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance=instance)
self._set_instance_error_state(context, instance)
do_terminate_instance(instance, bdms)
# NOTE(johannes): This is probably better named power_off_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def stop_instance(self, context, instance, clean_shutdown=True):
"""Stopping an instance on this host."""
@utils.synchronized(instance.uuid)
def do_stop_instance():
current_power_state = self._get_power_state(context, instance)
LOG.debug('Stopping instance; current vm_state: %(vm_state)s, '
'current task_state: %(task_state)s, current DB '
'power_state: %(db_power_state)s, current VM '
'power_state: %(current_power_state)s',
dict(vm_state=instance.vm_state,
task_state=instance.task_state,
db_power_state=instance.power_state,
current_power_state=current_power_state),
instance_uuid=instance.uuid)
# NOTE(mriedem): If the instance is already powered off, we are
# possibly tearing down and racing with other operations, so we can
# expect the task_state to be None if something else updates the
# instance and we're not locking it.
expected_task_state = [task_states.POWERING_OFF]
# The list of power states is from _sync_instance_power_state.
if current_power_state in (power_state.NOSTATE,
power_state.SHUTDOWN,
power_state.CRASHED):
LOG.info(_LI('Instance is already powered off in the '
'hypervisor when stop is called.'),
instance=instance)
expected_task_state.append(None)
self._notify_about_instance_usage(context, instance,
"power_off.start")
self._power_off_instance(context, instance, clean_shutdown)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.STOPPED
instance.task_state = None
instance.save(expected_task_state=expected_task_state)
self._notify_about_instance_usage(context, instance,
"power_off.end")
do_stop_instance()
def _power_on(self, context, instance):
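        """Power on the instance via the virt driver, passing current
        network and block device information.
        """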
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.power_on(context, instance,
network_info,
block_device_info)
# NOTE(johannes): This is probably better named power_on_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def start_instance(self, context, instance):
"""Starting an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_on.start")
self._power_on(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.POWERING_ON)
self._notify_about_instance_usage(context, instance, "power_on.end")
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def soft_delete_instance(self, context, instance, reservations):
"""Soft delete an instance on this host."""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
try:
self._notify_about_instance_usage(context, instance,
"soft_delete.start")
try:
self.driver.soft_delete(instance)
except NotImplementedError:
# Fallback to just powering off the instance if the
# hypervisor doesn't implement the soft_delete method
self.driver.power_off(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.save(expected_task_state=[task_states.SOFT_DELETING])
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
quotas.commit()
self._notify_about_instance_usage(context, instance, "soft_delete.end")
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def restore_instance(self, context, instance):
"""Restore a soft-deleted instance on this host."""
self._notify_about_instance_usage(context, instance, "restore.start")
try:
self.driver.restore(instance)
except NotImplementedError:
# Fallback to just powering on the instance if the hypervisor
# doesn't implement the restore method
self._power_on(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.RESTORING)
self._notify_about_instance_usage(context, instance, "restore.end")
def _rebuild_default_impl(self, context, instance, image_meta,
injected_files, admin_password, bdms,
detach_block_devices, attach_block_devices,
network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False):
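        """Default rebuild used when the driver does not implement rebuild().
        Detaches block devices, destroys the old guest unless this is a
        recreate, reattaches block devices and spawns from the image.
        """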
if preserve_ephemeral:
# The default code path does not support preserving ephemeral
# partitions.
raise exception.PreserveEphemeralNotSupported()
detach_block_devices(context, bdms)
if not recreate:
self.driver.destroy(context, instance, network_info,
block_device_info=block_device_info)
instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
instance.save(expected_task_state=[task_states.REBUILDING])
new_block_device_info = attach_block_devices(context, instance, bdms)
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(
expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
self.driver.spawn(context, instance, image_meta, injected_files,
admin_password, network_info=network_info,
block_device_info=new_block_device_info)
@object_compat
@messaging.expected_exceptions(exception.PreserveEphemeralNotSupported)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
remakes the VM with given 'metadata' and 'personalities'.
:param context: `nova.RequestContext` object
:param instance: Instance object
:param orig_image_ref: Original image_ref before rebuild
:param image_ref: New image_ref for rebuild
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
:param orig_sys_metadata: instance system metadata from pre-rebuild
:param bdms: block-device-mappings to use for rebuild
:param recreate: True if the instance is being recreated (e.g. the
hypervisor it was on failed) - cleanup of old state will be
skipped.
:param on_shared_storage: True if instance files on shared storage
:param preserve_ephemeral: True if the default ephemeral storage
partition must be preserved on rebuild
"""
context = context.elevated()
# NOTE (ndipanov): If we get non-object BDMs, just get them from the
# db again, as this means they are sent in the old format and we want
# to avoid converting them back when we can just get them.
# Remove this on the next major RPC version bump
if (bdms and
any(not isinstance(bdm, obj_base.NovaObject)
for bdm in bdms)):
bdms = None
orig_vm_state = instance.vm_state
with self._error_out_instance_on_exception(context, instance):
LOG.info(_LI("Rebuilding instance"), context=context,
instance=instance)
if recreate:
if not self.driver.capabilities["supports_recreate"]:
raise exception.InstanceRecreateNotSupported
self._check_instance_exists(context, instance)
                # To cover the case when an admin expects the instance files
                # to be on shared storage, but they are not accessible (and
                # vice versa).
if on_shared_storage != self.driver.instance_on_disk(instance):
raise exception.InvalidSharedStorage(
_("Invalid state of instance files on shared"
" storage"))
if on_shared_storage:
LOG.info(_LI('disk on shared storage, recreating using'
' existing disk'))
else:
image_ref = orig_image_ref = instance.image_ref
LOG.info(_LI("disk not on shared storage, rebuilding from:"
" '%s'"), str(image_ref))
# NOTE(mriedem): On a recreate (evacuate), we need to update
            # the instance's host and node properties to reflect its
# destination node for the recreate.
node_name = None
try:
compute_node = self._get_compute_info(context, self.host)
node_name = compute_node.hypervisor_hostname
except exception.ComputeHostNotFound:
LOG.exception(_LE('Failed to get compute_info for %s'),
self.host)
finally:
instance.host = self.host
instance.node = node_name
instance.save()
if image_ref:
image_meta = self.image_api.get(context, image_ref)
else:
image_meta = {}
# This instance.exists message should contain the original
# image_ref, not the new one. Since the DB has been updated
# to point to the new one... we have to override it.
# TODO(jaypipes): Move generate_image_url() into the nova.image.api
orig_image_ref_url = glance.generate_image_url(orig_image_ref)
extra_usage_info = {'image_ref_url': orig_image_ref_url}
self.conductor_api.notify_usage_exists(context, instance,
current_period=True, system_metadata=orig_sys_metadata,
extra_usage_info=extra_usage_info)
# This message should contain the new image_ref
extra_usage_info = {'image_name': image_meta.get('name', '')}
self._notify_about_instance_usage(context, instance,
"rebuild.start", extra_usage_info=extra_usage_info)
instance.power_state = self._get_power_state(context, instance)
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
if recreate:
self.network_api.setup_networks_on_host(
context, instance, self.host)
network_info = compute_utils.get_nw_info_for_instance(instance)
if bdms is None:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = \
self._get_instance_block_device_info(
context, instance, bdms=bdms)
def detach_block_devices(context, bdms):
for bdm in bdms:
if bdm.is_volume:
self.volume_api.detach(context, bdm.volume_id)
files = self._decode_files(injected_files)
kwargs = dict(
context=context,
instance=instance,
image_meta=image_meta,
injected_files=files,
admin_password=new_pass,
bdms=bdms,
detach_block_devices=detach_block_devices,
attach_block_devices=self._prep_block_device,
block_device_info=block_device_info,
network_info=network_info,
preserve_ephemeral=preserve_ephemeral,
recreate=recreate)
try:
self.driver.rebuild(**kwargs)
except NotImplementedError:
# NOTE(rpodolyaka): driver doesn't provide specialized version
# of rebuild, fall back to the default implementation
self._rebuild_default_impl(**kwargs)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=[task_states.REBUILD_SPAWNING])
if orig_vm_state == vm_states.STOPPED:
LOG.info(_LI("bringing vm to original state: '%s'"),
orig_vm_state, instance=instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save()
self.stop_instance(context, instance)
self._notify_about_instance_usage(
context, instance, "rebuild.end",
network_info=network_info,
extra_usage_info=extra_usage_info)
def _handle_bad_volumes_detached(self, context, instance, bad_devices,
block_device_info):
"""Handle cases where the virt-layer had to detach non-working volumes
in order to complete an operation.
"""
for bdm in block_device_info['block_device_mapping']:
if bdm.get('mount_device') in bad_devices:
try:
volume_id = bdm['connection_info']['data']['volume_id']
except KeyError:
continue
# NOTE(sirp): ideally we'd just call
# `compute_api.detach_volume` here but since that hits the
# DB directly, that's off limits from within the
# compute-manager.
#
# API-detach
LOG.info(_LI("Detaching from volume api: %s"), volume_id)
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume_id)
# Manager-detach
self.detach_volume(context, volume_id, instance)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def reboot_instance(self, context, instance, block_device_info,
reboot_type):
"""Reboot an instance on this host."""
# acknowledge the request made it to the manager
if reboot_type == "SOFT":
instance.task_state = task_states.REBOOT_PENDING
expected_states = (task_states.REBOOTING,
task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED)
else:
instance.task_state = task_states.REBOOT_PENDING_HARD
expected_states = (task_states.REBOOTING_HARD,
task_states.REBOOT_PENDING_HARD,
task_states.REBOOT_STARTED_HARD)
context = context.elevated()
LOG.info(_LI("Rebooting instance"), context=context, instance=instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance, "reboot.start")
instance.power_state = self._get_power_state(context, instance)
instance.save(expected_task_state=expected_states)
if instance.power_state != power_state.RUNNING:
state = instance.power_state
running = power_state.RUNNING
LOG.warning(_LW('trying to reboot a non-running instance:'
' (state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
context=context, instance=instance)
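        # Callback handed to the driver so that any volumes it had to detach
        # during the reboot are also detached through the volume API.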
def bad_volumes_callback(bad_devices):
self._handle_bad_volumes_detached(
context, instance, bad_devices, block_device_info)
try:
# Don't change it out of rescue mode
if instance.vm_state == vm_states.RESCUED:
new_vm_state = vm_states.RESCUED
else:
new_vm_state = vm_states.ACTIVE
new_power_state = None
if reboot_type == "SOFT":
instance.task_state = task_states.REBOOT_STARTED
expected_state = task_states.REBOOT_PENDING
else:
instance.task_state = task_states.REBOOT_STARTED_HARD
expected_state = task_states.REBOOT_PENDING_HARD
instance.save(expected_task_state=expected_state)
self.driver.reboot(context, instance,
network_info,
reboot_type,
block_device_info=block_device_info,
bad_volumes_callback=bad_volumes_callback)
except Exception as error:
with excutils.save_and_reraise_exception() as ctxt:
exc_info = sys.exc_info()
# if the reboot failed but the VM is running don't
# put it into an error state
new_power_state = self._get_power_state(context, instance)
if new_power_state == power_state.RUNNING:
LOG.warning(_LW('Reboot failed but instance is running'),
context=context, instance=instance)
compute_utils.add_instance_fault_from_exc(context,
instance, error, exc_info)
self._notify_about_instance_usage(context, instance,
'reboot.error', fault=error)
ctxt.reraise = False
else:
LOG.error(_LE('Cannot reboot instance: %s'), error,
context=context, instance=instance)
self._set_instance_obj_error_state(context, instance)
if not new_power_state:
new_power_state = self._get_power_state(context, instance)
try:
instance.power_state = new_power_state
instance.vm_state = new_vm_state
instance.task_state = None
instance.save()
except exception.InstanceNotFound:
LOG.warning(_LW("Instance disappeared during reboot"),
context=context, instance=instance)
self._notify_about_instance_usage(context, instance, "reboot.end")
@delete_image_on_error
def _do_snapshot_instance(self, context, image_id, instance, rotation):
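        """Snapshot the instance for a backup.
        A non-negative rotation value is required.
        """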
if rotation < 0:
raise exception.RotationRequiredForBackup()
self._snapshot_instance(context, image_id, instance,
task_states.IMAGE_BACKUP)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def backup_instance(self, context, image_id, instance, backup_type,
rotation):
"""Backup an instance on this host.
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around
"""
self._do_snapshot_instance(context, image_id, instance, rotation)
self._rotate_backups(context, instance, backup_type, rotation)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
@delete_image_on_error
def snapshot_instance(self, context, image_id, instance):
"""Snapshot an instance on this host.
:param context: security context
:param instance: a nova.objects.instance.Instance object
:param image_id: glance.db.sqlalchemy.models.Image.Id
"""
# NOTE(dave-mcnally) the task state will already be set by the api
# but if the compute manager has crashed/been restarted prior to the
# request getting here the task state may have been cleared so we set
# it again and things continue normally
try:
instance.task_state = task_states.IMAGE_SNAPSHOT
instance.save(
expected_task_state=task_states.IMAGE_SNAPSHOT_PENDING)
except exception.InstanceNotFound:
            # possibly the instance no longer exists; no point in continuing
LOG.debug("Instance not found, could not set state %s "
"for instance.",
task_states.IMAGE_SNAPSHOT, instance=instance)
return
except exception.UnexpectedDeletingTaskStateError:
LOG.debug("Instance being deleted, snapshot cannot continue",
instance=instance)
return
self._snapshot_instance(context, image_id, instance,
task_states.IMAGE_SNAPSHOT)
def _snapshot_instance(self, context, image_id, instance,
expected_task_state):
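        """Common snapshot path shared by snapshots and backups."""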
context = context.elevated()
instance.power_state = self._get_power_state(context, instance)
try:
instance.save()
LOG.info(_LI('instance snapshotting'), context=context,
instance=instance)
if instance.power_state != power_state.RUNNING:
state = instance.power_state
running = power_state.RUNNING
LOG.warning(_LW('trying to snapshot a non-running instance: '
'(state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
instance=instance)
self._notify_about_instance_usage(
context, instance, "snapshot.start")
def update_task_state(task_state,
expected_state=expected_task_state):
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
self.driver.snapshot(context, instance, image_id,
update_task_state)
instance.task_state = None
instance.save(expected_task_state=task_states.IMAGE_UPLOADING)
self._notify_about_instance_usage(context, instance,
"snapshot.end")
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
# the instance got deleted during the snapshot
# Quickly bail out of here
msg = 'Instance disappeared during snapshot'
LOG.debug(msg, instance=instance)
try:
image_service = glance.get_default_image_service()
image = image_service.show(context, image_id)
if image['status'] != 'active':
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Error while trying to clean up image %s"),
image_id, instance=instance)
except exception.ImageNotFound:
instance.task_state = None
instance.save()
msg = _("Image not found during snapshot")
            LOG.warning(msg, instance=instance)
def _post_interrupted_snapshot_cleanup(self, context, instance):
self.driver.post_interrupted_snapshot_cleanup(context, instance)
@object_compat
@messaging.expected_exceptions(NotImplementedError)
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
self.driver.volume_snapshot_create(context, instance, volume_id,
create_info)
@object_compat
@messaging.expected_exceptions(NotImplementedError)
def volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info):
self.driver.volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info)
@wrap_instance_fault
def _rotate_backups(self, context, instance, backup_type, rotation):
"""Delete excess backups associated to an instance.
Instances are allowed a fixed number of backups (the rotation number);
this method deletes the oldest backups that exceed the rotation
threshold.
:param context: security context
:param instance: Instance dict
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
filters = {'property-image_type': 'backup',
'property-backup_type': backup_type,
'property-instance_uuid': instance.uuid}
images = self.image_api.get_all(context, filters=filters,
sort_key='created_at', sort_dir='desc')
num_images = len(images)
LOG.debug("Found %(num_images)d images (rotation: %(rotation)d)",
{'num_images': num_images, 'rotation': rotation},
instance=instance)
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
LOG.debug("Rotating out %d backups", excess,
instance=instance)
            for i in range(excess):
image = images.pop()
image_id = image['id']
LOG.debug("Deleting image %s", image_id,
instance=instance)
self.image_api.delete(context, image_id)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def set_admin_password(self, context, instance, new_pass):
"""Set the root/admin password for an instance on this host.
This is generally only called by API password resets after an
image has been built.
        :param context: Nova auth context.
        :param instance: Nova instance object.
        :param new_pass: The admin password for the instance.
"""
context = context.elevated()
if new_pass is None:
# Generate a random password
new_pass = utils.generate_password()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
instance.task_state = None
instance.save(expected_task_state=task_states.UPDATING_PASSWORD)
_msg = _('instance %s is not running') % instance.uuid
raise exception.InstancePasswordSetFailed(
instance=instance.uuid, reason=_msg)
try:
self.driver.set_admin_password(instance, new_pass)
LOG.info(_LI("Root password set"), instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
except NotImplementedError:
LOG.warning(_LW('set_admin_password is not implemented '
'by this driver or guest instance.'),
instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
raise NotImplementedError(_('set_admin_password is not '
'implemented by this driver or guest '
'instance.'))
except exception.UnexpectedTaskStateError:
# interrupted by another (most likely delete) task
# do not retry
raise
except Exception as e:
# Catch all here because this could be anything.
LOG.exception(_LE('set_admin_password failed: %s'), e,
instance=instance)
self._set_instance_obj_error_state(context, instance)
# We create a new exception here so that we won't
# potentially reveal password information to the
# API caller. The real exception is logged above
_msg = _('error setting admin password')
raise exception.InstancePasswordSetFailed(
instance=instance.uuid, reason=_msg)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def inject_file(self, context, path, file_contents, instance):
"""Write a file to the specified path in an instance on this host."""
# NOTE(russellb) Remove this method, as well as the underlying virt
# driver methods, when the compute rpc interface is bumped to 4.x
# as it is no longer used.
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
            LOG.warning(_LW('trying to inject a file into a non-running '
                            'instance (state: %(current_state)s expected: '
'%(expected_state)s)'),
{'current_state': current_power_state,
'expected_state': expected_state},
instance=instance)
LOG.info(_LI('injecting file to %s'), path,
instance=instance)
self.driver.inject_file(instance, path, file_contents)
def _get_rescue_image(self, context, instance, rescue_image_ref=None):
"""Determine what image should be used to boot the rescue VM."""
# 1. If rescue_image_ref is passed in, use that for rescue.
# 2. Else, use the base image associated with instance's current image.
# The idea here is to provide the customer with a rescue
# environment which they are familiar with.
# So, if they built their instance off of a Debian image,
# their rescue VM will also be Debian.
# 3. As a last resort, use instance's current image.
if not rescue_image_ref:
system_meta = utils.instance_sys_meta(instance)
rescue_image_ref = system_meta.get('image_base_image_ref')
if not rescue_image_ref:
LOG.warning(_LW('Unable to find a different image to use for '
'rescue VM, using instance\'s current image'),
instance=instance)
rescue_image_ref = instance.image_ref
image_meta = compute_utils.get_image_metadata(context, self.image_api,
rescue_image_ref,
instance)
# NOTE(belliott) bug #1227350 - xenapi needs the actual image id
image_meta['id'] = rescue_image_ref
return image_meta
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def rescue_instance(self, context, instance, rescue_password,
rescue_image_ref=None, clean_shutdown=True):
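        """Rescue an instance on this host.
        The instance is powered off and booted from a rescue image so that
        the guest can be repaired.
        """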
context = context.elevated()
LOG.info(_LI('Rescuing'), context=context, instance=instance)
admin_password = (rescue_password if rescue_password else
utils.generate_password())
network_info = self._get_instance_nw_info(context, instance)
rescue_image_meta = self._get_rescue_image(context, instance,
rescue_image_ref)
extra_usage_info = {'rescue_image_name':
rescue_image_meta.get('name', '')}
self._notify_about_instance_usage(context, instance,
"rescue.start", extra_usage_info=extra_usage_info,
network_info=network_info)
try:
self._power_off_instance(context, instance, clean_shutdown)
self.driver.rescue(context, instance,
network_info,
rescue_image_meta, admin_password)
except Exception as e:
LOG.exception(_LE("Error trying to Rescue Instance"),
instance=instance)
raise exception.InstanceNotRescuable(
instance_id=instance.uuid,
reason=_("Driver Error: %s") % e)
self.conductor_api.notify_usage_exists(context, instance,
current_period=True)
instance.vm_state = vm_states.RESCUED
instance.task_state = None
instance.power_state = self._get_power_state(context, instance)
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESCUING)
self._notify_about_instance_usage(context, instance,
"rescue.end", extra_usage_info=extra_usage_info,
network_info=network_info)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unrescue_instance(self, context, instance):
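        """Return a rescued instance to its normal, active state."""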
context = context.elevated()
LOG.info(_LI('Unrescuing'), context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance,
"unrescue.start", network_info=network_info)
with self._error_out_instance_on_exception(context, instance):
self.driver.unrescue(instance,
network_info)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.power_state = self._get_power_state(context, instance)
instance.save(expected_task_state=task_states.UNRESCUING)
self._notify_about_instance_usage(context,
instance,
"unrescue.end",
network_info=network_info)
@object_compat
@wrap_exception()
@wrap_instance_fault
def change_instance_metadata(self, context, diff, instance):
"""Update the metadata published to the instance."""
LOG.debug("Changing instance metadata according to %r",
diff, instance=instance)
self.driver.change_instance_metadata(context, instance, diff)
def _cleanup_stored_instance_types(self, instance, restore_old=False):
"""Clean up "old" and "new" instance_type information stored in
instance's system_metadata. Optionally update the "current"
instance_type to the saved old one first.
Returns the updated system_metadata as a dict, the
post-cleanup current instance type and the to-be dropped
instance type.
"""
sys_meta = instance.system_metadata
if restore_old:
instance_type = instance.get_flavor('old')
drop_instance_type = instance.get_flavor()
instance.set_flavor(instance_type)
else:
instance_type = instance.get_flavor()
drop_instance_type = instance.get_flavor('old')
instance.delete_flavor('old')
instance.delete_flavor('new')
return sys_meta, instance_type, drop_instance_type
@wrap_exception()
@wrap_instance_event
@wrap_instance_fault
def confirm_resize(self, context, instance, reservations, migration):
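        """Confirm a resize/migration, destroying the source instance.
        The migration and instance records are re-read from the database so
        an already confirmed migration or a deleted instance is handled
        gracefully.
        """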
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
@utils.synchronized(instance.uuid)
def do_confirm_resize(context, instance, migration_id):
            # NOTE(wangpan): Get the migration status from the db; if it has
            # already been confirmed, we do nothing and return here.
LOG.debug("Going to confirm migration %s", migration_id,
context=context, instance=instance)
try:
# TODO(russellb) Why are we sending the migration object just
# to turn around and look it up from the db again?
migration = objects.Migration.get_by_id(
context.elevated(), migration_id)
except exception.MigrationNotFound:
LOG.error(_LE("Migration %s is not found during confirmation"),
migration_id, context=context, instance=instance)
quotas.rollback()
return
if migration.status == 'confirmed':
LOG.info(_LI("Migration %s is already confirmed"),
migration_id, context=context, instance=instance)
quotas.rollback()
return
elif migration.status not in ('finished', 'confirming'):
LOG.warning(_LW("Unexpected confirmation status '%(status)s' "
"of migration %(id)s, exit confirmation "
"process"),
{"status": migration.status, "id": migration_id},
context=context, instance=instance)
quotas.rollback()
return
# NOTE(wangpan): Get the instance from db, if it has been
# deleted, we do nothing and return here
expected_attrs = ['metadata', 'system_metadata', 'flavor']
try:
instance = objects.Instance.get_by_uuid(
context, instance.uuid,
expected_attrs=expected_attrs)
except exception.InstanceNotFound:
LOG.info(_LI("Instance is not found during confirmation"),
context=context, instance=instance)
quotas.rollback()
return
self._confirm_resize(context, instance, quotas,
migration=migration)
do_confirm_resize(context, instance, migration.id)
def _confirm_resize(self, context, instance, quotas,
migration=None):
"""Destroys the source instance."""
self._notify_about_instance_usage(context, instance,
"resize.confirm.start")
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
# NOTE(danms): delete stashed migration information
sys_meta, instance_type, old_instance_type = (
self._cleanup_stored_instance_types(instance))
sys_meta.pop('old_vm_state', None)
instance.system_metadata = sys_meta
instance.save()
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
migration.source_compute, teardown=True)
network_info = self._get_instance_nw_info(context, instance)
self.driver.confirm_migration(migration, instance,
network_info)
migration.status = 'confirmed'
with migration.obj_as_admin():
migration.save()
rt = self._get_resource_tracker(migration.source_node)
rt.drop_resize_claim(context, instance, old_instance_type)
# NOTE(mriedem): The old_vm_state could be STOPPED but the user
# might have manually powered up the instance to confirm the
# resize/migrate, so we need to check the current power state
# on the instance and set the vm_state appropriately. We default
# to ACTIVE because if the power state is not SHUTDOWN, we
# assume _sync_instance_power_state will clean it up.
p_state = instance.power_state
vm_state = None
if p_state == power_state.SHUTDOWN:
vm_state = vm_states.STOPPED
LOG.debug("Resized/migrated instance is powered off. "
"Setting vm_state to '%s'.", vm_state,
instance=instance)
else:
vm_state = vm_states.ACTIVE
instance.vm_state = vm_state
instance.task_state = None
instance.save(expected_task_state=[None, task_states.DELETING])
self._notify_about_instance_usage(
context, instance, "resize.confirm.end",
network_info=network_info)
quotas.commit()
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def revert_resize(self, context, instance, migration, reservations):
"""Destroys the new instance on the destination machine.
Reverts the model changes, and powers on the old instance on the
source machine.
"""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
# NOTE(comstud): A revert_resize is essentially a resize back to
# the old size, so we need to send a usage event here.
self.conductor_api.notify_usage_exists(
context, instance, current_period=True)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
teardown=True)
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_start(context,
instance,
migration_p)
network_info = self._get_instance_nw_info(context, instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
destroy_disks = not self._is_instance_storage_shared(context,
instance)
self.driver.destroy(context, instance, network_info,
block_device_info, destroy_disks)
self._terminate_volume_connections(context, instance, bdms)
migration.status = 'reverted'
with migration.obj_as_admin():
migration.save()
rt = self._get_resource_tracker(instance.node)
rt.drop_resize_claim(context, instance)
self.compute_rpcapi.finish_revert_resize(context, instance,
migration, migration.source_compute,
quotas.reservations)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def finish_revert_resize(self, context, instance, reservations, migration):
"""Finishes the second half of reverting a resize.
Bring the original source instance state back (active/shutoff) and
revert the resized attributes in the database.
"""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "resize.revert.start")
sys_meta, instance_type, drop_instance_type = (
self._cleanup_stored_instance_types(instance, True))
# NOTE(mriedem): delete stashed old_vm_state information; we
# default to ACTIVE for backwards compatibility if old_vm_state
# is not set
old_vm_state = sys_meta.pop('old_vm_state', vm_states.ACTIVE)
instance.system_metadata = sys_meta
instance.memory_mb = instance_type['memory_mb']
instance.vcpus = instance_type['vcpus']
instance.root_gb = instance_type['root_gb']
instance.ephemeral_gb = instance_type['ephemeral_gb']
instance.instance_type_id = instance_type['id']
instance.host = migration.source_compute
instance.node = migration.source_node
instance.save()
migration.dest_compute = migration.source_compute
with migration.obj_as_admin():
migration.save()
self.network_api.setup_networks_on_host(context, instance,
migration.source_compute)
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
power_on = old_vm_state != vm_states.STOPPED
self.driver.finish_revert_migration(context, instance,
network_info,
block_device_info, power_on)
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESIZE_REVERTING)
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_finish(context,
instance,
migration_p)
# if the original vm state was STOPPED, set it back to STOPPED
LOG.info(_LI("Updating instance to original state: '%s'"),
old_vm_state)
if power_on:
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save()
else:
instance.task_state = task_states.POWERING_OFF
instance.save()
self.stop_instance(context, instance=instance)
self._notify_about_instance_usage(
context, instance, "resize.revert.end")
quotas.commit()
def _prep_resize(self, context, image, instance, instance_type,
quotas, request_spec, filter_properties, node,
clean_shutdown=True):
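        """Validate the resize request, stash the new flavor and the old
        vm_state, claim resources on this node and hand off to
        resize_instance over RPC.
        """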
if not filter_properties:
filter_properties = {}
if not instance.host:
self._set_instance_error_state(context, instance)
msg = _('Instance has no source host')
raise exception.MigrationError(reason=msg)
same_host = instance.host == self.host
if same_host and not CONF.allow_resize_to_same_host:
self._set_instance_error_state(context, instance)
msg = _('destination same as source!')
raise exception.MigrationError(reason=msg)
# NOTE(danms): Stash the new instance_type to avoid having to
# look it up in the database later
instance.set_flavor(instance_type, 'new')
# NOTE(mriedem): Stash the old vm_state so we can set the
# resized/reverted instance back to the same state later.
vm_state = instance.vm_state
LOG.debug('Stashing vm_state: %s', vm_state, instance=instance)
instance.system_metadata['old_vm_state'] = vm_state
instance.save()
limits = filter_properties.get('limits', {})
rt = self._get_resource_tracker(node)
with rt.resize_claim(context, instance, instance_type,
image_meta=image, limits=limits) as claim:
LOG.info(_LI('Migrating'), context=context, instance=instance)
self.compute_rpcapi.resize_instance(
context, instance, claim.migration, image,
instance_type, quotas.reservations,
clean_shutdown)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def prep_resize(self, context, image, instance, instance_type,
reservations, request_spec, filter_properties, node,
clean_shutdown=True):
"""Initiates the process of moving a running instance to another host.
Possibly changes the RAM and disk size in the process.
"""
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug("No node specified, defaulting to %s", node,
instance=instance)
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
self.conductor_api.notify_usage_exists(
context, instance, current_period=True)
self._notify_about_instance_usage(
context, instance, "resize.prep.start")
try:
self._prep_resize(context, image, instance,
instance_type, quotas,
request_spec, filter_properties,
node, clean_shutdown)
# NOTE(dgenin): This is thrown in LibvirtDriver when the
# instance to be migrated is backed by LVM.
# Remove when LVM migration is implemented.
except exception.MigrationPreCheckError:
raise
except Exception:
# try to re-schedule the resize elsewhere:
exc_info = sys.exc_info()
self._reschedule_resize_or_reraise(context, image, instance,
exc_info, instance_type, quotas, request_spec,
filter_properties)
finally:
extra_usage_info = dict(
new_instance_type=instance_type['name'],
new_instance_type_id=instance_type['id'])
self._notify_about_instance_usage(
context, instance, "resize.prep.end",
extra_usage_info=extra_usage_info)
def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,
instance_type, quotas, request_spec, filter_properties):
"""Try to re-schedule the resize or re-raise the original error to
error out the instance.
"""
if not request_spec:
request_spec = {}
if not filter_properties:
filter_properties = {}
rescheduled = False
instance_uuid = instance.uuid
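        # Try to reschedule through the conductor; if rescheduling itself
        # fails, record the fault and fall through so the original error is
        # re-raised below.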
try:
reschedule_method = self.compute_task_api.resize_instance
scheduler_hint = dict(filter_properties=filter_properties)
method_args = (instance, None, scheduler_hint, instance_type,
quotas.reservations)
task_state = task_states.RESIZE_PREP
rescheduled = self._reschedule(context, request_spec,
filter_properties, instance, reschedule_method,
method_args, task_state, exc_info)
except Exception as error:
rescheduled = False
LOG.exception(_LE("Error trying to reschedule"),
instance_uuid=instance_uuid)
compute_utils.add_instance_fault_from_exc(context,
instance, error,
exc_info=sys.exc_info())
self._notify_about_instance_usage(context, instance,
'resize.error', fault=error)
if rescheduled:
self._log_original_error(exc_info, instance_uuid)
compute_utils.add_instance_fault_from_exc(context,
instance, exc_info[1], exc_info=exc_info)
self._notify_about_instance_usage(context, instance,
'resize.error', fault=exc_info[1])
else:
# not re-scheduling
raise exc_info[0], exc_info[1], exc_info[2]
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@errors_out_migration
@wrap_instance_fault
def resize_instance(self, context, instance, image,
reservations, migration, instance_type,
clean_shutdown=True):
"""Starts the migration of a running instance to another host."""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
if (not instance_type or
not isinstance(instance_type, objects.Flavor)):
instance_type = objects.Flavor.get_by_id(
context, migration['new_instance_type_id'])
network_info = self._get_instance_nw_info(context, instance)
migration.status = 'migrating'
with migration.obj_as_admin():
migration.save()
instance.task_state = task_states.RESIZE_MIGRATING
instance.save(expected_task_state=task_states.RESIZE_PREP)
self._notify_about_instance_usage(
context, instance, "resize.start", network_info=network_info)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
timeout, retry_interval = self._get_power_off_values(context,
instance, clean_shutdown)
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration.dest_host,
instance_type, network_info,
block_device_info,
timeout, retry_interval)
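            # The disk has been migrated and the guest powered off on this
            # host, so drop the source host's volume connections before
            # handing the instance over to the destination.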
self._terminate_volume_connections(context, instance, bdms)
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_start(context,
instance,
migration_p)
migration.status = 'post-migrating'
with migration.obj_as_admin():
migration.save()
instance.host = migration.dest_compute
instance.node = migration.dest_node
instance.task_state = task_states.RESIZE_MIGRATED
instance.save(expected_task_state=task_states.RESIZE_MIGRATING)
self.compute_rpcapi.finish_resize(context, instance,
migration, image, disk_info,
migration.dest_compute, reservations=quotas.reservations)
self._notify_about_instance_usage(context, instance, "resize.end",
network_info=network_info)
self.instance_events.clear_events_for_instance(instance)
def _terminate_volume_connections(self, context, instance, bdms):
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
if bdm.is_volume:
self.volume_api.terminate_connection(context, bdm.volume_id,
connector)
@staticmethod
def _set_instance_info(instance, instance_type):
instance.instance_type_id = instance_type['id']
instance.memory_mb = instance_type['memory_mb']
instance.vcpus = instance_type['vcpus']
instance.root_gb = instance_type['root_gb']
instance.ephemeral_gb = instance_type['ephemeral_gb']
instance.set_flavor(instance_type)
def _finish_resize(self, context, instance, migration, disk_info,
image):
resize_instance = False
old_instance_type_id = migration['old_instance_type_id']
new_instance_type_id = migration['new_instance_type_id']
old_instance_type = instance.get_flavor()
# NOTE(mriedem): Get the old_vm_state so we know if we should
# power on the instance. If old_vm_state is not set we need to default
# to ACTIVE for backwards compatibility
old_vm_state = instance.system_metadata.get('old_vm_state',
vm_states.ACTIVE)
instance.set_flavor(old_instance_type, 'old')
if old_instance_type_id != new_instance_type_id:
instance_type = instance.get_flavor('new')
self._set_instance_info(instance, instance_type)
resize_instance = True
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
migration['dest_compute'])
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_finish(context,
instance,
migration_p)
network_info = self._get_instance_nw_info(context, instance)
instance.task_state = task_states.RESIZE_FINISH
instance.save(expected_task_state=task_states.RESIZE_MIGRATED)
self._notify_about_instance_usage(
context, instance, "finish_resize.start",
network_info=network_info)
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
# NOTE(mriedem): If the original vm_state was STOPPED, we don't
# automatically power on the instance after it's migrated
power_on = old_vm_state != vm_states.STOPPED
try:
self.driver.finish_migration(context, migration, instance,
disk_info,
network_info,
image, resize_instance,
block_device_info, power_on)
except Exception:
with excutils.save_and_reraise_exception():
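                # Roll the instance object back to the old flavor details,
                # since the resize did not finish.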
if resize_instance:
self._set_instance_info(instance,
old_instance_type)
migration.status = 'finished'
with migration.obj_as_admin():
migration.save()
instance.vm_state = vm_states.RESIZED
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESIZE_FINISH)
self._notify_about_instance_usage(
context, instance, "finish_resize.end",
network_info=network_info)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@errors_out_migration
@wrap_instance_fault
def finish_resize(self, context, disk_info, image, instance,
reservations, migration):
"""Completes the migration process.
Sets up the newly transferred disk and turns on the instance at its
new host machine.
"""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
try:
self._finish_resize(context, instance, migration,
disk_info, image)
quotas.commit()
except Exception:
LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance=instance)
with excutils.save_and_reraise_exception():
try:
quotas.rollback()
except Exception as qr_error:
LOG.exception(_LE("Failed to rollback quota for failed "
"finish_resize: %s"),
qr_error, instance=instance)
self._set_instance_error_state(context, instance)
@object_compat
@wrap_exception()
@wrap_instance_fault
def add_fixed_ip_to_instance(self, context, network_id, instance):
"""Calls network_api to add new fixed_ip to instance
then injects the new network info and resets instance networking.
"""
self._notify_about_instance_usage(
context, instance, "create_ip.start")
network_info = self.network_api.add_fixed_ip_to_instance(context,
instance,
network_id)
self._inject_network_info(context, instance, network_info)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "create_ip.end", network_info=network_info)
@object_compat
@wrap_exception()
@wrap_instance_fault
def remove_fixed_ip_from_instance(self, context, address, instance):
"""Calls network_api to remove existing fixed_ip from instance
by injecting the altered network info and resetting
instance networking.
"""
self._notify_about_instance_usage(
context, instance, "delete_ip.start")
network_info = self.network_api.remove_fixed_ip_from_instance(context,
instance,
address)
self._inject_network_info(context, instance, network_info)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "delete_ip.end", network_info=network_info)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def pause_instance(self, context, instance):
"""Pause an instance on this host."""
context = context.elevated()
LOG.info(_LI('Pausing'), context=context, instance=instance)
self._notify_about_instance_usage(context, instance, 'pause.start')
self.driver.pause(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.PAUSED
instance.task_state = None
instance.save(expected_task_state=task_states.PAUSING)
self._notify_about_instance_usage(context, instance, 'pause.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unpause_instance(self, context, instance):
"""Unpause a paused instance on this host."""
context = context.elevated()
LOG.info(_LI('Unpausing'), context=context, instance=instance)
self._notify_about_instance_usage(context, instance, 'unpause.start')
self.driver.unpause(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.UNPAUSING)
self._notify_about_instance_usage(context, instance, 'unpause.end')
@wrap_exception()
def host_power_action(self, context, action):
"""Reboots, shuts down or powers up the host."""
return self.driver.host_power_action(action)
@wrap_exception()
def host_maintenance_mode(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
return self.driver.host_maintenance_mode(host, mode)
@wrap_exception()
def set_host_enabled(self, context, enabled):
"""Sets the specified host's ability to accept new instances."""
return self.driver.set_host_enabled(enabled)
@wrap_exception()
def get_host_uptime(self, context):
"""Returns the result of calling "uptime" on the target host."""
return self.driver.get_host_uptime()
@object_compat
@wrap_exception()
@wrap_instance_fault
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info(_LI("Retrieving diagnostics"), context=context,
instance=instance)
return self.driver.get_diagnostics(instance)
else:
raise exception.InstanceInvalidState(
attr='power_state',
instance_uuid=instance.uuid,
state=instance.power_state,
method='get_diagnostics')
@object_compat
@wrap_exception()
@wrap_instance_fault
def get_instance_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info(_LI("Retrieving diagnostics"), context=context,
instance=instance)
diags = self.driver.get_instance_diagnostics(instance)
return diags.serialize()
else:
raise exception.InstanceInvalidState(
attr='power_state',
instance_uuid=instance.uuid,
state=instance.power_state,
method='get_diagnostics')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def suspend_instance(self, context, instance):
"""Suspend the given instance."""
context = context.elevated()
# Store the old state
instance.system_metadata['old_vm_state'] = instance.vm_state
self._notify_about_instance_usage(context, instance, 'suspend.start')
with self._error_out_instance_on_exception(context, instance,
instance_state=instance.vm_state):
self.driver.suspend(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.SUSPENDED
instance.task_state = None
instance.save(expected_task_state=task_states.SUSPENDING)
self._notify_about_instance_usage(context, instance, 'suspend.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def resume_instance(self, context, instance):
"""Resume the given suspended instance."""
context = context.elevated()
LOG.info(_LI('Resuming'), context=context, instance=instance)
self._notify_about_instance_usage(context, instance, 'resume.start')
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(
context, instance)
with self._error_out_instance_on_exception(context, instance,
instance_state=instance.vm_state):
self.driver.resume(context, instance, network_info,
block_device_info)
instance.power_state = self._get_power_state(context, instance)
# We default to the ACTIVE state for backwards compatibility
instance.vm_state = instance.system_metadata.pop('old_vm_state',
vm_states.ACTIVE)
instance.task_state = None
instance.save(expected_task_state=task_states.RESUMING)
self._notify_about_instance_usage(context, instance, 'resume.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def shelve_instance(self, context, instance, image_id,
clean_shutdown=True):
"""Shelve an instance.
This should be used when you want to take a snapshot of the instance.
It also adds system_metadata that can be used by a periodic task to
offload the shelved instance after a period of time.
:param context: request context
:param instance: an Instance object
:param image_id: an image id to snapshot to.
:param clean_shutdown: give the GuestOS a chance to stop
"""
self.conductor_api.notify_usage_exists(
context, instance, current_period=True)
self._notify_about_instance_usage(context, instance, 'shelve.start')
def update_task_state(task_state, expected_state=task_states.SHELVING):
shelving_state_map = {
task_states.IMAGE_PENDING_UPLOAD:
task_states.SHELVING_IMAGE_PENDING_UPLOAD,
task_states.IMAGE_UPLOADING:
task_states.SHELVING_IMAGE_UPLOADING,
task_states.SHELVING: task_states.SHELVING}
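            # Translate the generic snapshot task states into their
            # shelving-specific equivalents before saving the instance.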
task_state = shelving_state_map[task_state]
expected_state = shelving_state_map[expected_state]
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
self._power_off_instance(context, instance, clean_shutdown)
self.driver.snapshot(context, instance, image_id, update_task_state)
instance.system_metadata['shelved_at'] = timeutils.strtime()
instance.system_metadata['shelved_image_id'] = image_id
instance.system_metadata['shelved_host'] = self.host
instance.vm_state = vm_states.SHELVED
instance.task_state = None
if CONF.shelved_offload_time == 0:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.power_state = self._get_power_state(context, instance)
instance.save(expected_task_state=[
task_states.SHELVING,
task_states.SHELVING_IMAGE_UPLOADING])
self._notify_about_instance_usage(context, instance, 'shelve.end')
if CONF.shelved_offload_time == 0:
self.shelve_offload_instance(context, instance,
clean_shutdown=False)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def shelve_offload_instance(self, context, instance, clean_shutdown=True):
"""Remove a shelved instance from the hypervisor.
This frees up those resources for use by other instances, but may lead
to slower unshelve times for this instance. This method is used by
volume backed instances since restoring them doesn't involve the
potentially large download of an image.
:param context: request context
:param instance: nova.objects.instance.Instance
:param clean_shutdown: give the GuestOS a chance to stop
"""
self._notify_about_instance_usage(context, instance,
'shelve_offload.start')
self._power_off_instance(context, instance, clean_shutdown)
current_power_state = self._get_power_state(context, instance)
self.network_api.cleanup_instance_network_on_host(context, instance,
instance.host)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.destroy(context, instance, network_info,
block_device_info)
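        # The guest has been destroyed on the hypervisor; clear host/node so
        # that a later unshelve is not tied to this host.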
instance.power_state = current_power_state
instance.host = None
instance.node = None
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = None
instance.save(expected_task_state=[task_states.SHELVING,
task_states.SHELVING_OFFLOADING])
self._notify_about_instance_usage(context, instance,
'shelve_offload.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unshelve_instance(self, context, instance, image,
filter_properties=None, node=None):
"""Unshelve the instance.
:param context: request context
:param instance: a nova.objects.instance.Instance object
:param image: an image to build from. If None we assume a
volume backed instance.
:param filter_properties: dict containing limits, retry info etc.
:param node: target compute node
"""
if filter_properties is None:
filter_properties = {}
@utils.synchronized(instance.uuid)
def do_unshelve_instance():
self._unshelve_instance(context, instance, image,
filter_properties, node)
do_unshelve_instance()
def _unshelve_instance_key_scrub(self, instance):
"""Remove data from the instance that may cause side effects."""
cleaned_keys = dict(
key_data=instance.key_data,
auto_disk_config=instance.auto_disk_config)
instance.key_data = None
instance.auto_disk_config = False
return cleaned_keys
def _unshelve_instance_key_restore(self, instance, keys):
"""Restore previously scrubbed keys before saving the instance."""
instance.update(keys)
def _unshelve_instance(self, context, instance, image, filter_properties,
node):
self._notify_about_instance_usage(context, instance, 'unshelve.start')
instance.task_state = task_states.SPAWNING
instance.save()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._prep_block_device(context, instance, bdms,
do_check_attach=False)
scrubbed_keys = self._unshelve_instance_key_scrub(instance)
if node is None:
node = self.driver.get_available_nodes()[0]
LOG.debug('No node specified, defaulting to %s', node,
instance=instance)
rt = self._get_resource_tracker(node)
limits = filter_properties.get('limits', {})
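        # Image-backed instances are spawned from the shelved snapshot image;
        # on spawn failure the original image_ref is restored and the
        # snapshot image deleted (see the except block below).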
if image:
shelved_image_ref = instance.image_ref
instance.image_ref = image['id']
self.network_api.setup_instance_network_on_host(context, instance,
self.host)
network_info = self._get_instance_nw_info(context, instance)
try:
with rt.instance_claim(context, instance, limits):
flavor = None
if filter_properties is not None:
flavor = filter_properties.get('instance_type')
self.driver.spawn(context, instance, image, injected_files=[],
admin_password=None,
network_info=network_info,
block_device_info=block_device_info,
flavor=flavor)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Instance failed to spawn'),
instance=instance)
if image:
instance.image_ref = shelved_image_ref
self.image_api.delete(context, image['id'])
self._unshelve_instance_key_restore(instance, scrubbed_keys)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.SPAWNING)
self._notify_about_instance_usage(context, instance, 'unshelve.end')
@messaging.expected_exceptions(NotImplementedError)
@wrap_instance_fault
def reset_network(self, context, instance):
"""Reset networking on the given instance."""
LOG.debug('Reset network', context=context, instance=instance)
self.driver.reset_network(instance)
def _inject_network_info(self, context, instance, network_info):
"""Inject network info for the given instance."""
LOG.debug('Inject network info', context=context, instance=instance)
LOG.debug('network_info to inject: |%s|', network_info,
instance=instance)
self.driver.inject_network_info(instance,
network_info)
@wrap_instance_fault
def inject_network_info(self, context, instance):
"""Inject network info, but don't return the info."""
network_info = self._get_instance_nw_info(context, instance)
self._inject_network_info(context, instance, network_info)
@object_compat
@messaging.expected_exceptions(NotImplementedError,
exception.InstanceNotFound)
@wrap_exception()
@wrap_instance_fault
def get_console_output(self, context, instance, tail_length):
"""Send the console output for the given instance."""
context = context.elevated()
LOG.info(_LI("Get console output"), context=context,
instance=instance)
output = self.driver.get_console_output(context, instance)
if tail_length is not None:
output = self._tail_log(output, tail_length)
return output.decode('utf-8', 'replace').encode('ascii', 'replace')
def _tail_log(self, log, length):
try:
length = int(length)
except ValueError:
length = 0
if length == 0:
return ''
else:
return '\n'.join(log.split('\n')[-int(length):])
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@object_compat
@wrap_exception()
@wrap_instance_fault
def get_vnc_console(self, context, console_type, instance):
"""Return connection information for a vnc console."""
context = context.elevated()
LOG.debug("Getting vnc console", instance=instance)
token = str(uuid.uuid4())
if not CONF.vnc_enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'novnc':
# For essex, novncproxy_base_url must include the full path
# including the html file (like http://myhost/vnc_auto.html)
access_url = '%s?token=%s' % (CONF.novncproxy_base_url, token)
elif console_type == 'xvpvnc':
access_url = '%s?token=%s' % (CONF.xvpvncproxy_base_url, token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_vnc_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@object_compat
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable)
@wrap_exception()
@wrap_instance_fault
def get_spice_console(self, context, console_type, instance):
"""Return connection information for a spice console."""
context = context.elevated()
LOG.debug("Getting spice console", instance=instance)
token = str(uuid.uuid4())
if not CONF.spice.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'spice-html5':
# For essex, spicehtml5proxy_base_url must include the full path
# including the html file (like http://myhost/spice_auto.html)
access_url = '%s?token=%s' % (CONF.spice.html5proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_spice_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@object_compat
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_rdp_console(self, context, console_type, instance):
"""Return connection information for a RDP console."""
context = context.elevated()
LOG.debug("Getting RDP console", instance=instance)
token = str(uuid.uuid4())
if not CONF.rdp.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'rdp-html5':
access_url = '%s?token=%s' % (CONF.rdp.html5_proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_rdp_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(
exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
exception.SocketPortRangeExhaustedException,
exception.ImageSerialPortNumberInvalid,
exception.ImageSerialPortNumberExceedFlavorValue,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_serial_console(self, context, console_type, instance):
"""Returns connection information for a serial console."""
LOG.debug("Getting serial console", instance=instance)
if not CONF.serial_console.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
context = context.elevated()
token = str(uuid.uuid4())
access_url = '%s?token=%s' % (CONF.serial_console.base_url, token)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_serial_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound)
@object_compat
@wrap_exception()
@wrap_instance_fault
def validate_console_port(self, ctxt, instance, port, console_type):
if console_type == "spice-html5":
console_info = self.driver.get_spice_console(ctxt, instance)
elif console_type == "rdp-html5":
console_info = self.driver.get_rdp_console(ctxt, instance)
elif console_type == "serial":
console_info = self.driver.get_serial_console(ctxt, instance)
else:
console_info = self.driver.get_vnc_console(ctxt, instance)
return console_info.port == port
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def reserve_block_device_name(self, context, instance, device,
volume_id, disk_bus=None, device_type=None,
return_bdm_object=False):
# NOTE(ndipanov): disk_bus and device_type will be set to None if not
# passed (by older clients) and defaulted by the virt driver. Remove
# default values on the next major RPC version bump.
@utils.synchronized(instance.uuid)
def do_reserve():
bdms = (
objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid))
device_name = compute_utils.get_device_name_for_instance(
context, instance, bdms, device)
# NOTE(vish): create bdm here to avoid race condition
bdm = objects.BlockDeviceMapping(
context=context,
source_type='volume', destination_type='volume',
instance_uuid=instance.uuid,
volume_id=volume_id or 'reserved',
device_name=device_name,
disk_bus=disk_bus, device_type=device_type)
bdm.create()
if return_bdm_object:
return bdm
else:
return device_name
return do_reserve()
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def attach_volume(self, context, volume_id, mountpoint,
instance, bdm=None):
"""Attach a volume to an instance."""
if not bdm:
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)
@utils.synchronized(instance.uuid)
def do_attach_volume(context, instance, driver_bdm):
try:
return self._attach_volume(context, instance, driver_bdm)
except Exception:
with excutils.save_and_reraise_exception():
bdm.destroy()
do_attach_volume(context, instance, driver_bdm)
def _attach_volume(self, context, instance, bdm):
context = context.elevated()
LOG.info(_LI('Attaching volume %(volume_id)s to %(mountpoint)s'),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
try:
bdm.attach(context, instance, self.volume_api, self.driver,
do_check_attach=False, do_driver_attach=True)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to attach %(volume_id)s "
"at %(mountpoint)s"),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
self.volume_api.unreserve_volume(context, bdm.volume_id)
info = {'volume_id': bdm.volume_id}
self._notify_about_instance_usage(
context, instance, "volume.attach", extra_usage_info=info)
def _detach_volume(self, context, instance, bdm):
"""Do the actual driver detach using block device mapping."""
mp = bdm.device_name
volume_id = bdm.volume_id
LOG.info(_LI('Detach volume %(volume_id)s from mountpoint %(mp)s'),
{'volume_id': volume_id, 'mp': mp},
context=context, instance=instance)
connection_info = jsonutils.loads(bdm.connection_info)
# NOTE(vish): We currently don't use the serial when disconnecting,
# but added for completeness in case we ever do.
if connection_info and 'serial' not in connection_info:
connection_info['serial'] = volume_id
try:
if not self.driver.instance_exists(instance):
LOG.warning(_LW('Detaching volume from unknown instance'),
context=context, instance=instance)
encryption = encryptors.get_encryption_metadata(
context, self.volume_api, volume_id, connection_info)
self.driver.detach_volume(connection_info,
instance,
mp,
encryption=encryption)
except exception.DiskNotFound as err:
LOG.warning(_LW('Ignoring DiskNotFound exception while detaching '
'volume %(volume_id)s from %(mp)s: %(err)s'),
{'volume_id': volume_id, 'mp': mp, 'err': err},
instance=instance)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to detach volume %(volume_id)s '
'from %(mp)s'),
{'volume_id': volume_id, 'mp': mp},
context=context, instance=instance)
self.volume_api.roll_detaching(context, volume_id)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def detach_volume(self, context, volume_id, instance):
"""Detach a volume from an instance."""
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
if CONF.volume_usage_poll_interval > 0:
vol_stats = []
mp = bdm.device_name
# Handle bootable volumes which will not contain /dev/
if '/dev/' in mp:
mp = mp[5:]
try:
vol_stats = self.driver.block_stats(instance, mp)
except NotImplementedError:
pass
if vol_stats:
LOG.debug("Updating volume usage cache with totals",
instance=instance)
rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
self.conductor_api.vol_usage_update(context, volume_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance,
update_totals=True)
self._detach_volume(context, instance, bdm)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume_id, connector)
bdm.destroy()
info = dict(volume_id=volume_id)
self._notify_about_instance_usage(
context, instance, "volume.detach", extra_usage_info=info)
self.volume_api.detach(context.elevated(), volume_id)
def _init_volume_connection(self, context, new_volume_id,
old_volume_id, connector, instance, bdm):
new_cinfo = self.volume_api.initialize_connection(context,
new_volume_id,
connector)
old_cinfo = jsonutils.loads(bdm['connection_info'])
if old_cinfo and 'serial' not in old_cinfo:
old_cinfo['serial'] = old_volume_id
new_cinfo['serial'] = old_cinfo['serial']
return (old_cinfo, new_cinfo)
def _swap_volume(self, context, instance, bdm, connector, old_volume_id,
new_volume_id):
mountpoint = bdm['device_name']
failed = False
new_cinfo = None
resize_to = 0
try:
old_cinfo, new_cinfo = self._init_volume_connection(context,
new_volume_id,
old_volume_id,
connector,
instance,
bdm)
old_vol_size = self.volume_api.get(context, old_volume_id)['size']
new_vol_size = self.volume_api.get(context, new_volume_id)['size']
if new_vol_size > old_vol_size:
resize_to = new_vol_size
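            # If the new volume is larger, pass the new size so the driver
            # can resize the guest-visible block device during the swap.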
self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint,
resize_to)
except Exception:
failed = True
with excutils.save_and_reraise_exception():
if new_cinfo:
msg = _LE("Failed to swap volume %(old_volume_id)s "
"for %(new_volume_id)s")
LOG.exception(msg, {'old_volume_id': old_volume_id,
'new_volume_id': new_volume_id},
context=context,
instance=instance)
else:
msg = _LE("Failed to connect to volume %(volume_id)s "
"with volume at %(mountpoint)s")
LOG.exception(msg, {'volume_id': new_volume_id,
'mountpoint': bdm['device_name']},
context=context,
instance=instance)
self.volume_api.roll_detaching(context, old_volume_id)
self.volume_api.unreserve_volume(context, new_volume_id)
finally:
conn_volume = new_volume_id if failed else old_volume_id
if new_cinfo:
self.volume_api.terminate_connection(context,
conn_volume,
connector)
# If Cinder initiated the swap, it will keep
# the original ID
comp_ret = self.volume_api.migrate_volume_completion(
context,
old_volume_id,
new_volume_id,
error=failed)
return (comp_ret, new_cinfo)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def swap_volume(self, context, old_volume_id, new_volume_id, instance):
"""Swap volume for an instance."""
context = context.elevated()
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, old_volume_id, instance_uuid=instance.uuid)
connector = self.driver.get_volume_connector(instance)
comp_ret, new_cinfo = self._swap_volume(context, instance,
bdm,
connector,
old_volume_id,
new_volume_id)
save_volume_id = comp_ret['save_volume_id']
# Update bdm
values = {
'connection_info': jsonutils.dumps(new_cinfo),
'delete_on_termination': False,
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': save_volume_id,
'volume_size': None,
'no_device': None}
bdm.update(values)
bdm.save()
@wrap_exception()
def remove_volume_connection(self, context, volume_id, instance):
"""Remove a volume connection using the volume api."""
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
# NOTE(PhilDay): Can't use object_compat decorator here as
# instance is not the second parameter
if isinstance(instance, dict):
metas = ['metadata', 'system_metadata']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=metas)
instance._context = context
try:
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
self._detach_volume(context, instance, bdm)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume_id, connector)
except exception.NotFound:
pass
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
network_info = self.network_api.allocate_port_for_instance(
context, instance, port_id, network_id, requested_ip)
if len(network_info) != 1:
LOG.error(_LE('allocate_port_for_instance returned %(ports)s '
'ports'), dict(ports=len(network_info)))
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
image_ref = instance.get('image_ref')
image_meta = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
self.driver.attach_interface(instance, image_meta, network_info[0])
return network_info[0]
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
network_info = instance.info_cache.network_info
condemned = None
for vif in network_info:
if vif['id'] == port_id:
condemned = vif
break
if condemned is None:
raise exception.PortNotFound(_("Port %s is not "
"attached") % port_id)
self.network_api.deallocate_port_for_instance(context, instance,
port_id)
self.driver.detach_interface(instance, condemned)
def _get_compute_info(self, context, host):
return objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host)
@wrap_exception()
def check_instance_shared_storage(self, ctxt, instance, data):
"""Check if the instance files are shared
:param ctxt: security context
:param instance: dict of instance data
:param data: result of driver.check_instance_shared_storage_local
        Returns True if the instance disks are located on shared storage and
        False otherwise.
"""
return self.driver.check_instance_shared_storage_remote(ctxt, data)
@wrap_exception()
@wrap_instance_fault
def check_can_live_migrate_destination(self, ctxt, instance,
block_migration, disk_over_commit):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing migration info
"""
src_compute_info = obj_base.obj_to_primitive(
self._get_compute_info(ctxt, instance.host))
dst_compute_info = obj_base.obj_to_primitive(
self._get_compute_info(ctxt, CONF.host))
dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
instance, src_compute_info, dst_compute_info,
block_migration, disk_over_commit)
migrate_data = {}
try:
migrate_data = self.compute_rpcapi.\
check_can_live_migrate_source(ctxt, instance,
dest_check_data)
finally:
self.driver.check_can_live_migrate_destination_cleanup(ctxt,
dest_check_data)
if 'migrate_data' in dest_check_data:
migrate_data.update(dest_check_data['migrate_data'])
return migrate_data
@wrap_exception()
@wrap_instance_fault
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param ctxt: security context
:param instance: dict of instance data
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info
"""
is_volume_backed = self.compute_api.is_volume_backed_instance(ctxt,
instance)
dest_check_data['is_volume_backed'] = is_volume_backed
block_device_info = self._get_instance_block_device_info(
ctxt, instance, refresh_conn_info=True)
return self.driver.check_can_live_migrate_source(ctxt, instance,
dest_check_data,
block_device_info)
@object_compat
@wrap_exception()
@wrap_instance_fault
def pre_live_migration(self, context, instance, block_migration, disk,
migrate_data):
"""Preparations for live migration at dest host.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
:param migrate_data: if not None, it is a dict which holds data
required for live migration without shared
storage.
"""
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.start",
network_info=network_info)
pre_live_migration_data = self.driver.pre_live_migration(context,
instance,
block_device_info,
network_info,
disk,
migrate_data)
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host)
        # Creating filters for hypervisors and firewalls.
        # An example is nova-instance-instance-xxx,
        # which is written to libvirt.xml (check "virsh nwfilter-list").
        # This nwfilter is necessary on the destination host.
        # In addition, this method creates the filtering rule
        # on the destination host.
self.driver.ensure_filtering_rules_for_instance(instance,
network_info)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.end",
network_info=network_info)
return pre_live_migration_data
@wrap_exception()
@wrap_instance_fault
def live_migration(self, context, dest, instance, block_migration,
migrate_data):
"""Executing live migration.
:param context: security context
:param instance: a nova.objects.instance.Instance object
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: implementation specific params
"""
# NOTE(danms): since instance is not the first parameter, we can't
# use @object_compat on this method. Since this is the only example,
# we do this manually instead of complicating the decorator
if not isinstance(instance, obj_base.NovaObject):
expected = ['metadata', 'system_metadata',
'security_groups', 'info_cache']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=expected)
# Create a local copy since we'll be modifying the dictionary
migrate_data = dict(migrate_data or {})
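        # For block migration, collect the instance disk info first; then ask
        # the destination to run pre_live_migration before handing control to
        # the driver's live_migration call below.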
try:
if block_migration:
block_device_info = self._get_instance_block_device_info(
context, instance)
disk = self.driver.get_instance_disk_info(
instance, block_device_info=block_device_info)
else:
disk = None
pre_migration_data = self.compute_rpcapi.pre_live_migration(
context, instance,
block_migration, disk, dest, migrate_data)
migrate_data['pre_live_migration_result'] = pre_migration_data
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Pre live migration failed at %s'),
dest, instance=instance)
self._rollback_live_migration(context, instance, dest,
block_migration, migrate_data)
# Executing live migration
        # live_migration might raise exceptions, but
# nothing must be recovered in this version.
self.driver.live_migration(context, instance, dest,
self._post_live_migration,
self._rollback_live_migration,
block_migration, migrate_data)
def _live_migration_cleanup_flags(self, block_migration, migrate_data):
"""Determine whether disks or intance path need to be cleaned up after
live migration (at source on success, at destination on rollback)
Block migration needs empty image at destination host before migration
starts, so if any failure occurs, any empty images has to be deleted.
Also Volume backed live migration w/o shared storage needs to delete
newly created instance-xxx dir on the destination as a part of its
rollback process
:param block_migration: if true, it was a block migration
:param migrate_data: implementation specific data
:returns: (bool, bool) -- do_cleanup, destroy_disks
"""
# NOTE(angdraug): block migration wouldn't have been allowed if either
# block storage or instance path were shared
is_shared_block_storage = not block_migration
is_shared_instance_path = not block_migration
if migrate_data:
is_shared_block_storage = migrate_data.get(
'is_shared_block_storage', is_shared_block_storage)
is_shared_instance_path = migrate_data.get(
'is_shared_instance_path', is_shared_instance_path)
        # No instance is booting at the source host, but the instance dir
        # must be deleted to prepare for the next block migration or the
        # next live migration w/o shared storage.
do_cleanup = block_migration or not is_shared_instance_path
destroy_disks = not is_shared_block_storage
return (do_cleanup, destroy_disks)
@wrap_exception()
@wrap_instance_fault
def _post_live_migration(self, ctxt, instance,
dest, block_migration=False, migrate_data=None):
"""Post operations for live migration.
        This method is called from live_migration
        and mainly updates the database record.
:param ctxt: security context
:param instance: instance dict
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: if not None, it is a dict which has data
required for live migration without shared storage
"""
LOG.info(_LI('_post_live_migration() is started..'),
instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, instance.uuid)
# Cleanup source host post live-migration
block_device_info = self._get_instance_block_device_info(
ctxt, instance, bdms=bdms)
self.driver.post_live_migration(ctxt, instance, block_device_info,
migrate_data)
# Detaching volumes.
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
# remove the volume connection without detaching from hypervisor
# because the instance is not running anymore on the current host
if bdm.is_volume:
self.volume_api.terminate_connection(ctxt, bdm.volume_id,
connector)
# Releasing vlan.
# (not necessary in current implementation?)
network_info = self._get_instance_nw_info(ctxt, instance)
self._notify_about_instance_usage(ctxt, instance,
"live_migration._post.start",
network_info=network_info)
# Releasing security group ingress rule.
self.driver.unfilter_instance(instance,
network_info)
migration = {'source_compute': self.host,
'dest_compute': dest, }
self.network_api.migrate_instance_start(ctxt,
instance,
migration)
destroy_vifs = False
try:
self.driver.post_live_migration_at_source(ctxt, instance,
network_info)
except NotImplementedError as ex:
LOG.debug(ex, instance=instance)
# For all hypervisors other than libvirt, there is a possibility
# they are unplugging networks from source node in the cleanup
# method
destroy_vifs = True
# Define domain at destination host, without doing it,
# pause/suspend/terminate do not work.
self.compute_rpcapi.post_live_migration_at_destination(ctxt,
instance, block_migration, dest)
do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
block_migration, migrate_data)
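        # Clean up the source host according to the flags computed above:
        # skip cleanup entirely on fully shared storage, and only destroy
        # disks when block storage is not shared.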
if do_cleanup:
self.driver.cleanup(ctxt, instance, network_info,
destroy_disks=destroy_disks,
migrate_data=migrate_data,
destroy_vifs=destroy_vifs)
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(ctxt, instance,
self.host, teardown=True)
self.instance_events.clear_events_for_instance(instance)
# NOTE(timello): make sure we update available resources on source
# host even before next periodic task.
self.update_available_resource(ctxt)
self._notify_about_instance_usage(ctxt, instance,
"live_migration._post.end",
network_info=network_info)
LOG.info(_LI('Migrating instance to %s finished successfully.'),
dest, instance=instance)
LOG.info(_LI("You may see the error \"libvirt: QEMU error: "
"Domain not found: no domain with matching name.\" "
"This error can be safely ignored."),
instance=instance)
if CONF.vnc_enabled or CONF.spice.enabled or CONF.rdp.enabled:
if CONF.cells.enable:
self.cells_rpcapi.consoleauth_delete_tokens(ctxt,
instance.uuid)
else:
self.consoleauth_rpcapi.delete_tokens_for_instance(ctxt,
instance.uuid)
@object_compat
@wrap_exception()
@wrap_instance_fault
def post_live_migration_at_destination(self, context, instance,
block_migration):
"""Post operations for live migration .
:param context: security context
:param instance: Instance dict
:param block_migration: if true, prepare for block migration
"""
LOG.info(_LI('Post operation of migration started'),
instance=instance)
# NOTE(tr3buchet): setup networks on destination host
# this is called a second time because
# multi_host does not create the bridge in
# plug_vifs
self.network_api.setup_networks_on_host(context, instance,
self.host)
migration = {'source_compute': instance.host,
'dest_compute': self.host, }
self.network_api.migrate_instance_finish(context,
instance,
migration)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.post.dest.start",
network_info=network_info)
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.post_live_migration_at_destination(context, instance,
network_info,
block_migration, block_device_info)
# Restore instance state
current_power_state = self._get_power_state(context, instance)
node_name = None
try:
compute_node = self._get_compute_info(context, self.host)
node_name = compute_node.hypervisor_hostname
except exception.ComputeHostNotFound:
LOG.exception(_LE('Failed to get compute_info for %s'), self.host)
finally:
instance.host = self.host
instance.power_state = current_power_state
instance.task_state = None
instance.node = node_name
instance.save(expected_task_state=task_states.MIGRATING)
# NOTE(vish): this is necessary to update dhcp
self.network_api.setup_networks_on_host(context, instance, self.host)
self._notify_about_instance_usage(
context, instance, "live_migration.post.dest.end",
network_info=network_info)
@wrap_exception()
@wrap_instance_fault
def _rollback_live_migration(self, context, instance,
dest, block_migration, migrate_data=None):
"""Recovers Instance/volume state from migrating -> running.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest:
This method is called from live migration src host.
This param specifies destination host.
:param block_migration: if true, prepare for block migration
:param migrate_data:
if not none, contains implementation specific data.
"""
instance.task_state = None
instance.save(expected_task_state=[task_states.MIGRATING])
# NOTE(tr3buchet): setup networks on source host (really it's re-setup)
self.network_api.setup_networks_on_host(context, instance, self.host)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
for bdm in bdms:
if bdm.is_volume:
self.compute_rpcapi.remove_volume_connection(
context, instance, bdm.volume_id, dest)
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.start")
do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
block_migration, migrate_data)
if do_cleanup:
self.compute_rpcapi.rollback_live_migration_at_destination(
context, instance, dest, destroy_disks=destroy_disks,
migrate_data=migrate_data)
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.end")
@object_compat
@wrap_exception()
@wrap_instance_fault
def rollback_live_migration_at_destination(self, context, instance,
destroy_disks=True,
migrate_data=None):
"""Cleaning up image directory that is created pre_live_migration.
:param context: security context
:param instance: a nova.objects.instance.Instance object sent over rpc
"""
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.rollback.dest.start",
network_info=network_info)
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host, teardown=True)
# NOTE(vish): The mapping is passed in so the driver can disconnect
# from remote volumes if necessary
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.rollback_live_migration_at_destination(
context, instance, network_info, block_device_info,
destroy_disks=destroy_disks, migrate_data=migrate_data)
self._notify_about_instance_usage(
context, instance, "live_migration.rollback.dest.end",
network_info=network_info)
@periodic_task.periodic_task(
spacing=CONF.heal_instance_info_cache_interval)
def _heal_instance_info_cache(self, context):
"""Called periodically. On every call, try to update the
info_cache's network information for another instance by
calling to the network manager.
This is implemented by keeping a cache of uuids of instances
that live on this host. On each call, we pop one off of a
list, pull the DB record, and try the call to the network API.
        If anything errors, don't fail, as it's possible the instance
        has been deleted, etc.
"""
heal_interval = CONF.heal_instance_info_cache_interval
if not heal_interval:
return
instance_uuids = getattr(self, '_instance_uuids_to_heal', [])
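        # _instance_uuids_to_heal is a rolling worklist kept on the manager;
        # each periodic run heals the info_cache of (at most) one instance.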
instance = None
LOG.debug('Starting heal instance info cache')
if not instance_uuids:
# The list of instances to heal is empty so rebuild it
LOG.debug('Rebuilding the list of instances to heal')
db_instances = objects.InstanceList.get_by_host(
context, self.host, expected_attrs=[], use_slave=True)
for inst in db_instances:
# We don't want to refresh the cache for instances
# which are building or deleting so don't put them
# in the list. If they are building they will get
# added to the list next time we build it.
if (inst.vm_state == vm_states.BUILDING):
LOG.debug('Skipping network cache update for instance '
'because it is Building.', instance=inst)
continue
if (inst.task_state == task_states.DELETING):
LOG.debug('Skipping network cache update for instance '
'because it is being deleted.', instance=inst)
continue
if not instance:
# Save the first one we find so we don't
# have to get it again
instance = inst
else:
instance_uuids.append(inst['uuid'])
self._instance_uuids_to_heal = instance_uuids
else:
# Find the next valid instance on the list
while instance_uuids:
try:
inst = objects.Instance.get_by_uuid(
context, instance_uuids.pop(0),
expected_attrs=['system_metadata', 'info_cache'],
use_slave=True)
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
continue
# Check the instance hasn't been migrated
if inst.host != self.host:
LOG.debug('Skipping network cache update for instance '
'because it has been migrated to another '
'host.', instance=inst)
                # Check the instance isn't being deleted
elif inst.task_state == task_states.DELETING:
LOG.debug('Skipping network cache update for instance '
'because it is being deleted.', instance=inst)
else:
instance = inst
break
if instance:
# We have an instance now to refresh
try:
# Call to network API to get instance info.. this will
# force an update to the instance's info_cache
self._get_instance_nw_info(context, instance, use_slave=True)
LOG.debug('Updated the network info_cache for instance',
instance=instance)
except exception.InstanceNotFound:
# Instance is gone.
LOG.debug('Instance no longer exists. Unable to refresh',
instance=instance)
return
except Exception:
LOG.error(_LE('An error occurred while refreshing the network '
'cache.'), instance=instance, exc_info=True)
else:
LOG.debug("Didn't find any instances for network info cache "
"update.")
@periodic_task.periodic_task
def _poll_rebooting_instances(self, context):
if CONF.reboot_timeout > 0:
filters = {'task_state':
[task_states.REBOOTING,
task_states.REBOOT_STARTED,
task_states.REBOOT_PENDING],
'host': self.host}
rebooting = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=[], use_slave=True)
to_poll = []
for instance in rebooting:
if timeutils.is_older_than(instance.updated_at,
CONF.reboot_timeout):
to_poll.append(instance)
self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll)
@periodic_task.periodic_task
def _poll_rescued_instances(self, context):
if CONF.rescue_timeout > 0:
filters = {'vm_state': vm_states.RESCUED,
'host': self.host}
rescued_instances = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=["system_metadata"],
use_slave=True)
to_unrescue = []
for instance in rescued_instances:
if timeutils.is_older_than(instance.launched_at,
CONF.rescue_timeout):
to_unrescue.append(instance)
for instance in to_unrescue:
self.compute_api.unrescue(context, instance)
@periodic_task.periodic_task
def _poll_unconfirmed_resizes(self, context):
if CONF.resize_confirm_window == 0:
return
migrations = objects.MigrationList.get_unconfirmed_by_dest_compute(
context, CONF.resize_confirm_window, self.host,
use_slave=True)
migrations_info = dict(migration_count=len(migrations),
confirm_window=CONF.resize_confirm_window)
if migrations_info["migration_count"] > 0:
LOG.info(_LI("Found %(migration_count)d unconfirmed migrations "
"older than %(confirm_window)d seconds"),
migrations_info)
def _set_migration_to_error(migration, reason, **kwargs):
LOG.warning(_LW("Setting migration %(migration_id)s to error: "
"%(reason)s"),
{'migration_id': migration['id'], 'reason': reason},
**kwargs)
migration.status = 'error'
with migration.obj_as_admin():
migration.save()
for migration in migrations:
instance_uuid = migration.instance_uuid
LOG.info(_LI("Automatically confirming migration "
"%(migration_id)s for instance %(instance_uuid)s"),
{'migration_id': migration.id,
'instance_uuid': instance_uuid})
expected_attrs = ['metadata', 'system_metadata']
try:
instance = objects.Instance.get_by_uuid(context,
instance_uuid, expected_attrs=expected_attrs,
use_slave=True)
except exception.InstanceNotFound:
reason = (_("Instance %s not found") %
instance_uuid)
_set_migration_to_error(migration, reason)
continue
if instance.vm_state == vm_states.ERROR:
reason = _("In ERROR state")
_set_migration_to_error(migration, reason,
instance=instance)
continue
            # race condition: For an instance in DELETING state we should not
            # set the migration to error, otherwise an instance that is being
            # deleted while in RESIZED state would never be able to confirm
            # the resize.
if instance.task_state in [task_states.DELETING,
task_states.SOFT_DELETING]:
msg = ("Instance being deleted or soft deleted during resize "
"confirmation. Skipping.")
LOG.debug(msg, instance=instance)
continue
# race condition: This condition is hit when this method is
# called between the save of the migration record with a status of
# finished and the save of the instance object with a state of
# RESIZED. The migration record should not be set to error.
if instance.task_state == task_states.RESIZE_FINISH:
msg = ("Instance still resizing during resize "
"confirmation. Skipping.")
LOG.debug(msg, instance=instance)
continue
vm_state = instance.vm_state
task_state = instance.task_state
if vm_state != vm_states.RESIZED or task_state is not None:
reason = (_("In states %(vm_state)s/%(task_state)s, not "
"RESIZED/None") %
{'vm_state': vm_state,
'task_state': task_state})
_set_migration_to_error(migration, reason,
instance=instance)
continue
try:
self.compute_api.confirm_resize(context, instance,
migration=migration)
except Exception as e:
LOG.info(_LI("Error auto-confirming resize: %s. "
"Will retry later."),
e, instance=instance)
@periodic_task.periodic_task(spacing=CONF.shelved_poll_interval)
def _poll_shelved_instances(self, context):
filters = {'vm_state': vm_states.SHELVED,
'host': self.host}
shelved_instances = objects.InstanceList.get_by_filters(
context, filters=filters, expected_attrs=['system_metadata'],
use_slave=True)
to_gc = []
for instance in shelved_instances:
sys_meta = instance.system_metadata
shelved_at = timeutils.parse_strtime(sys_meta['shelved_at'])
if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time):
to_gc.append(instance)
for instance in to_gc:
try:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save()
self.shelve_offload_instance(context, instance,
clean_shutdown=False)
except Exception:
LOG.exception(_LE('Periodic task failed to offload instance.'),
instance=instance)
@periodic_task.periodic_task
def _instance_usage_audit(self, context):
if not CONF.instance_usage_audit:
return
if compute_utils.has_audit_been_run(context,
self.conductor_api,
self.host):
return
begin, end = utils.last_completed_audit_period()
instances = objects.InstanceList.get_active_by_window_joined(
context, begin, end, host=self.host,
expected_attrs=['system_metadata', 'info_cache', 'metadata'],
use_slave=True)
num_instances = len(instances)
errors = 0
successes = 0
LOG.info(_LI("Running instance usage audit for"
" host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s"
" instances."),
dict(host=self.host,
begin_time=begin,
end_time=end,
number_instances=num_instances))
start_time = time.time()
compute_utils.start_instance_usage_audit(context,
self.conductor_api,
begin, end,
self.host, num_instances)
for instance in instances:
try:
self.conductor_api.notify_usage_exists(
context, instance,
ignore_missing_network_data=False)
successes += 1
except Exception:
LOG.exception(_LE('Failed to generate usage '
'audit for instance '
'on host %s'), self.host,
instance=instance)
errors += 1
compute_utils.finish_instance_usage_audit(context,
self.conductor_api,
begin, end,
self.host, errors,
"Instance usage audit ran "
"for host %s, %s instances "
"in %s seconds." % (
self.host,
num_instances,
time.time() - start_time))
@periodic_task.periodic_task(spacing=CONF.bandwidth_poll_interval)
def _poll_bandwidth_usage(self, context):
if not self._bw_usage_supported:
return
prev_time, start_time = utils.last_completed_audit_period()
curr_time = time.time()
if (curr_time - self._last_bw_usage_poll >
CONF.bandwidth_poll_interval):
self._last_bw_usage_poll = curr_time
LOG.info(_LI("Updating bandwidth usage cache"))
cells_update_interval = CONF.cells.bandwidth_update_interval
if (cells_update_interval > 0 and
curr_time - self._last_bw_usage_cell_update >
cells_update_interval):
self._last_bw_usage_cell_update = curr_time
update_cells = True
else:
update_cells = False
instances = objects.InstanceList.get_by_host(context,
self.host,
use_slave=True)
try:
bw_counters = self.driver.get_all_bw_counters(instances)
except NotImplementedError:
# NOTE(mdragon): Not all hypervisors have bandwidth polling
# implemented yet. If they don't it doesn't break anything,
# they just don't get the info in the usage events.
# NOTE(PhilDay): Record that its not supported so we can
# skip fast on future calls rather than waste effort getting
# the list of instances.
LOG.warning(_LW("Bandwidth usage not supported by "
"hypervisor."))
self._bw_usage_supported = False
return
refreshed = timeutils.utcnow()
for bw_ctr in bw_counters:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
bw_in = 0
bw_out = 0
last_ctr_in = None
last_ctr_out = None
usage = objects.BandwidthUsage.get_by_instance_uuid_and_mac(
context, bw_ctr['uuid'], bw_ctr['mac_address'],
start_period=start_time, use_slave=True)
if usage:
bw_in = usage.bw_in
bw_out = usage.bw_out
last_ctr_in = usage.last_ctr_in
last_ctr_out = usage.last_ctr_out
else:
usage = (objects.BandwidthUsage.
get_by_instance_uuid_and_mac(
context, bw_ctr['uuid'], bw_ctr['mac_address'],
start_period=prev_time, use_slave=True))
if usage:
last_ctr_in = usage.last_ctr_in
last_ctr_out = usage.last_ctr_out
if last_ctr_in is not None:
if bw_ctr['bw_in'] < last_ctr_in:
# counter rollover
bw_in += bw_ctr['bw_in']
else:
bw_in += (bw_ctr['bw_in'] - last_ctr_in)
if last_ctr_out is not None:
if bw_ctr['bw_out'] < last_ctr_out:
# counter rollover
bw_out += bw_ctr['bw_out']
else:
bw_out += (bw_ctr['bw_out'] - last_ctr_out)
objects.BandwidthUsage(context=context).create(
bw_ctr['uuid'],
bw_ctr['mac_address'],
bw_in,
bw_out,
bw_ctr['bw_in'],
bw_ctr['bw_out'],
start_period=start_time,
last_refreshed=refreshed,
update_cells=update_cells)
def _get_host_volume_bdms(self, context, use_slave=False):
"""Return all block device mappings on a compute host."""
compute_host_bdms = []
instances = objects.InstanceList.get_by_host(context, self.host,
use_slave=use_slave)
for instance in instances:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=use_slave)
instance_bdms = [bdm for bdm in bdms if bdm.is_volume]
compute_host_bdms.append(dict(instance=instance,
instance_bdms=instance_bdms))
return compute_host_bdms
def _update_volume_usage_cache(self, context, vol_usages):
"""Updates the volume usage cache table with a list of stats."""
for usage in vol_usages:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
self.conductor_api.vol_usage_update(context, usage['volume'],
usage['rd_req'],
usage['rd_bytes'],
usage['wr_req'],
usage['wr_bytes'],
usage['instance'])
@periodic_task.periodic_task(spacing=CONF.volume_usage_poll_interval)
def _poll_volume_usage(self, context, start_time=None):
if CONF.volume_usage_poll_interval == 0:
return
if not start_time:
start_time = utils.last_completed_audit_period()[1]
compute_host_bdms = self._get_host_volume_bdms(context,
use_slave=True)
if not compute_host_bdms:
return
LOG.debug("Updating volume usage cache")
try:
vol_usages = self.driver.get_all_volume_usage(context,
compute_host_bdms)
except NotImplementedError:
return
self._update_volume_usage_cache(context, vol_usages)
@periodic_task.periodic_task(spacing=CONF.sync_power_state_interval,
run_immediately=True)
def _sync_power_states(self, context):
"""Align power states between the database and the hypervisor.
        To sync power state data we ask the driver how many virtual machines
        the hypervisor knows about and compare that with the number of
        instances the database knows about for this host, logging a warning
        if the two counts differ. We then proceed in a lazy loop, one
        database record at a time, checking whether the hypervisor reports
        the same power state as the database.
"""
db_instances = objects.InstanceList.get_by_host(context, self.host,
expected_attrs=[],
use_slave=True)
num_vm_instances = self.driver.get_num_instances()
num_db_instances = len(db_instances)
if num_vm_instances != num_db_instances:
LOG.warning(_LW("While synchronizing instance power states, found "
"%(num_db_instances)s instances in the database "
"and %(num_vm_instances)s instances on the "
"hypervisor."),
{'num_db_instances': num_db_instances,
'num_vm_instances': num_vm_instances})
def _sync(db_instance):
# NOTE(melwitt): This must be synchronized as we query state from
# two separate sources, the driver and the database.
# They are set (in stop_instance) and read, in sync.
@utils.synchronized(db_instance.uuid)
def query_driver_power_state_and_sync():
self._query_driver_power_state_and_sync(context, db_instance)
try:
query_driver_power_state_and_sync()
except Exception:
LOG.exception(_LE("Periodic sync_power_state task had an "
"error while processing an instance."),
instance=db_instance)
self._syncs_in_progress.pop(db_instance.uuid)
for db_instance in db_instances:
# process syncs asynchronously - don't want instance locking to
# block entire periodic task thread
uuid = db_instance.uuid
if uuid in self._syncs_in_progress:
LOG.debug('Sync already in progress for %s' % uuid)
else:
LOG.debug('Triggering sync for uuid %s' % uuid)
self._syncs_in_progress[uuid] = True
self._sync_power_pool.spawn_n(_sync, db_instance)
def _query_driver_power_state_and_sync(self, context, db_instance):
if db_instance.task_state is not None:
LOG.info(_LI("During sync_power_state the instance has a "
"pending task (%(task)s). Skip."),
{'task': db_instance.task_state}, instance=db_instance)
return
# No pending tasks. Now try to figure out the real vm_power_state.
try:
vm_instance = self.driver.get_info(db_instance)
vm_power_state = vm_instance.state
except exception.InstanceNotFound:
vm_power_state = power_state.NOSTATE
# Note(maoy): the above get_info call might take a long time,
# for example, because of a broken libvirt driver.
try:
self._sync_instance_power_state(context,
db_instance,
vm_power_state,
use_slave=True)
except exception.InstanceNotFound:
# NOTE(hanlind): If the instance gets deleted during sync,
# silently ignore.
pass
def _sync_instance_power_state(self, context, db_instance, vm_power_state,
use_slave=False):
"""Align instance power state between the database and hypervisor.
If the instance is not found on the hypervisor, but is in the database,
then a stop() API will be called on the instance.
"""
# We re-query the DB to get the latest instance info to minimize
# (not eliminate) race condition.
db_instance.refresh(use_slave=use_slave)
db_power_state = db_instance.power_state
vm_state = db_instance.vm_state
if self.host != db_instance.host:
# on the sending end of nova-compute _sync_power_state
# may have yielded to the greenthread performing a live
# migration; this in turn has changed the resident-host
# for the VM; However, the instance is still active, it
# is just in the process of migrating to another host.
# This implies that the compute source must relinquish
# control to the compute destination.
LOG.info(_LI("During the sync_power process the "
"instance has moved from "
"host %(src)s to host %(dst)s"),
{'src': db_instance.host,
'dst': self.host},
instance=db_instance)
return
elif db_instance.task_state is not None:
            # on the receiving end of nova-compute, it could happen
            # that the DB instance already reports the new resident
            # host, but the actual VM has not shown up on the
            # hypervisor yet. In this case, let's allow the loop to
            # continue and run the state sync in a later round.
LOG.info(_LI("During sync_power_state the instance has a "
"pending task (%(task)s). Skip."),
{'task': db_instance.task_state},
instance=db_instance)
return
if vm_power_state != db_power_state:
# power_state is always updated from hypervisor to db
db_instance.power_state = vm_power_state
db_instance.save()
db_power_state = vm_power_state
# Note(maoy): Now resolve the discrepancy between vm_state and
# vm_power_state. We go through all possible vm_states.
if vm_state in (vm_states.BUILDING,
vm_states.RESCUED,
vm_states.RESIZED,
vm_states.SUSPENDED,
vm_states.ERROR):
# TODO(maoy): we ignore these vm_state for now.
pass
elif vm_state == vm_states.ACTIVE:
# The only rational power state should be RUNNING
if vm_power_state in (power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warning(_LW("Instance shutdown by itself. Calling the "
"stop API. Current vm_state: %(vm_state)s, "
"current task_state: %(task_state)s, "
"current DB power_state: %(db_power_state)s, "
"current VM power_state: %(vm_power_state)s"),
{'vm_state': vm_state,
'task_state': db_instance.task_state,
'db_power_state': db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
try:
# Note(maoy): here we call the API instead of
# brutally updating the vm_state in the database
# to allow all the hooks and checks to be performed.
if db_instance.shutdown_terminate:
self.compute_api.delete(context, db_instance)
else:
self.compute_api.stop(context, db_instance)
except Exception:
# Note(maoy): there is no need to propagate the error
# because the same power_state will be retrieved next
# time and retried.
# For example, there might be another task scheduled.
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.SUSPENDED:
LOG.warning(_LW("Instance is suspended unexpectedly. Calling "
"the stop API."), instance=db_instance)
try:
self.compute_api.stop(context, db_instance)
except Exception:
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.PAUSED:
# Note(maoy): a VM may get into the paused state not only
# because the user request via API calls, but also
# due to (temporary) external instrumentations.
# Before the virt layer can reliably report the reason,
# we simply ignore the state discrepancy. In many cases,
# the VM state will go back to running after the external
# instrumentation is done. See bug 1097806 for details.
LOG.warning(_LW("Instance is paused unexpectedly. Ignore."),
instance=db_instance)
elif vm_power_state == power_state.NOSTATE:
# Occasionally, depending on the status of the hypervisor,
# which could be restarting for example, an instance may
# not be found. Therefore just log the condition.
LOG.warning(_LW("Instance is unexpectedly not found. Ignore."),
instance=db_instance)
elif vm_state == vm_states.STOPPED:
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warning(_LW("Instance is not stopped. Calling "
"the stop API. Current vm_state: %(vm_state)s,"
" current task_state: %(task_state)s, "
"current DB power_state: %(db_power_state)s, "
"current VM power_state: %(vm_power_state)s"),
{'vm_state': vm_state,
'task_state': db_instance.task_state,
'db_power_state': db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
try:
# NOTE(russellb) Force the stop, because normally the
# compute API would not allow an attempt to stop a stopped
# instance.
self.compute_api.force_stop(context, db_instance)
except Exception:
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_state == vm_states.PAUSED:
if vm_power_state in (power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warning(_LW("Paused instance shutdown by itself. Calling "
"the stop API."), instance=db_instance)
try:
self.compute_api.force_stop(context, db_instance)
except Exception:
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_state in (vm_states.SOFT_DELETED,
vm_states.DELETED):
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN):
# Note(maoy): this should be taken care of periodically in
# _cleanup_running_deleted_instances().
LOG.warning(_LW("Instance is not (soft-)deleted."),
instance=db_instance)
@periodic_task.periodic_task
def _reclaim_queued_deletes(self, context):
"""Reclaim instances that are queued for deletion."""
interval = CONF.reclaim_instance_interval
if interval <= 0:
LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...")
return
# TODO(comstud, jichenjc): Dummy quota object for now See bug 1296414.
# The only case that the quota might be inconsistent is
# the compute node died between set instance state to SOFT_DELETED
# and quota commit to DB. When compute node starts again
# it will have no idea the reservation is committed or not or even
# expired, since it's a rare case, so marked as todo.
quotas = objects.Quotas.from_reservations(context, None)
filters = {'vm_state': vm_states.SOFT_DELETED,
'task_state': None,
'host': self.host}
instances = objects.InstanceList.get_by_filters(
context, filters,
expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS,
use_slave=True)
for instance in instances:
if self._deleted_old_enough(instance, interval):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
LOG.info(_LI('Reclaiming deleted instance'), instance=instance)
try:
self._delete_instance(context, instance, bdms, quotas)
except Exception as e:
LOG.warning(_LW("Periodic reclaim failed to delete "
"instance: %s"),
e, instance=instance)
@periodic_task.periodic_task
def update_available_resource(self, context):
"""See driver.get_available_resource()
        Periodic process that keeps the compute host's understanding of
resource availability and usage in sync with the underlying hypervisor.
:param context: security context
"""
new_resource_tracker_dict = {}
nodenames = set(self.driver.get_available_nodes())
for nodename in nodenames:
rt = self._get_resource_tracker(nodename)
rt.update_available_resource(context)
new_resource_tracker_dict[nodename] = rt
# Delete orphan compute node not reported by driver but still in db
compute_nodes_in_db = self._get_compute_nodes_in_db(context,
use_slave=True)
for cn in compute_nodes_in_db:
if cn.hypervisor_hostname not in nodenames:
LOG.info(_LI("Deleting orphan compute node %s") % cn.id)
cn.destroy()
self._resource_tracker_dict = new_resource_tracker_dict
def _get_compute_nodes_in_db(self, context, use_slave=False):
try:
return objects.ComputeNodeList.get_all_by_host(context, self.host,
use_slave=use_slave)
except exception.NotFound:
LOG.error(_LE("No compute node record for host %s"), self.host)
return []
@periodic_task.periodic_task(
spacing=CONF.running_deleted_instance_poll_interval)
def _cleanup_running_deleted_instances(self, context):
"""Cleanup any instances which are erroneously still running after
having been deleted.
Valid actions to take are:
1. noop - do nothing
2. log - log which instances are erroneously running
3. reap - shutdown and cleanup any erroneously running instances
4. shutdown - power off *and disable* any erroneously running
instances
The use-case for this cleanup task is: for various reasons, it may be
possible for the database to show an instance as deleted but for that
instance to still be running on a host machine (see bug
https://bugs.launchpad.net/nova/+bug/911366).
This cleanup task is a cross-hypervisor utility for finding these
zombied instances and either logging the discrepancy (likely what you
should do in production), or automatically reaping the instances (more
appropriate for dev environments).
"""
action = CONF.running_deleted_instance_action
if action == "noop":
return
# NOTE(sirp): admin contexts don't ordinarily return deleted records
with utils.temporary_mutation(context, read_deleted="yes"):
for instance in self._running_deleted_instances(context):
if action == "log":
LOG.warning(_LW("Detected instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance.name, instance=instance)
elif action == 'shutdown':
LOG.info(_LI("Powering off instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance.name, instance=instance)
try:
try:
# disable starting the instance
self.driver.set_bootable(instance, False)
except NotImplementedError:
LOG.warning(_LW("set_bootable is not implemented "
"for the current driver"))
# and power it off
self.driver.power_off(instance)
except Exception:
msg = _("Failed to power off instance")
LOG.warn(msg, instance=instance, exc_info=True)
elif action == 'reap':
LOG.info(_LI("Destroying instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance.name, instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=True)
self.instance_events.clear_events_for_instance(instance)
try:
self._shutdown_instance(context, instance, bdms,
notify=False)
self._cleanup_volumes(context, instance.uuid, bdms)
except Exception as e:
LOG.warning(_LW("Periodic cleanup failed to delete "
"instance: %s"),
e, instance=instance)
else:
raise Exception(_("Unrecognized value '%s'"
" for CONF.running_deleted_"
"instance_action") % action)
def _running_deleted_instances(self, context):
"""Returns a list of instances nova thinks is deleted,
but the hypervisor thinks is still running.
"""
timeout = CONF.running_deleted_instance_timeout
filters = {'deleted': True,
'soft_deleted': False,
'host': self.host}
instances = self._get_instances_on_driver(context, filters)
return [i for i in instances if self._deleted_old_enough(i, timeout)]
def _deleted_old_enough(self, instance, timeout):
deleted_at = instance['deleted_at']
if isinstance(instance, obj_base.NovaObject) and deleted_at:
deleted_at = deleted_at.replace(tzinfo=None)
return (not deleted_at or timeutils.is_older_than(deleted_at, timeout))
@contextlib.contextmanager
def _error_out_instance_on_exception(self, context, instance,
quotas=None,
instance_state=vm_states.ACTIVE):
instance_uuid = instance.uuid
try:
yield
except NotImplementedError as error:
with excutils.save_and_reraise_exception():
if quotas:
quotas.rollback()
LOG.info(_LI("Setting instance back to %(state)s after: "
"%(error)s"),
{'state': instance_state, 'error': error},
instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid,
vm_state=instance_state,
task_state=None)
except exception.InstanceFaultRollback as error:
if quotas:
quotas.rollback()
LOG.info(_LI("Setting instance back to ACTIVE after: %s"),
error, instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid,
vm_state=vm_states.ACTIVE,
task_state=None)
raise error.inner_exception
except Exception:
LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance_uuid=instance_uuid)
with excutils.save_and_reraise_exception():
if quotas:
quotas.rollback()
self._set_instance_error_state(context, instance)
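    # Typical call pattern for the context manager above (illustrative
    # sketch, not a verbatim call site from this file):
    #
    #   with self._error_out_instance_on_exception(context, instance):
    #       ...  # driver operation that may raise and error out the instance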
@aggregate_object_compat
@wrap_exception()
def add_aggregate_host(self, context, aggregate, host, slave_info):
"""Notify hypervisor of change (for hypervisor pools)."""
try:
self.driver.add_to_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'add_aggregate_host')
except exception.AggregateError:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
aggregate.delete_host,
aggregate, host)
@aggregate_object_compat
@wrap_exception()
def remove_aggregate_host(self, context, host, slave_info, aggregate):
"""Removes a host from a physical hypervisor pool."""
try:
self.driver.remove_from_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'remove_aggregate_host')
except (exception.AggregateError,
exception.InvalidAggregateAction) as e:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
aggregate.add_host,
aggregate, host,
isinstance(e, exception.AggregateError))
def _process_instance_event(self, instance, event):
_event = self.instance_events.pop_instance_event(instance, event)
if _event:
LOG.debug('Processing event %(event)s',
{'event': event.key}, instance=instance)
_event.send(event)
@wrap_exception()
def external_instance_event(self, context, instances, events):
# NOTE(danms): Some event types are handled by the manager, such
# as when we're asked to update the instance's info_cache. If it's
# not one of those, look for some thread(s) waiting for the event and
# unblock them if so.
for event in events:
instance = [inst for inst in instances
if inst.uuid == event.instance_uuid][0]
LOG.debug('Received event %(event)s',
{'event': event.key},
instance=instance)
if event.name == 'network-changed':
self.network_api.get_instance_nw_info(context, instance)
else:
self._process_instance_event(instance, event)
@periodic_task.periodic_task(spacing=CONF.image_cache_manager_interval,
external_process_ok=True)
def _run_image_cache_manager_pass(self, context):
"""Run a single pass of the image cache manager."""
if not self.driver.capabilities["has_imagecache"]:
return
# Determine what other nodes use this storage
storage_users.register_storage_use(CONF.instances_path, CONF.host)
nodes = storage_users.get_storage_users(CONF.instances_path)
# Filter all_instances to only include those nodes which share this
# storage path.
# TODO(mikal): this should be further refactored so that the cache
# cleanup code doesn't know what those instances are, just a remote
# count, and then this logic should be pushed up the stack.
filters = {'deleted': False,
'soft_deleted': True,
'host': nodes}
filtered_instances = objects.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
self.driver.manage_image_cache(context, filtered_instances)
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _run_pending_deletes(self, context):
"""Retry any pending instance file deletes."""
LOG.debug('Cleaning up deleted instances')
filters = {'deleted': True,
'soft_deleted': False,
'host': CONF.host,
'cleaned': False}
attrs = ['info_cache', 'security_groups', 'system_metadata']
with utils.temporary_mutation(context, read_deleted='yes'):
instances = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=attrs, use_slave=True)
LOG.debug('There are %d instances to clean', len(instances))
for instance in instances:
attempts = int(instance.system_metadata.get('clean_attempts', '0'))
LOG.debug('Instance has had %(attempts)s of %(max)s '
'cleanup attempts',
{'attempts': attempts,
'max': CONF.maximum_instance_delete_attempts},
instance=instance)
if attempts < CONF.maximum_instance_delete_attempts:
success = self.driver.delete_instance_files(instance)
instance.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
instance.cleaned = True
with utils.temporary_mutation(context, read_deleted='yes'):
instance.save()
@messaging.expected_exceptions(exception.InstanceQuiesceNotSupported,
exception.NovaException,
NotImplementedError)
@wrap_exception()
def quiesce_instance(self, context, instance):
"""Quiesce an instance on this host."""
context = context.elevated()
image_ref = instance.image_ref
image_meta = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
self.driver.quiesce(context, instance, image_meta)
def _wait_for_snapshots_completion(self, context, mapping):
for mapping_dict in mapping:
if mapping_dict.get('source_type') == 'snapshot':
def _wait_snapshot():
snapshot = self.volume_api.get_snapshot(
context, mapping_dict['snapshot_id'])
if snapshot.get('status') != 'creating':
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_snapshot)
timer.start(interval=0.5).wait()
@messaging.expected_exceptions(exception.InstanceQuiesceNotSupported,
exception.NovaException,
NotImplementedError)
@wrap_exception()
def unquiesce_instance(self, context, instance, mapping=None):
"""Unquiesce an instance on this host.
If snapshots' image mapping is provided, it waits until snapshots are
        completed before unquiescing.
"""
context = context.elevated()
if mapping:
try:
self._wait_for_snapshots_completion(context, mapping)
except Exception as error:
LOG.exception(_LE("Exception while waiting completion of "
"volume snapshots: %s"),
error, instance=instance)
image_ref = instance.image_ref
image_meta = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
self.driver.unquiesce(context, instance, image_meta)
| {
"content_hash": "1e50d3f2f262c906676d42f14b722658",
"timestamp": "",
"source": "github",
"line_count": 6361,
"max_line_length": 79,
"avg_line_length": 45.71765445684641,
"alnum_prop": 0.5579209793335855,
"repo_name": "affo/nova",
"id": "b0975edae4a6d7680390c85645ffcbafacddeaee",
"size": "291580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/compute/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "15659662"
},
{
"name": "Shell",
"bytes": "20716"
}
],
"symlink_target": ""
} |
"""Pyhole Logging"""
import bz2
import glob
import logging
import logging.handlers
import os
import requests
import shutil
import utils
LOG_DIR = utils.get_directory("logs")
LOG_ARCHIVE_DIR = utils.get_directory(os.path.join("logs", "archive"))
LOG_FORMAT = "%(asctime)s [%(name)s] %(message)s"
LOG_DATEFMT = "%H:%M:%S"
class PyholeFileHandler(logging.handlers.TimedRotatingFileHandler):
def doRollover(self):
result = super(PyholeFileHandler, self).doRollover()
self.archive_old_logs()
return result
def archive_old_logs(self):
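        # Glob for rotated log files (e.g. "network.log.2017-08-06") while
        # skipping files that already end in ".bz2".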
matcher = "*.log.*[!b][!z][!2]"
files = glob.glob(os.path.join(LOG_DIR, matcher))
for file_path in files:
filename = os.path.basename(file_path)
compressed_filename = filename + ".bz2"
network_name = filename[:filename.rfind(".log")]
archive_dir = os.path.join(LOG_ARCHIVE_DIR, network_name)
utils.make_directory(archive_dir)
compressed_file_path = os.path.join(archive_dir,
compressed_filename)
with open(file_path, "rb") as fp:
with bz2.BZ2File(compressed_file_path, "wb",
compresslevel=9) as output:
shutil.copyfileobj(fp, output)
os.remove(file_path)
def setup_logger(name):
"""Setup the logger."""
# NOTE(jk0): Disable unnecessary requests logging.
requests.packages.urllib3.disable_warnings()
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
debug = utils.debug_enabled()
log_level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(level=log_level,
format=LOG_FORMAT,
datefmt=LOG_DATEFMT)
log_file = os.path.join(LOG_DIR, name.lower() + ".log")
log = PyholeFileHandler(log_file, "midnight")
log.setLevel(log_level)
formatter = logging.Formatter(LOG_FORMAT, LOG_DATEFMT)
log.setFormatter(formatter)
logging.getLogger(name).addHandler(log)
log.archive_old_logs()
def get_logger(name="Pyhole"):
return logging.getLogger(name)
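# Minimal usage sketch (illustrative; "Example" is a placeholder name):
#
#   setup_logger("Example")
#   log = get_logger("Example")
#   log.info("hello")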
| {
"content_hash": "2ed22221e5852454b3d4853bc4a0e319",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 70,
"avg_line_length": 30.625,
"alnum_prop": 0.6217687074829932,
"repo_name": "jk0/pyhole",
"id": "f271eb39e97680acb90fbc3675580a5f79e8d9b4",
"size": "2807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyhole/core/logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "410"
},
{
"name": "HTML",
"bytes": "4045"
},
{
"name": "Python",
"bytes": "113192"
},
{
"name": "Shell",
"bytes": "2134"
}
],
"symlink_target": ""
} |
import os
import ssl
import time
import datetime
import ipaddress
import sys
import typing
from pyasn1.type import univ, constraint, char, namedtype, tag
from pyasn1.codec.der.decoder import decode
from pyasn1.error import PyAsn1Error
import OpenSSL
from mitmproxy.coretypes import serializable
# Default expiry must not be too long: https://github.com/mitmproxy/mitmproxy/issues/815
DEFAULT_EXP = 94608000 # = 24 * 60 * 60 * 365 * 3
# Generated with "openssl dhparam". It's too slow to generate this on startup.
DEFAULT_DHPARAM = b"""
-----BEGIN DH PARAMETERS-----
MIICCAKCAgEAyT6LzpwVFS3gryIo29J5icvgxCnCebcdSe/NHMkD8dKJf8suFCg3
O2+dguLakSVif/t6dhImxInJk230HmfC8q93hdcg/j8rLGJYDKu3ik6H//BAHKIv
j5O9yjU3rXCfmVJQic2Nne39sg3CreAepEts2TvYHhVv3TEAzEqCtOuTjgDv0ntJ
Gwpj+BJBRQGG9NvprX1YGJ7WOFBP/hWU7d6tgvE6Xa7T/u9QIKpYHMIkcN/l3ZFB
chZEqVlyrcngtSXCROTPcDOQ6Q8QzhaBJS+Z6rcsd7X+haiQqvoFcmaJ08Ks6LQC
ZIL2EtYJw8V8z7C0igVEBIADZBI6OTbuuhDwRw//zU1uq52Oc48CIZlGxTYG/Evq
o9EWAXUYVzWkDSTeBH1r4z/qLPE2cnhtMxbFxuvK53jGB0emy2y1Ei6IhKshJ5qX
IB/aE7SSHyQ3MDHHkCmQJCsOd4Mo26YX61NZ+n501XjqpCBQ2+DfZCBh8Va2wDyv
A2Ryg9SUz8j0AXViRNMJgJrr446yro/FuJZwnQcO3WQnXeqSBnURqKjmqkeFP+d8
6mk2tqJaY507lRNqtGlLnj7f5RNoBFJDCLBNurVgfvq9TCVWKDIFD4vZRjCrnl6I
rD693XKIHUCWOjMh1if6omGXKHH40QuME2gNa50+YPn1iYDl88uDbbMCAQI=
-----END DH PARAMETERS-----
"""
def create_ca(o, cn, exp):
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
cert = OpenSSL.crypto.X509()
cert.set_serial_number(int(time.time() * 10000))
cert.set_version(2)
cert.get_subject().CN = cn
cert.get_subject().O = o
cert.gmtime_adj_notBefore(-3600 * 48)
cert.gmtime_adj_notAfter(exp)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(key)
cert.add_extensions([
OpenSSL.crypto.X509Extension(
b"basicConstraints",
True,
b"CA:TRUE"
),
OpenSSL.crypto.X509Extension(
b"nsCertType",
False,
b"sslCA"
),
OpenSSL.crypto.X509Extension(
b"extendedKeyUsage",
False,
b"serverAuth,clientAuth,emailProtection,timeStamping,msCodeInd,msCodeCom,msCTLSign,msSGC,msEFS,nsSGC"
),
OpenSSL.crypto.X509Extension(
b"keyUsage",
True,
b"keyCertSign, cRLSign"
),
OpenSSL.crypto.X509Extension(
b"subjectKeyIdentifier",
False,
b"hash",
subject=cert
),
])
cert.sign(key, "sha256")
return key, cert
def dummy_cert(privkey, cacert, commonname, sans):
"""
Generates a dummy certificate.
privkey: CA private key
cacert: CA certificate
commonname: Common name for the generated certificate.
sans: A list of Subject Alternate Names.
Returns cert if operation succeeded, None if not.
"""
ss = []
for i in sans:
try:
ipaddress.ip_address(i.decode("ascii"))
except ValueError:
ss.append(b"DNS:%s" % i)
else:
ss.append(b"IP:%s" % i)
ss = b", ".join(ss)
cert = OpenSSL.crypto.X509()
cert.gmtime_adj_notBefore(-3600 * 48)
cert.gmtime_adj_notAfter(DEFAULT_EXP)
cert.set_issuer(cacert.get_subject())
if commonname is not None and len(commonname) < 64:
cert.get_subject().CN = commonname
cert.set_serial_number(int(time.time() * 10000))
if ss:
cert.set_version(2)
cert.add_extensions(
[OpenSSL.crypto.X509Extension(b"subjectAltName", False, ss)])
cert.set_pubkey(cacert.get_pubkey())
cert.sign(privkey, "sha256")
return Cert(cert)
class CertStoreEntry:
def __init__(self, cert, privatekey, chain_file):
self.cert = cert
self.privatekey = privatekey
self.chain_file = chain_file
TCustomCertId = bytes # manually provided certs (e.g. mitmproxy's --certs)
TGeneratedCertId = typing.Tuple[typing.Optional[bytes], typing.Tuple[bytes, ...]] # (common_name, sans)
TCertId = typing.Union[TCustomCertId, TGeneratedCertId]
class CertStore:
"""
Implements an in-memory certificate store.
"""
STORE_CAP = 100
def __init__(
self,
default_privatekey,
default_ca,
default_chain_file,
dhparams):
self.default_privatekey = default_privatekey
self.default_ca = default_ca
self.default_chain_file = default_chain_file
self.dhparams = dhparams
self.certs = {} # type: typing.Dict[TCertId, CertStoreEntry]
self.expire_queue = []
def expire(self, entry):
self.expire_queue.append(entry)
if len(self.expire_queue) > self.STORE_CAP:
d = self.expire_queue.pop(0)
for k, v in list(self.certs.items()):
if v == d:
del self.certs[k]
@staticmethod
def load_dhparam(path):
# mitmproxy<=0.10 doesn't generate a dhparam file.
        # Create it now if necessary.
if not os.path.exists(path):
with open(path, "wb") as f:
f.write(DEFAULT_DHPARAM)
bio = OpenSSL.SSL._lib.BIO_new_file(path.encode(sys.getfilesystemencoding()), b"r")
if bio != OpenSSL.SSL._ffi.NULL:
bio = OpenSSL.SSL._ffi.gc(bio, OpenSSL.SSL._lib.BIO_free)
dh = OpenSSL.SSL._lib.PEM_read_bio_DHparams(
bio,
OpenSSL.SSL._ffi.NULL,
OpenSSL.SSL._ffi.NULL,
OpenSSL.SSL._ffi.NULL)
dh = OpenSSL.SSL._ffi.gc(dh, OpenSSL.SSL._lib.DH_free)
return dh
@classmethod
def from_store(cls, path, basename):
ca_path = os.path.join(path, basename + "-ca.pem")
if not os.path.exists(ca_path):
key, ca = cls.create_store(path, basename)
else:
with open(ca_path, "rb") as f:
raw = f.read()
ca = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM,
raw)
key = OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM,
raw)
dh_path = os.path.join(path, basename + "-dhparam.pem")
dh = cls.load_dhparam(dh_path)
return cls(key, ca, ca_path, dh)
@staticmethod
def create_store(path, basename, o=None, cn=None, expiry=DEFAULT_EXP):
if not os.path.exists(path):
os.makedirs(path)
o = o or basename
cn = cn or basename
key, ca = create_ca(o=o, cn=cn, exp=expiry)
# Dump the CA plus private key
with open(os.path.join(path, basename + "-ca.pem"), "wb") as f:
f.write(
OpenSSL.crypto.dump_privatekey(
OpenSSL.crypto.FILETYPE_PEM,
key))
f.write(
OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM,
ca))
# Dump the certificate in PEM format
with open(os.path.join(path, basename + "-ca-cert.pem"), "wb") as f:
f.write(
OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM,
ca))
# Create a .cer file with the same contents for Android
with open(os.path.join(path, basename + "-ca-cert.cer"), "wb") as f:
f.write(
OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM,
ca))
# Dump the certificate in PKCS12 format for Windows devices
with open(os.path.join(path, basename + "-ca-cert.p12"), "wb") as f:
p12 = OpenSSL.crypto.PKCS12()
p12.set_certificate(ca)
f.write(p12.export())
# Dump the certificate and key in a PKCS12 format for Windows devices
with open(os.path.join(path, basename + "-ca.p12"), "wb") as f:
p12 = OpenSSL.crypto.PKCS12()
p12.set_certificate(ca)
p12.set_privatekey(key)
f.write(p12.export())
with open(os.path.join(path, basename + "-dhparam.pem"), "wb") as f:
f.write(DEFAULT_DHPARAM)
return key, ca
def add_cert_file(self, spec: str, path: str) -> None:
with open(path, "rb") as f:
raw = f.read()
cert = Cert(
OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM,
raw))
try:
privatekey = OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM,
raw)
except Exception:
privatekey = self.default_privatekey
self.add_cert(
CertStoreEntry(cert, privatekey, path),
spec.encode("idna")
)
def add_cert(self, entry: CertStoreEntry, *names: bytes):
"""
Adds a cert to the certstore. We register the CN in the cert plus
any SANs, and also the list of names provided as an argument.
"""
if entry.cert.cn:
self.certs[entry.cert.cn] = entry
for i in entry.cert.altnames:
self.certs[i] = entry
for i in names:
self.certs[i] = entry
@staticmethod
def asterisk_forms(dn: bytes) -> typing.List[bytes]:
"""
Return all asterisk forms for a domain. For example, for www.example.com this will return
[b"www.example.com", b"*.example.com", b"*.com"]. The single wildcard "*" is omitted.
"""
parts = dn.split(b".")
ret = [dn]
for i in range(1, len(parts)):
ret.append(b"*." + b".".join(parts[i:]))
return ret
def get_cert(self, commonname: typing.Optional[bytes], sans: typing.List[bytes]):
"""
Returns an (cert, privkey, cert_chain) tuple.
commonname: Common name for the generated certificate. Must be a
valid, plain-ASCII, IDNA-encoded domain name.
sans: A list of Subject Alternate Names.
"""
potential_keys = [] # type: typing.List[TCertId]
if commonname:
potential_keys.extend(self.asterisk_forms(commonname))
for s in sans:
potential_keys.extend(self.asterisk_forms(s))
potential_keys.append(b"*")
potential_keys.append((commonname, tuple(sans)))
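        # e.g. for commonname=b"www.example.com" and sans=[b"example.org"]
        # the lookup order is: b"www.example.com", b"*.example.com",
        # b"*.com", b"example.org", b"*.org", b"*" and finally the
        # generated-cert key (b"www.example.com", (b"example.org",)).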
name = next(
filter(lambda key: key in self.certs, potential_keys),
None
)
if name:
entry = self.certs[name]
else:
entry = CertStoreEntry(
cert=dummy_cert(
self.default_privatekey,
self.default_ca,
commonname,
sans),
privatekey=self.default_privatekey,
chain_file=self.default_chain_file)
self.certs[(commonname, tuple(sans))] = entry
self.expire(entry)
return entry.cert, entry.privatekey, entry.chain_file
class _GeneralName(univ.Choice):
# We only care about dNSName and iPAddress
componentType = namedtype.NamedTypes(
namedtype.NamedType('dNSName', char.IA5String().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)
)),
namedtype.NamedType('iPAddress', univ.OctetString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7)
)),
)
class _GeneralNames(univ.SequenceOf):
componentType = _GeneralName()
sizeSpec = univ.SequenceOf.sizeSpec + \
constraint.ValueSizeConstraint(1, 1024)
class Cert(serializable.Serializable):
def __init__(self, cert):
"""
        Wraps an OpenSSL.crypto.X509 certificate object.
"""
self.x509 = cert
def __eq__(self, other):
return self.digest("sha256") == other.digest("sha256")
def get_state(self):
return self.to_pem()
def set_state(self, state):
self.x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, state)
@classmethod
def from_state(cls, state):
return cls.from_pem(state)
@classmethod
def from_pem(cls, txt):
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, txt)
return cls(x509)
@classmethod
def from_der(cls, der):
pem = ssl.DER_cert_to_PEM_cert(der)
return cls.from_pem(pem)
def to_pem(self):
return OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM,
self.x509)
def digest(self, name):
return self.x509.digest(name)
@property
def issuer(self):
return self.x509.get_issuer().get_components()
@property
def notbefore(self):
t = self.x509.get_notBefore()
return datetime.datetime.strptime(t.decode("ascii"), "%Y%m%d%H%M%SZ")
@property
def notafter(self):
t = self.x509.get_notAfter()
return datetime.datetime.strptime(t.decode("ascii"), "%Y%m%d%H%M%SZ")
@property
def has_expired(self):
return self.x509.has_expired()
@property
def subject(self):
return self.x509.get_subject().get_components()
@property
def serial(self):
return self.x509.get_serial_number()
@property
def keyinfo(self):
pk = self.x509.get_pubkey()
types = {
OpenSSL.crypto.TYPE_RSA: "RSA",
OpenSSL.crypto.TYPE_DSA: "DSA",
}
return (
types.get(pk.type(), "UNKNOWN"),
pk.bits()
)
@property
def cn(self):
c = None
for i in self.subject:
if i[0] == b"CN":
c = i[1]
return c
@property
def altnames(self):
"""
Returns:
All DNS altnames.
"""
# tcp.TCPClient.convert_to_tls assumes that this property only contains DNS altnames for hostname verification.
altnames = []
for i in range(self.x509.get_extension_count()):
ext = self.x509.get_extension(i)
if ext.get_short_name() == b"subjectAltName":
try:
dec = decode(ext.get_data(), asn1Spec=_GeneralNames())
except PyAsn1Error:
continue
for i in dec[0]:
if i[0].hasValue():
e = i[0].asOctets()
altnames.append(e)
return altnames
| {
"content_hash": "67f4443fcdbf055ea59f208e4bc117ed",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 119,
"avg_line_length": 32.12141280353201,
"alnum_prop": 0.5828465397567177,
"repo_name": "MatthewShao/mitmproxy",
"id": "4e10529abb01b33a4538725725372d28f14e6074",
"size": "14551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mitmproxy/certs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20941"
},
{
"name": "HTML",
"bytes": "14747"
},
{
"name": "JavaScript",
"bytes": "276302"
},
{
"name": "PowerShell",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1726585"
},
{
"name": "Shell",
"bytes": "4644"
}
],
"symlink_target": ""
} |
"""
BART: Denoising Sequence-to-Sequence Pre-training for
Natural Language Generation, Translation, and Comprehension
See https://arxiv.org/abs/1910.13461.
The BART agent can be instantiated simply as `-m bart`,
however it is recommended to specify `--init-model zoo:bart/bart_large/model`
or `-mf zoo:bart/bart_large/model` to ensure correct dictionaries are saved.
"""
import os
import torch
from typing import Optional, Dict, Any
from parlai.agents.bart.convert_fairseq_to_parlai import ConversionScript
from parlai.agents.bart.modules import BartModel
from parlai.agents.transformer.transformer import TransformerGeneratorAgent
from parlai.core.agents import compare_init_model_opts
from parlai.core.message import Message
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
from parlai.core.torch_agent import History
from parlai.utils.typing import TShared
from parlai.utils.io import PathManager
from parlai.zoo.bart.build import download, CONVERSION_ARGS, BART_ARGS
class BartAgent(TransformerGeneratorAgent):
"""
BART Agent.
Relies on the BART model implemented in fairseq.
If you have a fine-tuned BART model from fairseq, you can specify the
`--init-fairseq-model` arg, which will convert your fine-tuned model
to a ParlAI model.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
"""
Override to add init-fairseq-model arg.
"""
super().add_cmdline_args(parser, partial_opt=partial_opt)
group = parser.add_argument_group('Bart Args')
group.add_argument(
'--init-fairseq-model',
type=str,
default=None,
help='fairseq checkpoint for bart',
)
group.add_argument(
'--output-conversion-path',
type=str,
default=None,
help='where to save fairseq conversion',
)
parser.set_defaults(dict_tokenizer='gpt2')
parser.set_defaults(**BART_ARGS)
return parser
def __init__(self, opt: Opt, shared: TShared = None):
if not shared:
opt = self._initialize_bart(opt)
super().__init__(opt, shared)
def _initialize_bart(self, opt: Opt) -> Opt:
"""
Download and convert BART pre-trained models.
Additionally, convert `init-fairseq-model` if necessary.
:param opt:
ParlAI-parsed options
:return opt:
return opt with BART-specific args.
"""
init_model, _ = self._get_init_model(opt, None)
if not opt.get('converting') and (
init_model is None or not PathManager.exists(init_model)
):
download(opt['datapath'])
opt['init_model'] = os.path.join(
opt['datapath'], 'models/bart/bart_large/model'
)
if opt.get('init_fairseq_model'):
opt = self._convert_model(opt)
compare_init_model_opts(opt, opt)
return opt
def _get_conversion_args(self, opt: Opt) -> Dict[str, Any]:
"""
Get args for fairseq model conversion.
:param opt:
ParlAI Opt
:return args:
returns dictionary of args to send to conversion script.
"""
model_name = os.path.split(opt['init_fairseq_model'])[-1]
args = CONVERSION_ARGS.copy()
args['input'] = [opt['init_fairseq_model']]
if opt.get('model_file') and not os.path.exists(opt['model_file']):
args['output'] = opt['model_file']
elif opt.get('output_conversion_path'):
args['output'] = opt['output_conversion_path']
else:
args['output'] = os.path.join(
opt['datapath'], 'models/converted_fairseq_models/', model_name
)
return args
def _convert_model(self, opt: Opt) -> Opt:
"""
Convert fairseq init model to ParlAI Model.
:param opt:
options
:return opt:
return opt with new init_model path
"""
args = self._get_conversion_args(opt)
ConversionScript.main(**args)
opt['init_model'] = args['output']
return opt
def build_model(self) -> BartModel:
"""
Build and return model.
"""
model = BartModel(self.opt, self.dict)
if self.opt['embedding_type'] != 'random':
self._copy_embeddings(
model.encoder.embeddings.weight, self.opt['embedding_type']
)
return model
def _set_text_vec(
self, obs: Message, history: History, truncate: Optional[int]
) -> Message:
"""
Override to prepend start token and append end token.
"""
obs = super()._set_text_vec(obs, history, truncate)
if 'text' not in obs or 'text_vec' not in obs:
return obs
vec = obs['text_vec']
# add start/end tokens
if 'added_start_end_tokens' not in obs:
if truncate is not None:
vec = torch.LongTensor( # type: ignore
self._check_truncate(obs['text_vec'], truncate - 2, True)
)
obs.force_set(
'text_vec',
self._add_start_end_tokens(vec, add_start=True, add_end=True),
)
obs['added_start_end_tokens'] = True
return obs
def _get_initial_decoder_input(
self, bsz: int, beam_size: int, dev: torch.device
) -> torch.LongTensor:
"""
        Override to seed the decoder with the EOS and BOS tokens.
"""
return (
torch.LongTensor([self.END_IDX, self.START_IDX]) # type: ignore
.expand(bsz * beam_size, 2)
.to(dev)
)
| {
"content_hash": "2ac324af309aba70066cfd9add6f1400",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 79,
"avg_line_length": 32.248618784530386,
"alnum_prop": 0.5886585574781565,
"repo_name": "facebookresearch/ParlAI",
"id": "27cd5ee39f714a4de1a9125d87b33e87767a93c1",
"size": "6036",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parlai/agents/bart/bart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
} |
from .serializers import ActivitySerializer
from ..models import Activity
from helpers.api.viewsets import AuthenticatedModelViewSet
from rest_framework import viewsets
class ActivityViewSet(AuthenticatedModelViewSet):
serializer_class = ActivitySerializer
queryset = Activity.objects.all()
| {
"content_hash": "b2d43bd73e6cbef7314b2ff248aa0c22",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 58,
"avg_line_length": 30.2,
"alnum_prop": 0.8311258278145696,
"repo_name": "toss-app/toss-backend",
"id": "250a37415b0b1fa21be704227be043132ec24f70",
"size": "302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toss/activities/api/viewsets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "723"
},
{
"name": "Python",
"bytes": "23372"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
from hashlib import sha1
from os import link, makedirs, path, remove
import shutil
from subprocess import check_call, CalledProcessError
from sys import stderr
from util import hash_file, resolve_url
from zipfile import ZipFile, BadZipfile, LargeZipFile
CURRENT_LOCATION = path.dirname(path.realpath(__file__))
GERRIT_HOME = path.expanduser('~/.gerritcodereview')
CACHE_DIR = path.join(GERRIT_HOME, 'bazel-cache', 'downloaded-artifacts')
LOCAL_PROPERTIES = 'local.properties'
def safe_mkdirs(d):
if path.isdir(d):
return
try:
makedirs(d)
except OSError as err:
if not path.isdir(d):
raise err
def download_properties(root_dir):
""" Get the download properties.
    First tries to find the properties file in the given root directory;
    if not found there, tries the Gerrit settings folder in the user's
    home directory; if not found there either, falls back to the current
    execution path of the gerrit build.
    Returns a dictionary of download properties, which may be empty.
"""
p = {}
local_prop = path.join(root_dir, LOCAL_PROPERTIES)
if not path.isfile(local_prop):
local_prop = path.join(GERRIT_HOME, LOCAL_PROPERTIES)
if not path.isfile(local_prop):
gerrit_workspace = path.join(CURRENT_LOCATION, "../", 'WORKSPACE')
if path.isfile(gerrit_workspace):
local_prop = path.join(CURRENT_LOCATION, "../", LOCAL_PROPERTIES)
if path.isfile(local_prop):
try:
with open(local_prop) as fd:
for line in fd:
if line.startswith('download.'):
d = [e.strip() for e in line.split('=', 1)]
name, url = d[0], d[1]
p[name[len('download.'):]] = url
except OSError:
pass
return p
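# Example local.properties content understood by download_properties()
# (the property names are illustrative; any "download.<NAME>" key works):
#
#   download.GERRIT = http://mirror.example.com/gerrit/
#   download.MAVEN_CENTRAL = http://mirror.example.com/maven2/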
def cache_entry(args):
if args.v:
h = args.v
else:
h = sha1(args.u.encode('utf-8')).hexdigest()
name = '%s-%s' % (path.basename(args.o), h)
return path.join(CACHE_DIR, name)
parser = argparse.ArgumentParser()
parser.add_argument('-o', help='local output file')
parser.add_argument('-u', help='URL to download')
parser.add_argument('-v', help='expected content SHA-1')
parser.add_argument('-x', action='append', help='file to delete from ZIP')
parser.add_argument('--exclude_java_sources', action='store_true')
parser.add_argument('--unsign', action='store_true')
args = parser.parse_args()
root_dir = args.o
while root_dir and path.dirname(root_dir) != root_dir:
root_dir, n = path.split(root_dir)
if n == 'WORKSPACE':
break
redirects = download_properties(root_dir)
cache_ent = cache_entry(args)
src_url = resolve_url(args.u, redirects)
if not path.exists(cache_ent):
try:
safe_mkdirs(path.dirname(cache_ent))
except OSError as err:
print('error creating directory %s: %s' %
(path.dirname(cache_ent), err), file=stderr)
exit(1)
print('Download %s' % src_url, file=stderr)
try:
check_call(['curl', '--proxy-anyauth', '-ksSfLo', cache_ent, src_url])
except OSError as err:
print('could not invoke curl: %s\nis curl installed?' % err,
file=stderr)
exit(1)
except CalledProcessError as err:
print('error using curl: %s' % err, file=stderr)
exit(1)
if args.v:
have = hash_file(sha1(), cache_ent).hexdigest()
if args.v != have:
print((
'%s:\n' +
'expected %s\n' +
'received %s\n') % (src_url, args.v, have), file=stderr)
try:
remove(cache_ent)
except OSError as err:
if path.exists(cache_ent):
print('error removing %s: %s' % (cache_ent, err), file=stderr)
exit(1)
exclude = []
if args.x:
exclude += args.x
if args.exclude_java_sources:
try:
with ZipFile(cache_ent, 'r') as zf:
for n in zf.namelist():
if n.endswith('.java'):
exclude.append(n)
except (BadZipfile, LargeZipFile) as err:
print('error opening %s: %s' % (cache_ent, err), file=stderr)
exit(1)
if args.unsign:
try:
with ZipFile(cache_ent, 'r') as zf:
for n in zf.namelist():
if (n.endswith('.RSA')
or n.endswith('.SF')
or n.endswith('.LIST')):
exclude.append(n)
except (BadZipfile, LargeZipFile) as err:
print('error opening %s: %s' % (cache_ent, err), file=stderr)
exit(1)
safe_mkdirs(path.dirname(args.o))
if exclude:
try:
shutil.copyfile(cache_ent, args.o)
except (shutil.Error, IOError) as err:
print('error copying to %s: %s' % (args.o, err), file=stderr)
exit(1)
try:
check_call(['zip', '-d', args.o] + exclude)
except CalledProcessError as err:
print('error removing files from zip: %s' % err, file=stderr)
exit(1)
else:
try:
link(cache_ent, args.o)
except OSError as err:
try:
shutil.copyfile(cache_ent, args.o)
except (shutil.Error, IOError) as err:
print('error copying to %s: %s' % (args.o, err), file=stderr)
exit(1)
| {
"content_hash": "e73b261cd6eff543318edf8da8e5e569",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 78,
"avg_line_length": 31.892215568862277,
"alnum_prop": 0.5912504693954187,
"repo_name": "WANdisco/gerrit",
"id": "bd498f9b16941b1489f2c2892a8595282743a2a8",
"size": "5944",
"binary": false,
"copies": "1",
"ref": "refs/heads/2.16.21_WD",
"path": "tools/download_file.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "47431"
},
{
"name": "GAP",
"bytes": "4119"
},
{
"name": "Go",
"bytes": "5563"
},
{
"name": "HTML",
"bytes": "726266"
},
{
"name": "Java",
"bytes": "11491861"
},
{
"name": "JavaScript",
"bytes": "404723"
},
{
"name": "Makefile",
"bytes": "7107"
},
{
"name": "PLpgSQL",
"bytes": "3576"
},
{
"name": "Perl",
"bytes": "9943"
},
{
"name": "Prolog",
"bytes": "17904"
},
{
"name": "Python",
"bytes": "267395"
},
{
"name": "Roff",
"bytes": "32749"
},
{
"name": "Shell",
"bytes": "133358"
}
],
"symlink_target": ""
} |
from flask.ext.wtf import Form
from wtforms.fields import TextField,\
PasswordField,\
BooleanField
from wtforms.validators import Required
class LoginForm(Form):
username = TextField('Username', validators=[Required()])
password = PasswordField('Password', validators=[Required()])
remember = BooleanField('Remember Me', default=False)
| {
"content_hash": "a3f1023db14423e26d1221b7b0ec0c04",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 65,
"avg_line_length": 33.45454545454545,
"alnum_prop": 0.7255434782608695,
"repo_name": "taeram/ineffable",
"id": "d897c02f5be927b4cfbf3a45dba51785ddccd5e1",
"size": "368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/forms/login.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5389"
},
{
"name": "HTML",
"bytes": "15833"
},
{
"name": "JavaScript",
"bytes": "56491"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "44942"
}
],
"symlink_target": ""
} |
from zimsoap import zobjects
class MethodMixin:
def create_contact(self, attrs, members=None, folder_id=None, tags=None):
"""Create a contact
        Does not include VCARD or group membership yet
XML example :
<cn l="7> ## ContactSpec
<a n="lastName">MARTIN</a>
<a n="firstName">Pierre</a>
<a n="email">[email protected]</a>
</cn>
Which would be in zimsoap : attrs = { 'lastname': 'MARTIN',
'firstname': 'Pierre',
'email': '[email protected]' }
folder_id = 7
        :param folder_id: a string with the ID of the folder in which to create
            the contact. Default '7'
:param tags: comma-separated list of tag names
:param attrs: a dictionary of attributes to set ({key:value,...}). At
least one attr is required
:returns: the created zobjects.Contact
"""
cn = {}
if folder_id:
cn['l'] = str(folder_id)
if tags:
tags = self._return_comma_list(tags)
cn['tn'] = tags
if members:
cn['m'] = members
attrs = [{'n': k, '_content': v} for k, v in attrs.items()]
cn['a'] = attrs
resp = self.request_single('CreateContact', {'cn': cn})
return zobjects.Contact.from_dict(resp)
def get_contacts(self, ids=None, **kwargs):
""" Get all contacts for the current user
:param l: string of a folder id
        :param ids: A comma-separated list of contact IDs to look for
:returns: a list of zobjects.Contact
"""
params = {}
if ids:
ids = self._return_comma_list(ids)
params['cn'] = {'id': ids}
for key, value in kwargs.items():
if key in ['a', 'ma']:
params[key] = {'n': value}
else:
params[key] = value
contacts = self.request_list('GetContacts', params)
return [zobjects.Contact.from_dict(i) for i in contacts]
def modify_contact(self, contact_id, attrs=None, members=None, tags=None):
"""
        :param contact_id: zimbra id of the targeted contact
:param attrs : a dictionary of attributes to set ({key:value,...})
:param members: list of dict representing contacts and
operation (+|-|reset)
:param tags: comma-separated list of tag names
:returns: the modified zobjects.Contact
"""
cn = {}
if tags:
tags = self._return_comma_list(tags)
cn['tn'] = tags
if members:
cn['m'] = members
if attrs:
attrs = [{'n': k, '_content': v} for k, v in attrs.items()]
cn['a'] = attrs
cn['id'] = contact_id
resp = self.request_single('ModifyContact', {'cn': cn})
return zobjects.Contact.from_dict(resp)
def delete_contacts(self, ids):
""" Delete selected contacts for the current user
:param ids: list of ids
"""
str_ids = self._return_comma_list(ids)
self.request('ContactAction', {'action': {'op': 'delete',
'id': str_ids}})
def create_group(self, attrs, members, folder_id=None, tags=None):
"""Create a contact group
XML example :
<cn l="7> ## ContactSpec
<a n="lastName">MARTIN</a>
<a n="firstName">Pierre</a>
<a n="email">[email protected]</a>
</cn>
Which would be in zimsoap : attrs = { 'lastname': 'MARTIN',
'firstname': 'Pierre',
'email': '[email protected]' }
folder_id = 7
        :param folder_id: a string with the ID of the folder in which to create
            the contact. Default '7'
:param tags: comma-separated list of tag names
:param members: list of dict. Members with their type. Example
{'type': 'I', 'value': '[email protected]'}.
:param attrs: a dictionary of attributes to set ({key:value,...}). At
least one attr is required
:returns: the created zobjects.Contact
"""
cn = {}
cn['m'] = members
if folder_id:
cn['l'] = str(folder_id)
if tags:
cn['tn'] = tags
attrs = [{'n': k, '_content': v} for k, v in attrs.items()]
attrs.append({'n': 'type', '_content': 'group'})
cn['a'] = attrs
resp = self.request_single('CreateContact', {'cn': cn})
return zobjects.Contact.from_dict(resp)
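# A usage sketch (an assumption, not from this repo) of the mixin methods above,
# called through a connected zimsoap mail client. The client class, login call,
# account and attribute values are hypothetical placeholders.
# client = ZimbraMailClient('mail.example.com')
# client.login('[email protected]', 'secret')
# contact = client.create_contact(
#     attrs={'lastName': 'MARTIN', 'firstName': 'Pierre',
#            'email': '[email protected]'},
#     folder_id=7)
# client.modify_contact(contact.id, attrs={'email': '[email protected]'})
# client.delete_contacts([contact.id])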
| {
"content_hash": "26e5303afc84b2696cd4dc1fd06795a0",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 79,
"avg_line_length": 34.94117647058823,
"alnum_prop": 0.5090488215488216,
"repo_name": "zacbri/zimsoap",
"id": "a012ec836c742dffd337cec1862153f7917a8d76",
"size": "4752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zimsoap/client/mail/methods/contacts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "54"
},
{
"name": "Python",
"bytes": "266657"
}
],
"symlink_target": ""
} |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='photos2geojson',
version='1.3',
description="Makes geojson from EXIF data.",
author="Visgean Skeloru",
author_email='[email protected]',
url='https://github.com/visgean/photos2geojson',
packages=[
'photos2geojson',
],
package_dir={'photos2geojson': 'photos2geojson'},
license="MIT",
keywords='photos geojson exif',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
install_requires=[
'requests',
'exifread',
],
entry_points={
'console_scripts': [
'photos2geojson = photos2geojson.main:main'
]
},
package_data={
'photos2geojson': ['*.html']
},
) | {
"content_hash": "b1cbf57a3ae383e53a271654d37ad670",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 55,
"avg_line_length": 24.405405405405407,
"alnum_prop": 0.5891472868217055,
"repo_name": "Visgean/photos2geojson",
"id": "57e7b1d185b52bb68bee1fa8fa4e642121fab168",
"size": "951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2112"
},
{
"name": "Makefile",
"bytes": "936"
},
{
"name": "Python",
"bytes": "6674"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import datetime
import jenkins
from celery.utils.log import get_task_logger
from django.conf import settings
from django.utils import timezone
logger = get_task_logger(__name__)
def _get_jenkins_client(jenkins_config):
return jenkins.Jenkins(jenkins_config.jenkins_api,
username=jenkins_config.jenkins_user,
password=jenkins_config.jenkins_pass)
def get_job_status(jenkins_config, jobname):
ret = {
'active': None,
'succeeded': None,
'job_number': None,
'blocked_build_time': None,
}
client = _get_jenkins_client(jenkins_config)
try:
job = client.get_job_info(jobname)
last_completed_build = job['lastCompletedBuild']
if not last_completed_build:
raise Exception("job has no build")
last_build = client.get_build_info(jobname, last_completed_build['number'])
if job['lastSuccessfulBuild']:
last_good_build_number = job['lastSuccessfulBuild']['number']
else:
last_good_build_number = 0
ret['status_code'] = 200
ret['job_number'] = last_build['number']
ret['active'] = job['color'] != 'disabled'
ret['succeeded'] = ret['active'] and last_build['result'] == 'SUCCESS'
ret['consecutive_failures'] = last_build['number'] - last_good_build_number
if job['inQueue']:
in_queued_since = job['queueItem']['inQueueSince']
time_blocked_since = datetime.utcfromtimestamp(
float(in_queued_since) / 1000).replace(tzinfo=timezone.utc)
ret['blocked_build_time'] = (timezone.now() - time_blocked_since).total_seconds()
ret['queued_job_number'] = job['lastBuild']['number']
return ret
except jenkins.NotFoundException:
ret['status_code'] = 404
return ret
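# A minimal usage sketch (an assumption, not from cabot itself): get_job_status()
# only reads the three attributes used by _get_jenkins_client(), so any object
# carrying them works. The URL, credentials and job name are hypothetical.
# import collections
# JenkinsConfig = collections.namedtuple(
#     'JenkinsConfig', 'jenkins_api jenkins_user jenkins_pass')
# cfg = JenkinsConfig('https://jenkins.example.com/', 'bot', 'api-token')
# status = get_job_status(cfg, 'nightly-build')
# if status['status_code'] == 200 and not status['succeeded']:
#     print('job failing for %d builds' % status['consecutive_failures'])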
| {
"content_hash": "6d74a8c64f143971adda19798b9edf9d",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 93,
"avg_line_length": 36.37735849056604,
"alnum_prop": 0.6161825726141079,
"repo_name": "arachnys/cabot",
"id": "362e6ec11fb8eeb605806b4f9d913d6af6a84451",
"size": "1928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cabot/cabotapp/jenkins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21368"
},
{
"name": "Dockerfile",
"bytes": "818"
},
{
"name": "HTML",
"bytes": "69814"
},
{
"name": "JavaScript",
"bytes": "368548"
},
{
"name": "Less",
"bytes": "657"
},
{
"name": "Python",
"bytes": "214687"
},
{
"name": "Shell",
"bytes": "4127"
}
],
"symlink_target": ""
} |
from django.urls import reverse_lazy
from django.views.generic import CreateView, DeleteView, DetailView, ListView
from boilerplate.mixins import (
ActionListMixin, CreateMessageMixin, DeleteMessageMixin
)
from core import tasks
from core.models import Invite
from core.views.mixins import (
CompanyCreateMixin, CompanyQuerySetMixin, ModelActionMixin
)
from panel import forms
class InviteListView(
ActionListMixin, CompanyQuerySetMixin, ListView
):
action_list = ('add', )
model = Invite
paginate_by = 30
permission_required = 'core:view_invite'
related_properties = ('user', )
template_name = 'panel/invite/invite_list.html'
class InviteCreateView(
CompanyCreateMixin, CreateMessageMixin, CreateView
):
model = Invite
permission_required = 'core:add_invite'
success_url = reverse_lazy('panel:invite_list')
template_name = 'panel/invite/invite_form.html'
def get_form_class(self):
return forms.get_invite_form(self.request.company)
class InviteDeleteView(
CompanyQuerySetMixin, DeleteMessageMixin, DeleteView
):
model = Invite
permission_required = 'core:delete_invite'
success_url = reverse_lazy('panel:invite_list')
template_name = 'panel/invite/invite_form.html'
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(
date_send__isnull=True
)
class InviteSendView(
ModelActionMixin, DetailView
):
model_action = 'send'
model = Invite
permission_required = 'core:send_invite'
success_url = reverse_lazy('panel:invite_list')
task_module = tasks
def get_action_kwargs(self, **kwargs):
kwargs = super().get_action_kwargs(**kwargs)
kwargs.update({
'scheme': self.request.scheme,
'host': self.request.get_host()
})
return kwargs
| {
"content_hash": "8ad2b11422d0066991bb266618d76773",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 77,
"avg_line_length": 27.057971014492754,
"alnum_prop": 0.6898768077129084,
"repo_name": "ikcam/django-skeleton",
"id": "242675f6fef8398d1fad4630e03246cba804e1e7",
"size": "1867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "panel/views/invite.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "198458"
},
{
"name": "HTML",
"bytes": "75155"
},
{
"name": "JavaScript",
"bytes": "28974"
},
{
"name": "Python",
"bytes": "217638"
},
{
"name": "Shell",
"bytes": "1972"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from django.contrib import admin
from .views import (reports_edit_category, reports_edit_product,
reports_edit_report, reports_edit_unit, reports_navigate,
reports_new_category, reports_new_product,
reports_new_report, reports_new_unit,
reports_show_report)
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', reports_navigate, name='navigate'),
url(r'^(?P<report_id>\d{0,17})/$', reports_show_report, name='show'),
url(r'^new/$', reports_new_report, name='new'),
url(r'^edit/(?P<report_id>\d{0,17})/$', reports_edit_report, name='edit'),
url(r'^new/category/$', reports_new_category, name='new_category'),
url(
r'^edit/category/(?P<category_id>\d{0,17})/$',
reports_edit_category,
name='edit_category'
),
url(r'^new/unit/$', reports_new_unit, name='new_unit'),
url(
r'^edit/unit/(?P<unit_id>\d{0,17})/$',
reports_edit_unit,
name='edit_unit'
),
url(r'^new/product/$', reports_new_product, name='new_product'),
url(
r'^edit/product/(?P<product_id>\d{0,17})/$',
reports_edit_product,
name='edit_product'
),
]
| {
"content_hash": "42ca895ae09bf2dcc7432039a9f70a76",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 78,
"avg_line_length": 33.23684210526316,
"alnum_prop": 0.5740300870942201,
"repo_name": "VirrageS/io-kawiarnie",
"id": "f90c63ae76ccfe396af1aac31c5f0fb51060c30b",
"size": "1263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caffe/reports/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42462"
},
{
"name": "HTML",
"bytes": "57136"
},
{
"name": "JavaScript",
"bytes": "49334"
},
{
"name": "Python",
"bytes": "344245"
},
{
"name": "Shell",
"bytes": "199"
}
],
"symlink_target": ""
} |
import uuid
import os
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from ajax_upload.models import UploadedFile
from . import settings as upload_settings
class UploadedFileForm(forms.ModelForm):
if getattr(settings, 'AJAX_UPLOAD_USE_IMAGE_FIELD', False):
file = forms.ImageField()
ERRORS = {
'max_filesize': _('The file is bigger than the maximum size allowed.'),
}
class Meta:
model = UploadedFile
fields = ('file',)
def clean_file(self):
data = self.cleaned_data['file']
# Change the name of the file to something unguessable
# Construct the new name as <unique-hex>-<original>.<ext>
original_name, ext = os.path.splitext(data.name)
data.name = u'%s-%s%s' % (uuid.uuid4().hex, original_name[:32], ext[:4])
max_upload_size = getattr(settings, 'AJAX_UPLOAD_MAX_FILESIZE', upload_settings.DEFAULT_MAX_FILESIZE)
if 0 < max_upload_size < data.size:
raise forms.ValidationError(self.ERRORS['max_filesize'])
return data
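# A standalone illustration (not part of the form above) of the rename scheme in
# clean_file(): the stored name becomes <random-hex>-<first 32 chars><ext[:4]>.
import os
import uuid

def unguessable_name(original):
    base, ext = os.path.splitext(original)
    return u'%s-%s%s' % (uuid.uuid4().hex, base[:32], ext[:4])

print(unguessable_name('holiday-photo.jpg'))
# e.g. '3f2b9c...d1-holiday-photo.jpg' (the hex prefix differs on every call)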
| {
"content_hash": "f4a66f00b80cad1e6169cc5ee25add7e",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 109,
"avg_line_length": 31.13888888888889,
"alnum_prop": 0.6610169491525424,
"repo_name": "DjangoAdminHackers/django-ajax-upload-widget",
"id": "2a5b61a6ba99909e307c042194db55d19abde403",
"size": "1121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ajax_upload/forms.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "422"
},
{
"name": "HTML",
"bytes": "1102"
},
{
"name": "JavaScript",
"bytes": "16713"
},
{
"name": "Python",
"bytes": "21926"
}
],
"symlink_target": ""
} |
import os
import time
import re
import urllib.request
import shutil
import datetime
import sys
MONTHS = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
GENRES = ['Rap', 'Hip-Hop', 'RB', 'Pop', 'Soul',
'Fusion', 'Electro', '', 'Grime']
def parse_date(song_date):
"""
A helper function to parse the multiple date formats used in the site.
It could be either already a datetime object, or one of two types of string
"""
if type(song_date) == tuple:
return datetime.datetime(song_date[0], song_date[1], song_date[2])
elif 'ago' in song_date:
"""
If the song has been uploaded soon, instead of a date it
has somehing along the lines of "uploaded 2 hours ago".
"""
return datetime.datetime.now()
else:
"""
Otherwise it will be in the format
"Jun 12, 2014" and has to be parsed
"""
song_date = song_date.split()
return datetime.datetime(int(song_date[2]), #year
MONTHS.index(song_date[0]) + 1, #month
int(song_date[1][0:-1])) #day (slicing ',')
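# A few hypothetical examples (not from the original script) of the inputs
# parse_date() accepts and what it returns:
#   parse_date((2015, 2, 28))          -> datetime.datetime(2015, 2, 28, 0, 0)
#   parse_date('Jun 12, 2014')         -> datetime.datetime(2014, 6, 12, 0, 0)
#   parse_date('uploaded 2 hours ago') -> roughly datetime.datetime.now()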
class Song:
def __init__(self, viperial_id, title, date):
self.title = title
self.viperial_id = viperial_id
self.date = parse_date(date)
self.sharebeast_id = None
self.download_url = None
def get_song_directory(self):
"""
Each song should be saved in a directory
following the year/month format
"""
return str(self.date.year) + "/" + MONTHS[self.date.month - 1]
def download_song(self):
os.makedirs(self.get_song_directory(), exist_ok=True)
if not os.path.exists(self.get_song_directory() + '/'
+ self.title + '.mp3'):
print(" Starting download...")
response = urllib.request.urlopen(self.download_url)
out_file = open(self.get_song_directory() + '/' +
self.title + ".mp3", 'wb')
shutil.copyfileobj(response, out_file)
print(" Finished download!")
else:
print(" This song has already been downloaded")
def get_sharebeast_id(self):
pattern = r'="http://www.sharebeast.com/(.*?)" target'
viperial_song_page = "http://www.viperial.com/tracks/view/{}/".format(
self.viperial_id)
html_request = urllib.request.urlopen(viperial_song_page)
bytecode = html_request.read()
htmlstr = bytecode.decode()
sharebeast_id = re.search(pattern, htmlstr)
try:
result_id = sharebeast_id.groups()[0]
except AttributeError:
result_id = None
self.sharebeast_id = result_id
def get_download_url(self):
if self.sharebeast_id is None:
self.download_url = None
else:
pattern = r'mp3player.*?src="(.*?)".*?"audio/mpeg"'
html_request = urllib.request.urlopen("http://www.sharebeast.com/"
+ self.sharebeast_id)
bytecode = html_request.read()
htmlstr = bytecode.decode()
result = re.findall(pattern, htmlstr)
self.download_url = result[0]
def is_song_wanted(self, time_period):
song_wanted = False
if(time_period[0] >= self.date >= time_period[1]):
song_wanted = True
return song_wanted
def crawl_entire_page(time_period, genre, current_page):
# song_id title date
# 01 01 2015 28 02 2015
pattern = r'hot(?:1|2).*?/(\d+)/(.*?)".*?<i>(.*?)</i>'
genre_url = str(GENRES.index(genre)+1) + '-' + genre
list_url = "http://www.viperial.com/tracks/list/genre/{}/".format(genre_url)
wanted_song_list = []
html_request = urllib.request.urlopen(list_url + str(current_page))
bytecode = html_request.read()
htmlstr = bytecode.decode()
page_song_list = re.findall(pattern, htmlstr.replace('\n', ''))
print(page_song_list)
if not len(page_song_list) == 0:
wanted_song_list = []
for song in page_song_list:
current_song = Song(*song)
if current_song.is_song_wanted(time_period):
wanted_song_list.append(current_song)
if (Song(*page_song_list[-1]).date < time_period[1]
and len(wanted_song_list) == 0):
return None
return wanted_song_list
def download_entire_page(wanted_songs_list):
for song in wanted_songs_list:
print(" Attempting to download {}.".format(song.title))
song.get_sharebeast_id()
song.get_download_url()
if song.download_url == None:
print(" This song has been removed from ShareBeast. Sorry!")
else:
song.download_song()
def input_genres():
print("""To select genres, please type the genre id.
To do so type in a number that includes the
genres you want. For example for Rap and Hip-Hop type 12 or 21.
    The genres are:
1-Rap 2-Hip-hop 3-R&B 4-Pop 5-Soul 6-Fusion
7-Electro 9-Grime 8- for all above""")
wanted_genres = set()
genres = input('-->')
for genre_id in genres:
genre_id = int(genre_id)
if not int(genre_id) in range(0, 9):
print(" Wrong input! You are going ot have to start over. Sorry!")
return input_genres()
if genre_id == 8:
for i in range(0, 7):
wanted_genres.add(GENRES[i])
wanted_genres.add(GENRES[8])
else:
wanted_genres.add(GENRES[genre_id - 1])
return wanted_genres
def input_period():
print("""To select a time period, please type in a
time period in the format DD MM YYYY DD MM YYYY""")
dates = input('-->')
try:
dates = dates.split()
begin_date = datetime.datetime(int(dates[-1]),
int(dates[-2]),
int(dates[-3]))
end_date = datetime.datetime(int(dates[-4]),
int(dates[-5]),
int(dates[-6]))
except:
print(" Something went wrong, try again!")
return input_period()
if(begin_date < end_date):
begin_date, end_date = end_date, begin_date
time_period = (begin_date, end_date)
return time_period
def download_songs():
wanted_genres = input_genres()
time_period = input_period()
print("Starting downloads!")
for genre in wanted_genres:
current_page = 1
while True:
wanted_song_list = crawl_entire_page(time_period,
genre,
current_page)
if wanted_song_list is not None:
print(" Now on page {} of {} songs:".format(current_page,
genre))
download_entire_page(wanted_song_list)
else:
break
current_page = current_page + 1
print("All downloads finished!")
def main():
print("Welcome to the viperial crawler!")
download_songs()
print("Bye!")
main()
| {
"content_hash": "03f26e476dbafa75736627567cb88979",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 80,
"avg_line_length": 34.825471698113205,
"alnum_prop": 0.5388053636733036,
"repo_name": "PeterParushev/viperial-crawler",
"id": "1f01274e6442b0cbbf1ef7f0c7a71240bf933f33",
"size": "7383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "viperial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7383"
}
],
"symlink_target": ""
} |
"""
pretty_helper.py
Contains all of the functions that do the actual analyzing and formatting of the lines of a text file.
"""
import re
def to_text(lines):
"""Re-joins the lines into a single newline-separated string."""
return '\n'.join(lines)
def search_backwards(lines, i):
"""Search backwards until a blank line is found."""
for j in range(i-1, -1, -1):
if lines[j][1] == '':
return j
def search_forwards(lines, i):
"""Search forwards until a blank line is found."""
for k in range(i+1, len(lines), 1):
if lines[k][1] == '':
return k
return k
def line_looks_like_code(line, code_symbol_ratio):
"""Sloppy. We just check if the line is short and has symbols. Should be followed up by checking if it is preceded and prepended by a blank line."""
code_chars = ['#', '[', ']', '=', '==', '<', '>']
count = 0
if len(line) > 0:
if line[-1] == '.' or '. ' in line:
return False
for match in code_chars:
if match in line:
count += 1
if '(' in line: # Only count '(' if there is a matching ')'
if ')' in line[line.index('(')+1:]:
count += 1
tempstr = ' '.join(line.split())
if len(tempstr) == 0:
return False
else:
ratio = 1.0 * count / len(tempstr)
if ratio > code_symbol_ratio:
return True
return False
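# Illustration (hypothetical inputs and threshold, not from the original docs) of
# the heuristic above, which compares the count of code-like symbols against the
# squeezed line length:
#   line_looks_like_code('x = lines[i][1]  # strip', 0.05)          -> True
#   line_looks_like_code('This sentence ends with a period.', 0.05) -> False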
def line_starts_with_enumeration(line):
try:
# Check if the line begins with (#), #), or #.
m = re.finditer('\(*\d+\)|\d+\.', line).next()
if m.start(0) == 0:
return m.end(0)
except Exception as e:
# Otherwise, nothing found
pass
return False
def find_headers(lines):
"""Identify headers and line breaks."""
for i in range(len(lines)):
if lines[i][1] == '-'*78:
lines[i][0] = 'pagebreak'
lines[i+2][0] = 'header'
i += 3
return lines
def find_code(lines, MAGIC_CODE_RATIO):
"""Identify code blocks in a limited fashion. Does not identify inline code."""
i = 0
while i < len(lines):
# If a line isn't 'text', then it's already been labeled as something else. Skip it.
if lines[i][0] == 'text':
            # Most code blocks start with a function definition.
if lines[i][1][:4] == 'def ':
# Occasionally there might be an import line or something similar prior to the 'def', so we look back until we find a blank line, as well as ahead to find the end.
block_start = search_backwards(lines, i)
block_end = search_forwards(lines, i)
for j in range(block_start+2, block_end):
lines[j][0] = 'code'
lines[block_start+1][0] = 'code-start'
lines[block_end][0] = 'code-end'
i = block_end + 1
# Some don't!
elif line_looks_like_code(lines[i][1], MAGIC_CODE_RATIO):
if lines[i-1][0] in ['text', 'code-end'] and lines[i-1][1] == '':
block_end = search_forwards(lines, i)
if lines[i-1][0] == 'code-end':
lines[i-1][0] = 'code'
lines[i][0] = 'code'
else:
lines[i][0] = 'code-start'
lines[block_end][0] = 'code-end'
for j in range(i+1, block_end-1):
lines[j][0] = 'code'
i = block_end + 1
else:
i += 1
else:
i += 1
else:
i += 1
return lines
def find_lists(lines):
"""Identify all numbered lists."""
i = 0
while i < len(lines): #i in range(len(lines)):
# If a line isn't 'text', then it's already been labeled as something else. Skip it.
if lines[i][0] == 'text':
k = line_starts_with_enumeration(lines[i][1])
if k:
# Let's keep track of each line # where a list item occurs, and where in that line the numbering ends (and thus where the actual content of the line begins)
list_items = [(i, k)]
# If we're in this list block, we're within a list.
# We'll assume our code is perfect, and not search backwards for previous list items-- our perfect code surely has already found them!
# However, we should search forwards. Key info: Dr. Pattis' list items are separated by blank lines.
j = i
while j < len(lines):
# Find the next blank line, and look at the line which follows it
j = search_forwards(lines, j)
# Have we found another list item?
k = line_starts_with_enumeration(lines[j+1][1])
if k:
list_items.append((j+1, k)) # The list continues
else:
break # The list terminated
for k in range(i+1, j):
lines[k][0] = 'list'
for item in list_items:
# Label the items, and remove the original numbering (Markdown will supply its own numbering)
lines[item[0]][0] = 'list-item'
lines[item[0]][1] = lines[item[0]][1][item[1]:]
i = j + 1
else:
i += 1
else:
i += 1
return lines
def find_diagrams(lines):
"""Identify all diagrams."""
i = 0
while i < len(lines):
# If a line isn't 'text', then it's already been labeled as something else. Skip it.
if lines[i][0] == 'text':
# Assume diagram lines usually start with a tab.
if lines[i][1][0:2] == ' ':
# Search forward for a blank line.
break_flag = False
block_end = search_forwards(lines, i)
if block_end == i+1:
break_flag = True
# for k in range(i, block_end):
# if lines[k][0] != 'text':
# i = block_end+1
if not break_flag:
for j in range(i+1, block_end-1):
lines[j][0] = 'diagram'
lines[i][0] = 'diagram-start'
lines[block_end-1][0] = 'diagram-end'
i = block_end + 1
else:
i += 1
else:
i += 1
return lines
def protect_special_chars(lines):
"""Add \ in front of * or _ so that Markdown doesn't interpret them."""
for i in range(len(lines)):
if lines[i][0] in ['text', 'list', 'list-item']:
protectedline = []
for c in lines[i][1]:
if c in '_*':
protectedline.append('\\' + c)
else:
protectedline.append(c)
lines[i][1] = ''.join(protectedline)
return lines
def apply_line_formatting(lines):
"""Here is where we apply Markdown formatting (and insert lines, as necessary), based on the line labels that we have created. This should be called after all find_[foo]() functions have been applied."""
i = 1
lines[0][1] = '# <div align=center>' + lines[0][1] + '</div>'
lines.insert(1, ['added', '***'])
headers = []
while i < len(lines):
if lines[i][0] == 'text':
pass
elif lines[i][0] == 'pagebreak':
lines[i][1] = '***'
elif lines[i][0] == 'header':
if lines[i][1][-1] == ':': # Remove colons which occasionally appear at the end of headers
lines[i][1] = lines[i][1][:-1]
headers.append(lines[i][1])
lines[i][1] = '## <a name=\"anchor-' + str(len(headers)) + '\"></a>' + headers[-1]
lines.insert(i+1, ['added', '\[[top](#top)\]'])
i += 1
elif lines[i][0] == 'code-start':
lines.insert(i, ['added', '```python'])
i += 1
elif lines[i][0] == 'code-end':
lines.insert(i+1, ['added', '```'])
i += 1
elif lines[i][0] == 'list-item':
lines[i][1] = '1. ' + lines[i][1]
elif lines[i][0] in ['diagram', 'diagram-start', 'diagram-end']:
lines[i][1] = '\t' + lines[i][1]
pass
i += 1
# Build the table of contents using headers[]
lines.insert(2, ['added', '## <a name=\"top\"></a> Table of Contents'])
for n in range(len(headers)):
lines.insert(3+n, ['added', '1. [' + headers[n] + '](#anchor-' + str(n+1) + ')'])
lines.insert(3+len(headers), ['added', '***'])
return lines, headers
| {
"content_hash": "87a8b66293acb400d1da5999336d0e70",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 208,
"avg_line_length": 39.72072072072072,
"alnum_prop": 0.48673168518938537,
"repo_name": "Ryan-Holben/prettify",
"id": "ff618d2562add87ca31a58f21f8a5453dc8ca538",
"size": "8818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pretty_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19824"
},
{
"name": "HTML",
"bytes": "226425"
},
{
"name": "Python",
"bytes": "14236"
}
],
"symlink_target": ""
} |
"""
A mixin which provides listenable methods to an object.
"""
class ListenedObject:
"""
A mixin which provides convenience methods for storing, adding, and removing L{AbstractListener}C{s} to an object.
"""
def __init__(self):
self._listeners = []
def getListeners(self):
"""
Returns the list of listeners.
@return: A list of L{AbstractListener}C{s}.
@rtype: C{list}
"""
return self._listeners
def addListener(self, listener):
"""
Adds a listener if it is not already listening.
@param listener: A new listener.
@type listener: L{AbstractListener}
"""
if not listener in self._listeners:
self._listeners.append(listener)
def removeListener(self, listener):
"""
Removes a listener if it is currently listening.
@param listener: The listener to be removed.
@type listener: L{AbstractListener}
"""
if listener in self._listeners:
self._listeners.remove(listener)
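# A minimal usage sketch; the Button and PrintingListener classes below are
# hypothetical, since the mixin only stores listeners and never calls them itself.
class PrintingListener:
    def on_event(self, name):
        print("heard " + name)

class Button(ListenedObject):
    def click(self):
        for listener in self.getListeners():
            listener.on_event("click")

button = Button()
button.addListener(PrintingListener())
button.click()  # -> heard click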
| {
"content_hash": "cc517abed3a5bcd57172d01726ac59e7",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 115,
"avg_line_length": 23.58974358974359,
"alnum_prop": 0.7010869565217391,
"repo_name": "jeremyflores/cocosCairo",
"id": "bc13c6c2d42a37957c940a981545c80e611c6294",
"size": "920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cocosCairo/ListenedObject.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Python",
"bytes": "314437"
},
{
"name": "Shell",
"bytes": "44"
}
],
"symlink_target": ""
} |
"""Tests for the multi-processing base process."""
import unittest
from plaso.multi_processing import base_process
from tests import test_lib as shared_test_lib
class TestProcess(base_process.MultiProcessBaseProcess):
"""Implementation of the multi-processing base process for testing."""
def _GetStatus(self):
"""Returns status information.
Returns:
dict [str, object]: status attributes, indexed by name.
"""
# TODO: implement.
return
def _Main(self):
"""The process main loop.
This method is called when the process is ready to start. A sub class
should override this method to do the necessary actions in the main loop.
"""
# TODO: implement.
return
def SignalAbort(self):
"""Signals the process to abort."""
# TODO: implement.
return
class MultiProcessBaseProcessTest(shared_test_lib.BaseTestCase):
"""Tests the multi-processing base process."""
# pylint: disable=protected-access
def testInitialization(self):
"""Tests the initialization."""
test_process = TestProcess(name=u'TestBase')
self.assertIsNotNone(test_process)
# TODO: add test for name property.
# TODO: add test for _OnCriticalError.
# TODO: add test for _SigSegvHandler.
# TODO: add test for _SigTermHandler.
# TODO: add test for _StartProcessStatusRPCServer.
# TODO: add test for _StopProcessStatusRPCServer.
# TODO: add test for _WaitForStatusNotRunning.
# TODO: add test for run.
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "fd9fefb904ebba15b55fe1ef520da918",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 26.189655172413794,
"alnum_prop": 0.7011191573403555,
"repo_name": "dc3-plaso/plaso",
"id": "e91992b82dc79cb7b0911034b7feac2f361bcf95",
"size": "1561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/multi_processing/base_process.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1683"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Python",
"bytes": "3875098"
},
{
"name": "Shell",
"bytes": "17861"
}
],
"symlink_target": ""
} |
import functools
import hashlib
import os
import re
from inspect import isclass
from inspect import ismodule
import pyramid.url
from colander import EMAIL_RE
from pyramid.testing import DummyRequest
from pyramid.threadlocal import get_current_registry
# Regexps for underscore/camelcase convertions
CAMELCASE_RE = re.compile("(.)([A-Z]{1})")
UNDERSCORE_RE = re.compile(r"(?:^|_)(.)")
# ISO date format
ISO_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
def get_settings():
"""
Get application settings.
Application settings are customized in the ".ini" file.
Returns a Dictionary.
"""
return get_current_registry().settings
def is_valid_email(email):
"""
Check if a string is a valid email.
Returns a Boolean.
"""
try:
return re.match(EMAIL_RE, email) is not None
except TypeError:
return False
def camelcase_to_underscore(name):
"""
Convert camelcase names to underscore.
Returns a String.
"""
if not name:
return name
return CAMELCASE_RE.sub(r'\1_\2', name).lower()
def underscore_to_camelcase(name):
"""
Convert underscore names to camelcase.
Returns a String.
"""
def replace_fn(match):
"""
        Uppercase the first char after "_".
Returns a char.
"""
return match.group(1).upper()
if not name:
return name
name = UNDERSCORE_RE.sub(replace_fn, name)
return name[0].lower() + name[1:]
def camelcase_dict(obj):
"""
Create a new dictionary with camelcase keys using the given one.
Returns a Dictionary.
"""
u2c = underscore_to_camelcase
return {u2c(key): value for (key, value) in obj.iteritems()}
def underscore_dict(obj):
"""
Create a new dictionary with underscore keys using the given one.
Returns a Dictionary.
"""
c2u = camelcase_to_underscore
return {c2u(key): value for (key, value) in obj.iteritems()}
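# Quick illustration (hypothetical names) of the conversions above:
#   camelcase_to_underscore('projectName')   -> 'project_name'
#   underscore_to_camelcase('project_name')  -> 'projectName'
#   camelcase_dict({'first_name': 1})        -> {'firstName': 1}
#   underscore_dict({'firstName': 1})        -> {'first_name': 1}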
class mixedmethod(object):
"""
Decorator that allows a method to be both a class method
and an instance method at the same time.
Note: To avoid pylint warnings in decorated methods use
first method argument as a keyword.
"""
def __init__(self, method):
self.method = method
def __get__(self, obj=None, objtype=None):
@functools.wraps(self.method)
def method_wrapper(*args, **kwargs):
if obj is not None:
return self.method(obj, *args, **kwargs)
else:
return self.method(objtype, *args, **kwargs)
return method_wrapper
def route_path(route_name, request=None, **kwargs):
"""
Get a route path for an existing route.
A `DummyRequest` is used when no request is given.
Returns a String.
"""
if not request:
request = DummyRequest()
return pyramid.url.route_path(route_name, request, **kwargs)
def generate_random_hash(salt='', hash='sha1'):
"""
Generate a random hash string.
By default generate a `sha1` hash.
    A different hash can be specified; all supported hash
    algorithms are listed in `hashlib.algorithms`.
Returns a String.
"""
if hash not in hashlib.algorithms:
raise Exception('Invalid hash algorithm %s' % hash)
if isinstance(salt, unicode):
salt = salt.encode('utf8')
return generate_hash(os.urandom(48) + salt, hash=hash)
def generate_hash(value, hash='sha1'):
"""
Generate a hash for a given value.
By default generate a `sha1` hash.
    A different hash can be specified; all supported hash
    algorithms are listed in `hashlib.algorithms`.
Returns a String.
"""
sha_obj = getattr(hashlib, hash)(value)
return sha_obj.hexdigest()
def get_app_namespace(context):
"""
    Get application name for a context.
Sandglass applications are organized under a `sandglass`
prefix. So all applications module names follow the format::
sandglass.APP_NAME
    This method returns the `APP_NAME` extracted from the module
    where the given context object/class is defined.
When context is a string it is used as sandglass module name.
Returns a String.
"""
if isinstance(context, basestring):
module_name = context
elif ismodule(context):
module_name = context.__name__
else:
cls = (context if isclass(context) else context.__class__)
module_name = cls.__module__
parts = module_name.lower().split('.')
if len(parts) < 2 or parts[0] != 'sandglass':
msg = "Context '{ctx}' module '{mod}' is not 'sandglass' namespaced"
raise Exception(msg.format(ctx=context, mod=module_name))
# Get the second element in module path
return parts[1]
| {
"content_hash": "224888f7b5cbc50d334bb1ceedf8e3ca",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 76,
"avg_line_length": 22.578199052132703,
"alnum_prop": 0.6389588581024349,
"repo_name": "sanglass/sandglass.time",
"id": "77c3062f6ad71e782c58f2b3460e2ed6d589a260",
"size": "4795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandglass/time/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Nginx",
"bytes": "1262"
},
{
"name": "Python",
"bytes": "221820"
}
],
"symlink_target": ""
} |
import os
import hashlib
import hmac
from flask import Flask, request, abort
import config
app = Flask(__name__)
def verify_github_signature(req):
    reqsig = request.headers.get('X-Hub-Signature', '')  # '' so a missing header fails validation instead of raising
data = request.get_data()
secret = os.environ.get('GITHUB_SECRET', '')
if not reqsig.startswith("sha1=") or len(secret) < 1:
abort(401, 'Unauthorized')
reqsig = reqsig[len("sha1="):]
secret = secret.encode('utf-8')
digest = hmac.new(secret, data, hashlib.sha1).hexdigest()
print("Validate Github Sig: digest:", digest, "request:", reqsig)
return hmac.compare_digest(digest, reqsig)
@app.route('/', methods=['GET', 'POST'])
def root():
if request.method != 'POST':
return '', 204
# Fail if the sig does not match
if not verify_github_signature(request):
abort(401, 'Unauthorized')
data = request.get_json()
if not data:
abort(404, 'JSON request not found')
# Only accept 'push' events for now
event = request.headers.get('X-GitHub-Event')
if event not in config.ALLOWED_EVENTS:
abort(404, 'GitHub Event not found')
# Only accept known repos
if data['repository']['full_name'] not in config.ALLOWED_REPOS:
abort(404, 'Invalid repo')
# return the data back to the Tekton event listener
return data
if __name__ == '__main__':
print("Running flask webhook app")
app.run(host="0.0.0.0", port=config.PORT, debug=config.DEBUG_MODE, load_dotenv=False)
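# A standalone sketch (not part of the app) of producing the X-Hub-Signature value
# that verify_github_signature() expects: an HMAC-SHA1 of the raw request body
# keyed with the shared secret, prefixed with "sha1=". Secret and payload are
# hypothetical placeholders.
import hashlib
import hmac

secret = b'my-shared-secret'
body = b'{"repository": {"full_name": "tensorflow/build"}}'
signature = 'sha1=' + hmac.new(secret, body, hashlib.sha1).hexdigest()
# send `body` with headers {'X-Hub-Signature': signature, 'X-GitHub-Event': 'push'}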
| {
"content_hash": "c88e674aeb1695ed484a3ac3a2dac1f7",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 89,
"avg_line_length": 25.810344827586206,
"alnum_prop": 0.6439545758183033,
"repo_name": "tensorflow/build",
"id": "ae910f6a1ef2a58ceb326a114a0699ac3f4437b6",
"size": "1497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tekton/webhook/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "90"
},
{
"name": "Dockerfile",
"bytes": "10936"
},
{
"name": "Go",
"bytes": "602"
},
{
"name": "Python",
"bytes": "3026"
},
{
"name": "Shell",
"bytes": "21210"
},
{
"name": "Starlark",
"bytes": "83"
}
],
"symlink_target": ""
} |
"""
Created on Thu Mar 3 14:45:35 2016
@author: swoboj
"""
import os, glob,getopt,sys
import scipy as sp
import matplotlib
matplotlib.use('Agg') # for use where you're running on a command line
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from GeoData.plotting import scatterGD, slice2DGD,insertinfo
from GeoData.GeoData import GeoData
from GeoData.utilityfuncs import readIonofiles, readAllskyFITS,readSRI_h5
from plotdata import plottecvstime
if __name__== '__main__':
datadir = 'TempGeo'
flist1 = glob.glob(os.path.join(datadir,'*.h5'))
TEClist1= map(GeoData.read_h5,flist1)
flist=[]
TEClist = []
satnum=23
for i,j in enumerate(TEClist1):
if sp.any(j.data['satnum']==satnum):
TEClist.append(j)
flist.append(flist1[i])
col = 2.
numr = sp.ceil(len(flist)/col)
dxs = 4.0
dys = 2.
fig, axmat = plt.subplots(int(numr),int(col),dpi=300,sharex=True,sharey=True,figsize=(dxs*col,dys*(numr+1)))
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
axvec= axmat.flatten()
dnames = [os.path.splitext(os.path.split(i)[-1])[0] for i in flist]
for i,iGD in enumerate(TEClist):
lines = plottecvstime(iGD,satnum,fig,axvec[i])
axvec[i].set_title(dnames[i])
plt.suptitle('Data from Sat: {0:d}'.format(satnum))
plt.subplots_adjust(top=0.85)
plt.savefig('TECMaps') | {
"content_hash": "6cb6db117b1da7a31037e0c88101c725",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 112,
"avg_line_length": 28.48,
"alnum_prop": 0.6537921348314607,
"repo_name": "jswoboda/MahaliPlotting",
"id": "6c0d72a169f8c3c44c2a8a56b916c2003f754437",
"size": "1446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "makeTECtimeplot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87400"
},
{
"name": "Shell",
"bytes": "1033"
}
],
"symlink_target": ""
} |
from pyglet_gui.controllers import ContinuousStateController
from pyglet_gui.core import Viewer
class Slider(ContinuousStateController, Viewer):
PATH = 'slider'
IMAGE_BAR = 'bar'
IMAGE_KNOB = 'knob'
IMAGE_STEP = 'step'
def __init__(self, value=0.0, min_value=0.0, max_value=1.0, on_set=None, steps=None, width=0, height=0):
ContinuousStateController.__init__(self, value=value,
min_value=min_value,
max_value=max_value,
on_set=on_set)
Viewer.__init__(self, width, height)
self._bar = None # a bar where the knob slides.
self._knob = None # the knob that moves along the bar.
self._offset = (0, 0) # offset of the knob image to its central position
self._padding = (0, 0, 0, 0) # padding of the bar image to its central position
self.steps = steps
self._markers = [] # markers in case of discrete steps.
self._step_offset = (0, 0)
def get_path(self):
return self.PATH
def load_graphics(self):
theme = self.theme[self.get_path()]
color = theme['gui_color']
self._bar = theme[self.IMAGE_BAR]['image'].generate(color, **self.get_batch('foreground'))
self._padding = theme[self.IMAGE_BAR]['padding']
self._knob = theme[self.IMAGE_KNOB]['image'].generate(color, **self.get_batch('highlight'))
self._offset = theme[self.IMAGE_KNOB]['offset']
if self.steps is not None:
image_path = self.IMAGE_STEP
for n in range(0, self.steps + 1):
self._markers.append(theme[image_path]['image'].generate(color, **self.get_batch('background')))
self._step_offset = theme[image_path]['offset']
def unload_graphics(self):
self._knob.unload()
self._bar.unload()
for marker in self._markers:
marker.unload()
self._markers = []
def hit_test(self, x, y):
return self.is_inside(x, y)
def set_knob_pos(self, pos):
"""
A setter for value, but using normalized values.
"""
pos = max(min(pos, 1.0), 0.0)
self.set_value(self._min_value + (self._max_value - self._min_value) * pos)
if self._bar is not None and self._knob is not None:
x, y, width, height = self._bar.get_content_region()
offset_x, offset_y = self._offset
self._knob.update(x + int(width * pos) + offset_x,
y + offset_y,
self._knob.width, self._knob.height)
def _knob_pos(self):
"""
The position of the knob in the bar computed by our value.
"""
return max(min(float(self._value - self._min_value) / (self._max_value - self._min_value), 1.0), 0.0)
def _snap_to_nearest(self):
"""
Snaps the knob and value to a discrete value dictated by steps.
"""
assert self.steps is not None
pos = float(int(self._knob_pos() * self.steps + 0.5))/self.steps
self.set_knob_pos(pos)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
raise NotImplementedError
def on_mouse_press(self, x, y, button, modifiers):
return self.on_mouse_drag(x, y, 0, 0, button, modifiers)
def on_mouse_release(self, x, y, button, modifiers):
if self.steps is not None:
self._snap_to_nearest()
def delete(self):
ContinuousStateController.delete(self)
Viewer.delete(self)
class HorizontalSlider(Slider):
def __init__(self, value=0.0, min_value=0.0, max_value=1.0, steps=None,
width=100, on_set=None):
Slider.__init__(self, value=value,
min_value=min_value,
max_value=max_value,
steps=steps,
on_set=on_set)
self.min_width = width
def layout(self):
left, right, top, bottom = self._padding
self._bar.update(self.x + left, self.y + bottom,
self.width - left - right,
self.height - top - bottom)
x, y, width, height = self._bar.get_content_region()
# knob is positioned with an (x,y) offset
# since its graphics are on its bottom-left corner.
offset_x, offset_y = self._offset
self._knob.update(x + int(width * self._knob_pos()) + offset_x,
y + offset_y,
self._knob.width, self._knob.height)
if self.steps is not None:
step = float(width) / self.steps
offset_x, offset_y = self._step_offset
for n in range(0, self.steps + 1):
self._markers[n].update(int(x + step * n) + offset_x,
y + offset_y,
self._markers[n].width,
self._markers[n].height)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
bar_x, bar_y, bar_width, bar_height = self._bar.get_content_region()
self.set_knob_pos(float(x - bar_x) / bar_width)
return True
def compute_size(self):
width, height = self._bar.get_needed_size(self.min_width, 0)
left, right, top, bottom = self._padding
return width + left + right, height + top + bottom
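# Numeric illustration (hypothetical values) of the value <-> knob-position mapping
# used by _knob_pos() and set_knob_pos():
#   normalized pos = (value - min_value) / (max_value - min_value), clamped to [0, 1]
# e.g. with min_value=0, max_value=10 and value=2.5 the knob sits at pos 0.25,
# while set_knob_pos(0.6) stores value = 0 + (10 - 0) * 0.6 = 6.0.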
| {
"content_hash": "c487cfbd2fac51397323c5d7a22dff0b",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 112,
"avg_line_length": 37.813793103448276,
"alnum_prop": 0.5407623563742476,
"repo_name": "jorgecarleitao/pyglet-gui",
"id": "7194a4d6bd8225e2c4406c73155c64adfaed7738",
"size": "5483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyglet_gui/sliders.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "134819"
}
],
"symlink_target": ""
} |
import os
from celery import Celery
env=os.environ
CELERY_BROKER_URL = env.get('CELERY_BROKER_URL', 'redis://localhost:6379')
CELERY_RESULT_BACKEND = env.get('CELERY_RESULT_BACKEND', 'redis://localhost:6379')
celery= Celery('tasks',
broker=CELERY_BROKER_URL,
backend=CELERY_RESULT_BACKEND)
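# A minimal usage sketch (an assumption, not part of this file): registering a task
# on the `celery` app above and calling it asynchronously. The task name and the
# arguments are hypothetical.
@celery.task(name='tasks.add')
def add(x, y):
    return x + y

# result = add.delay(2, 3)   # queued on the broker
# result.get(timeout=10)     # -> 5, once a worker has picked it up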
| {
"content_hash": "7f2185c4fe13dc1130e7950bac2e193f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 79,
"avg_line_length": 29.09090909090909,
"alnum_prop": 0.69375,
"repo_name": "CSUChico-CINS465/CINS465-Spring2017-Lecture-Examples",
"id": "910b62b894a0bdf053edbb0a53314daeee2fd6da",
"size": "320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask-celery-docker-scale/flask-app/worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1176"
},
{
"name": "HTML",
"bytes": "75319"
},
{
"name": "JavaScript",
"bytes": "15083"
},
{
"name": "Python",
"bytes": "21984"
},
{
"name": "Shell",
"bytes": "3282"
}
],
"symlink_target": ""
} |
__author__ = 'toure'
from iridium.plugins.inspector import Plugin
import novaclient.client as nvclient
from iridium.libs.openstack import keystone
class NovaBase(object):
"""
This class is serving as a common interface for both local plugins as well as
openstack client methods.
"""
def __init__(self, version='2'):
creds = keystone.keystone_retrieve(version='v2')
nova_cred_list = [creds[key] for key in ["username", "password", "tenant_name", "auth_url"]]
self.nova_session = nvclient.Client(version, *nova_cred_list)
def __getattr__(self, item):
"""
getattr is responsible for searching requested methods which exist in the
plugin tree.
:param item: name of method
:return: remote method.
"""
__plugin = Plugin()
__ext = __plugin.activate_plugins('nova')
return getattr(__ext.Common(self.nova_session), item)
| {
"content_hash": "cd817daad546d9b9fb1a3884c4026f56",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 100,
"avg_line_length": 33.464285714285715,
"alnum_prop": 0.6414087513340448,
"repo_name": "Toure/Iridium",
"id": "876901c732e13ce51314ac68e5421ffe607b775f",
"size": "937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iridium/libs/openstack/nova.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "118101"
}
],
"symlink_target": ""
} |
"""A local settings template.
Be careful changing this file as it will affect all development users.
"""
import fnmatch
# * imports should really never be used. Given this is passing settings around,
# this particular setup is getting a special pass.
from default import *
# local settings
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Set this to wherever you want to connect to. It is currently setup to
# run against sqlite.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
# For linux/mac hosts.
'NAME': '/tmp/dev.db',
# If you are working in windows, setup with a writable directory path.
# 'NAME': 'C:\TEMP\dev.db',
}
}
if DEBUG:
# set INTERNAL_IPS to entire local network
class CheckLocalNetwork(list):
def __contains__(self, key):
for address in self:
if fnmatch.fnmatch(key, address):
return True
return False
INTERNAL_IPS = CheckLocalNetwork(['127.0.0.1', '192.168.*.*'])
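    # Quick illustration (hypothetical addresses) of the wildcard matching above:
    #   '192.168.1.42' in INTERNAL_IPS  -> True   (matches '192.168.*.*')
    #   '10.0.0.5' in INTERNAL_IPS      -> False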
# Additive installed apps.
INSTALLED_APPS += (
)
# Additive middleware classes
MIDDLEWARE_CLASSES += (
)
| {
"content_hash": "d9922e6c6af50696dba6eccaf0bcbc7d",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 79,
"avg_line_length": 25.195652173913043,
"alnum_prop": 0.6289905090595341,
"repo_name": "iamkelroy/CS673_G1_T3_BUGTRACKER",
"id": "7bb6fc45d0531ecf356cf13e262a4f89152ed6c9",
"size": "1159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "issue_tracker/settings/local.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7867"
},
{
"name": "HTML",
"bytes": "45224"
},
{
"name": "JavaScript",
"bytes": "17361"
},
{
"name": "Python",
"bytes": "52868"
}
],
"symlink_target": ""
} |