Function-level records extracted from the [diux-dev/ncluster](https://github.com/diux-dev/ncluster) repository, in CodeSearchNet style. Each record carries the fields repo, path, func_name, original_string, language, code, code_tokens, docstring, docstring_tokens, sha, url, and partition; every record here is a Python function from the train partition at commit 2fd359621896717197b479c7174d06d80df1529b. Each entry below shows the repo, file path, function name, source code (which includes the docstring), and a link to the exact lines at the pinned commit.

**diux-dev/ncluster · ncluster/local_backend.py · `Run.run_with_output`** (Python · train)

```python
def run_with_output(self, *args, **kwargs):
  """Runs command on every job in the run."""
  for job in self.jobs:
    job.run_with_output(*args, **kwargs)
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/local_backend.py#L428-L431

**diux-dev/ncluster · ncluster/local_backend.py · `Run._run_raw`** (Python · train)

```python
def _run_raw(self, *args, **kwargs):
  """_run_raw on every job in the run."""
  for job in self.jobs:
    job._run_raw(*args, **kwargs)
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/local_backend.py#L433-L436

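Both wrappers implement the same fan-out: the `Run` simply delegates the call to each of its jobs in turn. A self-contained sketch of that pattern with stub classes (names are illustrative, not the real ncluster backend):

```python
class StubJob:
  def __init__(self, name):
    self.name = name

  def run_with_output(self, cmd):
    print(f"[{self.name}] would run: {cmd}")


class StubRun:
  def __init__(self, jobs):
    self.jobs = jobs

  def run_with_output(self, *args, **kwargs):
    for job in self.jobs:  # broadcast the call to every job in the run
      job.run_with_output(*args, **kwargs)


StubRun([StubJob("worker0"), StubJob("worker1")]).run_with_output("hostname")
```
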
**diux-dev/ncluster · ncluster/aws_create_resources.py · `keypair_setup`** (Python · train)

```python
def keypair_setup():
  """Creates keypair if necessary and saves private key locally; returns the
  keypair."""
  os.system('mkdir -p ' + u.PRIVATE_KEY_LOCATION)

  keypair_name = u.get_keypair_name()
  keypair = u.get_keypair_dict().get(keypair_name, None)
  keypair_fn = u.get_keypair_fn()

  if keypair:
    print("Reusing keypair " + keypair_name)
    # check that local pem file exists and is readable
    assert os.path.exists(keypair_fn), (
      "Keypair %s exists, but corresponding .pem file %s is not found, "
      "delete keypair %s through console and run again to recreate "
      "keypair/.pem together" % (keypair_name, keypair_fn, keypair_name))
    keypair_contents = open(keypair_fn).read()
    assert len(keypair_contents) > 0
  else:
    print("Creating keypair " + keypair_name)
    ec2 = u.get_ec2_resource()
    assert not os.path.exists(keypair_fn), (
      "previous keypair exists, delete it with 'sudo rm %s' and also delete "
      "corresponding keypair through console" % (keypair_fn,))
    keypair = ec2.create_key_pair(KeyName=keypair_name)
    open(keypair_fn, 'w').write(keypair.key_material)
    os.system('chmod 400 ' + keypair_fn)
  return keypair
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_create_resources.py#L226-L254

**diux-dev/ncluster · ncluster/aws_create_resources.py · `placement_group_setup`** (Python · train)

```python
def placement_group_setup(group_name):
  """Creates placement group if necessary. Returns the placement group."""
  existing_placement_groups = u.get_placement_group_dict()
  group = existing_placement_groups.get(group_name, None)
  if group:
    assert group.state == 'available'
    assert group.strategy == 'cluster'
    print("Reusing group ", group.name)
    return group

  print("Creating group " + group_name)
  ec2 = u.get_ec2_resource()
  group = ec2.create_placement_group(GroupName=group_name, Strategy='cluster')
  return group
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_create_resources.py#L257-L273

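`keypair_setup` and `placement_group_setup` share a get-or-create idiom: look the resource up by name, validate and reuse it if present, otherwise create it. A minimal self-contained sketch of the idiom, with an in-memory dict standing in for the AWS catalog (all names here are mine, not ncluster's):

```python
registry = {}  # stands in for the cloud provider's resource catalog


def get_or_create(name, factory, validate=lambda r: True):
  resource = registry.get(name)
  if resource is not None:
    assert validate(resource), f"existing resource {name} fails validation"
    print("Reusing", name)
    return resource
  print("Creating", name)
  resource = registry[name] = factory()
  return resource


group = get_or_create("my-cluster", dict)   # first call creates
group2 = get_or_create("my-cluster", dict)  # second call reuses
assert group is group2
```
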
**diux-dev/ncluster · ncluster/backend.py · `Task.upload`** (Python · train)

```python
def upload(self, local_fn: str, remote_fn: str = '',
           dont_overwrite: bool = False):
  """Uploads given file to the task. If remote_fn is not specified, dumps it
  into task current directory with the same name.

  Args:
    local_fn: location of file locally
    remote_fn: location of file on task
    dont_overwrite: if True, will be no-op if target file exists
  """
  raise NotImplementedError()
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/backend.py#L139-L149

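`upload` is abstract; each backend supplies the actual transfer. A hypothetical local-filesystem implementation, purely to illustrate the documented contract (the real AWS backend presumably transfers over SSH instead):

```python
import os
import shutil


class LocalTask:
  def __init__(self, workdir):
    self.workdir = workdir

  def upload(self, local_fn: str, remote_fn: str = '',
             dont_overwrite: bool = False):
    # default target: same basename inside the task's working directory
    target = os.path.join(self.workdir,
                          remote_fn or os.path.basename(local_fn))
    if dont_overwrite and os.path.exists(target):
      return  # no-op, per the documented contract
    shutil.copy(local_fn, target)
```
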
**diux-dev/ncluster · ncluster/backend.py · `Job._non_blocking_wrapper`** (Python · train)

```python
def _non_blocking_wrapper(self, method, *args, **kwargs):
  """Runs given method on every task in the job. Blocks until all tasks
  finish. Propagates exception from first failed task."""
  exceptions = []

  def task_run(task):
    try:
      getattr(task, method)(*args, **kwargs)
    except Exception as e:
      exceptions.append(e)

  threads = [threading.Thread(name=f'task_{method}_{i}',
                              target=task_run, args=[t])
             for i, t in enumerate(self.tasks)]
  for thread in threads:
    thread.start()
  for thread in threads:
    thread.join()
  if exceptions:
    raise exceptions[0]
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/backend.py#L202-L222

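A quick self-contained demonstration of the wrapper's behavior (stub task class and names are mine): all tasks run in parallel threads, and the first recorded exception is re-raised only after every thread has joined:

```python
import threading


class StubTask:
  """Stand-in for an ncluster Task; setup() fails on demand."""
  def __init__(self, name, fail=False):
    self.name, self.fail = name, fail

  def setup(self):
    if self.fail:
      raise RuntimeError(f"{self.name} failed")
    print(f"{self.name} ok")


def non_blocking_wrapper(tasks, method, *args, **kwargs):
  exceptions = []

  def task_run(task):
    try:
      getattr(task, method)(*args, **kwargs)
    except Exception as e:
      exceptions.append(e)  # list.append is thread-safe under the GIL

  threads = [threading.Thread(target=task_run, args=[t]) for t in tasks]
  for t in threads:
    t.start()
  for t in threads:
    t.join()
  if exceptions:
    raise exceptions[0]


try:
  non_blocking_wrapper([StubTask("t0"), StubTask("t1", fail=True)], "setup")
except RuntimeError as e:
  print("propagated:", e)
```
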
**diux-dev/ncluster · ncluster/aws_util.py · `get_default_vpc`** (Python · train)

```python
def get_default_vpc():
  """
  Return default VPC or None if not present
  """
  ec2 = get_ec2_resource()
  for vpc in ec2.vpcs.all():
    if vpc.is_default:
      return vpc
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_util.py#L75-L83

**diux-dev/ncluster · ncluster/aws_util.py · `get_subnet_dict`** (Python · train)

```python
def get_subnet_dict():
  """Returns dictionary of "availability zone" -> subnet for current VPC."""
  subnet_dict = {}
  vpc = get_vpc()
  for subnet in vpc.subnets.all():
    zone = subnet.availability_zone
    assert zone not in subnet_dict, "More than one subnet in %s, why?" % (zone,)
    subnet_dict[zone] = subnet
  return subnet_dict
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_util.py#L86-L94

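The same zone-to-subnet map can be built directly against boto3's EC2 resource API. A sketch assuming configured AWS credentials, a default region, and (like the helper above) at most one subnet per zone:

```python
import boto3

ec2 = boto3.resource('ec2')
# pick the account's default VPC, mirroring get_default_vpc() above;
# raises StopIteration if the account has no default VPC
vpc = next(v for v in ec2.vpcs.all() if v.is_default)

subnet_by_zone = {}
for subnet in vpc.subnets.all():
  assert subnet.availability_zone not in subnet_by_zone
  subnet_by_zone[subnet.availability_zone] = subnet
print(sorted(subnet_by_zone))
```
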
**diux-dev/ncluster · ncluster/aws_util.py · `get_keypair_name`** (Python · train)

```python
def get_keypair_name():
  """Returns current keypair name."""
  username = get_username()
  assert '-' not in username, "username must not contain -, change $USER"
  validate_aws_name(username)
  assert len(username) < 30  # to avoid exceeding AWS 127 char limit
  return get_prefix() + '-' + username
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_util.py#L247-L254

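Together with `get_keypair_fn` in the next record, this fixes the naming scheme: the keypair is named `<prefix>-<username>` and the .pem file lands at `<PRIVATE_KEY_LOCATION>/<keypair>-<account>-<region>.pem`. A toy illustration with made-up values (the real helpers pull these from the environment and the AWS session):

```python
prefix, username = 'ncluster', 'alice'          # hypothetical values
account, region = '123456789012', 'us-east-1'
private_key_location = '/home/alice/.ncluster'  # stands in for PRIVATE_KEY_LOCATION

keypair_name = f'{prefix}-{username}'
keypair_fn = f'{private_key_location}/{keypair_name}-{account}-{region}.pem'
print(keypair_name)  # ncluster-alice
print(keypair_fn)    # /home/alice/.ncluster/ncluster-alice-123456789012-us-east-1.pem
```
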
**diux-dev/ncluster · ncluster/aws_util.py · `get_keypair_fn`** (Python · train)

```python
def get_keypair_fn():
  """Location of .pem file for current keypair"""
  keypair_name = get_keypair_name()
  account = get_account_number()
  region = get_region()
  fn = f'{PRIVATE_KEY_LOCATION}/{keypair_name}-{account}-{region}.pem'
  return fn
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_util.py#L266-L273

**diux-dev/ncluster · ncluster/aws_util.py · `lookup_instance`** (Python · train)

```python
def lookup_instance(name: str, instance_type: str = '', image_name: str = '',
                    states: tuple = ('running', 'stopped', 'initializing')):
  """Looks up AWS instance for given instance name, like
  simple.worker. If no instance found in current AWS environment, returns None. """
  ec2 = get_ec2_resource()

  instances = ec2.instances.filter(
    Filters=[{'Name': 'instance-state-name', 'Values': states}])

  prefix = get_prefix()
  username = get_username()

  # look for an existing instance matching job, ignore instances launched
  # by different user or under different resource name
  result = []
  for i in instances.all():
    instance_name = get_name(i)
    if instance_name != name:
      continue

    seen_prefix, seen_username = parse_key_name(i.key_name)
    if prefix != seen_prefix:
      print(f"Found {name} launched under {seen_prefix}, ignoring")
      continue
    if username != seen_username:
      print(f"Found {name} launched by {seen_username}, ignoring")
      continue

    if instance_type:
      assert i.instance_type == instance_type, f"Found existing instance for job {name} but different instance type ({i.instance_type}) than requested ({instance_type}), terminate {name} first or use new task name."
    if image_name:
      assert i.image.name == image_name, f"Found existing instance for job {name} but launched with different image ({i.image.name}) than requested ({image_name}), terminate {name} first or use new task name."

    result.append(i)

  assert len(result) < 2, f"Found two instances with name {name}"
  if not result:
    return None
  else:
    return result[0]
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_util.py#L326-L366

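A usage sketch for the lookup, assuming an installed ncluster checkout and configured AWS credentials (the import alias is illustrative, not confirmed by the source):

```python
from ncluster import aws_util as u  # assumed import path for the module above

instance = u.lookup_instance('simple.worker', instance_type='t3.micro')
if instance is None:
  print("no matching instance; safe to launch a fresh one")
else:
  print("reusing", instance.id, instance.public_ip_address)
```
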
**diux-dev/ncluster · ncluster/aws_util.py · `ssh_to_task`** (Python · train)

```python
def ssh_to_task(task) -> paramiko.SSHClient:
  """Create ssh connection to task's machine

  Returns Paramiko SSH client connected to host.
  """
  username = task.ssh_username
  hostname = task.public_ip
  ssh_key_fn = get_keypair_fn()
  print(f"ssh -i {ssh_key_fn} {username}@{hostname}")
  pkey = paramiko.RSAKey.from_private_key_file(ssh_key_fn)

  ssh_client = paramiko.SSHClient()
  ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
  assert ssh_client

  counter = 1
  while True:
    try:
      ssh_client.connect(hostname=hostname, username=username, pkey=pkey)
      break
    except Exception as e:
      print(f'{task.name}: Exception connecting to {hostname} via ssh (could be a timeout): {e}')
      counter += 1
      if counter % 11 == 0:  # occasionally re-obtain public ip, machine could've gotten restarted
        hostname = task.public_ip
      time.sleep(RETRY_INTERVAL_SEC)
  return ssh_client
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_util.py#L369-L398

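The returned client is a regular `paramiko.SSHClient`, so commands can be executed on it directly; a short sketch (the `task` object is assumed to come from the surrounding backend):

```python
client = ssh_to_task(task)  # 'task' assumed to expose ssh_username/public_ip
stdin, stdout, stderr = client.exec_command('uname -a')  # standard paramiko API
print(stdout.read().decode())
client.close()
```
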
**diux-dev/ncluster · ncluster/aws_util.py · `delete_efs_by_id`** (Python · train)

```python
def delete_efs_by_id(efs_id):
  """Deletion sometimes fails, try several times."""
  start_time = time.time()
  efs_client = get_efs_client()
  sys.stdout.write("deleting %s ... " % (efs_id,))
  while True:
    try:
      response = efs_client.delete_file_system(FileSystemId=efs_id)
      if is_good_response(response):
        print("succeeded")
        break
      time.sleep(RETRY_INTERVAL_SEC)
    except Exception as e:
      print("Failed with %s" % (e,))
      if time.time() - start_time - RETRY_INTERVAL_SEC < RETRY_TIMEOUT_SEC:
        print("Retrying in %s sec" % (RETRY_INTERVAL_SEC,))
        time.sleep(RETRY_INTERVAL_SEC)
      else:
        print("Giving up")
        break
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_util.py#L488-L507

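The deletion loop is an instance of retry-until-deadline. A generic, self-contained version of the same pattern (the helper name and demo are mine, not ncluster's):

```python
import time


def retry_until(fn, interval_sec=1.0, timeout_sec=10.0):
  """Calls fn until it succeeds or the deadline passes; returns its result."""
  deadline = time.time() + timeout_sec
  while True:
    try:
      return fn()
    except Exception as e:
      if time.time() + interval_sec > deadline:
        raise  # out of retry budget: surface the last error
      print(f"failed with {e!r}, retrying in {interval_sec}s")
      time.sleep(interval_sec)


attempts = []
def flaky():
  attempts.append(1)
  if len(attempts) < 3:
    raise ValueError("not yet")
  return "done"

print(retry_until(flaky, interval_sec=0.01))  # succeeds on the third try
```
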
**diux-dev/ncluster · ncluster/aws_util.py · `extract_attr_for_match`** (Python · train)

```python
def extract_attr_for_match(items, **kwargs):
  """Helper method to get attribute value for an item matching some criterion.
  Specify target criteria value as dict, with target attribute having value -1

  Example:
    to extract state of vpc matching given vpc id

    response = [{'State': 'available', 'VpcId': 'vpc-2bb1584c'}]
    extract_attr_for_match(response, State=-1, VpcId='vpc-2bb1584c') #=> 'available'"""

  # find the value of attribute to return
  query_arg = None
  for arg, value in kwargs.items():
    if value == -1:
      assert query_arg is None, "Only single query arg (-1 valued) is allowed"
      query_arg = arg

  result = []
  filterset = set(kwargs.keys())
  for item in items:
    match = True
    assert filterset.issubset(
      item.keys()), "Filter set contained %s which was not in record %s" % (
      filterset.difference(item.keys()), item)
    for arg in item:
      if arg == query_arg:
        continue
      if arg in kwargs:
        if item[arg] != kwargs[arg]:
          match = False
          break
    if match:
      result.append(item[query_arg])
  assert len(result) <= 1, "%d values matched %s, only allow 1" % (
    len(result), kwargs)
  if result:
    return result[0]
  return None
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_util.py#L510-L548

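Running the docstring's example end-to-end with the function above (the second, non-matching record is added by me for illustration):

```python
response = [{'State': 'available', 'VpcId': 'vpc-2bb1584c'},
            {'State': 'pending', 'VpcId': 'vpc-deadbeef'}]
state = extract_attr_for_match(response, State=-1, VpcId='vpc-2bb1584c')
assert state == 'available'
```
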
**diux-dev/ncluster · ncluster/aws_util.py · `get_instance_property`** (Python · train)

```python
def get_instance_property(instance, property_name):
  """Retrieves property of an instance, keeps retrying until getting a non-None"""
  name = get_name(instance)
  while True:
    try:
      value = getattr(instance, property_name)
      if value is not None:
        break
      print(f"retrieving {property_name} on {name} produced None, retrying")
      time.sleep(RETRY_INTERVAL_SEC)
      instance.reload()
      continue
    except Exception as e:
      print(f"retrieving {property_name} on {name} failed with {e}, retrying")
      time.sleep(RETRY_INTERVAL_SEC)
      try:
        instance.reload()
      except Exception:
        pass
      continue
  return value
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_util.py#L565-L587

**diux-dev/ncluster · ncluster/aws_util.py · `wait_until_available`** (Python · train)

```python
def wait_until_available(resource):
  """Waits until resource state becomes 'available'"""
  while True:
    resource.load()
    if resource.state == 'available':
      break
    time.sleep(RETRY_INTERVAL_SEC)
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_util.py#L675-L681

**diux-dev/ncluster · ncluster/aws_util.py · `maybe_create_placement_group`** (Python · train)

```python
def maybe_create_placement_group(name='', max_retries=10):
  """Creates placement_group group or reuses existing one. Crash if unable to create
  placement_group group. If name is empty, ignores request."""
  if not name:
    return

  client = get_ec2_client()
  while True:
    try:
      client.describe_placement_groups(GroupNames=[name])
      print("Reusing placement_group group: " + name)
      break  # no Exception means group name was found
    except Exception:
      print("Creating placement_group group: " + name)
      try:
        _response = client.create_placement_group(GroupName=name,
                                                  Strategy='cluster')
      except Exception:
        # because of race can get InvalidPlacementGroup.Duplicate
        pass

  counter = 0
  while True:
    try:
      res = client.describe_placement_groups(GroupNames=[name])
      res_entry = res['PlacementGroups'][0]
      if res_entry['State'] == 'available':
        assert res_entry['Strategy'] == 'cluster'
        break
    except Exception as e:
      print("Got exception: %s" % (e,))
    counter += 1
    if counter >= max_retries:
      assert False, f'Failed to create placement_group group {name} in {max_retries} attempts'
    time.sleep(RETRY_INTERVAL_SEC)
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_util.py#L684-L719

**diux-dev/ncluster · ncluster/ncluster_globals.py · `is_chief`** (Python · train)

```python
def is_chief(task: backend.Task, run_name: str):
  """Returns True if task is chief task in the corresponding run"""
  global run_task_dict
  if run_name not in run_task_dict:
    return True
  task_list = run_task_dict[run_name]
  assert task in task_list, f"Task {task.name} doesn't belong to run {run_name}"
  return task_list[0] == task
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/ncluster_globals.py#L88-L95

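Chief selection is purely positional: the first task registered under a run name is the chief. A self-contained toy version (the stand-in registry and task names are mine):

```python
run_task_dict = {'train-run': ['task0', 'task1', 'task2']}


def is_chief(task, run_name):
  if run_name not in run_task_dict:
    return True  # unknown run: treat the task as its own chief
  task_list = run_task_dict[run_name]
  assert task in task_list
  return task_list[0] == task


assert is_chief('task0', 'train-run')
assert not is_chief('task2', 'train-run')
```
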
**diux-dev/ncluster · benchmarks/util.py · `ossystem`** (Python · train)

```python
def ossystem(cmd):
  """Like os.system, but returns output of command as string."""
  p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                       stderr=subprocess.STDOUT)
  (stdout, stderr) = p.communicate()
  return stdout.decode('ascii')
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/benchmarks/util.py#L40-L45

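On Python 3.7+, `subprocess.run` expresses the same thing more directly; an equivalent sketch:

```python
import subprocess


def ossystem(cmd: str) -> str:
  # capture stdout with stderr interleaved, decoded to text
  return subprocess.run(cmd, shell=True, stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT, text=True).stdout


print(ossystem('echo hello'))
```
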
**diux-dev/ncluster · ncluster/aws_backend.py · `_maybe_create_resources`** (Python · train)

```python
def _maybe_create_resources(logging_task: Task = None):
  """Use heuristics to decide to possibly create resources"""

  def log(*args):
    if logging_task:
      logging_task.log(*args)
    else:
      util.log(*args)

  def should_create_resources():
    """Check if gateway, keypair, vpc exist."""
    prefix = u.get_prefix()
    if u.get_keypair_name() not in u.get_keypair_dict():
      log(f"Missing {u.get_keypair_name()} keypair, creating resources")
      return True
    vpcs = u.get_vpc_dict()
    if prefix not in vpcs:
      log(f"Missing {prefix} vpc, creating resources")
      return True
    vpc = vpcs[prefix]
    gateways = u.get_gateway_dict(vpc)
    if prefix not in gateways:
      log(f"Missing {prefix} gateway, creating resources")
      return True
    return False

  try:
    # this locking is approximate, still possible for threads to slip through
    if os.path.exists(AWS_LOCK_FN):
      pid, ts, lock_taskname = open(AWS_LOCK_FN).read().split('-')
      ts = int(ts)
      log(f"waiting for aws resource creation, another resource initiation was "
          f"initiated {int(time.time()-ts)} seconds ago by "
          f"{lock_taskname}, delete lock file "
          f"{AWS_LOCK_FN} if this is an error")
      while True:
        if os.path.exists(AWS_LOCK_FN):
          log(f"waiting for lock file {AWS_LOCK_FN} to get deleted "
              f"initiated {int(time.time()-ts)} seconds ago by ")
          time.sleep(2)
          continue
        else:
          break
      return

    with open(AWS_LOCK_FN, 'w') as f:
      f.write(
        f'{os.getpid()}-{int(time.time())}-{logging_task.name if logging_task else ""}')

    if not should_create_resources():
      util.log("Resources already created, no-op")
      os.remove(AWS_LOCK_FN)
      return

    create_lib.create_resources()
  finally:
    if os.path.exists(AWS_LOCK_FN):
      os.remove(AWS_LOCK_FN)
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_backend.py#L933-L990

**diux-dev/ncluster · ncluster/aws_backend.py · `_set_aws_environment`** (Python · train)

```python
def _set_aws_environment(task: Task = None):
  """Sets up AWS environment from NCLUSTER environment variables"""
  current_zone = os.environ.get('NCLUSTER_ZONE', '')
  current_region = os.environ.get('AWS_DEFAULT_REGION', '')

  def log(*args):
    if task:
      task.log(*args)
    else:
      util.log(*args)

  if current_region and current_zone:
    assert current_zone.startswith(
      current_region), f'Current zone "{current_zone}" ($NCLUSTER_ZONE) is not ' \
                       f'in current region "{current_region}" ($AWS_DEFAULT_REGION)'
    assert u.get_session().region_name == current_region  # setting from ~/.aws

  # zone is set, set region from zone
  if current_zone and not current_region:
    current_region = current_zone[:-1]
    os.environ['AWS_DEFAULT_REGION'] = current_region

  # neither zone nor region is set, use default setting for region;
  # if default is not set, use NCLUSTER_DEFAULT_REGION
  if not current_region:
    current_region = u.get_session().region_name
    if not current_region:
      log(f"No default region available, using {NCLUSTER_DEFAULT_REGION}")
      current_region = NCLUSTER_DEFAULT_REGION
    os.environ['AWS_DEFAULT_REGION'] = current_region

  # zone not set, use first zone of the region
  #  if not current_zone:
  #    current_zone = current_region + 'a'
  #    os.environ['NCLUSTER_ZONE'] = current_zone

  log(f"Using account {u.get_account_number()}, region {current_region}, "
      f"zone {current_zone}")
```

Source: https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_backend.py#L993-L1030

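The zone-to-region rule used above is simply "drop the trailing zone letter":

```python
zone = 'us-east-1a'  # hypothetical $NCLUSTER_ZONE value
region = zone[:-1]   # -> 'us-east-1', the same rule as current_zone[:-1] above
assert zone.startswith(region)
```
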
diux-dev/ncluster
|
ncluster/aws_backend.py
|
Task.join
|
def join(self, ignore_errors=False):
"""Waits until last executed command completed."""
assert self._status_fn, "Asked to join a task which hasn't had any commands executed on it"
check_interval = 0.2
status_fn = self._status_fn
if not self.wait_for_file(status_fn, max_wait_sec=30):
self.log(f"Retrying waiting for {status_fn}")
while not self.exists(status_fn):
self.log(f"Still waiting for {self._cmd}")
self.wait_for_file(status_fn, max_wait_sec=30)
contents = self.read(status_fn)
# if empty wait a bit to allow for race condition
if len(contents) == 0:
time.sleep(check_interval)
contents = self.read(status_fn)
status = int(contents.strip())
self.last_status = status
if status != 0:
extra_msg = '(ignoring error)' if ignore_errors else '(failing)'
if util.is_set('NCLUSTER_RUN_WITH_OUTPUT_ON_FAILURE') or True:
self.log(
f"Start failing output {extra_msg}: \n{'*'*80}\n\n '{self.read(self._out_fn)}'")
self.log(f"\n{'*'*80}\nEnd failing output")
if not ignore_errors:
raise RuntimeError(f"Command {self._cmd} returned status {status}")
else:
self.log(f"Warning: command {self._cmd} returned status {status}")
return status
|
python
|
def join(self, ignore_errors=False):
"""Waits until last executed command completed."""
assert self._status_fn, "Asked to join a task which hasn't had any commands executed on it"
check_interval = 0.2
status_fn = self._status_fn
if not self.wait_for_file(status_fn, max_wait_sec=30):
self.log(f"Retrying waiting for {status_fn}")
while not self.exists(status_fn):
self.log(f"Still waiting for {self._cmd}")
self.wait_for_file(status_fn, max_wait_sec=30)
contents = self.read(status_fn)
# if empty wait a bit to allow for race condition
if len(contents) == 0:
time.sleep(check_interval)
contents = self.read(status_fn)
status = int(contents.strip())
self.last_status = status
if status != 0:
extra_msg = '(ignoring error)' if ignore_errors else '(failing)'
if util.is_set('NCLUSTER_RUN_WITH_OUTPUT_ON_FAILURE') or True:
self.log(
f"Start failing output {extra_msg}: \n{'*'*80}\n\n '{self.read(self._out_fn)}'")
self.log(f"\n{'*'*80}\nEnd failing output")
if not ignore_errors:
raise RuntimeError(f"Command {self._cmd} returned status {status}")
else:
self.log(f"Warning: command {self._cmd} returned status {status}")
return status
|
[
"def",
"join",
"(",
"self",
",",
"ignore_errors",
"=",
"False",
")",
":",
"assert",
"self",
".",
"_status_fn",
",",
"\"Asked to join a task which hasn't had any commands executed on it\"",
"check_interval",
"=",
"0.2",
"status_fn",
"=",
"self",
".",
"_status_fn",
"if",
"not",
"self",
".",
"wait_for_file",
"(",
"status_fn",
",",
"max_wait_sec",
"=",
"30",
")",
":",
"self",
".",
"log",
"(",
"f\"Retrying waiting for {status_fn}\"",
")",
"while",
"not",
"self",
".",
"exists",
"(",
"status_fn",
")",
":",
"self",
".",
"log",
"(",
"f\"Still waiting for {self._cmd}\"",
")",
"self",
".",
"wait_for_file",
"(",
"status_fn",
",",
"max_wait_sec",
"=",
"30",
")",
"contents",
"=",
"self",
".",
"read",
"(",
"status_fn",
")",
"# if empty wait a bit to allow for race condition",
"if",
"len",
"(",
"contents",
")",
"==",
"0",
":",
"time",
".",
"sleep",
"(",
"check_interval",
")",
"contents",
"=",
"self",
".",
"read",
"(",
"status_fn",
")",
"status",
"=",
"int",
"(",
"contents",
".",
"strip",
"(",
")",
")",
"self",
".",
"last_status",
"=",
"status",
"if",
"status",
"!=",
"0",
":",
"extra_msg",
"=",
"'(ignoring error)'",
"if",
"ignore_errors",
"else",
"'(failing)'",
"if",
"util",
".",
"is_set",
"(",
"'NCLUSTER_RUN_WITH_OUTPUT_ON_FAILURE'",
")",
"or",
"True",
":",
"self",
".",
"log",
"(",
"f\"Start failing output {extra_msg}: \\n{'*'*80}\\n\\n '{self.read(self._out_fn)}'\"",
")",
"self",
".",
"log",
"(",
"f\"\\n{'*'*80}\\nEnd failing output\"",
")",
"if",
"not",
"ignore_errors",
":",
"raise",
"RuntimeError",
"(",
"f\"Command {self._cmd} returned status {status}\"",
")",
"else",
":",
"self",
".",
"log",
"(",
"f\"Warning: command {self._cmd} returned status {status}\"",
")",
"return",
"status"
] |
Waits until last executed command completed.
|
[
"Waits",
"until",
"last",
"executed",
"command",
"completed",
"."
] |
2fd359621896717197b479c7174d06d80df1529b
|
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_backend.py#L272-L302
|
train
|
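The join() protocol above relies on the remote command writing its exit code to a status file ('echo $? > status_fn'), so a missing or empty file means the command is still running. A local sketch of that polling loop, with a hard timeout added for illustration:

import os
import time

def wait_for_status(status_fn: str, check_interval: float = 0.2,
                    max_wait_sec: float = 30.0) -> int:
    """Poll until status_fn exists and holds an exit code, as join() does."""
    deadline = time.time() + max_wait_sec
    while time.time() < deadline:
        if os.path.exists(status_fn):
            contents = open(status_fn).read().strip()
            if contents:  # empty file: the writer raced us, retry
                return int(contents)
        time.sleep(check_interval)
    raise TimeoutError(f"no exit status written to {status_fn}")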
diux-dev/ncluster
|
ncluster/aws_backend.py
|
Task._run_with_output_on_failure
|
def _run_with_output_on_failure(self, cmd, non_blocking=False,
ignore_errors=False,
max_wait_sec=365 * 24 * 3600,
check_interval=0.2) -> str:
"""Experimental version of run propagates error messages to client. This command will be default "run" eventually"""
if not self._can_run:
assert False, "Using .run before initialization finished"
if '\n' in cmd:
assert False, "Don't support multi-line for run2"
cmd = cmd.strip()
if cmd.startswith('#'): # ignore empty/commented out lines
return ''
self.run_counter += 1
self.log("tmux> %s", cmd)
self._cmd = cmd
self._cmd_fn = f'{self.remote_scratch}/{self.run_counter}.cmd'
self._status_fn = f'{self.remote_scratch}/{self.run_counter}.status'
self._out_fn = f'{self.remote_scratch}/{self.run_counter}.out'
cmd = util.shell_strip_comment(cmd)
assert '&' not in cmd, f"cmd {cmd} contains &, that breaks things"
# modify command to dump shell success status into file
self.file_write(self._cmd_fn, cmd + '\n')
# modified_cmd = f'{cmd} > {out_fn} 2>&1; echo $? > {status_fn}'
# https://stackoverflow.com/a/692407/419116
# $cmd > >(tee -a fn) 2> >(tee -a fn >&2)
modified_cmd = f'{cmd} > >(tee -a {self._out_fn}) 2> >(tee -a {self._out_fn} >&2); echo $? > {self._status_fn}'
modified_cmd = shlex.quote(modified_cmd)
start_time = time.time()
tmux_window = self.tmux_session + ':' + str(self.tmux_window_id)
tmux_cmd = f"tmux send-keys -t {tmux_window} {modified_cmd} Enter"
self._run_raw(tmux_cmd, ignore_errors=ignore_errors)
if non_blocking:
return 0
if not self.wait_for_file(self._status_fn, max_wait_sec=60):
self.log(f"Retrying waiting for {self._status_fn}")
elapsed_time = time.time() - start_time
while not self.exists(self._status_fn) and elapsed_time < max_wait_sec:
self.log(f"Still waiting for {cmd}")
self.wait_for_file(self._status_fn, max_wait_sec=60)
elapsed_time = time.time() - start_time
contents = self.read(self._status_fn)
# if empty wait a bit to allow for race condition
if len(contents) == 0:
time.sleep(check_interval)
contents = self.read(self._status_fn)
status = int(contents.strip())
self.last_status = status
if status != 0:
extra_msg = '(ignoring error)' if ignore_errors else '(failing)'
self.log(
f"Start failing output {extra_msg}: \n{'*'*80}\n\n '{self.read(self._out_fn)}'")
self.log(f"\n{'*'*80}\nEnd failing output")
if not ignore_errors:
raise RuntimeError(f"Command {cmd} returned status {status}")
else:
self.log(f"Warning: command {cmd} returned status {status}")
return self.read(self._out_fn)
|
python
|
def _run_with_output_on_failure(self, cmd, non_blocking=False,
ignore_errors=False,
max_wait_sec=365 * 24 * 3600,
check_interval=0.2) -> str:
"""Experimental version of run propagates error messages to client. This command will be default "run" eventually"""
if not self._can_run:
assert False, "Using .run before initialization finished"
if '\n' in cmd:
assert False, "Don't support multi-line for run2"
cmd = cmd.strip()
if cmd.startswith('#'): # ignore empty/commented out lines
return ''
self.run_counter += 1
self.log("tmux> %s", cmd)
self._cmd = cmd
self._cmd_fn = f'{self.remote_scratch}/{self.run_counter}.cmd'
self._status_fn = f'{self.remote_scratch}/{self.run_counter}.status'
self._out_fn = f'{self.remote_scratch}/{self.run_counter}.out'
cmd = util.shell_strip_comment(cmd)
assert '&' not in cmd, f"cmd {cmd} contains &, that breaks things"
# modify command to dump shell success status into file
self.file_write(self._cmd_fn, cmd + '\n')
# modified_cmd = f'{cmd} > {out_fn} 2>&1; echo $? > {status_fn}'
# https://stackoverflow.com/a/692407/419116
# $cmd > >(tee -a fn) 2> >(tee -a fn >&2)
modified_cmd = f'{cmd} > >(tee -a {self._out_fn}) 2> >(tee -a {self._out_fn} >&2); echo $? > {self._status_fn}'
modified_cmd = shlex.quote(modified_cmd)
start_time = time.time()
tmux_window = self.tmux_session + ':' + str(self.tmux_window_id)
tmux_cmd = f"tmux send-keys -t {tmux_window} {modified_cmd} Enter"
self._run_raw(tmux_cmd, ignore_errors=ignore_errors)
if non_blocking:
return 0
if not self.wait_for_file(self._status_fn, max_wait_sec=60):
self.log(f"Retrying waiting for {self._status_fn}")
elapsed_time = time.time() - start_time
while not self.exists(self._status_fn) and elapsed_time < max_wait_sec:
self.log(f"Still waiting for {cmd}")
self.wait_for_file(self._status_fn, max_wait_sec=60)
elapsed_time = time.time() - start_time
contents = self.read(self._status_fn)
# if empty wait a bit to allow for race condition
if len(contents) == 0:
time.sleep(check_interval)
contents = self.read(self._status_fn)
status = int(contents.strip())
self.last_status = status
if status != 0:
extra_msg = '(ignoring error)' if ignore_errors else '(failing)'
self.log(
f"Start failing output {extra_msg}: \n{'*'*80}\n\n '{self.read(self._out_fn)}'")
self.log(f"\n{'*'*80}\nEnd failing output")
if not ignore_errors:
raise RuntimeError(f"Command {cmd} returned status {status}")
else:
self.log(f"Warning: command {cmd} returned status {status}")
return self.read(self._out_fn)
|
[
"def",
"_run_with_output_on_failure",
"(",
"self",
",",
"cmd",
",",
"non_blocking",
"=",
"False",
",",
"ignore_errors",
"=",
"False",
",",
"max_wait_sec",
"=",
"365",
"*",
"24",
"*",
"3600",
",",
"check_interval",
"=",
"0.2",
")",
"->",
"str",
":",
"if",
"not",
"self",
".",
"_can_run",
":",
"assert",
"False",
",",
"\"Using .run before initialization finished\"",
"if",
"'\\n'",
"in",
"cmd",
":",
"assert",
"False",
",",
"\"Don't support multi-line for run2\"",
"cmd",
"=",
"cmd",
".",
"strip",
"(",
")",
"if",
"cmd",
".",
"startswith",
"(",
"'#'",
")",
":",
"# ignore empty/commented out lines",
"return",
"''",
"self",
".",
"run_counter",
"+=",
"1",
"self",
".",
"log",
"(",
"\"tmux> %s\"",
",",
"cmd",
")",
"self",
".",
"_cmd",
"=",
"cmd",
"self",
".",
"_cmd_fn",
"=",
"f'{self.remote_scratch}/{self.run_counter}.cmd'",
"self",
".",
"_status_fn",
"=",
"f'{self.remote_scratch}/{self.run_counter}.status'",
"self",
".",
"_out_fn",
"=",
"f'{self.remote_scratch}/{self.run_counter}.out'",
"cmd",
"=",
"util",
".",
"shell_strip_comment",
"(",
"cmd",
")",
"assert",
"'&'",
"not",
"in",
"cmd",
",",
"f\"cmd {cmd} contains &, that breaks things\"",
"# modify command to dump shell success status into file",
"self",
".",
"file_write",
"(",
"self",
".",
"_cmd_fn",
",",
"cmd",
"+",
"'\\n'",
")",
"# modified_cmd = f'{cmd} > {out_fn} 2>&1; echo $? > {status_fn}'",
"# https://stackoverflow.com/a/692407/419116",
"# $cmd > >(tee -a fn) 2> >(tee -a fn >&2)",
"modified_cmd",
"=",
"f'{cmd} > >(tee -a {self._out_fn}) 2> >(tee -a {self._out_fn} >&2); echo $? > {self._status_fn}'",
"modified_cmd",
"=",
"shlex",
".",
"quote",
"(",
"modified_cmd",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"tmux_window",
"=",
"self",
".",
"tmux_session",
"+",
"':'",
"+",
"str",
"(",
"self",
".",
"tmux_window_id",
")",
"tmux_cmd",
"=",
"f\"tmux send-keys -t {tmux_window} {modified_cmd} Enter\"",
"self",
".",
"_run_raw",
"(",
"tmux_cmd",
",",
"ignore_errors",
"=",
"ignore_errors",
")",
"if",
"non_blocking",
":",
"return",
"0",
"if",
"not",
"self",
".",
"wait_for_file",
"(",
"self",
".",
"_status_fn",
",",
"max_wait_sec",
"=",
"60",
")",
":",
"self",
".",
"log",
"(",
"f\"Retrying waiting for {self._status_fn}\"",
")",
"elapsed_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
"while",
"not",
"self",
".",
"exists",
"(",
"self",
".",
"_status_fn",
")",
"and",
"elapsed_time",
"<",
"max_wait_sec",
":",
"self",
".",
"log",
"(",
"f\"Still waiting for {cmd}\"",
")",
"self",
".",
"wait_for_file",
"(",
"self",
".",
"_status_fn",
",",
"max_wait_sec",
"=",
"60",
")",
"elapsed_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
"contents",
"=",
"self",
".",
"read",
"(",
"self",
".",
"_status_fn",
")",
"# if empty wait a bit to allow for race condition",
"if",
"len",
"(",
"contents",
")",
"==",
"0",
":",
"time",
".",
"sleep",
"(",
"check_interval",
")",
"contents",
"=",
"self",
".",
"read",
"(",
"self",
".",
"_status_fn",
")",
"status",
"=",
"int",
"(",
"contents",
".",
"strip",
"(",
")",
")",
"self",
".",
"last_status",
"=",
"status",
"if",
"status",
"!=",
"0",
":",
"extra_msg",
"=",
"'(ignoring error)'",
"if",
"ignore_errors",
"else",
"'(failing)'",
"self",
".",
"log",
"(",
"f\"Start failing output {extra_msg}: \\n{'*'*80}\\n\\n '{self.read(self._out_fn)}'\"",
")",
"self",
".",
"log",
"(",
"f\"\\n{'*'*80}\\nEnd failing output\"",
")",
"if",
"not",
"ignore_errors",
":",
"raise",
"RuntimeError",
"(",
"f\"Command {cmd} returned status {status}\"",
")",
"else",
":",
"self",
".",
"log",
"(",
"f\"Warning: command {cmd} returned status {status}\"",
")",
"return",
"self",
".",
"read",
"(",
"self",
".",
"_out_fn",
")"
] |
Experimental version of run that propagates error messages to the client. This command will eventually be the default "run".
|
[
"Experimental",
"version",
"of",
"run",
"propagates",
"error",
"messages",
"to",
"client",
".",
"This",
"command",
"will",
"be",
"default",
"run",
"eventually"
] |
2fd359621896717197b479c7174d06d80df1529b
|
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_backend.py#L304-L373
|
train
|
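The core trick in _run_with_output_on_failure is the bash wrapper it builds before handing the command to tmux: process substitution mirrors stdout and stderr into the .out file while keeping them on the original streams, and the exit code lands in the .status file. A self-contained sketch of just that construction (the file names are illustrative):

import shlex

def wrap_for_capture(cmd: str, out_fn: str, status_fn: str) -> str:
    """Mirror stdout/stderr into out_fn via tee, then record the exit code."""
    wrapped = (f'{cmd} > >(tee -a {out_fn}) 2> >(tee -a {out_fn} >&2); '
               f'echo $? > {status_fn}')
    return shlex.quote(wrapped)  # quoted so tmux send-keys sees a single token

print(wrap_for_capture('ls /tmp', '/tmp/1.out', '/tmp/1.status'))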
diux-dev/ncluster
|
ncluster/aws_backend.py
|
Task.upload
|
def upload(self, local_fn: str, remote_fn: str = '',
dont_overwrite: bool = False) -> None:
"""Uploads file to remote instance. If location not specified, dumps it
into default directory. If remote location has files or directories with the
same name, behavior is undefined."""
# support wildcard through glob
if '*' in local_fn:
for local_subfn in glob.glob(local_fn):
self.upload(local_subfn)
return
if '#' in local_fn: # hashes also give problems from shell commands
self.log("skipping backup file {local_fn}")
return
if not self.sftp:
self.sftp = u.call_with_retries(self.ssh_client.open_sftp,
'self.ssh_client.open_sftp')
def maybe_fix_mode(local_fn_, remote_fn_):
"""Makes remote file execute for locally executable files"""
mode = oct(os.stat(local_fn_)[stat.ST_MODE])[-3:]
if '7' in mode:
self.log(f"Making {remote_fn_} executable with mode {mode}")
# use raw run, in case tmux is unavailable
self._run_raw(f"chmod {mode} {remote_fn_}")
# augmented SFTP client that can transfer directories, from
# https://stackoverflow.com/a/19974994/419116
def _put_dir(source, target):
""" Uploads the contents of the source directory to the target path."""
def _safe_mkdir(path, mode=511, ignore_existing=True):
""" Augments mkdir by adding an option to not fail if the folder exists asdf asdf asdf as"""
try:
self.sftp.mkdir(path, mode)
except IOError:
if ignore_existing:
pass
else:
raise
assert os.path.isdir(source)
_safe_mkdir(target)
for item in os.listdir(source):
if os.path.isfile(os.path.join(source, item)):
self.sftp.put(os.path.join(source, item), os.path.join(target, item))
maybe_fix_mode(os.path.join(source, item), os.path.join(target, item))
else:
_safe_mkdir(f'{target}/{item}')
_put_dir(f'{source}/{item}', f'{target}/{item}')
if not remote_fn:
remote_fn = os.path.basename(local_fn)
self.log('uploading ' + local_fn + ' to ' + remote_fn)
remote_fn = remote_fn.replace('~', self.homedir)
if '/' in remote_fn:
remote_dir = os.path.dirname(remote_fn)
assert self.exists(
remote_dir), f"Remote dir {remote_dir} doesn't exist"
if dont_overwrite and self.exists(remote_fn):
self.log("Remote file %s exists, skipping" % (remote_fn,))
return
assert os.path.exists(local_fn), f"{local_fn} not found"
if os.path.isdir(local_fn):
_put_dir(local_fn, remote_fn)
else:
assert os.path.isfile(local_fn), "%s is not a file" % (local_fn,)
# this crashes with IOError when upload failed
if self.exists(remote_fn) and self.isdir(remote_fn):
remote_fn = remote_fn + '/' + os.path.basename(local_fn)
self.sftp.put(localpath=local_fn, remotepath=remote_fn)
maybe_fix_mode(local_fn, remote_fn)
|
python
|
def upload(self, local_fn: str, remote_fn: str = '',
dont_overwrite: bool = False) -> None:
"""Uploads file to remote instance. If location not specified, dumps it
into default directory. If remote location has files or directories with the
same name, behavior is undefined."""
# support wildcard through glob
if '*' in local_fn:
for local_subfn in glob.glob(local_fn):
self.upload(local_subfn)
return
if '#' in local_fn: # hashes also give problems from shell commands
self.log("skipping backup file {local_fn}")
return
if not self.sftp:
self.sftp = u.call_with_retries(self.ssh_client.open_sftp,
'self.ssh_client.open_sftp')
def maybe_fix_mode(local_fn_, remote_fn_):
"""Makes remote file execute for locally executable files"""
mode = oct(os.stat(local_fn_)[stat.ST_MODE])[-3:]
if '7' in mode:
self.log(f"Making {remote_fn_} executable with mode {mode}")
# use raw run, in case tmux is unavailable
self._run_raw(f"chmod {mode} {remote_fn_}")
# augmented SFTP client that can transfer directories, from
# https://stackoverflow.com/a/19974994/419116
def _put_dir(source, target):
""" Uploads the contents of the source directory to the target path."""
def _safe_mkdir(path, mode=511, ignore_existing=True):
""" Augments mkdir by adding an option to not fail if the folder exists asdf asdf asdf as"""
try:
self.sftp.mkdir(path, mode)
except IOError:
if ignore_existing:
pass
else:
raise
assert os.path.isdir(source)
_safe_mkdir(target)
for item in os.listdir(source):
if os.path.isfile(os.path.join(source, item)):
self.sftp.put(os.path.join(source, item), os.path.join(target, item))
maybe_fix_mode(os.path.join(source, item), os.path.join(target, item))
else:
_safe_mkdir(f'{target}/{item}')
_put_dir(f'{source}/{item}', f'{target}/{item}')
if not remote_fn:
remote_fn = os.path.basename(local_fn)
self.log('uploading ' + local_fn + ' to ' + remote_fn)
remote_fn = remote_fn.replace('~', self.homedir)
if '/' in remote_fn:
remote_dir = os.path.dirname(remote_fn)
assert self.exists(
remote_dir), f"Remote dir {remote_dir} doesn't exist"
if dont_overwrite and self.exists(remote_fn):
self.log("Remote file %s exists, skipping" % (remote_fn,))
return
assert os.path.exists(local_fn), f"{local_fn} not found"
if os.path.isdir(local_fn):
_put_dir(local_fn, remote_fn)
else:
assert os.path.isfile(local_fn), "%s is not a file" % (local_fn,)
# this crashes with IOError when upload failed
if self.exists(remote_fn) and self.isdir(remote_fn):
remote_fn = remote_fn + '/' + os.path.basename(local_fn)
self.sftp.put(localpath=local_fn, remotepath=remote_fn)
maybe_fix_mode(local_fn, remote_fn)
|
[
"def",
"upload",
"(",
"self",
",",
"local_fn",
":",
"str",
",",
"remote_fn",
":",
"str",
"=",
"''",
",",
"dont_overwrite",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"# support wildcard through glob",
"if",
"'*'",
"in",
"local_fn",
":",
"for",
"local_subfn",
"in",
"glob",
".",
"glob",
"(",
"local_fn",
")",
":",
"self",
".",
"upload",
"(",
"local_subfn",
")",
"return",
"if",
"'#'",
"in",
"local_fn",
":",
"# hashes also give problems from shell commands",
"self",
".",
"log",
"(",
"\"skipping backup file {local_fn}\"",
")",
"return",
"if",
"not",
"self",
".",
"sftp",
":",
"self",
".",
"sftp",
"=",
"u",
".",
"call_with_retries",
"(",
"self",
".",
"ssh_client",
".",
"open_sftp",
",",
"'self.ssh_client.open_sftp'",
")",
"def",
"maybe_fix_mode",
"(",
"local_fn_",
",",
"remote_fn_",
")",
":",
"\"\"\"Makes remote file execute for locally executable files\"\"\"",
"mode",
"=",
"oct",
"(",
"os",
".",
"stat",
"(",
"local_fn_",
")",
"[",
"stat",
".",
"ST_MODE",
"]",
")",
"[",
"-",
"3",
":",
"]",
"if",
"'7'",
"in",
"mode",
":",
"self",
".",
"log",
"(",
"f\"Making {remote_fn_} executable with mode {mode}\"",
")",
"# use raw run, in case tmux is unavailable",
"self",
".",
"_run_raw",
"(",
"f\"chmod {mode} {remote_fn_}\"",
")",
"# augmented SFTP client that can transfer directories, from",
"# https://stackoverflow.com/a/19974994/419116",
"def",
"_put_dir",
"(",
"source",
",",
"target",
")",
":",
"\"\"\" Uploads the contents of the source directory to the target path.\"\"\"",
"def",
"_safe_mkdir",
"(",
"path",
",",
"mode",
"=",
"511",
",",
"ignore_existing",
"=",
"True",
")",
":",
"\"\"\" Augments mkdir by adding an option to not fail if the folder exists asdf asdf asdf as\"\"\"",
"try",
":",
"self",
".",
"sftp",
".",
"mkdir",
"(",
"path",
",",
"mode",
")",
"except",
"IOError",
":",
"if",
"ignore_existing",
":",
"pass",
"else",
":",
"raise",
"assert",
"os",
".",
"path",
".",
"isdir",
"(",
"source",
")",
"_safe_mkdir",
"(",
"target",
")",
"for",
"item",
"in",
"os",
".",
"listdir",
"(",
"source",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"source",
",",
"item",
")",
")",
":",
"self",
".",
"sftp",
".",
"put",
"(",
"os",
".",
"path",
".",
"join",
"(",
"source",
",",
"item",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"target",
",",
"item",
")",
")",
"maybe_fix_mode",
"(",
"os",
".",
"path",
".",
"join",
"(",
"source",
",",
"item",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"target",
",",
"item",
")",
")",
"else",
":",
"_safe_mkdir",
"(",
"f'{target}/{item}'",
")",
"_put_dir",
"(",
"f'{source}/{item}'",
",",
"f'{target}/{item}'",
")",
"if",
"not",
"remote_fn",
":",
"remote_fn",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"local_fn",
")",
"self",
".",
"log",
"(",
"'uploading '",
"+",
"local_fn",
"+",
"' to '",
"+",
"remote_fn",
")",
"remote_fn",
"=",
"remote_fn",
".",
"replace",
"(",
"'~'",
",",
"self",
".",
"homedir",
")",
"if",
"'/'",
"in",
"remote_fn",
":",
"remote_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"remote_fn",
")",
"assert",
"self",
".",
"exists",
"(",
"remote_dir",
")",
",",
"f\"Remote dir {remote_dir} doesn't exist\"",
"if",
"dont_overwrite",
"and",
"self",
".",
"exists",
"(",
"remote_fn",
")",
":",
"self",
".",
"log",
"(",
"\"Remote file %s exists, skipping\"",
"%",
"(",
"remote_fn",
",",
")",
")",
"return",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"local_fn",
")",
",",
"f\"{local_fn} not found\"",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"local_fn",
")",
":",
"_put_dir",
"(",
"local_fn",
",",
"remote_fn",
")",
"else",
":",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"local_fn",
")",
",",
"\"%s is not a file\"",
"%",
"(",
"local_fn",
",",
")",
"# this crashes with IOError when upload failed",
"if",
"self",
".",
"exists",
"(",
"remote_fn",
")",
"and",
"self",
".",
"isdir",
"(",
"remote_fn",
")",
":",
"remote_fn",
"=",
"remote_fn",
"+",
"'/'",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"local_fn",
")",
"self",
".",
"sftp",
".",
"put",
"(",
"localpath",
"=",
"local_fn",
",",
"remotepath",
"=",
"remote_fn",
")",
"maybe_fix_mode",
"(",
"local_fn",
",",
"remote_fn",
")"
] |
Uploads file to remote instance. If location not specified, dumps it
into default directory. If remote location has files or directories with the
same name, behavior is undefined.
|
[
"Uploads",
"file",
"to",
"remote",
"instance",
".",
"If",
"location",
"not",
"specified",
"dumps",
"it",
"into",
"default",
"directory",
".",
"If",
"remote",
"location",
"has",
"files",
"or",
"directories",
"with",
"the",
"same",
"name",
"behavior",
"is",
"undefined",
"."
] |
2fd359621896717197b479c7174d06d80df1529b
|
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/aws_backend.py#L398-L475
|
train
|
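The permission test inside maybe_fix_mode is easy to miss: it looks only at the last three octal digits of the local mode and treats any '7' (read/write/execute for some class) as a cue to chmod the remote copy. A small sketch of that predicate on its own:

import os
import stat

def is_locally_executable(path: str) -> bool:
    """Replicate maybe_fix_mode's check on the local file mode."""
    mode = oct(os.stat(path)[stat.ST_MODE])[-3:]  # e.g. '755'
    return '7' in mode

print(is_locally_executable('/bin/ls'))  # True on a typical 755 install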
diux-dev/ncluster
|
examples/gpubox.py
|
_replace_lines
|
def _replace_lines(fn, startswith, new_line):
"""Replace lines starting with starts_with in fn with new_line."""
new_lines = []
for line in open(fn):
if line.startswith(startswith):
new_lines.append(new_line)
else:
new_lines.append(line)
with open(fn, 'w') as f:
f.write('\n'.join(new_lines))
|
python
|
def _replace_lines(fn, startswith, new_line):
"""Replace lines starting with starts_with in fn with new_line."""
new_lines = []
for line in open(fn):
if line.startswith(startswith):
new_lines.append(new_line)
else:
new_lines.append(line)
with open(fn, 'w') as f:
f.write('\n'.join(new_lines))
|
[
"def",
"_replace_lines",
"(",
"fn",
",",
"startswith",
",",
"new_line",
")",
":",
"new_lines",
"=",
"[",
"]",
"for",
"line",
"in",
"open",
"(",
"fn",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"startswith",
")",
":",
"new_lines",
".",
"append",
"(",
"new_line",
")",
"else",
":",
"new_lines",
".",
"append",
"(",
"line",
")",
"with",
"open",
"(",
"fn",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"new_lines",
")",
")"
] |
Replace lines starting with starts_with in fn with new_line.
|
[
"Replace",
"lines",
"starting",
"with",
"starts_with",
"in",
"fn",
"with",
"new_line",
"."
] |
2fd359621896717197b479c7174d06d80df1529b
|
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/examples/gpubox.py#L59-L68
|
train
|
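Note that _replace_lines iterates open(fn), whose lines keep their trailing newline, and then joins with '\n' again, which doubles every line break on write (while the injected new_line usually carries none). A sketch of a variant that normalizes the newlines first:

import tempfile

def replace_lines(fn: str, startswith: str, new_line: str) -> None:
    """Like _replace_lines above, but strips trailing newlines before joining."""
    with open(fn) as f:
        lines = [line.rstrip('\n') for line in f]
    out = [new_line if line.startswith(startswith) else line for line in lines]
    with open(fn, 'w') as f:
        f.write('\n'.join(out) + '\n')

with tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False) as tmp:
    tmp.write('port=80\nhost=a\n')
replace_lines(tmp.name, 'port=', 'port=8080')
print(open(tmp.name).read())  # port=8080, then host=a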
diux-dev/ncluster
|
ncluster/util.py
|
now_micros
|
def now_micros(absolute=False) -> int:
"""Return current micros since epoch as integer."""
micros = int(time.time() * 1e6)
if absolute:
return micros
return micros - EPOCH_MICROS
|
python
|
def now_micros(absolute=False) -> int:
"""Return current micros since epoch as integer."""
micros = int(time.time() * 1e6)
if absolute:
return micros
return micros - EPOCH_MICROS
|
[
"def",
"now_micros",
"(",
"absolute",
"=",
"False",
")",
"->",
"int",
":",
"micros",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1e6",
")",
"if",
"absolute",
":",
"return",
"micros",
"return",
"micros",
"-",
"EPOCH_MICROS"
] |
Return current micros since epoch as integer.
|
[
"Return",
"current",
"micros",
"since",
"epoch",
"as",
"integer",
"."
] |
2fd359621896717197b479c7174d06d80df1529b
|
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/util.py#L23-L28
|
train
|
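The non-absolute branch only makes sense relative to EPOCH_MICROS, which this dump does not show; the sketch below assumes it is captured once at import time, which matches how the function is used for relative timing:

import time

EPOCH_MICROS = int(time.time() * 1e6)  # assumed: recorded at module import

def now_micros(absolute: bool = False) -> int:
    micros = int(time.time() * 1e6)
    return micros if absolute else micros - EPOCH_MICROS

start = now_micros()
time.sleep(0.01)
print(now_micros() - start)  # roughly 10_000 microseconds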
diux-dev/ncluster
|
ncluster/util.py
|
now_millis
|
def now_millis(absolute=False) -> int:
"""Return current millis since epoch as integer."""
millis = int(time.time() * 1e3)
if absolute:
return millis
return millis - EPOCH_MICROS // 1000
|
python
|
def now_millis(absolute=False) -> int:
"""Return current millis since epoch as integer."""
millis = int(time.time() * 1e3)
if absolute:
return millis
return millis - EPOCH_MICROS // 1000
|
[
"def",
"now_millis",
"(",
"absolute",
"=",
"False",
")",
"->",
"int",
":",
"millis",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1e3",
")",
"if",
"absolute",
":",
"return",
"millis",
"return",
"millis",
"-",
"EPOCH_MICROS",
"//",
"1000"
] |
Return current millis since epoch as integer.
|
[
"Return",
"current",
"millis",
"since",
"epoch",
"as",
"integer",
"."
] |
2fd359621896717197b479c7174d06d80df1529b
|
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/util.py#L31-L36
|
train
|
diux-dev/ncluster
|
ncluster/util.py
|
install_pdb_handler
|
def install_pdb_handler():
"""Make CTRL+\ break into gdb."""
import signal
import pdb
def handler(_signum, _frame):
pdb.set_trace()
signal.signal(signal.SIGQUIT, handler)
|
python
|
def install_pdb_handler():
"""Make CTRL+\ break into gdb."""
import signal
import pdb
def handler(_signum, _frame):
pdb.set_trace()
signal.signal(signal.SIGQUIT, handler)
|
[
"def",
"install_pdb_handler",
"(",
")",
":",
"import",
"signal",
"import",
"pdb",
"def",
"handler",
"(",
"_signum",
",",
"_frame",
")",
":",
"pdb",
".",
"set_trace",
"(",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGQUIT",
",",
"handler",
")"
] |
Make CTRL+\ break into pdb.
|
[
"Make",
"CTRL",
"+",
"\\",
"break",
"into",
"gdb",
"."
] |
2fd359621896717197b479c7174d06d80df1529b
|
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/util.py#L56-L65
|
train
|
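A related option that is not from this codebase: on Unix, faulthandler can dump every thread's stack on the same signal without stopping the process, which is friendlier than a pdb prompt when there is no usable terminal.

import faulthandler
import signal

# Dump all thread stacks to stderr on SIGQUIT (CTRL+\), then keep running;
# complements the interactive pdb handler above.
faulthandler.register(signal.SIGQUIT)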
diux-dev/ncluster
|
ncluster/util.py
|
shell_add_echo
|
def shell_add_echo(script):
"""Goes over each line script, adds "echo cmd" in front of each cmd.
ls a
becomes
echo * ls a
ls a
"""
new_script = ""
for cmd in script.split('\n'):
cmd = cmd.strip()
if not cmd:
continue
new_script += "echo \\* " + shlex.quote(cmd) + "\n"
new_script += cmd + "\n"
return new_script
|
python
|
def shell_add_echo(script):
"""Goes over each line script, adds "echo cmd" in front of each cmd.
ls a
becomes
echo * ls a
ls a
"""
new_script = ""
for cmd in script.split('\n'):
cmd = cmd.strip()
if not cmd:
continue
new_script += "echo \\* " + shlex.quote(cmd) + "\n"
new_script += cmd + "\n"
return new_script
|
[
"def",
"shell_add_echo",
"(",
"script",
")",
":",
"new_script",
"=",
"\"\"",
"for",
"cmd",
"in",
"script",
".",
"split",
"(",
"'\\n'",
")",
":",
"cmd",
"=",
"cmd",
".",
"strip",
"(",
")",
"if",
"not",
"cmd",
":",
"continue",
"new_script",
"+=",
"\"echo \\\\* \"",
"+",
"shlex",
".",
"quote",
"(",
"cmd",
")",
"+",
"\"\\n\"",
"new_script",
"+=",
"cmd",
"+",
"\"\\n\"",
"return",
"new_script"
] |
Goes over each line of the script, adds "echo cmd" in front of each cmd.
ls a
becomes
echo * ls a
ls a
|
[
"Goes",
"over",
"each",
"line",
"script",
"adds",
"echo",
"cmd",
"in",
"front",
"of",
"each",
"cmd",
"."
] |
2fd359621896717197b479c7174d06d80df1529b
|
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/util.py#L68-L85
|
train
|
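A quick usage sketch, with the expected output in comments: every command gets an escaped echo line in front of it, so a session transcript shows what ran before each block of output.

from ncluster.util import shell_add_echo

script = "cd /tmp\nls -l"
print(shell_add_echo(script), end='')
# echo \* 'cd /tmp'
# cd /tmp
# echo \* 'ls -l'
# ls -l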
diux-dev/ncluster
|
ncluster/util.py
|
random_id
|
def random_id(k=5):
"""Random id to use for AWS identifiers."""
# https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
return ''.join(random.choices(string.ascii_lowercase + string.digits, k=k))
|
python
|
def random_id(k=5):
"""Random id to use for AWS identifiers."""
# https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
return ''.join(random.choices(string.ascii_lowercase + string.digits, k=k))
|
[
"def",
"random_id",
"(",
"k",
"=",
"5",
")",
":",
"# https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python",
"return",
"''",
".",
"join",
"(",
"random",
".",
"choices",
"(",
"string",
".",
"ascii_lowercase",
"+",
"string",
".",
"digits",
",",
"k",
"=",
"k",
")",
")"
] |
Random id to use for AWS identifiers.
|
[
"Random",
"id",
"to",
"use",
"for",
"AWS",
"identifiers",
"."
] |
2fd359621896717197b479c7174d06d80df1529b
|
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/util.py#L96-L99
|
train
|
diux-dev/ncluster
|
ncluster/util.py
|
alphanumeric_hash
|
def alphanumeric_hash(s: str, size=5):
"""Short alphanumeric string derived from hash of given string"""
import hashlib
import base64
hash_object = hashlib.md5(s.encode('ascii'))
s = base64.b32encode(hash_object.digest())
result = s[:size].decode('ascii').lower()
return result
|
python
|
def alphanumeric_hash(s: str, size=5):
"""Short alphanumeric string derived from hash of given string"""
import hashlib
import base64
hash_object = hashlib.md5(s.encode('ascii'))
s = base64.b32encode(hash_object.digest())
result = s[:size].decode('ascii').lower()
return result
|
[
"def",
"alphanumeric_hash",
"(",
"s",
":",
"str",
",",
"size",
"=",
"5",
")",
":",
"import",
"hashlib",
"import",
"base64",
"hash_object",
"=",
"hashlib",
".",
"md5",
"(",
"s",
".",
"encode",
"(",
"'ascii'",
")",
")",
"s",
"=",
"base64",
".",
"b32encode",
"(",
"hash_object",
".",
"digest",
"(",
")",
")",
"result",
"=",
"s",
"[",
":",
"size",
"]",
".",
"decode",
"(",
"'ascii'",
")",
".",
"lower",
"(",
")",
"return",
"result"
] |
Short alphanumeric string derived from hash of given string
|
[
"Short",
"alphanumeric",
"string",
"derived",
"from",
"hash",
"of",
"given",
"string"
] |
2fd359621896717197b479c7174d06d80df1529b
|
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/util.py#L102-L109
|
train
|
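The two helpers above cover complementary needs: random_id for names that must not collide, alphanumeric_hash for names that must stay stable across runs for the same input. The example values in the comments are illustrative only.

from ncluster.util import alphanumeric_hash, random_id

print(random_id())                   # fresh every call, e.g. 'p2m7c'
print(alphanumeric_hash('gpu-box'))  # deterministic for a given input
print(alphanumeric_hash('gpu-box'))  # the same five characters again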
diux-dev/ncluster
|
ncluster/util.py
|
is_bash_builtin
|
def is_bash_builtin(cmd):
"""Return true if command is invoking bash built-in
"""
# from compgen -b
bash_builtins = ['alias', 'bg', 'bind', 'alias', 'bg', 'bind', 'break',
'builtin', 'caller', 'cd', 'command', 'compgen', 'complete',
'compopt', 'continue', 'declare', 'dirs', 'disown', 'echo',
'enable', 'eval', 'exec', 'exit', 'export', 'false', 'fc',
'fg', 'getopts', 'hash', 'help', 'history', 'jobs', 'kill',
'let', 'local', 'logout', 'mapfile', 'popd', 'printf',
'pushd', 'pwd', 'read', 'readarray', 'readonly', 'return',
'set', 'shift', 'shopt', 'source', 'suspend', 'test',
'times', 'trap', 'true', 'type', 'typeset', 'ulimit',
'umask', 'unalias', 'unset', 'wait']
toks = cmd.split()
if toks and toks[0] in bash_builtins:
return True
return False
|
python
|
def is_bash_builtin(cmd):
"""Return true if command is invoking bash built-in
"""
# from compgen -b
bash_builtins = ['alias', 'bg', 'bind', 'alias', 'bg', 'bind', 'break',
'builtin', 'caller', 'cd', 'command', 'compgen', 'complete',
'compopt', 'continue', 'declare', 'dirs', 'disown', 'echo',
'enable', 'eval', 'exec', 'exit', 'export', 'false', 'fc',
'fg', 'getopts', 'hash', 'help', 'history', 'jobs', 'kill',
'let', 'local', 'logout', 'mapfile', 'popd', 'printf',
'pushd', 'pwd', 'read', 'readarray', 'readonly', 'return',
'set', 'shift', 'shopt', 'source', 'suspend', 'test',
'times', 'trap', 'true', 'type', 'typeset', 'ulimit',
'umask', 'unalias', 'unset', 'wait']
toks = cmd.split()
if toks and toks[0] in bash_builtins:
return True
return False
|
[
"def",
"is_bash_builtin",
"(",
"cmd",
")",
":",
"# from compgen -b",
"bash_builtins",
"=",
"[",
"'alias'",
",",
"'bg'",
",",
"'bind'",
",",
"'alias'",
",",
"'bg'",
",",
"'bind'",
",",
"'break'",
",",
"'builtin'",
",",
"'caller'",
",",
"'cd'",
",",
"'command'",
",",
"'compgen'",
",",
"'complete'",
",",
"'compopt'",
",",
"'continue'",
",",
"'declare'",
",",
"'dirs'",
",",
"'disown'",
",",
"'echo'",
",",
"'enable'",
",",
"'eval'",
",",
"'exec'",
",",
"'exit'",
",",
"'export'",
",",
"'false'",
",",
"'fc'",
",",
"'fg'",
",",
"'getopts'",
",",
"'hash'",
",",
"'help'",
",",
"'history'",
",",
"'jobs'",
",",
"'kill'",
",",
"'let'",
",",
"'local'",
",",
"'logout'",
",",
"'mapfile'",
",",
"'popd'",
",",
"'printf'",
",",
"'pushd'",
",",
"'pwd'",
",",
"'read'",
",",
"'readarray'",
",",
"'readonly'",
",",
"'return'",
",",
"'set'",
",",
"'shift'",
",",
"'shopt'",
",",
"'source'",
",",
"'suspend'",
",",
"'test'",
",",
"'times'",
",",
"'trap'",
",",
"'true'",
",",
"'type'",
",",
"'typeset'",
",",
"'ulimit'",
",",
"'umask'",
",",
"'unalias'",
",",
"'unset'",
",",
"'wait'",
"]",
"toks",
"=",
"cmd",
".",
"split",
"(",
")",
"if",
"toks",
"and",
"toks",
"[",
"0",
"]",
"in",
"bash_builtins",
":",
"return",
"True",
"return",
"False"
] |
Return true if command is invoking bash built-in
|
[
"Return",
"true",
"if",
"command",
"is",
"invoking",
"bash",
"built",
"-",
"in"
] |
2fd359621896717197b479c7174d06d80df1529b
|
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/util.py#L130-L147
|
train
|
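The check matters because builtins such as cd or export only exist inside a shell: running them through a bare exec (for example over ssh without a shell) fails, so callers can wrap them in 'bash -c' when this returns True.

from ncluster.util import is_bash_builtin

print(is_bash_builtin('cd /tmp && make'))  # True: 'cd' exists only inside a shell
print(is_bash_builtin('ls -l'))            # False: 'ls' is a regular binary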
diux-dev/ncluster
|
ncluster/util.py
|
is_set
|
def is_set(name):
"""Helper method to check if given property is set"""
val = os.environ.get(name, '0')
assert val == '0' or val == '1', f"env var {name} has value {val}, expected 0 or 1"
return val == '1'
|
python
|
def is_set(name):
"""Helper method to check if given property is set"""
val = os.environ.get(name, '0')
assert val == '0' or val == '1', f"env var {name} has value {val}, expected 0 or 1"
return val == '1'
|
[
"def",
"is_set",
"(",
"name",
")",
":",
"val",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"name",
",",
"'0'",
")",
"assert",
"val",
"==",
"'0'",
"or",
"val",
"==",
"'1'",
",",
"f\"env var {name} has value {val}, expected 0 or 1\"",
"return",
"val",
"==",
"'1'"
] |
Helper method to check if given property is set
|
[
"Helper",
"method",
"to",
"check",
"if",
"given",
"property",
"is",
"set"
] |
2fd359621896717197b479c7174d06d80df1529b
|
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/util.py#L150-L154
|
train
|
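Usage sketch: unset variables read as '0', and any value other than '0' or '1' trips the assert, so flags stay strictly boolean.

import os
from ncluster.util import is_set

os.environ['NCLUSTER_RUN_WITH_OUTPUT_ON_FAILURE'] = '1'
print(is_set('NCLUSTER_RUN_WITH_OUTPUT_ON_FAILURE'))  # True
print(is_set('NCLUSTER_UNSET_FLAG'))                  # False: missing reads as '0'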
diux-dev/ncluster
|
ncluster/util.py
|
assert_script_in_current_directory
|
def assert_script_in_current_directory():
"""Assert fail if current directory is different from location of the script"""
script = sys.argv[0]
assert os.path.abspath(os.path.dirname(script)) == os.path.abspath(
'.'), f"Change into directory of script {script} and run again."
|
python
|
def assert_script_in_current_directory():
"""Assert fail if current directory is different from location of the script"""
script = sys.argv[0]
assert os.path.abspath(os.path.dirname(script)) == os.path.abspath(
'.'), f"Change into directory of script {script} and run again."
|
[
"def",
"assert_script_in_current_directory",
"(",
")",
":",
"script",
"=",
"sys",
".",
"argv",
"[",
"0",
"]",
"assert",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"script",
")",
")",
"==",
"os",
".",
"path",
".",
"abspath",
"(",
"'.'",
")",
",",
"f\"Change into directory of script {script} and run again.\""
] |
Assert fail if current directory is different from location of the script
|
[
"Assert",
"fail",
"if",
"current",
"directory",
"is",
"different",
"from",
"location",
"of",
"the",
"script"
] |
2fd359621896717197b479c7174d06d80df1529b
|
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/util.py#L157-L162
|
train
|
croach/Flask-Fixtures
|
flask_fixtures/__init__.py
|
load_fixtures
|
def load_fixtures(db, fixtures):
"""Loads the given fixtures into the database.
"""
conn = db.engine.connect()
metadata = db.metadata
for fixture in fixtures:
if 'model' in fixture:
module_name, class_name = fixture['model'].rsplit('.', 1)
module = importlib.import_module(module_name)
model = getattr(module, class_name)
for fields in fixture['records']:
obj = model(**fields)
db.session.add(obj)
db.session.commit()
elif 'table' in fixture:
table = Table(fixture['table'], metadata)
conn.execute(table.insert(), fixture['records'])
else:
raise ValueError("Fixture missing a 'model' or 'table' field: {0}".format(json.dumps(fixture)))
|
python
|
def load_fixtures(db, fixtures):
"""Loads the given fixtures into the database.
"""
conn = db.engine.connect()
metadata = db.metadata
for fixture in fixtures:
if 'model' in fixture:
module_name, class_name = fixture['model'].rsplit('.', 1)
module = importlib.import_module(module_name)
model = getattr(module, class_name)
for fields in fixture['records']:
obj = model(**fields)
db.session.add(obj)
db.session.commit()
elif 'table' in fixture:
table = Table(fixture['table'], metadata)
conn.execute(table.insert(), fixture['records'])
else:
raise ValueError("Fixture missing a 'model' or 'table' field: {0}".format(json.dumps(fixture)))
|
[
"def",
"load_fixtures",
"(",
"db",
",",
"fixtures",
")",
":",
"conn",
"=",
"db",
".",
"engine",
".",
"connect",
"(",
")",
"metadata",
"=",
"db",
".",
"metadata",
"for",
"fixture",
"in",
"fixtures",
":",
"if",
"'model'",
"in",
"fixture",
":",
"module_name",
",",
"class_name",
"=",
"fixture",
"[",
"'model'",
"]",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"module_name",
")",
"model",
"=",
"getattr",
"(",
"module",
",",
"class_name",
")",
"for",
"fields",
"in",
"fixture",
"[",
"'records'",
"]",
":",
"obj",
"=",
"model",
"(",
"*",
"*",
"fields",
")",
"db",
".",
"session",
".",
"add",
"(",
"obj",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"elif",
"'table'",
"in",
"fixture",
":",
"table",
"=",
"Table",
"(",
"fixture",
"[",
"'table'",
"]",
",",
"metadata",
")",
"conn",
".",
"execute",
"(",
"table",
".",
"insert",
"(",
")",
",",
"fixture",
"[",
"'records'",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Fixture missing a 'model' or 'table' field: {0}\"",
".",
"format",
"(",
"json",
".",
"dumps",
"(",
"fixture",
")",
")",
")"
] |
Loads the given fixtures into the database.
|
[
"Loads",
"the",
"given",
"fixtures",
"into",
"the",
"database",
"."
] |
b34597d165b33cc47cdd632ac0f3cf8a07428675
|
https://github.com/croach/Flask-Fixtures/blob/b34597d165b33cc47cdd632ac0f3cf8a07428675/flask_fixtures/__init__.py#L130-L149
|
train
|
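load_fixtures accepts two record shapes; a hypothetical fixture list showing both (the model path and table name are illustrative, not from the source):

fixtures = [
    {   # ORM path: dotted "module.Class" resolved via importlib
        "model": "myapp.models.Author",
        "records": [{"id": 1, "name": "Ada"}],
    },
    {   # raw path: rows inserted straight into a table through the engine
        "table": "books",
        "records": [{"id": 1, "title": "Notes", "author_id": 1}],
    },
]
# load_fixtures(db, fixtures) adds the Author through db.session, bulk-inserts
# the books rows, and raises ValueError for any entry with neither key.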
croach/Flask-Fixtures
|
flask_fixtures/__init__.py
|
MetaFixturesMixin.setup_handler
|
def setup_handler(setup_fixtures_fn, setup_fn):
"""Returns a function that adds fixtures handling to the setup method.
Makes sure that fixtures are set up before calling the given setup method.
"""
def handler(obj):
setup_fixtures_fn(obj)
setup_fn(obj)
return handler
|
python
|
def setup_handler(setup_fixtures_fn, setup_fn):
"""Returns a function that adds fixtures handling to the setup method.
Makes sure that fixtures are set up before calling the given setup method.
"""
def handler(obj):
setup_fixtures_fn(obj)
setup_fn(obj)
return handler
|
[
"def",
"setup_handler",
"(",
"setup_fixtures_fn",
",",
"setup_fn",
")",
":",
"def",
"handler",
"(",
"obj",
")",
":",
"setup_fixtures_fn",
"(",
"obj",
")",
"setup_fn",
"(",
"obj",
")",
"return",
"handler"
] |
Returns a function that adds fixtures handling to the setup method.
Makes sure that fixtures are set up before calling the given setup method.
|
[
"Returns",
"a",
"function",
"that",
"adds",
"fixtures",
"handling",
"to",
"the",
"setup",
"method",
"."
] |
b34597d165b33cc47cdd632ac0f3cf8a07428675
|
https://github.com/croach/Flask-Fixtures/blob/b34597d165b33cc47cdd632ac0f3cf8a07428675/flask_fixtures/__init__.py#L180-L188
|
train
|
croach/Flask-Fixtures
|
flask_fixtures/__init__.py
|
MetaFixturesMixin.teardown_handler
|
def teardown_handler(teardown_fixtures_fn, teardown_fn):
"""Returns a function that adds fixtures handling to the teardown method.
Calls the given teardown method first before calling the fixtures teardown.
"""
def handler(obj):
teardown_fn(obj)
teardown_fixtures_fn(obj)
return handler
|
python
|
def teardown_handler(teardown_fixtures_fn, teardown_fn):
"""Returns a function that adds fixtures handling to the teardown method.
Calls the given teardown method first before calling the fixtures teardown.
"""
def handler(obj):
teardown_fn(obj)
teardown_fixtures_fn(obj)
return handler
|
[
"def",
"teardown_handler",
"(",
"teardown_fixtures_fn",
",",
"teardown_fn",
")",
":",
"def",
"handler",
"(",
"obj",
")",
":",
"teardown_fn",
"(",
"obj",
")",
"teardown_fixtures_fn",
"(",
"obj",
")",
"return",
"handler"
] |
Returns a function that adds fixtures handling to the teardown method.
Calls the given teardown method first before calling the fixtures teardown.
|
[
"Returns",
"a",
"function",
"that",
"adds",
"fixtures",
"handling",
"to",
"the",
"teardown",
"method",
"."
] |
b34597d165b33cc47cdd632ac0f3cf8a07428675
|
https://github.com/croach/Flask-Fixtures/blob/b34597d165b33cc47cdd632ac0f3cf8a07428675/flask_fixtures/__init__.py#L191-L199
|
train
|
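Together the two factories enforce symmetric ordering: fixtures come up before the test's own setup and go down after its own teardown. A minimal sketch treating them as plain functions:

def setup_handler(setup_fixtures_fn, setup_fn):
    def handler(obj):
        setup_fixtures_fn(obj)
        setup_fn(obj)
    return handler

def teardown_handler(teardown_fixtures_fn, teardown_fn):
    def handler(obj):
        teardown_fn(obj)
        teardown_fixtures_fn(obj)
    return handler

log = []
up = setup_handler(lambda obj: log.append('fixtures up'),
                   lambda obj: log.append('test setup'))
down = teardown_handler(lambda obj: log.append('fixtures down'),
                        lambda obj: log.append('test teardown'))
up(None)
down(None)
print(log)  # ['fixtures up', 'test setup', 'test teardown', 'fixtures down']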
croach/Flask-Fixtures
|
flask_fixtures/__init__.py
|
MetaFixturesMixin.get_child_fn
|
def get_child_fn(attrs, names, bases):
"""Returns a function from the child class that matches one of the names.
Searches the child class's set of methods (i.e., the attrs dict) for all
the functions matching the given list of names. If more than one is found,
an exception is raised; if one is found, it is returned; and if none are
found, a function that calls the default method on each parent class is
returned.
"""
def call_method(obj, method):
"""Calls a method as either a class method or an instance method.
"""
# The __get__ method takes an instance and an owner which changes
# depending on the calling object. If the calling object is a class,
# the instance is None and the owner will be the object itself. If the
# calling object is an instance, the instance will be the calling object
# and the owner will be its class. For more info on the __get__ method,
# see http://docs.python.org/2/reference/datamodel.html#object.__get__.
if isinstance(obj, type):
instance = None
owner = obj
else:
instance = obj
owner = obj.__class__
method.__get__(instance, owner)()
# Create a default function that calls the default method on each parent
default_name = names[0]
def default_fn(obj):
for cls in bases:
if hasattr(cls, default_name):
call_method(obj, getattr(cls, default_name))
default_fn.__name__ = default_name
# Get all of the functions in the child class that match the list of names
fns = [(name, attrs[name]) for name in names if name in attrs]
# Raise an error if more than one setup/teardown method is found
if len(fns) > 1:
raise RuntimeError("Cannot have more than one setup or teardown method per context (class or test).")
# If one setup/teardown function was found, return it
elif len(fns) == 1:
name, fn = fns[0]
def child_fn(obj):
call_method(obj, fn)
child_fn.__name__ = name
return child_fn
# Otherwise, return the default function
else:
return default_fn
|
python
|
def get_child_fn(attrs, names, bases):
"""Returns a function from the child class that matches one of the names.
Searches the child class's set of methods (i.e., the attrs dict) for all
the functions matching the given list of names. If more than one is found,
an exception is raised; if one is found, it is returned; and if none are
found, a function that calls the default method on each parent class is
returned.
"""
def call_method(obj, method):
"""Calls a method as either a class method or an instance method.
"""
# The __get__ method takes an instance and an owner which changes
# depending on the calling object. If the calling object is a class,
# the instance is None and the owner will be the object itself. If the
# calling object is an instance, the instance will be the calling object
# and the owner will be its class. For more info on the __get__ method,
# see http://docs.python.org/2/reference/datamodel.html#object.__get__.
if isinstance(obj, type):
instance = None
owner = obj
else:
instance = obj
owner = obj.__class__
method.__get__(instance, owner)()
# Create a default function that calls the default method on each parent
default_name = names[0]
def default_fn(obj):
for cls in bases:
if hasattr(cls, default_name):
call_method(obj, getattr(cls, default_name))
default_fn.__name__ = default_name
# Get all of the functions in the child class that match the list of names
fns = [(name, attrs[name]) for name in names if name in attrs]
# Raise an error if more than one setup/teardown method is found
if len(fns) > 1:
raise RuntimeError("Cannot have more than one setup or teardown method per context (class or test).")
# If one setup/teardown function was found, return it
elif len(fns) == 1:
name, fn = fns[0]
def child_fn(obj):
call_method(obj, fn)
child_fn.__name__ = name
return child_fn
# Otherwise, return the default function
else:
return default_fn
|
[
"def",
"get_child_fn",
"(",
"attrs",
",",
"names",
",",
"bases",
")",
":",
"def",
"call_method",
"(",
"obj",
",",
"method",
")",
":",
"\"\"\"Calls a method as either a class method or an instance method.\n \"\"\"",
"# The __get__ method takes an instance and an owner which changes",
"# depending on the calling object. If the calling object is a class,",
"# the instance is None and the owner will be the object itself. If the",
"# calling object is an instance, the instance will be the calling object",
"# and the owner will be its class. For more info on the __get__ method,",
"# see http://docs.python.org/2/reference/datamodel.html#object.__get__.",
"if",
"isinstance",
"(",
"obj",
",",
"type",
")",
":",
"instance",
"=",
"None",
"owner",
"=",
"obj",
"else",
":",
"instance",
"=",
"obj",
"owner",
"=",
"obj",
".",
"__class__",
"method",
".",
"__get__",
"(",
"instance",
",",
"owner",
")",
"(",
")",
"# Create a default function that calls the default method on each parent",
"default_name",
"=",
"names",
"[",
"0",
"]",
"def",
"default_fn",
"(",
"obj",
")",
":",
"for",
"cls",
"in",
"bases",
":",
"if",
"hasattr",
"(",
"cls",
",",
"default_name",
")",
":",
"call_method",
"(",
"obj",
",",
"getattr",
"(",
"cls",
",",
"default_name",
")",
")",
"default_fn",
".",
"__name__",
"=",
"default_name",
"# Get all of the functions in the child class that match the list of names",
"fns",
"=",
"[",
"(",
"name",
",",
"attrs",
"[",
"name",
"]",
")",
"for",
"name",
"in",
"names",
"if",
"name",
"in",
"attrs",
"]",
"# Raise an error if more than one setup/teardown method is found",
"if",
"len",
"(",
"fns",
")",
">",
"1",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot have more than one setup or teardown method per context (class or test).\"",
")",
"# If one setup/teardown function was found, return it",
"elif",
"len",
"(",
"fns",
")",
"==",
"1",
":",
"name",
",",
"fn",
"=",
"fns",
"[",
"0",
"]",
"def",
"child_fn",
"(",
"obj",
")",
":",
"call_method",
"(",
"obj",
",",
"fn",
")",
"child_fn",
".",
"__name__",
"=",
"name",
"return",
"child_fn",
"# Otherwise, return the default function",
"else",
":",
"return",
"default_fn"
] |
Returns a function from the child class that matches one of the names.
Searches the child class's set of methods (i.e., the attrs dict) for all
the functions matching the given list of names. If more than one is found,
an exception is raised; if one is found, it is returned; and if none are
found, a function that calls the default method on each parent class is
returned.
|
[
"Returns",
"a",
"function",
"from",
"the",
"child",
"class",
"that",
"matches",
"one",
"of",
"the",
"names",
"."
] |
b34597d165b33cc47cdd632ac0f3cf8a07428675
|
https://github.com/croach/Flask-Fixtures/blob/b34597d165b33cc47cdd632ac0f3cf8a07428675/flask_fixtures/__init__.py#L202-L252
|
train
|
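call_method's descriptor dance is the subtle part: attributes pulled from a class's attrs dict are raw descriptors (a classmethod object, for instance, is not directly callable), so they must be bound with __get__ before the call. A tiny demonstration:

class Base:
    @classmethod
    def setUpClass(cls):
        print(f"setUpClass on {cls.__name__}")

raw = Base.__dict__['setUpClass']  # a classmethod object, not directly callable
bound = raw.__get__(None, Base)    # instance=None, owner=Base, as in call_method
bound()                            # prints: setUpClass on Base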
croach/Flask-Fixtures
|
flask_fixtures/utils.py
|
print_msg
|
def print_msg(msg, header, file=sys.stdout):
"""Prints a boardered message to the screen"""
DEFAULT_MSG_BLOCK_WIDTH = 60
# Calculate the length of the boarder on each side of the header and the
# total length of the bottom boarder
side_boarder_length = (DEFAULT_MSG_BLOCK_WIDTH - (len(header) + 2)) // 2
msg_block_width = side_boarder_length * 2 + (len(header) + 2)
# Create the top and bottom boarders
side_boarder = '#' * side_boarder_length
top_boarder = '{0} {1} {2}'.format(side_boarder, header, side_boarder)
bottom_boarder = '#' * msg_block_width
def pad(line, length):
"""Returns a string padded and centered by the given length"""
padding_length = length - len(line)
left_padding = ' ' * (padding_length//2)
right_padding = ' ' * (padding_length - len(left_padding))
return '{0} {1} {2}'.format(left_padding, line, right_padding)
words = msg.split(' ')
lines = []
line = ''
for word in words:
if len(line + ' ' + word) <= msg_block_width - 4:
line = (line + ' ' + word).strip()
else:
lines.append('#{0}#'.format(pad(line, msg_block_width - 4)))
line = word
lines.append('#{0}#'.format(pad(line, msg_block_width - 4)))
# Print the full message
print(file=file)
print(top_boarder, file=file)
print('#{0}#'.format(pad('', msg_block_width - 4)), file=file)
for line in lines:
print(line, file=file)
print('#{0}#'.format(pad('', msg_block_width - 4)), file=file)
print(bottom_boarder, file=file)
print(file=file)
|
python
|
def print_msg(msg, header, file=sys.stdout):
"""Prints a boardered message to the screen"""
DEFAULT_MSG_BLOCK_WIDTH = 60
# Calculate the length of the boarder on each side of the header and the
# total length of the bottom boarder
side_boarder_length = (DEFAULT_MSG_BLOCK_WIDTH - (len(header) + 2)) // 2
msg_block_width = side_boarder_length * 2 + (len(header) + 2)
# Create the top and bottom boarders
side_boarder = '#' * side_boarder_length
top_boarder = '{0} {1} {2}'.format(side_boarder, header, side_boarder)
bottom_boarder = '#' * msg_block_width
def pad(line, length):
"""Returns a string padded and centered by the given length"""
padding_length = length - len(line)
left_padding = ' ' * (padding_length//2)
right_padding = ' ' * (padding_length - len(left_padding))
return '{0} {1} {2}'.format(left_padding, line, right_padding)
words = msg.split(' ')
lines = []
line = ''
for word in words:
if len(line + ' ' + word) <= msg_block_width - 4:
line = (line + ' ' + word).strip()
else:
lines.append('#{0}#'.format(pad(line, msg_block_width - 4)))
line = word
lines.append('#{0}#'.format(pad(line, msg_block_width - 4)))
# Print the full message
print(file=file)
print(top_boarder, file=file)
print('#{0}#'.format(pad('', msg_block_width - 4)), file=file)
for line in lines:
print(line, file=file)
print('#{0}#'.format(pad('', msg_block_width - 4)), file=file)
print(bottom_boarder, file=file)
print(file=file)
|
[
"def",
"print_msg",
"(",
"msg",
",",
"header",
",",
"file",
"=",
"sys",
".",
"stdout",
")",
":",
"DEFAULT_MSG_BLOCK_WIDTH",
"=",
"60",
"# Calculate the length of the boarder on each side of the header and the",
"# total length of the bottom boarder",
"side_boarder_length",
"=",
"(",
"DEFAULT_MSG_BLOCK_WIDTH",
"-",
"(",
"len",
"(",
"header",
")",
"+",
"2",
")",
")",
"//",
"2",
"msg_block_width",
"=",
"side_boarder_length",
"*",
"2",
"+",
"(",
"len",
"(",
"header",
")",
"+",
"2",
")",
"# Create the top and bottom boarders",
"side_boarder",
"=",
"'#'",
"*",
"side_boarder_length",
"top_boarder",
"=",
"'{0} {1} {2}'",
".",
"format",
"(",
"side_boarder",
",",
"header",
",",
"side_boarder",
")",
"bottom_boarder",
"=",
"'#'",
"*",
"msg_block_width",
"def",
"pad",
"(",
"line",
",",
"length",
")",
":",
"\"\"\"Returns a string padded and centered by the given length\"\"\"",
"padding_length",
"=",
"length",
"-",
"len",
"(",
"line",
")",
"left_padding",
"=",
"' '",
"*",
"(",
"padding_length",
"//",
"2",
")",
"right_padding",
"=",
"' '",
"*",
"(",
"padding_length",
"-",
"len",
"(",
"left_padding",
")",
")",
"return",
"'{0} {1} {2}'",
".",
"format",
"(",
"left_padding",
",",
"line",
",",
"right_padding",
")",
"words",
"=",
"msg",
".",
"split",
"(",
"' '",
")",
"lines",
"=",
"[",
"]",
"line",
"=",
"''",
"for",
"word",
"in",
"words",
":",
"if",
"len",
"(",
"line",
"+",
"' '",
"+",
"word",
")",
"<=",
"msg_block_width",
"-",
"4",
":",
"line",
"=",
"(",
"line",
"+",
"' '",
"+",
"word",
")",
".",
"strip",
"(",
")",
"else",
":",
"lines",
".",
"append",
"(",
"'#{0}#'",
".",
"format",
"(",
"pad",
"(",
"line",
",",
"msg_block_width",
"-",
"4",
")",
")",
")",
"line",
"=",
"word",
"lines",
".",
"append",
"(",
"'#{0}#'",
".",
"format",
"(",
"pad",
"(",
"line",
",",
"msg_block_width",
"-",
"4",
")",
")",
")",
"# Print the full message",
"print",
"(",
"file",
"=",
"file",
")",
"print",
"(",
"top_boarder",
",",
"file",
"=",
"file",
")",
"print",
"(",
"'#{0}#'",
".",
"format",
"(",
"pad",
"(",
"''",
",",
"msg_block_width",
"-",
"4",
")",
")",
",",
"file",
"=",
"file",
")",
"for",
"line",
"in",
"lines",
":",
"print",
"(",
"line",
",",
"file",
"=",
"file",
")",
"print",
"(",
"'#{0}#'",
".",
"format",
"(",
"pad",
"(",
"''",
",",
"msg_block_width",
"-",
"4",
")",
")",
",",
"file",
"=",
"file",
")",
"print",
"(",
"bottom_boarder",
",",
"file",
"=",
"file",
")",
"print",
"(",
"file",
"=",
"file",
")"
] |
Prints a bordered message to the screen
|
[
"Prints",
"a",
"boardered",
"message",
"to",
"the",
"screen"
] |
b34597d165b33cc47cdd632ac0f3cf8a07428675
|
https://github.com/croach/Flask-Fixtures/blob/b34597d165b33cc47cdd632ac0f3cf8a07428675/flask_fixtures/utils.py#L20-L60
|
train
|
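Usage sketch (the message text is invented): the call renders a '#'-framed block roughly 60 columns wide with the header centered in the top border.

import sys
from flask_fixtures.utils import print_msg

print_msg("Could not load the fixtures file; check that it is valid "
          "JSON or YAML.", "WARNING", file=sys.stderr)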
croach/Flask-Fixtures
|
flask_fixtures/utils.py
|
can_persist_fixtures
|
def can_persist_fixtures():
"""Returns True if it's possible to persist fixtures across tests.
Flask-Fixtures uses the setUpClass and tearDownClass methods to persist
fixtures across tests. These methods were added to unittest.TestCase in
python 2.7. So, we can only persist fixtures when using Python 2.7 or newer.
However, the nose and py.test libraries add support for these methods
regardless of what version of python we're running, so if we're running
with either of those libraries, return True to persist fixtures.
"""
# If we're running python 2.7 or greater, we're fine
if sys.hexversion >= 0x02070000:
return True
# Otherwise, nose and py.test support the setUpClass and tearDownClass
# methods, so if we're using either of those, go ahead and run the tests
filename = inspect.stack()[-1][1]
executable = os.path.split(filename)[1]
return executable in ('py.test', 'nosetests')
|
python
|
def can_persist_fixtures():
"""Returns True if it's possible to persist fixtures across tests.
Flask-Fixtures uses the setUpClass and tearDownClass methods to persist
fixtures across tests. These methods were added to unittest.TestCase in
python 2.7. So, we can only persist fixtures when using Python 2.7 or newer.
However, the nose and py.test libraries add support for these methods
regardless of what version of python we're running, so if we're running
with either of those libraries, return True to persist fixtures.
"""
# If we're running python 2.7 or greater, we're fine
if sys.hexversion >= 0x02070000:
return True
# Otherwise, nose and py.test support the setUpClass and tearDownClass
# methods, so if we're using either of those, go ahead and run the tests
filename = inspect.stack()[-1][1]
executable = os.path.split(filename)[1]
return executable in ('py.test', 'nosetests')
|
[
"def",
"can_persist_fixtures",
"(",
")",
":",
"# If we're running python 2.7 or greater, we're fine",
"if",
"sys",
".",
"hexversion",
">=",
"0x02070000",
":",
"return",
"True",
"# Otherwise, nose and py.test support the setUpClass and tearDownClass",
"# methods, so if we're using either of those, go ahead and run the tests",
"filename",
"=",
"inspect",
".",
"stack",
"(",
")",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"executable",
"=",
"os",
".",
"path",
".",
"split",
"(",
"filename",
")",
"[",
"1",
"]",
"return",
"executable",
"in",
"(",
"'py.test'",
",",
"'nosetests'",
")"
] |
Returns True if it's possible to persist fixtures across tests.
Flask-Fixtures uses the setUpClass and tearDownClass methods to persist
fixtures across tests. These methods were added to unittest.TestCase in
python 2.7. So, we can only persist fixtures when using Python 2.7 or newer.
However, the nose and py.test libraries add support for these methods
regardless of what version of python we're running, so if we're running
with either of those libraries, return True to persist fixtures.
|
[
"Returns",
"True",
"if",
"it",
"s",
"possible",
"to",
"persist",
"fixtures",
"across",
"tests",
"."
] |
b34597d165b33cc47cdd632ac0f3cf8a07428675
|
https://github.com/croach/Flask-Fixtures/blob/b34597d165b33cc47cdd632ac0f3cf8a07428675/flask_fixtures/utils.py#L65-L84
|
train
|
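The magic constant in the version check packs the release number a byte at a time, so comparisons work numerically:

import sys

# 0x02070000 == (2 << 24) | (7 << 16): major 2, minor 7, micro 0
print(hex(sys.hexversion))           # e.g. 0x30b04f0 on CPython 3.11.4
print(sys.hexversion >= 0x02070000)  # True on any Python >= 2.7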
shichao-an/twitter-photos
|
twphotos/increment.py
|
read_since_ids
|
def read_since_ids(users):
"""
Read max ids of the last downloads
:param users: A list of users
Return a dictionary mapping users to ids
"""
since_ids = {}
for user in users:
if config.has_option(SECTIONS['INCREMENTS'], user):
since_ids[user] = config.getint(SECTIONS['INCREMENTS'], user) + 1
return since_ids
|
python
|
def read_since_ids(users):
"""
Read max ids of the last downloads
:param users: A list of users
Return a dictionary mapping users to ids
"""
since_ids = {}
for user in users:
if config.has_option(SECTIONS['INCREMENTS'], user):
since_ids[user] = config.getint(SECTIONS['INCREMENTS'], user) + 1
return since_ids
|
[
"def",
"read_since_ids",
"(",
"users",
")",
":",
"since_ids",
"=",
"{",
"}",
"for",
"user",
"in",
"users",
":",
"if",
"config",
".",
"has_option",
"(",
"SECTIONS",
"[",
"'INCREMENTS'",
"]",
",",
"user",
")",
":",
"since_ids",
"[",
"user",
"]",
"=",
"config",
".",
"getint",
"(",
"SECTIONS",
"[",
"'INCREMENTS'",
"]",
",",
"user",
")",
"+",
"1",
"return",
"since_ids"
] |
Read max ids of the last downloads
:param users: A list of users
Return a dictionary mapping users to ids
|
[
"Read",
"max",
"ids",
"of",
"the",
"last",
"downloads"
] |
32de6e8805edcbb431d08af861e9d2f0ab221106
|
https://github.com/shichao-an/twitter-photos/blob/32de6e8805edcbb431d08af861e9d2f0ab221106/twphotos/increment.py#L19-L31
|
train
|
shichao-an/twitter-photos
|
twphotos/increment.py
|
set_max_ids
|
def set_max_ids(max_ids):
"""
Set max ids of the current downloads
:param max_ids: A dictionary mapping users to ids
"""
config.read(CONFIG)
for user, max_id in max_ids.items():
config.set(SECTIONS['INCREMENTS'], user, str(max_id))
with open(CONFIG, 'w') as f:
config.write(f)
|
python
|
def set_max_ids(max_ids):
"""
Set max ids of the current downloads
:param max_ids: A dictionary mapping users to ids
"""
config.read(CONFIG)
for user, max_id in max_ids.items():
config.set(SECTIONS['INCREMENTS'], user, str(max_id))
with open(CONFIG, 'w') as f:
config.write(f)
|
[
"def",
"set_max_ids",
"(",
"max_ids",
")",
":",
"config",
".",
"read",
"(",
"CONFIG",
")",
"for",
"user",
",",
"max_id",
"in",
"max_ids",
".",
"items",
"(",
")",
":",
"config",
".",
"set",
"(",
"SECTIONS",
"[",
"'INCREMENTS'",
"]",
",",
"user",
",",
"str",
"(",
"max_id",
")",
")",
"with",
"open",
"(",
"CONFIG",
",",
"'w'",
")",
"as",
"f",
":",
"config",
".",
"write",
"(",
"f",
")"
] |
Set max ids of the current downloads
:param max_ids: A dictionary mapping users to ids
|
[
"Set",
"max",
"ids",
"of",
"the",
"current",
"downloads"
] |
32de6e8805edcbb431d08af861e9d2f0ab221106
|
https://github.com/shichao-an/twitter-photos/blob/32de6e8805edcbb431d08af861e9d2f0ab221106/twphotos/increment.py#L34-L44
|
train
|
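A round-trip sketch for read_since_ids/set_max_ids. The module-level config, SECTIONS and CONFIG they rely on are not shown in these rows, so the stand-ins below are assumptions:

import configparser

CONFIG = 'twphotos.cfg'                    # stand-in for the real config path
SECTIONS = {'INCREMENTS': 'increments'}    # stand-in section-name mapping
config = configparser.ConfigParser()
config.read(CONFIG)                        # a missing file is silently skipped
if not config.has_section(SECTIONS['INCREMENTS']):
    config.add_section(SECTIONS['INCREMENTS'])

set_max_ids({'alice': 1049, 'bob': 2310})  # record this run's max tweet ids
print(read_since_ids(['alice', 'bob', 'carol']))
# {'alice': 1050, 'bob': 2311} -- 'carol' has no recorded id yet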
davedoesdev/dxf
|
dxf/__init__.py
|
DXFBase.authenticate
|
def authenticate(self,
username=None, password=None,
actions=None, response=None,
authorization=None):
# pylint: disable=too-many-arguments,too-many-locals
"""
Authenticate to the registry using a username and password,
an authorization header or otherwise as the anonymous user.
:param username: User name to authenticate as.
:type username: str
:param password: User's password.
:type password: str
:param actions: If you know which types of operation you need to make on the registry, specify them here. Valid actions are ``pull``, ``push`` and ``*``.
:type actions: list
:param response: When the ``auth`` function you passed to :class:`DXFBase`'s constructor is called, it is passed a HTTP response object. Pass it back to :meth:`authenticate` to have it automatically detect which actions are required.
:type response: requests.Response
:param authorization: ``Authorization`` header value.
:type authorization: str
:rtype: str
:returns: Authentication token, if the registry supports bearer tokens. Otherwise ``None``, and HTTP Basic auth is used (if the registry requires authentication).
"""
if response is None:
with warnings.catch_warnings():
_ignore_warnings(self)
response = self._sessions[0].get(self._base_url, verify=self._tlsverify)
if response.ok:
return None
# pylint: disable=no-member
if response.status_code != requests.codes.unauthorized:
raise exceptions.DXFUnexpectedStatusCodeError(response.status_code,
requests.codes.unauthorized)
if self._insecure:
raise exceptions.DXFAuthInsecureError()
parsed = www_authenticate.parse(response.headers['www-authenticate'])
if username is not None and password is not None:
headers = {
'Authorization': 'Basic ' + base64.b64encode(_to_bytes_2and3(username + ':' + password)).decode('utf-8')
}
elif authorization is not None:
headers = {
'Authorization': authorization
}
else:
headers = {}
if 'bearer' in parsed:
info = parsed['bearer']
if actions and self._repo:
scope = 'repository:' + self._repo + ':' + ','.join(actions)
elif 'scope' in info:
scope = info['scope']
else:
scope = ''
url_parts = list(urlparse.urlparse(info['realm']))
query = urlparse.parse_qs(url_parts[4])
query.update({
'service': info['service'],
'scope': scope
})
url_parts[4] = urlencode(query, True)
url_parts[0] = 'https'
if self._auth_host:
url_parts[1] = self._auth_host
auth_url = urlparse.urlunparse(url_parts)
with warnings.catch_warnings():
_ignore_warnings(self)
r = self._sessions[0].get(auth_url, headers=headers, verify=self._tlsverify)
_raise_for_status(r)
rjson = r.json()
self.token = rjson['access_token'] if 'access_token' in rjson else rjson['token']
return self._token
self._headers = headers
return None
|
python
|
def authenticate(self,
username=None, password=None,
actions=None, response=None,
authorization=None):
# pylint: disable=too-many-arguments,too-many-locals
"""
Authenticate to the registry using a username and password,
an authorization header or otherwise as the anonymous user.
:param username: User name to authenticate as.
:type username: str
:param password: User's password.
:type password: str
:param actions: If you know which types of operation you need to make on the registry, specify them here. Valid actions are ``pull``, ``push`` and ``*``.
:type actions: list
:param response: When the ``auth`` function you passed to :class:`DXFBase`'s constructor is called, it is passed a HTTP response object. Pass it back to :meth:`authenticate` to have it automatically detect which actions are required.
:type response: requests.Response
:param authorization: ``Authorization`` header value.
:type authorization: str
:rtype: str
:returns: Authentication token, if the registry supports bearer tokens. Otherwise ``None``, and HTTP Basic auth is used (if the registry requires authentication).
"""
if response is None:
with warnings.catch_warnings():
_ignore_warnings(self)
response = self._sessions[0].get(self._base_url, verify=self._tlsverify)
if response.ok:
return None
# pylint: disable=no-member
if response.status_code != requests.codes.unauthorized:
raise exceptions.DXFUnexpectedStatusCodeError(response.status_code,
requests.codes.unauthorized)
if self._insecure:
raise exceptions.DXFAuthInsecureError()
parsed = www_authenticate.parse(response.headers['www-authenticate'])
if username is not None and password is not None:
headers = {
'Authorization': 'Basic ' + base64.b64encode(_to_bytes_2and3(username + ':' + password)).decode('utf-8')
}
elif authorization is not None:
headers = {
'Authorization': authorization
}
else:
headers = {}
if 'bearer' in parsed:
info = parsed['bearer']
if actions and self._repo:
scope = 'repository:' + self._repo + ':' + ','.join(actions)
elif 'scope' in info:
scope = info['scope']
else:
scope = ''
url_parts = list(urlparse.urlparse(info['realm']))
query = urlparse.parse_qs(url_parts[4])
query.update({
'service': info['service'],
'scope': scope
})
url_parts[4] = urlencode(query, True)
url_parts[0] = 'https'
if self._auth_host:
url_parts[1] = self._auth_host
auth_url = urlparse.urlunparse(url_parts)
with warnings.catch_warnings():
_ignore_warnings(self)
r = self._sessions[0].get(auth_url, headers=headers, verify=self._tlsverify)
_raise_for_status(r)
rjson = r.json()
self.token = rjson['access_token'] if 'access_token' in rjson else rjson['token']
return self._token
self._headers = headers
return None
|
[
"def",
"authenticate",
"(",
"self",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"actions",
"=",
"None",
",",
"response",
"=",
"None",
",",
"authorization",
"=",
"None",
")",
":",
"# pylint: disable=too-many-arguments,too-many-locals",
"if",
"response",
"is",
"None",
":",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"_ignore_warnings",
"(",
"self",
")",
"response",
"=",
"self",
".",
"_sessions",
"[",
"0",
"]",
".",
"get",
"(",
"self",
".",
"_base_url",
",",
"verify",
"=",
"self",
".",
"_tlsverify",
")",
"if",
"response",
".",
"ok",
":",
"return",
"None",
"# pylint: disable=no-member",
"if",
"response",
".",
"status_code",
"!=",
"requests",
".",
"codes",
".",
"unauthorized",
":",
"raise",
"exceptions",
".",
"DXFUnexpectedStatusCodeError",
"(",
"response",
".",
"status_code",
",",
"requests",
".",
"codes",
".",
"unauthorized",
")",
"if",
"self",
".",
"_insecure",
":",
"raise",
"exceptions",
".",
"DXFAuthInsecureError",
"(",
")",
"parsed",
"=",
"www_authenticate",
".",
"parse",
"(",
"response",
".",
"headers",
"[",
"'www-authenticate'",
"]",
")",
"if",
"username",
"is",
"not",
"None",
"and",
"password",
"is",
"not",
"None",
":",
"headers",
"=",
"{",
"'Authorization'",
":",
"'Basic '",
"+",
"base64",
".",
"b64encode",
"(",
"_to_bytes_2and3",
"(",
"username",
"+",
"':'",
"+",
"password",
")",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"}",
"elif",
"authorization",
"is",
"not",
"None",
":",
"headers",
"=",
"{",
"'Authorization'",
":",
"authorization",
"}",
"else",
":",
"headers",
"=",
"{",
"}",
"if",
"'bearer'",
"in",
"parsed",
":",
"info",
"=",
"parsed",
"[",
"'bearer'",
"]",
"if",
"actions",
"and",
"self",
".",
"_repo",
":",
"scope",
"=",
"'repository:'",
"+",
"self",
".",
"_repo",
"+",
"':'",
"+",
"','",
".",
"join",
"(",
"actions",
")",
"elif",
"'scope'",
"in",
"info",
":",
"scope",
"=",
"info",
"[",
"'scope'",
"]",
"else",
":",
"scope",
"=",
"''",
"url_parts",
"=",
"list",
"(",
"urlparse",
".",
"urlparse",
"(",
"info",
"[",
"'realm'",
"]",
")",
")",
"query",
"=",
"urlparse",
".",
"parse_qs",
"(",
"url_parts",
"[",
"4",
"]",
")",
"query",
".",
"update",
"(",
"{",
"'service'",
":",
"info",
"[",
"'service'",
"]",
",",
"'scope'",
":",
"scope",
"}",
")",
"url_parts",
"[",
"4",
"]",
"=",
"urlencode",
"(",
"query",
",",
"True",
")",
"url_parts",
"[",
"0",
"]",
"=",
"'https'",
"if",
"self",
".",
"_auth_host",
":",
"url_parts",
"[",
"1",
"]",
"=",
"self",
".",
"_auth_host",
"auth_url",
"=",
"urlparse",
".",
"urlunparse",
"(",
"url_parts",
")",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"_ignore_warnings",
"(",
"self",
")",
"r",
"=",
"self",
".",
"_sessions",
"[",
"0",
"]",
".",
"get",
"(",
"auth_url",
",",
"headers",
"=",
"headers",
",",
"verify",
"=",
"self",
".",
"_tlsverify",
")",
"_raise_for_status",
"(",
"r",
")",
"rjson",
"=",
"r",
".",
"json",
"(",
")",
"self",
".",
"token",
"=",
"rjson",
"[",
"'access_token'",
"]",
"if",
"'access_token'",
"in",
"rjson",
"else",
"rjson",
"[",
"'token'",
"]",
"return",
"self",
".",
"_token",
"self",
".",
"_headers",
"=",
"headers",
"return",
"None"
] |
Authenticate to the registry using a username and password,
an authorization header or otherwise as the anonymous user.
:param username: User name to authenticate as.
:type username: str
:param password: User's password.
:type password: str
:param actions: If you know which types of operation you need to make on the registry, specify them here. Valid actions are ``pull``, ``push`` and ``*``.
:type actions: list
:param response: When the ``auth`` function you passed to :class:`DXFBase`'s constructor is called, it is passed a HTTP response object. Pass it back to :meth:`authenticate` to have it automatically detect which actions are required.
:type response: requests.Response
:param authorization: ``Authorization`` header value.
:type authorization: str
:rtype: str
:returns: Authentication token, if the registry supports bearer tokens. Otherwise ``None``, and HTTP Basic auth is used (if the registry requires authentication).
|
[
"Authenticate",
"to",
"the",
"registry",
"using",
"a",
"username",
"and",
"password",
"an",
"authorization",
"header",
"or",
"otherwise",
"as",
"the",
"anonymous",
"user",
"."
] |
63fad55e0f0086e5f6d3511670db1ef23b5298f6
|
https://github.com/davedoesdev/dxf/blob/63fad55e0f0086e5f6d3511670db1ef23b5298f6/dxf/__init__.py#L228-L312
|
train
|
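A sketch of the flow the docstring describes, assuming the dxf package is installed; registry host, repository and credentials are placeholders:

from dxf import DXF

def auth(dxf_obj, response):
    # Pass the registry's 401 response back so authenticate() can work
    # out which actions (pull/push) the failed request needed.
    dxf_obj.authenticate('myuser', 'mypassword', response=response)

registry = DXF('registry.example.com', 'myorg/myrepo', auth)
for dgst in registry.get_alias('latest'):   # triggers auth on the first 401
    print(dgst)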
davedoesdev/dxf
|
dxf/__init__.py
|
DXFBase.list_repos
|
def list_repos(self, batch_size=None, iterate=False):
"""
List all repositories in the registry.
:param batch_size: Number of repository names to ask the server for at a time.
:type batch_size: int
:param iterate: Whether to return iterator over the names or a list of all the names.
:type iterate: bool
:rtype: list or iterator of strings
:returns: Repository names.
"""
it = PaginatingResponse(self, '_base_request',
'_catalog', 'repositories',
params={'n': batch_size})
return it if iterate else list(it)
|
python
|
def list_repos(self, batch_size=None, iterate=False):
"""
List all repositories in the registry.
:param batch_size: Number of repository names to ask the server for at a time.
:type batch_size: int
:param iterate: Whether to return iterator over the names or a list of all the names.
:type iterate: bool
:rtype: list or iterator of strings
:returns: Repository names.
"""
it = PaginatingResponse(self, '_base_request',
'_catalog', 'repositories',
params={'n': batch_size})
return it if iterate else list(it)
|
[
"def",
"list_repos",
"(",
"self",
",",
"batch_size",
"=",
"None",
",",
"iterate",
"=",
"False",
")",
":",
"it",
"=",
"PaginatingResponse",
"(",
"self",
",",
"'_base_request'",
",",
"'_catalog'",
",",
"'repositories'",
",",
"params",
"=",
"{",
"'n'",
":",
"batch_size",
"}",
")",
"return",
"it",
"if",
"iterate",
"else",
"list",
"(",
"it",
")"
] |
List all repositories in the registry.
:param batch_size: Number of repository names to ask the server for at a time.
:type batch_size: int
:param iterate: Whether to return iterator over the names or a list of all the names.
:type iterate: bool
:rtype: list or iterator of strings
:returns: Repository names.
|
[
"List",
"all",
"repositories",
"in",
"the",
"registry",
"."
] |
63fad55e0f0086e5f6d3511670db1ef23b5298f6
|
https://github.com/davedoesdev/dxf/blob/63fad55e0f0086e5f6d3511670db1ef23b5298f6/dxf/__init__.py#L314-L330
|
train
|
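A pagination sketch for list_repos; iterate=True yields names page by page instead of materialising the whole catalog (host and credentials are placeholders):

from dxf import DXFBase

base = DXFBase('registry.example.com')
base.authenticate('myuser', 'mypassword', actions=['*'])

for name in base.list_repos(batch_size=100, iterate=True):
    print(name)   # one repository name at a time, 100 fetched per request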
davedoesdev/dxf
|
dxf/__init__.py
|
DXF.pull_blob
|
def pull_blob(self, digest, size=False, chunk_size=None):
"""
Download a blob from the registry given the hash of its content.
:param digest: Hash of the blob's content (prefixed by ``sha256:``).
:type digest: str
:param size: Whether to return the size of the blob too.
:type size: bool
:param chunk_size: Number of bytes to download at a time. Defaults to 8192.
:type chunk_size: int
:rtype: iterator
:returns: If ``size`` is falsey, a byte string iterator over the blob's content. If ``size`` is truthy, a tuple containing the iterator and the blob's size.
"""
if chunk_size is None:
chunk_size = 8192
r = self._request('get', 'blobs/' + digest, stream=True)
class Chunks(object):
# pylint: disable=too-few-public-methods
def __iter__(self):
sha256 = hashlib.sha256()
for chunk in r.iter_content(chunk_size):
sha256.update(chunk)
yield chunk
dgst = 'sha256:' + sha256.hexdigest()
if dgst != digest:
raise exceptions.DXFDigestMismatchError(dgst, digest)
return (Chunks(), long(r.headers['content-length'])) if size else Chunks()
|
python
|
def pull_blob(self, digest, size=False, chunk_size=None):
"""
Download a blob from the registry given the hash of its content.
:param digest: Hash of the blob's content (prefixed by ``sha256:``).
:type digest: str
:param size: Whether to return the size of the blob too.
:type size: bool
:param chunk_size: Number of bytes to download at a time. Defaults to 8192.
:type chunk_size: int
:rtype: iterator
:returns: If ``size`` is falsey, a byte string iterator over the blob's content. If ``size`` is truthy, a tuple containing the iterator and the blob's size.
"""
if chunk_size is None:
chunk_size = 8192
r = self._request('get', 'blobs/' + digest, stream=True)
class Chunks(object):
# pylint: disable=too-few-public-methods
def __iter__(self):
sha256 = hashlib.sha256()
for chunk in r.iter_content(chunk_size):
sha256.update(chunk)
yield chunk
dgst = 'sha256:' + sha256.hexdigest()
if dgst != digest:
raise exceptions.DXFDigestMismatchError(dgst, digest)
return (Chunks(), long(r.headers['content-length'])) if size else Chunks()
|
[
"def",
"pull_blob",
"(",
"self",
",",
"digest",
",",
"size",
"=",
"False",
",",
"chunk_size",
"=",
"None",
")",
":",
"if",
"chunk_size",
"is",
"None",
":",
"chunk_size",
"=",
"8192",
"r",
"=",
"self",
".",
"_request",
"(",
"'get'",
",",
"'blobs/'",
"+",
"digest",
",",
"stream",
"=",
"True",
")",
"class",
"Chunks",
"(",
"object",
")",
":",
"# pylint: disable=too-few-public-methods",
"def",
"__iter__",
"(",
"self",
")",
":",
"sha256",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"for",
"chunk",
"in",
"r",
".",
"iter_content",
"(",
"chunk_size",
")",
":",
"sha256",
".",
"update",
"(",
"chunk",
")",
"yield",
"chunk",
"dgst",
"=",
"'sha256:'",
"+",
"sha256",
".",
"hexdigest",
"(",
")",
"if",
"dgst",
"!=",
"digest",
":",
"raise",
"exceptions",
".",
"DXFDigestMismatchError",
"(",
"dgst",
",",
"digest",
")",
"return",
"(",
"Chunks",
"(",
")",
",",
"long",
"(",
"r",
".",
"headers",
"[",
"'content-length'",
"]",
")",
")",
"if",
"size",
"else",
"Chunks",
"(",
")"
] |
Download a blob from the registry given the hash of its content.
:param digest: Hash of the blob's content (prefixed by ``sha256:``).
:type digest: str
:param size: Whether to return the size of the blob too.
:type size: bool
:param chunk_size: Number of bytes to download at a time. Defaults to 8192.
:type chunk_size: int
:rtype: iterator
:returns: If ``size`` is falsey, a byte string iterator over the blob's content. If ``size`` is truthy, a tuple containing the iterator and the blob's size.
|
[
"Download",
"a",
"blob",
"from",
"the",
"registry",
"given",
"the",
"hash",
"of",
"its",
"content",
"."
] |
63fad55e0f0086e5f6d3511670db1ef23b5298f6
|
https://github.com/davedoesdev/dxf/blob/63fad55e0f0086e5f6d3511670db1ef23b5298f6/dxf/__init__.py#L438-L467
|
train
|
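A streaming sketch for pull_blob: each chunk is hashed as it is yielded, and a mismatch against the requested digest raises DXFDigestMismatchError once the iterator is exhausted. Host, repo, alias and file name are placeholders:

from dxf import DXF

registry = DXF('registry.example.com', 'myorg/myrepo')   # anonymous pull
digest = registry.get_alias('latest')[0]                 # first layer blob
chunks, size = registry.pull_blob(digest, size=True, chunk_size=65536)
with open('layer.tar.gz', 'wb') as f:
    for chunk in chunks:
        f.write(chunk)
print('wrote', size, 'bytes')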
davedoesdev/dxf
|
dxf/__init__.py
|
DXF.blob_size
|
def blob_size(self, digest):
"""
Return the size of a blob in the registry given the hash of its content.
:param digest: Hash of the blob's content (prefixed by ``sha256:``).
:type digest: str
:rtype: long
:returns: Whether the blob exists.
"""
r = self._request('head', 'blobs/' + digest)
return long(r.headers['content-length'])
|
python
|
def blob_size(self, digest):
"""
Return the size of a blob in the registry given the hash of its content.
:param digest: Hash of the blob's content (prefixed by ``sha256:``).
:type digest: str
:rtype: long
:returns: Whether the blob exists.
"""
r = self._request('head', 'blobs/' + digest)
return long(r.headers['content-length'])
|
[
"def",
"blob_size",
"(",
"self",
",",
"digest",
")",
":",
"r",
"=",
"self",
".",
"_request",
"(",
"'head'",
",",
"'blobs/'",
"+",
"digest",
")",
"return",
"long",
"(",
"r",
".",
"headers",
"[",
"'content-length'",
"]",
")"
] |
Return the size of a blob in the registry given the hash of its content.
:param digest: Hash of the blob's content (prefixed by ``sha256:``).
:type digest: str
:rtype: long
:returns: Whether the blob exists.
|
[
"Return",
"the",
"size",
"of",
"a",
"blob",
"in",
"the",
"registry",
"given",
"the",
"hash",
"of",
"its",
"content",
"."
] |
63fad55e0f0086e5f6d3511670db1ef23b5298f6
|
https://github.com/davedoesdev/dxf/blob/63fad55e0f0086e5f6d3511670db1ef23b5298f6/dxf/__init__.py#L469-L480
|
train
|
davedoesdev/dxf
|
dxf/__init__.py
|
DXF.get_manifest_and_response
|
def get_manifest_and_response(self, alias):
"""
Request the manifest for an alias and return the manifest and the
response.
:param alias: Alias name.
:type alias: str
:rtype: tuple
:returns: Tuple containing the manifest as a string (JSON) and the `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`_
"""
r = self._request('get',
'manifests/' + alias,
headers={'Accept': _schema2_mimetype + ', ' +
_schema1_mimetype})
return r.content.decode('utf-8'), r
|
python
|
def get_manifest_and_response(self, alias):
"""
Request the manifest for an alias and return the manifest and the
response.
:param alias: Alias name.
:type alias: str
:rtype: tuple
:returns: Tuple containing the manifest as a string (JSON) and the `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`_
"""
r = self._request('get',
'manifests/' + alias,
headers={'Accept': _schema2_mimetype + ', ' +
_schema1_mimetype})
return r.content.decode('utf-8'), r
|
[
"def",
"get_manifest_and_response",
"(",
"self",
",",
"alias",
")",
":",
"r",
"=",
"self",
".",
"_request",
"(",
"'get'",
",",
"'manifests/'",
"+",
"alias",
",",
"headers",
"=",
"{",
"'Accept'",
":",
"_schema2_mimetype",
"+",
"', '",
"+",
"_schema1_mimetype",
"}",
")",
"return",
"r",
".",
"content",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"r"
] |
Request the manifest for an alias and return the manifest and the
response.
:param alias: Alias name.
:type alias: str
:rtype: tuple
:returns: Tuple containing the manifest as a string (JSON) and the `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`_
|
[
"Request",
"the",
"manifest",
"for",
"an",
"alias",
"and",
"return",
"the",
"manifest",
"and",
"the",
"response",
"."
] |
63fad55e0f0086e5f6d3511670db1ef23b5298f6
|
https://github.com/davedoesdev/dxf/blob/63fad55e0f0086e5f6d3511670db1ef23b5298f6/dxf/__init__.py#L555-L570
|
train
|
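A sketch of why the raw response is returned alongside the manifest: its headers carry the Docker-Content-Digest that registries >= 2.3 expect when deleting (compare _get_dcd below). Host and repo are placeholders:

from dxf import DXF

registry = DXF('registry.example.com', 'myorg/myrepo')
manifest, response = registry.get_manifest_and_response('latest')
print(response.headers.get('Docker-Content-Digest'))
print(manifest[:80])   # start of the manifest JSON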
davedoesdev/dxf
|
dxf/__init__.py
|
DXF.get_alias
|
def get_alias(self,
alias=None,
manifest=None,
verify=True,
sizes=False,
dcd=None):
# pylint: disable=too-many-arguments
"""
Get the blob hashes assigned to an alias.
:param alias: Alias name. You almost definitely will only need to pass this argument.
:type alias: str
:param manifest: If you previously obtained a manifest, specify it here instead of ``alias``. You almost definitely won't need to do this.
:type manifest: str
:param verify: (v1 schema only) Whether to verify the integrity of the alias definition in the registry itself. You almost definitely won't need to change this from the default (``True``).
:type verify: bool
:param sizes: Whether to return sizes of the blobs along with their hashes
:type sizes: bool
:param dcd: (if ``manifest`` is specified) The Docker-Content-Digest header returned when getting the manifest. If present, this is checked against the manifest.
:type dcd: str
:rtype: list
:returns: If ``sizes`` is falsey, a list of blob hashes (strings) which are assigned to the alias. If ``sizes`` is truthy, a list of (hash,size) tuples for each blob.
"""
return self._get_alias(alias, manifest, verify, sizes, dcd, False)
|
python
|
def get_alias(self,
alias=None,
manifest=None,
verify=True,
sizes=False,
dcd=None):
# pylint: disable=too-many-arguments
"""
Get the blob hashes assigned to an alias.
:param alias: Alias name. You almost definitely will only need to pass this argument.
:type alias: str
:param manifest: If you previously obtained a manifest, specify it here instead of ``alias``. You almost definitely won't need to do this.
:type manifest: str
:param verify: (v1 schema only) Whether to verify the integrity of the alias definition in the registry itself. You almost definitely won't need to change this from the default (``True``).
:type verify: bool
:param sizes: Whether to return sizes of the blobs along with their hashes
:type sizes: bool
:param dcd: (if ``manifest`` is specified) The Docker-Content-Digest header returned when getting the manifest. If present, this is checked against the manifest.
:type dcd: str
:rtype: list
:returns: If ``sizes`` is falsey, a list of blob hashes (strings) which are assigned to the alias. If ``sizes`` is truthy, a list of (hash,size) tuples for each blob.
"""
return self._get_alias(alias, manifest, verify, sizes, dcd, False)
|
[
"def",
"get_alias",
"(",
"self",
",",
"alias",
"=",
"None",
",",
"manifest",
"=",
"None",
",",
"verify",
"=",
"True",
",",
"sizes",
"=",
"False",
",",
"dcd",
"=",
"None",
")",
":",
"# pylint: disable=too-many-arguments",
"return",
"self",
".",
"_get_alias",
"(",
"alias",
",",
"manifest",
",",
"verify",
",",
"sizes",
",",
"dcd",
",",
"False",
")"
] |
Get the blob hashes assigned to an alias.
:param alias: Alias name. You almost definitely will only need to pass this argument.
:type alias: str
:param manifest: If you previously obtained a manifest, specify it here instead of ``alias``. You almost definitely won't need to do this.
:type manifest: str
:param verify: (v1 schema only) Whether to verify the integrity of the alias definition in the registry itself. You almost definitely won't need to change this from the default (``True``).
:type verify: bool
:param sizes: Whether to return sizes of the blobs along with their hashes
:type sizes: bool
:param dcd: (if ``manifest`` is specified) The Docker-Content-Digest header returned when getting the manifest. If present, this is checked against the manifest.
:type dcd: str
:rtype: list
:returns: If ``sizes`` is falsey, a list of blob hashes (strings) which are assigned to the alias. If ``sizes`` is truthy, a list of (hash,size) tuples for each blob.
|
[
"Get",
"the",
"blob",
"hashes",
"assigned",
"to",
"an",
"alias",
"."
] |
63fad55e0f0086e5f6d3511670db1ef23b5298f6
|
https://github.com/davedoesdev/dxf/blob/63fad55e0f0086e5f6d3511670db1ef23b5298f6/dxf/__init__.py#L630-L658
|
train
|
davedoesdev/dxf
|
dxf/__init__.py
|
DXF._get_dcd
|
def _get_dcd(self, alias):
"""
Get the Docker-Content-Digest header for an alias.
:param alias: Alias name.
:type alias: str
:rtype: str
:returns: DCD header for the alias.
"""
# https://docs.docker.com/registry/spec/api/#deleting-an-image
# Note When deleting a manifest from a registry version 2.3 or later,
# the following header must be used when HEAD or GET-ing the manifest
# to obtain the correct digest to delete:
# Accept: application/vnd.docker.distribution.manifest.v2+json
return self._request(
'head',
'manifests/{}'.format(alias),
headers={'Accept': _schema2_mimetype},
).headers.get('Docker-Content-Digest')
|
python
|
def _get_dcd(self, alias):
"""
Get the Docker-Content-Digest header for an alias.
:param alias: Alias name.
:type alias: str
:rtype: str
:returns: DCD header for the alias.
"""
# https://docs.docker.com/registry/spec/api/#deleting-an-image
# Note When deleting a manifest from a registry version 2.3 or later,
# the following header must be used when HEAD or GET-ing the manifest
# to obtain the correct digest to delete:
# Accept: application/vnd.docker.distribution.manifest.v2+json
return self._request(
'head',
'manifests/{}'.format(alias),
headers={'Accept': _schema2_mimetype},
).headers.get('Docker-Content-Digest')
|
[
"def",
"_get_dcd",
"(",
"self",
",",
"alias",
")",
":",
"# https://docs.docker.com/registry/spec/api/#deleting-an-image",
"# Note When deleting a manifest from a registry version 2.3 or later,",
"# the following header must be used when HEAD or GET-ing the manifest",
"# to obtain the correct digest to delete:",
"# Accept: application/vnd.docker.distribution.manifest.v2+json",
"return",
"self",
".",
"_request",
"(",
"'head'",
",",
"'manifests/{}'",
".",
"format",
"(",
"alias",
")",
",",
"headers",
"=",
"{",
"'Accept'",
":",
"_schema2_mimetype",
"}",
",",
")",
".",
"headers",
".",
"get",
"(",
"'Docker-Content-Digest'",
")"
] |
Get the Docker-Content-Digest header for an alias.
:param alias: Alias name.
:type alias: str
:rtype: str
:returns: DCD header for the alias.
|
[
"Get",
"the",
"Docker",
"-",
"Content",
"-",
"Digest",
"header",
"for",
"an",
"alias",
"."
] |
63fad55e0f0086e5f6d3511670db1ef23b5298f6
|
https://github.com/davedoesdev/dxf/blob/63fad55e0f0086e5f6d3511670db1ef23b5298f6/dxf/__init__.py#L691-L710
|
train
|
shinichi-takii/ddlparse
|
ddlparse/ddlparse.py
|
DdlParseTableColumnBase.get_name
|
def get_name(self, name_case=DdlParseBase.NAME_CASE.original):
"""
Get Name converted case
:param name_case: name case type
            * DdlParse.NAME_CASE.original : Return the name unconverted
            * DdlParse.NAME_CASE.lower : Return the name lowercased
            * DdlParse.NAME_CASE.upper : Return the name uppercased
:return: name
"""
if name_case == self.NAME_CASE.lower:
return self._name.lower()
elif name_case == self.NAME_CASE.upper:
return self._name.upper()
else:
return self._name
|
python
|
def get_name(self, name_case=DdlParseBase.NAME_CASE.original):
"""
Get Name converted case
:param name_case: name case type
            * DdlParse.NAME_CASE.original : Return the name unconverted
            * DdlParse.NAME_CASE.lower : Return the name lowercased
            * DdlParse.NAME_CASE.upper : Return the name uppercased
:return: name
"""
if name_case == self.NAME_CASE.lower:
return self._name.lower()
elif name_case == self.NAME_CASE.upper:
return self._name.upper()
else:
return self._name
|
[
"def",
"get_name",
"(",
"self",
",",
"name_case",
"=",
"DdlParseBase",
".",
"NAME_CASE",
".",
"original",
")",
":",
"if",
"name_case",
"==",
"self",
".",
"NAME_CASE",
".",
"lower",
":",
"return",
"self",
".",
"_name",
".",
"lower",
"(",
")",
"elif",
"name_case",
"==",
"self",
".",
"NAME_CASE",
".",
"upper",
":",
"return",
"self",
".",
"_name",
".",
"upper",
"(",
")",
"else",
":",
"return",
"self",
".",
"_name"
] |
Get Name converted case
:param name_case: name case type
    * DdlParse.NAME_CASE.original : Return the name unconverted
    * DdlParse.NAME_CASE.lower : Return the name lowercased
    * DdlParse.NAME_CASE.upper : Return the name uppercased
:return: name
|
[
"Get",
"Name",
"converted",
"case"
] |
7328656ee807d14960999a98ace8cd76f0fe3ff8
|
https://github.com/shinichi-takii/ddlparse/blob/7328656ee807d14960999a98ace8cd76f0fe3ff8/ddlparse/ddlparse.py#L55-L71
|
train
|
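A sketch of the three name-case modes, assuming the ddlparse package:

from ddlparse import DdlParse

table = DdlParse().parse("CREATE TABLE Sample (Col_1 integer)")
col = table.columns["Col_1"]
print(col.get_name())                           # Col_1 (unchanged)
print(col.get_name(DdlParse.NAME_CASE.lower))   # col_1
print(col.get_name(DdlParse.NAME_CASE.upper))   # COL_1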
shinichi-takii/ddlparse
|
ddlparse/ddlparse.py
|
DdlParseColumn.bigquery_data_type
|
def bigquery_data_type(self):
"""Get BigQuery Legacy SQL data type"""
# BigQuery data type = {source_database: [data type, ...], ...}
BQ_DATA_TYPE_DIC = OrderedDict()
BQ_DATA_TYPE_DIC["STRING"] = {None: [re.compile(r"(CHAR|TEXT|CLOB|JSON|UUID)")]}
BQ_DATA_TYPE_DIC["INTEGER"] = {None: [re.compile(r"INT|SERIAL|YEAR")]}
BQ_DATA_TYPE_DIC["FLOAT"] = {None: [re.compile(r"(FLOAT|DOUBLE)"), "REAL", "MONEY"]}
BQ_DATA_TYPE_DIC["DATETIME"] = {
None: ["DATETIME", "TIMESTAMP", "TIMESTAMP WITHOUT TIME ZONE"],
self.DATABASE.oracle: ["DATE"]
}
BQ_DATA_TYPE_DIC["TIMESTAMP"] = {None: ["TIMESTAMPTZ", "TIMESTAMP WITH TIME ZONE"]}
BQ_DATA_TYPE_DIC["DATE"] = {None: ["DATE"]}
BQ_DATA_TYPE_DIC["TIME"] = {None: ["TIME"]}
BQ_DATA_TYPE_DIC["BOOLEAN"] = {None: [re.compile(r"BOOL")]}
for bq_type, conditions in BQ_DATA_TYPE_DIC.items():
for source_db, source_datatypes in conditions.items():
for source_datatype in source_datatypes:
if isinstance(source_datatype, str):
if self._data_type == source_datatype \
and ( self._source_database == source_db
or (self._source_database is not None and source_db is None)):
return bq_type
elif re.search(source_datatype, self._data_type) \
and ( self._source_database == source_db
or (self._source_database is not None and source_db is None)):
return bq_type
if self._data_type in ["NUMERIC", "NUMBER", "DECIMAL"]:
if self._scale is not None:
return "FLOAT"
if self._data_type == "NUMBER" \
and self._source_database == self.DATABASE.oracle \
and self._length is None:
return "FLOAT"
return "INTEGER"
raise ValueError("Unknown data type : '{}'".format(self._data_type))
|
python
|
def bigquery_data_type(self):
"""Get BigQuery Legacy SQL data type"""
# BigQuery data type = {source_database: [data type, ...], ...}
BQ_DATA_TYPE_DIC = OrderedDict()
BQ_DATA_TYPE_DIC["STRING"] = {None: [re.compile(r"(CHAR|TEXT|CLOB|JSON|UUID)")]}
BQ_DATA_TYPE_DIC["INTEGER"] = {None: [re.compile(r"INT|SERIAL|YEAR")]}
BQ_DATA_TYPE_DIC["FLOAT"] = {None: [re.compile(r"(FLOAT|DOUBLE)"), "REAL", "MONEY"]}
BQ_DATA_TYPE_DIC["DATETIME"] = {
None: ["DATETIME", "TIMESTAMP", "TIMESTAMP WITHOUT TIME ZONE"],
self.DATABASE.oracle: ["DATE"]
}
BQ_DATA_TYPE_DIC["TIMESTAMP"] = {None: ["TIMESTAMPTZ", "TIMESTAMP WITH TIME ZONE"]}
BQ_DATA_TYPE_DIC["DATE"] = {None: ["DATE"]}
BQ_DATA_TYPE_DIC["TIME"] = {None: ["TIME"]}
BQ_DATA_TYPE_DIC["BOOLEAN"] = {None: [re.compile(r"BOOL")]}
for bq_type, conditions in BQ_DATA_TYPE_DIC.items():
for source_db, source_datatypes in conditions.items():
for source_datatype in source_datatypes:
if isinstance(source_datatype, str):
if self._data_type == source_datatype \
and ( self._source_database == source_db
or (self._source_database is not None and source_db is None)):
return bq_type
elif re.search(source_datatype, self._data_type) \
and ( self._source_database == source_db
or (self._source_database is not None and source_db is None)):
return bq_type
if self._data_type in ["NUMERIC", "NUMBER", "DECIMAL"]:
if self._scale is not None:
return "FLOAT"
if self._data_type == "NUMBER" \
and self._source_database == self.DATABASE.oracle \
and self._length is None:
return "FLOAT"
return "INTEGER"
raise ValueError("Unknown data type : '{}'".format(self._data_type))
|
[
"def",
"bigquery_data_type",
"(",
"self",
")",
":",
"# BigQuery data type = {source_database: [data type, ...], ...}",
"BQ_DATA_TYPE_DIC",
"=",
"OrderedDict",
"(",
")",
"BQ_DATA_TYPE_DIC",
"[",
"\"STRING\"",
"]",
"=",
"{",
"None",
":",
"[",
"re",
".",
"compile",
"(",
"r\"(CHAR|TEXT|CLOB|JSON|UUID)\"",
")",
"]",
"}",
"BQ_DATA_TYPE_DIC",
"[",
"\"INTEGER\"",
"]",
"=",
"{",
"None",
":",
"[",
"re",
".",
"compile",
"(",
"r\"INT|SERIAL|YEAR\"",
")",
"]",
"}",
"BQ_DATA_TYPE_DIC",
"[",
"\"FLOAT\"",
"]",
"=",
"{",
"None",
":",
"[",
"re",
".",
"compile",
"(",
"r\"(FLOAT|DOUBLE)\"",
")",
",",
"\"REAL\"",
",",
"\"MONEY\"",
"]",
"}",
"BQ_DATA_TYPE_DIC",
"[",
"\"DATETIME\"",
"]",
"=",
"{",
"None",
":",
"[",
"\"DATETIME\"",
",",
"\"TIMESTAMP\"",
",",
"\"TIMESTAMP WITHOUT TIME ZONE\"",
"]",
",",
"self",
".",
"DATABASE",
".",
"oracle",
":",
"[",
"\"DATE\"",
"]",
"}",
"BQ_DATA_TYPE_DIC",
"[",
"\"TIMESTAMP\"",
"]",
"=",
"{",
"None",
":",
"[",
"\"TIMESTAMPTZ\"",
",",
"\"TIMESTAMP WITH TIME ZONE\"",
"]",
"}",
"BQ_DATA_TYPE_DIC",
"[",
"\"DATE\"",
"]",
"=",
"{",
"None",
":",
"[",
"\"DATE\"",
"]",
"}",
"BQ_DATA_TYPE_DIC",
"[",
"\"TIME\"",
"]",
"=",
"{",
"None",
":",
"[",
"\"TIME\"",
"]",
"}",
"BQ_DATA_TYPE_DIC",
"[",
"\"BOOLEAN\"",
"]",
"=",
"{",
"None",
":",
"[",
"re",
".",
"compile",
"(",
"r\"BOOL\"",
")",
"]",
"}",
"for",
"bq_type",
",",
"conditions",
"in",
"BQ_DATA_TYPE_DIC",
".",
"items",
"(",
")",
":",
"for",
"source_db",
",",
"source_datatypes",
"in",
"conditions",
".",
"items",
"(",
")",
":",
"for",
"source_datatype",
"in",
"source_datatypes",
":",
"if",
"isinstance",
"(",
"source_datatype",
",",
"str",
")",
":",
"if",
"self",
".",
"_data_type",
"==",
"source_datatype",
"and",
"(",
"self",
".",
"_source_database",
"==",
"source_db",
"or",
"(",
"self",
".",
"_source_database",
"is",
"not",
"None",
"and",
"source_db",
"is",
"None",
")",
")",
":",
"return",
"bq_type",
"elif",
"re",
".",
"search",
"(",
"source_datatype",
",",
"self",
".",
"_data_type",
")",
"and",
"(",
"self",
".",
"_source_database",
"==",
"source_db",
"or",
"(",
"self",
".",
"_source_database",
"is",
"not",
"None",
"and",
"source_db",
"is",
"None",
")",
")",
":",
"return",
"bq_type",
"if",
"self",
".",
"_data_type",
"in",
"[",
"\"NUMERIC\"",
",",
"\"NUMBER\"",
",",
"\"DECIMAL\"",
"]",
":",
"if",
"self",
".",
"_scale",
"is",
"not",
"None",
":",
"return",
"\"FLOAT\"",
"if",
"self",
".",
"_data_type",
"==",
"\"NUMBER\"",
"and",
"self",
".",
"_source_database",
"==",
"self",
".",
"DATABASE",
".",
"oracle",
"and",
"self",
".",
"_length",
"is",
"None",
":",
"return",
"\"FLOAT\"",
"return",
"\"INTEGER\"",
"raise",
"ValueError",
"(",
"\"Unknown data type : '{}'\"",
".",
"format",
"(",
"self",
".",
"_data_type",
")",
")"
] |
Get BigQuery Legacy SQL data type
|
[
"Get",
"BigQuery",
"Legacy",
"SQL",
"data",
"type"
] |
7328656ee807d14960999a98ace8cd76f0fe3ff8
|
https://github.com/shinichi-takii/ddlparse/blob/7328656ee807d14960999a98ace8cd76f0fe3ff8/ddlparse/ddlparse.py#L173-L216
|
train
|
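A sketch of the lookup order: a bucket keyed by the source database (Oracle's DATE) takes precedence over the generic None bucket, and an Oracle NUMBER with no length falls through to FLOAT. In the released library the value appears to be exposed as a property, as assumed here:

from ddlparse import DdlParse

ddl = "CREATE TABLE t (d DATE, n NUMBER)"

oracle = DdlParse().parse(ddl, source_database=DdlParse.DATABASE.oracle)
print(oracle.columns["d"].bigquery_data_type)   # DATETIME (Oracle DATE)
print(oracle.columns["n"].bigquery_data_type)   # FLOAT (NUMBER, no length)

generic = DdlParse().parse(ddl)
print(generic.columns["d"].bigquery_data_type)  # DATE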
shinichi-takii/ddlparse
|
ddlparse/ddlparse.py
|
DdlParseColumn.to_bigquery_field
|
def to_bigquery_field(self, name_case=DdlParseBase.NAME_CASE.original):
"""Generate BigQuery JSON field define"""
col_name = self.get_name(name_case)
mode = self.bigquery_mode
if self.array_dimensional <= 1:
# no or one dimensional array data type
type = self.bigquery_legacy_data_type
else:
# multiple dimensional array data type
type = "RECORD"
fields = OrderedDict()
fields_cur = fields
for i in range(1, self.array_dimensional):
is_last = True if i == self.array_dimensional - 1 else False
fields_cur['fields'] = [OrderedDict()]
fields_cur = fields_cur['fields'][0]
fields_cur['name'] = "dimension_{}".format(i)
fields_cur['type'] = self.bigquery_legacy_data_type if is_last else "RECORD"
fields_cur['mode'] = self.bigquery_mode if is_last else "REPEATED"
col = OrderedDict()
col['name'] = col_name
col['type'] = type
col['mode'] = mode
if self.array_dimensional > 1:
col['fields'] = fields['fields']
return json.dumps(col)
|
python
|
def to_bigquery_field(self, name_case=DdlParseBase.NAME_CASE.original):
"""Generate BigQuery JSON field define"""
col_name = self.get_name(name_case)
mode = self.bigquery_mode
if self.array_dimensional <= 1:
# no or one dimensional array data type
type = self.bigquery_legacy_data_type
else:
# multiple dimensional array data type
type = "RECORD"
fields = OrderedDict()
fields_cur = fields
for i in range(1, self.array_dimensional):
is_last = True if i == self.array_dimensional - 1 else False
fields_cur['fields'] = [OrderedDict()]
fields_cur = fields_cur['fields'][0]
fields_cur['name'] = "dimension_{}".format(i)
fields_cur['type'] = self.bigquery_legacy_data_type if is_last else "RECORD"
fields_cur['mode'] = self.bigquery_mode if is_last else "REPEATED"
col = OrderedDict()
col['name'] = col_name
col['type'] = type
col['mode'] = mode
if self.array_dimensional > 1:
col['fields'] = fields['fields']
return json.dumps(col)
|
[
"def",
"to_bigquery_field",
"(",
"self",
",",
"name_case",
"=",
"DdlParseBase",
".",
"NAME_CASE",
".",
"original",
")",
":",
"col_name",
"=",
"self",
".",
"get_name",
"(",
"name_case",
")",
"mode",
"=",
"self",
".",
"bigquery_mode",
"if",
"self",
".",
"array_dimensional",
"<=",
"1",
":",
"# no or one dimensional array data type",
"type",
"=",
"self",
".",
"bigquery_legacy_data_type",
"else",
":",
"# multiple dimensional array data type",
"type",
"=",
"\"RECORD\"",
"fields",
"=",
"OrderedDict",
"(",
")",
"fields_cur",
"=",
"fields",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"self",
".",
"array_dimensional",
")",
":",
"is_last",
"=",
"True",
"if",
"i",
"==",
"self",
".",
"array_dimensional",
"-",
"1",
"else",
"False",
"fields_cur",
"[",
"'fields'",
"]",
"=",
"[",
"OrderedDict",
"(",
")",
"]",
"fields_cur",
"=",
"fields_cur",
"[",
"'fields'",
"]",
"[",
"0",
"]",
"fields_cur",
"[",
"'name'",
"]",
"=",
"\"dimension_{}\"",
".",
"format",
"(",
"i",
")",
"fields_cur",
"[",
"'type'",
"]",
"=",
"self",
".",
"bigquery_legacy_data_type",
"if",
"is_last",
"else",
"\"RECORD\"",
"fields_cur",
"[",
"'mode'",
"]",
"=",
"self",
".",
"bigquery_mode",
"if",
"is_last",
"else",
"\"REPEATED\"",
"col",
"=",
"OrderedDict",
"(",
")",
"col",
"[",
"'name'",
"]",
"=",
"col_name",
"col",
"[",
"'type'",
"]",
"=",
"type",
"col",
"[",
"'mode'",
"]",
"=",
"mode",
"if",
"self",
".",
"array_dimensional",
">",
"1",
":",
"col",
"[",
"'fields'",
"]",
"=",
"fields",
"[",
"'fields'",
"]",
"return",
"json",
".",
"dumps",
"(",
"col",
")"
] |
Generate BigQuery JSON field define
|
[
"Generate",
"BigQuery",
"JSON",
"field",
"define"
] |
7328656ee807d14960999a98ace8cd76f0fe3ff8
|
https://github.com/shinichi-takii/ddlparse/blob/7328656ee807d14960999a98ace8cd76f0fe3ff8/ddlparse/ddlparse.py#L250-L284
|
train
|
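A sketch of the two output shapes: a one-dimensional array stays a REPEATED scalar field, while deeper arrays nest RECORDs named dimension_1, dimension_2, ... (expected JSON shown approximately):

from ddlparse import DdlParse

table = DdlParse().parse("CREATE TABLE t (tags text[], grid integer[][])")
print(table.columns["tags"].to_bigquery_field())
# {"name": "tags", "type": "STRING", "mode": "REPEATED"}
print(table.columns["grid"].to_bigquery_field())
# {"name": "grid", "type": "RECORD", "mode": "REPEATED",
#  "fields": [{"name": "dimension_1", "type": "INTEGER", "mode": "REPEATED"}]}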
shinichi-takii/ddlparse
|
ddlparse/ddlparse.py
|
DdlParseTable.to_bigquery_ddl
|
def to_bigquery_ddl(self, name_case=DdlParseBase.NAME_CASE.original):
"""
Generate BigQuery CREATE TABLE statements
:param name_case: name case type
            * DdlParse.NAME_CASE.original : Return the name unconverted
            * DdlParse.NAME_CASE.lower : Return the name lowercased
            * DdlParse.NAME_CASE.upper : Return the name uppercased
:return: BigQuery CREATE TABLE statements
"""
if self.schema is None:
dataset = "dataset"
elif name_case == self.NAME_CASE.lower:
dataset = self.schema.lower()
elif name_case == self.NAME_CASE.upper:
dataset = self.schema.upper()
else:
dataset = self.schema
cols_defs = []
for col in self.columns.values():
col_name = col.get_name(name_case)
if col.array_dimensional < 1:
# no array data type
type = col.bigquery_standard_data_type
not_null = " NOT NULL" if col.not_null else ""
else:
# one or multiple dimensional array data type
type_front = "ARRAY<"
type_back = ">"
for i in range(1, col.array_dimensional):
type_front += "STRUCT<dimension_{} ARRAY<".format(i)
type_back += ">>"
type = "{}{}{}".format(type_front, col.bigquery_standard_data_type, type_back)
not_null = ""
cols_defs.append("{name} {type}{not_null}".format(
name=col_name,
type=type,
not_null=not_null,
))
return textwrap.dedent(
"""\
#standardSQL
CREATE TABLE `project.{dataset}.{table}`
(
{colmns_define}
)""").format(
dataset=dataset,
table=self.get_name(name_case),
colmns_define=",\n ".join(cols_defs),
)
|
python
|
def to_bigquery_ddl(self, name_case=DdlParseBase.NAME_CASE.original):
"""
Generate BigQuery CREATE TABLE statements
:param name_case: name case type
            * DdlParse.NAME_CASE.original : Return the name unconverted
            * DdlParse.NAME_CASE.lower : Return the name lowercased
            * DdlParse.NAME_CASE.upper : Return the name uppercased
:return: BigQuery CREATE TABLE statements
"""
if self.schema is None:
dataset = "dataset"
elif name_case == self.NAME_CASE.lower:
dataset = self.schema.lower()
elif name_case == self.NAME_CASE.upper:
dataset = self.schema.upper()
else:
dataset = self.schema
cols_defs = []
for col in self.columns.values():
col_name = col.get_name(name_case)
if col.array_dimensional < 1:
# no array data type
type = col.bigquery_standard_data_type
not_null = " NOT NULL" if col.not_null else ""
else:
# one or multiple dimensional array data type
type_front = "ARRAY<"
type_back = ">"
for i in range(1, col.array_dimensional):
type_front += "STRUCT<dimension_{} ARRAY<".format(i)
type_back += ">>"
type = "{}{}{}".format(type_front, col.bigquery_standard_data_type, type_back)
not_null = ""
cols_defs.append("{name} {type}{not_null}".format(
name=col_name,
type=type,
not_null=not_null,
))
return textwrap.dedent(
"""\
#standardSQL
CREATE TABLE `project.{dataset}.{table}`
(
{colmns_define}
)""").format(
dataset=dataset,
table=self.get_name(name_case),
colmns_define=",\n ".join(cols_defs),
)
|
[
"def",
"to_bigquery_ddl",
"(",
"self",
",",
"name_case",
"=",
"DdlParseBase",
".",
"NAME_CASE",
".",
"original",
")",
":",
"if",
"self",
".",
"schema",
"is",
"None",
":",
"dataset",
"=",
"\"dataset\"",
"elif",
"name_case",
"==",
"self",
".",
"NAME_CASE",
".",
"lower",
":",
"dataset",
"=",
"self",
".",
"schema",
".",
"lower",
"(",
")",
"elif",
"name_case",
"==",
"self",
".",
"NAME_CASE",
".",
"upper",
":",
"dataset",
"=",
"self",
".",
"schema",
".",
"upper",
"(",
")",
"else",
":",
"dataset",
"=",
"self",
".",
"schema",
"cols_defs",
"=",
"[",
"]",
"for",
"col",
"in",
"self",
".",
"columns",
".",
"values",
"(",
")",
":",
"col_name",
"=",
"col",
".",
"get_name",
"(",
"name_case",
")",
"if",
"col",
".",
"array_dimensional",
"<",
"1",
":",
"# no array data type",
"type",
"=",
"col",
".",
"bigquery_standard_data_type",
"not_null",
"=",
"\" NOT NULL\"",
"if",
"col",
".",
"not_null",
"else",
"\"\"",
"else",
":",
"# one or multiple dimensional array data type",
"type_front",
"=",
"\"ARRAY<\"",
"type_back",
"=",
"\">\"",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"col",
".",
"array_dimensional",
")",
":",
"type_front",
"+=",
"\"STRUCT<dimension_{} ARRAY<\"",
".",
"format",
"(",
"i",
")",
"type_back",
"+=",
"\">>\"",
"type",
"=",
"\"{}{}{}\"",
".",
"format",
"(",
"type_front",
",",
"col",
".",
"bigquery_standard_data_type",
",",
"type_back",
")",
"not_null",
"=",
"\"\"",
"cols_defs",
".",
"append",
"(",
"\"{name} {type}{not_null}\"",
".",
"format",
"(",
"name",
"=",
"col_name",
",",
"type",
"=",
"type",
",",
"not_null",
"=",
"not_null",
",",
")",
")",
"return",
"textwrap",
".",
"dedent",
"(",
"\"\"\"\\\n #standardSQL\n CREATE TABLE `project.{dataset}.{table}`\n (\n {colmns_define}\n )\"\"\"",
")",
".",
"format",
"(",
"dataset",
"=",
"dataset",
",",
"table",
"=",
"self",
".",
"get_name",
"(",
"name_case",
")",
",",
"colmns_define",
"=",
"\",\\n \"",
".",
"join",
"(",
"cols_defs",
")",
",",
")"
] |
Generate BigQuery CREATE TABLE statements
:param name_case: name case type
    * DdlParse.NAME_CASE.original : Return the name unconverted
    * DdlParse.NAME_CASE.lower : Return the name lowercased
    * DdlParse.NAME_CASE.upper : Return the name uppercased
:return: BigQuery CREATE TABLE statements
|
[
"Generate",
"BigQuery",
"CREATE",
"TABLE",
"statements"
] |
7328656ee807d14960999a98ace8cd76f0fe3ff8
|
https://github.com/shinichi-takii/ddlparse/blob/7328656ee807d14960999a98ace8cd76f0fe3ff8/ddlparse/ddlparse.py#L393-L450
|
train
|
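A sketch of the generated Standard SQL, with the schema name doubling as the BigQuery dataset (output shown approximately):

from ddlparse import DdlParse

ddl = """
CREATE TABLE myschema.sample (
  id integer PRIMARY KEY,
  name varchar(100) NOT NULL
);
"""
print(DdlParse().parse(ddl).to_bigquery_ddl())
# #standardSQL
# CREATE TABLE `project.myschema.sample`
# (
#   id INT64 NOT NULL,
#   name STRING NOT NULL
# )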
shinichi-takii/ddlparse
|
ddlparse/ddlparse.py
|
DdlParse.parse
|
def parse(self, ddl=None, source_database=None):
"""
Parse DDL script.
:param ddl: DDL script
:return: DdlParseTable, Parsed table define info.
"""
if ddl is not None:
self._ddl = ddl
if source_database is not None:
self.source_database = source_database
if self._ddl is None:
raise ValueError("DDL is not specified")
ret = self._DDL_PARSE_EXPR.parseString(self._ddl)
# print(ret.dump())
if "schema" in ret:
self._table.schema = ret["schema"]
self._table.name = ret["table"]
self._table.is_temp = True if "temp" in ret else False
for ret_col in ret["columns"]:
if ret_col.getName() == "column":
# add column
col = self._table.columns.append(
column_name=ret_col["name"],
data_type_array=ret_col["type"],
array_brackets=ret_col['array_brackets'] if "array_brackets" in ret_col else None)
if "constraint" in ret_col:
col.constraint = ret_col["constraint"]
elif ret_col.getName() == "constraint":
# set column constraint
for col_name in ret_col["constraint_columns"]:
col = self._table.columns[col_name]
if ret_col["type"] == "PRIMARY KEY":
col.not_null = True
col.primary_key = True
elif ret_col["type"] in ["UNIQUE", "UNIQUE KEY"]:
col.unique = True
elif ret_col["type"] == "NOT NULL":
col.not_null = True
return self._table
|
python
|
def parse(self, ddl=None, source_database=None):
"""
Parse DDL script.
:param ddl: DDL script
:return: DdlParseTable, Parsed table define info.
"""
if ddl is not None:
self._ddl = ddl
if source_database is not None:
self.source_database = source_database
if self._ddl is None:
raise ValueError("DDL is not specified")
ret = self._DDL_PARSE_EXPR.parseString(self._ddl)
# print(ret.dump())
if "schema" in ret:
self._table.schema = ret["schema"]
self._table.name = ret["table"]
self._table.is_temp = True if "temp" in ret else False
for ret_col in ret["columns"]:
if ret_col.getName() == "column":
# add column
col = self._table.columns.append(
column_name=ret_col["name"],
data_type_array=ret_col["type"],
array_brackets=ret_col['array_brackets'] if "array_brackets" in ret_col else None)
if "constraint" in ret_col:
col.constraint = ret_col["constraint"]
elif ret_col.getName() == "constraint":
# set column constraint
for col_name in ret_col["constraint_columns"]:
col = self._table.columns[col_name]
if ret_col["type"] == "PRIMARY KEY":
col.not_null = True
col.primary_key = True
elif ret_col["type"] in ["UNIQUE", "UNIQUE KEY"]:
col.unique = True
elif ret_col["type"] == "NOT NULL":
col.not_null = True
return self._table
|
[
"def",
"parse",
"(",
"self",
",",
"ddl",
"=",
"None",
",",
"source_database",
"=",
"None",
")",
":",
"if",
"ddl",
"is",
"not",
"None",
":",
"self",
".",
"_ddl",
"=",
"ddl",
"if",
"source_database",
"is",
"not",
"None",
":",
"self",
".",
"source_database",
"=",
"source_database",
"if",
"self",
".",
"_ddl",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"DDL is not specified\"",
")",
"ret",
"=",
"self",
".",
"_DDL_PARSE_EXPR",
".",
"parseString",
"(",
"self",
".",
"_ddl",
")",
"# print(ret.dump())",
"if",
"\"schema\"",
"in",
"ret",
":",
"self",
".",
"_table",
".",
"schema",
"=",
"ret",
"[",
"\"schema\"",
"]",
"self",
".",
"_table",
".",
"name",
"=",
"ret",
"[",
"\"table\"",
"]",
"self",
".",
"_table",
".",
"is_temp",
"=",
"True",
"if",
"\"temp\"",
"in",
"ret",
"else",
"False",
"for",
"ret_col",
"in",
"ret",
"[",
"\"columns\"",
"]",
":",
"if",
"ret_col",
".",
"getName",
"(",
")",
"==",
"\"column\"",
":",
"# add column",
"col",
"=",
"self",
".",
"_table",
".",
"columns",
".",
"append",
"(",
"column_name",
"=",
"ret_col",
"[",
"\"name\"",
"]",
",",
"data_type_array",
"=",
"ret_col",
"[",
"\"type\"",
"]",
",",
"array_brackets",
"=",
"ret_col",
"[",
"'array_brackets'",
"]",
"if",
"\"array_brackets\"",
"in",
"ret_col",
"else",
"None",
")",
"if",
"\"constraint\"",
"in",
"ret_col",
":",
"col",
".",
"constraint",
"=",
"ret_col",
"[",
"\"constraint\"",
"]",
"elif",
"ret_col",
".",
"getName",
"(",
")",
"==",
"\"constraint\"",
":",
"# set column constraint",
"for",
"col_name",
"in",
"ret_col",
"[",
"\"constraint_columns\"",
"]",
":",
"col",
"=",
"self",
".",
"_table",
".",
"columns",
"[",
"col_name",
"]",
"if",
"ret_col",
"[",
"\"type\"",
"]",
"==",
"\"PRIMARY KEY\"",
":",
"col",
".",
"not_null",
"=",
"True",
"col",
".",
"primary_key",
"=",
"True",
"elif",
"ret_col",
"[",
"\"type\"",
"]",
"in",
"[",
"\"UNIQUE\"",
",",
"\"UNIQUE KEY\"",
"]",
":",
"col",
".",
"unique",
"=",
"True",
"elif",
"ret_col",
"[",
"\"type\"",
"]",
"==",
"\"NOT NULL\"",
":",
"col",
".",
"not_null",
"=",
"True",
"return",
"self",
".",
"_table"
] |
Parse DDL script.
:param ddl: DDL script
:return: DdlParseTable, Parsed table define info.
|
[
"Parse",
"DDL",
"script",
"."
] |
7328656ee807d14960999a98ace8cd76f0fe3ff8
|
https://github.com/shinichi-takii/ddlparse/blob/7328656ee807d14960999a98ace8cd76f0fe3ff8/ddlparse/ddlparse.py#L540-L591
|
train
|
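An end-to-end sketch: parse a script, then read back the constraint flags the loop above sets on each column:

from ddlparse import DdlParse

ddl = """
CREATE TABLE users (
  id integer,
  email varchar(255) NOT NULL,
  PRIMARY KEY (id)
);
"""
table = DdlParse().parse(ddl)
print(table.schema, table.name)          # None users
for col in table.columns.values():
    print(col.name, col.primary_key, col.not_null)
# id True True    (PRIMARY KEY implies NOT NULL)
# email False True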
rhelmot/nclib
|
nclib/process.py
|
Process.launch
|
def launch(program, sock, stderr=True, cwd=None, env=None):
"""
A static method for launching a process that is connected to a given
socket. Same rules from the Process constructor apply.
"""
if stderr is True:
err = sock # redirect to socket
elif stderr is False:
err = open(os.devnull, 'wb') # hide
elif stderr is None:
err = None # redirect to console
p = subprocess.Popen(program,
shell=type(program) not in (list, tuple),
stdin=sock, stdout=sock, stderr=err,
cwd=cwd, env=env,
close_fds=True)
sock.close()
return p
|
python
|
def launch(program, sock, stderr=True, cwd=None, env=None):
"""
A static method for launching a process that is connected to a given
socket. Same rules from the Process constructor apply.
"""
if stderr is True:
err = sock # redirect to socket
elif stderr is False:
err = open(os.devnull, 'wb') # hide
elif stderr is None:
err = None # redirect to console
p = subprocess.Popen(program,
shell=type(program) not in (list, tuple),
stdin=sock, stdout=sock, stderr=err,
cwd=cwd, env=env,
close_fds=True)
sock.close()
return p
|
[
"def",
"launch",
"(",
"program",
",",
"sock",
",",
"stderr",
"=",
"True",
",",
"cwd",
"=",
"None",
",",
"env",
"=",
"None",
")",
":",
"if",
"stderr",
"is",
"True",
":",
"err",
"=",
"sock",
"# redirect to socket",
"elif",
"stderr",
"is",
"False",
":",
"err",
"=",
"open",
"(",
"os",
".",
"devnull",
",",
"'wb'",
")",
"# hide",
"elif",
"stderr",
"is",
"None",
":",
"err",
"=",
"None",
"# redirect to console",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"program",
",",
"shell",
"=",
"type",
"(",
"program",
")",
"not",
"in",
"(",
"list",
",",
"tuple",
")",
",",
"stdin",
"=",
"sock",
",",
"stdout",
"=",
"sock",
",",
"stderr",
"=",
"err",
",",
"cwd",
"=",
"cwd",
",",
"env",
"=",
"env",
",",
"close_fds",
"=",
"True",
")",
"sock",
".",
"close",
"(",
")",
"return",
"p"
] |
A static method for launching a process that is connected to a given
socket. Same rules from the Process constructor apply.
|
[
"A",
"static",
"method",
"for",
"launching",
"a",
"process",
"that",
"is",
"connected",
"to",
"a",
"given",
"socket",
".",
"Same",
"rules",
"from",
"the",
"Process",
"constructor",
"apply",
"."
] |
6147779766557ee4fafcbae683bdd2f74157e825
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/process.py#L85-L104
|
train
|
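A POSIX sketch with a socketpair: launch() wires the child's stdin/stdout to one end of the pair and closes its own reference to it, leaving the parent talking over the other end:

import socket
from nclib.process import Process

parent, child = socket.socketpair()
Process.launch('cat', child)     # 'cat' now reads/writes the socket
parent.sendall(b'hello\n')
print(parent.recv(4096))         # b'hello\n' echoed back by cat
parent.close()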
rhelmot/nclib
|
nclib/server.py
|
UDPServer.respond
|
def respond(self, packet, peer, flags=0):
"""
Send a message back to a peer.
:param packet: The data to send
:param peer: The address to send to, as a tuple (host, port)
:param flags: Any sending flags you want to use for some reason
"""
self.sock.sendto(packet, flags, peer)
|
python
|
def respond(self, packet, peer, flags=0):
"""
Send a message back to a peer.
:param packet: The data to send
:param peer: The address to send to, as a tuple (host, port)
:param flags: Any sending flags you want to use for some reason
"""
self.sock.sendto(packet, flags, peer)
|
[
"def",
"respond",
"(",
"self",
",",
"packet",
",",
"peer",
",",
"flags",
"=",
"0",
")",
":",
"self",
".",
"sock",
".",
"sendto",
"(",
"packet",
",",
"flags",
",",
"peer",
")"
] |
Send a message back to a peer.
:param packet: The data to send
:param peer: The address to send to, as a tuple (host, port)
:param flags: Any sending flags you want to use for some reason
|
[
"Send",
"a",
"message",
"back",
"to",
"a",
"peer",
"."
] |
6147779766557ee4fafcbae683bdd2f74157e825
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/server.py#L75-L83
|
train
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.shutdown_rd
|
def shutdown_rd(self):
"""
Send a shutdown signal for reading - you may no longer read from this
socket.
"""
if self._sock_send is not None:
self.sock.close()
else:
return self.shutdown(socket.SHUT_RD)
|
python
|
def shutdown_rd(self):
"""
Send a shutdown signal for reading - you may no longer read from this
socket.
"""
if self._sock_send is not None:
self.sock.close()
else:
return self.shutdown(socket.SHUT_RD)
|
[
"def",
"shutdown_rd",
"(",
"self",
")",
":",
"if",
"self",
".",
"_sock_send",
"is",
"not",
"None",
":",
"self",
".",
"sock",
".",
"close",
"(",
")",
"else",
":",
"return",
"self",
".",
"shutdown",
"(",
"socket",
".",
"SHUT_RD",
")"
] |
Send a shutdown signal for reading - you may no longer read from this
socket.
|
[
"Send",
"a",
"shutdown",
"signal",
"for",
"reading",
"-",
"you",
"may",
"no",
"longer",
"read",
"from",
"this",
"socket",
"."
] |
6147779766557ee4fafcbae683bdd2f74157e825
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L443-L451
|
train
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.shutdown_wr
|
def shutdown_wr(self):
"""
Send a shutdown signal for writing - you may no longer write to this
socket.
"""
if self._sock_send is not None:
self._sock_send.close()
else:
return self.shutdown(socket.SHUT_WR)
|
python
|
def shutdown_wr(self):
"""
Send a shutdown signal for writing - you may no longer write to this
socket.
"""
if self._sock_send is not None:
self._sock_send.close()
else:
return self.shutdown(socket.SHUT_WR)
|
[
"def",
"shutdown_wr",
"(",
"self",
")",
":",
"if",
"self",
".",
"_sock_send",
"is",
"not",
"None",
":",
"self",
".",
"_sock_send",
".",
"close",
"(",
")",
"else",
":",
"return",
"self",
".",
"shutdown",
"(",
"socket",
".",
"SHUT_WR",
")"
] |
Send a shutdown signal for writing - you may no longer write to this
socket.
|
[
"Send",
"a",
"shutdown",
"signal",
"for",
"writing",
"-",
"you",
"may",
"no",
"longer",
"write",
"to",
"this",
"socket",
"."
] |
6147779766557ee4fafcbae683bdd2f74157e825
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L453-L461
|
train
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat._recv_predicate
|
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
"""
Receive until predicate returns a positive integer.
The returned number is the size to return.
"""
if timeout == 'default':
timeout = self._timeout
self.timed_out = False
start = time.time()
try:
while True:
cut_at = predicate(self.buf)
if cut_at > 0:
break
if timeout is not None:
time_elapsed = time.time() - start
if time_elapsed > timeout:
raise socket.timeout
self._settimeout(timeout - time_elapsed)
data = self._recv(4096)
self._log_recv(data, False)
self.buf += data
if not data:
if raise_eof:
raise NetcatError("Connection dropped!")
cut_at = len(self.buf)
break
except KeyboardInterrupt:
self._print_header('\n======== Connection interrupted! ========')
raise
except socket.timeout:
self.timed_out = True
if self._raise_timeout:
raise NetcatTimeout()
return b''
except socket.error as exc:
raise NetcatError('Socket error: %r' % exc)
self._settimeout(self._timeout)
ret = self.buf[:cut_at]
self.buf = self.buf[cut_at:]
self._log_recv(ret, True)
return ret
|
python
|
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
"""
Receive until predicate returns a positive integer.
The returned number is the size to return.
"""
if timeout == 'default':
timeout = self._timeout
self.timed_out = False
start = time.time()
try:
while True:
cut_at = predicate(self.buf)
if cut_at > 0:
break
if timeout is not None:
time_elapsed = time.time() - start
if time_elapsed > timeout:
raise socket.timeout
self._settimeout(timeout - time_elapsed)
data = self._recv(4096)
self._log_recv(data, False)
self.buf += data
if not data:
if raise_eof:
raise NetcatError("Connection dropped!")
cut_at = len(self.buf)
break
except KeyboardInterrupt:
self._print_header('\n======== Connection interrupted! ========')
raise
except socket.timeout:
self.timed_out = True
if self._raise_timeout:
raise NetcatTimeout()
return b''
except socket.error as exc:
raise NetcatError('Socket error: %r' % exc)
self._settimeout(self._timeout)
ret = self.buf[:cut_at]
self.buf = self.buf[cut_at:]
self._log_recv(ret, True)
return ret
|
[
"def",
"_recv_predicate",
"(",
"self",
",",
"predicate",
",",
"timeout",
"=",
"'default'",
",",
"raise_eof",
"=",
"True",
")",
":",
"if",
"timeout",
"==",
"'default'",
":",
"timeout",
"=",
"self",
".",
"_timeout",
"self",
".",
"timed_out",
"=",
"False",
"start",
"=",
"time",
".",
"time",
"(",
")",
"try",
":",
"while",
"True",
":",
"cut_at",
"=",
"predicate",
"(",
"self",
".",
"buf",
")",
"if",
"cut_at",
">",
"0",
":",
"break",
"if",
"timeout",
"is",
"not",
"None",
":",
"time_elapsed",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start",
"if",
"time_elapsed",
">",
"timeout",
":",
"raise",
"socket",
".",
"timeout",
"self",
".",
"_settimeout",
"(",
"timeout",
"-",
"time_elapsed",
")",
"data",
"=",
"self",
".",
"_recv",
"(",
"4096",
")",
"self",
".",
"_log_recv",
"(",
"data",
",",
"False",
")",
"self",
".",
"buf",
"+=",
"data",
"if",
"not",
"data",
":",
"if",
"raise_eof",
":",
"raise",
"NetcatError",
"(",
"\"Connection dropped!\"",
")",
"cut_at",
"=",
"len",
"(",
"self",
".",
"buf",
")",
"break",
"except",
"KeyboardInterrupt",
":",
"self",
".",
"_print_header",
"(",
"'\\n======== Connection interrupted! ========'",
")",
"raise",
"except",
"socket",
".",
"timeout",
":",
"self",
".",
"timed_out",
"=",
"True",
"if",
"self",
".",
"_raise_timeout",
":",
"raise",
"NetcatTimeout",
"(",
")",
"return",
"b''",
"except",
"socket",
".",
"error",
"as",
"exc",
":",
"raise",
"NetcatError",
"(",
"'Socket error: %r'",
"%",
"exc",
")",
"self",
".",
"_settimeout",
"(",
"self",
".",
"_timeout",
")",
"ret",
"=",
"self",
".",
"buf",
"[",
":",
"cut_at",
"]",
"self",
".",
"buf",
"=",
"self",
".",
"buf",
"[",
"cut_at",
":",
"]",
"self",
".",
"_log_recv",
"(",
"ret",
",",
"True",
")",
"return",
"ret"
] |
Receive until predicate returns a positive integer.
The returned number is the size to return.
|
[
"Receive",
"until",
"predicate",
"returns",
"a",
"positive",
"integer",
".",
"The",
"returned",
"number",
"is",
"the",
"size",
"to",
"return",
"."
] |
6147779766557ee4fafcbae683bdd2f74157e825
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L569-L618
|
train
|
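A minimal sketch of the predicate contract that _recv_predicate relies on: given the current buffer, the predicate returns 0 to keep reading, or a positive byte count marking where the returned chunk ends, with the remainder kept buffered. The HTTP-header predicate below is hypothetical, not part of nclib; it only illustrates the contract on in-memory chunks.

def until_blank_line(buf):  # hypothetical predicate, not part of nclib
    idx = buf.find(b'\r\n\r\n')
    return 0 if idx == -1 else idx + 4  # 0 means "keep reading"

buf = b''
for chunk in (b'HTTP/1.1 200 OK\r\nContent-L', b'ength: 0\r\n\r\nleftover'):
    buf += chunk                     # mimics self.buf += data
    cut_at = until_blank_line(buf)
    if cut_at > 0:
        print(buf[:cut_at])          # the chunk that would be returned
        print(buf[cut_at:])          # b'leftover' stays buffered
        break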
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.recv_until
|
def recv_until(self, s, max_size=None, timeout='default'):
"""
Receive data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
"""
self._print_recv_header(
'======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
if max_size is None:
max_size = 2 ** 62
def _predicate(buf):
try:
return min(buf.index(s) + len(s), max_size)
except ValueError:
return 0 if len(buf) < max_size else max_size
return self._recv_predicate(_predicate, timeout)
|
python
|
def recv_until(self, s, max_size=None, timeout='default'):
"""
Receive data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
"""
self._print_recv_header(
'======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
if max_size is None:
max_size = 2 ** 62
def _predicate(buf):
try:
return min(buf.index(s) + len(s), max_size)
except ValueError:
return 0 if len(buf) < max_size else max_size
return self._recv_predicate(_predicate, timeout)
|
[
"def",
"recv_until",
"(",
"self",
",",
"s",
",",
"max_size",
"=",
"None",
",",
"timeout",
"=",
"'default'",
")",
":",
"self",
".",
"_print_recv_header",
"(",
"'======== Receiving until {0}{timeout_text} ========'",
",",
"timeout",
",",
"repr",
"(",
"s",
")",
")",
"if",
"max_size",
"is",
"None",
":",
"max_size",
"=",
"2",
"**",
"62",
"def",
"_predicate",
"(",
"buf",
")",
":",
"try",
":",
"return",
"min",
"(",
"buf",
".",
"index",
"(",
"s",
")",
"+",
"len",
"(",
"s",
")",
",",
"max_size",
")",
"except",
"ValueError",
":",
"return",
"0",
"if",
"len",
"(",
"buf",
")",
"<",
"max_size",
"else",
"max_size",
"return",
"self",
".",
"_recv_predicate",
"(",
"_predicate",
",",
"timeout",
")"
] |
Receive data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
|
[
"Recieve",
"data",
"from",
"the",
"socket",
"until",
"the",
"given",
"substring",
"is",
"observed",
".",
"Data",
"in",
"the",
"same",
"datagram",
"as",
"the",
"substring",
"following",
"the",
"substring",
"will",
"not",
"be",
"returned",
"and",
"will",
"be",
"cached",
"for",
"future",
"receives",
"."
] |
6147779766557ee4fafcbae683bdd2f74157e825
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L652-L672
|
train
|
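A short usage sketch for recv_until, assuming a hypothetical line-oriented service on 127.0.0.1:4444; the host, port, and prompt bytes are illustrative only.

from nclib import Netcat

nc = Netcat(('127.0.0.1', 4444))            # hypothetical endpoint
banner = nc.recv_until(b'\n')               # up to and including the newline
prompt = nc.recv_until(b'> ', max_size=64)  # cut at 64 bytes if no match yet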
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.recv_all
|
def recv_all(self, timeout='default'):
"""
Return all data received until connection closes.
Aliases: read_all, readall, recvall
"""
self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
|
python
|
def recv_all(self, timeout='default'):
"""
Return all data received until connection closes.
Aliases: read_all, readall, recvall
"""
self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
|
[
"def",
"recv_all",
"(",
"self",
",",
"timeout",
"=",
"'default'",
")",
":",
"self",
".",
"_print_recv_header",
"(",
"'======== Receiving until close{timeout_text} ========'",
",",
"timeout",
")",
"return",
"self",
".",
"_recv_predicate",
"(",
"lambda",
"s",
":",
"0",
",",
"timeout",
",",
"raise_eof",
"=",
"False",
")"
] |
Return all data received until connection closes.
Aliases: read_all, readall, recvall
|
[
"Return",
"all",
"data",
"recieved",
"until",
"connection",
"closes",
"."
] |
6147779766557ee4fafcbae683bdd2f74157e825
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L674-L683
|
train
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.recv_exactly
|
def recv_exactly(self, n, timeout='default'):
"""
Receive exactly n bytes
Aliases: read_exactly, readexactly, recvexactly
"""
self._print_recv_header(
'======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
|
python
|
def recv_exactly(self, n, timeout='default'):
"""
Receive exactly n bytes
Aliases: read_exactly, readexactly, recvexactly
"""
self._print_recv_header(
'======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
|
[
"def",
"recv_exactly",
"(",
"self",
",",
"n",
",",
"timeout",
"=",
"'default'",
")",
":",
"self",
".",
"_print_recv_header",
"(",
"'======== Receiving until exactly {0}B{timeout_text} ========'",
",",
"timeout",
",",
"n",
")",
"return",
"self",
".",
"_recv_predicate",
"(",
"lambda",
"s",
":",
"n",
"if",
"len",
"(",
"s",
")",
">=",
"n",
"else",
"0",
",",
"timeout",
")"
] |
Receive exactly n bytes
Aliases: read_exactly, readexactly, recvexactly
|
[
"Recieve",
"exactly",
"n",
"bytes"
] |
6147779766557ee4fafcbae683bdd2f74157e825
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L685-L695
|
train
|
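recv_exactly is the natural fit for length-prefixed framing. A minimal sketch, assuming a hypothetical service on 127.0.0.1:4444 that sends a 4-byte big-endian length followed by the body:

import struct
from nclib import Netcat

nc = Netcat(('127.0.0.1', 4444))      # hypothetical endpoint
raw = nc.recv_exactly(4)              # blocks until exactly 4 bytes arrive
(length,) = struct.unpack('>I', raw)  # big-endian length prefix
body = nc.recv_exactly(length, timeout=5)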
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.send
|
def send(self, s):
"""
Sends all the given data to the socket.
Aliases: write, put, sendall, send_all
"""
self._print_header('======== Sending ({0}) ========'.format(len(s)))
self._log_send(s)
out = len(s)
while s:
s = s[self._send(s):]
return out
|
python
|
def send(self, s):
"""
Sends all the given data to the socket.
Aliases: write, put, sendall, send_all
"""
self._print_header('======== Sending ({0}) ========'.format(len(s)))
self._log_send(s)
out = len(s)
while s:
s = s[self._send(s):]
return out
|
[
"def",
"send",
"(",
"self",
",",
"s",
")",
":",
"self",
".",
"_print_header",
"(",
"'======== Sending ({0}) ========'",
".",
"format",
"(",
"len",
"(",
"s",
")",
")",
")",
"self",
".",
"_log_send",
"(",
"s",
")",
"out",
"=",
"len",
"(",
"s",
")",
"while",
"s",
":",
"s",
"=",
"s",
"[",
"self",
".",
"_send",
"(",
"s",
")",
":",
"]",
"return",
"out"
] |
Sends all the given data to the socket.
Aliases: write, put, sendall, send_all
|
[
"Sends",
"all",
"the",
"given",
"data",
"to",
"the",
"socket",
"."
] |
6147779766557ee4fafcbae683bdd2f74157e825
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L697-L710
|
train
|
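Because send loops over partial writes internally, callers can treat it as all-or-error; the return value is the length of the original input. A sketch against a hypothetical endpoint:

from nclib import Netcat

nc = Netcat(('127.0.0.1', 4444))  # hypothetical endpoint
payload = b'A' * 100000
sent = nc.send(payload)           # loops until every byte is on the wire
assert sent == len(payload)       # the full original length is returned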
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.interact
|
def interact(self, insock=sys.stdin, outsock=sys.stdout):
"""
Connects the socket to the terminal for user interaction.
Alternate input and output files may be specified.
This method cannot be used with a timeout.
Aliases: interactive, interaction
"""
self._print_header('======== Beginning interactive session ========')
if hasattr(outsock, 'buffer'):
outsock = outsock.buffer # pylint: disable=no-member
self.timed_out = False
save_verbose = self.verbose
self.verbose = 0
try:
if self.buf:
outsock.write(self.buf)
outsock.flush()
self.buf = b''
while True:
readable_socks = select(self.sock, insock)
for readable in readable_socks:
if readable is insock:
data = os.read(insock.fileno(), 4096)
self.send(data)
if not data:
raise NetcatError
else:
data = self.recv(timeout=None)
outsock.write(data)
outsock.flush()
if not data:
raise NetcatError
except KeyboardInterrupt:
self.verbose = save_verbose
self._print_header('\n======== Connection interrupted! ========')
raise
except (socket.error, NetcatError):
self.verbose = save_verbose
self._print_header('\n======== Connection dropped! ========')
finally:
self.verbose = save_verbose
|
python
|
def interact(self, insock=sys.stdin, outsock=sys.stdout):
"""
Connects the socket to the terminal for user interaction.
Alternate input and output files may be specified.
This method cannot be used with a timeout.
Aliases: interactive, interaction
"""
self._print_header('======== Beginning interactive session ========')
if hasattr(outsock, 'buffer'):
outsock = outsock.buffer # pylint: disable=no-member
self.timed_out = False
save_verbose = self.verbose
self.verbose = 0
try:
if self.buf:
outsock.write(self.buf)
outsock.flush()
self.buf = b''
while True:
readable_socks = select(self.sock, insock)
for readable in readable_socks:
if readable is insock:
data = os.read(insock.fileno(), 4096)
self.send(data)
if not data:
raise NetcatError
else:
data = self.recv(timeout=None)
outsock.write(data)
outsock.flush()
if not data:
raise NetcatError
except KeyboardInterrupt:
self.verbose = save_verbose
self._print_header('\n======== Connection interrupted! ========')
raise
except (socket.error, NetcatError):
self.verbose = save_verbose
self._print_header('\n======== Connection dropped! ========')
finally:
self.verbose = save_verbose
|
[
"def",
"interact",
"(",
"self",
",",
"insock",
"=",
"sys",
".",
"stdin",
",",
"outsock",
"=",
"sys",
".",
"stdout",
")",
":",
"self",
".",
"_print_header",
"(",
"'======== Beginning interactive session ========'",
")",
"if",
"hasattr",
"(",
"outsock",
",",
"'buffer'",
")",
":",
"outsock",
"=",
"outsock",
".",
"buffer",
"# pylint: disable=no-member",
"self",
".",
"timed_out",
"=",
"False",
"save_verbose",
"=",
"self",
".",
"verbose",
"self",
".",
"verbose",
"=",
"0",
"try",
":",
"if",
"self",
".",
"buf",
":",
"outsock",
".",
"write",
"(",
"self",
".",
"buf",
")",
"outsock",
".",
"flush",
"(",
")",
"self",
".",
"buf",
"=",
"b''",
"while",
"True",
":",
"readable_socks",
"=",
"select",
"(",
"self",
".",
"sock",
",",
"insock",
")",
"for",
"readable",
"in",
"readable_socks",
":",
"if",
"readable",
"is",
"insock",
":",
"data",
"=",
"os",
".",
"read",
"(",
"insock",
".",
"fileno",
"(",
")",
",",
"4096",
")",
"self",
".",
"send",
"(",
"data",
")",
"if",
"not",
"data",
":",
"raise",
"NetcatError",
"else",
":",
"data",
"=",
"self",
".",
"recv",
"(",
"timeout",
"=",
"None",
")",
"outsock",
".",
"write",
"(",
"data",
")",
"outsock",
".",
"flush",
"(",
")",
"if",
"not",
"data",
":",
"raise",
"NetcatError",
"except",
"KeyboardInterrupt",
":",
"self",
".",
"verbose",
"=",
"save_verbose",
"self",
".",
"_print_header",
"(",
"'\\n======== Connection interrupted! ========'",
")",
"raise",
"except",
"(",
"socket",
".",
"error",
",",
"NetcatError",
")",
":",
"self",
".",
"verbose",
"=",
"save_verbose",
"self",
".",
"_print_header",
"(",
"'\\n======== Connection dropped! ========'",
")",
"finally",
":",
"self",
".",
"verbose",
"=",
"save_verbose"
] |
Connects the socket to the terminal for user interaction.
Alternate input and output files may be specified.
This method cannot be used with a timeout.
Aliases: interactive, interaction
|
[
"Connects",
"the",
"socket",
"to",
"the",
"terminal",
"for",
"user",
"interaction",
".",
"Alternate",
"input",
"and",
"output",
"files",
"may",
"be",
"specified",
"."
] |
6147779766557ee4fafcbae683bdd2f74157e825
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L712-L758
|
train
|
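A common pattern with interact is to script the early protocol steps and then hand the live session to the terminal. A minimal sketch, assuming a hypothetical remote shell on 127.0.0.1:4444 (the prompt and password bytes are illustrative):

from nclib import Netcat

nc = Netcat(('127.0.0.1', 4444))  # hypothetical remote shell
nc.recv_until(b'password: ')      # script the login steps first...
nc.send_line(b'hunter2')
nc.interact()                     # ...then hand the session to the terminal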
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.recv_line
|
def recv_line(self, max_size=None, timeout='default', ending=None):
"""
Receive until the next newline, default "\\n". The newline string can
be changed by changing ``nc.LINE_ENDING``. The newline will be returned
as part of the string.
Aliases: recvline, readline, read_line, readln, recvln
"""
if ending is None:
ending = self.LINE_ENDING
return self.recv_until(ending, max_size, timeout)
|
python
|
def recv_line(self, max_size=None, timeout='default', ending=None):
"""
Receive until the next newline, default "\\n". The newline string can
be changed by changing ``nc.LINE_ENDING``. The newline will be returned
as part of the string.
Aliases: recvline, readline, read_line, readln, recvln
"""
if ending is None:
ending = self.LINE_ENDING
return self.recv_until(ending, max_size, timeout)
|
[
"def",
"recv_line",
"(",
"self",
",",
"max_size",
"=",
"None",
",",
"timeout",
"=",
"'default'",
",",
"ending",
"=",
"None",
")",
":",
"if",
"ending",
"is",
"None",
":",
"ending",
"=",
"self",
".",
"LINE_ENDING",
"return",
"self",
".",
"recv_until",
"(",
"ending",
",",
"max_size",
",",
"timeout",
")"
] |
Receive until the next newline, default "\\n". The newline string can
be changed by changing ``nc.LINE_ENDING``. The newline will be returned
as part of the string.
Aliases: recvline, readline, read_line, readln, recvln
|
[
"Recieve",
"until",
"the",
"next",
"newline",
"default",
"\\\\",
"n",
".",
"The",
"newline",
"string",
"can",
"be",
"changed",
"by",
"changing",
"nc",
".",
"LINE_ENDING",
".",
"The",
"newline",
"will",
"be",
"returned",
"as",
"part",
"of",
"the",
"string",
"."
] |
6147779766557ee4fafcbae683bdd2f74157e825
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L762-L772
|
train
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.send_line
|
def send_line(self, line, ending=None):
"""
Write the string to the wire, followed by a newline. The newline string
can be changed by changing ``nc.LINE_ENDING``.
Aliases: sendline, writeline, write_line, writeln, sendln
"""
if ending is None:
ending = self.LINE_ENDING
return self.send(line + ending)
|
python
|
def send_line(self, line, ending=None):
"""
Write the string to the wire, followed by a newline. The newline string
can be changed by changing ``nc.LINE_ENDING``.
Aliases: sendline, writeline, write_line, writeln, sendln
"""
if ending is None:
ending = self.LINE_ENDING
return self.send(line + ending)
|
[
"def",
"send_line",
"(",
"self",
",",
"line",
",",
"ending",
"=",
"None",
")",
":",
"if",
"ending",
"is",
"None",
":",
"ending",
"=",
"self",
".",
"LINE_ENDING",
"return",
"self",
".",
"send",
"(",
"line",
"+",
"ending",
")"
] |
Write the string to the wire, followed by a newline. The newline string
can be changed by changing ``nc.LINE_ENDING``.
Aliases: sendline, writeline, write_line, writeln, sendln
|
[
"Write",
"the",
"string",
"to",
"the",
"wire",
"followed",
"by",
"a",
"newline",
".",
"The",
"newline",
"string",
"can",
"be",
"changed",
"by",
"changing",
"nc",
".",
"LINE_ENDING",
"."
] |
6147779766557ee4fafcbae683bdd2f74157e825
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L774-L783
|
train
|
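send_line and recv_line share the same LINE_ENDING attribute, so overriding it on the instance switches both directions at once. A sketch, assuming a hypothetical CRLF-based service:

from nclib import Netcat

nc = Netcat(('127.0.0.1', 4444))  # hypothetical endpoint
nc.LINE_ENDING = b'\r\n'          # switch both helpers to CRLF
nc.send_line(b'PING')             # writes b'PING\r\n'
reply = nc.recv_line()            # reads up to and including b'\r\n'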
Alignak-monitoring/alignak
|
alignak/objects/resultmodulation.py
|
Resultmodulation.is_active
|
def is_active(self, timperiods):
"""
Know if this result modulation is active now
:return: True if we are in the period, otherwise False
:rtype: bool
"""
now = int(time.time())
timperiod = timperiods[self.modulation_period]
if not timperiod or timperiod.is_time_valid(now):
return True
return False
|
python
|
def is_active(self, timperiods):
"""
Know if this result modulation is active now
:return: True if we are in the period, otherwise False
:rtype: bool
"""
now = int(time.time())
timperiod = timperiods[self.modulation_period]
if not timperiod or timperiod.is_time_valid(now):
return True
return False
|
[
"def",
"is_active",
"(",
"self",
",",
"timperiods",
")",
":",
"now",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"timperiod",
"=",
"timperiods",
"[",
"self",
".",
"modulation_period",
"]",
"if",
"not",
"timperiod",
"or",
"timperiod",
".",
"is_time_valid",
"(",
"now",
")",
":",
"return",
"True",
"return",
"False"
] |
Know if this result modulation is active now
:return: True if we are in the period, otherwise False
:rtype: bool
|
[
"Know",
"if",
"this",
"result",
"modulation",
"is",
"active",
"now"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/resultmodulation.py#L93-L104
|
train
|
Alignak-monitoring/alignak
|
alignak/http/scheduler_interface.py
|
SchedulerInterface.object
|
def object(self, o_type, o_name=None):
"""Get an object from the scheduler.
The result is a serialized object which is a Json structure containing:
- content: the serialized object content
- __sys_python_module__: the python class of the returned object
The Alignak unserialize function of the alignak.misc.serialization package allows
restoring the initial object.
.. code-block:: python
from alignak.misc.serialization import unserialize
from alignak.objects.hostgroup import Hostgroup
raw_data = req.get("http://127.0.0.1:7768/object/hostgroup/allhosts")
print("Got: %s / %s" % (raw_data.status_code, raw_data.content))
assert raw_data.status_code == 200
object = raw_data.json()
group = unserialize(object, True)
assert group.__class__ == Hostgroup
assert group.get_name() == 'allhosts'
As an example:
{
"__sys_python_module__": "alignak.objects.hostgroup.Hostgroup",
"content": {
"uuid": "32248642-97dd-4f39-aaa2-5120112a765d",
"name": "",
"hostgroup_name": "allhosts",
"use": [],
"tags": [],
"alias": "All Hosts",
"notes": "",
"definition_order": 100,
"register": true,
"unknown_members": [],
"notes_url": "",
"action_url": "",
"imported_from": "unknown",
"conf_is_correct": true,
"configuration_errors": [],
"configuration_warnings": [],
"realm": "",
"downtimes": {},
"hostgroup_members": [],
"members": [
"553d47bc-27aa-426c-a664-49c4c0c4a249",
"f88093ca-e61b-43ff-a41e-613f7ad2cea2",
"df1e2e13-552d-43de-ad2a-fe80ad4ba979",
"d3d667dd-f583-4668-9f44-22ef3dcb53ad"
]
}
}
:param o_type: searched object type
:type o_type: str
:param o_name: searched object name (or uuid)
:type o_name: str
:return: serialized object information
:rtype: str
"""
o_found = self._get_object(o_type=o_type, o_name=o_name)
if not o_found:
return {'_status': u'ERR', '_message': u'Required %s not found.' % o_type}
return o_found
|
python
|
def object(self, o_type, o_name=None):
"""Get an object from the scheduler.
The result is a serialized object which is a Json structure containing:
- content: the serialized object content
- __sys_python_module__: the python class of the returned object
The Alignak unserialize function of the alignak.misc.serialization package allows
restoring the initial object.
.. code-block:: python
from alignak.misc.serialization import unserialize
from alignak.objects.hostgroup import Hostgroup
raw_data = req.get("http://127.0.0.1:7768/object/hostgroup/allhosts")
print("Got: %s / %s" % (raw_data.status_code, raw_data.content))
assert raw_data.status_code == 200
object = raw_data.json()
group = unserialize(object, True)
assert group.__class__ == Hostgroup
assert group.get_name() == 'allhosts'
As an example:
{
"__sys_python_module__": "alignak.objects.hostgroup.Hostgroup",
"content": {
"uuid": "32248642-97dd-4f39-aaa2-5120112a765d",
"name": "",
"hostgroup_name": "allhosts",
"use": [],
"tags": [],
"alias": "All Hosts",
"notes": "",
"definition_order": 100,
"register": true,
"unknown_members": [],
"notes_url": "",
"action_url": "",
"imported_from": "unknown",
"conf_is_correct": true,
"configuration_errors": [],
"configuration_warnings": [],
"realm": "",
"downtimes": {},
"hostgroup_members": [],
"members": [
"553d47bc-27aa-426c-a664-49c4c0c4a249",
"f88093ca-e61b-43ff-a41e-613f7ad2cea2",
"df1e2e13-552d-43de-ad2a-fe80ad4ba979",
"d3d667dd-f583-4668-9f44-22ef3dcb53ad"
]
}
}
:param o_type: searched object type
:type o_type: str
:param o_name: searched object name (or uuid)
:type o_name: str
:return: serialized object information
:rtype: str
"""
o_found = self._get_object(o_type=o_type, o_name=o_name)
if not o_found:
return {'_status': u'ERR', '_message': u'Required %s not found.' % o_type}
return o_found
|
[
"def",
"object",
"(",
"self",
",",
"o_type",
",",
"o_name",
"=",
"None",
")",
":",
"o_found",
"=",
"self",
".",
"_get_object",
"(",
"o_type",
"=",
"o_type",
",",
"o_name",
"=",
"o_name",
")",
"if",
"not",
"o_found",
":",
"return",
"{",
"'_status'",
":",
"u'ERR'",
",",
"'_message'",
":",
"u'Required %s not found.'",
"%",
"o_type",
"}",
"return",
"o_found"
] |
Get an object from the scheduler.
The result is a serialized object which is a Json structure containing:
- content: the serialized object content
- __sys_python_module__: the python class of the returned object
The Alignak unserialize function of the alignak.misc.serialization package allows
restoring the initial object.
.. code-block:: python
from alignak.misc.serialization import unserialize
from alignak.objects.hostgroup import Hostgroup
raw_data = req.get("http://127.0.0.1:7768/object/hostgroup/allhosts")
print("Got: %s / %s" % (raw_data.status_code, raw_data.content))
assert raw_data.status_code == 200
object = raw_data.json()
group = unserialize(object, True)
assert group.__class__ == Hostgroup
assert group.get_name() == 'allhosts'
As an example:
{
"__sys_python_module__": "alignak.objects.hostgroup.Hostgroup",
"content": {
"uuid": "32248642-97dd-4f39-aaa2-5120112a765d",
"name": "",
"hostgroup_name": "allhosts",
"use": [],
"tags": [],
"alias": "All Hosts",
"notes": "",
"definition_order": 100,
"register": true,
"unknown_members": [],
"notes_url": "",
"action_url": "",
"imported_from": "unknown",
"conf_is_correct": true,
"configuration_errors": [],
"configuration_warnings": [],
"realm": "",
"downtimes": {},
"hostgroup_members": [],
"members": [
"553d47bc-27aa-426c-a664-49c4c0c4a249",
"f88093ca-e61b-43ff-a41e-613f7ad2cea2",
"df1e2e13-552d-43de-ad2a-fe80ad4ba979",
"d3d667dd-f583-4668-9f44-22ef3dcb53ad"
]
}
}
:param o_type: searched object type
:type o_type: str
:param o_name: searched object name (or uuid)
:type o_name: str
:return: serialized object information
:rtype: str
|
[
"Get",
"an",
"object",
"from",
"the",
"scheduler",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/http/scheduler_interface.py#L48-L113
|
train
|
Alignak-monitoring/alignak
|
alignak/http/scheduler_interface.py
|
SchedulerInterface.monitoring_problems
|
def monitoring_problems(self):
"""Get Alignak scheduler monitoring status
Returns an object with the scheduler livesynthesis
and the known problems
:return: scheduler live synthesis
:rtype: dict
"""
if self.app.type != 'scheduler':
return {'_status': u'ERR',
'_message': u"This service is only available for a scheduler daemon"}
res = self.identity()
res.update(self.app.get_monitoring_problems())
return res
|
python
|
def monitoring_problems(self):
"""Get Alignak scheduler monitoring status
Returns an object with the scheduler livesynthesis
and the known problems
:return: scheduler live synthesis
:rtype: dict
"""
if self.app.type != 'scheduler':
return {'_status': u'ERR',
'_message': u"This service is only available for a scheduler daemon"}
res = self.identity()
res.update(self.app.get_monitoring_problems())
return res
|
[
"def",
"monitoring_problems",
"(",
"self",
")",
":",
"if",
"self",
".",
"app",
".",
"type",
"!=",
"'scheduler'",
":",
"return",
"{",
"'_status'",
":",
"u'ERR'",
",",
"'_message'",
":",
"u\"This service is only available for a scheduler daemon\"",
"}",
"res",
"=",
"self",
".",
"identity",
"(",
")",
"res",
".",
"update",
"(",
"self",
".",
"app",
".",
"get_monitoring_problems",
"(",
")",
")",
"return",
"res"
] |
Get Alignak scheduler monitoring status
Returns an object with the scheduler livesynthesis
and the known problems
:return: scheduler live synthesis
:rtype: dict
|
[
"Get",
"Alignak",
"scheduler",
"monitoring",
"status"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/http/scheduler_interface.py#L329-L344
|
train
|
Alignak-monitoring/alignak
|
alignak/http/scheduler_interface.py
|
SchedulerInterface._wait_new_conf
|
def _wait_new_conf(self):
"""Ask the scheduler to drop its configuration and wait for a new one.
This overrides the default method from GenericInterface
:return: None
"""
# Stop the scheduling loop
self.app.sched.stop_scheduling()
super(SchedulerInterface, self)._wait_new_conf()
|
python
|
def _wait_new_conf(self):
"""Ask the scheduler to drop its configuration and wait for a new one.
This overrides the default method from GenericInterface
:return: None
"""
# Stop the scheduling loop
self.app.sched.stop_scheduling()
super(SchedulerInterface, self)._wait_new_conf()
|
[
"def",
"_wait_new_conf",
"(",
"self",
")",
":",
"# Stop the scheduling loop",
"self",
".",
"app",
".",
"sched",
".",
"stop_scheduling",
"(",
")",
"super",
"(",
"SchedulerInterface",
",",
"self",
")",
".",
"_wait_new_conf",
"(",
")"
] |
Ask the scheduler to drop its configuration and wait for a new one.
This overrides the default method from GenericInterface
:return: None
|
[
"Ask",
"the",
"scheduler",
"to",
"drop",
"its",
"configuration",
"and",
"wait",
"for",
"a",
"new",
"one",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/http/scheduler_interface.py#L357-L366
|
train
|
Alignak-monitoring/alignak
|
alignak/http/scheduler_interface.py
|
SchedulerInterface._initial_broks
|
def _initial_broks(self, broker_name):
"""Get initial_broks from the scheduler
This is used by the brokers to prepare the initial status broks
This does not send broks, it only performs scheduler internal processing. Then the broker
must use the *_broks* API to get all the stuff
:param broker_name: broker name, used to filter broks
:type broker_name: str
:return: None
"""
with self.app.conf_lock:
logger.info("A new broker just connected : %s", broker_name)
return self.app.sched.fill_initial_broks(broker_name)
|
python
|
def _initial_broks(self, broker_name):
"""Get initial_broks from the scheduler
This is used by the brokers to prepare the initial status broks
This does not send broks, it only performs scheduler internal processing. Then the broker
must use the *_broks* API to get all the stuff
:param broker_name: broker name, used to filter broks
:type broker_name: str
:return: None
"""
with self.app.conf_lock:
logger.info("A new broker just connected : %s", broker_name)
return self.app.sched.fill_initial_broks(broker_name)
|
[
"def",
"_initial_broks",
"(",
"self",
",",
"broker_name",
")",
":",
"with",
"self",
".",
"app",
".",
"conf_lock",
":",
"logger",
".",
"info",
"(",
"\"A new broker just connected : %s\"",
",",
"broker_name",
")",
"return",
"self",
".",
"app",
".",
"sched",
".",
"fill_initial_broks",
"(",
"broker_name",
")"
] |
Get initial_broks from the scheduler
This is used by the brokers to prepare the initial status broks
This does not send broks, it only performs scheduler internal processing. Then the broker
must use the *_broks* API to get all the stuff
:param broker_name: broker name, used to filter broks
:type broker_name: str
:return: None
|
[
"Get",
"initial_broks",
"from",
"the",
"scheduler"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/http/scheduler_interface.py#L370-L384
|
train
|
Alignak-monitoring/alignak
|
alignak/http/scheduler_interface.py
|
SchedulerInterface._broks
|
def _broks(self, broker_name):
"""Get the broks from a scheduler, used by brokers
This is used by the brokers to get the broks list of a scheduler
:param broker_name: broker name, used to filter broks
:type broker_name: str
:return: serialized brok list
:rtype: dict
"""
logger.debug("Getting broks for %s from the scheduler", broker_name)
for broker_link in list(self.app.brokers.values()):
if broker_name == broker_link.name:
break
else:
logger.warning("Requesting broks for an unknown broker: %s", broker_name)
return {}
# Now get the broks for this specific broker
with self.app.broks_lock:
res = self.app.get_broks(broker_name)
return serialize(res, True)
|
python
|
def _broks(self, broker_name):
"""Get the broks from a scheduler, used by brokers
This is used by the brokers to get the broks list of a scheduler
:param broker_name: broker name, used to filter broks
:type broker_name: str
:return: serialized brok list
:rtype: dict
"""
logger.debug("Getting broks for %s from the scheduler", broker_name)
for broker_link in list(self.app.brokers.values()):
if broker_name == broker_link.name:
break
else:
logger.warning("Requesting broks for an unknown broker: %s", broker_name)
return {}
# Now get the broks for this specific broker
with self.app.broks_lock:
res = self.app.get_broks(broker_name)
return serialize(res, True)
|
[
"def",
"_broks",
"(",
"self",
",",
"broker_name",
")",
":",
"logger",
".",
"debug",
"(",
"\"Getting broks for %s from the scheduler\"",
",",
"broker_name",
")",
"for",
"broker_link",
"in",
"list",
"(",
"self",
".",
"app",
".",
"brokers",
".",
"values",
"(",
")",
")",
":",
"if",
"broker_name",
"==",
"broker_link",
".",
"name",
":",
"break",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Requesting broks for an unknown broker: %s\"",
",",
"broker_name",
")",
"return",
"{",
"}",
"# Now get the broks for this specific broker",
"with",
"self",
".",
"app",
".",
"broks_lock",
":",
"res",
"=",
"self",
".",
"app",
".",
"get_broks",
"(",
"broker_name",
")",
"return",
"serialize",
"(",
"res",
",",
"True",
")"
] |
Get the broks from a scheduler, used by brokers
This is used by the brokers to get the broks list of a scheduler
:param broker_name: broker name, used to filter broks
:type broker_name: str
:return: serialized brok list
:rtype: dict
|
[
"Get",
"the",
"broks",
"from",
"a",
"scheduler",
"used",
"by",
"brokers"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/http/scheduler_interface.py#L388-L410
|
train
|
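_broks relies on Python's for/else construct: the else branch runs only when the loop completes without a break. A minimal sketch of the same lookup pattern on plain data (names and values are illustrative):

known_brokers = {'broker-master': object(), 'broker-spare': object()}

def lookup(name):
    for broker_name in known_brokers:
        if broker_name == name:
            break
    else:                 # runs only if the loop ended without break
        print('unknown broker: %s' % name)
        return {}
    return {'broks_for': broker_name}

print(lookup('broker-master'))   # {'broks_for': 'broker-master'}
print(lookup('nope'))            # prints the warning, returns {}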
Alignak-monitoring/alignak
|
alignak/http/scheduler_interface.py
|
SchedulerInterface._get_objects
|
def _get_objects(self, o_type):
"""Get an object list from the scheduler
Returns None if the required object type (`o_type`) is not known or an exception is raised.
Else returns the objects list
:param o_type: searched object type
:type o_type: str
:return: objects list
:rtype: alignak.objects.item.Items
"""
if o_type not in [t for t in self.app.sched.pushed_conf.types_creations]:
return None
try:
_, _, strclss, _, _ = self.app.sched.pushed_conf.types_creations[o_type]
o_list = getattr(self.app.sched, strclss)
except Exception: # pylint: disable=broad-except
return None
return o_list
|
python
|
def _get_objects(self, o_type):
"""Get an object list from the scheduler
Returns None if the required object type (`o_type`) is not known or an exception is raised.
Else returns the objects list
:param o_type: searched object type
:type o_type: str
:return: objects list
:rtype: alignak.objects.item.Items
"""
if o_type not in [t for t in self.app.sched.pushed_conf.types_creations]:
return None
try:
_, _, strclss, _, _ = self.app.sched.pushed_conf.types_creations[o_type]
o_list = getattr(self.app.sched, strclss)
except Exception: # pylint: disable=broad-except
return None
return o_list
|
[
"def",
"_get_objects",
"(",
"self",
",",
"o_type",
")",
":",
"if",
"o_type",
"not",
"in",
"[",
"t",
"for",
"t",
"in",
"self",
".",
"app",
".",
"sched",
".",
"pushed_conf",
".",
"types_creations",
"]",
":",
"return",
"None",
"try",
":",
"_",
",",
"_",
",",
"strclss",
",",
"_",
",",
"_",
"=",
"self",
".",
"app",
".",
"sched",
".",
"pushed_conf",
".",
"types_creations",
"[",
"o_type",
"]",
"o_list",
"=",
"getattr",
"(",
"self",
".",
"app",
".",
"sched",
",",
"strclss",
")",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"return",
"None",
"return",
"o_list"
] |
Get an object list from the scheduler
Returns None if the required object type (`o_type`) is not known or an exception is raised.
Else returns the objects list
:param o_type: searched object type
:type o_type: str
:return: objects list
:rtype: alignak.objects.item.Items
|
[
"Get",
"an",
"object",
"list",
"from",
"the",
"scheduler"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/http/scheduler_interface.py#L498-L518
|
train
|
Alignak-monitoring/alignak
|
alignak/http/scheduler_interface.py
|
SchedulerInterface._get_object
|
def _get_object(self, o_type, o_name=None):
"""Get an object from the scheduler
Returns None if the required object type (`o_type`) is not known.
Else returns the serialized object if found. The object is searched first with
o_name as its name and then with o_name as its uuid.
:param o_type: searched object type
:type o_type: str
:param o_name: searched object name
:type o_name: str
:return: serialized object
:rtype: str
"""
try:
o_found = None
o_list = self._get_objects(o_type)
if o_list:
if o_name is None:
return serialize(o_list, True) if o_list else None
# We expected a name...
o_found = o_list.find_by_name(o_name)
if not o_found:
# ... but perhaps we got an object uuid
o_found = o_list[o_name]
except Exception: # pylint: disable=broad-except
return None
return serialize(o_found, True) if o_found else None
|
python
|
def _get_object(self, o_type, o_name=None):
"""Get an object from the scheduler
Returns None if the required object type (`o_type`) is not known.
Else returns the serialized object if found. The object is searched first with
o_name as its name and then with o_name as its uuid.
:param o_type: searched object type
:type o_type: str
:param o_name: searched object name
:type o_name: str
:return: serialized object
:rtype: str
"""
try:
o_found = None
o_list = self._get_objects(o_type)
if o_list:
if o_name is None:
return serialize(o_list, True) if o_list else None
# We expected a name...
o_found = o_list.find_by_name(o_name)
if not o_found:
# ... but perhaps we got an object uuid
o_found = o_list[o_name]
except Exception: # pylint: disable=broad-except
return None
return serialize(o_found, True) if o_found else None
|
[
"def",
"_get_object",
"(",
"self",
",",
"o_type",
",",
"o_name",
"=",
"None",
")",
":",
"try",
":",
"o_found",
"=",
"None",
"o_list",
"=",
"self",
".",
"_get_objects",
"(",
"o_type",
")",
"if",
"o_list",
":",
"if",
"o_name",
"is",
"None",
":",
"return",
"serialize",
"(",
"o_list",
",",
"True",
")",
"if",
"o_list",
"else",
"None",
"# We expected a name...",
"o_found",
"=",
"o_list",
".",
"find_by_name",
"(",
"o_name",
")",
"if",
"not",
"o_found",
":",
"# ... but perharps we got an object uuid",
"o_found",
"=",
"o_list",
"[",
"o_name",
"]",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"return",
"None",
"return",
"serialize",
"(",
"o_found",
",",
"True",
")",
"if",
"o_found",
"else",
"None"
] |
Get an object from the scheduler
Returns None if the required object type (`o_type`) is not known.
Else returns the serialized object if found. The object is searched first with
o_name as its name and then with o_name as its uuid.
:param o_type: searched object type
:type o_type: str
:param o_name: searched object name
:type o_name: str
:return: serialized object
:rtype: str
|
[
"Get",
"an",
"object",
"from",
"the",
"scheduler"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/http/scheduler_interface.py#L520-L547
|
train
|
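The name-first, uuid-second fallback in _get_object can be sketched on a plain dict standing in for an alignak Items container; the Items class below is a hypothetical stand-in, not the real alignak.objects.item.Items:

class Items(dict):  # hypothetical stand-in for an alignak Items container
    def find_by_name(self, name):
        return next((o for o in self.values() if o['name'] == name), None)

items = Items({'a1b2-c3d4': {'name': 'localhost'}})
for key in ('localhost', 'a1b2-c3d4'):
    found = items.find_by_name(key) or items.get(key)  # name, then uuid
    print(found)  # both lookups reach the same object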
Alignak-monitoring/alignak
|
alignak/objects/module.py
|
Module.is_a_module
|
def is_a_module(self, module_type):
"""
Is the module of the required type?
:param module_type: module type to check
:type module_type: str
:return: True / False
"""
if hasattr(self, 'type'):
return module_type in self.type
return module_type in self.module_types
|
python
|
def is_a_module(self, module_type):
"""
Is the module of the required type?
:param module_type: module type to check
:type module_type: str
:return: True / False
"""
if hasattr(self, 'type'):
return module_type in self.type
return module_type in self.module_types
|
[
"def",
"is_a_module",
"(",
"self",
",",
"module_type",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'type'",
")",
":",
"return",
"module_type",
"in",
"self",
".",
"type",
"return",
"module_type",
"in",
"self",
".",
"module_types"
] |
Is the module of the required type?
:param module_type: module type to check
:type module_type: str
:return: True / False
|
[
"Is",
"the",
"module",
"of",
"the",
"required",
"type?"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/module.py#L198-L208
|
train
|
Alignak-monitoring/alignak
|
alignak/objects/module.py
|
Module.serialize
|
def serialize(self):
"""A module may have some properties that are not defined in the class properties list.
Serializing a module is the same as serializing an Item but we also include all the
existing properties that are not defined in the properties or running_properties
class list.
We must also exclude the reference to the daemon that loaded the module!
"""
res = super(Module, self).serialize()
cls = self.__class__
for prop in self.__dict__:
if prop in cls.properties or prop in cls.running_properties or prop in ['properties',
'my_daemon']:
continue
res[prop] = getattr(self, prop)
return res
|
python
|
def serialize(self):
"""A module may have some properties that are not defined in the class properties list.
Serializing a module is the same as serializing an Item but we also include all the
existing properties that are not defined in the properties or running_properties
class list.
We must also exclude the reference to the daemon that loaded the module!
"""
res = super(Module, self).serialize()
cls = self.__class__
for prop in self.__dict__:
if prop in cls.properties or prop in cls.running_properties or prop in ['properties',
'my_daemon']:
continue
res[prop] = getattr(self, prop)
return res
|
[
"def",
"serialize",
"(",
"self",
")",
":",
"res",
"=",
"super",
"(",
"Module",
",",
"self",
")",
".",
"serialize",
"(",
")",
"cls",
"=",
"self",
".",
"__class__",
"for",
"prop",
"in",
"self",
".",
"__dict__",
":",
"if",
"prop",
"in",
"cls",
".",
"properties",
"or",
"prop",
"in",
"cls",
".",
"running_properties",
"or",
"prop",
"in",
"[",
"'properties'",
",",
"'my_daemon'",
"]",
":",
"continue",
"res",
"[",
"prop",
"]",
"=",
"getattr",
"(",
"self",
",",
"prop",
")",
"return",
"res"
] |
A module may have some properties that are not defined in the class properties list.
Serializing a module is the same as serializing an Item but we also include all the
existing properties that are not defined in the properties or running_properties
class list.
We must also exclude the reference to the daemon that loaded the module!
|
[
"A",
"module",
"may",
"have",
"some",
"properties",
"that",
"are",
"not",
"defined",
"in",
"the",
"class",
"properties",
"list",
".",
"Serializing",
"a",
"module",
"is",
"the",
"same",
"as",
"serializing",
"an",
"Item",
"but",
"we",
"also",
"also",
"include",
"all",
"the",
"existing",
"properties",
"that",
"are",
"not",
"defined",
"in",
"the",
"properties",
"or",
"running_properties",
"class",
"list",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/module.py#L210-L227
|
train
|
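The "serialize everything not declared on the class" idea can be shown without the alignak Item base class; FakeModule below is a hypothetical stand-in that only demonstrates the extra-properties sweep (the real method first calls the parent serialize for declared properties):

class FakeModule:  # hypothetical stand-in, no alignak Item base class
    properties = {'module_alias'}
    running_properties = set()

    def serialize(self):
        skip = set(self.properties) | set(self.running_properties) \
               | {'properties', 'my_daemon'}
        return {k: v for k, v in self.__dict__.items() if k not in skip}

mod = FakeModule()
mod.module_alias, mod.host, mod.my_daemon = 'backend', '127.0.0.1', object()
print(mod.serialize())  # {'host': '127.0.0.1'}: undeclared extras survive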
Alignak-monitoring/alignak
|
alignak/objects/module.py
|
Modules.linkify_s_by_plug
|
def linkify_s_by_plug(self):
"""Link a module to some other modules
:return: None
"""
for module in self:
new_modules = []
for related in getattr(module, 'modules', []):
related = related.strip()
if not related:
continue
o_related = self.find_by_name(related)
if o_related is not None:
new_modules.append(o_related.uuid)
else:
self.add_error("the module '%s' for the module '%s' is unknown!"
% (related, module.get_name()))
module.modules = new_modules
|
python
|
def linkify_s_by_plug(self):
"""Link a module to some other modules
:return: None
"""
for module in self:
new_modules = []
for related in getattr(module, 'modules', []):
related = related.strip()
if not related:
continue
o_related = self.find_by_name(related)
if o_related is not None:
new_modules.append(o_related.uuid)
else:
self.add_error("the module '%s' for the module '%s' is unknown!"
% (related, module.get_name()))
module.modules = new_modules
|
[
"def",
"linkify_s_by_plug",
"(",
"self",
")",
":",
"for",
"module",
"in",
"self",
":",
"new_modules",
"=",
"[",
"]",
"for",
"related",
"in",
"getattr",
"(",
"module",
",",
"'modules'",
",",
"[",
"]",
")",
":",
"related",
"=",
"related",
".",
"strip",
"(",
")",
"if",
"not",
"related",
":",
"continue",
"o_related",
"=",
"self",
".",
"find_by_name",
"(",
"related",
")",
"if",
"o_related",
"is",
"not",
"None",
":",
"new_modules",
".",
"append",
"(",
"o_related",
".",
"uuid",
")",
"else",
":",
"self",
".",
"add_error",
"(",
"\"the module '%s' for the module '%s' is unknown!\"",
"%",
"(",
"related",
",",
"module",
".",
"get_name",
"(",
")",
")",
")",
"module",
".",
"modules",
"=",
"new_modules"
] |
Link a module to some other modules
:return: None
|
[
"Link",
"a",
"module",
"to",
"some",
"other",
"modules"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/module.py#L245-L262
|
train
|
Alignak-monitoring/alignak
|
alignak/daterange.py
|
get_start_of_day
|
def get_start_of_day(year, month, day):
"""Get the timestamp associated to the first second of a specific day
:param year: date year
:type year: int
:param month: date month
:type month: int
:param day: date day
:type day: int
:return: timestamp
:rtype: int
"""
# DST is not known in the provided date
try:
timestamp = time.mktime((year, month, day, 00, 00, 00, 0, 0, -1))
except (OverflowError, ValueError):
# Windows mktime sometimes crashes on (1970, 1, 1, ...)
timestamp = 0.0
return int(timestamp)
|
python
|
def get_start_of_day(year, month, day):
"""Get the timestamp associated to the first second of a specific day
:param year: date year
:type year: int
:param month: date month
:type month: int
:param day: date day
:type day: int
:return: timestamp
:rtype: int
"""
# DST is not known in the provided date
try:
timestamp = time.mktime((year, month, day, 00, 00, 00, 0, 0, -1))
except (OverflowError, ValueError):
# Windows mktime sometimes crashes on (1970, 1, 1, ...)
timestamp = 0.0
return int(timestamp)
|
[
"def",
"get_start_of_day",
"(",
"year",
",",
"month",
",",
"day",
")",
":",
"# DST is not known in the provided date",
"try",
":",
"timestamp",
"=",
"time",
".",
"mktime",
"(",
"(",
"year",
",",
"month",
",",
"day",
",",
"00",
",",
"00",
",",
"00",
",",
"0",
",",
"0",
",",
"-",
"1",
")",
")",
"except",
"(",
"OverflowError",
",",
"ValueError",
")",
":",
"# Windows mktime sometimes crashes on (1970, 1, 1, ...)",
"timestamp",
"=",
"0.0",
"return",
"int",
"(",
"timestamp",
")"
] |
Get the timestamp associated to the first second of a specific day
:param year: date year
:type year: int
:param month: date month
:type month: int
:param day: date day
:type day: int
:return: timestamp
:rtype: int
|
[
"Get",
"the",
"timestamp",
"associated",
"to",
"the",
"first",
"second",
"of",
"a",
"specific",
"day"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L63-L82
|
train
|
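A short demonstration of the mktime call behind get_start_of_day; the date is arbitrary and the result is local time:

import time

# The final -1 (tm_isdst) lets mktime decide whether DST applies that day:
start = int(time.mktime((2015, 7, 26, 0, 0, 0, 0, 0, -1)))
print(time.localtime(start)[:6])  # (2015, 7, 26, 0, 0, 0) in local time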
Alignak-monitoring/alignak
|
alignak/daterange.py
|
get_end_of_day
|
def get_end_of_day(year, month, day):
"""Get the timestamp associated to the last second of a specific day
:param year: date year
:type year: int
:param month: date month (int)
:type month: int
:param day: date day
:type day: int
:return: timestamp
:rtype: int
"""
# DST is not known in the provided date
timestamp = time.mktime((year, month, day, 23, 59, 59, 0, 0, -1))
return int(timestamp)
|
python
|
def get_end_of_day(year, month, day):
"""Get the timestamp associated to the last second of a specific day
:param year: date year
:type year: int
:param month: date month (int)
:type month: int
:param day: date day
:type day: int
:return: timestamp
:rtype: int
"""
# DST is not known in the provided date
timestamp = time.mktime((year, month, day, 23, 59, 59, 0, 0, -1))
return int(timestamp)
|
[
"def",
"get_end_of_day",
"(",
"year",
",",
"month",
",",
"day",
")",
":",
"# DST is not known in the provided date",
"timestamp",
"=",
"time",
".",
"mktime",
"(",
"(",
"year",
",",
"month",
",",
"day",
",",
"23",
",",
"59",
",",
"59",
",",
"0",
",",
"0",
",",
"-",
"1",
")",
")",
"return",
"int",
"(",
"timestamp",
")"
] |
Get the timestamp associated to the last second of a specific day
:param year: date year
:type year: int
:param month: date month (int)
:type month: int
:param day: date day
:type day: int
:return: timestamp
:rtype: int
|
[
"Get",
"the",
"timestamp",
"associated",
"to",
"the",
"last",
"second",
"of",
"a",
"specific",
"day"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L85-L99
|
train
|
Alignak-monitoring/alignak
|
alignak/daterange.py
|
get_sec_from_morning
|
def get_sec_from_morning(timestamp):
"""Get the number of seconds elapsed since the beginning of the
day deduced from the provided timestamp
:param timestamp: time to use for computation
:type timestamp: int
:return: timestamp
:rtype: int
"""
t_lt = time.localtime(timestamp)
return t_lt.tm_hour * 3600 + t_lt.tm_min * 60 + t_lt.tm_sec
|
python
|
def get_sec_from_morning(timestamp):
"""Get the number of seconds elapsed since the beginning of the
day deduced from the provided timestamp
:param timestamp: time to use for computation
:type timestamp: int
:return: timestamp
:rtype: int
"""
t_lt = time.localtime(timestamp)
return t_lt.tm_hour * 3600 + t_lt.tm_min * 60 + t_lt.tm_sec
|
[
"def",
"get_sec_from_morning",
"(",
"timestamp",
")",
":",
"t_lt",
"=",
"time",
".",
"localtime",
"(",
"timestamp",
")",
"return",
"t_lt",
".",
"tm_hour",
"*",
"3600",
"+",
"t_lt",
".",
"tm_min",
"*",
"60",
"+",
"t_lt",
".",
"tm_sec"
] |
Get the number of seconds elapsed since the beginning of the
day deduced from the provided timestamp
:param timestamp: time to use for computation
:type timestamp: int
:return: timestamp
:rtype: int
|
[
"Get",
"the",
"number",
"of",
"seconds",
"elapsed",
"since",
"the",
"beginning",
"of",
"the",
"day",
"deducted",
"from",
"the",
"provided",
"timestamp"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L126-L136
|
train
|
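The arithmetic in get_sec_from_morning, spelled out on a concrete local time:

import time

t_lt = time.localtime()  # now, in local time
seconds = t_lt.tm_hour * 3600 + t_lt.tm_min * 60 + t_lt.tm_sec
# 09:30:15 local time would give 9*3600 + 30*60 + 15 = 34215
print(seconds)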
Alignak-monitoring/alignak
|
alignak/daterange.py
|
find_day_by_weekday_offset
|
def find_day_by_weekday_offset(year, month, weekday, offset):
"""Get the day number based on a date and offset
:param year: date year
:type year: int
:param month: date month
:type month: int
:param weekday: date week day
:type weekday: int
:param offset: offset (-1 is last, 1 is first etc)
:type offset: int
:return: day number in the month
:rtype: int
>>> find_day_by_weekday_offset(2010, 7, 1, -1)
27
"""
# thanks calendar :)
cal = calendar.monthcalendar(year, month)
# If we ask for a -1 day, just reverse cal
if offset < 0:
offset = abs(offset)
cal.reverse()
# ok go for it
nb_found = 0
try:
for i in range(0, offset + 1):
# in cal, 0 means "there is no day here :)"
if cal[i][weekday] != 0:
nb_found += 1
if nb_found == offset:
return cal[i][weekday]
return None
except KeyError:
return None
|
python
|
def find_day_by_weekday_offset(year, month, weekday, offset):
"""Get the day number based on a date and offset
:param year: date year
:type year: int
:param month: date month
:type month: int
:param weekday: date week day
:type weekday: int
:param offset: offset (-1 is last, 1 is first etc)
:type offset: int
:return: day number in the month
:rtype: int
>>> find_day_by_weekday_offset(2010, 7, 1, -1)
27
"""
# thanks calendar :)
cal = calendar.monthcalendar(year, month)
# If we ask for a -1 day, just reverse cal
if offset < 0:
offset = abs(offset)
cal.reverse()
# ok go for it
nb_found = 0
try:
for i in range(0, offset + 1):
# in cal, 0 means "there is no day here :)"
if cal[i][weekday] != 0:
nb_found += 1
if nb_found == offset:
return cal[i][weekday]
return None
except KeyError:
return None
|
[
"def",
"find_day_by_weekday_offset",
"(",
"year",
",",
"month",
",",
"weekday",
",",
"offset",
")",
":",
"# thanks calendar :)",
"cal",
"=",
"calendar",
".",
"monthcalendar",
"(",
"year",
",",
"month",
")",
"# If we ask for a -1 day, just reverse cal",
"if",
"offset",
"<",
"0",
":",
"offset",
"=",
"abs",
"(",
"offset",
")",
"cal",
".",
"reverse",
"(",
")",
"# ok go for it",
"nb_found",
"=",
"0",
"try",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"offset",
"+",
"1",
")",
":",
"# in cal 0 mean \"there are no day here :)\"",
"if",
"cal",
"[",
"i",
"]",
"[",
"weekday",
"]",
"!=",
"0",
":",
"nb_found",
"+=",
"1",
"if",
"nb_found",
"==",
"offset",
":",
"return",
"cal",
"[",
"i",
"]",
"[",
"weekday",
"]",
"return",
"None",
"except",
"KeyError",
":",
"return",
"None"
] |
Get the day number based on a date and offset
:param year: date year
:type year: int
:param month: date month
:type month: int
:param weekday: date week day
:type weekday: int
:param offset: offset (-1 is last, 1 is first etc)
:type offset: int
:return: day number in the month
:rtype: int
>>> find_day_by_weekday_offset(2010, 7, 1, -1)
27
|
[
"Get",
"the",
"day",
"number",
"based",
"on",
"a",
"date",
"and",
"offset"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L139-L175
|
train
|
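The calendar.monthcalendar layout that drives find_day_by_weekday_offset, reproducing the doctest's answer for the last Tuesday (weekday=1) of July 2010:

import calendar

# monthcalendar yields one list per week; days outside the month are 0.
cal = calendar.monthcalendar(2010, 7)
cal.reverse()                                    # offset -1 scans backwards
print(next(week[1] for week in cal if week[1]))  # 27, the last Tuesday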
Alignak-monitoring/alignak
|
alignak/daterange.py
|
find_day_by_offset
|
def find_day_by_offset(year, month, offset):
"""Get the month day based on date and offset
:param year: date year
:type year: int
:param month: date month
:type month: int
:param offset: offset in day to compute (usually negative)
:type offset: int
:return: day number in the month
:rtype: int
>>> find_day_by_offset(2015, 7, -1)
31
"""
(_, days_in_month) = calendar.monthrange(year, month)
if offset >= 0:
return min(offset, days_in_month)
return max(1, days_in_month + offset + 1)
|
python
|
def find_day_by_offset(year, month, offset):
"""Get the month day based on date and offset
:param year: date year
:type year: int
:param month: date month
:type month: int
:param offset: offset in day to compute (usually negative)
:type offset: int
:return: day number in the month
:rtype: int
>>> find_day_by_offset(2015, 7, -1)
31
"""
(_, days_in_month) = calendar.monthrange(year, month)
if offset >= 0:
return min(offset, days_in_month)
return max(1, days_in_month + offset + 1)
|
[
"def",
"find_day_by_offset",
"(",
"year",
",",
"month",
",",
"offset",
")",
":",
"(",
"_",
",",
"days_in_month",
")",
"=",
"calendar",
".",
"monthrange",
"(",
"year",
",",
"month",
")",
"if",
"offset",
">=",
"0",
":",
"return",
"min",
"(",
"offset",
",",
"days_in_month",
")",
"return",
"max",
"(",
"1",
",",
"days_in_month",
"+",
"offset",
"+",
"1",
")"
] |
Get the month day based on date and offset
:param year: date year
:type year: int
:param month: date month
:type month: int
:param offset: offset in day to compute (usually negative)
:type offset: int
:return: day number in the month
:rtype: int
>>> find_day_by_offset(2015, 7, -1)
31
|
[
"Get",
"the",
"month",
"day",
"based",
"on",
"date",
"and",
"offset"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L178-L197
|
train
|
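The monthrange arithmetic of find_day_by_offset on a few sample offsets:

import calendar

_, days_in_month = calendar.monthrange(2015, 7)  # 31 days in July 2015
for offset in (-1, -2, 5):
    day = min(offset, days_in_month) if offset >= 0 \
        else max(1, days_in_month + offset + 1)
    print(offset, day)  # -1 -> 31, -2 -> 30, 5 -> 5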
Alignak-monitoring/alignak
|
alignak/daterange.py
|
Timerange.is_time_valid
|
def is_time_valid(self, timestamp):
"""Check if time is valid for this Timerange
If sec_from_morning is not provided, get the value.
:param timestamp: time to check
:type timestamp: int
:return: True if time is valid (in interval), False otherwise
:rtype: bool
"""
sec_from_morning = get_sec_from_morning(timestamp)
return (self.is_valid and
self.hstart * 3600 + self.mstart * 60 <=
sec_from_morning <=
self.hend * 3600 + self.mend * 60)
|
python
|
def is_time_valid(self, timestamp):
"""Check if time is valid for this Timerange
If sec_from_morning is not provided, get the value.
:param timestamp: time to check
:type timestamp: int
:return: True if time is valid (in interval), False otherwise
:rtype: bool
"""
sec_from_morning = get_sec_from_morning(timestamp)
return (self.is_valid and
self.hstart * 3600 + self.mstart * 60 <=
sec_from_morning <=
self.hend * 3600 + self.mend * 60)
|
[
"def",
"is_time_valid",
"(",
"self",
",",
"timestamp",
")",
":",
"sec_from_morning",
"=",
"get_sec_from_morning",
"(",
"timestamp",
")",
"return",
"(",
"self",
".",
"is_valid",
"and",
"self",
".",
"hstart",
"*",
"3600",
"+",
"self",
".",
"mstart",
"*",
"60",
"<=",
"sec_from_morning",
"<=",
"self",
".",
"hend",
"*",
"3600",
"+",
"self",
".",
"mend",
"*",
"60",
")"
] |
Check if time is valid for this Timerange
If sec_from_morning is not provided, get the value.
:param timestamp: time to check
:type timestamp: int
:return: True if time is valid (in interval), False otherwise
:rtype: bool
|
[
"Check",
"if",
"time",
"is",
"valid",
"for",
"this",
"Timerange"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L268-L282
|
train
|
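The seconds-from-midnight comparison in Timerange.is_time_valid, sketched for a hypothetical 09:00-17:00 timerange:

import time

hstart, mstart, hend, mend = 9, 0, 17, 0  # a 09:00-17:00 timerange
t_lt = time.localtime()
sec = t_lt.tm_hour * 3600 + t_lt.tm_min * 60 + t_lt.tm_sec
print(hstart * 3600 + mstart * 60 <= sec <= hend * 3600 + mend * 60)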
Alignak-monitoring/alignak
|
alignak/daterange.py
|
AbstractDaterange.is_time_valid
|
def is_time_valid(self, timestamp):
"""Check if time is valid for one of the timerange.
:param timestamp: time to check
:type timestamp: int
:return: True if one of the timerange is valid for t, False otherwise
:rtype: bool
"""
if self.is_time_day_valid(timestamp):
for timerange in self.timeranges:
if timerange.is_time_valid(timestamp):
return True
return False
|
python
|
def is_time_valid(self, timestamp):
"""Check if time is valid for one of the timerange.
:param timestamp: time to check
:type timestamp: int
:return: True if one of the timerange is valid for t, False otherwise
:rtype: bool
"""
if self.is_time_day_valid(timestamp):
for timerange in self.timeranges:
if timerange.is_time_valid(timestamp):
return True
return False
|
[
"def",
"is_time_valid",
"(",
"self",
",",
"timestamp",
")",
":",
"if",
"self",
".",
"is_time_day_valid",
"(",
"timestamp",
")",
":",
"for",
"timerange",
"in",
"self",
".",
"timeranges",
":",
"if",
"timerange",
".",
"is_time_valid",
"(",
"timestamp",
")",
":",
"return",
"True",
"return",
"False"
] |
Check if time is valid for one of the timerange.
:param timestamp: time to check
:type timestamp: int
:return: True if one of the timerange is valid for t, False otherwise
:rtype: bool
|
[
"Check",
"if",
"time",
"is",
"valid",
"for",
"one",
"of",
"the",
"timerange",
"."
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L379-L391
|
train
|
Alignak-monitoring/alignak
|
alignak/daterange.py
|
AbstractDaterange.get_min_sec_from_morning
|
def get_min_sec_from_morning(self):
"""Get the first second from midnight where a timerange is effective
:return: smallest amount of second from midnight of all timerange
:rtype: int
"""
mins = []
for timerange in self.timeranges:
mins.append(timerange.get_sec_from_morning())
return min(mins)
|
python
|
def get_min_sec_from_morning(self):
"""Get the first second from midnight where a timerange is effective
:return: smallest amount of second from midnight of all timerange
:rtype: int
"""
mins = []
for timerange in self.timeranges:
mins.append(timerange.get_sec_from_morning())
return min(mins)
|
[
"def",
"get_min_sec_from_morning",
"(",
"self",
")",
":",
"mins",
"=",
"[",
"]",
"for",
"timerange",
"in",
"self",
".",
"timeranges",
":",
"mins",
".",
"append",
"(",
"timerange",
".",
"get_sec_from_morning",
"(",
")",
")",
"return",
"min",
"(",
"mins",
")"
] |
Get the first second from midnight where a timerange is effective
:return: smallest amount of second from midnight of all timerange
:rtype: int
|
[
"Get",
"the",
"first",
"second",
"from",
"midnight",
"where",
"a",
"timerange",
"is",
"effective"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L393-L402
|
train
|
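The list build above can be collapsed into a single min() over a generator; like the original, this raises ValueError when timeranges is empty, so it is a pure restatement rather than a behaviour change:

def get_min_sec_from_morning(self):
    # min() over a generator: same result, same ValueError on an empty list.
    return min(tr.get_sec_from_morning() for tr in self.timeranges)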
Alignak-monitoring/alignak
|
alignak/daterange.py
|
AbstractDaterange.is_time_day_valid
|
def is_time_day_valid(self, timestamp):
"""Check if it is within start time and end time of the DateRange
:param timestamp: time to check
:type timestamp: int
:return: True if t in range, False otherwise
:rtype: bool
"""
(start_time, end_time) = self.get_start_and_end_time(timestamp)
return start_time <= timestamp <= end_time
|
python
|
def is_time_day_valid(self, timestamp):
"""Check if it is within start time and end time of the DateRange
:param timestamp: time to check
:type timestamp: int
:return: True if t in range, False otherwise
:rtype: bool
"""
(start_time, end_time) = self.get_start_and_end_time(timestamp)
return start_time <= timestamp <= end_time
|
[
"def",
"is_time_day_valid",
"(",
"self",
",",
"timestamp",
")",
":",
"(",
"start_time",
",",
"end_time",
")",
"=",
"self",
".",
"get_start_and_end_time",
"(",
"timestamp",
")",
"return",
"start_time",
"<=",
"timestamp",
"<=",
"end_time"
] |
Check if it is within start time and end time of the DateRange
:param timestamp: time to check
:type timestamp: int
:return: True if t in range, False otherwise
:rtype: bool
|
[
"Check",
"if",
"it",
"is",
"within",
"start",
"time",
"and",
"end",
"time",
"of",
"the",
"DateRange"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L428-L437
|
train
|
Alignak-monitoring/alignak
|
alignak/daterange.py
|
AbstractDaterange.get_next_future_timerange_invalid
|
def get_next_future_timerange_invalid(self, timestamp):
"""Get next invalid time for timeranges
:param timestamp: time to check
:type timestamp: int
:return: next time when a timerange is not valid
:rtype: None | int
"""
sec_from_morning = get_sec_from_morning(timestamp)
ends = []
for timerange in self.timeranges:
tr_end = timerange.hend * 3600 + timerange.mend * 60
if tr_end >= sec_from_morning:
# Remove the last second of the day for 00->24h"
if tr_end == 86400:
tr_end = 86399
ends.append(tr_end)
if ends != []:
return min(ends)
return None
|
python
|
def get_next_future_timerange_invalid(self, timestamp):
"""Get next invalid time for timeranges
:param timestamp: time to check
:type timestamp: int
:return: next time when a timerange is not valid
:rtype: None | int
"""
sec_from_morning = get_sec_from_morning(timestamp)
ends = []
for timerange in self.timeranges:
tr_end = timerange.hend * 3600 + timerange.mend * 60
if tr_end >= sec_from_morning:
# Remove the last second of the day for 00->24h"
if tr_end == 86400:
tr_end = 86399
ends.append(tr_end)
if ends != []:
return min(ends)
return None
|
[
"def",
"get_next_future_timerange_invalid",
"(",
"self",
",",
"timestamp",
")",
":",
"sec_from_morning",
"=",
"get_sec_from_morning",
"(",
"timestamp",
")",
"ends",
"=",
"[",
"]",
"for",
"timerange",
"in",
"self",
".",
"timeranges",
":",
"tr_end",
"=",
"timerange",
".",
"hend",
"*",
"3600",
"+",
"timerange",
".",
"mend",
"*",
"60",
"if",
"tr_end",
">=",
"sec_from_morning",
":",
"# Remove the last second of the day for 00->24h\"",
"if",
"tr_end",
"==",
"86400",
":",
"tr_end",
"=",
"86399",
"ends",
".",
"append",
"(",
"tr_end",
")",
"if",
"ends",
"!=",
"[",
"]",
":",
"return",
"min",
"(",
"ends",
")",
"return",
"None"
] |
Get next invalid time for timeranges
:param timestamp: time to check
:type timestamp: int
:return: next time when a timerange is not valid
:rtype: None | int
|
[
"Get",
"next",
"invalid",
"time",
"for",
"timeranges"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L468-L488
|
train
|
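A standalone sketch of the scan above, assuming the timeranges are passed as (hend, mend) pairs. It keeps the 86400 -> 86399 clamp: 24:00 encodes as second 86400, one past the last second of the day, so a 00->24h range is pinned back to 23:59:59.

def next_timerange_end(sec_from_morning, ends_hm):
    # ends_hm: iterable of (hend, mend) pairs, as carried by Timerange objects.
    ends = []
    for hend, mend in ends_hm:
        tr_end = hend * 3600 + mend * 60
        if tr_end >= sec_from_morning:
            if tr_end == 86400:  # 24:00 -> clamp to the day's last second
                tr_end = 86399
            ends.append(tr_end)
    return min(ends) if ends else None

print(next_timerange_end(3600, [(9, 0), (24, 0)]))  # 32400 (09:00)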
Alignak-monitoring/alignak
|
alignak/daterange.py
|
AbstractDaterange.get_next_valid_day
|
def get_next_valid_day(self, timestamp):
"""Get next valid day for timerange
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next valid day (midnight) in LOCAL time.
:rtype: int | None
"""
if self.get_next_future_timerange_valid(timestamp) is None:
            # this day is finished, we check for the next period
(start_time, _) = self.get_start_and_end_time(get_day(timestamp) + 86400)
else:
(start_time, _) = self.get_start_and_end_time(timestamp)
if timestamp <= start_time:
return get_day(start_time)
if self.is_time_day_valid(timestamp):
return get_day(timestamp)
return None
|
python
|
def get_next_valid_day(self, timestamp):
"""Get next valid day for timerange
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next valid day (midnight) in LOCAL time.
:rtype: int | None
"""
if self.get_next_future_timerange_valid(timestamp) is None:
            # this day is finished, we check for the next period
(start_time, _) = self.get_start_and_end_time(get_day(timestamp) + 86400)
else:
(start_time, _) = self.get_start_and_end_time(timestamp)
if timestamp <= start_time:
return get_day(start_time)
if self.is_time_day_valid(timestamp):
return get_day(timestamp)
return None
|
[
"def",
"get_next_valid_day",
"(",
"self",
",",
"timestamp",
")",
":",
"if",
"self",
".",
"get_next_future_timerange_valid",
"(",
"timestamp",
")",
"is",
"None",
":",
"# this day is finish, we check for next period",
"(",
"start_time",
",",
"_",
")",
"=",
"self",
".",
"get_start_and_end_time",
"(",
"get_day",
"(",
"timestamp",
")",
"+",
"86400",
")",
"else",
":",
"(",
"start_time",
",",
"_",
")",
"=",
"self",
".",
"get_start_and_end_time",
"(",
"timestamp",
")",
"if",
"timestamp",
"<=",
"start_time",
":",
"return",
"get_day",
"(",
"start_time",
")",
"if",
"self",
".",
"is_time_day_valid",
"(",
"timestamp",
")",
":",
"return",
"get_day",
"(",
"timestamp",
")",
"return",
"None"
] |
Get next valid day for timerange
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next valid day (midnight) in LOCAL time.
:rtype: int | None
|
[
"Get",
"next",
"valid",
"day",
"for",
"timerange"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L490-L510
|
train
|
Alignak-monitoring/alignak
|
alignak/daterange.py
|
AbstractDaterange.get_next_valid_time_from_t
|
def get_next_valid_time_from_t(self, timestamp):
"""Get next valid time for time range
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next valid time (LOCAL TIME)
:rtype: int | None
"""
if self.is_time_valid(timestamp):
return timestamp
# First we search for the day of t
t_day = self.get_next_valid_day(timestamp)
if t_day is None:
return t_day
# We search for the min of all tr.start > sec_from_morning
# if it's the next day, use a start of the day search for timerange
if timestamp < t_day:
sec_from_morning = self.get_next_future_timerange_valid(t_day)
else: # it is in this day, so look from t (can be in the evening or so)
sec_from_morning = self.get_next_future_timerange_valid(timestamp)
if sec_from_morning is not None:
if t_day is not None and sec_from_morning is not None:
return t_day + sec_from_morning
# Then we search for the next day of t
# The sec will be the min of the day
timestamp = get_day(timestamp) + 86400
t_day2 = self.get_next_valid_day(timestamp)
sec_from_morning = self.get_next_future_timerange_valid(t_day2)
if t_day2 is not None and sec_from_morning is not None:
return t_day2 + sec_from_morning
        # I did not find any valid time
return None
|
python
|
def get_next_valid_time_from_t(self, timestamp):
"""Get next valid time for time range
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next valid time (LOCAL TIME)
:rtype: int | None
"""
if self.is_time_valid(timestamp):
return timestamp
# First we search for the day of t
t_day = self.get_next_valid_day(timestamp)
if t_day is None:
return t_day
# We search for the min of all tr.start > sec_from_morning
# if it's the next day, use a start of the day search for timerange
if timestamp < t_day:
sec_from_morning = self.get_next_future_timerange_valid(t_day)
else: # it is in this day, so look from t (can be in the evening or so)
sec_from_morning = self.get_next_future_timerange_valid(timestamp)
if sec_from_morning is not None:
if t_day is not None and sec_from_morning is not None:
return t_day + sec_from_morning
# Then we search for the next day of t
# The sec will be the min of the day
timestamp = get_day(timestamp) + 86400
t_day2 = self.get_next_valid_day(timestamp)
sec_from_morning = self.get_next_future_timerange_valid(t_day2)
if t_day2 is not None and sec_from_morning is not None:
return t_day2 + sec_from_morning
        # I did not find any valid time
return None
|
[
"def",
"get_next_valid_time_from_t",
"(",
"self",
",",
"timestamp",
")",
":",
"if",
"self",
".",
"is_time_valid",
"(",
"timestamp",
")",
":",
"return",
"timestamp",
"# First we search for the day of t",
"t_day",
"=",
"self",
".",
"get_next_valid_day",
"(",
"timestamp",
")",
"if",
"t_day",
"is",
"None",
":",
"return",
"t_day",
"# We search for the min of all tr.start > sec_from_morning",
"# if it's the next day, use a start of the day search for timerange",
"if",
"timestamp",
"<",
"t_day",
":",
"sec_from_morning",
"=",
"self",
".",
"get_next_future_timerange_valid",
"(",
"t_day",
")",
"else",
":",
"# it is in this day, so look from t (can be in the evening or so)",
"sec_from_morning",
"=",
"self",
".",
"get_next_future_timerange_valid",
"(",
"timestamp",
")",
"if",
"sec_from_morning",
"is",
"not",
"None",
":",
"if",
"t_day",
"is",
"not",
"None",
"and",
"sec_from_morning",
"is",
"not",
"None",
":",
"return",
"t_day",
"+",
"sec_from_morning",
"# Then we search for the next day of t",
"# The sec will be the min of the day",
"timestamp",
"=",
"get_day",
"(",
"timestamp",
")",
"+",
"86400",
"t_day2",
"=",
"self",
".",
"get_next_valid_day",
"(",
"timestamp",
")",
"sec_from_morning",
"=",
"self",
".",
"get_next_future_timerange_valid",
"(",
"t_day2",
")",
"if",
"t_day2",
"is",
"not",
"None",
"and",
"sec_from_morning",
"is",
"not",
"None",
":",
"return",
"t_day2",
"+",
"sec_from_morning",
"# I did not found any valid time",
"return",
"None"
] |
Get next valid time for time range
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next valid time (LOCAL TIME)
:rtype: int | None
|
[
"Get",
"next",
"valid",
"time",
"for",
"time",
"range"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L512-L548
|
train
|
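The method above follows a two-pass pattern that recurs in this module: try to resolve a time inside the candidate day, and if that fails, restart the search from the next day's midnight. A generic sketch with the collaborators passed in as callables (the parameter names are mine, not the module's):

DAY = 86400

def next_valid_time(timestamp, is_valid, next_valid_day, next_sec, get_day):
    if is_valid(timestamp):
        return timestamp
    # Pass 1: resolve inside the day returned for `timestamp`.
    day = next_valid_day(timestamp)
    if day is None:
        return None
    probe = day if timestamp < day else timestamp
    sec = next_sec(probe)
    if sec is not None:
        return day + sec
    # Pass 2: restart from the following day's midnight.
    day = next_valid_day(get_day(timestamp) + DAY)
    sec = next_sec(day) if day is not None else None
    if day is not None and sec is not None:
        return day + sec
    return None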
Alignak-monitoring/alignak
|
alignak/daterange.py
|
AbstractDaterange.get_next_invalid_day
|
def get_next_invalid_day(self, timestamp):
# pylint: disable=no-else-return
"""Get next day where timerange is not active
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next invalid day (midnight) in LOCAL time.
:rtype: int | None
"""
if self.is_time_day_invalid(timestamp):
return timestamp
next_future_timerange_invalid = self.get_next_future_timerange_invalid(timestamp)
# If today there is no more unavailable timerange, search the next day
if next_future_timerange_invalid is None:
            # this day is finished, we check for the next period
(start_time, end_time) = self.get_start_and_end_time(get_day(timestamp))
else:
(start_time, end_time) = self.get_start_and_end_time(timestamp)
# (start_time, end_time) = self.get_start_and_end_time(t)
        # The next invalid day can be the day of t if there is a possible
        # invalid time range (the timerange is not 00->24)
if next_future_timerange_invalid is not None:
if start_time <= timestamp <= end_time:
return get_day(timestamp)
if start_time >= timestamp:
return get_day(start_time)
else:
                # Else, there is no possibility that within our start_time<->end_time we get
                # any invalid time (full period out). So it's end_time+1 sec (the day after end_time)
return get_day(end_time + 1)
return None
|
python
|
def get_next_invalid_day(self, timestamp):
# pylint: disable=no-else-return
"""Get next day where timerange is not active
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next invalid day (midnight) in LOCAL time.
:rtype: int | None
"""
if self.is_time_day_invalid(timestamp):
return timestamp
next_future_timerange_invalid = self.get_next_future_timerange_invalid(timestamp)
# If today there is no more unavailable timerange, search the next day
if next_future_timerange_invalid is None:
            # this day is finished, we check for the next period
(start_time, end_time) = self.get_start_and_end_time(get_day(timestamp))
else:
(start_time, end_time) = self.get_start_and_end_time(timestamp)
# (start_time, end_time) = self.get_start_and_end_time(t)
        # The next invalid day can be the day of t if there is a possible
        # invalid time range (the timerange is not 00->24)
if next_future_timerange_invalid is not None:
if start_time <= timestamp <= end_time:
return get_day(timestamp)
if start_time >= timestamp:
return get_day(start_time)
else:
                # Else, there is no possibility that within our start_time<->end_time we get
                # any invalid time (full period out). So it's end_time+1 sec (the day after end_time)
return get_day(end_time + 1)
return None
|
[
"def",
"get_next_invalid_day",
"(",
"self",
",",
"timestamp",
")",
":",
"# pylint: disable=no-else-return",
"if",
"self",
".",
"is_time_day_invalid",
"(",
"timestamp",
")",
":",
"return",
"timestamp",
"next_future_timerange_invalid",
"=",
"self",
".",
"get_next_future_timerange_invalid",
"(",
"timestamp",
")",
"# If today there is no more unavailable timerange, search the next day",
"if",
"next_future_timerange_invalid",
"is",
"None",
":",
"# this day is finish, we check for next period",
"(",
"start_time",
",",
"end_time",
")",
"=",
"self",
".",
"get_start_and_end_time",
"(",
"get_day",
"(",
"timestamp",
")",
")",
"else",
":",
"(",
"start_time",
",",
"end_time",
")",
"=",
"self",
".",
"get_start_and_end_time",
"(",
"timestamp",
")",
"# (start_time, end_time) = self.get_start_and_end_time(t)",
"# The next invalid day can be t day if there a possible",
"# invalid time range (timerange is not 00->24",
"if",
"next_future_timerange_invalid",
"is",
"not",
"None",
":",
"if",
"start_time",
"<=",
"timestamp",
"<=",
"end_time",
":",
"return",
"get_day",
"(",
"timestamp",
")",
"if",
"start_time",
">=",
"timestamp",
":",
"return",
"get_day",
"(",
"start_time",
")",
"else",
":",
"# Else, there is no possibility than in our start_time<->end_time we got",
"# any invalid time (full period out). So it's end_time+1 sec (tomorrow of end_time)",
"return",
"get_day",
"(",
"end_time",
"+",
"1",
")",
"return",
"None"
] |
Get next day where timerange is not active
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next invalid day (midnight) in LOCAL time.
:rtype: int | None
|
[
"Get",
"next",
"day",
"where",
"timerange",
"is",
"not",
"active"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L550-L584
|
train
|
Alignak-monitoring/alignak
|
alignak/daterange.py
|
AbstractDaterange.get_next_invalid_time_from_t
|
def get_next_invalid_time_from_t(self, timestamp):
"""Get next invalid time for time range
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next invalid time (LOCAL TIME)
:rtype: int
"""
if not self.is_time_valid(timestamp):
return timestamp
# First we search for the day of time range
t_day = self.get_next_invalid_day(timestamp)
# We search for the min of all tr.start > sec_from_morning
# if it's the next day, use a start of the day search for timerange
if timestamp < t_day:
sec_from_morning = self.get_next_future_timerange_invalid(t_day)
else: # it is in this day, so look from t (can be in the evening or so)
sec_from_morning = self.get_next_future_timerange_invalid(timestamp)
        # tr can't be valid, or it would have been returned at the beginning
# sec_from_morning = self.get_next_future_timerange_invalid(t)
        # Ok we've got a next invalid day and an invalid possibility in
# timerange, so the next invalid is this day+sec_from_morning
if t_day is not None and sec_from_morning is not None:
return t_day + sec_from_morning + 1
# We've got a day but no sec_from_morning: the timerange is full (0->24h)
# so the next invalid is this day at the day_start
if t_day is not None and sec_from_morning is None:
return t_day
# Then we search for the next day of t
# The sec will be the min of the day
timestamp = get_day(timestamp) + 86400
t_day2 = self.get_next_invalid_day(timestamp)
sec_from_morning = self.get_next_future_timerange_invalid(t_day2)
if t_day2 is not None and sec_from_morning is not None:
return t_day2 + sec_from_morning + 1
if t_day2 is not None and sec_from_morning is None:
return t_day2
        # I did not find any valid time
return None
|
python
|
def get_next_invalid_time_from_t(self, timestamp):
"""Get next invalid time for time range
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next invalid time (LOCAL TIME)
:rtype: int
"""
if not self.is_time_valid(timestamp):
return timestamp
# First we search for the day of time range
t_day = self.get_next_invalid_day(timestamp)
# We search for the min of all tr.start > sec_from_morning
# if it's the next day, use a start of the day search for timerange
if timestamp < t_day:
sec_from_morning = self.get_next_future_timerange_invalid(t_day)
else: # it is in this day, so look from t (can be in the evening or so)
sec_from_morning = self.get_next_future_timerange_invalid(timestamp)
        # tr can't be valid, or it would have been returned at the beginning
# sec_from_morning = self.get_next_future_timerange_invalid(t)
        # Ok we've got a next invalid day and an invalid possibility in
# timerange, so the next invalid is this day+sec_from_morning
if t_day is not None and sec_from_morning is not None:
return t_day + sec_from_morning + 1
# We've got a day but no sec_from_morning: the timerange is full (0->24h)
# so the next invalid is this day at the day_start
if t_day is not None and sec_from_morning is None:
return t_day
# Then we search for the next day of t
# The sec will be the min of the day
timestamp = get_day(timestamp) + 86400
t_day2 = self.get_next_invalid_day(timestamp)
sec_from_morning = self.get_next_future_timerange_invalid(t_day2)
if t_day2 is not None and sec_from_morning is not None:
return t_day2 + sec_from_morning + 1
if t_day2 is not None and sec_from_morning is None:
return t_day2
        # I did not find any valid time
return None
|
[
"def",
"get_next_invalid_time_from_t",
"(",
"self",
",",
"timestamp",
")",
":",
"if",
"not",
"self",
".",
"is_time_valid",
"(",
"timestamp",
")",
":",
"return",
"timestamp",
"# First we search for the day of time range",
"t_day",
"=",
"self",
".",
"get_next_invalid_day",
"(",
"timestamp",
")",
"# We search for the min of all tr.start > sec_from_morning",
"# if it's the next day, use a start of the day search for timerange",
"if",
"timestamp",
"<",
"t_day",
":",
"sec_from_morning",
"=",
"self",
".",
"get_next_future_timerange_invalid",
"(",
"t_day",
")",
"else",
":",
"# it is in this day, so look from t (can be in the evening or so)",
"sec_from_morning",
"=",
"self",
".",
"get_next_future_timerange_invalid",
"(",
"timestamp",
")",
"# tr can't be valid, or it will be return at the beginning",
"# sec_from_morning = self.get_next_future_timerange_invalid(t)",
"# Ok we've got a next invalid day and a invalid possibility in",
"# timerange, so the next invalid is this day+sec_from_morning",
"if",
"t_day",
"is",
"not",
"None",
"and",
"sec_from_morning",
"is",
"not",
"None",
":",
"return",
"t_day",
"+",
"sec_from_morning",
"+",
"1",
"# We've got a day but no sec_from_morning: the timerange is full (0->24h)",
"# so the next invalid is this day at the day_start",
"if",
"t_day",
"is",
"not",
"None",
"and",
"sec_from_morning",
"is",
"None",
":",
"return",
"t_day",
"# Then we search for the next day of t",
"# The sec will be the min of the day",
"timestamp",
"=",
"get_day",
"(",
"timestamp",
")",
"+",
"86400",
"t_day2",
"=",
"self",
".",
"get_next_invalid_day",
"(",
"timestamp",
")",
"sec_from_morning",
"=",
"self",
".",
"get_next_future_timerange_invalid",
"(",
"t_day2",
")",
"if",
"t_day2",
"is",
"not",
"None",
"and",
"sec_from_morning",
"is",
"not",
"None",
":",
"return",
"t_day2",
"+",
"sec_from_morning",
"+",
"1",
"if",
"t_day2",
"is",
"not",
"None",
"and",
"sec_from_morning",
"is",
"None",
":",
"return",
"t_day2",
"# I did not found any valid time",
"return",
"None"
] |
Get next invalid time for time range
:param timestamp: time we compute from
:type timestamp: int
:return: timestamp of the next invalid time (LOCAL TIME)
:rtype: int
|
[
"Get",
"next",
"invalid",
"time",
"for",
"time",
"range"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L586-L632
|
train
|
Alignak-monitoring/alignak
|
alignak/daterange.py
|
CalendarDaterange.get_start_and_end_time
|
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for CalendarDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
"""
return (get_start_of_day(self.syear, int(self.smon), self.smday),
get_end_of_day(self.eyear, int(self.emon), self.emday))
|
python
|
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for CalendarDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
"""
return (get_start_of_day(self.syear, int(self.smon), self.smday),
get_end_of_day(self.eyear, int(self.emon), self.emday))
|
[
"def",
"get_start_and_end_time",
"(",
"self",
",",
"ref",
"=",
"None",
")",
":",
"return",
"(",
"get_start_of_day",
"(",
"self",
".",
"syear",
",",
"int",
"(",
"self",
".",
"smon",
")",
",",
"self",
".",
"smday",
")",
",",
"get_end_of_day",
"(",
"self",
".",
"eyear",
",",
"int",
"(",
"self",
".",
"emon",
")",
",",
"self",
".",
"emday",
")",
")"
] |
Specific function to get start time and end time for CalendarDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
|
[
"Specific",
"function",
"to",
"get",
"start",
"time",
"and",
"end",
"time",
"for",
"CalendarDaterange"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L728-L737
|
train
|
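CalendarDaterange just brackets the configured dates with the module's day-boundary helpers. A sketch of what those helpers plausibly compute (local time; these bodies are illustrative, the real ones live elsewhere in daterange.py):

import time

def get_start_of_day(year, month, day):
    # Midnight, local time, as a Unix timestamp.
    return int(time.mktime((year, month, day, 0, 0, 0, 0, 0, -1)))

def get_end_of_day(year, month, day):
    # Last second of the day (23:59:59), local time.
    return int(time.mktime((year, month, day, 23, 59, 59, 0, 0, -1)))

# 86399 seconds apart on days without a DST transition.
print(get_end_of_day(2019, 1, 7) - get_start_of_day(2019, 1, 7))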
Alignak-monitoring/alignak
|
alignak/daterange.py
|
StandardDaterange.get_start_and_end_time
|
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for StandardDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
"""
now = time.localtime(ref)
self.syear = now.tm_year
self.month = now.tm_mon
self.wday = now.tm_wday
day_id = Daterange.get_weekday_id(self.day)
today_morning = get_start_of_day(now.tm_year, now.tm_mon, now.tm_mday)
tonight = get_end_of_day(now.tm_year, now.tm_mon, now.tm_mday)
day_diff = (day_id - now.tm_wday) % 7
morning = datetime.fromtimestamp(today_morning) + timedelta(days=day_diff)
night = datetime.fromtimestamp(tonight) + timedelta(days=day_diff)
return (int(morning.strftime("%s")), int(night.strftime("%s")))
|
python
|
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for StandardDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
"""
now = time.localtime(ref)
self.syear = now.tm_year
self.month = now.tm_mon
self.wday = now.tm_wday
day_id = Daterange.get_weekday_id(self.day)
today_morning = get_start_of_day(now.tm_year, now.tm_mon, now.tm_mday)
tonight = get_end_of_day(now.tm_year, now.tm_mon, now.tm_mday)
day_diff = (day_id - now.tm_wday) % 7
morning = datetime.fromtimestamp(today_morning) + timedelta(days=day_diff)
night = datetime.fromtimestamp(tonight) + timedelta(days=day_diff)
return (int(morning.strftime("%s")), int(night.strftime("%s")))
|
[
"def",
"get_start_and_end_time",
"(",
"self",
",",
"ref",
"=",
"None",
")",
":",
"now",
"=",
"time",
".",
"localtime",
"(",
"ref",
")",
"self",
".",
"syear",
"=",
"now",
".",
"tm_year",
"self",
".",
"month",
"=",
"now",
".",
"tm_mon",
"self",
".",
"wday",
"=",
"now",
".",
"tm_wday",
"day_id",
"=",
"Daterange",
".",
"get_weekday_id",
"(",
"self",
".",
"day",
")",
"today_morning",
"=",
"get_start_of_day",
"(",
"now",
".",
"tm_year",
",",
"now",
".",
"tm_mon",
",",
"now",
".",
"tm_mday",
")",
"tonight",
"=",
"get_end_of_day",
"(",
"now",
".",
"tm_year",
",",
"now",
".",
"tm_mon",
",",
"now",
".",
"tm_mday",
")",
"day_diff",
"=",
"(",
"day_id",
"-",
"now",
".",
"tm_wday",
")",
"%",
"7",
"morning",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"today_morning",
")",
"+",
"timedelta",
"(",
"days",
"=",
"day_diff",
")",
"night",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"tonight",
")",
"+",
"timedelta",
"(",
"days",
"=",
"day_diff",
")",
"return",
"(",
"int",
"(",
"morning",
".",
"strftime",
"(",
"\"%s\"",
")",
")",
",",
"int",
"(",
"night",
".",
"strftime",
"(",
"\"%s\"",
")",
")",
")"
] |
Specific function to get start time and end time for StandardDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
|
[
"Specific",
"function",
"to",
"get",
"start",
"time",
"and",
"end",
"time",
"for",
"StandardDaterange"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L794-L812
|
train
|
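Two details above deserve a note. First, (day_id - now.tm_wday) % 7 is the number of days until the next occurrence of the target weekday, 0 when today already matches. Second, strftime("%s") relies on a platform extension (glibc) rather than a documented Python format code; datetime.timestamp() is the portable way to get the epoch value. A minimal sketch of the weekday arithmetic:

from datetime import datetime, timedelta

def next_weekday(ref, target_wday):
    # target_wday: Monday == 0 ... Sunday == 6 (same numbering as tm_wday).
    day_diff = (target_wday - ref.weekday()) % 7  # 0 if ref is already that day
    return ref + timedelta(days=day_diff)

d = next_weekday(datetime(2019, 1, 7), 2)  # Monday -> next Wednesday
print(d.date(), d.timestamp())             # 2019-01-09, portable epoch value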
Alignak-monitoring/alignak
|
alignak/daterange.py
|
MonthWeekDayDaterange.get_start_and_end_time
|
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for MonthWeekDayDaterange
:param ref: time in seconds
:type ref: int | None
:return: tuple with start and end time
:rtype: tuple
"""
now = time.localtime(ref)
if self.syear == 0:
self.syear = now.tm_year
day_start = find_day_by_weekday_offset(self.syear, self.smon, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, self.smon, day_start)
if self.eyear == 0:
self.eyear = now.tm_year
day_end = find_day_by_weekday_offset(self.eyear, self.emon, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, self.emon, day_end)
now_epoch = time.mktime(now)
if start_time > end_time: # the period is between years
if now_epoch > end_time: # check for next year
day_end = find_day_by_weekday_offset(self.eyear + 1,
self.emon, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
else:
                # it's just that the start was in the last year
day_start = find_day_by_weekday_offset(self.syear - 1,
self.smon, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear - 1, self.smon, day_start)
else:
if now_epoch > end_time:
# just have to check for next year if necessary
day_start = find_day_by_weekday_offset(self.syear + 1,
self.smon, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear + 1, self.smon, day_start)
day_end = find_day_by_weekday_offset(self.eyear + 1,
self.emon, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
return (start_time, end_time)
|
python
|
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for MonthWeekDayDaterange
:param ref: time in seconds
:type ref: int | None
:return: tuple with start and end time
:rtype: tuple
"""
now = time.localtime(ref)
if self.syear == 0:
self.syear = now.tm_year
day_start = find_day_by_weekday_offset(self.syear, self.smon, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, self.smon, day_start)
if self.eyear == 0:
self.eyear = now.tm_year
day_end = find_day_by_weekday_offset(self.eyear, self.emon, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, self.emon, day_end)
now_epoch = time.mktime(now)
if start_time > end_time: # the period is between years
if now_epoch > end_time: # check for next year
day_end = find_day_by_weekday_offset(self.eyear + 1,
self.emon, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
else:
                # it's just that the start was in the last year
day_start = find_day_by_weekday_offset(self.syear - 1,
self.smon, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear - 1, self.smon, day_start)
else:
if now_epoch > end_time:
# just have to check for next year if necessary
day_start = find_day_by_weekday_offset(self.syear + 1,
self.smon, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear + 1, self.smon, day_start)
day_end = find_day_by_weekday_offset(self.eyear + 1,
self.emon, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
return (start_time, end_time)
|
[
"def",
"get_start_and_end_time",
"(",
"self",
",",
"ref",
"=",
"None",
")",
":",
"now",
"=",
"time",
".",
"localtime",
"(",
"ref",
")",
"if",
"self",
".",
"syear",
"==",
"0",
":",
"self",
".",
"syear",
"=",
"now",
".",
"tm_year",
"day_start",
"=",
"find_day_by_weekday_offset",
"(",
"self",
".",
"syear",
",",
"self",
".",
"smon",
",",
"self",
".",
"swday",
",",
"self",
".",
"swday_offset",
")",
"start_time",
"=",
"get_start_of_day",
"(",
"self",
".",
"syear",
",",
"self",
".",
"smon",
",",
"day_start",
")",
"if",
"self",
".",
"eyear",
"==",
"0",
":",
"self",
".",
"eyear",
"=",
"now",
".",
"tm_year",
"day_end",
"=",
"find_day_by_weekday_offset",
"(",
"self",
".",
"eyear",
",",
"self",
".",
"emon",
",",
"self",
".",
"ewday",
",",
"self",
".",
"ewday_offset",
")",
"end_time",
"=",
"get_end_of_day",
"(",
"self",
".",
"eyear",
",",
"self",
".",
"emon",
",",
"day_end",
")",
"now_epoch",
"=",
"time",
".",
"mktime",
"(",
"now",
")",
"if",
"start_time",
">",
"end_time",
":",
"# the period is between years",
"if",
"now_epoch",
">",
"end_time",
":",
"# check for next year",
"day_end",
"=",
"find_day_by_weekday_offset",
"(",
"self",
".",
"eyear",
"+",
"1",
",",
"self",
".",
"emon",
",",
"self",
".",
"ewday",
",",
"self",
".",
"ewday_offset",
")",
"end_time",
"=",
"get_end_of_day",
"(",
"self",
".",
"eyear",
"+",
"1",
",",
"self",
".",
"emon",
",",
"day_end",
")",
"else",
":",
"# it s just that the start was the last year",
"day_start",
"=",
"find_day_by_weekday_offset",
"(",
"self",
".",
"syear",
"-",
"1",
",",
"self",
".",
"smon",
",",
"self",
".",
"swday",
",",
"self",
".",
"swday_offset",
")",
"start_time",
"=",
"get_start_of_day",
"(",
"self",
".",
"syear",
"-",
"1",
",",
"self",
".",
"smon",
",",
"day_start",
")",
"else",
":",
"if",
"now_epoch",
">",
"end_time",
":",
"# just have to check for next year if necessary",
"day_start",
"=",
"find_day_by_weekday_offset",
"(",
"self",
".",
"syear",
"+",
"1",
",",
"self",
".",
"smon",
",",
"self",
".",
"swday",
",",
"self",
".",
"swday_offset",
")",
"start_time",
"=",
"get_start_of_day",
"(",
"self",
".",
"syear",
"+",
"1",
",",
"self",
".",
"smon",
",",
"day_start",
")",
"day_end",
"=",
"find_day_by_weekday_offset",
"(",
"self",
".",
"eyear",
"+",
"1",
",",
"self",
".",
"emon",
",",
"self",
".",
"ewday",
",",
"self",
".",
"ewday_offset",
")",
"end_time",
"=",
"get_end_of_day",
"(",
"self",
".",
"eyear",
"+",
"1",
",",
"self",
".",
"emon",
",",
"day_end",
")",
"return",
"(",
"start_time",
",",
"end_time",
")"
] |
Specific function to get start time and end time for MonthWeekDayDaterange
:param ref: time in seconds
:type ref: int | None
:return: tuple with start and end time
:rtype: tuple
|
[
"Specific",
"function",
"to",
"get",
"start",
"time",
"and",
"end",
"time",
"for",
"MonthWeekDayDaterange"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L837-L878
|
train
|
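The year-straddling branch above reduces to one decision: when the computed start lands after the computed end, the period wraps over New Year, so either the end belongs to next year (we are already past it) or the start belonged to last year; otherwise a past end pushes the whole period a year forward. A compact sketch with the recomputation delegated to a callback (names are mine):

def resolve_wrapping_period(now, start, end, recompute):
    # recompute(start_year_shift, end_year_shift) -> (start, end)
    if start > end:          # wraps across New Year
        if now > end:
            return recompute(0, +1)    # the end is in the next year
        return recompute(-1, 0)        # the start was in the previous year
    if now > end:
        return recompute(+1, +1)       # whole period moves to the next year
    return start, end

# Toy calendar where a "year" is 365 units long:
print(resolve_wrapping_period(50, 80, 20,
                              lambda ds, de: (80 + 365 * ds, 20 + 365 * de)))
# (80, 385)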
Alignak-monitoring/alignak
|
alignak/daterange.py
|
MonthDateDaterange.get_start_and_end_time
|
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for MonthDateDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
"""
now = time.localtime(ref)
if self.syear == 0:
self.syear = now.tm_year
day_start = find_day_by_offset(self.syear, self.smon, self.smday)
start_time = get_start_of_day(self.syear, self.smon, day_start)
if self.eyear == 0:
self.eyear = now.tm_year
day_end = find_day_by_offset(self.eyear, self.emon, self.emday)
end_time = get_end_of_day(self.eyear, self.emon, day_end)
now_epoch = time.mktime(now)
if start_time > end_time: # the period is between years
if now_epoch > end_time:
# check for next year
day_end = find_day_by_offset(self.eyear + 1, self.emon, self.emday)
end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
else:
                # it's just that the start was in the last year
day_start = find_day_by_offset(self.syear - 1, self.smon, self.emday)
start_time = get_start_of_day(self.syear - 1, self.smon, day_start)
else:
if now_epoch > end_time:
# just have to check for next year if necessary
day_start = find_day_by_offset(self.syear + 1, self.smon, self.smday)
start_time = get_start_of_day(self.syear + 1, self.smon, day_start)
day_end = find_day_by_offset(self.eyear + 1, self.emon, self.emday)
end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
return (start_time, end_time)
|
python
|
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for MonthDateDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
"""
now = time.localtime(ref)
if self.syear == 0:
self.syear = now.tm_year
day_start = find_day_by_offset(self.syear, self.smon, self.smday)
start_time = get_start_of_day(self.syear, self.smon, day_start)
if self.eyear == 0:
self.eyear = now.tm_year
day_end = find_day_by_offset(self.eyear, self.emon, self.emday)
end_time = get_end_of_day(self.eyear, self.emon, day_end)
now_epoch = time.mktime(now)
if start_time > end_time: # the period is between years
if now_epoch > end_time:
# check for next year
day_end = find_day_by_offset(self.eyear + 1, self.emon, self.emday)
end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
else:
                # it's just that the start was in the last year
day_start = find_day_by_offset(self.syear - 1, self.smon, self.emday)
start_time = get_start_of_day(self.syear - 1, self.smon, day_start)
else:
if now_epoch > end_time:
# just have to check for next year if necessary
day_start = find_day_by_offset(self.syear + 1, self.smon, self.smday)
start_time = get_start_of_day(self.syear + 1, self.smon, day_start)
day_end = find_day_by_offset(self.eyear + 1, self.emon, self.emday)
end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
return (start_time, end_time)
|
[
"def",
"get_start_and_end_time",
"(",
"self",
",",
"ref",
"=",
"None",
")",
":",
"now",
"=",
"time",
".",
"localtime",
"(",
"ref",
")",
"if",
"self",
".",
"syear",
"==",
"0",
":",
"self",
".",
"syear",
"=",
"now",
".",
"tm_year",
"day_start",
"=",
"find_day_by_offset",
"(",
"self",
".",
"syear",
",",
"self",
".",
"smon",
",",
"self",
".",
"smday",
")",
"start_time",
"=",
"get_start_of_day",
"(",
"self",
".",
"syear",
",",
"self",
".",
"smon",
",",
"day_start",
")",
"if",
"self",
".",
"eyear",
"==",
"0",
":",
"self",
".",
"eyear",
"=",
"now",
".",
"tm_year",
"day_end",
"=",
"find_day_by_offset",
"(",
"self",
".",
"eyear",
",",
"self",
".",
"emon",
",",
"self",
".",
"emday",
")",
"end_time",
"=",
"get_end_of_day",
"(",
"self",
".",
"eyear",
",",
"self",
".",
"emon",
",",
"day_end",
")",
"now_epoch",
"=",
"time",
".",
"mktime",
"(",
"now",
")",
"if",
"start_time",
">",
"end_time",
":",
"# the period is between years",
"if",
"now_epoch",
">",
"end_time",
":",
"# check for next year",
"day_end",
"=",
"find_day_by_offset",
"(",
"self",
".",
"eyear",
"+",
"1",
",",
"self",
".",
"emon",
",",
"self",
".",
"emday",
")",
"end_time",
"=",
"get_end_of_day",
"(",
"self",
".",
"eyear",
"+",
"1",
",",
"self",
".",
"emon",
",",
"day_end",
")",
"else",
":",
"# it s just that start was the last year",
"day_start",
"=",
"find_day_by_offset",
"(",
"self",
".",
"syear",
"-",
"1",
",",
"self",
".",
"smon",
",",
"self",
".",
"emday",
")",
"start_time",
"=",
"get_start_of_day",
"(",
"self",
".",
"syear",
"-",
"1",
",",
"self",
".",
"smon",
",",
"day_start",
")",
"else",
":",
"if",
"now_epoch",
">",
"end_time",
":",
"# just have to check for next year if necessary",
"day_start",
"=",
"find_day_by_offset",
"(",
"self",
".",
"syear",
"+",
"1",
",",
"self",
".",
"smon",
",",
"self",
".",
"smday",
")",
"start_time",
"=",
"get_start_of_day",
"(",
"self",
".",
"syear",
"+",
"1",
",",
"self",
".",
"smon",
",",
"day_start",
")",
"day_end",
"=",
"find_day_by_offset",
"(",
"self",
".",
"eyear",
"+",
"1",
",",
"self",
".",
"emon",
",",
"self",
".",
"emday",
")",
"end_time",
"=",
"get_end_of_day",
"(",
"self",
".",
"eyear",
"+",
"1",
",",
"self",
".",
"emon",
",",
"day_end",
")",
"return",
"(",
"start_time",
",",
"end_time",
")"
] |
Specific function to get start time and end time for MonthDateDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
|
[
"Specific",
"function",
"to",
"get",
"start",
"time",
"and",
"end",
"time",
"for",
"MonthDateDaterange"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L885-L922
|
train
|
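One line above is worth flagging: the "start was the last year" branch passes self.emday to find_day_by_offset where every other start computation passes self.smday. That is how the upstream source reads at this sha, so it is reproduced verbatim here, but it looks like a copy-paste slip. For context, a hypothetical sketch of what find_day_by_offset does (positive offsets count from the 1st, negative from the month end; the real implementation is elsewhere in daterange.py):

import calendar

def find_day_by_offset(year, month, offset):
    # Hypothetical reimplementation for illustration only.
    days_in_month = calendar.monthrange(year, month)[1]
    if offset < 0:
        return days_in_month + offset + 1  # -1 -> last day of the month
    return min(offset, days_in_month)

print(find_day_by_offset(2019, 2, -1))  # 28 (last day of February 2019)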
Alignak-monitoring/alignak
|
alignak/daterange.py
|
WeekDayDaterange.get_start_and_end_time
|
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for WeekDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
"""
now = time.localtime(ref)
# If no year, it's our year
if self.syear == 0:
self.syear = now.tm_year
month_start_id = now.tm_mon
day_start = find_day_by_weekday_offset(self.syear,
month_start_id, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
# Same for end year
if self.eyear == 0:
self.eyear = now.tm_year
month_end_id = now.tm_mon
day_end = find_day_by_weekday_offset(self.eyear, month_end_id, self.ewday,
self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
# Maybe end_time is before start. So look for the
# next month
if start_time > end_time:
month_end_id += 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
day_end = find_day_by_weekday_offset(self.eyear,
month_end_id, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
now_epoch = time.mktime(now)
        # But maybe we did not look far enough. We should add a month
if end_time < now_epoch:
month_end_id += 1
month_start_id += 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
if month_start_id > 12:
month_start_id = 1
self.syear += 1
# First start
day_start = find_day_by_weekday_offset(self.syear,
month_start_id, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
# Then end
day_end = find_day_by_weekday_offset(self.eyear,
month_end_id, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
return (start_time, end_time)
|
python
|
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for WeekDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
"""
now = time.localtime(ref)
# If no year, it's our year
if self.syear == 0:
self.syear = now.tm_year
month_start_id = now.tm_mon
day_start = find_day_by_weekday_offset(self.syear,
month_start_id, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
# Same for end year
if self.eyear == 0:
self.eyear = now.tm_year
month_end_id = now.tm_mon
day_end = find_day_by_weekday_offset(self.eyear, month_end_id, self.ewday,
self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
# Maybe end_time is before start. So look for the
# next month
if start_time > end_time:
month_end_id += 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
day_end = find_day_by_weekday_offset(self.eyear,
month_end_id, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
now_epoch = time.mktime(now)
        # But maybe we did not look far enough. We should add a month
if end_time < now_epoch:
month_end_id += 1
month_start_id += 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
if month_start_id > 12:
month_start_id = 1
self.syear += 1
# First start
day_start = find_day_by_weekday_offset(self.syear,
month_start_id, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
# Then end
day_end = find_day_by_weekday_offset(self.eyear,
month_end_id, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
return (start_time, end_time)
|
[
"def",
"get_start_and_end_time",
"(",
"self",
",",
"ref",
"=",
"None",
")",
":",
"now",
"=",
"time",
".",
"localtime",
"(",
"ref",
")",
"# If no year, it's our year",
"if",
"self",
".",
"syear",
"==",
"0",
":",
"self",
".",
"syear",
"=",
"now",
".",
"tm_year",
"month_start_id",
"=",
"now",
".",
"tm_mon",
"day_start",
"=",
"find_day_by_weekday_offset",
"(",
"self",
".",
"syear",
",",
"month_start_id",
",",
"self",
".",
"swday",
",",
"self",
".",
"swday_offset",
")",
"start_time",
"=",
"get_start_of_day",
"(",
"self",
".",
"syear",
",",
"month_start_id",
",",
"day_start",
")",
"# Same for end year",
"if",
"self",
".",
"eyear",
"==",
"0",
":",
"self",
".",
"eyear",
"=",
"now",
".",
"tm_year",
"month_end_id",
"=",
"now",
".",
"tm_mon",
"day_end",
"=",
"find_day_by_weekday_offset",
"(",
"self",
".",
"eyear",
",",
"month_end_id",
",",
"self",
".",
"ewday",
",",
"self",
".",
"ewday_offset",
")",
"end_time",
"=",
"get_end_of_day",
"(",
"self",
".",
"eyear",
",",
"month_end_id",
",",
"day_end",
")",
"# Maybe end_time is before start. So look for the",
"# next month",
"if",
"start_time",
">",
"end_time",
":",
"month_end_id",
"+=",
"1",
"if",
"month_end_id",
">",
"12",
":",
"month_end_id",
"=",
"1",
"self",
".",
"eyear",
"+=",
"1",
"day_end",
"=",
"find_day_by_weekday_offset",
"(",
"self",
".",
"eyear",
",",
"month_end_id",
",",
"self",
".",
"ewday",
",",
"self",
".",
"ewday_offset",
")",
"end_time",
"=",
"get_end_of_day",
"(",
"self",
".",
"eyear",
",",
"month_end_id",
",",
"day_end",
")",
"now_epoch",
"=",
"time",
".",
"mktime",
"(",
"now",
")",
"# But maybe we look not enought far. We should add a month",
"if",
"end_time",
"<",
"now_epoch",
":",
"month_end_id",
"+=",
"1",
"month_start_id",
"+=",
"1",
"if",
"month_end_id",
">",
"12",
":",
"month_end_id",
"=",
"1",
"self",
".",
"eyear",
"+=",
"1",
"if",
"month_start_id",
">",
"12",
":",
"month_start_id",
"=",
"1",
"self",
".",
"syear",
"+=",
"1",
"# First start",
"day_start",
"=",
"find_day_by_weekday_offset",
"(",
"self",
".",
"syear",
",",
"month_start_id",
",",
"self",
".",
"swday",
",",
"self",
".",
"swday_offset",
")",
"start_time",
"=",
"get_start_of_day",
"(",
"self",
".",
"syear",
",",
"month_start_id",
",",
"day_start",
")",
"# Then end",
"day_end",
"=",
"find_day_by_weekday_offset",
"(",
"self",
".",
"eyear",
",",
"month_end_id",
",",
"self",
".",
"ewday",
",",
"self",
".",
"ewday_offset",
")",
"end_time",
"=",
"get_end_of_day",
"(",
"self",
".",
"eyear",
",",
"month_end_id",
",",
"day_end",
")",
"return",
"(",
"start_time",
",",
"end_time",
")"
] |
Specific function to get start time and end time for WeekDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
|
[
"Specific",
"function",
"to",
"get",
"start",
"time",
"and",
"end",
"time",
"for",
"WeekDayDaterange"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L929-L986
|
train
|
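The month rollover above (increment, wrap 12 -> 1, bump the year) recurs in several of these Daterange classes; pulled out as a helper it is two lines (a refactoring sketch, not upstream code):

def next_month(year, month):
    # Wrap December into January of the following year.
    if month == 12:
        return year + 1, 1
    return year, month + 1

print(next_month(2018, 12))  # (2019, 1)
print(next_month(2019, 5))   # (2019, 6)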
Alignak-monitoring/alignak
|
alignak/daterange.py
|
MonthDayDaterange.get_start_and_end_time
|
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for MonthDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
"""
now = time.localtime(ref)
if self.syear == 0:
self.syear = now.tm_year
month_start_id = now.tm_mon
day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
if self.eyear == 0:
self.eyear = now.tm_year
month_end_id = now.tm_mon
day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
now_epoch = time.mktime(now)
if start_time > end_time:
month_start_id -= 1
if month_start_id < 1:
month_start_id = 12
self.syear -= 1
day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
if end_time < now_epoch:
month_end_id += 1
month_start_id += 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
if month_start_id > 12:
month_start_id = 1
self.syear += 1
# For the start
day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
# For the end
day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
return (start_time, end_time)
|
python
|
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for MonthDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
"""
now = time.localtime(ref)
if self.syear == 0:
self.syear = now.tm_year
month_start_id = now.tm_mon
day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
if self.eyear == 0:
self.eyear = now.tm_year
month_end_id = now.tm_mon
day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
now_epoch = time.mktime(now)
if start_time > end_time:
month_start_id -= 1
if month_start_id < 1:
month_start_id = 12
self.syear -= 1
day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
if end_time < now_epoch:
month_end_id += 1
month_start_id += 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
if month_start_id > 12:
month_start_id = 1
self.syear += 1
# For the start
day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
# For the end
day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
return (start_time, end_time)
|
[
"def",
"get_start_and_end_time",
"(",
"self",
",",
"ref",
"=",
"None",
")",
":",
"now",
"=",
"time",
".",
"localtime",
"(",
"ref",
")",
"if",
"self",
".",
"syear",
"==",
"0",
":",
"self",
".",
"syear",
"=",
"now",
".",
"tm_year",
"month_start_id",
"=",
"now",
".",
"tm_mon",
"day_start",
"=",
"find_day_by_offset",
"(",
"self",
".",
"syear",
",",
"month_start_id",
",",
"self",
".",
"smday",
")",
"start_time",
"=",
"get_start_of_day",
"(",
"self",
".",
"syear",
",",
"month_start_id",
",",
"day_start",
")",
"if",
"self",
".",
"eyear",
"==",
"0",
":",
"self",
".",
"eyear",
"=",
"now",
".",
"tm_year",
"month_end_id",
"=",
"now",
".",
"tm_mon",
"day_end",
"=",
"find_day_by_offset",
"(",
"self",
".",
"eyear",
",",
"month_end_id",
",",
"self",
".",
"emday",
")",
"end_time",
"=",
"get_end_of_day",
"(",
"self",
".",
"eyear",
",",
"month_end_id",
",",
"day_end",
")",
"now_epoch",
"=",
"time",
".",
"mktime",
"(",
"now",
")",
"if",
"start_time",
">",
"end_time",
":",
"month_start_id",
"-=",
"1",
"if",
"month_start_id",
"<",
"1",
":",
"month_start_id",
"=",
"12",
"self",
".",
"syear",
"-=",
"1",
"day_start",
"=",
"find_day_by_offset",
"(",
"self",
".",
"syear",
",",
"month_start_id",
",",
"self",
".",
"smday",
")",
"start_time",
"=",
"get_start_of_day",
"(",
"self",
".",
"syear",
",",
"month_start_id",
",",
"day_start",
")",
"if",
"end_time",
"<",
"now_epoch",
":",
"month_end_id",
"+=",
"1",
"month_start_id",
"+=",
"1",
"if",
"month_end_id",
">",
"12",
":",
"month_end_id",
"=",
"1",
"self",
".",
"eyear",
"+=",
"1",
"if",
"month_start_id",
">",
"12",
":",
"month_start_id",
"=",
"1",
"self",
".",
"syear",
"+=",
"1",
"# For the start",
"day_start",
"=",
"find_day_by_offset",
"(",
"self",
".",
"syear",
",",
"month_start_id",
",",
"self",
".",
"smday",
")",
"start_time",
"=",
"get_start_of_day",
"(",
"self",
".",
"syear",
",",
"month_start_id",
",",
"day_start",
")",
"# For the end",
"day_end",
"=",
"find_day_by_offset",
"(",
"self",
".",
"eyear",
",",
"month_end_id",
",",
"self",
".",
"emday",
")",
"end_time",
"=",
"get_end_of_day",
"(",
"self",
".",
"eyear",
",",
"month_end_id",
",",
"day_end",
")",
"return",
"(",
"start_time",
",",
"end_time",
")"
] |
Specific function to get start time and end time for MonthDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
|
[
"Specific",
"function",
"to",
"get",
"start",
"time",
"and",
"end",
"time",
"for",
"MonthDayDaterange"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L993-L1042
|
train
|
Alignak-monitoring/alignak
|
alignak/external_command.py
|
ExternalCommandManager.get_unknown_check_result_brok
|
def get_unknown_check_result_brok(cmd_line):
"""Create unknown check result brok and fill it with command data
:param cmd_line: command line to extract data
:type cmd_line: str
:return: unknown check result brok
:rtype: alignak.objects.brok.Brok
"""
match = re.match(
r'^\[([0-9]{10})] PROCESS_(SERVICE)_CHECK_RESULT;'
r'([^\;]*);([^\;]*);([^\;]*);([^\|]*)(?:\|(.*))?', cmd_line)
if not match:
match = re.match(
r'^\[([0-9]{10})] PROCESS_(HOST)_CHECK_RESULT;'
r'([^\;]*);([^\;]*);([^\|]*)(?:\|(.*))?', cmd_line)
if not match:
return None
data = {
'time_stamp': int(match.group(1)),
'host_name': match.group(3),
}
if match.group(2) == 'SERVICE':
data['service_description'] = match.group(4)
data['return_code'] = match.group(5)
data['output'] = match.group(6)
data['perf_data'] = match.group(7)
else:
data['return_code'] = match.group(4)
data['output'] = match.group(5)
data['perf_data'] = match.group(6)
return Brok({'type': 'unknown_%s_check_result' % match.group(2).lower(), 'data': data})
|
python
|
def get_unknown_check_result_brok(cmd_line):
"""Create unknown check result brok and fill it with command data
:param cmd_line: command line to extract data
:type cmd_line: str
:return: unknown check result brok
:rtype: alignak.objects.brok.Brok
"""
match = re.match(
r'^\[([0-9]{10})] PROCESS_(SERVICE)_CHECK_RESULT;'
r'([^\;]*);([^\;]*);([^\;]*);([^\|]*)(?:\|(.*))?', cmd_line)
if not match:
match = re.match(
r'^\[([0-9]{10})] PROCESS_(HOST)_CHECK_RESULT;'
r'([^\;]*);([^\;]*);([^\|]*)(?:\|(.*))?', cmd_line)
if not match:
return None
data = {
'time_stamp': int(match.group(1)),
'host_name': match.group(3),
}
if match.group(2) == 'SERVICE':
data['service_description'] = match.group(4)
data['return_code'] = match.group(5)
data['output'] = match.group(6)
data['perf_data'] = match.group(7)
else:
data['return_code'] = match.group(4)
data['output'] = match.group(5)
data['perf_data'] = match.group(6)
return Brok({'type': 'unknown_%s_check_result' % match.group(2).lower(), 'data': data})
|
[
"def",
"get_unknown_check_result_brok",
"(",
"cmd_line",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"r'^\\[([0-9]{10})] PROCESS_(SERVICE)_CHECK_RESULT;'",
"r'([^\\;]*);([^\\;]*);([^\\;]*);([^\\|]*)(?:\\|(.*))?'",
",",
"cmd_line",
")",
"if",
"not",
"match",
":",
"match",
"=",
"re",
".",
"match",
"(",
"r'^\\[([0-9]{10})] PROCESS_(HOST)_CHECK_RESULT;'",
"r'([^\\;]*);([^\\;]*);([^\\|]*)(?:\\|(.*))?'",
",",
"cmd_line",
")",
"if",
"not",
"match",
":",
"return",
"None",
"data",
"=",
"{",
"'time_stamp'",
":",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
",",
"'host_name'",
":",
"match",
".",
"group",
"(",
"3",
")",
",",
"}",
"if",
"match",
".",
"group",
"(",
"2",
")",
"==",
"'SERVICE'",
":",
"data",
"[",
"'service_description'",
"]",
"=",
"match",
".",
"group",
"(",
"4",
")",
"data",
"[",
"'return_code'",
"]",
"=",
"match",
".",
"group",
"(",
"5",
")",
"data",
"[",
"'output'",
"]",
"=",
"match",
".",
"group",
"(",
"6",
")",
"data",
"[",
"'perf_data'",
"]",
"=",
"match",
".",
"group",
"(",
"7",
")",
"else",
":",
"data",
"[",
"'return_code'",
"]",
"=",
"match",
".",
"group",
"(",
"4",
")",
"data",
"[",
"'output'",
"]",
"=",
"match",
".",
"group",
"(",
"5",
")",
"data",
"[",
"'perf_data'",
"]",
"=",
"match",
".",
"group",
"(",
"6",
")",
"return",
"Brok",
"(",
"{",
"'type'",
":",
"'unknown_%s_check_result'",
"%",
"match",
".",
"group",
"(",
"2",
")",
".",
"lower",
"(",
")",
",",
"'data'",
":",
"data",
"}",
")"
] |
Create unknown check result brok and fill it with command data
:param cmd_line: command line to extract data
:type cmd_line: str
:return: unknown check result brok
:rtype: alignak.objects.brok.Brok
|
[
"Create",
"unknown",
"check",
"result",
"brok",
"and",
"fill",
"it",
"with",
"command",
"data"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L676-L710
|
train
|
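A quick exercise of the SERVICE pattern above against a Nagios-style external command line; group numbering matches the code (1 timestamp, 2 kind, 3 host, 4 service, 5 return code, 6 output, 7 perf data). The sample command line here is made up:

import re

CMD = '[1550000000] PROCESS_SERVICE_CHECK_RESULT;srv01;Load;0;OK - load fine|load1=0.1'
match = re.match(
    r'^\[([0-9]{10})] PROCESS_(SERVICE)_CHECK_RESULT;'
    r'([^\;]*);([^\;]*);([^\;]*);([^\|]*)(?:\|(.*))?', CMD)
print(match.groups())
# ('1550000000', 'SERVICE', 'srv01', 'Load', '0', 'OK - load fine', 'load1=0.1')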
Alignak-monitoring/alignak
|
alignak/objects/hostdependency.py
|
Hostdependency.get_name
|
def get_name(self):
"""Get name based on dependent_host_name and host_name attributes
Each attribute is substituted by 'unknown' if attribute does not exist
:return: dependent_host_name/host_name
:rtype: str
"""
dependent_host_name = 'unknown'
if getattr(self, 'dependent_host_name', None):
dependent_host_name = getattr(
getattr(self, 'dependent_host_name'), 'host_name', 'unknown'
)
host_name = 'unknown'
if getattr(self, 'host_name', None):
host_name = getattr(getattr(self, 'host_name'), 'host_name', 'unknown')
return dependent_host_name + '/' + host_name
|
python
|
def get_name(self):
"""Get name based on dependent_host_name and host_name attributes
Each attribute is substituted by 'unknown' if attribute does not exist
:return: dependent_host_name/host_name
:rtype: str
"""
dependent_host_name = 'unknown'
if getattr(self, 'dependent_host_name', None):
dependent_host_name = getattr(
getattr(self, 'dependent_host_name'), 'host_name', 'unknown'
)
host_name = 'unknown'
if getattr(self, 'host_name', None):
host_name = getattr(getattr(self, 'host_name'), 'host_name', 'unknown')
return dependent_host_name + '/' + host_name
|
[
"def",
"get_name",
"(",
"self",
")",
":",
"dependent_host_name",
"=",
"'unknown'",
"if",
"getattr",
"(",
"self",
",",
"'dependent_host_name'",
",",
"None",
")",
":",
"dependent_host_name",
"=",
"getattr",
"(",
"getattr",
"(",
"self",
",",
"'dependent_host_name'",
")",
",",
"'host_name'",
",",
"'unknown'",
")",
"host_name",
"=",
"'unknown'",
"if",
"getattr",
"(",
"self",
",",
"'host_name'",
",",
"None",
")",
":",
"host_name",
"=",
"getattr",
"(",
"getattr",
"(",
"self",
",",
"'host_name'",
")",
",",
"'host_name'",
",",
"'unknown'",
")",
"return",
"dependent_host_name",
"+",
"'/'",
"+",
"host_name"
] |
Get name based on dependent_host_name and host_name attributes
Each attribute is substituted by 'unknown' if attribute does not exist
:return: dependent_host_name/host_name
:rtype: str
|
[
"Get",
"name",
"based",
"on",
"dependent_host_name",
"and",
"host_name",
"attributes",
"Each",
"attribute",
"is",
"substituted",
"by",
"unknown",
"if",
"attribute",
"does",
"not",
"exist"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/hostdependency.py#L110-L125
|
train
|
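A stand-alone sketch of the defensive getattr chain in the get_name record above, using hypothetical stub classes (HostStub, HostdepStub and the sample names are illustrative, not alignak objects). The point of the nested getattr is that the attribute may hold a linkified object whose .host_name is wanted; anything without that attribute, including a bare string, falls back to 'unknown'.

class HostStub:
    """Hypothetical stand-in carrying the .host_name the getter reads."""
    def __init__(self, name):
        self.host_name = name

class HostdepStub:
    """Hypothetical stand-in reproducing the get_name() logic above."""
    def get_name(self):
        dependent_host_name = 'unknown'
        if getattr(self, 'dependent_host_name', None):
            dependent_host_name = getattr(
                getattr(self, 'dependent_host_name'), 'host_name', 'unknown')
        host_name = 'unknown'
        if getattr(self, 'host_name', None):
            host_name = getattr(getattr(self, 'host_name'), 'host_name', 'unknown')
        return dependent_host_name + '/' + host_name

dep = HostdepStub()
print(dep.get_name())                  # unknown/unknown -- nothing set yet
dep.host_name = 'server1'              # a bare string has no .host_name
print(dep.get_name())                  # unknown/unknown -- still falls back
dep.host_name = HostStub('server1')
dep.dependent_host_name = HostStub('server2')
print(dep.get_name())                  # server2/server1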
Alignak-monitoring/alignak
|
alignak/objects/hostdependency.py
|
Hostdependencies.linkify_hd_by_h
|
def linkify_hd_by_h(self, hosts):
"""Replace dependent_host_name and host_name
in host dependency by the real object
:param hosts: host list, used to look for a specific one
:type hosts: alignak.objects.host.Hosts
:return: None
"""
for hostdep in self:
try:
h_name = hostdep.host_name
dh_name = hostdep.dependent_host_name
host = hosts.find_by_name(h_name)
if host is None:
err = "Error: the host dependency got a bad host_name definition '%s'" % h_name
hostdep.add_error(err)
dephost = hosts.find_by_name(dh_name)
if dephost is None:
err = "Error: the host dependency got " \
"a bad dependent_host_name definition '%s'" % dh_name
hostdep.add_error(err)
if host:
hostdep.host_name = host.uuid
if dephost:
hostdep.dependent_host_name = dephost.uuid
except AttributeError as exp:
err = "Error: the host dependency miss a property '%s'" % exp
hostdep.add_error(err)
|
python
|
def linkify_hd_by_h(self, hosts):
"""Replace dependent_host_name and host_name
in host dependency by the real object
:param hosts: host list, used to look for a specific one
:type hosts: alignak.objects.host.Hosts
:return: None
"""
for hostdep in self:
try:
h_name = hostdep.host_name
dh_name = hostdep.dependent_host_name
host = hosts.find_by_name(h_name)
if host is None:
err = "Error: the host dependency got a bad host_name definition '%s'" % h_name
hostdep.add_error(err)
dephost = hosts.find_by_name(dh_name)
if dephost is None:
err = "Error: the host dependency got " \
"a bad dependent_host_name definition '%s'" % dh_name
hostdep.add_error(err)
if host:
hostdep.host_name = host.uuid
if dephost:
hostdep.dependent_host_name = dephost.uuid
except AttributeError as exp:
err = "Error: the host dependency miss a property '%s'" % exp
hostdep.add_error(err)
|
[
"def",
"linkify_hd_by_h",
"(",
"self",
",",
"hosts",
")",
":",
"for",
"hostdep",
"in",
"self",
":",
"try",
":",
"h_name",
"=",
"hostdep",
".",
"host_name",
"dh_name",
"=",
"hostdep",
".",
"dependent_host_name",
"host",
"=",
"hosts",
".",
"find_by_name",
"(",
"h_name",
")",
"if",
"host",
"is",
"None",
":",
"err",
"=",
"\"Error: the host dependency got a bad host_name definition '%s'\"",
"%",
"h_name",
"hostdep",
".",
"add_error",
"(",
"err",
")",
"dephost",
"=",
"hosts",
".",
"find_by_name",
"(",
"dh_name",
")",
"if",
"dephost",
"is",
"None",
":",
"err",
"=",
"\"Error: the host dependency got \"",
"\"a bad dependent_host_name definition '%s'\"",
"%",
"dh_name",
"hostdep",
".",
"add_error",
"(",
"err",
")",
"if",
"host",
":",
"hostdep",
".",
"host_name",
"=",
"host",
".",
"uuid",
"if",
"dephost",
":",
"hostdep",
".",
"dependent_host_name",
"=",
"dephost",
".",
"uuid",
"except",
"AttributeError",
"as",
"exp",
":",
"err",
"=",
"\"Error: the host dependency miss a property '%s'\"",
"%",
"exp",
"hostdep",
".",
"add_error",
"(",
"err",
")"
] |
Replace dependent_host_name and host_name
in host dependency by the real object
:param hosts: host list, used to look for a specific one
:type hosts: alignak.objects.host.Hosts
:return: None
|
[
"Replace",
"dependent_host_name",
"and",
"host_name",
"in",
"host",
"dependency",
"by",
"the",
"real",
"object"
] |
f3c145207e83159b799d3714e4241399c7740a64
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/hostdependency.py#L224-L251
|
train
|
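The linkification above follows a recurring pattern in this codebase: configuration items start out holding names, and a resolution pass swaps each name for the uuid of the matching object, accumulating errors through add_error() instead of raising. A minimal sketch of that pattern with hypothetical stand-in classes (Hosts.find_by_name and the uuid field mirror the code above; nothing here is imported from alignak):

import uuid

class Host:
    def __init__(self, name):
        self.host_name = name
        self.uuid = uuid.uuid4().hex

class Hosts:
    """Name-indexed collection exposing the find_by_name lookup used above."""
    def __init__(self, hosts):
        self._by_name = {h.host_name: h for h in hosts}
    def find_by_name(self, name):
        return self._by_name.get(name)

class Hostdep:
    def __init__(self, host_name, dependent_host_name):
        self.host_name = host_name
        self.dependent_host_name = dependent_host_name
        self.errors = []
    def add_error(self, err):
        self.errors.append(err)

def linkify_hd_by_h(hostdeps, hosts):
    """Swap names for uuids; collect an error per unresolved name."""
    for hostdep in hostdeps:
        host = hosts.find_by_name(hostdep.host_name)
        if host is None:
            hostdep.add_error("bad host_name definition '%s'" % hostdep.host_name)
        else:
            hostdep.host_name = host.uuid
        dephost = hosts.find_by_name(hostdep.dependent_host_name)
        if dephost is None:
            hostdep.add_error("bad dependent_host_name definition '%s'"
                              % hostdep.dependent_host_name)
        else:
            hostdep.dependent_host_name = dephost.uuid

hosts = Hosts([Host('server1'), Host('server2')])
deps = [Hostdep('server1', 'server2'), Hostdep('server1', 'ghost')]
linkify_hd_by_h(deps, hosts)
print([len(d.errors) for d in deps])   # [0, 1] -- 'ghost' did not resolve

The original additionally wraps the loop body in a try/except AttributeError to flag dependencies that are missing a property entirely; the sketch omits that guard for brevity.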